blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 777 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 149 values | src_encoding stringclasses 26 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 3 10.2M | extension stringclasses 188 values | content stringlengths 3 10.2M | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
c0a93e368941fa37baddd6151ac2f3deaa92f650 | 1adc05008f0caa9a81cc4fc3a737fcbcebb68995 | /hardhat/recipes/rpkg/nipals.py | 75d104e2c794d340891ef8ba4750caa91d159918 | [
"MIT",
"BSD-3-Clause"
] | permissive | stangelandcl/hardhat | 4aa995518697d19b179c64751108963fa656cfca | 1ad0c5dec16728c0243023acb9594f435ef18f9c | refs/heads/master | 2021-01-11T17:19:41.988477 | 2019-03-22T22:18:44 | 2019-03-22T22:18:52 | 79,742,340 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 689 | py | from .base import RPackageBase
from ..base import SourceMixin
class NipalsRecipe(RPackageBase):
    """Build recipe for the CRAN package 'nipals' (NIPALS PCA), version 0.4."""

    def __init__(self, *args, **kwargs):
        super(NipalsRecipe, self).__init__(*args, **kwargs)
        # SHA-256 checksum of the source tarball, split across two string
        # literals purely for line length.
        self.sha256 = 'bae93f8254166ee62ced3ae372c25271' \
                      '3945f5fc51f8303ba574744264ed3241'
        self.name = 'r-nipals'
        self.version = '0.4'
        # '$version' is substituted by the recipe machinery when the
        # tarball is fetched.
        self.url = 'https://cran.r-project.org/src/contrib/' \
                   'nipals_$version.tar.gz'
        # NOTE(review): presumably drops cross-compile flags because R
        # packages are configured for the build host — confirm in base class.
        self.configure_strip_cross_compile()
class NipalsSourceRecipe(SourceMixin, NipalsRecipe):
    """Source-build variant of NipalsRecipe; behaviour is contributed by
    SourceMixin, no extra configuration is needed here."""

    def __init__(self, *args, **kwargs):
        super(NipalsSourceRecipe, self).__init__(*args, **kwargs)
| [
"clayton.stangeland@gmail.com"
] | clayton.stangeland@gmail.com |
0c5af194c9baa72bf561be3829d0c96e8dfe3a76 | a67a987ed078da0a1de2908c8c0e08070dee65b1 | /genice/lattices/Struct57.py | 8ab6ddccad2875c664059d05fa3fe325fbd074fa | [] | no_license | Python3pkg/GenIce | ef1ce7ee2997c10e08dde75ac36050a653cd4fc5 | 1e9458b7bf8e0fd2ad5d0c4f8987cea0ae7ca0b0 | refs/heads/master | 2021-01-21T17:31:51.595858 | 2017-05-21T14:09:32 | 2017-05-21T14:09:32 | 91,962,047 | 0 | 0 | null | 2017-05-21T14:09:28 | 2017-05-21T14:09:28 | null | UTF-8 | Python | false | false | 6,675 | py | """
Data source: Dutour Sikirić, Mathieu, Olaf Delgado-Friedrichs, and Michel Deza. “Space Fullerenes: a Computer Search for New Frank-Kasper Structures” Acta Crystallographica Section A Foundations of Crystallography 66.Pt 5 (2010): 602–615.
Cage composition:
(12,14,15,16) = (12,12,0,4,)
"""
pairs="""
8 79
132 124
126 134
61 149
49 142
48 36
10 159
136 104
110 3
28 18
29 3
41 149
20 140
67 72
28 136
34 68
139 81
152 71
20 12
131 133
104 49
72 14
109 53
152 133
37 73
86 121
13 133
134 145
108 55
106 54
147 127
135 151
32 153
21 45
85 132
8 55
86 137
92 48
41 112
126 14
69 9
159 142
109 47
83 94
86 96
17 58
13 19
102 143
109 159
46 149
28 82
78 50
115 70
108 142
97 120
44 53
24 61
17 158
5 31
115 126
0 105
137 101
60 89
31 53
40 95
48 156
117 76
26 49
35 3
35 73
96 103
84 89
110 11
127 68
121 41
78 36
157 52
144 96
65 148
69 112
20 30
83 46
29 89
16 73
92 102
22 74
100 155
109 148
100 157
63 62
123 47
117 87
108 15
106 21
124 75
1 150
156 143
107 25
22 87
11 0
106 74
52 111
146 111
133 81
25 57
91 140
59 130
125 116
96 87
144 49
26 103
0 129
68 71
2 131
27 34
18 137
148 57
115 153
144 33
67 73
61 39
131 45
125 87
43 45
7 18
23 8
147 24
8 113
62 122
29 127
75 46
141 3
154 25
74 77
1 54
86 139
11 113
34 155
9 104
107 32
22 57
156 50
44 66
60 135
93 101
93 88
6 103
148 77
156 32
100 72
29 130
102 123
10 26
136 78
90 61
107 93
134 32
7 40
26 76
150 56
84 158
154 82
123 129
89 138
125 131
124 40
35 99
27 98
5 159
151 146
55 70
121 39
64 132
134 66
90 98
128 84
52 38
92 4
67 62
94 152
81 39
78 65
93 85
38 79
104 51
120 98
130 24
64 46
80 119
55 51
97 157
88 9
13 95
18 41
7 54
125 139
23 42
74 103
30 79
2 95
27 40
10 77
37 63
12 35
152 43
16 59
33 105
36 77
52 84
147 19
13 68
150 75
114 124
16 94
136 101
7 56
138 111
6 116
145 53
118 70
15 138
155 62
116 76
129 118
16 141
4 47
145 65
9 76
141 71
150 122
48 25
63 75
119 38
97 128
153 51
108 11
64 139
129 151
42 143
141 135
83 81
12 90
27 63
60 59
50 51
110 138
54 82
2 22
132 137
44 146
58 122
17 71
67 140
30 99
4 117
119 14
21 19
144 101
91 79
1 45
107 65
60 118
85 69
23 50
56 149
64 112
106 95
114 154
157 30
142 105
72 99
33 88
58 43
126 113
114 1
114 2
58 128
0 115
4 57
19 39
116 112
59 140
44 123
80 146
98 56
97 34
91 14
117 33
23 145
31 111
15 5
21 6
120 122
128 127
6 121
147 43
66 143
80 158
28 69
47 105
91 70
12 130
90 37
99 113
94 24
31 42
85 154
110 151
37 83
42 38
15 118
20 120
92 10
66 119
135 158
100 80
88 153
17 155
82 36
102 5
"""
waters="""
0.8125 0.80241 0.19017
0.5 0.25631 0.72057
0.8125 0.19759 0.80983
0.0 0.57611 0.125
0.6875 0.97648 0.9375
0.3125 0.82131 0.94195
0.3125 0.17869 0.05805
0.1875 0.24211 0.56695
0.1875 0.75789 0.43305
0.5 0.01882 0.28211
0.3125 0.97648 0.9375
0.0 0.74369 0.22057
0.1875 0.50402 0.3125
0.0 0.33606 0.9033
0.6875 0.67813 0.49612
0.3125 0.75789 0.06695
0.6875 0.50402 0.1875
0.6875 0.49598 0.8125
0.1875 0.17869 0.44195
0.1875 0.32187 0.99612
0.3125 0.53806 0.4375
0.3125 0.24211 0.93305
0.875 0.11065 0.87233
0.1875 0.82131 0.55805
0.375 0.41372 0.15533
0.6875 0.03293 0.6875
0.3125 0.02352 0.0625
0.0 0.37164 0.59468
0.3125 0.10594 0.47228
0.1875 0.53806 0.0625
0.1875 0.60732 0.49879
0.1875 0.77679 0.81517
0.6875 0.89406 0.52773
0.8125 0.96707 0.1875
0.0 0.44091 0.71903
0.0 0.55909 0.28097
0.3125 0.03293 0.6875
0.875 0.41372 0.34468
0.375 0.69233 0.6533
0.125 0.30767 0.1533
0.0 0.27487 0.62678
0.3125 0.22321 0.31517
0.3125 0.77679 0.68483
0.5 0.37164 0.90533
0.8125 0.77679 0.81517
0.5 0.27487 0.87322
0.625 0.30767 0.3467
0.8125 0.89406 0.97228
0.5 0.98118 0.7179
0.1875 0.96707 0.1875
0.3125 0.89406 0.52773
0.375 0.88936 0.37233
0.25 0.63854 0.75
0.0 0.82602 0.783
0.3125 0.19759 0.69017
0.3125 0.80241 0.30983
0.3125 0.32187 0.50388
0.8125 0.03293 0.8125
0.5 0.44091 0.78097
0.5 0.55909 0.21903
0.5 0.62836 0.09468
0.25 0.36147 0.25
0.6875 0.46195 0.5625
0.8125 0.39268 0.50121
0.6875 0.22321 0.31517
0.0 0.95296 0.625
0.6875 0.77679 0.68483
0.6875 0.53806 0.4375
0.0 0.42389 0.875
0.5 0.10124 0.37767
0.5 0.74369 0.27944
0.8125 0.46195 0.9375
0.8125 0.60732 0.49879
0.8125 0.50402 0.3125
0.125 0.11065 0.87233
0.6875 0.32187 0.50388
0.5 0.04705 0.125
0.1875 0.03293 0.8125
0.1875 0.97648 0.5625
0.3125 0.67813 0.49612
0.75 0.63854 0.75
0.875 0.30767 0.1533
0.375 0.11065 0.62767
0.75 0.36147 0.25
0.375 0.58629 0.84468
0.6875 0.10594 0.47228
0.0 0.17399 0.21701
0.8125 0.10594 0.02773
0.6875 0.96707 0.3125
0.3125 0.60732 0.00121
0.125 0.41372 0.34468
0.5 0.66394 0.4033
0.5 0.95296 0.875
0.8125 0.02352 0.4375
0.625 0.41372 0.15533
0.0 0.25631 0.77944
0.0 0.10124 0.12233
0.1875 0.49598 0.6875
0.1875 0.39268 0.50121
0.0 0.62836 0.40533
0.875 0.58629 0.65533
0.0 0.04705 0.375
0.5 0.85198 0.87678
0.1875 0.10594 0.02773
0.3125 0.96707 0.3125
0.875 0.88936 0.12767
0.1875 0.19759 0.80983
0.8125 0.97648 0.5625
0.1875 0.80241 0.19017
0.0 0.89877 0.87767
0.0 0.66394 0.0967
0.125 0.69233 0.8467
0.5 0.17399 0.283
0.0 0.72513 0.37322
0.6875 0.19759 0.69017
0.6875 0.80241 0.30983
0.5 0.14802 0.12322
0.6875 0.02352 0.0625
0.5 0.72513 0.12678
0.625 0.69233 0.6533
0.3125 0.46195 0.5625
0.1875 0.22321 0.18483
0.5 0.42389 0.625
0.6875 0.82131 0.94195
0.8125 0.24211 0.56695
0.6875 0.17869 0.05805
0.8125 0.75789 0.43305
0.1875 0.46195 0.9375
0.3125 0.49598 0.8125
0.6875 0.75789 0.06695
0.3125 0.50402 0.1875
0.6875 0.24211 0.93305
0.8125 0.17869 0.44195
0.8125 0.32187 0.99612
0.8125 0.82131 0.55805
0.6875 0.60732 0.00121
0.1875 0.02352 0.4375
0.0 0.14802 0.37678
0.1875 0.67813 0.00388
0.8125 0.22321 0.18483
0.5 0.57611 0.375
0.8125 0.53806 0.0625
0.125 0.88936 0.12767
0.5 0.82602 0.71701
0.0 0.01882 0.2179
0.0 0.85198 0.62322
0.875 0.69233 0.8467
0.3125 0.39268 0.99879
0.0 0.98118 0.78211
0.375 0.30767 0.3467
0.5 0.33606 0.5967
0.8125 0.67813 0.00388
0.6875 0.39268 0.99879
0.625 0.88936 0.37233
0.625 0.11065 0.62767
0.8125 0.49598 0.6875
0.5 0.89877 0.62233
0.125 0.58629 0.65533
0.625 0.58629 0.84468
0.1875 0.89406 0.97228
"""
coord= "relative"
cages="""
12 0.0 -0.21676 1.01068
14 0.0 0.43193 0.12387
14 -0.25 -0.65222 -0.25
12 0.25 0.09409 0.25
12 0.5 0.21676 0.51068
12 -0.25 -0.09409 -0.25
14 0.25 0.65222 0.25
16 0.5 0.11291 -0.12842
12 0.5 -0.5 1.0
14 0.5 -0.291 0.8787
14 0.25 -0.65222 0.75
14 0.5 0.291 0.1213
16 0.0 0.11291 0.62842
12 0.25 -0.09409 0.75
14 0.5 0.43193 0.37613
12 0.0 0.5 0.5
14 0.0 -0.291 -0.3787
12 0.0 0.21676 -0.01068
16 0.0 -0.11291 -0.62842
12 -0.5 -0.21676 -0.51068
14 -0.25 0.65222 -0.75
12 0.5 0.0 0.5
14 0.0 -0.43193 -0.12387
12 -0.25 0.09409 -0.75
14 0.5 -0.43193 0.62387
16 0.5 -0.11291 1.12842
14 0.0 0.291 0.3787
12 0.0 0.0 0.0
"""
bondlen = 3
celltype = 'rect'
cell = """
13.167286191434481 31.492589961461622 18.629903136229707
"""
density = 0.6190653349484135
| [
"vitroid@gmail.com"
] | vitroid@gmail.com |
1ef2de447a021e6da5b57bb6ff88ba0555796709 | 036c69a0c27cfdc0b0d9d6169caf027635b66325 | /leetcode633.py | 6f082a59a764909a1c91da134cf980861e95a51a | [] | no_license | Marco2018/leetcode | 19b6b2c19452babf0198f8705e0dd1f31c321017 | c7dc709a7a9b83ef85fbc2d0aad7a8829f1035d1 | refs/heads/master | 2020-03-23T17:34:09.637831 | 2020-01-18T11:00:36 | 2020-01-18T11:00:36 | 141,865,308 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 395 | py | import math
class Solution(object):
    def judgeSquareSum(self, c):
        """Return True iff c == a*a + b*b for some integers a, b >= 0.

        LeetCode 633.  Two-pointer search over 0 <= i <= j, O(sqrt(c)) time.

        BUG FIX: the original seeded j with int(math.sqrt(c)); float sqrt
        can round down incorrectly for very large c, silently excluding a
        valid j.  math.isqrt computes the exact integer square root.

        :type c: int
        :rtype: bool
        """
        i, j = 0, math.isqrt(c)
        while i <= j:
            total = i * i + j * j
            if total == c:
                return True
            elif total < c:
                # Sum too small: grow the lower pointer.
                i += 1
            else:
                # Sum too large: shrink the upper pointer.
                j -= 1
        return False
# Quick manual check: 5 = 1^2 + 2^2, so this should print True.
s=Solution()
k=5
print(s.judgeSquareSum(k))
| [
"513045631@qq.com"
] | 513045631@qq.com |
9c65c480160a9c5018e9682e03fd097c1d440b6e | 2558ee962f83929830c00741e8ad5b04f0dbc254 | /05/14.py | fc265def275e601f03cefd5fdb369d4c0b3f6e8d | [] | no_license | loveQt/PythonWebSourceCode | 80ffc7cdd8882acb2ab70c4b8fa46a51560d2e04 | 086ac2acbb388eda0656c99e86b00c7020e4acba | refs/heads/master | 2021-01-10T21:08:32.485624 | 2015-06-16T15:39:55 | 2015-06-16T15:39:55 | 37,539,522 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 98 | py | userList = ['0001' , '0004' , '0006' , '0002' , '0005' , '0003']
userList.reverse()
print userList | [
"shmilydxc@vip.qq.com"
] | shmilydxc@vip.qq.com |
0a1d6f058d79fafc887ca3d6f1ded85f8244d634 | 04b1803adb6653ecb7cb827c4f4aa616afacf629 | /tools/md_browser/gitiles_ext_blocks.py | b1a53795e1750d12737e6a32144106c9d9ba5cac | [
"BSD-3-Clause",
"LGPL-2.0-or-later",
"LGPL-2.1-only",
"APSL-2.0",
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0",
"LGPL-2.0-only",
"LicenseRef-scancode-unknown",
"MIT",
"Zlib"
] | permissive | Samsung/Castanets | 240d9338e097b75b3f669604315b06f7cf129d64 | 4896f732fc747dfdcfcbac3d442f2d2d42df264a | refs/heads/castanets_76_dev | 2023-08-31T09:01:04.744346 | 2021-07-30T04:56:25 | 2021-08-11T05:45:21 | 125,484,161 | 58 | 49 | BSD-3-Clause | 2022-10-16T19:31:26 | 2018-03-16T08:07:37 | null | UTF-8 | Python | false | false | 2,797 | py | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Implements Gitiles' notification, aside and promotion blocks.
This extention makes the Markdown parser recognize the Gitiles' extended
blocks notation. The syntax is explained at:
https://gerrit.googlesource.com/gitiles/+/master/Documentation/markdown.md#Notification_aside_promotion-blocks
"""
from markdown.blockprocessors import BlockProcessor
from markdown.extensions import Extension
from markdown.util import etree
import re
class _GitilesExtBlockProcessor(BlockProcessor):
  """Process Gitiles' notification, aside and promotion blocks.

  A block opens with a line ``*** note`` (or ``aside``/``promo``) and closes
  with a line containing only ``***``.  Everything between the markers is
  parsed into a <div> whose class name matches the marker keyword.

  State: self._last_parent remembers the parent element that was current
  when a block was opened, so parsing can resume there at the closing
  marker.  It is None whenever no block is open.
  """

  RE_START = re.compile(r'^\*\*\* (note|aside|promo) *\n')
  RE_END = re.compile(r'\n\*\*\* *\n?$')

  def __init__(self, *args, **kwargs):
    # No block is open initially.
    self._last_parent = None
    BlockProcessor.__init__(self, *args, **kwargs)

  def test(self, parent, block):
    """Return truthy if this processor should handle *block* (it contains
    an opening or a closing marker)."""
    return self.RE_START.search(block) or self.RE_END.search(block)

  def run(self, parent, blocks):
    """Consume the next raw block, opening or closing a styled <div>."""
    raw_block = blocks.pop(0)
    match_start = self.RE_START.search(raw_block)
    if match_start:
      # Opening a new block.
      rest = raw_block[match_start.end():]
      if self._last_parent:
        # Inconsistent state (nested starting markers). Ignore the marker
        # and keep going.
        blocks.insert(0, rest)
        return
      div = etree.SubElement(parent, 'div')
      # Setting the class name is sufficient, because doc.css already has
      # styles for these classes.
      div.set('class', match_start.group(1))
      self._last_parent = parent
      blocks.insert(0, rest)
      # Parse the remaining blocks *inside* the new div until the end
      # marker is reached.
      self.parser.parseBlocks(div, blocks)
      return
    match_end = self.RE_END.search(raw_block)
    if match_end:
      # Ending an existing block.
      # Process the text preceding the ending marker in the current context
      # (i.e. within the div block).
      rest = raw_block[:match_end.start()]
      self.parser.parseBlocks(parent, [rest])
      if not self._last_parent:
        # Inconsistent state (the ending marker is found but there is no
        # matching starting marker).
        # Let's continue as if we did not see the ending marker.
        return
      # Restore the pre-block parent and continue parsing outside the div.
      last_parent = self._last_parent
      self._last_parent = None
      self.parser.parseBlocks(last_parent, blocks)
      return
class _GitilesExtBlockExtension(Extension):
  """Add Gitiles' extended blocks to Markdown."""

  def extendMarkdown(self, md, md_globals):
    # '_begin' registers the processor ahead of the built-in block
    # processors so the '*** note' markers are consumed first.
    md.parser.blockprocessors.add('gitilesextblocks',
                                  _GitilesExtBlockProcessor(md.parser),
                                  '_begin')
def makeExtension(*args, **kwargs):
  """Entry point used by markdown.Markdown(extensions=[...])."""
  return _GitilesExtBlockExtension(*args, **kwargs)
| [
"sunny.nam@samsung.com"
] | sunny.nam@samsung.com |
54a3fad2138ebacedcf8939c0b6196bb4d9b3dbb | edcd74f8f65119bdbe737360c2ca33b4a6da160a | /python/problem-google-code-jam/2018_b.py | 98699ff0f46a03da210a2e0def950bde3e33e108 | [] | no_license | hyunjun/practice | 72e83de6a1d5e04ddcd16526f16110ea2dd00373 | 5376dd48b1cefb4faba9d2ef6a8a497b6b1d6c67 | refs/heads/master | 2023-08-31T07:00:37.320351 | 2023-08-17T07:29:24 | 2023-08-17T07:29:24 | 2,704,126 | 3 | 2 | null | 2022-12-14T20:25:07 | 2011-11-03T18:28:44 | Python | UTF-8 | Python | false | false | 804 | py | # https://blog.naver.com/ndb796/221247631646
class Solution:
    def troubleSort(self, inp):
        """Apply GCJ 2018 'Trouble Sort' to *inp* in place and check it.

        Trouble sort is known to be equivalent to independently sorting the
        values at even indices and the values at odd indices.  After that
        transformation, return the first index i where inp[i] > inp[i + 1],
        or 'OK' if the whole list is sorted.  None/empty input returns [].

        BUG FIX: the original right-to-left stride-2 sift only *partially*
        sorted each parity chain (e.g. [3, 2, 2, 2, 1] was left as
        [1, 2, 3, 2, 2], reporting index 2 instead of 'OK').  Extended-slice
        assignment with sorted() performs the parity sort exactly.
        """
        if inp is None or 0 == len(inp):
            return []
        # Sort the even-index and odd-index subsequences independently.
        inp[0::2] = sorted(inp[0::2])
        inp[1::2] = sorted(inp[1::2])
        # Report the first adjacent inversion, if any remains.
        for idx in range(len(inp) - 1):
            if inp[idx] > inp[idx + 1]:
                return idx
        return 'OK'
# Smoke test: [5, 6, 8, 4, 3] trouble-sorts into fully sorted order ('OK');
# [8, 9, 7] first goes wrong at index 1.
s = Solution()
data = [([5, 6, 8, 4, 3], 'OK'), ([8, 9, 7], 1)]
for inp, expected in data:
    real = s.troubleSort(inp)
    print('inp {}, expected {}, real {}, result {}'.format(inp, expected, real, expected == real))
| [
"morpheus.0@kakaocorp.com"
] | morpheus.0@kakaocorp.com |
ff7a09345bd02f4ebdab16d53b8302d3dce3e7ee | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2453/60870/282722.py | 88542fe2fc3834eef30ed5e16b8c623fc16f52c9 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 137 | py | array = input().split(',')
# Convert the comma-separated tokens (read on the previous line) to ints,
# then report whether the queried number is among them.
array = [int(token) for token in array]
num = int(input())
print('True' if num in array else 'False')
"1069583789@qq.com"
] | 1069583789@qq.com |
cb485c661040f1cf200e0437e69f0fea29343ef5 | caa175a933aca08a475c6277e22cdde1654aca7b | /tests/test_version.py | e7294ded840af2ff9d2315172936d70b828d4787 | [
"MIT"
] | permissive | simonsobs/acondbs | 01d68ae40866461b85a6c9fcabdfbea46ef5f920 | d18c7b06474b0dacb1dcf1c6dbd1e743407645e2 | refs/heads/main | 2023-07-07T04:33:40.561273 | 2023-06-28T22:08:00 | 2023-06-28T22:08:00 | 239,022,783 | 0 | 1 | MIT | 2023-06-26T20:36:39 | 2020-02-07T21:07:46 | Python | UTF-8 | Python | false | false | 139 | py | import acondbs
def test_version() -> None:
    '''test if the version string is attached to the module'''
    # Any truthy string passes; a missing attribute raises AttributeError.
    assert acondbs.__version__
| [
"tai.sakuma@gmail.com"
] | tai.sakuma@gmail.com |
fa497cff7ed5d5a4a2209a08b49b728ff7acf821 | 855501a4cb8a54e0c977d53e6f5d76d8938f99cb | /Quicksort 2 - Sorting.py | 299b80349cb57d654b3016753c09c626f9a9cdef | [] | no_license | Beowulfdgo/HackerRank | 3d7713f68a595af76d857ac9955ae55565b8391f | e4384253f27eee296e0cad39a402cadf47c90164 | refs/heads/master | 2023-05-31T05:30:21.425792 | 2021-06-29T08:47:11 | 2021-06-29T08:47:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 450 | py | def quicksort(ar):
if len(ar) < 2:
return ar
lt, eq, rt = [], [], []
for item in ar:
if item < ar[0]:
lt.append(item)
elif item > ar[0]:
rt.append(item)
else:
eq.append(item)
sub = quicksort(lt) + eq + quicksort(rt)
print(' '.join([str(x) for x in sub]))
return(sub)
n = input().strip().split()
ar = [int(x) for x in input().strip().split()]
quicksort(ar)
| [
"54479676+CormacKrum@users.noreply.github.com"
] | 54479676+CormacKrum@users.noreply.github.com |
a7b4e3f4a3f9ff40a6a3bb1db9cfec26f6dbcff9 | 0e083f405af00029c9ec31849f0f7f81c56844b5 | /demo/python/pipeline.py | f949a910f602bf36b5b2c010c68e3d8b50f4771f | [
"Apache-2.0"
] | permissive | open-mmlab/mmdeploy | 39b9e7b611caab2c76a6142fcb99f0bf1d92ad24 | 5479c8774f5b88d7ed9d399d4e305cb42cc2e73a | refs/heads/main | 2023-09-01T21:29:25.315371 | 2023-08-31T09:59:29 | 2023-08-31T09:59:29 | 441,467,833 | 2,164 | 605 | Apache-2.0 | 2023-09-14T10:39:04 | 2021-12-24T13:04:44 | Python | UTF-8 | Python | false | false | 1,952 | py | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import json
import cv2
from mmdeploy_runtime import Context, Device, Model, Pipeline
def parse_args():
    """Build the demo's command-line parser and parse sys.argv.

    Positional arguments (in order): device name, detection model path,
    classification model path, test image path.
    """
    parser = argparse.ArgumentParser(
        description='Demo of MMDeploy SDK pipeline API')
    for arg_name, help_text in (
            ('device', 'name of device, cuda or cpu'),
            ('det_model_path', 'path of detection model'),
            ('cls_model_path', 'path of classification model'),
            ('image_path', 'path to test image')):
        parser.add_argument(arg_name, help=help_text)
    return parser.parse_args()
def main():
    """Run a two-stage detect-then-classify pipeline on one image.

    Stage 1 runs the detection model on the input image; stage 2 crops each
    detected box out of the image and classifies the patch.  The combined
    detections and per-box labels are printed as JSON.
    """
    args = parse_args()
    det_model = Model(args.det_model_path)
    reg_model = Model(args.cls_model_path)
    # Declarative pipeline graph consumed by the MMDeploy SDK.
    config = dict(
        type='Pipeline',
        input='img',
        tasks=[
            # Stage 1: detection over the whole image.
            dict(
                type='Inference',
                input='img',
                output='dets',
                params=dict(model=det_model)),
            # Stage 2: an inner pipeline applied per detection.
            dict(
                type='Pipeline',
                # flatten dets ([[a]] -> [a]) and broadcast img
                input=['boxes=*dets', 'imgs=+img'],
                tasks=[
                    dict(
                        type='Task',
                        module='CropBox',
                        input=['imgs', 'boxes'],
                        output='patches'),
                    dict(
                        type='Inference',
                        input='patches',
                        output='labels',
                        params=dict(model=reg_model))
                ],
                # unflatten labels ([a] -> [[a]])
                output='*labels')
        ],
        output=['dets', 'labels'])
    device = Device(args.device)
    pipeline = Pipeline(config, Context(device))
    img = cv2.imread(args.image_path)
    output = pipeline(dict(ori_img=img))
    print(json.dumps(output, indent=4))
if __name__ == '__main__':
main()
| [
"noreply@github.com"
] | open-mmlab.noreply@github.com |
0ecebed8a726b83ea5c9897feeff12131b5ad2e4 | bf548b525bdd0f229fd727b59b329abb74776ac0 | /crm_serverhost/crm/views.py | 5870c92a05c62c1163e497648b80223a262f3043 | [] | no_license | xuliwei1216/idc_server_crm | 6b07e9e4a33622498fe10d1cd3fda44d80000c62 | 71159e4ec51b0343067bf943db73499ed986b7d6 | refs/heads/master | 2020-03-27T22:55:06.152440 | 2018-09-04T01:41:13 | 2018-09-04T01:41:13 | 147,271,957 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 21,131 | py | from django.shortcuts import render,redirect
from crm import models
from django.core.paginator import Paginator,EmptyPage,PageNotAnInteger
from crm import forms
from crm import check01_up_down
from crm import check02_up_down
import math
from crm.permissions import check_permission
# Create your views here.
count_page = 10
def dashboard(request):
    """Landing view; renders the host-server list template.

    NOTE(review): the template is rendered without a 'hostserver_list'
    context variable, so the table will appear empty — confirm intended.
    """
    return render(request,'crm/hostserver.html')
# 通过装饰器实现用户权限关联
#@check_permission
def hostserver(request):
    """Paginated listing of HostServer records (crm/hostserver.html)."""
    records = models.HostServer.objects.all()
    paginator = Paginator(records, count_page)
    requested = request.GET.get('page')  # page number supplied by the client
    try:
        page_obj = paginator.page(requested)
    except PageNotAnInteger:
        # Missing or non-numeric page (e.g. first visit): show page 1.
        page_obj = paginator.page(1)
    except EmptyPage:
        # Out-of-range page: clamp to the last page.
        page_obj = paginator.page(paginator.num_pages)
    return render(request, 'crm/hostserver.html', {'hostserver_list': page_obj})
@check_permission
def hostserver_detail(request,hostserver_id):
    """Show (GET) or update (POST) a single HostServer record.

    On a successful save the user is redirected back to the list page that
    should contain the record; the page number is estimated from the record
    id and the module-level ``count_page`` page size (NOTE(review): this
    assumes ids map 1:1 onto list positions — verify).
    """
    hostserver_obj = models.HostServer.objects.get(id=hostserver_id)
    if request.method == "POST":
        # Bind submitted data to the existing row so save() updates it.
        form = forms.HostServerModelForm(request.POST, instance=hostserver_obj)
        if form.is_valid():
            form.save()
            # BUG FIX: the original ran "/".join() over the id *string*
            # ("23" -> "2/3"), making int() raise for multi-digit ids.
            record_id = request.path.split("/")[-2]
            page = math.ceil(int(record_id) / int(count_page))
            # Drop the trailing "<id>/" and append the page query string.
            base_url = "/".join(request.path.split("/")[0:-2]) + "/?page=" + "%s" % (page)
            return redirect(base_url)
        # Invalid form: fall through and re-render with the bound errors.
    else:
        form = forms.HostServerModelForm(instance=hostserver_obj)
    return render(request,'crm/hostserver_detail.html',{'hostserver_form':form})
###############################################################################################################################################################
#@check_permission
def hostserver01(request):
    """Paginated listing of HostServer01 records (crm/hostserver01.html)."""
    records = models.HostServer01.objects.all()
    paginator = Paginator(records, count_page)
    requested = request.GET.get('page')  # page number supplied by the client
    try:
        page_obj = paginator.page(requested)
    except PageNotAnInteger:
        # Missing or non-numeric page (e.g. first visit): show page 1.
        page_obj = paginator.page(1)
    except EmptyPage:
        # Out-of-range page: clamp to the last page.
        page_obj = paginator.page(paginator.num_pages)
    return render(request, 'crm/hostserver01.html', {'hostserver_list': page_obj})
@check_permission
def hostserver_detail01(request, hostserver_id):
    """Show (GET) or update (POST) a single HostServer01 record.

    On a successful save the user is redirected back to the list page that
    should contain the record; the page number is estimated from the record
    id and the module-level ``count_page`` page size (NOTE(review): this
    assumes ids map 1:1 onto list positions — verify).
    """
    hostserver_obj = models.HostServer01.objects.get(id=hostserver_id)
    if request.method == "POST":
        # Bind submitted data to the existing row so save() updates it.
        form = forms.HostServer01ModelForm(request.POST, instance=hostserver_obj)
        if form.is_valid():
            form.save()
            # BUG FIX: the original ran "/".join() over the id *string*
            # ("23" -> "2/3"), making int() raise for multi-digit ids.
            record_id = request.path.split("/")[-2]
            page = math.ceil(int(record_id) / int(count_page))
            # Drop the trailing "<id>/" and append the page query string.
            base_url = "/".join(request.path.split("/")[0:-2]) + "/?page=" + "%s" % (page)
            return redirect(base_url)
        # Invalid form: fall through and re-render with the bound errors.
    else:
        form = forms.HostServer01ModelForm(instance=hostserver_obj)
    return render(request, 'crm/hostserver_detail01.html', {'hostserver_form': form})
######################################################################################################################################
#@check_permission
def hostserver02(request):
    """Paginated listing of HostServer02 records (crm/hostserver02.html)."""
    records = models.HostServer02.objects.all()
    paginator = Paginator(records, count_page)
    requested = request.GET.get('page')  # page number supplied by the client
    try:
        page_obj = paginator.page(requested)
    except PageNotAnInteger:
        # Missing or non-numeric page (e.g. first visit): show page 1.
        page_obj = paginator.page(1)
    except EmptyPage:
        # Out-of-range page: clamp to the last page.
        page_obj = paginator.page(paginator.num_pages)
    return render(request, 'crm/hostserver02.html', {'hostserver_list': page_obj})
@check_permission
def hostserver_detail02(request, hostserver_id):
    """Show (GET) or update (POST) a single HostServer02 record.

    On a successful save the user is redirected back to the list page that
    should contain the record; the page number is estimated from the record
    id and the module-level ``count_page`` page size (NOTE(review): this
    assumes ids map 1:1 onto list positions — verify).
    """
    hostserver_obj = models.HostServer02.objects.get(id=hostserver_id)
    if request.method == "POST":
        # Bind submitted data to the existing row so save() updates it.
        form = forms.HostServer02ModelForm(request.POST, instance=hostserver_obj)
        if form.is_valid():
            form.save()
            # BUG FIX: the original ran "/".join() over the id *string*
            # ("23" -> "2/3"), making int() raise for multi-digit ids.
            record_id = request.path.split("/")[-2]
            page = math.ceil(int(record_id) / int(count_page))
            # Drop the trailing "<id>/" and append the page query string.
            base_url = "/".join(request.path.split("/")[0:-2]) + "/?page=" + "%s" % (page)
            return redirect(base_url)
        # Invalid form: fall through and re-render with the bound errors.
    else:
        form = forms.HostServer02ModelForm(instance=hostserver_obj)
    return render(request, 'crm/hostserver_detail02.html', {'hostserver_form': form})
######################################################################################################################################
#@check_permission
def hostserver03(request):
    """Paginated listing of HostServer03 records (crm/hostserver03.html)."""
    records = models.HostServer03.objects.all()
    paginator = Paginator(records, count_page)
    requested = request.GET.get('page')  # page number supplied by the client
    try:
        page_obj = paginator.page(requested)
    except PageNotAnInteger:
        # Missing or non-numeric page (e.g. first visit): show page 1.
        page_obj = paginator.page(1)
    except EmptyPage:
        # Out-of-range page: clamp to the last page.
        page_obj = paginator.page(paginator.num_pages)
    return render(request, 'crm/hostserver03.html', {'hostserver_list': page_obj})
@check_permission
def hostserver_detail03(request, hostserver_id):
    """Show (GET) or update (POST) a single HostServer03 record.

    On a successful save the user is redirected back to the list page that
    should contain the record; the page number is estimated from the record
    id and the module-level ``count_page`` page size (NOTE(review): this
    assumes ids map 1:1 onto list positions — verify).
    """
    hostserver_obj = models.HostServer03.objects.get(id=hostserver_id)
    if request.method == "POST":
        # Bind submitted data to the existing row so save() updates it.
        form = forms.HostServer03ModelForm(request.POST, instance=hostserver_obj)
        if form.is_valid():
            form.save()
            # BUG FIX: the original ran "/".join() over the id *string*
            # ("23" -> "2/3"), making int() raise for multi-digit ids.
            record_id = request.path.split("/")[-2]
            page = math.ceil(int(record_id) / int(count_page))
            # Drop the trailing "<id>/" and append the page query string.
            base_url = "/".join(request.path.split("/")[0:-2]) + "/?page=" + "%s" % (page)
            return redirect(base_url)
        # Invalid form: fall through and re-render with the bound errors.
    else:
        form = forms.HostServer03ModelForm(instance=hostserver_obj)
    return render(request, 'crm/hostserver_detail03.html', {'hostserver_form': form})
################################################################################################################################
#@check_permission
def hostserver04(request):
    """Paginated listing of HostServer04 records (crm/hostserver04.html)."""
    records = models.HostServer04.objects.all()
    paginator = Paginator(records, count_page)
    requested = request.GET.get('page')  # page number supplied by the client
    try:
        page_obj = paginator.page(requested)
    except PageNotAnInteger:
        # Missing or non-numeric page (e.g. first visit): show page 1.
        page_obj = paginator.page(1)
    except EmptyPage:
        # Out-of-range page: clamp to the last page.
        page_obj = paginator.page(paginator.num_pages)
    return render(request, 'crm/hostserver04.html', {'hostserver_list': page_obj})
@check_permission
def hostserver_detail04(request, hostserver_id):
    """Show (GET) or update (POST) a single HostServer04 record.

    On a successful save the user is redirected back to the list page that
    should contain the record; the page number is estimated from the record
    id and the module-level ``count_page`` page size (NOTE(review): this
    assumes ids map 1:1 onto list positions — verify).
    """
    hostserver_obj = models.HostServer04.objects.get(id=hostserver_id)
    if request.method == "POST":
        # Bind submitted data to the existing row so save() updates it.
        form = forms.HostServer04ModelForm(request.POST, instance=hostserver_obj)
        if form.is_valid():
            form.save()
            # BUG FIX: the original ran "/".join() over the id *string*
            # ("23" -> "2/3"), making int() raise for multi-digit ids.
            record_id = request.path.split("/")[-2]
            page = math.ceil(int(record_id) / int(count_page))
            # Drop the trailing "<id>/" and append the page query string.
            base_url = "/".join(request.path.split("/")[0:-2]) + "/?page=" + "%s" % (page)
            return redirect(base_url)
        # Invalid form: fall through and re-render with the bound errors.
    else:
        form = forms.HostServer04ModelForm(instance=hostserver_obj)
    return render(request, 'crm/hostserver_detail04.html', {'hostserver_form': form})
###################################################################################################################################################
#@check_permission
def hostserver05(request):
    """Paginated listing of HostServer05 records (crm/hostserver05.html)."""
    records = models.HostServer05.objects.all()
    paginator = Paginator(records, count_page)
    requested = request.GET.get('page')  # page number supplied by the client
    try:
        page_obj = paginator.page(requested)
    except PageNotAnInteger:
        # Missing or non-numeric page (e.g. first visit): show page 1.
        page_obj = paginator.page(1)
    except EmptyPage:
        # Out-of-range page: clamp to the last page.
        page_obj = paginator.page(paginator.num_pages)
    return render(request, 'crm/hostserver05.html', {'hostserver_list': page_obj})
@check_permission
def hostserver_detail05(request, hostserver_id):
    """Show (GET) or update (POST) a single HostServer05 record.

    On a successful save the user is redirected back to the list page that
    should contain the record; the page number is estimated from the record
    id and the module-level ``count_page`` page size (NOTE(review): this
    assumes ids map 1:1 onto list positions — verify).
    """
    hostserver_obj = models.HostServer05.objects.get(id=hostserver_id)
    if request.method == "POST":
        # Bind submitted data to the existing row so save() updates it.
        form = forms.HostServer05ModelForm(request.POST, instance=hostserver_obj)
        if form.is_valid():
            form.save()
            # BUG FIX: the original ran "/".join() over the id *string*
            # ("23" -> "2/3"), making int() raise for multi-digit ids.
            record_id = request.path.split("/")[-2]
            page = math.ceil(int(record_id) / int(count_page))
            # Drop the trailing "<id>/" and append the page query string.
            base_url = "/".join(request.path.split("/")[0:-2]) + "/?page=" + "%s" % (page)
            return redirect(base_url)
        # Invalid form: fall through and re-render with the bound errors.
    else:
        form = forms.HostServer05ModelForm(instance=hostserver_obj)
    return render(request, 'crm/hostserver_detail05.html', {'hostserver_form': form})
###################################################################################################################################
#@check_permission
def hostserver06(request):
    """Paginated listing of HostServer06 records (crm/hostserver06.html)."""
    records = models.HostServer06.objects.all()
    paginator = Paginator(records, count_page)
    requested = request.GET.get('page')  # page number supplied by the client
    try:
        page_obj = paginator.page(requested)
    except PageNotAnInteger:
        # Missing or non-numeric page (e.g. first visit): show page 1.
        page_obj = paginator.page(1)
    except EmptyPage:
        # Out-of-range page: clamp to the last page.
        page_obj = paginator.page(paginator.num_pages)
    return render(request, 'crm/hostserver06.html', {'hostserver_list': page_obj})
@check_permission
def hostserver_detail06(request, hostserver_id):
    """Show (GET) or update (POST) a single HostServer06 record.

    On a successful save the user is redirected back to the list page that
    should contain the record; the page number is estimated from the record
    id and the module-level ``count_page`` page size (NOTE(review): this
    assumes ids map 1:1 onto list positions — verify).
    """
    hostserver_obj = models.HostServer06.objects.get(id=hostserver_id)
    if request.method == "POST":
        # Bind submitted data to the existing row so save() updates it.
        form = forms.HostServer06ModelForm(request.POST, instance=hostserver_obj)
        if form.is_valid():
            form.save()
            # BUG FIX: the original ran "/".join() over the id *string*
            # ("23" -> "2/3"), making int() raise for multi-digit ids.
            record_id = request.path.split("/")[-2]
            page = math.ceil(int(record_id) / int(count_page))
            # Drop the trailing "<id>/" and append the page query string.
            base_url = "/".join(request.path.split("/")[0:-2]) + "/?page=" + "%s" % (page)
            return redirect(base_url)
        # Invalid form: fall through and re-render with the bound errors.
    else:
        form = forms.HostServer06ModelForm(instance=hostserver_obj)
    return render(request, 'crm/hostserver_detail06.html', {'hostserver_form': form})
######################################################################################################################################
#@check_permission
def hostserver07(request):
    """List HostServer07 records with pagination ('page' GET parameter)."""
    records = models.HostServer07.objects.all()
    paginator = Paginator(records, count_page)
    requested_page = request.GET.get('page')  # page number supplied by the client
    try:
        page_obj = paginator.page(requested_page)
    except PageNotAnInteger:
        # Missing or non-numeric page -> fall back to the first page.
        page_obj = paginator.page(1)
    except EmptyPage:
        # Page number beyond the end -> serve the last available page.
        page_obj = paginator.page(paginator.num_pages)
    return render(request, 'crm/hostserver07.html', {'hostserver_list': page_obj})
@check_permission
def hostserver_detail07(request, hostserver_id):
    """Show (GET) or update (POST) a single HostServer07 record.

    After a successful update, redirect back to the list view on the page
    assumed to contain this record.
    """
    hostserver_obj = models.HostServer07.objects.get(id=hostserver_id)
    if request.method == "POST":
        # Bind the submitted data to the existing instance for validation.
        form = forms.HostServer07ModelForm(request.POST, instance=hostserver_obj)
        if form.is_valid():
            form.save()
            # BUG FIX: the original did '"/".join(request.path.split("/")[-2])',
            # which joins the *characters* of the id segment (e.g. "12" -> "1/2")
            # and makes int() raise for any multi-digit id.  Use the path
            # segment directly.
            record_id = request.path.split("/")[-2]
            # NOTE(review): this equates the record's id with its position in
            # the list when picking the page -- confirm ids are contiguous,
            # otherwise the redirect may land on the wrong page.
            page = math.ceil(int(record_id) / int(count_page))
            base_url = "/".join(request.path.split("/")[0:-2]) + "/?page=%s" % page
            return redirect(base_url)
    else:
        # GET: pre-populate the form from the stored record.
        form = forms.HostServer07ModelForm(instance=hostserver_obj)
    return render(request, 'crm/hostserver_detail07.html', {'hostserver_form': form})
########################################################################################################################################
#@check_permission
def hostserver08(request):
    """List HostServer08 records with pagination ('page' GET parameter)."""
    records = models.HostServer08.objects.all()
    paginator = Paginator(records, count_page)
    requested_page = request.GET.get('page')  # page number supplied by the client
    try:
        page_obj = paginator.page(requested_page)
    except PageNotAnInteger:
        # Missing or non-numeric page -> fall back to the first page.
        page_obj = paginator.page(1)
    except EmptyPage:
        # Page number beyond the end -> serve the last available page.
        page_obj = paginator.page(paginator.num_pages)
    return render(request, 'crm/hostserver08.html', {'hostserver_list': page_obj})
@check_permission
def hostserver_detail08(request, hostserver_id):
    """Show (GET) or update (POST) a single HostServer08 record.

    After a successful update, redirect back to the list view on the page
    assumed to contain this record.
    """
    hostserver_obj = models.HostServer08.objects.get(id=hostserver_id)
    if request.method == "POST":
        # Bind the submitted data to the existing instance for validation.
        form = forms.HostServer08ModelForm(request.POST, instance=hostserver_obj)
        if form.is_valid():
            form.save()
            # BUG FIX: the original did '"/".join(request.path.split("/")[-2])',
            # which joins the *characters* of the id segment (e.g. "12" -> "1/2")
            # and makes int() raise for any multi-digit id.  Use the path
            # segment directly.
            record_id = request.path.split("/")[-2]
            # NOTE(review): this equates the record's id with its position in
            # the list when picking the page -- confirm ids are contiguous,
            # otherwise the redirect may land on the wrong page.
            page = math.ceil(int(record_id) / int(count_page))
            base_url = "/".join(request.path.split("/")[0:-2]) + "/?page=%s" % page
            return redirect(base_url)
    else:
        # GET: pre-populate the form from the stored record.
        form = forms.HostServer08ModelForm(instance=hostserver_obj)
    return render(request, 'crm/hostserver_detail08.html', {'hostserver_form': form})
| [
"admin@example.com"
] | admin@example.com |
7dc37255f0bda7cbe80a39dbbbb19e810041493e | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03049/s443206428.py | 3f28d62a3383c1b8d8566f4abf3168dd0e4cec49 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 745 | py | N = int(input())
# Count the maximum number of "AB" substrings obtainable by concatenating the
# N input strings in some order: occurrences inside each string, plus one per
# join between a string ending in 'A' and a string starting with 'B'.
count_ab = 0                 # "AB" pairs already inside the strings
count_start_b = 0            # strings starting 'B' but not ending 'A'
count_finish_a = 0           # strings ending 'A' but not starting 'B'
count_start_b_finish_a = 0   # strings both starting 'B' and ending 'A'
for _ in range(N):
    word = input()
    count_ab += word.count('AB')
    starts_b = word[0] == 'B'
    ends_a = word[-1] == 'A'
    if starts_b and ends_a:
        count_start_b_finish_a += 1
    elif starts_b:
        count_start_b += 1
    elif ends_a:
        count_finish_a += 1
if not count_start_b_finish_a:
    # No B...A strings: pair up A-enders with B-starters directly.
    print(count_ab + min(count_finish_a, count_start_b))
elif max(count_finish_a, count_start_b) > 0:
    # B...A strings chain for free and attach to the other groups.
    print(count_ab + count_start_b_finish_a + min(count_finish_a, count_start_b))
else:
    # Only B...A strings: chaining k of them yields k-1 extra joins.
    print(count_ab + count_start_b_finish_a - 1)
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
2af4da4361839bb82ba5aca5151b060f1fa31da5 | 7a658cf845650b8ef0fdbc77bd75a6e668d91889 | /contrib/devtools/copyright_header.py | 23a2ba27932c73716c306dd16f2a8fe79ed33e9f | [
"LicenseRef-scancode-public-domain",
"MIT"
] | permissive | ahmedbodi/nyc3 | a6cb8c41db5d2eac445b9f7650970479d0642f57 | 36880f1b75acc2ff4642be927fac953f48c8ea49 | refs/heads/master | 2022-01-23T09:54:03.782147 | 2019-07-21T20:34:39 | 2019-07-21T20:34:39 | 197,980,317 | 0 | 1 | MIT | 2019-07-27T21:20:05 | 2019-07-20T20:56:32 | C++ | UTF-8 | Python | false | false | 22,639 | py | #!/usr/bin/env python3
# Copyright (c) 2016 The Bitcoin Core developers
# Copyright (c) 2017 The Nyc3 Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
import re
import fnmatch
import sys
import subprocess
import datetime
import os
################################################################################
# file filtering
################################################################################
EXCLUDE = [
    # libsecp256k1:
    'src/secp256k1/include/secp256k1.h',
    'src/secp256k1/include/secp256k1_ecdh.h',
    'src/secp256k1/include/secp256k1_recovery.h',
    'src/secp256k1/include/secp256k1_schnorr.h',
    'src/secp256k1/src/java/org_nyc3_NativeSecp256k1.c',
    'src/secp256k1/src/java/org_nyc3_NativeSecp256k1.h',
    'src/secp256k1/src/java/org_nyc3_Secp256k1Context.c',
    'src/secp256k1/src/java/org_nyc3_Secp256k1Context.h',
    # auto generated:
    'src/univalue/lib/univalue_escapes.h',
    'src/qt/nyc3strings.cpp',
    'src/chainparamsseeds.h',
    # other external copyrights:
    'src/tinyformat.h',
    'src/leveldb/util/env_win.cc',
    'src/crypto/ctaes/bench.c',
    'test/functional/test_framework/bignum.py',
    # python init:
    '*__init__.py',
]
EXCLUDE_COMPILED = re.compile('|'.join([fnmatch.translate(m) for m in EXCLUDE]))

INCLUDE = ['*.h', '*.cpp', '*.cc', '*.c', '*.py']
INCLUDE_COMPILED = re.compile('|'.join([fnmatch.translate(m) for m in INCLUDE]))


def applies_to_file(filename):
    """Return True when *filename* matches INCLUDE and is not EXCLUDE'd."""
    if EXCLUDE_COMPILED.match(filename) is not None:
        return False
    return INCLUDE_COMPILED.match(filename) is not None
################################################################################
# obtain list of files in repo according to INCLUDE and EXCLUDE
################################################################################
GIT_LS_CMD = 'git ls-files'


def call_git_ls():
    """Return the repository's tracked-file list from `git ls-files`."""
    output = subprocess.check_output(GIT_LS_CMD.split(' '))
    return [name for name in output.decode("utf-8").split('\n') if name != '']


def get_filenames_to_examine():
    """Return the sorted subset of tracked files this script should inspect."""
    return sorted(name for name in call_git_ls() if applies_to_file(name))
################################################################################
# define and compile regexes for the patterns we are looking for
################################################################################
# Fix: regex metacharacter escapes (\(, \., \*) previously lived in plain
# string literals, which are invalid escape sequences (SyntaxWarning on
# modern Python).  Raw strings / doubled backslashes keep the values
# byte-identical while making them legal.
COPYRIGHT_WITH_C = r'Copyright \(c\)'
COPYRIGHT_WITHOUT_C = 'Copyright'
ANY_COPYRIGHT_STYLE = '(%s|%s)' % (COPYRIGHT_WITH_C, COPYRIGHT_WITHOUT_C)

YEAR = "20[0-9][0-9]"
YEAR_RANGE = '(%s)(-%s)?' % (YEAR, YEAR)
YEAR_LIST = '(%s)(, %s)+' % (YEAR, YEAR)
ANY_YEAR_STYLE = '(%s|%s)' % (YEAR_RANGE, YEAR_LIST)
ANY_COPYRIGHT_STYLE_OR_YEAR_STYLE = ("%s %s" % (ANY_COPYRIGHT_STYLE,
                                                ANY_YEAR_STYLE))

ANY_COPYRIGHT_COMPILED = re.compile(ANY_COPYRIGHT_STYLE_OR_YEAR_STYLE)


def compile_copyright_regex(copyright_style, year_style, name):
    """Compile '<copyright marker> <year style> <holder name>' as a regex."""
    return re.compile('%s %s %s' % (copyright_style, year_style, name))


# Regex fragments: the trailing \n anchors the holder name to end-of-line,
# and \\. / \\* escape literal dots and asterisks in the names.
EXPECTED_HOLDER_NAMES = [
    "Satoshi Nakamoto\n",
    "The Nyc3 Core developers\n",
    "The Nyc3 Core developers \n",
    "Nyc3 Core Developers\n",
    "the Nyc3 Core developers\n",
    "The Nyc3 developers\n",
    "The LevelDB Authors\\. All rights reserved\\.\n",
    "BitPay Inc\\.\n",
    "BitPay, Inc\\.\n",
    "University of Illinois at Urbana-Champaign\\.\n",
    "MarcoFalke\n",
    "Pieter Wuille\n",
    "Pieter Wuille +\\*\n",
    "Pieter Wuille, Gregory Maxwell +\\*\n",
    "Pieter Wuille, Andrew Poelstra +\\*\n",
    "Andrew Poelstra +\\*\n",
    "Wladimir J. van der Laan\n",
    "Jeff Garzik\n",
    "Diederik Huys, Pieter Wuille +\\*\n",
    "Thomas Daede, Cory Fields +\\*\n",
    "Jan-Klaas Kollhof\n",
    "Sam Rushing\n",
    "ArtForz -- public domain half-a-node\n",
]

# Pre-compiled per-holder patterns for the three recognized notice styles.
DOMINANT_STYLE_COMPILED = {}
YEAR_LIST_STYLE_COMPILED = {}
WITHOUT_C_STYLE_COMPILED = {}

for holder_name in EXPECTED_HOLDER_NAMES:
    DOMINANT_STYLE_COMPILED[holder_name] = (
        compile_copyright_regex(COPYRIGHT_WITH_C, YEAR_RANGE, holder_name))
    YEAR_LIST_STYLE_COMPILED[holder_name] = (
        compile_copyright_regex(COPYRIGHT_WITH_C, YEAR_LIST, holder_name))
    WITHOUT_C_STYLE_COMPILED[holder_name] = (
        compile_copyright_regex(COPYRIGHT_WITHOUT_C, ANY_YEAR_STYLE,
                                holder_name))
################################################################################
# search file contents for copyright message of particular category
################################################################################
def get_count_of_copyrights_of_any_style_any_holder(contents):
    """Count every copyright-looking notice in *contents*, any holder."""
    return len(ANY_COPYRIGHT_COMPILED.findall(contents))


def file_has_dominant_style_copyright_for_holder(contents, holder_name):
    """True when *contents* has a '(c)' + year-range notice for the holder."""
    return DOMINANT_STYLE_COMPILED[holder_name].search(contents) is not None


def file_has_year_list_style_copyright_for_holder(contents, holder_name):
    """True when *contents* has a '(c)' + comma-separated-years notice."""
    return YEAR_LIST_STYLE_COMPILED[holder_name].search(contents) is not None


def file_has_without_c_style_copyright_for_holder(contents, holder_name):
    """True when *contents* has a notice for the holder without the '(c)'."""
    return WITHOUT_C_STYLE_COMPILED[holder_name].search(contents) is not None
################################################################################
# get file info
################################################################################
def read_file(filename):
    """Return the full text of *filename*.

    Fix: the original called open(...).read() and never closed the handle,
    leaking a file descriptor per examined file.
    """
    with open(os.path.abspath(filename), 'r') as f:
        return f.read()


def gather_file_info(filename):
    """Build the per-file classification record consumed by print_report()."""
    contents = read_file(filename)
    info = {
        'filename': filename,
        'contents': contents,
        'all_copyrights': get_count_of_copyrights_of_any_style_any_holder(contents),
        'classified_copyrights': 0,
        'dominant_style': {},
        'year_list_style': {},
        'without_c_style': {},
    }
    for holder_name in EXPECTED_HOLDER_NAMES:
        has_dominant_style = (
            file_has_dominant_style_copyright_for_holder(contents, holder_name))
        has_year_list_style = (
            file_has_year_list_style_copyright_for_holder(contents, holder_name))
        has_without_c_style = (
            file_has_without_c_style_copyright_for_holder(contents, holder_name))
        info['dominant_style'][holder_name] = has_dominant_style
        info['year_list_style'][holder_name] = has_year_list_style
        info['without_c_style'][holder_name] = has_without_c_style
        # A notice matching any recognized style counts as classified.
        if has_dominant_style or has_year_list_style or has_without_c_style:
            info['classified_copyrights'] += 1
    return info
################################################################################
# report execution
################################################################################
# 79 dashes -- identical to the original "'-'.join of 80 empty strings".
SEPARATOR = '-' * 79


def print_filenames(filenames, verbose):
    """Print one tab-indented filename per line; no-op unless *verbose*."""
    if verbose:
        for name in filenames:
            print("\t%s" % name)
def print_report(file_infos, verbose):
    """Print the full copyright report for *file_infos* to stdout.

    Sections: files examined, per-count buckets (0..4+ notices), then one
    section per notice style with per-holder counts, and finally the files
    whose notices matched no expected holder.  *verbose* additionally lists
    every filename in each bucket (via print_filenames).
    """
    print(SEPARATOR)
    examined = [i['filename'] for i in file_infos]
    print("%d files examined according to INCLUDE and EXCLUDE fnmatch rules" %
          len(examined))
    print_filenames(examined, verbose)
    print(SEPARATOR)
    print('')
    # Bucket files by how many copyright notices of any style they contain.
    zero_copyrights = [i['filename'] for i in file_infos if
                       i['all_copyrights'] == 0]
    print("%4d with zero copyrights" % len(zero_copyrights))
    print_filenames(zero_copyrights, verbose)
    one_copyright = [i['filename'] for i in file_infos if
                     i['all_copyrights'] == 1]
    print("%4d with one copyright" % len(one_copyright))
    print_filenames(one_copyright, verbose)
    two_copyrights = [i['filename'] for i in file_infos if
                      i['all_copyrights'] == 2]
    print("%4d with two copyrights" % len(two_copyrights))
    print_filenames(two_copyrights, verbose)
    three_copyrights = [i['filename'] for i in file_infos if
                        i['all_copyrights'] == 3]
    print("%4d with three copyrights" % len(three_copyrights))
    print_filenames(three_copyrights, verbose)
    four_or_more_copyrights = [i['filename'] for i in file_infos if
                               i['all_copyrights'] >= 4]
    print("%4d with four or more copyrights" % len(four_or_more_copyrights))
    print_filenames(four_or_more_copyrights, verbose)
    print('')
    print(SEPARATOR)
    # Per-holder counts for the dominant "(c) <year-range>" style.
    print('Copyrights with dominant style:\ne.g. "Copyright (c)" and '
          '"<year>" or "<startYear>-<endYear>":\n')
    for holder_name in EXPECTED_HOLDER_NAMES:
        dominant_style = [i['filename'] for i in file_infos if
                          i['dominant_style'][holder_name]]
        if len(dominant_style) > 0:
            # replace() renders the holder's trailing newline visibly as "\n".
            print("%4d with '%s'" % (len(dominant_style),
                                     holder_name.replace('\n', '\\n')))
            print_filenames(dominant_style, verbose)
    print('')
    print(SEPARATOR)
    # Per-holder counts for the "(c) <year1>, <year2>, ..." list style.
    print('Copyrights with year list style:\ne.g. "Copyright (c)" and '
          '"<year1>, <year2>, ...":\n')
    for holder_name in EXPECTED_HOLDER_NAMES:
        year_list_style = [i['filename'] for i in file_infos if
                           i['year_list_style'][holder_name]]
        if len(year_list_style) > 0:
            print("%4d with '%s'" % (len(year_list_style),
                                     holder_name.replace('\n', '\\n')))
            print_filenames(year_list_style, verbose)
    print('')
    print(SEPARATOR)
    # Per-holder counts for notices that omit the "(c)" marker.
    print('Copyrights with no "(c)" style:\ne.g. "Copyright" and "<year>" or '
          '"<startYear>-<endYear>":\n')
    for holder_name in EXPECTED_HOLDER_NAMES:
        without_c_style = [i['filename'] for i in file_infos if
                           i['without_c_style'][holder_name]]
        if len(without_c_style) > 0:
            print("%4d with '%s'" % (len(without_c_style),
                                     holder_name.replace('\n', '\\n')))
            print_filenames(without_c_style, verbose)
    print('')
    print(SEPARATOR)
    # Files with more notices than could be attributed to expected holders.
    unclassified_copyrights = [i['filename'] for i in file_infos if
                               i['classified_copyrights'] < i['all_copyrights']]
    print("%d with unexpected copyright holder names" %
          len(unclassified_copyrights))
    print_filenames(unclassified_copyrights, verbose)
    print(SEPARATOR)
def exec_report(base_directory, verbose):
    """Run the copyright report against *base_directory*.

    Fix: the working directory is now restored in a finally clause -- the
    original left the process chdir'd into *base_directory* when the scan
    raised (e.g. a git failure).
    """
    original_cwd = os.getcwd()
    os.chdir(base_directory)
    try:
        filenames = get_filenames_to_examine()
        file_infos = [gather_file_info(f) for f in filenames]
        print_report(file_infos, verbose)
    finally:
        os.chdir(original_cwd)
################################################################################
# report cmd
################################################################################
REPORT_USAGE = """
Produces a report of all copyright header notices found inside the source files
of a repository.
Usage:
    $ ./copyright_header.py report <base_directory> [verbose]
Arguments:
    <base_directory> - The base directory of a nyc3 source code repository.
    [verbose] - Includes a list of every file of each subcategory in the report.
"""


def report_cmd(argv):
    """Handle the 'report' subcommand: validate argv and run exec_report()."""
    if len(argv) == 2:
        sys.exit(REPORT_USAGE)
    base_directory = argv[2]
    if not os.path.exists(base_directory):
        sys.exit("*** bad <base_directory>: %s" % base_directory)
    if len(argv) == 3:
        verbose = False
    elif argv[3] == 'verbose':
        verbose = True
    else:
        # Bug fix: report the offending argument argv[3]; the original
        # printed argv[2] (the base directory), which was misleading.
        sys.exit("*** unknown argument: %s" % argv[3])
    exec_report(base_directory, verbose)
################################################################################
# query git for year of last change
################################################################################
GIT_LOG_CMD = "git log --pretty=format:%%ai %s"


def call_git_log(filename):
    """Return the raw `git log` timestamp lines for *filename*."""
    out = subprocess.check_output((GIT_LOG_CMD % filename).split(' '))
    return out.decode("utf-8").split('\n')


def get_git_change_years(filename):
    """Return the (string) years in which *filename* was changed.

    Falls back to the current year for a file with no git history.
    """
    # Fix: ''.split('\n') is [''], so the original's len()==0 fallback could
    # never trigger and the empty string leaked through as a "year".
    git_log_lines = [line for line in call_git_log(filename) if line]
    if len(git_log_lines) == 0:
        # str, not int: every other year handled by this script is a string.
        return [str(datetime.date.today().year)]
    # timestamp is in ISO 8601 format. e.g. "2016-09-05 14:25:32 -0600"
    return [line.split(' ')[0].split('-')[0] for line in git_log_lines]


def get_most_recent_git_change_year(filename):
    """Return the most recent year in which *filename* was changed."""
    return max(get_git_change_years(filename))
################################################################################
# read and write to file
################################################################################
def read_file_lines(filename):
    """Return the lines of *filename* with line endings preserved.

    Fix: use a context manager so the handle is closed even if readlines()
    raises (the original closed it manually).
    """
    with open(os.path.abspath(filename), 'r') as f:
        return f.readlines()


def write_file_lines(filename, file_lines):
    """Overwrite *filename* with the concatenation of *file_lines*."""
    with open(os.path.abspath(filename), 'w') as f:
        f.write(''.join(file_lines))
################################################################################
# update header years execution
################################################################################
# Fix: raw string for the regex escape (\( was an invalid escape sequence in
# a plain literal).  NOTE: COPYRIGHT/YEAR/YEAR_RANGE intentionally re-bind
# module-level names from the report section above with the same values.
COPYRIGHT = r'Copyright \(c\)'
YEAR = "20[0-9][0-9]"
YEAR_RANGE = '(%s)(-%s)?' % (YEAR, YEAR)
HOLDER = 'The Nyc3 Core developers'
UPDATEABLE_LINE_COMPILED = re.compile(' '.join([COPYRIGHT, YEAR_RANGE, HOLDER]))


def get_updatable_copyright_line(file_lines):
    """Return (index, line) of the first updatable notice, else (None, None)."""
    for index, line in enumerate(file_lines):
        if UPDATEABLE_LINE_COMPILED.search(line) is not None:
            return index, line
    return None, None
def parse_year_range(year_range):
    """Split 'YYYY' or 'YYYY-YYYY' into a (start, end) pair of strings."""
    start_year, _, end_year = year_range.partition('-')
    return start_year, (end_year or start_year)


def year_range_to_str(start_year, end_year):
    """Render a (start, end) pair back to 'YYYY' or 'YYYY-YYYY'."""
    if start_year == end_year:
        return start_year
    return "%s-%s" % (start_year, end_year)


def create_updated_copyright_line(line, last_git_change_year):
    """Return *line* with its year range extended to *last_git_change_year*.

    Everything before the 'Copyright (c) ' marker (comment leaders, etc.)
    is preserved verbatim; the line is returned unchanged when its end year
    already matches.
    """
    marker = 'Copyright (c) '
    parts = line.split(marker)
    before_marker = parts[0]
    tokens = parts[1].split(' ')
    start_year, end_year = parse_year_range(tokens[0])
    if end_year == last_git_change_year:
        return line
    return (before_marker + marker +
            year_range_to_str(start_year, last_git_change_year) + ' ' +
            ' '.join(tokens[1:]))
def update_updatable_copyright(filename):
    """Bring the core-developer copyright year in *filename* up to date.

    Prints a one-line progress message describing what (if anything) changed.
    """
    file_lines = read_file_lines(filename)
    index, line = get_updatable_copyright_line(file_lines)
    if not line:
        print_file_action_message(filename, "No updatable copyright.")
        return
    last_git_change_year = get_most_recent_git_change_year(filename)
    new_line = create_updated_copyright_line(line, last_git_change_year)
    if new_line == line:
        # The notice already lists the latest change year; nothing to write.
        print_file_action_message(filename, "Copyright up-to-date.")
        return
    file_lines[index] = new_line
    write_file_lines(filename, file_lines)
    print_file_action_message(filename,
                              "Copyright updated! -> %s" % last_git_change_year)
def exec_update_header_year(base_directory):
    """Update copyright years for every examined file under *base_directory*.

    Fix: restore the working directory in a finally clause (consistent with
    exec_report) -- the original left the process chdir'd on error.
    """
    original_cwd = os.getcwd()
    os.chdir(base_directory)
    try:
        for filename in get_filenames_to_examine():
            update_updatable_copyright(filename)
    finally:
        os.chdir(original_cwd)
################################################################################
# update cmd
################################################################################
UPDATE_USAGE = """
Updates all the copyright headers of "The Nyc3 Core developers" which were
changed in a year more recent than is listed. For example:
// Copyright (c) <firstYear>-<lastYear> The Bitcoin Core developers
// Copyright (c) 2017 The Nyc3 Core developers
will be updated to:
// Copyright (c) <firstYear>-<lastModifiedYear> The Bitcoin Core developers
// Copyright (c) 2017 The Nyc3 Core developers
where <lastModifiedYear> is obtained from the 'git log' history.
This subcommand also handles copyright headers that have only a single year. In those cases:
// Copyright (c) <year> The Bitcoin Core developers
// Copyright (c) 2017 The Nyc3 Core developers
will be updated to:
// Copyright (c) <year>-<lastModifiedYear> The Bitcoin Core developers
// Copyright (c) 2017 The Nyc3 Core developers
where the update is appropriate.
Usage:
    $ ./copyright_header.py update <base_directory>
Arguments:
    <base_directory> - The base directory of a nyc3 source code repository.
"""


def print_file_action_message(filename, action):
    """Print a two-column '<filename> <action>' progress line."""
    print("%-52s %s" % (filename, action))


def update_cmd(argv):
    """Handle the 'update' subcommand: refresh copyright years in-place."""
    if len(argv) != 3:
        sys.exit(UPDATE_USAGE)
    directory = argv[2]
    if not os.path.exists(directory):
        sys.exit("*** bad base_directory: %s" % directory)
    exec_update_header_year(directory)
################################################################################
# inserted copyright header format
################################################################################
def get_header_lines(header, start_year, end_year):
    """Turn a header template into newline-terminated lines.

    The template's leading/trailing blank lines are dropped and the first
    line's '%s' placeholder is filled with the rendered year range.
    """
    body = header.split('\n')[1:-1]
    body[0] = body[0] % year_range_to_str(start_year, end_year)
    return ['%s\n' % line for line in body]


CPP_HEADER = '''
// Copyright (c) %s The Bitcoin Core developers
// Copyright (c) 2017 The Nyc3 Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''


def get_cpp_header_lines_to_insert(start_year, end_year):
    """Return the C++ header lines reversed (for repeated insert-at-top)."""
    return reversed(get_header_lines(CPP_HEADER, start_year, end_year))


PYTHON_HEADER = '''
# Copyright (c) %s The Bitcoin Core developers
# Copyright (c) 2017 The Nyc3 Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''


def get_python_header_lines_to_insert(start_year, end_year):
    """Return the Python header lines reversed (for repeated insert-at-top)."""
    return reversed(get_header_lines(PYTHON_HEADER, start_year, end_year))
################################################################################
# query git for year of last change
################################################################################
def get_git_change_year_range(filename):
    """Return the (earliest, latest) years in which *filename* was changed."""
    years = sorted(get_git_change_years(filename))
    return years[0], years[-1]
################################################################################
# check for existing core copyright
################################################################################
def file_already_has_core_copyright(file_lines):
    """Return True when *file_lines* already contains an updatable notice."""
    index, _ = get_updatable_copyright_line(file_lines)
    # Idiom fix: compare against None with 'is not', not '!='.
    return index is not None
################################################################################
# insert header execution
################################################################################
def file_has_hashbang(file_lines):
    """Return True when the first line is a '#!' interpreter directive.

    A bare '#!' with nothing after it (length <= 2) does not count, matching
    the original's length guard.
    """
    return bool(file_lines) and len(file_lines[0]) > 2 and file_lines[0].startswith('#!')
def insert_python_header(filename, file_lines, start_year, end_year):
    """Insert the Python-style header, below a '#!' line when one exists."""
    insert_idx = 1 if file_has_hashbang(file_lines) else 0
    # Header lines arrive reversed, so inserting each at the same index
    # reconstructs them in order.
    for line in get_python_header_lines_to_insert(start_year, end_year):
        file_lines.insert(insert_idx, line)
    write_file_lines(filename, file_lines)


def insert_cpp_header(filename, file_lines, start_year, end_year):
    """Insert the C++-style header at the very top of the file."""
    for line in get_cpp_header_lines_to_insert(start_year, end_year):
        file_lines.insert(0, line)
    write_file_lines(filename, file_lines)
def exec_insert_header(filename, style):
    """Insert a copyright header into *filename*, refusing duplicates."""
    file_lines = read_file_lines(filename)
    if file_already_has_core_copyright(file_lines):
        sys.exit('*** %s already has a copyright by The Core developers'
                 % (filename))
    start_year, end_year = get_git_change_year_range(filename)
    inserter = insert_python_header if style == 'python' else insert_cpp_header
    inserter(filename, file_lines, start_year, end_year)
################################################################################
# insert cmd
################################################################################
INSERT_USAGE = """
Inserts a copyright header for "The Nyc3 Core developers" at the top of the
file in either Python or C++ style as determined by the file extension. If the
file is a Python file and it has a '#!' starting the first line, the header is
inserted in the line below it.
The copyright dates will be set to be:
"<year_introduced>-<current_year>"
where <year_introduced> is according to the 'git log' history. If
<year_introduced> is equal to <current_year>, the date will be set to be:
"<current_year>"
If the file already has a copyright for "The Nyc3 Core developers", the
script will exit.
Usage:
    $ ./copyright_header.py insert <file>
Arguments:
    <file> - A source file in the nyc3 repository.
"""


def insert_cmd(argv):
    """Handle the 'insert' subcommand: add a header to one source file."""
    if len(argv) != 3:
        sys.exit(INSERT_USAGE)
    filename = argv[2]
    if not os.path.isfile(filename):
        sys.exit("*** bad filename: %s" % filename)
    _, extension = os.path.splitext(filename)
    if extension not in ['.h', '.cpp', '.cc', '.c', '.py']:
        sys.exit("*** cannot insert for file extension %s" % extension)
    style = 'python' if extension == '.py' else 'cpp'
    exec_insert_header(filename, style)
################################################################################
# UI
################################################################################
USAGE = """
copyright_header.py - utilities for managing copyright headers of 'The Nyc3
Core developers' in repository source files.
Usage:
    $ ./copyright_header <subcommand>
Subcommands:
    report
    update
    insert
To see subcommand usage, run them without arguments.
"""

SUBCOMMANDS = ['report', 'update', 'insert']

if __name__ == "__main__":
    if len(sys.argv) == 1:
        sys.exit(USAGE)
    subcommand = sys.argv[1]
    if subcommand not in SUBCOMMANDS:
        sys.exit(USAGE)
    # Dispatch table keeps handler lookup in one place.
    dispatch = {'report': report_cmd, 'update': update_cmd, 'insert': insert_cmd}
    dispatch[subcommand](sys.argv)
| [
"william.kibbler@googlemail.com"
] | william.kibbler@googlemail.com |
7a68101804a74608cbce1f189b3552132caaf3b5 | e95236dd5ca3c39c39586b5cafeacd06d923e20b | /models/btcmodel.py | e516c93d630307aa632512597fdf051e994c708a | [] | no_license | raoden1/Minotaur | 14bc7e56ca2437af3ec4d6af3c85fc1062bade65 | 8787244cc6ac9cd6347e84705a2908026ec78f25 | refs/heads/master | 2020-04-26T11:42:16.314463 | 2019-02-02T19:20:30 | 2019-02-02T19:20:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,455 | py | import datetime
from binance.client import Client
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
import numpy as np
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
# Instantiate the Binance client (credentials are placeholders).
client = Client('API_KEY', 'SECRET_KEY')

# Fetch one year of 30-minute BTC/USDT candles.
symbol = 'BTCUSDT'
BTC = client.get_historical_klines(symbol=symbol, interval=Client.KLINE_INTERVAL_30MINUTE, start_str="1 year ago UTC")
BTC = pd.DataFrame(BTC, columns=['Open time', 'Open', 'High', 'Low', 'Close', 'Volume', 'Close time', 'Quote asset volume', 'Number of trades', 'Taker buy base asset volume', 'Taker buy quote asset volume', 'Ignore'])
BTC['Open time'] = pd.to_datetime(BTC['Open time'], unit='ms')
BTC.set_index('Open time', inplace=True)
BTC['Close'] = BTC['Close'].astype(float)

# Column 3 is 'Close' once 'Open time' has become the index.
data = BTC.iloc[:, 3:4].astype(float).values

# FIX (data leakage): the original fit the MinMaxScaler on the *entire*
# series before splitting, so min/max information from the test period
# leaked into training.  Split first, fit on the training slice only,
# then apply the same transform to the test slice.
training_set = data[:10000]
test_set = data[10000:]
scaler = MinMaxScaler()
training_set = scaler.fit_transform(training_set)
test_set = scaler.transform(test_set)

# One-step-ahead supervised pairs: X[t] predicts y[t] = price at t+1.
X_train = training_set[0:len(training_set) - 1]
y_train = training_set[1:len(training_set)]
X_test = test_set[0:len(test_set) - 1]
y_test = test_set[1:len(test_set)]

# LSTM expects input shaped (samples, timesteps, features).
X_train = np.reshape(X_train, (len(X_train), 1, X_train.shape[1]))
X_test = np.reshape(X_test, (len(X_test), 1, X_test.shape[1]))

# Two stacked LSTM layers feeding a single-unit regression head.
model = Sequential()
model.add(LSTM(256, return_sequences=True, input_shape=(X_train.shape[1], X_train.shape[2])))
model.add(LSTM(256))
model.add(Dense(1))

# Compile and train; shuffle=False preserves the time ordering.
model.compile(loss='mean_squared_error', optimizer='adam')
model.fit(X_train, y_train, epochs=50, batch_size=16, shuffle=False)

# Save the trained model.
model.save('bitcoin_model.h5')

# Predict on the held-out period and map predictions back to price space.
predicted_price = model.predict(X_test)
predicted_price = scaler.inverse_transform(predicted_price)
real_price = scaler.inverse_transform(y_test)

# Plot predicted vs. actual closing price.
plt.figure(figsize=(10, 4))
red_patch = mpatches.Patch(color='red', label='Predicted Price of Bitcoin')
blue_patch = mpatches.Patch(color='blue', label='Real Price of Bitcoin')
plt.legend(handles=[blue_patch, red_patch])
plt.plot(predicted_price, color='red', label='Predicted Price of Bitcoin')
plt.plot(real_price, color='blue', label='Real Price of Bitcoin')
plt.title('Predicted vs. Real Price of Bitcoin')
plt.xlabel('Time')
plt.ylabel('Price')
plt.show()
| [
"merwanedr@gmail.com"
] | merwanedr@gmail.com |
1d6acadc9d5be41109f9aa503af6f638f48eb0fd | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02963/s382016550.py | b17e4a66ac52bc376bdd76a24fac8abb77cb2820 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 197 | py | S=int(input())
if S<10**18:
    # General case: lattice triangle (0,0), (X2,Y2)=(mod,1), (X3,Y3).
    # Doubled area = |X2*Y3 - X3*Y2| = mod*(S//mod + 1) - (mod - S%mod)
    #              = mod*(S//mod) + S%mod = S exactly.
    X1,Y1=0,0
    mod=10**9
    X2=mod
    Y3=S//mod + 1
    Y2=1
    X3=mod-S%mod
else:
    # S == 10**18: the right triangle (0,0), (0,10**9), (10**9,0) has
    # doubled area |0*0 - 10**9 * 10**9| = 10**18.
    X1,Y1,X2,Y3=0,0,0,0
    Y2=10**9
    X3=10**9
# Six coordinates of a lattice triangle whose doubled area equals S.
print(X1,Y1,X2,Y2,X3,Y3)
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
1d99f2aa5cd3ff105870bfab76919d26926ef28b | c29b838371729ac04744b40d486f0b55212990b6 | /Spider-Learn/Spider/scrapyuniversal/scrapyuniversal/spiders/china.py | 9e6f76e736b259561061cc615cecc85c5d0e24cc | [] | no_license | Sugarsugarzz/PyPractice | 93c3155a94d162c9eabf0d1a641d28bc6d639c22 | d91b7d6ca996792fe409c08862fa9da5b1dc319b | refs/heads/master | 2023-02-13T01:51:24.909947 | 2021-01-20T02:57:22 | 2021-01-20T02:57:22 | 163,177,428 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,991 | py | # -*- coding: utf-8 -*-
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from scrapyuniversal.items import NewsItem
from scrapyuniversal.loader import ChinaLoader
class ChinaSpider(CrawlSpider):
    """Crawl tech.china.com article listings and extract news items.

    The first Rule opens article detail pages (handled by parse_item);
    the second follows the "next page" pagination link with no callback.
    """
    name = 'china'
    allowed_domains = ['tech.china.com']
    start_urls = ['http://tech.china.com/articles']

    rules = (
        # Article detail links inside the left-hand listing column.
        Rule(LinkExtractor(allow='article\/.*.\.html', restrict_xpaths='//div[@id="left_side"]//div[@class="con_item"]'),
             callback='parse_item'),
        # Pagination: the anchor whose text contains "下一页" ("next page").
        Rule(LinkExtractor(restrict_xpaths='//div[@id="pageStyle"]//a[contains(., "下一页")]'))
    )

    def parse_item(self, response):
        """Populate a NewsItem from an article page via the ItemLoader.

        Using ChinaLoader keeps the field extraction declarative instead of
        hand-assembling the item with xpath/extract calls.
        """
        loader = ChinaLoader(item=NewsItem(), response=response)
        loader.add_xpath('title', '//h1[@id="chan_newsTitle"]/text()')
        loader.add_value('url', response.url)
        loader.add_xpath('text', '//div[@id="chan_newsDetail"]//text()')
        loader.add_xpath('datetime', '//div[@id="chan_newsInfo"]/text()', re='(\d+-\d+-\d+\s\d+:\d+:\d+)')
        loader.add_xpath('source', '//div[@id="chan_newsInfo"]/text()', re='来源:(.*)')
        loader.add_value('website', '中华网')
        yield loader.load_item()
| [
"406857586@qq.com"
] | 406857586@qq.com |
0f2ffe38ae9cf44d15befe3270fbe6ca5ee63422 | 3bda0851de1224b524fbcddece1f502a67e9def9 | /test/test_markdown_blank_lines.py | 695aea9b92244d67e2b23590352def8d0c3975e2 | [
"MIT"
] | permissive | jtprince/pymarkdown | 65b3f3b06c88bc0d4652a990bd134ef6996bcc15 | 17304b3ef580ec71678c450ab6c2a1e669b4e90a | refs/heads/main | 2023-04-29T15:51:05.761440 | 2021-05-16T22:55:11 | 2021-05-16T22:55:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,344 | py | """
https://github.github.com/gfm/#paragraphs
"""
import pytest
from .utils import act_and_assert
@pytest.mark.gfm
def test_blank_lines_197():
"""
Test case 197: Blank lines at the beginning and end of the document are also ignored.
"""
# Arrange
source_markdown = """\a\a
aaa
\a\a
# aaa
""".replace(
"\a", " "
)
expected_tokens = [
"[BLANK(1,1): ]",
"[BLANK(2,1):]",
"[para(3,1):]",
"[text(3,1):aaa:]",
"[end-para:::True]",
"[BLANK(4,1): ]",
"[BLANK(5,1):]",
"[atx(6,1):1:0:]",
"[text(6,3):aaa: ]",
"[end-atx::]",
"[BLANK(7,1):]",
"[BLANK(8,1): ]",
]
expected_gfm = """<p>aaa</p>
<h1>aaa</h1>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_blank_lines_197a():
"""
Test case 197a: Extra blanks to test
"""
# Arrange
source_markdown = """\a\a
\a
aaa
""".replace(
"\a", " "
)
expected_tokens = [
"[BLANK(1,1): ]",
"[BLANK(2,1): ]",
"[para(3,1):]",
"[text(3,1):aaa:]",
"[end-para:::True]",
"[BLANK(4,1):]",
]
expected_gfm = """<p>aaa</p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
| [
"jack.de.winter@outlook.com"
] | jack.de.winter@outlook.com |
8b2c1d35e6d1eb3c36cb9ae0333b23f8507f3f4f | 44b3d66dce1b8b87ed7a20b9f2a57d5c40a6c010 | /enso/utils/__init__.py | 22ab49ca975d091327f74789edc96e9100955cdf | [
"BSD-2-Clause"
] | permissive | blackdaemon/enso-launcher-continued | 0b203567c9670d5a6fa95b546d7edf64953ee94c | 346f82811e77caf73560619cdeb16afabfbf1fce | refs/heads/master | 2020-06-03T16:29:31.579370 | 2019-05-22T22:39:32 | 2019-05-22T22:39:32 | 30,513,152 | 7 | 4 | null | null | null | null | UTF-8 | Python | false | false | 2,739 | py | import traceback
from contextlib import contextmanager
try:
from contextlib import suppress
except ImportError:
@contextmanager
def suppress(*exceptions):
"""Provides the ability to not have to write try/catch blocks when just
passing on the except.
Thanks to Raymond Hettinger from "Transforming Code into Beautiful
Idiotmatic Python"
This will be included in the standard library in 3.4.
Args:
exceptions: A list of exceptions to ignore
Example:
.. code-block:: python
# instead of...
try:
do_something()
except:
pass
# use this:
with suppress(Exception):
do_something()
"""
assert exceptions, "'exceptions' parameter in suppress() can't be empty!"
try:
yield
except exceptions:
pass
# Deprecated
ignored = suppress
def __do_once(ignore_args, func, *args, **kwargs):
""" Execute the function just once """
global __DO_ONCE_CACHE
stack = traceback.extract_stack()
stack.pop()
stack.pop()
code_location = "|".join(str(i) for i in stack.pop()[:-1])
cache_id = "{0}|{1}|{2}".format(
code_location,
func,
"|".join(str(arg) for arg in args) if not ignore_args else "",
"|".join(kwargs.values()) if not ignore_args else "",
)
try:
if cache_id in __DO_ONCE_CACHE:
return
except NameError:
__DO_ONCE_CACHE = {}
try:
return func(*args, **kwargs)
finally:
__DO_ONCE_CACHE[cache_id] = 1
# TODO: Move to decorators module
def call_once(func):
""" Function decorator. Execute the function just once,
no matter the arguments values
"""
def func_wrapper(*args, **kwargs):
return __do_once(True, func, *args, **kwargs)
return func_wrapper
def do_once(func, *args, **kwargs):
""" Execute the function just once, no matter the arguments values """
return __do_once(True, func, *args, **kwargs)
# TODO: Move to decorators module
def call_once_for_given_args(func):
""" Function decorator. Execute the function just once (with given argument values).
Using the function with different argument values will execute it again.
"""
def func_wrapper(*args, **kwargs):
return __do_once(False, func, *args, **kwargs)
return func_wrapper
def do_once_for_given_args(func, *args, **kwargs):
""" Execute the function just once (with given argument values)
Using the function with different argument values will execute it again.
"""
return __do_once(False, func, *args, **kwargs)
| [
"pavelvitis@gmail.com"
] | pavelvitis@gmail.com |
1778e5aef03806c0b569aea2e647ba4dd0beba08 | 786de89be635eb21295070a6a3452f3a7fe6712c | /CalibManager/tags/V00-00-77/src/NotificationDBForCL.py | db374f5242f2d042a61fa4e7d4602826bf1f6928 | [] | no_license | connectthefuture/psdmrepo | 85267cfe8d54564f99e17035efe931077c8f7a37 | f32870a987a7493e7bf0f0a5c1712a5a030ef199 | refs/heads/master | 2021-01-13T03:26:35.494026 | 2015-09-03T22:22:11 | 2015-09-03T22:22:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,144 | py | #--------------------------------------------------------------------------
# File and Version Information:
# $Id$
#
# Description:
# Module NotificationDBForCL.py...
#
#------------------------------------------------------------------------
"""
This software was developed for the SIT project. If you use all or
part of it, please give an appropriate acknowledgment.
@see
@version $Id$
@author Mikhail S. Dubrovin
"""
#------------------------------
# Module's version from SVN --
#------------------------------
__version__ = "$Revision$"
# $Source$
#--------------------------------
# Imports of standard modules --
#--------------------------------
from NotificationDB import *
#------------------------------
class NotificationDBForCL (NotificationDB):
"""Is intended for submission of notification records in db
"""
def __init__(self) :
NotificationDB.__init__(self, table='calibrun')
#------------------------------
if __name__ == "__main__" :
ndb = NotificationDBForCL()
main_test(ndb)
ndb.close()
sys.exit ( 'End of test NotificationDBForCL' )
#------------------------------
| [
"dubrovin@SLAC.STANFORD.EDU@b967ad99-d558-0410-b138-e0f6c56caec7"
] | dubrovin@SLAC.STANFORD.EDU@b967ad99-d558-0410-b138-e0f6c56caec7 |
a98f0fb1c4d3a3e117fcdbeb3051d91faf064b7e | 368be25e37bafa8cc795f7c9f34e4585e017091f | /.history/app_fav_books/models_20201113162142.py | c210c05a528dd41d08ad9cbd4d2ed524b38985db | [] | no_license | steven-halla/fav_books_proj | ebcfbfda0e7f3cdc49d592c86c633b1d331da513 | 512005deb84ac906c9f24d4ab0939bd0db096716 | refs/heads/master | 2023-03-30T09:37:38.016063 | 2021-04-02T20:27:22 | 2021-04-02T20:27:22 | 354,125,658 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,033 | py | from django.db import models
import re
class UserManager(models.Manager):
def basic_validator(self, postData):
errors = {}
if len(post_data['first_name']) < 3:
errors['first_name'] = "First name must be 3 characters"
if post_data['first_name'].isalpha() == False:
errors['first_name'] = "letters only"
if len(post_data['last_name']) < 3:
errors['last_name'] = "Last name must be 3 characters"
if post_data['last_name'].isalpha() == False:
errors['last_name'] = "letters only"
if len(post_data['email']) < 8:
errors['email'] = "Email must contain 8 characters"
if post_data['email'].find("@") == -1:
errors['email'] = "email must contain @ and .com"
if post_data['email'].find(".com") == -1:
errors['email'] = "email must contain @ and .com"
EMAIL_REGEX = re.compile(r'^[a-zA-Z0-9.+_-]+@[a-zA-Z0-9._-]+\.[a-zA-Z]+$')
# test whether a field matches the pattern
if not EMAIL_REGEX.match(postData['email']):
errors['email'] = "Invalid email address!"
return errors
if post_data['password'] != post_data['confirm_password']:
errors['pass_match'] = "password must match confirm password"
if len(post_data['password']) < 8:
errors['pass_length'] = "password must be longer than 8 characters"
# Create your models here.
class User(models.Model):
first_name = models.CharField(max_length=20)
last_name = models.CharField(max_length=20)
email = models.CharField(max_length=20)
password = models.CharField(max_length=20)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
class Books(models.Model):
title = models.CharField(max_length=20)
desc = models.CharField(max_length=40)
likes
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
| [
"69405488+steven-halla@users.noreply.github.com"
] | 69405488+steven-halla@users.noreply.github.com |
9a4a00470f775aab477c5809d1cbada4f45d60c0 | 487ce91881032c1de16e35ed8bc187d6034205f7 | /codes/CodeJamCrawler/16_3_1/kochkarash/rc1.py | 1bb08aa5d72a0c470695223cde3bfcb31d8173c2 | [] | no_license | DaHuO/Supergraph | 9cd26d8c5a081803015d93cf5f2674009e92ef7e | c88059dc66297af577ad2b8afa4e0ac0ad622915 | refs/heads/master | 2021-06-14T16:07:52.405091 | 2016-08-21T13:39:13 | 2016-08-21T13:39:13 | 49,829,508 | 2 | 0 | null | 2021-03-19T21:55:46 | 2016-01-17T18:23:00 | Python | UTF-8 | Python | false | false | 815 | py | import sys
fileinput = sys.stdin
import StringIO
#fileinput = StringIO.StringIO(inputstr)
from heapq import *
import string
A=string.ascii_uppercase
A=[a for a in A]
T=int(fileinput.readline().strip())
for t in range(T):
N=fileinput.readline().strip()
N=int(N)
P=fileinput.readline().strip().split()
P=[int(p) for p in P]
h = []
S=0
for n in range(N):
heappush(h, (-P[n], A[n]))
S += P[n]
O=[]
while True:
p=heappop(h)
if p[0]==0:
break
S=S-1
heappush(h, (-(-p[0]-1), p[1]))
o=p[1]
if -h[0][0]>S/2:
p=heappop(h)
S=S-1
heappush(h, (-(-p[0]-1), p[1]))
o=o+p[1]
O.append(o)
O=" ".join(O)
print "Case #%s: %s" % (t+1, O)
| [
"[dhuo@tcd.ie]"
] | [dhuo@tcd.ie] |
2fba65138dedfa05ac5795d0149d804b47a9398c | 6846a0469efc79b89edc8f856944d5a8005d7244 | /id_0017.py | c70e7b4a000fac5e79b162fb6191d28364c95eea | [] | no_license | CGenie/project_euler | 42cb966e13645339490046eb44a729660ae0c092 | cc90edd061b0f4d9e076d5a684b842c202a6812a | refs/heads/master | 2020-06-05T00:41:49.266961 | 2014-01-13T19:11:31 | 2014-01-13T19:11:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,748 | py | #!/usr/bin/python2
# #####################################################################
# id_0017.py
#
# Przemyslaw Kaminski <cgenie@gmail.com>
# Time-stamp: <>
######################################################################
def write_number(n):
numbers = {1: 'one',
2: 'two',
3: 'three',
4: 'four',
5: 'five',
6: 'six',
7: 'seven',
8: 'eight',
9: 'nine',
10: 'ten',
11: 'eleven',
12: 'twelve',
13: 'thirteen',
14: 'fourteen',
15: 'fifteen',
16: 'sixteen',
17: 'seventeen',
18: 'eighteen',
19: 'nineteen',
20: 'twenty',
30: 'thirty',
40: 'forty',
50: 'fifty',
60: 'sixty',
70: 'seventy',
80: 'eighty',
90: 'ninety',
100: 'hundred'
}
if n <= 20:
return numbers[n]
if 20 < n <= 99:
s = str(n)
D = int(s[0])
d = int(s[1])
r = numbers[D*10]
if d > 0:
r += numbers[d]
return r
if 99 < n <= 999:
s = str(n)
h = int(s[0])
D = int(s[1])
d = int(s[2])
r = numbers[h] + numbers[100]
if D*10 + d > 0:
r += "and" + write_number(D*10 + d)
return r
if n == 1000:
return 'onethousand'
if __name__ == '__main__':
l = 0
for x in range(1, 1001):
print "x = " + str(x) + ", write_number = " + write_number(x)
l += len(write_number(x))
print l
| [
"cgenie@gmail.com"
] | cgenie@gmail.com |
8071aa8e26876969192138ed0b63995bbe14ae2b | cf1476710c4117865fe459f0d698520321810c56 | /cerveceria/migrations/0005_cerveza_nombre.py | 2677cbe9dfe5211b057f198c9021030bc0cb6a9b | [] | no_license | BoiwkoMartin/boiwkosbeers | 42b6ab47573ee1b282bbfbe915b17a449c9b8038 | 74f6ec93d684badc80a8c62844479fc978548acd | refs/heads/main | 2023-04-09T03:36:00.812684 | 2021-04-15T23:55:59 | 2021-04-15T23:55:59 | 358,417,276 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 429 | py | # Generated by Django 3.1.7 on 2021-03-05 19:37
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('cerveceria', '0004_remove_cerveza_nombre'),
]
operations = [
migrations.AddField(
model_name='cerveza',
name='nombre',
field=models.CharField(default="Boiwko's", max_length=50),
),
]
| [
"vagrant@vagrant.vm"
] | vagrant@vagrant.vm |
6c103c6cdb892cf317c59775a2c53c3c793c326b | 3ee1bb0d0acfa5c412b37365a4564f0df1c093fb | /keras/keras40_mnist2_cnn.py | 9b67f325ca5a78b12e49628a58edede98eb0bff1 | [] | no_license | moileehyeji/Study | 3a20bf0d74e1faec7a2a5981c1c7e7861c08c073 | 188843c6415a4c546fdf6648400d072359d1a22b | refs/heads/main | 2023-04-18T02:30:15.810749 | 2021-05-04T08:43:53 | 2021-05-04T08:43:53 | 324,901,835 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,855 | py |
# 실습
# 지표는 acc (0.985 이상)
# 응용
# y_test 10개와 y_pred 10개를 출력하시오
# y_test[:10] = (???)
# y_pred[:10] = (???)
import numpy as np
import matplotlib.pyplot as plt
# 1. mnist 데이터 셋
from tensorflow.keras.datasets import mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
print(x_train.shape, y_train.shape) #(60000, 28, 28) (60000,) ->흑백(60000, 28, 28, 1)
print(x_test.shape, y_test.shape) #(10000, 28, 28) (10000,)
print(x_train[0])
print(y_train[0])
print(x_train[0].shape) #(28, 28)
# plt.imshow(x_train[0], 'gray')
# plt.imshow(x_train[0])
# plt.show()
# X 전처리
# 이미지의 특성을 찾아 숫자를 맞춰야 함 3차원--> 4차원 --> float타입 변경 -->/255 (0~1 수렴) ~~~~~~>전처리
x_train = x_train.reshape(60000, 28, 28, 1).astype('float')/255.
x_test = x_test.reshape(10000, 28, 28, 1)/255.
# (x_test.reshape(x_test.shape[0], x_test.shape[1], x_test.shape[2], 1))
# 다중분류
# Y 전처리
from tensorflow.keras.utils import to_categorical
y_train = to_categorical(y_train)
y_test = to_categorical(y_test)
#2. 모델구성
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Dense, Flatten, Dropout
model = Sequential()
model.add(Conv2D(filters=500, kernel_size=(2,2), padding='same', input_shape = (28,28,1)))
model.add(MaxPooling2D(pool_size=2))
model.add(Dropout(0.2))
model.add(Conv2D(filters=200, kernel_size=2, padding='same', strides=2))
model.add(MaxPooling2D(pool_size=2))
model.add(Conv2D(filters=100, kernel_size=2, padding='same', strides=4))
model.add(Flatten())
model.add(Dense(520, activation='relu'))
model.add(Dense(200, activation='relu'))
# model.add(Dense(150, activation='relu'))
# model.add(Dense(100, activation='relu'))
# model.add(Dense(50, activation='relu'))
model.add(Dense(15, activation='relu'))
model.add(Dense(10, activation='softmax'))
# 3. 컴파일, 훈련
from tensorflow.keras.callbacks import EarlyStopping
early = EarlyStopping(monitor='acc', patience=20, mode= 'auto')
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics='acc')
model.fit(x_train, y_train, epochs=100, batch_size=90, callbacks=[early])
# 4. 평가, 예측
loss = model.evaluate(x_test, y_test, batch_size=90)
print(loss)
x_pre = x_test[:10]
y_pre = model.predict(x_pre)
y_pre = np.argmax(y_pre, axis=1)
y_test_pre = np.argmax(y_test[:10], axis=1)
print('y_pred[:10] : ', y_pre)
print('y_test[:10] : ', y_test_pre)
print(x_test[10].shape)
""" import matplotlib.pyplot as plt
plt.imshow(x_test[10], 'gray')
plt.show() """
'''
mnist_CNN :
[0.15593186020851135, 0.9835000038146973]
y_pred[:10] : [7 2 1 0 4 1 4 9 5 9]
y_test[:10] : [7 2 1 0 4 1 4 9 5 9]
'''
| [
"noreply@github.com"
] | moileehyeji.noreply@github.com |
2673e7c0021d9a2293deb9d460d04ff8919609da | cbf0f5dbbbea5be1c46d777c939eb21f0a65c434 | /Repository/w13/ex3_w13.py | 7bcaadf3064d7c137b19e7663533186fce898a0b | [] | no_license | ARBaranov/Third-semester | 2c128ceee257bcee1bb96c7a65b3f388898db8d5 | 9128f8eab9a73d8d8439e4727b3f499df049bec6 | refs/heads/master | 2023-08-11T03:25:28.266061 | 2021-09-18T23:34:35 | 2021-09-18T23:34:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,113 | py | # 1 a 1 2 b ---> a, b
# z 2 y ---> z, y
REGEXP_1 = ''
# aaa bbb ccc ---> aaa, bbb, ccc
# ddd eee fgh ---> ddd, eee, fgh
# a1b c2d e3f ---> a1b, c2d, e3f
REGEXP_2 = ''
# a aa aaa ---> aa, aaa
# b bb bbb ---> bb, bbb
# a bb aaa ---> bb, aaa
REGEXP_3 = ''
# 1.1.1.1 aaaa bbbbb ---> 1.1.1.1
# a.a.a.a bbbb 2.2.2.2 ---> 2.2.2.2
# 3.3.3.3 cccc 4.4.4.4 ---> 3.3.3.3, 4.4.4.4
# 255.23.0.1 cccc 4.4.4.4 ---> 255.23.0.1, 4.4.4.4
# 255.0.23.1 cccc 4.4.4.4 ---> 255.0.23.1, 4.4.4.4
REGEXP_4 = ''
# aaa Abbb ccc ---> Abbb
# Aaa Abbb ccc ---> Aaa, Abbb
# Caa Cbb Accc ---> Accc
REGEXP_5 = ''
# a b c d e f ---> a, b, e, f
# abcdef ---> a, b, e, f
# adf ---> a, f
# acf ---> a, f
REGEXP_6 = ''
# aaa +1.0 bb ---> +1.0
# aaa -1.0 bb ---> -1.0
# aaa -123.234 bb +111.999 ---> -123.234, +111.999
REGEXP_7 = ''
# aaa 18-04-2016 bbb ---> 18-04-2016
# aaa 18.04.2016 bbb ---> 18.04.2016
# aaa 18-04-ABCD bbb 18.04.2016 ---> 18.04.2016
# aaa 18/04/ABCD bbb 18/04/2016 ---> 18/04/2016
# aaa 18/04/ABCD bbb 18/4/2016 ---> 18/4/2016
REGEXP_8 = '' | [
"123"
] | 123 |
f1688f69f84bec29b575ecb8c7f89e356fd63ee2 | 5d28c38dfdd185875ba0edaf77281e684c81da0c | /mlflow/pipelines/step.py | 928cafc573317d7152207fd68f104567d58ea753 | [
"Apache-2.0"
] | permissive | imrehg/mlflow | 3a68acc1730b3ee6326c1366760d6ddc7e66099c | 5ddfe9a1b48e065540094d83125040d3273c48fa | refs/heads/master | 2022-09-24T05:39:02.767657 | 2022-09-20T00:14:07 | 2022-09-20T00:14:07 | 244,945,486 | 1 | 0 | Apache-2.0 | 2020-03-04T16:11:54 | 2020-03-04T16:11:53 | null | UTF-8 | Python | false | false | 13,668 | py | import abc
import json
import logging
import os
import time
import traceback
import yaml
from enum import Enum
from typing import TypeVar, Dict, Any
from mlflow.pipelines.cards import BaseCard, CARD_PICKLE_NAME, FailureCard, CARD_HTML_NAME
from mlflow.pipelines.utils import get_pipeline_name
from mlflow.pipelines.utils.step import display_html
from mlflow.tracking import MlflowClient
from mlflow.utils.annotations import experimental
from mlflow.utils.databricks_utils import is_in_databricks_runtime
_logger = logging.getLogger(__name__)
class StepStatus(Enum):
"""
Represents the execution status of a step.
"""
# Indicates that no execution status information is available for the step,
# which may occur if the step has never been run or its outputs have been cleared
UNKNOWN = "UNKNOWN"
# Indicates that the step is currently running
RUNNING = "RUNNING"
# Indicates that the step completed successfully
SUCCEEDED = "SUCCEEDED"
# Indicates that the step completed with one or more failures
FAILED = "FAILED"
StepExecutionStateType = TypeVar("StepExecutionStateType", bound="StepExecutionState")
class StepExecutionState:
"""
Represents execution state for a step, including the current status and
the time of the last status update.
"""
_KEY_STATUS = "pipeline_step_execution_status"
_KEY_LAST_UPDATED_TIMESTAMP = "pipeline_step_execution_last_updated_timestamp"
def __init__(self, status: StepStatus, last_updated_timestamp: int):
"""
:param status: The execution status of the step.
:param last_updated_timestamp: The timestamp of the last execution status update, measured
in seconds since the UNIX epoch.
"""
self.status = status
self.last_updated_timestamp = last_updated_timestamp
def to_dict(self) -> Dict[str, Any]:
"""
Creates a dictionary representation of the step execution state.
"""
return {
StepExecutionState._KEY_STATUS: self.status.value,
StepExecutionState._KEY_LAST_UPDATED_TIMESTAMP: self.last_updated_timestamp,
}
@classmethod
def from_dict(cls, state_dict) -> StepExecutionStateType:
"""
Creates a ``StepExecutionState`` instance from the specified execution state dictionary.
"""
return cls(
status=StepStatus[state_dict[StepExecutionState._KEY_STATUS]],
last_updated_timestamp=state_dict[StepExecutionState._KEY_LAST_UPDATED_TIMESTAMP],
)
StepType = TypeVar("StepType", bound="BaseStep")
@experimental
class BaseStep(metaclass=abc.ABCMeta):
"""
Base class representing a step in an MLflow Pipeline
"""
_EXECUTION_STATE_FILE_NAME = "execution_state.json"
@experimental
def __init__(self, step_config: Dict[str, Any], pipeline_root: str):
"""
:param step_config: dictionary of the config needed to
run/implement the step.
:param pipeline_root: String file path to the directory where step
are defined.
"""
self.step_config = step_config
self.pipeline_root = pipeline_root
self.pipeline_name = get_pipeline_name(pipeline_root_path=pipeline_root)
self.step_card = None
@experimental
def run(self, output_directory: str):
"""
Executes the step by running common setup operations and invoking
step-specific code (as defined in ``_run()``).
:param output_directory: String file path to the directory where step
outputs should be stored.
:return: None
"""
_logger.info(f"Running step {self.name}...")
start_timestamp = time.time()
self._initialize_databricks_spark_connection_and_hooks_if_applicable()
try:
self._update_status(status=StepStatus.RUNNING, output_directory=output_directory)
self.step_card = self._run(output_directory=output_directory)
self._update_status(status=StepStatus.SUCCEEDED, output_directory=output_directory)
except Exception:
self._update_status(status=StepStatus.FAILED, output_directory=output_directory)
self.step_card = FailureCard(
pipeline_name=self.pipeline_name,
step_name=self.name,
failure_traceback=traceback.format_exc(),
)
raise
finally:
self._serialize_card(start_timestamp, output_directory)
@experimental
def inspect(self, output_directory: str):
"""
Inspect the step output state by running the generic inspect information here and
running the step specific inspection code in the step's _inspect() method.
:param output_directory: String file path where to the directory where step
outputs are located.
:return: None
"""
card_path = os.path.join(output_directory, CARD_PICKLE_NAME)
if not os.path.exists(card_path):
_logger.info(
"Unable to locate runtime info for step '%s'. Re-run the step before inspect.",
self.name,
)
return None
card = BaseCard.load(card_path)
card_html_path = os.path.join(output_directory, CARD_HTML_NAME)
display_html(html_data=card.to_html(), html_file_path=card_html_path)
@experimental
@abc.abstractmethod
def _run(self, output_directory: str) -> BaseCard:
"""
This function is responsible for executing the step, writing outputs
to the specified directory, and returning results to the user. It
is invoked by the internal step runner.
:param output_directory: String file path to the directory where step outputs
should be stored.
:return: A BaseCard containing step execution information.
"""
pass
@experimental
@classmethod
@abc.abstractmethod
def from_pipeline_config(cls, pipeline_config: Dict[str, Any], pipeline_root: str) -> StepType:
"""
Constructs a step class instance by creating a step config using the pipeline
config.
Subclasses must implement this method to produce the config required to correctly
run the corresponding step.
:param pipeline_config: Dictionary representation of the full pipeline config.
:param pipeline_root: String file path to the pipeline root directory.
:return: class instance of the step.
"""
pass
@experimental
@classmethod
def from_step_config_path(cls, step_config_path: str, pipeline_root: str) -> StepType:
"""
Constructs a step class instance using the config specified in the
configuration file.
:param step_config_path: String path to the step-specific configuration
on the local filesystem.
:param pipeline_root: String path to the pipeline root directory on
the local filesystem.
:return: class instance of the step.
"""
with open(step_config_path, "r") as f:
step_config = yaml.safe_load(f)
return cls(step_config, pipeline_root)
@experimental
@property
@abc.abstractmethod
def name(self) -> str:
"""
Returns back the name of the step for the current class instance. This is used
downstream by the execution engine to create step-specific directory structures.
"""
pass
@experimental
@property
def environment(self) -> Dict[str, str]:
"""
Returns environment variables associated with step that should be set when the
step is executed.
"""
return {}
@experimental
def get_execution_state(self, output_directory: str) -> StepExecutionState:
"""
Returns the execution state of the step, which provides information about its
status (succeeded, failed, unknown), last update time, and, if applicable, encountered
stacktraces.
:param output_directory: String file path to the directory where step
outputs are stored.
:return: A ``StepExecutionState`` instance containing the step execution state.
"""
execution_state_file_path = os.path.join(
output_directory, BaseStep._EXECUTION_STATE_FILE_NAME
)
if os.path.exists(execution_state_file_path):
with open(execution_state_file_path, "r") as f:
return StepExecutionState.from_dict(json.load(f))
else:
return StepExecutionState(StepStatus.UNKNOWN, 0)
def _serialize_card(self, start_timestamp: float, output_directory: str) -> None:
if self.step_card is None:
return
execution_duration = time.time() - start_timestamp
tab = self.step_card.get_tab("Run Summary")
if tab is not None:
tab.add_markdown("EXE_DURATION", f"**Run duration (s)**: {execution_duration:.3g}")
tab.add_markdown(
"LAST_UPDATE_TIME",
f"**Last updated:** {time.strftime('%Y-%m-%d %H:%M:%S', time.localtime())}",
)
self.step_card.save(path=output_directory)
self.step_card.save_as_html(path=output_directory)
def _update_status(self, status: StepStatus, output_directory: str) -> None:
execution_state = StepExecutionState(status=status, last_updated_timestamp=time.time())
with open(os.path.join(output_directory, BaseStep._EXECUTION_STATE_FILE_NAME), "w") as f:
json.dump(execution_state.to_dict(), f)
def _initialize_databricks_spark_connection_and_hooks_if_applicable(self) -> None:
"""
Initializes a connection to the Databricks Spark Gateway and sets up associated hooks
(e.g. MLflow Run creation notification hooks) if MLflow Pipelines is running in the
Databricks Runtime.
"""
if is_in_databricks_runtime():
try:
from IPython.utils.io import capture_output
from dbruntime.spark_connection import (
initialize_spark_connection,
is_pinn_mode_enabled,
)
with capture_output():
spark_handles, entry_point = initialize_spark_connection(is_pinn_mode_enabled())
except Exception as e:
_logger.warning(
"Encountered unexpected failure while initializing Spark connection. Spark"
" operations may not succeed. Exception: %s",
e,
)
else:
try:
from dbruntime.MlflowCreateRunHook import get_mlflow_create_run_hook
# `get_mlflow_create_run_hook` sets up a patch to trigger a Databricks command
# notification every time an MLflow Run is created. This notification is
# visible to users in notebook environments
get_mlflow_create_run_hook(spark_handles["sc"], entry_point)
except Exception as e:
_logger.warning(
"Encountered unexpected failure while setting up Databricks MLflow Run"
" creation hooks. Exception: %s",
e,
)
def _log_step_card(self, run_id: str, step_name: str) -> None:
"""
Logs a step card as an artifact (destination: <step_name>/card.html) in a specified run.
If the step card does not exist, logging is skipped.
:param run_id: Run ID to which the step card is logged.
:param step_name: Step name.
"""
from mlflow.pipelines.utils.execution import get_step_output_path
local_card_path = get_step_output_path(
pipeline_root_path=self.pipeline_root,
step_name=step_name,
relative_path=CARD_HTML_NAME,
)
if os.path.exists(local_card_path):
MlflowClient().log_artifact(run_id, local_card_path, artifact_path=step_name)
else:
_logger.warning(
"Failed to log step card for step %s. Run ID: %s. Card local path: %s",
step_name,
run_id,
local_card_path,
)
@staticmethod
def _generate_worst_examples_dataframe(
dataframe,
predictions,
target_col,
worst_k=10,
):
"""
Generate dataframe containing worst k examples with largest prediction error.
Dataframe contains columns of all features, prediction, error, and target_col column.
The prediction error is defined as absolute error between target value and
prediction value.
"""
import numpy as np
predictions = np.array(predictions)
abs_error = np.absolute(predictions - dataframe[target_col].to_numpy())
worst_k_indexes = np.argsort(abs_error)[::-1][:worst_k]
result_df = dataframe.iloc[worst_k_indexes].assign(
prediction=predictions[worst_k_indexes],
absolute_error=abs_error[worst_k_indexes],
)
front_columns = ["absolute_error", "prediction", target_col]
reordered_columns = front_columns + result_df.columns.drop(front_columns).tolist()
return result_df[reordered_columns].reset_index(drop=True)
| [
"noreply@github.com"
] | imrehg.noreply@github.com |
ea721d412e4cf76279c7d215c8ee478d195c91c0 | 78260a7ab56c581684f5cb6efbf37ae6468cb092 | /payserver/test/unittest/merchant/test_merchant_manager.py | 544a53fe518b1e881f939ffb679ded1cf1502b3d | [] | no_license | yiyuhao/FukuanUnion | 4702ee0c60b9a8d74a48c4c4c981c2f949dac04e | 0d32f98f42591b43e0b4da5e978b627da517f758 | refs/heads/master | 2022-01-09T21:44:57.767264 | 2019-06-19T02:48:22 | 2019-06-19T02:48:22 | 192,504,062 | 0 | 0 | null | 2022-01-06T22:36:08 | 2019-06-18T09:00:40 | Python | UTF-8 | Python | false | false | 14,529 | py | # File: test_merchant_manager.py
# Project: payunion
# Author: Yi Yuhao
# Create: 2018/6/22
#
# Copyright (c) 2018 麦禾互动. All rights reserved.
from contextlib import contextmanager
from functools import partial
from django.test import TestCase
from django.utils.timezone import timedelta
from django.utils import timezone
from config import TRANSACTION_TYPE, COUPON_STATUS, SYSTEM_USER_STATUS, MERCHANT_ADMIN_TYPES, PAYMENT_STATUS, \
REFUND_STATUS
from common.model_manager.merchant_manager import MerchantManager
from common.model_manager.utils import set_amount
from test.unittest.fake_factory import PayunionFactory
class TestMerchantManager(TestCase):
@classmethod
def setUpTestData(cls):
cls.factory = PayunionFactory()
cls.account = cls.factory.create_account(
balance=set_amount(1000.00),
withdrawable_balance=set_amount('500.05'),
alipay_balance=set_amount(2000),
alipay_withdrawable_balance=set_amount(0.99),
bank_card_number='1234567890123'
)
cls.merchant = cls.factory.create_merchant(
name='就是这个公司',
account=cls.account,
day_begin_minute=8 * 60, # 商户订单日结时间设置为08:00
)
cls.merchant_admin = cls.factory.create_merchant_admin(
status=SYSTEM_USER_STATUS['USING'],
work_merchant=cls.merchant,
merchant_admin_type=MERCHANT_ADMIN_TYPES['ADMIN']
)
cls.manager = MerchantManager(cls.merchant)
# create cashiers
cls.normal_cashier_a = cls.factory.create_merchant_admin(
status=SYSTEM_USER_STATUS['USING'],
work_merchant=cls.merchant,
merchant_admin_type=MERCHANT_ADMIN_TYPES['CASHIER']
)
cls.normal_cashier_b = cls.factory.create_merchant_admin(
status=SYSTEM_USER_STATUS['USING'],
work_merchant=cls.merchant,
merchant_admin_type=MERCHANT_ADMIN_TYPES['CASHIER']
)
cls.other_merchant_cashier = cls.factory.create_merchant_admin(
status=SYSTEM_USER_STATUS['USING'],
merchant_admin_type=MERCHANT_ADMIN_TYPES['CASHIER']
)
cls.disabled_cashier = cls.factory.create_merchant_admin(
status=SYSTEM_USER_STATUS['DISABLED'],
work_merchant=cls.merchant,
merchant_admin_type=MERCHANT_ADMIN_TYPES['CASHIER']
)
@contextmanager
def not_change_account(self):
old_balance = self.account.balance
old_withdrawable_balance = self.account.withdrawable_balance
old_alipay_balance = self.account.alipay_balance
old_alipay_withdrawable_balance = self.account.alipay_withdrawable_balance
yield
self.account.balance = old_balance
self.account.withdrawable_balance = old_withdrawable_balance
self.account.alipay_balance = old_alipay_balance
self.account.alipay_withdrawable_balance = old_alipay_withdrawable_balance
self.account.save()
def test_wechat_account_balance(self):
self.assertEqual(self.manager.account_wechat_balance, 1000)
def test_account_wechat_withdraw_balance(self):
# 能正确查询到商户账户微信可提现余额
self.assertEqual(self.manager.account_wechat_withdraw_balance, 500.05)
# 修改余额为0.01后不足微信最小可提现余额, 可提现余额逻辑显示为0.01(前端显示为0)
with self.not_change_account():
self.account.withdrawable_balance = set_amount('0.01')
self.account.save()
self.assertEqual(self.manager.account_wechat_withdraw_balance, 0.01)
def test_alipay_account_balance(self):
self.assertEqual(self.manager.account_alipay_balance, 2000)
def test_alipay_wechat_withdraw_balance(self):
# 不足支付宝最小可提现余额, 可提现余额逻辑显示为0
self.assertEqual(self.manager.account_alipay_withdraw_balance, 0.99)
# 修改余额为100.3后能正确查询到
with self.not_change_account():
self.account.alipay_withdrawable_balance = set_amount('100.3')
self.account.save()
self.assertEqual(self.manager.account_alipay_withdraw_balance, 100.3)
def test_merchant_admin(self):
merchant_admin = self.manager.merchant_admin
self.assertEqual(merchant_admin, self.merchant_admin)
    def test_merchant_statistics(self):
        """merchant_business_report aggregates per-day turnover, originator
        shares and payment counts.

        Day 2 additionally contains a fully-refunded payment whose +100
        receive and -100 refund transactions are both dated that day.
        """
        first_day = timezone.now().replace(year=2018, month=1, day=1, hour=9)
        second_day = timezone.now().replace(year=2018, month=1, day=2, hour=9)
        # 1. Create one refunded payment (second day only)
        refund_payment = self.factory.create_payment(
            datetime=second_day,
            merchant=self.merchant,
            order_price=set_amount(100),
            status=PAYMENT_STATUS.REFUND
        )
        # Matching receive / refund transaction pair for the refunded payment
        self.factory.create_transaction(
            transaction_type=TRANSACTION_TYPE.MERCHANT_RECEIVE,
            datetime=second_day,
            account=self.account,
            amount=set_amount(100),
            content_object=refund_payment
        )
        self.factory.create_transaction(
            transaction_type=TRANSACTION_TYPE.MERCHANT_REFUND,
            datetime=second_day,
            account=self.account,
            amount=-set_amount(100),
            content_object=self.factory.create_refund(
                datetime=second_day, status=REFUND_STATUS.FINISHED, payment=refund_payment)
        )
        # 2. On each of the two days create: an unpaid payment, a coupon
        #    payment, a plain payment, and one sharing (referral) income
        for datetime_ in (first_day, second_day):
            # Unpaid payment
            self.factory.create_payment(
                datetime=datetime_,
                status=PAYMENT_STATUS.UNPAID,
                merchant=self.merchant,
                order_price=set_amount(100),
            )
            # Payment that used a coupon (order 110, discount 10)
            coupon = self.factory.create_coupon(
                discount=set_amount(10),
                min_charge=set_amount(100),
                status=COUPON_STATUS.USED,
                use_datetime=datetime_,
            )
            use_coupon_payment = self.factory.create_payment(
                datetime=datetime_,
                status=PAYMENT_STATUS.FINISHED,
                merchant=self.merchant,
                order_price=set_amount(110),
                coupon=coupon,
                platform_share=set_amount(3),
                originator_share=set_amount(1),
                inviter_share=set_amount(1)
            )
            self.factory.create_transaction(
                transaction_type=TRANSACTION_TYPE.MERCHANT_RECEIVE,
                datetime=datetime_,
                account=self.account,
                amount=set_amount(95),
                content_object=use_coupon_payment,
            )
            # Plain payment without a coupon
            not_use_coupon_payment = self.factory.create_payment(
                datetime=datetime_,
                status=PAYMENT_STATUS.FINISHED,
                merchant=self.merchant,
                order_price=set_amount(100),
                platform_share=set_amount(0),
                originator_share=set_amount(0),
                inviter_share=set_amount(0)
            )
            self.factory.create_transaction(
                transaction_type=TRANSACTION_TYPE.MERCHANT_RECEIVE,
                datetime=datetime_,
                account=self.account,
                amount=set_amount(100),
                content_object=not_use_coupon_payment,
            )
            # Sharing (referral) income
            self.factory.create_transaction(
                transaction_type=TRANSACTION_TYPE.MERCHANT_SHARE,
                datetime=datetime_,
                account=self.account,
                amount=set_amount(1),
            )
        # 3. Check the second day's report
        second_day_begin = second_day.replace(hour=0, minute=0, second=0, microsecond=0)
        result = self.manager.merchant_business_report(
            datetime_start=second_day_begin,
            datetime_end=second_day_begin + timedelta(days=1)
        )
        self.assertEqual(result, {'turnover': 200,
                                  'originator_expenditure': 5,
                                  'originator_earning': 1,
                                  'payment': {'use_coupon': 1, 'not_use_coupon': 2}})
        # 4. Check the first day's report
        first_day_begin = first_day.replace(hour=0, minute=0, second=0, microsecond=0)
        result = self.manager.merchant_business_report(
            datetime_start=first_day_begin,
            datetime_end=first_day_begin + timedelta(days=1)
        )
        self.assertEqual(result, {'turnover': 200,
                                  'originator_expenditure': 5,
                                  'originator_earning': 1,
                                  'payment': {'use_coupon': 1, 'not_use_coupon': 1}})
def test_merchant_employer_num(self):
"""收银员数量"""
self.assertEqual(self.manager.employer_num, 3)
self.factory.create_merchant_admin(number=5, work_merchant=self.merchant)
self.assertEqual(self.manager.employer_num, 8)
self.factory.create_merchant_admin(number=5, work_merchant=self.merchant)
self.assertEqual(self.manager.employer_num, 13)
self.factory.create_merchant_admin(5)
self.assertEqual(self.manager.employer_num, 13)
    def test_merchant_earning_list_by_day(self):
        """Daily earning list: inactive days report 0; on day 2 the +100
        receive and -100 refund cancel, so every active day nets
        195 (receives) + 1 (sharing income) = 196.
        """
        first_day = timezone.now().replace(year=2018, month=1, day=1, hour=9)
        second_day = timezone.now().replace(year=2018, month=1, day=2, hour=9)
        third_day = timezone.now().replace(year=2018, month=1, day=4, hour=9)
        first_day_begin = first_day.replace(hour=0, minute=0, second=0, microsecond=0)
        # 1. Create one refunded payment (second day only)
        refund_payment = self.factory.create_payment(
            datetime=second_day,
            merchant=self.merchant,
            order_price=set_amount(100),
            status=PAYMENT_STATUS.REFUND
        )
        # Matching receive / refund transaction pair for the refunded payment
        self.factory.create_transaction(
            transaction_type=TRANSACTION_TYPE.MERCHANT_RECEIVE,
            datetime=second_day,
            account=self.account,
            amount=set_amount(100),
            content_object=refund_payment
        )
        self.factory.create_transaction(
            transaction_type=TRANSACTION_TYPE.MERCHANT_REFUND,
            datetime=second_day,
            account=self.account,
            amount=-set_amount(100),
            content_object=self.factory.create_refund(
                datetime=second_day, status=REFUND_STATUS.FINISHED, payment=refund_payment)
        )
        # 2. On each of the three days create: an unpaid payment, a coupon
        #    payment, a plain payment, and one sharing (referral) income
        for datetime_ in (first_day, second_day, third_day):
            # Unpaid payment
            self.factory.create_payment(
                datetime=datetime_,
                status=PAYMENT_STATUS.UNPAID,
                merchant=self.merchant,
                order_price=set_amount(100),
            )
            # Payment that used a coupon (order 110, discount 10)
            coupon = self.factory.create_coupon(
                discount=set_amount(10),
                min_charge=set_amount(100),
                status=COUPON_STATUS.USED,
                use_datetime=datetime_,
            )
            use_coupon_payment = self.factory.create_payment(
                datetime=datetime_,
                status=PAYMENT_STATUS.FINISHED,
                merchant=self.merchant,
                order_price=set_amount(110),
                coupon=coupon,
                platform_share=set_amount(3),
                originator_share=set_amount(1),
                inviter_share=set_amount(1)
            )
            self.factory.create_transaction(
                transaction_type=TRANSACTION_TYPE.MERCHANT_RECEIVE,
                datetime=datetime_,
                account=self.account,
                amount=set_amount(95),
                content_object=use_coupon_payment,
            )
            # Plain payment without a coupon
            not_use_coupon_payment = self.factory.create_payment(
                datetime=datetime_,
                status=PAYMENT_STATUS.FINISHED,
                merchant=self.merchant,
                order_price=set_amount(100),
                platform_share=set_amount(0),
                originator_share=set_amount(0),
                inviter_share=set_amount(0)
            )
            self.factory.create_transaction(
                transaction_type=TRANSACTION_TYPE.MERCHANT_RECEIVE,
                datetime=datetime_,
                account=self.account,
                amount=set_amount(100),
                content_object=not_use_coupon_payment,
            )
            # Sharing (referral) income
            self.factory.create_transaction(
                transaction_type=TRANSACTION_TYPE.MERCHANT_SHARE,
                datetime=datetime_,
                account=self.account,
                amount=set_amount(1),
            )
        # Query a 7-day window around the fixture days (2017-12-31 .. 2018-01-06)
        result = self.manager.merchant_earning_list_by_day(
            date_start=first_day_begin.date() - timedelta(days=1),
            date_end=first_day_begin.date() + timedelta(days=5)
        )
        self.assertEqual(
            result,
            [{'date': '2017-12-31', 'amount': 0},
             {'date': '2018-01-01', 'amount': 195 + 1},
             {'date': '2018-01-02', 'amount': 195 + 1},
             {'date': '2018-01-03', 'amount': 0},
             {'date': '2018-01-04', 'amount': 195 + 1},
             {'date': '2018-01-05', 'amount': 0},
             {'date': '2018-01-06', 'amount': 0}]
        )
def test_get_cashier(self):
not_exist_id = 100000
self.assertEqual(self.manager.get_cashier(self.normal_cashier_a.id), self.normal_cashier_a)
self.assertEqual(self.manager.get_cashier(self.normal_cashier_b.id), self.normal_cashier_b)
self.assertEqual(self.manager.get_cashier(self.other_merchant_cashier.id), None)
self.assertEqual(self.manager.get_cashier(self.disabled_cashier.id), None)
self.assertEqual(self.manager.get_cashier(not_exist_id), None)
def test_cashiers(self):
cashiers = self.manager.cashiers
self.assertEqual(len(cashiers), 2)
self.assertEqual(set(cashiers), {self.normal_cashier_a, self.normal_cashier_b})
| [
"yiyuhao@mixadx.com"
] | yiyuhao@mixadx.com |
96d3df244cea79335053732ec879dd09d42ceb26 | 0613b082bd90462e190bc51943356ce6ce990815 | /attendance/migrations/0004_worksheet.py | 5180b56f06b1064cd4bee92effa57a857c9d7479 | [] | no_license | Hamidnet220/salary | 1068aac4bc921436c03b627899370a86ca5e99be | 4dc1f32dfa1d990e6c9f527b4a8d0e1df939262a | refs/heads/master | 2020-05-04T18:09:24.086491 | 2019-04-22T20:22:32 | 2019-04-22T20:22:32 | 179,342,004 | 0 | 1 | null | 2019-04-11T10:43:29 | 2019-04-03T17:53:36 | Python | UTF-8 | Python | false | false | 1,008 | py | # Generated by Django 2.2 on 2019-04-19 06:03
import django.contrib.postgres.fields
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Create the attendance ``Worksheet`` model.

    ``work_days_stat`` is a Postgres integer array of size 31 — presumably
    one work-status slot per day of the month (verify against callers).
    Both foreign keys use on_delete=PROTECT, so an Employee or Wage row
    referenced by a worksheet cannot be deleted.
    """

    # This re-creates Worksheet after migration 0003 deleted the old table.
    initial = True

    dependencies = [
        ('wage', '0007_auto_20190415_1650'),
        ('baseinfo', '0006_auto_20190419_0525'),
        ('attendance', '0003_delete_worksheet'),
    ]

    operations = [
        migrations.CreateModel(
            name='Worksheet',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('work_days_stat', django.contrib.postgres.fields.ArrayField(base_field=models.IntegerField(blank=True, null=True), size=31)),
                ('employee', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='baseinfo.Employee')),
                ('wage', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='wage.Wage')),
            ],
        ),
    ]
| [
"kiani.hamidreza@gmail.com"
] | kiani.hamidreza@gmail.com |
86e17ca45516850f568d789e62255634cb4dabe3 | ea1373d9a58ad198c15d35a6daddb4e06d21aa39 | /netScripts/3DCNN_Voxel/pyScripts/mask_data_prepare.py | 0547f1154bb2df74ea1c22ba3ad2632e773555ba | [] | no_license | screnary/VoxSegNet | bb2778dfc460dfafdbd923f79755f7f0776dc36f | 264f2efc0a589018a1fc68c111626beacbe095a5 | refs/heads/master | 2020-09-04T03:41:47.067129 | 2019-11-07T06:34:29 | 2019-11-07T06:34:29 | 219,649,857 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 41,915 | py | """ preparing mask data for mask prediction net --- 20180313 """
""" mask gt: [instance, height, width]
bbox gt: [instance, (x1,y1,z1, x2,y2,z2)]
{this x,y,z is for 3D tensor, min and max corner}
part_ids: 1D array of part IDs of the part mask and bbox, start from 1
padding 0 for non-exist part
Max_Instance_Num set to be 15
"""
import os
import sys
import numpy as np
from scipy.interpolate import griddata
import h5py
import warnings
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d.art3d import Poly3DCollection
import time
import pdb
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
sys.path.append(os.path.join(BASE_DIR, 'utils'))
Common_SET_DIR = os.path.join(BASE_DIR, '../CommonFile')
sys.path.append(Common_SET_DIR)
import globals as g_
import gen_part_box as Box
# Per-label RGB colors (float triples) used by the drawing helpers below
# to tint part bounding boxes.
COLOR_PALETTE = np.loadtxt(os.path.join(BASE_DIR,
                           'utils/palette_float.txt'),dtype='float32')
def tic():
    """Start a wall-clock timer (paired with toc()), stored in global 'tt'."""
    # time.clock() was removed in Python 3.8; time.perf_counter() is the
    # documented replacement for measuring short durations.
    globals()['tt'] = time.perf_counter()


def toc():
    """Print the number of seconds elapsed since the last tic() call."""
    print('\nElapsed time: %.8f seconds' % (time.perf_counter()-globals()['tt']))
def pts_to_volume(points, vsize):
    """Rasterize an (N, 3) point set into a vsize^3 binary occupancy grid.

    Points are assumed to lie in [0, 1] (after normalization all points
    fit inside a unit sphere); each point is snapped to the nearest grid
    index and that cell is set to 1.0. No boundary clamping is performed.
    """
    cell = 1.0 / float(vsize)
    # Shift by half a cell so rounding maps cell centres onto integer
    # indices 0 .. vsize-1 (e.g. indices [0..4] for vsize 5).
    idx = np.round(points / cell - 0.5).astype(int)
    grid = np.zeros((vsize, vsize, vsize))
    grid[idx[:, 0], idx[:, 1], idx[:, 2]] = 1.0
    return grid
def gen_masks_info_v1(parts_bag, label_bag, mask_shape=5):
    """Flatten a per-label bag of sub-parts into one list of point sets.

    The mask/box computation this function once performed is dead
    (commented-out) code; only the point-group collection survives, so the
    dead code has been removed.

    Parameters
    ----------
    parts_bag : list with one entry per label; each entry is indexable
        along axis 0 and yields (num_pts, 3) sub-part point arrays.
    label_bag : labels parallel to parts_bag (kept for interface
        compatibility; unused).
    mask_shape : kept for interface compatibility; unused.

    Returns
    -------
    list of (num_pts, 3) arrays, one per sub-part, in bag order.
    """
    part_pts = [sub_part for part in parts_bag for sub_part in part]
    return part_pts
def cropping_mask(bbox, mask):
    """Crop ``mask`` to the inclusive box [x1, y1, z1, x2, y2, z2].

    Raises if the resulting crop is empty (degenerate / inverted box).
    """
    x_lo = bbox[0].astype(int)
    y_lo = bbox[1].astype(int)
    z_lo = bbox[2].astype(int)
    x_hi = bbox[3].astype(int)
    y_hi = bbox[4].astype(int)
    z_hi = bbox[5].astype(int)
    # Upper corner is inclusive, hence the +1 on every slice end.
    cropped = mask[x_lo:(x_hi + 1), y_lo:(y_hi + 1), z_lo:(z_hi + 1)]
    if cropped.size == 0:
        raise Exception("Invalid bounding box with area of zero")
    return cropped
def minimize_mask(bbox, mask, mini_shape):
    """Crop ``mask`` to ``bbox`` and resample to a mini_shape^3 boolean grid.

    Mini-masks cut memory; expand_mask() resizes them back to voxel scale.
    """
    crop = cropping_mask(bbox, mask)
    h, w, d = crop.shape
    values = crop.reshape(h * w * d)
    # (h*w*d, 3) index coordinates of every voxel in the crop.
    src_pts = np.mgrid[0:h, 0:w, 0:d].reshape(3, h * w * d).T
    # Evenly spaced mini_shape^3 lattice spanning the same extent.
    steps = complex(0, mini_shape)
    gi, gj, gk = np.mgrid[0:(h - 1):steps, 0:(w - 1):steps, 0:(d - 1):steps]
    # Linear interpolation, thresholded back to a boolean mask.
    return griddata(src_pts, values, (gi, gj, gk), method="linear") >= 0.5
def expand_mask(bbox, mini_mask, vox_shape):
    """Resize a mini-mask back to voxel scale (inverse of minimize_mask).

    Returns ``(mask, mask_cropped)``: the full vox_shape^3 {0,1} grid and
    the interpolated crop covering just the bbox extent.
    """
    h, w, d = mini_mask.shape
    values = mini_mask.reshape(h * w * d)
    # Index coordinates of every mini-mask voxel.
    src_pts = np.mgrid[0:h, 0:w, 0:d].reshape(3, h * w * d).T
    x_lo = bbox[0].astype(int)
    y_lo = bbox[1].astype(int)
    z_lo = bbox[2].astype(int)
    x_hi = bbox[3].astype(int)
    y_hi = bbox[4].astype(int)
    z_hi = bbox[5].astype(int)
    # Target lattice matches the (inclusive) box dimensions.
    steps = [complex(0, x_hi - x_lo + 1),
             complex(0, y_hi - y_lo + 1),
             complex(0, z_hi - z_lo + 1)]
    gi, gj, gk = np.mgrid[0:(h - 1):steps[0],
                          0:(w - 1):steps[1],
                          0:(d - 1):steps[2]]
    mask_cropped = griddata(src_pts, values, (gi, gj, gk),
                            method="linear") >= 0.5
    # Paste the interpolated crop into an empty full-size grid.
    mask = np.zeros([vox_shape, vox_shape, vox_shape])
    mask[x_lo:(x_hi + 1), y_lo:(y_hi + 1), z_lo:(z_hi + 1)] = mask_cropped
    return mask, mask_cropped
def gen_masks_info_v2(vox_seg, Boxes, Blabels, mask_shape=16):
    """Build cropped masks and interpolated mini-masks for every part box.

    The original also built a per-part point list (``np.where`` over the
    full grid) that was never returned; that dead work has been removed.

    Parameters
    ----------
    vox_seg : labelled voxel grid (0 = empty; part labels start from 1).
    Boxes : list of (8, 3) vertex arrays; vertex 0 is the min corner and
        vertex -2 the max corner (the A..H ordering used by drawCubes_roi).
    Blabels : part label for each box.
    mask_shape : edge length of the resampled mini-masks.

    Returns
    -------
    masks_crop : full-resolution masks cropped to each box.
    mini_masks : boolean mask_shape^3 mini-masks (see minimize_mask).
    boxes : flat [x1, y1, z1, x2, y2, z2] int arrays.
    part_ids : part labels, starting from 1.
    """
    boxes = []
    masks_crop = []
    mini_masks = []
    part_ids = []
    for n in range(len(Blabels)):
        label = Blabels[n]
        box = Boxes[n]
        minCoord = box[0, :]
        maxCoord = box[-2, :]
        x_m = minCoord[0].astype(int)  # x,y,z correspond to row, col, depth
        y_m = minCoord[1].astype(int)
        z_m = minCoord[2].astype(int)
        x_M = maxCoord[0].astype(int)
        y_M = maxCoord[1].astype(int)
        z_M = maxCoord[2].astype(int)
        # Keep only voxels carrying this label that fall inside the box.
        embed_box = np.zeros(vox_seg.shape)
        embed_box[x_m:(x_M+1), y_m:(y_M+1), z_m:(z_M+1)] = 1
        mask = np.multiply((vox_seg == label), embed_box)
        bbox = np.array([x_m, y_m, z_m, x_M, y_M, z_M])
        # Crop the mask to the box and resample it down to mask_shape^3.
        mini_mask = minimize_mask(bbox, mask, mask_shape)
        boxes.append(bbox)
        masks_crop.append(cropping_mask(bbox, mask))
        mini_masks.append(mini_mask)
        part_ids.append(label)  # labels start from 1
    return masks_crop, mini_masks, boxes, part_ids
def gen_masks_info_v3(vox_seg, Boxes, Blabels, mask_shape=16):
    """Build mini-masks, mini occupancy proposals and ROIs per part box.

    Like gen_masks_info_v2, but additionally resamples the occupancy
    (``vox_seg > 0``) inside each box into a mini grid, serving as a
    proposal alongside each target mini-mask.

    vox_seg : labelled voxel grid (0 = empty; part labels start from 1).
    Boxes : list of (8, 3) vertex arrays; vertex 0 holds the min corner
        and vertex -2 the max corner.
    Blabels : part label per box.
    mask_shape : edge length of the interpolated mini grids.

    Returns (mini_masks, mini_voxels, boxes, part_ids).
    """
    boxes = []
    mini_masks = []
    mini_voxels = []
    part_ids = []
    for n in range(len(Blabels)):
        # get part_instance full size mask
        label = Blabels[n]
        box = Boxes[n]
        minCoord = box[0, :]
        maxCoord = box[-2, :]
        x_m = minCoord[0].astype(int)  # x,y,z is r,c,d
        y_m = minCoord[1].astype(int)
        z_m = minCoord[2].astype(int)
        x_M = maxCoord[0].astype(int)
        y_M = maxCoord[1].astype(int)
        z_M = maxCoord[2].astype(int)
        # Binary indicator of the box extent over the full grid.
        embed_box = np.zeros(vox_seg.shape)
        embed_box[x_m:(x_M+1), y_m:(y_M+1), z_m:(z_M+1)] = 1
        # Voxels of this part label inside the box.
        mask = np.multiply((vox_seg == label), embed_box)  # elementwise, same as *
        # All occupied voxels inside the box (any label).
        voxel = np.multiply((vox_seg > 0), embed_box)
        # voxel_pts = np.transpose(np.array(np.where(voxel)))
        bbox = np.array([x_m,y_m,z_m,x_M,y_M,z_M])
        # pdb.set_trace()
        # print(n)
        # if n == 8:
        #     pdb.set_trace()
        # crop and resize to MASK_SHAPE, minimize_mask()
        mini_mask = minimize_mask(bbox, mask, mask_shape)
        mini_voxel = minimize_mask(bbox, voxel, mask_shape)
        boxes.append(bbox)  # pred_bbox
        mini_masks.append(mini_mask)  # target_masks
        mini_voxels.append(mini_voxel)  # proposals
        part_ids.append(label)  # start from 1
    return mini_masks, mini_voxels, boxes, part_ids
def gen_rois_info(Boxes, Blabels):
    """Convert corner-vertex part boxes into flat ROI arrays.

    The previous docstring was copy-pasted from gen_masks_info_v3 and
    described mini-masks/proposals this function never computes.

    Parameters
    ----------
    Boxes : list of (8, 3) vertex arrays; vertex 0 holds the min corner
        and vertex -2 the max corner (the A..H ordering used by
        drawCubes_roi below).
    Blabels : part label for each box.

    Returns
    -------
    boxes : list of flat int arrays [x1, y1, z1, x2, y2, z2].
    part_ids : list of part labels (starting from 1).
    """
    boxes = []
    part_ids = []
    for box, label in zip(Boxes, Blabels):
        min_corner = box[0, :].astype(int)   # x,y,z are row, col, depth
        max_corner = box[-2, :].astype(int)
        boxes.append(np.concatenate([min_corner, max_corner]))
        part_ids.append(label)
    return boxes, part_ids
def draw_pts(part_pts):
    """Scatter-plot an (N, 3) point cloud for quick visual inspection."""
    plt.close('all')
    fig = plt.figure()
    axis = fig.add_subplot(111, projection='3d')
    axis.set_title('tfgraph computed box')
    axis.set_aspect('equal')
    axis.set_xlabel('--X->')
    axis.set_ylabel('--Y->')
    axis.set_zlabel('--Z->')
    axis.view_init(elev=17., azim=165.)
    # Cubic axis limits spanning the full point range keep aspect equal.
    lim_lo = np.min(part_pts)
    lim_hi = np.max(part_pts)
    axis.set_xlim(lim_lo, lim_hi)
    axis.set_ylim(lim_lo, lim_hi)
    axis.set_zlim(lim_lo, lim_hi)
    axis.scatter(part_pts[:, 0], part_pts[:, 1], part_pts[:, 2], alpha=0.8)
def draw_voxel(voxel_data, ax):
    '''
    Render a binary voxel grid on the given 3D axis.

    Each voxel is drawn as a slightly shrunken cube: the grid is first
    doubled ("exploded") to leave gaps between cells, then the gap is
    narrowed to 0.1 so individual cube edges remain visible. Occupied
    cells use an opaque face color; empty cells use a fully transparent
    one (8-digit hex colors carry alpha in the last two digits).
    '''
    def explode(data):
        # explode voxel data: double the grid so every cell is followed
        # by an (initially empty) gap cell along each axis
        size = np.array(data.shape)*2
        # insert gaps between valid grid
        data_e = np.zeros(size - 1, dtype=data.dtype)
        data_e[::2, ::2, ::2] = data
        return data_e
    # Per-cell colors: opaque yellow faces for occupied, transparent otherwise.
    facecolors = np.where(voxel_data, '#FFD65DC0', '#7A88CC00') # '#7A88CCC0'
    edgecolors = np.where(voxel_data, '#BFAB6E', '#7D84A600') # '#7D84A6'
    filled = np.ones(voxel_data.shape)
    # upscale the above voxel image, leaving gaps
    filled_2 = explode(filled)
    fcolors_2 = explode(facecolors)
    ecolors_2 = explode(edgecolors)
    # pdb.set_trace()
    # Shrink the gaps, gap size = 0.1: every even-index coordinate is
    # pushed in by 0.05 and every odd one out by 0.95, so each cube
    # occupies 90% of its cell.
    x, y, z = np.indices(np.array(filled_2.shape) + 1).astype(float) // 2
    x[0::2, :, :] += 0.05
    y[:, 0::2, :] += 0.05
    z[:, :, 0::2] += 0.05
    x[1::2, :, :] += 0.95
    y[:, 1::2, :] += 0.95
    z[:, :, 1::2] += 0.95
    # fig = plt.figure()
    # ax = fig.gca(projection='3d')
    ax.voxels(x, y, z, filled_2, facecolors=fcolors_2, edgecolors=ecolors_2)
def draw_mask(mini_mask, full_mask):
    """Plot a mini-mask (left) next to its cropped full-size mask (right).

    ``full_mask`` may be either an (N, 3) point array (scattered) or a 3D
    voxel grid (rendered with draw_voxel).
    """
    # print('drawing mask...')
    plt.close('all')
    fig = plt.figure()
    # plot the mini_mask
    # ax = fig.gca(projection='3d')
    ax = fig.add_subplot(121, projection='3d')
    ax.set_title('mini mask')
    axl_m = 0
    axl_M = np.max(mini_mask.shape)
    ax.set_aspect('equal')
    ax.set_xlim(axl_m,axl_M)
    ax.set_ylim(axl_m,axl_M)
    ax.set_zlim(axl_m,axl_M)
    ax.set_xlabel('--X->')
    ax.set_ylabel('--Y->')
    ax.set_zlabel('--Z->')
    ax.view_init(elev=79., azim=-65.)
    draw_voxel(mini_mask, ax)
    # plot the full size mask
    ax_1 = fig.add_subplot(122, projection='3d')
    ax_1.set_title('crop mask')
    ax_1.set_aspect('equal')
    ax_1.set_xlabel('--X->')
    ax_1.set_ylabel('--Y->')
    ax_1.set_zlabel('--Z->')
    ax_1.view_init(elev=79., azim=-65.)
    if len(full_mask.shape) == 2:
        # (N, 3) point format: scatter within a fixed [-1, 48] cube
        part_pts = full_mask
        X_pts = part_pts[:,0]
        Y_pts = part_pts[:,1]
        Z_pts = part_pts[:,2]
        axl_m = -1
        axl_M = 48
        ax_1.set_xlim(axl_m,axl_M)
        ax_1.set_ylim(axl_m,axl_M)
        ax_1.set_zlim(axl_m,axl_M)
        ax_1.scatter(X_pts,Y_pts,Z_pts,alpha=0.8)
    elif len(full_mask.shape) == 3:
        # Voxel format: costlier to render than points
        # print('Warning: full_mask should be pts format, vox is too costly!')
        axl_m = 0
        axl_M = np.max(full_mask.shape)
        ax_1.set_xlim(axl_m,axl_M)
        ax_1.set_ylim(axl_m,axl_M)
        ax_1.set_zlim(axl_m,axl_M)
        draw_voxel(full_mask, ax_1) # mask crop
def draw_mask_v2(mini_mask, full_mask, box_bag, part_ids):
    """Plot a mini-mask beside the full mask overlaid with part boxes.

    box_bag : list of 8x3 vertex arrays, one box per part instance.
    part_ids : labels used to pick face colors from COLOR_PALETTE.
    ``full_mask`` may be an (N, 3) point array or a 3D voxel grid.
    """
    # print('drawing mask...')
    plt.close('all')
    fig = plt.figure()
    # plot the mini_mask
    # ax = fig.gca(projection='3d')
    ax = fig.add_subplot(121, projection='3d')
    ax.set_title('mini mask')
    axl_m = 0
    axl_M = np.max(mini_mask.shape)
    ax.set_aspect('equal')
    ax.set_xlim(axl_m,axl_M)
    ax.set_ylim(axl_m,axl_M)
    ax.set_zlim(axl_m,axl_M)
    ax.set_xlabel('--X->')
    ax.set_ylabel('--Y->')
    ax.set_zlabel('--Z->')
    ax.view_init(elev=41., azim=-64.)
    draw_voxel(mini_mask, ax)
    # plot the full size mask: in pts format
    ax_1 = fig.add_subplot(122, projection='3d')
    ax_1.set_title('full mask')
    ax_1.set_aspect('equal')
    ax_1.set_xlabel('--X->')
    ax_1.set_ylabel('--Y->')
    ax_1.set_zlabel('--Z->')
    # ax_1.view_init(elev=79., azim=-65.)
    ax_1.view_init(elev=41., azim=-64.)
    # pdb.set_trace()
    if len(full_mask.shape) == 1:
        # Object array of point groups: merge into one (N, 3) array
        full_mask = np.concatenate(full_mask,0)
    if len(full_mask.shape) == 2:
        part_pts = full_mask
        X_pts = part_pts[:,0]
        Y_pts = part_pts[:,1]
        Z_pts = part_pts[:,2]
        # Axis limits derived from the box vertices, padded by one voxel
        BOXES = np.asarray(box_bag).reshape([-1,3])
        # axl_m = np.amin(BOXES, axis=0)-1
        # axl_M = np.amax(BOXES, axis=0)+1
        # max_range = axl_M - axl_m
        axl_m = np.min(BOXES)-1
        axl_M = np.max(BOXES)+1
        max_range = axl_M - axl_m
        ax_1.set_xlim(axl_m,axl_M)
        ax_1.set_ylim(axl_m,axl_M)
        ax_1.set_zlim(axl_m,axl_M)
        ax_1.scatter(X_pts,Y_pts,Z_pts,alpha=0.8)
    elif len(full_mask.shape) == 3:
        # print('Warning: full_mask should be pts format, vox is too costly!')
        axl_m = 0
        axl_M = np.max(full_mask.shape)
        max_range = axl_M - axl_m
        ax_1.set_xlim(axl_m,axl_M)
        ax_1.set_ylim(axl_m,axl_M)
        ax_1.set_zlim(axl_m,axl_M)
        draw_voxel(full_mask, ax_1) # mask crop
    # draw the bounding boxes of the whole shape
    def drawCubes(Boxes, id_bag, ax_in):
        """ Input Boxes: list of 8*3 np.array, 8 vertices """
        # list of sides' polygons of figure; cube surface
        print('Boxes num:', len(Boxes))
        for i in range(len(Boxes)):
            Z = Boxes[i]
            # Six quad faces built from the 8 vertices (A..H ordering)
            verts = [[Z[0],Z[1],Z[2],Z[3]],
                     [Z[4],Z[5],Z[6],Z[7]],
                     [Z[0],Z[1],Z[5],Z[4]],
                     [Z[2],Z[3],Z[7],Z[6]],
                     [Z[1],Z[2],Z[6],Z[5]],
                     [Z[4],Z[7],Z[3],Z[0]]]
            # plot sides: translucent faces colored by part id
            collection = Poly3DCollection(verts,
                                          linewidths=.25, edgecolors='r', alpha=.15)
            # face_color = [0, 1, 1]
            face_color = COLOR_PALETTE[id_bag[i].astype(int)]
            collection.set_facecolor(face_color)
            ax_in.add_collection3d(collection)
    drawCubes(box_bag, part_ids, ax_1)
    # Create cubic bounding box to simulate equal aspect ratio
    # pdb.set_trace()
    try:
        Xb = 0.5*max_range*np.mgrid[-1:2:2,-1:2:2,-1:2:2].flatten() + 0.5*(axl_M+axl_m)
        Yb = 0.5*max_range*np.mgrid[-1:2:2,-1:2:2,-1:2:2].flatten() + 0.5*(axl_M+axl_m)
        Zb = 0.5*max_range*np.mgrid[-1:2:2,-1:2:2,-1:2:2].flatten() + 0.5*(axl_M+axl_m)
        # Comment or uncomment following both lines to test the fake bounding box:
        for xb, yb, zb in zip(Xb, Yb, Zb):
            ax_1.plot([xb], [yb], [zb], 'w')
    except UnboundLocalError as e:
        # max_range is only bound inside the 2D/3D branches above
        print('Error! ', e)
        pdb.set_trace()
def draw_mask_box(full_mask, box_bag, part_ids):
    """Plot a full mask overlaid with flat-format ROI boxes.

    Unlike draw_mask_v2, box_bag here is an (n, 6) array of
    [x1, y1, z1, x2, y2, z2] corners (the format gen_rois_info emits);
    the 8 cube vertices are reconstructed per box before drawing.
    """
    # print('drawing mask...')
    plt.close('all')
    fig = plt.figure()
    # plot the mini_mask
    # ax = fig.gca(projection='3d'
    # plot the full size mask: in pts format
    ax_1 = fig.add_subplot(111, projection='3d')
    ax_1.set_title('tfgraph computed box')
    ax_1.set_aspect('equal')
    ax_1.set_xlabel('--X->')
    ax_1.set_ylabel('--Y->')
    ax_1.set_zlabel('--Z->')
    # ax_1.view_init(elev=79., azim=-65.)
    ax_1.view_init(elev=41., azim=-64.)
    # pdb.set_trace()
    if len(full_mask.shape) == 1:
        # Object array of point groups: merge into one (N, 3) array
        full_mask = np.concatenate(full_mask,0)
    if len(full_mask.shape) == 2:
        part_pts = full_mask
        X_pts = part_pts[:,0]
        Y_pts = part_pts[:,1]
        Z_pts = part_pts[:,2]
        BOXES = np.asarray(box_bag).reshape([-1,3])
        # axl_m = np.amin(BOXES, axis=0)-1
        # axl_M = np.amax(BOXES, axis=0)+1
        # max_range = axl_M - axl_m
        # axl_m = np.min(BOXES)-1
        # axl_M = np.max(BOXES)+1
        # Fixed [-1, 48] limits (matches the 48^3 voxel grids used here)
        axl_m = -1
        axl_M = 48
        max_range = axl_M - axl_m
        ax_1.set_xlim(axl_m,axl_M)
        ax_1.set_ylim(axl_m,axl_M)
        ax_1.set_zlim(axl_m,axl_M)
        ax_1.scatter(X_pts,Y_pts,Z_pts,alpha=0.8)
    elif len(full_mask.shape) == 3:
        # print('Warning: full_mask should be pts format, vox is too costly!')
        axl_m = 0
        axl_M = np.max(full_mask.shape)
        max_range = axl_M - axl_m
        ax_1.set_xlim(axl_m,axl_M)
        ax_1.set_ylim(axl_m,axl_M)
        ax_1.set_zlim(axl_m,axl_M)
        draw_voxel(full_mask, ax_1) # mask crop
    # draw the bounding boxes of the whole shape
    def drawCubes_roi(Boxes, id_bag, ax_in):
        """ Input Boxes: (n, 6) array of [min, max] corners; rebuilds the
        8 cube vertices (A..H) for each ROI before drawing its faces. """
        # list of sides' polygons of figure; cube surface
        # print('Boxes num:', Boxes.shape[0])
        for i in range(Boxes.shape[0]):
            minCoord = Boxes[i,0:3]
            maxCoord = Boxes[i,3:6]
            x_m = minCoord[0]
            y_m = minCoord[1]
            z_m = minCoord[2]
            x_M = maxCoord[0]
            y_M = maxCoord[1]
            z_M = maxCoord[2]
            boxes = []
            boxes.append([x_m, y_m, z_m])  # A
            boxes.append([x_M, y_m, z_m])  # B
            boxes.append([x_M, y_m, z_M])  # C
            boxes.append([x_m, y_m, z_M])  # D
            boxes.append([x_m, y_M, z_m])  # E
            boxes.append([x_M, y_M, z_m])  # F
            boxes.append([x_M, y_M, z_M])  # G
            boxes.append([x_m, y_M, z_M])  # H
            Z = boxes
            # Six quad faces built from the 8 vertices
            verts = [[Z[0],Z[1],Z[2],Z[3]],
                     [Z[4],Z[5],Z[6],Z[7]],
                     [Z[0],Z[1],Z[5],Z[4]],
                     [Z[2],Z[3],Z[7],Z[6]],
                     [Z[1],Z[2],Z[6],Z[5]],
                     [Z[4],Z[7],Z[3],Z[0]]]
            # plot sides: translucent faces colored by part id
            collection = Poly3DCollection(verts,
                                          linewidths=.25, edgecolors='r', alpha=.15)
            # face_color = [0, 1, 1]
            face_color = COLOR_PALETTE[id_bag[i].astype(int)]
            collection.set_facecolor(face_color)
            ax_in.add_collection3d(collection)
    drawCubes_roi(box_bag, part_ids, ax_1)
    # Create cubic bounding box to simulate equal aspect ratio
    # pdb.set_trace()
    try:
        Xb = 0.5*max_range*np.mgrid[-1:2:2,-1:2:2,-1:2:2].flatten() + 0.5*(axl_M+axl_m)
        Yb = 0.5*max_range*np.mgrid[-1:2:2,-1:2:2,-1:2:2].flatten() + 0.5*(axl_M+axl_m)
        Zb = 0.5*max_range*np.mgrid[-1:2:2,-1:2:2,-1:2:2].flatten() + 0.5*(axl_M+axl_m)
        # Comment or uncomment following both lines to test the fake bounding box:
        for xb, yb, zb in zip(Xb, Yb, Zb):
            ax_1.plot([xb], [yb], [zb], 'w')
    except UnboundLocalError as e:
        # max_range is only bound inside the 2D/3D branches above
        print('Error! ', e)
        pdb.set_trace()
def draw_vox_pts(part_pts):
    """Scatter-plot an (N, 3) point set inside a fixed [-1, 16] cube."""
    plt.close('all')
    fig = plt.figure()
    axis = fig.add_subplot(111, projection='3d')
    axis.set_title('tfgraph computed box')
    axis.set_aspect('equal')
    axis.set_xlabel('--X->')
    axis.set_ylabel('--Y->')
    axis.set_zlabel('--Z->')
    axis.view_init(elev=17., azim=165.)
    # Fixed limits sized for 16^3 mini grids.
    axis.set_xlim(-1, 16)
    axis.set_ylim(-1, 16)
    axis.set_zlim(-1, 16)
    axis.scatter(part_pts[:, 0], part_pts[:, 1], part_pts[:, 2], alpha=0.8)
def draw_volume_shape(voxel_data):
    """Render a binary voxel grid as shrunken cubes via draw_voxel()."""
    print('drawing volumes...')
    plt.close('all')
    fig = plt.figure()
    axis = fig.add_subplot(111, projection='3d')
    axis.set_title('voxel data')
    # Cubic limits padded by one cell around the grid.
    lim_lo = -1
    lim_hi = np.max(voxel_data.shape) + 1
    axis.set_aspect('equal')
    axis.set_xlim(lim_lo, lim_hi)
    axis.set_ylim(lim_lo, lim_hi)
    axis.set_zlim(lim_lo, lim_hi)
    axis.set_xlabel('--X->')
    axis.set_ylabel('--Y->')
    axis.set_zlabel('--Z->')
    axis.view_init(elev=17., azim=165.)
    draw_voxel(voxel_data, axis)
def draw_volume_seg(vox_seg):
    """Render a labelled voxel grid with one named color per part label.

    Labels 1..7 map to fixed colors; 0 (empty) is fully transparent.
    """
    print('drawing seg volumes...')
    plt.close('all')
    def draw_voxel_v2(voxel_data, ax):
        # Like draw_voxel, but face colors come from the label value
        # instead of a single occupied/empty pair.
        def explode(data):
            # explode voxel data: double the grid, leaving gap cells
            size = np.array(data.shape)*2
            # insert gaps between valid grid
            data_e = np.zeros(size - 1, dtype=data.dtype)
            data_e[::2, ::2, ::2] = data
            return data_e
        # build up the color voxel grid
        h,w,d = voxel_data.shape
        colors = np.empty((h,w,d), dtype=object) # (r,g,b,a) at each position
        colors[voxel_data==0] = '#7A88CC00'  # empty: fully transparent
        colors[voxel_data==1] = 'red'
        colors[voxel_data==2] = 'blue'
        colors[voxel_data==3] = 'green'
        colors[voxel_data==4] = 'violet'
        colors[voxel_data==5] = 'cyan'
        colors[voxel_data==6] = 'lightsalmon'
        colors[voxel_data==7] = 'gray'
        facecolors = colors
        # facecolors = np.where(voxel_data, '#FFD65DC0', '#7A88CC00') # '#7A88CCC0'
        edgecolors = np.where(voxel_data, '#BFAB6E', '#7D84A600') # '#7D84A6'
        filled = np.ones(voxel_data.shape)
        # upscale the above voxel image, leaving gaps
        filled_2 = explode(filled)
        fcolors_2 = explode(facecolors)
        ecolors_2 = explode(edgecolors)
        # pdb.set_trace()
        # Shrink the gaps, gap size = 0.1 (each cube fills 90% of its cell)
        x, y, z = np.indices(np.array(filled_2.shape) + 1).astype(float) // 2
        x[0::2, :, :] += 0.05
        y[:, 0::2, :] += 0.05
        z[:, :, 0::2] += 0.05
        x[1::2, :, :] += 0.95
        y[:, 1::2, :] += 0.95
        z[:, :, 1::2] += 0.95
        # fig = plt.figure()
        # ax = fig.gca(projection='3d')
        # NOTE: edges reuse the face colors, so cube outlines match labels
        ax.voxels(x, y, z, filled_2, facecolors=fcolors_2, edgecolors=fcolors_2)
    fig = plt.figure()
    # plot the vox_seg
    # ax = fig.gca(projection='3d')
    ax = fig.add_subplot(111, projection='3d')
    ax.set_title('seg volumes')
    axl_m = -1
    axl_M = np.max(vox_seg.shape)+1
    ax.set_aspect('equal')
    ax.set_xlim(axl_m,axl_M)
    ax.set_ylim(axl_m,axl_M)
    ax.set_zlim(axl_m,axl_M)
    ax.set_xlabel('--X->')
    ax.set_ylabel('--Y->')
    ax.set_zlabel('--Z->')
    ax.view_init(elev=17., azim=165.)
    draw_voxel_v2(vox_seg, ax)
def load_h5_mask(h5_filename):
    """Load (mask, bbox, label) arrays from an HDF5 file.

    The previous version never closed the file handle; a context manager
    guarantees it is closed even if a dataset read fails. The [:] slices
    materialize each dataset as an in-memory array before the file closes.
    """
    with h5py.File(h5_filename, 'r') as f:
        bbox = f['bbox'][:]
        mask = f['mask'][:]
        label = f['label'][:]
    return (mask, bbox, label)
def get_mask_data(vox_datas, pred_segs, vox_segs, Mini_Shape, Max_Ins_Num):
    """Build mask-training data for a batch of voxel models.

    Input: a batch of voxel volumes plus predicted and ground-truth
    segmentations. For each model, subpart boxes are computed from the
    predicted segmentation, then proposals (crops of the voxels) and mask
    targets (crops of the ground-truth segmentation) are generated, padded
    to a fixed instance count.

    Returns (proposals, gt_masks, part_ids, bboxes), each of leading shape
    (batch_size, Max_Ins_Num, ...).
    """
    batch_size = vox_datas.shape[0]
    # Per-model padded outputs, accumulated then concatenated along axis 0.
    Bbox_list = []
    Mask_list = []
    Prop_list = []
    Pid_list = []
    for b in range(batch_size):
        cur_vox = np.squeeze(vox_datas[b, ...])
        cur_pred_seg = np.squeeze(pred_segs[b, ...])
        cur_gt_seg = np.squeeze(vox_segs[b, ...])
        # Occupied voxel coordinates and their predicted labels.
        pts = np.transpose(np.array(np.where(cur_vox)))
        plb = cur_pred_seg[pts[:,0], pts[:,1], pts[:,2]]
        pts = pts.astype(float)
        # pdb.set_trace()
        """ process seg pts into subpart groups (distance metric, group box)"""
        # tic()
        # with warnings.catch_warnings():
        #     warnings.filterwarnings('error')
        #     try:
        #         Boxes, BLabels, _, _ = Box.computeBox(pts=pts, plb=plb, alpha=1.5)
        #     except RuntimeWarning as e:
        #         print('Error found:', e)
        #         pdb.set_trace()
        Boxes, BLabels, _, _ = Box.computeBox(pts=pts, plb=plb, alpha=1.5)
        # toc() # computeBox(): 0.5~3 seconds per model
        # ### Todo: augment Boxes data
        # tic()
        gt_masks, proposals, boxes, part_ids = gen_masks_info_v3(
            cur_gt_seg, Boxes, BLabels, mask_shape=Mini_Shape) # mini_masks: gt mask
        """ gt_masks:   (subpart_num, mini_shape**3)
            proposals: (subpart_num, mini_shape**3)
            part_ids:  (subpart_num,)
        """
        # toc() # gen_masks_info_v3 spend too much time, almost 4~7 times of computeBox
        # Fixed-size buffers, zero-padded up to Max_Ins_Num instances.
        Bbox_data = np.zeros((Max_Ins_Num, 6)) # [rois, x1,y1,z1, x2,y2,z2]
        Mask_data = np.zeros((Max_Ins_Num, Mini_Shape, Mini_Shape, Mini_Shape))
        Prop_data = np.zeros((Max_Ins_Num, Mini_Shape, Mini_Shape, Mini_Shape))
        Label_data = np.zeros((Max_Ins_Num,))
        b_count = len(part_ids) # box count
        if b_count <= Max_Ins_Num:
            Bbox_data[0:b_count, ...] = np.asarray(boxes)
            Mask_data[0:b_count, ...] = np.asarray(gt_masks)
            Prop_data[0:b_count, ...] = np.asarray(proposals)
            Label_data[0:b_count, ...] = np.asarray(part_ids)
        elif b_count > Max_Ins_Num:
            # Too many instances: keep only the first Max_Ins_Num.
            print("Warning: box count larger than Max_Ins_Num! ", b_count, '/', Max_Ins_Num)
            b_count = Max_Ins_Num
            Bbox_data[0:b_count, ...] = np.asarray(boxes)[0:b_count, ...]
            Mask_data[0:b_count, ...] = np.asarray(gt_masks)[0:b_count, ...]
            Prop_data[0:b_count, ...] = np.asarray(proposals)[0:b_count, ...]
            Label_data[0:b_count, ...] = np.asarray(part_ids)[0:b_count, ...]
        Bbox_list.append(np.expand_dims(Bbox_data, axis=0))
        Mask_list.append(np.expand_dims(Mask_data, axis=0))
        Prop_list.append(np.expand_dims(Prop_data, axis=0))
        Pid_list.append(np.expand_dims(Label_data, axis=0))
    return np.concatenate(Prop_list, 0), np.concatenate(Mask_list, 0),\
           np.concatenate(Pid_list, 0), np.concatenate(Bbox_list, 0)
# if __name__ == '__main__':
# """ Settings """
# DATA_DIR = os.path.join(BASE_DIR, 'hdf5_data'+g_.Data_suf, '48')
# # ['Motorbike', 'Earphone', 'Rocket', 'Airplane', 'Chair']
# CAT_LIST = ['Motorbike']
# dataType = 'vol' # 'pts'
# train_or_test_list = ['test', 'train', 'val'] # ['test', 'train', 'val']
# debug = False
# view_flag = True
# write_flag = False
# Max_Ins_Num = g_.MAX_INS_NUM # 15
# Mini_Shape = g_.MASK_SHAPE # 15
# for CAT_NAME in CAT_LIST:
# for train_or_test in train_or_test_list:
# print('Category: ', CAT_NAME, '\t', train_or_test)
# out_dir = os.path.join(DATA_DIR, 'mask')
# if not os.path.exists(out_dir):
# os.mkdir(out_dir)
# dump_dir = './cubeImgs/'+CAT_NAME+g_.Data_suf
# if not os.path.exists(dump_dir):
# os.mkdir(dump_dir)
# m_file = os.path.join(DATA_DIR, CAT_NAME+'_'+dataType+'_'+train_or_test+'.h5')
# model_num = Box.get_data_num(m_file)
# current_data, current_seg = Box.load_h5_volumes_data(m_file)
# input_cur_data = current_data[2:3, ...]
# input_cur_seg = current_seg[2:3, ...]
# pred_seg = input_cur_seg
# cur_proposals, cur_targets, cur_partids, _= get_mask_data(
# input_cur_data, pred_seg, input_cur_seg,
# Mini_Shape=g_.MASK_SHAPE, Max_Ins_Num=g_.MAX_INS_NUM)
# if view_flag and (train_or_test=='test'):
# mini_mask = cur_proposals[0,2,...]
# part_id = cur_partids[0,2]
# # draw_mask(mini_mask, crop_mask)
# # draw_mask_v2(mini_mask, mask_pts, box_bag=Boxes, part_ids=part_ids)
# draw_volume_shape(mini_mask)
# # figname = os.path.join(dump_dir, 'model_'+str(m_idx)+
# # '-part_'+str(part_id)+'-box_'+str(i)+'.png')
# # plt.savefig(figname)
# pdb.set_trace()
""" __main__ """
if __name__ == '__main__':
""" Settings """
DATA_DIR = os.path.join(BASE_DIR, 'hdf5_data'+g_.Data_suf, '48')
# ['Motorbike', 'Earphone', 'Rocket', 'Airplane', 'Chair']
CAT_LIST = ['Motorbike', 'Earphone', 'Airplane']
dataType = 'vol' # 'pts'
train_or_test_list = ['test', 'train'] # ['test', 'train', 'val']
debug = False
view_flag = True
write_flag = True
Max_Ins_Num = g_.MAX_INS_NUM # 30
Mini_Shape = g_.MASK_SHAPE # 16
for CAT_NAME in CAT_LIST:
for train_or_test in train_or_test_list:
print('Category: ', CAT_NAME, '\t', train_or_test)
out_dir = os.path.join(DATA_DIR, 'bbox')
if not os.path.exists(out_dir):
os.mkdir(out_dir)
dump_dir = './bbox_gt/'+CAT_NAME+g_.Data_suf
if not os.path.exists(dump_dir):
os.mkdir(dump_dir)
m_file = os.path.join(DATA_DIR, CAT_NAME+'_'+dataType+'_'+train_or_test+'.h5')
model_num = Box.get_data_num(m_file)
if write_flag:
h5_fname = os.path.join(out_dir, CAT_NAME+'_'+dataType+'_'+train_or_test+'.h5')
h5_fout = h5py.File(h5_fname, 'a')
for m_idx in range(model_num):
if debug:
m_idx = 8
print('-----proccessing model in ', train_or_test, ' set: ', m_idx+1, ' of ', model_num, '-----')
if dataType == 'pts':
current_pts, current_plb = Box.load_h5_pts_data(m_file)
pts, indices = np.unique(current_pts[m_idx], return_index=True, axis=0)
plb = current_plb[m_idx][indices]
elif dataType == 'vol':
vox_data, vox_seg = Box.load_h5_volumes_data(m_file)
cur_vox = np.squeeze(vox_data[m_idx, ...])
cur_seg = np.squeeze(vox_seg[m_idx, ...])
pts = np.transpose(np.array(np.where(cur_vox)))
plb = cur_seg[pts[:,0], pts[:,1], pts[:,2]]
pts = pts.astype(float)
# pdb.set_trace()
""" process seg pts into subpart groups (distance metric, group box)"""
tic()
Boxes, BLabels, parts_bag, label_bag = Box.computeBox(pts=pts, plb=plb, alpha=1.5)
boxes, part_ids = Box.gen_rois_info(Boxes, BLabels)
toc() # computeBox(): 0.5 seconds per model
if write_flag:
""" write to h5 file
bbox, mini_mask, box_label
"""
# unify the data to be saved
Bbox_data = np.zeros((Max_Ins_Num, 6)) # [rois, x1,y1,z1, x2,y2,z2]
Label_data = np.zeros((Max_Ins_Num,))
b_count = len(part_ids) # box count
if b_count <= Max_Ins_Num:
Bbox_data[0:b_count, ...] = np.asarray(boxes)
Label_data[0:b_count, ...] = np.asarray(part_ids)
elif b_count > Max_Ins_Num:
print("Warning: box count larger than Max_Ins_Num!")
b_count = Max_Ins_Num
Bbox_data[0:b_count, ...] = np.asarray(boxes)[0:b_count, ...]
Label_data[0:b_count, ...] = np.asarray(part_ids)[0:b_count, ...]
if m_idx == 0:
bbox_set = h5_fout.create_dataset('bbox', (1, Max_Ins_Num, 6),
maxshape=(None, Max_Ins_Num, 6))
label_set = h5_fout.create_dataset('label', (1, Max_Ins_Num),
maxshape=(None, Max_Ins_Num))
bbox_set[:] = np.expand_dims(Bbox_data, axis=0)
label_set[:] = np.expand_dims(Label_data, axis=0)
else: # resize the 0 axis
bbox_set = h5_fout['bbox']
label_set = h5_fout['label']
bbox_set.resize(bbox_set.shape[0]+1, axis=0)
label_set.resize(label_set.shape[0]+1, axis=0)
bbox_set[-1:, ...] = np.expand_dims(Bbox_data, axis=0)
label_set[-1:, ...] = np.expand_dims(Label_data, axis=0)
h5_fout.flush()
""" visualization """
if view_flag and (train_or_test=='test'):
bboxes = np.asarray(boxes)
partids = part_ids
part_list = []
for i in range(bboxes.shape[0]):
part_id = partids[i]
if part_id == 0 or part_id in part_list:
continue
pts_idx = plb == part_id
cur_partpts = pts[pts_idx,:]
draw_mask_box(cur_partpts, bboxes, partids)
figname = os.path.join(dump_dir, 'model_'+train_or_test+'_'+str(m_idx)+
'-part_'+str(part_id)+'-box_num'+str(bboxes.shape[0])+'.png')
part_list.append(part_id)
plt.savefig(figname)
# pdb.set_trace()
if debug:
pdb.set_trace()
# compare mini_mask and expand_mask
if write_flag:
h5_fout.close()
# if __name__ == '__main__':
# """ Settings """
# DATA_DIR = os.path.join(BASE_DIR, 'hdf5_data'+g_.Data_suf, '48')
# # ['Motorbike', 'Earphone', 'Rocket', 'Airplane', 'Chair']
# CAT_LIST = ['Airplane']
# dataType = 'vol' # 'pts'
# train_or_test_list = ['test', 'train', 'val'] # ['test', 'train', 'val']
# debug = False
# view_flag = False
# write_flag = False
# Max_Ins_Num = g_.MAX_INS_NUM # 15
# Mini_Shape = g_.MASK_SHAPE # 15
# for CAT_NAME in CAT_LIST:
# for train_or_test in train_or_test_list:
# print('Category: ', CAT_NAME, '\t', train_or_test)
# out_dir = os.path.join(DATA_DIR, 'mask')
# if not os.path.exists(out_dir):
# os.mkdir(out_dir)
# dump_dir = './cubeImgs/'+CAT_NAME+g_.Data_suf
# if not os.path.exists(dump_dir):
# os.mkdir(dump_dir)
# m_file = os.path.join(DATA_DIR, CAT_NAME+'_'+dataType+'_'+train_or_test+'.h5')
# model_num = Box.get_data_num(m_file)
# if write_flag:
# h5_fname = os.path.join(out_dir, CAT_NAME+'_'+dataType+'_'+train_or_test+'.h5')
# h5_fout = h5py.File(h5_fname, 'a')
# for m_idx in range(model_num):
# if debug:
# m_idx = 8
# print('-----proccessing model in ', train_or_test, ' set: ', m_idx+1, ' of ', model_num, '-----')
# if dataType == 'pts':
# current_pts, current_plb = Box.load_h5_pts_data(m_file)
# pts, indices = np.unique(current_pts[m_idx], return_index=True, axis=0)
# plb = current_plb[m_idx][indices]
# elif dataType == 'vol':
# vox_data, vox_seg = Box.load_h5_volumes_data(m_file)
# cur_vox = np.squeeze(vox_data[m_idx, ...])
# cur_seg = np.squeeze(vox_seg[m_idx, ...])
# pts = np.transpose(np.array(np.where(cur_vox)))
# plb = cur_seg[pts[:,0], pts[:,1], pts[:,2]]
# pts = pts.astype(float)
# # pdb.set_trace()
# """ process seg pts into subpart groups (distance metric, group box)"""
# tic()
# Boxes, BLabels, parts_bag, label_bag = Box.computeBox(pts=pts, plb=plb, alpha=1.5)
# toc() # computeBox(): 0.5 seconds per model
# crop_masks, mini_masks, boxes, part_ids = gen_masks_info_v2(
# cur_seg, Boxes, BLabels, mask_shape=Mini_Shape)
# part_pts = gen_masks_info_v1(parts_bag, label_bag, mask_shape=5)
# if write_flag:
# """ write to h5 file
# bbox, mini_mask, box_label
# """
# # unify the data to be saved
# Bbox_data = np.zeros((Max_Ins_Num, 6)) # [rois, x1,y1,z1, x2,y2,z2]
# Mask_data = np.zeros((Max_Ins_Num, Mini_Shape, Mini_Shape, Mini_Shape))
# Label_data = np.zeros((Max_Ins_Num,))
# b_count = len(part_ids) # box count
# if b_count <= Max_Ins_Num:
# Bbox_data[0:b_count, ...] = np.asarray(boxes)
# Mask_data[0:b_count, ...] = np.asarray(mini_masks)
# Label_data[0:b_count, ...] = np.asarray(part_ids)
# elif b_count > Max_Ins_Num:
# print("Warning: box count larger than Max_Ins_Num!")
# b_count = Max_Ins_Num
# Bbox_data[0:b_count, ...] = np.asarray(boxes)[0:b_count, ...]
# Mask_data[0:b_count, ...] = np.asarray(mini_masks)[0:b_count, ...]
# Label_data[0:b_count, ...] = np.asarray(part_ids)[0:b_count, ...]
# if m_idx == 0:
# bbox_set = h5_fout.create_dataset('bbox', (1, Max_Ins_Num, 6),
# maxshape=(None, Max_Ins_Num, 6))
# mask_set = h5_fout.create_dataset('mask',
# (1, Max_Ins_Num, Mini_Shape, Mini_Shape, Mini_Shape),
# maxshape=(None, Max_Ins_Num, Mini_Shape, Mini_Shape, Mini_Shape))
# label_set = h5_fout.create_dataset('label', (1, Max_Ins_Num),
# maxshape=(None, Max_Ins_Num))
# bbox_set[:] = np.expand_dims(Bbox_data, axis=0)
# mask_set[:] = np.expand_dims(Mask_data, axis=0)
# label_set[:] = np.expand_dims(Label_data, axis=0)
# else: # resize the 0 axis
# bbox_set = h5_fout['bbox']
# mask_set = h5_fout['mask']
# label_set = h5_fout['label']
# bbox_set.resize(bbox_set.shape[0]+1, axis=0)
# mask_set.resize(mask_set.shape[0]+1, axis=0)
# label_set.resize(label_set.shape[0]+1, axis=0)
# bbox_set[-1:, ...] = np.expand_dims(Bbox_data, axis=0)
# mask_set[-1:, ...] = np.expand_dims(Mask_data, axis=0)
# label_set[-1:, ...] = np.expand_dims(Label_data, axis=0)
# h5_fout.flush()
# """ visualization """
# if view_flag and (train_or_test=='test'):
# for i in range(len(part_ids)):
# crop_mask = crop_masks[i]
# mask_pts = part_pts[i]
# mini_mask = mini_masks[i]
# part_id = part_ids[i]
# # draw_mask(mini_mask, crop_mask)
# draw_mask_v2(mini_mask, mask_pts, box_bag=Boxes, part_ids=part_ids)
# figname = os.path.join(dump_dir, 'model_'+str(m_idx)+
# '-part_'+str(part_id)+'-box_'+str(i)+'.png')
# plt.savefig(figname)
# # pdb.set_trace()
# if debug:
# draw_mask(mini_masks[3], crop_masks[3])
# pdb.set_trace()
# # compare mini_mask and expand_mask
# ip = 3
# _, m_excrop = expand_mask(boxes[ip], mini_masks[ip], vox_shape=48)
# draw_mask(m_excrop, crop_masks[ip])
# pdb.set_trace()
# if write_flag:
# h5_fout.close()
| [
"screnary@qq.com"
] | screnary@qq.com |
4fc606787313fb230101c49ae628fcb273a6111f | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02997/s386871412.py | 7432b9652080f5697617f98cbfca83378aa831a4 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 436 | py | N, K = map(int, input().split())
# Build an undirected graph on N vertices with exactly K "banned" extra
# edges removed from the complete graph minus a spanning structure.
# N and K are read from stdin on the (preceding) input line.
# Infeasible when K exceeds the number of removable edges:
# complete graph has N*(N-1)/2 edges, and N-1 must remain for connectivity.
if K>N*(N-1)//2-(N-1):
    print(-1)
    exit()
# is_ban[i][j] marks the edge (i, j), i < j, as excluded from the output.
is_ban = [[False]*N for _ in range(N)]
# Greedily ban K edges, never touching edges incident to vertex 0
# (rows start at 1), so the star at vertex 0 keeps the graph connected.
for i in range(1, N):
    for j in range(i+1, N):
        if K==0:
            break
        is_ban[i][j] = True
        K -= 1
# Collect every non-banned edge (i < j) as the answer.
ans = []
for i in range(N):
    for j in range(i+1, N):
        if not is_ban[i][j]:
            ans.append((i, j))
print(len(ans))
# Output is 1-indexed as required by the judge.
for u, v in ans:
    print(u+1, v+1)
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
8e36788a9166458db4757af09f813273790f05c5 | 0af29dc561a34a8191f456ec24f6a77bea104b89 | /recurrent-neural-networks/neural-language-models/character-language-model-generator/modelv2.py | ed3cb95374e52c5f2969993e62ddb2fd3467ca76 | [] | no_license | cheeyeo/Machine_learning_portfolio | c4eea8390b2540706d9b8e9df0b491f3f434494b | 927cc9eb3de394dcaa00a4178d873df9798921e4 | refs/heads/master | 2020-05-04T11:16:22.188024 | 2019-05-25T14:18:58 | 2019-05-25T14:18:58 | 179,104,765 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 518 | py | from keras.models import Sequential
from keras.layers import LSTM, Dense, TimeDistributed
from keras.optimizers import Adam
# Defines a seq2seq model
def define_model_v2(seq_len, vocab_size):
    """Define and compile a character-level sequence labeling model.

    Architecture: one LSTM layer (100 units) returning the full sequence,
    followed by a time-distributed softmax over the vocabulary, so the
    model predicts a character distribution at every timestep.

    Parameters
    ----------
    seq_len : int
        Length of each input character sequence.
    vocab_size : int
        Size of the one-hot character vocabulary (input and output width).

    Returns
    -------
    keras.models.Sequential
        The compiled model (also prints its summary).
    """
    model = Sequential()
    model.add(LSTM(100, input_shape=(seq_len, vocab_size), return_sequences=True))
    model.add(TimeDistributed(Dense(vocab_size, activation='softmax')))
    # Gradient clipping (clipvalue=5.0) guards against exploding gradients
    # common in character-level RNN training.
    opt = Adam(lr=0.01, clipvalue=5.0)
    model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['acc'])
    model.summary()
    return model
| [
"ckyeo.1@gmail.com"
] | ckyeo.1@gmail.com |
f575ffda2877c4371a24118ba2a30b386e009dbf | eb0f18d7782fe22a3d5edf54f78828ce0094d98b | /history/run_v5.py | 699688f57d9152d64d0cfcc76bcde929b3afe0a0 | [] | no_license | renke2/spark_clustering | 3d50d2c01ef1f9e37079e9cec52541505951d661 | e66295727146a414f63582190d9daf4e3a62afb6 | refs/heads/master | 2021-01-20T00:46:12.615285 | 2016-07-04T14:04:24 | 2016-07-04T14:04:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,219 | py | # -*- coding:utf-8 -*-
# some improvement after discussion
import os
import math
import numpy as np
from operator import add
from utils import local2mfs, now
from load_data import load_data_from_mongo, cut_words_local
from pyspark import SparkContext
from pyspark.mllib.linalg import Vectors
AB_PATH = os.path.dirname(os.path.abspath(__file__))
# print AB_PATH
RB_ITER = 1
INITIAL_ITER = 10
CLUSTERING_ITER = 10
def parseKV(line):
    """Parse one tab-separated record "tid<TAB>length<TAB>term" into a
    key/value pair ((tid, term), 1/length) -- the per-occurrence term
    frequency contribution for that document."""
    doc_id, doc_len, word = line.split('\t')
    weight = 1.0 / float(doc_len)
    return ((doc_id, word), weight)
def vector_length(x):
    """Return the Euclidean (L2) norm of vector *x* as a Python float."""
    squared_sum = np.dot(x, x)
    return math.sqrt(squared_sum)
def cosine_dist(x, y, x_length, y_length):
    """Cosine similarity between vectors *x* and *y*, given their
    precomputed norms *x_length* and *y_length* (norms are passed in so
    callers can cache them across many comparisons)."""
    return np.dot(x, y) / (x_length * y_length)
def closestPoint(p, centers, withDist=False):
    """Return the index of the center most similar to point *p* under
    cosine similarity (higher is closer). Each center is a sequence whose
    [1] element is the vector and [2] its precomputed norm. Ties keep the
    earliest center. If *withDist* is true, return (index, similarity)."""
    best_index = 0
    best_sim = float("-inf")
    p_norm = vector_length(p)
    for idx, center in enumerate(centers):
        sim = cosine_dist(p, center[1], p_norm, center[2])
        if sim > best_sim:
            best_sim = sim
            best_index = idx
    if withDist:
        return (best_index, best_sim)
    return best_index
def clustering(doc_vec, K, convergeDist, iter_count_limit):
    """Run K-means (cosine variant) over an RDD of (tid, feature-vector).

    Centers are stored as mutable lists [tid, vector, norm] so their norm
    is cached for cosine comparisons. Iterates until the summed similarity
    change metric tempDist drops to convergeDist or iter_count_limit is hit.
    Returns (kPoints, tempDist, iter_count).

    NOTE: written for Python 2 / old PySpark -- the lambdas use tuple
    parameter unpacking, which is invalid in Python 3.
    """
    # Seed centers with K sampled documents (without replacement).
    kPoints = doc_vec.takeSample(False, K)
    points_length = [0,0]  # NOTE(review): never used below
    tempDist = 5.0
    iter_count = 0
    # Convert each sampled (tid, vec) tuple to [tid, vec, norm].
    for i in range(len(kPoints)):
        kPoints[i] = list(kPoints[i])
        kPoints[i].append(vector_length(kPoints[i][1]))
    # NOTE(review): loop condition uses '>' on a similarity-based metric,
    # so convergence semantics differ from classic distance K-means.
    while tempDist > convergeDist and iter_count < iter_count_limit:
        iter_count += 1
        # Assign every document to its most similar center.
        closest = doc_vec.map(
            lambda (tid, feature):(closestPoint(feature, kPoints), (tid, feature, 1)))
        # Sum vectors and counts per cluster; the tid slot becomes -1.
        pointState = closest.reduceByKey(
            lambda (x1, y1, z1), (x2, y2, z2): (-1, y1 + y2, z1 + z2))
        # New center = mean vector; also precompute its norm.
        newPoints = pointState.map(
            lambda (x, (flag, y, z)): (x, y / z)).map(
            lambda (x, y): (x, y, vector_length(y))).collect()
        # tempDist: total similarity between old and new centers.
        tempDist = sum(cosine_dist(kPoints[x][1], y, kPoints[x][2], z) for (x, y, z) in newPoints)
        # Install the new centers (vector and cached norm) in place.
        for (x, y, z) in newPoints:
            kPoints[x][1] = y
            kPoints[x][2] = z
    return kPoints, tempDist, iter_count
def cluster_evaluation(doc_vec, kPoints):
    """Score a clustering: per-cluster and total summed cosine similarity.

    Returns (cluster_variance, total_variance) where cluster_variance is an
    RDD of (cluster_index, (similarity_sum, doc_count)) and total_variance
    is the (similarity_sum, doc_count) pair over all clusters. Higher sums
    indicate tighter clusters under this cosine metric.

    NOTE: Python 2 tuple-unpacking lambdas (invalid in Python 3).
    """
    # Assignment with similarity retained (withDist=True).
    closest = doc_vec.map(
        lambda (tid, feature):(closestPoint(feature, kPoints, True), (tid, feature, 1)))
    # Re-key by cluster index, keeping (similarity, 1) per document.
    doc_variance = closest.map(
        lambda ((index, dist), (tid, feature, num)): (index, (dist, num)))
    # Sum similarities and counts within each cluster.
    cluster_variance = doc_variance.reduceByKey(lambda (x1,y1),(x2,y2):(x1+x2,y1+y2))
    # Collapse across clusters into one (sum, count) pair.
    total_variance = cluster_variance.map(
        lambda (index, (dist, num)): (dist, num)).reduce(lambda (x1,y1), (x2,y2):(x1+x2,y1+y2))
    return cluster_variance, total_variance
def cal_cluster_variance(doc_vec):
    """Summed cosine similarity of every document to the cluster centroid.

    Computes the mean vector of the RDD of (tid, feature) pairs, then sums
    each document's cosine similarity to that centroid. Used as the
    baseline "variance" before a cluster is split.
    """
    # Sum of vectors and document count in one reduce pass.
    cluster_center = doc_vec.mapValues(
        lambda x: (x, 1)).values().reduce(
        lambda (y1, z1), (y2, z2): (y1 + y2, z1 + z2))
    _sum, _num = cluster_center
    center = _sum / _num
    # Cache the centroid norm once for all comparisons.
    center_length = vector_length(center)
    initial_distance = doc_vec.mapValues(
        lambda x:cosine_dist(x, center, vector_length(x), center_length)).values().sum()
    return initial_distance
def load_cut_to_rdd(input_file, result_file):
    """End-to-end Spark driver: TF-IDF vectorization + repeated-bisection
    K-means clustering of documents.

    Pipeline:
      1. Parse "tid\\tlength\\tterm" lines into TF weights, compute document
         frequencies, filter terms by a DF band, and build sparse TF-IDF
         document vectors.
      2. Run INITIAL_ITER restarts of 2-way clustering and keep the best
         split; write summary stats to *result_file*.
      3. Repeatedly bisect the cluster with the largest improvement
         potential (repeated-bisection K-means), up to 4 more rounds.

    NOTE: Python 2 code (print statements, tuple-unpacking lambdas);
    the Mesos master URL is hard-coded for the original deployment.
    """
    sc = SparkContext(appName='PythonKMeans',master="mesos://219.224.135.91:5050")
    lines = sc.textFile(input_file)
    # (tid, term) -> per-occurrence TF weight; summed below.
    data = lines.map(parseKV).cache()
    doc_term_tf = data.reduceByKey(add).cache()
    num_doc = doc_term_tf.map(lambda ((tid, term), tf): tid).distinct().count()
    # Document frequency per term (each (tid, term) pair counts once).
    initial_term_idf = doc_term_tf.map(
        lambda ((tid, term), tf): (term, 1.0)
        ).reduceByKey(add)
    # filter
    initial_num_term = initial_term_idf.count()
    print 'initial_num_term', initial_num_term
    idf_sum = initial_term_idf.values().sum()
    print 'idf_sum', idf_sum
    # Keep only terms whose DF lies in (avg/3, 2*avg/3) band, then
    # convert DF to IDF = log(N / (df + 1)).
    idf_average = idf_sum / (initial_num_term * 3)
    term_idf = initial_term_idf.filter(
        lambda (term, idf): idf_average < idf < (idf_average * 2)).mapValues(
        lambda idf: math.log(float(num_doc) / (idf+1)))
    terms_list = term_idf.keys().collect()
    num_term = len(terms_list)
    print 'num_term', num_term
    # Join TF with IDF per term, then map each term to its vocabulary
    # index (terms_list.index is O(V) per lookup -- a known hotspot).
    tfidf_join = doc_term_tf.map(
        lambda ((tid, term), tf): (term, (tid, tf))).join(term_idf)
    tfidf = tfidf_join.map(lambda (term, ((tid, tf), idf)): (tid, (terms_list.index(term), tf*idf)))
    # Dense document vectors (one per tid) built from sparse entries.
    doc_vec = tfidf.groupByKey().mapValues(lambda feature : Vectors.sparse(num_term, feature).toArray()).cache()
    # Global centroid of the whole corpus, plus its cached norm.
    global_center = doc_vec.mapValues(
        lambda x: x / num_doc).values().sum()
    g_length = vector_length(global_center)
    # initial 2-way clustering
    K = 2
    convergeDist = 0.01
    maximum_total_variance = 0
    best_kPoints = []
    print 'initial', now()
    # Multiple random restarts; keep the split with the highest total
    # within-cluster similarity.
    for i in range(INITIAL_ITER):
        kPoints, tempDist, iter_count = clustering(doc_vec, K, convergeDist, CLUSTERING_ITER)
        # evaluation
        cluster_variance, total_variance = cluster_evaluation(doc_vec, kPoints)
        # choose the best initial cluster
        if total_variance[0] > maximum_total_variance:
            maximum_total_variance = total_variance[0]
            best_kPoints = kPoints
    global_distance = sum(cosine_dist(best_kPoints[x][1], global_center, best_kPoints[x][2], g_length) for x in range(len(best_kPoints)))
    # Write a one-line summary; the large triple-quoted string below is
    # intentionally dead code kept for debugging output formats.
    f = open(result_file,'w')
    f.write(str(iter_count)+"\t"+str(num_doc)+"\t"+str(num_term)+"\n")
    """
    for term in terms_list:
        f.write(term.encode('utf-8')+'\n')
    for (term, ((tid,tf), idf)) in tfidf_join.collect():
        f.write(term.encode('utf-8')+'\t'+str(tid)+'\t'+str(tf)+'\t'+str(idf)+'\n')
    print >> f, "%0.9f" % tempDist
    print >> f, "total_variance", total_variance[0], total_variance[1]
    print >> f, "global_dist", global_distance
    f.write("center:"+"\t")
    for dim in global_center:
        f.write(str(dim)+"\t")
    f.write("\n")
    for i in range(len(best_kPoints)):
        f.write(str(i))
        for unit in best_kPoints[i][1]:
            f.write("\t")
            f.write(str(unit))
        f.write("\n")
    for (index, (dist, num)) in cluster_variance.collect():
        f.write(str(index))
        f.write("\t")
        f.write(str(dist))
        f.write("\t")
        f.write(str(num))
        f.write("\n")
    """
    f.close()
    #repeated bisect
    #choose cluster
    # Dicts keyed by "improvement" score: cluster RDD and its centers.
    # NOTE(review): using a float score as the dict key means two clusters
    # with an identical score would collide -- confirm acceptable.
    updated_dict = {}
    updated_points_dict = {}
    total_delta_variance = 0
    updated_dict[total_delta_variance] = doc_vec
    updated_points_dict[total_delta_variance] = best_kPoints
    print 'repeated', now()
    # Up to 4 bisection rounds; each round splits the cluster whose key
    # equals the current maximum improvement score.
    for j in range(2, 6):
        if not (total_delta_variance in updated_dict):
            print "no cluster to divide"
            break
        print 'cluster to divide', total_delta_variance, updated_dict[total_delta_variance]
        best_cluster = updated_dict[total_delta_variance]
        global_best_kPoints = updated_points_dict[total_delta_variance]
        del updated_dict[total_delta_variance]
        del updated_points_dict[total_delta_variance]
        # Split the chosen cluster's documents by their nearest center.
        closest = best_cluster.map(
            lambda (tid, feature):(closestPoint(feature, global_best_kPoints), (tid, feature))).cache()
        print 'total_count', closest.count()
        total_delta_variance = float("-inf") # clear to zero
        # Track the best remaining improvement among untouched clusters.
        for key in updated_dict:
            if key > total_delta_variance:
                total_delta_variance = key
        for i in range(K):
            # Sub-cluster i of the bisection.
            single_cluster = closest.filter(lambda (index, (tid, feature)): index == i).values().cache()
            print 'count', i, single_cluster.count()
            maximum_total_variance = 0
            best_kPoints = []
            # Baseline similarity of the unsplit sub-cluster.
            initial_distance = cal_cluster_variance(single_cluster)
            for j in range(RB_ITER):
                # clustering
                kPoints, tempDist, iter_count = clustering(single_cluster, K, convergeDist, CLUSTERING_ITER)
                # evaluation
                cluster_variance, total_variance = cluster_evaluation(single_cluster, kPoints)
                if total_variance[0] > maximum_total_variance:
                    maximum_total_variance = total_variance[0]
                    best_kPoints = kPoints
            # Improvement = similarity gained by splitting this sub-cluster.
            improvement = maximum_total_variance - initial_distance
            updated_dict[improvement] = single_cluster # update dict
            updated_points_dict[improvement] = best_kPoints
            print 'improvement', improvement, maximum_total_variance, initial_distance
            if improvement > total_delta_variance:
                total_delta_variance = improvement
            print 'length', cluster_variance.count()
    for key in updated_dict:
        print 'key', key
    sc.stop()
    return
if __name__ == "__main__":
    # topic = "APEC-微博"
    # print topic
    # Input/output paths for one experiment run; steps 1 and 2 (Mongo
    # export and word segmentation) are commented out because the cut
    # file already exists on disk.
    input_file = "data/source_chaijing.txt"
    output_file = "data/out_chaijing2.txt"
    result_file = "data/result_chaijing.txt"
    print "step1", now()
    # load_data_from_mongo(topic, input_file)
    print "step2", now()
    # cut_words_local(input_file, output_file)
    print "step3", now()
    # local2mfs maps the local path onto the shared filesystem so Spark
    # workers can read it.
    load_cut_to_rdd(local2mfs(output_file), result_file)
    print "end", now()
| [
"1257819385@qq.com"
] | 1257819385@qq.com |
a15f6c7f7005994b341fad929657892042159151 | e9dc0573d42ee003c563c12ba5e4241a70e88b29 | /old_documents/lms_app/migrations/0005_auto_20210823_1009.py | 131277a5649cc235bf712d562e2fff3d6ec21ab3 | [] | no_license | kamaliselvarajk/aspire | 279eed753db940d8feb3066e0885896d98549d3e | 3b61bad965a11877c4b63a7b93ea5f76f24ac96f | refs/heads/main | 2023-08-24T12:25:08.927712 | 2021-10-14T17:22:17 | 2021-10-14T17:22:17 | 383,044,001 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,121 | py | # Generated by Django 3.2.5 on 2021-08-23 04:39
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated Django migration (0005) for lms_app.

    Drops LeaveRequest.status, shortens manager_name to a plain CharField,
    and introduces the LeaveApprove model that now carries the approval
    status, a cancel reason, and a FK to the approving user.
    """
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('lms_app', '0004_leaverequest'),
    ]
    operations = [
        # Approval state moves off LeaveRequest onto the new LeaveApprove.
        migrations.RemoveField(
            model_name='leaverequest',
            name='status',
        ),
        migrations.AlterField(
            model_name='leaverequest',
            name='manager_name',
            field=models.CharField(max_length=50),
        ),
        migrations.CreateModel(
            name='LeaveApprove',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('status', models.CharField(max_length=20)),
                ('cancel_reason', models.CharField(max_length=100)),
                # Deleting the user cascades to their approvals.
                ('manager_name', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| [
"kamali.selvaraj@aspiresys.com"
] | kamali.selvaraj@aspiresys.com |
3bbf1c97245dd0a764aca70754af6b208acc406d | 45de3aa97525713e3a452c18dcabe61ac9cf0877 | /src/secondaires/navigation/masques/point_visible/__init__.py | a2b98c97ed671cc44f4f3eb9ca4d18ccd338fba7 | [
"BSD-3-Clause"
] | permissive | stormi/tsunami | 95a6da188eadea3620c70f7028f32806ee2ec0d1 | bdc853229834b52b2ee8ed54a3161a1a3133d926 | refs/heads/master | 2020-12-26T04:27:13.578652 | 2015-11-17T21:32:38 | 2015-11-17T21:32:38 | 25,606,146 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,741 | py | # -*-coding:Utf-8 -*
# Copyright (c) 2012 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Fichier contenant le masque <point_visible>."""
from primaires.format.fonctions import contient, supprimer_accents
from primaires.interpreteur.masque.masque import Masque
from primaires.interpreteur.masque.fonctions import *
from primaires.interpreteur.masque.exceptions.erreur_validation \
import ErreurValidation
from secondaires.navigation.constantes import *
from secondaires.navigation.visible import Visible
class VPointVisible(Masque):
    """Mask <point_visible>.

    Expects an observable point (a viewing direction) as parameter.
    Valid directions are the French keywords "arriere" (astern),
    "babord" (port), "tribord" (starboard), "avant"/"devant" (ahead);
    with no argument the forward arc is used with a wider field of view.
    """
    nom = "point_visible"
    nom_complet = "direction"
    def init(self):
        """Initialize the mask's attributes."""
        # points: Visible points collected during validation
        self.points = None
        # retour: formatted description of what the character sees
        self.retour = ""
    def repartir(self, personnage, masques, commande):
        """Distribute the remaining command text to this mask.

        Consumes the whole remainder of the command as the direction
        string and registers this mask; always succeeds.
        """
        point = liste_vers_chaine(commande)
        self.a_interpreter = point
        commande[:] = []
        masques.append(self)
        return True
    def valider(self, personnage, dic_masques):
        """Validate the mask.

        Resolves the direction keyword into an angle, gathers the visible
        points around the character's ship within lookout range, and
        stores both the points and the formatted message on the mask.
        Raises ErreurValidation on an unknown direction keyword; returns
        False when the character is not aboard a ship.
        """
        Masque.valider(self, personnage, dic_masques)
        point = self.a_interpreter
        salle = personnage.salle
        # The character must be in a ship room to look out.
        if not hasattr(salle, "navire"):
            return False
        navire = salle.navire
        etendue = navire.etendue
        alt = etendue.altitude  # NOTE(review): computed but unused here
        portee = get_portee(salle)
        if point:
            # A direction keyword was given: narrow 45-degree half-arc,
            # finer angular precision.
            point = supprimer_accents(point)
            limite = 45
            precision = 5
            if point == "arriere":
                direction = 180
            elif point == "babord":
                direction = -90
            elif point == "tribord":
                direction = 90
            elif point in ("avant", "devant"):
                direction = 0
            else:
                raise ErreurValidation("|err|Direction invalide.|ff|")
        else:
            # No keyword: look ahead over a wide 90-degree half-arc with
            # coarser precision.
            direction = 0
            limite = 90
            precision = 15
        # Gather the visible points (excluding our own ship via the map).
        points = Visible.observer(personnage, portee, precision,
                {"": navire})
        msg = points.formatter(direction, limite)
        self.points = points
        self.retour = msg
| [
"kredh@free.fr"
] | kredh@free.fr |
4a9e98c8b6df75b5063e618920505c559fb5d06e | 417448ce51e21233736c2f5eab7a0960c8cdcb3f | /Selenium/Tutorial/Amazon_Script.py | 3bafb73cafc9bed749ffec6917175a393dd3ddb3 | [] | no_license | ravalrupalj/Small_Projects | dcfd4da5a2e3dab4ccb16c693da7ecfd28c30312 | 89e0232503049468e181b387391abfbc523933a0 | refs/heads/master | 2022-12-25T04:18:06.296291 | 2020-10-08T01:58:13 | 2020-10-08T01:58:13 | 268,957,256 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,883 | py | from selenium import webdriver
# Selenium smoke test: search Amazon.ca for "ashlin wallet", add four
# products to the cart, then verify the cart lists the expected titles.
# Relies on a local chromedriver path and the deprecated
# find_element_by_* API (Selenium 3).
driver= webdriver.Chrome(executable_path='C:\\Users\\raval\\Documents\\chromedriver_win32\\chromedriver.exe')
driver.get('https://www.amazon.ca/')
driver.maximize_window()
driver.implicitly_wait(5)
# Search for the product keyword.
driver.find_element_by_id("twotabsearchtextbox").send_keys("ashlin wallet")
driver.find_element_by_css_selector("input[value='Go']").click()
# Product 1: open by image alt text and add to cart.
driver.find_element_by_xpath("//img[@alt='ASHLIN RFID Blocking Wallet| Made with #1 Grade Napa Genuine Leather Excellent Credit Card Protector |10 Credit Card Pockets']").click()
driver.find_element_by_id("add-to-cart-button").click()
# Return to the search results (two steps: product page, then cart page).
driver.back()
driver.back()
# Product 2.
driver.find_element_by_xpath("(//img[@alt=\"Ashlin RFID Blocking Men's SLIM BI-fold Wallet - 100% Genuine Leather wallet with lined currency compartment\"])[1]").click()
driver.find_element_by_id("add-to-cart-button").click()
driver.back()
driver.back()
# Product 3 (second occurrence of the same alt text).
driver.find_element_by_xpath("(//img[@alt='ASHLIN RFID Blocking Wallet| Made with #1 Grade Napa Genuine Leather Excellent Credit Card Protector |10 Credit Card Pockets'])[2]").click()
driver.find_element_by_id("add-to-cart-button").click()
driver.back()
driver.back()
# Product 4, then open the cart.
driver.find_element_by_xpath("(//img[@alt=\"ASHLIN Men's Bi-fold Wallet - 100% Lambskin Napa | Double Billfold Section | Midnight Black [5748-07-01]\"])[1]").click()
driver.find_element_by_id("add-to-cart-button").click()
driver.find_element_by_xpath("//a[@id='hlb-view-cart-announce']").click()
# Verify cart line items by title (positions 1..3 of the title spans).
assert "ASHLIN Men's Bi-fold Wallet - 100% Lambskin Napa | Double Billfold Section | Midnight Black [5748-07-01]"==driver.find_element_by_xpath("(//span[@class='a-size-medium sc-product-title a-text-bold'])[1]").text
print(driver.find_element_by_xpath("(//span[@class='a-size-medium sc-product-title a-text-bold'])[1]").text)
assert "ASHLIN RFID Blocking Wallet| Made with #1 Grade Napa Genuine Leather Excellent Credit Card Protector |10 Credit Card Pockets" ==driver.find_element_by_xpath("(//span[@class='a-size-medium sc-product-title a-text-bold'])[2]").text
print(driver.find_element_by_xpath("(//span[@class='a-size-medium sc-product-title a-text-bold'])[2]").text)
assert "Ashlin RFID Blocking Men's SLIM BI-fold Wallet - 100% Genuine Leather wallet with lined currency compartment" == driver.find_element_by_xpath("(//span[@class='a-size-medium sc-product-title a-text-bold'])[3]").text
print(driver.find_element_by_xpath("(//span[@class='a-size-medium sc-product-title a-text-bold'])[3]").text)
# Price assertion disabled (prices change); the print remains for logging.
#assert "CDN$ 70.89" == driver.find_element_by_xpath("//span[@class='a-size-medium a-color-base sc-price sc-white-space-nowrap']").text
print(driver.find_element_by_xpath("//span[@class='a-size-medium a-color-base sc-price sc-white-space-nowrap']").text)
# Checkout/login steps intentionally disabled.
#driver.find_element_by_id("hlb-ptc-btn").click()
#driver.find_element_by_id("ap_email").send_keys("ravalrupalj@gmail.com")
#driver.find_element_by_id("continue").click()
"63676082+ravalrupalj@users.noreply.github.com"
] | 63676082+ravalrupalj@users.noreply.github.com |
1600aa38bd05891af088262a94401d768b4f2f10 | 5e64335865c817eec677a2236709f3e73eb3ca9f | /utilities/tests/TestProject/pythonScripts/RunTests.py | e76fb6ce23b5a196337e2ea6d76fa93d94e7a562 | [] | no_license | Neurosim-lab/osb-model-validation | 3631ede96d811006fcc5f381faf1739f3e72a3e0 | d661b96682d7229ec94778380dd7978be254bb60 | refs/heads/master | 2020-03-28T11:15:13.957592 | 2018-09-10T19:20:37 | 2018-09-10T19:20:37 | 148,193,132 | 1 | 0 | null | 2018-09-10T17:29:54 | 2018-09-10T17:29:54 | null | UTF-8 | Python | false | false | 2,538 | py | #
#
# File to test current configuration of project.
#
# To execute this type of file, type 'nC.bat -python XXX.py' (Windows)
# or './nC.sh -python XXX.py' (Linux/Mac). Note: you may have to update the
# NC_HOME and NC_MAX_MEMORY variables in nC.bat/nC.sh
#
# Author: Padraig Gleeson
#
#
import sys
import os
try:
from java.io import File
except ImportError:
print "Note: this file should be run using ..\\..\\..\\nC.bat -python XXX.py' or '../../../nC.sh -python XXX.py'"
print "See http://www.neuroconstruct.org/docs/python.html for more details"
quit()
sys.path.append(os.environ["NC_HOME"]+"/pythonNeuroML/nCUtils")
import ncutils as nc
projFile = File(os.getcwd(), "../TestProject.ncx")
print "Project file for this test: "+ projFile.getAbsolutePath()
############## Main settings ##################
simConfigs = []
simConfigs.append("Default Simulation Configuration")
simDt = 0.001
simulators = ["NEURON"]
# simulators = ["NEURON", "LEMS"]
numConcurrentSims = 4
varTimestepNeuron = False
plotSims = True
plotVoltageOnly = True
analyseSims = True
runInBackground = True
verbose = False
#############################################
def testAll(argv=None):
if argv is None:
argv = sys.argv
print "Loading project from "+ projFile.getCanonicalPath()
simManager = nc.SimulationManager(projFile,
numConcurrentSims,
verbose)
simManager.runMultipleSims(simConfigs = simConfigs,
simDt = simDt,
simulators = simulators,
runInBackground = runInBackground)
simManager.reloadSims(plotVoltageOnly = plotVoltageOnly,
plotSims = plotSims,
analyseSims = analyseSims)
# These were discovered using analyseSims = True above.
# They need to hold for all simulators
spikeTimesToCheck = {'SampleCellGroup_0' : [21.6, 35.171, 48.396, 61.602, 74.807]}
spikeTimeAccuracy = 0.0
report = simManager.checkSims(spikeTimesToCheck = spikeTimesToCheck,
spikeTimeAccuracy = spikeTimeAccuracy)
print report
return report
if __name__ == "__main__":
testAll()
| [
"p.gleeson@gmail.com"
] | p.gleeson@gmail.com |
22fb6522d48eab678fbf0b4989078a87265c6b77 | e91f477713556f14b288b89ecce89754d4bd93f7 | /ML/rl/rl_utils.py | 7566686d6b7a40292181a9ba78acee9e48bc8b65 | [
"MIT"
] | permissive | PepSalehi/algorithms | 715603ad16c320c0f1d32c544062b71b11814263 | 1c20f57185e6324aa840ccff98e69764b4213131 | refs/heads/master | 2020-12-28T23:24:39.542742 | 2019-02-01T05:17:56 | 2019-02-01T05:17:56 | 14,173,271 | 0 | 0 | MIT | 2019-02-01T05:17:57 | 2013-11-06T13:27:34 | Python | UTF-8 | Python | false | false | 2,462 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Utility functions for Reinforcement Learning."""
# core modules
import logging
import os
# 3rd party modules
import yaml
# General code for loading ML configuration files
def load_cfg(yaml_filepath):
"""
Load a YAML configuration file.
Parameters
----------
yaml_filepath : str
Returns
-------
cfg : dict
"""
# Read YAML experiment definition file
with open(yaml_filepath, 'r') as stream:
cfg = yaml.load(stream)
cfg = make_paths_absolute(os.path.dirname(yaml_filepath), cfg)
return cfg
def make_paths_absolute(dir_, cfg):
"""
Make all values for keys ending with `_path` absolute to dir_.
Parameters
----------
dir_ : str
cfg : dict
Returns
-------
cfg : dict
"""
for key in cfg.keys():
if key.endswith("_path"):
cfg[key] = os.path.join(dir_, cfg[key])
cfg[key] = os.path.abspath(cfg[key])
if not os.path.isfile(cfg[key]):
logging.error("%s does not exist.", cfg[key])
if type(cfg[key]) is dict:
cfg[key] = make_paths_absolute(dir_, cfg[key])
return cfg
def test_agent(cfg, env, agent):
"""Calculate average reward."""
cum_reward = 0.0
for episode in range(cfg['testing']['nb_epochs']):
agent.reset()
observation_previous = env.reset()
is_done = False
while not is_done:
action = agent.act(observation_previous, no_exploration=True)
observation, reward, is_done, _ = env.step(action)
cum_reward += reward
observation_previous = observation
return cum_reward / cfg['testing']['nb_epochs']
def get_parser():
"""Get parser object."""
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
parser = ArgumentParser(description=__doc__,
formatter_class=ArgumentDefaultsHelpFormatter)
parser.add_argument("--env",
dest="environment_name",
help="OpenAI Gym environment",
metavar="ENVIRONMENT",
default="FrozenLake-v0")
parser.add_argument("--agent",
dest="agent_cfg_file",
required=True,
metavar="AGENT_YAML",
help="Configuration file for the agent")
return parser
| [
"info@martin-thoma.de"
] | info@martin-thoma.de |
f1654e6ae542ec6bf12b305385f26c7ef3610381 | 964c83b67a45717874292468ded6d85ed69c2c9f | /reg_sign_in_out/views.py | 361b417c0ba82d171eab42791f39bc27ad0eb7b3 | [] | no_license | deshiyan1010/Colabratory | bddf6d8a21a568b33827ce3ca8763930764f7197 | 9edce90ba993e06d1e5f1cdef268d33af51f1dd5 | refs/heads/master | 2022-12-17T19:28:33.328936 | 2020-09-21T17:29:43 | 2020-09-21T17:29:43 | 281,600,531 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,364 | py | from django.shortcuts import render
from reg_sign_in_out.models import *
from . import forms
from django.contrib.auth import authenticate,login,logout
from django.http import HttpResponseRedirect, HttpResponse
from django.urls import reverse
from django.contrib.auth.decorators import login_required
from django.views.decorators.csrf import csrf_protect
import razorpay
def index(request):
return render(request,"index.html")
@login_required
def special(request):
return HttpResponse("In!")
@login_required
def user_logout(request):
logout(request)
return HttpResponseRedirect(reverse('index'))
@csrf_protect
def registration(request):
registered = False
if request.method == "POST":
form = forms.UserForm(request.POST)
profileform = forms.RegistrationForm(request.POST,request.FILES)
if form.is_valid() and profileform.is_valid():
user = form.save()
user.set_password(user.password)
user.save()
profile = profileform.save(commit=False)
profile.user = user
profile.save()
registered = True
return HttpResponseRedirect(reverse('reg_sign_in_out:payment'))
else:
print(form.errors,profileform.errors)
return render(request,"reg_sign_in_out/registration.html",{"tried":"True",
"registered":registered,
"profile_form":profileform,
"user_form":form,
})
else:
user = forms.UserForm()
profileform = forms.RegistrationForm()
return render(request,"reg_sign_in_out/registration.html",{"registered":registered,
"profile_form":profileform,
"user_form":user,
})
@csrf_protect
def user_login(request):
if request.method == "POST":
username = request.POST.get('username')
password = request.POST.get('password')
user = authenticate(username=username,password=password)
if user:
if user.is_active:
login(request,user)
return HttpResponseRedirect(reverse('posts:home'))
else:
return render(request,"reg_sign_in_out/login.html",{'tried':'True'})
else:
return render(request,"reg_sign_in_out/login.html")
@login_required
def payment(request):
obj = Registration.objects.get(user__username=request.user.username)
if obj.paid==False:
client = razorpay.Client(auth = ('rzp_test_iCqL53D2oVdlIL', 'A5bcZDlcdxB6qz5K6O4i5eD1'))
payment = client.order.create({'amount':10000000, 'currency':'INR', 'payment_capture':'1'})
if request.method=="POST":
obj.paid = True
obj.order_id = request.POST["razorpay_order_id"]
obj.save()
return HttpResponseRedirect(reverse('profilepage:profilepage'))
return render(request,"reg_sign_in_out/payment.html",{'payment':payment})
else:
return render(request,"reg_sign_in_out/paid.html") | [
"vinayakamikkal@gmail.com"
] | vinayakamikkal@gmail.com |
9d6644181164a6acae72a68b2658055bb2528631 | 0ca7c7bdb297439554777e126ae8a2999962b7fe | /venv/Lib/site-packages/gevent/tests/test__refcount.py | d3c3430105bc53cd5a82709d8d68fb2181c48835 | [] | no_license | YazLuna/APIExpressJobs | 6c0857f63180bf5163d11fa9d1a411e44a4ba46f | cd52bc8d0d60100091637ef79f78cc79d58a1495 | refs/heads/master | 2023-06-13T02:50:57.672295 | 2021-06-18T14:57:53 | 2021-06-18T14:57:53 | 367,244,876 | 0 | 1 | null | 2021-06-18T14:57:53 | 2021-05-14T04:05:43 | Python | UTF-8 | Python | false | false | 6,028 | py | # Copyright (c) 2008 AG Projects
# Author: Denis Bilenko
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""This test checks that underlying socket instances (gevent.socket.socket._sock)
are not leaked by the hub.
"""
from __future__ import print_function
from _socket import socket as c_socket
import sys
if sys.version_info[0] >= 3:
# Python3 enforces that __weakref__ appears only once,
# and not when a slotted class inherits from an unslotted class.
# We mess around with the class MRO below and violate that rule
# (because socket.socket defines __slots__ with __weakref__),
# so import socket.socket before that can happen.
__import__('socket')
Socket = c_socket
else:
class Socket(c_socket):
"Something we can have a weakref to"
import _socket
_socket.socket = Socket
from gevent import monkey; monkey.patch_all()
import gevent.testing as greentest
from gevent.testing import support
from gevent.testing import params
try:
from thread import start_new_thread
except ImportError:
from _thread import start_new_thread
from time import sleep
import weakref
import gc
import socket
socket._realsocket = Socket
SOCKET_TIMEOUT = 0.1
if greentest.RESOLVER_DNSPYTHON:
# Takes a bit longer to resolve the client
# address initially.
SOCKET_TIMEOUT *= 2
if greentest.RUNNING_ON_CI:
SOCKET_TIMEOUT *= 2
class Server(object):
listening = False
client_data = None
server_port = None
def __init__(self, raise_on_timeout):
self.raise_on_timeout = raise_on_timeout
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
self.server_port = support.bind_port(self.socket, params.DEFAULT_BIND_ADDR)
except:
self.close()
raise
def close(self):
self.socket.close()
self.socket = None
def handle_request(self):
try:
self.socket.settimeout(SOCKET_TIMEOUT)
self.socket.listen(5)
self.listening = True
try:
conn, _ = self.socket.accept()
except socket.timeout:
if self.raise_on_timeout:
raise
return
try:
self.client_data = conn.recv(100)
conn.send(b'bye')
finally:
conn.close()
finally:
self.close()
class Client(object):
server_data = None
def __init__(self, server_port):
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.server_port = server_port
def close(self):
self.socket.close()
self.socket = None
def make_request(self):
try:
self.socket.connect((params.DEFAULT_CONNECT, self.server_port))
self.socket.send(b'hello')
self.server_data = self.socket.recv(100)
finally:
self.close()
class Test(greentest.TestCase):
__timeout__ = greentest.LARGE_TIMEOUT
def run_interaction(self, run_client):
server = Server(raise_on_timeout=run_client)
wref_to_hidden_server_socket = weakref.ref(server.socket._sock)
client = None
start_new_thread(server.handle_request)
if run_client:
client = Client(server.server_port)
start_new_thread(client.make_request)
# Wait until we do our business; we will always close
# the server; We may also close the client.
# On PyPy, we may not actually see the changes they write to
# their dicts immediately.
for obj in server, client:
if obj is None:
continue
while obj.socket is not None:
sleep(0.01)
# If we have a client, then we should have data
if run_client:
self.assertEqual(server.client_data, b'hello')
self.assertEqual(client.server_data, b'bye')
return wref_to_hidden_server_socket
def run_and_check(self, run_client):
wref_to_hidden_server_socket = self.run_interaction(run_client=run_client)
greentest.gc_collect_if_needed()
if wref_to_hidden_server_socket():
from pprint import pformat
print(pformat(gc.get_referrers(wref_to_hidden_server_socket())))
for x in gc.get_referrers(wref_to_hidden_server_socket()):
print(pformat(x))
for y in gc.get_referrers(x):
print('-', pformat(y))
self.fail('server socket should be dead by now')
def test_clean_exit(self):
self.run_and_check(True)
self.run_and_check(True)
def test_timeout_exit(self):
self.run_and_check(False)
self.run_and_check(False)
if __name__ == '__main__':
greentest.main()
| [
"ale_200200@hotmail.com"
] | ale_200200@hotmail.com |
08971a923b58b73eb2808bccbadaaf7dcaaaa8e1 | f305f84ea6f721c2391300f0a60e21d2ce14f2a5 | /前端笔记/thirtysecondsofcode/python/python的typings/15_可变元组.py | 388a8783be183c592d8de18c96e96c90e7e186e7 | [] | no_license | 981377660LMT/algorithm-study | f2ada3e6959338ae1bc21934a84f7314a8ecff82 | 7e79e26bb8f641868561b186e34c1127ed63c9e0 | refs/heads/master | 2023-09-01T18:26:16.525579 | 2023-09-01T12:21:58 | 2023-09-01T12:21:58 | 385,861,235 | 225 | 24 | null | null | null | null | UTF-8 | Python | false | false | 596 | py | from typing import Tuple
def f(t: Tuple[int, str]) -> None:
t = 1, 'foo' # OK
# t = 'foo', 1 # Type check error
# as immutable, varying-length sequences
# 元组也可以用作不变的、可变长度的序列
def print_squared(t: Tuple[int, ...]) -> None:
for n in t:
print(n, n ** 2)
print_squared(()) # OK
print_squared((1, 3, 5)) # OK
print_squared([1, 2]) # Error: only a tuple is valid
# 通常使用 Sequence [ t ]代替 Tuple [ t,... ]是一个更好的主意,
# 因为 Sequence 也可以兼容 list 和其他非 Tuple 序列。
| [
"lmt2818088@gmail.com"
] | lmt2818088@gmail.com |
23e96c6597b7532ed4372492d8361f0982204258 | f445450ac693b466ca20b42f1ac82071d32dd991 | /generated_tempdir_2019_09_15_163300/generated_part003092.py | 8f00262a01984e897f436fa3ac0a91a7479430cf | [] | no_license | Upabjojr/rubi_generated | 76e43cbafe70b4e1516fb761cabd9e5257691374 | cd35e9e51722b04fb159ada3d5811d62a423e429 | refs/heads/master | 2020-07-25T17:26:19.227918 | 2019-09-15T15:41:48 | 2019-09-15T15:41:48 | 208,357,412 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 235,033 | py | from sympy.abc import *
from matchpy.matching.many_to_one import CommutativeMatcher
from matchpy import *
from matchpy.utils import VariableWithCount
from collections import deque
from multiset import Multiset
from sympy.integrals.rubi.constraints import *
from sympy.integrals.rubi.utility_function import *
from sympy.integrals.rubi.rules.miscellaneous_integration import *
from sympy import *
class CommutativeMatcher3056(CommutativeMatcher):
_instance = None
patterns = {
0: (0, Multiset({}), [
(VariableWithCount('i2.2.1.0', 1, 1, None), Mul),
(VariableWithCount('i2.2.1.0_2', 1, 1, S(1)), Mul)
]),
1: (1, Multiset({}), [
(VariableWithCount('i2.2.1.0_1', 1, 1, S(1)), Mul),
(VariableWithCount('i2.2.1.1', 1, 1, None), Mul)
]),
2: (2, Multiset({0: 1}), [
(VariableWithCount('i2.2.1.0_1', 1, 1, S(1)), Mul)
]),
3: (3, Multiset({1: 1}), [
(VariableWithCount('i2.2.1.0_1', 1, 1, S(1)), Mul)
]),
4: (4, Multiset({2: 1}), [
(VariableWithCount('i2.2.1.0_1', 1, 1, S(1)), Mul)
]),
5: (5, Multiset({3: 1}), [
(VariableWithCount('i2.2.1.0_1', 1, 1, S(1)), Mul)
]),
6: (6, Multiset({}), [
(VariableWithCount('i2.2.1.0_1', 1, 1, None), Mul),
(VariableWithCount('i2.2.1.0_2', 1, 1, S(1)), Mul)
]),
7: (7, Multiset({}), [
(VariableWithCount('i2.2.1.0', 1, 1, S(1)), Mul),
(VariableWithCount('i2.2.3.1.1.0', 1, 1, None), Mul)
]),
8: (8, Multiset({}), [
(VariableWithCount('i2.2.1.0_1', 1, 1, S(1)), Mul),
(VariableWithCount('i2.2.1.4.1.1.0', 1, 1, None), Mul)
]),
9: (9, Multiset({}), [
(VariableWithCount('i2.2.1.0_1', 1, 1, S(1)), Mul),
(VariableWithCount('i2.4.1.1.0', 1, 1, None), Mul)
]),
10: (10, Multiset({4: 1}), [
(VariableWithCount('i2.2.1.0', 1, 1, S(1)), Mul)
]),
11: (11, Multiset({}), [
(VariableWithCount('i2.2.1.0_1', 1, 1, S(1)), Mul),
(VariableWithCount('i2.2.1.2.2.2.1.0', 1, 1, None), Mul)
]),
12: (12, Multiset({5: 1}), [
(VariableWithCount('i2.2.1.0_1', 1, 1, S(1)), Mul)
]),
13: (13, Multiset({6: 1}), [
(VariableWithCount('i2.2.1.0_1', 1, 1, S(1)), Mul)
]),
14: (14, Multiset({7: 1}), [
(VariableWithCount('i2.2.1.0_1', 1, 1, S(1)), Mul)
]),
15: (15, Multiset({8: 1}), [
(VariableWithCount('i2.2.1.0', 1, 1, S(1)), Mul)
]),
16: (16, Multiset({9: 1}), [
(VariableWithCount('i2.2.1.0', 1, 1, S(1)), Mul)
]),
17: (17, Multiset({10: 1}), [
(VariableWithCount('i2.2.1.0', 1, 1, S(1)), Mul)
]),
18: (18, Multiset({11: 1}), [
(VariableWithCount('i2.2.1.0', 1, 1, S(1)), Mul)
]),
19: (19, Multiset({12: 1}), [
(VariableWithCount('i2.2.1.0', 1, 1, S(1)), Mul)
]),
20: (20, Multiset({13: 1}), [
(VariableWithCount('i2.2.1.0', 1, 1, S(1)), Mul)
]),
21: (21, Multiset({}), [
(VariableWithCount('i2.2.1.0', 1, 1, S(1)), Mul),
(VariableWithCount('i2.2.2.1.0', 1, 1, None), Mul)
]),
22: (22, Multiset({}), [
(VariableWithCount('i2.2.1.0_1', 1, 1, S(1)), Mul),
(VariableWithCount('i2.2.1.2.1.0', 1, 1, None), Mul)
]),
23: (23, Multiset({14: 1}), [
(VariableWithCount('i2.2.1.0_2', 1, 1, S(1)), Mul)
]),
24: (24, Multiset({15: 1}), [
(VariableWithCount('i2.2.1.0', 1, 1, S(1)), Mul)
]),
25: (25, Multiset({16: 1}), [
(VariableWithCount('i2.2.1.0', 1, 1, S(1)), Mul)
]),
26: (26, Multiset({17: 1}), [
(VariableWithCount('i2.2.1.0', 1, 1, S(1)), Mul)
]),
27: (27, Multiset({18: 1}), [
(VariableWithCount('i2.2.1.0', 1, 1, S(1)), Mul)
]),
28: (28, Multiset({19: 1}), [
(VariableWithCount('i2.2.1.0', 1, 1, S(1)), Mul)
]),
29: (29, Multiset({20: 1}), [
(VariableWithCount('i2.2.1.0', 1, 1, S(1)), Mul)
]),
30: (30, Multiset({21: 1}), [
(VariableWithCount('i2.2.1.0_1', 1, 1, S(1)), Mul)
]),
31: (31, Multiset({22: 1}), [
(VariableWithCount('i2.2.1.0_1', 1, 1, S(1)), Mul)
]),
32: (32, Multiset({}), [
(VariableWithCount('i2.2.1.0', 1, 1, S(1)), Mul),
(VariableWithCount('i2.2.3.1.0', 1, 1, None), Mul)
]),
33: (33, Multiset({}), [
(VariableWithCount('i2.2.1.0_1', 1, 1, S(1)), Mul),
(VariableWithCount('i2.2.1.3.1.0', 1, 1, None), Mul)
]),
34: (34, Multiset({23: 1}), [
(VariableWithCount('i2.2.1.0_2', 1, 1, S(1)), Mul)
]),
35: (35, Multiset({24: 1}), [
(VariableWithCount('i2.2.1.0', 1, 1, S(1)), Mul)
]),
36: (36, Multiset({25: 1}), [
(VariableWithCount('i2.2.1.0', 1, 1, S(1)), Mul)
]),
37: (37, Multiset({26: 1}), [
(VariableWithCount('i2.2.1.0', 1, 1, S(1)), Mul)
]),
38: (38, Multiset({27: 1}), [
(VariableWithCount('i2.2.1.0_1', 1, 1, S(1)), Mul)
]),
39: (39, Multiset({28: 1}), [
(VariableWithCount('i2.2.1.0_1', 1, 1, S(1)), Mul)
]),
40: (40, Multiset({}), [
(VariableWithCount('i2.2.1.0_1', 1, 1, S(1)), Mul),
(VariableWithCount('i2.2.1.2.0', 1, 1, None), Mul)
]),
41: (41, Multiset({29: 1}), [
(VariableWithCount('i2.2.1.0_1', 1, 1, S(1)), Mul)
]),
42: (42, Multiset({30: 1}), [
(VariableWithCount('i2.2.1.0_1', 1, 1, S(1)), Mul)
]),
43: (43, Multiset({31: 1}), [
(VariableWithCount('i2.2.1.0_1', 1, 1, S(1)), Mul)
]),
44: (44, Multiset({32: 1}), [
(VariableWithCount('i2.2.1.0_1', 1, 1, S(1)), Mul)
]),
45: (45, Multiset({33: 1}), [
(VariableWithCount('i2.2.1.0_1', 1, 1, S(1)), Mul)
]),
46: (46, Multiset({34: 1}), [
(VariableWithCount('i2.2.1.0_1', 1, 1, S(1)), Mul)
]),
47: (47, Multiset({35: 1}), [
(VariableWithCount('i2.2.1.0_2', 1, 1, S(1)), Mul)
]),
48: (48, Multiset({36: 1}), [
(VariableWithCount('i2.2.1.0_2', 1, 1, S(1)), Mul)
]),
49: (49, Multiset({37: 1}), [
(VariableWithCount('i2.2.1.0_1', 1, 1, S(1)), Mul)
]),
50: (50, Multiset({38: 1}), [
(VariableWithCount('i2.2.1.0_1', 1, 1, S(1)), Mul)
]),
51: (51, Multiset({39: 1}), [
(VariableWithCount('i2.2.1.0_1', 1, 1, S(1)), Mul)
]),
52: (52, Multiset({40: 1}), [
(VariableWithCount('i2.2.1.0_1', 1, 1, S(1)), Mul)
]),
53: (53, Multiset({41: 1}), [
(VariableWithCount('i2.2.1.0_1', 1, 1, S(1)), Mul)
]),
54: (54, Multiset({42: 1}), [
(VariableWithCount('i2.2.1.0_1', 1, 1, S(1)), Mul)
]),
55: (55, Multiset({43: 1}), [
(VariableWithCount('i2.2.1.0_1', 1, 1, S(1)), Mul)
]),
56: (56, Multiset({}), [
(VariableWithCount('i2.2.1.0_2', 1, 1, None), Mul),
(VariableWithCount('i2.2.1.0_3', 1, 1, S(1)), Mul)
])
}
subjects = {}
subjects_by_id = {}
bipartite = BipartiteGraph()
associative = Mul
max_optional_count = 1
anonymous_patterns = set()
def __init__(self):
self.add_subject(None)
@staticmethod
def get():
if CommutativeMatcher3056._instance is None:
CommutativeMatcher3056._instance = CommutativeMatcher3056()
return CommutativeMatcher3056._instance
@staticmethod
def get_match_iter(subject):
subjects = deque([subject]) if subject is not None else deque()
subst0 = Substitution()
# State 3055
if len(subjects) >= 1 and isinstance(subjects[0], Pow):
tmp1 = subjects.popleft()
subjects2 = deque(tmp1._args)
# State 5569
if len(subjects2) >= 1:
tmp3 = subjects2.popleft()
subst1 = Substitution(subst0)
try:
subst1.try_add_variable('i2.2.1.1', tmp3)
except ValueError:
pass
else:
pass
# State 5570
if len(subjects2) >= 1 and subjects2[0] == Integer(2):
tmp5 = subjects2.popleft()
# State 5571
if len(subjects2) == 0:
pass
# State 5572
if len(subjects) == 0:
pass
# 0: x**2
yield 0, subst1
subjects2.appendleft(tmp5)
if len(subjects2) >= 1:
tmp6 = subjects2.popleft()
subst2 = Substitution(subst1)
try:
subst2.try_add_variable('i2.2.1.2', tmp6)
except ValueError:
pass
else:
pass
# State 7107
if len(subjects2) == 0:
pass
# State 7108
if len(subjects) == 0:
pass
# 1: x**j
yield 1, subst2
subjects2.appendleft(tmp6)
subst2 = Substitution(subst1)
try:
subst2.try_add_variable('i2.2.1.2_1', 1)
except ValueError:
pass
else:
pass
# State 7845
if len(subjects2) == 0:
pass
# State 7846
if len(subjects) == 0:
pass
# 3: x**n
yield 3, subst2
if len(subjects2) >= 1:
tmp9 = subjects2.popleft()
subst2 = Substitution(subst1)
try:
subst2.try_add_variable('i2.2.1.2_1', tmp9)
except ValueError:
pass
else:
pass
# State 7845
if len(subjects2) == 0:
pass
# State 7846
if len(subjects) == 0:
pass
# 3: x**n
yield 3, subst2
subjects2.appendleft(tmp9)
subjects2.appendleft(tmp3)
if len(subjects2) >= 1:
tmp11 = subjects2.popleft()
subst1 = Substitution(subst0)
try:
subst1.try_add_variable('i2.2.1.1_1', tmp11)
except ValueError:
pass
else:
pass
# State 7763
if len(subjects2) >= 1:
tmp13 = subjects2.popleft()
subst2 = Substitution(subst1)
try:
subst2.try_add_variable('i2.2.1.2', tmp13)
except ValueError:
pass
else:
pass
# State 7764
if len(subjects2) == 0:
pass
# State 7765
if len(subjects) == 0:
pass
# 2: v**n
yield 2, subst2
subjects2.appendleft(tmp13)
subjects2.appendleft(tmp11)
if len(subjects2) >= 1 and isinstance(subjects2[0], Pow):
tmp15 = subjects2.popleft()
subjects16 = deque(tmp15._args)
# State 16258
if len(subjects16) >= 1:
tmp17 = subjects16.popleft()
subst1 = Substitution(subst0)
try:
subst1.try_add_variable('i2.2.1.2', tmp17)
except ValueError:
pass
else:
pass
# State 16259
subst2 = Substitution(subst1)
try:
subst2.try_add_variable('i2.2.1.4.0', S(1))
except ValueError:
pass
else:
pass
# State 16260
subst3 = Substitution(subst2)
try:
subst3.try_add_variable('i2.2.1.4.1.0', S(0))
except ValueError:
pass
else:
pass
# State 16261
subst4 = Substitution(subst3)
try:
subst4.try_add_variable('i2.2.1.4.1.1.0', S(1))
except ValueError:
pass
else:
pass
# State 16262
if len(subjects16) >= 1:
tmp22 = subjects16.popleft()
subst5 = Substitution(subst4)
try:
subst5.try_add_variable('i2.2.3.1.1.0', tmp22)
except ValueError:
pass
else:
pass
# State 16263
if len(subjects16) == 0:
pass
# State 16264
subst6 = Substitution(subst5)
try:
subst6.try_add_variable('i2.2.1.4', 1)
except ValueError:
pass
else:
pass
# State 16265
if len(subjects2) == 0:
pass
# State 16266
if len(subjects) == 0:
pass
# 4: (F**(g*(e + f*x)))**n
yield 4, subst6
if len(subjects2) >= 1:
tmp25 = subjects2.popleft()
subst6 = Substitution(subst5)
try:
subst6.try_add_variable('i2.2.1.4', tmp25)
except ValueError:
pass
else:
pass
# State 16265
if len(subjects2) == 0:
pass
# State 16266
if len(subjects) == 0:
pass
# 4: (F**(g*(e + f*x)))**n
yield 4, subst6
subjects2.appendleft(tmp25)
subjects16.appendleft(tmp22)
if len(subjects16) >= 1 and isinstance(subjects16[0], Mul):
tmp27 = subjects16.popleft()
associative1 = tmp27
associative_type1 = type(tmp27)
subjects28 = deque(tmp27._args)
matcher = CommutativeMatcher16268.get()
tmp29 = subjects28
subjects28 = []
for s in tmp29:
matcher.add_subject(s)
for pattern_index, subst4 in matcher.match(tmp29, subst3):
pass
if pattern_index == 0:
pass
# State 16269
if len(subjects16) == 0:
pass
# State 16270
subst5 = Substitution(subst4)
try:
subst5.try_add_variable('i2.2.1.4', 1)
except ValueError:
pass
else:
pass
# State 16271
if len(subjects2) == 0:
pass
# State 16272
if len(subjects) == 0:
pass
# 4: (F**(g*(e + f*x)))**n
yield 4, subst5
if len(subjects2) >= 1:
tmp31 = subjects2.popleft()
subst5 = Substitution(subst4)
try:
subst5.try_add_variable('i2.2.1.4', tmp31)
except ValueError:
pass
else:
pass
# State 16271
if len(subjects2) == 0:
pass
# State 16272
if len(subjects) == 0:
pass
# 4: (F**(g*(e + f*x)))**n
yield 4, subst5
subjects2.appendleft(tmp31)
subjects16.appendleft(tmp27)
if len(subjects16) >= 1 and isinstance(subjects16[0], Add):
tmp33 = subjects16.popleft()
associative1 = tmp33
associative_type1 = type(tmp33)
subjects34 = deque(tmp33._args)
matcher = CommutativeMatcher16274.get()
tmp35 = subjects34
subjects34 = []
for s in tmp35:
matcher.add_subject(s)
for pattern_index, subst3 in matcher.match(tmp35, subst2):
pass
if pattern_index == 0:
pass
# State 16280
if len(subjects16) == 0:
pass
# State 16281
subst4 = Substitution(subst3)
try:
subst4.try_add_variable('i2.2.1.4', 1)
except ValueError:
pass
else:
pass
# State 16282
if len(subjects2) == 0:
pass
# State 16283
if len(subjects) == 0:
pass
# 4: (F**(g*(e + f*x)))**n
yield 4, subst4
if len(subjects2) >= 1:
tmp37 = subjects2.popleft()
subst4 = Substitution(subst3)
try:
subst4.try_add_variable('i2.2.1.4', tmp37)
except ValueError:
pass
else:
pass
# State 16282
if len(subjects2) == 0:
pass
# State 16283
if len(subjects) == 0:
pass
# 4: (F**(g*(e + f*x)))**n
yield 4, subst4
subjects2.appendleft(tmp37)
subjects16.appendleft(tmp33)
if len(subjects16) >= 1 and isinstance(subjects16[0], Mul):
tmp39 = subjects16.popleft()
associative1 = tmp39
associative_type1 = type(tmp39)
subjects40 = deque(tmp39._args)
matcher = CommutativeMatcher16285.get()
tmp41 = subjects40
subjects40 = []
for s in tmp41:
matcher.add_subject(s)
for pattern_index, subst2 in matcher.match(tmp41, subst1):
pass
if pattern_index == 0:
pass
# State 16300
if len(subjects16) == 0:
pass
# State 16301
subst3 = Substitution(subst2)
try:
subst3.try_add_variable('i2.2.1.4', 1)
except ValueError:
pass
else:
pass
# State 16302
if len(subjects2) == 0:
pass
# State 16303
if len(subjects) == 0:
pass
# 4: (F**(g*(e + f*x)))**n
yield 4, subst3
if len(subjects2) >= 1:
tmp43 = subjects2.popleft()
subst3 = Substitution(subst2)
try:
subst3.try_add_variable('i2.2.1.4', tmp43)
except ValueError:
pass
else:
pass
# State 16302
if len(subjects2) == 0:
pass
# State 16303
if len(subjects) == 0:
pass
# 4: (F**(g*(e + f*x)))**n
yield 4, subst3
subjects2.appendleft(tmp43)
subjects16.appendleft(tmp39)
subjects16.appendleft(tmp17)
subjects2.appendleft(tmp15)
if len(subjects2) >= 1 and isinstance(subjects2[0], sin):
tmp45 = subjects2.popleft()
subjects46 = deque(tmp45._args)
# State 63753
subst1 = Substitution(subst0)
try:
subst1.try_add_variable('i2.2.2.0', S(0))
except ValueError:
pass
else:
pass
# State 63754
subst2 = Substitution(subst1)
try:
subst2.try_add_variable('i2.2.2.1.0_1', S(1))
except ValueError:
pass
else:
pass
# State 63755
if len(subjects46) >= 1:
tmp49 = subjects46.popleft()
subst3 = Substitution(subst2)
try:
subst3.try_add_variable('i2.2.2.1.0', tmp49)
except ValueError:
pass
else:
pass
# State 63756
if len(subjects46) == 0:
pass
# State 63757
if len(subjects2) >= 1 and subjects2[0] == Integer(-1):
tmp51 = subjects2.popleft()
# State 63758
if len(subjects2) == 0:
pass
# State 63759
if len(subjects) == 0:
pass
# 12: 1/sin(e + x*f)
yield 12, subst3
subjects2.appendleft(tmp51)
subjects46.appendleft(tmp49)
if len(subjects46) >= 1 and isinstance(subjects46[0], Mul):
tmp52 = subjects46.popleft()
associative1 = tmp52
associative_type1 = type(tmp52)
subjects53 = deque(tmp52._args)
matcher = CommutativeMatcher63761.get()
tmp54 = subjects53
subjects53 = []
for s in tmp54:
matcher.add_subject(s)
for pattern_index, subst2 in matcher.match(tmp54, subst1):
pass
if pattern_index == 0:
pass
# State 63762
if len(subjects46) == 0:
pass
# State 63763
if len(subjects2) >= 1 and subjects2[0] == Integer(-1):
tmp55 = subjects2.popleft()
# State 63764
if len(subjects2) == 0:
pass
# State 63765
if len(subjects) == 0:
pass
# 12: 1/sin(e + x*f)
yield 12, subst2
subjects2.appendleft(tmp55)
subjects46.appendleft(tmp52)
subst1 = Substitution(subst0)
try:
subst1.try_add_variable('i2.2.3.0', S(0))
except ValueError:
pass
else:
pass
# State 91047
subst2 = Substitution(subst1)
try:
subst2.try_add_variable('i2.2.3.1.0_1', S(1))
except ValueError:
pass
else:
pass
# State 91048
if len(subjects46) >= 1:
tmp58 = subjects46.popleft()
subst3 = Substitution(subst2)
try:
subst3.try_add_variable('i2.2.3.1.0', tmp58)
except ValueError:
pass
else:
pass
# State 91049
if len(subjects46) == 0:
pass
# State 91050
if len(subjects2) >= 1 and subjects2[0] == Integer(-1):
tmp60 = subjects2.popleft()
# State 91051
if len(subjects2) == 0:
pass
# State 91052
if len(subjects) == 0:
pass
# 24: 1/sin(e + x*f)
yield 24, subst3
subjects2.appendleft(tmp60)
subjects46.appendleft(tmp58)
if len(subjects46) >= 1 and isinstance(subjects46[0], Mul):
tmp61 = subjects46.popleft()
associative1 = tmp61
associative_type1 = type(tmp61)
subjects62 = deque(tmp61._args)
matcher = CommutativeMatcher91054.get()
tmp63 = subjects62
subjects62 = []
for s in tmp63:
matcher.add_subject(s)
for pattern_index, subst2 in matcher.match(tmp63, subst1):
pass
if pattern_index == 0:
pass
# State 91055
if len(subjects46) == 0:
pass
# State 91056
if len(subjects2) >= 1 and subjects2[0] == Integer(-1):
tmp64 = subjects2.popleft()
# State 91057
if len(subjects2) == 0:
pass
# State 91058
if len(subjects) == 0:
pass
# 24: 1/sin(e + x*f)
yield 24, subst2
subjects2.appendleft(tmp64)
subjects46.appendleft(tmp61)
subst1 = Substitution(subst0)
try:
subst1.try_add_variable('i2.2.2.3.0', S(0))
except ValueError:
pass
else:
pass
# State 92007
subst2 = Substitution(subst1)
try:
subst2.try_add_variable('i2.2.2.3.1.0_1', S(1))
except ValueError:
pass
else:
pass
# State 92008
if len(subjects46) >= 1:
tmp67 = subjects46.popleft()
subst3 = Substitution(subst2)
try:
subst3.try_add_variable('i2.2.2.3.1.0', tmp67)
except ValueError:
pass
else:
pass
# State 92009
if len(subjects46) == 0:
pass
# State 92010
if len(subjects2) >= 1 and subjects2[0] == Integer(-1):
tmp69 = subjects2.popleft()
# State 92011
if len(subjects2) == 0:
pass
# State 92012
if len(subjects) == 0:
pass
# 26: 1/sin(e + x*f)
yield 26, subst3
subjects2.appendleft(tmp69)
subjects46.appendleft(tmp67)
if len(subjects46) >= 1 and isinstance(subjects46[0], Mul):
tmp70 = subjects46.popleft()
associative1 = tmp70
associative_type1 = type(tmp70)
subjects71 = deque(tmp70._args)
matcher = CommutativeMatcher92014.get()
tmp72 = subjects71
subjects71 = []
for s in tmp72:
matcher.add_subject(s)
for pattern_index, subst2 in matcher.match(tmp72, subst1):
pass
if pattern_index == 0:
pass
# State 92015
if len(subjects46) == 0:
pass
# State 92016
if len(subjects2) >= 1 and subjects2[0] == Integer(-1):
tmp73 = subjects2.popleft()
# State 92017
if len(subjects2) == 0:
pass
# State 92018
if len(subjects) == 0:
pass
# 26: 1/sin(e + x*f)
yield 26, subst2
subjects2.appendleft(tmp73)
subjects46.appendleft(tmp70)
if len(subjects46) >= 1 and isinstance(subjects46[0], Add):
tmp74 = subjects46.popleft()
associative1 = tmp74
associative_type1 = type(tmp74)
subjects75 = deque(tmp74._args)
matcher = CommutativeMatcher63767.get()
tmp76 = subjects75
subjects75 = []
for s in tmp76:
matcher.add_subject(s)
for pattern_index, subst1 in matcher.match(tmp76, subst0):
pass
if pattern_index == 0:
pass
# State 63773
if len(subjects46) == 0:
pass
# State 63774
if len(subjects2) >= 1 and subjects2[0] == Integer(-1):
tmp77 = subjects2.popleft()
# State 63775
if len(subjects2) == 0:
pass
# State 63776
if len(subjects) == 0:
pass
# 12: 1/sin(e + x*f)
yield 12, subst1
subjects2.appendleft(tmp77)
if pattern_index == 1:
pass
# State 91062
if len(subjects46) == 0:
pass
# State 91063
if len(subjects2) >= 1 and subjects2[0] == Integer(-1):
tmp78 = subjects2.popleft()
# State 91064
if len(subjects2) == 0:
pass
# State 91065
if len(subjects) == 0:
pass
# 24: 1/sin(e + x*f)
yield 24, subst1
subjects2.appendleft(tmp78)
if pattern_index == 2:
pass
# State 92022
if len(subjects46) == 0:
pass
# State 92023
if len(subjects2) >= 1 and subjects2[0] == Integer(-1):
tmp79 = subjects2.popleft()
# State 92024
if len(subjects2) == 0:
pass
# State 92025
if len(subjects) == 0:
pass
# 26: 1/sin(e + x*f)
yield 26, subst1
subjects2.appendleft(tmp79)
subjects46.appendleft(tmp74)
subjects2.appendleft(tmp45)
if len(subjects2) >= 1 and isinstance(subjects2[0], cos):
tmp80 = subjects2.popleft()
subjects81 = deque(tmp80._args)
# State 64032
subst1 = Substitution(subst0)
try:
subst1.try_add_variable('i2.2.2.0', S(0))
except ValueError:
pass
else:
pass
# State 64033
subst2 = Substitution(subst1)
try:
subst2.try_add_variable('i2.2.2.1.0_1', S(1))
except ValueError:
pass
else:
pass
# State 64034
if len(subjects81) >= 1:
tmp84 = subjects81.popleft()
subst3 = Substitution(subst2)
try:
subst3.try_add_variable('i2.2.2.1.0', tmp84)
except ValueError:
pass
else:
pass
# State 64035
if len(subjects81) == 0:
pass
# State 64036
if len(subjects2) >= 1 and subjects2[0] == Integer(-1):
tmp86 = subjects2.popleft()
# State 64037
if len(subjects2) == 0:
pass
# State 64038
if len(subjects) == 0:
pass
# 13: 1/cos(e + x*f)
yield 13, subst3
subjects2.appendleft(tmp86)
subjects81.appendleft(tmp84)
if len(subjects81) >= 1 and isinstance(subjects81[0], Mul):
tmp87 = subjects81.popleft()
associative1 = tmp87
associative_type1 = type(tmp87)
subjects88 = deque(tmp87._args)
matcher = CommutativeMatcher64040.get()
tmp89 = subjects88
subjects88 = []
for s in tmp89:
matcher.add_subject(s)
for pattern_index, subst2 in matcher.match(tmp89, subst1):
pass
if pattern_index == 0:
pass
# State 64041
if len(subjects81) == 0:
pass
# State 64042
if len(subjects2) >= 1 and subjects2[0] == Integer(-1):
tmp90 = subjects2.popleft()
# State 64043
if len(subjects2) == 0:
pass
# State 64044
if len(subjects) == 0:
pass
# 13: 1/cos(e + x*f)
yield 13, subst2
subjects2.appendleft(tmp90)
subjects81.appendleft(tmp87)
subst1 = Substitution(subst0)
try:
subst1.try_add_variable('i2.2.2.3.0', S(0))
except ValueError:
pass
else:
pass
# State 91826
subst2 = Substitution(subst1)
try:
subst2.try_add_variable('i2.2.2.3.1.0_1', S(1))
except ValueError:
pass
else:
pass
# State 91827
if len(subjects81) >= 1:
tmp93 = subjects81.popleft()
subst3 = Substitution(subst2)
try:
subst3.try_add_variable('i2.2.2.3.1.0', tmp93)
except ValueError:
pass
else:
pass
# State 91828
if len(subjects81) == 0:
pass
# State 91829
if len(subjects2) >= 1 and subjects2[0] == Integer(-1):
tmp95 = subjects2.popleft()
# State 91830
if len(subjects2) == 0:
pass
# State 91831
if len(subjects) == 0:
pass
# 25: 1/cos(e + x*f)
yield 25, subst3
subjects2.appendleft(tmp95)
subjects81.appendleft(tmp93)
if len(subjects81) >= 1 and isinstance(subjects81[0], Mul):
tmp96 = subjects81.popleft()
associative1 = tmp96
associative_type1 = type(tmp96)
subjects97 = deque(tmp96._args)
matcher = CommutativeMatcher91833.get()
tmp98 = subjects97
subjects97 = []
for s in tmp98:
matcher.add_subject(s)
for pattern_index, subst2 in matcher.match(tmp98, subst1):
pass
if pattern_index == 0:
pass
# State 91834
if len(subjects81) == 0:
pass
# State 91835
if len(subjects2) >= 1 and subjects2[0] == Integer(-1):
tmp99 = subjects2.popleft()
# State 91836
if len(subjects2) == 0:
pass
# State 91837
if len(subjects) == 0:
pass
# 25: 1/cos(e + x*f)
yield 25, subst2
subjects2.appendleft(tmp99)
subjects81.appendleft(tmp96)
if len(subjects81) >= 1 and isinstance(subjects81[0], Add):
tmp100 = subjects81.popleft()
associative1 = tmp100
associative_type1 = type(tmp100)
subjects101 = deque(tmp100._args)
matcher = CommutativeMatcher64046.get()
tmp102 = subjects101
subjects101 = []
for s in tmp102:
matcher.add_subject(s)
for pattern_index, subst1 in matcher.match(tmp102, subst0):
pass
if pattern_index == 0:
pass
# State 64052
if len(subjects81) == 0:
pass
# State 64053
if len(subjects2) >= 1 and subjects2[0] == Integer(-1):
tmp103 = subjects2.popleft()
# State 64054
if len(subjects2) == 0:
pass
# State 64055
if len(subjects) == 0:
pass
# 13: 1/cos(e + x*f)
yield 13, subst1
subjects2.appendleft(tmp103)
if pattern_index == 1:
pass
# State 91841
if len(subjects81) == 0:
pass
# State 91842
if len(subjects2) >= 1 and subjects2[0] == Integer(-1):
tmp104 = subjects2.popleft()
# State 91843
if len(subjects2) == 0:
pass
# State 91844
if len(subjects) == 0:
pass
# 25: 1/cos(e + x*f)
yield 25, subst1
subjects2.appendleft(tmp104)
subjects81.appendleft(tmp100)
subjects2.appendleft(tmp80)
if len(subjects2) >= 1 and isinstance(subjects2[0], tan):
tmp105 = subjects2.popleft()
subjects106 = deque(tmp105._args)
# State 78541
subst1 = Substitution(subst0)
try:
subst1.try_add_variable('i2.2.3.0', S(0))
except ValueError:
pass
else:
pass
# State 78542
subst2 = Substitution(subst1)
try:
subst2.try_add_variable('i2.2.3.1.0_1', S(1))
except ValueError:
pass
else:
pass
# State 78543
if len(subjects106) >= 1:
tmp109 = subjects106.popleft()
subst3 = Substitution(subst2)
try:
subst3.try_add_variable('i2.2.3.1.0', tmp109)
except ValueError:
pass
else:
pass
# State 78544
if len(subjects106) == 0:
pass
# State 78545
if len(subjects2) >= 1 and subjects2[0] == Integer(-1):
tmp111 = subjects2.popleft()
# State 78546
if len(subjects2) == 0:
pass
# State 78547
if len(subjects) == 0:
pass
# 16: 1/tan(e + x*f)
yield 16, subst3
subjects2.appendleft(tmp111)
subjects106.appendleft(tmp109)
if len(subjects106) >= 1 and isinstance(subjects106[0], Mul):
tmp112 = subjects106.popleft()
associative1 = tmp112
associative_type1 = type(tmp112)
subjects113 = deque(tmp112._args)
matcher = CommutativeMatcher78549.get()
tmp114 = subjects113
subjects113 = []
for s in tmp114:
matcher.add_subject(s)
for pattern_index, subst2 in matcher.match(tmp114, subst1):
pass
if pattern_index == 0:
pass
# State 78550
if len(subjects106) == 0:
pass
# State 78551
if len(subjects2) >= 1 and subjects2[0] == Integer(-1):
tmp115 = subjects2.popleft()
# State 78552
if len(subjects2) == 0:
pass
# State 78553
if len(subjects) == 0:
pass
# 16: 1/tan(e + x*f)
yield 16, subst2
subjects2.appendleft(tmp115)
subjects106.appendleft(tmp112)
subst1 = Substitution(subst0)
try:
subst1.try_add_variable('i2.2.2.0', S(0))
except ValueError:
pass
else:
pass
# State 79865
subst2 = Substitution(subst1)
try:
subst2.try_add_variable('i2.2.2.1.0_1', S(1))
except ValueError:
pass
else:
pass
# State 79866
if len(subjects106) >= 1:
tmp118 = subjects106.popleft()
subst3 = Substitution(subst2)
try:
subst3.try_add_variable('i2.2.2.1.0', tmp118)
except ValueError:
pass
else:
pass
# State 79867
if len(subjects106) == 0:
pass
# State 79868
if len(subjects2) >= 1 and subjects2[0] == Integer(-1):
tmp120 = subjects2.popleft()
# State 79869
if len(subjects2) == 0:
pass
# State 79870
if len(subjects) == 0:
pass
# 17: 1/tan(e + x*f)
yield 17, subst3
subjects2.appendleft(tmp120)
subjects106.appendleft(tmp118)
if len(subjects106) >= 1 and isinstance(subjects106[0], Mul):
tmp121 = subjects106.popleft()
associative1 = tmp121
associative_type1 = type(tmp121)
subjects122 = deque(tmp121._args)
matcher = CommutativeMatcher79872.get()
tmp123 = subjects122
subjects122 = []
for s in tmp123:
matcher.add_subject(s)
for pattern_index, subst2 in matcher.match(tmp123, subst1):
pass
if pattern_index == 0:
pass
# State 79873
if len(subjects106) == 0:
pass
# State 79874
if len(subjects2) >= 1 and subjects2[0] == Integer(-1):
tmp124 = subjects2.popleft()
# State 79875
if len(subjects2) == 0:
pass
# State 79876
if len(subjects) == 0:
pass
# 17: 1/tan(e + x*f)
yield 17, subst2
subjects2.appendleft(tmp124)
subjects106.appendleft(tmp121)
subst1 = Substitution(subst0)
try:
subst1.try_add_variable('i2.2.2.3.0', S(0))
except ValueError:
pass
else:
pass
# State 80307
subst2 = Substitution(subst1)
try:
subst2.try_add_variable('i2.2.2.3.1.0_1', S(1))
except ValueError:
pass
else:
pass
# State 80308
if len(subjects106) >= 1:
tmp127 = subjects106.popleft()
subst3 = Substitution(subst2)
try:
subst3.try_add_variable('i2.2.2.3.1.0', tmp127)
except ValueError:
pass
else:
pass
# State 80309
if len(subjects106) == 0:
pass
# State 80310
if len(subjects2) >= 1 and subjects2[0] == Integer(-1):
tmp129 = subjects2.popleft()
# State 80311
if len(subjects2) == 0:
pass
# State 80312
if len(subjects) == 0:
pass
# 19: 1/tan(e + x*f)
yield 19, subst3
subjects2.appendleft(tmp129)
subjects106.appendleft(tmp127)
if len(subjects106) >= 1 and isinstance(subjects106[0], Mul):
tmp130 = subjects106.popleft()
associative1 = tmp130
associative_type1 = type(tmp130)
subjects131 = deque(tmp130._args)
matcher = CommutativeMatcher80314.get()
tmp132 = subjects131
subjects131 = []
for s in tmp132:
matcher.add_subject(s)
for pattern_index, subst2 in matcher.match(tmp132, subst1):
pass
if pattern_index == 0:
pass
# State 80315
if len(subjects106) == 0:
pass
# State 80316
if len(subjects2) >= 1 and subjects2[0] == Integer(-1):
tmp133 = subjects2.popleft()
# State 80317
if len(subjects2) == 0:
pass
# State 80318
if len(subjects) == 0:
pass
# 19: 1/tan(e + x*f)
yield 19, subst2
subjects2.appendleft(tmp133)
subjects106.appendleft(tmp130)
subst1 = Substitution(subst0)
try:
subst1.try_add_variable('i2.2.4.0', S(0))
except ValueError:
pass
else:
pass
# State 80755
subst2 = Substitution(subst1)
try:
subst2.try_add_variable('i2.2.4.1.0_1', S(1))
except ValueError:
pass
else:
pass
# State 80756
if len(subjects106) >= 1:
tmp136 = subjects106.popleft()
subst3 = Substitution(subst2)
try:
subst3.try_add_variable('i2.2.4.1.0', tmp136)
except ValueError:
pass
else:
pass
# State 80757
if len(subjects106) == 0:
pass
# State 80758
if len(subjects2) >= 1 and subjects2[0] == Integer(-1):
tmp138 = subjects2.popleft()
# State 80759
if len(subjects2) == 0:
pass
# State 80760
if len(subjects) == 0:
pass
# 20: 1/tan(e + x*f)
yield 20, subst3
subjects2.appendleft(tmp138)
subjects106.appendleft(tmp136)
if len(subjects106) >= 1 and isinstance(subjects106[0], Mul):
tmp139 = subjects106.popleft()
associative1 = tmp139
associative_type1 = type(tmp139)
subjects140 = deque(tmp139._args)
matcher = CommutativeMatcher80762.get()
tmp141 = subjects140
subjects140 = []
for s in tmp141:
matcher.add_subject(s)
for pattern_index, subst2 in matcher.match(tmp141, subst1):
pass
if pattern_index == 0:
pass
# State 80763
if len(subjects106) == 0:
pass
# State 80764
if len(subjects2) >= 1 and subjects2[0] == Integer(-1):
tmp142 = subjects2.popleft()
# State 80765
if len(subjects2) == 0:
pass
# State 80766
if len(subjects) == 0:
pass
# 20: 1/tan(e + x*f)
yield 20, subst2
subjects2.appendleft(tmp142)
subjects106.appendleft(tmp139)
subst1 = Substitution(subst0)
try:
subst1.try_add_variable('i2.2.1.3.0', S(0))
except ValueError:
pass
else:
pass
# State 81858
subst2 = Substitution(subst1)
try:
subst2.try_add_variable('i2.2.1.3.1.0_1', S(1))
except ValueError:
pass
else:
pass
# State 81859
if len(subjects106) >= 1:
tmp145 = subjects106.popleft()
subst3 = Substitution(subst2)
try:
subst3.try_add_variable('i2.2.1.3.1.0', tmp145)
except ValueError:
pass
else:
pass
# State 81860
if len(subjects106) == 0:
pass
# State 81861
if len(subjects2) >= 1 and subjects2[0] == Integer(-1):
tmp147 = subjects2.popleft()
# State 81862
if len(subjects2) == 0:
pass
# State 81863
if len(subjects) == 0:
pass
# 22: 1/tan(e + x*f)
yield 22, subst3
subjects2.appendleft(tmp147)
subjects106.appendleft(tmp145)
if len(subjects106) >= 1 and isinstance(subjects106[0], Mul):
tmp148 = subjects106.popleft()
associative1 = tmp148
associative_type1 = type(tmp148)
subjects149 = deque(tmp148._args)
matcher = CommutativeMatcher81865.get()
tmp150 = subjects149
subjects149 = []
for s in tmp150:
matcher.add_subject(s)
for pattern_index, subst2 in matcher.match(tmp150, subst1):
pass
if pattern_index == 0:
pass
# State 81866
if len(subjects106) == 0:
pass
# State 81867
if len(subjects2) >= 1 and subjects2[0] == Integer(-1):
tmp151 = subjects2.popleft()
# State 81868
if len(subjects2) == 0:
pass
# State 81869
if len(subjects) == 0:
pass
# 22: 1/tan(e + x*f)
yield 22, subst2
subjects2.appendleft(tmp151)
subjects106.appendleft(tmp148)
if len(subjects106) >= 1 and isinstance(subjects106[0], Add):
tmp152 = subjects106.popleft()
associative1 = tmp152
associative_type1 = type(tmp152)
subjects153 = deque(tmp152._args)
matcher = CommutativeMatcher78555.get()
tmp154 = subjects153
subjects153 = []
for s in tmp154:
matcher.add_subject(s)
for pattern_index, subst1 in matcher.match(tmp154, subst0):
pass
if pattern_index == 0:
pass
# State 78561
if len(subjects106) == 0:
pass
# State 78562
if len(subjects2) >= 1 and subjects2[0] == Integer(-1):
tmp155 = subjects2.popleft()
# State 78563
if len(subjects2) == 0:
pass
# State 78564
if len(subjects) == 0:
pass
# 16: 1/tan(e + x*f)
yield 16, subst1
subjects2.appendleft(tmp155)
if pattern_index == 1:
pass
# State 79880
if len(subjects106) == 0:
pass
# State 79881
if len(subjects2) >= 1 and subjects2[0] == Integer(-1):
tmp156 = subjects2.popleft()
# State 79882
if len(subjects2) == 0:
pass
# State 79883
if len(subjects) == 0:
pass
# 17: 1/tan(e + x*f)
yield 17, subst1
subjects2.appendleft(tmp156)
if pattern_index == 2:
pass
# State 80322
if len(subjects106) == 0:
pass
# State 80323
if len(subjects2) >= 1 and subjects2[0] == Integer(-1):
tmp157 = subjects2.popleft()
# State 80324
if len(subjects2) == 0:
pass
# State 80325
if len(subjects) == 0:
pass
# 19: 1/tan(e + x*f)
yield 19, subst1
subjects2.appendleft(tmp157)
if pattern_index == 3:
pass
# State 80770
if len(subjects106) == 0:
pass
# State 80771
if len(subjects2) >= 1 and subjects2[0] == Integer(-1):
tmp158 = subjects2.popleft()
# State 80772
if len(subjects2) == 0:
pass
# State 80773
if len(subjects) == 0:
pass
# 20: 1/tan(e + x*f)
yield 20, subst1
subjects2.appendleft(tmp158)
if pattern_index == 4:
pass
# State 81873
if len(subjects106) == 0:
pass
# State 81874
if len(subjects2) >= 1 and subjects2[0] == Integer(-1):
tmp159 = subjects2.popleft()
# State 81875
if len(subjects2) == 0:
pass
# State 81876
if len(subjects) == 0:
pass
# 22: 1/tan(e + x*f)
yield 22, subst1
subjects2.appendleft(tmp159)
subjects106.appendleft(tmp152)
subjects2.appendleft(tmp105)
subjects.appendleft(tmp1)
subst1 = Substitution(subst0)
try:
subst1.try_add_variable('i2.2.1.2_1', S(1))
except ValueError:
pass
else:
pass
# State 7843
if len(subjects) >= 1:
tmp161 = subjects.popleft()
subst2 = Substitution(subst1)
try:
subst2.try_add_variable('i2.2.1.1', tmp161)
except ValueError:
pass
else:
pass
# State 7844
if len(subjects) == 0:
pass
# 3: x**n
yield 3, subst2
subjects.appendleft(tmp161)
subst1 = Substitution(subst0)
try:
subst1.try_add_variable('i2.2.1.4', S(1))
except ValueError:
pass
else:
pass
# State 16219
if len(subjects) >= 1 and isinstance(subjects[0], Pow):
tmp164 = subjects.popleft()
subjects165 = deque(tmp164._args)
# State 16220
if len(subjects165) >= 1:
tmp166 = subjects165.popleft()
subst2 = Substitution(subst1)
try:
subst2.try_add_variable('i2.2.1.2', tmp166)
except ValueError:
pass
else:
pass
# State 16221
subst3 = Substitution(subst2)
try:
subst3.try_add_variable('i2.2.1.4.0', S(1))
except ValueError:
pass
else:
pass
# State 16222
subst4 = Substitution(subst3)
try:
subst4.try_add_variable('i2.2.1.4.1.0', S(0))
except ValueError:
pass
else:
pass
# State 16223
subst5 = Substitution(subst4)
try:
subst5.try_add_variable('i2.2.1.4.1.1.0', S(1))
except ValueError:
pass
else:
pass
# State 16224
if len(subjects165) >= 1:
tmp171 = subjects165.popleft()
subst6 = Substitution(subst5)
try:
subst6.try_add_variable('i2.2.3.1.1.0', tmp171)
except ValueError:
pass
else:
pass
# State 16225
if len(subjects165) == 0:
pass
# State 16226
if len(subjects) == 0:
pass
# 4: (F**(g*(e + f*x)))**n
yield 4, subst6
subjects165.appendleft(tmp171)
if len(subjects165) >= 1 and isinstance(subjects165[0], Mul):
tmp173 = subjects165.popleft()
associative1 = tmp173
associative_type1 = type(tmp173)
subjects174 = deque(tmp173._args)
matcher = CommutativeMatcher16228.get()
tmp175 = subjects174
subjects174 = []
for s in tmp175:
matcher.add_subject(s)
for pattern_index, subst5 in matcher.match(tmp175, subst4):
pass
if pattern_index == 0:
pass
# State 16229
if len(subjects165) == 0:
pass
# State 16230
if len(subjects) == 0:
pass
# 4: (F**(g*(e + f*x)))**n
yield 4, subst5
subjects165.appendleft(tmp173)
if len(subjects165) >= 1 and isinstance(subjects165[0], Add):
tmp176 = subjects165.popleft()
associative1 = tmp176
associative_type1 = type(tmp176)
subjects177 = deque(tmp176._args)
matcher = CommutativeMatcher16232.get()
tmp178 = subjects177
subjects177 = []
for s in tmp178:
matcher.add_subject(s)
for pattern_index, subst4 in matcher.match(tmp178, subst3):
pass
if pattern_index == 0:
pass
# State 16238
if len(subjects165) == 0:
pass
# State 16239
if len(subjects) == 0:
pass
# 4: (F**(g*(e + f*x)))**n
yield 4, subst4
subjects165.appendleft(tmp176)
if len(subjects165) >= 1 and isinstance(subjects165[0], Mul):
tmp179 = subjects165.popleft()
associative1 = tmp179
associative_type1 = type(tmp179)
subjects180 = deque(tmp179._args)
matcher = CommutativeMatcher16241.get()
tmp181 = subjects180
subjects180 = []
for s in tmp181:
matcher.add_subject(s)
for pattern_index, subst3 in matcher.match(tmp181, subst2):
pass
if pattern_index == 0:
pass
# State 16256
if len(subjects165) == 0:
pass
# State 16257
if len(subjects) == 0:
pass
# 4: (F**(g*(e + f*x)))**n
yield 4, subst3
subjects165.appendleft(tmp179)
subjects165.appendleft(tmp166)
subjects.appendleft(tmp164)
if len(subjects) >= 1 and isinstance(subjects[0], log):
tmp182 = subjects.popleft()
subjects183 = deque(tmp182._args)
# State 36538
subst1 = Substitution(subst0)
try:
subst1.try_add_variable('i2.2.1.2.0', S(1))
except ValueError:
pass
else:
pass
# State 36539
subst2 = Substitution(subst1)
try:
subst2.try_add_variable('i2.2.1.2.2', S(1))
except ValueError:
pass
else:
pass
# State 36540
subst3 = Substitution(subst2)
try:
subst3.try_add_variable('i2.2.1.2.2.0', S(1))
except ValueError:
pass
else:
pass
# State 36541
subst4 = Substitution(subst3)
try:
subst4.try_add_variable('i2.2.1.2.2.2', S(1))
except ValueError:
pass
else:
pass
# State 36542
subst5 = Substitution(subst4)
try:
subst5.try_add_variable('i2.2.1.2.2.2.0', S(0))
except ValueError:
pass
else:
pass
# State 36543
subst6 = Substitution(subst5)
try:
subst6.try_add_variable('i2.2.1.2.2.2.1.0', S(1))
except ValueError:
pass
else:
pass
# State 36544
if len(subjects183) >= 1 and isinstance(subjects183[0], Pow):
tmp190 = subjects183.popleft()
subjects191 = deque(tmp190._args)
# State 36545
if len(subjects191) >= 1:
tmp192 = subjects191.popleft()
subst7 = Substitution(subst6)
try:
subst7.try_add_variable('i2.2.1.1', tmp192)
except ValueError:
pass
else:
pass
# State 36546
if len(subjects191) >= 1:
tmp194 = subjects191.popleft()
subst8 = Substitution(subst7)
try:
subst8.try_add_variable('i2.2.1.2', tmp194)
except ValueError:
pass
else:
pass
# State 36547
if len(subjects191) == 0:
pass
# State 36548
if len(subjects183) == 0:
pass
# State 36549
if len(subjects) == 0:
pass
# 5: log(c*(d*(x**j*f + e)**p)**q)
yield 5, subst8
subjects191.appendleft(tmp194)
subjects191.appendleft(tmp192)
subjects183.appendleft(tmp190)
if len(subjects183) >= 1 and isinstance(subjects183[0], Mul):
tmp196 = subjects183.popleft()
associative1 = tmp196
associative_type1 = type(tmp196)
subjects197 = deque(tmp196._args)
matcher = CommutativeMatcher36551.get()
tmp198 = subjects197
subjects197 = []
for s in tmp198:
matcher.add_subject(s)
for pattern_index, subst6 in matcher.match(tmp198, subst5):
pass
if pattern_index == 0:
pass
# State 36556
if len(subjects183) == 0:
pass
# State 36557
if len(subjects) == 0:
pass
# 5: log(c*(d*(x**j*f + e)**p)**q)
yield 5, subst6
subjects183.appendleft(tmp196)
if len(subjects183) >= 1 and isinstance(subjects183[0], Add):
tmp199 = subjects183.popleft()
associative1 = tmp199
associative_type1 = type(tmp199)
subjects200 = deque(tmp199._args)
matcher = CommutativeMatcher36559.get()
tmp201 = subjects200
subjects200 = []
for s in tmp201:
matcher.add_subject(s)
for pattern_index, subst5 in matcher.match(tmp201, subst4):
pass
if pattern_index == 0:
pass
# State 36572
if len(subjects183) == 0:
pass
# State 36573
if len(subjects) == 0:
pass
# 5: log(c*(d*(x**j*f + e)**p)**q)
yield 5, subst5
subjects183.appendleft(tmp199)
if len(subjects183) >= 1 and isinstance(subjects183[0], Pow):
tmp202 = subjects183.popleft()
subjects203 = deque(tmp202._args)
# State 36574
subst4 = Substitution(subst3)
try:
subst4.try_add_variable('i2.2.1.2.2.2.0', S(0))
except ValueError:
pass
else:
pass
# State 36575
subst5 = Substitution(subst4)
try:
subst5.try_add_variable('i2.2.1.2.2.2.1.0', S(1))
except ValueError:
pass
else:
pass
# State 36576
if len(subjects203) >= 1 and isinstance(subjects203[0], Pow):
tmp206 = subjects203.popleft()
subjects207 = deque(tmp206._args)
# State 36577
if len(subjects207) >= 1:
tmp208 = subjects207.popleft()
subst6 = Substitution(subst5)
try:
subst6.try_add_variable('i2.2.1.1', tmp208)
except ValueError:
pass
else:
pass
# State 36578
if len(subjects207) >= 1:
tmp210 = subjects207.popleft()
subst7 = Substitution(subst6)
try:
subst7.try_add_variable('i2.2.1.2', tmp210)
except ValueError:
pass
else:
pass
# State 36579
if len(subjects207) == 0:
pass
# State 36580
subst8 = Substitution(subst7)
try:
subst8.try_add_variable('i2.2.1.2.2.2', 1)
except ValueError:
pass
else:
pass
# State 36581
if len(subjects203) == 0:
pass
# State 36582
if len(subjects183) == 0:
pass
# State 36583
if len(subjects) == 0:
pass
# 5: log(c*(d*(x**j*f + e)**p)**q)
yield 5, subst8
if len(subjects203) >= 1:
tmp213 = subjects203.popleft()
subst8 = Substitution(subst7)
try:
subst8.try_add_variable('i2.2.1.2.2.2', tmp213)
except ValueError:
pass
else:
pass
# State 36581
if len(subjects203) == 0:
pass
# State 36582
if len(subjects183) == 0:
pass
# State 36583
if len(subjects) == 0:
pass
# 5: log(c*(d*(x**j*f + e)**p)**q)
yield 5, subst8
subjects203.appendleft(tmp213)
subjects207.appendleft(tmp210)
subjects207.appendleft(tmp208)
subjects203.appendleft(tmp206)
if len(subjects203) >= 1 and isinstance(subjects203[0], Mul):
tmp215 = subjects203.popleft()
associative1 = tmp215
associative_type1 = type(tmp215)
subjects216 = deque(tmp215._args)
matcher = CommutativeMatcher36585.get()
tmp217 = subjects216
subjects216 = []
for s in tmp217:
matcher.add_subject(s)
for pattern_index, subst5 in matcher.match(tmp217, subst4):
pass
if pattern_index == 0:
pass
# State 36590
subst6 = Substitution(subst5)
try:
subst6.try_add_variable('i2.2.1.2.2.2', 1)
except ValueError:
pass
else:
pass
# State 36591
if len(subjects203) == 0:
pass
# State 36592
if len(subjects183) == 0:
pass
# State 36593
if len(subjects) == 0:
pass
# 5: log(c*(d*(x**j*f + e)**p)**q)
yield 5, subst6
if len(subjects203) >= 1:
tmp219 = []
tmp219.append(subjects203.popleft())
while True:
if len(tmp219) > 1:
tmp220 = create_operation_expression(associative1, tmp219)
elif len(tmp219) == 1:
tmp220 = tmp219[0]
else:
assert False, "Unreachable"
subst6 = Substitution(subst5)
try:
subst6.try_add_variable('i2.2.1.2.2.2', tmp220)
except ValueError:
pass
else:
pass
# State 36591
if len(subjects203) == 0:
pass
# State 36592
if len(subjects183) == 0:
pass
# State 36593
if len(subjects) == 0:
pass
# 5: log(c*(d*(x**j*f + e)**p)**q)
yield 5, subst6
if len(subjects203) == 0:
break
tmp219.append(subjects203.popleft())
subjects203.extendleft(reversed(tmp219))
subjects203.appendleft(tmp215)
if len(subjects203) >= 1 and isinstance(subjects203[0], Add):
tmp222 = subjects203.popleft()
associative1 = tmp222
associative_type1 = type(tmp222)
subjects223 = deque(tmp222._args)
matcher = CommutativeMatcher36595.get()
tmp224 = subjects223
subjects223 = []
for s in tmp224:
matcher.add_subject(s)
for pattern_index, subst4 in matcher.match(tmp224, subst3):
pass
if pattern_index == 0:
pass
# State 36608
subst5 = Substitution(subst4)
try:
subst5.try_add_variable('i2.2.1.2.2.2', 1)
except ValueError:
pass
else:
pass
# State 36609
if len(subjects203) == 0:
pass
# State 36610
if len(subjects183) == 0:
pass
# State 36611
if len(subjects) == 0:
pass
# 5: log(c*(d*(x**j*f + e)**p)**q)
yield 5, subst5
if len(subjects203) >= 1:
tmp226 = []
tmp226.append(subjects203.popleft())
while True:
if len(tmp226) > 1:
tmp227 = create_operation_expression(associative1, tmp226)
elif len(tmp226) == 1:
tmp227 = tmp226[0]
else:
assert False, "Unreachable"
subst5 = Substitution(subst4)
try:
subst5.try_add_variable('i2.2.1.2.2.2', tmp227)
except ValueError:
pass
else:
pass
# State 36609
if len(subjects203) == 0:
pass
# State 36610
if len(subjects183) == 0:
pass
# State 36611
if len(subjects) == 0:
pass
# 5: log(c*(d*(x**j*f + e)**p)**q)
yield 5, subst5
if len(subjects203) == 0:
break
tmp226.append(subjects203.popleft())
subjects203.extendleft(reversed(tmp226))
subjects203.appendleft(tmp222)
subjects183.appendleft(tmp202)
if len(subjects183) >= 1 and isinstance(subjects183[0], Mul):
tmp229 = subjects183.popleft()
associative1 = tmp229
associative_type1 = type(tmp229)
subjects230 = deque(tmp229._args)
matcher = CommutativeMatcher36613.get()
tmp231 = subjects230
subjects230 = []
for s in tmp231:
matcher.add_subject(s)
for pattern_index, subst3 in matcher.match(tmp231, subst2):
pass
if pattern_index == 0:
pass
# State 36678
if len(subjects183) == 0:
pass
# State 36679
if len(subjects) == 0:
pass
# 5: log(c*(d*(x**j*f + e)**p)**q)
yield 5, subst3
subjects183.appendleft(tmp229)
if len(subjects183) >= 1 and isinstance(subjects183[0], Pow):
tmp232 = subjects183.popleft()
subjects233 = deque(tmp232._args)
# State 36680
subst2 = Substitution(subst1)
try:
subst2.try_add_variable('i2.2.1.2.2.0', S(1))
except ValueError:
pass
else:
pass
# State 36681
subst3 = Substitution(subst2)
try:
subst3.try_add_variable('i2.2.1.2.2.2', S(1))
except ValueError:
pass
else:
pass
# State 36682
subst4 = Substitution(subst3)
try:
subst4.try_add_variable('i2.2.1.2.2.2.0', S(0))
except ValueError:
pass
else:
pass
# State 36683
subst5 = Substitution(subst4)
try:
subst5.try_add_variable('i2.2.1.2.2.2.1.0', S(1))
except ValueError:
pass
else:
pass
# State 36684
if len(subjects233) >= 1 and isinstance(subjects233[0], Pow):
tmp238 = subjects233.popleft()
subjects239 = deque(tmp238._args)
# State 36685
if len(subjects239) >= 1:
tmp240 = subjects239.popleft()
subst6 = Substitution(subst5)
try:
subst6.try_add_variable('i2.2.1.1', tmp240)
except ValueError:
pass
else:
pass
# State 36686
if len(subjects239) >= 1:
tmp242 = subjects239.popleft()
subst7 = Substitution(subst6)
try:
subst7.try_add_variable('i2.2.1.2', tmp242)
except ValueError:
pass
else:
pass
# State 36687
if len(subjects239) == 0:
pass
# State 36688
subst8 = Substitution(subst7)
try:
subst8.try_add_variable('i2.2.1.2.2', 1)
except ValueError:
pass
else:
pass
# State 36689
if len(subjects233) == 0:
pass
# State 36690
if len(subjects183) == 0:
pass
# State 36691
if len(subjects) == 0:
pass
# 5: log(c*(d*(x**j*f + e)**p)**q)
yield 5, subst8
if len(subjects233) >= 1:
tmp245 = subjects233.popleft()
subst8 = Substitution(subst7)
try:
subst8.try_add_variable('i2.2.1.2.2', tmp245)
except ValueError:
pass
else:
pass
# State 36689
if len(subjects233) == 0:
pass
# State 36690
if len(subjects183) == 0:
pass
# State 36691
if len(subjects) == 0:
pass
# 5: log(c*(d*(x**j*f + e)**p)**q)
yield 5, subst8
subjects233.appendleft(tmp245)
subjects239.appendleft(tmp242)
subjects239.appendleft(tmp240)
subjects233.appendleft(tmp238)
if len(subjects233) >= 1 and isinstance(subjects233[0], Mul):
tmp247 = subjects233.popleft()
associative1 = tmp247
associative_type1 = type(tmp247)
subjects248 = deque(tmp247._args)
matcher = CommutativeMatcher36693.get()
tmp249 = subjects248
subjects248 = []
for s in tmp249:
matcher.add_subject(s)
for pattern_index, subst5 in matcher.match(tmp249, subst4):
pass
if pattern_index == 0:
pass
# State 36698
subst6 = Substitution(subst5)
try:
subst6.try_add_variable('i2.2.1.2.2', 1)
except ValueError:
pass
else:
pass
# State 36699
if len(subjects233) == 0:
pass
# State 36700
if len(subjects183) == 0:
pass
# State 36701
if len(subjects) == 0:
pass
# 5: log(c*(d*(x**j*f + e)**p)**q)
yield 5, subst6
if len(subjects233) >= 1:
tmp251 = []
tmp251.append(subjects233.popleft())
while True:
if len(tmp251) > 1:
tmp252 = create_operation_expression(associative1, tmp251)
elif len(tmp251) == 1:
tmp252 = tmp251[0]
else:
assert False, "Unreachable"
subst6 = Substitution(subst5)
try:
subst6.try_add_variable('i2.2.1.2.2', tmp252)
except ValueError:
pass
else:
pass
# State 36699
if len(subjects233) == 0:
pass
# State 36700
if len(subjects183) == 0:
pass
# State 36701
if len(subjects) == 0:
pass
# 5: log(c*(d*(x**j*f + e)**p)**q)
yield 5, subst6
if len(subjects233) == 0:
break
tmp251.append(subjects233.popleft())
subjects233.extendleft(reversed(tmp251))
subjects233.appendleft(tmp247)
if len(subjects233) >= 1 and isinstance(subjects233[0], Add):
tmp254 = subjects233.popleft()
associative1 = tmp254
associative_type1 = type(tmp254)
subjects255 = deque(tmp254._args)
matcher = CommutativeMatcher36703.get()
tmp256 = subjects255
subjects255 = []
for s in tmp256:
matcher.add_subject(s)
for pattern_index, subst4 in matcher.match(tmp256, subst3):
pass
if pattern_index == 0:
pass
# State 36716
subst5 = Substitution(subst4)
try:
subst5.try_add_variable('i2.2.1.2.2', 1)
except ValueError:
pass
else:
pass
# State 36717
if len(subjects233) == 0:
pass
# State 36718
if len(subjects183) == 0:
pass
# State 36719
if len(subjects) == 0:
pass
# 5: log(c*(d*(x**j*f + e)**p)**q)
yield 5, subst5
if len(subjects233) >= 1:
tmp258 = []
tmp258.append(subjects233.popleft())
while True:
if len(tmp258) > 1:
tmp259 = create_operation_expression(associative1, tmp258)
elif len(tmp258) == 1:
tmp259 = tmp258[0]
else:
assert False, "Unreachable"
subst5 = Substitution(subst4)
try:
subst5.try_add_variable('i2.2.1.2.2', tmp259)
except ValueError:
pass
else:
pass
# State 36717
if len(subjects233) == 0:
pass
# State 36718
if len(subjects183) == 0:
pass
# State 36719
if len(subjects) == 0:
pass
# 5: log(c*(d*(x**j*f + e)**p)**q)
yield 5, subst5
if len(subjects233) == 0:
break
tmp258.append(subjects233.popleft())
subjects233.extendleft(reversed(tmp258))
subjects233.appendleft(tmp254)
if len(subjects233) >= 1 and isinstance(subjects233[0], Pow):
tmp261 = subjects233.popleft()
subjects262 = deque(tmp261._args)
# State 36720
subst3 = Substitution(subst2)
try:
subst3.try_add_variable('i2.2.1.2.2.2.0', S(0))
except ValueError:
pass
else:
pass
# State 36721
subst4 = Substitution(subst3)
try:
subst4.try_add_variable('i2.2.1.2.2.2.1.0', S(1))
except ValueError:
pass
else:
pass
# State 36722
if len(subjects262) >= 1 and isinstance(subjects262[0], Pow):
tmp265 = subjects262.popleft()
subjects266 = deque(tmp265._args)
# State 36723
if len(subjects266) >= 1:
tmp267 = subjects266.popleft()
subst5 = Substitution(subst4)
try:
subst5.try_add_variable('i2.2.1.1', tmp267)
except ValueError:
pass
else:
pass
# State 36724
if len(subjects266) >= 1:
tmp269 = subjects266.popleft()
subst6 = Substitution(subst5)
try:
subst6.try_add_variable('i2.2.1.2', tmp269)
except ValueError:
pass
else:
pass
# State 36725
if len(subjects266) == 0:
pass
# State 36726
subst7 = Substitution(subst6)
try:
subst7.try_add_variable('i2.2.1.2.2.2', 1)
except ValueError:
pass
else:
pass
# State 36727
if len(subjects262) == 0:
pass
# State 36728
subst8 = Substitution(subst7)
try:
subst8.try_add_variable('i2.2.1.2.2', 1)
except ValueError:
pass
else:
pass
# State 36729
if len(subjects233) == 0:
pass
# State 36730
if len(subjects183) == 0:
pass
# State 36731
if len(subjects) == 0:
pass
# 5: log(c*(d*(x**j*f + e)**p)**q)
yield 5, subst8
if len(subjects233) >= 1:
tmp273 = subjects233.popleft()
subst8 = Substitution(subst7)
try:
subst8.try_add_variable('i2.2.1.2.2', tmp273)
except ValueError:
pass
else:
pass
# State 36729
if len(subjects233) == 0:
pass
# State 36730
if len(subjects183) == 0:
pass
# State 36731
if len(subjects) == 0:
pass
# 5: log(c*(d*(x**j*f + e)**p)**q)
yield 5, subst8
subjects233.appendleft(tmp273)
if len(subjects262) >= 1:
tmp275 = subjects262.popleft()
subst7 = Substitution(subst6)
try:
subst7.try_add_variable('i2.2.1.2.2.2', tmp275)
except ValueError:
pass
else:
pass
# State 36727
if len(subjects262) == 0:
pass
# State 36728
subst8 = Substitution(subst7)
try:
subst8.try_add_variable('i2.2.1.2.2', 1)
except ValueError:
pass
else:
pass
# State 36729
if len(subjects233) == 0:
pass
# State 36730
if len(subjects183) == 0:
pass
# State 36731
if len(subjects) == 0:
pass
# 5: log(c*(d*(x**j*f + e)**p)**q)
yield 5, subst8
if len(subjects233) >= 1:
tmp278 = subjects233.popleft()
subst8 = Substitution(subst7)
try:
subst8.try_add_variable('i2.2.1.2.2', tmp278)
except ValueError:
pass
else:
pass
# State 36729
if len(subjects233) == 0:
pass
# State 36730
if len(subjects183) == 0:
pass
# State 36731
if len(subjects) == 0:
pass
# 5: log(c*(d*(x**j*f + e)**p)**q)
yield 5, subst8
subjects233.appendleft(tmp278)
subjects262.appendleft(tmp275)
subjects266.appendleft(tmp269)
subjects266.appendleft(tmp267)
subjects262.appendleft(tmp265)
if len(subjects262) >= 1 and isinstance(subjects262[0], Mul):
tmp280 = subjects262.popleft()
associative1 = tmp280
associative_type1 = type(tmp280)
subjects281 = deque(tmp280._args)
matcher = CommutativeMatcher36733.get()
tmp282 = subjects281
subjects281 = []
for s in tmp282:
matcher.add_subject(s)
for pattern_index, subst4 in matcher.match(tmp282, subst3):
pass
if pattern_index == 0:
pass
# State 36738
subst5 = Substitution(subst4)
try:
subst5.try_add_variable('i2.2.1.2.2.2', 1)
except ValueError:
pass
else:
pass
# State 36739
if len(subjects262) == 0:
pass
# State 36740
subst6 = Substitution(subst5)
try:
subst6.try_add_variable('i2.2.1.2.2', 1)
except ValueError:
pass
else:
pass
# State 36741
if len(subjects233) == 0:
pass
# State 36742
if len(subjects183) == 0:
pass
# State 36743
if len(subjects) == 0:
pass
# 5: log(c*(d*(x**j*f + e)**p)**q)
yield 5, subst6
if len(subjects233) >= 1:
tmp285 = subjects233.popleft()
subst6 = Substitution(subst5)
try:
subst6.try_add_variable('i2.2.1.2.2', tmp285)
except ValueError:
pass
else:
pass
# State 36741
if len(subjects233) == 0:
pass
# State 36742
if len(subjects183) == 0:
pass
# State 36743
if len(subjects) == 0:
pass
# 5: log(c*(d*(x**j*f + e)**p)**q)
yield 5, subst6
subjects233.appendleft(tmp285)
if len(subjects262) >= 1:
tmp287 = []
tmp287.append(subjects262.popleft())
while True:
if len(tmp287) > 1:
tmp288 = create_operation_expression(associative1, tmp287)
elif len(tmp287) == 1:
tmp288 = tmp287[0]
else:
assert False, "Unreachable"
subst5 = Substitution(subst4)
try:
subst5.try_add_variable('i2.2.1.2.2.2', tmp288)
except ValueError:
pass
else:
pass
# State 36739
if len(subjects262) == 0:
pass
# State 36740
subst6 = Substitution(subst5)
try:
subst6.try_add_variable('i2.2.1.2.2', 1)
except ValueError:
pass
else:
pass
# State 36741
if len(subjects233) == 0:
pass
# State 36742
if len(subjects183) == 0:
pass
# State 36743
if len(subjects) == 0:
pass
# 5: log(c*(d*(x**j*f + e)**p)**q)
yield 5, subst6
if len(subjects233) >= 1:
tmp291 = subjects233.popleft()
subst6 = Substitution(subst5)
try:
subst6.try_add_variable('i2.2.1.2.2', tmp291)
except ValueError:
pass
else:
pass
# State 36741
if len(subjects233) == 0:
pass
# State 36742
if len(subjects183) == 0:
pass
# State 36743
if len(subjects) == 0:
pass
# 5: log(c*(d*(x**j*f + e)**p)**q)
yield 5, subst6
subjects233.appendleft(tmp291)
if len(subjects262) == 0:
break
tmp287.append(subjects262.popleft())
subjects262.extendleft(reversed(tmp287))
subjects262.appendleft(tmp280)
if len(subjects262) >= 1 and isinstance(subjects262[0], Add):
tmp293 = subjects262.popleft()
associative1 = tmp293
associative_type1 = type(tmp293)
subjects294 = deque(tmp293._args)
matcher = CommutativeMatcher36745.get()
tmp295 = subjects294
subjects294 = []
for s in tmp295:
matcher.add_subject(s)
for pattern_index, subst3 in matcher.match(tmp295, subst2):
pass
if pattern_index == 0:
pass
# State 36758
subst4 = Substitution(subst3)
try:
subst4.try_add_variable('i2.2.1.2.2.2', 1)
except ValueError:
pass
else:
pass
# State 36759
if len(subjects262) == 0:
pass
# State 36760
subst5 = Substitution(subst4)
try:
subst5.try_add_variable('i2.2.1.2.2', 1)
except ValueError:
pass
else:
pass
# State 36761
if len(subjects233) == 0:
pass
# State 36762
if len(subjects183) == 0:
pass
# State 36763
if len(subjects) == 0:
pass
# 5: log(c*(d*(x**j*f + e)**p)**q)
yield 5, subst5
if len(subjects233) >= 1:
tmp298 = subjects233.popleft()
subst5 = Substitution(subst4)
try:
subst5.try_add_variable('i2.2.1.2.2', tmp298)
except ValueError:
pass
else:
pass
# State 36761
if len(subjects233) == 0:
pass
# State 36762
if len(subjects183) == 0:
pass
# State 36763
if len(subjects) == 0:
pass
# 5: log(c*(d*(x**j*f + e)**p)**q)
yield 5, subst5
subjects233.appendleft(tmp298)
if len(subjects262) >= 1:
tmp300 = []
tmp300.append(subjects262.popleft())
while True:
if len(tmp300) > 1:
tmp301 = create_operation_expression(associative1, tmp300)
elif len(tmp300) == 1:
tmp301 = tmp300[0]
else:
assert False, "Unreachable"
subst4 = Substitution(subst3)
try:
subst4.try_add_variable('i2.2.1.2.2.2', tmp301)
except ValueError:
pass
else:
pass
# State 36759
if len(subjects262) == 0:
pass
# State 36760
subst5 = Substitution(subst4)
try:
subst5.try_add_variable('i2.2.1.2.2', 1)
except ValueError:
pass
else:
pass
# State 36761
if len(subjects233) == 0:
pass
# State 36762
if len(subjects183) == 0:
pass
# State 36763
if len(subjects) == 0:
pass
# 5: log(c*(d*(x**j*f + e)**p)**q)
yield 5, subst5
if len(subjects233) >= 1:
tmp304 = subjects233.popleft()
subst5 = Substitution(subst4)
try:
subst5.try_add_variable('i2.2.1.2.2', tmp304)
except ValueError:
pass
else:
pass
# State 36761
if len(subjects233) == 0:
pass
# State 36762
if len(subjects183) == 0:
pass
# State 36763
if len(subjects) == 0:
pass
# 5: log(c*(d*(x**j*f + e)**p)**q)
yield 5, subst5
subjects233.appendleft(tmp304)
if len(subjects262) == 0:
break
tmp300.append(subjects262.popleft())
subjects262.extendleft(reversed(tmp300))
subjects262.appendleft(tmp293)
subjects233.appendleft(tmp261)
if len(subjects233) >= 1 and isinstance(subjects233[0], Mul):
tmp306 = subjects233.popleft()
associative1 = tmp306
associative_type1 = type(tmp306)
subjects307 = deque(tmp306._args)
matcher = CommutativeMatcher36765.get()
tmp308 = subjects307
subjects307 = []
for s in tmp308:
matcher.add_subject(s)
for pattern_index, subst2 in matcher.match(tmp308, subst1):
pass
if pattern_index == 0:
pass
# State 36830
subst3 = Substitution(subst2)
try:
subst3.try_add_variable('i2.2.1.2.2', 1)
except ValueError:
pass
else:
pass
# State 36831
if len(subjects233) == 0:
pass
# State 36832
if len(subjects183) == 0:
pass
# State 36833
if len(subjects) == 0:
pass
# 5: log(c*(d*(x**j*f + e)**p)**q)
yield 5, subst3
if len(subjects233) >= 1:
tmp310 = []
tmp310.append(subjects233.popleft())
while True:
if len(tmp310) > 1:
tmp311 = create_operation_expression(associative1, tmp310)
elif len(tmp310) == 1:
tmp311 = tmp310[0]
else:
assert False, "Unreachable"
subst3 = Substitution(subst2)
try:
subst3.try_add_variable('i2.2.1.2.2', tmp311)
except ValueError:
pass
else:
pass
# State 36831
if len(subjects233) == 0:
pass
# State 36832
if len(subjects183) == 0:
pass
# State 36833
if len(subjects) == 0:
pass
# 5: log(c*(d*(x**j*f + e)**p)**q)
yield 5, subst3
if len(subjects233) == 0:
break
tmp310.append(subjects233.popleft())
subjects233.extendleft(reversed(tmp310))
subjects233.appendleft(tmp306)
subjects183.appendleft(tmp232)
if len(subjects183) >= 1 and isinstance(subjects183[0], Mul):
tmp313 = subjects183.popleft()
associative1 = tmp313
associative_type1 = type(tmp313)
subjects314 = deque(tmp313._args)
matcher = CommutativeMatcher36835.get()
tmp315 = subjects314
subjects314 = []
for s in tmp315:
matcher.add_subject(s)
for pattern_index, subst1 in matcher.match(tmp315, subst0):
pass
if pattern_index == 0:
pass
# State 37116
if len(subjects183) == 0:
pass
# State 37117
if len(subjects) == 0:
pass
# 5: log(c*(d*(x**j*f + e)**p)**q)
yield 5, subst1
subjects183.appendleft(tmp313)
subjects.appendleft(tmp182)
if len(subjects) >= 1 and isinstance(subjects[0], sin):
tmp316 = subjects.popleft()
subjects317 = deque(tmp316._args)
# State 60831
subst1 = Substitution(subst0)
try:
subst1.try_add_variable('i2.2.1.2.0', S(0))
except ValueError:
pass
else:
pass
# State 60832
subst2 = Substitution(subst1)
try:
subst2.try_add_variable('i2.2.1.2.1.0_1', S(1))
except ValueError:
pass
else:
pass
# State 60833
if len(subjects317) >= 1:
tmp320 = subjects317.popleft()
subst3 = Substitution(subst2)
try:
subst3.try_add_variable('i2.2.1.2.1.0', tmp320)
except ValueError:
pass
else:
pass
# State 60834
if len(subjects317) == 0:
pass
# State 60835
if len(subjects) == 0:
pass
# 6: sin(c + x*d)
yield 6, subst3
subjects317.appendleft(tmp320)
subst2 = Substitution(subst1)
try:
subst2.try_add_variable('i2.2.1.2.1.0', S(1))
except ValueError:
pass
else:
pass
# State 72357
if len(subjects317) >= 1:
tmp323 = subjects317.popleft()
subst3 = Substitution(subst2)
try:
subst3.try_add_variable('i2.2.1.0', tmp323)
except ValueError:
pass
else:
pass
# State 72358
if len(subjects317) == 0:
pass
# State 72359
if len(subjects) == 0:
pass
# 14: sin(x*f + e)
yield 14, subst3
subjects317.appendleft(tmp323)
if len(subjects317) >= 1 and isinstance(subjects317[0], Mul):
tmp325 = subjects317.popleft()
associative1 = tmp325
associative_type1 = type(tmp325)
subjects326 = deque(tmp325._args)
matcher = CommutativeMatcher60837.get()
tmp327 = subjects326
subjects326 = []
for s in tmp327:
matcher.add_subject(s)
for pattern_index, subst2 in matcher.match(tmp327, subst1):
pass
if pattern_index == 0:
pass
# State 60838
if len(subjects317) == 0:
pass
# State 60839
if len(subjects) == 0:
pass
# 6: sin(c + x*d)
yield 6, subst2
if pattern_index == 1:
pass
# State 72360
if len(subjects317) == 0:
pass
# State 72361
if len(subjects) == 0:
pass
# 14: sin(x*f + e)
yield 14, subst2
subjects317.appendleft(tmp325)
subst1 = Substitution(subst0)
try:
subst1.try_add_variable('i2.2.2.2.0', S(0))
except ValueError:
pass
else:
pass
# State 61476
subst2 = Substitution(subst1)
try:
subst2.try_add_variable('i2.2.2.2.1.0_1', S(1))
except ValueError:
pass
else:
pass
# State 61477
if len(subjects317) >= 1:
tmp330 = subjects317.popleft()
subst3 = Substitution(subst2)
try:
subst3.try_add_variable('i2.2.2.2.1.0', tmp330)
except ValueError:
pass
else:
pass
# State 61478
if len(subjects317) == 0:
pass
# State 61479
if len(subjects) == 0:
pass
# 8: sin(e + x*f)
yield 8, subst3
subjects317.appendleft(tmp330)
if len(subjects317) >= 1 and isinstance(subjects317[0], Mul):
tmp332 = subjects317.popleft()
associative1 = tmp332
associative_type1 = type(tmp332)
subjects333 = deque(tmp332._args)
matcher = CommutativeMatcher61481.get()
tmp334 = subjects333
subjects333 = []
for s in tmp334:
matcher.add_subject(s)
for pattern_index, subst2 in matcher.match(tmp334, subst1):
pass
if pattern_index == 0:
pass
# State 61482
if len(subjects317) == 0:
pass
# State 61483
if len(subjects) == 0:
pass
# 8: sin(e + x*f)
yield 8, subst2
subjects317.appendleft(tmp332)
subst1 = Substitution(subst0)
try:
subst1.try_add_variable('i2.2.3.0', S(0))
except ValueError:
pass
else:
pass
# State 63202
subst2 = Substitution(subst1)
try:
subst2.try_add_variable('i2.2.3.1.0_1', S(1))
except ValueError:
pass
else:
pass
# State 63203
if len(subjects317) >= 1:
tmp337 = subjects317.popleft()
subst3 = Substitution(subst2)
try:
subst3.try_add_variable('i2.2.3.1.0', tmp337)
except ValueError:
pass
else:
pass
# State 63204
if len(subjects317) == 0:
pass
# State 63205
if len(subjects) == 0:
pass
# 10: sin(e + x*f)
yield 10, subst3
subjects317.appendleft(tmp337)
if len(subjects317) >= 1 and isinstance(subjects317[0], Mul):
tmp339 = subjects317.popleft()
associative1 = tmp339
associative_type1 = type(tmp339)
subjects340 = deque(tmp339._args)
matcher = CommutativeMatcher63207.get()
tmp341 = subjects340
subjects340 = []
for s in tmp341:
matcher.add_subject(s)
for pattern_index, subst2 in matcher.match(tmp341, subst1):
pass
if pattern_index == 0:
pass
# State 63208
if len(subjects317) == 0:
pass
# State 63209
if len(subjects) == 0:
pass
# 10: sin(e + x*f)
yield 10, subst2
subjects317.appendleft(tmp339)
if len(subjects317) >= 1 and isinstance(subjects317[0], Add):
tmp342 = subjects317.popleft()
associative1 = tmp342
associative_type1 = type(tmp342)
subjects343 = deque(tmp342._args)
matcher = CommutativeMatcher60841.get()
tmp344 = subjects343
subjects343 = []
for s in tmp344:
matcher.add_subject(s)
for pattern_index, subst1 in matcher.match(tmp344, subst0):
pass
if pattern_index == 0:
pass
# State 60847
if len(subjects317) == 0:
pass
# State 60848
if len(subjects) == 0:
pass
# 6: sin(c + x*d)
yield 6, subst1
if pattern_index == 1:
pass
# State 61487
if len(subjects317) == 0:
pass
# State 61488
if len(subjects) == 0:
pass
# 8: sin(e + x*f)
yield 8, subst1
if pattern_index == 2:
pass
# State 63213
if len(subjects317) == 0:
pass
# State 63214
if len(subjects) == 0:
pass
# 10: sin(e + x*f)
yield 10, subst1
if pattern_index == 3:
pass
# State 72365
if len(subjects317) == 0:
pass
# State 72366
if len(subjects) == 0:
pass
# 14: sin(x*f + e)
yield 14, subst1
subjects317.appendleft(tmp342)
subjects.appendleft(tmp316)
if len(subjects) >= 1 and isinstance(subjects[0], cos):
tmp345 = subjects.popleft()
subjects346 = deque(tmp345._args)
# State 60882
subst1 = Substitution(subst0)
try:
subst1.try_add_variable('i2.2.1.2.0', S(0))
except ValueError:
pass
else:
pass
# State 60883
subst2 = Substitution(subst1)
try:
subst2.try_add_variable('i2.2.1.2.1.0_1', S(1))
except ValueError:
pass
else:
pass
# State 60884
if len(subjects346) >= 1:
tmp349 = subjects346.popleft()
subst3 = Substitution(subst2)
try:
subst3.try_add_variable('i2.2.1.2.1.0', tmp349)
except ValueError:
pass
else:
pass
# State 60885
if len(subjects346) == 0:
pass
# State 60886
if len(subjects) == 0:
pass
# 7: cos(c + x*d)
yield 7, subst3
subjects346.appendleft(tmp349)
if len(subjects346) >= 1 and isinstance(subjects346[0], Mul):
tmp351 = subjects346.popleft()
associative1 = tmp351
associative_type1 = type(tmp351)
subjects352 = deque(tmp351._args)
matcher = CommutativeMatcher60888.get()
tmp353 = subjects352
subjects352 = []
for s in tmp353:
matcher.add_subject(s)
for pattern_index, subst2 in matcher.match(tmp353, subst1):
pass
if pattern_index == 0:
pass
# State 60889
if len(subjects346) == 0:
pass
# State 60890
if len(subjects) == 0:
pass
# 7: cos(c + x*d)
yield 7, subst2
subjects346.appendleft(tmp351)
subst1 = Substitution(subst0)
try:
subst1.try_add_variable('i2.2.2.2.0', S(0))
except ValueError:
pass
else:
pass
# State 61705
subst2 = Substitution(subst1)
try:
subst2.try_add_variable('i2.2.2.2.1.0_1', S(1))
except ValueError:
pass
else:
pass
# State 61706
if len(subjects346) >= 1:
tmp356 = subjects346.popleft()
subst3 = Substitution(subst2)
try:
subst3.try_add_variable('i2.2.2.2.1.0', tmp356)
except ValueError:
pass
else:
pass
# State 61707
if len(subjects346) == 0:
pass
# State 61708
if len(subjects) == 0:
pass
# 9: cos(e + x*f)
yield 9, subst3
subjects346.appendleft(tmp356)
if len(subjects346) >= 1 and isinstance(subjects346[0], Mul):
tmp358 = subjects346.popleft()
associative1 = tmp358
associative_type1 = type(tmp358)
subjects359 = deque(tmp358._args)
matcher = CommutativeMatcher61710.get()
tmp360 = subjects359
subjects359 = []
for s in tmp360:
matcher.add_subject(s)
for pattern_index, subst2 in matcher.match(tmp360, subst1):
pass
if pattern_index == 0:
pass
# State 61711
if len(subjects346) == 0:
pass
# State 61712
if len(subjects) == 0:
pass
# 9: cos(e + x*f)
yield 9, subst2
subjects346.appendleft(tmp358)
subst1 = Substitution(subst0)
try:
subst1.try_add_variable('i2.2.3.0', S(0))
except ValueError:
pass
else:
pass
# State 63378
subst2 = Substitution(subst1)
try:
subst2.try_add_variable('i2.2.3.1.0_1', S(1))
except ValueError:
pass
else:
pass
# State 63379
if len(subjects346) >= 1:
tmp363 = subjects346.popleft()
subst3 = Substitution(subst2)
try:
subst3.try_add_variable('i2.2.3.1.0', tmp363)
except ValueError:
pass
else:
pass
# State 63380
if len(subjects346) == 0:
pass
# State 63381
if len(subjects) == 0:
pass
# 11: cos(e + x*f)
yield 11, subst3
subjects346.appendleft(tmp363)
if len(subjects346) >= 1 and isinstance(subjects346[0], Mul):
tmp365 = subjects346.popleft()
associative1 = tmp365
associative_type1 = type(tmp365)
subjects366 = deque(tmp365._args)
matcher = CommutativeMatcher63383.get()
tmp367 = subjects366
subjects366 = []
for s in tmp367:
matcher.add_subject(s)
for pattern_index, subst2 in matcher.match(tmp367, subst1):
pass
if pattern_index == 0:
pass
# State 63384
if len(subjects346) == 0:
pass
# State 63385
if len(subjects) == 0:
pass
# 11: cos(e + x*f)
yield 11, subst2
subjects346.appendleft(tmp365)
if len(subjects346) >= 1 and isinstance(subjects346[0], Add):
tmp368 = subjects346.popleft()
associative1 = tmp368
associative_type1 = type(tmp368)
subjects369 = deque(tmp368._args)
matcher = CommutativeMatcher60892.get()
tmp370 = subjects369
subjects369 = []
for s in tmp370:
matcher.add_subject(s)
for pattern_index, subst1 in matcher.match(tmp370, subst0):
pass
if pattern_index == 0:
pass
# State 60898
if len(subjects346) == 0:
pass
# State 60899
if len(subjects) == 0:
pass
# 7: cos(c + x*d)
yield 7, subst1
if pattern_index == 1:
pass
# State 61716
if len(subjects346) == 0:
pass
# State 61717
if len(subjects) == 0:
pass
# 9: cos(e + x*f)
yield 9, subst1
if pattern_index == 2:
pass
# State 63389
if len(subjects346) == 0:
pass
# State 63390
if len(subjects) == 0:
pass
# 11: cos(e + x*f)
yield 11, subst1
subjects346.appendleft(tmp368)
subjects.appendleft(tmp345)
if len(subjects) >= 1 and isinstance(subjects[0], tan):
tmp371 = subjects.popleft()
subjects372 = deque(tmp371._args)
# State 78446
subst1 = Substitution(subst0)
try:
subst1.try_add_variable('i2.2.3.0', S(0))
except ValueError:
pass
else:
pass
# State 78447
subst2 = Substitution(subst1)
try:
subst2.try_add_variable('i2.2.3.1.0_1', S(1))
except ValueError:
pass
else:
pass
# State 78448
if len(subjects372) >= 1:
tmp375 = subjects372.popleft()
subst3 = Substitution(subst2)
try:
subst3.try_add_variable('i2.2.3.1.0', tmp375)
except ValueError:
pass
else:
pass
# State 78449
if len(subjects372) == 0:
pass
# State 78450
if len(subjects) == 0:
pass
# 15: tan(e + x*f)
yield 15, subst3
subjects372.appendleft(tmp375)
if len(subjects372) >= 1 and isinstance(subjects372[0], Mul):
tmp377 = subjects372.popleft()
associative1 = tmp377
associative_type1 = type(tmp377)
subjects378 = deque(tmp377._args)
matcher = CommutativeMatcher78452.get()
tmp379 = subjects378
subjects378 = []
for s in tmp379:
matcher.add_subject(s)
for pattern_index, subst2 in matcher.match(tmp379, subst1):
pass
if pattern_index == 0:
pass
# State 78453
if len(subjects372) == 0:
pass
# State 78454
if len(subjects) == 0:
pass
# 15: tan(e + x*f)
yield 15, subst2
subjects372.appendleft(tmp377)
subst1 = Substitution(subst0)
try:
subst1.try_add_variable('i2.2.2.2.0', S(0))
except ValueError:
pass
else:
pass
# State 80053
subst2 = Substitution(subst1)
try:
subst2.try_add_variable('i2.2.2.2.1.0_1', S(1))
except ValueError:
pass
else:
pass
# State 80054
if len(subjects372) >= 1:
tmp382 = subjects372.popleft()
subst3 = Substitution(subst2)
try:
subst3.try_add_variable('i2.2.2.2.1.0', tmp382)
except ValueError:
pass
else:
pass
# State 80055
if len(subjects372) == 0:
pass
# State 80056
if len(subjects) == 0:
pass
# 18: tan(e + x*f)
yield 18, subst3
subjects372.appendleft(tmp382)
if len(subjects372) >= 1 and isinstance(subjects372[0], Mul):
tmp384 = subjects372.popleft()
associative1 = tmp384
associative_type1 = type(tmp384)
subjects385 = deque(tmp384._args)
matcher = CommutativeMatcher80058.get()
tmp386 = subjects385
subjects385 = []
for s in tmp386:
matcher.add_subject(s)
for pattern_index, subst2 in matcher.match(tmp386, subst1):
pass
if pattern_index == 0:
pass
# State 80059
if len(subjects372) == 0:
pass
# State 80060
if len(subjects) == 0:
pass
# 18: tan(e + x*f)
yield 18, subst2
subjects372.appendleft(tmp384)
subst1 = Substitution(subst0)
try:
subst1.try_add_variable('i2.2.1.2.0', S(0))
except ValueError:
pass
else:
pass
# State 81806
subst2 = Substitution(subst1)
try:
subst2.try_add_variable('i2.2.1.2.1.0_1', S(1))
except ValueError:
pass
else:
pass
# State 81807
if len(subjects372) >= 1:
tmp389 = subjects372.popleft()
subst3 = Substitution(subst2)
try:
subst3.try_add_variable('i2.2.1.2.1.0', tmp389)
except ValueError:
pass
else:
pass
# State 81808
if len(subjects372) == 0:
pass
# State 81809
if len(subjects) == 0:
pass
# 21: tan(c + x*d)
yield 21, subst3
subjects372.appendleft(tmp389)
subst2 = Substitution(subst1)
try:
subst2.try_add_variable('i2.2.1.2.1.0', S(1))
except ValueError:
pass
else:
pass
# State 87029
if len(subjects372) >= 1:
tmp392 = subjects372.popleft()
subst3 = Substitution(subst2)
try:
subst3.try_add_variable('i2.2.1.0', tmp392)
except ValueError:
pass
else:
pass
# State 87030
if len(subjects372) == 0:
pass
# State 87031
if len(subjects) == 0:
pass
# 23: tan(x*f + e)
yield 23, subst3
subjects372.appendleft(tmp392)
if len(subjects372) >= 1 and isinstance(subjects372[0], Mul):
tmp394 = subjects372.popleft()
associative1 = tmp394
associative_type1 = type(tmp394)
subjects395 = deque(tmp394._args)
matcher = CommutativeMatcher81811.get()
tmp396 = subjects395
subjects395 = []
for s in tmp396:
matcher.add_subject(s)
for pattern_index, subst2 in matcher.match(tmp396, subst1):
pass
if pattern_index == 0:
pass
# State 81812
if len(subjects372) == 0:
pass
# State 81813
if len(subjects) == 0:
pass
# 21: tan(c + x*d)
yield 21, subst2
if pattern_index == 1:
pass
# State 87032
if len(subjects372) == 0:
pass
# State 87033
if len(subjects) == 0:
pass
# 23: tan(x*f + e)
yield 23, subst2
subjects372.appendleft(tmp394)
if len(subjects372) >= 1 and isinstance(subjects372[0], Add):
tmp397 = subjects372.popleft()
associative1 = tmp397
associative_type1 = type(tmp397)
subjects398 = deque(tmp397._args)
matcher = CommutativeMatcher78456.get()
tmp399 = subjects398
subjects398 = []
for s in tmp399:
matcher.add_subject(s)
for pattern_index, subst1 in matcher.match(tmp399, subst0):
pass
if pattern_index == 0:
pass
# State 78462
if len(subjects372) == 0:
pass
# State 78463
if len(subjects) == 0:
pass
# 15: tan(e + x*f)
yield 15, subst1
if pattern_index == 1:
pass
# State 80064
if len(subjects372) == 0:
pass
# State 80065
if len(subjects) == 0:
pass
# 18: tan(e + x*f)
yield 18, subst1
if pattern_index == 2:
pass
# State 81817
if len(subjects372) == 0:
pass
# State 81818
if len(subjects) == 0:
pass
# 21: tan(c + x*d)
yield 21, subst1
if pattern_index == 3:
pass
# State 87037
if len(subjects372) == 0:
pass
# State 87038
if len(subjects) == 0:
pass
# 23: tan(x*f + e)
yield 23, subst1
subjects372.appendleft(tmp397)
subjects.appendleft(tmp371)
if len(subjects) >= 1 and isinstance(subjects[0], asin):
tmp400 = subjects.popleft()
subjects401 = deque(tmp400._args)
# State 108825
subst1 = Substitution(subst0)
try:
subst1.try_add_variable('i2.2.1.2.0', S(1))
except ValueError:
pass
else:
pass
# State 108826
if len(subjects401) >= 1:
tmp403 = subjects401.popleft()
subst2 = Substitution(subst1)
try:
subst2.try_add_variable('i2.2.1.1', tmp403)
except ValueError:
pass
else:
pass
# State 108827
if len(subjects401) == 0:
pass
# State 108828
if len(subjects) == 0:
pass
# 27: asin(x*c)
yield 27, subst2
subjects401.appendleft(tmp403)
subst1 = Substitution(subst0)
try:
subst1.try_add_variable('i2.2.1.2.0_1', S(1))
except ValueError:
pass
else:
pass
# State 109847
if len(subjects401) >= 1:
tmp406 = subjects401.popleft()
subst2 = Substitution(subst1)
try:
subst2.try_add_variable('i2.2.1.2.1.1', tmp406)
except ValueError:
pass
else:
pass
# State 109848
if len(subjects401) == 0:
pass
# State 109849
if len(subjects) == 0:
pass
# 29: asin(c*x)
yield 29, subst2
subjects401.appendleft(tmp406)
if len(subjects401) >= 1 and isinstance(subjects401[0], Mul):
tmp408 = subjects401.popleft()
associative1 = tmp408
associative_type1 = type(tmp408)
subjects409 = deque(tmp408._args)
matcher = CommutativeMatcher108830.get()
tmp410 = subjects409
subjects409 = []
for s in tmp410:
matcher.add_subject(s)
for pattern_index, subst1 in matcher.match(tmp410, subst0):
pass
if pattern_index == 0:
pass
# State 108831
if len(subjects401) == 0:
pass
# State 108832
if len(subjects) == 0:
pass
# 27: asin(x*c)
yield 27, subst1
if pattern_index == 1:
pass
# State 109850
if len(subjects401) == 0:
pass
# State 109851
if len(subjects) == 0:
pass
# 29: asin(c*x)
yield 29, subst1
subjects401.appendleft(tmp408)
subjects.appendleft(tmp400)
if len(subjects) >= 1 and isinstance(subjects[0], acos):
tmp411 = subjects.popleft()
subjects412 = deque(tmp411._args)
# State 108867
subst1 = Substitution(subst0)
try:
subst1.try_add_variable('i2.2.1.2.0', S(1))
except ValueError:
pass
else:
pass
# State 108868
if len(subjects412) >= 1:
tmp414 = subjects412.popleft()
subst2 = Substitution(subst1)
try:
subst2.try_add_variable('i2.2.1.1', tmp414)
except ValueError:
pass
else:
pass
# State 108869
if len(subjects412) == 0:
pass
# State 108870
if len(subjects) == 0:
pass
# 28: acos(x*c)
yield 28, subst2
subjects412.appendleft(tmp414)
subst1 = Substitution(subst0)
try:
subst1.try_add_variable('i2.2.1.2.0_1', S(1))
except ValueError:
pass
else:
pass
# State 109903
if len(subjects412) >= 1:
tmp417 = subjects412.popleft()
subst2 = Substitution(subst1)
try:
subst2.try_add_variable('i2.2.1.2.1.1', tmp417)
except ValueError:
pass
else:
pass
# State 109904
if len(subjects412) == 0:
pass
# State 109905
if len(subjects) == 0:
pass
# 30: acos(c*x)
yield 30, subst2
subjects412.appendleft(tmp417)
if len(subjects412) >= 1 and isinstance(subjects412[0], Mul):
tmp419 = subjects412.popleft()
associative1 = tmp419
associative_type1 = type(tmp419)
subjects420 = deque(tmp419._args)
matcher = CommutativeMatcher108872.get()
tmp421 = subjects420
subjects420 = []
for s in tmp421:
matcher.add_subject(s)
for pattern_index, subst1 in matcher.match(tmp421, subst0):
pass
if pattern_index == 0:
pass
# State 108873
if len(subjects412) == 0:
pass
# State 108874
if len(subjects) == 0:
pass
# 28: acos(x*c)
yield 28, subst1
if pattern_index == 1:
pass
# State 109906
if len(subjects412) == 0:
pass
# State 109907
if len(subjects) == 0:
pass
# 30: acos(c*x)
yield 30, subst1
subjects412.appendleft(tmp419)
subjects.appendleft(tmp411)
if len(subjects) >= 1 and isinstance(subjects[0], atan):
tmp422 = subjects.popleft()
subjects423 = deque(tmp422._args)
# State 113476
subst1 = Substitution(subst0)
try:
subst1.try_add_variable('i2.2.1.2.0', S(1))
except ValueError:
pass
else:
pass
# State 113477
if len(subjects423) >= 1:
tmp425 = subjects423.popleft()
subst2 = Substitution(subst1)
try:
subst2.try_add_variable('i2.2.1.1', tmp425)
except ValueError:
pass
else:
pass
# State 113478
if len(subjects423) == 0:
pass
# State 113479
if len(subjects) == 0:
pass
# 31: atan(x*c)
yield 31, subst2
subjects423.appendleft(tmp425)
if len(subjects423) >= 1 and isinstance(subjects423[0], Mul):
tmp427 = subjects423.popleft()
associative1 = tmp427
associative_type1 = type(tmp427)
subjects428 = deque(tmp427._args)
matcher = CommutativeMatcher113481.get()
tmp429 = subjects428
subjects428 = []
for s in tmp429:
matcher.add_subject(s)
for pattern_index, subst1 in matcher.match(tmp429, subst0):
pass
if pattern_index == 0:
pass
# State 113482
if len(subjects423) == 0:
pass
# State 113483
if len(subjects) == 0:
pass
# 31: atan(x*c)
yield 31, subst1
subjects423.appendleft(tmp427)
subjects.appendleft(tmp422)
if len(subjects) >= 1 and isinstance(subjects[0], acot):
tmp430 = subjects.popleft()
subjects431 = deque(tmp430._args)
# State 113525
subst1 = Substitution(subst0)
try:
subst1.try_add_variable('i2.2.1.2.0', S(1))
except ValueError:
pass
else:
pass
# State 113526
if len(subjects431) >= 1:
tmp433 = subjects431.popleft()
subst2 = Substitution(subst1)
try:
subst2.try_add_variable('i2.2.1.1', tmp433)
except ValueError:
pass
else:
pass
# State 113527
if len(subjects431) == 0:
pass
# State 113528
if len(subjects) == 0:
pass
# 32: acot(x*c)
yield 32, subst2
subjects431.appendleft(tmp433)
if len(subjects431) >= 1 and isinstance(subjects431[0], Mul):
tmp435 = subjects431.popleft()
associative1 = tmp435
associative_type1 = type(tmp435)
subjects436 = deque(tmp435._args)
matcher = CommutativeMatcher113530.get()
tmp437 = subjects436
subjects436 = []
for s in tmp437:
matcher.add_subject(s)
for pattern_index, subst1 in matcher.match(tmp437, subst0):
pass
if pattern_index == 0:
pass
# State 113531
if len(subjects431) == 0:
pass
# State 113532
if len(subjects) == 0:
pass
# 32: acot(x*c)
yield 32, subst1
subjects431.appendleft(tmp435)
subjects.appendleft(tmp430)
if len(subjects) >= 1 and isinstance(subjects[0], asec):
tmp438 = subjects.popleft()
subjects439 = deque(tmp438._args)
# State 119947
subst1 = Substitution(subst0)
try:
subst1.try_add_variable('i2.2.1.2.0', S(1))
except ValueError:
pass
else:
pass
# State 119948
if len(subjects439) >= 1:
tmp441 = subjects439.popleft()
subst2 = Substitution(subst1)
try:
subst2.try_add_variable('i2.2.1.1', tmp441)
except ValueError:
pass
else:
pass
# State 119949
if len(subjects439) == 0:
pass
# State 119950
if len(subjects) == 0:
pass
# 33: asec(x*c)
yield 33, subst2
subjects439.appendleft(tmp441)
if len(subjects439) >= 1 and isinstance(subjects439[0], Mul):
tmp443 = subjects439.popleft()
associative1 = tmp443
associative_type1 = type(tmp443)
subjects444 = deque(tmp443._args)
matcher = CommutativeMatcher119952.get()
tmp445 = subjects444
subjects444 = []
for s in tmp445:
matcher.add_subject(s)
for pattern_index, subst1 in matcher.match(tmp445, subst0):
pass
if pattern_index == 0:
pass
# State 119953
if len(subjects439) == 0:
pass
# State 119954
if len(subjects) == 0:
pass
# 33: asec(x*c)
yield 33, subst1
subjects439.appendleft(tmp443)
subjects.appendleft(tmp438)
if len(subjects) >= 1 and isinstance(subjects[0], acsc):
tmp446 = subjects.popleft()
subjects447 = deque(tmp446._args)
# State 120025
subst1 = Substitution(subst0)
try:
subst1.try_add_variable('i2.2.1.2.0', S(1))
except ValueError:
pass
else:
pass
# State 120026
if len(subjects447) >= 1:
tmp449 = subjects447.popleft()
subst2 = Substitution(subst1)
try:
subst2.try_add_variable('i2.2.1.1', tmp449)
except ValueError:
pass
else:
pass
# State 120027
if len(subjects447) == 0:
pass
# State 120028
if len(subjects) == 0:
pass
# 34: acsc(x*c)
yield 34, subst2
subjects447.appendleft(tmp449)
if len(subjects447) >= 1 and isinstance(subjects447[0], Mul):
tmp451 = subjects447.popleft()
associative1 = tmp451
associative_type1 = type(tmp451)
subjects452 = deque(tmp451._args)
matcher = CommutativeMatcher120030.get()
tmp453 = subjects452
subjects452 = []
for s in tmp453:
matcher.add_subject(s)
for pattern_index, subst1 in matcher.match(tmp453, subst0):
pass
if pattern_index == 0:
pass
# State 120031
if len(subjects447) == 0:
pass
# State 120032
if len(subjects) == 0:
pass
# 34: acsc(x*c)
yield 34, subst1
subjects447.appendleft(tmp451)
subjects.appendleft(tmp446)
if len(subjects) >= 1 and isinstance(subjects[0], sinh):
tmp454 = subjects.popleft()
subjects455 = deque(tmp454._args)
# State 122126
subst1 = Substitution(subst0)
try:
subst1.try_add_variable('i2.2.1.2.0', S(0))
except ValueError:
pass
else:
pass
# State 122127
subst2 = Substitution(subst1)
try:
subst2.try_add_variable('i2.2.1.2.1.0', S(1))
except ValueError:
pass
else:
pass
# State 122128
if len(subjects455) >= 1:
tmp458 = subjects455.popleft()
subst3 = Substitution(subst2)
try:
subst3.try_add_variable('i2.2.1.0', tmp458)
except ValueError:
pass
else:
pass
# State 122129
if len(subjects455) == 0:
pass
# State 122130
if len(subjects) == 0:
pass
# 35: sinh(x*b + a)
yield 35, subst3
subjects455.appendleft(tmp458)
if len(subjects455) >= 1 and isinstance(subjects455[0], Mul):
tmp460 = subjects455.popleft()
associative1 = tmp460
associative_type1 = type(tmp460)
subjects461 = deque(tmp460._args)
matcher = CommutativeMatcher122132.get()
tmp462 = subjects461
subjects461 = []
for s in tmp462:
matcher.add_subject(s)
for pattern_index, subst2 in matcher.match(tmp462, subst1):
pass
if pattern_index == 0:
pass
# State 122133
if len(subjects455) == 0:
pass
# State 122134
if len(subjects) == 0:
pass
# 35: sinh(x*b + a)
yield 35, subst2
subjects455.appendleft(tmp460)
if len(subjects455) >= 1 and isinstance(subjects455[0], Add):
tmp463 = subjects455.popleft()
associative1 = tmp463
associative_type1 = type(tmp463)
subjects464 = deque(tmp463._args)
matcher = CommutativeMatcher122136.get()
tmp465 = subjects464
subjects464 = []
for s in tmp465:
matcher.add_subject(s)
for pattern_index, subst1 in matcher.match(tmp465, subst0):
pass
if pattern_index == 0:
pass
# State 122142
if len(subjects455) == 0:
pass
# State 122143
if len(subjects) == 0:
pass
# 35: sinh(x*b + a)
yield 35, subst1
subjects455.appendleft(tmp463)
subjects.appendleft(tmp454)
if len(subjects) >= 1 and isinstance(subjects[0], tanh):
tmp466 = subjects.popleft()
subjects467 = deque(tmp466._args)
# State 126354
subst1 = Substitution(subst0)
try:
subst1.try_add_variable('i2.2.1.2.0', S(0))
except ValueError:
pass
else:
pass
# State 126355
subst2 = Substitution(subst1)
try:
subst2.try_add_variable('i2.2.1.2.1.0', S(1))
except ValueError:
pass
else:
pass
# State 126356
if len(subjects467) >= 1:
tmp470 = subjects467.popleft()
subst3 = Substitution(subst2)
try:
subst3.try_add_variable('i2.2.1.0', tmp470)
except ValueError:
pass
else:
pass
# State 126357
if len(subjects467) == 0:
pass
# State 126358
if len(subjects) == 0:
pass
# 36: tanh(x*b + a)
yield 36, subst3
subjects467.appendleft(tmp470)
if len(subjects467) >= 1 and isinstance(subjects467[0], Mul):
tmp472 = subjects467.popleft()
associative1 = tmp472
associative_type1 = type(tmp472)
subjects473 = deque(tmp472._args)
matcher = CommutativeMatcher126360.get()
tmp474 = subjects473
subjects473 = []
for s in tmp474:
matcher.add_subject(s)
for pattern_index, subst2 in matcher.match(tmp474, subst1):
pass
if pattern_index == 0:
pass
# State 126361
if len(subjects467) == 0:
pass
# State 126362
if len(subjects) == 0:
pass
# 36: tanh(x*b + a)
yield 36, subst2
subjects467.appendleft(tmp472)
if len(subjects467) >= 1 and isinstance(subjects467[0], Add):
tmp475 = subjects467.popleft()
associative1 = tmp475
associative_type1 = type(tmp475)
subjects476 = deque(tmp475._args)
matcher = CommutativeMatcher126364.get()
tmp477 = subjects476
subjects476 = []
for s in tmp477:
matcher.add_subject(s)
for pattern_index, subst1 in matcher.match(tmp477, subst0):
pass
if pattern_index == 0:
pass
# State 126370
if len(subjects467) == 0:
pass
# State 126371
if len(subjects) == 0:
pass
# 36: tanh(x*b + a)
yield 36, subst1
subjects467.appendleft(tmp475)
subjects.appendleft(tmp466)
if len(subjects) >= 1 and isinstance(subjects[0], acosh):
tmp478 = subjects.popleft()
subjects479 = deque(tmp478._args)
# State 138485
subst1 = Substitution(subst0)
try:
subst1.try_add_variable('i2.2.1.2.0', S(1))
except ValueError:
pass
else:
pass
# State 138486
if len(subjects479) >= 1:
tmp481 = subjects479.popleft()
subst2 = Substitution(subst1)
try:
subst2.try_add_variable('i2.2.1.1', tmp481)
except ValueError:
pass
else:
pass
# State 138487
if len(subjects479) == 0:
pass
# State 138488
if len(subjects) == 0:
pass
# 37: acosh(x*c)
yield 37, subst2
subjects479.appendleft(tmp481)
if len(subjects479) >= 1 and isinstance(subjects479[0], Mul):
tmp483 = subjects479.popleft()
associative1 = tmp483
associative_type1 = type(tmp483)
subjects484 = deque(tmp483._args)
matcher = CommutativeMatcher138490.get()
tmp485 = subjects484
subjects484 = []
for s in tmp485:
matcher.add_subject(s)
for pattern_index, subst1 in matcher.match(tmp485, subst0):
pass
if pattern_index == 0:
pass
# State 138491
if len(subjects479) == 0:
pass
# State 138492
if len(subjects) == 0:
pass
# 37: acosh(x*c)
yield 37, subst1
subjects479.appendleft(tmp483)
subjects.appendleft(tmp478)
if len(subjects) >= 1 and isinstance(subjects[0], asinh):
tmp486 = subjects.popleft()
subjects487 = deque(tmp486._args)
# State 138568
subst1 = Substitution(subst0)
try:
subst1.try_add_variable('i2.2.1.2.0', S(1))
except ValueError:
pass
else:
pass
# State 138569
if len(subjects487) >= 1:
tmp489 = subjects487.popleft()
subst2 = Substitution(subst1)
try:
subst2.try_add_variable('i2.2.1.1', tmp489)
except ValueError:
pass
else:
pass
# State 138570
if len(subjects487) == 0:
pass
# State 138571
if len(subjects) == 0:
pass
# 38: asinh(x*c)
yield 38, subst2
subjects487.appendleft(tmp489)
subst1 = Substitution(subst0)
try:
subst1.try_add_variable('i2.2.1.2.0_1', S(1))
except ValueError:
pass
else:
pass
# State 139610
if len(subjects487) >= 1:
tmp492 = subjects487.popleft()
subst2 = Substitution(subst1)
try:
subst2.try_add_variable('i2.2.1.2.1.1', tmp492)
except ValueError:
pass
else:
pass
# State 139611
if len(subjects487) == 0:
pass
# State 139612
if len(subjects) == 0:
pass
# 39: asinh(c*x)
yield 39, subst2
subjects487.appendleft(tmp492)
if len(subjects487) >= 1 and isinstance(subjects487[0], Mul):
tmp494 = subjects487.popleft()
associative1 = tmp494
associative_type1 = type(tmp494)
subjects495 = deque(tmp494._args)
matcher = CommutativeMatcher138573.get()
tmp496 = subjects495
subjects495 = []
for s in tmp496:
matcher.add_subject(s)
for pattern_index, subst1 in matcher.match(tmp496, subst0):
pass
if pattern_index == 0:
pass
# State 138574
if len(subjects487) == 0:
pass
# State 138575
if len(subjects) == 0:
pass
# 38: asinh(x*c)
yield 38, subst1
if pattern_index == 1:
pass
# State 139613
if len(subjects487) == 0:
pass
# State 139614
if len(subjects) == 0:
pass
# 39: asinh(c*x)
yield 39, subst1
subjects487.appendleft(tmp494)
subjects.appendleft(tmp486)
if len(subjects) >= 1 and isinstance(subjects[0], atanh):
tmp497 = subjects.popleft()
subjects498 = deque(tmp497._args)
# State 143101
subst1 = Substitution(subst0)
try:
subst1.try_add_variable('i2.2.1.2.0', S(1))
except ValueError:
pass
else:
pass
# State 143102
if len(subjects498) >= 1:
tmp500 = subjects498.popleft()
subst2 = Substitution(subst1)
try:
subst2.try_add_variable('i2.2.1.1', tmp500)
except ValueError:
pass
else:
pass
# State 143103
if len(subjects498) == 0:
pass
# State 143104
if len(subjects) == 0:
pass
# 40: atanh(x*c)
yield 40, subst2
subjects498.appendleft(tmp500)
if len(subjects498) >= 1 and isinstance(subjects498[0], Mul):
tmp502 = subjects498.popleft()
associative1 = tmp502
associative_type1 = type(tmp502)
subjects503 = deque(tmp502._args)
matcher = CommutativeMatcher143106.get()
tmp504 = subjects503
subjects503 = []
for s in tmp504:
matcher.add_subject(s)
for pattern_index, subst1 in matcher.match(tmp504, subst0):
pass
if pattern_index == 0:
pass
# State 143107
if len(subjects498) == 0:
pass
# State 143108
if len(subjects) == 0:
pass
# 40: atanh(x*c)
yield 40, subst1
subjects498.appendleft(tmp502)
subjects.appendleft(tmp497)
if len(subjects) >= 1 and isinstance(subjects[0], acoth):
tmp505 = subjects.popleft()
subjects506 = deque(tmp505._args)
# State 143150
subst1 = Substitution(subst0)
try:
subst1.try_add_variable('i2.2.1.2.0', S(1))
except ValueError:
pass
else:
pass
# State 143151
if len(subjects506) >= 1:
tmp508 = subjects506.popleft()
subst2 = Substitution(subst1)
try:
subst2.try_add_variable('i2.2.1.1', tmp508)
except ValueError:
pass
else:
pass
# State 143152
if len(subjects506) == 0:
pass
# State 143153
if len(subjects) == 0:
pass
# 41: acoth(x*c)
yield 41, subst2
subjects506.appendleft(tmp508)
if len(subjects506) >= 1 and isinstance(subjects506[0], Mul):
tmp510 = subjects506.popleft()
associative1 = tmp510
associative_type1 = type(tmp510)
subjects511 = deque(tmp510._args)
matcher = CommutativeMatcher143155.get()
tmp512 = subjects511
subjects511 = []
for s in tmp512:
matcher.add_subject(s)
for pattern_index, subst1 in matcher.match(tmp512, subst0):
pass
if pattern_index == 0:
pass
# State 143156
if len(subjects506) == 0:
pass
# State 143157
if len(subjects) == 0:
pass
# 41: acoth(x*c)
yield 41, subst1
subjects506.appendleft(tmp510)
subjects.appendleft(tmp505)
if len(subjects) >= 1 and isinstance(subjects[0], asech):
tmp513 = subjects.popleft()
subjects514 = deque(tmp513._args)
# State 149123
subst1 = Substitution(subst0)
try:
subst1.try_add_variable('i2.2.1.2.0', S(1))
except ValueError:
pass
else:
pass
# State 149124
if len(subjects514) >= 1:
tmp516 = subjects514.popleft()
subst2 = Substitution(subst1)
try:
subst2.try_add_variable('i2.2.1.1', tmp516)
except ValueError:
pass
else:
pass
# State 149125
if len(subjects514) == 0:
pass
# State 149126
if len(subjects) == 0:
pass
# 42: asech(x*c)
yield 42, subst2
subjects514.appendleft(tmp516)
if len(subjects514) >= 1 and isinstance(subjects514[0], Mul):
tmp518 = subjects514.popleft()
associative1 = tmp518
associative_type1 = type(tmp518)
subjects519 = deque(tmp518._args)
matcher = CommutativeMatcher149128.get()
tmp520 = subjects519
subjects519 = []
for s in tmp520:
matcher.add_subject(s)
for pattern_index, subst1 in matcher.match(tmp520, subst0):
pass
if pattern_index == 0:
pass
# State 149129
if len(subjects514) == 0:
pass
# State 149130
if len(subjects) == 0:
pass
# 42: asech(x*c)
yield 42, subst1
subjects514.appendleft(tmp518)
subjects.appendleft(tmp513)
if len(subjects) >= 1 and isinstance(subjects[0], acsch):
tmp521 = subjects.popleft()
subjects522 = deque(tmp521._args)
# State 149201
subst1 = Substitution(subst0)
try:
subst1.try_add_variable('i2.2.1.2.0', S(1))
except ValueError:
pass
else:
pass
# State 149202
if len(subjects522) >= 1:
tmp524 = subjects522.popleft()
subst2 = Substitution(subst1)
try:
subst2.try_add_variable('i2.2.1.1', tmp524)
except ValueError:
pass
else:
pass
# State 149203
if len(subjects522) == 0:
pass
# State 149204
if len(subjects) == 0:
pass
# 43: acsch(x*c)
yield 43, subst2
subjects522.appendleft(tmp524)
if len(subjects522) >= 1 and isinstance(subjects522[0], Mul):
tmp526 = subjects522.popleft()
associative1 = tmp526
associative_type1 = type(tmp526)
subjects527 = deque(tmp526._args)
matcher = CommutativeMatcher149206.get()
tmp528 = subjects527
subjects527 = []
for s in tmp528:
matcher.add_subject(s)
for pattern_index, subst1 in matcher.match(tmp528, subst0):
pass
if pattern_index == 0:
pass
# State 149207
if len(subjects522) == 0:
pass
# State 149208
if len(subjects) == 0:
pass
# 43: acsch(x*c)
yield 43, subst1
subjects522.appendleft(tmp526)
subjects.appendleft(tmp521)
return
yield
from .generated_part003114 import *
from .generated_part003116 import *
from .generated_part003192 import *
from .generated_part003189 import *
from .generated_part003177 import *
from .generated_part003206 import *
from .generated_part003101 import *
from .generated_part003204 import *
from .generated_part003207 import *
from .generated_part003109 import *
from .generated_part003127 import *
from .generated_part003205 import *
from .generated_part003119 import *
from .generated_part003203 import *
from .generated_part003100 import *
from .generated_part003136 import *
from .generated_part003093 import *
from .generated_part003179 import *
from .generated_part003113 import *
from .generated_part003103 import *
from .generated_part003106 import *
from .generated_part003110 import *
from .generated_part003107 import *
from .generated_part003200 import *
from .generated_part003183 import *
from .generated_part003195 import *
from collections import deque
from .generated_part003182 import *
from .generated_part003198 import *
from .generated_part003142 import *
from .generated_part003181 import *
from matchpy.utils import VariableWithCount
from .generated_part003176 import *
from .generated_part003187 import *
from .generated_part003196 import *
from .generated_part003149 import *
from .generated_part003112 import *
from .generated_part003096 import *
from .generated_part003102 import *
from .generated_part003124 import *
from .generated_part003094 import *
from .generated_part003201 import *
from .generated_part003197 import *
from .generated_part003178 import *
from .generated_part003111 import *
from multiset import Multiset
from .generated_part003123 import *
from .generated_part003129 import *
from .generated_part003186 import *
from .generated_part003193 import *
from .generated_part003188 import *
from .generated_part003140 import *
from .generated_part003117 import *
from .generated_part003208 import *
from matchpy.matching.many_to_one import CommutativeMatcher
from .generated_part003126 import *
from .generated_part003184 import *
from .generated_part003191 import *
from .generated_part003194 import *
from .generated_part003139 import *
from .generated_part003137 import *
from .generated_part003105 import * | [
"franz.bonazzi@gmail.com"
] | franz.bonazzi@gmail.com |
d56d529290d5fca12af0043705c03bfcf41f290b | c39f999cae8825afe2cdf1518d93ba31bd4c0e95 | /PYME/DSView/modules/shell.py | 9debe00a7baf593cae3782bd4eeda236c0d7d023 | [] | no_license | WilliamRo/CLipPYME | 0b69860136a9b2533f2f29fc29408d7471cb934d | 6596167034c727ad7dad0a741dd59e0e48f6852a | refs/heads/master | 2023-05-11T09:50:58.605989 | 2023-05-09T02:17:47 | 2023-05-09T02:17:47 | 60,789,741 | 3 | 1 | null | 2016-06-17T08:52:44 | 2016-06-09T16:30:14 | Python | UTF-8 | Python | false | false | 1,225 | py | #!/usr/bin/python
##################
# shell.py
#
# Copyright David Baddeley, 2011
# d.baddeley@auckland.ac.nz
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##################
import wx.py.shell
def Plug(dsviewer):
    """Attach an interactive Python console page to *dsviewer*.

    The console shares the viewer's namespace (``dsviewer.__dict__``) so the
    user can poke at the currently displayed data, and is pre-loaded with
    pylab and the PYME viewer helpers.
    """
    console = wx.py.shell.Shell(
        id=-1, parent=dsviewer, pos=wx.Point(0, 0), size=wx.Size(618, 451),
        style=0, locals=dsviewer.__dict__,
        introText='note that help, license etc below is for Python, not PYME\n\n')
    # Convenience imports executed inside the shell's own namespace.
    console.Execute('from pylab import *')
    console.Execute('from PYME.DSView import View3D, ViewIm3D')
    dsviewer.AddPage(page=console, select=False, caption='Console')
    # Keep a handle on the shell so other modules can drive it.
    dsviewer.sh = console
"willi4m@zju.edu.cn"
] | willi4m@zju.edu.cn |
b7a1cf6a73df1281ceebfdd3a8392fd02d8d3f68 | 01c39e5ac5398658f56e069a1f4c0142496a07f9 | /master/serializer.py | 17a5718aad1cbe8914485af00a9935eb95682aa7 | [] | no_license | vshaladhav97/first_kick | f95c0f402e7f0e869c05c1abf58404bb9a7b7863 | 367cccca72f0eae6c3ccb70fabb371dc905f915e | refs/heads/master | 2023-08-21T05:25:33.211862 | 2021-10-12T11:04:56 | 2021-10-12T11:04:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,543 | py | from django.db.models import fields
from rest_framework import serializers
from .models import (Company, AddressDetail, Location, AgeGroup, Months, PlayingSurface,
CourseType, EventType, WeekDay, ClassStatus, Ages)
class AddressDetailSerializer(serializers.ModelSerializer):
    """Serializes the postal-address fields of an AddressDetail record."""
    class Meta:
        model = AddressDetail
        fields = (
            'id',
            'address_line_1',
            'address_line_2',
            'address_line_3',
            'town',
            'country',
        )
class CompanySerializer(serializers.ModelSerializer):
    """Minimal Company representation (id + name); nested inside LocationSerializer."""
    class Meta:
        model = Company
        fields = (
            'id',
            'company_name',
        )
class LocationSerializer(serializers.ModelSerializer):
    """Full Location representation with its owning company nested read-only.

    Fix: the original ``Meta.fields`` listed ``'address_line_1'`` twice; the
    duplicate is removed (serialized output is unchanged, since DRF builds a
    field mapping keyed by name).
    """
    company = CompanySerializer(read_only=True)
    class Meta:
        model = Location
        fields = (
            'id',
            'location',
            'company',
            'address_line_1',
            'address_line_2',
            'address_line_3',
            'town',
            'country',
            'postal_code'
        )
class LocationForAnalyticsSerializer(serializers.ModelSerializer):
    """Lightweight Location representation (id + name) for analytics views."""
    class Meta:
        model = Location
        fields = (
            'id',
            'location',
        )
class CompanySerializer(serializers.ModelSerializer):
    # NOTE(review): this is an exact duplicate of the CompanySerializer defined
    # earlier in this module and silently shadows it; consider deleting one.
    class Meta:
        model = Company
        fields = (
            'id',
            'company_name',
        )
class LocationDataTableSerializer(serializers.ModelSerializer):
    """Location rows for a data table; company pk is replaced by its name.

    ``playing_surface`` is emitted as the raw primary key (the name lookup
    below is commented out), unlike the ...ForPrepolated variant.
    """
    class Meta:
        model = Location
        fields = (
            'id',
            'company',
            'location',
            'address_line_1',
            'town',
            'postal_code',
            'playing_surface',
        )
    def to_representation(self, instance):
        # Swap the company foreign-key id for the human-readable company name.
        data = super().to_representation(instance)
        company = Company.objects.get(pk=data['company'])
        data['company'] = company.company_name
        # playing_surface = PlayingSurface.objects.get(pk=data['playing_surface'])
        # data['playing_surface'] = playing_surface.surface
        return data
class LocationDataTableSerializerForPrepolated(serializers.ModelSerializer):
    """Location rows with company and playing-surface pks resolved to names.

    Also adds ``playing_surface_id`` so the client keeps the original pk.
    NOTE(review): "Prepolated" is presumably a typo for "Prepopulated" —
    renaming would break importers, so it is only flagged here.
    """
    class Meta:
        model = Location
        fields = (
            'id',
            'company',
            'location',
            'address_line_1',
            'town',
            'postal_code',
            'playing_surface',
        )
    def to_representation(self, instance):
        # Resolve both foreign keys to display names; keep the surface pk too.
        data = super().to_representation(instance)
        company = Company.objects.get(pk=data['company'])
        data['company'] = company.company_name
        playing_surface = PlayingSurface.objects.get(pk=data['playing_surface'])
        data['playing_surface'] = playing_surface.surface
        data['playing_surface_id'] = playing_surface.id
        return data
class AgeGroupSerializer(serializers.ModelSerializer):
    """Serializes an AgeGroup (id + display text)."""
    class Meta:
        model = AgeGroup
        fields = (
            'id',
            'age_group_text',
        )
class CourseTypeSerializer(serializers.ModelSerializer):
    """Serializes a CourseType (name, description and title)."""
    class Meta:
        model = CourseType
        fields = (
            'id',
            'course_name',
            'course_description',
            'course_title',
        )
class EvenTypeSerializer(serializers.ModelSerializer):
    """Serializes an EventType (id + name).

    NOTE(review): class name is missing a 't' ("EvenType"); renaming would
    break importers, so the typo is only flagged here.
    """
    class Meta:
        model = EventType
        fields = (
            'id',
            'type_name',
        )
class WeekDaySerializer(serializers.ModelSerializer):
    """Serializes a WeekDay (id + name)."""
    class Meta:
        model = WeekDay
        fields = (
            'id',
            'weekday',
        )
class ClassStatusSerializer(serializers.ModelSerializer):
    """Serializes a ClassStatus (id + status name)."""
    class Meta:
        model = ClassStatus
        fields = (
            'id',
            'status_name',
        )
class MonthSerializer(serializers.ModelSerializer):
    """Serializes a Months record (id + month)."""
    class Meta:
        model = Months
        fields = (
            'id',
            'month',
        )
class AgeSerializer(serializers.ModelSerializer):
    """Serializes an Ages record (id + age)."""
    class Meta:
        model = Ages
        fields = (
            'id',
            'age',
        )
class PlayingSurfaceSerializer(serializers.ModelSerializer):
    """Serializes a PlayingSurface (id + surface name)."""
    class Meta:
        model = PlayingSurface
        fields = (
            'id',
            'surface',
        )
class CompanyNameDropdownSelectionSerializer(serializers.ModelSerializer):
    """Company id + name pairs for populating a dropdown selector."""
    class Meta:
        model = Company
        fields = ("id", "company_name",)
"adhavv0@gmail.com"
] | adhavv0@gmail.com |
0153c67382f10038868a46db8eda74d5d8a0793c | 9f170204f6976fe59d9ee74aa78f62b10663c051 | /stubs/torch/nn/__init__.pyi | 8b1990ac1ca2f8e0d0f84f2e8b38318cc507d970 | [
"BSD-2-Clause",
"BSD-3-Clause"
] | permissive | hercules261188/xformers | 04132b4c94bfb59ea9917ccf29800a049f924da9 | 71bab94cb954e6e291ca93d3bce5dffadab4286d | refs/heads/main | 2023-09-06T02:51:47.086611 | 2021-11-24T16:36:22 | 2021-11-24T16:36:22 | 431,593,450 | 1 | 0 | NOASSERTION | 2021-11-24T18:43:24 | 2021-11-24T18:43:23 | null | UTF-8 | Python | false | false | 8,489 | pyi | # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import builtins
from typing import (
Any,
Generic,
Iterable,
Iterator,
List,
Optional,
Tuple,
Type,
TypeVar,
Union,
overload,
)
import torch
from pyre_extensions import Add, Divide, Multiply, Subtract, TypeVarTuple, Unpack
from torch import Tensor
from typing_extensions import Literal as L
# Type variables used throughout these stubs: DType is the tensor element
# type; the int-bounded variables name individual dimension sizes; Ts is a
# variadic tail of remaining dimensions.
DType = TypeVar("DType")
T = TypeVar("T")
Ts = TypeVarTuple("Ts")
InputSize = TypeVar("InputSize", bound=int)
OutputSize = TypeVar("OutputSize", bound=int)
HiddenSize = TypeVar("HiddenSize", bound=int)
Batch = TypeVar("Batch", bound=int)
N = TypeVar("N", bound=int)
EmbeddingDimension = TypeVar("EmbeddingDimension", bound=int)
H = TypeVar("H", bound=int)
W = TypeVar("W", bound=int)
class Module:
    # Narrowed stub of torch.nn.Module: only the members this project uses.
    def __call__(self, *args: Any, **kwargs: Any) -> Any: ...
    def parameters(self) -> Iterator[Any]: ...
    def double(self: T) -> T: ...
    def to(self, dtype: Type[T], device: torch._device = ...) -> Module: ...
    def eval(self) -> Module: ...
    def train(self, mode: bool) -> Module: ...
    def register_parameter(self, name: str, param: Optional[Parameter]) -> None: ...
    # Declared attributes (untyped shapes): learnable bias and training flag.
    bias: Parameter = ...
    training: bool = ...
class LSTMCell(Module, Generic[InputSize, HiddenSize]):
    # Single LSTM step: (input, (h, c)) -> (h', c'), batch-first 2-D tensors.
    def __init__(
        self, input_size: InputSize, hidden_size: HiddenSize, bias: bool = ...
    ) -> None: ...
    def __call__(
        self,
        input: Tensor[DType, Batch, InputSize],
        hidden: Tuple[
            Tensor[DType, Batch, HiddenSize], Tensor[DType, Batch, HiddenSize]
        ] = ...,
    ) -> Tuple[Tensor[DType, Batch, HiddenSize], Tensor[DType, Batch, HiddenSize]]: ...
class Linear(Module, Generic[InputSize, OutputSize]):
    # Affine map over the last dimension; all leading dimensions pass through.
    def __init__(
        self, in_features: InputSize, out_features: OutputSize, bias: bool = ...
    ) -> None: ...
    def __call__(
        self,
        input: Tensor[DType, N, Unpack[Ts], InputSize],
    ) -> Tensor[DType, N, Unpack[Ts], OutputSize]: ...
class _Loss(Module): ...  # common base for loss modules (mirrors torch.nn.modules.loss._Loss)
class MSELoss(_Loss):
    # Mean-squared-error loss; input and target must share one shape, and the
    # (default) reduction collapses the result to a 0-d tensor.
    def __init__(
        self,
        size_average: Optional[bool] = ...,
        reduce: Optional[bool] = ...,
        reduction: str = ...,
    ) -> None: ...
    def __call__(
        self,
        input: Tensor[DType, N, Unpack[Ts]],
        target: Tensor[DType, N, Unpack[Ts]],
    ) -> Tensor[DType]: ...
# Dimension variables for the convolution/padding/pooling stubs below.
# NOTE(review): Batch is re-declared here (already defined above) — harmless
# at type-check time but one declaration could be dropped.
InChannels = TypeVar("InChannels", bound=int)
OutChannels = TypeVar("OutChannels", bound=int)
KernelSize1 = TypeVar("KernelSize1", bound=int)
KernelSize2 = TypeVar("KernelSize2", bound=int)
Stride = TypeVar("Stride", bound=int)
Batch = TypeVar("Batch", bound=int)
Height = TypeVar("Height", bound=int)
Width = TypeVar("Width", bound=int)
Channels = TypeVar("Channels", bound=int)
Padding = TypeVar("Padding", bound=int)
Padding1 = TypeVar("Padding1", bound=int)
Padding2 = TypeVar("Padding2", bound=int)
class Conv2d(
    Module,
    Generic[InChannels, OutChannels, KernelSize1, KernelSize2, Padding1, Padding2],
):
    # 2-D convolution stub with the output spatial size computed at the type
    # level; only tuple kernel/padding and implicit stride 1 are modelled.
    def __init__(
        self,
        in_channels: InChannels,
        out_channels: OutChannels,
        kernel_size: Tuple[KernelSize1, KernelSize2],
        padding: Tuple[Padding1, Padding2],
        bias: bool = ...,
    ) -> None: ...
    def __call__(
        self, input: Tensor[DType, Batch, InChannels, Height, Width]
    ) -> Tensor[
        DType,
        Batch,
        OutChannels,
        # We assume stride = 1.
        # (H − K[0] + 2P[0]) + 1.
        Add[Add[Subtract[Height, KernelSize1], Multiply[Padding1, L[2]]], L[1]],
        # (W − K[1] + 2P[1]) + 1.
        Add[Add[Subtract[Width, KernelSize2], Multiply[Padding2, L[2]]], L[1]],
    ]: ...
class ReflectionPad2d(Module, Generic[Padding]):
    # Reflection padding with the same amount on all four sides, so each
    # spatial dimension grows by 2 * Padding.
    def __init__(
        self,
        padding: Padding,
    ) -> None: ...
    def __call__(
        self,
        input: Tensor[DType, Batch, Channels, Height, Width],
    ) -> Tensor[
        DType,
        Batch,
        Channels,
        Add[Add[Height, Padding], Padding],
        Add[Add[Width, Padding], Padding],
    ]: ...
class InstanceNorm2d(Module, Generic[Channels]):
    # Shape-preserving per-channel normalization over 4-D inputs.
    # Fix: every other layer stub in this file subclasses Module, and the real
    # torch.nn.InstanceNorm2d is a Module; without the base class these
    # instances would not type-check where a Module is expected.
    def __init__(self, num_features: Channels, affine: bool = False) -> None: ...
    def __call__(
        self, input: Tensor[DType, Batch, Channels, Height, Width]
    ) -> Tensor[DType, Batch, Channels, Height, Width]: ...
class LeakyReLU(Module):
    # Elementwise leaky rectifier; shape-preserving for any tensor rank.
    def __init__(self, negative_slope: float = ..., inplace: bool = ...) -> None: ...
    def __call__(
        self, input: Tensor[DType, N, Unpack[Ts]]
    ) -> Tensor[DType, N, Unpack[Ts]]: ...
class ReLU(Module):
    # Elementwise rectifier; shape-preserving. Generalized from the original
    # 4-D-only signature to any rank, matching LeakyReLU above (accepting the
    # previous 4-D inputs remains valid, so this widening is backward compatible).
    def __call__(
        self, input: Tensor[DType, N, Unpack[Ts]]
    ) -> Tensor[DType, N, Unpack[Ts]]: ...
class GELU(Module):
    # Elementwise GELU; shape-preserving. Generalized from the original
    # 4-D-only signature to any rank, matching LeakyReLU above (accepting the
    # previous 4-D inputs remains valid, so this widening is backward compatible).
    def __call__(
        self, input: Tensor[DType, N, Unpack[Ts]]
    ) -> Tensor[DType, N, Unpack[Ts]]: ...
class Dropout(Module):
    # Elementwise dropout with probability p; shape-preserving for any rank.
    def __init__(self, p: float, inplace: bool = ...) -> None: ...
    def __call__(
        self, input: Tensor[DType, Unpack[Ts]]
    ) -> Tensor[DType, Unpack[Ts]]: ...
class Embedding(Module, Generic[N, EmbeddingDimension]):
    # Lookup table of N embeddings of size EmbeddingDimension; calling it
    # appends the embedding dimension to the index tensor's shape.
    def __init__(
        self,
        num_embeddings: N,
        embedding_dim: EmbeddingDimension,
        padding_idx: Optional[int] = ...,
        max_norm: Optional[float] = ...,
        norm_type: float = ...,
        scale_grad_by_freq: bool = ...,
        sparse: bool = ...,
        _weight: Optional[Tensor] = ...,
    ) -> None: ...
    @property
    def padding_idx(self) -> int: ...
    @property
    def max_norm(self) -> float: ...
    @property
    def norm_type(self) -> float: ...
    @property
    def scale_grad_by_freq(self) -> bool: ...
    @property
    def sparse(self) -> bool: ...
    @property
    def weight(self) -> Tensor[torch.float32, N, EmbeddingDimension]: ...
    # Alternate constructor wrapping an existing weight matrix.
    @classmethod
    def from_pretrained(
        cls,
        embeddings: Tensor[DType, N, EmbeddingDimension],
        freeze: bool = True,
        padding_idx: Optional[int] = None,
        max_norm: Optional[float] = None,
        norm_type: float = 2.0,
        scale_grad_by_freq: bool = False,
        sparse: bool = False,
    ) -> Embedding[N, EmbeddingDimension]: ...
    def forward(
        self, x: Tensor[DType, Unpack[Ts]]
    ) -> Tensor[DType, Unpack[Ts], EmbeddingDimension]: ...
    def __call__(
        self, x: Tensor[DType, Unpack[Ts]]
    ) -> Tensor[DType, Unpack[Ts], EmbeddingDimension]: ...
_shape_t = Union[int, List[int], Tuple[Any, ...]]  # normalized_shape argument forms accepted by LayerNorm
class LayerNorm(Module):
    # Layer normalization; shape-preserving for any tensor rank.
    def __init__(
        self,
        normalized_shape: _shape_t,
        eps: float = ...,
        elementwise_affine: bool = ...,
        device=...,
        dtype=...,
    ) -> None: ...
    def forward(self, x: Tensor[DType, Unpack[Ts]]) -> Tensor[DType, Unpack[Ts]]: ...
    def __call__(self, x: Tensor[DType, Unpack[Ts]]) -> Tensor[DType, Unpack[Ts]]: ...
class AdaptiveAvgPool2d(Module, Generic[H, W]):
    # Adaptive average pooling: the constructor overloads encode the target
    # output size in the class's type parameters (None maps to the sentinel
    # L[-1], meaning "keep that dimension's input size"), and the __call__
    # overloads pick the matching output shape for the trailing two axes.
    @overload
    def __new__(
        self,
        output_size: Tuple[N, L[None]],
    ) -> AdaptiveAvgPool2d[N, L[-1]]: ...
    @overload
    def __new__(
        self,
        output_size: Tuple[L[None], N],
    ) -> AdaptiveAvgPool2d[L[-1], N]: ...
    @overload
    def __new__(
        self,
        output_size: H,
    ) -> AdaptiveAvgPool2d[H, H]: ...
    @overload
    def __new__(
        self,
        output_size: Tuple[H, W],
    ) -> AdaptiveAvgPool2d[H, W]: ...
    def forward(self, x: Tensor[DType, Unpack[Ts]]) -> Tensor[DType, Unpack[Ts]]: ...
    @overload
    def __call__(
        self: AdaptiveAvgPool2d[L[-1], W], x: Tensor[DType, Unpack[Ts], N, int]
    ) -> Tensor[DType, Unpack[Ts], N, W]: ...
    @overload
    def __call__(
        self: AdaptiveAvgPool2d[H, L[-1]], x: Tensor[DType, Unpack[Ts], int, N]
    ) -> Tensor[DType, Unpack[Ts], H, N]: ...
    @overload
    def __call__(
        self: AdaptiveAvgPool2d[H, W], x: Tensor[DType, Unpack[Ts], int, int]
    ) -> Tensor[DType, Unpack[Ts], H, W]: ...
class ModuleList(Module):
    # Iterable, sized container of child modules.
    def __init__(self, modules: Optional[Iterable[Module]] = ...) -> None: ...
    def __iter__(self) -> Iterator[Module]: ...
    def __len__(self) -> int: ...
class Parameter(Tensor[DType, Unpack[Ts]]):
    # A Tensor subclass marking a learnable parameter; same dtype/shape params.
    def __init__(
        self, data: Tensor[DType, Unpack[Ts]] = ..., requires_grad: builtins.bool = ...
    ) -> None: ...
Sequential: Any = ...  # not yet typed in these stubs; falls back to Any
| [
"noreply@github.com"
] | hercules261188.noreply@github.com |
f117734a32321a363bf324dd1d20229a47415b0c | 78144baee82268a550400bbdb8c68de524adc68f | /Production/python/Autumn18/RPV_2t6j_mStop-600_mN1-100_TuneCP2_13TeV-madgraphMLM-pythia8_cff.py | 681e179bb8350c18e44a27bed0578875855d29f0 | [] | no_license | tklijnsma/TreeMaker | e6989c03189b849aff2007bad22e2bfc6922a244 | 248f2c04cc690ef2e2202b452d6f52837c4c08e5 | refs/heads/Run2_2017 | 2023-05-26T23:03:42.512963 | 2020-05-12T18:44:15 | 2020-05-12T18:44:15 | 263,960,056 | 1 | 2 | null | 2020-09-25T00:27:35 | 2020-05-14T15:57:20 | null | UTF-8 | Python | false | false | 2,447 | py | import FWCore.ParameterSet.Config as cms
maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) )
readFiles = cms.untracked.vstring()
secFiles = cms.untracked.vstring()
source = cms.Source ("PoolSource",fileNames = readFiles, secondaryFileNames = secFiles)
readFiles.extend( [
'/store/mc/RunIIAutumn18MiniAOD/RPV_2t6j_mStop-600_mN1-100_TuneCP2_13TeV-madgraphMLM-pythia8/MINIAODSIM/102X_upgrade2018_realistic_v15-v2/260000/2BAAA2BB-6D8C-0D46-8C3C-EDCE57AE666F.root',
'/store/mc/RunIIAutumn18MiniAOD/RPV_2t6j_mStop-600_mN1-100_TuneCP2_13TeV-madgraphMLM-pythia8/MINIAODSIM/102X_upgrade2018_realistic_v15-v2/260000/7CFA4B9D-6E77-2843-8E45-21C1ACF3C828.root',
'/store/mc/RunIIAutumn18MiniAOD/RPV_2t6j_mStop-600_mN1-100_TuneCP2_13TeV-madgraphMLM-pythia8/MINIAODSIM/102X_upgrade2018_realistic_v15-v2/260000/8D4BB693-2FAE-EA4E-8588-776A14EBCF80.root',
'/store/mc/RunIIAutumn18MiniAOD/RPV_2t6j_mStop-600_mN1-100_TuneCP2_13TeV-madgraphMLM-pythia8/MINIAODSIM/102X_upgrade2018_realistic_v15-v2/260000/8DC72BEB-EFE5-D940-9E5F-21A05E1BC0B0.root',
'/store/mc/RunIIAutumn18MiniAOD/RPV_2t6j_mStop-600_mN1-100_TuneCP2_13TeV-madgraphMLM-pythia8/MINIAODSIM/102X_upgrade2018_realistic_v15-v2/260000/A394366F-5C6E-DF43-AE84-F4EC1936F0AB.root',
'/store/mc/RunIIAutumn18MiniAOD/RPV_2t6j_mStop-600_mN1-100_TuneCP2_13TeV-madgraphMLM-pythia8/MINIAODSIM/102X_upgrade2018_realistic_v15-v2/260000/A6362742-F52C-9A4C-81D3-2D40D4E662DA.root',
'/store/mc/RunIIAutumn18MiniAOD/RPV_2t6j_mStop-600_mN1-100_TuneCP2_13TeV-madgraphMLM-pythia8/MINIAODSIM/102X_upgrade2018_realistic_v15-v2/260000/BAA7075A-777B-0A4D-9EB4-FE025592CFE4.root',
'/store/mc/RunIIAutumn18MiniAOD/RPV_2t6j_mStop-600_mN1-100_TuneCP2_13TeV-madgraphMLM-pythia8/MINIAODSIM/102X_upgrade2018_realistic_v15-v2/260000/CBE481C4-B24D-B44C-8535-C3B5BF34C37B.root',
'/store/mc/RunIIAutumn18MiniAOD/RPV_2t6j_mStop-600_mN1-100_TuneCP2_13TeV-madgraphMLM-pythia8/MINIAODSIM/102X_upgrade2018_realistic_v15-v2/260000/E89B752A-5734-2A46-9CD0-241040C25007.root',
'/store/mc/RunIIAutumn18MiniAOD/RPV_2t6j_mStop-600_mN1-100_TuneCP2_13TeV-madgraphMLM-pythia8/MINIAODSIM/102X_upgrade2018_realistic_v15-v2/260000/EB533F13-5548-914E-AE88-D7C3320CD9F7.root',
'/store/mc/RunIIAutumn18MiniAOD/RPV_2t6j_mStop-600_mN1-100_TuneCP2_13TeV-madgraphMLM-pythia8/MINIAODSIM/102X_upgrade2018_realistic_v15-v2/260000/FEF1E0D2-15BA-1944-999B-C503BC4ED53D.root',
] )
| [
"Chris_Madrid@Baylor.edu"
] | Chris_Madrid@Baylor.edu |
83441941cb7936a690cd4274636b264724009523 | a6a27234bb623c047fe86e91c720a50ba6ab641f | /sctt/sctt/calibration/first_cracking_stress.py | ce27bff06a658a7fad9fc085cab9aabb124fabbf | [] | no_license | liyingxiong/sctt | 4f87b1bdeb09eafb2831e699fd82b4a0d9db9099 | f3f9af2be80d39a70668a8bbf9f1e1458ee0fbc3 | refs/heads/master | 2021-01-23T03:28:17.314156 | 2018-12-11T11:18:33 | 2018-12-11T11:18:33 | 20,015,992 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,385 | py | '''
Identify the first cracking stress according to the derivatives of the stress-strain diagram.
@author: Yingxiong
'''
import numpy as np
from scipy.interpolate import interp1d, UnivariateSpline
from matplotlib import pyplot as plt
from scipy.optimize import curve_fit
from lmfit import minimize, Parameters, Parameter, report_fit
# Find the smallest maximum strain across the five tension-test files so that
# a common strain grid covers all specimens.
# Columns (assumed from usage — TODO confirm against the data files): col 1 is
# force, cols 2 and 3 are the two displacement gauges; /2./250. converts the
# averaged displacement over the 250 mm gauge length to strain.
eps_max_lst = []
for j in range(5):
    filepath1 = 'D:\\data\\TT-6C-0' + str(j + 1) + '.txt'
    data = np.loadtxt(filepath1, delimiter=';')
    eps_max_lst.append(
        np.amax(-data[:, 2] / 2. / 250. - data[:, 3] / 2. / 250.))
eps_max = np.amin(eps_max_lst)
eps_arr = np.linspace(0, eps_max, 1000)
sig_lst = []
# lmfit parameters of the trilinear model: slopes k1/k2/k3 and the two
# strain break points a and b (bounds keep a < b).
params = Parameters()
params.add('k1', value=0., min=0.)
params.add('k2', value=0., min=0.)
params.add('k3', value=0., min=0.)
params.add('a', value=0., min=5e-5, max=0.001)
params.add('b', value=0., min=0.001, max=0.005)
def f(params, x, data):
    """Residual of a three-slope piecewise-linear fit evaluated at *x*.

    The model rises with slope ``k1`` up to knee ``a``, with slope ``k2``
    on ``(a, b]`` and with slope ``k3`` beyond ``b``; the measurement
    *data* is subtracted so the result can be minimised directly.
    """
    k1 = params['k1'].value
    k2 = params['k2'].value
    k3 = params['k3'].value
    a = params['a'].value
    b = params['b'].value
    # Boolean masks select exactly one segment per point; this works for
    # scalars and numpy arrays alike.
    first = k1 * x * (x <= a)
    second = (k1 * a + k2 * (x - a)) * (x > a) * (x <= b)
    third = (k1 * a + k2 * (b - a) + k3 * (x - b)) * (x > b)
    return first + second + third - data
# Fit the piecewise-linear model to each of the five tests and report
# the stress at the first knee 'a' (interpreted as first cracking).
for j in range(5):
    filepath1 = 'D:\\data\\TT-6C-0' + str(j + 1) + '.txt'
    data = np.loadtxt(filepath1, delimiter=';')
    # Interpolate the measured stress over the common strain grid;
    # out-of-range strains map to zero stress.
    interp_exp = interp1d(-data[:, 2] / 2. / 250. - data[:, 3] / 2. / 250.,
                          data[:, 1] / 2., bounds_error=False, fill_value=0.)
    # interp_exp = UnivariateSpline(-data[:, 2] / 2. / 250. - data[:, 3] / 2. / 250.,
    # data[:, 1] / 2., k=3)
    # popt, pcov = curve_fit(f, eps_arr, interp_exp(eps_arr))
    # k1, k2, a = popt
    # e = f(eps_arr, k1, k2, a)
    # plt.plot(eps_arr, e)
    sig_lst.append(interp_exp(eps_arr))
    # Least-squares fit of the residual function f via Powell's method.
    result = minimize(
        f, params, method='powell', args=(eps_arr, interp_exp(eps_arr)))
    final = interp_exp(eps_arr) + result.residual
    print(params)
    # print interp_exp(params['a'].value)
    # Convert composite stress at the knee to a matrix-level stress.
    # NOTE(review): 25, 0.985 and 2.7 look like cross-section constants
    # (area / volume fractions) — confirm against the specimen geometry.
    print((interp_exp(params['a'].value) * 25 / (25 * 0.985 + 2.7)))
    if j == 1:
        # Plot the measured curve and the fitted model for test #2 only.
        plt.plot(eps_arr, interp_exp(eps_arr))
        plt.plot(eps_arr, final, 'k--')
        # plt.plot(eps_arr, interp_exp(eps_arr))
sig_avg = np.sum(sig_lst, axis=0) / 5.
# plt.plot(eps_arr, sig_avg)
# Derivatives of each stress-strain curve (candidate cracking indicator).
for k in range(5):
    dsig = np.gradient(sig_lst[k])
    # plt.plot(eps_arr, dsig)
plt.show()
| [
"rostislav.chudoba@rwth-aachen.de"
] | rostislav.chudoba@rwth-aachen.de |
4dc6e3283fd599472b323f22fc8998f493be658c | 215e491c9962f2e199f7f84a5743196f21da0332 | /week-01-unit-testing/examples/calculator/calculator_test.py | 722e6f3bf9e8548430069e813b522daf6f74fe24 | [] | no_license | kstager/Python300-SystemDevelopmentWithPython-Fall-2014 | dd07190e6a55470c44bbb366cb95a1d716b86866 | 7e85ef68bc59d311ec748b333e0bad9357b88855 | refs/heads/master | 2020-12-31T06:32:20.948094 | 2014-11-12T01:34:56 | 2014-11-12T01:34:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 300 | py | import unittest
import calculator_functions as calc
class TestCalculatorFunctions(unittest.TestCase):
    """Unit tests for the calculator_functions module."""

    def setUp(self):
        # Fixed operands shared by every test case.
        self.x = 2
        self.y = 3

    def test_add(self):
        result = calc.add(self.x, self.y)
        self.assertEqual(result, 5)
if __name__ == "__main__":
    # Run the test suite when this file is executed directly.
    unittest.main()
| [
"joseph.sheedy@gmail.com"
] | joseph.sheedy@gmail.com |
a5551807162c6a3b053a0c940419c974b142c505 | 4db29e0d5f2e050d21bbf67042c713d8fa0421b0 | /com/mason/redis/part_two/chapter06/chapter0612.py | 0320f31d7421bb54908fed4b9d89d3079f14ce83 | [] | no_license | MasonEcnu/RedisInAction | 80e5556554c7e390264edd391042b09271cbfca4 | 710fd0316c6aee857acd350a092b657465096ed1 | refs/heads/master | 2020-07-08T17:24:39.540181 | 2019-09-30T04:14:49 | 2019-09-30T04:14:49 | 203,731,184 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,857 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Address-book autocomplete.
# We set every score in the sorted set to 0 — this lets us exploit a
# property of sorted sets: when all members share the same score, they
# are ordered by member name, and with all scores at 0 that ordering is
# the binary (lexicographic) order of the strings.
import bisect
import uuid
import redis
from redis import Redis
from com.mason.redis_client import redisClient
valid_characters = "`abcdefghijklmnopqrstuvwxyz{"


def find_prefix_range(prefix: str):
    """Return the (start, end) guard members bracketing *prefix*.

    In a zero-score sorted set every member starting with *prefix* sorts
    strictly between the two returned strings: '`' precedes 'a' and '{'
    follows 'z' in ASCII, so they act as fence posts.
    """
    # Character that sorts immediately before the prefix's last character.
    last = prefix[-1:]
    index = bisect.bisect_left(valid_characters, last)
    predecessor = valid_characters[(index or 1) - 1]
    lower = prefix[:-1] + predecessor + '{'
    upper = prefix + '{'
    return lower, upper
# print(find_prefix_range("prefix"))
def autocomplete_on_prefix(conn: Redis, guild, prefix):
    """Return up to 10 guild members whose names start with *prefix*."""
    # Compute the lexicographic search range for the given prefix.
    start, end = find_prefix_range(prefix)
    # Append a UUID so concurrent autocompletes insert distinct markers.
    identifier = str(uuid.uuid4())
    start += identifier
    end += identifier
    zset_name = "members:" + guild
    # Insert the range's start and end markers into the sorted set.
    conn.zadd(zset_name, {start: 0, end: 0})
    pipe = conn.pipeline(True)
    items = []
    while 1:
        try:
            pipe.watch(zset_name)
            # Locate the ranks of the start and end markers.
            start_rank = pipe.zrank(zset_name, start)
            end_rank = pipe.zrank(zset_name, end)
            # Fetch at most 10 elements (excluding the end marker).
            query_range = min(start_rank + 9, end_rank - 2)
            pipe.multi()
            # Remove our markers and read the bracketed members atomically.
            pipe.zrem(zset_name, start, end)
            pipe.zrange(zset_name, start_rank, query_range)
            items = pipe.execute()[-1]
            break
        except redis.exceptions.WatchError:
            # Another client modified the autocomplete set while we were
            # watching it — retry the whole transaction.
            continue
    # Strip markers left behind by concurrently running autocompletes
    # (real member names never contain '{').
    return [item for item in items if "{" not in item]
def join_guild(conn: Redis, guild, user):
    """Register *user* as a member of *guild*.

    The score of 0 keeps the member set ordered purely by name, which
    the autocomplete range queries rely on.
    """
    member_set = "members:" + guild
    conn.zadd(member_set, {user: 0})
def leave_guild(conn: Redis, guild, user):
    """Drop *user* from the guild's member set."""
    member_set = "members:" + guild
    conn.zrem(member_set, user)
guild = "10086"
redisClient.delete("members:" + guild)
join_guild(redisClient, guild, "mason")
join_guild(redisClient, guild, "yahaha")
join_guild(redisClient, guild, "lilei")
join_guild(redisClient, guild, "hmeimei")
join_guild(redisClient, guild, "mmmeee")
join_guild(redisClient, guild, "lulala")
print(autocomplete_on_prefix(redisClient, guild, "mma"))
redisClient.delete("members:" + guild)
| [
"364207187@qq.com"
] | 364207187@qq.com |
373fbcea3668e9dda77cb6d9ac64861242498f33 | ed0f9eb0c1cb4858d91ef7e2d435db307f23a5a5 | /dist/manage/django/views/debug.py | 8538f2c3d2724abd15ddec1160103d0f911d947d | [] | no_license | hjlhehehe123/ATC_Data | 81b4622e7279aa9cc2013db8cc5a71d33561e768 | ad35e61afb8e87d8bab2d2b3aeea08e9409d56c0 | refs/heads/master | 2023-07-13T16:23:45.951584 | 2021-08-20T12:37:34 | 2021-08-20T12:37:34 | 256,994,694 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 21,397 | py | import functools
import re
import types
from pathlib import Path
import sys
from django.conf import settings
from django.http import Http404, HttpResponse, HttpResponseNotFound
from django.template import Context, Engine, TemplateDoesNotExist
from django.template.defaultfilters import pprint
from django.urls import resolve
from django.utils import timezone
from django.utils.datastructures import MultiValueDict
from django.utils.encoding import force_str
from django.utils.module_loading import import_string
from django.utils.regex_helper import _lazy_re_compile
from django.utils.version import get_docs_version
# Minimal Django templates engine to render the error templates
# regardless of the project's TEMPLATES setting. Templates are
# read directly from the filesystem so that the error handler
# works even if the template loader is broken.
DEBUG_ENGINE = Engine(
    debug=True,
    # Keep {% load i18n %} working inside the bundled error templates.
    libraries={'i18n': 'django.templatetags.i18n'},
)

# Directory containing the bundled technical_500/technical_404 templates.
CURRENT_DIR = Path(__file__).parent
class CallableSettingWrapper:
    """
    Wrapper that hides a callable appearing among the settings.

    * Keeps the debug page from invoking it (#21345).
    * Keeps the debug page intact when the callable forbids setting
      attributes on itself (#23070).
    """

    def __init__(self, callable_setting):
        # Stored privately; only the repr is ever exposed.
        self._wrapped = callable_setting

    def __repr__(self):
        # Delegate so the debug page shows the underlying callable's repr.
        return repr(self._wrapped)
def technical_500_response(request, exc_type, exc_value, tb, status_code=500):
    """
    Create a technical server error response. The last three arguments are
    the values returned from sys.exc_info() and friends.
    """
    # The reporter class may be overridden per request (see
    # get_exception_reporter_class).
    reporter = get_exception_reporter_class(request)(request, exc_type, exc_value, tb)
    # Content-negotiate: HTML page for browsers, plain text otherwise.
    if request.accepts('text/html'):
        html = reporter.get_traceback_html()
        return HttpResponse(html, status=status_code, content_type='text/html')
    else:
        text = reporter.get_traceback_text()
        return HttpResponse(text, status=status_code, content_type='text/plain; charset=utf-8')
@functools.lru_cache()
def get_default_exception_reporter_filter():
    """Return the filter named by settings.DEFAULT_EXCEPTION_REPORTER_FILTER."""
    # Instantiate the default filter for the first time and cache it.
    return import_string(settings.DEFAULT_EXCEPTION_REPORTER_FILTER)()
def get_exception_reporter_filter(request):
    """Return the exception reporter filter to use for *request*.

    A per-request override may be attached as
    ``request.exception_reporter_filter``; otherwise the cached default
    built from settings is used.
    """
    fallback = get_default_exception_reporter_filter()
    return getattr(request, 'exception_reporter_filter', fallback)
def get_exception_reporter_class(request):
    """Return the exception reporter class to use for *request*.

    A per-request override may be attached as
    ``request.exception_reporter_class``; otherwise the class named by
    ``settings.DEFAULT_EXCEPTION_REPORTER`` is imported and used.
    """
    fallback = import_string(settings.DEFAULT_EXCEPTION_REPORTER)
    return getattr(request, 'exception_reporter_class', fallback)
class SafeExceptionReporterFilter:
    """
    Use annotations made by the sensitive_post_parameters and
    sensitive_variables decorators to filter out sensitive information.
    """
    # Placeholder substituted for any value judged sensitive.
    cleansed_substitute = '********************'
    # Setting names matching this pattern are always redacted.
    hidden_settings = _lazy_re_compile('API|TOKEN|KEY|SECRET|PASS|SIGNATURE', flags=re.I)

    def cleanse_setting(self, key, value):
        """
        Cleanse an individual setting key/value of sensitive content. If the
        value is a dictionary, recursively cleanse the keys in that dictionary.
        """
        try:
            if self.hidden_settings.search(key):
                cleansed = self.cleansed_substitute
            elif isinstance(value, dict):
                cleansed = {k: self.cleanse_setting(k, v) for k, v in value.items()}
            elif isinstance(value, list):
                cleansed = [self.cleanse_setting('', v) for v in value]
            elif isinstance(value, tuple):
                # Preserve the container type for tuples.
                cleansed = tuple([self.cleanse_setting('', v) for v in value])
            else:
                cleansed = value
        except TypeError:
            # If the key isn't regex-able, just return as-is.
            cleansed = value
        # Never expose a raw callable on the debug page (see
        # CallableSettingWrapper for the rationale).
        if callable(cleansed):
            cleansed = CallableSettingWrapper(cleansed)
        return cleansed

    def get_safe_settings(self):
        """
        Return a dictionary of the settings module with values of sensitive
        settings replaced with stars (*********).
        """
        settings_dict = {}
        for k in dir(settings):
            # Only upper-case names are treated as settings.
            if k.isupper():
                settings_dict[k] = self.cleanse_setting(k, getattr(settings, k))
        return settings_dict

    def get_safe_request_meta(self, request):
        """
        Return a dictionary of request.META with sensitive values redacted.
        """
        if not hasattr(request, 'META'):
            return {}
        return {k: self.cleanse_setting(k, v) for k, v in request.META.items()}

    def is_active(self, request):
        """
        This filter is to add safety in production environments (i.e. DEBUG
        is False). If DEBUG is True then your site is not safe anyway.
        This hook is provided as a convenience to easily activate or
        deactivate the filter on a per request basis.
        """
        return settings.DEBUG is False

    def get_cleansed_multivaluedict(self, request, multivaluedict):
        """
        Replace the keys in a MultiValueDict marked as sensitive with stars.
        This mitigates leaking sensitive POST parameters if something like
        request.POST['nonexistent_key'] throws an exception (#21098).
        """
        sensitive_post_parameters = getattr(request, 'sensitive_post_parameters', [])
        if self.is_active(request) and sensitive_post_parameters:
            # Copy before mutating so the caller's dict is untouched.
            multivaluedict = multivaluedict.copy()
            for param in sensitive_post_parameters:
                if param in multivaluedict:
                    multivaluedict[param] = self.cleansed_substitute
        return multivaluedict

    def get_post_parameters(self, request):
        """
        Replace the values of POST parameters marked as sensitive with
        stars (*********).
        """
        if request is None:
            return {}
        else:
            sensitive_post_parameters = getattr(request, 'sensitive_post_parameters', [])
            if self.is_active(request) and sensitive_post_parameters:
                cleansed = request.POST.copy()
                if sensitive_post_parameters == '__ALL__':
                    # Cleanse all parameters.
                    for k in cleansed:
                        cleansed[k] = self.cleansed_substitute
                    return cleansed
                else:
                    # Cleanse only the specified parameters.
                    for param in sensitive_post_parameters:
                        if param in cleansed:
                            cleansed[param] = self.cleansed_substitute
                    return cleansed
            else:
                return request.POST

    def cleanse_special_types(self, request, value):
        """Redact sensitive keys inside MultiValueDict-like frame values."""
        try:
            # If value is lazy or a complex object of another kind, this check
            # might raise an exception. isinstance checks that lazy
            # MultiValueDicts will have a return value.
            is_multivalue_dict = isinstance(value, MultiValueDict)
        except Exception as e:
            return '{!r} while evaluating {!r}'.format(e, value)

        if is_multivalue_dict:
            # Cleanse MultiValueDicts (request.POST is the one we usually care about)
            value = self.get_cleansed_multivaluedict(request, value)
        return value

    def get_traceback_frame_variables(self, request, tb_frame):
        """
        Replace the values of variables marked as sensitive with
        stars (*********).
        """
        # Loop through the frame's callers to see if the sensitive_variables
        # decorator was used.
        current_frame = tb_frame.f_back
        sensitive_variables = None
        while current_frame is not None:
            if (current_frame.f_code.co_name == 'sensitive_variables_wrapper' and
                    'sensitive_variables_wrapper' in current_frame.f_locals):
                # The sensitive_variables decorator was used, so we take note
                # of the sensitive variables' names.
                wrapper = current_frame.f_locals['sensitive_variables_wrapper']
                sensitive_variables = getattr(wrapper, 'sensitive_variables', None)
                break
            current_frame = current_frame.f_back

        cleansed = {}
        if self.is_active(request) and sensitive_variables:
            if sensitive_variables == '__ALL__':
                # Cleanse all variables
                for name in tb_frame.f_locals:
                    cleansed[name] = self.cleansed_substitute
            else:
                # Cleanse specified variables
                for name, value in tb_frame.f_locals.items():
                    if name in sensitive_variables:
                        value = self.cleansed_substitute
                    else:
                        value = self.cleanse_special_types(request, value)
                    cleansed[name] = value
        else:
            # Potentially cleanse the request and any MultiValueDicts if they
            # are one of the frame variables.
            for name, value in tb_frame.f_locals.items():
                cleansed[name] = self.cleanse_special_types(request, value)

        if (tb_frame.f_code.co_name == 'sensitive_variables_wrapper' and
                'sensitive_variables_wrapper' in tb_frame.f_locals):
            # For good measure, obfuscate the decorated function's arguments in
            # the sensitive_variables decorator's frame, in case the variables
            # associated with those arguments were meant to be obfuscated from
            # the decorated function's frame.
            cleansed['func_args'] = self.cleansed_substitute
            cleansed['func_kwargs'] = self.cleansed_substitute

        return cleansed.items()
class ExceptionReporter:
    """Organize and coordinate reporting on exceptions."""

    def __init__(self, request, exc_type, exc_value, tb, is_email=False):
        # exc_type/exc_value/tb mirror the triple from sys.exc_info().
        self.request = request
        self.filter = get_exception_reporter_filter(self.request)
        self.exc_type = exc_type
        self.exc_value = exc_value
        self.tb = tb
        # True when rendering for the error-report e-mail rather than a page.
        self.is_email = is_email

        # Template-engine debug payload, if the exception carries one.
        self.template_info = getattr(self.exc_value, 'template_debug', None)
        self.template_does_not_exist = False
        self.postmortem = None

    def get_traceback_data(self):
        """Return a dictionary containing traceback information."""
        if self.exc_type and issubclass(self.exc_type, TemplateDoesNotExist):
            self.template_does_not_exist = True
            self.postmortem = self.exc_value.chain or [self.exc_value]

        frames = self.get_traceback_frames()
        for i, frame in enumerate(frames):
            if 'vars' in frame:
                frame_vars = []
                for k, v in frame['vars']:
                    v = pprint(v)
                    # Trim large blobs of data
                    if len(v) > 4096:
                        v = '%s… <trimmed %d bytes string>' % (v[0:4096], len(v))
                    frame_vars.append((k, v))
                frame['vars'] = frame_vars
            frames[i] = frame

        unicode_hint = ''
        if self.exc_type and issubclass(self.exc_type, UnicodeError):
            # Show a short window of text around the offending characters.
            start = getattr(self.exc_value, 'start', None)
            end = getattr(self.exc_value, 'end', None)
            if start is not None and end is not None:
                unicode_str = self.exc_value.args[1]
                unicode_hint = force_str(
                    unicode_str[max(start - 5, 0):min(end + 5, len(unicode_str))],
                    'ascii', errors='replace'
                )
        from django import get_version

        if self.request is None:
            user_str = None
        else:
            try:
                user_str = str(self.request.user)
            except Exception:
                # request.user may raise OperationalError if the database is
                # unavailable, for example.
                user_str = '[unable to retrieve the current user]'

        c = {
            'is_email': self.is_email,
            'unicode_hint': unicode_hint,
            'frames': frames,
            'request': self.request,
            'request_meta': self.filter.get_safe_request_meta(self.request),
            'user_str': user_str,
            'filtered_POST_items': list(self.filter.get_post_parameters(self.request).items()),
            'settings': self.filter.get_safe_settings(),
            'sys_executable': sys.executable,
            'sys_version_info': '%d.%d.%d' % sys.version_info[0:3],
            'server_time': timezone.now(),
            'django_version_info': get_version(),
            'sys_path': sys.path,
            'template_info': self.template_info,
            'template_does_not_exist': self.template_does_not_exist,
            'postmortem': self.postmortem,
        }
        if self.request is not None:
            c['request_GET_items'] = self.request.GET.items()
            c['request_FILES_items'] = self.request.FILES.items()
            c['request_COOKIES_items'] = self.request.COOKIES.items()
        # Check whether exception info is available
        if self.exc_type:
            c['exception_type'] = self.exc_type.__name__
        if self.exc_value:
            c['exception_value'] = str(self.exc_value)
        if frames:
            c['lastframe'] = frames[-1]
        return c

    def get_traceback_html(self):
        """Return HTML version of debug 500 HTTP error page."""
        with Path(CURRENT_DIR, 'templates', 'technical_500.html').open(encoding='utf-8') as fh:
            t = DEBUG_ENGINE.from_string(fh.read())
        c = Context(self.get_traceback_data(), use_l10n=False)
        return t.render(c)

    def get_traceback_text(self):
        """Return plain text version of debug 500 HTTP error page."""
        with Path(CURRENT_DIR, 'templates', 'technical_500.txt').open(encoding='utf-8') as fh:
            t = DEBUG_ENGINE.from_string(fh.read())
        c = Context(self.get_traceback_data(), autoescape=False, use_l10n=False)
        return t.render(c)

    def _get_source(self, filename, loader, module_name):
        """Return the module's source as a list of lines, or None."""
        source = None
        # Prefer the loader (works for zipimports and frozen modules).
        if hasattr(loader, 'get_source'):
            try:
                source = loader.get_source(module_name)
            except ImportError:
                pass
            if source is not None:
                source = source.splitlines()
        if source is None:
            # Fall back to reading the file directly (bytes; decoded later).
            try:
                with open(filename, 'rb') as fp:
                    source = fp.read().splitlines()
            except OSError:
                pass
        return source

    def _get_lines_from_file(self, filename, lineno, context_lines, loader=None, module_name=None):
        """
        Return context_lines before and after lineno from file.
        Return (pre_context_lineno, pre_context, context_line, post_context).
        """
        source = self._get_source(filename, loader, module_name)
        if source is None:
            return None, [], None, []

        # If we just read the source from a file, or if the loader did not
        # apply tokenize.detect_encoding to decode the source into a
        # string, then we should do that ourselves.
        if isinstance(source[0], bytes):
            encoding = 'ascii'
            for line in source[:2]:
                # File coding may be specified. Match pattern from PEP-263
                # (https://www.python.org/dev/peps/pep-0263/)
                match = re.search(br'coding[:=]\s*([-\w.]+)', line)
                if match:
                    encoding = match[1].decode('ascii')
                    break
            source = [str(sline, encoding, 'replace') for sline in source]

        lower_bound = max(0, lineno - context_lines)
        upper_bound = lineno + context_lines

        try:
            pre_context = source[lower_bound:lineno]
            context_line = source[lineno]
            post_context = source[lineno + 1:upper_bound]
        except IndexError:
            return None, [], None, []
        return lower_bound, pre_context, context_line, post_context

    def get_traceback_frames(self):
        """Build the list of frame dicts rendered by the error templates."""
        def explicit_or_implicit_cause(exc_value):
            # 'raise X from Y' (__cause__) wins over implicit chaining
            # (__context__).
            explicit = getattr(exc_value, '__cause__', None)
            implicit = getattr(exc_value, '__context__', None)
            return explicit or implicit

        # Get the exception and all its causes
        exceptions = []
        exc_value = self.exc_value
        while exc_value:
            exceptions.append(exc_value)
            exc_value = explicit_or_implicit_cause(exc_value)
            if exc_value in exceptions:
                # Avoid infinite loop if there's a cyclic reference (#29393).
                break

        frames = []
        # No exceptions were supplied to ExceptionReporter
        if not exceptions:
            return frames

        # In case there's just one exception, take the traceback from self.tb
        exc_value = exceptions.pop()
        tb = self.tb if not exceptions else exc_value.__traceback__

        while tb is not None:
            # Support for __traceback_hide__ which is used by a few libraries
            # to hide internal frames.
            if tb.tb_frame.f_locals.get('__traceback_hide__'):
                tb = tb.tb_next
                continue
            filename = tb.tb_frame.f_code.co_filename
            function = tb.tb_frame.f_code.co_name
            lineno = tb.tb_lineno - 1
            loader = tb.tb_frame.f_globals.get('__loader__')
            module_name = tb.tb_frame.f_globals.get('__name__') or ''
            pre_context_lineno, pre_context, context_line, post_context = self._get_lines_from_file(
                filename, lineno, 7, loader, module_name,
            )
            if pre_context_lineno is None:
                pre_context_lineno = lineno
                pre_context = []
                context_line = '<source code not available>'
                post_context = []
            frames.append({
                'exc_cause': explicit_or_implicit_cause(exc_value),
                'exc_cause_explicit': getattr(exc_value, '__cause__', True),
                'tb': tb,
                'type': 'django' if module_name.startswith('django.') else 'user',
                'filename': filename,
                'function': function,
                'lineno': lineno + 1,
                'vars': self.filter.get_traceback_frame_variables(self.request, tb.tb_frame),
                'id': id(tb),
                'pre_context': pre_context,
                'context_line': context_line,
                'post_context': post_context,
                'pre_context_lineno': pre_context_lineno + 1,
            })

            # If the traceback for current exception is consumed, try the
            # other exception.
            if not tb.tb_next and exceptions:
                exc_value = exceptions.pop()
                tb = exc_value.__traceback__
            else:
                tb = tb.tb_next

        return frames
def technical_404_response(request, exception):
    """Create a technical 404 error response. `exception` is the Http404."""
    try:
        error_url = exception.args[0]['path']
    except (IndexError, TypeError, KeyError):
        error_url = request.path_info[1:]  # Trim leading slash

    try:
        tried = exception.args[0]['tried']
    except (IndexError, TypeError, KeyError):
        tried = []
    else:
        # A fresh project (only the default admin URLconf) gets the
        # friendly welcome page instead of the technical 404.
        if (not tried or (  # empty URLconf
            request.path == '/' and
            len(tried) == 1 and  # default URLconf
            len(tried[0]) == 1 and
            getattr(tried[0][0], 'app_name', '') == getattr(tried[0][0], 'namespace', '') == 'admin'
        )):
            return default_urlconf(request)

    urlconf = getattr(request, 'urlconf', settings.ROOT_URLCONF)
    if isinstance(urlconf, types.ModuleType):
        urlconf = urlconf.__name__

    # Work out a dotted name for the view that raised the Http404, if any.
    caller = ''
    try:
        resolver_match = resolve(request.path)
    except Http404:
        pass
    else:
        obj = resolver_match.func

        if hasattr(obj, '__name__'):
            caller = obj.__name__
        elif hasattr(obj, '__class__') and hasattr(obj.__class__, '__name__'):
            caller = obj.__class__.__name__

        if hasattr(obj, '__module__'):
            module = obj.__module__
            caller = '%s.%s' % (module, caller)

    with Path(CURRENT_DIR, 'templates', 'technical_404.html').open(encoding='utf-8') as fh:
        t = DEBUG_ENGINE.from_string(fh.read())
    reporter_filter = get_default_exception_reporter_filter()
    c = Context({
        'urlconf': urlconf,
        'root_urlconf': settings.ROOT_URLCONF,
        'request_path': error_url,
        'urlpatterns': tried,
        'reason': str(exception),
        'request': request,
        'settings': reporter_filter.get_safe_settings(),
        'raising_view_name': caller,
    })
    return HttpResponseNotFound(t.render(c), content_type='text/html')
def default_urlconf(request):
    """Create an empty URLconf 404 error response (the welcome page)."""
    template_path = Path(CURRENT_DIR, 'templates', 'default_urlconf.html')
    with template_path.open(encoding='utf-8') as fh:
        t = DEBUG_ENGINE.from_string(fh.read())
    context = Context({
        'version': get_docs_version(),
    })
    return HttpResponse(t.render(context), content_type='text/html')
| [
"1598214715@qq.com"
] | 1598214715@qq.com |
b22f9dbd128e6619dfe9a6447d989c2eb3054788 | c15a28ae62eb94dbf3ed13e2065195e572a9988e | /Cook book/src/8/how_to_define_an_interface_or_abstract_base_class/example.py | 2d4af5f1e99b6b30415fd216144a85ceba321fa2 | [] | no_license | xuyuchends1/python | 10798c92840a1a59d50f5dc5738b2881e65f7865 | 545d950a3d2fee799902658e8133e3692939496b | refs/heads/master | 2021-01-25T07:07:04.812140 | 2020-02-28T09:25:15 | 2020-02-28T09:25:15 | 93,647,064 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,206 | py | # Defining a simple abstract base class
from abc import ABCMeta, abstractmethod
class IStream(metaclass=ABCMeta):
    """Abstract byte-stream interface.

    Concrete subclasses (or registered virtual subclasses) must provide
    both read() and write(); the class itself cannot be instantiated.
    """

    @abstractmethod
    def read(self, maxbytes=-1):
        pass

    @abstractmethod
    def write(self, data):
        pass
# Example implementation
class SocketStream(IStream):
    """Concrete IStream that just logs its operations (demo stand-in)."""

    def read(self, maxbytes=-1):
        print('reading')

    def write(self, data):
        print('writing')
# Example of type checking
def serialize(obj, stream):
    """Pretend to serialize *obj*; *stream* must satisfy the IStream interface."""
    if not isinstance(stream, IStream):
        raise TypeError('Expected an IStream')
    print('serializing')
# Examples
if __name__ == '__main__':
    # Attempt to instantiate ABC directly (doesn't work)
    try:
        a = IStream()
    except TypeError as e:
        print(e)

    # Instantiation of a concrete implementation
    a = SocketStream()
    a.read()
    a.write('data')

    # Passing to type-check function
    serialize(None, a)

    # Attempt to pass a file-like object to serialize (fails)
    import sys
    try:
        serialize(None, sys.stdout)
    except TypeError as e:
        print(e)

    # Register file streams and retry: register() makes io.IOBase a
    # virtual subclass of IStream, so the isinstance check now passes.
    import io
    IStream.register(io.IOBase)
    serialize(None, sys.stdout)
| [
"xuyuchends@163.com"
] | xuyuchends@163.com |
9769824aa87397a44d261ff48b366b0009967fbf | fa6fa9e154a205d575eda6615e8b62f4cce77a3d | /office365/sharepoint/permissions/permission_kind.py | 699fa02451b6d9b81eba7759583a17ab35a9b527 | [
"MIT"
] | permissive | beliaev-maksim/Office365-REST-Python-Client | 7f94b7b40227de1192bfc0cb325107482caf443c | b2fd54701d83cc91eb5ba3a0ec352a93ded24885 | refs/heads/master | 2023-08-14T20:47:51.972883 | 2021-09-05T12:44:47 | 2021-09-05T12:44:47 | 283,984,055 | 0 | 0 | MIT | 2020-07-31T08:30:48 | 2020-07-31T08:30:48 | null | UTF-8 | Python | false | false | 1,026 | py | class PermissionKind:
"""Specifies permissions that are used to define user roles."""
def __init__(self):
pass
EmptyMask = 0
ViewListItems = 1
AddListItems = 2
EditListItems = 3
DeleteListItems = 4
ApproveItems = 5
OpenItems = 6
ViewVersions = 7
DeleteVersions = 8
CancelCheckout = 9
ManagePersonalViews = 10
ManageLists = 12
ViewFormPages = 13
AnonymousSearchAccessList = 14
Open = 17
ViewPages = 18
AddAndCustomizePages = 19
ApplyThemeAndBorder = 20
ApplyStyleSheets = 21
ViewUsageData = 22
CreateSSCSite = 23
ManageSubwebs = 24
CreateGroups = 25
ManagePermissions = 26
BrowseDirectories = 27
BrowseUserInfo = 28
AddDelPrivateWebParts = 29
UpdatePersonalWebParts = 30
ManageWeb = 31
AnonymousSearchAccessWebLists = 32
UseClientIntegration = 37
UseRemoteAPIs = 38
ManageAlerts = 39
CreateAlerts = 40
EditMyUserInfo = 41
EnumeratePermissions = 63
FullMask = 65
| [
"vvgrem@gmail.com"
] | vvgrem@gmail.com |
4e725d904e1ecd922e7eecc433b83b4e5488f4c2 | cb882d5bc1a22b6d22a2d18a4ece8ec362f81b4b | /app/migrations/0004_product_user.py | 7ff7069f9a12115a709a07f6048c77bcabccd0b3 | [] | no_license | Vazimax/simple_djangoapp | 076806ac70f136f2a964db068b7b904f9429073e | 08e0f3f4b3830c6b1cde6bea69849775bd7ba578 | refs/heads/main | 2023-05-24T09:08:30.191567 | 2021-06-08T14:03:31 | 2021-06-08T14:03:31 | 375,030,411 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 929 | py | # Generated by Django 3.1.7 on 2021-06-07 19:22
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: adds the Product and User models.

    User gains a many-to-one link to Product via the 'products' FK
    (cascade delete).
    """

    dependencies = [
        ('app', '0003_female_male'),
    ]

    operations = [
        migrations.CreateModel(
            name='Product',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=100)),
            ],
        ),
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100)),
                # Deleting a Product cascades to the Users referencing it.
                ('products', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app.product')),
            ],
        ),
    ]
| [
"aboubakr.elhabti@gmail.com"
] | aboubakr.elhabti@gmail.com |
761e15410951d9ff18fef66225b3b1ca4c4188ec | 940a0f48027eefbfe028f2116aeb9702e7122f05 | /setup.py | 094555fbf31f039650d3781c5d1435c39416c7e2 | [] | no_license | umeboshi2/useless | 1d0f67335fad0897619859a2095fd32e649b0994 | 630c7491cfb7a70765878779f8496046d6ac18df | refs/heads/master | 2021-01-02T08:21:14.679866 | 2013-01-22T21:44:00 | 2013-01-22T21:44:00 | 7,497,326 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 722 | py | import sys
from distutils.core import setup
# Sub-packages that may be built individually by naming one as the
# first command-line argument (e.g. "setup.py base install").
PACKAGES = ['base', 'debian', 'sqlgen', 'db', 'kdebase', 'kdedb']

package = None
if sys.argv[1] in PACKAGES:
    package = sys.argv[1]
    # Remove the package name so distutils sees a normal command line.
    del sys.argv[1]

# NOTE(review): pd is never used — package_dir below hard-codes
# {'' : '.'} instead. Confirm whether 'src' layout was intended.
pd = {'' : 'src'}

if package is not None:
    packages = ['useless/'+package]
    if package == 'base':
        # 'base' also ships the top-level 'useless' namespace package.
        packages = ['useless'] + packages
else:
    # No recognised package argument: build metadata only, no modules.
    packages = []
    package = 'dummy'

url = 'http://useless.berlios.de'

setup(name='useless-'+package,
      version="0.2",
      description = 'useless packages and modules for basic stuff',
      author='Joseph Rawson',
      author_email='umeboshi@gregscomputerservice.com',
      url=url,
      package_dir = {'' : '.'},
      packages = packages
      )
| [
"umeboshi@70758ab2-d2f7-0310-a994-9f7f813c4004"
] | umeboshi@70758ab2-d2f7-0310-a994-9f7f813c4004 |
7990ced14b088830eb2baff7df094aa02ac38795 | 51f887286aa3bd2c3dbe4c616ad306ce08976441 | /pybind/slxos/v17s_1_02/mpls_state/ldp/path/__init__.py | 6794b5af3d53be00700cb98a8c4cff93301283c2 | [
"Apache-2.0"
] | permissive | b2220333/pybind | a8c06460fd66a97a78c243bf144488eb88d7732a | 44c467e71b2b425be63867aba6e6fa28b2cfe7fb | refs/heads/master | 2020-03-18T09:09:29.574226 | 2018-04-03T20:09:50 | 2018-04-03T20:09:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,406 | py |
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
import upstream_sessions
import downstream_sessions
class path(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module brocade-mpls-operational - based on the path /mpls-state/ldp/path. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: LDP Path information
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__destination_route','__upstream_sessions','__downstream_sessions',)
_yang_name = 'path'
_rest_name = 'path'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
path_helper_ = kwargs.pop("path_helper", None)
if path_helper_ is False:
self._path_helper = False
elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
self._path_helper = path_helper_
elif hasattr(self, "_parent"):
path_helper_ = getattr(self._parent, "_path_helper", False)
self._path_helper = path_helper_
else:
self._path_helper = False
extmethods = kwargs.pop("extmethods", None)
if extmethods is False:
self._extmethods = False
elif extmethods is not None and isinstance(extmethods, dict):
self._extmethods = extmethods
elif hasattr(self, "_parent"):
extmethods = getattr(self._parent, "_extmethods", None)
self._extmethods = extmethods
else:
self._extmethods = False
self.__upstream_sessions = YANGDynClass(base=YANGListType("ip",upstream_sessions.upstream_sessions, yang_name="upstream-sessions", rest_name="upstream-sessions", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='ip', extensions={u'tailf-common': {u'callpoint': u'mpls-path-stream-upstream-sessions-1'}}), is_container='list', yang_name="upstream-sessions", rest_name="upstream-sessions", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-path-stream-upstream-sessions-1'}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='list', is_config=False)
self.__downstream_sessions = YANGDynClass(base=YANGListType("ip",downstream_sessions.downstream_sessions, yang_name="downstream-sessions", rest_name="downstream-sessions", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='ip', extensions={u'tailf-common': {u'callpoint': u'mpls-path-stream-downstream-sessions-1'}}), is_container='list', yang_name="downstream-sessions", rest_name="downstream-sessions", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-path-stream-downstream-sessions-1'}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='list', is_config=False)
self.__destination_route = YANGDynClass(base=unicode, is_leaf=True, yang_name="destination-route", rest_name="destination-route", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='string', is_config=False)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'mpls-state', u'ldp', u'path']
def _rest_path(self):
if hasattr(self, "_parent"):
if self._rest_name:
return self._parent._rest_path()+[self._rest_name]
else:
return self._parent._rest_path()
else:
return [u'mpls-state', u'ldp', u'path']
  def _get_destination_route(self):
    """
    Getter method for destination_route, mapped from YANG variable /mpls_state/ldp/path/destination_route (string)

    YANG Description: mpls_ldp_destination_route

    :return: the YANGDynClass-wrapped value of the destination-route key leaf.
    """
    return self.__destination_route
  def _set_destination_route(self, v, load=False):
    """
    Setter method for destination_route, mapped from YANG variable /mpls_state/ldp/path/destination_route (string)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_destination_route is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_destination_route() directly.

    YANG Description: mpls_ldp_destination_route
    """
    # This leaf is a list key: once this node lives inside an instantiated
    # list, direct assignment is forbidden (keys may only change via load).
    parent = getattr(self, "_parent", None)
    if parent is not None and load is False:
      raise AttributeError("Cannot set keys directly when" +
                             " within an instantiated list")

    # Unwrap values that carry their own unified type before validation.
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      # Re-wrap the value so pyangbind enforces the leaf's type and registers
      # its path; a failure here means the value is not string-compatible.
      t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name="destination-route", rest_name="destination-route", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='string', is_config=False)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """destination_route must be of a type compatible with string""",
          'defined-type': "string",
          'generated-type': """YANGDynClass(base=unicode, is_leaf=True, yang_name="destination-route", rest_name="destination-route", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='string', is_config=False)""",
        })

    self.__destination_route = t
    # Notify the parent wrapper (if any) that this node changed.
    if hasattr(self, '_set'):
      self._set()
  def _unset_destination_route(self):
    # Reset the leaf to a fresh, empty YANGDynClass instance, discarding any
    # previously loaded value.
    self.__destination_route = YANGDynClass(base=unicode, is_leaf=True, yang_name="destination-route", rest_name="destination-route", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='string', is_config=False)
  def _get_upstream_sessions(self):
    """
    Getter method for upstream_sessions, mapped from YANG variable /mpls_state/ldp/path/upstream_sessions (list)

    :return: the YANGDynClass-wrapped list (keyed by 'ip') of upstream sessions.
    """
    return self.__upstream_sessions
  def _set_upstream_sessions(self, v, load=False):
    """
    Setter method for upstream_sessions, mapped from YANG variable /mpls_state/ldp/path/upstream_sessions (list)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_upstream_sessions is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_upstream_sessions() directly.
    """
    # Unwrap values that carry their own unified type before validation.
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      # Re-wrap so pyangbind enforces list keying ('ip'), path registration
      # and the tailf callpoint extension.
      t = YANGDynClass(v,base=YANGListType("ip",upstream_sessions.upstream_sessions, yang_name="upstream-sessions", rest_name="upstream-sessions", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='ip', extensions={u'tailf-common': {u'callpoint': u'mpls-path-stream-upstream-sessions-1'}}), is_container='list', yang_name="upstream-sessions", rest_name="upstream-sessions", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-path-stream-upstream-sessions-1'}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='list', is_config=False)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """upstream_sessions must be of a type compatible with list""",
          'defined-type': "list",
          'generated-type': """YANGDynClass(base=YANGListType("ip",upstream_sessions.upstream_sessions, yang_name="upstream-sessions", rest_name="upstream-sessions", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='ip', extensions={u'tailf-common': {u'callpoint': u'mpls-path-stream-upstream-sessions-1'}}), is_container='list', yang_name="upstream-sessions", rest_name="upstream-sessions", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-path-stream-upstream-sessions-1'}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='list', is_config=False)""",
        })

    self.__upstream_sessions = t
    # Notify the parent wrapper (if any) that this node changed.
    if hasattr(self, '_set'):
      self._set()
  def _unset_upstream_sessions(self):
    # Reset the list to a fresh, empty YANGDynClass instance.
    self.__upstream_sessions = YANGDynClass(base=YANGListType("ip",upstream_sessions.upstream_sessions, yang_name="upstream-sessions", rest_name="upstream-sessions", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='ip', extensions={u'tailf-common': {u'callpoint': u'mpls-path-stream-upstream-sessions-1'}}), is_container='list', yang_name="upstream-sessions", rest_name="upstream-sessions", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-path-stream-upstream-sessions-1'}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='list', is_config=False)
  def _get_downstream_sessions(self):
    """
    Getter method for downstream_sessions, mapped from YANG variable /mpls_state/ldp/path/downstream_sessions (list)

    :return: the YANGDynClass-wrapped list (keyed by 'ip') of downstream sessions.
    """
    return self.__downstream_sessions
  def _set_downstream_sessions(self, v, load=False):
    """
    Setter method for downstream_sessions, mapped from YANG variable /mpls_state/ldp/path/downstream_sessions (list)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_downstream_sessions is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_downstream_sessions() directly.
    """
    # Unwrap values that carry their own unified type before validation.
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      # Re-wrap so pyangbind enforces list keying ('ip'), path registration
      # and the tailf callpoint extension.
      t = YANGDynClass(v,base=YANGListType("ip",downstream_sessions.downstream_sessions, yang_name="downstream-sessions", rest_name="downstream-sessions", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='ip', extensions={u'tailf-common': {u'callpoint': u'mpls-path-stream-downstream-sessions-1'}}), is_container='list', yang_name="downstream-sessions", rest_name="downstream-sessions", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-path-stream-downstream-sessions-1'}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='list', is_config=False)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """downstream_sessions must be of a type compatible with list""",
          'defined-type': "list",
          'generated-type': """YANGDynClass(base=YANGListType("ip",downstream_sessions.downstream_sessions, yang_name="downstream-sessions", rest_name="downstream-sessions", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='ip', extensions={u'tailf-common': {u'callpoint': u'mpls-path-stream-downstream-sessions-1'}}), is_container='list', yang_name="downstream-sessions", rest_name="downstream-sessions", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-path-stream-downstream-sessions-1'}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='list', is_config=False)""",
        })

    self.__downstream_sessions = t
    # Notify the parent wrapper (if any) that this node changed.
    if hasattr(self, '_set'):
      self._set()
  def _unset_downstream_sessions(self):
    # Reset the list to a fresh, empty YANGDynClass instance.
    self.__downstream_sessions = YANGDynClass(base=YANGListType("ip",downstream_sessions.downstream_sessions, yang_name="downstream-sessions", rest_name="downstream-sessions", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='ip', extensions={u'tailf-common': {u'callpoint': u'mpls-path-stream-downstream-sessions-1'}}), is_container='list', yang_name="downstream-sessions", rest_name="downstream-sessions", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-path-stream-downstream-sessions-1'}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='list', is_config=False)
destination_route = __builtin__.property(_get_destination_route)
upstream_sessions = __builtin__.property(_get_upstream_sessions)
downstream_sessions = __builtin__.property(_get_downstream_sessions)
_pyangbind_elements = {'destination_route': destination_route, 'upstream_sessions': upstream_sessions, 'downstream_sessions': downstream_sessions, }
| [
"badaniya@brocade.com"
] | badaniya@brocade.com |
089a86f2514e7fdb5dd0bfdb50fbb19280ef6f2f | d81dc8eda4aed1e66a2275ddd7463eaa90789ff4 | /Gesture Recognition/Video.py | 894fa066f68358ce312358d9fced5003a5219204 | [] | no_license | ai3DVision/BlendedJointAttention | c01b8b6b0c33923e2e7c6719765c427c1c5e5439 | 2bf9445d7749c9f138df950aea9dd101c8713ff4 | refs/heads/master | 2020-03-15T23:42:18.307922 | 2016-08-25T13:26:53 | 2016-08-25T13:26:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,336 | py | import cv2
import sys
# Haar cascade classifiers for frontal faces and (within a face) the nose.
faceCascade1 = cv2.CascadeClassifier('../haarcascades/haarcascade_frontalface_alt2.xml')
nosecascade = cv2.CascadeClassifier('../haarcascades/haarcascade_mcs_nose.xml')

# Video capture via webcam (index -1 = first available device), 640x480.
cam = cv2.VideoCapture(-1)
cam.set(3, 640)   # CAP_PROP_FRAME_WIDTH
cam.set(4, 480)   # CAP_PROP_FRAME_HEIGHT
video_capture = cam

# Trail of nose-centre points, stored in ABSOLUTE frame coordinates.
# The original stored ROI-relative points and offset them by the current
# face's (x, y) at draw time, which raised NameError before any face was
# detected and made the trail jump with the face box.
nose_trail = []

while True:
    # Capture frame-by-frame; skip iterations where the camera yields nothing.
    ret, frame = video_capture.read()
    if not ret:
        continue

    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces1 = faceCascade1.detectMultiScale(gray, 1.1, 5)

    # Draw a rectangle around each face and search the face ROI for a nose.
    for (x, y, w, h) in faces1:
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 255), 2)
        roi_gray = gray[y:y + h, x:x + w]
        roi_color = frame[y:y + h, x:x + w]
        nose = nosecascade.detectMultiScale(roi_gray, 1.3, 5)
        for (ex, ey, ew, eh) in nose:
            cv2.rectangle(roi_color, (ex, ey), (ex + ew, ey + eh), (0, 0, 0), 2)
            # Integer division: cv2.circle needs int coordinates (the
            # original's "/" would produce floats on Python 3).
            nose_trail.append((x + ex + ew // 2, y + ey + eh // 2))

    # Display the frame with the accumulated nose trail overlaid.
    for (px, py) in nose_trail:
        cv2.circle(frame, (px, py), 1, (128, 0, 127), 2)
    cv2.imshow('Video', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# Release video capture
video_capture.release()
cv2.destroyAllWindows()
| [
"agarwalsoumitra1504@gmail.com"
] | agarwalsoumitra1504@gmail.com |
c9ee57c779cbb050035f866898ceb6d98ee4abdf | bac7a7507933ac5bb38b41bbe2a587764da3cf94 | /snappy_wrappers/wrappers/erds_sv2/merge_genotypes/wrapper.py | 96047b855627a47db6f2ddbbcfd18ed4f895f093 | [
"MIT"
] | permissive | Pregelnuss/snappy-pipeline | 923b0f36117a2f55ee52f9a8564ed3bb82a8be16 | 31200eba84bff8e459e9e210d6d95e2984627f5c | refs/heads/master | 2023-06-19T07:24:04.736033 | 2021-05-27T07:24:05 | 2021-05-27T07:24:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 873 | py | # -*- coding: utf-8 -*-
"""Wrapper for running ERDS+SV2 merge genotypes step
"""
from snakemake.shell import shell
__author__ = "Manuel Holtgrewe"
__email__ = "manuel.holtgrewe@bihealth.de"
shell(
r"""
# -----------------------------------------------------------------------------
# Redirect stderr to log file by default and enable printing executed commands
exec &> >(tee -a "{snakemake.log}")
set -x
# -----------------------------------------------------------------------------
bcftools merge \
-m id \
-O z \
-o {snakemake.output.vcf} \
{snakemake.input}
$(which tabix) --version
$(which tabix) -f {snakemake.output.vcf}
pushd $(dirname {snakemake.output.vcf})
md5sum $(basename {snakemake.output.vcf}) >$(basename {snakemake.output.vcf}).md5
md5sum $(basename {snakemake.output.vcf}).tbi >$(basename {snakemake.output.vcf}).tbi.md5
"""
)
| [
"manuel.holtgrewe@bihealth.de"
] | manuel.holtgrewe@bihealth.de |
56c7e189b97621aba5e9156b2624bb12ef4c9007 | 0fcc6353edee4eed7a1ea4b1c89a00bfcf03e851 | /PythonFunctions/venv/Scripts/easy_install-3.7-script.py | cc2142893207f4e9d244ff35fbb13add41769553 | [] | no_license | GANESH0080/Python-Practice-Again | 81d8048c23d338a99bb17fa86a9f87b3057bfe52 | 6565911d14a22d0f33a41b417026c31a0a066be5 | refs/heads/master | 2020-09-20T03:40:45.462869 | 2019-11-27T07:19:24 | 2019-11-27T07:19:24 | 224,368,129 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 458 | py | #!D:\PythonPracticeAgain\PythonFunctions\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==39.1.0','console_scripts','easy_install-3.7'
# Auto-generated console-script shim: do not edit by hand; regenerated on
# (re)installation of setuptools.
__requires__ = 'setuptools==39.1.0'
import re
import sys

from pkg_resources import load_entry_point

if __name__ == '__main__':
    # Strip the "-script.py(w)"/".exe" suffix so argv[0] matches the console
    # script name, then delegate to the setuptools entry point and propagate
    # its return value as the process exit code.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('setuptools==39.1.0', 'console_scripts', 'easy_install-3.7')()
    )
| [
"ganusalunkhe@gmail.com"
] | ganusalunkhe@gmail.com |
a7386795586d024e974682a2c99a5f99c1659a8f | a5698f82064aade6af0f1da21f504a9ef8c9ac6e | /huaweicloud-sdk-cbr/huaweicloudsdkcbr/v1/model/show_members_detail_request.py | d11649680f2ceb7aa7e1af032ed7496f05cb6aa0 | [
"Apache-2.0"
] | permissive | qizhidong/huaweicloud-sdk-python-v3 | 82a2046fbb7d62810984399abb2ca72b3b47fac6 | 6cdcf1da8b098427e58fc3335a387c14df7776d0 | refs/heads/master | 2023-04-06T02:58:15.175373 | 2021-03-30T10:47:29 | 2021-03-30T10:47:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,919 | py | # coding: utf-8
import pprint
import re
import six
class ShowMembersDetailRequest:
    """Request model for listing the share members of a backup.

    ``backup_id`` identifies the backup whose members are queried; the
    remaining attributes are optional query parameters (``dest_project_id``,
    ``image_id``, ``status``, ``vault_id``, and the paging/ordering controls
    ``limit``, ``marker``, ``offset``, ``sort``).

    Note: this revision drops the ``six`` Python-2 compatibility shim in
    favour of native ``dict.items()``; behavior is unchanged on Python 3.
    """

    # Attribute names whose values are masked as "****" in to_dict() output.
    sensitive_list = []

    # Attribute name -> declared OpenAPI type.
    openapi_types = {
        'backup_id': 'str',
        'dest_project_id': 'str',
        'image_id': 'str',
        'status': 'str',
        'vault_id': 'str',
        'limit': 'int',
        'marker': 'str',
        'offset': 'int',
        'sort': 'str'
    }

    # Attribute name -> JSON key in the API definition.
    attribute_map = {
        'backup_id': 'backup_id',
        'dest_project_id': 'dest_project_id',
        'image_id': 'image_id',
        'status': 'status',
        'vault_id': 'vault_id',
        'limit': 'limit',
        'marker': 'marker',
        'offset': 'offset',
        'sort': 'sort'
    }

    def __init__(self, backup_id=None, dest_project_id=None, image_id=None,
                 status=None, vault_id=None, limit=None, marker=None,
                 offset=None, sort=None):
        """ShowMembersDetailRequest - a model defined in huaweicloud sdk"""
        self._backup_id = None
        self._dest_project_id = None
        self._image_id = None
        self._status = None
        self._vault_id = None
        self._limit = None
        self._marker = None
        self._offset = None
        self._sort = None
        self.discriminator = None

        # backup_id is required and therefore always assigned (even if None);
        # optional attributes keep their None default unless supplied.
        self.backup_id = backup_id
        if dest_project_id is not None:
            self.dest_project_id = dest_project_id
        if image_id is not None:
            self.image_id = image_id
        if status is not None:
            self.status = status
        if vault_id is not None:
            self.vault_id = vault_id
        if limit is not None:
            self.limit = limit
        if marker is not None:
            self.marker = marker
        if offset is not None:
            self.offset = offset
        if sort is not None:
            self.sort = sort

    @property
    def backup_id(self):
        """The backup_id of this ShowMembersDetailRequest (required)."""
        return self._backup_id

    @backup_id.setter
    def backup_id(self, backup_id):
        self._backup_id = backup_id

    @property
    def dest_project_id(self):
        """The dest_project_id of this ShowMembersDetailRequest."""
        return self._dest_project_id

    @dest_project_id.setter
    def dest_project_id(self, dest_project_id):
        self._dest_project_id = dest_project_id

    @property
    def image_id(self):
        """The image_id of this ShowMembersDetailRequest."""
        return self._image_id

    @image_id.setter
    def image_id(self, image_id):
        self._image_id = image_id

    @property
    def status(self):
        """The status of this ShowMembersDetailRequest."""
        return self._status

    @status.setter
    def status(self, status):
        self._status = status

    @property
    def vault_id(self):
        """The vault_id of this ShowMembersDetailRequest."""
        return self._vault_id

    @vault_id.setter
    def vault_id(self, vault_id):
        self._vault_id = vault_id

    @property
    def limit(self):
        """The limit of this ShowMembersDetailRequest (page size)."""
        return self._limit

    @limit.setter
    def limit(self, limit):
        self._limit = limit

    @property
    def marker(self):
        """The marker of this ShowMembersDetailRequest (pagination cursor)."""
        return self._marker

    @marker.setter
    def marker(self, marker):
        self._marker = marker

    @property
    def offset(self):
        """The offset of this ShowMembersDetailRequest."""
        return self._offset

    @offset.setter
    def offset(self, offset):
        self._offset = offset

    @property
    def sort(self):
        """The sort of this ShowMembersDetailRequest (result ordering)."""
        return self._sort

    @sort.setter
    def sort(self, sort):
        self._sort = sort

    def to_dict(self):
        """Return the model's properties as a plain dict.

        Nested models (anything exposing ``to_dict``) are serialised
        recursively, inside lists and dicts as well; attributes listed in
        ``sensitive_list`` are masked as ``"****"``.
        """
        result = {}
        for attr in self.openapi_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    k: v.to_dict() if hasattr(v, "to_dict") else v
                    for k, v in value.items()
                }
            elif attr in self.sensitive_list:
                result[attr] = "****"
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, ShowMembersDetailRequest):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
"hwcloudsdk@huawei.com"
] | hwcloudsdk@huawei.com |
fb9b2f8ef498d54f9d568b07bf798b3b11b828e0 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/1/usersdata/66/180/submittedfiles/formula.py | e164e5a9a7884303cf825c60c1229245d2385105 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 176 | py | # -*- coding: utf-8 -*-
from __future__ import division
def montante(p, i, n):
    """Return the future value of ``n`` periodic deposits of ``p`` at rate ``i``.

    Annuity future-value formula: v = p * ((1 + i) ** n - 1) / i.

    NOTE(review): the original expression was ``(P*((1+I)**N)-1)/T`` — it
    crashed with NameError (mixed-case names, undefined ``T``); the annuity
    reading above is the closest consistent interpretation — confirm.
    """
    return p * (((1 + i) ** n) - 1) / i


if __name__ == "__main__":
    # float(input(...)) replaces the Python 2 input() that evaluated the
    # typed expression; the guard keeps imports of this module side-effect
    # free so the function can be reused and tested.
    p = float(input("digite p"))
    i = float(input("digite i"))
    n = float(input("digite n"))
    v = montante(p, i, n)
    print("o valor de v eh: %.2f" % v)
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
d95fc2686d35f61aa399c1c3f823176e317ea474 | 699a43917ce75b2026a450f67d85731a0f719e01 | /12_int_to_roman/interger_to_roman.py | e51bac0b5257bb65541b113406b145206f685e01 | [] | no_license | wusanshou2017/Leetcode | 96ab81ae38d6e04739c071acfc0a5f46a1c9620b | c4b85ca0e23700b84e4a8a3a426ab634dba0fa88 | refs/heads/master | 2021-11-16T01:18:27.886085 | 2021-10-14T09:54:47 | 2021-10-14T09:54:47 | 107,402,187 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 553 | py | class Solution:
def __init__(self):
self.dic_roman={"I":1,"V":5,"X":10,"L":50,"C":100,"D":500,"M":1000}
self.dic_int2roman={ value:key for key,value in self.dic_roman.items()}
def int2roman(self,target):
res =""
if target//1000>0:
m=target//1000
target=target-m*1000
res+="M"*m;
if target>=300:
if target>=500:
target=target-500
c=target//100
res+="D"+c*"C"
else:
target=target-()
| [
"252652905@qq.com"
] | 252652905@qq.com |
bb3c3e3539618a84d358090da571b392a03cf637 | 6730aab6ed416937cc1ed96ae87f86d7761c8129 | /src/calc_parse.py | 9dc2b8937e0ec016416c724205099b4f3f25e54b | [] | no_license | vrthra/miner | 3c4adcd1db9b5583354d665ec169c47c38f1faa1 | 4b55999eedb97c607024ff04575b0f09b499d58a | refs/heads/master | 2020-05-02T22:54:43.393084 | 2019-05-02T17:07:37 | 2019-05-02T17:07:37 | 178,266,349 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,783 | py | #!/usr/bin/env python3
import string
from helpers import scope
def parse_num(s,i):
    """Scan a run of digit characters in ``s`` starting at index ``i``.

    Returns ``(next_index, digits_string)``.  Character membership tests go
    through the instrumented wrapper's ``.in_`` method (not the ``in``
    operator) so the Tracer can record every comparison; the ``scope`` blocks
    tag comparisons with their syntactic location for the miner.
    """
    n = ''
    while s[i:] and s[i].in_(list(string.digits)):
        with scope('while_1', 0):
            n += s[i]
            i = i +1
    return i,n
def parse_paren(s, i):
    """Parse a parenthesised sub-expression; ``s[i]`` must be ``'('``.

    Returns ``(index_after_closing_paren, sub_expression)``.  Raises
    Exception on premature end of input; asserts on a missing ``')'``.
    """
    assert s[i] == '('
    i, v = parse_expr(s, i+1)
    # Reaching end of input before the closing parenthesis is an error.
    if s[i:] == '':
        with scope('if_0', 0):
            raise Exception(s, i)
    assert s[i] == ')'
    return i+1, v
def parse_expr(s, i = 0):
    """Parse an arithmetic expression into a nested token list.

    Numbers become strings, operators stay single characters and
    parenthesised groups become nested lists.  Returns ``(next_index,
    expression_list)``.  The ``scope`` blocks and ``.in_`` calls feed the
    Tracer's comparison log (see the __main__ block below).
    """
    expr = []
    while s[i:]:
        with scope('while_2', 0):
            c = s[i]
            if c.in_(list(string.digits)):
                # Digit: consume the whole number.
                with scope('if_1', 0):
                    i,num = parse_num(s,i)
                    expr.append(num)
            elif c.in_(['+', '-', '*', '/']):
                # Operator: kept as a single-character token.
                with scope('if_1', 1):
                    expr.append(c)
                    i = i + 1
            elif c == '(':
                # Sub-expression: recurse and nest the result.
                with scope('if_1', 2):
                    i, cexpr = parse_paren(s, i)
                    expr.append(cexpr)
            elif c == ')':
                # Closing paren ends this level; the caller consumes it.
                with scope('if_1', 3):
                    return i, expr
            else:
                with scope('if_1', 4):
                    raise Exception(s,i)
    return i, expr
import json
import sys
import Tracer
if __name__ == "__main__":
    # Expression comes from argv[1]; fall back to a demo expression.
    mystring = sys.argv[1] if len(sys.argv) > 1 else "(25-1/(2+3))*100/3"
    # Record only comparisons made inside the three parser functions.
    restrict = {'methods':['parse_num', 'parse_paren', 'parse_expr']}
    with Tracer.Tracer(mystring, restrict) as tracer:
        parse_expr(tracer())
    assert tracer.inputstr.comparisons
    # Emit the mined comparison trace and method map as JSON on stdout.
    print(json.dumps({
        'comparisons':Tracer.convert_comparisons(tracer.inputstr.comparisons),
        'method_map': Tracer.convert_method_map(tracer.method_map),
        'inputstr': str(tracer.inputstr)}))
| [
"rahul@gopinath.org"
] | rahul@gopinath.org |
2ee60026988f1846551704ad15e993f1cd397d43 | 6aa7e203f278b9d1fd01244e740d5c944cc7c3d3 | /airflow/providers/docker/hooks/docker.py | bae0e7f5a046ff578796c98cc623b63dec42f3c3 | [
"Apache-2.0",
"BSD-3-Clause",
"MIT",
"Python-2.0"
] | permissive | laserpedro/airflow | 83fc991d91749550b151c81876d9e7864bff3946 | a28afa8172489e41ecf7c381674a0cb91de850ff | refs/heads/master | 2023-01-02T04:55:34.030935 | 2020-10-24T15:55:11 | 2020-10-24T15:55:11 | 285,867,990 | 1 | 0 | Apache-2.0 | 2020-08-07T15:56:49 | 2020-08-07T15:56:49 | null | UTF-8 | Python | false | false | 3,265 | py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from typing import Optional
from docker import APIClient
from docker.errors import APIError
from airflow.exceptions import AirflowException
from airflow.hooks.base_hook import BaseHook
from airflow.utils.log.logging_mixin import LoggingMixin
class DockerHook(BaseHook, LoggingMixin):
    """
    Interact with a private Docker registry.

    :param docker_conn_id: ID of the Airflow connection where
        credentials and extra configuration are stored
    :type docker_conn_id: str
    :param base_url: URL of the Docker daemon the API client talks to;
        required.
    :type base_url: str
    :param version: Docker API version to use; required.
    :type version: str
    :param tls: TLS configuration passed through to ``docker.APIClient``.
        NOTE(review): annotated ``Optional[str]``, but docker-py's ``tls``
        parameter accepts a bool or ``TLSConfig`` — confirm before relying
        on the annotation.
    :type tls: str
    """
    def __init__(self,
                 docker_conn_id='docker_default',
                 base_url: Optional[str] = None,
                 version: Optional[str] = None,
                 tls: Optional[str] = None
                 ) -> None:
        super().__init__()
        # base_url and version have None defaults but are effectively
        # mandatory: fail fast with a clear message instead of a late
        # docker-py error.
        if not base_url:
            raise AirflowException('No Docker base URL provided')
        if not version:
            raise AirflowException('No Docker API version provided')

        conn = self.get_connection(docker_conn_id)
        if not conn.host:
            raise AirflowException('No Docker registry URL provided')
        if not conn.login:
            raise AirflowException('No username provided')
        extra_options = conn.extra_dejson

        self.__base_url = base_url
        self.__version = version
        self.__tls = tls
        # Registry address is host[:port] from the Airflow connection.
        if conn.port:
            self.__registry = "{}:{}".format(conn.host, conn.port)
        else:
            self.__registry = conn.host
        self.__username = conn.login
        self.__password = conn.password
        self.__email = extra_options.get('email')
        # Re-authentication is on unless the connection extra says 'no'.
        self.__reauth = extra_options.get('reauth') != 'no'

    def get_conn(self) -> APIClient:
        """Create a docker ``APIClient`` and log it into the registry."""
        client = APIClient(
            base_url=self.__base_url,
            version=self.__version,
            tls=self.__tls
        )
        self.__login(client)
        return client

    def __login(self, client) -> None:
        # Authenticate against the private registry; failures are logged and
        # re-raised as AirflowException so task errors surface uniformly.
        self.log.debug('Logging into Docker registry')
        try:
            client.login(
                username=self.__username,
                password=self.__password,
                registry=self.__registry,
                email=self.__email,
                reauth=self.__reauth
            )
            self.log.debug('Login successful')
        except APIError as docker_error:
            self.log.error('Docker registry login failed: %s', str(docker_error))
            raise AirflowException(f'Docker registry login failed: {docker_error}')
| [
"noreply@github.com"
] | laserpedro.noreply@github.com |
5e106deb5aff07fd69b809c2d354e681e3d84798 | f13acd0d707ea9ab0d2f2f010717b35adcee142f | /AtCoder_Virtual_Contest/tessoku-book/ec/main.py | 09c1be3745ae6e29b841ff506af698a7664a78cd | [
"CC0-1.0",
"LicenseRef-scancode-public-domain"
] | permissive | KATO-Hiro/AtCoder | 126b9fe89fa3a7cffcbd1c29d42394e7d02fa7c7 | bf43320bc1af606bfbd23c610b3432cddd1806b9 | refs/heads/master | 2023-08-18T20:06:42.876863 | 2023-08-17T23:45:21 | 2023-08-17T23:45:21 | 121,067,516 | 4 | 0 | CC0-1.0 | 2023-09-14T21:59:38 | 2018-02-11T00:32:45 | Python | UTF-8 | Python | false | false | 3,200 | py | # -*- coding: utf-8 -*-
import random
import string
import traceback
# See:
# https://atcoder.jp/contests/abc284/submissions/37841742
class RollingHashWithRange:
    """A view of the slice ``[left, right)`` of a parent RollingHash.

    Supports O(1) substring hashing, hash-based equality, and lexicographic
    ordering in O(log n) via binary search on the longest common prefix.
    Equality/ordering compare hashes, so collisions are theoretically
    possible (accepted by design for this contest helper).
    """
    def __init__(self, parent, left, right) -> None:
        self.parent = parent
        self.left = left
        self.right = right

    def __getitem__(self, key):
        # Hash of the length-``key`` prefix of this view (key == len is
        # intentionally allowed; it is used by __lt__).
        if key > self.right - self.left:
            traceback.print_exc()
            raise Exception("index out of range")
        return self.get(self.left, self.left + key)

    # Overall hash value
    def get(self, left, right):
        mod = RollingHash.mod
        # Prefix-hash subtraction: hash(s[left:right]) =
        #   h[right] - h[left] * base^(right-left)   (mod m)
        return (
            self.parent.hash[right]
            - self.parent.hash[left] * self.parent.power[right - left]
        ) % mod

    def __len__(self):
        return self.right - self.left

    def __eq__(self, other):
        # Hash comparison only — see class docstring regarding collisions.
        return self.get(self.left, self.right) == other.get(other.left, other.right)

    # Longest Common Prefix
    def __lt__(self, other):
        # If one view is a prefix of the other, the shorter one sorts first.
        length = min(len(self), len(other))
        if self[length] == other[length]:
            return len(self) < len(other)
        # Binary search for the first differing position using prefix hashes,
        # then compare the actual characters at that position.
        left, right = 0, length
        while True:
            mid = (left + right) // 2
            if left == right:
                return (
                    self.parent.s[self.left + right - 1]
                    < other.parent.s[other.left + right - 1]
                )
            if self[mid] != other[mid]:
                right = mid
            else:
                left = mid + 1
            right = right  # no-op, kept verbatim; likely leftover
class RollingHash:
    """Polynomial rolling hash with precomputed prefix hashes and powers.

    ``hash[i]`` is the hash of ``s[:i]`` and ``power[i]`` is ``base**i``
    (both modulo ``mod``); together they give O(1) substring hashes via
    :class:`RollingHashWithRange`.
    """
    base = 30
    mod = 10**9 + 9

    @classmethod
    def config(cls, base, mod) -> None:
        """Set base/modulus globally (affects all instances, old and new)."""
        RollingHash.base = base
        RollingHash.mod = mod

    def __init__(self, s) -> None:
        b = RollingHash.base
        m = RollingHash.mod
        n = len(s)
        self.s = s
        # Build both tables in a single pass over the string.
        prefix = [0] * (n + 1)
        powers = [1] * (n + 1)
        for idx, ch in enumerate(s):
            prefix[idx + 1] = (prefix[idx] * b + ord(ch)) % m
            powers[idx + 1] = powers[idx] * b % m
        self.hash = prefix
        self.power = powers

    def get(self, left, right) -> RollingHashWithRange:
        """Return a hashable/comparable view of ``s[left:right]``."""
        return RollingHashWithRange(self, left, right)
def get_random_name(n):
    """Return a random string of ``n`` ASCII letters and digits."""
    alphabet = string.ascii_letters + string.digits
    return "".join(random.choice(alphabet) for _ in range(n))
def test():
    """Ad-hoc fuzz check: native string ordering vs RollingHashWithRange
    ordering on random equal-length strings; prints a diagnostic on mismatch.

    NOTE(review): ``y = x`` overwrites the second random operand, so only the
    equal-strings branch is ever exercised — looks like leftover debugging.
    """
    RollingHash.config(100, 10**9 + 7)
    for i in range(100):
        n = 5
        x, y = get_random_name(n), get_random_name(n)
        y = x
        if (x < y) != (RollingHash(x).get(0, n) < RollingHash(y).get(0, n)):
            print(
                "No", x < y, RollingHash(x).get(0, n) < RollingHash(y).get(0, n), x, y
            )
def main():
    import sys
    input = sys.stdin.readline

    # n: string length, q: number of palindrome queries.
    n, q = map(int, input().split())
    s = input().rstrip()
    # Forward hash plus the hash of the reversed string: s[l:r] is a
    # palindrome iff its forward hash equals the hash of the mirrored range
    # [n-r, n-l) in s[::-1].
    rh1 = RollingHash(s)
    rh2 = RollingHash(s[::-1])
    for _ in range(q):
        li, ri = map(int, input().split())
        li -= 1  # convert 1-based input to a 0-based half-open range
        if rh1.get(li, ri) == rh2.get(n - ri, n - li):
            print("Yes")
        else:
            print("No")


if __name__ == "__main__":
    main()
| [
"k.hiro1818@gmail.com"
] | k.hiro1818@gmail.com |
87f6e6bf13f60d03cd3b263df93b4e6feaef5cdb | 847273de4b1d814fab8b19dc651c651c2d342ede | /.history/Sudoku_II_008_20180622142549.py | a755b1bc64831da70f2eb0c38aca219723cf4bcb | [] | no_license | Los4U/sudoku_in_python | 0ba55850afcffeac4170321651620f3c89448b45 | 7d470604962a43da3fc3e5edce6f718076197d32 | refs/heads/master | 2020-03-22T08:10:13.939424 | 2018-07-04T17:21:13 | 2018-07-04T17:21:13 | 139,749,483 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 7,605 | py | from random import randint
import copy
# Three puzzle boards and their solved counterparts.  Blank cells are the
# string " "; solved boards are used by giveHint() to fill blanks.
# Puzzle 1: two blanks in the bottom-right corner.
sudoku1 = [
    [5, 9, 8, 6, 1, 2, 3, 4, 7],
    [2, 1, 7, 9, 3, 4, 8, 6, 5],
    [6, 4, 3, 5, 8, 7, 1, 2, 9],
    [1, 6, 5, 4, 9, 8, 2, 7, 3],
    [3, 2, 9, 7, 6, 5, 4, 1, 8],
    [7, 8, 4, 3, 2, 1, 5, 9, 6],
    [8, 3, 1, 2, 7, 6, 9, 5, 4],
    [4, 7, 2, 8, 5, 9, 6, 3, 1],
    [9, 5, 6, 1, 4, 3, 7, " ", " "]
]
sudoku1solved = [
    [5, 9, 8, 6, 1, 2, 3, 4, 7],
    [2, 1, 7, 9, 3, 4, 8, 6, 5],
    [6, 4, 3, 5, 8, 7, 1, 2, 9],
    [1, 6, 5, 4, 9, 8, 2, 7, 3],
    [3, 2, 9, 7, 6, 5, 4, 1, 8],
    [7, 8, 4, 3, 2, 1, 5, 9, 6],
    [8, 3, 1, 2, 7, 6, 9, 5, 4],
    [4, 7, 2, 8, 5, 9, 6, 3, 1],
    [9, 5, 6, 1, 4, 3, 7, 8, 2]
]
# Puzzle 2: blanks scattered across the last row.
sudoku2 = [
    [9, 8, 7, 4, 3, 2, 5, 6, 1],
    [2, 4, 3, 5, 1, 6, 8, 7, 9],
    [5, 6, 1, 7, 9, 8, 4, 3, 2],
    [3, 9, 5, 6, 4, 7, 2, 1, 8],
    [8, 2, 4, 3, 5, 1, 6, 9, 7],
    [1, 7, 6, 2, 8, 9, 3, 4, 5],
    [7, 1, 2, 8, 6, 3, 9, 5, 4],
    [4, 3, 8, 9, 7, 5, 1, 2, 6],
    [' ', 5, ' ', ' ', 2, ' ', 7, ' ', ' ']
]
sudoku2solved = [
    [9, 8, 7, 4, 3, 2, 5, 6, 1],
    [2, 4, 3, 5, 1, 6, 8, 7, 9],
    [5, 6, 1, 7, 9, 8, 4, 3, 2],
    [3, 9, 5, 6, 4, 7, 2, 1, 8],
    [8, 2, 4, 3, 5, 1, 6, 9, 7],
    [1, 7, 6, 2, 8, 9, 3, 4, 5],
    [7, 1, 2, 8, 6, 3, 9, 5, 4],
    [4, 3, 8, 9, 7, 5, 1, 2, 6],
    [6, 5, 9, 1, 2, 4, 7, 8, 3]
]
# Puzzle 3: a genuinely hard board with blanks throughout.
sudoku3 = [
    [1, 2, ' ', ' ', ' ', 4, ' ', 8, 6],
    [5, ' ', ' ', ' ', ' ', 9, ' ', ' ', 4],
    [' ', ' ', ' ', ' ', 3, ' ', 2, ' ', ' '],
    [2, ' ', 6, ' ', 1, ' ', 4, ' ', 3],
    [' ', ' ', ' ', 7, ' ', 6, ' ', ' ', ' '],
    [' ', 7, ' ', ' ', 8, ' ', 9, ' ', 1],
    [' ', ' ', ' ', ' ', ' ', ' ', 8, 3, ' '],
    [3, ' ', 7, 8, ' ', 2, 1, ' ', ' '],
    [' ', 4, 1, ' ', ' ', 7, ' ', 9, ' '],
]
sudoku3solved = [
    [1, 2, 9, 5, 7, 4, 3, 8, 6],
    [5, 3, 8, 6, 2, 9, 7, 1, 4],
    [7, 6, 4, 1, 3, 8, 2, 5, 9],
    [2, 8, 6, 9, 1, 5, 4, 7, 3],
    [9, 1, 3, 7, 4, 6, 5, 2, 8],
    [4, 7, 5, 2, 8, 3, 9, 6, 1],
    [6, 5, 2, 4, 9, 1, 8, 3, 7],
    [3, 9, 7, 8, 6, 2, 1, 4, 5],
    [8, 4, 1, 3, 5, 7, 6, 9, 2],
]
def giveHint(emptyS, solvedS):
    """Reveal one cell: copy the solved value into the first empty cell.

    Scans the 9x9 grid ``emptyS`` row by row; when a cell holding ``" "`` is
    found it is overwritten in place with the matching cell of ``solvedS``
    and the scan stops, so exactly one hint is given per call.
    """
    for row in range(9):
        for col in range(9):
            if emptyS[row][col] == " ":
                emptyS[row][col] = solvedS[row][col]
                return
def printSudoku():
    # Render the module-level ``sudoku`` grid (list of nine 9-element rows)
    # to stdout with a column header and a horizontal rule every three rows.
    # NOTE(review): reads the global ``sudoku`` — it must be assigned before
    # this is called (the board-selection code below does that).
    i = 0
    while i < 10:
        if i == 0:
            # Column numbers plus the top border.
            print(" 1 2 3 4 5 6 7 8 9")
            print(" -------------------------")
        elif i == 3 or i == 6 or i == 9:
            # Rule between 3x3 bands and below the final row (i == 9).
            print(" -------------------------")
        line = "|"
        if i < 9:
            # {2} is the 1-based row number, {1} the vertical bar, {0[n]} the cells.
            print(' {2} {1} {0[0]} {0[1]} {0[2]} {1} {0[3]} {0[4]} {0[5]} {1} {0[6]} {0[7]} {0[8]} {1}'.format(sudoku[i], line, i+1))
        i = i + 1
    print(" ")
print(" %@@@@@@@ @@@ @@@ (@@@@@@@@@ ,@@@@2@@@@@ @@@, /@@@/ @@@, @@@ ")
print(" @@@* @@@ @@@ (@@( /@@@# .@@@% (@@@ @@@, @@@% @@@, @@@. ")
print(" @@@& @@@ @@@ (@@( @@@* @@@% #@@% @@@,.@@@. @@@, @@@. ")
print(" ,@@@@@@* @@@ @@@ (@@( (@@% .@@@* ,@@@ @@@%@@% @@@, @@@. ")
print(" /@@@@@# @@@ @@@ (@@( (@@% .@@@* ,@@@ @@@,@@@( @@@, @@@. ")
print(" *@@@. @@@ .@@& (@@( @@@. @@@% &@@( @@@, &@@@. @@@* .@@@. ")
print(" &, &@@@ #@@@. ,@@@, (@@( ,&@@@* ,@@@& .@@@@ @@@, (@@@/ #@@@* @@@# ")
print(",@@@@@@@@( (@@@@@@@@% (@@@@@@@@@( #@@@@@@@@@, @@@, ,@@@% ,@@@@@@@@@. \n ")
print("To start game input:")
print(" r - to load random puzzle:")
print(" 1 - to load chart nr 1:")
print(" 2 - to load chart nr 2:")
print(" 3 - to load chart nr 3:")
choice = input("Input here: ")
print("\n\n\n\n")
# ``s`` remembers which board (1..3) is in play so the in-game reset ("r")
# and hint ("h") commands can find the matching source and solution grids.
s = 0
if choice == "R" or choice == "r":
    listaSudoku = [sudoku1, sudoku2, sudoku3]
    sudoku_number = randint(0, 2)
    print("Plansza nr:", sudoku_number)
    # BUG FIX: ``s`` used to be set to the 0-based list index, but the game
    # loop compares ``s`` against 1..3. A random pick of board 1 (index 0)
    # disabled reset/hint entirely, and picks of boards 2/3 reset and hinted
    # against the WRONG board. Map the index to the 1-based board number.
    s = sudoku_number + 1
    sudoku = copy.deepcopy(listaSudoku[sudoku_number])
elif int(choice) == 1:
    s = 1
    sudoku = copy.deepcopy(sudoku1)
elif int(choice) == 2:
    s = 2
    sudoku = copy.deepcopy(sudoku2)
elif int(choice) == 3:
    s = 3
    sudoku = copy.deepcopy(sudoku3)
while True: # prints Sudoku until is solved
# print("Your sudoku to solve:")
printSudoku()
print("\nInput 3 numbers in format a b c, np. 4 5 8")
print(" a - row number")
print(" b - column number ")
print(" c - value")
# vprint(" r - reset chart to start\n ")
x = input("Input a b c: ")
print("")
numbers = " 0123456789" # conditions of entering the numbers !
if (len(x) != 5) or (str(x[0]) not in numbers) or (str(x[2]) not in numbers) or (
str(x[4]) not in numbers) or (str(x[1]) != " ") or (str(x[3]) != " "):
if x == "r": # reset
if s == 1:
sudoku = copy.deepcopy(sudoku1)
elif s == 2:
sudoku = copy.deepcopy(sudoku2)
elif s == 3:
sudoku = copy.deepcopy(sudoku3)
elif x == "h": # show:
print()
if s == 1:
giveHint(sudoku, sudoku1solved)
elif s == 2:
giveHint(sudoku, sudoku2solved)
elif s == 3:
giveHint(sudoku, sudoku3solved)
else:
print("Error - wrong number format \n ")
continue
else:
sudoku[int(x[0])-1][int(x[2])-1] = int(x[4])
column1 = 0
column2 = 0
try: # check if sudoku is solved
i = 0
list = []
while i < 9: # check are all column == 45
column = 0
for item in sudoku:
column = column + item[i]
list.append(column)
i += 1
is45 = 0 # check if sudoku is solved
for listElement in list:
if listElement == 45:
is45 = is45 + 1
#
i = 0
for item in sudoku:
if sum(item) == 45 and is45 == 9:
i = i + 1
if i == 9:
printSudoku()
print(" ")
print("/%%. ,%%/ ,%@@@&/ .%%, /%# /%% *%%* /%/ #%/ %%%* (%. ")
print(" @@@, .@@& &@@@/,*@@@@ /@@# &@@. ,@@# @@@@ ,@@, .@@@ @@@@% @@( ")
print(" %@@.,@@& &@@ @@@ /@@# &@@. @@@. /@@@@# @@@ .@@@ @@,@@@ @@( ")
print(" @@@@@# .@@@ @@@ /@@# &@@. ,@@% @@/&@@. .@@, .@@@ @@, @@@ @@( ")
print(" %@@% .@@@ @@@ /@@# &@@. %@@ ,@@ .@@& %@@ .@@@ @@, @@@ @@( ")
print(" *@@* @@@. .@@& *@@& @@@ @@/&@* (@@,@@* .@@@ @@, @@@@@( ")
print(" *@@* @@@&*,/@@@@ #@@@(,/@@@, &@@@@ @@@@& .@@@ @@, @@@@( ")
print(
" .%%. /&@@@%, *&@@@%. #%%, *%%# #%/ %% (%% ")
print(" ")
'''
print(" @@@@@@@@@@@@@@@@@@@@@@@@@@@@@")
print(" @@@@@@@@@@ YOU WIN @@@@@@@@@@")
print(" @@@@@@@@@@@@@@@@@@@@@@@@@@@@@")
'''
break
except TypeError:
print()
| [
"inz.kamil.wos@gmail.com"
] | inz.kamil.wos@gmail.com |
da80db2eab0104cf95159207871265764239feb7 | 67d8173a716da10a7350213d98938aae9f2115ce | /ProgrammingCourses/CS61A/lab/lab09/tests/split-at.py | 8aeb6433d36caff342936185c794ff632ff506c2 | [] | no_license | jxie0755/Learning_Python | 94490d41bdf93acf8396f843328e38b6da310b0f | 143422321cbc3715ca08f6c3af8f960a55887ced | refs/heads/master | 2021-11-02T22:47:35.790239 | 2021-09-26T04:26:23 | 2021-09-26T04:26:23 | 101,445,132 | 0 | 2 | null | 2019-02-19T15:48:44 | 2017-08-25T22:00:16 | Python | UTF-8 | Python | false | false | 1,127 | py | test = {
"name": "split-at",
"points": 0,
"suites": [
{
"type": "scheme",
"scored": True,
"setup": """
scm> (load 'lab09)
scm> (load 'lab09_extra)
""",
"cases": [
{
"code": """
scm> (car (split-at '(1 2 3 4 5) 3))
(1 2 3)
""",
"hidden": False
},
{
"code": """
scm> (cdr (split-at '(1 2 3 4 5) 3))
(4 5)
""",
"hidden": False
},
{
"code": """
scm> (car (split-at '(1 2 3 4 5) 10))
(1 2 3 4 5)
""",
"hidden": False
},
{
"code": """
scm> (cdr (split-at '(1 2 3 4 5) 10))
()
""",
"hidden": False
},
{
"code": """
scm> (car (split-at '(0 1 1 2 3) 0))
()
""",
"hidden": False
},
{
"code": """
scm> (cdr (split-at '(0 1 1 2 3) 0))
(0 1 1 2 3)
""",
"hidden": False
},
]
}
]
}
| [
"30805062+jxie0755@users.noreply.github.com"
] | 30805062+jxie0755@users.noreply.github.com |
888b859b9f5faab436ba29ab2a12e06acbd44125 | 72dc7d124cdac8f2dcab3f72e95e9a646154a6a0 | /byceps/services/news/models/channel.py | 00229d0acddd28bb9c310a03e564d0e9b3135b06 | [
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] | permissive | m-ober/byceps | e6569802ee76e8d81b892f1f547881010359e416 | 4d0d43446f3f86a7888ed55395bc2aba58eb52d5 | refs/heads/master | 2020-11-30T23:31:33.944870 | 2020-02-12T23:53:55 | 2020-02-12T23:56:04 | 40,315,983 | 0 | 0 | null | 2015-08-06T16:41:36 | 2015-08-06T16:41:36 | null | UTF-8 | Python | false | false | 1,022 | py | """
byceps.services.news.models.channel
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:Copyright: 2006-2020 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
from ....database import db
from ....typing import BrandID
from ....util.instances import ReprBuilder
from ..transfer.models import ChannelID
class Channel(db.Model):
    """A channel to which news items can be published."""
    __tablename__ = 'news_channels'
    # Free-form textual channel identifier; doubles as the primary key.
    id = db.Column(db.UnicodeText, primary_key=True)
    # Owning brand; foreign key into the ``brands`` table, indexed for lookups.
    brand_id = db.Column(db.UnicodeText, db.ForeignKey('brands.id'), index=True, nullable=False)
    # Prefix used when building URLs for items published to this channel.
    url_prefix = db.Column(db.UnicodeText, nullable=False)
    def __init__(
        self, channel_id: ChannelID, brand_id: BrandID, url_prefix: str
    ) -> None:
        self.id = channel_id
        self.brand_id = brand_id
        self.url_prefix = url_prefix
    def __repr__(self) -> str:
        # Debug representation built from the channel id and owning brand.
        return ReprBuilder(self) \
            .add_with_lookup('id') \
            .add('brand', self.brand_id) \
            .build()
| [
"homework@nwsnet.de"
] | homework@nwsnet.de |
b0f5e5106a95baf291b263f728fbb921336b5e00 | 512f48fdcfa78e322526cf47163110009b84bf73 | /test/test_update_settings.py | 86dfd6e0e237c110a37851d41b45c214ddc85ca8 | [
"MIT"
] | permissive | confluentinc/vm-console-client-python | 9a0f540c0113acf68ee9dc914715bc255e4d99f4 | ccbd944a0e0333c73e098b769fe4c82755d29874 | refs/heads/master | 2023-07-18T10:33:58.909287 | 2021-09-02T20:52:20 | 2021-09-02T20:52:20 | 402,559,283 | 0 | 0 | MIT | 2021-09-02T20:49:56 | 2021-09-02T20:49:56 | null | UTF-8 | Python | false | false | 851 | py | # coding: utf-8
"""
Python InsightVM API Client
OpenAPI spec version: 3
Contact: support@rapid7.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.models.update_settings import UpdateSettings # noqa: E501
from swagger_client.rest import ApiException
class TestUpdateSettings(unittest.TestCase):
    """UpdateSettings unit test stubs"""
    # NOTE: scaffold generated by swagger-codegen; no fixtures are needed yet,
    # so setUp/tearDown are intentionally empty.
    def setUp(self):
        pass
    def tearDown(self):
        pass
    def testUpdateSettings(self):
        """Test UpdateSettings"""
        # FIXME: construct object with mandatory attributes with example values
        # model = swagger_client.models.update_settings.UpdateSettings()  # noqa: E501
        pass
if __name__ == '__main__':
unittest.main()
| [
"zachary_youtz@rapid7.com"
] | zachary_youtz@rapid7.com |
99c3e5aa620113bd9066d530f9fa040ebaa93d0b | 7136e5242793b620fa12e9bd15bf4d8aeb0bfe7a | /examples/adspygoogle/dfp/v201103/update_ad_units.py | e6cac253cc32b39005e61ae41624aad51c441c81 | [
"Apache-2.0"
] | permissive | hockeyprincess/google-api-dfp-python | 534519695ffd26341204eedda7a8b50648f12ea9 | efa82a8d85cbdc90f030db9d168790c55bd8b12a | refs/heads/master | 2021-01-10T10:01:09.445419 | 2011-04-14T18:25:38 | 2011-04-14T18:25:38 | 52,676,942 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,300 | py | #!/usr/bin/python
#
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This code example updates an ad unit by enabling AdSense to the first 500.
To determine which ad units exist, run get_all_ad_units.py or
get_inventory_tree.py."""
__author__ = 'api.sgrinberg@gmail.com (Stan Grinberg)'
# Locate the client library. If module was installed via "setup.py" script, then
# the following two lines are not needed.
import os
import sys
sys.path.append(os.path.join('..', '..', '..', '..'))
# Import appropriate classes from the client library.
from adspygoogle.dfp.DfpClient import DfpClient
# Initialize client object.
client = DfpClient(path=os.path.join('..', '..', '..', '..'))
# Initialize appropriate service. By default, the request is always made against
# the sandbox environment.
inventory_service = client.GetInventoryService(
'https://sandbox.google.com', 'v201103')
# Create statement object to get all ad units.
filter_statement = {'query': 'LIMIT 500'}
# Get ad units by filter.
ad_units = inventory_service.GetAdUnitsByStatement(
filter_statement)[0]['results']
if ad_units:
# Update each local ad unit object by enabling AdSense.
for ad_unit in ad_units:
ad_unit['inheritedAdSenseSettings']['value']['adSenseEnabled'] = 'true'
# Update ad units remotely.
ad_units = inventory_service.UpdateAdUnits(ad_units)
# Display results.
if ad_units:
for ad_units in ad_units:
print ('Ad unit with id \'%s\', name \'%s\', and is AdSense enabled '
'\'%s\' was updated.'
% (ad_unit['id'], ad_unit['name'],
ad_unit['inheritedAdSenseSettings']['value']['adSenseEnabled']))
else:
print 'No ad units were updated.'
else:
print 'No ad units found to update.'
| [
"api.sgrinberg@7990c6e4-1bfd-11df-85e6-9b4bd7dd5138"
] | api.sgrinberg@7990c6e4-1bfd-11df-85e6-9b4bd7dd5138 |
d59a1e58246c961c72fe4f4d523b4df4fa88e7c2 | 87040e6a11f28e9e6bfe19abf2bf912a5c5ea286 | /raccoon_dataset/train.py | 0394928fbd2e0cc153ea193ed052a892fa66b090 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | HoboQian/Deep-Learning | f2b788d64c290ab025ae4e09e1cef494b8204536 | 4d335ffebded266647bd853b138c15d8b9a8694a | refs/heads/master | 2020-03-11T07:24:20.303608 | 2017-12-05T02:27:24 | 2017-12-05T02:27:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,445 | py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Training executable for detection models.
This executable is used to train DetectionModels. There are two ways of
configuring the training job:
1) A single pipeline_pb2.TrainEvalPipelineConfig configuration file
can be specified by --pipeline_config_path.
Example usage:
./train \
--logtostderr \
--train_dir=path/to/train_dir \
--pipeline_config_path=pipeline_config.pbtxt
2) Three configuration files can be provided: a model_pb2.DetectionModel
configuration file to define what type of DetectionModel is being trained, an
input_reader_pb2.InputReader file to specify what training data will be used and
a train_pb2.TrainConfig file to configure training parameters.
Example usage:
./train \
--logtostderr \
--train_dir=path/to/train_dir \
--model_config_path=model_config.pbtxt \
--train_config_path=train_config.pbtxt \
--input_config_path=train_input_config.pbtxt
"""
import functools
import json
import os
import tensorflow as tf
import sys;sys.path.append('../')
sys.path.append('../object_detection/')
sys.path.append('../object_detection/slim')#deployment
from object_detection import trainer
from object_detection.builders import input_reader_builder
from object_detection.builders import model_builder
from object_detection.utils import config_util
tf.logging.set_verbosity(tf.logging.INFO)
flags = tf.app.flags
flags.DEFINE_string('master', '', 'Name of the TensorFlow master to use.')
flags.DEFINE_integer('task', 0, 'task id')
flags.DEFINE_integer('num_clones', 1, 'Number of clones to deploy per worker.')
flags.DEFINE_boolean('clone_on_cpu', False,
'Force clones to be deployed on CPU. Note that even if '
'set to False (allowing ops to run on gpu), some ops may '
'still be run on the CPU if they have no GPU kernel.')
flags.DEFINE_integer('worker_replicas', 1, 'Number of worker+trainer '
'replicas.')
flags.DEFINE_integer('ps_tasks', 0,
'Number of parameter server tasks. If None, does not use '
'a parameter server.')
flags.DEFINE_string('train_dir', '',
'Directory to save the checkpoints and training summaries.')
flags.DEFINE_string('pipeline_config_path', '',
'Path to a pipeline_pb2.TrainEvalPipelineConfig config '
'file. If provided, other configs are ignored')
flags.DEFINE_string('train_config_path', '',
'Path to a train_pb2.TrainConfig config file.')
flags.DEFINE_string('input_config_path', '',
'Path to an input_reader_pb2.InputReader config file.')
flags.DEFINE_string('model_config_path', '',
'Path to a model_pb2.DetectionModel config file.')
FLAGS = flags.FLAGS
def main(_):
assert FLAGS.train_dir, '`train_dir` is missing.'
if FLAGS.task == 0: tf.gfile.MakeDirs(FLAGS.train_dir)
if FLAGS.pipeline_config_path:
configs = config_util.get_configs_from_pipeline_file(
FLAGS.pipeline_config_path)
if FLAGS.task == 0:
tf.gfile.Copy(FLAGS.pipeline_config_path,
os.path.join(FLAGS.train_dir, 'pipeline.config'),
overwrite=True)
else:
configs = config_util.get_configs_from_multiple_files(
model_config_path=FLAGS.model_config_path,
train_config_path=FLAGS.train_config_path,
train_input_config_path=FLAGS.input_config_path)
if FLAGS.task == 0:
for name, config in [('model.config', FLAGS.model_config_path),
('train.config', FLAGS.train_config_path),
('input.config', FLAGS.input_config_path)]:
tf.gfile.Copy(config, os.path.join(FLAGS.train_dir, name),
overwrite=True)
model_config = configs['model']
train_config = configs['train_config']
input_config = configs['train_input_config']
model_fn = functools.partial(
model_builder.build,
model_config=model_config,
is_training=True)
create_input_dict_fn = functools.partial(
input_reader_builder.build, input_config)
env = json.loads(os.environ.get('TF_CONFIG', '{}'))
cluster_data = env.get('cluster', None)
cluster = tf.train.ClusterSpec(cluster_data) if cluster_data else None
task_data = env.get('task', None) or {'type': 'master', 'index': 0}
task_info = type('TaskSpec', (object,), task_data)
# Parameters for a single worker.
ps_tasks = 0
worker_replicas = 1
worker_job_name = 'lonely_worker'
task = 0
is_chief = True
master = ''
if cluster_data and 'worker' in cluster_data:
# Number of total worker replicas include "worker"s and the "master".
worker_replicas = len(cluster_data['worker']) + 1
if cluster_data and 'ps' in cluster_data:
ps_tasks = len(cluster_data['ps'])
if worker_replicas > 1 and ps_tasks < 1:
raise ValueError('At least 1 ps task is needed for distributed training.')
if worker_replicas >= 1 and ps_tasks > 0:
# Set up distributed training.
server = tf.train.Server(tf.train.ClusterSpec(cluster), protocol='grpc',
job_name=task_info.type,
task_index=task_info.index)
if task_info.type == 'ps':
server.join()
return
worker_job_name = '%s/task:%d' % (task_info.type, task_info.index)
task = task_info.index
is_chief = (task_info.type == 'master')
master = server.target
trainer.train(create_input_dict_fn, model_fn, train_config, master, task,
FLAGS.num_clones, worker_replicas, FLAGS.clone_on_cpu, ps_tasks,
worker_job_name, is_chief, FLAGS.train_dir)
if __name__ == '__main__':
tf.app.run()
| [
"lee.x.a90@gmail.com"
] | lee.x.a90@gmail.com |
7e3956bb610da283f9f5d675899815f982c86079 | 2ff7e53d5e512cd762217ca54317982e07a2bb0c | /eve-8.51.857815/eve/client/script/ui/shared/mapView/systemMapHandler.py | 8bdd670dbef97185191e05e3e99e04e706df7ae0 | [] | no_license | nanxijw/Clara-Pretty-One-Dick | 66d3d69426642b79e8fd4cc8e0bec23adeeca6d6 | 50de3488a2140343c364efc2615cf6e67f152be0 | refs/heads/master | 2021-01-19T09:25:07.555284 | 2015-02-17T21:49:33 | 2015-02-17T21:49:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,456 | py | #Embedded file name: eve/client/script/ui/shared/mapView\systemMapHandler.py
from carbonui.primitives.layoutGrid import LayoutGrid
from eve.client.script.environment.spaceObject.planet import Planet
from eve.client.script.ui.control.eveLabel import EveLabelLarge, EveLabelMedium
from eve.client.script.ui.shared.mapView.mapViewConst import MARKERID_SOLARSYSTEM_CELESTIAL, VIEWMODE_MARKERS_SETTINGS
from eve.client.script.ui.shared.mapView.mapViewMarkers.mapViewMarkerCelestial import MarkerCelestial
from eve.client.script.ui.shared.mapView.mapViewSettings import GetMapViewSetting
from eve.client.script.ui.shared.maps.mapcommon import STARMAP_SCALE
from eve.client.script.ui.shared.mapView.mapViewUtil import SolarSystemPosToMapPos, ScaleSolarSystemValue
from eve.common.script.planet.surfacePoint import SurfacePoint
from eve.common.script.util.eveFormat import FmtSystemSecStatus
from localization import GetByLabel
import trinity
import uthread
import carbonui.const as uiconst
import evegraphics.settings as gfxsettings
import geo2
import sys
import math
PLANET_TEXTURE_SIZE = 512
class SystemMapHandler(object):
scene = None
markersHandler = None
localMarkerIDs = None
_yScaleFactor = 1.0
def __init__(self, solarsystemID, scene = None, scaling = 1.0, position = None, markersHandler = None):
self.scene = scene
self.solarsystemID = solarsystemID
self.scaling = scaling
self.bracketsByID = {}
self.systemMapSvc = sm.GetService('systemmap')
self.markersHandler = markersHandler
self.localMarkerIDs = set()
parent = trinity.EveRootTransform()
parent.name = 'solarsystem_%s' % solarsystemID
self.systemMapTransform = parent
if scene:
scene.objects.append(self.systemMapTransform)
if position:
self.SetPosition(position)
@apply
def yScaleFactor():
def fget(self):
return self._yScaleFactor
def fset(self, value):
self._yScaleFactor = value
self.UpdatePosition()
return property(**locals())
def SetMarkersHandler(self, markersHandler):
self.markersHandler = markersHandler
self.localMarkerIDs = set()
def SetPosition(self, position):
self.position = position
self.UpdatePosition()
def UpdatePosition(self):
x, y, z = self.position
self.systemMapTransform.translation = (x, y * self.yScaleFactor, z)
self.systemMapTransform.scaling = (ScaleSolarSystemValue(1.0), ScaleSolarSystemValue(self.yScaleFactor), ScaleSolarSystemValue(1.0))
def Close(self):
if self.scene:
uicore.animations.MorphVector3(self.systemMapTransform, 'scaling', self.systemMapTransform.scaling, (0.0, 0.0, 0.0), duration=0.5, callback=self.RemoveFromScene)
else:
self.RemoveFromScene()
def RemoveFromScene(self):
if self.markersHandler and self.localMarkerIDs:
for markerID in self.localMarkerIDs:
self.markersHandler.RemoveMarker(markerID)
self.markersHandler = None
self.bracketsByID = None
self.scene.objects.remove(self.systemMapTransform)
self.systemMapTransform = None
self.scene = None
def LoadCelestials(self):
groups, solarsystemData = self.systemMapSvc.GetSolarsystemHierarchy(self.solarsystemID)
for transform in self.systemMapTransform.children:
try:
itemID = int(transform.name)
itemData = solarsystemData[itemID]
except:
continue
if itemData.groupID == const.groupPlanet:
planetTransform = self.LoadPlanet(itemData.typeID, itemID)
scaling = self.scaling
planetTransform.scaling = (1 / scaling * 0.1, 1 / scaling * 0.1, 1 / scaling * 0.1)
planetTransform.translation = transform.translation
def LoadPlanet(self, planetTypeID, planetID):
planet = Planet()
objType = cfg.invtypes.Get(planetTypeID)
graphicFile = objType.GraphicFile()
planet.typeData['graphicFile'] = graphicFile
planet.typeID = planetTypeID
planet.LoadPlanet(planetID, forPhotoService=True, rotate=False, hiTextures=True)
if planet.model is None or planet.model.highDetail is None:
return
planetTransform = trinity.EveTransform()
planetTransform.name = 'planet'
planetTransform.children.append(planet.model.highDetail)
renderTarget, size = self.CreateRenderTarget()
planet.DoPreProcessEffect(size, None, renderTarget)
trinity.WaitForResourceLoads()
for t in planet.model.highDetail.children:
if t.mesh is not None:
if len(t.mesh.transparentAreas) > 0:
t.sortValueMultiplier = 2.0
self.systemMapTransform.children.append(planetTransform)
return planetTransform
def LoadSolarSystemMap(self):
self.maxRadius = 0.0
solarsystemID = self.solarsystemID
parent = self.systemMapTransform
solarSystemData = self.systemMapSvc.GetSolarsystemData(solarsystemID)
planets = []
childrenToParentByID = {}
sunID = None
maxRadius = 0.0
for celestialObject in solarSystemData:
if celestialObject.groupID == const.groupPlanet:
planets.append((celestialObject.itemID, geo2.Vector(celestialObject.x, celestialObject.y, celestialObject.z)))
elif celestialObject.groupID == const.groupSun:
sunID = celestialObject.itemID
for each in solarSystemData:
if each.groupID in (const.groupPlanet, const.groupStargate):
childrenToParentByID[each.itemID] = sunID
continue
closest = []
eachPosition = geo2.Vector(each.x, each.y, each.z)
for planetID, planetPos in planets:
diffPos = planetPos - eachPosition
diffVector = geo2.Vec3Length(diffPos)
closest.append((diffVector, planetID))
maxRadius = max(maxRadius, diffVector)
closest.sort()
childrenToParentByID[each.itemID] = planets[0][1]
self.maxRadius = maxRadius
orbits = []
objectTransforms = {}
pm = (const.groupPlanet, const.groupMoon)
for each in solarSystemData:
if each.itemID == each.locationID:
continue
if each.groupID == const.groupSecondarySun:
continue
if each.groupID in pm:
parentID = childrenToParentByID.get(each.itemID, None)
if parentID:
orbits.append([each.itemID, parentID])
transform = trinity.EveTransform()
transform.translation = (each.x, each.y, each.z)
transform.name = str(each.itemID)
parent.children.append(transform)
objectTransforms[each.itemID] = transform
uthread.new(self.CreateOrbits, orbits, objectTransforms)
self.solarSystemRadius = maxRadius
cfg.evelocations.Prime(objectTransforms.keys(), 0)
def LoadMarkers(self):
if self.markersHandler and self.localMarkerIDs:
for markerID in self.localMarkerIDs:
self.markersHandler.RemoveMarker(markerID)
self.localMarkerIDs = set()
solarSystemData = self.systemMapSvc.GetSolarsystemData(self.solarsystemID)
loadMarkerGroups = GetMapViewSetting(VIEWMODE_MARKERS_SETTINGS)
for each in solarSystemData:
if self.markersHandler and each.groupID in loadMarkerGroups:
bracketData = sm.GetService('bracket').GetMappedBracketProps(cfg.invgroups.Get(each.groupID).categoryID, each.groupID, each.typeID)
markerID = (MARKERID_SOLARSYSTEM_CELESTIAL, each.itemID)
markerObject = self.markersHandler.AddMarker(markerID, geo2.Vec3Add(self.position, SolarSystemPosToMapPos((each.x, each.y, each.z))), MarkerCelestial, texturePath=bracketData[0], celestialData=each, distanceFadeAlpha=True, maxVisibleRange=2500)
markerObject.SetSolarSystemID(self.solarsystemID)
self.localMarkerIDs.add(markerID)
def CreateOrbits(self, child_parent, objectTransforms):
lineSet = trinity.EveCurveLineSet()
lineSet.name = 'OrbitLines'
lineSet.depthOffset = 10000000.0
self.systemMapTransform.children.append(lineSet)
tex2D = trinity.TriTexture2DParameter()
tex2D.name = 'TexMap'
tex2D.resourcePath = 'res:/UI/Texture/classes/MapView/lineSegment.dds'
lineSet.lineEffect.resources.append(tex2D)
overlayTex2D = trinity.TriTexture2DParameter()
overlayTex2D.name = 'OverlayTexMap'
overlayTex2D.resourcePath = 'res:/UI/Texture/classes/MapView/lineSegment.dds'
lineSet.lineEffect.resources.append(overlayTex2D)
for childID, parentID in child_parent:
if childID in objectTransforms and parentID in objectTransforms:
self.CreateOrbitCircle(objectTransforms[childID], objectTransforms[parentID], lineSet)
if lineSet:
lineSet.SubmitChanges()
def CreateOrbitCircle(self, orbitem, parent, lineSet, points = 256):
orbitPos = geo2.Vector(*orbitem.translation)
parentPos = geo2.Vector(*parent.translation)
dirVec = orbitPos - parentPos
radius = geo2.Vec3Length(dirVec)
if radius == 0:
return
lineColor = (1, 1, 1, 0.1)
dx, dy, dz = dirVec
fromPoint = SurfacePoint(dx, dy, dz)
radius, theta, phi = fromPoint.GetAsRadThPhiTuple()
toPoint = SurfacePoint(theta=theta + math.pi * 0.5, phi=phi)
x, y, z = toPoint.GetAsXYZTuple()
line1 = lineSet.AddSpheredLineCrt(fromPoint.GetAsXYZTuple(), lineColor, (x, y, z), lineColor, parentPos, 3.0)
line2 = lineSet.AddSpheredLineCrt(fromPoint.GetAsXYZTuple(), lineColor, (-x, -y, -z), lineColor, parentPos, 3.0)
fromPoint = SurfacePoint(-dx, -dy, -dz)
radius, theta, phi = fromPoint.GetAsRadThPhiTuple()
toPoint = SurfacePoint(theta=theta + math.pi * 0.5, phi=phi)
x, y, z = toPoint.GetAsXYZTuple()
line3 = lineSet.AddSpheredLineCrt(fromPoint.GetAsXYZTuple(), lineColor, (x, y, z), lineColor, parentPos, 3.0)
line4 = lineSet.AddSpheredLineCrt(fromPoint.GetAsXYZTuple(), lineColor, (-x, -y, -z), lineColor, parentPos, 3.0)
lineSet.ChangeLineSegmentation(line1, 25)
lineSet.ChangeLineSegmentation(line2, 25)
lineSet.ChangeLineSegmentation(line3, 25)
lineSet.ChangeLineSegmentation(line4, 25)
animationColor = (0, 0, 0, 0.5)
lineSet.ChangeLineAnimation(line1, animationColor, 0.1, 0.5)
lineSet.ChangeLineAnimation(line2, animationColor, -0.1, 0.5)
lineSet.ChangeLineAnimation(line3, animationColor, 0.1, 0.5)
lineSet.ChangeLineAnimation(line4, animationColor, -0.1, 0.5)
def CreateRenderTarget(self):
textureQuality = gfxsettings.Get(gfxsettings.GFX_TEXTURE_QUALITY)
size = PLANET_TEXTURE_SIZE >> textureQuality
rt = None
while rt is None or not rt.isValid:
rt = trinity.Tr2RenderTarget(2 * size, size, 0, trinity.PIXEL_FORMAT.B8G8R8A8_UNORM)
if not rt.isValid:
if size < 2:
return
size = size / 2
rt = None
return (rt, size)
class SolarSystemInfoBox(LayoutGrid):
default_columns = 2
default_cellPadding = (0, 1, 6, 1)
def ApplyAttributes(self, attributes):
LayoutGrid.ApplyAttributes(self, attributes)
self.nameLabel = EveLabelLarge(bold=True)
self.AddCell(cellObject=self.nameLabel, colSpan=self.columns)
EveLabelMedium(parent=self, text=GetByLabel('UI/Map/StarMap/SecurityStatus'))
self.securityValue = EveLabelMedium(parent=self, bold=True, color=(1, 0, 0, 1))
def LoadSolarSystemID(self, solarSystemID):
self.nameLabel.text = cfg.evelocations.Get(solarSystemID).name
securityStatus, color = FmtSystemSecStatus(sm.GetService('map').GetSecurityStatus(solarSystemID), True)
self.securityValue.color = (color.r,
color.g,
color.b,
1.0)
self.securityValue.text = securityStatus
| [
"billchang.e@gmail.com"
] | billchang.e@gmail.com |
6e30b513864fb38e0ac1557595002b2ba7e2682e | 09ba5ae2edc51f3fd812b9205188b1b01e6bea77 | /src/CPMel/__init__.py | 3c84ac72d9fa33794d013bdd7ec8fadd8a93a212 | [] | no_license | cpcgskill/Maya_tools | c6a43ad20eab3b97e82c9dfe40a1745b6098e5c4 | 93f9e66e5dc3bb51f33df0615415a56a60613ff1 | refs/heads/main | 2023-02-26T16:20:52.959050 | 2021-01-28T06:12:18 | 2021-01-28T06:12:18 | 325,512,423 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,651 | py | #!/usr/bin/python
# -*-coding:utf-8 -*-
u"""
:创建时间: 2020/5/18 23:57
:作者: 苍之幻灵
:我的主页: https://www.cpcgskill.com
:QQ: 2921251087
:爱发电: https://afdian.net/@Phantom_of_the_Cang
:aboutcg: https://www.aboutcg.org/teacher/54335
:bilibili: https://space.bilibili.com/351598127
* 获得路径模块
* PATH : CPMel所在路径
* MAYAPLUG : CPMel的Maya插件所在路径
* ISDEBUG : 是否处在Debug模式
* 快速入门:
* 导入:
>>> import CPMel.cmds as cc
>>> import CPMel.tool as ctl
* 命令:
* maya.cmds:
>>> import maya.cmds as cmds
>>> cmds.joint()
u"xxx"
* CPMel.cmds
>>> cc.joint()
joint(u"xxx")
* 命令参数转化规则:
* CPObject = str ,Double3 = (x,y,z), Matrix = (x,x,x,..*16)
* 更加方便的创建节点的方法:
>>> cc.createNode.transform()
transform(u"transform")
* mel方法访问:
>>> cc.mel.SmoothSkinWeights()
None
* 事件引擎:
>>> class printDg(cevent.Dg):
... def createNode(self, node):
... print(node)
... def removeNode(self, node):
... print(node)
>>> obj = printDg()
>>> cc.createNode.transform()
transform1 << 打印
transform(u'transform1')
* 工具:
>>> ctl.decode("你好世界")
u'你好世界'
>>> ctl.MayaObjectData(u"time1")
<CPMel.tool.MayaObjectData object at 0x0000000053CB32E8>
>>> ctl.undoBlock(xxx type = func)# Qt撤销的实现
xxx type = func
* 视频版教程: https://www.aboutcg.org/courseDetails/1031/introduce
* 2.5版本更新 :
* 使用了预编译脚本优化了文件体积
* 修复了一些BUG
* 2.6版本更新 :
* 解决了qt错误处理问题
* 错误与mayaplug可以运行多个了
* 实现了相对运行
* 区分debug版与release版
* 去除了static_cmds中无用的注释
* 通过文档注释进行类型指定优化了在pycharm中编写程序的补全效果
* 去除了mayaPlug模块下无用的程序
* 2.7版本更新 :
* 优化了导入实现
* 使用CLI
注意2.7的CLI还不完善将于!!!CPMel3版本稳定CLI功能
"""
from . import initializeMaya
import os
import sys
import maya.cmds
sys.cpmel_data = dict()
MAYAINDEX = int(maya.cmds.about(v=True))
ISDEBUG = False
try:
PATH = os.path.dirname(os.path.abspath(__file__))
if type(PATH) == str:
try:
PATH = PATH.decode("utf8")
except UnicodeDecodeError:
try:
PATH = PATH.decode("gbk")
except UnicodeDecodeError:
try:
PATH = PATH.decode("GB18030")
except UnicodeDecodeError:
try:
PATH = PATH.decode("GB2312")
except UnicodeDecodeError:
PATH = unicode(PATH)
PATH = PATH.encode("utf8").decode("utf8")
except:
PATH = os.path.dirname(os.path.abspath(__file__))
MAYAPLUG = u'%s\\mayaPlug' % PATH
from . import mayaPlug
from . import core
from . import api
from . import cmds
from . import event
from . import ui
from . import tool
# DELETE #
if ISDEBUG:
reload(mayaPlug)
reload(core)
reload(api)
reload(cmds)
reload(event)
reload(ui)
reload(tool)
# \DELETE #
cmds.upcommands()
maya.cmds.pluginInfo(cc=cmds.upcommands)
del maya
if hasattr(sys, "cpmel_data"):
del sys.cpmel_data | [
"www.cpcgskill.com"
] | www.cpcgskill.com |
6b371890aaa3ffd72c30d16c8b286448df3d5f9d | 1fddb12ae9b7db260b9161983a726f1528ece940 | /Part_01/Cap_09/Classes9.7.py | 4b3c0876b7328f323d06eda1b9b9514537c724e0 | [] | no_license | davicosta12/python_work | aecf642bbbf2a007fba159f305b4ab21ff00503f | 5df62d3ced25b7e04cda71846fd07862dda39b4c | refs/heads/master | 2023-04-23T09:49:31.923211 | 2021-04-27T00:31:42 | 2021-04-27T00:31:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,697 | py | class User():
def __init__(self, first_name, last_name, age, sex, situacao, login_attempts):
self.first_name = first_name
self.last_name = last_name
self.age = age
self.sex = sex
self.situacao = situacao
self.login_attempts = login_attempts
def describe_user(self):
print("\n Informações sobre o usuário: " + self.first_name.title())
print("\n Nome: " + self.first_name.title())
print(" Sobrenome: " + self.last_name.title())
print(" Sexo: " + self.sex.title())
print(" Idade: " + str(self.age))
print(" Situação: " + self.situacao.title())
def greet_user(self):
print("\n Boa noite, " + self.first_name.title())
def increment_login_attempts(self):
    """Bump the login-attempt counter by one."""
    self.login_attempts += 1
def reset_login_attempts(self):
    """Reset the login-attempt counter back to zero."""
    self.login_attempts = 0
class Admin(User):
    """User subclass that carries a fixed list of admin privileges."""

    def __init__(self, first_name, last_name, age, sex, situacao, login_attempts):
        """Initialise the base User fields, then attach the privilege list."""
        super().__init__(first_name, last_name, age, sex, situacao, login_attempts)
        self.privileges = ['can add post', 'can delete post', 'can ban user',
                           'can commit private messages', 'can active anything']

    def show_privileges(self):
        """Print each privilege; the last one ends with '.' instead of ';'."""
        print(" Privilégios do Admin")
        last_index = len(self.privileges) - 1
        for index, privilege in enumerate(self.privileges):
            terminator = "." if index == last_index else ";"
            print("\n " + privilege + terminator)
# Demo driver: build an Admin (note: despite the variable name, this first
# instance is an Admin, not a plain User), print its profile and greeting,
# then dump the privilege list of a second, identically-built Admin.
instancia_user_davi = Admin('davi', 'silva', 21, 'masculino', 'solteiro', 6)
instancia_user_davi.describe_user()
instancia_user_davi.greet_user()
admin = Admin('davi', 'silva', 21, 'masculino', 'solteiro', 6)
print("\n ------------------------- \n")
admin.show_privileges()
| [
"deadspace24@hotmail.com"
] | deadspace24@hotmail.com |
492e9832c721c5e23652772a639a4d59fc4457f1 | d5af5459d0a68d8934219cdd516a23d73c7c52fb | /labs/greek.py | 308315866e2f731924c244567a5e77a0b20f5e03 | [] | no_license | flathunt/pylearn | 1e5b147924dca792eb1cddbcbee1b8da0fc3d055 | 006f37d67343a0288e7efda359ed9454939ec25e | refs/heads/master | 2022-11-23T23:31:32.016146 | 2022-11-17T08:20:57 | 2022-11-17T08:20:57 | 146,803,991 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 709 | py |
# Greek alphabet names in codepoint order starting at U+03B1 (alpha).
# Note: U+03C2 is the word-final sigma, which precedes U+03C3 (sigma).
greek = ['Alpha','Beta','Gamma','Delta','Epsilon','Zeta','Eta','Theta',
         'Iota','Kappa','Lamda','Mu','Nu','Xi','Omicron','Pi','Rho',
         'Final Sigma','Sigma','Tau','Upsilon','Phi','Chi','Psi','Omega']

# Print one row per letter: hex codepoint, the name centred in 14 columns,
# then the lowercase and uppercase glyphs. Terminals that cannot encode a
# glyph fall back to printing the name and 'unknown'.
for codepoint, letter_name in enumerate(greek, start=0x03B1):
    try:
        glyph = chr(codepoint)
        row = "{0:#x} {1:^14s} : {2:3s} {3:3s}".format(
            codepoint, letter_name, glyph, glyph.upper())
        print(row)
    except UnicodeEncodeError:
        print(letter_name, 'unknown')
| [
"porkpie@gmail.com"
] | porkpie@gmail.com |
d15786b8e34263c55d3ab8ca58d919ff88dbdc42 | d22634a6101cafc75dde63f48882e29b0312388c | /master/python/swagger_client/models/fiat_dep_data.py | 06bd3229631ff1bc9dccbb05577149cc1209133d | [
"Apache-2.0"
] | permissive | rajdeep225/plugins | e4cf16bc612ceec160c6ea4bae9ffd47012e0b0a | 6f62aac931e15b844431b08858303cac53b9a5ca | refs/heads/master | 2020-12-24T05:40:05.313004 | 2016-08-21T19:00:00 | 2016-08-21T19:00:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,087 | py | # coding: utf-8
"""
Coinsecure Api Documentation
To generate an API key, please visit <a href='https://coinsecure.in/api' target='_new' class='homeapi'>https://coinsecure.in/api</a>.<br>Guidelines for use can be accessed at <a href='https://api.coinsecure.in/v1/guidelines'>https://api.coinsecure.in/v1/guidelines</a>.<br>Programming Language Libraries for use can be accessed at <a href='https://api.coinsecure.in/v1/code-libraries'>https://api.coinsecure.in/v1/code-libraries</a>.
OpenAPI spec version: beta
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pprint import pformat
from six import iteritems
import re
class FiatDepData(object):
    """
    Model for a fiat deposit record returned by the Coinsecure API.

    NOTE: Originally auto-generated by the swagger code generator.
    Hand-tuned to drop the ``six`` dependency (plain ``dict.items()``
    works on both Python 2 and 3) and to make ``__eq__`` safe against
    non-FiatDepData operands (the generated version raised
    AttributeError when compared with objects lacking ``__dict__``).
    """

    # Attribute name -> swagger type; drives serialization in to_dict().
    swagger_types = {
        'time_details': 'TimeDataCoin',
        'deposit_id': 'str',
        'value': 'int',
        'account': 'str',
        'reason': 'str'
    }

    # Attribute name -> JSON key used on the wire.
    attribute_map = {
        'time_details': 'timeDetails',
        'deposit_id': 'depositID',
        'value': 'value',
        'account': 'account',
        'reason': 'reason'
    }

    def __init__(self, time_details=None, deposit_id=None, value=None, account=None, reason=None):
        """
        FiatDepData - a model defined in Swagger.

        :param time_details: optional TimeDataCoin with timing metadata.
        :param deposit_id: unique identifier of the deposit (str).
        :param value: deposited amount (int).
        :param account: account the deposit was credited to (str).
        :param reason: free-text reason/status message (str).
        """
        self._time_details = time_details
        self._deposit_id = deposit_id
        self._value = value
        self._account = account
        self._reason = reason

    @property
    def time_details(self):
        """TimeDataCoin: timing metadata of this deposit (or None)."""
        return self._time_details

    @time_details.setter
    def time_details(self, time_details):
        self._time_details = time_details

    @property
    def deposit_id(self):
        """str: unique identifier of this deposit."""
        return self._deposit_id

    @deposit_id.setter
    def deposit_id(self, deposit_id):
        self._deposit_id = deposit_id

    @property
    def value(self):
        """int: deposited amount."""
        return self._value

    @value.setter
    def value(self, value):
        self._value = value

    @property
    def account(self):
        """str: account the deposit was credited to."""
        return self._account

    @account.setter
    def account(self, account):
        self._account = account

    @property
    def reason(self):
        """str: free-text reason/status message."""
        return self._reason

    @reason.setter
    def reason(self, reason):
        self._reason = reason

    def to_dict(self):
        """Return the model's properties as a plain dict.

        Nested swagger models (anything exposing ``to_dict``) are
        serialized recursively, including inside lists and dicts.
        """
        result = {}
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [x.to_dict() if hasattr(x, "to_dict") else x
                                for x in value]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {k: (v.to_dict() if hasattr(v, "to_dict") else v)
                                for k, v in value.items()}
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Return the pretty-printed string representation of the model."""
        return pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Return True if *other* is a FiatDepData with equal field values."""
        if not isinstance(other, FiatDepData):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Return True if the two objects differ (inverse of __eq__)."""
        return not self == other
| [
"vivek0@users.noreply.github.com"
] | vivek0@users.noreply.github.com |
24d3c36c02a4b47b41d98818cf1b47b210c12c13 | 72b1035d1589d3b77c721c7be4e248bce8cde185 | /pythonapi.py | 9dc0ac5dff54962fc86678d02bdebc952576d5b4 | [] | no_license | marciopocebon/fast2sms | 2632f5c0594392604302b2d9a20f005ddecde58a | 617124b86fc46526d8312c43f27cdaa6afaec5f9 | refs/heads/master | 2020-09-14T06:09:54.435634 | 2019-04-14T16:26:19 | 2019-04-14T16:26:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,810 | py | import requests
import json
def forward_to_api(url,mobile,message,api_key,flash,count):
    """Send one SMS through the Fast2SMS free-route endpoint (HTTP POST).

    url      -- endpoint to POST to
    mobile   -- destination number(s)
    message  -- text body
    api_key  -- value sent in the Authorization header
    flash    -- 1 for flash SMS, 0 for normal
    count    -- 1-based index of the key being tried (logging only)
    Returns True on success, False otherwise.
    """
    payload = {'senderId': 'FSTSMS', 'mobile': mobile, 'message': message, 'flash': flash}
    headers = {'Authorization': api_key}
    # Python 2 print statement (this module is Python 2 code).
    print '\n-- Trying to send SMS via API ['+str(count)+'] --'
    response = requests.request("POST", url, data=payload, headers=headers)
    if response.json()['return'] != True:
        return False
    print '<< '+response.json()['message']+' >>'
    return response.json()['return']
def forward_to_paid_api(url,mobile,message,api_key,flash,count):
    """Send one SMS through the Fast2SMS paid bulk endpoint (HTTP GET).

    Same parameters as forward_to_api; returns the API's 'return' flag.
    """
    headers = {'cache-control': "no-cache"}
    querystring = {"authorization":api_key,"sender_id":"FSTSMS","message":message,"language":"english","route":"p","numbers":mobile,"flash":flash}
    print '\n-- Trying to send SMS via Paid API ['+str(count)+'] --'
    response = requests.request("GET", url, headers=headers, params=querystring)
    # NOTE(review): unlike forward_to_api there is no failure guard here; a
    # response without a 'message' key would raise -- confirm the paid API
    # always returns one.
    print '<< '+response.json()['message']+' >>'
    return response.json()['return']
def send_sms(sms_data):
    """Send sms_data = (phone, message) via Fast2SMS, trying each API key.

    Returns 'SUCCESS' as soon as one key works, 'ERROR' if all fail.
    """
    phone = sms_data[0]
    message = sms_data[1]
    allowed_sms_length = 149
    #Trim Message length to 160-11 = 149 characters#
    # NOTE(review): over-length messages are cut to 145 chars + '[..]'
    # (= 149 total), slightly below the stated 149-char body limit --
    # confirm the 145 is intended.
    if len(message) > allowed_sms_length:
        message = message[0:145]
        message+='[..]'
    print '--> Sending SMS to '+str(phone)
    number = str(phone)
    api_keys = ["API key1", # Your free msgs API key
                "API key2"] # Your paid msgs API key
    url = "https://www.fast2sms.com/api/sms/free"
    for key in api_keys:
        count = api_keys.index(key)+1
        # NOTE(review): with only two keys, index(key) is never > 1, so the
        # paid-route branch below is unreachable as written -- confirm
        # whether the comparison should be >= 1.
        if api_keys.index(key)>1:
            url = "https://www.fast2sms.com/dev/bulk"
            sent_status = forward_to_paid_api(url,number,message,key,0,count)
        else:
            sent_status = forward_to_api(url,number,message,key,0,count)
        if sent_status:
            return 'SUCCESS'
        else:
            print '-- SMS was not sent. Retrying... --'
            continue
    if sent_status == False:
        return 'ERROR'
| [
"noreply@github.com"
] | marciopocebon.noreply@github.com |
790d12fdd35a10bb8f4f501ba6b96fef01d9ee64 | bc441bb06b8948288f110af63feda4e798f30225 | /resource_monitor_sdk/model/inspection/collector_pb2.pyi | fd9fb083c6ce2a16e062f751d97be2910a5a5a97 | [
"Apache-2.0"
] | permissive | easyopsapis/easyops-api-python | 23204f8846a332c30f5f3ff627bf220940137b6b | adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0 | refs/heads/master | 2020-06-26T23:38:27.308803 | 2020-06-16T07:25:41 | 2020-06-16T07:25:41 | 199,773,131 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,520 | pyi | # @generated by generate_proto_mypy_stubs.py. Do not edit!
import sys
from google.protobuf.descriptor import (
Descriptor as google___protobuf___descriptor___Descriptor,
)
from google.protobuf.internal.containers import (
RepeatedCompositeFieldContainer as google___protobuf___internal___containers___RepeatedCompositeFieldContainer,
)
from google.protobuf.message import (
Message as google___protobuf___message___Message,
)
from resource_monitor_sdk.model.inspection.arg_pb2 import (
InspectionArg as resource_monitor_sdk___model___inspection___arg_pb2___InspectionArg,
)
from typing import (
Iterable as typing___Iterable,
Optional as typing___Optional,
Text as typing___Text,
Union as typing___Union,
)
from typing_extensions import (
Literal as typing_extensions___Literal,
)
# Aliases that protect the stub against user code shadowing the builtins.
builtin___bool = bool
builtin___bytes = bytes
builtin___float = float
builtin___int = int

if sys.version_info < (3,):
    # Python 2-only builtins used in the Python 2 FromString signature.
    builtin___buffer = buffer
    builtin___unicode = unicode
class InspectionCollector(google___protobuf___message___Message):
    """Generated mypy stub for the InspectionCollector protobuf message.

    Scalar string fields: id, name, content, script; plus a repeated
    `args` field of InspectionArg sub-messages.
    """
    DESCRIPTOR: google___protobuf___descriptor___Descriptor = ...
    id = ... # type: typing___Text
    name = ... # type: typing___Text
    content = ... # type: typing___Text
    script = ... # type: typing___Text

    @property
    def args(self) -> google___protobuf___internal___containers___RepeatedCompositeFieldContainer[resource_monitor_sdk___model___inspection___arg_pb2___InspectionArg]: ...

    def __init__(self,
        *,
        id : typing___Optional[typing___Text] = None,
        name : typing___Optional[typing___Text] = None,
        content : typing___Optional[typing___Text] = None,
        script : typing___Optional[typing___Text] = None,
        args : typing___Optional[typing___Iterable[resource_monitor_sdk___model___inspection___arg_pb2___InspectionArg]] = None,
        ) -> None: ...
    # FromString accepts extra buffer types on Python 2.
    if sys.version_info >= (3,):
        @classmethod
        def FromString(cls, s: builtin___bytes) -> InspectionCollector: ...
    else:
        @classmethod
        def FromString(cls, s: typing___Union[builtin___bytes, builtin___buffer, builtin___unicode]) -> InspectionCollector: ...
    def MergeFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
    def CopyFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
    def ClearField(self, field_name: typing_extensions___Literal[u"args",b"args",u"content",b"content",u"id",b"id",u"name",b"name",u"script",b"script"]) -> None: ...
| [
"service@easyops.cn"
] | service@easyops.cn |
b5912e62cf946057ada2c318246c5d9ad9ed2c4b | f82757475ea13965581c2147ff57123b361c5d62 | /gi-stubs/repository/UDisks/ObjectSkeleton.py | 12e7b23b75e5111155dd62f68424e87435f13d71 | [] | no_license | ttys3/pygobject-stubs | 9b15d1b473db06f47e5ffba5ad0a31d6d1becb57 | d0e6e93399212aada4386d2ce80344eb9a31db48 | refs/heads/master | 2022-09-23T12:58:44.526554 | 2020-06-06T04:15:00 | 2020-06-06T04:15:00 | 269,693,287 | 8 | 2 | null | 2020-06-05T15:57:54 | 2020-06-05T15:57:54 | null | UTF-8 | Python | false | false | 21,125 | py | # encoding: utf-8
# module gi.repository.UDisks
# from /usr/lib64/girepository-1.0/UDisks-2.0.typelib
# by generator 1.147
"""
An object which wraps an introspection typelib.
This wrapping creates a python module like representation of the typelib
using gi repository as a foundation. Accessing attributes of the module
will dynamically pull them in and create wrappers for the members.
These members are then cached on this introspection module.
"""
# imports
import gi as __gi
import gi.overrides.Gio as __gi_overrides_Gio
import gi.overrides.GObject as __gi_overrides_GObject
import gi.repository.Gio as __gi_repository_Gio
import gobject as __gobject
from .Object import Object
class ObjectSkeleton(__gi_repository_Gio.DBusObjectSkeleton, Object):
"""
:Constructors:
::
ObjectSkeleton(**properties)
new(object_path:str) -> UDisks.ObjectSkeleton
"""
def add_interface(self, interface_): # real signature unknown; restored from __doc__
""" add_interface(self, interface_:Gio.DBusInterfaceSkeleton) """
pass
def bind_property(self, *args, **kwargs): # real signature unknown
pass
def bind_property_full(self, *args, **kargs): # reliably restored by inspect
# no doc
pass
def chain(self, *args, **kwargs): # real signature unknown
pass
def compat_control(self, *args, **kargs): # reliably restored by inspect
# no doc
pass
def connect(self, *args, **kwargs): # real signature unknown
pass
def connect_after(self, *args, **kwargs): # real signature unknown
pass
def connect_data(self, detailed_signal, handler, *data, **kwargs): # reliably restored by inspect
"""
Connect a callback to the given signal with optional user data.
:param str detailed_signal:
A detailed signal to connect to.
:param callable handler:
Callback handler to connect to the signal.
:param *data:
Variable data which is passed through to the signal handler.
:param GObject.ConnectFlags connect_flags:
Flags used for connection options.
:returns:
A signal id which can be used with disconnect.
"""
pass
def connect_object(self, *args, **kwargs): # real signature unknown
pass
def connect_object_after(self, *args, **kwargs): # real signature unknown
pass
def disconnect(*args, **kwargs): # reliably restored by inspect
""" signal_handler_disconnect(instance:GObject.Object, handler_id:int) """
pass
def disconnect_by_func(self, *args, **kwargs): # real signature unknown
pass
def do_authorize_method(self, *args, **kwargs): # real signature unknown
""" authorize_method(self, interface_:Gio.DBusInterfaceSkeleton, invocation:Gio.DBusMethodInvocation) -> bool """
pass
def emit(self, *args, **kwargs): # real signature unknown
pass
def emit_stop_by_name(self, detailed_signal): # reliably restored by inspect
""" Deprecated, please use stop_emission_by_name. """
pass
def find_property(self, property_name): # real signature unknown; restored from __doc__
""" find_property(self, property_name:str) -> GObject.ParamSpec """
pass
def flush(self): # real signature unknown; restored from __doc__
""" flush(self) """
pass
def force_floating(self, *args, **kargs): # reliably restored by inspect
# no doc
pass
def freeze_notify(self): # reliably restored by inspect
"""
Freezes the object's property-changed notification queue.
:returns:
A context manager which optionally can be used to
automatically thaw notifications.
This will freeze the object so that "notify" signals are blocked until
the thaw_notify() method is called.
.. code-block:: python
with obj.freeze_notify():
pass
"""
pass
def getv(self, names, values): # real signature unknown; restored from __doc__
""" getv(self, names:list, values:list) """
pass
def get_block(self): # real signature unknown; restored from __doc__
""" get_block(self) -> UDisks.Block or None """
pass
def get_data(self, *args, **kargs): # reliably restored by inspect
# no doc
pass
def get_drive(self): # real signature unknown; restored from __doc__
""" get_drive(self) -> UDisks.Drive or None """
pass
def get_drive_ata(self): # real signature unknown; restored from __doc__
""" get_drive_ata(self) -> UDisks.DriveAta or None """
pass
def get_encrypted(self): # real signature unknown; restored from __doc__
""" get_encrypted(self) -> UDisks.Encrypted or None """
pass
def get_filesystem(self): # real signature unknown; restored from __doc__
""" get_filesystem(self) -> UDisks.Filesystem or None """
pass
def get_interface(self, interface_name): # real signature unknown; restored from __doc__
""" get_interface(self, interface_name:str) -> Gio.DBusInterface """
pass
def get_interfaces(self): # real signature unknown; restored from __doc__
""" get_interfaces(self) -> list """
return []
def get_job(self): # real signature unknown; restored from __doc__
""" get_job(self) -> UDisks.Job or None """
pass
def get_loop(self): # real signature unknown; restored from __doc__
""" get_loop(self) -> UDisks.Loop or None """
pass
def get_manager(self): # real signature unknown; restored from __doc__
""" get_manager(self) -> UDisks.Manager or None """
pass
def get_mdraid(self): # real signature unknown; restored from __doc__
""" get_mdraid(self) -> UDisks.MDRaid or None """
pass
def get_object_path(self): # real signature unknown; restored from __doc__
""" get_object_path(self) -> str """
return ""
def get_partition(self): # real signature unknown; restored from __doc__
""" get_partition(self) -> UDisks.Partition or None """
pass
def get_partition_table(self): # real signature unknown; restored from __doc__
""" get_partition_table(self) -> UDisks.PartitionTable or None """
pass
def get_properties(self, *args, **kwargs): # real signature unknown
pass
def get_property(self, *args, **kwargs): # real signature unknown
pass
def get_qdata(self, *args, **kargs): # reliably restored by inspect
# no doc
pass
def get_swapspace(self): # real signature unknown; restored from __doc__
""" get_swapspace(self) -> UDisks.Swapspace or None """
pass
def handler_block(obj, handler_id): # reliably restored by inspect
"""
Blocks the signal handler from being invoked until
handler_unblock() is called.
:param GObject.Object obj:
Object instance to block handlers for.
:param int handler_id:
Id of signal to block.
:returns:
A context manager which optionally can be used to
automatically unblock the handler:
.. code-block:: python
with GObject.signal_handler_block(obj, id):
pass
"""
pass
def handler_block_by_func(self, *args, **kwargs): # real signature unknown
pass
def handler_disconnect(*args, **kwargs): # reliably restored by inspect
""" signal_handler_disconnect(instance:GObject.Object, handler_id:int) """
pass
def handler_is_connected(*args, **kwargs): # reliably restored by inspect
""" signal_handler_is_connected(instance:GObject.Object, handler_id:int) -> bool """
pass
def handler_unblock(*args, **kwargs): # reliably restored by inspect
""" signal_handler_unblock(instance:GObject.Object, handler_id:int) """
pass
def handler_unblock_by_func(self, *args, **kwargs): # real signature unknown
pass
def install_properties(self, pspecs): # real signature unknown; restored from __doc__
""" install_properties(self, pspecs:list) """
pass
def install_property(self, property_id, pspec): # real signature unknown; restored from __doc__
""" install_property(self, property_id:int, pspec:GObject.ParamSpec) """
pass
def interface_find_property(self, *args, **kargs): # reliably restored by inspect
# no doc
pass
def interface_install_property(self, *args, **kargs): # reliably restored by inspect
# no doc
pass
def interface_list_properties(self, *args, **kargs): # reliably restored by inspect
# no doc
pass
def is_floating(self): # real signature unknown; restored from __doc__
""" is_floating(self) -> bool """
return False
def list_properties(self): # real signature unknown; restored from __doc__
""" list_properties(self) -> list, n_properties:int """
return []
def new(self, object_path): # real signature unknown; restored from __doc__
""" new(object_path:str) -> UDisks.ObjectSkeleton """
pass
def newv(self, object_type, parameters): # real signature unknown; restored from __doc__
""" newv(object_type:GType, parameters:list) -> GObject.Object """
pass
def notify(self, property_name): # real signature unknown; restored from __doc__
""" notify(self, property_name:str) """
pass
def notify_by_pspec(self, *args, **kargs): # reliably restored by inspect
# no doc
pass
def override_property(self, property_id, name): # real signature unknown; restored from __doc__
""" override_property(self, property_id:int, name:str) """
pass
def ref(self, *args, **kargs): # reliably restored by inspect
# no doc
pass
def ref_sink(self, *args, **kargs): # reliably restored by inspect
# no doc
pass
def remove_interface(self, interface_): # real signature unknown; restored from __doc__
""" remove_interface(self, interface_:Gio.DBusInterfaceSkeleton) """
pass
def remove_interface_by_name(self, interface_name): # real signature unknown; restored from __doc__
""" remove_interface_by_name(self, interface_name:str) """
pass
def replace_data(self, *args, **kargs): # reliably restored by inspect
# no doc
pass
def replace_qdata(self, *args, **kargs): # reliably restored by inspect
# no doc
pass
def run_dispose(self, *args, **kargs): # reliably restored by inspect
# no doc
pass
def set_block(self, interface_=None): # real signature unknown; restored from __doc__
""" set_block(self, interface_:UDisks.Block=None) """
pass
def set_data(self, *args, **kargs): # reliably restored by inspect
# no doc
pass
def set_drive(self, interface_=None): # real signature unknown; restored from __doc__
""" set_drive(self, interface_:UDisks.Drive=None) """
pass
def set_drive_ata(self, interface_=None): # real signature unknown; restored from __doc__
""" set_drive_ata(self, interface_:UDisks.DriveAta=None) """
pass
def set_encrypted(self, interface_=None): # real signature unknown; restored from __doc__
""" set_encrypted(self, interface_:UDisks.Encrypted=None) """
pass
def set_filesystem(self, interface_=None): # real signature unknown; restored from __doc__
""" set_filesystem(self, interface_:UDisks.Filesystem=None) """
pass
def set_job(self, interface_=None): # real signature unknown; restored from __doc__
""" set_job(self, interface_:UDisks.Job=None) """
pass
def set_loop(self, interface_=None): # real signature unknown; restored from __doc__
""" set_loop(self, interface_:UDisks.Loop=None) """
pass
def set_manager(self, interface_=None): # real signature unknown; restored from __doc__
""" set_manager(self, interface_:UDisks.Manager=None) """
pass
def set_mdraid(self, interface_=None): # real signature unknown; restored from __doc__
""" set_mdraid(self, interface_:UDisks.MDRaid=None) """
pass
def set_object_path(self, object_path): # real signature unknown; restored from __doc__
""" set_object_path(self, object_path:str) """
pass
def set_partition(self, interface_=None): # real signature unknown; restored from __doc__
""" set_partition(self, interface_:UDisks.Partition=None) """
pass
def set_partition_table(self, interface_=None): # real signature unknown; restored from __doc__
""" set_partition_table(self, interface_:UDisks.PartitionTable=None) """
pass
def set_properties(self, *args, **kwargs): # real signature unknown
pass
def set_property(self, *args, **kwargs): # real signature unknown
pass
def set_swapspace(self, interface_=None): # real signature unknown; restored from __doc__
""" set_swapspace(self, interface_:UDisks.Swapspace=None) """
pass
def steal_data(self, *args, **kargs): # reliably restored by inspect
# no doc
pass
def steal_qdata(self, *args, **kargs): # reliably restored by inspect
# no doc
pass
def stop_emission(self, detailed_signal): # reliably restored by inspect
""" Deprecated, please use stop_emission_by_name. """
pass
def stop_emission_by_name(*args, **kwargs): # reliably restored by inspect
""" signal_stop_emission_by_name(instance:GObject.Object, detailed_signal:str) """
pass
def thaw_notify(self): # real signature unknown; restored from __doc__
""" thaw_notify(self) """
pass
def unref(self, *args, **kargs): # reliably restored by inspect
# no doc
pass
def watch_closure(self, *args, **kargs): # reliably restored by inspect
# no doc
pass
def weak_ref(self, *args, **kwargs): # real signature unknown
pass
def _force_floating(self, *args, **kwargs): # real signature unknown
""" force_floating(self) """
pass
def _ref(self, *args, **kwargs): # real signature unknown
""" ref(self) -> GObject.Object """
pass
def _ref_sink(self, *args, **kwargs): # real signature unknown
""" ref_sink(self) -> GObject.Object """
pass
def _unref(self, *args, **kwargs): # real signature unknown
""" unref(self) """
pass
def _unsupported_data_method(self, *args, **kargs): # reliably restored by inspect
# no doc
pass
def _unsupported_method(self, *args, **kargs): # reliably restored by inspect
# no doc
pass
def __copy__(self, *args, **kwargs): # real signature unknown
pass
def __deepcopy__(self, *args, **kwargs): # real signature unknown
pass
def __delattr__(self, *args, **kwargs): # real signature unknown
""" Implement delattr(self, name). """
pass
def __dir__(self, *args, **kwargs): # real signature unknown
""" Default dir() implementation. """
pass
def __eq__(self, *args, **kwargs): # real signature unknown
""" Return self==value. """
pass
def __format__(self, *args, **kwargs): # real signature unknown
""" Default object formatter. """
pass
def __getattribute__(self, *args, **kwargs): # real signature unknown
""" Return getattr(self, name). """
pass
def __ge__(self, *args, **kwargs): # real signature unknown
""" Return self>=value. """
pass
def __gt__(self, *args, **kwargs): # real signature unknown
""" Return self>value. """
pass
def __hash__(self, *args, **kwargs): # real signature unknown
""" Return hash(self). """
pass
def __init_subclass__(self, *args, **kwargs): # real signature unknown
"""
This method is called when a class is subclassed.
The default implementation does nothing. It may be
overridden to extend subclasses.
"""
pass
def __init__(self, **properties): # real signature unknown; restored from __doc__
pass
def __le__(self, *args, **kwargs): # real signature unknown
""" Return self<=value. """
pass
def __lt__(self, *args, **kwargs): # real signature unknown
""" Return self<value. """
pass
@staticmethod # known case of __new__
def __new__(*args, **kwargs): # real signature unknown
""" Create and return a new object. See help(type) for accurate signature. """
pass
def __ne__(self, *args, **kwargs): # real signature unknown
""" Return self!=value. """
pass
def __reduce_ex__(self, *args, **kwargs): # real signature unknown
""" Helper for pickle. """
pass
def __reduce__(self, *args, **kwargs): # real signature unknown
""" Helper for pickle. """
pass
def __repr__(self, *args, **kwargs): # real signature unknown
""" Return repr(self). """
pass
def __setattr__(self, *args, **kwargs): # real signature unknown
""" Implement setattr(self, name, value). """
pass
def __sizeof__(self, *args, **kwargs): # real signature unknown
""" Size of object in memory, in bytes. """
pass
def __str__(self, *args, **kwargs): # real signature unknown
""" Return str(self). """
pass
def __subclasshook__(self, *args, **kwargs): # real signature unknown
"""
Abstract classes can override this to customize issubclass().
This is invoked early on by abc.ABCMeta.__subclasscheck__().
It should return True, False or NotImplemented. If it returns
NotImplemented, the normal algorithm is used. Otherwise, it
overrides the normal algorithm (and the outcome is cached).
"""
pass
def __weakref__(self, *args, **kwargs): # real signature unknown
pass
g_type_instance = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
parent_instance = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
priv = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
qdata = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
ref_count = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
__gpointer__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
__grefcount__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
props = None # (!) real value is '<gi._gi.GProps object at 0x7f13a7e9fee0>'
__class__ = None # (!) real value is "<class 'gi.types.GObjectMeta'>"
__dict__ = None # (!) real value is "mappingproxy({'__info__': ObjectInfo(ObjectSkeleton), '__module__': 'gi.repository.UDisks', '__gtype__': <GType UDisksObjectSkeleton (93969722803872)>, '__doc__': None, '__gsignals__': {}, 'new': gi.FunctionInfo(new), 'set_block': gi.FunctionInfo(set_block), 'set_drive': gi.FunctionInfo(set_drive), 'set_drive_ata': gi.FunctionInfo(set_drive_ata), 'set_encrypted': gi.FunctionInfo(set_encrypted), 'set_filesystem': gi.FunctionInfo(set_filesystem), 'set_job': gi.FunctionInfo(set_job), 'set_loop': gi.FunctionInfo(set_loop), 'set_manager': gi.FunctionInfo(set_manager), 'set_mdraid': gi.FunctionInfo(set_mdraid), 'set_partition': gi.FunctionInfo(set_partition), 'set_partition_table': gi.FunctionInfo(set_partition_table), 'set_swapspace': gi.FunctionInfo(set_swapspace), 'parent_instance': <property object at 0x7f13a806d450>, 'priv': <property object at 0x7f13a806d540>})"
__gdoc__ = 'Object UDisksObjectSkeleton\n\nSignals from GDBusObject:\n interface-added (GDBusInterface)\n interface-removed (GDBusInterface)\n\nSignals from GDBusObjectSkeleton:\n authorize-method (GDBusInterfaceSkeleton, GDBusMethodInvocation) -> gboolean\n\nProperties from GDBusObjectSkeleton:\n g-object-path -> gchararray: Object Path\n The object path where the object is exported\n\nSignals from GDBusObject:\n interface-added (GDBusInterface)\n interface-removed (GDBusInterface)\n\nSignals from GObject:\n notify (GParam)\n\n'
__gsignals__ = {}
__gtype__ = None # (!) real value is '<GType UDisksObjectSkeleton (93969722803872)>'
__info__ = ObjectInfo(ObjectSkeleton)
| [
"ttys3@outlook.com"
] | ttys3@outlook.com |
b2e93e0c9728d11d1d9ffc5ba84b45a1de565b61 | 7238e16c65051cafd41d6a973bd440598589b10c | /cella/migrations/0002_recoding.py | 5c0c82b92bbeb5515555233f5600befb9aa1de9f | [] | no_license | mnogoruk/TraductorCella | 06ff28b422d3fcc86be216a23ae311d8082f36a7 | 21ee676dc8d3c467454872436bee5df5c023beb6 | refs/heads/master | 2023-03-15T09:43:43.874227 | 2021-03-23T17:08:10 | 2021-03-23T17:08:10 | 330,973,431 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 778 | py | # Generated by Django 3.1.5 on 2021-03-18 10:40
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated Django migration: adds the Recoding model, a
    # timestamped free-text message optionally linked to an Operator.

    dependencies = [
        ('authentication', '0001_initial'),
        ('cella', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Recoding',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('message', models.TextField()),
                # SET_NULL keeps recodings when their operator is deleted.
                ('operator', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='authentication.operator')),
            ],
        ),
    ]
| [
"lionless072@gmail.com"
] | lionless072@gmail.com |
c298d73d0f66373ea5e1201d026b6b041489e7c5 | 4331b28f22a2efb12d462ae2a8270a9f666b0df1 | /.history/dvdstore/webapp/form_20190914124144.py | 51f49dd87c243928340315bdc38c041086f3c49f | [] | no_license | ZiyaadLakay/csc312.group.project | ba772a905e0841b17478eae7e14e43d8b078a95d | 9cdd9068b5e24980c59a53595a5d513c2e738a5e | refs/heads/master | 2020-07-26T23:30:22.542450 | 2019-09-16T11:46:41 | 2019-09-16T11:46:41 | 200,703,160 | 0 | 0 | null | 2019-08-05T17:52:37 | 2019-08-05T17:52:37 | null | UTF-8 | Python | false | false | 875 | py | from django import forms
from .models import DVD, Customer
class DocumentForm(forms.ModelForm):
class Meta:
model = DVD
fields = ('Title','year','genre','InStock','Synopsis','BookingPickup' ,'NumOfTimesRented','ImageDVD')
class customerForm:
class Meta:
model= Customer
#user = User.objects.create_user(username=username, password=password1, email=email, first_name=first_name, last_name=last_name)
fields = ('username','password','email','first_name','last_name','phone_number','address','identification')
class customerForm:
class Meta:
model= Customer
#user = User.objects.create_user(username=username, password=password1, email=email, first_name=first_name, last_name=last_name)
fields = ('username','password','email','first_name','last_name','phone_number','address','identification')
| [
"uzairjoneswolf@gmail.com"
] | uzairjoneswolf@gmail.com |
513eb3ce39cc001993bc72f82d1bb7c5faaf1a94 | a9063fd669162d4ce0e1d6cd2e35974274851547 | /swagger_client/models/tsp_accounts_list1.py | 413e6c2c8c5b80d3ab26a0e7afdb03ee2977b8fb | [] | no_license | rootalley/py-zoom-api | 9d29a8c750e110f7bd9b65ff7301af27e8518a3d | bfebf3aa7b714dcac78be7c0affb9050bbce8641 | refs/heads/master | 2022-11-07T14:09:59.134600 | 2020-06-20T18:13:50 | 2020-06-20T18:13:50 | 273,760,906 | 1 | 3 | null | null | null | null | UTF-8 | Python | false | false | 7,034 | py | # coding: utf-8
"""
Zoom API
The Zoom API allows developers to safely and securely access information from Zoom. You can use this API to build private services or public applications on the [Zoom App Marketplace](http://marketplace.zoom.us). To learn how to get your credentials and create private/public applications, read our [Authorization Guide](https://marketplace.zoom.us/docs/guides/authorization/credentials). All endpoints are available via `https` and are located at `api.zoom.us/v2/`. For instance you can list all users on an account via `https://api.zoom.us/v2/users/`. # noqa: E501
OpenAPI spec version: 2.0.0
Contact: developersupport@zoom.us
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class TSPAccountsList1(object):
    """NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Attribute name -> swagger-declared type; drives serialization in to_dict().
    swagger_types = {
        'conference_code': 'str',
        'leader_pin': 'str',
        'dial_in_numbers': 'list[UsersuserIdtspDialInNumbers]',
        'tsp_bridge': 'str'
    }

    # Attribute name -> JSON key used on the wire (identical here).
    attribute_map = {
        'conference_code': 'conference_code',
        'leader_pin': 'leader_pin',
        'dial_in_numbers': 'dial_in_numbers',
        'tsp_bridge': 'tsp_bridge'
    }

    def __init__(self, conference_code=None, leader_pin=None, dial_in_numbers=None, tsp_bridge=None):  # noqa: E501
        """TSPAccountsList1 - a model defined in Swagger"""  # noqa: E501
        self._conference_code = None
        self._leader_pin = None
        self._dial_in_numbers = None
        self._tsp_bridge = None
        self.discriminator = None  # no polymorphic subtypes for this model
        # conference_code and leader_pin are required: the setters below
        # raise ValueError when given None.
        self.conference_code = conference_code
        self.leader_pin = leader_pin
        if dial_in_numbers is not None:
            self.dial_in_numbers = dial_in_numbers
        if tsp_bridge is not None:
            self.tsp_bridge = tsp_bridge

    @property
    def conference_code(self):
        """Gets the conference_code of this TSPAccountsList1.  # noqa: E501

        Conference code: numeric value, length is less than 16.  # noqa: E501

        :return: The conference_code of this TSPAccountsList1.  # noqa: E501
        :rtype: str
        """
        return self._conference_code

    @conference_code.setter
    def conference_code(self, conference_code):
        """Sets the conference_code of this TSPAccountsList1.

        Conference code: numeric value, length is less than 16.  # noqa: E501

        :param conference_code: The conference_code of this TSPAccountsList1.  # noqa: E501
        :type: str
        """
        # Required field: reject None explicitly.
        if conference_code is None:
            raise ValueError("Invalid value for `conference_code`, must not be `None`")  # noqa: E501

        self._conference_code = conference_code

    @property
    def leader_pin(self):
        """Gets the leader_pin of this TSPAccountsList1.  # noqa: E501

        Leader PIN: numeric value, length is less than 16.  # noqa: E501

        :return: The leader_pin of this TSPAccountsList1.  # noqa: E501
        :rtype: str
        """
        return self._leader_pin

    @leader_pin.setter
    def leader_pin(self, leader_pin):
        """Sets the leader_pin of this TSPAccountsList1.

        Leader PIN: numeric value, length is less than 16.  # noqa: E501

        :param leader_pin: The leader_pin of this TSPAccountsList1.  # noqa: E501
        :type: str
        """
        # Required field: reject None explicitly.
        if leader_pin is None:
            raise ValueError("Invalid value for `leader_pin`, must not be `None`")  # noqa: E501

        self._leader_pin = leader_pin

    @property
    def dial_in_numbers(self):
        """Gets the dial_in_numbers of this TSPAccountsList1.  # noqa: E501

        List of dial in numbers.  # noqa: E501

        :return: The dial_in_numbers of this TSPAccountsList1.  # noqa: E501
        :rtype: list[UsersuserIdtspDialInNumbers]
        """
        return self._dial_in_numbers

    @dial_in_numbers.setter
    def dial_in_numbers(self, dial_in_numbers):
        """Sets the dial_in_numbers of this TSPAccountsList1.

        List of dial in numbers.  # noqa: E501

        :param dial_in_numbers: The dial_in_numbers of this TSPAccountsList1.  # noqa: E501
        :type: list[UsersuserIdtspDialInNumbers]
        """
        self._dial_in_numbers = dial_in_numbers

    @property
    def tsp_bridge(self):
        """Gets the tsp_bridge of this TSPAccountsList1.  # noqa: E501

        Telephony bridge  # noqa: E501

        :return: The tsp_bridge of this TSPAccountsList1.  # noqa: E501
        :rtype: str
        """
        return self._tsp_bridge

    @tsp_bridge.setter
    def tsp_bridge(self, tsp_bridge):
        """Sets the tsp_bridge of this TSPAccountsList1.

        Telephony bridge  # noqa: E501

        :param tsp_bridge: The tsp_bridge of this TSPAccountsList1.  # noqa: E501
        :type: str
        """
        # Enum constraint from the swagger spec; note this also rejects None,
        # so the optional field can only be set to one of the allowed values.
        allowed_values = ["US_TSP_TB", "EU_TSP_TB"]  # noqa: E501
        if tsp_bridge not in allowed_values:
            raise ValueError(
                "Invalid value for `tsp_bridge` ({0}), must be one of {1}"  # noqa: E501
                .format(tsp_bridge, allowed_values)
            )

        self._tsp_bridge = tsp_bridge

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Serialize every declared attribute, recursing into nested models
        # (anything exposing to_dict) inside lists and dicts.
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        if issubclass(TSPAccountsList1, dict):
            for key, value in self.items():
                result[key] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, TSPAccountsList1):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
"github@rootalley.com"
] | github@rootalley.com |
6080401306f6ff3f3b2e6b26eefa0f58cc359af8 | ef1f62cf4e53f856bf763ac0dee73f054518530d | /Week_09/115.Distinct_Subsequences.py | 018e3a33c7c7d88cdacda79eb2ae627bdde78904 | [] | no_license | ZHHJemotion/algorithm008-class01 | 3338af3619d8e1754a62af6a852f517b47298d95 | 5bb7d2b74110df0b5788b94c69582552d711563a | refs/heads/master | 2022-11-12T09:26:24.941738 | 2020-06-30T15:29:20 | 2020-06-30T15:29:20 | 255,102,230 | 0 | 0 | null | 2020-04-12T14:39:17 | 2020-04-12T14:39:17 | null | UTF-8 | Python | false | false | 1,702 | py | # Given a string S and a string T, count the number of distinct subsequences of
# S which equals T.
#
# A subsequence of a string is a new string which is formed from the original s
# tring by deleting some (can be none) of the characters without disturbing the re
# lative positions of the remaining characters. (ie, "ACE" is a subsequence of "AB
# CDE" while "AEC" is not).
#
# It's guaranteed the answer fits on a 32-bit signed integer.
#
# Example 1:
#
#
# Input: S = "rabbbit", T = "rabbit"
# Output: 3
# Explanation:
# As shown below, there are 3 ways you can generate "rabbit" from S.
# (The caret symbol ^ means the chosen letters)
#
# rabbbit
# ^^^^ ^^
# rabbbit
# ^^ ^^^^
# rabbbit
# ^^^ ^^^
#
#
# Example 2:
#
#
# Input: S = "babgbag", T = "bag"
# Output: 5
# Explanation:
# As shown below, there are 5 ways you can generate "bag" from S.
# (The caret symbol ^ means the chosen letters)
#
# babgbag
# ^^ ^
# babgbag
# ^^ ^
# babgbag
# ^ ^^
# babgbag
# ^ ^^
# babgbag
# ^^^
#
# Related Topics String Dynamic Programming
# leetcode submit region begin(Prohibit modification and deletion)
class Solution:
    def numDistinct(self, s: str, t: str) -> int:
        """Count the distinct subsequences of ``s`` that equal ``t``.

        Space-optimized dynamic programming: ``dp[i]`` is the number of
        ways to build the prefix ``t[:i]`` from the characters of ``s``
        scanned so far.  Updating ``i`` in reverse order lets a single
        1-D array stand in for the classic 2-D table, because ``dp[i-1]``
        still holds the value from the previous column of ``s``.

        :param s: source string to pick characters from
        :param t: target string to form
        :return: number of distinct subsequences of ``s`` equal to ``t``
        Time O(len(s) * len(t)), space O(len(t)) instead of O(len(s)*len(t)).
        """
        m = len(t)
        # dp[0] == 1: the empty prefix of t can always be formed exactly once.
        dp = [1] + [0] * m
        for ch in s:
            # Reverse traversal so dp[i - 1] is not yet overwritten this pass.
            for i in range(m, 0, -1):
                if t[i - 1] == ch:
                    dp[i] += dp[i - 1]
        return dp[m]
# leetcode submit region end(Prohibit modification and deletion)
| [
"zhhjemotion@hotmail.com"
] | zhhjemotion@hotmail.com |
738ed00c033335e21488fbe3f9265c920897121e | 87a4bbc643c862dc8ded6e5e5bda065366ee322c | /src/python/WMCore/MicroService/Unified/RSEQuotas.py | 8a66b415aa257560639467b16d4ebd1f21f35121 | [
"Apache-2.0"
] | permissive | ericvaandering/WMCore | 650070de98ac07eec2c7ba3a308c3ad3871725f6 | 1aa64fa885d07c9e26946821fcf9487e5aac5dbe | refs/heads/master | 2021-05-23T23:11:49.062198 | 2020-09-04T07:01:15 | 2020-09-04T07:01:15 | 5,404,330 | 0 | 0 | null | 2015-07-08T16:27:03 | 2012-08-13T20:27:48 | Python | UTF-8 | Python | false | false | 10,950 | py | """
Non thread-safe object which provides all the RSE/PNN information
required for automatic data placement.
It can also communicate with other data management tools, like
Detox, Rucio and PhEDEx.
"""
from __future__ import division, print_function
from future.utils import viewitems
from WMCore.MicroService.Unified.Common import getDetoxQuota, getMSLogger, gigaBytes, teraBytes
class RSEQuotas(object):
    """
    Class which represents a list of RSEs, their quota and
    their storage usage
    """

    def __init__(self, dataAcct, quotaFraction, useRucio, **kwargs):
        """
        Executes a basic setup, including proper logging.
        :param dataAcct: string with either the Rucio account or PhEDEx group name
        :param quotaFraction: float point number representing the fraction of the quota
        :param useRucio: boolean flag used to decide between Rucio and PhEDEx data management
        :param kwargs: the supported keyword arguments are:
          minimumThreshold: integer value defining the minimum available space required
          detoxUrl: string with the detox url (to fetch the quota)
          verbose: logger verbosity
          logger: logger object
        """
        self.dataAcct = dataAcct
        self.quotaFraction = quotaFraction
        self.useRucio = useRucio
        self.minimumSpace = kwargs["minimumThreshold"]
        self.detoxUrl = kwargs.get("detoxUrl", "")
        self.logger = getMSLogger(kwargs.get("verbose"), kwargs.get("logger"))
        msg = "RSEQuotas started with parameters: dataAcct=%s, quotaFraction=%s, "
        msg += "minimumThreshold=%s GB, useRucio=%s"
        self.logger.info(msg, dataAcct, quotaFraction, gigaBytes(self.minimumSpace), self.useRucio)
        self.nodeUsage = {}
        self.availableRSEs = set()
        self.outOfSpaceNodes = set()

    def __str__(self):
        """
        Write out useful information for this object
        :return: a stringified dictionary
        """
        res = {'detoxUrl': self.detoxUrl, 'dataAcct': self.dataAcct,
               'useRucio': self.useRucio, 'quotaFraction': self.quotaFraction,
               'minimumSpace': self.minimumSpace}
        return str(res)

    def getNodeUsage(self):
        """
        Return a dictionary of RSEs and a few storage statistics
        """
        return self.nodeUsage

    def getAvailableRSEs(self):
        """
        Return a list of out-of-space RSE/PNNs
        """
        return self.availableRSEs

    def getOutOfSpaceRSEs(self):
        """
        Return a list of out-of-space RSE/PNNs
        """
        return self.outOfSpaceNodes

    def fetchStorageQuota(self, dataSvcObj):
        """
        Fetch the DataOps quota from Detox. At this stage, we do not do
        any manipulation with the quota value (Unified uses 80% of the quota),
        use it as is!
        :param dataSvcObj: object instance for the Rucio data service
        :return: create an instance cache structure to keep track of quota
          and available storage. The structure is as follows:
          {"pnn_name": {"quota": quota in bytes for the rucio account or phedex group,
                        "bytes_limit": total space for the account/group,
                        "bytes": amount of bytes currently used/archived,
                        "bytes_remaining": space remaining for the acct/group,
                        "quota_avail": a fraction of the quota that we will use}
        NOTE: code extracted/modified from Unified, see `fetch_detox_info` in
        https://github.com/CMSCompOps/WmAgentScripts/blob/master/utils.py#L2514
        """
        # FIXME: besides the 1-line below to clear the data structure, this method
        # will be useless once we migrate to Rucio
        self.nodeUsage.clear()
        if self.useRucio:
            response = dataSvcObj.getAccountLimits(self.dataAcct)
            for rse, quota in viewitems(response):
                # Tape and Export endpoints are never data-placement targets.
                if rse.endswith("_Tape") or rse.endswith("_Export"):
                    continue
                self.nodeUsage.setdefault(rse, {})
                self.nodeUsage[rse] = dict(quota=int(quota),
                                           bytes_limit=int(quota),
                                           bytes=0,
                                           bytes_remaining=int(quota),  # FIXME: always 0
                                           quota_avail=0)
            self.logger.info("Storage quota filled from Rucio")
        else:
            # FIXME: extremely fragile code that has to be replaced by a proper
            # CRIC/Rucio API in the very near future
            info = getDetoxQuota(self.detoxUrl)

            doRead = False
            for line in info:
                if 'DDM Partition:' in line and self.dataAcct in line:
                    doRead = True
                    continue
                elif 'DDM Partition:' in line:
                    doRead = False
                    continue
                elif line.startswith('#'):
                    continue

                if not doRead:
                    continue

                _, quota, _, _, pnn = line.split()

                if pnn.endswith("_MSS") or pnn.endswith("_Export"):
                    continue
                self.nodeUsage.setdefault(pnn, {})
                # convert from TB to bytes
                self.nodeUsage[pnn] = dict(quota=int(quota) * (1000 ** 4),
                                           bytes_limit=0,
                                           bytes=0,
                                           bytes_remaining=0,
                                           quota_avail=0)
            self.logger.info("Storage quota filled from Detox information")

    def fetchStorageUsage(self, dataSvcObj):
        """
        Fetch the storage usage from either Rucio or PhEDEx, which will then
        be used as part of the data placement mechanism.
        Also calculate the available quota - given the configurable quota
        fraction - and mark RSEs with less than 1TB available as NOT usable.
        :param dataSvcObj: object instance for the data service

        Keys definition is:
         * quota: the PhEDEx group quota provided by Detox
         * bytes_limit: either the PhEDEx quota or the account quota from Rucio
         * bytes: data volume placed by Rucio (or subscribed in PhEDEx)
         * bytes_remaining: storage available for our account/group
         * quota_avail: space left (in bytes) that we can use for data placement
        :return: update our cache in place with up-to-date values, in the format of:
          {"pnn_name": {"bytes_limit": total space for the account/group,
                        "bytes": amount of bytes currently used/archived,
                        "bytes_remaining": space remaining for the acct/group}
        """
        if self.useRucio:
            self.logger.debug("Using Rucio for storage usage, with acct: %s", self.dataAcct)
            for item in dataSvcObj.getAccountUsage(self.dataAcct):
                if item['rse'] not in self.nodeUsage:
                    self.logger.warning("Rucio RSE: %s has data usage but no quota available.", item['rse'])
                    continue
                # bytes_limit is always 0, so skip it and use whatever came from the limits call
                # bytes_remaining is always negative, so calculate it based on the limits
                quota = self.nodeUsage[item['rse']]['quota']
                self.nodeUsage[item['rse']].update({'bytes': item['bytes'],
                                                    'bytes_remaining': quota - item['bytes']})
        else:
            self.logger.debug("Using PhEDEx for storage usage, with acct: %s", self.dataAcct)
            # for PhEDEx, we have also to remap the key's to keep in sync with Rucio
            res = dataSvcObj.getGroupUsage(group=self.dataAcct)
            for item in res['phedex']['node']:
                if item['name'] not in self.nodeUsage:
                    continue
                quota = self.nodeUsage[item['name']]['quota']
                self.nodeUsage[item['name']].update({'bytes_limit': quota,
                                                     'bytes': item['group'][0]['dest_bytes'],
                                                     'bytes_remaining': quota - item['group'][0]['dest_bytes']})

    def evaluateQuotaExceeded(self):
        """
        Goes through every single site, their quota and their remaining
        storage; and mark those with less than X TB available (1TB at the
        moment) as not eligible to receive data
        :return: updates instance structures in place
        """
        self.availableRSEs.clear()
        self.outOfSpaceNodes.clear()
        # given a configurable sub-fraction of our quota, recalculate how much storage is left
        for rse, info in self.nodeUsage.items():
            quotaAvail = info['quota'] * self.quotaFraction
            info['quota_avail'] = min(quotaAvail, info['bytes_remaining'])
            if info['quota_avail'] < self.minimumSpace:
                self.outOfSpaceNodes.add(rse)
            else:
                self.availableRSEs.add(rse)
        self.logger.info("Currently %d nodes are out of space.", len(self.outOfSpaceNodes))

    def printQuotaSummary(self):
        """
        Print a summary of the current quotas, space usage and space available
        """
        self.logger.info("Summary of the current quotas in Terabytes:")
        for node in sorted(self.nodeUsage.keys()):
            msg = "  %s:\t\tbytes_limit: %.2f, bytes_used: %.2f, bytes_remaining: %.2f, "
            msg += "quota: %.2f, quota_avail: %.2f"
            self.logger.debug(msg, node, teraBytes(self.nodeUsage[node]['bytes_limit']),
                              teraBytes(self.nodeUsage[node]['bytes']),
                              teraBytes(self.nodeUsage[node]['bytes_remaining']),
                              teraBytes(self.nodeUsage[node]['quota']),
                              teraBytes(self.nodeUsage[node]['quota_avail']))
        self.logger.info("List of RSE's out of quota: %s", self.outOfSpaceNodes)

    def updateNodeUsage(self, node, dataSize):
        """
        Provided a RSE/PNN name and the data size, in bytes, update the node
        storage usage by subtracting it from the current available quota.
        If it gets a list of nodes, the same dataSize is accounted for all
        of them.
        :param node: string with the PNN/RSE (or a list of them)
        :param dataSize: integer with the amount of bytes allocated
        :return: nothing. updates nodeUsage cache
        """
        # BUG FIX: the original tested `isinstance(node, basestring)`, which
        # raises NameError on Python 3 (basestring no longer exists). A plain
        # `str` check keeps the single-node convenience working.
        if isinstance(node, str):
            node = [node]
        if not isinstance(dataSize, int):
            self.logger.error("dataSize needs to be integer, not '%s'!", type(dataSize))
        for rse in node:
            self.nodeUsage[rse]['quota_avail'] -= dataSize
| [
"alan.malta@cern.ch"
] | alan.malta@cern.ch |
302200b67c6ff46459d3cbb1afd4dd9ccceda468 | f576f0ea3725d54bd2551883901b25b863fe6688 | /sdk/healthinsights/azure-healthinsights-cancerprofiling/setup.py | 661aee6d571a51ce540e6ca8bc4357451c10400c | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] | permissive | Azure/azure-sdk-for-python | 02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c | c2ca191e736bb06bfbbbc9493e8325763ba990bb | refs/heads/main | 2023-09-06T09:30:13.135012 | 2023-09-06T01:08:06 | 2023-09-06T01:08:06 | 4,127,088 | 4,046 | 2,755 | MIT | 2023-09-14T21:48:49 | 2012-04-24T16:46:12 | Python | UTF-8 | Python | false | false | 2,517 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) Python Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
# coding: utf-8
import os
import re
from setuptools import setup, find_packages


PACKAGE_NAME = "azure-healthinsights-cancerprofiling"
PACKAGE_PPRINT_NAME = "Cognitive Services Health Insights Cancer Profilings"

# a-b-c => a/b/c
package_folder_path = PACKAGE_NAME.replace("-", "/")

# Version extraction inspired from 'requests'.
# FIX: pass an explicit encoding so the build does not depend on the locale.
with open(os.path.join(package_folder_path, "_version.py"), "r", encoding="utf-8") as fd:
    version = re.search(r'^VERSION\s*=\s*[\'"]([^\'"]*)[\'"]', fd.read(), re.MULTILINE).group(1)

if not version:
    raise RuntimeError("Cannot find version information")

# FIX: the original passed `open("README.md", "r").read()` straight into
# setup(), leaking an unclosed file handle (ResourceWarning) and reading
# with the platform default encoding. Read it once, under a context manager.
with open("README.md", "r", encoding="utf-8") as readme_fd:
    long_description = readme_fd.read()

setup(
    name=PACKAGE_NAME,
    version=version,
    description='Microsoft {} Client Library for Python'.format(PACKAGE_PPRINT_NAME),
    long_description=long_description,
    long_description_content_type="text/markdown",
    license="MIT License",
    author="Microsoft Corporation",
    author_email="azpysdkhelp@microsoft.com",
    url="https://github.com/Azure/azure-sdk-for-python/tree/main/sdk",
    keywords="azure, azure sdk",
    classifiers=[
        "Development Status :: 4 - Beta",
        "Programming Language :: Python",
        "Programming Language :: Python :: 3 :: Only",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
        "Programming Language :: Python :: 3.10",
        "Programming Language :: Python :: 3.11",
        "License :: OSI Approved :: MIT License",
    ],
    zip_safe=False,
    packages=find_packages(
        exclude=[
            "tests",
            # Exclude packages that will be covered by PEP420 or nspkg
            "azure",
            "azure.healthinsights",
        ]
    ),
    include_package_data=True,
    package_data={
        "pytyped": ["py.typed"],
    },
    install_requires=[
        "isodate<1.0.0,>=0.6.1",
        "azure-core<2.0.0,>=1.24.0",
        "typing-extensions>=4.3.0; python_version<'3.8.0'",
    ],
    python_requires=">=3.7",
)
| [
"noreply@github.com"
] | Azure.noreply@github.com |
5de57eff05286734272612ef9ae48a7cdf10e537 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03854/s007978704.py | f030887d6dd78ea3bba4175865619e5e5e2d9b9c | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 125 | py | import re
# Decide whether the input is a concatenation of the four allowed words.
answer = input()
matched = re.match('^(dream|dreamer|erase|eraser)+$', answer)
print('YES' if matched else 'NO')
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
363ccaf11d04a765dfe123aa8108c5efc179a7a3 | fb78fd824e904705fb1ee09db8b3c20cc3902805 | /django-react-app/leads/models.py | 17a10418f8e0184ed0c4e0904cc1bf8146d137be | [] | no_license | Roderich25/mac | 8469833821ac49c539a744db29db5a41d755ad55 | 4f7fe281c88f0199b85d0ac99ce41ffb643d6e82 | refs/heads/master | 2023-01-12T05:55:12.753209 | 2021-11-26T01:16:24 | 2021-11-26T01:16:24 | 207,029,750 | 0 | 0 | null | 2023-01-07T11:49:23 | 2019-09-07T21:51:53 | Jupyter Notebook | UTF-8 | Python | false | false | 277 | py | from django.db import models
class Lead(models.Model):
    # A contact/sales lead: who reached out, how to reach them back, and
    # an optional free-form note.
    name = models.CharField(max_length=100)
    # Unique so the same address cannot be registered twice.
    email = models.EmailField(max_length=100, unique=True)
    message = models.CharField(max_length=500, blank=True)  # optional note
    created_at = models.DateTimeField(auto_now_add=True)  # set once on insert
| [
"rodrigoavilasolis@gmail.com"
] | rodrigoavilasolis@gmail.com |
fddd9876f7cba4560371eeb94013cf83c23decbe | d9da91e23cb394f4f1622755098717840715be23 | /python/jittor/utils/polish.py | a03b8ed03942ed06fcd290bcfa8dfa556ec73ba0 | [
"Apache-2.0"
] | permissive | yezi05/jittor | 4af5e857575aca28c2b0f7dbfb4d8b717ee659ff | 03e8253a363aa74ce68e707ccf1726f30d9d64c5 | refs/heads/master | 2023-02-06T03:01:59.934523 | 2020-12-22T13:58:52 | 2020-12-22T13:58:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,577 | py | #!/usr/bin/python3
# ***************************************************************
# Copyright (c) 2020 Jittor. Authors: Dun Liang <randonlang@gmail.com>. All Rights Reserved.
# This file is subject to the terms and conditions defined in
# file 'LICENSE.txt', which is part of this source code package.
# ***************************************************************
# Polish steps:
# 1. create jittor-polish repo
# 2. copy jittor src into it
# 3. remove files
# 4. commit jittor-polish(check modify and break)
# 5. compile to build/$git_version/$cc_type/$use_cuda/a.obj
# 6. rsync to build-server
# 7. push to github
# 8. push to pip
import os
import jittor as jt
from jittor import LOG
from jittor.compiler import run_cmd
from jittor_utils import translator
import sys
# Resolve the repository layout relative to the installed jittor package.
jittor_path = jt.flags.jittor_path
root_path = os.path.realpath(os.path.join(jt.flags.jittor_path, "..", ".."))
data_path = os.path.join(jittor_path, "src", "__data__")
build_path = os.path.join(data_path, "build")
if not os.path.isdir(build_path):
    os.mkdir(build_path)

# Refuse to polish if the __data__ tree has uncommitted changes: the build
# artifacts below are keyed by the commit hash, so the tree must be clean.
status = run_cmd("git status", data_path)
print(status)
if "working tree clean" not in status:
    LOG.f("__data__ has untracked files")
git_version = run_cmd("git rev-parse HEAD", data_path)
LOG.i("git_version", git_version)
run_cmd(f"git rev-parse HEAD > {jittor_path}/version", data_path)

# remove files
files = jt.compiler.files
data_files = [ name for name in files
    if "__data__" in name
]
LOG.i("data_files", data_files)

# compile data files
from pathlib import Path
home = str(Path.home())

# for cc_type in ["g++", "clang"]:
#     for device in ["cpu", "cuda"]:
for cc_type in ["g++"]:
    for device in ["cpu"]:
        # One pre-built object archive per (commit, compiler, device) triple.
        key = f"{git_version}-{cc_type}-{device}"
        env = f"cache_name=build/{cc_type}/{device} cc_path="
        cname = "g++" if cc_type=="g++" else "clang-8"
        env += cname
        # use core2 arch, avoid using avx instructions
        # TODO: support more archs, such as arm, or use ir(GIMPLE or LLVM)
        env += " cc_flags='-march=core2' "
        if device == "cpu":
            env += "nvcc_path='' "
        elif jt.flags.nvcc_path == "":
            env = "unset nvcc_path && " + env
        # Importing jittor under this environment triggers its JIT build,
        # which produces the per-file object files consumed below.
        # NOTE(review): the import is run twice on purpose, presumably to
        # ensure a fully warm cache -- confirm before simplifying.
        cmd = f"{env} {sys.executable} -c 'import jittor'"
        LOG.i("run cmd:", cmd)
        os.system(cmd)
        LOG.i("run cmd:", cmd)
        os.system(cmd)
        obj_path = home + f"/.cache/jittor/build/{cc_type}/{device}/{cname}/obj_files"
        obj_files = []
        for name in data_files:
            name = name.split("/")[-1]
            fname = f"{obj_path}/{name}.o"
            assert os.path.isfile(fname), fname
            obj_files.append(fname)
        # Merge the per-file objects into a single relocatable object.
        run_cmd(f"ld -r {' '.join(obj_files)} -o {build_path}/{key}.o")

# compress source
# tar -cvzf build/jittor.tgz . --exclude build --exclude .git --exclude .ipynb_checkpoints --exclude __pycache__
# mkdir -p jittor && tar -xvf ./jittor.tgz -C jittor
assert os.system(f"cd {root_path} && tar --exclude=build --exclude=.git --exclude=.ipynb_checkpoints --exclude=__pycache__ --exclude=__data__ --exclude=my --exclude=dist --exclude=.vscode --exclude=.github -cvzf {build_path}/jittor.tgz * ")==0

# rsync to build-server
jittor_web_base_dir = "Documents/jittor-blog/assets/"
jittor_web_build_dir = jittor_web_base_dir
assert os.system(f"rsync -avPu {build_path} jittor-web:{jittor_web_build_dir}")==0
# Trigger the remote post-update hook so the published assets are refreshed.
assert os.system(f"ssh jittor-web Documents/jittor-blog.git/hooks/post-update")==0

# sys.exit(0)
# push to github
# assert os.system(f"cd {polish_path} && git push -f origin master")==0
# push to pip | [
"randonlang@gmail.com"
] | randonlang@gmail.com |
efe621adfaa9b8a93bd44ee3a70caffb919035cf | 23f6dbacd9b98fdfd08a6f358b876d3d371fc8f6 | /rootfs/usr/share/pyshared/desktopcouch/pair/__init__.py | fc4788fa401ce96d02d912686bae79f0a5eedc4b | [] | no_license | xinligg/trainmonitor | 07ed0fa99e54e2857b49ad3435546d13cc0eb17a | 938a8d8f56dc267fceeb65ef7b867f1cac343923 | refs/heads/master | 2021-09-24T15:52:43.195053 | 2018-10-11T07:12:25 | 2018-10-11T07:12:25 | 116,164,395 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 787 | py | # Copyright 2009 Canonical Ltd.
#
# This file is part of desktopcouch.
#
# desktopcouch is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3
# as published by the Free Software Foundation.
#
# desktopcouch is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with desktopcouch. If not, see <http://www.gnu.org/licenses/>.
"""The pair module."""
pairing_record_type = "http://www.freedesktop.org/wiki/Specifications/desktopcouch/paired_server"
| [
"root@xinli.xinli"
] | root@xinli.xinli |
ec29664e79ce885956b357da822230d58ef90bff | cf088e68e93981292270905c983378288c4bbd66 | /backend/chat/migrations/0001_initial.py | 2fb31cf4d731520bfec0e757eea48934675de7d1 | [] | no_license | crowdbotics-apps/test-27314 | 92f91f9e3bc3cfe508338cd71b8d29df8de1927d | e436c8d0f0b346b23b26b90ea698c3ad0020711e | refs/heads/master | 2023-04-21T01:55:47.441435 | 2021-05-24T10:39:51 | 2021-05-24T10:39:51 | 370,310,574 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,805 | py | # Generated by Django 2.2.20 on 2021-05-24 10:39
from django.db import migrations, models
import django.db.models.deletion
# Auto-generated initial migration for the chat app: messages, threads,
# membership, per-message/per-thread actions and message forwarding.
# Edit only to resolve migration conflicts, not to change the schema.
class Migration(migrations.Migration):

    initial = True

    dependencies = [
        # Profiles must exist before the chat tables that reference them.
        ('chat_user_profile', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Message',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('message', models.TextField()),
                ('attachment', models.URLField()),
                ('is_draft', models.BooleanField()),
                ('is_delivered', models.BooleanField()),
                ('is_read', models.BooleanField()),
                ('timestamp_created', models.DateTimeField(auto_now_add=True)),
                ('timestamp_delivered', models.DateTimeField()),
                ('timestamp_read', models.DateTimeField()),
            ],
        ),
        migrations.CreateModel(
            name='Thread',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255)),
                ('thread_photo', models.URLField()),
                ('timestamp_created', models.DateTimeField(auto_now_add=True)),
            ],
        ),
        # Join table between Profile and Thread, with admin flag and
        # join/leave timestamps.
        migrations.CreateModel(
            name='ThreadMember',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('is_admin', models.BooleanField()),
                ('timestamp_joined', models.DateTimeField(auto_now_add=True)),
                ('timestamp_left', models.DateTimeField()),
                ('last_rejoined', models.DateTimeField()),
                ('profile', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='threadmember_profile', to='chat_user_profile.Profile')),
                ('thread', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='threadmember_thread', to='chat.Thread')),
            ],
        ),
        migrations.CreateModel(
            name='ThreadAction',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('action', models.CharField(max_length=7)),
                ('timestamp_action', models.DateTimeField(auto_now_add=True)),
                ('profile', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='threadaction_profile', to='chat_user_profile.Profile')),
                ('thread', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='threadaction_thread', to='chat.Thread')),
            ],
        ),
        migrations.CreateModel(
            name='MessageAction',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('action', models.CharField(max_length=7)),
                ('timestamp_action', models.DateTimeField(auto_now_add=True)),
                ('message', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='messageaction_message', to='chat.Message')),
                ('profile', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='messageaction_profile', to='chat_user_profile.Profile')),
            ],
        ),
        # Message FKs are added after Thread/ThreadMember exist because
        # Message is created first above.
        migrations.AddField(
            model_name='message',
            name='sent_by',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='message_sent_by', to='chat.ThreadMember'),
        ),
        migrations.AddField(
            model_name='message',
            name='thread',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='message_thread', to='chat.Thread'),
        ),
        migrations.CreateModel(
            name='ForwardedMessage',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('timestamp_forwarded', models.DateTimeField(auto_now_add=True)),
                ('forwarded_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='forwardedmessage_forwarded_by', to='chat_user_profile.Profile')),
                ('forwarded_to', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='forwardedmessage_forwarded_to', to='chat.Thread')),
                ('message', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='forwardedmessage_message', to='chat.Message')),
            ],
        ),
    ]
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
0f3033d9f66cd33138abf1898e4ac25f24abc2f5 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/429/usersdata/321/101585/submittedfiles/jogoDaVelha.py | ceb9986c93fca04e49a3a98db4789eb815024b33 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 459 | py | # -*- coding: utf-8 -*-
from jogoDaVelha_BIB import *
# COLOQUE SEU PROGRAMA A PARTIR DAQUI
# Greet the player and read the symbol they want to play with.
print('Bem vindo ao JogoDaVelha do grupo 8 [Iara, Ingrid, Luiz Otávio, Tatiane]')
nome = str(input('Qual seu nome? '))
s = str(input('Qual símbolo você deseja utilizar no jogo? (X ou O) '))
# Bug fix: the original condition `s != X or s != O` referenced the undefined
# names X and O (NameError) and, being an `or` of two inequalities, would be
# true for every possible input. Re-prompt only while the answer is invalid.
while s not in ('X', 'O'):
    print('Isira um símbolo válido')
    s = str(input('Qual símbolo você deseja utilizar no jogo? '))
# `sorteio`, `inicio` and `tabuleiro` come from the star-import of
# jogoDaVelha_BIB at the top of the file — TODO confirm they exist there.
print(sorteio(inicio))
print(tabuleiro)
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
c37eec426fa77166b8385a84df964e54b7b27bad | e519a3134e5242eff29a95a05b02f8ae0bfde232 | /services/control-tower/vendor/riffyn-sdk/swagger_client/models/add_property_type_body.py | 584e5a664de8f7be094368cc425bdffcfd495f56 | [] | no_license | zoltuz/lab-automation-playground | ba7bc08f5d4687a6daa64de04c6d9b36ee71bd3e | 7a21f59b30af6922470ee2b20651918605914cfe | refs/heads/master | 2023-01-28T10:21:51.427650 | 2020-12-04T14:13:13 | 2020-12-05T03:27:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,463 | py | # coding: utf-8
"""
Riffyn REST API
### Vocabulary Before you begin, please familiarize yourself with our [Glossary of Terms](https://help.riffyn.com/hc/en-us/articles/360045503694). ### Getting Started If you'd like to play around with the API, there are several free GUI tools that will allow you to send requests and receive responses. We suggest using the free app [Postman](https://www.getpostman.com/). ### Authentication Begin with a call the [authenticate](/#api-Authentication-authenticate) endpoint using [HTTP Basic authentication](https://en.wikipedia.org/wiki/Basic_access_authentication) with your `username` and `password` to retrieve either an API Key or an Access Token. For example: curl -X POST -u '<username>' https://api.app.riffyn.com/v1/auth -v You may then use either the API Key or the accessToken for all future requests to the API. For example: curl -H 'access-token: <ACCESS_TOKEN>' https://api.app.riffyn.com/v1/units -v curl -H 'api-key: <API_KEY>' https://api.app.riffyn.com/v1/units -v The tokens' values will be either in the message returned by the `/authenticate` endpoint or in the createApiKey `/auth/api-key` or CreateAccesToken `/auth/access-token` endpoints. The API Key will remain valid until it is deauthorized by revoking it through the Security Settings in the Riffyn App UI. The API Key is best for running scripts and longer lasting interactions with the API. The Access Token will expire automatically and is best suited to granting applications short term access to the Riffyn API. Make your requests by sending the HTTP header `api-key: $API_KEY`, or `access-token: $ACCESS_TOKEN`. In Postman, add your prefered token to the headers under the Headers tab for any request other than the original request to `/authenticate`. If you are enrolled in MultiFactor Authentication (MFA) the `status` returned by the `/authenticate` endpoint will be `MFA_REQUIRED`. 
A `passCode`, a `stateToken`, and a `factorId` must be passed to the [/verify](/#api-Authentication-verify) endpoint to complete the authentication process and achieve the `SUCCESS` status. MFA must be managed in the Riffyn App UI. ### Paging and Sorting The majority of endpoints that return a list of data support paging and sorting through the use of three properties, `limit`, `offset`, and `sort`. Please see the list of query parameters, displayed below each endpoint's code examples, to see if paging or sorting is supported for that specific endpoint. Certain endpoints return data that's added frequently, like resources. As a result, you may want filter results on either the maximum or minimum creation timestamp. This will prevent rows from shifting their position from the top of the list, as you scroll though subsequent pages of a multi-page response. Before querying for the first page, store the current date-time (in memory, a database, a file...). On subsequent pages you *may* include the `before` query parameter, to limit the results to records created before that date-time. E.g. before loading page one, you store the current date time of `2016-10-31T22:00:00Z` (ISO date format). Later, when generating the URL for page two, you *could* limit the results by including the query parameter `before=1477951200000` (epoch timestamp). ### Postman endpoint examples There is a YAML file with the examples of the request on Riffyn API [Click here](/collection) to get the file. If you don't know how to import the collection file, [here](https://learning.postman.com/docs/postman/collections/data-formats/#importing-postman-data) are the steps. ### Client SDKs You may write your own API client, or you may use one of ours. [Click here](/clients) to select your programming language and download an API client. # noqa: E501
OpenAPI spec version: 1.4.0
Contact: support@riffyn.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class AddPropertyTypeBody(object):
    """Swagger-generated request body for adding a property type to an
    activity input or output.

    Attributes:
        swagger_types (dict): attribute name -> attribute type.
        attribute_map (dict): attribute name -> JSON key in the API definition.
    """

    swagger_types = {
        'property_type_id': 'str',
        'component_id': 'str',
        'specs': 'Spec',
        'hint': 'Hint'
    }

    attribute_map = {
        'property_type_id': 'propertyTypeId',
        'component_id': 'componentId',
        'specs': 'specs',
        'hint': 'hint'
    }

    def __init__(self, property_type_id=None, component_id=None, specs=None, hint=None):  # noqa: E501
        """AddPropertyTypeBody - a model defined in Swagger

        :param property_type_id: required `_id` of the property type; the
            setter raises ValueError when it is None.
        :param component_id: optional resource type `_id` of the component.
        :param specs: optional Spec instance.
        :param hint: optional Hint instance.
        """
        self._property_type_id = None
        self._component_id = None
        self._specs = None
        self._hint = None
        self.discriminator = None
        # The required field is routed through its setter so the None
        # validation below always runs.
        self.property_type_id = property_type_id
        # Optional fields are only assigned when the caller supplied them.
        if component_id is not None:
            self.component_id = component_id
        if specs is not None:
            self.specs = specs
        if hint is not None:
            self.hint = hint

    @property
    def property_type_id(self):
        """The `_id` of the property type being added to the activity
        `input` or `output`."""
        return self._property_type_id

    @property_type_id.setter
    def property_type_id(self, property_type_id):
        """Set the property type `_id`; a None value is rejected."""
        if property_type_id is None:
            raise ValueError("Invalid value for `property_type_id`, must not be `None`")  # noqa: E501
        self._property_type_id = property_type_id

    @property
    def component_id(self):
        """The resource type `_id` of the component the property type is
        being added to."""
        return self._component_id

    @component_id.setter
    def component_id(self, component_id):
        """Set the component resource type `_id`."""
        self._component_id = component_id

    @property
    def specs(self):
        """The Spec attached to this body."""
        return self._specs

    @specs.setter
    def specs(self, specs):
        """Set the Spec attached to this body."""
        self._specs = specs

    @property
    def hint(self):
        """The Hint attached to this body."""
        return self._hint

    @hint.setter
    def hint(self, hint):
        """Set the Hint attached to this body."""
        self._hint = hint

    def to_dict(self):
        """Return the model's properties as a plain dict."""
        def _plain(value):
            # Recursively unwrap nested swagger models found in lists/dicts.
            if isinstance(value, list):
                return [v.to_dict() if hasattr(v, "to_dict") else v for v in value]
            if hasattr(value, "to_dict"):
                return value.to_dict()
            if isinstance(value, dict):
                return {k: (v.to_dict() if hasattr(v, "to_dict") else v)
                        for k, v in value.items()}
            return value

        result = {attr: _plain(getattr(self, attr)) for attr in self.swagger_types}
        if issubclass(AddPropertyTypeBody, dict):
            for key, value in self.items():
                result[key] = value
        return result

    def to_str(self):
        """Return the string representation of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Equal when `other` is the same model type with equal state."""
        return isinstance(other, AddPropertyTypeBody) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Inverse of __eq__."""
        return not self == other
| [
"jaceys.tan@gmail.com"
] | jaceys.tan@gmail.com |
7f485812412727f8eec54250a2717de3c9cc38c6 | c0e1d9ab5c5fae94f988e03ead37337cd283e012 | /earth461/ass1/q1b.py | 9c0c57dc3f730e8c0738ef34cbfc4ba6e4f27518 | [] | no_license | BruceJohnJennerLawso/turbulence-chain | 114dafd19fa00f6e0af7113cdaa8d603a62dfc66 | 767786368c08e8ca3ba0fbc2896cb6cc9908ebdb | refs/heads/master | 2016-09-01T05:15:48.650177 | 2015-12-07T01:49:46 | 2015-12-07T01:49:46 | 45,279,135 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,312 | py | ## q2.py #######################################################################
## log log conductivity by ppm again, this time with different values ##########
## depending on temperature ####################################################
################################################################################
from conductivity import *
if __name__ == "__main__":
    # Sweep total dissolved solids from 50 to 50000 ppm; compute saturated
    # sand conductivity (converted to mS/m) at the 25 C reference, then
    # temperature-adjust that reference value to 10 C and 40 C.
    tds_values = range(50, 50000)
    base = [getConductivity(1.0, 0.40, 1.5, ppm, 0.5) for ppm in tds_values]
    tencConductivities = [adjustConductivityForTemperature(25, 10, c) * 1000 for c in base]
    twentyfivecConductivities = [c * 1000 for c in base]
    fortycConductivities = [adjustConductivityForTemperature(25, 40, c) * 1000 for c in base]
    # One labelled data set per temperature, each with its own line style.
    ten = dataSet(tds_values, tencConductivities, "10 Degrees C", "b-")
    twentyfive = dataSet(tds_values, twentyfivecConductivities, "25 Degrees C", "g-")
    forty = dataSet(tds_values, fortycConductivities, "40 Degrees C", "r-")
    graphLogLog("log conductivity by log TDS for various\n temperatures of saturated sands",
                "log sand conductivity (mS/m)", "log TDS (ppm)", 1, 1e6, 1e-2, 1e4, True,
                ten, twentyfive, forty)
| [
"johnnybgoode@rogers.com"
] | johnnybgoode@rogers.com |
9bf0b8872faffa3a20d8a64810b284909a28af6b | a8b37bd399dd0bad27d3abd386ace85a6b70ef28 | /airbyte-ci/connectors/metadata_service/orchestrator/tests/test_dagster_helpers.py | 13d92550f51ddd6f46330b65e3966c660c1ef658 | [
"LicenseRef-scancode-free-unknown",
"MIT",
"Elastic-2.0"
] | permissive | thomas-vl/airbyte | 5da2ba9d189ba0b202feb952cadfb550c5050871 | 258a8eb683634a9f9b7821c9a92d1b70c5389a10 | refs/heads/master | 2023-09-01T17:49:23.761569 | 2023-08-25T13:13:11 | 2023-08-25T13:13:11 | 327,604,451 | 1 | 0 | MIT | 2021-01-07T12:24:20 | 2021-01-07T12:24:19 | null | UTF-8 | Python | false | false | 1,358 | py | from orchestrator.utils.dagster_helpers import string_array_to_hash
def test_string_array_to_hash_is_deterministic():
    """Hashing the same list twice yields the same value."""
    sample = ["hello", "world", "foo", "bar", "baz"]
    first, second = string_array_to_hash(sample), string_array_to_hash(sample)
    assert first == second
def test_string_array_to_hash_ignores_repeated_strings():
    """Duplicate entries do not change the hash."""
    base = ["hello", "world", "foo", "bar", "baz"]
    with_duplicates = base + ["foo", "bar"]
    assert string_array_to_hash(base) == string_array_to_hash(with_duplicates)
def test_string_array_to_hash_outputs_on_empty_list():
    """An empty input list still produces a truthy hash value."""
    result = string_array_to_hash([])
    assert result
def test_string_array_to_hash_ignores_value_order_input():
    """The hash is insensitive to the order of the input strings."""
    forward = ["hello", "world", "foo", "bar", "baz"]
    backward = list(reversed(forward))
    assert string_array_to_hash(backward) == string_array_to_hash(forward)
def test_string_array_to_hash_differs():
    """Distinct inputs produce distinct hashes."""
    inputs = [
        ["hello", "world", "foo"],
        ["hello", "world", "foo", "bar", "baz", "foo", "bar"],
        ["hello", "world", "baz"],
        ["world", "baz"],
    ]
    cursors = {string_array_to_hash(arr) for arr in inputs}
    assert len(cursors) == 4
| [
"noreply@github.com"
] | thomas-vl.noreply@github.com |
f44076cdee710f265bf0956224acadf8aeac8724 | f09dc121f213f2881df3572288b7ee5b39246d73 | /aliyun-python-sdk-opensearch/aliyunsdkopensearch/request/v20171225/DescribeAppRequest.py | 1fde6aefdec2387c286c91083d822a3b0f7492ff | [
"Apache-2.0"
] | permissive | hetw/aliyun-openapi-python-sdk | 2f31378ad6be0896fb8090423f607e9c7d3ae774 | 7443eacee9fbbaa93c7975c6dbec92d3c364c577 | refs/heads/master | 2023-01-19T22:42:36.214770 | 2020-12-04T10:55:14 | 2020-12-04T10:55:14 | 318,689,093 | 1 | 0 | NOASSERTION | 2020-12-05T03:03:03 | 2020-12-05T03:03:03 | null | UTF-8 | Python | false | false | 1,708 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RoaRequest
from aliyunsdkopensearch.endpoint import endpoint_data
class DescribeAppRequest(RoaRequest):
    """ROA request for the OpenSearch `DescribeApp` API (version 2017-12-25).

    Issues a GET against
    `/v4/openapi/app-groups/[appGroupIdentity]/apps/[appId]`.
    """

    def __init__(self):
        RoaRequest.__init__(self, 'OpenSearch', '2017-12-25', 'DescribeApp','opensearch')
        self.set_uri_pattern('/v4/openapi/app-groups/[appGroupIdentity]/apps/[appId]')
        self.set_method('GET')
        # Wire endpoint data onto the request when the base class exposes
        # the corresponding attributes.
        for attr, getter in (("endpoint_map", endpoint_data.getEndpointMap),
                             ("endpoint_regional", endpoint_data.getEndpointRegional)):
            if hasattr(self, attr):
                setattr(self, attr, getter())

    def get_appId(self):
        """Return the `appId` path parameter (None when unset)."""
        params = self.get_path_params()
        return params.get('appId')

    def set_appId(self, appId):
        """Set the `appId` path parameter."""
        self.add_path_param('appId', appId)

    def get_appGroupIdentity(self):
        """Return the `appGroupIdentity` path parameter (None when unset)."""
        params = self.get_path_params()
        return params.get('appGroupIdentity')

    def set_appGroupIdentity(self, appGroupIdentity):
        """Set the `appGroupIdentity` path parameter."""
        self.add_path_param('appGroupIdentity', appGroupIdentity)
"sdk-team@alibabacloud.com"
] | sdk-team@alibabacloud.com |
2d59dda3daae2055f815f55af8792090bb339bf3 | 487ce91881032c1de16e35ed8bc187d6034205f7 | /codes/CodeJamCrawler/CJ_16_2/16_2_1_Mike_Xiao_q1.py | f1ab7c431a32948cb0d5c6ff6318219a81209fcd | [] | no_license | DaHuO/Supergraph | 9cd26d8c5a081803015d93cf5f2674009e92ef7e | c88059dc66297af577ad2b8afa4e0ac0ad622915 | refs/heads/master | 2021-06-14T16:07:52.405091 | 2016-08-21T13:39:13 | 2016-08-21T13:39:13 | 49,829,508 | 2 | 0 | null | 2021-03-19T21:55:46 | 2016-01-17T18:23:00 | Python | UTF-8 | Python | false | false | 855 | py | filename = "A-large"
fin = open(filename + ".in", "r")
fout = open(filename + ".out", "w")
casenum = int(fin.readline())
for ite in range(casenum):
in_str = fin.readline()
dignums = [0,0,0,0,0,0,0,0,0,0]
dignums[0] = in_str.count('Z')
dignums[2] = in_str.count('W')
dignums[4] = in_str.count('U')
dignums[6] = in_str.count('X')
dignums[8] = in_str.count('G')
dignums[3] = in_str.count('H') - dignums[8]
dignums[5] = in_str.count('F') - dignums[4]
dignums[1] = in_str.count('O') - dignums[0] - dignums[2] - dignums[4]
dignums[7] = in_str.count('V') - dignums[5]
dignums[9] = in_str.count('I') - dignums[5] - dignums[6] - dignums[8]
return_str = ""
for jte in range(10):
return_str += str(jte) * dignums[jte]
fout.write("Case #{0}: {1}\n".format(ite + 1, return_str))
| [
"[dhuo@tcd.ie]"
] | [dhuo@tcd.ie] |
65a82f0daa5889a34b436582af4722fc7077ffe0 | f9d564f1aa83eca45872dab7fbaa26dd48210d08 | /huaweicloud-sdk-mpc/huaweicloudsdkmpc/v1/mpc_client.py | acf12740f51f323053cdb6dee05918acc7a08119 | [
"Apache-2.0"
] | permissive | huaweicloud/huaweicloud-sdk-python-v3 | cde6d849ce5b1de05ac5ebfd6153f27803837d84 | f69344c1dadb79067746ddf9bfde4bddc18d5ecf | refs/heads/master | 2023-09-01T19:29:43.013318 | 2023-08-31T08:28:59 | 2023-08-31T08:28:59 | 262,207,814 | 103 | 44 | NOASSERTION | 2023-06-22T14:50:48 | 2020-05-08T02:28:43 | Python | UTF-8 | Python | false | false | 126,593 | py | # coding: utf-8
from __future__ import absolute_import
import importlib
from huaweicloudsdkcore.client import Client, ClientBuilder
from huaweicloudsdkcore.utils import http_utils
from huaweicloudsdkcore.sdk_stream_request import SdkStreamRequest
class MpcClient(Client):
    def __init__(self):
        """Initialize the MPC client.

        Resolves and stores the `huaweicloudsdkmpc.v1.model` module on the
        instance; presumably consumed by the base `Client` when
        deserializing responses — confirm in the core SDK.
        """
        super(MpcClient, self).__init__()
        self.model_package = importlib.import_module("huaweicloudsdkmpc.v1.model")
@classmethod
def new_builder(cls, clazz=None):
if clazz is None:
return ClientBuilder(cls)
if clazz.__name__ != "MpcClient":
raise TypeError("client type error, support client type is MpcClient")
return ClientBuilder(clazz)
    def create_animated_graphics_task(self, request):
        """Create an animated-graphics (GIF) task.

        Creates a task that converts a complete video file, or a segment of
        one, into an animated image; only GIF output is currently supported.
        The source video must be stored in an authorized OBS bucket in the
        same region as the MPC service.

        Please refer to HUAWEI cloud API Explorer for details.

        :param request: Request instance for CreateAnimatedGraphicsTask
        :type request: :class:`huaweicloudsdkmpc.v1.CreateAnimatedGraphicsTaskRequest`
        :rtype: :class:`huaweicloudsdkmpc.v1.CreateAnimatedGraphicsTaskResponse`
        """
        return self._create_animated_graphics_task_with_http_info(request)
    def _create_animated_graphics_task_with_http_info(self, request):
        """Assemble and dispatch POST /v1/{project_id}/animated-graphics.

        Forwards the attributes set on ``request`` as request parameters and
        returns the result of ``call_api`` with response type
        ``CreateAnimatedGraphicsTaskResponse``.
        """
        # Forward only attributes that are actually present on the request.
        local_var_params = {attr: getattr(request, attr) for attr in request.attribute_map if hasattr(request, attr)}
        cname = None
        collection_formats = {}
        path_params = {}
        query_params = []
        header_params = {}
        form_params = {}
        body_params = None
        if 'body' in local_var_params:
            body_params = local_var_params['body']
        if isinstance(request, SdkStreamRequest):
            body_params = request.get_file_stream()
        response_headers = []
        header_params['Content-Type'] = http_utils.select_header_content_type(
            ['application/json'])
        auth_settings = []
        return self.call_api(
            resource_path='/v1/{project_id}/animated-graphics',
            method='POST',
            path_params=path_params,
            query_params=query_params,
            header_params=header_params,
            body=body_params,
            post_params=form_params,
            cname=cname,
            response_type='CreateAnimatedGraphicsTaskResponse',
            response_headers=response_headers,
            auth_settings=auth_settings,
            collection_formats=collection_formats,
            request_type=request.__class__.__name__)
    def delete_animated_graphics_task(self, request):
        """Cancel an animated-graphics task.

        Cancels a previously submitted animated-graphics task; only tasks
        that are still queued can be cancelled.

        Please refer to HUAWEI cloud API Explorer for details.

        :param request: Request instance for DeleteAnimatedGraphicsTask
        :type request: :class:`huaweicloudsdkmpc.v1.DeleteAnimatedGraphicsTaskRequest`
        :rtype: :class:`huaweicloudsdkmpc.v1.DeleteAnimatedGraphicsTaskResponse`
        """
        return self._delete_animated_graphics_task_with_http_info(request)
    def _delete_animated_graphics_task_with_http_info(self, request):
        """Assemble and dispatch DELETE /v1/{project_id}/animated-graphics.

        Forwards the attributes set on ``request`` (notably ``task_id`` as a
        query parameter) and returns the result of ``call_api`` with response
        type ``DeleteAnimatedGraphicsTaskResponse``.
        """
        local_var_params = {attr: getattr(request, attr) for attr in request.attribute_map if hasattr(request, attr)}
        cname = None
        collection_formats = {}
        path_params = {}
        query_params = []
        if 'task_id' in local_var_params:
            query_params.append(('task_id', local_var_params['task_id']))
        header_params = {}
        form_params = {}
        body_params = None
        if isinstance(request, SdkStreamRequest):
            body_params = request.get_file_stream()
        response_headers = []
        header_params['Content-Type'] = http_utils.select_header_content_type(
            ['application/json'])
        auth_settings = []
        return self.call_api(
            resource_path='/v1/{project_id}/animated-graphics',
            method='DELETE',
            path_params=path_params,
            query_params=query_params,
            header_params=header_params,
            body=body_params,
            post_params=form_params,
            cname=cname,
            response_type='DeleteAnimatedGraphicsTaskResponse',
            response_headers=response_headers,
            auth_settings=auth_settings,
            collection_formats=collection_formats,
            request_type=request.__class__.__name__)
    def list_animated_graphics_task(self, request):
        """Query animated-graphics tasks.

        Queries the status of animated-graphics tasks.

        Please refer to HUAWEI cloud API Explorer for details.

        :param request: Request instance for ListAnimatedGraphicsTask
        :type request: :class:`huaweicloudsdkmpc.v1.ListAnimatedGraphicsTaskRequest`
        :rtype: :class:`huaweicloudsdkmpc.v1.ListAnimatedGraphicsTaskResponse`
        """
        return self._list_animated_graphics_task_with_http_info(request)
    def _list_animated_graphics_task_with_http_info(self, request):
        """Assemble and dispatch GET /v1/{project_id}/animated-graphics.

        Maps the request's filter attributes (task ids, status, time window,
        paging) onto query parameters and ``x_language`` onto a header, then
        returns the result of ``call_api`` with response type
        ``ListAnimatedGraphicsTaskResponse``.
        """
        local_var_params = {attr: getattr(request, attr) for attr in request.attribute_map if hasattr(request, attr)}
        cname = None
        collection_formats = {}
        path_params = {}
        query_params = []
        if 'task_id' in local_var_params:
            query_params.append(('task_id', local_var_params['task_id']))
            # 'multi': each task id is serialized as its own query entry.
            collection_formats['task_id'] = 'multi'
        if 'status' in local_var_params:
            query_params.append(('status', local_var_params['status']))
        if 'start_time' in local_var_params:
            query_params.append(('start_time', local_var_params['start_time']))
        if 'end_time' in local_var_params:
            query_params.append(('end_time', local_var_params['end_time']))
        if 'page' in local_var_params:
            query_params.append(('page', local_var_params['page']))
        if 'size' in local_var_params:
            query_params.append(('size', local_var_params['size']))
        header_params = {}
        if 'x_language' in local_var_params:
            header_params['x-language'] = local_var_params['x_language']
        form_params = {}
        body_params = None
        if isinstance(request, SdkStreamRequest):
            body_params = request.get_file_stream()
        response_headers = []
        header_params['Content-Type'] = http_utils.select_header_content_type(
            ['application/json'])
        auth_settings = []
        return self.call_api(
            resource_path='/v1/{project_id}/animated-graphics',
            method='GET',
            path_params=path_params,
            query_params=query_params,
            header_params=header_params,
            body=body_params,
            post_params=form_params,
            cname=cname,
            response_type='ListAnimatedGraphicsTaskResponse',
            response_headers=response_headers,
            auth_settings=auth_settings,
            collection_formats=collection_formats,
            request_type=request.__class__.__name__)
    def create_agencies_task(self, request):
        """Request an agency (delegated authorization) task.

        Enables or disables agency authorization. Once enabled, the MPC
        service gains read/write access to all of your OBS buckets.
        Sub-accounts do not support agency authorization.

        Please refer to HUAWEI cloud API Explorer for details.

        :param request: Request instance for CreateAgenciesTask
        :type request: :class:`huaweicloudsdkmpc.v1.CreateAgenciesTaskRequest`
        :rtype: :class:`huaweicloudsdkmpc.v1.CreateAgenciesTaskResponse`
        """
        return self._create_agencies_task_with_http_info(request)
    def _create_agencies_task_with_http_info(self, request):
        """Assemble and dispatch POST /v1/{project_id}/agencies.

        Sends the request's ``body`` attribute as the JSON payload and
        returns the result of ``call_api`` with response type
        ``CreateAgenciesTaskResponse``.
        """
        local_var_params = {attr: getattr(request, attr) for attr in request.attribute_map if hasattr(request, attr)}
        cname = None
        collection_formats = {}
        path_params = {}
        query_params = []
        header_params = {}
        form_params = {}
        body_params = None
        if 'body' in local_var_params:
            body_params = local_var_params['body']
        if isinstance(request, SdkStreamRequest):
            body_params = request.get_file_stream()
        response_headers = []
        header_params['Content-Type'] = http_utils.select_header_content_type(
            ['application/json'])
        auth_settings = []
        return self.call_api(
            resource_path='/v1/{project_id}/agencies',
            method='POST',
            path_params=path_params,
            query_params=query_params,
            header_params=header_params,
            body=body_params,
            post_params=form_params,
            cname=cname,
            response_type='CreateAgenciesTaskResponse',
            response_headers=response_headers,
            auth_settings=auth_settings,
            collection_formats=collection_formats,
            request_type=request.__class__.__name__)
    def list_all_buckets(self, request):
        """Query the bucket list.

        Queries the list of buckets you created in the specified bucket
        region.

        Please refer to HUAWEI cloud API Explorer for details.

        :param request: Request instance for ListAllBuckets
        :type request: :class:`huaweicloudsdkmpc.v1.ListAllBucketsRequest`
        :rtype: :class:`huaweicloudsdkmpc.v1.ListAllBucketsResponse`
        """
        return self._list_all_buckets_with_http_info(request)
    def _list_all_buckets_with_http_info(self, request):
        """Assemble and dispatch GET /v1/{project_id}/buckets.

        Takes no query parameters; returns the result of ``call_api`` with
        response type ``ListAllBucketsResponse``.
        """
        local_var_params = {attr: getattr(request, attr) for attr in request.attribute_map if hasattr(request, attr)}
        cname = None
        collection_formats = {}
        path_params = {}
        query_params = []
        header_params = {}
        form_params = {}
        body_params = None
        if isinstance(request, SdkStreamRequest):
            body_params = request.get_file_stream()
        response_headers = []
        header_params['Content-Type'] = http_utils.select_header_content_type(
            ['application/json'])
        auth_settings = []
        return self.call_api(
            resource_path='/v1/{project_id}/buckets',
            method='GET',
            path_params=path_params,
            query_params=query_params,
            header_params=header_params,
            body=body_params,
            post_params=form_params,
            cname=cname,
            response_type='ListAllBucketsResponse',
            response_headers=response_headers,
            auth_settings=auth_settings,
            collection_formats=collection_formats,
            request_type=request.__class__.__name__)
    def list_all_obs_obj_list(self, request):
        """Query the objects in a bucket.

        Lists the objects stored in the given OBS bucket.

        Please refer to HUAWEI cloud API Explorer for details.

        :param request: Request instance for ListAllObsObjList
        :type request: :class:`huaweicloudsdkmpc.v1.ListAllObsObjListRequest`
        :rtype: :class:`huaweicloudsdkmpc.v1.ListAllObsObjListResponse`
        """
        return self._list_all_obs_obj_list_with_http_info(request)
    def _list_all_obs_obj_list_with_http_info(self, request):
        """Assemble and dispatch GET /v1.0-ext/{project_id}/objects.

        Maps ``bucket``, ``prefix`` and ``type`` onto query parameters and
        returns the result of ``call_api`` with response type
        ``ListAllObsObjListResponse``.
        """
        local_var_params = {attr: getattr(request, attr) for attr in request.attribute_map if hasattr(request, attr)}
        cname = None
        collection_formats = {}
        path_params = {}
        query_params = []
        if 'bucket' in local_var_params:
            query_params.append(('bucket', local_var_params['bucket']))
        if 'prefix' in local_var_params:
            query_params.append(('prefix', local_var_params['prefix']))
        if 'type' in local_var_params:
            query_params.append(('type', local_var_params['type']))
        header_params = {}
        form_params = {}
        body_params = None
        if isinstance(request, SdkStreamRequest):
            body_params = request.get_file_stream()
        response_headers = []
        header_params['Content-Type'] = http_utils.select_header_content_type(
            ['application/json'])
        auth_settings = []
        return self.call_api(
            resource_path='/v1.0-ext/{project_id}/objects',
            method='GET',
            path_params=path_params,
            query_params=query_params,
            header_params=header_params,
            body=body_params,
            post_params=form_params,
            cname=cname,
            response_type='ListAllObsObjListResponse',
            response_headers=response_headers,
            auth_settings=auth_settings,
            collection_formats=collection_formats,
            request_type=request.__class__.__name__)
    def list_notify_event(self, request):
        """Query all transcoding server-side events.

        Queries, in the message subscription module, every subscribable
        event of the SMN topic.

        Please refer to HUAWEI cloud API Explorer for details.

        :param request: Request instance for ListNotifyEvent
        :type request: :class:`huaweicloudsdkmpc.v1.ListNotifyEventRequest`
        :rtype: :class:`huaweicloudsdkmpc.v1.ListNotifyEventResponse`
        """
        return self._list_notify_event_with_http_info(request)
    def _list_notify_event_with_http_info(self, request):
        """Assemble and dispatch GET /v1/{project_id}/notification/event.

        Takes no query parameters; returns the result of ``call_api`` with
        response type ``ListNotifyEventResponse``.
        """
        local_var_params = {attr: getattr(request, attr) for attr in request.attribute_map if hasattr(request, attr)}
        cname = None
        collection_formats = {}
        path_params = {}
        query_params = []
        header_params = {}
        form_params = {}
        body_params = None
        if isinstance(request, SdkStreamRequest):
            body_params = request.get_file_stream()
        response_headers = []
        header_params['Content-Type'] = http_utils.select_header_content_type(
            ['application/json'])
        auth_settings = []
        return self.call_api(
            resource_path='/v1/{project_id}/notification/event',
            method='GET',
            path_params=path_params,
            query_params=query_params,
            header_params=header_params,
            body=body_params,
            post_params=form_params,
            cname=cname,
            response_type='ListNotifyEventResponse',
            response_headers=response_headers,
            auth_settings=auth_settings,
            collection_formats=collection_formats,
            request_type=request.__class__.__name__)
    def list_notify_smn_topic_config(self, request):
        """Query transcoding server-side event notification settings.

        Queries, in the message subscription module, whether the SMN topic's
        event subscriptions and message subscription are enabled.

        Please refer to HUAWEI cloud API Explorer for details.

        :param request: Request instance for ListNotifySmnTopicConfig
        :type request: :class:`huaweicloudsdkmpc.v1.ListNotifySmnTopicConfigRequest`
        :rtype: :class:`huaweicloudsdkmpc.v1.ListNotifySmnTopicConfigResponse`
        """
        return self._list_notify_smn_topic_config_with_http_info(request)
    def _list_notify_smn_topic_config_with_http_info(self, request):
        """Assemble and dispatch GET /v1/{project_id}/notification.

        Takes no query parameters; returns the result of ``call_api`` with
        response type ``ListNotifySmnTopicConfigResponse``.
        """
        local_var_params = {attr: getattr(request, attr) for attr in request.attribute_map if hasattr(request, attr)}
        cname = None
        collection_formats = {}
        path_params = {}
        query_params = []
        header_params = {}
        form_params = {}
        body_params = None
        if isinstance(request, SdkStreamRequest):
            body_params = request.get_file_stream()
        response_headers = []
        header_params['Content-Type'] = http_utils.select_header_content_type(
            ['application/json'])
        auth_settings = []
        return self.call_api(
            resource_path='/v1/{project_id}/notification',
            method='GET',
            path_params=path_params,
            query_params=query_params,
            header_params=header_params,
            body=body_params,
            post_params=form_params,
            cname=cname,
            response_type='ListNotifySmnTopicConfigResponse',
            response_headers=response_headers,
            auth_settings=auth_settings,
            collection_formats=collection_formats,
            request_type=request.__class__.__name__)
    def notify_smn_topic_config(self, request):
        """Configure transcoding server-side event notifications.

        Configures the server-side event notifications for transcoding.

        Please refer to HUAWEI cloud API Explorer for details.

        :param request: Request instance for NotifySmnTopicConfig
        :type request: :class:`huaweicloudsdkmpc.v1.NotifySmnTopicConfigRequest`
        :rtype: :class:`huaweicloudsdkmpc.v1.NotifySmnTopicConfigResponse`
        """
        return self._notify_smn_topic_config_with_http_info(request)
    def _notify_smn_topic_config_with_http_info(self, request):
        """Assemble and dispatch PUT /v1/{project_id}/notification.

        Sends the request's ``body`` attribute as the JSON payload and
        returns the result of ``call_api`` with response type
        ``NotifySmnTopicConfigResponse``.
        """
        local_var_params = {attr: getattr(request, attr) for attr in request.attribute_map if hasattr(request, attr)}
        cname = None
        collection_formats = {}
        path_params = {}
        query_params = []
        header_params = {}
        form_params = {}
        body_params = None
        if 'body' in local_var_params:
            body_params = local_var_params['body']
        if isinstance(request, SdkStreamRequest):
            body_params = request.get_file_stream()
        response_headers = []
        header_params['Content-Type'] = http_utils.select_header_content_type(
            ['application/json'])
        auth_settings = []
        return self.call_api(
            resource_path='/v1/{project_id}/notification',
            method='PUT',
            path_params=path_params,
            query_params=query_params,
            header_params=header_params,
            body=body_params,
            post_params=form_params,
            cname=cname,
            response_type='NotifySmnTopicConfigResponse',
            response_headers=response_headers,
            auth_settings=auth_settings,
            collection_formats=collection_formats,
            request_type=request.__class__.__name__)
    def show_agencies_task(self, request):
        """Query the status of the agency-creation task.

        Queries the status of the task that creates the delegated
        authorization (agency).

        Please refer to HUAWEI cloud API Explorer for details.

        :param request: Request instance for ShowAgenciesTask
        :type request: :class:`huaweicloudsdkmpc.v1.ShowAgenciesTaskRequest`
        :rtype: :class:`huaweicloudsdkmpc.v1.ShowAgenciesTaskResponse`
        """
        return self._show_agencies_task_with_http_info(request)
def _show_agencies_task_with_http_info(self, request):
local_var_params = {attr: getattr(request, attr) for attr in request.attribute_map if hasattr(request, attr)}
cname = None
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v1/{project_id}/agencies',
method='GET',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
cname=cname,
response_type='ShowAgenciesTaskResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
    def update_bucket_authorized(self, request):
        """Grant or revoke authorization for an OBS bucket.

        MPC only has read/write access to authorized buckets.
        (Authorization of KMS-encrypted buckets is not supported.)
        Please refer to HUAWEI cloud API Explorer for details.

        :param request: Request instance for UpdateBucketAuthorized
        :type request: :class:`huaweicloudsdkmpc.v1.UpdateBucketAuthorizedRequest`
        :rtype: :class:`huaweicloudsdkmpc.v1.UpdateBucketAuthorizedResponse`
        """
        return self._update_bucket_authorized_with_http_info(request)
def _update_bucket_authorized_with_http_info(self, request):
local_var_params = {attr: getattr(request, attr) for attr in request.attribute_map if hasattr(request, attr)}
cname = None
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v1/{project_id}/authority',
method='PUT',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
cname=cname,
response_type='UpdateBucketAuthorizedResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
    def create_editing_job(self, request):
        """Create an editing job.

        Clips one or more source videos into segments and can concatenate
        the segments into a single video; clipping and concatenation can
        also be used independently. Source files must reside in an
        authorized OBS bucket in the same region as MPC.
        Please refer to HUAWEI cloud API Explorer for details.

        :param request: Request instance for CreateEditingJob
        :type request: :class:`huaweicloudsdkmpc.v1.CreateEditingJobRequest`
        :rtype: :class:`huaweicloudsdkmpc.v1.CreateEditingJobResponse`
        """
        return self._create_editing_job_with_http_info(request)
def _create_editing_job_with_http_info(self, request):
local_var_params = {attr: getattr(request, attr) for attr in request.attribute_map if hasattr(request, attr)}
cname = None
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v1/{project_id}/editing/jobs',
method='POST',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
cname=cname,
response_type='CreateEditingJobResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
    def delete_editing_job(self, request):
        """Cancel an editing job.

        Only jobs that are still queued can be cancelled.
        Please refer to HUAWEI cloud API Explorer for details.

        :param request: Request instance for DeleteEditingJob
        :type request: :class:`huaweicloudsdkmpc.v1.DeleteEditingJobRequest`
        :rtype: :class:`huaweicloudsdkmpc.v1.DeleteEditingJobResponse`
        """
        return self._delete_editing_job_with_http_info(request)
def _delete_editing_job_with_http_info(self, request):
local_var_params = {attr: getattr(request, attr) for attr in request.attribute_map if hasattr(request, attr)}
cname = None
collection_formats = {}
path_params = {}
query_params = []
if 'job_id' in local_var_params:
query_params.append(('job_id', local_var_params['job_id']))
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v1/{project_id}/editing/jobs',
method='DELETE',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
cname=cname,
response_type='DeleteEditingJobResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
    def list_editing_job(self, request):
        """Query the status of editing jobs.

        Please refer to HUAWEI cloud API Explorer for details.

        :param request: Request instance for ListEditingJob
        :type request: :class:`huaweicloudsdkmpc.v1.ListEditingJobRequest`
        :rtype: :class:`huaweicloudsdkmpc.v1.ListEditingJobResponse`
        """
        return self._list_editing_job_with_http_info(request)
def _list_editing_job_with_http_info(self, request):
local_var_params = {attr: getattr(request, attr) for attr in request.attribute_map if hasattr(request, attr)}
cname = None
collection_formats = {}
path_params = {}
query_params = []
if 'job_id' in local_var_params:
query_params.append(('job_id', local_var_params['job_id']))
collection_formats['job_id'] = 'multi'
if 'status' in local_var_params:
query_params.append(('status', local_var_params['status']))
if 'start_time' in local_var_params:
query_params.append(('start_time', local_var_params['start_time']))
if 'end_time' in local_var_params:
query_params.append(('end_time', local_var_params['end_time']))
if 'page' in local_var_params:
query_params.append(('page', local_var_params['page']))
if 'size' in local_var_params:
query_params.append(('size', local_var_params['size']))
header_params = {}
if 'x_language' in local_var_params:
header_params['x-language'] = local_var_params['x_language']
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v1/{project_id}/editing/jobs',
method='GET',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
cname=cname,
response_type='ListEditingJobResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
    def create_encrypt_task(self, request):
        """Create a standalone encryption task. (Deprecated API.)

        Supports creating, querying and deleting standalone encryption tasks.
        Constraints:
        - Only transcoded files can be encrypted.
        - The file to encrypt must end with m3u8 or mpd.
        Please refer to HUAWEI cloud API Explorer for details.

        :param request: Request instance for CreateEncryptTask
        :type request: :class:`huaweicloudsdkmpc.v1.CreateEncryptTaskRequest`
        :rtype: :class:`huaweicloudsdkmpc.v1.CreateEncryptTaskResponse`
        """
        return self._create_encrypt_task_with_http_info(request)
def _create_encrypt_task_with_http_info(self, request):
local_var_params = {attr: getattr(request, attr) for attr in request.attribute_map if hasattr(request, attr)}
cname = None
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v1/{project_id}/encryptions',
method='POST',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
cname=cname,
response_type='CreateEncryptTaskResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
    def delete_encrypt_task(self, request):
        """Cancel a standalone encryption task. (Deprecated API.)

        Constraints:
        Only tasks still queued can be cancelled; tasks that have started
        or finished encrypting cannot.
        Please refer to HUAWEI cloud API Explorer for details.

        :param request: Request instance for DeleteEncryptTask
        :type request: :class:`huaweicloudsdkmpc.v1.DeleteEncryptTaskRequest`
        :rtype: :class:`huaweicloudsdkmpc.v1.DeleteEncryptTaskResponse`
        """
        return self._delete_encrypt_task_with_http_info(request)
def _delete_encrypt_task_with_http_info(self, request):
local_var_params = {attr: getattr(request, attr) for attr in request.attribute_map if hasattr(request, attr)}
cname = None
collection_formats = {}
path_params = {}
query_params = []
if 'task_id' in local_var_params:
query_params.append(('task_id', local_var_params['task_id']))
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v1/{project_id}/encryptions',
method='DELETE',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
cname=cname,
response_type='DeleteEncryptTaskResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
    def list_encrypt_task(self, request):
        """Query standalone encryption tasks. (Deprecated API.)

        Returns the execution result or current status of the tasks.
        Please refer to HUAWEI cloud API Explorer for details.

        :param request: Request instance for ListEncryptTask
        :type request: :class:`huaweicloudsdkmpc.v1.ListEncryptTaskRequest`
        :rtype: :class:`huaweicloudsdkmpc.v1.ListEncryptTaskResponse`
        """
        return self._list_encrypt_task_with_http_info(request)
def _list_encrypt_task_with_http_info(self, request):
local_var_params = {attr: getattr(request, attr) for attr in request.attribute_map if hasattr(request, attr)}
cname = None
collection_formats = {}
path_params = {}
query_params = []
if 'task_id' in local_var_params:
query_params.append(('task_id', local_var_params['task_id']))
collection_formats['task_id'] = 'multi'
if 'status' in local_var_params:
query_params.append(('status', local_var_params['status']))
if 'start_time' in local_var_params:
query_params.append(('start_time', local_var_params['start_time']))
if 'end_time' in local_var_params:
query_params.append(('end_time', local_var_params['end_time']))
if 'page' in local_var_params:
query_params.append(('page', local_var_params['page']))
if 'size' in local_var_params:
query_params.append(('size', local_var_params['size']))
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v1/{project_id}/encryptions',
method='GET',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
cname=cname,
response_type='ListEncryptTaskResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
    def create_extract_task(self, request):
        """Create a video parsing task that extracts video metadata.

        Please refer to HUAWEI cloud API Explorer for details.

        :param request: Request instance for CreateExtractTask
        :type request: :class:`huaweicloudsdkmpc.v1.CreateExtractTaskRequest`
        :rtype: :class:`huaweicloudsdkmpc.v1.CreateExtractTaskResponse`
        """
        return self._create_extract_task_with_http_info(request)
def _create_extract_task_with_http_info(self, request):
local_var_params = {attr: getattr(request, attr) for attr in request.attribute_map if hasattr(request, attr)}
cname = None
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v1/{project_id}/extract-metadata',
method='POST',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
cname=cname,
response_type='CreateExtractTaskResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
    def delete_extract_task(self, request):
        """Cancel a video parsing task.

        Only tasks still queued can be cancelled.
        Please refer to HUAWEI cloud API Explorer for details.

        :param request: Request instance for DeleteExtractTask
        :type request: :class:`huaweicloudsdkmpc.v1.DeleteExtractTaskRequest`
        :rtype: :class:`huaweicloudsdkmpc.v1.DeleteExtractTaskResponse`
        """
        return self._delete_extract_task_with_http_info(request)
def _delete_extract_task_with_http_info(self, request):
local_var_params = {attr: getattr(request, attr) for attr in request.attribute_map if hasattr(request, attr)}
cname = None
collection_formats = {}
path_params = {}
query_params = []
if 'task_id' in local_var_params:
query_params.append(('task_id', local_var_params['task_id']))
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v1/{project_id}/extract-metadata',
method='DELETE',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
cname=cname,
response_type='DeleteExtractTaskResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
    def list_extract_task(self, request):
        """Query the status and result of video parsing tasks.

        Please refer to HUAWEI cloud API Explorer for details.

        :param request: Request instance for ListExtractTask
        :type request: :class:`huaweicloudsdkmpc.v1.ListExtractTaskRequest`
        :rtype: :class:`huaweicloudsdkmpc.v1.ListExtractTaskResponse`
        """
        return self._list_extract_task_with_http_info(request)
def _list_extract_task_with_http_info(self, request):
local_var_params = {attr: getattr(request, attr) for attr in request.attribute_map if hasattr(request, attr)}
cname = None
collection_formats = {}
path_params = {}
query_params = []
if 'task_id' in local_var_params:
query_params.append(('task_id', local_var_params['task_id']))
collection_formats['task_id'] = 'multi'
if 'status' in local_var_params:
query_params.append(('status', local_var_params['status']))
if 'start_time' in local_var_params:
query_params.append(('start_time', local_var_params['start_time']))
if 'end_time' in local_var_params:
query_params.append(('end_time', local_var_params['end_time']))
if 'page' in local_var_params:
query_params.append(('page', local_var_params['page']))
if 'size' in local_var_params:
query_params.append(('size', local_var_params['size']))
header_params = {}
if 'x_language' in local_var_params:
header_params['x-language'] = local_var_params['x_language']
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v1/{project_id}/extract-metadata',
method='GET',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
cname=cname,
response_type='ListExtractTaskResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
    def create_mb_tasks_report(self, request):
        """Report results for channel-merge and track-reset tasks.

        Reporting endpoint used by audio multi-channel merge tasks and
        audio track-reset tasks. No constraints.
        Please refer to HUAWEI cloud API Explorer for details.

        :param request: Request instance for CreateMbTasksReport
        :type request: :class:`huaweicloudsdkmpc.v1.CreateMbTasksReportRequest`
        :rtype: :class:`huaweicloudsdkmpc.v1.CreateMbTasksReportResponse`
        """
        return self._create_mb_tasks_report_with_http_info(request)
def _create_mb_tasks_report_with_http_info(self, request):
local_var_params = {attr: getattr(request, attr) for attr in request.attribute_map if hasattr(request, attr)}
cname = None
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v1/mediabox/tasks/report',
method='PUT',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
cname=cname,
response_type='CreateMbTasksReportResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
    def create_merge_channels_task(self, request):
        """Create a channel-merge task.

        Merges sources that store one channel per file into a single
        audio file. Source files must reside in an authorized OBS bucket
        in the same region as MPC.
        Please refer to HUAWEI cloud API Explorer for details.

        :param request: Request instance for CreateMergeChannelsTask
        :type request: :class:`huaweicloudsdkmpc.v1.CreateMergeChannelsTaskRequest`
        :rtype: :class:`huaweicloudsdkmpc.v1.CreateMergeChannelsTaskResponse`
        """
        return self._create_merge_channels_task_with_http_info(request)
def _create_merge_channels_task_with_http_info(self, request):
local_var_params = {attr: getattr(request, attr) for attr in request.attribute_map if hasattr(request, attr)}
cname = None
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v1/{project_id}/audio/services/merge_channels/task',
method='POST',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
cname=cname,
response_type='CreateMergeChannelsTaskResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
    def create_reset_tracks_task(self, request):
        """Create an audio track-reset task.

        Remuxes a source using a manually specified channel layout and
        language tags so it satisfies transcoding input requirements.
        Source files must reside in an authorized OBS bucket in the same
        region as MPC.
        Please refer to HUAWEI cloud API Explorer for details.

        :param request: Request instance for CreateResetTracksTask
        :type request: :class:`huaweicloudsdkmpc.v1.CreateResetTracksTaskRequest`
        :rtype: :class:`huaweicloudsdkmpc.v1.CreateResetTracksTaskResponse`
        """
        return self._create_reset_tracks_task_with_http_info(request)
def _create_reset_tracks_task_with_http_info(self, request):
local_var_params = {attr: getattr(request, attr) for attr in request.attribute_map if hasattr(request, attr)}
cname = None
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v1/{project_id}/audio/services/reset_tracks/task',
method='POST',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
cname=cname,
response_type='CreateResetTracksTaskResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
    def delete_merge_channels_task(self, request):
        """Cancel a channel-merge task.

        Please refer to HUAWEI cloud API Explorer for details.

        :param request: Request instance for DeleteMergeChannelsTask
        :type request: :class:`huaweicloudsdkmpc.v1.DeleteMergeChannelsTaskRequest`
        :rtype: :class:`huaweicloudsdkmpc.v1.DeleteMergeChannelsTaskResponse`
        """
        return self._delete_merge_channels_task_with_http_info(request)
def _delete_merge_channels_task_with_http_info(self, request):
local_var_params = {attr: getattr(request, attr) for attr in request.attribute_map if hasattr(request, attr)}
cname = None
collection_formats = {}
path_params = {}
query_params = []
if 'task_id' in local_var_params:
query_params.append(('task_id', local_var_params['task_id']))
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v1/{project_id}/audio/services/merge_channels/task',
method='DELETE',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
cname=cname,
response_type='DeleteMergeChannelsTaskResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
    def delete_reset_tracks_task(self, request):
        """Cancel an audio track-reset task.

        Please refer to HUAWEI cloud API Explorer for details.

        :param request: Request instance for DeleteResetTracksTask
        :type request: :class:`huaweicloudsdkmpc.v1.DeleteResetTracksTaskRequest`
        :rtype: :class:`huaweicloudsdkmpc.v1.DeleteResetTracksTaskResponse`
        """
        return self._delete_reset_tracks_task_with_http_info(request)
def _delete_reset_tracks_task_with_http_info(self, request):
local_var_params = {attr: getattr(request, attr) for attr in request.attribute_map if hasattr(request, attr)}
cname = None
collection_formats = {}
path_params = {}
query_params = []
if 'task_id' in local_var_params:
query_params.append(('task_id', local_var_params['task_id']))
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v1/{project_id}/audio/services/reset_tracks/task',
method='DELETE',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
cname=cname,
response_type='DeleteResetTracksTaskResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
    def list_merge_channels_task(self, request):
        """Query the status of channel-merge tasks.

        Please refer to HUAWEI cloud API Explorer for details.

        :param request: Request instance for ListMergeChannelsTask
        :type request: :class:`huaweicloudsdkmpc.v1.ListMergeChannelsTaskRequest`
        :rtype: :class:`huaweicloudsdkmpc.v1.ListMergeChannelsTaskResponse`
        """
        return self._list_merge_channels_task_with_http_info(request)
def _list_merge_channels_task_with_http_info(self, request):
local_var_params = {attr: getattr(request, attr) for attr in request.attribute_map if hasattr(request, attr)}
cname = None
collection_formats = {}
path_params = {}
query_params = []
if 'task_id' in local_var_params:
query_params.append(('task_id', local_var_params['task_id']))
collection_formats['task_id'] = 'multi'
if 'status' in local_var_params:
query_params.append(('status', local_var_params['status']))
if 'start_time' in local_var_params:
query_params.append(('start_time', local_var_params['start_time']))
if 'end_time' in local_var_params:
query_params.append(('end_time', local_var_params['end_time']))
if 'page' in local_var_params:
query_params.append(('page', local_var_params['page']))
if 'size' in local_var_params:
query_params.append(('size', local_var_params['size']))
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v1/{project_id}/audio/services/merge_channels/task',
method='GET',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
cname=cname,
response_type='ListMergeChannelsTaskResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
    def list_reset_tracks_task(self, request):
        """Query the status of audio track-reset tasks.

        Please refer to HUAWEI cloud API Explorer for details.

        :param request: Request instance for ListResetTracksTask
        :type request: :class:`huaweicloudsdkmpc.v1.ListResetTracksTaskRequest`
        :rtype: :class:`huaweicloudsdkmpc.v1.ListResetTracksTaskResponse`
        """
        return self._list_reset_tracks_task_with_http_info(request)
def _list_reset_tracks_task_with_http_info(self, request):
local_var_params = {attr: getattr(request, attr) for attr in request.attribute_map if hasattr(request, attr)}
cname = None
collection_formats = {}
path_params = {}
query_params = []
if 'task_id' in local_var_params:
query_params.append(('task_id', local_var_params['task_id']))
collection_formats['task_id'] = 'multi'
if 'status' in local_var_params:
query_params.append(('status', local_var_params['status']))
if 'start_time' in local_var_params:
query_params.append(('start_time', local_var_params['start_time']))
if 'end_time' in local_var_params:
query_params.append(('end_time', local_var_params['end_time']))
if 'page' in local_var_params:
query_params.append(('page', local_var_params['page']))
if 'size' in local_var_params:
query_params.append(('size', local_var_params['size']))
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v1/{project_id}/audio/services/reset_tracks/task',
method='GET',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
cname=cname,
response_type='ListResetTracksTaskResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
    def create_media_process_task(self, request):
        """Create a video enhancement task.

        No constraints.
        Please refer to HUAWEI cloud API Explorer for details.

        :param request: Request instance for CreateMediaProcessTask
        :type request: :class:`huaweicloudsdkmpc.v1.CreateMediaProcessTaskRequest`
        :rtype: :class:`huaweicloudsdkmpc.v1.CreateMediaProcessTaskResponse`
        """
        return self._create_media_process_task_with_http_info(request)
def _create_media_process_task_with_http_info(self, request):
local_var_params = {attr: getattr(request, attr) for attr in request.attribute_map if hasattr(request, attr)}
cname = None
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v1/{project_id}/enhancements',
method='POST',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
cname=cname,
response_type='CreateMediaProcessTaskResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
    def delete_media_process_task(self, request):
        """Cancel a video enhancement task.

        Only tasks still queued can be deleted.
        Please refer to HUAWEI cloud API Explorer for details.

        :param request: Request instance for DeleteMediaProcessTask
        :type request: :class:`huaweicloudsdkmpc.v1.DeleteMediaProcessTaskRequest`
        :rtype: :class:`huaweicloudsdkmpc.v1.DeleteMediaProcessTaskResponse`
        """
        return self._delete_media_process_task_with_http_info(request)
def _delete_media_process_task_with_http_info(self, request):
local_var_params = {attr: getattr(request, attr) for attr in request.attribute_map if hasattr(request, attr)}
cname = None
collection_formats = {}
path_params = {}
query_params = []
if 'task_id' in local_var_params:
query_params.append(('task_id', local_var_params['task_id']))
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v1/{project_id}/enhancements',
method='DELETE',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
cname=cname,
response_type='DeleteMediaProcessTaskResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
    def list_media_process_task(self, request):
        """Query video enhancement tasks.

        No constraints.
        Please refer to HUAWEI cloud API Explorer for details.

        :param request: Request instance for ListMediaProcessTask
        :type request: :class:`huaweicloudsdkmpc.v1.ListMediaProcessTaskRequest`
        :rtype: :class:`huaweicloudsdkmpc.v1.ListMediaProcessTaskResponse`
        """
        return self._list_media_process_task_with_http_info(request)
def _list_media_process_task_with_http_info(self, request):
local_var_params = {attr: getattr(request, attr) for attr in request.attribute_map if hasattr(request, attr)}
cname = None
collection_formats = {}
path_params = {}
query_params = []
if 'task_id' in local_var_params:
query_params.append(('task_id', local_var_params['task_id']))
collection_formats['task_id'] = 'multi'
if 'status' in local_var_params:
query_params.append(('status', local_var_params['status']))
if 'start_time' in local_var_params:
query_params.append(('start_time', local_var_params['start_time']))
if 'end_time' in local_var_params:
query_params.append(('end_time', local_var_params['end_time']))
if 'page' in local_var_params:
query_params.append(('page', local_var_params['page']))
if 'size' in local_var_params:
query_params.append(('size', local_var_params['size']))
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v1/{project_id}/enhancements',
method='GET',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
cname=cname,
response_type='ListMediaProcessTaskResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
    def create_mpe_call_back(self, request):
        """MPE notification to MPC.

        MPE calls this interface to notify MPC of results such as remuxing.

        Please refer to HUAWEI cloud API Explorer for details.

        :param request: Request instance for CreateMpeCallBack
        :type request: :class:`huaweicloudsdkmpc.v1.CreateMpeCallBackRequest`
        :rtype: :class:`huaweicloudsdkmpc.v1.CreateMpeCallBackResponse`
        """
        return self._create_mpe_call_back_with_http_info(request)
def _create_mpe_call_back_with_http_info(self, request):
local_var_params = {attr: getattr(request, attr) for attr in request.attribute_map if hasattr(request, attr)}
cname = None
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v1/mpe/notification',
method='POST',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
cname=cname,
response_type='CreateMpeCallBackResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
    def create_quality_enhance_template(self, request):
        """Create a video enhancement template.

        Please refer to HUAWEI cloud API Explorer for details.

        :param request: Request instance for CreateQualityEnhanceTemplate
        :type request: :class:`huaweicloudsdkmpc.v1.CreateQualityEnhanceTemplateRequest`
        :rtype: :class:`huaweicloudsdkmpc.v1.CreateQualityEnhanceTemplateResponse`
        """
        return self._create_quality_enhance_template_with_http_info(request)
def _create_quality_enhance_template_with_http_info(self, request):
local_var_params = {attr: getattr(request, attr) for attr in request.attribute_map if hasattr(request, attr)}
cname = None
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v1/{project_id}/template/qualityenhance',
method='POST',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
cname=cname,
response_type='CreateQualityEnhanceTemplateResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
    def delete_quality_enhance_template(self, request):
        """Delete a user-defined video enhancement template.

        Please refer to HUAWEI cloud API Explorer for details.

        :param request: Request instance for DeleteQualityEnhanceTemplate
        :type request: :class:`huaweicloudsdkmpc.v1.DeleteQualityEnhanceTemplateRequest`
        :rtype: :class:`huaweicloudsdkmpc.v1.DeleteQualityEnhanceTemplateResponse`
        """
        return self._delete_quality_enhance_template_with_http_info(request)
def _delete_quality_enhance_template_with_http_info(self, request):
local_var_params = {attr: getattr(request, attr) for attr in request.attribute_map if hasattr(request, attr)}
cname = None
collection_formats = {}
path_params = {}
query_params = []
if 'template_id' in local_var_params:
query_params.append(('template_id', local_var_params['template_id']))
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v1/{project_id}/template/qualityenhance',
method='DELETE',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
cname=cname,
response_type='DeleteQualityEnhanceTemplateResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
    def list_quality_enhance_default_template(self, request):
        """Query preset video enhancement templates.

        Returns all preset video enhancement templates.

        Please refer to HUAWEI cloud API Explorer for details.

        :param request: Request instance for ListQualityEnhanceDefaultTemplate
        :type request: :class:`huaweicloudsdkmpc.v1.ListQualityEnhanceDefaultTemplateRequest`
        :rtype: :class:`huaweicloudsdkmpc.v1.ListQualityEnhanceDefaultTemplateResponse`
        """
        return self._list_quality_enhance_default_template_with_http_info(request)
def _list_quality_enhance_default_template_with_http_info(self, request):
local_var_params = {attr: getattr(request, attr) for attr in request.attribute_map if hasattr(request, attr)}
cname = None
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v1/{project_id}/template/qualityenhance/default',
method='GET',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
cname=cname,
response_type='ListQualityEnhanceDefaultTemplateResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
    def update_quality_enhance_template(self, request):
        """Update a video enhancement template.

        Please refer to HUAWEI cloud API Explorer for details.

        :param request: Request instance for UpdateQualityEnhanceTemplate
        :type request: :class:`huaweicloudsdkmpc.v1.UpdateQualityEnhanceTemplateRequest`
        :rtype: :class:`huaweicloudsdkmpc.v1.UpdateQualityEnhanceTemplateResponse`
        """
        return self._update_quality_enhance_template_with_http_info(request)
def _update_quality_enhance_template_with_http_info(self, request):
local_var_params = {attr: getattr(request, attr) for attr in request.attribute_map if hasattr(request, attr)}
cname = None
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v1/{project_id}/template/qualityenhance',
method='PUT',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
cname=cname,
response_type='UpdateQualityEnhanceTemplateResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
    def list_transcode_detail(self, request):
        """Query transcoding details of media assets.

        Please refer to HUAWEI cloud API Explorer for details.

        :param request: Request instance for ListTranscodeDetail
        :type request: :class:`huaweicloudsdkmpc.v1.ListTranscodeDetailRequest`
        :rtype: :class:`huaweicloudsdkmpc.v1.ListTranscodeDetailResponse`
        """
        return self._list_transcode_detail_with_http_info(request)
def _list_transcode_detail_with_http_info(self, request):
local_var_params = {attr: getattr(request, attr) for attr in request.attribute_map if hasattr(request, attr)}
cname = None
collection_formats = {}
path_params = {}
query_params = []
if 'task_id' in local_var_params:
query_params.append(('task_id', local_var_params['task_id']))
collection_formats['task_id'] = 'multi'
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v1/{project_id}/transcodings/detail',
method='GET',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
cname=cname,
response_type='ListTranscodeDetailResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
    def cancel_remux_task(self, request):
        """Cancel a remux (repackaging) task.

        Cancels a submitted remux task; only tasks still waiting in the queue
        can be cancelled.

        Please refer to HUAWEI cloud API Explorer for details.

        :param request: Request instance for CancelRemuxTask
        :type request: :class:`huaweicloudsdkmpc.v1.CancelRemuxTaskRequest`
        :rtype: :class:`huaweicloudsdkmpc.v1.CancelRemuxTaskResponse`
        """
        return self._cancel_remux_task_with_http_info(request)
def _cancel_remux_task_with_http_info(self, request):
local_var_params = {attr: getattr(request, attr) for attr in request.attribute_map if hasattr(request, attr)}
cname = None
collection_formats = {}
path_params = {}
query_params = []
if 'task_id' in local_var_params:
query_params.append(('task_id', local_var_params['task_id']))
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v1/{project_id}/remux',
method='DELETE',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
cname=cname,
response_type='CancelRemuxTaskResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
    def create_remux_task(self, request):
        """Create a remux (repackaging) task.

        Converts the container format of an audio/video file without changing
        its resolution or bitrate. The input file must be stored in an OBS
        bucket in the same region as the MPC service, and the bucket must be
        authorized.

        Please refer to HUAWEI cloud API Explorer for details.

        :param request: Request instance for CreateRemuxTask
        :type request: :class:`huaweicloudsdkmpc.v1.CreateRemuxTaskRequest`
        :rtype: :class:`huaweicloudsdkmpc.v1.CreateRemuxTaskResponse`
        """
        return self._create_remux_task_with_http_info(request)
def _create_remux_task_with_http_info(self, request):
local_var_params = {attr: getattr(request, attr) for attr in request.attribute_map if hasattr(request, attr)}
cname = None
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v1/{project_id}/remux',
method='POST',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
cname=cname,
response_type='CreateRemuxTaskResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
    def create_retry_remux_task(self, request):
        """Retry a failed remux task.

        Please refer to HUAWEI cloud API Explorer for details.

        :param request: Request instance for CreateRetryRemuxTask
        :type request: :class:`huaweicloudsdkmpc.v1.CreateRetryRemuxTaskRequest`
        :rtype: :class:`huaweicloudsdkmpc.v1.CreateRetryRemuxTaskResponse`
        """
        return self._create_retry_remux_task_with_http_info(request)
def _create_retry_remux_task_with_http_info(self, request):
local_var_params = {attr: getattr(request, attr) for attr in request.attribute_map if hasattr(request, attr)}
cname = None
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v1/{project_id}/remux',
method='PUT',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
cname=cname,
response_type='CreateRetryRemuxTaskResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
    def delete_remux_task(self, request):
        """Delete remux task records.

        Only records whose status is "cancelled", "succeeded" or "failed"
        can be deleted.

        Please refer to HUAWEI cloud API Explorer for details.

        :param request: Request instance for DeleteRemuxTask
        :type request: :class:`huaweicloudsdkmpc.v1.DeleteRemuxTaskRequest`
        :rtype: :class:`huaweicloudsdkmpc.v1.DeleteRemuxTaskResponse`
        """
        return self._delete_remux_task_with_http_info(request)
def _delete_remux_task_with_http_info(self, request):
local_var_params = {attr: getattr(request, attr) for attr in request.attribute_map if hasattr(request, attr)}
cname = None
collection_formats = {}
path_params = {}
query_params = []
if 'task_id' in local_var_params:
query_params.append(('task_id', local_var_params['task_id']))
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v1/{project_id}/remux/task',
method='DELETE',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
cname=cname,
response_type='DeleteRemuxTaskResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
    def list_remux_task(self, request):
        """Query remux tasks.

        Queries the status of remux tasks.

        Please refer to HUAWEI cloud API Explorer for details.

        :param request: Request instance for ListRemuxTask
        :type request: :class:`huaweicloudsdkmpc.v1.ListRemuxTaskRequest`
        :rtype: :class:`huaweicloudsdkmpc.v1.ListRemuxTaskResponse`
        """
        return self._list_remux_task_with_http_info(request)
def _list_remux_task_with_http_info(self, request):
local_var_params = {attr: getattr(request, attr) for attr in request.attribute_map if hasattr(request, attr)}
cname = None
collection_formats = {}
path_params = {}
query_params = []
if 'task_id' in local_var_params:
query_params.append(('task_id', local_var_params['task_id']))
collection_formats['task_id'] = 'multi'
if 'status' in local_var_params:
query_params.append(('status', local_var_params['status']))
if 'start_time' in local_var_params:
query_params.append(('start_time', local_var_params['start_time']))
if 'end_time' in local_var_params:
query_params.append(('end_time', local_var_params['end_time']))
if 'input_bucket' in local_var_params:
query_params.append(('input_bucket', local_var_params['input_bucket']))
if 'input_object' in local_var_params:
query_params.append(('input_object', local_var_params['input_object']))
if 'page' in local_var_params:
query_params.append(('page', local_var_params['page']))
if 'size' in local_var_params:
query_params.append(('size', local_var_params['size']))
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v1/{project_id}/remux',
method='GET',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
cname=cname,
response_type='ListRemuxTaskResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
    def create_template_group(self, request):
        """Create a transcoding template group.

        A template group supports at most one input and six outputs.

        Please refer to HUAWEI cloud API Explorer for details.

        :param request: Request instance for CreateTemplateGroup
        :type request: :class:`huaweicloudsdkmpc.v1.CreateTemplateGroupRequest`
        :rtype: :class:`huaweicloudsdkmpc.v1.CreateTemplateGroupResponse`
        """
        return self._create_template_group_with_http_info(request)
def _create_template_group_with_http_info(self, request):
local_var_params = {attr: getattr(request, attr) for attr in request.attribute_map if hasattr(request, attr)}
cname = None
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v1/{project_id}/template_group/transcodings',
method='POST',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
cname=cname,
response_type='CreateTemplateGroupResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
    def delete_template_group(self, request):
        """Delete a transcoding template group.

        Please refer to HUAWEI cloud API Explorer for details.

        :param request: Request instance for DeleteTemplateGroup
        :type request: :class:`huaweicloudsdkmpc.v1.DeleteTemplateGroupRequest`
        :rtype: :class:`huaweicloudsdkmpc.v1.DeleteTemplateGroupResponse`
        """
        return self._delete_template_group_with_http_info(request)
def _delete_template_group_with_http_info(self, request):
local_var_params = {attr: getattr(request, attr) for attr in request.attribute_map if hasattr(request, attr)}
cname = None
collection_formats = {}
path_params = {}
query_params = []
if 'group_id' in local_var_params:
query_params.append(('group_id', local_var_params['group_id']))
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v1/{project_id}/template_group/transcodings',
method='DELETE',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
cname=cname,
response_type='DeleteTemplateGroupResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
    def list_template_group(self, request):
        """Query transcoding template groups.

        Returns the list of transcoding template groups.

        Please refer to HUAWEI cloud API Explorer for details.

        :param request: Request instance for ListTemplateGroup
        :type request: :class:`huaweicloudsdkmpc.v1.ListTemplateGroupRequest`
        :rtype: :class:`huaweicloudsdkmpc.v1.ListTemplateGroupResponse`
        """
        return self._list_template_group_with_http_info(request)
def _list_template_group_with_http_info(self, request):
local_var_params = {attr: getattr(request, attr) for attr in request.attribute_map if hasattr(request, attr)}
cname = None
collection_formats = {}
path_params = {}
query_params = []
if 'group_id' in local_var_params:
query_params.append(('group_id', local_var_params['group_id']))
collection_formats['group_id'] = 'multi'
if 'group_name' in local_var_params:
query_params.append(('group_name', local_var_params['group_name']))
collection_formats['group_name'] = 'multi'
if 'page' in local_var_params:
query_params.append(('page', local_var_params['page']))
if 'size' in local_var_params:
query_params.append(('size', local_var_params['size']))
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v1/{project_id}/template_group/transcodings',
method='GET',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
cname=cname,
response_type='ListTemplateGroupResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
    def update_template_group(self, request):
        """Update a transcoding template group.

        Please refer to HUAWEI cloud API Explorer for details.

        :param request: Request instance for UpdateTemplateGroup
        :type request: :class:`huaweicloudsdkmpc.v1.UpdateTemplateGroupRequest`
        :rtype: :class:`huaweicloudsdkmpc.v1.UpdateTemplateGroupResponse`
        """
        return self._update_template_group_with_http_info(request)
def _update_template_group_with_http_info(self, request):
local_var_params = {attr: getattr(request, attr) for attr in request.attribute_map if hasattr(request, attr)}
cname = None
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v1/{project_id}/template_group/transcodings',
method='PUT',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
cname=cname,
response_type='UpdateTemplateGroupResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
    def create_thumbnails_task(self, request):
        """Create a snapshot (thumbnail) task.

        Captures start at the first frame, repeat at the configured interval,
        and finish with the last frame. The input video must be stored in an
        OBS bucket in the same region as the MPC service, and the bucket must
        be authorized.

        Constraint: only JPG output is currently supported.

        Please refer to HUAWEI cloud API Explorer for details.

        :param request: Request instance for CreateThumbnailsTask
        :type request: :class:`huaweicloudsdkmpc.v1.CreateThumbnailsTaskRequest`
        :rtype: :class:`huaweicloudsdkmpc.v1.CreateThumbnailsTaskResponse`
        """
        return self._create_thumbnails_task_with_http_info(request)
def _create_thumbnails_task_with_http_info(self, request):
local_var_params = {attr: getattr(request, attr) for attr in request.attribute_map if hasattr(request, attr)}
cname = None
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v1/{project_id}/thumbnails',
method='POST',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
cname=cname,
response_type='CreateThumbnailsTaskResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
    def delete_thumbnails_task(self, request):
        """Cancel a snapshot (thumbnail) task.

        Only accepted tasks that are still waiting in the queue can be
        cancelled; tasks that are finished or already running cannot.

        Please refer to HUAWEI cloud API Explorer for details.

        :param request: Request instance for DeleteThumbnailsTask
        :type request: :class:`huaweicloudsdkmpc.v1.DeleteThumbnailsTaskRequest`
        :rtype: :class:`huaweicloudsdkmpc.v1.DeleteThumbnailsTaskResponse`
        """
        return self._delete_thumbnails_task_with_http_info(request)
def _delete_thumbnails_task_with_http_info(self, request):
local_var_params = {attr: getattr(request, attr) for attr in request.attribute_map if hasattr(request, attr)}
cname = None
collection_formats = {}
path_params = {}
query_params = []
if 'task_id' in local_var_params:
query_params.append(('task_id', local_var_params['task_id']))
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v1/{project_id}/thumbnails',
method='DELETE',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
cname=cname,
response_type='DeleteThumbnailsTaskResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
    def list_thumbnails_task(self, request):
        """Query snapshot (thumbnail) tasks.

        Returns the execution result of snapshot tasks, including status,
        input, output and other information.

        Please refer to HUAWEI cloud API Explorer for details.

        :param request: Request instance for ListThumbnailsTask
        :type request: :class:`huaweicloudsdkmpc.v1.ListThumbnailsTaskRequest`
        :rtype: :class:`huaweicloudsdkmpc.v1.ListThumbnailsTaskResponse`
        """
        return self._list_thumbnails_task_with_http_info(request)
def _list_thumbnails_task_with_http_info(self, request):
local_var_params = {attr: getattr(request, attr) for attr in request.attribute_map if hasattr(request, attr)}
cname = None
collection_formats = {}
path_params = {}
query_params = []
if 'task_id' in local_var_params:
query_params.append(('task_id', local_var_params['task_id']))
collection_formats['task_id'] = 'multi'
if 'status' in local_var_params:
query_params.append(('status', local_var_params['status']))
if 'start_time' in local_var_params:
query_params.append(('start_time', local_var_params['start_time']))
if 'end_time' in local_var_params:
query_params.append(('end_time', local_var_params['end_time']))
if 'page' in local_var_params:
query_params.append(('page', local_var_params['page']))
if 'size' in local_var_params:
query_params.append(('size', local_var_params['size']))
header_params = {}
if 'x_language' in local_var_params:
header_params['x-language'] = local_var_params['x_language']
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v1/{project_id}/thumbnails',
method='GET',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
cname=cname,
response_type='ListThumbnailsTaskResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
    def create_transcoding_task(self, request):
        """Create a transcoding task.

        Transcodes a video, optionally burning in watermarks and taking
        snapshots along the way. A transcoding template must be configured
        before transcoding. The input audio/video must be stored in an OBS
        bucket in the same region as the MPC service, and the bucket must be
        authorized.

        Please refer to HUAWEI cloud API Explorer for details.

        :param request: Request instance for CreateTranscodingTask
        :type request: :class:`huaweicloudsdkmpc.v1.CreateTranscodingTaskRequest`
        :rtype: :class:`huaweicloudsdkmpc.v1.CreateTranscodingTaskResponse`
        """
        return self._create_transcoding_task_with_http_info(request)
def _create_transcoding_task_with_http_info(self, request):
local_var_params = {attr: getattr(request, attr) for attr in request.attribute_map if hasattr(request, attr)}
cname = None
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v1/{project_id}/transcodings',
method='POST',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
cname=cname,
response_type='CreateTranscodingTaskResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
    def delete_transcoding_task(self, request):
        """Cancel a transcoding task.

        Only tasks still waiting in the transcoding queue can be cancelled;
        tasks that have started or finished transcoding cannot.

        Please refer to HUAWEI cloud API Explorer for details.

        :param request: Request instance for DeleteTranscodingTask
        :type request: :class:`huaweicloudsdkmpc.v1.DeleteTranscodingTaskRequest`
        :rtype: :class:`huaweicloudsdkmpc.v1.DeleteTranscodingTaskResponse`
        """
        return self._delete_transcoding_task_with_http_info(request)
def _delete_transcoding_task_with_http_info(self, request):
local_var_params = {attr: getattr(request, attr) for attr in request.attribute_map if hasattr(request, attr)}
cname = None
collection_formats = {}
path_params = {}
query_params = []
if 'task_id' in local_var_params:
query_params.append(('task_id', local_var_params['task_id']))
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v1/{project_id}/transcodings',
method='DELETE',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
cname=cname,
response_type='DeleteTranscodingTaskResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
    def delete_transcoding_task_by_console(self, request):
        """Delete transcoding task records.

        Only records whose status is "cancelled", "succeeded" or "failed"
        can be deleted.

        Please refer to HUAWEI cloud API Explorer for details.

        :param request: Request instance for DeleteTranscodingTaskByConsole
        :type request: :class:`huaweicloudsdkmpc.v1.DeleteTranscodingTaskByConsoleRequest`
        :rtype: :class:`huaweicloudsdkmpc.v1.DeleteTranscodingTaskByConsoleResponse`
        """
        return self._delete_transcoding_task_by_console_with_http_info(request)
def _delete_transcoding_task_by_console_with_http_info(self, request):
local_var_params = {attr: getattr(request, attr) for attr in request.attribute_map if hasattr(request, attr)}
cname = None
collection_formats = {}
path_params = {}
query_params = []
if 'task_id' in local_var_params:
query_params.append(('task_id', local_var_params['task_id']))
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v1/{project_id}/transcodings/task',
method='DELETE',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
cname=cname,
response_type='DeleteTranscodingTaskByConsoleResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def list_stat_summary(self, request):
    """Query VOD overview statistics.

    Queries the "transcoding duration" and "number of transcoding API
    calls" for the last week, the last month, or a custom time range.

    Please refer to HUAWEI cloud API Explorer for details.

    :param request: Request instance for ListStatSummary
    :type request: :class:`huaweicloudsdkmpc.v1.ListStatSummaryRequest`
    :rtype: :class:`huaweicloudsdkmpc.v1.ListStatSummaryResponse`
    """
    return self._list_stat_summary_with_http_info(request)

def _list_stat_summary_with_http_info(self, request):
    # Collect only the request attributes that were actually set.
    local_var_params = {attr: getattr(request, attr) for attr in request.attribute_map if hasattr(request, attr)}

    cname = None
    collection_formats = {}
    path_params = {}

    query_params = []
    if 'start_time' in local_var_params:
        query_params.append(('start_time', local_var_params['start_time']))
    if 'end_time' in local_var_params:
        query_params.append(('end_time', local_var_params['end_time']))
    if 'stat_type' in local_var_params:
        query_params.append(('stat_type', local_var_params['stat_type']))

    header_params = {}
    form_params = {}

    body_params = None
    if isinstance(request, SdkStreamRequest):
        body_params = request.get_file_stream()

    response_headers = []
    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json'])
    auth_settings = []

    return self.call_api(
        resource_path='/v1/{project_id}/transcodings/summaries',
        method='GET',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        cname=cname,
        response_type='ListStatSummaryResponse',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def list_transcoding_task(self, request):
    """Query transcoding tasks.

    Queries the status of transcoding tasks.

    Please refer to HUAWEI cloud API Explorer for details.

    :param request: Request instance for ListTranscodingTask
    :type request: :class:`huaweicloudsdkmpc.v1.ListTranscodingTaskRequest`
    :rtype: :class:`huaweicloudsdkmpc.v1.ListTranscodingTaskResponse`
    """
    return self._list_transcoding_task_with_http_info(request)

def _list_transcoding_task_with_http_info(self, request):
    # Collect only the request attributes that were actually set.
    local_var_params = {attr: getattr(request, attr) for attr in request.attribute_map if hasattr(request, attr)}

    cname = None
    collection_formats = {}
    path_params = {}

    query_params = []
    if 'task_id' in local_var_params:
        query_params.append(('task_id', local_var_params['task_id']))
        # task_id accepts multiple values, serialized as repeated params.
        collection_formats['task_id'] = 'multi'
    if 'status' in local_var_params:
        query_params.append(('status', local_var_params['status']))
    if 'start_time' in local_var_params:
        query_params.append(('start_time', local_var_params['start_time']))
    if 'end_time' in local_var_params:
        query_params.append(('end_time', local_var_params['end_time']))
    if 'page' in local_var_params:
        query_params.append(('page', local_var_params['page']))
    if 'size' in local_var_params:
        query_params.append(('size', local_var_params['size']))

    header_params = {}
    if 'x_language' in local_var_params:
        header_params['x-language'] = local_var_params['x_language']

    form_params = {}

    body_params = None
    if isinstance(request, SdkStreamRequest):
        body_params = request.get_file_stream()

    response_headers = []
    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json'])
    auth_settings = []

    return self.call_api(
        resource_path='/v1/{project_id}/transcodings',
        method='GET',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        cname=cname,
        response_type='ListTranscodingTaskResponse',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def create_trans_template(self, request):
    """Create a transcoding template.

    Creates a custom transcoding template used to transcode with
    user-defined settings.

    Please refer to HUAWEI cloud API Explorer for details.

    :param request: Request instance for CreateTransTemplate
    :type request: :class:`huaweicloudsdkmpc.v1.CreateTransTemplateRequest`
    :rtype: :class:`huaweicloudsdkmpc.v1.CreateTransTemplateResponse`
    """
    return self._create_trans_template_with_http_info(request)

def _create_trans_template_with_http_info(self, request):
    # Collect only the request attributes that were actually set.
    local_var_params = {attr: getattr(request, attr) for attr in request.attribute_map if hasattr(request, attr)}

    cname = None
    collection_formats = {}
    path_params = {}
    query_params = []
    header_params = {}
    form_params = {}

    body_params = None
    if 'body' in local_var_params:
        body_params = local_var_params['body']
    if isinstance(request, SdkStreamRequest):
        body_params = request.get_file_stream()

    response_headers = []
    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json'])
    auth_settings = []

    return self.call_api(
        resource_path='/v1/{project_id}/template/transcodings',
        method='POST',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        cname=cname,
        response_type='CreateTransTemplateResponse',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def delete_template(self, request):
    """Delete a transcoding template.

    Please refer to HUAWEI cloud API Explorer for details.

    :param request: Request instance for DeleteTemplate
    :type request: :class:`huaweicloudsdkmpc.v1.DeleteTemplateRequest`
    :rtype: :class:`huaweicloudsdkmpc.v1.DeleteTemplateResponse`
    """
    return self._delete_template_with_http_info(request)

def _delete_template_with_http_info(self, request):
    # Collect only the request attributes that were actually set.
    local_var_params = {attr: getattr(request, attr) for attr in request.attribute_map if hasattr(request, attr)}

    cname = None
    collection_formats = {}
    path_params = {}

    query_params = []
    if 'template_id' in local_var_params:
        query_params.append(('template_id', local_var_params['template_id']))

    header_params = {}
    form_params = {}

    body_params = None
    if isinstance(request, SdkStreamRequest):
        body_params = request.get_file_stream()

    response_headers = []
    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json'])
    auth_settings = []

    return self.call_api(
        resource_path='/v1/{project_id}/template/transcodings',
        method='DELETE',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        cname=cname,
        response_type='DeleteTemplateResponse',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def list_template(self, request):
    """Query transcoding templates.

    Queries user-defined transcoding templates. Supports lookup by
    template ID (at most 10 IDs) or paginated full listing.

    Please refer to HUAWEI cloud API Explorer for details.

    :param request: Request instance for ListTemplate
    :type request: :class:`huaweicloudsdkmpc.v1.ListTemplateRequest`
    :rtype: :class:`huaweicloudsdkmpc.v1.ListTemplateResponse`
    """
    return self._list_template_with_http_info(request)

def _list_template_with_http_info(self, request):
    # Collect only the request attributes that were actually set.
    local_var_params = {attr: getattr(request, attr) for attr in request.attribute_map if hasattr(request, attr)}

    cname = None
    collection_formats = {}
    path_params = {}

    query_params = []
    if 'template_id' in local_var_params:
        query_params.append(('template_id', local_var_params['template_id']))
        # template_id accepts multiple values, serialized as repeated params.
        collection_formats['template_id'] = 'multi'
    if 'page' in local_var_params:
        query_params.append(('page', local_var_params['page']))
    if 'size' in local_var_params:
        query_params.append(('size', local_var_params['size']))

    header_params = {}
    form_params = {}

    body_params = None
    if isinstance(request, SdkStreamRequest):
        body_params = request.get_file_stream()

    response_headers = []
    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json'])
    auth_settings = []

    return self.call_api(
        resource_path='/v1/{project_id}/template/transcodings',
        method='GET',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        cname=cname,
        response_type='ListTemplateResponse',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def update_trans_template(self, request):
    """Update a transcoding template.

    Please refer to HUAWEI cloud API Explorer for details.

    :param request: Request instance for UpdateTransTemplate
    :type request: :class:`huaweicloudsdkmpc.v1.UpdateTransTemplateRequest`
    :rtype: :class:`huaweicloudsdkmpc.v1.UpdateTransTemplateResponse`
    """
    return self._update_trans_template_with_http_info(request)

def _update_trans_template_with_http_info(self, request):
    # Collect only the request attributes that were actually set.
    local_var_params = {attr: getattr(request, attr) for attr in request.attribute_map if hasattr(request, attr)}

    cname = None
    collection_formats = {}
    path_params = {}
    query_params = []
    header_params = {}
    form_params = {}

    body_params = None
    if 'body' in local_var_params:
        body_params = local_var_params['body']
    if isinstance(request, SdkStreamRequest):
        body_params = request.get_file_stream()

    response_headers = []
    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json'])
    auth_settings = []

    return self.call_api(
        resource_path='/v1/{project_id}/template/transcodings',
        method='PUT',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        cname=cname,
        response_type='UpdateTransTemplateResponse',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def create_watermark_template(self, request):
    """Create a watermark template.

    Creates a custom watermark template.

    Please refer to HUAWEI cloud API Explorer for details.

    :param request: Request instance for CreateWatermarkTemplate
    :type request: :class:`huaweicloudsdkmpc.v1.CreateWatermarkTemplateRequest`
    :rtype: :class:`huaweicloudsdkmpc.v1.CreateWatermarkTemplateResponse`
    """
    return self._create_watermark_template_with_http_info(request)

def _create_watermark_template_with_http_info(self, request):
    # Collect only the request attributes that were actually set.
    local_var_params = {attr: getattr(request, attr) for attr in request.attribute_map if hasattr(request, attr)}

    cname = None
    collection_formats = {}
    path_params = {}
    query_params = []
    header_params = {}
    form_params = {}

    body_params = None
    if 'body' in local_var_params:
        body_params = local_var_params['body']
    if isinstance(request, SdkStreamRequest):
        body_params = request.get_file_stream()

    response_headers = []
    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json'])
    auth_settings = []

    return self.call_api(
        resource_path='/v1/{project_id}/template/watermark',
        method='POST',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        cname=cname,
        response_type='CreateWatermarkTemplateResponse',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def delete_watermark_template(self, request):
    """Delete a watermark template.

    Deletes a custom watermark template.

    Please refer to HUAWEI cloud API Explorer for details.

    :param request: Request instance for DeleteWatermarkTemplate
    :type request: :class:`huaweicloudsdkmpc.v1.DeleteWatermarkTemplateRequest`
    :rtype: :class:`huaweicloudsdkmpc.v1.DeleteWatermarkTemplateResponse`
    """
    return self._delete_watermark_template_with_http_info(request)

def _delete_watermark_template_with_http_info(self, request):
    # Collect only the request attributes that were actually set.
    local_var_params = {attr: getattr(request, attr) for attr in request.attribute_map if hasattr(request, attr)}

    cname = None
    collection_formats = {}
    path_params = {}

    query_params = []
    if 'template_id' in local_var_params:
        query_params.append(('template_id', local_var_params['template_id']))

    header_params = {}
    form_params = {}

    body_params = None
    if isinstance(request, SdkStreamRequest):
        body_params = request.get_file_stream()

    response_headers = []
    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json'])
    auth_settings = []

    return self.call_api(
        resource_path='/v1/{project_id}/template/watermark',
        method='DELETE',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        cname=cname,
        response_type='DeleteWatermarkTemplateResponse',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def list_watermark_template(self, request):
    """Query watermark templates.

    Queries custom watermark templates, either by template ID or as a
    paginated full listing.

    Please refer to HUAWEI cloud API Explorer for details.

    :param request: Request instance for ListWatermarkTemplate
    :type request: :class:`huaweicloudsdkmpc.v1.ListWatermarkTemplateRequest`
    :rtype: :class:`huaweicloudsdkmpc.v1.ListWatermarkTemplateResponse`
    """
    return self._list_watermark_template_with_http_info(request)

def _list_watermark_template_with_http_info(self, request):
    # Collect only the request attributes that were actually set.
    local_var_params = {attr: getattr(request, attr) for attr in request.attribute_map if hasattr(request, attr)}

    cname = None
    collection_formats = {}
    path_params = {}

    query_params = []
    if 'template_id' in local_var_params:
        query_params.append(('template_id', local_var_params['template_id']))
        # template_id accepts multiple values, serialized as repeated params.
        collection_formats['template_id'] = 'multi'
    if 'page' in local_var_params:
        query_params.append(('page', local_var_params['page']))
    if 'size' in local_var_params:
        query_params.append(('size', local_var_params['size']))

    header_params = {}
    form_params = {}

    body_params = None
    if isinstance(request, SdkStreamRequest):
        body_params = request.get_file_stream()

    response_headers = []
    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json'])
    auth_settings = []

    return self.call_api(
        resource_path='/v1/{project_id}/template/watermark',
        method='GET',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        cname=cname,
        response_type='ListWatermarkTemplateResponse',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def update_watermark_template(self, request):
    """Update a watermark template.

    Updates a custom watermark template.

    Please refer to HUAWEI cloud API Explorer for details.

    :param request: Request instance for UpdateWatermarkTemplate
    :type request: :class:`huaweicloudsdkmpc.v1.UpdateWatermarkTemplateRequest`
    :rtype: :class:`huaweicloudsdkmpc.v1.UpdateWatermarkTemplateResponse`
    """
    return self._update_watermark_template_with_http_info(request)

def _update_watermark_template_with_http_info(self, request):
    # Collect only the request attributes that were actually set.
    local_var_params = {attr: getattr(request, attr) for attr in request.attribute_map if hasattr(request, attr)}

    cname = None
    collection_formats = {}
    path_params = {}
    query_params = []
    header_params = {}
    form_params = {}

    body_params = None
    if 'body' in local_var_params:
        body_params = local_var_params['body']
    if isinstance(request, SdkStreamRequest):
        body_params = request.get_file_stream()

    response_headers = []
    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json'])
    auth_settings = []

    return self.call_api(
        resource_path='/v1/{project_id}/template/watermark',
        method='PUT',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        cname=cname,
        response_type='UpdateWatermarkTemplateResponse',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def call_api(self, resource_path, method, path_params=None, query_params=None, header_params=None, body=None,
             post_params=None, cname=None, response_type=None, response_headers=None, auth_settings=None,
             collection_formats=None, request_type=None):
    """Makes the HTTP request and returns deserialized data.

    Every generated SDK method funnels through this thin delegation to
    ``do_http_request``.

    :param resource_path: Path to method endpoint.
    :param method: Method to call.
    :param path_params: Path parameters in the url.
    :param query_params: Query parameters in the url.
    :param header_params: Header parameters to be placed in the request header.
    :param body: Request body.
    :param post_params: Request post form parameters,
        for `application/x-www-form-urlencoded`, `multipart/form-data`.
    :param cname: Used for obs endpoint.
    :param auth_settings: Auth Settings names for the request.
    :param response_type: Response data type.
    :param response_headers: Header should be added to response data.
    :param collection_formats: dict of collection formats for path, query,
        header, and post parameters.
    :param request_type: Request data type.
    :return:
        Return the response directly.
    """
    return self.do_http_request(
        method=method,
        resource_path=resource_path,
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body,
        post_params=post_params,
        cname=cname,
        response_type=response_type,
        response_headers=response_headers,
        collection_formats=collection_formats,
        request_type=request_type)
| [
"hwcloudsdk@huawei.com"
] | hwcloudsdk@huawei.com |
286e5b54ffddc08cced75a5f8c14d1b7d58eb735 | 04c06575a49a3f4e30e4f3f2bf2365585664d2e8 | /python_leetcode_2020/Python_Leetcode_2020/204_count_primes.py | 92cf09def1c8409654aad00245651ec1aa15c81e | [] | no_license | xiangcao/Leetcode | 18da3d5b271ff586fdf44c53f1a677423ca3dfed | d953abe2c9680f636563e76287d2f907e90ced63 | refs/heads/master | 2022-06-22T04:45:15.446329 | 2022-06-17T13:03:01 | 2022-06-17T13:03:01 | 26,052,392 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 637 | py | """
Count the number of prime numbers less than a non-negative number, n.
Example 1:
Input: n = 10
Output: 4
Explanation: There are 4 prime numbers less than 10, they are 2, 3, 5, 7.
Example 2:
Input: n = 0
Output: 0
Example 3:
Input: n = 1
Output: 0
"""
class Solution:
    def countPrimes(self, n: int) -> int:
        """Return the number of primes strictly less than n.

        Uses the sieve of Eratosthenes: O(n log log n) time, O(n) space.

        :param n: non-negative upper bound (exclusive)
        :return: count of primes p with p < n
        """
        # No primes below 2 (also guards the is_prime[0]/[1] writes below).
        if n < 3:
            return 0
        is_prime = [True] * n
        is_prime[0] = is_prime[1] = False
        # BUG FIX: the original called math.sqrt without importing math,
        # raising NameError when run standalone. int(n ** 0.5) + 1 gives
        # the same sieve bound with no import.
        for i in range(2, int(n ** 0.5) + 1):
            if is_prime[i]:
                # Multiples below i*i were already crossed off by smaller primes.
                for j in range(i * i, n, i):
                    is_prime[j] = False
        return is_prime.count(True)
| [
"xiangcao_liu@apple.com"
] | xiangcao_liu@apple.com |
6cc9e1b159a966897cb04e871047b58e49c391ed | 83cf642504313b6ef6527dda52158a6698c24efe | /scripts/addons/fd_scripting_tools/autocompletion/suggestions/dynamic/_bpy_fake/__private__/motionpath.py | 0f8b705595927dd286be09273a493ff5ba911c2d | [] | no_license | PyrokinesisStudio/Fluid-Designer-Scripts | a4c40b871e8d27b0d76a8025c804d5a41d09128f | 23f6fca7123df545f0c91bf4617f4de7d9c12e6b | refs/heads/master | 2021-06-07T15:11:27.144473 | 2016-11-08T03:02:37 | 2016-11-08T03:02:37 | 113,630,627 | 1 | 0 | null | 2017-12-09T00:55:58 | 2017-12-09T00:55:58 | null | UTF-8 | Python | false | false | 1,020 | py | from . motionpathvert import MotionPathVert
from . struct import Struct
from . bpy_struct import bpy_struct
import mathutils
class MotionPath(bpy_struct):
    # Autocompletion stub mirroring Blender's bpy.types.MotionPath RNA struct.
    # Each property returns a dummy instance whose type is stated in the
    # docstring; the docstrings themselves are the payload shown by the
    # completion tooling, so they are kept verbatim.
    @property
    def rna_type(self):
        '''(Struct) RNA type definition'''
        return Struct()

    @property
    def points(self):
        '''(Sequence of MotionPathVert) Cached positions per frame'''
        return (MotionPathVert(),)

    @property
    def frame_start(self):
        '''(Integer) Starting frame of the stored range'''
        return int()

    @property
    def frame_end(self):
        '''(Integer) End frame of the stored range'''
        return int()

    @property
    def length(self):
        '''(Integer) Number of frames cached'''
        return int()

    @property
    def use_bone_head(self):
        '''(Boolean) For PoseBone paths, use the bone head location when
        calculating this path'''
        return bool()

    @property
    def is_modified(self):
        '''(Boolean) Path is being edited'''
        return bool()
"dev.andrewpeel@gmail.com"
] | dev.andrewpeel@gmail.com |
583762e669d9ed831762240fab8c3d9d18bf4965 | 7b5f6c72fa6427a9d9350d1c9007f346161b3866 | /portfolio/views.py | 8c90e48160b005931e983f557f6ad67693646031 | [
"MIT"
] | permissive | GoWebyCMS/portfolio | a15daf8f553e7ddb92154c60e7b57b4383fa2a44 | 1ed5c20f6fe280388ff0876ca6a5b5129cf6b3f2 | refs/heads/master | 2020-06-25T22:33:46.298359 | 2017-07-17T10:44:46 | 2017-07-17T10:44:46 | 96,993,813 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,481 | py | from django.shortcuts import render
from django.http import HttpResponse
from django.views.generic import ListView
from django.utils import timezone
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from .models import Project, ProjectCategory, Skill
# Create your views here.
# TODO: Find out how to UT test a class based view
# class PortfolioListView(ListView):
def portfolio_list(request):
    """Render the paginated list of finished portfolio projects.

    Shows projects whose end_date is in the past, newest first, six per
    page. Context: project_list (full queryset), projects (current page),
    categories, page.
    """
    project_list = Project.objects.filter(end_date__lte=timezone.now()).order_by('-end_date')
    # BUG FIX: the model is imported as ProjectCategory; the original
    # referenced an undefined name `Category`, raising NameError.
    categories = ProjectCategory.objects.all()

    # Paginate the projects, six per page.
    paginator = Paginator(project_list, 6)
    page = request.GET.get('page')
    try:
        projects = paginator.page(page)
    except PageNotAnInteger:
        # If page is not an integer, deliver the first page.
        projects = paginator.page(1)
    except EmptyPage:
        # If page is out of range, deliver the last page of results.
        projects = paginator.page(paginator.num_pages)

    return render(request, 'portfolio/list.html',
                  {
                      'project_list': project_list,
                      'projects': projects,
                      'categories': categories,
                      'page': page,
                  })
def portfolio_detail(request, pk):
    """Render the detail page for a single project, or 404 if it does not exist.

    BUG FIX: get_object_or_404 was called without being imported; it is now
    imported from django.shortcuts at the top of the module.
    """
    project = get_object_or_404(Project, pk=pk)
    return render(
        request,
        'portfolio/portfolio_detail.html',
        {
            'project': project,
        }
    )
| [
"kkampardi@gmail.com"
] | kkampardi@gmail.com |
2f6e304d6d3a22932ae14069516b6f656a244924 | 4a8c1f7d9935609b780aff95c886ef7781967be0 | /atcoder/LeetCodeWeekly/327_c.py | 255919176b616b224096bbcb0e64767e7e69ba66 | [] | no_license | recuraki/PythonJunkTest | d5e5f5957ac5dd0c539ef47759b1fe5ef7a2c52a | 2556c973d468a6988d307ce85c5f2f8ab15e759a | refs/heads/master | 2023-08-09T17:42:21.875768 | 2023-07-18T23:06:31 | 2023-07-18T23:06:31 | 13,790,016 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,077 | py | from typing import List, Tuple, Optional
from pprint import pprint
from collections import deque, defaultdict
# All 26 lowercase ASCII letters, 'a' through 'z'.
list_lower = [chr(code) for code in range(ord('a'), ord('z') + 1)]
class Solution:
    def isItPossible(self, word1: str, word2: str) -> bool:
        """Decide whether swapping exactly one character between word1 and
        word2 can make their numbers of distinct characters equal.

        NOTE(review): contest solution; the case analysis below reasons only
        about distinct-letter counts and has not been independently verified
        against all inputs.
        """
        se1 = defaultdict(int)
        se2 = defaultdict(int)
        for x in word1: se1[x] += 1
        for x in word2: se2[x] += 1
        # Normalize so se1 is the word with at least as many distinct letters.
        if len(se1) < len(se2):
            se1, se2 = se2, se1
            word1, word2 = word2, word1
        diffchar = (len(se1) - len(se2))
        # Debug trace left in by the author (kept verbatim).
        print(">", diffchar, word1, word2)
        # A gap of 3+ distinct letters can never be closed by a single swap.
        if diffchar >= 3:
            return False
        # Gap 0: a swap is mandatory, so it must leave both distinct counts
        # unchanged. That works when either:
        #   pat1: both swapped letters occur exactly once, or
        #   pat2: both swapped letters occur at least twice.
        if diffchar == 0:
            for a in list_lower:
                if se1[a] == 0: continue
                for b in list_lower:
                    if se2[b] == 0: continue
                    if se1[a] == se2[b] == 1: return True
                    if se1[a] >= 2 and se2[b] >= 2: return True
            return False
        # Gap 1: se2 must gain one distinct letter (or se1 lose one), e.g.
        # give se2 a letter that se1 holds exactly once while se2 already has
        # the letter coming back (se1 loses a distinct letter).
        if diffchar == 1:
            for a in list_lower:
                if se1[a] == 0: continue
                for b in list_lower:
                    if se2[b] == 0: continue
                    #if se1[a] >= 2 and se2[b] == 0: return True
                    if se1[a] == 1 and se2[b] >= 2:
                        return True
            return False
        # Gap 2: move a letter se1 holds exactly once into se2 as a new
        # letter (se1 loses one distinct, se2 gains one).
        # NOTE(review): `se2[b] == 0` below is unreachable — the inner loop
        # `continue`s on exactly that condition — so this branch always
        # returns False. Looks like a bug; confirm the intended condition.
        if diffchar == 2:
            for a in list_lower:
                if se1[a] == 0: continue
                for b in list_lower:
                    if se2[b] == 0: continue
                    if se1[a] == 1 and se2[b] == 0: return True
            return False
# Ad-hoc smoke tests: each line prints True when the result matches the
# author's expected value (translated from the original Japanese notes).
st = Solution()
print(st.isItPossible(word1 = "abcd", word2 = "a")==False) # impossible
print(st.isItPossible(word1 = "abc", word2 = "d")==False) # impossible
print(st.isItPossible(word1 = "abc", word2 = "a")==False) # impossible
print(st.isItPossible(word1 = "ac", word2 = "b")==False) # impossible
print(st.isItPossible(word1 = "ac", word2 = "a")==True) # -> aa, c
print(st.isItPossible(word1 = "abcc", word2 = "aab")==True) # -> abca, acb
print(st.isItPossible(word1 = "abcde", word2 = "fghij")==True) # any swap works
print(st.isItPossible(word1 = "abc", word2 = "ddeeff")==False) # e.g. abd, cdeeff — always ends at 3 vs 4 distinct
print(st.isItPossible(word1 = "c", word2 = "ac")==True) # -> a, cc
| [
"kanai@wide.ad.jp"
] | kanai@wide.ad.jp |
d343f2bf7825fe6d38f60fa6dc2ccb045815be2a | 15d05b24ab8086ac84757c4d596372fd7801b827 | /.ycm_extra_conf.py | d083fa69a8d6ce218a149a5a10a224238cb5081b | [] | no_license | wqx081/mpr_base | 5d410720728b3e1a720a36087226979763ac538b | fb1526856898954420673807eef50a5478382c38 | refs/heads/master | 2021-01-11T14:28:24.249395 | 2017-02-10T03:43:21 | 2017-02-10T03:43:21 | 56,306,744 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,628 | py | # This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
#
# In jurisdictions that recognize copyright laws, the author or authors
# of this software dedicate any and all copyright interest in the
# software to the public domain. We make this dedication for the benefit
# of the public at large and to the detriment of our heirs and
# successors. We intend this dedication to be an overt act of
# relinquishment in perpetuity of all present and future rights to this
# software under copyright law.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# For more information, please refer to <http://unlicense.org/>
# This is a configuration file for YouCompleteMe (YCM), a Vim extension for
# navigation and code completion with C++ and other languages.
#
# To make YCM work with Kudu, add your Kudu source directory to the
# g:ycm_extra_conf_globlist variable in your .vimrc file. For details on how to
# install and configure YouCompleteMe, see
# https://github.com/Valloric/YouCompleteMe
#
# This file is based on the example configuration file from YouCompleteMe.
import os
import ycm_core
# These are the compilation flags that will be used in case there's no
# compilation database set (by default, one is not set).
# CHANGE THIS LIST OF FLAGS. YES, THIS IS THE DROID YOU HAVE BEEN LOOKING FOR.
# Default compilation flags, used when no compilation database is available
# (see compilation_database_folder below). Adjust these to match the project.
flags = [
    '-x',
    'c++',
    '-DBASE_HEADERS_NO_STUBS=1',
    '-DBASE_HEADERS_USE_RICH_SLICE=1',
    '-DBASE_HEADERS_USE_SHORT_STATUS_MACROS=1',
    '-DBASE_STATIC_DEFINE',
    '-Dintegration_tests_EXPORTS',
    '-D__STDC_FORMAT_MACROS',
    '-fno-strict-aliasing',
    '-msse4.2',
    '-Wall',
    '-Wno-sign-compare',
    '-Wno-deprecated',
    '-pthread',
    '-ggdb',
    '-Qunused-arguments',
    '-Wno-ambiguous-member-template',
    '-std=c++11',
    '-g',
    '-fPIC',
    '-I',
    'src',
    '-I',
    './src',
    '-isystem',
    'thirdparty/installed/common/include',
    '-isystem',
    'thirdparty/installed/uninstrumented/include',
]

# Absolute path to the folder (NOT the file!) containing compile_commands.json;
# when it exists, the database is used instead of `flags`. CMake generates the
# file when CMAKE_EXPORT_COMPILE_COMMANDS is set; see
# http://clang.llvm.org/docs/JSONCompilationDatabase.html for details.
compilation_database_folder = ''

if os.path.exists( compilation_database_folder ):
  database = ycm_core.CompilationDatabase( compilation_database_folder )
else:
  database = None

# Source-file extensions tried when looking up flags for a header file.
SOURCE_EXTENSIONS = [ '.cpp', '.cxx', '.cc', '.c', '.m', '.mm' ]
def DirectoryOfThisScript():
    """Absolute path of the directory containing this configuration file."""
    here = os.path.abspath(__file__)
    return os.path.dirname(here)
def MakeRelativePathsInFlagsAbsolute(flags, working_directory):
    """Return a copy of *flags* with relative include/sysroot paths rebased
    onto *working_directory*.

    With no working directory the flags are returned unchanged (as a new
    list). Empty flags are dropped from the result.
    """
    if not working_directory:
        return list(flags)

    path_flags = ['-isystem', '-I', '-iquote', '--sysroot=']
    absolute_flags = []
    expect_path = False  # previous flag was a bare path option, e.g. '-I' 'foo'

    for flag in flags:
        rewritten = flag

        if expect_path:
            expect_path = False
            # Already-absolute paths are kept as-is.
            if not flag.startswith('/'):
                rewritten = os.path.join(working_directory, flag)

        for option in path_flags:
            if flag == option:
                # The path arrives as the next separate argument.
                expect_path = True
                break
            if flag.startswith(option):
                # The path is glued to the option, e.g. '-Isrc'.
                rewritten = option + os.path.join(working_directory, flag[len(option):])
                break

        if rewritten:
            absolute_flags.append(rewritten)

    return absolute_flags
def IsHeaderFile(filename):
    """True when *filename* carries a C/C++ header extension."""
    return os.path.splitext(filename)[1] in ('.h', '.hxx', '.hpp', '.hh')
def GetCompilationInfoForFile(filename):
    """Look up compilation flags for *filename* in the compilation database.

    CMake-generated databases carry no entries for header files, so for a
    header we try the sibling source file (same basename, any known source
    extension) and use its flags. Returns None when nothing usable is found.
    """
    if not IsHeaderFile(filename):
        return database.GetCompilationInfoForFile(filename)

    basename = os.path.splitext(filename)[0]
    for extension in SOURCE_EXTENSIONS:
        candidate = basename + extension
        if not os.path.exists(candidate):
            continue
        info = database.GetCompilationInfoForFile(candidate)
        if info.compiler_flags_:
            return info
    return None
def FlagsForFile(filename, **kwargs):
    """YCM entry point: return the final compile flags for *filename*."""
    if not database:
        # No compilation database: use the hard-coded flags, rebased onto
        # this script's directory.
        final_flags = MakeRelativePathsInFlagsAbsolute(flags, DirectoryOfThisScript())
        return {
            'flags': final_flags,
            'do_cache': True
        }

    # compiler_flags_ is a "list-like" StringVec, not a real Python list.
    compilation_info = GetCompilationInfoForFile(filename)
    if not compilation_info:
        return None

    return {
        'flags': MakeRelativePathsInFlagsAbsolute(
            compilation_info.compiler_flags_,
            compilation_info.compiler_working_dir_),
        'do_cache': True
    }
| [
"you@example.com"
] | you@example.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.