content stringlengths 5 1.05M |
|---|
#-------------------------------------------------------------------------------
#
# Project: EOxServer <http://eoxserver.org>
# Authors: Martin Paces <martin.paces@eox.at>
#
#-------------------------------------------------------------------------------
# Copyright (C) 2011 EOX IT Services GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies of this Software or works derived from this Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#-------------------------------------------------------------------------------
"""
This module provides CRS handling utilities.
"""
#-------------------------------------------------------------------------------
import re
import logging
import math
from eoxserver.contrib import osr
from eoxserver.core.config import get_eoxserver_config
from eoxserver.core.decoders import config
logger = logging.getLogger(__name__)
#-------------------------------------------------------------------------------
#: Set (Python ``set`` type) of EPSG codes of CRS whose axes are displayed
#: in reversed order.
#: Source: GDAL 1.10.0, WKT/AXES definitions
#: Set (Python ``set`` type) of EPSG codes of CRS whose axes are displayed
#: in reversed order. Written as a set literal (``{...}``) rather than
#: ``set([...])`` to avoid building a throwaway list.
EPSG_AXES_REVERSED = {
    # GEOGRAPHIC COORDINATE SYSTEMS
    # NOTE: Tested to be consistent with GDAL
    # OGRSpatialReference::EPSGTreatsAsNorthingEasting()
    3819, 3821, 3824, 3889, 3906, 4001, 4002, 4003, 4004, 4005, 4006, 4007,
    4008, 4009, 4010, 4011, 4012, 4013, 4014, 4015, 4016, 4018, 4019, 4020,
    4021, 4022, 4023, 4024, 4025, 4027, 4028, 4029, 4030, 4031, 4032, 4033,
    4034, 4035, 4036, 4041, 4042, 4043, 4044, 4045, 4046, 4047, 4052, 4053,
    4054, 4055, 4075, 4081, 4120, 4121, 4122, 4123, 4124, 4125, 4126, 4127,
    4128, 4129, 4130, 4131, 4132, 4133, 4134, 4135, 4136, 4137, 4138, 4139,
    4140, 4141, 4142, 4143, 4144, 4145, 4146, 4147, 4148, 4149, 4150, 4151,
    4152, 4153, 4154, 4155, 4156, 4157, 4158, 4159, 4160, 4161, 4162, 4163,
    4164, 4165, 4166, 4167, 4168, 4169, 4170, 4171, 4172, 4173, 4174, 4175,
    4176, 4178, 4179, 4180, 4181, 4182, 4183, 4184, 4185, 4188, 4189, 4190,
    4191, 4192, 4193, 4194, 4195, 4196, 4197, 4198, 4199, 4200, 4201, 4202,
    4203, 4204, 4205, 4206, 4207, 4208, 4209, 4210, 4211, 4212, 4213, 4214,
    4215, 4216, 4218, 4219, 4220, 4221, 4222, 4223, 4224, 4225, 4226, 4227,
    4228, 4229, 4230, 4231, 4232, 4233, 4234, 4235, 4236, 4237, 4238, 4239,
    4240, 4241, 4242, 4243, 4244, 4245, 4246, 4247, 4248, 4249, 4250, 4251,
    4252, 4253, 4254, 4255, 4256, 4257, 4258, 4259, 4260, 4261, 4262, 4263,
    4264, 4265, 4266, 4267, 4268, 4269, 4270, 4271, 4272, 4273, 4274, 4275,
    4276, 4277, 4278, 4279, 4280, 4281, 4282, 4283, 4284, 4285, 4286, 4287,
    4288, 4289, 4291, 4292, 4293, 4294, 4295, 4296, 4297, 4298, 4299, 4300,
    4301, 4302, 4303, 4304, 4306, 4307, 4308, 4309, 4310, 4311, 4312, 4313,
    4314, 4315, 4316, 4317, 4318, 4319, 4322, 4324, 4326, 4463, 4470, 4475,
    4483, 4490, 4555, 4558, 4600, 4601, 4602, 4603, 4604, 4605, 4606, 4607,
    4608, 4609, 4610, 4611, 4612, 4613, 4614, 4615, 4616, 4617, 4618, 4619,
    4620, 4621, 4622, 4623, 4624, 4625, 4626, 4627, 4628, 4629, 4630, 4631,
    4632, 4633, 4634, 4635, 4636, 4637, 4638, 4639, 4640, 4641, 4642, 4643,
    4644, 4645, 4646, 4657, 4658, 4659, 4660, 4661, 4662, 4663, 4664, 4665,
    4666, 4667, 4668, 4669, 4670, 4671, 4672, 4673, 4674, 4675, 4676, 4677,
    4678, 4679, 4680, 4681, 4682, 4683, 4684, 4685, 4686, 4687, 4688, 4689,
    4690, 4691, 4692, 4693, 4694, 4695, 4696, 4697, 4698, 4699, 4700, 4701,
    4702, 4703, 4704, 4705, 4706, 4707, 4708, 4709, 4710, 4711, 4712, 4713,
    4714, 4715, 4716, 4717, 4718, 4719, 4720, 4721, 4722, 4723, 4724, 4725,
    4726, 4727, 4728, 4729, 4730, 4731, 4732, 4733, 4734, 4735, 4736, 4737,
    4738, 4739, 4740, 4741, 4742, 4743, 4744, 4745, 4746, 4747, 4748, 4749,
    4750, 4751, 4752, 4753, 4754, 4755, 4756, 4757, 4758, 4759, 4760, 4761,
    4762, 4763, 4764, 4765, 4801, 4802, 4803, 4804, 4805, 4806, 4807, 4808,
    4809, 4810, 4811, 4813, 4814, 4815, 4816, 4817, 4818, 4819, 4820, 4821,
    4823, 4824, 4901, 4902, 4903, 4904, 5013, 5132, 5228, 5229, 5233, 5246,
    5252, 5264, 5324, 5340, 5354, 5360, 5365, 5371, 5373, 5381, 5393, 5451,
    5464, 5467, 5489, 5524, 5527, 5546,
    # PROJECTED COORDINATE SYSTEMS
    #TODO: verify with OGRSpatialReference::EPSGTreatsAsNorthingEasting()
    # (available only in GDAL 1.10.0 C/C++ API but not in Python)
    # SOUTH,WEST pointing projected coordinate systems:
    # NOTE: These are probably inconsistent with
    # OGRSpatialReference::EPSGTreatsAsNorthingEasting()
    # as this function checks NORTH pointing coordinates only.
    2065, 5513,
    # NORTH,EAST pointing projected coordinate systems:
    # NOTE: These should be consistent with
    # OGRSpatialReference::EPSGTreatsAsNorthingEasting()
    2036, 2044, 2045, 2081, 2082, 2083, 2085, 2086, 2091, 2092, 2093, 2096,
    2097, 2098, 2105, 2106, 2107, 2108, 2109, 2110, 2111, 2112, 2113, 2114,
    2115, 2116, 2117, 2118, 2119, 2120, 2121, 2122, 2123, 2124, 2125, 2126,
    2127, 2128, 2129, 2130, 2131, 2132, 2166, 2167, 2168, 2169, 2170, 2171,
    2172, 2173, 2174, 2175, 2176, 2177, 2178, 2179, 2180, 2193, 2199, 2200,
    2206, 2207, 2208, 2209, 2210, 2211, 2212, 2319, 2320, 2321, 2322, 2323,
    2324, 2325, 2326, 2327, 2328, 2329, 2330, 2331, 2332, 2333, 2334, 2335,
    2336, 2337, 2338, 2339, 2340, 2341, 2342, 2343, 2344, 2345, 2346, 2347,
    2348, 2349, 2350, 2351, 2352, 2353, 2354, 2355, 2356, 2357, 2358, 2359,
    2360, 2361, 2362, 2363, 2364, 2365, 2366, 2367, 2368, 2369, 2370, 2371,
    2372, 2373, 2374, 2375, 2376, 2377, 2378, 2379, 2380, 2381, 2382, 2383,
    2384, 2385, 2386, 2387, 2388, 2389, 2390, 2391, 2392, 2393, 2394, 2395,
    2396, 2397, 2398, 2399, 2400, 2401, 2402, 2403, 2404, 2405, 2406, 2407,
    2408, 2409, 2410, 2411, 2412, 2413, 2414, 2415, 2416, 2417, 2418, 2419,
    2420, 2421, 2422, 2423, 2424, 2425, 2426, 2427, 2428, 2429, 2430, 2431,
    2432, 2433, 2434, 2435, 2436, 2437, 2438, 2439, 2440, 2441, 2442, 2443,
    2444, 2445, 2446, 2447, 2448, 2449, 2450, 2451, 2452, 2453, 2454, 2455,
    2456, 2457, 2458, 2459, 2460, 2461, 2462, 2463, 2464, 2465, 2466, 2467,
    2468, 2469, 2470, 2471, 2472, 2473, 2474, 2475, 2476, 2477, 2478, 2479,
    2480, 2481, 2482, 2483, 2484, 2485, 2486, 2487, 2488, 2489, 2490, 2491,
    2492, 2493, 2494, 2495, 2496, 2497, 2498, 2499, 2500, 2501, 2502, 2503,
    2504, 2505, 2506, 2507, 2508, 2509, 2510, 2511, 2512, 2513, 2514, 2515,
    2516, 2517, 2518, 2519, 2520, 2521, 2522, 2523, 2524, 2525, 2526, 2527,
    2528, 2529, 2530, 2531, 2532, 2533, 2534, 2535, 2536, 2537, 2538, 2539,
    2540, 2541, 2542, 2543, 2544, 2545, 2546, 2547, 2548, 2549, 2551, 2552,
    2553, 2554, 2555, 2556, 2557, 2558, 2559, 2560, 2561, 2562, 2563, 2564,
    2565, 2566, 2567, 2568, 2569, 2570, 2571, 2572, 2573, 2574, 2575, 2576,
    2577, 2578, 2579, 2580, 2581, 2582, 2583, 2584, 2585, 2586, 2587, 2588,
    2589, 2590, 2591, 2592, 2593, 2594, 2595, 2596, 2597, 2598, 2599, 2600,
    2601, 2602, 2603, 2604, 2605, 2606, 2607, 2608, 2609, 2610, 2611, 2612,
    2613, 2614, 2615, 2616, 2617, 2618, 2619, 2620, 2621, 2622, 2623, 2624,
    2625, 2626, 2627, 2628, 2629, 2630, 2631, 2632, 2633, 2634, 2635, 2636,
    2637, 2638, 2639, 2640, 2641, 2642, 2643, 2644, 2645, 2646, 2647, 2648,
    2649, 2650, 2651, 2652, 2653, 2654, 2655, 2656, 2657, 2658, 2659, 2660,
    2661, 2662, 2663, 2664, 2665, 2666, 2667, 2668, 2669, 2670, 2671, 2672,
    2673, 2674, 2675, 2676, 2677, 2678, 2679, 2680, 2681, 2682, 2683, 2684,
    2685, 2686, 2687, 2688, 2689, 2690, 2691, 2692, 2693, 2694, 2695, 2696,
    2697, 2698, 2699, 2700, 2701, 2702, 2703, 2704, 2705, 2706, 2707, 2708,
    2709, 2710, 2711, 2712, 2713, 2714, 2715, 2716, 2717, 2718, 2719, 2720,
    2721, 2722, 2723, 2724, 2725, 2726, 2727, 2728, 2729, 2730, 2731, 2732,
    2733, 2734, 2735, 2738, 2739, 2740, 2741, 2742, 2743, 2744, 2745, 2746,
    2747, 2748, 2749, 2750, 2751, 2752, 2753, 2754, 2755, 2756, 2757, 2758,
    2935, 2936, 2937, 2938, 2939, 2940, 2941, 2953, 3006, 3007, 3008, 3009,
    3010, 3011, 3012, 3013, 3014, 3015, 3016, 3017, 3018, 3019, 3020, 3021,
    3022, 3023, 3024, 3025, 3026, 3027, 3028, 3029, 3030, 3034, 3035, 3038,
    3039, 3040, 3041, 3042, 3043, 3044, 3045, 3046, 3047, 3048, 3049, 3050,
    3051, 3058, 3059, 3068, 3114, 3115, 3116, 3117, 3118, 3120, 3126, 3127,
    3128, 3129, 3130, 3131, 3132, 3133, 3134, 3135, 3136, 3137, 3138, 3140,
    3146, 3147, 3150, 3151, 3152, 3300, 3301, 3328, 3329, 3330, 3331, 3332,
    3333, 3334, 3335, 3346, 3350, 3351, 3352, 3366, 3386, 3387, 3388, 3389,
    3390, 3396, 3397, 3398, 3399, 3407, 3414, 3416, 3764, 3788, 3789, 3790,
    3791, 3793, 3795, 3796, 3833, 3834, 3835, 3836, 3837, 3838, 3839, 3840,
    3841, 3842, 3843, 3844, 3845, 3846, 3847, 3848, 3849, 3850, 3851, 3852,
    3854, 3873, 3874, 3875, 3876, 3877, 3878, 3879, 3880, 3881, 3882, 3883,
    3884, 3885, 3907, 3908, 3909, 3910, 3911, 4026, 4037, 4038, 4417, 4434,
    4491, 4492, 4493, 4494, 4495, 4496, 4497, 4498, 4499, 4500, 4501, 4502,
    4503, 4504, 4505, 4506, 4507, 4508, 4509, 4510, 4511, 4512, 4513, 4514,
    4515, 4516, 4517, 4518, 4519, 4520, 4521, 4522, 4523, 4524, 4525, 4526,
    4527, 4528, 4529, 4530, 4531, 4532, 4533, 4534, 4535, 4536, 4537, 4538,
    4539, 4540, 4541, 4542, 4543, 4544, 4545, 4546, 4547, 4548, 4549, 4550,
    4551, 4552, 4553, 4554, 4568, 4569, 4570, 4571, 4572, 4573, 4574, 4575,
    4576, 4577, 4578, 4579, 4580, 4581, 4582, 4583, 4584, 4585, 4586, 4587,
    4588, 4589, 4652, 4653, 4654, 4655, 4656, 4766, 4767, 4768, 4769, 4770,
    4771, 4772, 4773, 4774, 4775, 4776, 4777, 4778, 4779, 4780, 4781, 4782,
    4783, 4784, 4785, 4786, 4787, 4788, 4789, 4790, 4791, 4792, 4793, 4794,
    4795, 4796, 4797, 4798, 4799, 4800, 4812, 4822, 4839, 4855, 4856, 4857,
    4858, 4859, 4860, 4861, 4862, 4863, 4864, 4865, 4866, 4867, 4868, 4869,
    4870, 4871, 4872, 4873, 4874, 4875, 4876, 4877, 4878, 4879, 4880, 5048,
    5105, 5106, 5107, 5108, 5109, 5110, 5111, 5112, 5113, 5114, 5115, 5116,
    5117, 5118, 5119, 5120, 5121, 5122, 5123, 5124, 5125, 5126, 5127, 5128,
    5129, 5130, 5167, 5168, 5169, 5170, 5171, 5172, 5173, 5174, 5175, 5176,
    5177, 5178, 5179, 5180, 5181, 5182, 5183, 5184, 5185, 5186, 5187, 5188,
    5253, 5254, 5255, 5256, 5257, 5258, 5259, 5269, 5270, 5271, 5272, 5273,
    5274, 5275, 5343, 5344, 5345, 5346, 5347, 5348, 5349, 5367, 5479, 5480,
    5481, 5482, 5518, 5519, 5520, 20004, 20005, 20006, 20007, 20008, 20009,
    20010, 20011, 20012, 20013, 20014, 20015, 20016, 20017, 20018, 20019,
    20020, 20021, 20022, 20023, 20024, 20025, 20026, 20027, 20028, 20029,
    20030, 20031, 20032, 20064, 20065, 20066, 20067, 20068, 20069, 20070,
    20071, 20072, 20073, 20074, 20075, 20076, 20077, 20078, 20079, 20080,
    20081, 20082, 20083, 20084, 20085, 20086, 20087, 20088, 20089, 20090,
    20091, 20092, 21413, 21414, 21415, 21416, 21417, 21418, 21419, 21420,
    21421, 21422, 21423, 21453, 21454, 21455, 21456, 21457, 21458, 21459,
    21460, 21461, 21462, 21463, 21473, 21474, 21475, 21476, 21477, 21478,
    21479, 21480, 21481, 21482, 21483, 21896, 21897, 21898, 21899, 22171,
    22172, 22173, 22174, 22175, 22176, 22177, 22181, 22182, 22183, 22184,
    22185, 22186, 22187, 22191, 22192, 22193, 22194, 22195, 22196, 22197,
    25884, 27205, 27206, 27207, 27208, 27209, 27210, 27211, 27212, 27213,
    27214, 27215, 27216, 27217, 27218, 27219, 27220, 27221, 27222, 27223,
    27224, 27225, 27226, 27227, 27228, 27229, 27230, 27231, 27232, 27391,
    27392, 27393, 27394, 27395, 27396, 27397, 27398, 27492, 28402, 28403,
    28404, 28405, 28406, 28407, 28408, 28409, 28410, 28411, 28412, 28413,
    28414, 28415, 28416, 28417, 28418, 28419, 28420, 28421, 28422, 28423,
    28424, 28425, 28426, 28427, 28428, 28429, 28430, 28431, 28432, 28462,
    28463, 28464, 28465, 28466, 28467, 28468, 28469, 28470, 28471, 28472,
    28473, 28474, 28475, 28476, 28477, 28478, 28479, 28480, 28481, 28482,
    28483, 28484, 28485, 28486, 28487, 28488, 28489, 28490, 28491, 28492,
    29702, 30161, 30162, 30163, 30164, 30165, 30166, 30167, 30168, 30169,
    30170, 30171, 30172, 30173, 30174, 30175, 30176, 30177, 30178, 30179,
    30800, 31251, 31252, 31253, 31254, 31255, 31256, 31257, 31258, 31259,
    31275, 31276, 31277, 31278, 31279, 31281, 31282, 31283, 31284, 31285,
    31286, 31287, 31288, 31289, 31290, 31466, 31467, 31468, 31469, 31700,
    32661, 32761,
}
#-------------------------------------------------------------------------------
# format functions
def asInteger(epsg):
    """ Convert an EPSG code to a plain Python integer. """
    code = int(epsg)
    return code
def asShortCode(epsg):
    """ Convert an EPSG code to the short ``EPSG:<code>`` CRS notation. """
    code = int(epsg)
    return "EPSG:%d" % code
def asURL(epsg):
    """ Convert an EPSG code to the OGC URL CRS notation
    ``http://www.opengis.net/def/crs/EPSG/0/<code>``. """
    code = int(epsg)
    return "http://www.opengis.net/def/crs/EPSG/0/%d" % code
def asURN(epsg):
    """ Convert an EPSG code to the OGC URN CRS notation
    ``urn:ogc:def:crs:epsg::<code>``. """
    code = int(epsg)
    return "urn:ogc:def:crs:epsg::%d" % code
def asProj4Str(epsg):
    """ Convert an EPSG code to the *proj4* ``+init=epsg:<code>`` notation. """
    code = int(epsg)
    return "+init=epsg:%d" % code
#-------------------------------------------------------------------------------
# format parsers
# compiled regular expressions
_gerexURL = re.compile(
r"^http://www.opengis.net/def/crs/epsg/\d+\.?\d*/(\d+)$", re.IGNORECASE
)
_gerexURN = re.compile(r"^urn:ogc:def:crs:epsg:\d*\.?\d*:(\d+)$", re.IGNORECASE)
_gerexShortCode = re.compile(r"^epsg:(\d+)$", re.IGNORECASE)
_gerexProj4Str = re.compile(r"^\+init=epsg:(\d+)$")
def validateEPSGCode(string):
    """Return True when ``string`` holds a valid EPSG code recognised by
    GDAL/Proj, False otherwise."""
    try:
        code = int(string)
        osr.SpatialReference().ImportFromEPSG(code)
    except (ValueError, RuntimeError):
        # non-numeric input or a code rejected by GDAL
        return False
    return True
def fromInteger(string):
    """ Parse an EPSG code from a plain integer string; return None when the
    code is not a valid EPSG code. """
    if validateEPSGCode(string):
        return int(string)
    return None
def _fromRegEx(string, gerex):
    """ Parse an EPSG code from ``string`` using the compiled regular
    expression ``gerex``; return None when the string does not match. """
    match = gerex.match(string)
    return fromInteger(match.group(1)) if match else None
def fromURL(string):
    """ Parse an EPSG code from the OGC URL CRS notation
    ``http://www.opengis.net/def/crs/EPSG/0/<code>``; None on mismatch. """
    return _fromRegEx(string, _gerexURL)
def fromURN(string):
    """ Parse an EPSG code from the OGC URN CRS notation
    ``urn:ogc:def:crs:epsg::<code>``; None on mismatch. """
    return _fromRegEx(string, _gerexURN)
def fromShortCode(string):
    """ Parse an EPSG code from the short ``EPSG:<code>`` CRS notation;
    None on mismatch. """
    return _fromRegEx(string, _gerexShortCode)
def fromProj4Str(string):
    """ Parse an EPSG code from the *proj4* ``+init=epsg:<code>`` notation;
    None on mismatch. """
    return _fromRegEx(string, _gerexProj4Str)
def parseEPSGCode(string, parsers):
    """ Parse an EPSG code by trying each parser in ``parsers`` in order;
    the first non-None result wins. Returns None when no parser matches. """
    for parse in parsers:
        code = parse(string)
        if code is not None:
            return code
    return None
#-------------------------------------------------------------------------------
# public API
# Lazily-initialised module-level caches, filled on first access by
# getSupportedCRS_WMS/getSupportedCRS_WCS and hasSwappedAxes below.
__SUPPORTED_CRS_WMS = None       # EPSG codes configured for WMS
__SUPPORTED_CRS_WCS = None       # EPSG codes configured for WCS
__SUPPORTED_CRS_ALL = None       # union of the WMS and WCS codes
__SUPPORTED_CRS_REVERSED = None  # supported codes with reversed axes
def getSupportedCRS_WMS(format_function=asShortCode):
    """ Get list of CRSes supported by WMS. The ``format_function`` is used to
    format individual list items."""
    global __SUPPORTED_CRS_WMS
    if __SUPPORTED_CRS_WMS is None:
        reader = CRSsConfigReader(get_eoxserver_config())
        # Materialise the option as a list before caching: under Python 3 a
        # bare ``map`` object is a one-shot iterator and a second call would
        # silently return an empty result.
        __SUPPORTED_CRS_WMS = list(reader.supported_crss_wms)
    # return formatted list of EPSG codes (always a real list)
    return [format_function(code) for code in __SUPPORTED_CRS_WMS]
def getSupportedCRS_WCS(format_function=asShortCode):
    """ Get list of CRSes supported by WCS. The ``format_function`` is used to
    format individual list items."""
    global __SUPPORTED_CRS_WCS
    if __SUPPORTED_CRS_WCS is None:
        reader = CRSsConfigReader(get_eoxserver_config())
        # Materialise the option as a list before caching: under Python 3 a
        # bare ``map`` object is a one-shot iterator and a second call would
        # silently return an empty result.
        __SUPPORTED_CRS_WCS = list(reader.supported_crss_wcs)
    # return formatted list of EPSG codes (always a real list)
    return [format_function(code) for code in __SUPPORTED_CRS_WCS]
#-------------------------------------------------------------------------------
def hasSwappedAxes(epsg):
    """Decide whether the coordinate system given by the passed EPSG code is
    displayed with swapped axes (True) or not (False)."""
    # NOTE: the whole set of reversed axes is large so in case of the EPSG
    # code being among the supported CRSes only a limited set is used.
    global __SUPPORTED_CRS_ALL
    global __SUPPORTED_CRS_REVERSED
    if __SUPPORTED_CRS_REVERSED is None:
        # Built once on first call and cached in the module-level globals:
        # get intersection of all supported and reversed axes CRSes.
        crs_wms = set(getSupportedCRS_WMS(asInteger))
        crs_wcs = set(getSupportedCRS_WCS(asInteger))
        __SUPPORTED_CRS_ALL = crs_wms | crs_wcs
        __SUPPORTED_CRS_REVERSED = __SUPPORTED_CRS_ALL & EPSG_AXES_REVERSED
    if epsg in __SUPPORTED_CRS_ALL:
        # fast path: membership test against the small cached subset
        return epsg in __SUPPORTED_CRS_REVERSED
    else:
        # unsupported code: fall back to the full EPSG table
        return epsg in EPSG_AXES_REVERSED
def getAxesSwapper(epsg, swapAxes=None):
    """
    Second-order function returning a point-tuple axes swapper,
    either f(x,y) -> (x,y) or f(x,y) -> (y,x). The axes order is determined
    by the provided EPSG code, or explicitly by the ``swapAxes`` boolean
    flag when it is given.
    """
    if swapAxes not in (True, False):
        # no explicit flag -> derive the order from the EPSG code
        swapAxes = hasSwappedAxes(epsg)
    if swapAxes:
        return lambda x, y: (y, x)
    return lambda x, y: (x, y)
def isProjected(epsg):
    """Is the coordinate system projected (True) or geographic (False)?"""
    sref = osr.SpatialReference()
    sref.ImportFromEPSG(epsg)
    return bool(sref.IsProjected())
def crs_bounds(srid):
    """ Get the maximum bounds of the CRS as (minx, miny, maxx, maxy). """
    srs = osr.SpatialReference()
    srs.ImportFromEPSG(srid)
    if srs.IsGeographic():
        # geographic CRS: full lon/lat extent in degrees
        return (-180.0, -90.0, 180.0, 90.0)
    # projected CRS: symmetric square sized by the Earth's circumference
    circumference = 2 * math.pi * srs.GetSemiMajor()
    return (-circumference, -circumference, circumference, circumference)
def crs_tolerance(srid):
    """ Get the "tolerance" of the CRS """
    srs = osr.SpatialReference()
    srs.ImportFromEPSG(srid)
    # geographic CRSs work in degrees, projected ones in metres
    return 1e-8 if srs.IsGeographic() else 1e-2
#: Identifiers (all common spellings) denoting the pixel-based "image CRS".
#: Written as a set literal instead of ``set((...))`` wrapping a tuple.
image_crss_ids = {
    "urn:ogc:def:crs:OGC::imageCRS", "imageCRS",
    "CRS:1", "urn:ogc:def:crs:OGC::CRS1", "urn:ogc:def:crs:OGC:1.3:CRS1",
    "http://www.opengis.net/def/crs/OGC/0/CRS1",
    "http://www.opengis.net/def/crs/OGC/1.3/CRS1"
}
def is_image_crs(string):
    """Return True when ``string`` is one of the known image-CRS ids."""
    return string in image_crss_ids
#-------------------------------------------------------------------------------
def _parseListOfCRS(raw_value):
    """ Parse the CRS configuration: a comma-separated list of EPSG codes,
    possibly spread over multiple lines with ``#`` comments.

    Returns a list of integer EPSG codes (invalid codes are dropped with a
    warning). Returning a real list -- rather than the original ``map``
    object, which under Python 3 is a one-shot iterator -- keeps the value
    safe to cache and iterate repeatedly.
    """
    def checkCode(value):
        # validate the input CRS as recognized by GDAL/Proj
        valid = validateEPSGCode(value)
        if not valid:
            # lazy %-style args avoid formatting when the level is disabled
            logger.warning(
                "Invalid EPSG code '%s'! This CRS will be ignored!",
                str(value).strip()
            )
        return valid
    # strip '#' comments from each line and re-join the remainder
    stripped = "".join(
        line.partition("#")[0] for line in raw_value.split("\n")
    )
    # filter out invalid EPSG codes and convert the rest to integers
    return [int(value) for value in stripped.split(",") if checkCode(value)]
#-------------------------------------------------------------------------------
class CRSsConfigReader(config.Reader):
    # Reads the 'supported_crs' option from two different configuration
    # sections. NOTE(review): ``section`` is deliberately re-assigned between
    # the two Option declarations -- this relies on config.Reader binding the
    # section current at declaration time; confirm before reordering lines.
    section = "services.ows.wms"
    supported_crss_wms = config.Option("supported_crs", type=_parseListOfCRS)
    section = "services.ows.wcs"
    supported_crss_wcs = config.Option("supported_crs", type=_parseListOfCRS)
|
"""
This app is for the management of roles and permissions.
The built-in permissions module only provides per-model-instance operations,
so instead of raw permissions this app introduces roles.
INVENTORY ROLES
- Add Supplier
- View Supplier
- Delete Supplier
- Update Supplier
"""
# TODO: Look at examples of online generated receipt systems
# TODO: Look for point of sale systems and how they work and integrate with my api
# TODO: Look at how to build a "smart" data query system
default_app_config = 'business.authorization.apps.ManagementAppConfig'
|
from django.db.models.query_utils import Q
from product.models import Category, Gallery, Product, Review
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework.exceptions import ValidationError
from product.serializers import CategorySerializer, CreateProductSerializer, CreateReviewSerializer, GallerySerializer, ProductReviewSerializer, ProductSerializer, ReviewSerializer
from rest_framework import status
from django.shortcuts import get_object_or_404
from user.permissions import IsActiveOrReadOnly, IsStaffOrReadOnly
from django.http import Http404
from drf_yasg.utils import swagger_auto_schema
from rest_framework.permissions import AllowAny
from drf_yasg import openapi
from rest_framework.mixins import RetrieveModelMixin, UpdateModelMixin, DestroyModelMixin, ListModelMixin, CreateModelMixin
from rest_framework.generics import GenericAPIView
class Categories(ListModelMixin,
                 CreateModelMixin,
                 GenericAPIView):
    # Collection endpoint for Category: GET lists all, POST creates one.
    # IsStaffOrReadOnly: anyone may read, only staff may write.
    permission_classes = [IsStaffOrReadOnly]
    queryset = Category.objects.all()
    serializer_class = CategorySerializer

    @swagger_auto_schema(
        responses={
            200: CategorySerializer(),
            204: 'No Content'
        }
    )
    def get(self, request, *args, **kwargs):
        """
        Category List
        Return all available category.<br>
        Will use 204 status code if result is empty.
        ### Permission:
        * Allow Any
        """
        # self.list() returns a ready Response; substitute an empty-body 204
        # response when the serialized payload is empty.
        qs = self.list(request, *args, **kwargs)
        return qs if qs.data else Response(status=status.HTTP_204_NO_CONTENT)

    @swagger_auto_schema(
        responses={
            201: CategorySerializer(),
            400: 'Bad Request',
            401: 'Invalid User\'s Credential',
            403: 'You Do Not Have Permission To Perform This Action'
        }
    )
    def post(self, request, *args, **kwargs):
        """
        Create Category
        Create a new category.
        ### Permission:
        * Staff Only
        """
        # delegate to CreateModelMixin (validates and saves the serializer)
        return self.create(request, *args, **kwargs)
class CategoryDetail(RetrieveModelMixin,
                     UpdateModelMixin,
                     DestroyModelMixin,
                     GenericAPIView):
    # Detail endpoint for a single Category addressed by pk:
    # GET retrieves, PUT updates, DELETE removes.
    permission_classes = [IsStaffOrReadOnly]
    queryset = Category.objects.all()
    serializer_class = CategorySerializer

    @swagger_auto_schema(
        responses={
            200: CategorySerializer(),
            404: 'No Category With That ID Found'
        }
    )
    def get(self, request, *args, **kwargs):
        """
        Detail Category
        Return the detail of category with requested id.<br>
        Return 404 if no category with that id is found.
        ### Permission:
        * Allow Any
        """
        # delegate to RetrieveModelMixin (404 raised by get_object)
        return self.retrieve(request, *args, **kwargs)

    @swagger_auto_schema(
        request_body=CategorySerializer(),
        responses={
            200: CategorySerializer(),
            400: 'Bad Request',
            401: 'Invalid User\'s Credential',
            403: 'You Do Not Have Permission To Perform This Action',
            404: 'No Category With That ID Found'
        }
    )
    def put(self, request, *args, **kwargs):
        """
        Update Category
        Update category that the id mentioned, returned the updated category if successful.<br>
        Return 404 if no category with that id is found.
        ### Permission:
        * Staff Only
        """
        # delegate to UpdateModelMixin (full update; PATCH is not exposed)
        return self.update(request, *args, **kwargs)

    @swagger_auto_schema(
        responses={
            204: 'Category Succesfully Deleted',
            400: 'Bad Request',
            401: 'Invalid User\'s Credential',
            403: 'You Do Not Have Permission To Perform This Action',
            404: 'No Category With That ID Found'
        }
    )
    def delete(self, request, *args, **kwargs):
        """
        Delete Category
        Delete category that the id mentioned, then returned 204 if successful.<br>
        Return 404 if no category with that id is found.
        ### Permission:
        * Staff Only
        """
        # delegate to DestroyModelMixin
        return self.destroy(request, *args, **kwargs)
class CategoryProduct(APIView):
    """List the products that belong to the category with the given id."""
    permission_classes = [AllowAny]

    def get(self, request, pk):
        """Return all products in category ``pk``; 204 when there are none.

        Uses an exact match on the related category id: the original
        ``category__id__icontains`` did a substring match on the integer id,
        so pk=1 wrongly matched categories 11, 21, 100, ...
        """
        products = Product.objects.filter(category__id=pk)
        if not products:
            # no products in this category -> empty 204 response
            return Response(status=status.HTTP_204_NO_CONTENT)
        serializer = ProductSerializer(products, many=True)
        return Response(serializer.data, status.HTTP_200_OK)

    def post(self, request, slug, pk):
        # not implemented yet; kept so the route continues to accept POST
        pass
class Products(APIView):
    # Collection endpoint for Product: GET lists (with optional filters),
    # POST creates. Only staff may write.
    permission_classes = [IsStaffOrReadOnly]

    @swagger_auto_schema(
        responses={
            200: ProductSerializer(),
            204: 'No Content'
        }
    )
    def get(self, request):
        """
        Product List
        Return all available product. You can set a query parameters to filter the result.<br>
        ### Valid query parameter list:<br>
        **featured**: If set to 'true' will only return featured products, if set to 'false' will do the opposite.<br>
        **category**: Expected a category's ID,
        if properly set will only return product that contain category with mentioned ID.<br>
        ### Example request:<br>
        ```
        /api/v1/products/?category=1&featured=true
        ```
        """
        product_qs = Product.objects.all()
        # If the featured parameter is explicitly 'true' or 'false',
        # narrow the queryset to the requested featured state first.
        featured = request.GET.get('featured')
        if featured in ['true', 'false']:
            product_qs = product_qs.filter(is_featured=(featured == 'true'))
        # Filter by category membership in SQL; the original iterated every
        # product in Python and issued one extra query per product.
        category_id = request.GET.get('category')
        if category_id:
            product_qs = product_qs.filter(category__id=category_id)
        products = list(product_qs)
        # Return early with no content (204) if queryset is empty
        if not products:
            return Response(status=status.HTTP_204_NO_CONTENT)
        serializer = ProductSerializer(products, many=True)
        return Response(serializer.data)

    @swagger_auto_schema(
        request_body=CreateProductSerializer(),
        responses={
            201: openapi.Schema(type=openapi.TYPE_OBJECT, properties={'detail': 'Message', 'id': 'Product\'s ID', 'slug': 'Product\'s Slug'}),
            400: 'Bad Request',
            401: 'Invalid User\'s Credential',
            403: 'You Do Not Have Permission To Perform This Action'
        }
    )
    def post(self, request):
        """
        Create Product
        A staff account is needed to request this endpoint.<br>
        Otherwise, if user is not authenticated return 401 but if user is authenticated but is not a staff account return 403.<br>
        """
        serializer = CreateProductSerializer(data=request.data)
        if serializer.is_valid():
            serializer.save()
            # echo back the identifiers the client needs to address the
            # freshly created product
            return Response(
                {
                    'detail': 'Product successfully created',
                    'id': serializer.data['id'],
                    'slug': serializer.data['slug']
                },
                status=status.HTTP_201_CREATED
            )
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class ProductDetail(APIView):
    # Detail endpoint for a single Product, addressed by slug (not pk).
    permission_classes = [IsStaffOrReadOnly]

    def get_object(self, slug):
        # Translate a missing product into an HTTP 404 for all verbs below.
        try:
            return Product.objects.get(slug=slug)
        except Product.DoesNotExist:
            raise Http404

    @swagger_auto_schema(
        responses={
            200: ProductReviewSerializer(),
            404: 'No Product With That Slug Found'
        }
    )
    def get(self, request, slug):
        """
        Detail Product
        Return the detail of product that the slug mentioned.<br>
        Return 404 if no product with that slug is found.
        """
        product = self.get_object(slug)
        # ProductReviewSerializer embeds the product's reviews in the payload
        serializer = ProductReviewSerializer(product)
        return Response(serializer.data)

    @swagger_auto_schema(
        request_body=CreateProductSerializer(),
        responses={
            200: ProductSerializer(),
            400: 'Bad Request',
            404: 'No Product With That Slug Found',
            409: 'Some Unique Field(e.g username) Conflicted'
        }
    )
    def put(self, request, slug):
        """
        Update Product
        Update product that the slug mentioned, returned the updated product if succesful.<br>
        Return 404 if no product with that slug is found.
        """
        product = self.get_object(slug)
        try:
            serializer = ProductSerializer(product, data=request.data)
            # NOTE(review): is_valid() is called without raise_exception=True,
            # so ValidationError is never raised here and the 409 branch below
            # looks unreachable -- unique-field conflicts fall through to the
            # plain 400 return. Confirm the intended behaviour.
            if serializer.is_valid():
                serializer.save()
                return Response(serializer.data)
        except ValidationError:
            return Response(serializer.errors, status=status.HTTP_409_CONFLICT)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)

    @swagger_auto_schema(
        responses={
            204: 'Succesfully Delete Product',
            404: 'No Product With That Slug Found'
        }
    )
    def delete(self, request, slug):
        """
        Delete Product
        Delete product that the slug mentioned, then returned 204 if succesful
        Return 404 if no product with that slug is found.
        """
        product = self.get_object(slug)
        product.delete()
        return Response(status=status.HTTP_204_NO_CONTENT)
class GalleryList(APIView):
    # Read-only listing of gallery images, optionally filtered by query
    # parameters; open to anyone.
    permission_classes = [AllowAny]

    @swagger_auto_schema(
        responses={
            200: GallerySerializer(),
            204: 'No Gallery Results Found'
        }
    )
    def get(self, request):
        """
        Gallery
        Return all available gallery image.
        You can set a GET parameter to filter the result.
        GET parameter list:<br>
        **featured**: If set to 'true' will only return featured product's gallery<br>
        **product_slug**: Will only return product's gallery with mentioned slug
        """
        queryset = Gallery.objects.all().order_by('product')
        # apply each optional filter only when its parameter is present
        if request.GET.get('featured') == 'true':
            queryset = queryset.filter(product__is_featured=True)
        slug = request.GET.get('product_slug')
        if slug:
            queryset = queryset.filter(product__slug=slug)
        if not queryset:
            # Return early with no content (204) if no queryset found
            return Response(status=status.HTTP_204_NO_CONTENT)
        serialized = GallerySerializer(queryset, many=True)
        return Response(serialized.data, status=status.HTTP_200_OK)
class ProductReviews(APIView):
    # Reviews of a single product (addressed by slug): GET lists, POST
    # creates for the requesting user, DELETE removes that user's review.
    permission_classes = [IsActiveOrReadOnly]

    @swagger_auto_schema(
        responses={
            200: ReviewSerializer(many=True),
            204: 'No Content',
            404: 'No Product With That Slug Found',
        }
    )
    def get(self, request, slug):
        """
        Review List
        Return a list of reviews for product with mentioned slug.<br>
        Return 404 if no product with that slug is found.<br>
        """
        # raises Http404 when the product itself does not exist
        get_object_or_404(Product, slug=slug)
        reviews = Review.objects.filter(product__slug=slug)
        if not reviews:
            return Response(status=status.HTTP_204_NO_CONTENT)
        serializer = ReviewSerializer(reviews, many=True)
        return Response(serializer.data)

    @swagger_auto_schema(
        request_body=CreateReviewSerializer(),
        responses={
            201: ReviewSerializer(),
            400: 'Bad Request',
            401: 'Invalid User\'s Credential',
            404: 'No Product With That Slug Found',
            409: 'This User Already Reviewed This Product'
        }
    )
    def post(self, request, slug):
        """
        Create Review
        Create a review for product with mentioned slug, the author will be automatically the requesting user.<br>
        Each user can only review one product once.<br>
        Return 401 if request are not authenticated (user aren't logged in).<br>
        Return 404 if no product with that slug is found.<br>
        Return 409 if you try to review a product using a same user more than once.<br>
        """
        product = get_object_or_404(Product, slug=slug)
        try:
            # NOTE(review): **request.data mass-assigns untrusted client
            # fields straight into the model, bypassing the
            # CreateReviewSerializer advertised above -- confirm this is
            # intended; validating through the serializer would be safer.
            review = Review.objects.create(user=request.user, product=product, **request.data)
        except ValidationError as e:
            # NOTE(review): e.get_codes()[0] is typically a string code (e.g.
            # 'unique'), not a numeric HTTP status -- this response path looks
            # fragile; verify it ever produces the documented 409.
            return Response({'detail': e.detail[0] or 'Unknown error, please check logs'}, status=e.get_codes()[0] or 400)
        return Response(ReviewSerializer(review).data, status=status.HTTP_201_CREATED)

    def delete(self, request, slug):
        """
        Delete Review
        Delete a review for product with mentioned slug, the author will be automatically the requesting user.<br>
        Return 204 if review successfully deleted.<br>
        Return 401 if request are not authenticated (user aren't logged in).<br>
        Return 404 if no product with that slug is found or user haven't made any review for that product yet.<br>
        """
        # 404 covers both "no such product" and "no review by this user"
        get_object_or_404(Review, product__slug=slug, user=request.user).delete()
        return Response(status=status.HTTP_204_NO_CONTENT)
class ProductCategories(APIView):
    """Attach (POST) or detach (DELETE) a category to/from a product."""
    permission_classes = [IsStaffOrReadOnly]

    def post(self, request, slug, pk):
        """Add category ``pk`` to product ``slug``; 400 when already present."""
        product = get_object_or_404(Product, slug=slug)
        category = get_object_or_404(Category, id=pk)
        # membership test in SQL instead of materialising every related id
        if product.category.filter(id=category.id).exists():
            return Response({'message': 'This product already on this category'}, status.HTTP_400_BAD_REQUEST)
        product.category.add(category)
        serializer = ProductSerializer(product)
        return Response(serializer.data, status.HTTP_200_OK)

    def delete(self, request, slug, pk):
        """Remove category ``pk`` from product ``slug``; 400 when absent."""
        product = get_object_or_404(Product, slug=slug)
        category = get_object_or_404(Category, id=pk)
        # membership test in SQL instead of materialising every related id
        if not product.category.filter(id=category.id).exists():
            return Response({'message': 'This product is not already on this category'}, status.HTTP_400_BAD_REQUEST)
        product.category.remove(category)
        serializer = ProductSerializer(product)
        return Response(serializer.data, status.HTTP_200_OK)
"""Functions for selecting lists of timeslots based on certain criteria."""
import datetime
import functools
import itertools

import sqlalchemy

import lass.common.time
import lass.credits.query
import lass.schedule.filler
import lass.schedule.models
# Required by Schedule.table(); previously this module relied on the
# attribute being importable without an explicit import.
import lass.schedule.table
def process(slots, start, finish):
    """Annotates a raw timeslot list and pads it out with filler.

    Args:
        slots: The list of timeslots.  May be mutated.
        start: Desired start of the schedule; filler pads between here and
            the first timeslot if necessary.
        finish: Desired end of the schedule; filler pads between the last
            timeslot and here if necessary.

    Returns:
        'slots', augmented with filler and metadata/credits annotations.
        The result may extend beyond 'start'/'finish' but always covers
        them at minimum.
    """
    if slots:
        lass.schedule.models.Timeslot.annotate(slots)
        # Widen the requested window so filling never truncates real slots.
        start = min(start, slots[0].start)
        finish = max(finish, slots[-1].finish)
    filler = lass.schedule.filler.filler_from_config()
    return lass.schedule.filler.fill(slots, filler, start, finish)
class Schedule(object):
    """Lazy loader/cache for schedule lists."""
    def __init__(
        self,
        creator,
        processor=process,
        source=None,
        start=None,
        finish=None
    ):
        """Initialises the lazy loader.

        Args:
            creator: A function taking 'source', 'start' and 'finish'
                keyword arguments and returning a list of unfilled and
                unannotated timeslots.
            processor: A processing function for post-processing the output
                of 'creator' (for example, annotating and filling).
                (Default: 'process'.)
            source: A Query from which all timeslots should be selected.
                May be None, in which case the query matching all public
                timeslots will be used.
                (Default: None.)
            start: The datetime at which the schedule should start.
                May be None, in which case the date/time at calling is used.
                (Default: None.)
            finish: The datetime at which the schedule should finish.
                May be None, in which case the schedule finishes one day
                after it starts.
                (Default: None.)

        Returns:
            A Schedule object.
        """
        self.slots = None
        # BUG FIX: 'source' was previously ignored and the public-timeslot
        # query always used, contradicting the documented contract above.
        if source is None:
            source = lass.schedule.models.Timeslot.public()
        self.source = source
        self.start = start if start else lass.common.time.aware_now()
        self.finish = finish if finish else (
            self.start + datetime.timedelta(days=1)
        )
        self.creator = functools.partial(
            creator,
            source=self.source,
            start=self.start,
            finish=self.finish
        )
        self.processor = functools.partial(
            processor,
            start=self.start,
            finish=self.finish
        )
        # Becomes True once 'slots' caches the processed timeslot list.
        self.stored = False
        self.time_context = lass.common.time.context_from_config()

    @property
    def timeslots(self):
        """Retrieves the list of timeslots this Schedule object is set to
        retrieve.

        If the timeslots have not yet been retrieved, this will invoke the
        retrieval function and then save the results for future calls on
        this object only.

        Returns:
            The list of timeslots this object has been directed to compute.
        """
        if not self.stored:
            self.slots = self.processor(self.creator())
            self.stored = True
        return self.slots

    def days(self):
        """Returns the dates of all days covered by this schedule.

        Returns:
            An iterable of dates corresponding to each day in this
            schedule, in chronological order.
        """
        start_date = self.time_context.localise(self.start).date()
        finish_date = self.time_context.localise(self.finish).date()
        # Lazily generate start_date, start_date + 1 day, ... and stop
        # (exclusively) at finish_date.
        return itertools.takewhile(
            (lambda i: i < finish_date),
            map(
                lambda c: start_date + datetime.timedelta(days=c),
                itertools.count()
            )
        )

    def table(self):
        """Attempts to convert the schedule into table form.

        This will likely not work for non-weekly schedules.

        Returns:
            The schedule, in tabular form (lists of time rows containing
            day columns).
        """
        return lass.schedule.table.tabulate(
            self.start,
            self.timeslots,
            self.time_context
        )
def from_to(source, start, finish):
    """Selects all shows with air-time between 'start' and 'finish'.

    No filling is done, and selected shows may begin or end outside the
    requested window, so the first show's start and the last show's end
    need not equal 'start' and 'finish'.

    Args:
        source: A query providing the timeslots to select from (for all
            public shows, use 'Timeslot.query.public()', for example).
        start: The datetime representing the start of the schedule.
        finish: The datetime representing the finish of the schedule.

    Returns:
        A raw ordered list of every show from 'source' overlapping the
        window ['start', 'finish'].
    """
    # A slot overlaps the window iff it starts before the window ends and
    # ends after the window starts.
    overlapping = source.filter(
        (lass.schedule.models.Timeslot.start < finish) &
        (start < lass.schedule.models.Timeslot.finish)
    )
    return load_credits(overlapping).order_by(order()).all()
def next(source, start, finish, count):
    """Selects the next 'count' shows, current timeslot inclusive.

    No filling is done.

    Args:
        source: A query providing the timeslots to select from (for all
            public shows, use 'Timeslot.public()', for example).
        start: The datetime used as the reference point for deciding which
            shows count as 'next'.
        finish: Ignored; kept for interface consistency.
        count: The maximum number of timeslots to select.

    Returns:
        A list of up to 'count' raw timeslots still running at, or
        starting after, 'start'.
    """
    # NOTE: intentionally shadows the builtin next() within this module to
    # keep the established public name.
    still_to_come = source.filter(
        start < lass.schedule.models.Timeslot.finish
    )
    return load_credits(still_to_come).order_by(order()).limit(count).all()
def load_credits(query):
    """Adds credit loading to a timeslot query."""
    # Timeslots carry no credits of their own; eager-load the parent
    # show's credits (via season -> show) so requests for a timeslot's
    # credits resolve indirectly through the show.
    return lass.credits.query.add_to_query(
        query,
        ('season', 'show', 'credits')
    )
def order():
    """Returns the correct ordering for timeslots in a schedule list."""
    # Chronological order: earliest start first (equivalent to
    # sqlalchemy.asc(Timeslot.start)).
    return lass.schedule.models.Timeslot.start.asc()
|
#!/usr/bin/env python3
"""
Geoscience Australia - Python Geodesy Package
Transform Module
Ref1
http://www.icsm.gov.au/sites/default/files/GDA2020TechnicalManualV1.1.1.pdf
Ref2
http://www.mygeodesy.id.au/documents/Karney-Krueger%20equations.pdf
"""
import datetime
from math import radians
import numpy as np
from geodepy.constants import Transformation, atrf_gda2020,\
gda94_to_gda2020
from geodepy.convert import hp2dec, geo2grid, \
grid2geo, xyz2llh, llh2xyz
def conform7(x, y, z, trans):
    """
    Performs a Helmert 7 Parameter Conformal Transformation using Cartesian
    point co-ordinates and a predefined transformation object.

    :param x: Cartesian X (m)
    :param y: Cartesian Y (m)
    :param z: Cartesian Z (m)
    :param trans: Transformation Object (note: this function ignores all
        time-dependent variables)
    :return: Transformed X, Y, Z Cartesian Co-ordinates
    :raises ValueError: if trans is not a Transformation object
    """
    # isinstance rather than an exact type comparison: also accepts
    # Transformation subclasses.
    if not isinstance(trans, Transformation):
        raise ValueError('trans must be a Transformation Object')
    # Create XYZ Vector (3x1 column)
    xyz_before = np.array([[x],
                           [y],
                           [z]])
    # Convert Units for Transformation Parameters
    scale = trans.sc / 1000000
    rx = radians(hp2dec(trans.rx / 10000))
    ry = radians(hp2dec(trans.ry / 10000))
    rz = radians(hp2dec(trans.rz / 10000))
    # Create Translation Vector
    translation = np.array([[trans.tx],
                            [trans.ty],
                            [trans.tz]])
    # Create Rotation Matrix (small-angle approximation)
    rotation = np.array([[1., rz, -ry],
                         [-rz, 1., rx],
                         [ry, -rx, 1.]])
    # Conformal Transform Eq
    xyz_after = translation + (1 + scale) * np.dot(rotation, xyz_before)
    # Convert Vector to Separate Variables.  Explicit [row, 0] indexing:
    # calling float() on a size-1 sub-array is deprecated in newer NumPy.
    xtrans = float(xyz_after[0, 0])
    ytrans = float(xyz_after[1, 0])
    ztrans = float(xyz_after[2, 0])
    return xtrans, ytrans, ztrans
def conform14(x, y, z, to_epoch, trans):
    """
    Performs a Helmert 14 Parameter Conformal Transformation using Cartesian
    point co-ordinates and a predefined transformation object.  The
    transformation parameters are projected from the transformation object's
    reference epoch to a specified epoch.

    :param x: Cartesian X (m)
    :param y: Cartesian Y (m)
    :param z: Cartesian Z (m)
    :param to_epoch: Epoch co-ordinate transformation is performed at
        (datetime.date Object)
    :param trans: Transformation Object
    :return: Cartesian X, Y, Z co-ordinates transformed using Transformation
        parameters at desired epoch
    :raises ValueError: if trans is not a Transformation object or to_epoch
        is not a datetime.date
    """
    # isinstance instead of exact type comparison: accepts subclasses
    # (including datetime.datetime, which is a datetime.date subclass).
    if not isinstance(trans, Transformation):
        raise ValueError('trans must be a Transformation Object')
    if not isinstance(to_epoch, datetime.date):
        raise ValueError('to_epoch must be a datetime.date Object')
    # Calculate 7 Parameters from 14 Parameter Transformation Object
    # (Transformation + date projects the parameters to that epoch).
    timetrans = trans + to_epoch
    # Perform Transformation
    xtrans, ytrans, ztrans = conform7(x, y, z, timetrans)
    return xtrans, ytrans, ztrans
def mga94_to_mga2020(zone, east, north, ell_ht=False):
    """
    Performs conformal transformation of Map Grid of Australia 1994 to Map
    Grid of Australia 2020 Coordinates using the GDA2020 Tech Manual v1.2
    7 parameter similarity transformation parameters.

    :param zone: Zone Number - 1 to 60
    :param east: Easting (m, within 3330km of Central Meridian)
    :param north: Northing (m, 0 to 10,000,000m)
    :param ell_ht: Ellipsoid Height (m) (optional)
    :return: MGA2020 Zone, Easting, Northing and Ellipsoid Height
        (if none provided, returns 0)
    """
    # Grid -> geographic -> Cartesian, apply Helmert transform, then back.
    lat, lon, psf, gridconv = grid2geo(zone, east, north)
    height_in = 0 if ell_ht is False else ell_ht
    x94, y94, z94 = llh2xyz(lat, lon, height_in)
    x20, y20, z20 = conform7(x94, y94, z94, gda94_to_gda2020)
    lat, lon, height_out = xyz2llh(x20, y20, z20)
    if ell_ht is False:
        # No input height supplied: report 0 rather than a derived value.
        height_out = 0
    hemisphere, zone_out, east_out, north_out, psf, gridconv = geo2grid(lat, lon)
    return zone_out, east_out, north_out, round(height_out, 4)
def mga2020_to_mga94(zone, east, north, ell_ht=False):
    """
    Performs conformal transformation of Map Grid of Australia 2020 to Map
    Grid of Australia 1994 Coordinates using the reverse form of the GDA2020
    Tech Manual v1.2 7 parameter similarity transformation parameters.

    :param zone: Zone Number - 1 to 60
    :param east: Easting (m, within 3330km of Central Meridian)
    :param north: Northing (m, 0 to 10,000,000m)
    :param ell_ht: Ellipsoid Height (m) (optional)
    :return: MGA1994 Zone, Easting, Northing and Ellipsoid Height
        (if none provided, returns 0)
    """
    lat, lon, psf, gridconv = grid2geo(zone, east, north)
    height_in = 0 if ell_ht is False else ell_ht
    x20, y20, z20 = llh2xyz(lat, lon, height_in)
    # Reverse direction: negate the GDA94 -> GDA2020 parameter set.
    x94, y94, z94 = conform7(x20, y20, z20, -gda94_to_gda2020)
    lat, lon, height_out = xyz2llh(x94, y94, z94)
    if ell_ht is False:
        # No input height supplied: report 0 rather than a derived value.
        height_out = 0
    hemisphere, zone_out, east_out, north_out, psf, gridconv = geo2grid(lat, lon)
    return zone_out, east_out, north_out, round(height_out, 4)
def atrf2014_to_gda2020(x, y, z, epoch_from):
    """
    Transforms Cartesian (x, y, z) coordinates in terms of the Australian
    Terrestrial Reference Frame (ATRF) at a specified epoch to coordinates
    in terms of Geocentric Datum of Australia 2020 (GDA2020 - reference
    epoch 2020.0).

    :param x: ATRF Cartesian X Coordinate (m)
    :param y: ATRF Cartesian Y Coordinate (m)
    :param z: ATRF Cartesian Z Coordinate (m)
    :param epoch_from: ATRF Coordinate Epoch (datetime.date Object)
    :return: Cartesian X, Y, Z Coordinates in terms of GDA2020
    """
    # Forward direction: use the ATRF/GDA2020 14-parameter set as-is.
    result = conform14(x, y, z, epoch_from, atrf_gda2020)
    return result
def gda2020_to_atrf2014(x, y, z, epoch_to):
    """
    Transforms Cartesian (x, y, z) coordinates in terms of Geocentric Datum
    of Australia 2020 (GDA2020 - reference epoch 2020.0) to coordinates in
    terms of the Australian Terrestrial Reference Frame (ATRF) at a
    specified epoch.

    :param x: GDA2020 Cartesian X Coordinate (m)
    :param y: GDA2020 Cartesian Y Coordinate (m)
    :param z: GDA2020 Cartesian Z Coordinate (m)
    :param epoch_to: ATRF Coordinate Epoch (datetime.date Object)
    :return: Cartesian X, Y, Z Coordinate in terms of ATRF at the specified
        Epoch
    """
    # Reverse direction: negate the ATRF/GDA2020 14-parameter set.
    result = conform14(x, y, z, epoch_to, -atrf_gda2020)
    return result
|
from django import forms
from django.forms.models import modelformset_factory
import happyforms
from quieter_formset.formset import BaseModelFormSet
from tower import ugettext as _, ugettext_lazy as _lazy
import mkt
from mkt.reviewers.models import ReviewerScore, RereviewQueue
from mkt.webapps.models import Webapp
from mkt.websites.models import Website
# Moderation actions selectable on each abuse-report form; values match the
# choice values in AbuseViewForm.action below.
ABUSE_REPORT_SKIP = 0  # leave the reports untouched for now
ABUSE_REPORT_READ = 1  # mark all unread reports as read
ABUSE_REPORT_FLAG = 2  # mark read and flag the app for re-review
class BaseAbuseViewFormSet(BaseModelFormSet):
    """Formset driving the reviewer abuse-report moderation queue."""

    def __init__(self, *args, **kwargs):
        self.form = AbuseViewForm
        # The view passes the current request so save() can award points to
        # the acting reviewer.
        self.request = kwargs.pop('request', None)
        super(BaseAbuseViewFormSet, self).__init__(*args, **kwargs)

    def save(self):
        """Apply each form's selected action to its unread abuse reports."""
        for form in self.forms:
            if form.cleaned_data:
                action = int(form.cleaned_data['action'])
                if action == ABUSE_REPORT_SKIP:
                    continue
                inst = form.instance
                app = None
                site = None
                user = None
                texts = []
                for report in inst.abuse_reports.all().filter(read=False):
                    report.read = True
                    report.save()
                    app = report.addon
                    site = report.website
                    user = report.user
                    if report.message:
                        texts.append(report.message)
                    if app:
                        mkt.log(mkt.LOG.APP_ABUSE_MARKREAD, app, report,
                                details=dict(
                                    body=unicode(report.message),
                                    addon_id=app.id,
                                    addon_title=unicode(app.name)
                                ))
                    elif user:
                        # Not possible on Marketplace currently.
                        pass
                    elif site:
                        mkt.log(mkt.LOG.WEBSITE_ABUSE_MARKREAD, site,
                                report,
                                details=dict(
                                    body=unicode(report.message),
                                    website_id=site.id,
                                    website_title=unicode(site.name)
                                ))
                if app or site:
                    ReviewerScore.award_mark_abuse_points(
                        self.request.user, addon=app, website=site)
                if app and action == ABUSE_REPORT_FLAG:
                    # i18n BUG FIX: interpolate AFTER the gettext lookup so
                    # the literal msgid matches the translation catalogue;
                    # previously the already-interpolated string was looked
                    # up and could never be translated.
                    message = _('Abuse reports needing investigation: %s') % (
                        ', '.join(texts))
                    RereviewQueue.flag(
                        app, mkt.LOG.REREVIEW_ABUSE_APP, message=message)
class AbuseViewForm(happyforms.ModelForm):
    # Choice values map onto the ABUSE_REPORT_* constants handled in
    # BaseAbuseViewFormSet.save().
    action_choices = [
        (ABUSE_REPORT_SKIP, _lazy(u'Skip for now')),
        (ABUSE_REPORT_READ, _lazy(u'Mark all reports Read')),
        (ABUSE_REPORT_FLAG, _lazy(u'Flag for re-review'))]
    # Radio selector; not required so untouched rows default to "skip" (0).
    action = forms.ChoiceField(choices=action_choices, required=False,
                               initial=0, widget=forms.RadioSelect())

    class Meta:
        model = Webapp
        fields = ('action',)
class WebsiteAbuseViewForm(AbuseViewForm):
    # Same as AbuseViewForm but for Website objects; the re-review flag
    # action is omitted from the choices.
    action_choices = [
        (ABUSE_REPORT_SKIP, _lazy(u'Skip for now')),
        (ABUSE_REPORT_READ, _lazy(u'Mark all reports Read'))]
    action = forms.ChoiceField(choices=action_choices, required=False,
                               initial=0, widget=forms.RadioSelect())

    class Meta:
        model = Website
        fields = ('action',)
# Ready-made formsets over existing queue rows (extra=0: no blank forms);
# the acting request is injected via BaseAbuseViewFormSet.__init__.
AppAbuseViewFormSet = modelformset_factory(Webapp, extra=0,
                                           form=AbuseViewForm,
                                           formset=BaseAbuseViewFormSet)
WebsiteAbuseViewFormSet = modelformset_factory(Website, extra=0,
                                               form=WebsiteAbuseViewForm,
                                               formset=BaseAbuseViewFormSet)
|
from nltk.corpus import wordnet as wn
import sys
import os
import re
reload(sys)
sys.setdefaultencoding('UTF8')
pathstump = "/Users/Torri/Documents/Grad stuff/Thesis stuff/Data - Novels/Analysis/"
target = 'tagged'
specialwords = open("/Users/Torri/Documents/Grad stuff/Thesis stuff/Data - Novels/Analysis/novels_special_words.txt").read().splitlines()
cocaFile1000 = open("/Users/Torri/pythonstuff/COCA-top1000.txt").read().splitlines()
advTags = ['RB', 'RBR', 'RBS']
adjTags = ['JJ', 'JJR', 'JJS']
for dirname, dirs, files in os.walk('.'):
if target in dirname:
#author = re.findall("..(.*?)\\\\tagged", dirname)[0]
author = dirname.split(os.sep)[1]
print author
#print author
output = os.path.join(dirname, '..', author + "_specialness.txt")
outfile = open(output, 'w')
print "%s\t%s\t%s" % ("Novel", "special types", "relative specialness")
print>>outfile, "%s\t%s\t%s" % ("Novel", "special types", "relative specialness")
# .\James\tagged\..\James_MAT_50.txt
for filename in files:
specialtypes = 0
specialwordlist = []
tokens = 0.
if '_tagged.txt' in filename:
book = filename.replace('_tagged.txt', '')
data = open(pathstump + author + "/" + "tagged/" + filename, 'r')
for line in data:
line = line.rstrip('\r\n')
first_char = line[0]
line = line.split('\t')
pos = line[1]
lemma = line[2]
tokens += 1
if lemma in specialwords:
if lemma not in specialwordlist:
specialwordlist.append(lemma)
specialtypes += 1
if pos == "IN": #prepositions
if lemma not in specialwordlist:
specialwordlist.append(lemma)
specialtypes += 1
if pos == "MD": #modals
if lemma not in specialwordlist:
specialwordlist.append(lemma)
specialtypes += 1
if (pos in advTags) and (lemma not in cocaFile1000): #adverbs
if lemma not in specialwordlist:
specialwordlist.append(lemma)
specialtypes += 1
if (pos in adjTags) and (lemma not in cocaFile1000): #adjectives
if lemma not in specialwordlist:
specialwordlist.append(lemma)
specialtypes += 1
relativeSpecial = (specialtypes/tokens) * 100
print "%s\t%s\t%.2f" % (book, specialtypes, relativeSpecial)
print>>outfile, "%s\t%s\t%.2f" % (book, specialtypes, relativeSpecial) |
from hikyuu import PG_FixedHoldDays
# Part author
author = "fasiondog"

# Version
version = '20200825'
def part(days=5):
    # Delegate to the hikyuu built-in fixed-hold-days profit goal.
    return PG_FixedHoldDays(days)


# Re-export the wrapped component's documentation as this part's docstring.
part.__doc__ = PG_FixedHoldDays.__doc__


if __name__ == '__main__':
    print(part())
#!/usr/bin/env python3
import webbrowser
import argparse
def make_it_short():
    """Parse ``-w <key>`` and open the matching URL in the default browser.

    Accepts both the original long keys ('linkedin', 'sup', 'telegram', ...)
    and the short aliases advertised in the help text; an unknown key now
    prints the available choices instead of raising KeyError.
    """
    parser = argparse.ArgumentParser(description='Tool for going to your most wanted links with short keys')
    parser.add_argument('-w', metavar='url', dest='wanted', type=str,
                        help="""the wanted url ,
                         sap  >>> web.whatsapp.com ,
                         lnkdin >>> linkedin.com ,
                         tlgrm >>> web.telegram.com ,
                         gogol >>> google.com ,
                         github >>> github.com """
                        )
    args = parser.parse_args()
    _wanted_url = args.wanted or None
    urls = {"linkedin": "https://www.linkedin.com/feed/",
            "github": "https://github.com/",
            "sup": "https://web.whatsapp.com/",
            "telegram": "https://web.telegram.org/#/im",
            "gogol": "https://google.com/"}
    # BUG FIX: the help text advertises short keys ('sap', 'lnkdin',
    # 'tlgrm') that were missing from the mapping and raised KeyError;
    # register them as aliases of the existing entries.
    urls.update({"sap": urls["sup"],
                 "lnkdin": urls["linkedin"],
                 "tlgrm": urls["telegram"]})
    if _wanted_url:
        if _wanted_url in urls:
            webbrowser.open(urls[_wanted_url])
        else:
            # Explicit handling instead of an uncaught KeyError.
            print("Unknown key %r; choose one of: %s"
                  % (_wanted_url, ", ".join(sorted(urls))))


make_it_short()
|
import numpy as np
import pandas as pd
import plotly.graph_objs as go
from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot, offline
from datetime import datetime
# Date stamps captured once at import time; presumably used to timestamp
# saved figure filenames elsewhere — TODO confirm against callers.
today = datetime.today().strftime('%Y-%m-%d')
date_time = datetime.today().strftime('%Y-%m-%d-%H-%M')
import vis_layout
from manipulation import normalize_cols, remove_acentos
def all_countrys(dd, var, scale, theme, save=False):
    """Build a per-country line figure of ``var`` against the number of days
    since each country first exceeded the configured threshold.

    Args:
        dd: DataFrame with at least 'date', 'countrycode', 'countryname'
            and the ``var`` column.
        var: Name of the column to plot.
        scale: Axis scale, forwarded to the layout builder.
        theme: Theme dict; ``theme['vars'][var]`` supplies 'since_var'
            (threshold) and 'roling_window' (rolling-mean width).
        save: Unused; kept for interface compatibility.

    Returns:
        A plotly ``go.Figure`` with one trace per country code.
    """
    threshold = theme['vars'][var]['since_var']
    # .copy() so the column assignments below operate on an independent
    # frame rather than a view of the caller's data (SettingWithCopy fix).
    dd = dd[(dd[var] != 0) & (dd[var] > threshold)].copy()
    # Day counter within each country, starting at 1 on the first day the
    # threshold was exceeded; replaces the temporary 'count' column and
    # per-group cumsum of the original implementation.
    dd['since_first_day'] = dd.groupby('countrycode').cumcount() + 1
    dd = dd.sort_values(by=['countryname'], ascending=False)
    dd = dd.sort_values(by=['date'])
    countries = sorted(dd['countrycode'].unique(), reverse=True)
    data = []
    for geoid in countries:
        dc = dd[dd['countrycode'] == geoid].copy()
        # Smooth with a rolling mean and drop the leading NaN window.
        dc[var] = dc[var].rolling(theme['vars'][var]['roling_window']).mean()
        dc = dc[dc[var].notnull()]
        trace = go.Scatter(
            name=dc['countryname'].str.replace('_', ' ').tolist()[0],
            x=dc['since_first_day'],
            y=dc[var],
            line=dict(width=theme['data']['line_width']),
            mode='lines+markers',
            marker=dict(size=theme['data']['marker_size']),
            hoverlabel=dict(namelength=-1,
                            font=dict(size=theme['data']['hoverlabel_size']))
        )
        data.append(trace)
    layout = vis_layout.get_layout_new(theme, var, scale)
    return go.Figure(data=data, layout=layout)
def unique_country(dd, themes):
    """Build a bar chart for a single country's time series, with one bar
    trace per variable configured in ``themes['vars']``."""
    traces = []
    for var in themes['vars']:
        cfg = themes['vars'][var]
        traces.append(go.Bar(
            name=cfg['nome'],
            x=dd['date'],
            y=dd[var],
            marker=dict(color=cfg['color'],),
            hoverlabel=dict(namelength=-1,
                            font=dict(size=themes['data']['hoverlabel_size']))
        ))
    return go.Figure(data=traces, layout=vis_layout.get_layout_bar(themes))
def brasil_vis_cum(dd, var, scale, themes):
    """Build per-city line traces of ``var`` over time for Brazilian data.

    Args:
        dd: DataFrame with 'confirmed', 'state', 'city', 'date' and ``var``.
        var: Column to plot.
        scale: Axis scale, forwarded to the layout builder.
        themes: Theme dict; ``themes['vars'][var]['in_cities']`` selects the
            series to draw ('all' as first entry expands to every value).

    Returns:
        A plotly ``go.Figure``.
    """
    dd = dd[dd['confirmed'] > 0]
    if themes['vars'][var]['in_cities'][0] == 'all':
        # NOTE(review): 'all' expands from the *state* column while the loop
        # below filters on the *city* column — looks inconsistent; confirm
        # against the data schema.
        in_cities = dd['state'].unique()
    else:
        in_cities = themes['vars'][var]['in_cities']
    colors = themes['data']['colors']
    # Repeat the palette until it is at least as long as the series list.
    colors = int(np.ceil(len(in_cities) / len(colors))) * colors
    data = []
    for i, city in enumerate(in_cities):
        # .copy() so the rolling-mean assignment does not write into a view
        # of the caller's frame (SettingWithCopy fix).
        dc = dd[dd['city'] == city].copy()
        dc[var] = dc[var].rolling(themes['vars'][var]['roling_window']).mean()
        dc = dc[dc[var].notnull()]
        trace = go.Scatter(
            name=city,
            x=dc['date'],
            y=dc[var],
            line=dict(color=colors[i], width=themes['data']['line_width']),
            mode='lines+markers',
            marker=dict(size=themes['data']['marker_size']),
            hoverlabel=dict(namelength=-1,
                            font=dict(size=themes['data']['hoverlabel_size'])),
        )
        data.append(trace)
    layout = vis_layout.get_layout_new(themes, var, scale)
    # Unreachable duplicate 'return(fig)' removed.
    return go.Figure(data=data, layout=layout)
|
import os

# Ordered list of shell command strings (training / testing / metrics runs)
# appended below; presumably executed sequentially (e.g. via os.system) later
# in the file — the execution loop is outside this view, TODO confirm.
Schedule = []
# # =================================== CLASSIFIER ========================================
# # ================== TRAINING AND TESTING ========================
# # Source: AMAZON_RO
# Schedule.append("python Main_Train_Test.py --s_dataset AMAZON_RO --mode classifier --phase train --runs 5 --patches_dimension 128 --image_channels 7 --num_classes 2 --epochs 150 --batch_size 32 --ada_batch_size 1 --patience 10 --lr 0.0001 --optimizer Adam --data_augmentation True --fixed_tiles True --balanced_tr True --buffer True --buffer_dimension_out 2 --buffer_dimension_in 0 --percent_of_last_reference_in_actual_reference 100 --percent_of_positive_pixels_in_actual_reference 2 --data_type .npy --dataset_main_path ../../..//Datasets/")
# Schedule.append("python Main_Train_Test.py --s_dataset AMAZON_RO --t_dataset AMAZON_RO --mode classifier --phase test --patches_dimension 128 --image_channels 7 --num_classes 2 --batch_size 32 --ada_batch_size 1 --lr 0.0001 --optimizer Adam --data_augmentation True --fixed_tiles True --balanced_tr True --buffer True --buffer_dimension_out 2 --buffer_dimension_in 0 --percent_of_last_reference_in_actual_reference 100 --percent_of_positive_pixels_in_actual_reference 2 --data_type .npy --dataset_main_path ../../..//Datasets/")
# Schedule.append("python Main_Train_Test.py --s_dataset AMAZON_RO --t_dataset AMAZON_PA --mode classifier --phase test --patches_dimension 128 --image_channels 7 --num_classes 2 --batch_size 32 --ada_batch_size 1 --lr 0.0001 --optimizer Adam --data_augmentation True --fixed_tiles True --balanced_tr True --buffer True --buffer_dimension_out 2 --buffer_dimension_in 0 --percent_of_last_reference_in_actual_reference 100 --percent_of_positive_pixels_in_actual_reference 2 --data_type .npy --dataset_main_path ../../..//Datasets/")
# Schedule.append("python Main_Train_Test.py --s_dataset AMAZON_RO --t_dataset CERRADO_MA --mode classifier --phase test --patches_dimension 128 --image_channels 7 --num_classes 2 --batch_size 32 --ada_batch_size 1 --lr 0.0001 --optimizer Adam --data_augmentation True --fixed_tiles True --balanced_tr True --buffer True --buffer_dimension_out 2 --buffer_dimension_in 0 --percent_of_last_reference_in_actual_reference 100 --percent_of_positive_pixels_in_actual_reference 2 --data_type .npy --dataset_main_path ../../..//Datasets/")
# Schedule.append("python Main_Compute_Metrics_MT.py --s_dataset AMAZON_RO --t_dataset AMAZON_RO --mode classifier --dataset_main_path ../../..//Datasets/ --Npoints 200 --eliminate_regions True --area_avoided 69 --buffer True --buffer_dimension_out 2 --buffer_dimension_in 0 --image_channels 7 --patches_dimension 128 --fixed_tiles True --data_type .npy")
# Schedule.append("python Main_Compute_Metrics_MT.py --s_dataset AMAZON_RO --t_dataset AMAZON_PA --mode classifier --dataset_main_path ../../..//Datasets/ --Npoints 200 --eliminate_regions True --area_avoided 69 --buffer True --buffer_dimension_out 2 --buffer_dimension_in 0 --image_channels 7 --patches_dimension 128 --fixed_tiles True --data_type .npy")
# Schedule.append("python Main_Compute_Metrics_MT.py --s_dataset AMAZON_RO --t_dataset CERRADO_MA --mode classifier --dataset_main_path ../../..//Datasets/ --Npoints 200 --eliminate_regions True --area_avoided 69 --buffer True --buffer_dimension_out 2 --buffer_dimension_in 0 --image_channels 7 --patches_dimension 128 --fixed_tiles True --data_type .npy")
# Source: AMAZON_PA
# Classifier experiments with AMAZON_PA as source: train once, then test on
# each target domain and compute metrics for each source/target pair.
Schedule.append("python Main_Train_Test.py --s_dataset AMAZON_PA --mode classifier --phase train --runs 5 --patches_dimension 128 --image_channels 7 --num_classes 2 --epochs 150 --batch_size 32 --ada_batch_size 1 --patience 10 --lr 0.0001 --optimizer Adam --data_augmentation True --fixed_tiles True --balanced_tr True --buffer True --buffer_dimension_out 2 --buffer_dimension_in 0 --percent_of_last_reference_in_actual_reference 100 --percent_of_positive_pixels_in_actual_reference 2 --data_type .npy --dataset_main_path ../../..//Datasets/")
Schedule.append("python Main_Train_Test.py --s_dataset AMAZON_PA --t_dataset AMAZON_PA --mode classifier --phase test --patches_dimension 128 --image_channels 7 --num_classes 2 --batch_size 32 --ada_batch_size 1 --lr 0.0001 --optimizer Adam --data_augmentation True --fixed_tiles True --balanced_tr True --buffer True --buffer_dimension_out 2 --buffer_dimension_in 0 --percent_of_last_reference_in_actual_reference 100 --percent_of_positive_pixels_in_actual_reference 2 --data_type .npy --dataset_main_path ../../..//Datasets/")
Schedule.append("python Main_Train_Test.py --s_dataset AMAZON_PA --t_dataset AMAZON_RO --mode classifier --phase test --patches_dimension 128 --image_channels 7 --num_classes 2 --batch_size 32 --ada_batch_size 1 --lr 0.0001 --optimizer Adam --data_augmentation True --fixed_tiles True --balanced_tr True --buffer True --buffer_dimension_out 2 --buffer_dimension_in 0 --percent_of_last_reference_in_actual_reference 100 --percent_of_positive_pixels_in_actual_reference 2 --data_type .npy --dataset_main_path ../../..//Datasets/")
Schedule.append("python Main_Train_Test.py --s_dataset AMAZON_PA --t_dataset CERRADO_MA --mode classifier --phase test --patches_dimension 128 --image_channels 7 --num_classes 2 --batch_size 32 --ada_batch_size 1 --lr 0.0001 --optimizer Adam --data_augmentation True --fixed_tiles True --balanced_tr True --buffer True --buffer_dimension_out 2 --buffer_dimension_in 0 --percent_of_last_reference_in_actual_reference 100 --percent_of_positive_pixels_in_actual_reference 2 --data_type .npy --dataset_main_path ../../..//Datasets/")
# Metrics for each of the three test runs above.
Schedule.append("python Main_Compute_Metrics_MT.py --s_dataset AMAZON_PA --t_dataset AMAZON_PA --mode classifier --dataset_main_path ../../..//Datasets/ --Npoints 200 --eliminate_regions True --area_avoided 69 --buffer True --buffer_dimension_out 2 --buffer_dimension_in 0 --image_channels 7 --patches_dimension 128 --fixed_tiles True --data_type .npy")
Schedule.append("python Main_Compute_Metrics_MT.py --s_dataset AMAZON_PA --t_dataset AMAZON_RO --mode classifier --dataset_main_path ../../..//Datasets/ --Npoints 200 --eliminate_regions True --area_avoided 69 --buffer True --buffer_dimension_out 2 --buffer_dimension_in 0 --image_channels 7 --patches_dimension 128 --fixed_tiles True --data_type .npy")
Schedule.append("python Main_Compute_Metrics_MT.py --s_dataset AMAZON_PA --t_dataset CERRADO_MA --mode classifier --dataset_main_path ../../..//Datasets/ --Npoints 200 --eliminate_regions True --area_avoided 69 --buffer True --buffer_dimension_out 2 --buffer_dimension_in 0 --image_channels 7 --patches_dimension 128 --fixed_tiles True --data_type .npy")
# # Source: CERRADO_MA
# Schedule.append("python Main_Train_Test.py --s_dataset CERRADO_MA --mode classifier --phase train --runs 5 --patches_dimension 128 --image_channels 7 --num_classes 2 --epochs 150 --batch_size 32 --ada_batch_size 1 --patience 10 --lr 0.0001 --optimizer Adam --data_augmentation True --fixed_tiles True --balanced_tr True --buffer True --buffer_dimension_out 2 --buffer_dimension_in 0 --percent_of_last_reference_in_actual_reference 100 --percent_of_positive_pixels_in_actual_reference 2 --data_type .npy --dataset_main_path ../../..//Datasets/")
# Schedule.append("python Main_Train_Test.py --s_dataset CERRADO_MA --t_dataset CERRADO_MA --mode classifier --phase test --patches_dimension 128 --image_channels 7 --num_classes 2 --batch_size 32 --ada_batch_size 1 --lr 0.0001 --optimizer Adam --data_augmentation True --fixed_tiles True --balanced_tr True --buffer True --buffer_dimension_out 2 --buffer_dimension_in 0 --percent_of_last_reference_in_actual_reference 100 --percent_of_positive_pixels_in_actual_reference 2 --data_type .npy --dataset_main_path ../../..//Datasets/")
# Schedule.append("python Main_Train_Test.py --s_dataset CERRADO_MA --t_dataset AMAZON_PA --mode classifier --phase test --patches_dimension 128 --image_channels 7 --num_classes 2 --batch_size 32 --ada_batch_size 1 --lr 0.0001 --optimizer Adam --data_augmentation True --fixed_tiles True --balanced_tr True --buffer True --buffer_dimension_out 2 --buffer_dimension_in 0 --percent_of_last_reference_in_actual_reference 100 --percent_of_positive_pixels_in_actual_reference 2 --data_type .npy --dataset_main_path ../../..//Datasets/")
# Schedule.append("python Main_Train_Test.py --s_dataset CERRADO_MA --t_dataset AMAZON_RO --mode classifier --phase test --patches_dimension 128 --image_channels 7 --num_classes 2 --batch_size 32 --ada_batch_size 1 --lr 0.0001 --optimizer Adam --data_augmentation True --fixed_tiles True --balanced_tr True --buffer True --buffer_dimension_out 2 --buffer_dimension_in 0 --percent_of_last_reference_in_actual_reference 100 --percent_of_positive_pixels_in_actual_reference 2 --data_type .npy --dataset_main_path ../../..//Datasets/")
# Schedule.append("python Main_Compute_Metrics_MT.py --s_dataset CERRADO_MA --t_dataset CERRADO_MA --mode classifier --dataset_main_path ../../..//Datasets/ --Npoints 200 --eliminate_regions True --area_avoided 69 --buffer True --buffer_dimension_out 2 --buffer_dimension_in 0 --image_channels 7 --patches_dimension 128 --fixed_tiles True --data_type .npy")
# Schedule.append("python Main_Compute_Metrics_MT.py --s_dataset CERRADO_MA --t_dataset AMAZON_PA --mode classifier --dataset_main_path ../../..//Datasets/ --Npoints 200 --eliminate_regions True --area_avoided 69 --buffer True --buffer_dimension_out 2 --buffer_dimension_in 0 --image_channels 7 --patches_dimension 128 --fixed_tiles True --data_type .npy")
# Schedule.append("python Main_Compute_Metrics_MT.py --s_dataset CERRADO_MA --t_dataset AMAZON_RO --mode classifier --dataset_main_path ../../..//Datasets/ --Npoints 200 --eliminate_regions True --area_avoided 69 --buffer True --buffer_dimension_out 2 --buffer_dimension_in 0 --image_channels 7 --patches_dimension 128 --fixed_tiles True --data_type .npy")
# --------------------------------
# =================================== ADAPTATION ========================================
# ================== TRAINING, TEST AND METRICS CALCULATION ========================
# --margin 0.0 --use_pseudoreference False
# =========== Source: AMAZON_RO ============
# Target: CERRADO_MA
# Schedule.append("python Main_Train_Test.py --s_dataset AMAZON_RO --t_dataset CERRADO_MA --mode adaptation --match early --phase train --runs 5 --patches_dimension 128 --image_channels 7 --num_classes 2 --epochs 150 --batch_size 32 --ada_batch_size 1 --patience 10 --lr 0.0001 --optimizer Adam --data_augmentation True --fixed_tiles True --balanced_tr True --buffer True --buffer_dimension_out 2 --buffer_dimension_in 0 --percent_of_last_reference_in_actual_reference 100 --percent_of_positive_pixels_in_actual_reference 2 --data_type .npy --dataset_main_path ../../..//Datasets/")
# Schedule.append("python Main_Train_Test.py --s_dataset AMAZON_RO --t_dataset CERRADO_MA --mode adaptation --match early --phase test --patches_dimension 128 --image_channels 7 --num_classes 2 --batch_size 32 --ada_batch_size 1 --lr 0.0001 --optimizer Adam --data_augmentation True --fixed_tiles True --balanced_tr True --buffer True --buffer_dimension_out 2 --buffer_dimension_in 0 --percent_of_last_reference_in_actual_reference 100 --percent_of_positive_pixels_in_actual_reference 2 --data_type .npy --dataset_main_path ../../..//Datasets/")
# Schedule.append("python Main_Compute_Metrics_MT.py --s_dataset AMAZON_RO --t_dataset CERRADO_MA --mode adaptation --match early --dataset_main_path ../../..//Datasets/ --Npoints 200 --eliminate_regions True --area_avoided 69 --buffer True --buffer_dimension_out 2 --buffer_dimension_in 0 --image_channels 7 --patches_dimension 128 --fixed_tiles True --data_type .npy")
# Schedule.append("python Main_Compute_Create_Avg_Precision_Recall_Curve_Avg_Hit_map.py --s_dataset AMAZON_RO --t_dataset CERRADO_MA")
# Target: AMAZON_PA
# Schedule.append("python Main_Train_Test.py --s_dataset AMAZON_RO --t_dataset AMAZON_PA --mode adaptation --match early --phase train --runs 5 --patches_dimension 128 --image_channels 7 --num_classes 2 --epochs 150 --batch_size 32 --ada_batch_size 1 --patience 10 --lr 0.0001 --optimizer Adam --data_augmentation True --fixed_tiles True --balanced_tr True --buffer True --buffer_dimension_out 2 --buffer_dimension_in 0 --percent_of_last_reference_in_actual_reference 100 --percent_of_positive_pixels_in_actual_reference 2 --data_type .npy --dataset_main_path ../../..//Datasets/")
# Schedule.append("python Main_Train_Test.py --s_dataset AMAZON_RO --t_dataset AMAZON_PA --mode adaptation --match early --phase test --patches_dimension 128 --image_channels 7 --num_classes 2 --batch_size 32 --ada_batch_size 1 --lr 0.0001 --optimizer Adam --data_augmentation True --fixed_tiles True --balanced_tr True --buffer True --buffer_dimension_out 2 --buffer_dimension_in 0 --percent_of_last_reference_in_actual_reference 100 --percent_of_positive_pixels_in_actual_reference 2 --data_type .npy --dataset_main_path ../../..//Datasets/")
# Schedule.append("python Main_Compute_Metrics_MT.py --s_dataset AMAZON_RO --t_dataset AMAZON_PA --mode adaptation --match early --dataset_main_path ../../..//Datasets/ --Npoints 200 --eliminate_regions True --area_avoided 69 --buffer True --buffer_dimension_out 2 --buffer_dimension_in 0 --image_channels 7 --patches_dimension 128 --fixed_tiles True --data_type .npy")
# Schedule.append("python Main_Compute_Create_Avg_Precision_Recall_Curve_Avg_Hit_map.py --s_dataset AMAZON_RO --t_dataset AMAZON_PA")
# =========== Source: AMAZON_PA ============
# Target: AMAZON_RO
# Adaptation experiments with source domain AMAZON_PA: for each target domain
# enqueue train, test, metric computation, and averaged PR-curve generation.
Schedule.append("python Main_Train_Test.py --s_dataset AMAZON_PA --t_dataset AMAZON_RO --mode adaptation --match early --phase train --runs 5 --patches_dimension 128 --image_channels 7 --num_classes 2 --epochs 150 --batch_size 32 --ada_batch_size 1 --patience 10 --lr 0.0001 --optimizer Adam --data_augmentation True --fixed_tiles True --balanced_tr True --buffer True --buffer_dimension_out 2 --buffer_dimension_in 0 --percent_of_last_reference_in_actual_reference 100 --percent_of_positive_pixels_in_actual_reference 2 --data_type .npy --dataset_main_path ../../..//Datasets/")
Schedule.append("python Main_Train_Test.py --s_dataset AMAZON_PA --t_dataset AMAZON_RO --mode adaptation --match early --phase test --patches_dimension 128 --image_channels 7 --num_classes 2 --batch_size 32 --ada_batch_size 1 --lr 0.0001 --optimizer Adam --data_augmentation True --fixed_tiles True --balanced_tr True --buffer True --buffer_dimension_out 2 --buffer_dimension_in 0 --percent_of_last_reference_in_actual_reference 100 --percent_of_positive_pixels_in_actual_reference 2 --data_type .npy --dataset_main_path ../../..//Datasets/")
Schedule.append("python Main_Compute_Metrics_MT.py --s_dataset AMAZON_PA --t_dataset AMAZON_RO --mode adaptation --match early --dataset_main_path ../../..//Datasets/ --Npoints 200 --eliminate_regions True --area_avoided 69 --buffer True --buffer_dimension_out 2 --buffer_dimension_in 0 --image_channels 7 --patches_dimension 128 --fixed_tiles True --data_type .npy")
Schedule.append("python Main_Compute_Create_Avg_Precision_Recall_Curve_Avg_Hit_map.py --s_dataset AMAZON_PA --t_dataset AMAZON_RO")
# Target: CERRADO_MA
Schedule.append("python Main_Train_Test.py --s_dataset AMAZON_PA --t_dataset CERRADO_MA --mode adaptation --match early --phase train --runs 5 --patches_dimension 128 --image_channels 7 --num_classes 2 --epochs 150 --batch_size 32 --ada_batch_size 1 --patience 10 --lr 0.0001 --optimizer Adam --data_augmentation True --fixed_tiles True --balanced_tr True --buffer True --buffer_dimension_out 2 --buffer_dimension_in 0 --percent_of_last_reference_in_actual_reference 100 --percent_of_positive_pixels_in_actual_reference 2 --data_type .npy --dataset_main_path ../../..//Datasets/")
Schedule.append("python Main_Train_Test.py --s_dataset AMAZON_PA --t_dataset CERRADO_MA --mode adaptation --match early --phase test --patches_dimension 128 --image_channels 7 --num_classes 2 --batch_size 32 --ada_batch_size 1 --lr 0.0001 --optimizer Adam --data_augmentation True --fixed_tiles True --balanced_tr True --buffer True --buffer_dimension_out 2 --buffer_dimension_in 0 --percent_of_last_reference_in_actual_reference 100 --percent_of_positive_pixels_in_actual_reference 2 --data_type .npy --dataset_main_path ../../..//Datasets/")
Schedule.append("python Main_Compute_Metrics_MT.py --s_dataset AMAZON_PA --t_dataset CERRADO_MA --mode adaptation --match early --dataset_main_path ../../..//Datasets/ --Npoints 200 --eliminate_regions True --area_avoided 69 --buffer True --buffer_dimension_out 2 --buffer_dimension_in 0 --image_channels 7 --patches_dimension 128 --fixed_tiles True --data_type .npy")
Schedule.append("python Main_Compute_Create_Avg_Precision_Recall_Curve_Avg_Hit_map.py --s_dataset AMAZON_PA --t_dataset CERRADO_MA")
# ========== Source: CERRADO_MA ============
# Target: AMAZON_RO
# Schedule.append("python Main_Train_Test.py --s_dataset CERRADO_MA --t_dataset AMAZON_RO --mode adaptation --match early --phase train --runs 5 --patches_dimension 128 --image_channels 7 --num_classes 2 --epochs 150 --batch_size 32 --ada_batch_size 1 --patience 10 --lr 0.0001 --optimizer Adam --data_augmentation True --fixed_tiles True --balanced_tr True --buffer True --buffer_dimension_out 2 --buffer_dimension_in 0 --percent_of_last_reference_in_actual_reference 100 --percent_of_positive_pixels_in_actual_reference 2 --data_type .npy --dataset_main_path ../../..//Datasets/")
# Schedule.append("python Main_Train_Test.py --s_dataset CERRADO_MA --t_dataset AMAZON_RO --mode adaptation --match early --phase test --patches_dimension 128 --image_channels 7 --num_classes 2 --batch_size 32 --ada_batch_size 1 --lr 0.0001 --optimizer Adam --data_augmentation True --fixed_tiles True --balanced_tr True --buffer True --buffer_dimension_out 2 --buffer_dimension_in 0 --percent_of_last_reference_in_actual_reference 100 --percent_of_positive_pixels_in_actual_reference 2 --data_type .npy --dataset_main_path ../../..//Datasets/")
# Schedule.append("python Main_Compute_Metrics_MT.py --s_dataset CERRADO_MA --t_dataset AMAZON_RO --mode adaptation --match early --dataset_main_path ../../..//Datasets/ --Npoints 200 --eliminate_regions True --area_avoided 69 --buffer True --buffer_dimension_out 2 --buffer_dimension_in 0 --image_channels 7 --patches_dimension 128 --fixed_tiles True --data_type .npy")
# Schedule.append("python Main_Compute_Create_Avg_Precision_Recall_Curve_Avg_Hit_map.py --s_dataset CERRADO_MA --t_dataset AMAZON_RO")
# Target: AMAZON_PA
# Schedule.append("python Main_Train_Test.py --s_dataset CERRADO_MA --t_dataset AMAZON_PA --mode adaptation --match early --phase train --runs 5 --patches_dimension 128 --image_channels 7 --num_classes 2 --epochs 150 --batch_size 32 --ada_batch_size 1 --patience 10 --lr 0.0001 --optimizer Adam --data_augmentation True --fixed_tiles True --balanced_tr True --buffer True --buffer_dimension_out 2 --buffer_dimension_in 0 --percent_of_last_reference_in_actual_reference 100 --percent_of_positive_pixels_in_actual_reference 2 --data_type .npy --dataset_main_path ../../..//Datasets/")
# Schedule.append("python Main_Train_Test.py --s_dataset CERRADO_MA --t_dataset AMAZON_PA --mode adaptation --match early --phase test --patches_dimension 128 --image_channels 7 --num_classes 2 --batch_size 32 --ada_batch_size 1 --lr 0.0001 --optimizer Adam --data_augmentation True --fixed_tiles True --balanced_tr True --buffer True --buffer_dimension_out 2 --buffer_dimension_in 0 --percent_of_last_reference_in_actual_reference 100 --percent_of_positive_pixels_in_actual_reference 2 --data_type .npy --dataset_main_path ../../..//Datasets/")
# Schedule.append("python Main_Compute_Metrics_MT.py --s_dataset CERRADO_MA --t_dataset AMAZON_PA --mode adaptation --match early --dataset_main_path ../../..//Datasets/ --Npoints 200 --eliminate_regions True --area_avoided 69 --buffer True --buffer_dimension_out 2 --buffer_dimension_in 0 --image_channels 7 --patches_dimension 128 --fixed_tiles True --data_type .npy")
# Schedule.append("python Main_Compute_Create_Avg_Precision_Recall_Curve_Avg_Hit_map.py --s_dataset CERRADO_MA --t_dataset AMAZON_PA")
# Run every scheduled command in order, blocking until each completes.
# Iterate the list directly instead of the un-idiomatic range(len(...)) indexing.
for command in Schedule:
    os.system(command)
|
import numpy as np
import networkx as nx
import os
import pandas as pd
# Maximum path length (in nodes) allowed when searching the closed graph.
MAX_NODES = 3
# Maximum path length allowed when searching the non-closed graph.
NON_CLOSED_MAX_NODES = 4
# Pickled networkx knowledge graphs (with / without deductive closure).
DIRECTED_NX_GRAPH = './knowledge_graphs/Directed_KG_triples_no-mesh.gpickle'
NON_CLOSED_DIRECTED_NX_GRAPH = './knowledge_graphs/non-closed_Directed_KG_triples_no-mesh.gpickle'
#NON_CLOSED_DIRECTED_NX_GRAPH = "../pheknowlator/PheKnowLator_full_NotClosed_NoOWLSemantics_Triples_Integers.gpickle"
# Gene symbol <-> Entrez id mapping files.
ENTREZ_MAPPINGS = "./entrez/gene_symbol_to_entrez_id.tsv"
ALIASES = "./entrez/gene_with_protein_product.txt"
CUO_REL = "<http://purl.obolibrary.org/obo/RO_0002411>" # causally upstream of
# Cached numpy dumps of mechanism->gene path search results.
PATHS_NP = 'numpy_dumps/mech2gene_paths.npy'
GENE_LABELS_NP = 'numpy_dumps/mech2gene_gene_labels.npy'
MECH_LABELS_NP = 'numpy_dumps/mech2gene_mech_labels.npy'
# Human-readable labels for ontology nodes.
ONTO_LABELS = "labels/all_labels.tsv"
#ONTO_LABELS = "../pheknowlator/PheKnowLator_full_NotClosed_NoOWLSemantics_NodeLabels.txt"
TISSUE_SPECIFICITY = "./entrez/genes_active_in_tissue.csv"
# Safety cap on the number of paths enumerated per query.
PATH_COUNT_LIMIT = 10000
# Variant/gene coordinate resources (BED files, GRCh38 assembly).
GRCH38_GENES = "./variants/refseq_hg38_genes.bed"
GRCH38_SNPS = "./variants/parsed_locii/pathogenic_snps.bed"
GIGGLE_INDICES = "./variants/locii_sort_b"
# Root/hub ontology nodes excluded from the graph before path searches:
# they connect to huge portions of the KG and yield uninformative paths.
# FIX: three entries (GO_0045893, GO_0061586, GO_0010621) contained a stray
# space before the closing '>', so they could never match a graph node label.
NODE_BLACKLIST = [ '<http://purl.obolibrary.org/obo/GO_0003674>',
            '<http://purl.obolibrary.org/obo/GO_0008150>',
            '<http://purl.obolibrary.org/obo/GO_0005575>',
            '<http://purl.obolibrary.org/obo/GO_0000989>',
            '<http://purl.obolibrary.org/obo/GO_0001134>',
            '<http://purl.obolibrary.org/obo/GO_0006357>',
            '<http://purl.obolibrary.org/obo/GO_0016591>',
            '<http://purl.obolibrary.org/obo/GO_0090575>',
            '<http://purl.obolibrary.org/obo/GO_0045892>',
            '<http://purl.obolibrary.org/obo/GO_0045893>',
            '<http://purl.obolibrary.org/obo/GO_0061586>',
            '<http://purl.obolibrary.org/obo/GO_0010621>',
            '<http://purl.obolibrary.org/obo/GO_0001077>',
            '<http://purl.obolibrary.org/obo/GO_0005515>',
            '<http://purl.obolibrary.org/obo/GO_0008134>',
            '<http://purl.obolibrary.org/obo/GO_0006366>',
            '<https://reactome.org/content/detail/R-HSA-1643685>',
            '<http://www.w3.org/2002/07/owl#Class>',
            '<http://www.w3.org/2002/07/owl#NamedIndividual>',
            '<http://purl.obolibrary.org/obo/go.owl>',
            '<http://www.geneontology.org/formats/oboInOwl#SubsetProperty>',
        ]
NODE_INDEX_FILENAME = 'embeddings/node_idx_for_node2vec.txt'
EMBEDDINGS_FILENAME = 'embeddings/node2vec_embeddings_no-mesh_q3_d32.txt'
# Cached binary copy of the text embeddings; created on first run below.
EMBEDDINGS_NUMPY = "%s.npy" % EMBEDDINGS_FILENAME
# Initialize lookup structures
####################################################################
# Node label -> integer node id, loaded from "<id>\t<label>" lines.
NODE_INDEX = dict()
with open(NODE_INDEX_FILENAME, 'r') as fd:
    for line in fd:  # stream lines instead of materializing readlines()
        chunks = line.split('\t')
        # rstrip('\n') instead of the original [:-1], which silently dropped
        # the label's last character when the file lacks a trailing newline.
        NODE_INDEX[chunks[1].rstrip('\n')] = int(chunks[0])
EMB = None
if os.path.isfile(EMBEDDINGS_NUMPY):
    EMB = np.load(EMBEDDINGS_NUMPY)
else:
    # Parse the node2vec text format: a "<count> <dim>" header line followed
    # by "<node_id> <v1> ... <vdim>" rows; cache the result as .npy.
    # (The original used an is_header flag plus a bare `next` statement,
    # which is a no-op expression — `continue` was intended; enumerate()
    # makes the header handling explicit.)
    emb_size = 0
    with open(EMBEDDINGS_FILENAME, 'r') as fd:
        for line_nr, line in enumerate(fd):
            chunks = line.split(' ')
            if line_nr == 0:
                emb_size = int(chunks[1])
                EMB = np.zeros((int(chunks[0]), emb_size))
            else:
                node_id = int(chunks[0])
                EMB[node_id, :] = [float(c) for c in chunks[1:emb_size + 1]]
    np.save(EMBEDDINGS_NUMPY, EMB)
# Helper functions:
###################################################################
def get_mech_step_weights(nr_m, nr_t, current_t, strict=False):
    """Weight each of ``nr_m`` mechanism steps for time slice ``current_t``.

    The steps are partitioned into ``nr_t`` contiguous groups.  In strict
    mode a step weighs 1 exactly when its group is the current slice and 0
    otherwise; in soft mode the weight decays linearly with the distance
    between the step's group and the current slice.
    """
    weights = np.zeros(nr_m)
    penalty = 1.0 / nr_t
    groups = np.array_split(list(range(nr_m)), nr_t)
    for group_nr, group in enumerate(groups):
        # All steps in one group share the same weight; compute it once.
        if strict:
            group_weight = 1 if current_t == group_nr else 0
        else:
            group_weight = 1 - penalty * abs(current_t - group_nr) / 2
        for position in group:
            weights[position] = group_weight
    return weights
def get_graph():
    """Load the (closed) directed KG, minus blacklisted root/hub nodes."""
    graph = nx.read_gpickle(DIRECTED_NX_GRAPH)
    # Drop hub nodes whose path intersections are uninformative.
    # remove_nodes_from silently skips nodes absent from the graph, so the
    # original per-node has_node() guard is not needed.
    graph.remove_nodes_from(NODE_BLACKLIST)
    return graph
def get_non_closed_graph():
    """Load the non-closed directed KG as-is (no blacklist filtering)."""
    G = nx.read_gpickle(NON_CLOSED_DIRECTED_NX_GRAPH)
    # Get rid of some root/hub nodes for path intersections we don't care about:
    # NOTE(review): blacklist filtering is disabled here, unlike get_graph();
    # confirm this asymmetry is intentional.
    # for bl_node in NODE_BLACKLIST:
    #     if G.has_node(bl_node):
    #         G.remove_node(bl_node)
    return G
def get_variants(gene_name):
    """Return rs-ids of pathogenic SNPs that fall inside *gene_name*'s span.

    The gene span is the min start / max end over all isoform rows matching
    *gene_name* in the GRCH38_GENES BED file; SNPs come from GRCH38_SNPS.
    Returns an empty list when the gene is not found.
    NOTE(review): both BED files are re-read from disk on every call.
    """
    gene_df = pd.read_csv(GRCH38_GENES, sep="\t", na_filter=False, \
        usecols=[0, 1, 2, 3], names=['chrom', 'start', 'end', 'name'])
    # account for multiple isoforms
    this_gene_df = gene_df[gene_df['name'] == gene_name]
    variants = []
    if len(this_gene_df) > 0:
        start = this_gene_df.start.min()
        end = this_gene_df.end.max()
        # assumes every isoform lies on one chromosome — only the first
        # unique chrom is used. TODO confirm against the BED data.
        chrom = this_gene_df.chrom.unique()[0]
        snp_df = pd.read_csv(GRCH38_SNPS, sep="\t", na_filter=False, \
            usecols=[0, 1, 2, 4], names=['chrom', 'start', 'end', 'rs'])
        variants = list(snp_df[(snp_df['chrom'] == chrom) & (snp_df['start'] >= start) & (snp_df['end'] <= end)].rs)
    return variants
def get_node_embedding(node):
    """Return the node2vec embedding row for *node*.

    Raises KeyError when the node label is unknown to NODE_INDEX.
    """
    return EMB[NODE_INDEX[node]]
|
from distutils.core import setup

# Packaging metadata for the id3reader distribution.
# NOTE(review): package_data globs are resolved relative to the package
# directory, so 'id3reader/*' presumably looks inside id3reader/id3reader/ —
# verify the intended data files are actually picked up ('*' may be meant).
setup(name = "id3reader",
    version = "1.53.20070415",
    description = "Python module that reads ID3 metadata tags in MP3 files",
    author = "Ned Batchelder, Nik Reiman",
    packages = ['id3reader'],
    package_data = {'id3reader': ["id3reader/*"]}
    )
|
from bot_box.features import requests, bsp, os # imported in __init__.py
''' Sample Response
<forismatic>
<quote>
<quoteText>Don't miss all the beautiful colors of the rainbow looking for that pot of gold.</quoteText>
<quoteAuthor></quoteAuthor>
<senderName></senderName>
<senderLink></senderLink>
<quoteLink>
'''
def quote_me_api():
    """
    Fetch a random quote from the forismatic-style API at ``QUOTE_URL``.

    :return: the quote text followed by ``"- <author>"`` on a new line
    """
    url = os.environ.get('QUOTE_URL')
    # The original wrapped this body in a ``while True`` that always returned
    # on the first iteration — a straight-line body is equivalent.  It also
    # prepended a pointless empty string to the result.
    quote_resp = requests.get(url).text
    soup = bsp(quote_resp, 'html.parser')
    quote = soup.find('quote')
    text = quote.find('quotetext').get_text()
    author = quote.find('quoteauthor').get_text()
    return text + "\n- " + author
|
# @Time : 2020/6/26
# @Author : Shanlei Mu
# @Email : slmu@ruc.edu.cn
# UPDATE:
# @Time : 2020/8/7
# @Author : Shanlei Mu
# @Email : slmu@ruc.edu.cn
"""
recbole.model.loss
#######################
Common Loss in recommender system
"""
import torch
import torch.nn as nn
class BPRLoss(nn.Module):
    """Bayesian Personalized Ranking loss.

    Computes ``-mean(log(gamma + sigmoid(pos_score - neg_score)))``.

    Args:
        - gamma(float): Small value to avoid division by zero

    Shape:
        - Pos_score: (N)
        - Neg_score: (N), same shape as the Pos_score
        - Output: scalar.

    Examples::

        >>> loss = BPRLoss()
        >>> pos_score = torch.randn(3, requires_grad=True)
        >>> neg_score = torch.randn(3, requires_grad=True)
        >>> output = loss(pos_score, neg_score)
        >>> output.backward()
    """

    def __init__(self, gamma=1e-10):
        super(BPRLoss, self).__init__()
        self.gamma = gamma

    def forward(self, pos_score, neg_score):
        # pos_score / neg_score are tensors of per-pair scores, not scalars.
        margin = pos_score - neg_score
        return -torch.log(self.gamma + torch.sigmoid(margin)).mean()
class RegLoss(nn.Module):
    """L2 regularization: sum of the 2-norms of the given parameters.

    Returns ``None`` for an empty iterable, matching the original
    accumulator-based behaviour.
    """

    def __init__(self):
        super(RegLoss, self).__init__()

    def forward(self, parameters):
        total = None
        for weight in parameters:
            norm = weight.norm(2)
            total = norm if total is None else total + norm
        return total
class EmbLoss(nn.Module):
    """Embedding regularization: summed p-norms of all embeddings,
    normalized by the batch size (dim 0) of the last embedding.
    """

    def __init__(self, norm=2):
        super(EmbLoss, self).__init__()
        self.norm = norm

    def forward(self, *embeddings):
        device = embeddings[-1].device
        total = torch.zeros(1).to(device)
        for emb in embeddings:
            total = total + torch.norm(emb, p=self.norm)
        # Normalize by the last embedding's batch dimension.
        return total / embeddings[-1].shape[0]
class EmbMarginLoss(nn.Module):
    """Margin-based embedding regularization.

    Penalizes each row whose ``power``-norm (summed over dim 1)
    exceeds 1; rows at or below the margin contribute zero.
    """

    def __init__(self, power=2):
        super(EmbMarginLoss, self).__init__()
        self.power = power

    def forward(self, *embeddings):
        device = embeddings[-1].device
        one = torch.tensor(1.0).to(device)
        zero = torch.tensor(0.0).to(device)
        loss = torch.tensor(0.).to(device)
        for emb in embeddings:
            row_norm = torch.sum(emb ** self.power, dim=1, keepdim=True)
            loss = loss + torch.sum(torch.max(row_norm - one, zero))
        return loss
|
import json
import Stations.models as models
#from django.db.models import Avg
from datetime import datetime, timedelta
def sensorData2HighchartsData(data_rec, converter=None):
    """Serialize sensor records into a Highcharts series-data fragment.

    Each record becomes ``[<unix ms timestamp>,<value>],`` — note the
    trailing comma, preserved from the original output format.

    :param data_rec: iterable of records with ``timestamp`` (datetime) and
        ``val`` attributes
    :param converter: optional callable applied to each record's value
    :return: concatenated ``[ts,val],`` chunks as one string
    """
    chunks = []
    for record in data_rec:
        # highcharts uses unix timestamp in milliseconds
        ts = int(record.timestamp.timestamp() * 1000)
        val = record.val if converter is None else converter(record.val)
        chunks.append("[{},{}],".format(ts, val))
    # join() instead of repeated += concatenation (which is quadratic).
    return "".join(chunks)
def dataSince(sensor, since):
    """Return a SensorData queryset for *sensor* with timestamp >= *since*
    (inclusive, via ``timestamp__gte``)."""
    return models.SensorData.objects.filter(sensor=sensor,
                                            timestamp__gte=since)
def optionsFromObj(sensor_obj, start=None, end=None,
                   types=('AVG', 'MIN', 'MAX'), maxpoints=500):
    """Creates a JSON string to configure a highchart object.

    Date-range selection: no *start* -> last 2 days; *start* only -> from
    *start* onwards; both -> [start, end + 1 day).  One Highcharts series
    is built per value type in *types*.
    NOTE(review): ``maxpoints`` is currently unused (see the commented-out
    partitioning below).
    """
    if start is None:
        last2days = datetime.now() - timedelta(2)
        data = dataSince(sensor_obj, last2days)
    elif end is None:
        data = dataSince(sensor_obj, start)
    else:
        # end is a date boundary; +1 day makes the range inclusive of `end`.
        data = models.SensorData.objects.filter(
            sensor=sensor_obj,
            timestamp__range=(start, end+timedelta(1))
        )
    # if data.count() > maxpoints:
    # partition data to reduce number of points
    # data = data.annotate(val_avg=Avg('val')).values('timestamp__range')
    highchart_args = {}
    highchart_args['title'] = {
        'text': sensor_obj.get_formatted_name()
    }
    # The target DOM element is "<station slug>::<sensor slug>".
    chart = {'renderTo': "%s::%s" % (sensor_obj.station.slug, sensor_obj.slug)}
    xAxis = {'title': {'text': "Date/Time CST (GMT-6)"}}
    xAxis['type'] = "datetime"
    yAxis = {'title': {
        'text': sensor_obj.get_sensor_type_display() + ' (%s)' % (str(sensor_obj.data_unit))
    }}
    lbl = {}
    lbl['format'] = '{value} ' + sensor_obj.data_unit
    yAxis['labels'] = lbl
    tooltip = {'valueSuffix': sensor_obj.data_unit}
    tooltip['valueDecimals'] = 2
    #Create a list of series objects for each requested type of data
    seriesList = []
    ValType = dict(models.VALUE_TYPE)
    for t in types:
        try:
            thisdat = data.filter(val_type=t)
            series = {'name': ValType[t]}
            series['tooltip'] = tooltip
            # Highcharts expects [unix ms timestamp, value] pairs.
            series['data'] = [
                [d_obj.timestamp.timestamp()*1000,
                 d_obj.val] for d_obj in thisdat]
            seriesList.append(series)
        except models.SensorData.DoesNotExist:
            # NOTE(review): .filter() never raises DoesNotExist (it returns
            # an empty queryset), so this handler looks dead — confirm.
            pass
    highchart_args['chart'] = chart
    highchart_args['xAxis'] = xAxis
    highchart_args['yAxis'] = yAxis
    highchart_args['series'] = seriesList
    json_str = json.dumps(highchart_args, indent=2)
    return json_str
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from msrest.serialization import Model
class BuildDefinitionCounter(Model):
    """BuildDefinitionCounter.

    :param id: The unique Id of the Counter.
    :type id: int
    :param seed: This is the original counter value.
    :type seed: long
    :param value: This is the current counter value.
    :type value: long
    """

    # msrest (de)serialization map: attribute name -> wire key and wire type.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'int'},
        'seed': {'key': 'seed', 'type': 'long'},
        'value': {'key': 'value', 'type': 'long'}
    }

    def __init__(self, id=None, seed=None, value=None):
        super(BuildDefinitionCounter, self).__init__()
        self.id = id
        self.seed = seed
        self.value = value
|
from socket import *
class POP3():
    """Minimal hand-rolled POP3 client over a raw TCP socket (port 110).

    NOTE(review): every exchange assumes a single recv(1024) returns the
    complete server response — confirm against larger mailboxes/servers.
    """

    def connect(self, mailserver):
        """Open a TCP connection to *mailserver*:110 and read the greeting."""
        self.clientSocket = socket(AF_INET, SOCK_STREAM)
        self.mailserver=mailserver
        self.clientSocket.connect((self.mailserver, 110))
        self.connect_recv = self.clientSocket.recv(1024).decode()
        print(self.connect_recv)

    def auth(self, user_login, psw_login):
        """Log in with USER/PASS, then fetch the LIST of messages.

        Populates ``self.MailList`` with dicts of ``id`` and ``size``
        parsed from the multi-line LIST response.
        """
        self.user_login=str(user_login)
        self.psw_login=str(psw_login)
        self.clientSocket.sendall(('USER ' + self.user_login + '\r\n').encode())
        recv = self.clientSocket.recv(1024).decode()
        print(recv)
        self.clientSocket.sendall(('PASS ' + self.psw_login + '\r\n').encode())
        self.auth_recv = self.clientSocket.recv(1024).decode()
        print(self.auth_recv)
        self.clientSocket.sendall(('LIST\r\n').encode())
        recv = self.clientSocket.recv(1024).decode()
        # Strip the "+OK " prefix and trailing CRLF, drop the status line
        # and the terminating "." line; each remaining line is "<id> <size>".
        list = recv[4:-2].split('\r\n')[1:-1]
        self.MailList = []
        for _t in list:
            _d = {}
            _d['id'] = _t.split(' ')[0]
            _d['size'] = _t.split(' ')[1]
            self.MailList.append(_d)
        # print(list)
        # print(self.MailList)

    def getStat(self):
        """Refresh ``self.MailList`` via LIST, issue STAT, and return the
        number of messages in the mailbox."""
        self.clientSocket.sendall(('LIST\r\n').encode())
        recv = self.clientSocket.recv(1024).decode()
        list = recv[4:-2].split('\r\n')[1:-1]
        self.MailList = []
        for _t in list:
            _d = {}
            _d['id'] = _t.split(' ')[0]
            _d['size'] = _t.split(' ')[1]
            self.MailList.append(_d)
        self.clientSocket.sendall(('STAT\r\n').encode())
        recv = self.clientSocket.recv(1024).decode()
        print(recv)
        self.len_mail_list=len(self.MailList)
        return len(self.MailList)

    def getAllMail(self):
        """RETR every message into ``self.mailList`` (list of raw strings).

        Returns False if any RETR response is not '+OK'; otherwise returns
        None after fetching all messages.
        """
        self.clientSocket.sendall(('LIST\r\n').encode())
        recv = self.clientSocket.recv(1024).decode()
        list = recv[4:-2].split('\r\n')[1:-1]
        self.MailList = []
        for _t in list:
            _d = {}
            _d['id'] = _t.split(' ')[0]
            _d['size'] = _t.split(' ')[1]
            self.MailList.append(_d)
        print('len_mail_list',len(self.MailList))
        self.mailList = []
        for _t in self.MailList:
            self.clientSocket.sendall(('RETR ' + _t['id'] + '\r\n').encode())
            _ = self.clientSocket.recv(1024).decode()
            if not '+OK' in _:
                return False
            _mail = ''
            _size = int(_t['size'])
            # The first recv may already contain the start of the message
            # body after the "... octets\r\n" status line; keep that part
            # and count it against the advertised size.
            if('Received: from'in _):
                # print('contains', _)
                b,start=_.split('octets\r\n', 1)
                _size -= len(start)
                _mail += start
                # print('split', start)
            # print('first chunk received', _mail)
            while _size > 0:
                _ = self.clientSocket.recv(1024).decode()
                # print('subsequent chunk received', _)
                _size -= len(_)
                _mail = _mail + _
            # print('total received this round', _mail)
            # Format the mail
            # _mailobj = email.message_from_string(_mail)
            self.mailList.append(_mail)
        print("get all mails")

    def delete(self, index):
        """DELE the message at 0-based *index* (POP3 ids are 1-based)."""
        print(self.len_mail_list,'删除第',index)
        self.clientSocket.sendall(('DELE ' + str(index+1) + '\r\n').encode())
        print('dele')
        _= self.clientSocket.recv(1024).decode()
        print(_,self.getStat())

    def newpop3(self):
        """Return the stored (server, user, password) connection tuple."""
        return self.mailserver,self.user_login,self.psw_login

    def quit(self):
        """Send QUIT and read the server's final response."""
        # if not self.loginSucc:
        #     self.logger.error('Please login first!')
        #     return False, 'please log in first'
        self.clientSocket.sendall(('QUIT \r\n').encode())
        _ = self.clientSocket.recv(1024)
        print(_)
|
# ==================================================================================
#
# Copyright (c) 2021 Samsung Electronics Co., Ltd. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ==================================================================================
class Constants:
    """Constant values used by the xapp framework: registration URL/port
    templates, RMR health-check message types, and config env vars."""
    # xapp registration constants
    # Port env-var templates, filled with (xapp name, namespace).
    SERVICE_HTTP = "SERVICE_{}_{}_HTTP_PORT"
    SERVICE_RMR = "SERVICE_{}_{}_RMR_PORT"
    CONFIG_PATH = "/ric/v1/config"
    # appmgr registration endpoints, filled with (service name, namespace).
    REGISTER_PATH = "http://service-{}-appmgr-http.{}:8080/ric/v1/register"
    DEREGISTER_PATH = "http://service-{}-appmgr-http.{}:8080/ric/v1/deregister"
    DEFAULT_PLT_NS = "ricplt"
    DEFAULT_XAPP_NS = "ricxapp"
    # message-type constants
    RIC_HEALTH_CHECK_REQ = 100
    RIC_HEALTH_CHECK_RESP = 101
    # environment variable with path to configuration file
    CONFIG_FILE_ENV = "CONFIG_FILE"
|
from eth2spec.test.context import spec_state_test, with_all_phases
from eth2spec.test.helpers.state import next_epoch_via_block
from eth2spec.test.helpers.attestations import next_epoch_with_attestations
def check_finality(spec,
                   state,
                   prev_state,
                   current_justified_changed,
                   previous_justified_changed,
                   finalized_changed):
    """Assert which checkpoints advanced between *prev_state* and *state*.

    For each checkpoint flagged as changed, the epoch must have increased
    and the root must differ; otherwise the checkpoint must be unchanged.
    """
    transitions = (
        (state.current_justified_checkpoint,
         prev_state.current_justified_checkpoint, current_justified_changed),
        (state.previous_justified_checkpoint,
         prev_state.previous_justified_checkpoint, previous_justified_changed),
        (state.finalized_checkpoint,
         prev_state.finalized_checkpoint, finalized_changed),
    )
    for new_cp, old_cp, expect_changed in transitions:
        if expect_changed:
            assert new_cp.epoch > old_cp.epoch
            assert new_cp.root != old_cp.root
        else:
            assert new_cp == old_cp
@with_all_phases
@spec_state_test
def test_finality_no_updates_at_genesis(spec, state):
    """Justification/finalization must stay untouched for the two epochs
    following genesis, even with full current-epoch attestations."""
    assert spec.get_current_epoch(state) == spec.GENESIS_EPOCH
    yield 'pre', state
    blocks = []
    for epoch in range(2):
        prev_state, new_blocks, state = next_epoch_with_attestations(spec, state, True, False)
        blocks += new_blocks
        # justification/finalization skipped at GENESIS_EPOCH
        if epoch == 0:
            check_finality(spec, state, prev_state, False, False, False)
        # justification/finalization skipped at GENESIS_EPOCH + 1
        elif epoch == 1:
            check_finality(spec, state, prev_state, False, False, False)
    yield 'blocks', blocks
    yield 'post', state
@with_all_phases
@spec_state_test
def test_finality_rule_4(spec, state):
    """Finality rule 4: two consecutive justified epochs finalize the
    earlier one (the previous current-justified checkpoint)."""
    # get past first two epochs that finality does not run on
    next_epoch_via_block(spec, state)
    next_epoch_via_block(spec, state)
    yield 'pre', state
    blocks = []
    for epoch in range(2):
        prev_state, new_blocks, state = next_epoch_with_attestations(spec, state, True, False)
        blocks += new_blocks
        if epoch == 0:
            # First full epoch only justifies the current epoch.
            check_finality(spec, state, prev_state, True, False, False)
        elif epoch == 1:
            # rule 4 of finality
            check_finality(spec, state, prev_state, True, True, True)
            assert state.finalized_checkpoint == prev_state.current_justified_checkpoint
    yield 'blocks', blocks
    yield 'post', state
@with_all_phases
@spec_state_test
def test_finality_rule_1(spec, state):
    """Finality rule 1: justification via previous-epoch attestations
    finalizes the previous-justified checkpoint after three epochs."""
    # get past first two epochs that finality does not run on
    next_epoch_via_block(spec, state)
    next_epoch_via_block(spec, state)
    yield 'pre', state
    blocks = []
    for epoch in range(3):
        # Attest only for the previous epoch (False, True) each round.
        prev_state, new_blocks, state = next_epoch_with_attestations(spec, state, False, True)
        blocks += new_blocks
        if epoch == 0:
            check_finality(spec, state, prev_state, True, False, False)
        elif epoch == 1:
            check_finality(spec, state, prev_state, True, True, False)
        elif epoch == 2:
            # finalized by rule 1
            check_finality(spec, state, prev_state, True, True, True)
            assert state.finalized_checkpoint == prev_state.previous_justified_checkpoint
    yield 'blocks', blocks
    yield 'post', state
@with_all_phases
@spec_state_test
def test_finality_rule_2(spec, state):
    """Finality rule 2: a justified epoch, a skipped epoch, then
    previous-epoch attestations finalize the previous-justified checkpoint."""
    # get past first two epochs that finality does not run on
    next_epoch_via_block(spec, state)
    next_epoch_via_block(spec, state)
    yield 'pre', state
    blocks = []
    for epoch in range(3):
        if epoch == 0:
            # Full current-epoch participation: justify current epoch.
            prev_state, new_blocks, state = next_epoch_with_attestations(spec, state, True, False)
            check_finality(spec, state, prev_state, True, False, False)
        elif epoch == 1:
            # No attestations: justification stalls for one epoch.
            prev_state, new_blocks, state = next_epoch_with_attestations(spec, state, False, False)
            check_finality(spec, state, prev_state, False, True, False)
        elif epoch == 2:
            prev_state, new_blocks, state = next_epoch_with_attestations(spec, state, False, True)
            # finalized by rule 2
            check_finality(spec, state, prev_state, True, False, True)
            assert state.finalized_checkpoint == prev_state.previous_justified_checkpoint
        blocks += new_blocks
    yield 'blocks', blocks
    yield 'post', state
@with_all_phases
@spec_state_test
def test_finality_rule_3(spec, state):
    """
    Test scenario described here
    https://github.com/ethereum/eth2.0-specs/issues/611#issuecomment-463612892

    Mixed participation across epochs exercises finality rule 3 in the
    final step (finalizing the current-justified checkpoint).
    """
    # get past first two epochs that finality does not run on
    next_epoch_via_block(spec, state)
    next_epoch_via_block(spec, state)
    yield 'pre', state
    blocks = []
    prev_state, new_blocks, state = next_epoch_with_attestations(spec, state, True, False)
    blocks += new_blocks
    check_finality(spec, state, prev_state, True, False, False)
    # In epoch N, JE is set to N, prev JE is set to N-1
    prev_state, new_blocks, state = next_epoch_with_attestations(spec, state, True, False)
    blocks += new_blocks
    check_finality(spec, state, prev_state, True, True, True)
    # In epoch N+1, JE is N, prev JE is N-1, and not enough messages get in to do anything
    prev_state, new_blocks, state = next_epoch_with_attestations(spec, state, False, False)
    blocks += new_blocks
    check_finality(spec, state, prev_state, False, True, False)
    # In epoch N+2, JE is N, prev JE is N, and enough messages from the previous epoch get in to justify N+1.
    # N+1 now becomes the JE. Not enough messages from epoch N+2 itself get in to justify N+2
    prev_state, new_blocks, state = next_epoch_with_attestations(spec, state, False, True)
    blocks += new_blocks
    # rule 2
    check_finality(spec, state, prev_state, True, False, True)
    # In epoch N+3, LJE is N+1, prev LJE is N, and enough messages get in to justify epochs N+2 and N+3.
    prev_state, new_blocks, state = next_epoch_with_attestations(spec, state, True, True)
    blocks += new_blocks
    # rule 3
    check_finality(spec, state, prev_state, True, True, True)
    assert state.finalized_checkpoint == prev_state.current_justified_checkpoint
    yield 'blocks', blocks
    yield 'post', state
|
from setuptools import setup
# Single source of truth for version/URL, reused below in download_url.
__version__ = "0.9.0"
__url__ = "https://github.com/carlskeide/is-healthy"

# Single-module distribution: the "is-healthy" console script maps to
# is_healthy.cli.
setup(
    name="is-healthy",
    version=__version__,
    description="Mini healthcheck CLI",
    author="Carl Skeide",
    author_email="carl@skeide.se",
    license="MIT",
    keywords=[
        "healthcheck",
    ],
    classifiers=[],
    py_modules=[
        "is_healthy"
    ],
    include_package_data=True,
    zip_safe=False,
    url=__url__,
    download_url="{}/archive/{}.tar.gz".format(__url__, __version__),
    install_requires=[
        "requests"
    ],
    entry_points={
        "console_scripts": [
            "is-healthy = is_healthy:cli",
        ]
    }
)
|
import logging
from typing import List, Optional
from overrides import overrides
import torch
from allennlp.training.learning_rate_schedulers.learning_rate_scheduler import LearningRateScheduler
logger = logging.getLogger(__name__)
@LearningRateScheduler.register("slanted_triangular")
class SlantedTriangular(LearningRateScheduler):
    def __init__(
        self,
        optimizer: torch.optim.Optimizer,
        num_epochs: int,
        num_steps_per_epoch: Optional[int] = None,
        cut_frac: float = 0.1,
        ratio: int = 32,
        last_epoch: int = -1,
        gradual_unfreezing: bool = False,
        discriminative_fine_tuning: bool = False,
        decay_factor: float = 0.38,
    ) -> None:
        """Slanted-triangular LR schedule with optional gradual unfreezing
        and discriminative fine-tuning (per-layer decayed learning rates).

        # Parameters
        optimizer : the wrapped optimizer.
        num_epochs : total number of training epochs.
        num_steps_per_epoch : batches per epoch, if known up front.
        cut_frac : fraction of steps spent increasing the LR.
        ratio : ratio of the max LR to the smallest LR.
        gradual_unfreezing : unfreeze one extra param group per epoch.
        discriminative_fine_tuning : scale each group's base LR by
            ``decay_factor ** depth`` (deeper groups get smaller LRs).
        """
        self.num_epochs = num_epochs
        self.num_steps_per_epoch = num_steps_per_epoch
        self.cut_frac = cut_frac
        self.ratio = ratio
        self.gradual_unfreezing = gradual_unfreezing
        # Whether some layers are currently still frozen.
        self.freezing_current = self.gradual_unfreezing
        self.is_first_epoch = True
        # Cumulative batch counts recorded at each epoch boundary.
        self.batch_num_total_epoch_end: List[int] = []
        if self.gradual_unfreezing:
            assert not optimizer.param_groups[-1]["params"], "The default group should be empty."
        if self.gradual_unfreezing or discriminative_fine_tuning:
            assert len(optimizer.param_groups) > 2, (
                "There should be at least 3 param_groups (2 + empty default group)"
                " for gradual unfreezing / discriminative fine-tuning to make sense."
            )
        super().__init__(optimizer, last_epoch)
        self.step()
        if discriminative_fine_tuning:
            # Walk param groups from last (topmost layers) to first, shrinking
            # the base LR geometrically for each non-empty group.
            exponent = 0
            for i in range(len(self.base_values) - 1, -1, -1):
                param_group = optimizer.param_groups[i]
                if param_group["params"]:
                    param_group["lr"] = self.base_values[i] * decay_factor ** exponent
                    self.base_values[i] = param_group["lr"]
                    exponent += 1
        self.last_batch_num_total = -1
        self.step_batch(0)
    @overrides
    def step(self, metric: float = None) -> None:
        """Advance one epoch: record the batch count at the epoch boundary
        and, when gradual unfreezing is on, unfreeze one more layer group."""
        self.last_epoch += 1
        if len(self.batch_num_total_epoch_end) == 0:
            self.batch_num_total_epoch_end.append(0)
        else:
            self.batch_num_total_epoch_end.append(self.last_batch_num_total)
        if self.gradual_unfreezing:
            # the method is called once when initialising before the
            # first epoch (is_first_epoch) and then at the start of
            # each epoch.
            if self.is_first_epoch:
                num_layers_to_unfreeze = 1
                self.is_first_epoch = False
            else:
                num_layers_to_unfreeze = self.last_epoch + 1
            if num_layers_to_unfreeze >= len(self.optimizer.param_groups) - 1:
                logger.info("Gradual unfreezing finished. Training all layers.")
                self.freezing_current = False
            else:
                logger.info(
                    f"Gradual unfreezing. Training only the top {num_layers_to_unfreeze} layers."
                )
            # Param groups are ordered bottom-up; iterate reversed so index 0
            # is the topmost group, and enable grads for the unfrozen prefix.
            for i, param_group in enumerate(reversed(self.optimizer.param_groups)):
                for param in param_group["params"]:
                    param.requires_grad = bool(i <= num_layers_to_unfreeze)
def step_batch(self, batch_num_total: int = None):
    """Record the global batch number after each batch.

    Parameters
    ----------
    batch_num_total : int, optional
        Total number of batches processed so far. If ``None``, the
        previous count is simply incremented by one.
    """
    if batch_num_total is None:
        batch_num_total = self.last_batch_num_total + 1
    self.last_batch_num_total = batch_num_total
    # NOTE(review): removed a leftover `reveal_type(self)` call — that name
    # exists only under mypy and raises NameError when executed.
# -*- coding: utf-8 -*-
"""
"""
import os
from datetime import datetime
from typing import Union, Optional, Any, List, NoReturn
from numbers import Real
import numpy as np
np.set_printoptions(precision=5, suppress=True)
import pandas as pd
from ..utils.common import (
ArrayLike,
get_record_list_recursive,
)
from ..base import OtherDataBase
__all__ = [
"PPGBP",
]
class PPGBP(OtherDataBase):
    """
    PPG-BP database reader (ref. [1]).

    About PPG-BP
    ------------
    1. the PPG sensor:
       1.1. sensor model was SEP9AF-2 (SMPLUS Company, Korea)
       1.2. contains dual LED with 660nm (Red light) and 905 nm (Infrared) wavelengths
       1.3. sampling rate 1 kHz and 12-bit ADC
       1.4. hardware filter design is 0.5‒12Hz bandpass
    more to be written

    NOTE
    ----
    1. PPG analysis tips (ref. [1],[2]):
       1.1. taking the first and second derivatives of the PPG signals may help
            in detecting the informative inflection points more accurately

    Usage
    -----
    1. blood pressure prediction from PPG

    References
    ----------
    [1] Liang Y, Chen Z, Liu G, et al. A new, short-recorded photoplethysmogram dataset for blood pressure monitoring in China[J]. Scientific data, 2018, 5: 180020.
    [2] Allen J. Photoplethysmography and its application in clinical physiological measurement[J]. Physiological measurement, 2007, 28(3): R1.
    [3] Elgendi M. On the analysis of fingertip photoplethysmogram signals[J]. Current cardiology reviews, 2012, 8(1): 14-25.
    [4] https://figshare.com/articles/PPG-BP_Database_zip/5459299/3
    """

    def __init__(self, db_dir:str, working_dir:Optional[str]=None, verbose:int=2, **kwargs:Any) -> NoReturn:
        """
        Parameters
        ----------
        db_dir: str,
            storage path of the database
        working_dir: str, optional,
            working directory, to store intermediate files and log file
        verbose: int, default 2,
            log verbosity
        kwargs: auxiliary key word arguments
        """
        super().__init__(db_name="PPG_BP", db_dir=db_dir, working_dir=working_dir, verbose=verbose, **kwargs)
        self.ppg_data_dir = None  # directory of the per-subject .txt recordings
        # NOTE(review): attribute name (with its typo) kept for backward compatibility
        self.unkown_file = None
        self.ann_file = None      # spreadsheet holding the subject annotations
        self.form_paths()
        self.fs = 1000  # sampling frequency in Hz (ref. [1])
        # record names are the numeric subject prefixes of files like "175_1.txt"
        self._all_records = sorted(
            {fn.split("_")[0] for fn in os.listdir(self.ppg_data_dir)},
            key=lambda r: int(r),
        )
        self.rec_ext = "txt"
        self.ann_items = [
            "Num.", "subject_ID",
            "Sex(M/F)", "Age(year)", "Height(cm)", "Weight(kg)", "BMI(kg/m^2)",
            "Systolic Blood Pressure(mmHg)", "Diastolic Blood Pressure(mmHg)", "Heart Rate(b/m)",
            "Hypertension", "Diabetes", "cerebral infarction", "cerebrovascular disease",
        ]

    def form_paths(self) -> NoReturn:
        """Set the paths of the data directory and the annotation files."""
        self.ppg_data_dir = os.path.join(self.db_dir, "0_subject")
        self.unkown_file = os.path.join(self.db_dir, "Table 1.xlsx")
        self.ann_file = os.path.join(self.db_dir, "PPG-BP dataset.xlsx")

    def get_subject_id(self, rec_no:int) -> int:
        """
        Parameters
        ----------
        rec_no: int,
            number of the record, or "subject_ID"

        Returns
        -------
        int, the `subject_id` corr. to `rec_no`
        """
        return int(self._all_records[rec_no])

    def database_info(self, detailed:bool=False) -> NoReturn:
        """Print information about the database.

        Parameters
        ----------
        detailed: bool, default False,
            if False, a short introduction of the database will be printed,
            if True, the docstring of the class will be printed additionally
        """
        raw_info = {}  # TODO: fill in a short summary of the database
        print(raw_info)
        if detailed:
            print(self.__doc__)

    def load_ppg_data(self, rec_no:int, seg_no:int, verbose:int=None) -> np.ndarray:
        """Load one PPG segment of one subject.

        Parameters
        ----------
        rec_no: int,
            number of the record, or "subject_ID"
        seg_no: int,
            number of the segment measured from the subject
        verbose: int, optional,
            log verbosity; defaults to `self.verbose`

        Returns
        -------
        data: ndarray,
            the ppg data
        """
        verbose = self.verbose if verbose is None else verbose
        rec_fn = f"{self._all_records[rec_no]}_{seg_no}.{self.rec_ext}"
        # BUGFIX: the path was previously built by plain string concatenation
        # (`self.ppg_data_dir + rec_fn`), which drops the path separator.
        with open(os.path.join(self.ppg_data_dir, rec_fn), "r") as f:
            data = f.readlines()
        # each file holds a single line of tab-separated integer samples
        data = np.array([float(i) for i in data[0].split("\t") if len(i) > 0]).astype(int)
        if verbose >= 2:
            import matplotlib.pyplot as plt
            fig, ax = plt.subplots(figsize=(8, 4))
            ax.plot(np.arange(0, len(data)/self.fs, 1/self.fs), data)
            plt.show()
        return data

    def load_ann(self, rec_no:Optional[int]=None) -> pd.DataFrame:
        """Load annotations from the annotation spreadsheet.

        Parameters
        ----------
        rec_no: int, optional,
            number of the record, or "subject_ID",
            if not specified, then all annotations will be returned

        Returns
        -------
        df_ann: DataFrame,
            the annotations
        """
        df_ann = pd.read_excel(self.ann_file)
        # the real header is stored in the first data row of the sheet
        df_ann.columns = df_ann.iloc[0]
        df_ann = df_ann[1:].reset_index(drop=True)
        if rec_no is None:
            return df_ann
        df_ann = df_ann[df_ann["subject_ID"]==int(self._all_records[rec_no])].reset_index(drop=True)
        return df_ann

    def load_diagnosis(self, rec_no:int) -> Union[List[str],list]:
        """
        Parameters
        ----------
        rec_no: int,
            number of the record, or "subject_ID"

        Returns
        -------
        diagnosis: list,
            the list of diagnoses, or an empty list for normal subjects
        """
        diagnosis_items = [
            "Hypertension", "Diabetes", "cerebral infarction", "cerebrovascular disease",
        ]
        df_ann = self.load_ann(rec_no)[diagnosis_items].dropna(axis=1)
        diagnosis = [item for item in df_ann.iloc[0].tolist() if item != "Normal"]
        return diagnosis

    def get_patient_info(self, rec_no:int, items:Optional[List[str]]=None) -> Union[Real,str,pd.DataFrame]:
        """
        Parameters
        ----------
        rec_no: int,
            number of the record, or "subject_ID"
        items: list of str, optional,
            items of the patient information (e.g. sex, age, etc.)

        Returns
        -------
        if `items` contains only one item, the value of this item in the
        subject's information is returned; otherwise a DataFrame of the
        requested information of the subject is returned
        """
        if items is None or len(items) == 0:
            info_items = [
                "Sex(M/F)","Age(year)","Height(cm)","Weight(kg)","BMI(kg/m^2)",
                "Systolic Blood Pressure(mmHg)","Diastolic Blood Pressure(mmHg)","Heart Rate(b/m)",
            ]
        else:
            info_items = items
        df_info = self.load_ann(rec_no)[info_items]
        if len(info_items) == 1:
            return df_info.iloc[0].values[0]
        else:
            return df_info

    def plot(self,) -> NoReturn:
        """Plotting is not implemented for this database yet."""
        raise NotImplementedError
|
import hashlib
import json
import sys
import pytest
from asv.machine import Machine, MachineCollection
from write_asv_machine import floor_nearest, write_machine_info
@pytest.mark.parametrize("ram, expected_ram", [("98765", "98700"), ("", "")])
def test_write_asv_machine(mocker, ram, expected_ram):
    """write_machine_info floors the probed RAM figure and derives the machine
    name from the platform plus a short hash of the normalized machine info."""
    # Replace asv's hardware probing with fixed values so the test is hermetic.
    mocker.patch.object(
        Machine,
        "get_defaults",
        return_value={
            "ram": ram,
            "machine": "machine_name",
            "other_info": "other_info",
        },
    )
    # Expected name: "<platform>_<first 7 hex chars of the md5 of the info
    # dict (without the 'machine' key), serialized with sorted keys>".
    expected_name = "{}_{}".format(
        sys.platform,
        hashlib.md5(
            json.dumps(
                {"ram": expected_ram, "other_info": "other_info"},
                sort_keys=True,
            ).encode("utf-8")
        ).hexdigest()[:7],
    )
    save_mock = mocker.patch.object(MachineCollection, "save")
    write_machine_info()
    # The saved record must carry the floored RAM and the derived name.
    assert save_mock.call_args == mocker.call(
        expected_name,
        {
            "ram": expected_ram,
            "machine": expected_name,
            "other_info": "other_info",
        },
    )
@pytest.mark.parametrize(
    "x, floor, expected_result",
    [(199, 100, 100), (101, 100, 100), (99, 100, 0)],
)
def test_floor_nearest(x, floor, expected_result):
    """floor_nearest rounds x down to the nearest multiple of `floor`."""
    result = floor_nearest(x, floor)
    assert result == expected_result
|
# Google Doc used as the copy (content) source for this deploy.
COPY_GOOGLE_DOC_KEY = '1BmhlbCaQnH3UkLt7nOy7hgGQ0QngvgOka3Jl92ZaPjY'
# Deployment target slug.
DEPLOY_SLUG = 'brazil'
# Number of slides appended after the main content.
NUM_SLIDES_AFTER_CONTENT = 1
# Configuration — feature toggles for the presentation player.
AUDIO = False
VIDEO = False
FILMSTRIP = False
PROGRESS_BAR = True
|
# TODO: images should be loaded as well
# Known issue: line breaks and layout handling in the generated ppt
from lxml import etree
from itertools import groupby
from win32com import client
def dataCut(datapath):
    """Open a Word document and cut its text into runs of identical style.

    Returns a pair (styles_list, words_list): styles_list[k] is the style
    name of the k-th run and words_list[k] is its concatenated text. A new
    run starts whenever the style of consecutive words changes.
    """
    app = client.Dispatch("Word.Application")
    app.Visible = False
    document = app.Documents.Open(datapath)
    styles = []
    runs = []
    # Group consecutive words that share the same style into one run.
    for style_name, run_words in groupby(document.Words, key=lambda w: str(w.Style)):
        styles.append(style_name)
        runs.append(''.join(str(w) for w in run_words))
    return (styles, runs)
# Build the XML for a "Title + Subtitle" first slide.
def generateXMLSubtitle(presentation, slide, words_list):
    """Fill `slide` with a <title>/<subtitle> pair from the first two runs,
    appending each created element to `presentation`."""
    slide.set("layout", "title+subtitle")
    title = etree.SubElement(slide, "title")
    title.text = words_list[0].replace("\r", " ")
    presentation.append(title)
    subtitle = etree.SubElement(slide, "subtitle")
    subtitle.text = words_list[1].replace("\r", " ")
    presentation.append(subtitle)
# Build the XML for a "Title"-only first slide.
def generateXMLTitle(presentation, slide, words_list):
    """Fill `slide` with a single <title> from the first run."""
    slide.set("layout", "title")
    title = etree.SubElement(slide, "title")
    title.text = words_list[0].replace("\r", " ")
    presentation.append(title)
# Build the XML for a "Normal" text slide.
# `ind` is the index of the section title run; `i` the index of the body run.
def generateXMLNormal(presentation, slide, words_list, ind, i):
    """Title + paragraph slide; the paragraph text is kept verbatim."""
    slide.set("layout", "title+paragraph")
    title = etree.SubElement(slide, "title")
    title.text = words_list[ind].replace("\r", " ")
    presentation.append(title)
    paragraph = etree.SubElement(slide, "paragraph")
    paragraph.text = words_list[i]
    presentation.append(paragraph)
# Build the XML for a "List Paragraph" (bulleted) slide.
def generateXMLBullets(presentation, slide, words_list, ind, i):
    """Title from run `ind`; one level-0 <bullet> per non-empty line of run `i`."""
    slide.set("layout", "title+bullets")
    title = etree.SubElement(slide, "title")
    title.text = words_list[ind].replace("\r", "")
    presentation.append(title)
    for line in words_list[i].split("\r"):
        if not line:
            continue
        bullet = etree.SubElement(slide, "bullet")
        bullet.set("level", "0")
        bullet.text = line
        presentation.append(bullet)
# Build the XML for an image slide ("Subtle Emphasis" style runs).
def generateXMLImage(presentation, slide, words_list, ind, i):
    """Title from run `ind`; run `i` holds the image reference."""
    slide.set("layout", "title+paragraph+image")
    title = etree.SubElement(slide, "title")
    title.text = words_list[ind].replace("\r", "")
    presentation.append(title)
    image = etree.SubElement(slide, "image")
    image.text = words_list[i].replace("\r", "")
    presentation.append(image)
# Build the XML for a figure slide ("Strong" style runs).
def generateXMLFigure(presentation, slide, words_list, ind, i):
    """Figure slide; a run of the form "figure:label" also gets a <label>.

    Note: only the first two ':'-separated parts are used, matching the
    original behavior.
    """
    has_label = ":" in words_list[i]
    slide.set("layout", "title+figure+label" if has_label else "title+figure")
    title = etree.SubElement(slide, "title")
    title.text = words_list[ind].replace("\r", "")
    presentation.append(title)
    figure = etree.SubElement(slide, "figure")
    if has_label:
        parts = words_list[i].split(":")
        figure.text = parts[0].replace("\r", "")
        presentation.append(figure)
        label = etree.SubElement(slide, "label")
        label.text = parts[1].replace("\r", "")
        presentation.append(label)
    else:
        figure.text = words_list[i].replace("\r", "")
        presentation.append(figure)
def xmlGeneration(datapath, output):
    """Convert a styled Word document at `datapath` into slide XML at `output`.

    The document is cut into (style, text) runs by `dataCut`; the first run(s)
    become the title slide, and each "Heading 1" run starts a section whose
    following runs are mapped to slide layouts by their Word style.
    """
    # Gathering the data
    styles_list, words_list = dataCut(datapath)
    # Preparation of the xml file
    user = etree.Element("user")
    done = False
    # List that accumulates the successively created XML elements
    presentation = []
    presentation.append(etree.SubElement(user, "diapo"))
    ind = 0  # index of the current section heading in the run lists
    # Generation of the first slide. 2 possibilities: "Title" or "Title+Subtitle" slide
    slide = presentation[0]
    if styles_list[1] == "Subtitle":
        generateXMLSubtitle(presentation, slide, words_list)
        ind = 2
    else:
        generateXMLTitle(presentation, slide, words_list)
        ind = 1
    while (not done):
        # Check if we reached the last heading
        done = not ("Heading 1" in styles_list[ind + 1:])
        # Compute the run range belonging to the current heading
        if (not done):
            final_ind = ind + 1 + styles_list[ind + 1:].index('Heading 1')
        else:
            final_ind = len(styles_list)
        # One slide per run under this heading; the Word style picks the layout.
        for i in range(ind + 1, final_ind):
            presentation.append(etree.SubElement(user, "diapo"))
            slide = presentation[-1]
            if (styles_list[i] == "Normal"):
                generateXMLNormal(presentation, slide, words_list, ind, i)
            elif (styles_list[i] == "List Paragraph"):
                generateXMLBullets(presentation, slide, words_list, ind, i)
            elif (styles_list[i] == "Subtle Emphasis"):
                generateXMLImage(presentation, slide, words_list, ind, i)
            elif (styles_list[i] == "Strong"):
                generateXMLFigure(presentation, slide, words_list, ind, i)
        ind = final_ind
    # Save file
    filename = output
    with open(filename, 'wb') as f:
        f.write(etree.tostring(user))
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv
class pos_confirm(osv.osv_memory):
    """Wizard that posts journal entries for paid POS orders and reconciles
    the invoices of invoiced orders."""
    _name = 'pos.confirm'
    _description = 'Post POS Journal Entries'

    def action_confirm(self, cr, uid, ids, context=None):
        """Signal 'done' on paid orders whose statements are all confirmed,
        then reconcile move lines of invoiced orders with open invoices.

        Note: the wizard's own `ids` are deliberately overwritten by the
        order searches below.
        """
        order_obj = self.pool.get('pos.order')
        ids = order_obj.search(cr, uid, [('state','=','paid')], context=context)
        for order in order_obj.browse(cr, uid, ids, context=context):
            # An order may move to 'done' only when every payment line's
            # bank statement has been confirmed.
            todo = True
            for line in order.statement_ids:
                if line.statement_id.state != 'confirm':
                    todo = False
                    break
            if todo:
                order.signal_workflow('done')
        # Check if there is orders to reconcile their invoices
        ids = order_obj.search(cr, uid, [('state','=','invoiced'),('invoice_id.state','=','open')], context=context)
        for order in order_obj.browse(cr, uid, ids, context=context):
            invoice = order.invoice_id
            # Collect move lines on the invoice's account: from the invoice
            # move itself and from every statement move of the order.
            data_lines = [x.id for x in invoice.move_id.line_id if x.account_id.id == invoice.account_id.id]
            for st in order.statement_ids:
                for move in st.move_ids:
                    data_lines += [x.id for x in move.line_id if x.account_id.id == invoice.account_id.id]
            self.pool.get('account.move.line').reconcile(cr, uid, data_lines, context=context)
        return {}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
# ***************************************************************
# Copyright (c) 2021 Jittor. All Rights Reserved.
# Maintainers:
# Guowei Yang <471184555@qq.com>
# Wenyang Zhou <576825820@qq.com>
# Meng-Hao Guo <guomenghao1997@gmail.com>
# Dun Liang <randonlang@gmail.com>.
#
#
# This file is subject to the terms and conditions defined in
# file 'LICENSE.txt', which is part of this source code package.
# ***************************************************************
import jittor as jt
from jittor import init, Module
import numpy as np
import math
class Pool(Module):
    """2D pooling (max / min / mean) over NCHW input.

    For max/min (and, in ceil mode, mean) the pooling is emitted as a custom
    CUDA/CPU code op; otherwise it falls back to reindex + reduce.
    """

    def __init__(self, kernel_size, stride=None, padding=0, dilation=None, return_indices=None, ceil_mode=False, count_include_pad=True, op="maximum"):
        # dilation / return_indices are accepted for torch-style API
        # compatibility but not supported.
        assert dilation == None
        assert return_indices == None
        self.kernel_size = kernel_size
        self.op = op
        # stride defaults to the kernel size
        self.stride = stride if stride else kernel_size
        self.padding = padding
        self.ceil_mode = ceil_mode
        # with zero padding, count_include_pad makes no difference
        self.count_include_pad = count_include_pad and padding != 0

    def execute(self, x):
        """Pool `x` of shape (N, C, H, W); returns shape (N, C, h, w)."""
        N,C,H,W = x.shape
        if self.ceil_mode == False:
            h = (H+self.padding*2-self.kernel_size)//self.stride+1
            w = (W+self.padding*2-self.kernel_size)//self.stride+1
            use_code_op = self.op in ['maximum', 'minimum']
            # some second order avg_pool is require, so we don't use code op here
        else:
            # ceil mode: round the output size up instead of down
            h = (H+self.padding*2-self.kernel_size + self.stride - 1)//self.stride+1
            w = (W+self.padding*2-self.kernel_size + self.stride - 1)//self.stride+1
            use_code_op = self.op in ['maximum', 'minimum', 'mean']
        if use_code_op:
            if self.op == 'mean':
                if self.count_include_pad:
                    # fixed divisor: the full kernel area, padding included
                    count = f"int count = {self.kernel_size*self.kernel_size};"
                else:
                    # divisor is the number of in-bounds window elements
                    count = "int count = (k2_ - k2) * (k3_ - k3);"
                count += "float32 rcount = 1.0f / count;"
            else:
                count = ""
            # C body computing one output element: clamp the window to the
            # input bounds, then reduce over it with the configured op.
            forward_body = f'''{{
                int k3 = i3*{self.stride}-{self.padding};
                int k2 = i2*{self.stride}-{self.padding};
                int k3_ = min(k3 + {self.kernel_size}, in0_shape3);
                int k2_ = min(k2 + {self.kernel_size}, in0_shape2);
                k3 = max(0, k3);
                k2 = max(0, k2);
                @out(i0, i1, i2, i3) = init_{self.op}(out_type);
                {count}
                for (int p = k2; p < k2_; ++p)
                    for (int q = k3; q < k3_; ++q)
                        @out(i0, i1, i2, i3) = {self.op}(out_type, @out(i0, i1, i2, i3), @in0(i0, i1, p, q));
            }}'''
            # Gradient body: mean spreads dout/count over the window; max/min
            # routes dout to the first matching input element only (bo flag).
            backward_body = f'''{{
                int k3 = i3*{self.stride}-{self.padding};
                int k2 = i2*{self.stride}-{self.padding};
                int k3_ = min(k3 + {self.kernel_size}, in0_shape3);
                int k2_ = min(k2 + {self.kernel_size}, in0_shape2);
                k3 = max(0, k3);
                k2 = max(0, k2);
                {count}
                int bo=1;
                for (int p = k2; p < k2_ && bo; ++p)
                    for (int q = k3; q < k3_ && bo; ++q) {{
                        {"atomicAdd(&@out(i0,i1,p,q), @dout(i0,i1,i2,i3)/count);"
                        if self.op == "mean" else
                        f"""if (@pout(i0,i1,i2,i3) == @in0(i0,i1,p,q)) {{
                            atomicAdd(&@out(i0,i1,p,q), @dout(i0,i1,i2,i3)),
                            bo=0;
                        }}"""}
                    }}
            }}'''
            out = jt.code([N,C,h,w], x.dtype, [x],
                cuda_header="""
                    #include <ops/binary_op_defs.h>
                    #include <misc/cuda_limits.h>
                """,
                cuda_src=f'''
                    __global__ static void kernel1(@ARGS_DEF) {{
                        @PRECALC
                        int p3 = threadIdx.x;
                        int s3 = blockDim.x;
                        int p2 = threadIdx.y + blockIdx.x * blockDim.y;
                        int s2 = blockDim.y * gridDim.x;
                        int i1 = blockIdx.y;
                        int i0 = blockIdx.z;
                        for (int i3 = p3; i3 < out_shape3; i3 += s3)
                            for (int i2 = p2; i2 < out_shape2; i2 += s2)
                                {forward_body}
                    }}
                    int tx = min(1024, out_shape3);
                    int ty = min(1024 / tx, out_shape2);
                    int bx = (out_shape2 - 1) / ty + 1;
                    int by = out_shape1;
                    int bz = out_shape0;
                    dim3 s1(bx, by, bz);
                    dim3 s2(tx, ty);
                    kernel1<<<s1, s2>>>(@ARGS);
                ''',
                cuda_grad_src=[f'''
                    __global__ static void kernel3(@ARGS_DEF) {{
                        @PRECALC
                        int p3 = threadIdx.x;
                        int s3 = blockDim.x;
                        int p2 = threadIdx.y + blockIdx.x * blockDim.y;
                        int s2 = blockDim.y * gridDim.x;
                        int i1 = blockIdx.y;
                        int i0 = blockIdx.z;
                        for (int i3 = p3; i3 < pout_shape3; i3 += s3)
                            for (int i2 = p2; i2 < pout_shape2; i2 += s2)
                                {backward_body}
                    }}
                    cudaMemsetAsync(out_p, 0, out->size);
                    int tx = min(1024, pout_shape3);
                    int ty = min(1024 / tx, pout_shape2);
                    int bx = (pout_shape2 - 1) / ty + 1;
                    int by = pout_shape1;
                    int bz = pout_shape0;
                    dim3 s1_(bx, by, bz);
                    dim3 s2_(tx, ty);
                    kernel3<<<s1_, s2_>>>(@ARGS);
                '''],
                cpu_header='#include <ops/binary_op_defs.h>',
                cpu_src=f'''
                    using namespace std;
                    for (int i0=0; i0<out_shape0; i0++)
                        for (int i1=0; i1<out_shape1; i1++)
                            for (int i2=0; i2<out_shape2; i2++)
                                for (int i3=0; i3<out_shape3; i3++)
                                    {forward_body}
                ''',
                cpu_grad_src = [f'''
                    using namespace std;
                    std::memset(out_p, 0, out->size);
                    #define atomicAdd(a,b) (*a) += b
                    for (int i0=0; i0<pout_shape0; i0++)
                        for (int i1=0; i1<pout_shape1; i1++)
                            for (int i2=0; i2<pout_shape2; i2++)
                                for (int i3=0; i3<pout_shape3; i3++)
                                    {backward_body}
                '''])
            return out
        else:
            # TODO: backward
            # Fallback: gather each pooling window explicitly, then reduce.
            xx = x.reindex([N,C,h,w,self.kernel_size,self.kernel_size], [
                "i0",  # Nid
                "i1",  # Cid
                f"i2*{self.stride}-{self.padding}+i4",  # Hid
                f"i3*{self.stride}-{self.padding}+i5",  # Wid
            ])
            return xx.reduce(self.op, [4,5])
class AdaptiveAvgPool2d(Module):
    """Average pooling that produces a fixed output size (oh, ow)."""

    def __init__(self, output_size):
        # int -> square output; tuple/list -> (oh, ow); a None entry keeps
        # the corresponding input dimension.
        self.output_size = output_size

    def execute(self, x):
        if isinstance(self.output_size, int):
            oh = self.output_size
            ow = self.output_size
        elif isinstance(self.output_size, tuple) or isinstance(self.output_size, list):
            oh = x.shape[2] if self.output_size[0] is None else self.output_size[0]
            ow = x.shape[3] if self.output_size[1] is None else self.output_size[1]
        else:
            raise TypeError(f"AdaptiveAvgPool2d only support int, tuple or list input. Not support {type(self.output_size)} yet.")
        if oh == 1 and ow == 1:
            # global average pooling fast path
            return x.reduce("mean", [2,3], keepdims=True)
        N,C,H,W = x.shape
        # derive stride and kernel size so exactly (oh, ow) windows fit
        self.sh = math.floor(H / oh)
        self.sw = math.floor(W / ow)
        self.ksh = H - (oh - 1) * self.sh
        self.ksw = W - (ow - 1) * self.sw
        h = (H-self.ksh)//self.sh+1
        w = (W-self.ksw)//self.sw+1
        # gather each window explicitly, then average over the window axes
        xx = x.reindex([N,C,h,w,self.ksh,self.ksw], [
            "i0",  # Nid
            "i1",  # Cid
            f"i2*{self.sh}+i4",  # Hid
            f"i3*{self.sw}+i5",  # Wid
        ])
        return xx.reduce("mean", [4,5])
def pool(x, kernel_size, op, padding=0, stride=None):
    """Functional form: apply a `Pool` layer with the given op to `x`."""
    layer = Pool(kernel_size, stride, padding, op=op)
    return layer(x)
class AvgPool2d(Module):
    """Average pooling layer; thin wrapper around `Pool` with op="mean"."""
    def __init__(self, kernel_size, stride=None, padding=0, ceil_mode=False, count_include_pad=True):
        self.layer = Pool(
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            ceil_mode=ceil_mode,
            count_include_pad=count_include_pad,
            op="mean",
        )
    def execute(self, x):
        return self.layer(x)
def avg_pool2d(x, kernel_size, stride=None, padding=0, ceil_mode=False, count_include_pad=True):
    """Functional form of `AvgPool2d`."""
    layer = AvgPool2d(kernel_size, stride, padding, ceil_mode, count_include_pad)
    return layer(x)
class MaxPool2d(Module):
    """Max pooling layer; thin wrapper around `Pool` with op="maximum"."""
    def __init__(self, kernel_size, stride=None, padding=0, dilation=None, return_indices=None, ceil_mode=False):
        self.layer = Pool(
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            return_indices=return_indices,
            ceil_mode=ceil_mode,
            op="maximum",
        )
    def execute(self, x):
        return self.layer(x)
def max_pool2d(x, kernel_size, stride=None, padding=0, dilation=None, return_indices=None, ceil_mode=False):
    """Functional form of `MaxPool2d`."""
    layer = MaxPool2d(kernel_size, stride, padding, dilation, return_indices, ceil_mode)
    return layer(x)
from __future__ import print_function, unicode_literals
import os
import tempfile
import codecs
from bs4 import BeautifulSoup
from glob import glob
from libraries.general_tools.file_utils import read_file
from libraries.resource_container.ResourceContainer import RC
from libraries.app.app import App
class ProjectPrinter(object):
    """
    Prints a project given the project ID.
    Reads all the .html files from the project's dir in the cdn.door43.org
    bucket and compiles them into one page for printing, if the
    print_all.html page doesn't already exist. Returns the location of
    print_all.html.
    """

    def __init__(self):
        # set by print_project(); kept on the instance for later inspection
        self.project_id = None

    def print_project(self, project_id):
        """
        Build (if missing) and upload the combined print_all.html for a project.

        :param string project_id: of the form "<user>/<repo>/<commit>"
        :return string: "<bucket>/<key>" of the uploaded print_all.html
        :raises Exception: if the id is malformed or the project dir is absent
        """
        self.project_id = project_id
        if len(project_id.split('/')) != 3:
            raise Exception('Project not found.')
        user_name, repo_name, commit_id = project_id.split('/')
        source_path = 'u/{0}'.format(project_id)
        print_all_key = '{0}/print_all.html'.format(source_path)
        # NOTE(review): tempfile.mktemp is race-prone; mkstemp would be safer.
        print_all_file = tempfile.mktemp(prefix='print_all_')
        # Already built? Just return its location.
        if App.cdn_s3_handler().key_exists(print_all_key):
            return App.cdn_s3_handler().bucket_name + '/' + print_all_key
        files_dir = tempfile.mkdtemp(prefix='files_')
        App.cdn_s3_handler().download_dir(source_path, files_dir)
        project_dir = os.path.join(files_dir, source_path.replace('/', os.path.sep))
        if not os.path.isdir(project_dir):
            raise Exception('Project not found.')
        rc = RC(project_dir, repo_name)
        with codecs.open(print_all_file, 'w', 'utf-8-sig') as print_all:
            print_all.write("""<html lang="{0}" dir="{1}">
<head>
    <meta charset="UTF-8"/>
    <title>{2}: {3}</title>
    <style type="text/css">
        body > div {{
            page-break-after: always;
        }}
    </style>
</head>
<body onLoad="window.print()">
<h1>{2}: {3}</h1>
""".format(rc.resource.language.identifier, rc.resource.language.direction, rc.resource.language.title,
           rc.resource.title))
            # Concatenate every page, front matter first and back matter last.
            for fname in sorted(glob(os.path.join(project_dir, '*.html')), key=self.front_to_back):
                with codecs.open(fname, 'r') as f:
                    soup = BeautifulSoup(f, 'html.parser')
                # get the body of the raw html file
                content = soup.div
                if not content:
                    content = BeautifulSoup('<div>No content</div>', 'html.parser').find('div').extract()
                content['id'] = os.path.basename(fname)
                # NOTE(review): `unicode` builtin — this module is Python 2 code.
                print_all.write(unicode(content))
            print_all.write("""
</body>
</html>
""")
        App.cdn_s3_handler().upload_file(print_all_file, print_all_key, cache_time=0, content_type='text/html')
        return App.cdn_s3_handler().bucket_name + '/' + print_all_key

    @staticmethod
    def front_to_back(file_path):
        """
        Prefixes any "front" or "back" file with a number so they sort first
        and last respectively. Used as a sort key. Primarily used with OBS.

        :param string file_path:
        :return string:
        """
        parent_dir = os.path.dirname(file_path)
        file_name = os.path.basename(file_path)
        if file_name == 'front.html':
            return os.path.join(parent_dir, '00_{0}'.format(file_name))
        elif file_name == 'back.html':
            return os.path.join(parent_dir, '99_{0}'.format(file_name))
        else:
            return file_path
|
from .test_simple import EtcdIntegrationTest
from aio_etcd import auth
import aio_etcd as etcd
from . import helpers
import asyncio
from pytest import raises
class TestEtcdAuthBase(EtcdIntegrationTest):
    """Shared fixture for auth tests: creates a root user, enables auth,
    and tears both down.

    Subclasses get `self.client` (authenticated as root) and
    `self.unauth_client` (anonymous) pointing at the single-node cluster.
    """
    cl_size = 1

    def setUp(self):
        # Sets up the root user, toggles auth
        loop = asyncio.get_event_loop()
        self.client = etcd.Client(port=6001, loop=loop)
        u = auth.EtcdUser(self.client, 'root')
        u.password = 'testpass'
        loop.run_until_complete(u.write())
        # Reconnect authenticated; keep an anonymous client for permission tests.
        self.client = etcd.Client(port=6001, username='root',
                                  password='testpass', loop=loop)
        self.unauth_client = etcd.Client(port=6001, loop=loop)
        a = auth.Auth(self.client)
        loop.run_until_complete(a.set_active(True))

    def tearDown(self):
        loop = asyncio.get_event_loop()
        u = auth.EtcdUser(self.client, 'test_user')
        r = auth.EtcdRole(self.client, 'test_role')
        # Best-effort cleanup: the user/role may not exist for every test.
        # FIX: narrowed the former bare `except:` clauses, which would also
        # have swallowed KeyboardInterrupt/SystemExit.
        try:
            loop.run_until_complete(u.delete())
        except Exception:
            pass
        try:
            loop.run_until_complete(r.delete())
        except Exception:
            pass
        a = auth.Auth(self.client)
        loop.run_until_complete(a.set_active(False))
class EtcdUserTest(TestEtcdAuthBase):
    """Integration tests for auth.EtcdUser (read/write/delete, permissions)."""

    # NOTE(review): the (loop, self) signatures look inverted; presumably
    # `helpers.run_async` supplies the event loop as the first argument —
    # confirm against helpers.
    @helpers.run_async
    def test_names(loop, self):
        # Only the root user exists after the base fixture has run.
        u = auth.EtcdUser(self.client, 'test_user')
        self.assertEquals((yield from u.get_names()), ['root'])

    @helpers.run_async
    def test_read(loop, self):
        u = auth.EtcdUser(self.client, 'root')
        # Reading an existing user succeeds
        try:
            yield from u.read()
        except Exception:
            self.fail("reading the root user raised an exception")
        # roles for said user are fetched
        self.assertEquals(u.roles, set(['root']))
        # The user is correctly rendered out
        self.assertEquals(u._to_net(), [{'user': 'root', 'password': None,
                                         'roles': ['root']}])
        # An inexistent user raises the appropriate exception
        u = auth.EtcdUser(self.client, 'user.does.not.exist')
        with raises(etcd.EtcdKeyNotFound):
            yield from u.read()
        # Reading with an unauthenticated client raises an exception
        u = auth.EtcdUser(self.unauth_client, 'root')
        with raises(etcd.EtcdInsufficientPermissions):
            yield from u.read()
        # Generic errors are caught (nothing listens on port 9999)
        c = etcd.Client(port=9999)
        u = auth.EtcdUser(c, 'root')
        with raises(etcd.EtcdException):
            yield from u.read()

    @helpers.run_async
    def test_write_and_delete(loop, self):
        # Create a role to attach to the user later on
        r = auth.EtcdRole(self.client, 'test_group')
        r.acls = {'/*': 'R', '/test/*': 'RW'}
        try:
            yield from r.write()
        except:
            self.fail("Writing a simple groups should not fail")
        # Create an user
        u = auth.EtcdUser(self.client, 'test_user')
        u.roles.add('guest')
        u.roles.add('root')
        # directly from my suitcase
        u.password = '123456'
        try:
            yield from u.write()
        except:
            self.fail("creating a user doesn't work")
        # Password gets wiped
        self.assertEquals(u.password, None)
        yield from u.read()
        # Verify we can log in as this user and access the auth (it has the
        # root role)
        cl = etcd.Client(port=6001, username='test_user',
                         password='123456')
        ul = auth.EtcdUser(cl, 'root')
        try:
            yield from ul.read()
        except etcd.EtcdInsufficientPermissions:
            self.fail("Reading auth with the new user is not possible")
        self.assertEquals(u.name, "test_user")
        self.assertEquals(u.roles, set(['guest', 'root']))
        # set roles as a list, it works!
        u.roles = ['guest', 'test_group']
        try:
            yield from u.write()
        except:
            self.fail("updating a user you previously created fails")
        yield from u.read()
        self.assertIn('test_group', u.roles)
        # Unauthorized access is properly handled
        ua = auth.EtcdUser(self.unauth_client, 'test_user')
        with raises(etcd.EtcdInsufficientPermissions):
            yield from ua.write()
        # now let's test deletion
        du = auth.EtcdUser(self.client, 'user.does.not.exist')
        with raises(etcd.EtcdKeyNotFound):
            yield from du.delete()
        # Delete test_user
        yield from u.delete()
        with raises(etcd.EtcdKeyNotFound):
            yield from u.read()
        # Permissions are properly handled
        with raises(etcd.EtcdInsufficientPermissions):
            yield from ua.delete()
class EtcdRoleTest(TestEtcdAuthBase):
    """Integration tests for auth.EtcdRole (ACL read, grant/revoke, delete)."""

    @helpers.run_async
    def test_names(loop, self):
        # Default roles present on a fresh cluster.
        r = auth.EtcdRole(self.client, 'guest')
        self.assertListEqual((yield from r.get_names()), [u'guest', u'root'])

    @helpers.run_async
    def test_read(loop, self):
        r = auth.EtcdRole(self.client, 'guest')
        try:
            yield from r.read()
        except:
            self.fail('Reading an existing role failed')
        # XXX The ACL path result changed from '*' to '/*' at some point
        # between etcd-2.2.2 and 2.2.5. They're equivalent so allow
        # for both.
        if '/*' in r.acls:
            self.assertEquals(r.acls, {'/*': 'RW'})
        else:
            self.assertEquals(r.acls, {'*': 'RW'})
        # We can actually skip most other read tests as they are common
        # with EtcdUser

    @helpers.run_async
    def test_write_and_delete(loop, self):
        r = auth.EtcdRole(self.client, 'test_role')
        r.acls = {'/*': 'R', '/test/*': 'RW'}
        try:
            yield from r.write()
        except:
            self.fail("Writing a simple groups should not fail")
        # A re-read of the role reports the ACLs we just wrote
        r1 = auth.EtcdRole(self.client, 'test_role')
        yield from r1.read()
        self.assertEquals(r1.acls, r.acls)
        # Revoking write on /test/* leaves read access in place
        r.revoke('/test/*', 'W')
        yield from r.write()
        yield from r1.read()
        self.assertEquals(r1.acls, {'/*': 'R', '/test/*': 'R'})
        # Granting a new path shows up on re-read
        r.grant('/pub/*', 'RW')
        yield from r.write()
        yield from r1.read()
        self.assertEquals(r1.acls['/pub/*'], 'RW')
        # All other exceptions are tested by the user tests
        r1.name = None
        with raises(etcd.EtcdException):
            yield from r1.write()
        # ditto for delete
        try:
            yield from r.delete()
        except:
            self.fail("A normal delete should not fail")
        with raises(etcd.EtcdKeyNotFound):
            yield from r.read()
|
# -*- coding: utf-8 -*-
# File: base.py
# Author: Yuxin Wu <ppwwyyxxc@gmail.com>
from abc import ABCMeta, abstractmethod
import signal
import re
import weakref
import six
from six.moves import range
import tqdm
import tensorflow as tf
from .config import TrainConfig
from ..utils import logger, get_tqdm_kwargs
from ..utils.timer import timed_operation
from ..callbacks import StatHolder
from ..tfutils import get_global_step, get_global_step_var
from ..tfutils.modelutils import describe_model
from ..tfutils.summary import create_summary
__all__ = ['Trainer', 'StopTraining']
class StopTraining(BaseException):
    """Signal exception: raise to make Trainer.main_loop stop training."""
@six.add_metaclass(ABCMeta)
class Trainer(object):
    """ Base class for a trainer: owns the session, coordinator, summary
    writer and stat holder, and drives the epoch/step main loop. Subclasses
    implement `run_step`, `_setup` and `_trigger_epoch`."""
    """a `StatHolder` instance"""
    stat_holder = None
    """`tf.SummaryWriter`"""
    summary_writer = None
    """a tf.Tensor which returns summary string"""
    summary_op = None
    """ TrainConfig """
    config = None
    """ a ModelDesc"""
    model = None
    """ the current session"""
    sess = None
    """ the `tf.train.Coordinator` """
    coord = None

    def __init__(self, config):
        """
        :param config: a `TrainConfig` instance
        """
        assert isinstance(config, TrainConfig), type(config)
        self.config = config
        self.model = config.model
        self.sess = tf.Session(config=self.config.session_config)
        self.coord = tf.train.Coordinator()

    def train(self):
        """ Start training: build the graph, then run the main loop."""
        self.setup()
        self.main_loop()

    @abstractmethod
    def run_step(self):
        """ run an iteration"""
        pass

    def get_predict_func(self, input_names, output_names):
        """ return a online predictor"""
        raise NotImplementedError()

    def get_predict_funcs(self, input_names, output_names, n):
        """ return n predictor functions.
        Can be overwritten by subclasses to exploit more
        parallelism among funcs.
        """
        return [self.get_predict_func(input_names, output_names) for k in range(n)]

    def trigger_epoch(self):
        """Run end-of-epoch work: subclass hook, callbacks, summary flush."""
        # trigger subclass
        self._trigger_epoch()
        # trigger callbacks
        self.config.callbacks.trigger_epoch()
        self.summary_writer.flush()

    @abstractmethod
    def _trigger_epoch(self):
        """ This is called right after all steps in an epoch are finished"""
        pass

    def _process_summary(self, summary_str):
        """Parse a serialized summary, record scalar values into the
        StatHolder, and forward the whole summary to the writer."""
        summary = tf.Summary.FromString(summary_str)
        for val in summary.value:
            if val.WhichOneof('value') == 'simple_value':
                # strip the per-tower prefix from scalar tags
                val.tag = re.sub('tower[p0-9]+/', '', val.tag)   # TODO move to subclasses
                suffix = '-summary'  # issue#6150
                if val.tag.endswith(suffix):
                    val.tag = val.tag[:-len(suffix)]
                self.stat_holder.add_stat(val.tag, val.simple_value)
        self.summary_writer.add_summary(summary, get_global_step())

    def write_scalar_summary(self, name, val):
        """Record one scalar both as a TF summary and as a tracked stat."""
        self.summary_writer.add_summary(
            create_summary(name, val), get_global_step())
        self.stat_holder.add_stat(name, val)

    def setup(self):
        """Build the graph, wire up callbacks and summaries, and initialize
        variables before entering the main loop."""
        self._setup()
        describe_model()
        get_global_step_var()
        # some final operations that might modify the graph
        logger.info("Setup callbacks ...")
        self.config.callbacks.setup_graph(weakref.proxy(self))
        if not hasattr(logger, 'LOG_DIR'):
            raise RuntimeError("logger directory wasn't set!")
        self.summary_writer = tf.summary.FileWriter(logger.LOG_DIR, graph=self.sess.graph)
        self.summary_op = tf.summary.merge_all()
        # create an empty StatHolder
        self.stat_holder = StatHolder(logger.LOG_DIR)
        logger.info("Initializing graph variables ...")
        # TODO newsession + sessinit?
        try:
            initop = tf.global_variables_initializer()
        # NOTE(review): bare except kept as a fallback for old TF versions;
        # ideally this would be `except AttributeError:`.
        except:
            initop = tf.initialize_all_variables()
        self.sess.run(initop)
        self.config.session_init.init(self.sess)
        # freeze the graph so later accidental modifications raise immediately
        tf.get_default_graph().finalize()
        tf.train.start_queue_runners(
            sess=self.sess, coord=self.coord, daemon=True, start=True)

    @abstractmethod
    def _setup(self):
        """ setup Trainer-specific stuff for training"""

    def main_loop(self):
        """Run epochs of steps until max_epoch, StopTraining, or coordinator
        stop; always runs after_train cleanup and closes session/writer."""
        callbacks = self.config.callbacks
        with self.sess.as_default():
            try:
                callbacks.before_train()
                logger.info("Start training with global_step={}".format(get_global_step()))
                for epoch_num in range(
                        self.config.starting_epoch, self.config.max_epoch+1):
                    with timed_operation(
                            'Epoch {} (global_step {})'.format(
                                epoch_num, get_global_step() + self.config.step_per_epoch)):
                        for step in tqdm.trange(
                                self.config.step_per_epoch,
                                **get_tqdm_kwargs(leave=True)):
                            if self.coord.should_stop():
                                return
                            self.run_step()  # implemented by subclass
                            callbacks.trigger_step()   # not useful?
                    # trigger epoch outside the timing region.
                    self.trigger_epoch()
            except StopTraining:
                logger.info("Training was stopped.")
            except:
                # re-raise anything unexpected; cleanup happens in finally
                raise
            finally:
                callbacks.after_train()
                self.coord.request_stop()
                self.summary_writer.close()
                self.sess.close()
|
#!/usr/bin/python3
"""
Download all the problems of Codeforces.
"""

# Problem-set index pages to scrape.
CODEF_SET_URLS = [
    "https://codeforces.com/problemset",
    "https://codeforces.com/problemset/acmsguru",
]

print("Code Forces Scraping.")
|
# -*- coding: utf-8 -*-
'''
Sync encryption normalization
'''
|
# Generated by Django 3.1.1 on 2020-09-29 20:01
import django.contrib.postgres.fields
from django.db import migrations, models
class Migration(migrations.Migration):
    """Replace ``Mission.links`` with two separate Postgres array fields.

    ``link_addresses`` holds the clickable URLs and ``link_descriptions``
    the matching display text; presumably the two arrays are paired by
    index — confirm against the model's usage.
    """

    # Must be applied after the previous carbon_quiz migration.
    dependencies = [
        ("carbon_quiz", "0047_auto_20200929_0849"),
    ]

    operations = [
        # Drop the old combined field. Its data is NOT copied anywhere
        # first, so whatever was stored in `links` is lost on migrate.
        migrations.RemoveField(
            model_name="mission",
            name="links",
        ),
        # New array of link URLs (each element capped at 300 chars).
        migrations.AddField(
            model_name="mission",
            name="link_addresses",
            field=django.contrib.postgres.fields.ArrayField(
                base_field=models.CharField(blank=True, max_length=300, null=True),
                blank=True,
                help_text="Links that user can click to complete the mission.",
                null=True,
                size=None,  # unbounded array length
            ),
        ),
        # New array of human-readable descriptions for those links.
        migrations.AddField(
            model_name="mission",
            name="link_descriptions",
            field=django.contrib.postgres.fields.ArrayField(
                base_field=models.CharField(blank=True, max_length=300, null=True),
                blank=True,
                help_text="What websites the user can click to complete the mission.",
                null=True,
                size=None,  # unbounded array length
            ),
        ),
    ]
|
"""
Runtime: 859 ms, faster than 34.17% of Python3 online submissions for Maximal Square.
Memory Usage: 16.5 MB, less than 69.25% of Python3 online submissions for Maximal Square.
"""
from typing import List
from typing import Optional
from copy import deepcopy
class Solution:
    def maximalSquare(self, matrix: List[List[str]]) -> int:
        """Return the area of the largest square of '1's in *matrix*.

        Dynamic programming: dp[i][j] is the edge length of the largest
        all-ones square whose bottom-right corner is at (i, j).
        Runs in O(rows*cols) time with O(rows*cols) extra space.

        :param matrix: grid of "0"/"1" strings (may be empty).
        :return: area (edge**2) of the largest all-ones square, 0 if none.
        """
        # Guard empty input; the original crashed on `matrix[0]` here.
        if not matrix or not matrix[0]:
            return 0
        rows, cols = len(matrix), len(matrix[0])

        # Convert the character grid to ints. Row 0 / column 0 entries are
        # already their final dp values (a lone '1' is a 1x1 square).
        dp = [[int(cell) for cell in row] for row in matrix]

        # 1 if the grid contains any '1' at all, else 0.
        max_edge = max(max(row) for row in dp)
        if max_edge == 0:
            return 0

        for i in range(1, rows):
            for j in range(1, cols):
                if dp[i][j]:
                    # Extend the smallest of the three neighboring squares.
                    dp[i][j] = 1 + min(dp[i - 1][j - 1], dp[i - 1][j], dp[i][j - 1])
                    if dp[i][j] > max_edge:
                        max_edge = dp[i][j]
        return max_edge ** 2
def main():
    """Run the sample grid from the problem statement and show the result."""
    sample_grid = [
        ["1", "0", "1", "0", "0"],
        ["1", "0", "1", "1", "1"],
        ["1", "1", "1", "1", "1"],
        ["1", "0", "0", "1", "0"],
    ]
    solver = Solution()
    print('Output:', solver.maximalSquare(sample_grid))
    print('Expected:', 4)


if __name__ == "__main__":
    main()
|
#!/usr/bin/env python
import sys
def main(number):
    """Sum every multiple of 3 or 5 strictly below *number* (Project Euler 1)."""
    return sum(i for i in range(1, number) if i % 3 == 0 or i % 5 == 0)


if __name__ == '__main__':
    print(main(int(sys.argv[1])))
|
# MIT LICENSE
#
# Copyright 1997 - 2020 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
from typing import List, Any, Union
class Pcc(Base):
"""Pcep Session (Device) level Configuration
The Pcc class encapsulates a list of pcc resources that are managed by the user.
A list of resources can be retrieved from the server using the Pcc.find() method.
The list can be managed by using the Pcc.add() and Pcc.remove() methods.
"""
__slots__ = ()
_SDM_NAME = 'pcc'
_SDM_ATT_MAP = {
'Active': 'active',
'Active_pre_established_lsps': 'active_pre_established_lsps',
'Authentication': 'authentication',
'BurstInterval': 'burstInterval',
'ConnectedVia': 'connectedVia',
'Count': 'count',
'DeadInterval': 'deadInterval',
'DescriptiveName': 'descriptiveName',
'ErrorValue': 'errorValue',
'Errors': 'errors',
'ExpectedInitiatedLspsForTraffic': 'expectedInitiatedLspsForTraffic',
'KeepaliveInterval': 'keepaliveInterval',
'LspInstantiationCapability': 'lspInstantiationCapability',
'LspUpdateCapability': 'lspUpdateCapability',
'MD5Key': 'mD5Key',
'MaxLspPerPcReq': 'maxLspPerPcReq',
'MaxLspsPerPcRpt': 'maxLspsPerPcRpt',
'MaxReconnectInterval': 'maxReconnectInterval',
'MaxRequestedLspPerInterval': 'maxRequestedLspPerInterval',
'MaxSyncLspPerInterval': 'maxSyncLspPerInterval',
'MaximumSidDepth': 'maximumSidDepth',
'Multiplier': 'multiplier',
'Name': 'name',
'NumberOfBackupPCEs': 'numberOfBackupPCEs',
'PccPpagTLVType': 'pccPpagTLVType',
'PccTEPathBindingTLVType': 'pccTEPathBindingTLVType',
'PceIpv4Address': 'pceIpv4Address',
'PreEstablishedSrLspsPerPcc': 'preEstablishedSrLspsPerPcc',
'RateControl': 'rateControl',
'ReconnectInterval': 'reconnectInterval',
'RequestedLspsPerPcc': 'requestedLspsPerPcc',
'ReturnInstantiationError': 'returnInstantiationError',
'SessionStatus': 'sessionStatus',
'SrPceCapability': 'srPceCapability',
'Sr_capability_n_flag': 'sr_capability_n_flag',
'Sr_capability_x_flag': 'sr_capability_x_flag',
'Srv6MaxSL': 'srv6MaxSL',
'Srv6PceCapability': 'srv6PceCapability',
'StackedLayers': 'stackedLayers',
'StateCounts': 'stateCounts',
'StateTimeoutInterval': 'stateTimeoutInterval',
'Status': 'status',
'TcpPort': 'tcpPort',
}
_SDM_ENUM_MAP = {
'status': ['configured', 'error', 'mixed', 'notStarted', 'started', 'starting', 'stopping'],
}
def __init__(self, parent, list_op=False):
super(Pcc, self).__init__(parent, list_op)
@property
def ExpectedInitiatedLspList(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.expectedinitiatedlsplist_c1edb3ac572c229482ac3b16768b81b1.ExpectedInitiatedLspList): An instance of the ExpectedInitiatedLspList class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.expectedinitiatedlsplist_c1edb3ac572c229482ac3b16768b81b1 import ExpectedInitiatedLspList
if self._properties.get('ExpectedInitiatedLspList', None) is not None:
return self._properties.get('ExpectedInitiatedLspList')
else:
return ExpectedInitiatedLspList(self)._select()
@property
def LearnedInfo(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.learnedinfo.learnedinfo_ff4d5e5643a63bccb40b6cf64fc58100.LearnedInfo): An instance of the LearnedInfo class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.learnedinfo.learnedinfo_ff4d5e5643a63bccb40b6cf64fc58100 import LearnedInfo
if self._properties.get('LearnedInfo', None) is not None:
return self._properties.get('LearnedInfo')
else:
return LearnedInfo(self)
@property
def PccLearnedLspDb(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.pcclearnedlspdb_1f09e65ced78209c908d7bf80bf0e73d.PccLearnedLspDb): An instance of the PccLearnedLspDb class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.pcclearnedlspdb_1f09e65ced78209c908d7bf80bf0e73d import PccLearnedLspDb
if self._properties.get('PccLearnedLspDb', None) is not None:
return self._properties.get('PccLearnedLspDb')
else:
return PccLearnedLspDb(self)._select()
@property
def PcepBackupPCEs(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.pcepbackuppces_f780e95e8b1b209ab7ad3ca8a9f3a4c6.PcepBackupPCEs): An instance of the PcepBackupPCEs class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.pcepbackuppces_f780e95e8b1b209ab7ad3ca8a9f3a4c6 import PcepBackupPCEs
if self._properties.get('PcepBackupPCEs', None) is not None:
return self._properties.get('PcepBackupPCEs')
else:
return PcepBackupPCEs(self)._select()
@property
def PreEstablishedSrLsps(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.preestablishedsrlsps_a4b5c388b0a9f1cd18fdc396c2ea1c6a.PreEstablishedSrLsps): An instance of the PreEstablishedSrLsps class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.preestablishedsrlsps_a4b5c388b0a9f1cd18fdc396c2ea1c6a import PreEstablishedSrLsps
if self._properties.get('PreEstablishedSrLsps', None) is not None:
return self._properties.get('PreEstablishedSrLsps')
else:
return PreEstablishedSrLsps(self)._select()
@property
def RequestedLsps(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.requestedlsps_13f940a8c982ec765fee3bc34ba5d305.RequestedLsps): An instance of the RequestedLsps class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.requestedlsps_13f940a8c982ec765fee3bc34ba5d305 import RequestedLsps
if self._properties.get('RequestedLsps', None) is not None:
return self._properties.get('RequestedLsps')
else:
return RequestedLsps(self)._select()
@property
def Active(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Activate/Deactivate Configuration
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Active']))
@property
def Active_pre_established_lsps(self):
# type: () -> int
"""
Returns
-------
- number:
"""
return self._get_attribute(self._SDM_ATT_MAP['Active_pre_established_lsps'])
@Active_pre_established_lsps.setter
def Active_pre_established_lsps(self, value):
# type: (int) -> None
self._set_attribute(self._SDM_ATT_MAP['Active_pre_established_lsps'], value)
@property
def Authentication(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): The type of cryptographic authentication to be used on this link interface
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Authentication']))
@property
def BurstInterval(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Interval in milisecond in which desired rate of messages needs to be maintained.
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BurstInterval']))
@property
def ConnectedVia(self):
# type: () -> List[str]
"""DEPRECATED
Returns
-------
- list(str[None | /api/v1/sessions/1/ixnetwork/topology/.../*]): List of layers this layer is used to connect with to the wire.
"""
return self._get_attribute(self._SDM_ATT_MAP['ConnectedVia'])
@ConnectedVia.setter
def ConnectedVia(self, value):
# type: (List[str]) -> None
self._set_attribute(self._SDM_ATT_MAP['ConnectedVia'], value)
@property
def Count(self):
# type: () -> int
"""
Returns
-------
- number: Number of elements inside associated multiplier-scaled container object, e.g. number of devices inside a Device Group.
"""
return self._get_attribute(self._SDM_ATT_MAP['Count'])
@property
def DeadInterval(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): This is the time interval, after the expiration of which, a PCEP peer declares the session down if no PCEP message has been received.
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['DeadInterval']))
@property
def DescriptiveName(self):
# type: () -> str
"""
Returns
-------
- str: Longer, more descriptive name for element. It's not guaranteed to be unique like -name-, but may offer more context.
"""
return self._get_attribute(self._SDM_ATT_MAP['DescriptiveName'])
@property
def ErrorValue(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): To configure the type of error. Editable only if Return Instantiation Error is enabled.
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ErrorValue']))
@property
def Errors(self):
"""
Returns
-------
- list(dict(arg1:str[None | /api/v1/sessions/1/ixnetwork//.../*],arg2:list[str])): A list of errors that have occurred
"""
return self._get_attribute(self._SDM_ATT_MAP['Errors'])
@property
def ExpectedInitiatedLspsForTraffic(self):
# type: () -> int
"""
Returns
-------
- number: Based on the value in this control the number of Expected Initiated LSPs for Traffic can be configured. This is used for traffic only.
"""
return self._get_attribute(self._SDM_ATT_MAP['ExpectedInitiatedLspsForTraffic'])
@ExpectedInitiatedLspsForTraffic.setter
def ExpectedInitiatedLspsForTraffic(self, value):
# type: (int) -> None
self._set_attribute(self._SDM_ATT_MAP['ExpectedInitiatedLspsForTraffic'], value)
@property
def KeepaliveInterval(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Frequency/Time Interval of sending PCEP messages to keep the session active.
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['KeepaliveInterval']))
@property
def LspInstantiationCapability(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): If Stateful PCE Capability is enabled then this control should be activated to set the LSP Instantiation capability in the Stateful PCE Capability TLV.
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['LspInstantiationCapability']))
@property
def LspUpdateCapability(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): If Stateful PCE Capability is enabled then this control should be activated to set the update capability in the Stateful PCE Capability TLV.
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['LspUpdateCapability']))
@property
def MD5Key(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): A value to be used as the secret MD5 Key.
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['MD5Key']))
@property
def MaxLspPerPcReq(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Max LSPs Per PCReq
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['MaxLspPerPcReq']))
@property
def MaxLspsPerPcRpt(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Controls the maximum LSP information that can be present in a Path report message when the session is stateful session.
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['MaxLspsPerPcRpt']))
@property
def MaxReconnectInterval(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): This is the maximum time interval, up to which the reconnect timer will be increased.
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['MaxReconnectInterval']))
@property
def MaxRequestedLspPerInterval(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Maximum number of LSP computation request messages can be sent per interval.
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['MaxRequestedLspPerInterval']))
@property
def MaxSyncLspPerInterval(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Maximum number of LSP sync can be sent per interval.
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['MaxSyncLspPerInterval']))
@property
def MaximumSidDepth(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Maximum SID Depth field (MSD) specifies the maximum number of SIDs that a PCC is capable of imposing on a packet. Editable only if SR PCE Capability is enabled.
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['MaximumSidDepth']))
@property
def Multiplier(self):
# type: () -> int
"""
Returns
-------
- number: Number of layer instances per parent instance (multiplier)
"""
return self._get_attribute(self._SDM_ATT_MAP['Multiplier'])
@Multiplier.setter
def Multiplier(self, value):
# type: (int) -> None
self._set_attribute(self._SDM_ATT_MAP['Multiplier'], value)
@property
def Name(self):
# type: () -> str
"""
Returns
-------
- str: Name of NGPF element, guaranteed to be unique in Scenario
"""
return self._get_attribute(self._SDM_ATT_MAP['Name'])
@Name.setter
def Name(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['Name'], value)
@property
def NumberOfBackupPCEs(self):
# type: () -> int
"""
Returns
-------
- number: Number of Backup PCEs
"""
return self._get_attribute(self._SDM_ATT_MAP['NumberOfBackupPCEs'])
@NumberOfBackupPCEs.setter
def NumberOfBackupPCEs(self, value):
# type: (int) -> None
self._set_attribute(self._SDM_ATT_MAP['NumberOfBackupPCEs'], value)
@property
def PccPpagTLVType(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): PPAG TLV Type specifies PCC's capability of interpreting this type of PPAG TLV
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['PccPpagTLVType']))
@property
def PccTEPathBindingTLVType(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): PCC TE-PATH-BINDING TLV Type is a TLV that carries MPLS label binding or SRv6 Binding SID. This is only configurable if the Binding SID Draft Version is set to ietf-pce-binding-label-sid. The minimum value is 0. The maximum value is 65535. The default value is 31.
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['PccTEPathBindingTLVType']))
@property
def PceIpv4Address(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): IPv4 address of the PCE. This column is greyed out in case of PCCv6.
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['PceIpv4Address']))
@property
def PreEstablishedSrLspsPerPcc(self):
# type: () -> int
"""
Returns
-------
- number: Pre-Established SR LSPs per PCC
"""
return self._get_attribute(self._SDM_ATT_MAP['PreEstablishedSrLspsPerPcc'])
@PreEstablishedSrLspsPerPcc.setter
def PreEstablishedSrLspsPerPcc(self, value):
# type: (int) -> None
self._set_attribute(self._SDM_ATT_MAP['PreEstablishedSrLspsPerPcc'], value)
@property
def RateControl(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): The rate control is an optional feature associated with PCE initiated LSP.
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['RateControl']))
@property
def ReconnectInterval(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): This is the time interval, after the expiration of which, retry to establish the broken session by PCC happen.
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ReconnectInterval']))
@property
def RequestedLspsPerPcc(self):
# type: () -> int
"""
Returns
-------
- number: Requested LSPs per PCC
"""
return self._get_attribute(self._SDM_ATT_MAP['RequestedLspsPerPcc'])
@RequestedLspsPerPcc.setter
def RequestedLspsPerPcc(self, value):
# type: (int) -> None
self._set_attribute(self._SDM_ATT_MAP['RequestedLspsPerPcc'], value)
@property
def ReturnInstantiationError(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): If enabled, then PCC will reply PCErr upon receiving PCInitiate message.
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ReturnInstantiationError']))
@property
def SessionStatus(self):
# type: () -> List[str]
"""
Returns
-------
- list(str[down | notStarted | up]): Current state of protocol session: Not Started - session negotiation not started, the session is not active yet. Down - actively trying to bring up a protocol session, but negotiation didn't successfully complete (yet). Up - session came up successfully.
"""
return self._get_attribute(self._SDM_ATT_MAP['SessionStatus'])
@property
def SrPceCapability(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): The SR PCE Capability TLV is an optional TLV associated with the OPEN Object to exchange SR capability of PCEP speakers.
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['SrPceCapability']))
@property
def Sr_capability_n_flag(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): A PCC sets this flag bit to 1 to indicate that it is capable of resolving a Node or Adjacency Identifier (NAI) to a SID.
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Sr_capability_n_flag']))
@property
def Sr_capability_x_flag(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): A PCC sets this flag bit to 1 to indicate that it does not impose any limit on the MSD.
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Sr_capability_x_flag']))
@property
def Srv6MaxSL(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): This field specifies the maximum value of the Segments Left (SL) in the SRH.
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Srv6MaxSL']))
@property
def Srv6PceCapability(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): The SRv6 PCE Capability TLV is a sub-TLV that comes under PATH-SETUP-TYPE-CAPABILITY TLV if PST List contains SRv6 PST type. This TLV is associated with the OPEN Object to exchange SRv6 capability of PCEP speakers.
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Srv6PceCapability']))
@property
def StackedLayers(self):
# type: () -> List[str]
"""
Returns
-------
- list(str[None | /api/v1/sessions/1/ixnetwork/topology/.../*]): List of secondary (many to one) child layer protocols
"""
return self._get_attribute(self._SDM_ATT_MAP['StackedLayers'])
@StackedLayers.setter
def StackedLayers(self, value):
# type: (List[str]) -> None
self._set_attribute(self._SDM_ATT_MAP['StackedLayers'], value)
@property
def StateCounts(self):
"""
Returns
-------
- dict(total:number,notStarted:number,down:number,up:number): A list of values that indicates the total number of sessions, the number of sessions not started, the number of sessions down and the number of sessions that are up
"""
return self._get_attribute(self._SDM_ATT_MAP['StateCounts'])
@property
def StateTimeoutInterval(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): This is the time interval, after the expiration of which, LSP is cleaned up by PCC.
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['StateTimeoutInterval']))
@property
def Status(self):
# type: () -> str
"""
Returns
-------
- str(configured | error | mixed | notStarted | started | starting | stopping): Running status of associated network element. Once in Started state, protocol sessions will begin to negotiate.
"""
return self._get_attribute(self._SDM_ATT_MAP['Status'])
@property
def TcpPort(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): PCEP operates over TCP using a registered TCP port (default - 4189). This allows the requirements of reliable messaging and flow control to be met without further protocol work. This control can be configured when user does not want to use the default one.
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['TcpPort']))
def update(self, Active_pre_established_lsps=None, ConnectedVia=None, ExpectedInitiatedLspsForTraffic=None, Multiplier=None, Name=None, NumberOfBackupPCEs=None, PreEstablishedSrLspsPerPcc=None, RequestedLspsPerPcc=None, StackedLayers=None):
# type: (int, List[str], int, int, str, int, int, int, List[str]) -> Pcc
"""Updates pcc resource on the server.
This method has some named parameters with a type: obj (Multivalue).
The Multivalue class has documentation that details the possible values for those named parameters.
Args
----
- Active_pre_established_lsps (number):
- ConnectedVia (list(str[None | /api/v1/sessions/1/ixnetwork/topology/.../*])): List of layers this layer is used to connect with to the wire.
- ExpectedInitiatedLspsForTraffic (number): Based on the value in this control the number of Expected Initiated LSPs for Traffic can be configured. This is used for traffic only.
- Multiplier (number): Number of layer instances per parent instance (multiplier)
- Name (str): Name of NGPF element, guaranteed to be unique in Scenario
- NumberOfBackupPCEs (number): Number of Backup PCEs
- PreEstablishedSrLspsPerPcc (number): Pre-Established SR LSPs per PCC
- RequestedLspsPerPcc (number): Requested LSPs per PCC
- StackedLayers (list(str[None | /api/v1/sessions/1/ixnetwork/topology/.../*])): List of secondary (many to one) child layer protocols
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))
def add(self, Active_pre_established_lsps=None, ConnectedVia=None, ExpectedInitiatedLspsForTraffic=None, Multiplier=None, Name=None, NumberOfBackupPCEs=None, PreEstablishedSrLspsPerPcc=None, RequestedLspsPerPcc=None, StackedLayers=None):
# type: (int, List[str], int, int, str, int, int, int, List[str]) -> Pcc
"""Adds a new pcc resource on the server and adds it to the container.
Args
----
- Active_pre_established_lsps (number):
- ConnectedVia (list(str[None | /api/v1/sessions/1/ixnetwork/topology/.../*])): List of layers this layer is used to connect with to the wire.
- ExpectedInitiatedLspsForTraffic (number): Based on the value in this control the number of Expected Initiated LSPs for Traffic can be configured. This is used for traffic only.
- Multiplier (number): Number of layer instances per parent instance (multiplier)
- Name (str): Name of NGPF element, guaranteed to be unique in Scenario
- NumberOfBackupPCEs (number): Number of Backup PCEs
- PreEstablishedSrLspsPerPcc (number): Pre-Established SR LSPs per PCC
- RequestedLspsPerPcc (number): Requested LSPs per PCC
- StackedLayers (list(str[None | /api/v1/sessions/1/ixnetwork/topology/.../*])): List of secondary (many to one) child layer protocols
Returns
-------
- self: This instance with all currently retrieved pcc resources using find and the newly added pcc resources available through an iterator or index
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._create(self._map_locals(self._SDM_ATT_MAP, locals()))
def remove(self):
"""Deletes all the contained pcc resources in this instance from the server.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
self._delete()
def find(self, Active_pre_established_lsps=None, ConnectedVia=None, Count=None, DescriptiveName=None, Errors=None, ExpectedInitiatedLspsForTraffic=None, Multiplier=None, Name=None, NumberOfBackupPCEs=None, PreEstablishedSrLspsPerPcc=None, RequestedLspsPerPcc=None, SessionStatus=None, StackedLayers=None, StateCounts=None, Status=None):
    """Finds and retrieves pcc resources from the server.

    All named parameters are evaluated on the server using regex. The named parameters can be used to selectively retrieve pcc resources from the server.
    To retrieve an exact match ensure the parameter value starts with ^ and ends with $
    By default the find method takes no parameters and will retrieve all pcc resources from the server.

    Args
    ----
    - Active_pre_established_lsps (number):
    - ConnectedVia (list(str[None | /api/v1/sessions/1/ixnetwork/topology/.../*])): List of layers this layer is used to connect with to the wire.
    - Count (number): Number of elements inside associated multiplier-scaled container object, e.g. number of devices inside a Device Group.
    - DescriptiveName (str): Longer, more descriptive name for element. It's not guaranteed to be unique like -name-, but may offer more context.
    - Errors (list(dict(arg1:str[None | /api/v1/sessions/1/ixnetwork//.../*],arg2:list[str]))): A list of errors that have occurred
    - ExpectedInitiatedLspsForTraffic (number): Based on the value in this control the number of Expected Initiated LSPs for Traffic can be configured. This is used for traffic only.
    - Multiplier (number): Number of layer instances per parent instance (multiplier)
    - Name (str): Name of NGPF element, guaranteed to be unique in Scenario
    - NumberOfBackupPCEs (number): Number of Backup PCEs
    - PreEstablishedSrLspsPerPcc (number): Pre-Established SR LSPs per PCC
    - RequestedLspsPerPcc (number): Requested LSPs per PCC
    - SessionStatus (list(str[down | notStarted | up])): Current state of protocol session: Not Started - session negotiation not started, the session is not active yet. Down - actively trying to bring up a protocol session, but negotiation is didn't successfully complete (yet). Up - session came up successfully.
    - StackedLayers (list(str[None | /api/v1/sessions/1/ixnetwork/topology/.../*])): List of secondary (many to one) child layer protocols
    - StateCounts (dict(total:number,notStarted:number,down:number,up:number)): A list of values that indicates the total number of sessions, the number of sessions not started, the number of sessions down and the number of sessions that are up
    - Status (str(configured | error | mixed | notStarted | started | starting | stopping)): Running status of associated network element. Once in Started state, protocol sessions will begin to negotiate.

    Returns
    -------
    - self: This instance with matching pcc resources retrieved from the server available through an iterator or index

    Raises
    ------
    - ServerError: The server has encountered an uncategorized error condition
    """
    # locals() is read first so only the caller's filter arguments (plus
    # self) are mapped; the select helper sends them as server-side regexes.
    criteria = self._map_locals(self._SDM_ATT_MAP, locals())
    return self._select(criteria)
def read(self, href):
    """Retrieves a single instance of pcc data from the server.

    Args
    ----
    - href (str): An href to the instance to be retrieved

    Returns
    -------
    - self: This instance with the pcc resources from the server available through an iterator or index

    Raises
    ------
    - NotFoundError: The requested resource does not exist on the server
    - ServerError: The server has encountered an uncategorized error condition
    """
    # A direct href lookup bypasses regex-based find and fetches one resource.
    return self._read(href)
def Abort(self, *args, **kwargs):
    # type: (*Any, **Any) -> None
    """Executes the abort operation on the server.

    Abort CPF control plane (equals to demote to kUnconfigured state).

    The IxNetwork model allows for multiple method Signatures with the same name while python does not.

    abort(async_operation=bool)
    ---------------------------
    - async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.

    abort(SessionIndices=list, async_operation=bool)
    ------------------------------------------------
    - SessionIndices (list(number)): This parameter requires an array of session numbers 1 2 3
    - async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.

    abort(SessionIndices=string, async_operation=bool)
    --------------------------------------------------
    - SessionIndices (str): This parameter requires a string of session numbers 1-4;6;7-12
    - async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.

    Raises
    ------
    - NotFoundError: The requested resource does not exist on the server
    - ServerError: The server has encountered an uncategorized error condition
    """
    # Positional overload arguments become Arg2, Arg3, ... (Arg1 is self);
    # keyword arguments are passed through under their own names.
    payload = {"Arg1": self}
    for position, value in enumerate(args, start=2):
        payload["Arg%s" % position] = value
    payload.update(kwargs)
    return self._execute('abort', payload=payload, response_object=None)
def ClearPccLearnedInfoInClient(self, *args, **kwargs):
    # type: (*Any, **Any) -> Union[List[str], None]
    """Executes the clearPccLearnedInfoInClient operation on the server.

    Clears ALL Learned LSP Information of PCC Device.

    The IxNetwork model allows for multiple method Signatures with the same name while python does not.

    clearPccLearnedInfoInClient(async_operation=bool)
    -------------------------------------------------
    - async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.

    clearPccLearnedInfoInClient(SessionIndices=list, async_operation=bool)
    ----------------------------------------------------------------------
    - SessionIndices (list(number)): This parameter requires an array of session numbers 1 2 3
    - async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.

    clearPccLearnedInfoInClient(SessionIndices=string, async_operation=bool)
    ------------------------------------------------------------------------
    - SessionIndices (str): This parameter requires a string of session numbers 1-4;6;7-12
    - async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.

    clearPccLearnedInfoInClient(Arg2=list, async_operation=bool)list
    ----------------------------------------------------------------
    - Arg2 (list(number)): List of indices into the protocol plugin.An empty list indicates all instances in the plugin.
    - async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
    - Returns list(str): ID to associate each async action invocation

    Raises
    ------
    - NotFoundError: The requested resource does not exist on the server
    - ServerError: The server has encountered an uncategorized error condition
    """
    # Positional overload arguments become Arg2, Arg3, ... (Arg1 is self);
    # keyword arguments are passed through under their own names.
    payload = {"Arg1": self}
    for position, value in enumerate(args, start=2):
        payload["Arg%s" % position] = value
    payload.update(kwargs)
    return self._execute('clearPccLearnedInfoInClient', payload=payload, response_object=None)
def GetPccBasicAllSrLspLearnedInfo(self, *args, **kwargs):
    # type: (*Any, **Any) -> Union[List[str], None]
    """Executes the getPccBasicAllSrLspLearnedInfo operation on the server.

    Gets Basic Information about All SR LSPs learnt by this PCC.

    The IxNetwork model allows for multiple method Signatures with the same name while python does not.

    getPccBasicAllSrLspLearnedInfo(async_operation=bool)
    ----------------------------------------------------
    - async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.

    getPccBasicAllSrLspLearnedInfo(SessionIndices=list, async_operation=bool)
    -------------------------------------------------------------------------
    - SessionIndices (list(number)): This parameter requires an array of session numbers 1 2 3
    - async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.

    getPccBasicAllSrLspLearnedInfo(SessionIndices=string, async_operation=bool)
    ---------------------------------------------------------------------------
    - SessionIndices (str): This parameter requires a string of session numbers 1-4;6;7-12
    - async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.

    getPccBasicAllSrLspLearnedInfo(Arg2=list, async_operation=bool)list
    -------------------------------------------------------------------
    - Arg2 (list(number)): List of indices into the protocol plugin. An empty list indicates all instances in the plugin.
    - async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
    - Returns list(str): ID to associate each async action invocation

    Raises
    ------
    - NotFoundError: The requested resource does not exist on the server
    - ServerError: The server has encountered an uncategorized error condition
    """
    # Positional overload arguments become Arg2, Arg3, ... (Arg1 is self);
    # keyword arguments are passed through under their own names.
    payload = {"Arg1": self}
    for position, value in enumerate(args, start=2):
        payload["Arg%s" % position] = value
    payload.update(kwargs)
    return self._execute('getPccBasicAllSrLspLearnedInfo', payload=payload, response_object=None)
def GetPccBasicAllSrv6LspLearnedInfo(self, *args, **kwargs):
    # type: (*Any, **Any) -> Union[List[str], None]
    """Executes the getPccBasicAllSrv6LspLearnedInfo operation on the server.

    Gets Basic Information about All SRv6 LSPs learnt by this PCC.

    The IxNetwork model allows for multiple method Signatures with the same name while python does not.

    getPccBasicAllSrv6LspLearnedInfo(async_operation=bool)
    ------------------------------------------------------
    - async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.

    getPccBasicAllSrv6LspLearnedInfo(SessionIndices=list, async_operation=bool)
    ---------------------------------------------------------------------------
    - SessionIndices (list(number)): This parameter requires an array of session numbers 1 2 3
    - async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.

    getPccBasicAllSrv6LspLearnedInfo(SessionIndices=string, async_operation=bool)
    -----------------------------------------------------------------------------
    - SessionIndices (str): This parameter requires a string of session numbers 1-4;6;7-12
    - async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.

    getPccBasicAllSrv6LspLearnedInfo(Arg2=list, async_operation=bool)list
    ---------------------------------------------------------------------
    - Arg2 (list(number)): List of indices into the protocol plugin. An empty list indicates all instances in the plugin.
    - async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
    - Returns list(str): ID to associate each async action invocation

    Raises
    ------
    - NotFoundError: The requested resource does not exist on the server
    - ServerError: The server has encountered an uncategorized error condition
    """
    # Positional overload arguments become Arg2, Arg3, ... (Arg1 is self);
    # keyword arguments are passed through under their own names.
    payload = {"Arg1": self}
    for position, value in enumerate(args, start=2):
        payload["Arg%s" % position] = value
    payload.update(kwargs)
    return self._execute('getPccBasicAllSrv6LspLearnedInfo', payload=payload, response_object=None)
def GetPccBasicSrPccRequestedLspLearnedInfo(self, *args, **kwargs):
    # type: (*Any, **Any) -> Union[List[str], None]
    """Executes the getPccBasicSrPccRequestedLspLearnedInfo operation on the server.

    Gets Basic Information about SR-TE PCC Requested LSPs learnt by this PCC.

    The IxNetwork model allows for multiple method Signatures with the same name while python does not.

    getPccBasicSrPccRequestedLspLearnedInfo(async_operation=bool)
    -------------------------------------------------------------
    - async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.

    getPccBasicSrPccRequestedLspLearnedInfo(SessionIndices=list, async_operation=bool)
    ----------------------------------------------------------------------------------
    - SessionIndices (list(number)): This parameter requires an array of session numbers 1 2 3
    - async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.

    getPccBasicSrPccRequestedLspLearnedInfo(SessionIndices=string, async_operation=bool)
    ------------------------------------------------------------------------------------
    - SessionIndices (str): This parameter requires a string of session numbers 1-4;6;7-12
    - async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.

    getPccBasicSrPccRequestedLspLearnedInfo(Arg2=list, async_operation=bool)list
    ----------------------------------------------------------------------------
    - Arg2 (list(number)): List of indices into the protocol plugin. An empty list indicates all instances in the plugin.
    - async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
    - Returns list(str): ID to associate each async action invocation

    Raises
    ------
    - NotFoundError: The requested resource does not exist on the server
    - ServerError: The server has encountered an uncategorized error condition
    """
    # Positional overload arguments become Arg2, Arg3, ... (Arg1 is self);
    # keyword arguments are passed through under their own names.
    payload = {"Arg1": self}
    for position, value in enumerate(args, start=2):
        payload["Arg%s" % position] = value
    payload.update(kwargs)
    return self._execute('getPccBasicSrPccRequestedLspLearnedInfo', payload=payload, response_object=None)
def GetPccBasicSrPccSyncOrReportLspLearnedInfo(self, *args, **kwargs):
    # type: (*Any, **Any) -> Union[List[str], None]
    """Executes the getPccBasicSrPccSyncOrReportLspLearnedInfo operation on the server.

    Gets Basic Information about SR-TE PCC Sync/Report LSPs learnt by this PCC.

    The IxNetwork model allows for multiple method Signatures with the same name while python does not.

    getPccBasicSrPccSyncOrReportLspLearnedInfo(async_operation=bool)
    ----------------------------------------------------------------
    - async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.

    getPccBasicSrPccSyncOrReportLspLearnedInfo(SessionIndices=list, async_operation=bool)
    -------------------------------------------------------------------------------------
    - SessionIndices (list(number)): This parameter requires an array of session numbers 1 2 3
    - async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.

    getPccBasicSrPccSyncOrReportLspLearnedInfo(SessionIndices=string, async_operation=bool)
    ---------------------------------------------------------------------------------------
    - SessionIndices (str): This parameter requires a string of session numbers 1-4;6;7-12
    - async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.

    getPccBasicSrPccSyncOrReportLspLearnedInfo(Arg2=list, async_operation=bool)list
    -------------------------------------------------------------------------------
    - Arg2 (list(number)): List of indices into the protocol plugin. An empty list indicates all instances in the plugin.
    - async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
    - Returns list(str): ID to associate each async action invocation

    Raises
    ------
    - NotFoundError: The requested resource does not exist on the server
    - ServerError: The server has encountered an uncategorized error condition
    """
    # Positional overload arguments become Arg2, Arg3, ... (Arg1 is self);
    # keyword arguments are passed through under their own names.
    payload = {"Arg1": self}
    for position, value in enumerate(args, start=2):
        payload["Arg%s" % position] = value
    payload.update(kwargs)
    return self._execute('getPccBasicSrPccSyncOrReportLspLearnedInfo', payload=payload, response_object=None)
def GetPccBasicSrPceInitiatedLspLearnedInfo(self, *args, **kwargs):
    # type: (*Any, **Any) -> Union[List[str], None]
    """Executes the getPccBasicSrPceInitiatedLspLearnedInfo operation on the server.

    Gets Basic Information about SR-TE PCE Initiated LSPs learnt by this PCC.

    The IxNetwork model allows for multiple method Signatures with the same name while python does not.

    getPccBasicSrPceInitiatedLspLearnedInfo(async_operation=bool)
    -------------------------------------------------------------
    - async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.

    getPccBasicSrPceInitiatedLspLearnedInfo(SessionIndices=list, async_operation=bool)
    ----------------------------------------------------------------------------------
    - SessionIndices (list(number)): This parameter requires an array of session numbers 1 2 3
    - async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.

    getPccBasicSrPceInitiatedLspLearnedInfo(SessionIndices=string, async_operation=bool)
    ------------------------------------------------------------------------------------
    - SessionIndices (str): This parameter requires a string of session numbers 1-4;6;7-12
    - async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.

    getPccBasicSrPceInitiatedLspLearnedInfo(Arg2=list, async_operation=bool)list
    ----------------------------------------------------------------------------
    - Arg2 (list(number)): List of indices into the protocol plugin. An empty list indicates all instances in the plugin.
    - async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
    - Returns list(str): ID to associate each async action invocation

    Raises
    ------
    - NotFoundError: The requested resource does not exist on the server
    - ServerError: The server has encountered an uncategorized error condition
    """
    # Positional overload arguments become Arg2, Arg3, ... (Arg1 is self);
    # keyword arguments are passed through under their own names.
    payload = {"Arg1": self}
    for position, value in enumerate(args, start=2):
        payload["Arg%s" % position] = value
    payload.update(kwargs)
    return self._execute('getPccBasicSrPceInitiatedLspLearnedInfo', payload=payload, response_object=None)
def GetPccBasicSrv6PccRequestedLspLearnedInfo(self, *args, **kwargs):
    # type: (*Any, **Any) -> Union[List[str], None]
    """Executes the getPccBasicSrv6PccRequestedLspLearnedInfo operation on the server.

    Gets Basic Information about SRv6 PCC Requested LSPs learnt by this PCC.

    The IxNetwork model allows for multiple method Signatures with the same name while python does not.

    getPccBasicSrv6PccRequestedLspLearnedInfo(async_operation=bool)
    ---------------------------------------------------------------
    - async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.

    getPccBasicSrv6PccRequestedLspLearnedInfo(SessionIndices=list, async_operation=bool)
    ------------------------------------------------------------------------------------
    - SessionIndices (list(number)): This parameter requires an array of session numbers 1 2 3
    - async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.

    getPccBasicSrv6PccRequestedLspLearnedInfo(SessionIndices=string, async_operation=bool)
    --------------------------------------------------------------------------------------
    - SessionIndices (str): This parameter requires a string of session numbers 1-4;6;7-12
    - async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.

    getPccBasicSrv6PccRequestedLspLearnedInfo(Arg2=list, async_operation=bool)list
    ------------------------------------------------------------------------------
    - Arg2 (list(number)): List of indices into the protocol plugin. An empty list indicates all instances in the plugin.
    - async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
    - Returns list(str): ID to associate each async action invocation

    Raises
    ------
    - NotFoundError: The requested resource does not exist on the server
    - ServerError: The server has encountered an uncategorized error condition
    """
    # Positional overload arguments become Arg2, Arg3, ... (Arg1 is self);
    # keyword arguments are passed through under their own names.
    payload = {"Arg1": self}
    for position, value in enumerate(args, start=2):
        payload["Arg%s" % position] = value
    payload.update(kwargs)
    return self._execute('getPccBasicSrv6PccRequestedLspLearnedInfo', payload=payload, response_object=None)
def GetPccBasicSrv6PccSyncOrReportLspLearnedInfo(self, *args, **kwargs):
    # type: (*Any, **Any) -> Union[List[str], None]
    """Executes the getPccBasicSrv6PccSyncOrReportLspLearnedInfo operation on the server.

    Gets Basic Information about SRv6 PCC Sync/Report LSPs learnt by this PCC.

    The IxNetwork model allows for multiple method Signatures with the same name while python does not.

    getPccBasicSrv6PccSyncOrReportLspLearnedInfo(async_operation=bool)
    ------------------------------------------------------------------
    - async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.

    getPccBasicSrv6PccSyncOrReportLspLearnedInfo(SessionIndices=list, async_operation=bool)
    ---------------------------------------------------------------------------------------
    - SessionIndices (list(number)): This parameter requires an array of session numbers 1 2 3
    - async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.

    getPccBasicSrv6PccSyncOrReportLspLearnedInfo(SessionIndices=string, async_operation=bool)
    -----------------------------------------------------------------------------------------
    - SessionIndices (str): This parameter requires a string of session numbers 1-4;6;7-12
    - async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.

    getPccBasicSrv6PccSyncOrReportLspLearnedInfo(Arg2=list, async_operation=bool)list
    ---------------------------------------------------------------------------------
    - Arg2 (list(number)): List of indices into the protocol plugin. An empty list indicates all instances in the plugin.
    - async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
    - Returns list(str): ID to associate each async action invocation

    Raises
    ------
    - NotFoundError: The requested resource does not exist on the server
    - ServerError: The server has encountered an uncategorized error condition
    """
    # Positional overload arguments become Arg2, Arg3, ... (Arg1 is self);
    # keyword arguments are passed through under their own names.
    payload = {"Arg1": self}
    for position, value in enumerate(args, start=2):
        payload["Arg%s" % position] = value
    payload.update(kwargs)
    return self._execute('getPccBasicSrv6PccSyncOrReportLspLearnedInfo', payload=payload, response_object=None)
def GetPccBasicSrv6PceInitiatedLspLearnedInfo(self, *args, **kwargs):
    # type: (*Any, **Any) -> Union[List[str], None]
    """Executes the getPccBasicSrv6PceInitiatedLspLearnedInfo operation on the server.

    Gets Basic Information about SRv6 PCE Initiated LSPs learnt by this PCC.

    The IxNetwork model allows for multiple method Signatures with the same name while python does not.

    getPccBasicSrv6PceInitiatedLspLearnedInfo(async_operation=bool)
    ---------------------------------------------------------------
    - async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.

    getPccBasicSrv6PceInitiatedLspLearnedInfo(SessionIndices=list, async_operation=bool)
    ------------------------------------------------------------------------------------
    - SessionIndices (list(number)): This parameter requires an array of session numbers 1 2 3
    - async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.

    getPccBasicSrv6PceInitiatedLspLearnedInfo(SessionIndices=string, async_operation=bool)
    --------------------------------------------------------------------------------------
    - SessionIndices (str): This parameter requires a string of session numbers 1-4;6;7-12
    - async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.

    getPccBasicSrv6PceInitiatedLspLearnedInfo(Arg2=list, async_operation=bool)list
    ------------------------------------------------------------------------------
    - Arg2 (list(number)): List of indices into the protocol plugin. An empty list indicates all instances in the plugin.
    - async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
    - Returns list(str): ID to associate each async action invocation

    Raises
    ------
    - NotFoundError: The requested resource does not exist on the server
    - ServerError: The server has encountered an uncategorized error condition
    """
    # Positional overload arguments become Arg2, Arg3, ... (Arg1 is self);
    # keyword arguments are passed through under their own names.
    payload = {"Arg1": self}
    for position, value in enumerate(args, start=2):
        payload["Arg%s" % position] = value
    payload.update(kwargs)
    return self._execute('getPccBasicSrv6PceInitiatedLspLearnedInfo', payload=payload, response_object=None)
def GetPccLearnedInfo(self, *args, **kwargs):
    # type: (*Any, **Any) -> Union[List[str], None]
    """Executes the getPccLearnedInfo operation on the server.

    Gets Detailed Information about All SR LSPs learnt by this PCC.

    The IxNetwork model allows for multiple method Signatures with the same name while python does not.

    getPccLearnedInfo(async_operation=bool)
    ---------------------------------------
    - async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.

    getPccLearnedInfo(SessionIndices=list, async_operation=bool)
    ------------------------------------------------------------
    - SessionIndices (list(number)): This parameter requires an array of session numbers 1 2 3
    - async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.

    getPccLearnedInfo(SessionIndices=string, async_operation=bool)
    --------------------------------------------------------------
    - SessionIndices (str): This parameter requires a string of session numbers 1-4;6;7-12
    - async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.

    getPccLearnedInfo(Arg2=list, async_operation=bool)list
    ------------------------------------------------------
    - Arg2 (list(number)): List of indices into the protocol plugin.An empty list indicates all instances in the plugin.
    - async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
    - Returns list(str): ID to associate each async action invocation

    Raises
    ------
    - NotFoundError: The requested resource does not exist on the server
    - ServerError: The server has encountered an uncategorized error condition
    """
    # Positional overload arguments become Arg2, Arg3, ... (Arg1 is self);
    # keyword arguments are passed through under their own names.
    payload = {"Arg1": self}
    for position, value in enumerate(args, start=2):
        payload["Arg%s" % position] = value
    payload.update(kwargs)
    return self._execute('getPccLearnedInfo', payload=payload, response_object=None)
def GetPccSrv6LearnedInfo(self, *args, **kwargs):
    # type: (*Any, **Any) -> Union[List[str], None]
    """Executes the getPccSrv6LearnedInfo operation on the server.

    Gets Detailed Information about All SRv6 LSPs learnt by this PCC.

    The IxNetwork model allows for multiple method Signatures with the same name while python does not.

    getPccSrv6LearnedInfo(async_operation=bool)
    -------------------------------------------
    - async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.

    getPccSrv6LearnedInfo(SessionIndices=list, async_operation=bool)
    ----------------------------------------------------------------
    - SessionIndices (list(number)): This parameter requires an array of session numbers 1 2 3
    - async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.

    getPccSrv6LearnedInfo(SessionIndices=string, async_operation=bool)
    ------------------------------------------------------------------
    - SessionIndices (str): This parameter requires a string of session numbers 1-4;6;7-12
    - async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.

    getPccSrv6LearnedInfo(Arg2=list, async_operation=bool)list
    ----------------------------------------------------------
    - Arg2 (list(number)): List of indices into the protocol plugin. An empty list indicates all instances in the plugin.
    - async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
    - Returns list(str): ID to associate each async action invocation

    Raises
    ------
    - NotFoundError: The requested resource does not exist on the server
    - ServerError: The server has encountered an uncategorized error condition
    """
    # Positional overload arguments become Arg2, Arg3, ... (Arg1 is self);
    # keyword arguments are passed through under their own names.
    payload = {"Arg1": self}
    for position, value in enumerate(args, start=2):
        payload["Arg%s" % position] = value
    payload.update(kwargs)
    return self._execute('getPccSrv6LearnedInfo', payload=payload, response_object=None)
def RestartDown(self, *args, **kwargs):
# type: (*Any, **Any) -> None
"""Executes the restartDown operation on the server.
Stop and start interfaces and sessions that are in Down state.
The IxNetwork model allows for multiple method Signatures with the same name while python does not.
restartDown(async_operation=bool)
---------------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
restartDown(SessionIndices=list, async_operation=bool)
------------------------------------------------------
- SessionIndices (list(number)): This parameter requires an array of session numbers 1 2 3
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
restartDown(SessionIndices=string, async_operation=bool)
--------------------------------------------------------
- SessionIndices (str): This parameter requires a string of session numbers 1-4;6;7-12
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('restartDown', payload=payload, response_object=None)
def Start(self, *args, **kwargs):
# type: (*Any, **Any) -> None
"""Executes the start operation on the server.
Start CPF control plane (equals to promote to negotiated state).
The IxNetwork model allows for multiple method Signatures with the same name while python does not.
start(async_operation=bool)
---------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
start(SessionIndices=list, async_operation=bool)
------------------------------------------------
- SessionIndices (list(number)): This parameter requires an array of session numbers 1 2 3
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
start(SessionIndices=string, async_operation=bool)
--------------------------------------------------
- SessionIndices (str): This parameter requires a string of session numbers 1-4;6;7-12
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('start', payload=payload, response_object=None)
def Stop(self, *args, **kwargs):
# type: (*Any, **Any) -> None
"""Executes the stop operation on the server.
Stop CPF control plane (equals to demote to PreValidated-DoDDone state).
The IxNetwork model allows for multiple method Signatures with the same name while python does not.
stop(async_operation=bool)
--------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
stop(SessionIndices=list, async_operation=bool)
-----------------------------------------------
- SessionIndices (list(number)): This parameter requires an array of session numbers 1 2 3
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
stop(SessionIndices=string, async_operation=bool)
-------------------------------------------------
- SessionIndices (str): This parameter requires a string of session numbers 1-4;6;7-12
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('stop', payload=payload, response_object=None)
    def get_device_ids(self, PortNames=None, Active=None, Authentication=None, BurstInterval=None, DeadInterval=None, ErrorValue=None, KeepaliveInterval=None, LspInstantiationCapability=None, LspUpdateCapability=None, MD5Key=None, MaxLspPerPcReq=None, MaxLspsPerPcRpt=None, MaxReconnectInterval=None, MaxRequestedLspPerInterval=None, MaxSyncLspPerInterval=None, MaximumSidDepth=None, PccPpagTLVType=None, PccTEPathBindingTLVType=None, PceIpv4Address=None, RateControl=None, ReconnectInterval=None, ReturnInstantiationError=None, SrPceCapability=None, Sr_capability_n_flag=None, Sr_capability_x_flag=None, Srv6MaxSL=None, Srv6PceCapability=None, StateTimeoutInterval=None, TcpPort=None):
        """Base class infrastructure that gets a list of pcc device ids encapsulated by this object.

        Use the optional regex parameters in the method to refine the list of device ids encapsulated by this object.

        Args
        ----
        - PortNames (str): optional regex of port names
        - Active (str): optional regex of active
        - Authentication (str): optional regex of authentication
        - BurstInterval (str): optional regex of burstInterval
        - DeadInterval (str): optional regex of deadInterval
        - ErrorValue (str): optional regex of errorValue
        - KeepaliveInterval (str): optional regex of keepaliveInterval
        - LspInstantiationCapability (str): optional regex of lspInstantiationCapability
        - LspUpdateCapability (str): optional regex of lspUpdateCapability
        - MD5Key (str): optional regex of mD5Key
        - MaxLspPerPcReq (str): optional regex of maxLspPerPcReq
        - MaxLspsPerPcRpt (str): optional regex of maxLspsPerPcRpt
        - MaxReconnectInterval (str): optional regex of maxReconnectInterval
        - MaxRequestedLspPerInterval (str): optional regex of maxRequestedLspPerInterval
        - MaxSyncLspPerInterval (str): optional regex of maxSyncLspPerInterval
        - MaximumSidDepth (str): optional regex of maximumSidDepth
        - PccPpagTLVType (str): optional regex of pccPpagTLVType
        - PccTEPathBindingTLVType (str): optional regex of pccTEPathBindingTLVType
        - PceIpv4Address (str): optional regex of pceIpv4Address
        - RateControl (str): optional regex of rateControl
        - ReconnectInterval (str): optional regex of reconnectInterval
        - ReturnInstantiationError (str): optional regex of returnInstantiationError
        - SrPceCapability (str): optional regex of srPceCapability
        - Sr_capability_n_flag (str): optional regex of sr_capability_n_flag
        - Sr_capability_x_flag (str): optional regex of sr_capability_x_flag
        - Srv6MaxSL (str): optional regex of srv6MaxSL
        - Srv6PceCapability (str): optional regex of srv6PceCapability
        - StateTimeoutInterval (str): optional regex of stateTimeoutInterval
        - TcpPort (str): optional regex of tcpPort

        Returns
        -------
        - list(int): A list of device ids that meets the regex criteria provided in the method parameters

        Raises
        ------
        - ServerError: The server has encountered an uncategorized error condition
        """
        # locals() hands every filter argument (keyed by its declared parameter
        # name) to the base-class matcher in one dict. Do NOT introduce local
        # variables before this call — they would be passed along as filters too.
        return self._get_ngpf_device_ids(locals())
|
from math import atan2, sqrt, degrees, radians, cos, sin
from random import randint
class Vector2D:
    """A mutable 2-D vector with arithmetic, distance and heading helpers.

    Most methods accept flexible arguments: two numbers, a single number
    (used for both components), a list/tuple of two numbers, or another
    Vector2D instance.
    """

    #region INIT
    def _get_xy(self, args):
        """Normalise any supported argument combination into an (x, y) pair.

        Accepted forms: (), (scalar,), (sequence,), (Vector2D,), (x, y).

        Returns:
            tuple: x, y

        Raises:
            TypeError: if the arguments match none of the supported forms
                (previously this fell through and returned None, producing a
                confusing unpack error at the call site).
        """
        number_of_args = len(args)
        if number_of_args == 0:
            return 0, 0                      # no arguments -> zero vector
        if number_of_args == 2:
            return args[0], args[1]          # x and y passed separately
        if number_of_args == 1:
            arg = args[0]
            if isinstance(arg, (float, int)):
                return arg, arg              # one scalar used for both components
            if isinstance(arg, (list, tuple)):
                return arg[0], arg[1]        # sequence of two components
            if isinstance(arg, Vector2D):
                return arg.x, arg.y
        raise TypeError('cannot build a Vector2D from %r' % (args,))

    def __init__(self, *args):
        self.x, self.y = self._get_xy(args)
        self.data = {}  # free-form storage for callers to attach metadata
    #endregion

    #region AUTO CREATE METHODS
    @staticmethod
    def random_pos():
        """Returns a vector in normalised 0-1 space.

        Returns:
            Vector2D: a vector in normal space
        """
        return Vector2D(randint(0, 1000) / 1000, randint(0, 1000) / 1000)

    @staticmethod
    def random_unit():
        """Generates a unit vector with a random heading.

        Returns:
            Vector2D: unit vector
        """
        pos = Vector2D(randint(-1000, 1000), randint(-1000, 1000))
        # Re-roll the (extremely unlikely) zero vector, which cannot be normalised.
        while pos.x == 0 and pos.y == 0:
            pos.set(randint(-1000, 1000), randint(-1000, 1000))
        pos.normalise()
        return pos

    @staticmethod
    def from_angle(angle):
        """Creates a unit vector with the same heading as the angle.

        Args:
            angle (float): angle of direction in radians (0 == +x axis)

        Returns:
            Vector2D: unit vector
        """
        return Vector2D(cos(angle), sin(angle))
    #endregion

    #region CUSTOM METHODS
    def get(self):
        """Gets the x and y components truncated to an integer tuple.

        Returns:
            tuple: contains x and y as integers
        """
        return (int(self.x), int(self.y))

    def set(self, *args):
        """Sets the x and y components; accepts the same forms as __init__."""
        self.x, self.y = self._get_xy(args)

    def copy(self):
        """Gets a copy of this vector.

        Returns:
            Vector2D: a copy of this vector
        """
        return Vector2D(self.x, self.y)

    def clear(self):
        """Sets both components to 0."""
        self.x = self.y = 0
    #endregion

    #region CUSTOM MATHEMATICAL METHODS
    def dist_sqrt(self, *args):
        """Gets the Euclidean distance between this point and another.

        Returns:
            float: distance
        """
        x, y = self._get_xy(args)
        return sqrt((self.x - x) ** 2 + (self.y - y) ** 2)

    def dist(self, *args):
        """Gets the SQUARED distance between this point and another
        (no square root — cheap for comparisons).

        Returns:
            float: squared distance
        """
        x, y = self._get_xy(args)
        return (self.x - x) ** 2 + (self.y - y) ** 2

    def get_heading_angle(self):
        """Returns the heading angle in radians, 0 aligned with the +x axis.

        Bug fix: atan2 takes (y, x); the previous (x, y) order measured the
        angle from the y axis and disagreed with from_angle().

        Returns:
            float: angle in radians
        """
        return atan2(self.y, self.x)

    def get_magnitude(self):
        """Gets the magnitude/length of the vector.

        Returns:
            float: magnitude
        """
        return sqrt(self.x ** 2 + self.y ** 2)

    def normalise(self):
        """Normalises this vector making it a unit vector.

        Undefined (ZeroDivisionError) for a zero-length vector.
        """
        self.div(self.get_magnitude())

    def normalize(self):
        """Alias of normalise() (American spelling)."""
        self.normalise()

    def truncate(self, max_val):
        """Clamps the x and y components to be in range -max_val to max_val.

        Args:
            max_val (float): max and min for each component
        """
        if self.x > max_val: self.x = max_val
        if self.y > max_val: self.y = max_val
        if self.x < -max_val: self.x = -max_val
        if self.y < -max_val: self.y = -max_val

    def add(self, *args):
        """In-place component-wise addition."""
        x, y = self._get_xy(args)
        self.x += x
        self.y += y

    def sub(self, *args):
        """In-place component-wise subtraction.

        Bug fix: this previously used /= (division) instead of subtraction.
        """
        x, y = self._get_xy(args)
        self.x -= x
        self.y -= y

    def mult(self, *args):
        """In-place component-wise multiplication."""
        x, y = self._get_xy(args)
        self.x *= x
        self.y *= y

    def div(self, *args):
        """In-place component-wise division."""
        x, y = self._get_xy(args)
        self.x /= x
        self.y /= y

    def linear_interpolate(self, *args, t=0.5):
        """Linearly interpolates between current position and passed in position.

        Args:
            t (float, optional): interpolation fraction. Defaults to 0.5.
        """
        x, y = self._get_xy(args)
        self.set(self.x + t * (x - self.x), self.y + t * (y - self.y))

    def dot_product(self, *args):
        """Dot product of this and another vector.

        Returns:
            float: dot product result
        """
        x, y = self._get_xy(args)
        return self.x * x + self.y * y
    #endregion

    #region MAGIC METHODS
    def __iadd__(self, *args):
        self.add(*args)
        return self

    def __isub__(self, *args):
        # Bug fix: this previously divided instead of subtracting.
        self.sub(*args)
        return self

    def __imul__(self, *args):
        self.mult(*args)
        return self

    def __idiv__(self, *args):
        self.div(*args)
        return self

    def __add__(self, *args):
        x, y = self._get_xy(args)
        return Vector2D(self.x + x, self.y + y)

    def __sub__(self, *args):
        x, y = self._get_xy(args)
        return Vector2D(self.x - x, self.y - y)

    def __mul__(self, *args):
        x, y = self._get_xy(args)
        return Vector2D(self.x * x, self.y * y)

    def __div__(self, *args):
        x, y = self._get_xy(args)
        return Vector2D(self.x / x, self.y / y)

    def __truediv__(self, *args):
        # __div__ only exists in Python 2; wire up the Python 3 protocol too.
        return self.__div__(*args)
    #endregion
class Vector3D:
    """A mutable 3-D vector with arithmetic and distance helpers.

    Most methods accept flexible arguments: three numbers, a single number
    (used for all components), a list/tuple of three numbers, or another
    Vector3D instance.
    """

    #region INIT
    def _get_xyz(self, args):
        """Normalise any supported argument combination into an (x, y, z) tuple.

        Accepted forms: (), (scalar,), (sequence,), (Vector3D,), (x, y, z).

        Returns:
            tuple: x, y, z

        Raises:
            TypeError: if the arguments match none of the supported forms
                (previously this fell through and returned None).
        """
        number_of_args = len(args)
        if number_of_args == 0:
            return 0, 0, 0                       # no arguments -> zero vector
        if number_of_args == 3:
            return args[0], args[1], args[2]     # x, y and z passed separately
        if number_of_args == 1:
            arg = args[0]
            if isinstance(arg, (float, int)):
                return arg, arg, arg             # one scalar used for all components
            if isinstance(arg, (list, tuple)):
                return arg[0], arg[1], arg[2]    # sequence of three components
            if isinstance(arg, Vector3D):
                return arg.x, arg.y, arg.z
        raise TypeError('cannot build a Vector3D from %r' % (args,))

    def __init__(self, *args):
        self.x, self.y, self.z = self._get_xyz(args)
        self.data = {}  # free-form storage for callers to attach metadata
    #endregion

    #region AUTO CREATE METHODS
    @staticmethod
    def random_pos():
        """Returns a vector in normalised 0-1 space.

        Returns:
            Vector3D: a vector in normal space
        """
        return Vector3D(randint(0, 1000) / 1000, randint(0, 1000) / 1000,
                        randint(0, 1000) / 1000)

    @staticmethod
    def random_unit():
        """Generates a unit vector with a random heading.

        Bug fix: this previously constructed a Vector2D from three components,
        which always failed.

        Returns:
            Vector3D: unit vector
        """
        pos = Vector3D(randint(-1000, 1000), randint(-1000, 1000), randint(-1000, 1000))
        # Re-roll the (extremely unlikely) zero vector, which cannot be normalised.
        while pos.x == 0 and pos.y == 0 and pos.z == 0:
            pos.set(randint(-1000, 1000), randint(-1000, 1000), randint(-1000, 1000))
        pos.normalise()
        return pos
    #endregion

    #region CUSTOM METHODS
    def get(self):
        """Gets the components truncated to an integer tuple.

        Returns:
            tuple: contains x, y and z as integers
        """
        return (int(self.x), int(self.y), int(self.z))

    def set(self, *args):
        """Sets the x, y and z components; accepts the same forms as __init__."""
        self.x, self.y, self.z = self._get_xyz(args)

    def copy(self):
        """Gets a copy of this vector.

        Bug fix: this previously constructed a Vector2D with three arguments,
        which always failed.

        Returns:
            Vector3D: a copy of this vector
        """
        return Vector3D(self.x, self.y, self.z)

    def clear(self):
        """Sets all components to 0."""
        self.x = self.y = self.z = 0
    #endregion

    #region CUSTOM MATHEMATICAL METHODS
    def dist_sqrt(self, *args):
        """Gets the Euclidean distance between this point and another.

        Returns:
            float: distance
        """
        x, y, z = self._get_xyz(args)
        return sqrt((self.x - x) ** 2 + (self.y - y) ** 2 + (self.z - z) ** 2)

    def dist(self, *args):
        """Gets the SQUARED distance between this point and another
        (no square root — cheap for comparisons).

        Returns:
            float: squared distance
        """
        x, y, z = self._get_xyz(args)
        return (self.x - x) ** 2 + (self.y - y) ** 2 + (self.z - z) ** 2

    def get_magnitude(self):
        """Gets the magnitude/length of the vector.

        Returns:
            float: magnitude
        """
        return sqrt(self.x ** 2 + self.y ** 2 + self.z ** 2)

    def normalise(self):
        """Normalises this vector making it a unit vector.

        Undefined (ZeroDivisionError) for a zero-length vector.
        """
        self.div(self.get_magnitude())

    def normalize(self):
        """Alias of normalise() (American spelling)."""
        self.normalise()

    def truncate(self, max_val):
        """Clamps each component to be in range -max_val to max_val.

        Args:
            max_val (float): max and min for each component
        """
        if self.x > max_val: self.x = max_val
        if self.y > max_val: self.y = max_val
        if self.z > max_val: self.z = max_val
        if self.x < -max_val: self.x = -max_val
        if self.y < -max_val: self.y = -max_val
        if self.z < -max_val: self.z = -max_val

    def add(self, *args):
        """In-place component-wise addition."""
        x, y, z = self._get_xyz(args)
        self.x += x
        self.y += y
        self.z += z

    def sub(self, *args):
        """In-place component-wise subtraction.

        Bug fix: this previously used /= (division) instead of subtraction.
        """
        x, y, z = self._get_xyz(args)
        self.x -= x
        self.y -= y
        self.z -= z

    def mult(self, *args):
        """In-place component-wise multiplication."""
        x, y, z = self._get_xyz(args)
        self.x *= x
        self.y *= y
        self.z *= z

    def div(self, *args):
        """In-place component-wise division."""
        x, y, z = self._get_xyz(args)
        self.x /= x
        self.y /= y
        self.z /= z

    def linear_interpolate(self, *args, t=0.5):
        """Linearly interpolates between current position and passed in position.

        Bug fix: the z component previously interpolated towards y.

        Args:
            t (float, optional): interpolation fraction. Defaults to 0.5.
        """
        x, y, z = self._get_xyz(args)
        self.set(self.x + t * (x - self.x),
                 self.y + t * (y - self.y),
                 self.z + t * (z - self.z))
    #endregion

    #region MAGIC METHODS
    def __iadd__(self, *args):
        self.add(*args)
        return self

    def __isub__(self, *args):
        # Bug fix: this previously divided instead of subtracting.
        self.sub(*args)
        return self

    def __imul__(self, *args):
        self.mult(*args)
        return self

    def __idiv__(self, *args):
        self.div(*args)
        return self

    def __add__(self, *args):
        x, y, z = self._get_xyz(args)
        return Vector3D(self.x + x, self.y + y, self.z + z)

    def __sub__(self, *args):
        x, y, z = self._get_xyz(args)
        return Vector3D(self.x - x, self.y - y, self.z - z)

    def __mul__(self, *args):
        x, y, z = self._get_xyz(args)
        return Vector3D(self.x * x, self.y * y, self.z * z)

    def __div__(self, *args):
        x, y, z = self._get_xyz(args)
        return Vector3D(self.x / x, self.y / y, self.z / z)

    def __truediv__(self, *args):
        # __div__ only exists in Python 2; wire up the Python 3 protocol too.
        return self.__div__(*args)
    #endregion
|
# -*- coding: utf-8 -*-
"""
Created on 17-03-2019 at 02:42 PM
@author: Vivek
"""
# Return the first even number, or -1 if the list contains no even number.
def findEven(listOfNums):
    """Return the first even number in *listOfNums*, or -1 if none exists."""
    return next((number for number in listOfNums if number % 2 == 0), -1)
if __name__ == '__main__':
    # Demonstrate both outcomes: a list that contains an even number (prints 4)
    # and one that does not (prints -1).
    for sample in ([1, 7, 3, 4, 5], [1, 1, 3, 5, 5]):
        print('Return Value:', findEven(sample))
|
from flask_restplus import fields
from app import app_api
# flask-restplus model describing the common error payload: a human-readable
# 'status' plus the machine-readable 'statusCode', both required strings.
# Registered on the shared Api instance so endpoints can marshal/document it.
BaseError = app_api.model('BaseError', {
    'status': fields.String(required=True, description='Status'),
    'statusCode': fields.String(required=True, description='Status code')
})
|
# This is a sample Python script.
import os
# Press Shift+F10 to execute it or replace it with your code.
# Press Double Shift to search everywhere for classes, files, tool windows, actions, and settings.
import selenium.common.exceptions
import sys
from selenium import webdriver
import webbrowser
from selenium.common.exceptions import StaleElementReferenceException
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
def get_driver():
    """Launch Chrome, open the Dankook survey portal and return the driver.

    Returns None — after pointing the user at the chromedriver download
    page — when the chromedriver executable cannot be found.
    """
    try:
        if getattr(sys, 'frozen', False):
            # Running as a PyInstaller bundle: chromedriver is unpacked
            # next to the executable in the _MEIPASS temp directory.
            browser = webdriver.Chrome(os.path.join(sys._MEIPASS, "chromedriver.exe"))
        else:
            browser = webdriver.Chrome()
        browser.get('https://webinfo.dankook.ac.kr/comm/surv/surp/views/findSrvshInfoBasList.do?_view=ok')
        browser.implicitly_wait(0.5)
        return browser
    except FileNotFoundError:
        # Tell the user (in Korean) to download a matching chromedriver
        # into the program directory, then open the download page.
        print('Chromedriver를 찾을 수 없습니다.')
        print('웹사이트에 접속합니다. 이 사이트에서 프로그램과 같은 디렉토리에 다운로드하세요.')
        print('현재 Chrome의 버전과 일치한 Chromedriver를 다운로드해야 합니다.')
        webbrowser.open("https://chromedriver.chromium.org/downloads")
        return None
def try_login(driver):
    """Block until the user has logged in through the portal page.

    The login input is replaced when authentication succeeds, so waiting
    for it to go stale signals a completed login.
    """
    while True:
        username_field = driver.find_element(By.CSS_SELECTOR, 'input#username')
        try:
            WebDriverWait(driver, timeout=120).until(EC.staleness_of(username_field))
            return
        except StaleElementReferenceException:
            # The element went stale while (re)locating it — pause briefly
            # and re-check, mirroring the original retry behaviour.
            driver.implicitly_wait(1)
def assure_able_to_enter_attendance(driver):
    """Navigate from the portal main screen to the attendance-check page,
    accepting any alert that pops up along the way."""
    driver.implicitly_wait(0.5)
    # Main screen -> academic information
    link = driver.find_element(By.CSS_SELECTOR, '#WUNIV > .ico_school')
    link.click()
    driver.implicitly_wait(0.5)
    # Academic information -> class management -> lecture attendance -> attendance check
    side_link = driver.find_element(By.CSS_SELECTOR, '#WLSSN > a')
    side_link.click()
    driver.implicitly_wait(0.5)
    side_link2 = driver.find_element(By.CSS_SELECTOR, '#WBZTM')
    side_link2_link = side_link2.find_element(By.CSS_SELECTOR, 'a')
    side_link2_link.click()
    driver.implicitly_wait(0.5)
    # First entry of the expanded submenu is the attendance-check view.
    side_link3_list = driver.find_elements(By.CSS_SELECTOR, '#WBZTM > ul > li')
    side_link3_link = side_link3_list[0]
    side_link3_link.click()
    driver.implicitly_wait(0.5)
    try:
        # NOTE(review): EC.alert_is_present() returns a condition object, which
        # is always truthy — it is never actually evaluated against the driver
        # here. Presumably this relies on switch_to.alert raising when no alert
        # exists, which the bare except below then swallows; confirm, and
        # consider narrowing the except to NoAlertPresentException.
        if EC.alert_is_present():
            alert = driver.switch_to.alert
            alert.accept()
    except:
        return
    driver.implicitly_wait(0.5)
    return
def go_to_ability_survey(driver):
    """Find the 2022 competency-survey row in the survey table and open it."""
    survey_table = driver.find_elements(By.CSS_SELECTOR, 'table.tbl_striped > tbody > tr')
    driver.implicitly_wait(0.5)
    survey_link = None
    # 1-based row counter; the matching row's join button is id'd joinBtn<n>.
    index_num = 0
    for row in survey_table:
        index_num += 1
        if row.find_element(By.CLASS_NAME, 'ta_l').text == '2022학년도 역량진단검사(1차)':
            survey_link = row
            break
    try:
        # NOTE(review): if no row matched, survey_link is still None and the
        # attribute access below raises — the bare except then returns quietly,
        # so a missing survey is indistinguishable from a click failure.
        survey_link_btn = survey_link.find_element(By.CSS_SELECTOR, f'button#joinBtn{index_num}')
        survey_link_btn.click()
    except:
        return
    driver.implicitly_wait(0.5)
    return
def reply_to_survey_questions(driver):
    """Automatically answer every multiple-choice question on the survey form."""
    # Questions that are phrased negatively — these get the FIRST option
    # instead of the last one so the overall response stays consistent.
    negative_question_number_list = ['문항6.', '문항7.', '문항14.', '문항35.', '문항36.']
    current_question_number = ''
    form = driver.find_elements(By.CSS_SELECTOR, 'form#surpListWrapper > div.items_wrap')
    driver.implicitly_wait(1)
    # Tick a checkbox for each question, minding negatively-phrased questions
    # and skipping free-text items and instruction blocks.
    for item in form:
        # Collect the checkbox options for this item.
        answer_list = item.find_elements(By.CSS_SELECTOR, 'div > div.form_inline > div.form_chck')
        # Read the question number label; items without one (e.g. section
        # headers) are skipped.
        try:
            current_question_number = item.find_element(By.CSS_SELECTOR, 'p.tit_q > strong').text
        except selenium.common.exceptions.NoSuchElementException:
            continue
        if len(answer_list) > 0:  # not a free-text item or an instruction block
            if current_question_number not in negative_question_number_list:
                answer_list[-1].click()   # highest-agreement option
            else:
                answer_list[0].click()    # inverted scale for negative questions
        driver.implicitly_wait(0.05)
    return
# Press the green button in the gutter to run the script.
if __name__ == '__main__':
    driver = get_driver()  # Open Chrome and load the portal login page.
    # Idiom fix: compare against None with `is not`, not `!=`.
    if driver is not None:  # get_driver() returns None when chromedriver is missing.
        try_login(driver)  # Wait until the user has logged in.
        assure_able_to_enter_attendance(driver)  # Open the attendance-check page.
        go_to_ability_survey(driver)  # Open the competency-survey page.
        reply_to_survey_questions(driver)
import os
import unittest
import numpy as np
import pytest
from tests.fixtures.algorithms import DeviatingFromMedian
from timeeval.adapters import MultivarAdapter
from timeeval.adapters.multivar import AggregationMethod
class TestMultivarAdapter(unittest.TestCase):
    """Verifies that MultivarAdapter reduces per-channel scores with the
    requested aggregation method (mean / median / max, serial and parallel)."""

    def setUp(self) -> None:
        np.random.seed(4444)
        self.X = np.random.rand(100, 3)
        # Reference per-channel scores: absolute deviation from the column
        # median, normalised per channel — the tests assert the adapter's
        # output equals the aggregation of these.
        deviation = np.abs(self.X - np.median(self.X, axis=0))
        self.y = deviation / deviation.max(axis=0)
        self.y_median = np.median(self.y, axis=1)
        self.y_mean = np.mean(self.y, axis=1)
        self.y_max = np.max(self.y, axis=1)

    def test_multivar_deviating_from_median_mean(self):
        adapter = MultivarAdapter(DeviatingFromMedian(), AggregationMethod.MEAN)
        scores = adapter(self.X)
        np.testing.assert_array_equal(self.y_mean, scores)
        self.assertEqual(len(self.X), len(scores))

    def test_multivar_deviating_from_median_median(self):
        adapter = MultivarAdapter(DeviatingFromMedian(), AggregationMethod.MEDIAN)
        scores = adapter(self.X)
        np.testing.assert_array_equal(self.y_median, scores)
        self.assertEqual(len(self.X), len(scores))

    def test_multivar_deviating_from_median_max(self):
        adapter = MultivarAdapter(DeviatingFromMedian(), AggregationMethod.MAX)
        scores = adapter(self.X)
        np.testing.assert_array_equal(self.y_max, scores)
        self.assertEqual(len(self.X), len(scores))

    @pytest.mark.skipif(condition=os.getenv("CI", "false") == "true", reason="CI never finishes on sopedu")
    def test_multivar_deviating_from_median_parallel(self):
        adapter = MultivarAdapter(DeviatingFromMedian(), AggregationMethod.MEAN, n_jobs=2)
        scores = adapter(self.X)
        np.testing.assert_array_equal(self.y_mean, scores)
        self.assertEqual(len(self.X), len(scores))
|
# python standard library
import unittest
# the tuna
from tuna.commands.iperf.sumparser import SumParser
# Captured stdout of a 4-thread iperf TCP client run (10 one-second intervals),
# split into individual lines. The [SUM] rows carry the aggregate bandwidth
# values that SumParser is expected to extract in the tests below.
test_output = """
------------------------------------------------------------
Client connecting to 192.168.103.17, TCP port 5001
TCP window size: 22.9 KByte (default)
------------------------------------------------------------
[ 6] local 192.168.103.50 port 57388 connected with 192.168.103.17 port 5001
[ 5] local 192.168.103.50 port 57386 connected with 192.168.103.17 port 5001
[ 3] local 192.168.103.50 port 57387 connected with 192.168.103.17 port 5001
[ 4] local 192.168.103.50 port 57385 connected with 192.168.103.17 port 5001
[ ID] Interval Transfer Bandwidth
[ 6] 0.0- 1.0 sec 28.5 MBytes 239 Mbits/sec
[ 5] 0.0- 1.0 sec 28.2 MBytes 237 Mbits/sec
[ 3] 0.0- 1.0 sec 28.2 MBytes 237 Mbits/sec
[ 4] 0.0- 1.0 sec 29.1 MBytes 244 Mbits/sec
[SUM] 0.0- 1.0 sec 114 MBytes 957 Mbits/sec
[ 5] 1.0- 2.0 sec 28.0 MBytes 235 Mbits/sec
[ 6] 1.0- 2.0 sec 28.1 MBytes 236 Mbits/sec
[ 3] 1.0- 2.0 sec 28.0 MBytes 235 Mbits/sec
[ 4] 1.0- 2.0 sec 28.0 MBytes 235 Mbits/sec
[SUM] 1.0- 2.0 sec 112 MBytes 941 Mbits/sec
[ 5] 2.0- 3.0 sec 28.1 MBytes 236 Mbits/sec
[ 4] 2.0- 3.0 sec 27.8 MBytes 233 Mbits/sec
[ 6] 2.0- 3.0 sec 28.0 MBytes 235 Mbits/sec
[ 3] 2.0- 3.0 sec 28.0 MBytes 235 Mbits/sec
[SUM] 2.0- 3.0 sec 112 MBytes 938 Mbits/sec
[ 4] 3.0- 4.0 sec 27.9 MBytes 234 Mbits/sec
[ 6] 3.0- 4.0 sec 27.9 MBytes 234 Mbits/sec
[ 5] 3.0- 4.0 sec 28.0 MBytes 235 Mbits/sec
[ 3] 3.0- 4.0 sec 27.9 MBytes 234 Mbits/sec
[SUM] 3.0- 4.0 sec 112 MBytes 936 Mbits/sec
[ 3] 4.0- 5.0 sec 27.9 MBytes 234 Mbits/sec
[ 4] 4.0- 5.0 sec 28.0 MBytes 235 Mbits/sec
[ 6] 4.0- 5.0 sec 28.0 MBytes 235 Mbits/sec
[ 5] 4.0- 5.0 sec 28.0 MBytes 235 Mbits/sec
[SUM] 4.0- 5.0 sec 112 MBytes 938 Mbits/sec
[ 6] 5.0- 6.0 sec 28.0 MBytes 235 Mbits/sec
[ 5] 5.0- 6.0 sec 27.9 MBytes 234 Mbits/sec
[ 3] 5.0- 6.0 sec 28.0 MBytes 235 Mbits/sec
[ 4] 5.0- 6.0 sec 28.1 MBytes 236 Mbits/sec
[SUM] 5.0- 6.0 sec 112 MBytes 940 Mbits/sec
[ 4] 6.0- 7.0 sec 27.8 MBytes 233 Mbits/sec
[ 6] 6.0- 7.0 sec 27.9 MBytes 234 Mbits/sec
[ 5] 6.0- 7.0 sec 28.0 MBytes 235 Mbits/sec
[ 3] 6.0- 7.0 sec 27.9 MBytes 234 Mbits/sec
[SUM] 6.0- 7.0 sec 112 MBytes 935 Mbits/sec
[ 4] 7.0- 8.0 sec 28.0 MBytes 235 Mbits/sec
[ 6] 7.0- 8.0 sec 28.0 MBytes 235 Mbits/sec
[ 5] 7.0- 8.0 sec 27.9 MBytes 234 Mbits/sec
[ 3] 7.0- 8.0 sec 28.2 MBytes 237 Mbits/sec
[SUM] 7.0- 8.0 sec 112 MBytes 941 Mbits/sec
[ 4] 8.0- 9.0 sec 28.0 MBytes 235 Mbits/sec
[ 6] 8.0- 9.0 sec 28.0 MBytes 235 Mbits/sec
[ 5] 8.0- 9.0 sec 28.0 MBytes 235 Mbits/sec
[ 3] 8.0- 9.0 sec 28.0 MBytes 235 Mbits/sec
[SUM] 8.0- 9.0 sec 112 MBytes 940 Mbits/sec
[ 6] 9.0-10.0 sec 27.9 MBytes 234 Mbits/sec
[ 6] 0.0-10.0 sec 280 MBytes 235 Mbits/sec
[ 5] 9.0-10.0 sec 28.0 MBytes 235 Mbits/sec
[ 5] 0.0-10.0 sec 280 MBytes 235 Mbits/sec
[ 4] 0.0-10.0 sec 281 MBytes 236 Mbits/sec
[ 3] 9.0-10.0 sec 27.9 MBytes 234 Mbits/sec
[ 3] 0.0-10.0 sec 280 MBytes 235 Mbits/sec
[SUM] 0.0-10.0 sec 1.10 GBytes 940 Mbits/sec
""".split('\n')
class TestSumParser(unittest.TestCase):
    """Unit tests for SumParser, which extracts the [SUM] bandwidth values
    from multi-threaded iperf client output."""

    def setUp(self):
        # Fresh parser with default settings for every test.
        self.parser = SumParser()
        return

    def test_constructor(self):
        """
        Does it build with the right defaults?
        """
        self.assertEqual(self.parser.threads,4)
        self.assertEqual(self.parser.expected_interval, 1)
        self.assertEqual(self.parser.interval_tolerance, 0.1)
        self.assertEqual(self.parser.units, 'Mbits')
        self.assertEqual(self.parser.maximum, 1000000000)
        return

    def test_test_string(self):
        """
        Does it correctly get the SUMS?
        """
        # Per-interval [SUM] bandwidths from test_output above; the final
        # 0.0-10.0 aggregate line is expected to be excluded.
        expected = [957,941,938,936,938,940,935,941,940]
        for line in test_output:
            self.parser(line)
        # NOTE(review): if `intervals` is a dict, `.values()` is a view under
        # Python 3 and never compares equal to a list — presumably this suite
        # targets Python 2 (or intervals is a custom type); confirm, or wrap
        # the left-hand side in list(...).
        self.assertEqual(self.parser.intervals.values(), expected)
        return

    def test_search(self):
        """
        Does it match the SUM line?
        """
        line = '[SUM] 0.0- 1.0 sec 113 MBytes 951 Mbits/sec\n'
        match = self.parser.search(line)
        self.assertIsNotNone(match)
|
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
from scrapy.item import Item, Field
class ExampleItem(Item):
    """Scrapy item holding the fields populated by the example spider."""
    # Field() declares a scraped key; it carries no type or metadata here.
    examplefield = Field()
|
'''Copyright 2018 Province of British Columbia
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.'''
from app.models.bookings import Base
from qsystem import db
from sqlalchemy_utc import UtcDateTime
from sqlalchemy import func, or_
from datetime import datetime, timedelta
from dateutil.parser import parse
from dateutil import tz
class Appointment(Base):
appointment_id = db.Column(db.Integer, primary_key=True, autoincrement=True, nullable=False)
office_id = db.Column(db.Integer, db.ForeignKey("office.office_id"), nullable=False)
service_id = db.Column(db.Integer, db.ForeignKey("service.service_id"), nullable=True)
citizen_id = db.Column(db.Integer, db.ForeignKey("citizen.citizen_id"), nullable=True)
start_time = db.Column(UtcDateTime, nullable=False)
end_time = db.Column(UtcDateTime, nullable=False)
checked_in_time = db.Column(UtcDateTime, nullable=True)
comments = db.Column(db.String(255), nullable=True)
citizen_name = db.Column(db.String(255), nullable=False)
contact_information = db.Column(db.String(255), nullable=True)
blackout_flag = db.Column(db.String(1), default='N', nullable=False)
recurring_uuid = db.Column(db.String(255), nullable=True)
online_flag = db.Column(db.Boolean(), nullable=True, default=False)
office = db.relationship("Office")
service = db.relationship("Service")
def __repr__(self):
return '<Appointment ID: (name={self.appointment_id!r})>'.format(self=self)
def __init__(self, **kwargs):
super(Appointment, self).__init__(**kwargs)
@classmethod
def find_appointment_availability(cls, office_id: int, timezone:str, first_date: datetime, last_date: datetime):
"""Find appointment availability for dates in a month"""
query = db.session.query(Appointment).filter(func.date_trunc('day', func.timezone(timezone, Appointment.start_time)).between(func.date_trunc('day', func.timezone(timezone, first_date)), func.date_trunc('day', func.timezone(timezone, last_date))))
query = query.filter(Appointment.office_id == office_id)
query = query.order_by(Appointment.start_time.asc())
return query.all()
@classmethod
def find_next_day_appointments(cls):
"""Find next day appointments."""
from app.models.theq import Office, PublicUser, Citizen, Timezone
tomorrow = datetime.now() + timedelta(days=1)
tomorrow = tomorrow.astimezone(tz.tzlocal())
query = db.session.query(Appointment, Office, Timezone, PublicUser). \
join(Citizen, Citizen.citizen_id == Appointment.citizen_id). \
join(Office, Office.office_id == Appointment.office_id). \
join(Timezone, Timezone.timezone_id == Office.timezone_id). \
outerjoin(PublicUser, PublicUser.user_id == Citizen.user_id). \
filter(func.date_trunc('day',
func.timezone(Timezone.timezone_name,Appointment.start_time)) ==
func.date_trunc('day', tomorrow))
return query.all()
@classmethod
def get_appointment_conflicts(cls, office_id: int, start_time: str, end_time: str, appointment_id=None):
"""Find appointment availability for dates in a month"""
from app.models.theq import Office, PublicUser, Citizen, Timezone
start_datetime = parse(start_time)
end_datetime = parse(end_time)
start_time_1 = start_datetime
end_time_1 = end_datetime - timedelta(minutes=1)
start_time_2 = start_datetime + timedelta(minutes=1)
end_time_2 = end_datetime
query = db.session.query(Appointment, Office, Timezone, PublicUser). \
join(Office, Office.office_id == Appointment.office_id). \
join(Timezone, Timezone.timezone_id == Office.timezone_id). \
join(Citizen, Citizen.citizen_id == Appointment.citizen_id). \
outerjoin(PublicUser, PublicUser.user_id == Citizen.user_id). \
filter(or_(Appointment.start_time.between(start_time_1, end_time_1), Appointment.end_time.between( start_time_2, end_time_2)))
query = query.filter(Appointment.office_id == office_id)
if appointment_id:
query = query.filter(Appointment.appointment_id != appointment_id)
return query.all()
@classmethod
def find_by_username_and_office_id(cls, office_id: int, user_name: str, start_time, timezone, appointment_id=None):
    """Find not-yet-checked-in appointments for *user_name* at an office on
    the same local calendar day as *start_time*.

    :param office_id: office to search.
    :param user_name: PublicUser.username to match.
    :param start_time: ISO datetime string for the target day.
    :param timezone: timezone name used to localize the day comparison.
    :param appointment_id: when given, exclude this appointment id.
    """
    from app.models.theq import PublicUser, Citizen
    start_datetime = parse(start_time)
    query = db.session.query(Appointment). \
        join(Citizen). \
        join(PublicUser). \
        filter(Appointment.citizen_id == Citizen.citizen_id). \
        filter(Citizen.user_id == PublicUser.user_id). \
        filter(func.date_trunc('day', func.timezone(timezone, Appointment.start_time)) == (func.date_trunc('day', func.timezone(timezone, start_datetime)))). \
        filter(Appointment.office_id == office_id). \
        filter(PublicUser.username == user_name). \
        filter(Appointment.checked_in_time.is_(None))
    if appointment_id:
        query = query.filter(Appointment.appointment_id != appointment_id)
    return query.all()
@classmethod
def delete_appointments(cls, appointment_ids: list):
    """Bulk-delete all appointments whose ids are in *appointment_ids*.

    Executes a single DELETE ... WHERE id IN (...) statement and commits
    immediately; ORM-level cascades/events are bypassed by design.
    """
    delete_qry = Appointment.__table__.delete().where(Appointment.appointment_id.in_(appointment_ids))
    db.session.execute(delete_qry)
    db.session.commit()
|
# NOTE(review): formatter-fixture snippet — e1234123412341234, _winapi and
# _check_timeout are undefined in this file; not runnable as-is.
if e1234123412341234.winerror not in (_winapi.ERROR_SEM_TIMEOUT,
    _winapi.ERROR_PIPE_BUSY) or _check_timeout(t):
    pass
class X:
    def get_help_text(self):
        # ngettext picks the singular/plural template based on min_length,
        # then %-formatting substitutes the actual value.
        return ngettext(
            "Your password must contain at least %(min_length)d character.",
            "Your password must contain at least %(min_length)d characters.",
            self.min_length,
        ) % {'min_length': self.min_length}
class A:
    def b(self):
        # True only for MariaDB with version strictly between 10.4.3 and 10.5.2.
        if self.connection.mysql_is_mariadb and (
            10,
            4,
            3,
        ) < self.connection.mysql_version < (10, 5, 2):
            pass
# output
# Reformatted ("output") variant of the same fixture condition.
if (
    e1234123412341234.winerror
    not in (
        _winapi.ERROR_SEM_TIMEOUT,
        _winapi.ERROR_PIPE_BUSY,
    )
    or _check_timeout(t)
):
    pass
class X:
    def get_help_text(self):
        # Reformatted variant: same ngettext + %-formatting expression.
        return (
            ngettext(
                "Your password must contain at least %(min_length)d character.",
                "Your password must contain at least %(min_length)d characters.",
                self.min_length,
            )
            % {"min_length": self.min_length}
        )
class A:
    def b(self):
        # Reformatted variant of the MariaDB version-range check.
        if (
            self.connection.mysql_is_mariadb
            and (
                10,
                4,
                3,
            )
            < self.connection.mysql_version
            < (10, 5, 2)
        ):
            pass
|
# NOTE(review): fixture snippet — condition1, test1, condition and test2
# are undefined names here.
while condition1:
    pass
test1()
if condition:
    pass
else:
    test2()
import unittest
import datetime
import os
from pickled_database.database import PickledDatabase
class TestPickledDatabaseA(unittest.TestCase):
    """Unit tests for PickledDatabase: key creation, value validation ("tests"
    callables), set/get, and last-set timestamp tracking."""

    def setUp(self):
        # Fresh on-disk database per test; clear any state from earlier runs.
        self.file = 'test_db.pkl'
        self.db = PickledDatabase(self.file)
        self.db.clear_database()

    def tearDown(self) -> None:
        os.remove(self.file)

    def test_key_creation(self):
        # Created keys are visible via __contains__; unknown keys are not.
        for k in ['key1', 'key2']:
            self.db.create_key(k)
            self.assertTrue(k in self.db)
        self.assertFalse('key-foo' in self.db)

    def test_setting_keys(self):
        self.db.create_key('key1')
        self.db.set('key1', 1)
        self.assertEqual(1, self.db.get('key1'))

    def test_dont_allow_duplicate_keys(self):
        # Re-creating an existing key must raise KeyError.
        for k in ['key1', 'key2']:
            self.db.create_key(k)
            self.assertRaises(
                KeyError,
                self.db.create_key,
                k
            )

    def test_error_in_tests_from_create_key(self):
        # Validators attached at create_key time reject bad values on set().
        self.db.create_key('key1', 1, lambda x: type(x) is int)
        self.assertRaises(
            ValueError,
            self.db.set,
            'key1',
            'error'
        )
        # A list of validators: all must pass.
        self.db.create_key('key2', 2, [
            lambda x: type(x) is int,
            lambda x: x > 0,
        ]
        )
        self.assertRaises(
            ValueError,
            self.db.set,
            'key2',
            -1
        )

    def test_error_in_tests_from_setter(self):
        # Same validation behavior when validators are given via tests=.
        self.db.create_key('key1', tests=lambda x: type(x) is int)
        self.assertRaises(
            ValueError,
            self.db.set,
            'key1',
            'error'
        )
        self.db.create_key('key2', tests=[
            lambda x: type(x) is int,
            lambda x: x > 0,
        ]
        )
        self.assertRaises(
            ValueError,
            self.db.set,
            'key2',
            -1
        )

    def test_key_fails_test_on_create_not_added(self):
        # If the initial value fails validation, the key must not be created.
        self.assertRaises(
            ValueError,
            self.db.create_key,
            'key1',
            1,
            lambda x: type(x) is str
        )
        self.assertFalse('key1' in self.db)
        self.assertRaises(
            ValueError,
            self.db.create_key,
            'key2',
            'hi',
            [
                lambda x: type(x) is str,
                lambda x: len(x) > 10,
            ]
        )
        self.assertFalse('key2' in self.db)

    def test_unset_key(self):
        # get() on an unknown key -> KeyError; on a created-but-unset key -> ValueError.
        self.assertRaises(
            KeyError,
            self.db.get,
            'foo'
        )
        self.db.create_key('key')
        self.assertRaises(
            ValueError,
            self.db.get,
            'key'
        )
        self.assertFalse(self.db.is_set('key'))
        self.db.set('key', 'hi')
        self.assertTrue(self.db.is_set('key'))

    def test_last_set(self):
        # Repeat the cycle: timestamp is None before set, a past datetime
        # after set, and None again after clear_value.
        key = 'key'
        self.db.create_key(key)
        for _ in range(10):
            self.assertIsNone(
                self.db.get_last_set(key)
            )
            self.db.set(key, 1)
            self.assertIsNotNone(
                self.db.get_last_set(key)
            )
            self.assertGreaterEqual(
                datetime.datetime.now(),
                self.db.get_last_set(key)
            )
            self.db.clear_value(key)
            self.assertIsNone(
                self.db.get_last_set(key)
            )
|
import os
import sys
import subprocess
from logfetch_base import log
from termcolor import colored
def cat_files(args, all_logs):
    """Print every file in *all_logs* to stdout, decompressing ``.gz`` files.

    Files are printed in sorted order, each preceded by a cyan ``=> name``
    header. When the list is empty a magenta notice is logged instead.
    """
    log('\n', args, False)
    if all_logs:
        all_logs.sort()
        for filename in all_logs:
            log('=> ' + colored(filename, 'cyan') + '\n', args, False)
            if filename.endswith('.gz'):
                # Pipe `cat file | zcat` so decompressed bytes go to stdout.
                # NOTE(review): cat.stdout is never closed in the parent,
                # so cat won't receive SIGPIPE if zcat exits early.
                cat = subprocess.Popen(['cat', filename], stdout=subprocess.PIPE)
                content = subprocess.Popen(['zcat'], stdin=cat.stdout)
                content.communicate()
            else:
                cat = subprocess.Popen(['cat', filename])
                cat.communicate()
            sys.stdout.write('\n')
    else:
        log(colored('No log files found\n', 'magenta'), args, False)
|
"""Packaging settings."""
from codecs import open
from os.path import abspath, dirname, join
from subprocess import call
from setuptools import Command, find_packages, setup
import motey
this_dir = abspath(dirname(__file__))
with open(join(this_dir, 'README.rst'), encoding='utf-8') as file:
long_description = file.read()
class RunTests(Command):
    """Custom setuptools command that runs the test suite via pytest."""

    description = 'run tests'
    user_options = []

    def initialize_options(self):
        # Arguments forwarded to pytest. Defining the attribute here fixes a
        # bug: run() referenced self.test_args, which was never assigned
        # anywhere, so `setup.py test` crashed with AttributeError.
        self.test_args = []

    def finalize_options(self):
        pass

    def run(self):
        """Run all tests and exit with pytest's status code."""
        import sys

        import pytest
        sys.exit(pytest.main(self.test_args))
# Package metadata and dependencies. NOTE(review): several classifiers below
# ('License :: Apache Version 2.0', 'Operating System :: Linux',
# 'Topic :: Internet of Things') are not valid trove classifiers and will be
# rejected by PyPI — verify against the official classifier list.
setup(
    name=motey.__appname__,
    version=motey.__version__,
    description='A fog node prototype.',
    long_description=long_description,
    url=motey.__url__,
    author=motey.__author__,
    author_email=motey.__email__,
    license=motey.__licence__,
    classifiers=[
        'Intended Audience :: Developers',
        'Intended Audience :: System Administrators',
        'Topic :: Utilities',
        'Topic :: Internet of Things',
        'License :: Apache Version 2.0',
        'Natural Language :: English',
        'Operating System :: Linux',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
    ],
    keywords=['cli', 'IoT', 'Fog Node'],
    packages=find_packages(exclude=['motey-docker-image', 'docs', 'resources', 'samples', 'scripts', 'tests*', 'webclient']),
    include_package_data=True,
    zip_safe=False,
    install_requires=[
        'aiofiles==0.3.1',
        'alabaster==0.7.10',
        'appdirs==1.4.3',
        'Babel==2.4.0',
        'click==6.7',
        'daemonize==2.4.7',
        'dependency-injector==3.4.5',
        'docker==2.2.1',
        'docker-pycreds==0.2.1',
        'docopt==0.6.2',
        'docutils==0.13.1',
        'Flask==1.0.2',
        'Flask-Cors==3.0.2',
        'future==0.16.0',
        'gevent==1.2.1',
        'greenlet==0.4.12',
        'httptools==0.0.9',
        'imagesize==0.7.1',
        'itsdangerous==0.24',
        'Jinja2==2.9.6',
        'jsonschema==2.6.0',
        'lockfile==0.12.2',
        'Logbook==1.0.0',
        'MarkupSafe==1.0',
        'msgpack-python==0.4.8',
        'packaging==16.8',
        'paho-mqtt==1.2.3',
        'psutil==5.2.2',
        'pycodestyle==2.3.1',
        'Pygments==2.2.0',
        'pyparsing==2.2.0',
        'python-dateutil==2.6.0',
        'pytz==2017.2',
        'PyYAML==3.12',
        'pyzmq==16.0.2',
        'requests==2.22.0',
        'Rx==1.5.9',
        'sanic==0.5.2',
        'six==1.10.0',
        'snowballstemmer==1.2.1',
        'Sphinx==1.6.2',
        'sphinx-rtd-theme==0.2.4',
        'sphinxcontrib-websupport==1.0.1',
        'tinydb==3.2.3',
        'typing==3.6.1',
        'ujson==1.35',
        'uvloop==0.8.0',
        'websocket-client==0.40.0',
        'websockets==3.3',
        'Werkzeug==0.12.1',
        'Yapsy==1.11.223',
        'zerorpc==0.6.1'
    ],
    # Fix: tests_require must be a sequence; it was a set literal, which
    # setuptools does not document as supported and which loses ordering.
    tests_require=[
        'pycodestyle==2.3.1',
        'pytest',
        'mock',
    ],
    entry_points={
        'console_scripts': [
            'motey = motey.cli.main:main',
        ],
    },
    cmdclass={'test': RunTests},
)
|
import abc
import services.gesture.GestureValidatorService as gesture
import services.people.PeopleFilterService as people
import services.person.PersonValidatorService as person
import numpy as np
class InputProcessor:
    """Pipeline facade: detect people in a frame, keep known users, then
    classify their gestures against the allowed commands."""

    def __init__(self):
        # NOTE(review): the module is named GestureValidatorService but the
        # class instantiated is GestureValidationService — confirm spelling.
        self.gestureValidatorService=gesture.GestureValidationService()
        self.peopleFilterService=people.PeopleFilterService()
        self.personValidatorService=person.PersonValidatorService()

    def processStream(self,image,users,commands)->dict:
        # Local 'people' shadows the imported module alias; harmless because
        # the module is only referenced in __init__.
        people=self.peopleFilterService.get_people(image)
        validated_people=self.personValidatorService.is_user(people,users)
        # NOTE(review): 'calsifie_gesture' looks misspelled — verify the
        # actual service method name before renaming anything.
        return self.gestureValidatorService.calsifie_gesture(validated_people,commands)
|
'''
Created on May 3, 2013
@author: Jonas Zaddach <zaddach@eurecom.fr>
'''
class Breakpoint():
    """Interface for breakpoints created by Debuggable.set_breakpoint."""

    def __init__(self):
        # Callback invoked when the breakpoint is hit; None until set.
        self._handler = None

    def wait(self):
        """Wait until this breakpoint is hit.

        Raises NotImplementedError instead of the previous ``assert(False)``:
        assertions are stripped under ``python -O``, which silently turned an
        abstract call into a successful no-op.
        """
        raise NotImplementedError("wait() must be implemented by a subclass")

    def delete(self):
        """Delete this breakpoint."""
        raise NotImplementedError("delete() must be implemented by a subclass")

    def set_handler(self, handler):
        """Set the handler function that is called when the breakpoint is hit."""
        self._handler = handler
class Debuggable():
    """Interface for all objects that support a minimal set of debugging
    operations.

    Abstract operations raise NotImplementedError instead of the previous
    ``assert(False)``: assertions are stripped under ``python -O``, which
    silently turned every abstract call into a successful no-op.
    """

    def read_typed_memory(self, address, size):
        """Read a memory word with size <b>size</b> from memory address
        <b>address</b>"""
        raise NotImplementedError("read_typed_memory() not implemented")

    def write_typed_memory(self, address, size, value):
        """Write a memory word <b>val</b> with size <b>size</b> to memory address
        <b>address</b>"""
        raise NotImplementedError("write_typed_memory() not implemented")

    def read_untyped_memory(self, address, length):
        """Read a range of untyped (byte) memory, one byte at a time."""
        data = []
        for i in range(0, length):
            data.append(self.read_typed_memory(address + i, 1))
        return bytes(data)

    def write_untyped_memory(self, address, data):
        """Write untyped (byte) data, one byte at a time."""
        for byte in data:
            self.write_typed_memory(address, 1, byte)
            address += 1

    def get_register(self, register):
        """Return the value of a register"""
        raise NotImplementedError("get_register() not implemented")

    def set_register(self, register, value):
        """Set the value of a register"""
        raise NotImplementedError("set_register() not implemented")

    def set_breakpoint(self, address, **properties):
        """
        Set a code breakpoint.
        Properties can be:
          - temporary (boolean): If True, breakpoint is deleted upon hit
          - thumb (boolean): If True and architecture is arm, a thumb
            breakpoint will be used.
        """
        raise NotImplementedError("set_breakpoint() not implemented")

    def remove_breakpoint(self, address):
        """Remove a breakpoint"""
        raise NotImplementedError("remove_breakpoint() not implemented")

    def dump_registers(self):
        """Dump the value of general registers"""
        raise NotImplementedError("dump_registers() not implemented")

    def dump_all_registers(self):
        """Dump the value of all registers"""
        raise NotImplementedError("dump_all_registers() not implemented")

    def cont(self):
        """Continue execution"""
        raise NotImplementedError("cont() not implemented")

    def halt(self):
        """Halt execution"""
        raise NotImplementedError("halt() not implemented")

    # TODO: Merge with set_breakpoint
    def put_bp(self, addr):
        """Put a breakpoint"""
        raise NotImplementedError("put_bp() not implemented")

    # TODO: Merge with remove_bp
    def remove_bp(self, addr):
        """Remove a breakpoint"""
        raise NotImplementedError("remove_bp() not implemented")
|
import numpy as np
from numpy import exp, sqrt, power, cos, sin, pi
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
class WaveFunction:
    """1-D Gaussian wave packet evolved with the time-dependent Schroedinger
    equation, split into real and imaginary parts and stepped with RK2.

    NOTE(review): DRePsi/DImPsi omit explicit hbar/m factors — presumably
    natural units (hbar = m = 1, set below); confirm against the derivation.
    """

    def __init__(self, method='rk2'):
        self.method = method  # Numerical method solutions: rk2
        self.m, self.hbar, self.k0 = 1.0, 1.0, 20.0  # Wave function constants
        self.xc, self.sigma = -0.5, 0.05  # Gaussian wave packet parameters
        # sqrt of a normalized Gaussian: |psi|^2 integrates to ~1.
        self.gaussian = lambda x: sqrt(
            exp(-power(x - self.xc, 2) / (2 * power(self.sigma, 2))) / (self.sigma * sqrt(2 * pi)))
        self.v0, self.vx0, self.vxn = 0, -0.2, 0  # Potential barrier value
        self.v = lambda x: self.v0 if self.vx0 <= x <= self.vxn else 0  # Lambda function for potential v(i)
        self.dx, self.dt, self.x0, self.xn, self.t0, self.tn = 0.02, 0.0001, -1, 1, 0, 0.02  # Steps and boundary
        # conditions
        self.i, self.n = int((self.xn - self.x0) / self.dx), int((self.tn - self.t0) / self.dt)  # How many steps
        self.x = np.linspace(self.x0, self.xn, self.i)  # Spatial array
        # Note: self.v is rebound from the lambda above to its sampled array.
        self.v = np.asarray([self.v(i) for i in self.x])  # Potential array along space v[i]
        # Initial condition: Gaussian envelope times plane wave exp(i*k0*x).
        self.repsi, self.impsi = lambda x: self.gaussian(x) * cos(self.k0 * x), \
                                 lambda x: self.gaussian(x) * sin(self.k0 * x)
        self.re_psi = np.asarray([self.repsi(i) for i in self.x])
        self.im_psi = np.asarray([self.impsi(i) for i in self.x])

    @property
    def re_psi(self):
        # Real part of the wave function, sampled on self.x.
        return self._repsi

    @re_psi.setter
    def re_psi(self, repsi):
        if isinstance(repsi, (tuple, list, np.ndarray)):
            self._repsi = np.asarray(repsi)
        else:
            raise ValueError("RePsi must be a tuple or list")

    @property
    def im_psi(self):
        # Imaginary part of the wave function, sampled on self.x.
        return self._impsi

    @im_psi.setter
    def im_psi(self, impsi):
        if isinstance(impsi, (tuple, list, np.ndarray)):
            self._impsi = np.asarray(impsi)
        else:
            raise ValueError("ImPsi must be a tuple or list")

    @property
    def psi_squared(self):
        # Probability density |psi|^2 = Re^2 + Im^2.
        return power(self.re_psi, 2) + power(self.im_psi, 2)

    @property
    def hbar(self):
        return self.h_bar

    @hbar.setter
    def hbar(self, h_bar):
        if isinstance(h_bar, float):
            self.h_bar = h_bar
        else:
            raise ValueError("h_bar must be a float")

    @property
    def m(self):
        return self._m

    @m.setter
    def m(self, mass):
        if isinstance(mass, float):
            self._m = mass
        else:
            raise ValueError("Mass must be a float")

    def rk2(self):
        """Advance one time step with the two-stage (midpoint-style) RK2."""
        k1r, k1i = np.asarray([self.DRePsi(self.im_psi, i) for i in range(len(self.x))]), \
                   np.asarray([self.DImPsi(self.re_psi, i) for i in range(len(self.x))])
        # Predictor step, then slopes are averaged for the corrector.
        _repsi, _impsi = self.re_psi + k1r * self.dt, self.im_psi + k1i * self.dt
        k2r, k2i = np.asarray([self.DRePsi(_impsi, i) for i in range(len(self.x))]), \
                   np.asarray([self.DImPsi(_repsi, i) for i in range(len(self.x))])
        self.re_psi, self.im_psi = self.re_psi + (k1r + k2r) / 2 * self.dt, self.im_psi + (k1i + k2i) / 2 * self.dt

    def update(self):
        """Dispatch one integration step according to self.method."""
        if self.method == 'rk2':
            self.rk2()

    def DRePsi(self, impsi, i):
        """Time derivative of Re(psi) at grid index i (central differences).

        Appending a trailing 0 handles i+1 at the right boundary; impsi[i-1]
        at i == 0 wraps to the last element (Python negative indexing) —
        NOTE(review): confirm that quasi-periodic left boundary is intended.
        """
        impsi = np.append(impsi, 0)
        return -(impsi[i + 1] - 2 * impsi[i] + impsi[i - 1]) / (2 * power(self.dx, 2)) + self.v[i] * impsi[i]

    def DImPsi(self, repsi, i):
        """Time derivative of Im(psi) at grid index i (central differences)."""
        repsi = np.append(repsi, 0)
        return (repsi[i + 1] - 2 * repsi[i] + repsi[i - 1]) / (2 * power(self.dx, 2)) - self.v[i] * repsi[i]
def main():
    """Animate the temporal evolution of the Gaussian wave packet."""
    wave_function = WaveFunction()
    fig, ax_psi = plt.subplots()
    ax_psi.set_title("Temporal evolution of a gaussian wave packet")
    ax_psi.set_xlim(wave_function.x0, wave_function.xn)
    ax_psi.set_ylim(-5, 10)
    # Empty line artists, filled in by update() on each animation frame.
    re_psi, = plt.plot([], [], color='b', label=r"$Re(\Psi(x,t))$")
    im_psi, = plt.plot([], [], color='r', label=r"$Im(\Psi(x,t))$")
    psi_squared, = plt.plot([], [], color='purple', label=r"$|\Psi(x,t)|^2$")
    plt.xlabel("x")
    plt.xticks([])
    plt.yticks([])
    plt.plot(wave_function.x, wave_function.v, color='g', label="V(x)={v}".format(v=wave_function.v0))
    plt.legend(title='Functions:')
    time = ax_psi.text(0.125, 0.925, "time=0.0000s", horizontalalignment='center',
                       verticalalignment='center',
                       transform=ax_psi.transAxes)

    def update(frame):
        # Draw the current state, then advance the integrator one step.
        re_psi.set_data(wave_function.x, wave_function.re_psi)
        im_psi.set_data(wave_function.x, wave_function.im_psi)
        psi_squared.set_data(wave_function.x, wave_function.psi_squared)
        time.set_text("time={t:.4f}s".format(t=frame * wave_function.dt))
        wave_function.update()
        return re_psi, im_psi, psi_squared, time

    # Keep a reference to the animation so it isn't garbage-collected.
    ani = FuncAnimation(fig, update, frames=wave_function.n, blit=True, repeat=False, interval=100)
    plt.show()


if __name__ == '__main__':
    main()
|
import subprocess
import os
import shutil
import bigdataUtilities as util
import sys
if __name__ == "__main__":
    # Base directory in HDFS. Make sure you're able to write/delete in this directory.
    base_dir = "/analytics/leaf-compression"
    version = "-0.1-SNAPSHOT"
    # Comma separated list of zookeeper nodes in ip:port format. 2181 is the default port.
    zkList = "localhost:2181"
    # Number of Giraph workers; optionally overridden by the first CLI argument.
    slaves = 1
    if len(sys.argv) > 1:
        slaves = int(sys.argv[1])
    util.obnoxiousPrint("Uploading data to hdfs")
    util.subprocessCall(["hadoop", "fs", "-rmr", base_dir + "/input"], False)
    util.subprocessCall(["hadoop", "fs", "-rmr", base_dir + "/output/leaf"], False)
    util.subprocessCall(["hadoop", "fs", "-mkdir", "-p", base_dir + "/input"], False)
    util.subprocessCall(["hadoop", "fs", "-copyFromLocal", "example_data.csv", base_dir + "/input/small_graph"])
    util.obnoxiousPrint("Running leafcompress job")
    util.subprocessCall(["hadoop", "jar", "target/leaf-compression" + version + "-jar-with-dependencies.jar",
                         "org.apache.giraph.GiraphRunner",
                         "-Dgiraph.zkList=" + zkList,
                         "com.soteradefense.bsp.KeyDataVertex",
                         "-w", str(slaves),
                         "-vif", "com.soteradefense.bsp.KeyDataVertexInputFormat",
                         "-of", "com.soteradefense.bsp.KeyDataVertexOutputFormat",
                         "-vip", base_dir + "/input/small_graph",
                         "-op", base_dir + "/output/leaf"])
    # From this point down we are simply copying the output
    # and formatting it for local reading.
    util.obnoxiousPrint("Copying results from hdfs to local output directory")
    try:
        shutil.rmtree("output")
    except OSError:
        # Narrowed from a bare `except:`; the directory simply may not exist
        # on a first run, and a bare except also swallowed KeyboardInterrupt.
        pass
    util.subprocessCall(["hadoop", "fs", "-get", base_dir + "/output", "."])
    # Read in results and move the smaller key to the left.
    # This ensures consistent outputs from different methods.
    results = []
    files = filter(lambda x: x[0][0] != "_" and x[0][0] != '.', os.listdir("output/leaf"))
    for file in files:
        # Context manager guarantees the handle is closed even if a line fails.
        with open("output/leaf/" + file, "r") as fobj:
            for line in fobj:
                temp_array = line.strip().split("\t")
                if temp_array[0] > temp_array[1]:
                    temp_array[0], temp_array[1] = temp_array[1], temp_array[0]
                results.append(temp_array)
    # Two stable sorts: secondary key first, then primary key.
    results = sorted(results, key=lambda x: x[1])
    results = sorted(results, key=lambda x: x[0])
    # Bug fix: the output file was previously never closed/flushed, so the
    # final buffered writes could be lost on interpreter exit.
    with open("output/sorted_out.txt", "w") as fobj:
        for result in results:
            fobj.write("\t".join(result) + "\n")
|
import fileinput
import re
# ROW, COL = (int(x) for x in re.findall(r'\d+', fileinput.input()[0]))
# Hard-coded puzzle coordinates (Advent of Code 2015, day 25 input).
ROW, COL = 3010, 3019
def next_code(c):
    """Return the successor of code *c*: (c * 252533) mod 33554393."""
    return c * 252533 % 33554393
def code_no(row, col):
    """Return the 1-based position of (row, col) in the diagonal enumeration.

    The grid is filled along anti-diagonals: (row, col) lies on diagonal
    row + col - 1, which is preceded by the triangular number
    T(row + col - 2) entries; the cell is then ``col`` steps into it.
    """
    # s = 1
    # for x in range(row + col - 1):
    #     s += x
    # return s + col - 1
    # Bug fix: use floor division. `/` returns a float in Python 3, and a
    # float exponent makes three-argument pow() raise downstream. The product
    # of two consecutive integers is always even, so // is exact.
    return ((row + col - 1) * (row + col - 2)) // 2 + col
code = 20151125
# for x in range(code_no(ROW, COL) - 1):
#     code = next_code(code)
# Fast path: three-argument pow() performs modular exponentiation instead of
# iterating the sequence. Bug fix: parenthesize the whole expression for the
# Python 3 print() function — the old `print (x) % y` form applied `%` to
# print's None return value, raising TypeError.
print((code * pow(252533, code_no(ROW, COL) - 1, 33554393)) % 33554393)
|
from font.Font import Font
from parser.OptionParserBuilder import OptionParserBuilder
import platform
from logger.Logger import Logger
from net.url import Uri
from net.process import FontDownloader
logger = Logger()
class EntryPoint:
    """CLI entry point: parse options, optionally download the font file,
    then parse it and print English/Korean name records."""

    @staticmethod
    def main():
        # Build the option parser and read command-line arguments.
        args = OptionParserBuilder.build()
        font_name = args.font.strip()
        is_debug =args.debug
        enabled_download = args.enabled_download
        if enabled_download:
            # Download the font (default: bundled NanumGothicCoding URL).
            FontDownloader(args.url or 'https://github.com/biud436/font-parser/raw/main/res/NanumGothicCoding.ttf')
        Logger.DEBUG_FLAG = is_debug == True
        # Create the font parser object (default font name: NanumGothic).
        font = Font(font_name or "나눔고딕")
        font.parse()
        # Keep only English (1033) and Korean (1042) language records.
        results = filter(lambda x: x.language_id == 1033 or x.language_id == 1042, font.fonts)
        # Font parsing complete; report the platform then print the records.
        if platform.system().lower().find("windows") != -1:
            print("윈도우즈")
        logger.print_log(message="폰트 파싱이 완료되었습니다.", color="green")
        for font in list(results):
            print("{font_name} [{language_id}]".format(font_name=font.name, language_id = font.language_id))
# Script entry point: run only when executed directly.
if __name__ == '__main__':
    EntryPoint.main()
|
from app import db
from typing import List
from .model import Log
class LogService:
    """CRUD helpers for the Log model (Flask-SQLAlchemy)."""

    @staticmethod
    def get_all() -> List[Log]:
        """Return every Log row."""
        return Log.query.all()

    @staticmethod
    def get_by_id(id: int) -> Log:
        """Return the Log with the given primary key, or None if missing."""
        return Log.query.get(id)

    @staticmethod
    def update(id: int, body) -> Log:
        """Apply *body* to the Log with *id* and commit.

        Returns the updated model, or None when no such row exists.
        """
        model = LogService.get_by_id(id)
        if model is None:
            return None
        model.update(body)
        db.session.commit()
        return model

    @staticmethod
    def delete_by_id(id: int) -> List[int]:
        """Delete the Log with *id*; return [id] on success, [] when missing."""
        model = Log.query.filter(Log.id == id).first()
        if not model:
            return []
        db.session.delete(model)
        db.session.commit()
        return [id]

    @staticmethod
    def create(new_attrs) -> Log:
        """Insert and return a new Log built from a dict of attributes."""
        model = Log(**new_attrs)
        db.session.add(model)
        db.session.commit()
        return model
# import the necessary packages
import face_recognition
import numpy as np
import argparse
import cv2
def alignFace(image, face_locations, face_landmarks):
    """Rotate, scale and crop *image* so the face is upright and centered.

    :param image: BGR image (as loaded by cv2.imread).
    :param face_locations: single face bounding box; kept for interface
        compatibility (it is not needed by the affine computation).
    :param face_landmarks: dict of 68-point landmarks; only 'left_eye' and
        'right_eye' are used.
    :return: 128x128 aligned face crop.
    """
    # Compute the center of each eye from its landmark points.
    leftEyePts = face_landmarks['left_eye']
    rightEyePts = face_landmarks['right_eye']
    leftEyeCenter = np.array(leftEyePts).mean(axis=0).astype("int")
    rightEyeCenter = np.array(rightEyePts).mean(axis=0).astype("int")
    leftEyeCenter = (leftEyeCenter[0], leftEyeCenter[1])
    rightEyeCenter = (rightEyeCenter[0], rightEyeCenter[1])
    # Angle of the line through the eyes (degrees), from the line's slope.
    dY = rightEyeCenter[1] - leftEyeCenter[1]
    dX = rightEyeCenter[0] - leftEyeCenter[0]
    angle = np.degrees(np.arctan2(dY, dX))
    # Desired left-eye location in the output, as a fraction of its size; the
    # right eye is mirrored from it so the face ends up centered.
    desiredLeftEye = (0.35, 0.35)
    # Output (cropped face) size after rotation.
    desiredFaceWidth = 128
    desiredFaceHeight = 128
    desiredRightEyeX = 1.0 - desiredLeftEye[0]
    # Scale = desired inter-eye distance / current inter-eye distance.
    dist = np.sqrt((dX ** 2) + (dY ** 2))
    desiredDist = (desiredRightEyeX - desiredLeftEye[0])
    desiredDist *= desiredFaceWidth
    scale = desiredDist / dist
    # Midpoint between the eyes — the rotation center. Cast to float: newer
    # OpenCV versions reject integer/numpy-int centers in getRotationMatrix2D.
    eyesCenter = (float(leftEyeCenter[0] + rightEyeCenter[0]) / 2.0,
                  float(leftEyeCenter[1] + rightEyeCenter[1]) / 2.0)
    # Rotation matrix for rotating and scaling the face.
    M = cv2.getRotationMatrix2D(eyesCenter, angle, scale)
    # Update the translation component so the eyes land at the desired spot.
    tX = desiredFaceWidth * 0.5
    tY = desiredFaceHeight * desiredLeftEye[1]
    M[0, 2] += (tX - eyesCenter[0])
    M[1, 2] += (tY - eyesCenter[1])
    # Apply the affine transformation. (The previous unused unpacking of
    # face_locations into y2/x2/y1/x1 has been removed.)
    (w, h) = (desiredFaceWidth, desiredFaceHeight)
    output = cv2.warpAffine(image, M, (w, h), flags=cv2.INTER_CUBIC)
    return output
if __name__ == "__main__":
    # construct the argument parser and parse the arguments
    ap = argparse.ArgumentParser()
    ap.add_argument("-i", "--image", required=True,
        help="path to input image")
    args = vars(ap.parse_args())
    # load image and find face locations.
    image = cv2.imread(args["image"])
    face_locations = face_recognition.face_locations(image, model="hog")
    # detect 68-landmarks from image. This includes left eye, right eye, lips, eye brows, nose and chins
    face_landmarks = face_recognition.face_landmarks(image)
    # NOTE(review): assumes face_locations and face_landmarks are the same
    # length and index-aligned — confirm for multi-face images.
    for i in range(0, len(face_locations)):
        # align faces
        faceAligned = alignFace(image, face_locations[i], face_landmarks[i])
        # display the output images
        cv2.imshow("Original", image)
        cv2.imshow("Aligned", faceAligned)
        cv2.waitKey(0)
import pathlib
import numpy as np
from Bio import SeqIO
import pandas as pd
import random
from skorch.dataset import Dataset
from .helper import Helper
from tqdm import tqdm
class TFBSDataset(Dataset):
    """skorch Dataset of one-hot-encoded promoter / non-promoter sequences."""

    seqs_start: int = 200          # anchor index used by the TATA-box extractor
    seqs_length: int               # length of each loaded sequence
    y_type: np.dtype = np.int64    # label dtype; float32 when binary=True
    dataframe: pd.DataFrame        # columns: 'sequence', 'label'
    helper_class: Helper

    def __init__(self, file, neg_file, binary, save_df):
        """Load positive (and optionally negative) FASTA files.

        :param file: FASTA file of promoter sequences.
        :param neg_file: optional FASTA file of non-promoter sequences.
        :param binary: use float32 labels (e.g. for BCE-style losses).
        :param save_df: dump the assembled dataframe to CSV.
        :raises Exception: when positive/negative sequence lengths differ.
        """
        self.helper_class = Helper()
        seqs = self.load_file(file)
        self.seqs_length = len(seqs[0])
        df = pd.DataFrame(seqs, columns=['sequence'])
        df['label'] = self.helper_class.get_LABEL_dict()['Promoter']
        # Bug fix: always assign the dataframe. Previously it was only set in
        # the neg_file branch, so a positives-only dataset crashed later in
        # __len__ / __getitem__ / save_dataframe with AttributeError.
        self.dataframe = df
        if(neg_file is not None):
            neg_seqs = self.load_file(neg_file)
            neg_seqs_length = len(neg_seqs[0])
            if(self.seqs_length != neg_seqs_length):
                raise Exception(r"Promoter and Non-Promoter sequence lengths don't match")
            neg_df = pd.DataFrame(neg_seqs, columns=['sequence'])
            neg_df['label'] = self.helper_class.get_LABEL_dict()['Non-Promoter']
            # pd.concat replaces DataFrame.append, which was removed in pandas 2.0.
            self.dataframe = pd.concat([df, neg_df], ignore_index=True)
        if(binary):
            self.y_type = np.float32
        if(save_df):
            self.save_dataframe('models/tfbs_check/dataframe.csv')

    def load_file(self, file):
        """Parse a FASTA file, returning uppercase sequences without 'N's."""
        records = []
        # 'r' instead of the legacy 'rU' mode: 'U' was removed in Python 3.11
        # (universal newlines are the default in text mode anyway).
        with open(file, 'r') as fasta_file:
            for record in SeqIO.parse(fasta_file, 'fasta'):
                # NOTE(review): record.seq._data is a private Biopython attr
                # (bytes in newer releases) — prefer str(record.seq) upstream.
                r_seq = self.sequence_extractor(record.seq._data.upper())
                if 'N' not in r_seq:
                    records.append(r_seq)
        return records

    def save_dataframe(self, file):
        """Write the assembled dataframe to *file* as CSV."""
        print('Saving dataframe to: %s' % file)
        self.dataframe.to_csv(file, index=False)

    def one_hot_encoder(self, seq):
        """Return a (len(alphabet), len(seq)) float32 one-hot matrix."""
        one_hot = np.zeros((len(self.helper_class.get_DNA_dict()), len(seq)), dtype=np.float32)
        for idx, token in enumerate(seq):
            one_hot[self.helper_class.get_DNA_dict()[token], idx] = 1
        return one_hot

    def TFIIB_TATA_Box_extractor(self, seq):
        """Return the TATA-box window: positions [start-45, start-20]."""
        return seq[self.seqs_start - 45:self.seqs_start - 20 + 1]

    def sequence_extractor(self, seq):
        """Hook for sub-sequence selection; currently returns seq unchanged."""
        # element = self.TFIIB_TATA_Box_extractor(seq)
        return seq

    def __getitem__(self, idx):
        # One sample: (one-hot encoded sequence, label as self.y_type).
        row = self.dataframe.iloc[idx]
        x = self.one_hot_encoder(row.sequence)
        return x, np.array(row.label, dtype=self.y_type)

    def __len__(self):
        return len(self.dataframe)
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
import logging
import json
import time
from ScrapyFrame.utils.base import EndsPipeline
from ScrapyFrame.utils.base import database
class MySQLPipeline(EndsPipeline):
    """Item pipeline that inserts scraped items into MySQL; failed items are
    dumped as JSON lines to an error file next to the log."""

    def __init__(self, settings=None):
        self.settings = settings

    @classmethod
    def from_crawler(cls, crawler):
        # Build the pipeline from the crawler's settings (removed the
        # redundant `settings = settings` self-assignment).
        return cls(settings=crawler.settings)

    def open_spider(self, spider):
        self._logger = logging.getLogger(__class__.__name__ + "." + spider.name)
        super().open_spider(spider)
        self._db_type = "mysql"
        self.conn = database.MySQLConnect(db=self._db)
        # Error sink for items that fail to insert.
        self.file = open(self.settings.get("LOG_FILE").replace(".log", "error.csv"), "w")

    def process_item(self, item, spider):
        """Insert one item; on failure, record it to the error file."""
        try:
            fields = self.settings.get("OPINION_FIELDS")
            table = self.settings.get("default_tb")
            sentence = self.insert_sentence(table, fields)
            data = []
            for field in fields:
                if isinstance(item.get(field), (str, float, int)):
                    data.append(item.get(field))
                elif isinstance(item.get(field), (list, tuple)):
                    # Flatten sequences into a tab-separated string column.
                    data.append("\t".join(item.get(field)))
                else:
                    data.append(None)
            self.conn.cursor.execute(sentence, data)
            self.conn.Connection.commit()
            self.log(f"Insert {table} Successful", level=logging.INFO)
        except Exception as err:
            # Bug fix: serialize the exception as str(err) — a raw Exception
            # is not JSON-serializable, so json.dumps itself raised inside
            # the handler and the failed record was never written.
            self.file.write(json.dumps(dict(item, error_reason=str(err)), ensure_ascii=False) + "\n")
            # Also fixed the "resson" typo in the log message.
            self.log(f"Insert Failed reason {err}, address {item['url']}", level=logging.CRITICAL)
        return item

    def close_spider(self, spider):
        super().close_spider(spider)
        self.conn.close()
        self.file.close()

    def insert_sentence(self, table, fields, symbol=r"%s"):
        """Create SQL insert sentence
        Create a insert sentence, like that:
            INSERT INTO <table> (`col1`, `col2`) VALUES (%s, %s)
        """
        sentence = """
            INSERT INTO {tb} {fieldnames} VALUES {values_symbol};
        """
        fieldnames = "({column})".format(
            column=",".join("`{}`".format(field) for field in fields)
        )
        values_symbol = "(" + ",".join((symbol for i in range(len(fields)))) + ")"
        sentence = sentence.format(
            tb=table, fieldnames=fieldnames, values_symbol=values_symbol
        )
        return sentence
class MongoDBPipeline(EndsPipeline):
    """Item pipeline that inserts scraped items into MongoDB."""

    def __init__(self, settings=None):
        self.settings = settings

    @classmethod
    def from_crawler(cls, crawler):
        # Build the pipeline from the crawler's settings (removed the
        # redundant `settings = settings` self-assignment).
        return cls(settings=crawler.settings)

    def open_spider(self, spider):
        super().open_spider(spider)
        self._logger = logging.getLogger(__class__.__name__ + "." + spider.name)
        self.conn = database.MongoDBConnect(db=self._db)
        self.database = self.conn.database
        # Error sink for items that fail to insert.
        self.file = open(self.settings.get("LOG_FILE").replace(".log", "error.csv"), "w")

    def process_item(self, item, spider):
        """Insert one item with create/update timestamps."""
        try:
            collections = self.database[self.settings.get("default_tb")]
            item["create_time"] = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
            item["update_time"] = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
            insert_ = collections.insert_one(dict(item))
            if insert_.inserted_id:
                self.log(f"Insert collections {collections.name} successful", level=logging.INFO)
            else:
                self.log(f"Insert collections {collections.name} failed, maybe duplicated: {item}", \
                    level=logging.DEBUG)
        except Exception as err:
            # Bug fix: serialize the exception as str(err) — a raw Exception
            # is not JSON-serializable, so json.dumps itself raised inside
            # the handler and the failed record was never written.
            self.file.write(json.dumps(dict(item, error_reason=str(err)), ensure_ascii=False) + "\n")
            # Also fixed the "resson" typo in the log message.
            self.log(f"Insert Failed reason {err}, address {item['url']}", level=logging.CRITICAL)
        return item

    def close_spider(self, spider):
        super().close_spider(spider)
        self.conn.close()
        self.file.close()
for i in range(10):
    print('Привет', i)
# [range() call forms]
# Print the numbers in [100; 999] that end in 7.
for i in range(100, 1000):  # iterate the numbers from 100 to 999
    if i % 10 == 7:  # remainder of division by 10 gives the last digit
        print(i)
# Three-argument form: start, stop, step.
for i in range(56, 171, 2):
    print(i)
# Negative step: count down.
for i in range(5, 0, -1):
    print(i, end=' ')
print('Взлетаем!!!')
|
import unittest
class Solution:
    @staticmethod
    def findNthDigit(n: int) -> int:
        """Return the nth digit of the infinite sequence 123456789101112...

        Digits are grouped by number width: 9 one-digit numbers contribute
        9 digits, 90 two-digit numbers contribute 180, 900 three-digit
        numbers contribute 2700, and so on. Skip whole groups until n falls
        inside one, then index directly into the target number.

        Example: n=11 -> '0' (the second digit of 10).
        """
        width = 1          # digits per number in the current group
        group_start = 1    # first number of the group: 1, 10, 100, ...
        group_count = 9    # how many numbers are in the group: 9, 90, 900, ...
        while n > group_count * width:
            n -= group_count * width
            width += 1
            group_start *= 10
            group_count *= 10
        # n is now a 1-based offset into this group's digit stream.
        offset = n - 1
        number = group_start + offset // width
        return int(str(number)[offset % width])
class FindNthDigit(unittest.TestCase):
    """Unit tests for Solution.findNthDigit."""

    def test_case_1(self):
        # Within the one-digit group: the 3rd digit is '3'.
        n = 3
        expected_output = 3
        self.assertEqual(expected_output, Solution.findNthDigit(n))

    def test_case_2(self):
        # Crosses into the two-digit group: second digit of 10 is '0'.
        n = 11
        expected_output = 0
        self.assertEqual(expected_output, Solution.findNthDigit(n))

    def test_case_3(self):
        # Deep in the three-digit group.
        n = 200
        expected_output = 0
        self.assertEqual(expected_output, Solution.findNthDigit(n))

    def test_case_4(self):
        # Boundary: the last digit of the one-digit group.
        n = 9
        expected_output = 9
        self.assertEqual(expected_output, Solution.findNthDigit(n))
|
import nodepy.runge_kutta_method as rk
import nodepy.convergence as cv
from nodepy import ivp
import matplotlib.pyplot as pl
# Non-interactive backend so the script works headless (e.g. on CI).
pl.switch_backend('agg')
# Load some methods:
rk4=rk.loadRKM('RK44')
SSP2=rk.loadRKM('SSP22')
SSP104=rk.loadRKM('SSP104')
# Define an IVP:
#myivp=ivp.exp_fun(1.)
myivp=ivp.load_ivp('test')
# Start and end time:
T=[0.,5.]
# Convergence test of the three methods on the IVP; plots the results.
cv.ctest([rk4,SSP2,SSP104],myivp)
# NOTE(review): pl.show() is a no-op with the 'agg' backend; T is defined
# but never passed to ctest — confirm whether it was meant as an argument.
pl.show()
pl.savefig('./OverThreeOrders/nodepy/examples/test.eps')
|
from django.apps import AppConfig
class MathematicsConfig(AppConfig):
    """Django application configuration for the mathematics module."""
    name = 'modules.mathematics'
|
class Library:
    """A tiny in-memory lending library.

    ``lend_data`` maps each book title to the borrower's name, or ``None``
    when the book is available on the shelf.
    """

    def __init__(self, list_of_books, library_name):
        # Borrower registry: book title -> borrower name (None = available).
        self.lend_data = {}
        self.list_of_books = list_of_books
        self.library_name = library_name
        # Every starting book is available.
        for books in self.list_of_books:
            self.lend_data[books] = None

    def display_books(self):
        """Print the catalogue as 'index)title' lines."""
        for index, books in enumerate(self.list_of_books):
            print(f"{index}){books}")

    def lend_book(self, book, author):
        """Lend *book* to *author* if it exists and is currently available."""
        if book in self.list_of_books:
            if self.lend_data[book] is None:
                self.lend_data[book] = author
            else:
                print(f"Sorry This book is lend by {self.lend_data[book]}")
        else:
            print("You have written wrong book name")

    def add_books(self, book_name):
        """Add a new (available) book to the catalogue."""
        self.list_of_books.append(book_name)
        self.lend_data[book_name] = None

    def return_books(self, book, author):
        """Return a previously lent book, marking it available again."""
        if book in self.list_of_books:
            if self.lend_data[book] is not None:
                # Bug fix: mark the book available instead of popping the key.
                # Popping removed the entry entirely, so any later
                # lend_book(book, ...) raised KeyError on self.lend_data[book].
                self.lend_data[book] = None
            else:
                print("This book is already lended")
        else:
            print("Sorry can't find the book")
def main():
    """Interactive console loop for the Library demo."""
    list_books = ['Python', 'Linux', 'Go', 'javascript', 'django']
    library_name = 'Programming'
    programming = Library(list_books, library_name)
    print(f"Welcome to {programming.library_name} library \n \n 'q' for exit \n 'd' for display books \n 'a' to add "
          f"book \n "
          f"'l' for lend book \n 'r' for return book \n")
    print('------------------------')
    exit_system = False
    # Read one-letter commands until the user quits with 'q'.
    while exit_system is not True:
        _input = input(" Enter option : ")
        print("\n")
        if _input == "q":
            exit_system = True
        elif _input == "d":
            programming.display_books()
        elif _input == "a":
            _addbook = input("Book Name : ")
            programming.add_books(_addbook)
        elif _input == "l":
            _name = input("Name : ")
            _bookname = input("Book Name : ")
            print("\n Book Lend \n")
            programming.lend_book(_bookname, _name)
        elif _input == "r":
            _name = input("Name : ")
            _bookname = input("Book Name : ")
            programming.return_books(_bookname, _name)


if __name__ == '__main__':
    main()
|
'''
Created on May 14, 2020
This file is subject to the terms and conditions defined in the
file 'LICENSE.txt', which is part of this source code package.
@author: David Moss
'''
from intelligence.intelligence import Intelligence
# Variable name for tracking people
AMPLITUDE_USER_PROPERTIES_VARIABLE_NAME = "amplitude_user"
# HTTP timeout
AMPLITUDE_HTTP_TIMEOUT_S = 2
class LocationAmplitudeMicroservice(Intelligence):
    """Microservice that forwards analytics events and user properties to Amplitude.

    Events are posted to Amplitude's HTTP API v2 endpoint. All public methods
    short-circuit for test locations so tests never generate analytics traffic.
    """

    def __init__(self, botengine, parent):
        """
        Instantiate this object
        :param botengine: BotEngine environment
        :param parent: Parent object, either a location or a device object.
        """
        Intelligence.__init__(self, botengine, parent)
        # Record a 'reset' event so bot (re)starts are visible in analytics.
        self.analytics_track(botengine, {'event_name': 'reset', 'properties': None})

    def analytics_track(self, botengine, content):
        """
        Track an event.
        This will buffer your events and flush them to the server altogether at the end of all bot executions,
        and before variables get saved.
        :param botengine: BotEngine environment
        :param content: (dict) with 'event_name' (string) and 'properties' (dict or None)
        """
        if botengine.is_test_location():
            return
        event_name = content['event_name']
        properties = content['properties']
        botengine.get_logger().info("Analytics: Tracking {}".format(event_name))
        if properties is None:
            properties = {}
        # Always stamp the event with its location/organization context.
        properties["locationId"] = botengine.get_location_id()
        properties["organizationId"] = botengine.get_organization_id()
        self._flush(botengine,
                    [
                        {
                            "user_id": self._get_user_id(botengine),
                            "device_id": self._get_device_id(botengine),
                            "time": botengine.get_timestamp(),
                            "event_type": event_name,
                            "event_properties": properties,
                            "user_properties": {
                                "locationId": botengine.get_location_id(),
                                "organizationId": botengine.get_organization_id()
                            }
                        }
                    ])

    def analytics_people_set(self, botengine, content):
        """
        Set some key/value attributes for this user
        :param botengine: BotEngine environment
        :param content: (dict) with 'properties_dict' - key/value pairs to track
        """
        if botengine.is_test_location():
            return
        properties_dict = content['properties_dict']
        botengine.get_logger().info("analytics.py: Setting user info - {}".format(properties_dict))
        # Merge the new properties into the persisted user-property set.
        focused_properties = botengine.load_variable(AMPLITUDE_USER_PROPERTIES_VARIABLE_NAME)
        if focused_properties is None:
            focused_properties = properties_dict
        focused_properties.update(properties_dict)
        focused_properties["locationId"] = botengine.get_location_id()
        focused_properties["organizationId"] = botengine.get_organization_id()
        botengine.save_variable(AMPLITUDE_USER_PROPERTIES_VARIABLE_NAME, focused_properties, required_for_each_execution=False)
        self._flush(botengine,
                    [
                        {
                            "user_id": self._get_user_id(botengine),
                            "device_id": self._get_device_id(botengine),
                            "time": botengine.get_timestamp(),
                            "user_properties": focused_properties
                        }
                    ])

    def analytics_people_increment(self, botengine, content):
        """
        Adds numerical values to properties of a people record. Nonexistent properties on the record default to zero. Negative values in properties will decrement the given property.
        :param botengine: BotEngine environment
        :param content: (dict) with 'properties_dict' - key/numeric-delta pairs
        """
        if botengine.is_test_location():
            return
        properties_dict = content['properties_dict']
        botengine.get_logger().info("Analytics: Incrementing user info - {}".format(properties_dict))
        focused_properties = botengine.load_variable(AMPLITUDE_USER_PROPERTIES_VARIABLE_NAME)
        if focused_properties is None:
            focused_properties = properties_dict
        # Missing counters start at 0 before applying the delta.
        for p in properties_dict:
            if p not in focused_properties:
                focused_properties[p] = 0
            focused_properties[p] += properties_dict[p]
        focused_properties["locationId"] = botengine.get_location_id()
        focused_properties["organizationId"] = botengine.get_organization_id()
        botengine.save_variable(AMPLITUDE_USER_PROPERTIES_VARIABLE_NAME, focused_properties, required_for_each_execution=False)
        self._flush(botengine,
                    [
                        {
                            "user_id": self._get_user_id(botengine),
                            "device_id": self._get_device_id(botengine),
                            "time": botengine.get_timestamp(),
                            "user_properties": focused_properties
                        }
                    ])

    def analytics_people_unset(self, botengine, content):
        """
        Delete a property from a user
        :param botengine: BotEngine
        :param content: (dict) with 'properties_list' - property names to remove from a people record.
        """
        if botengine.is_test_location():
            return
        properties_list = content['properties_list']
        botengine.get_logger().info("Analytics: Removing user info - {}".format(properties_list))
        focused_properties = botengine.load_variable(AMPLITUDE_USER_PROPERTIES_VARIABLE_NAME)
        if focused_properties is None:
            # Nothing to unset
            return
        for p in properties_list:
            if p in focused_properties:
                del focused_properties[p]
        focused_properties["locationId"] = botengine.get_location_id()
        focused_properties["organizationId"] = botengine.get_organization_id()
        botengine.save_variable(AMPLITUDE_USER_PROPERTIES_VARIABLE_NAME, focused_properties, required_for_each_execution=False)
        self._flush(botengine,
                    [
                        {
                            "user_id": self._get_user_id(botengine),
                            "device_id": self._get_device_id(botengine),
                            "time": botengine.get_timestamp(),
                            "user_properties": focused_properties
                        }
                    ])

    def _flush(self, botengine, data):
        """
        Required. Implement the mechanisms to flush your analytics.
        :param botengine: BotEngine
        :param data: (list) event dicts to POST to Amplitude
        """
        if botengine.is_test_location():
            botengine.get_logger().info("Analytics: This test location will not record analytics.")
            return
        import domain
        import json
        import requests
        import bundle
        # Pick the API token matching the cloud this bot runs on.
        token = None
        for cloud_address in domain.AMPLITUDE_TOKENS:
            if cloud_address in bundle.CLOUD_ADDRESS:
                token = domain.AMPLITUDE_TOKENS[cloud_address]
        if token is None:
            # Nothing to do
            botengine.get_logger().info("analytics_amplitude.flush(): No analytics token for {}".format(bundle.CLOUD_ADDRESS))
            return
        if token == "":
            # Nothing to do
            botengine.get_logger().info("analytics_amplitude.flush(): No analytics token for {}".format(bundle.CLOUD_ADDRESS))
            return
        http_headers = {"Content-Type": "application/json"}
        body = {
            "api_key": token,
            "events": data
        }
        url = "https://api.amplitude.com/2/httpapi"
        try:
            requests.post(url, headers=http_headers, data=json.dumps(body), timeout=AMPLITUDE_HTTP_TIMEOUT_S)
            botengine.get_logger().info("location_amplitude_microservice: Flushed()")
        # Bug fix: the exception classes live on the locally imported `requests`
        # module, not on `self` (self.requests.* raised AttributeError as soon
        # as any HTTP error actually occurred); likewise the logger comes from
        # botengine, not self.
        except requests.HTTPError:
            botengine.get_logger().info("Generic HTTP error calling POST " + url)
        except requests.ConnectionError:
            botengine.get_logger().info("Connection HTTP error calling POST " + url)
        except requests.Timeout:
            botengine.get_logger().info(str(AMPLITUDE_HTTP_TIMEOUT_S) + " second HTTP Timeout calling POST " + url)
        except requests.TooManyRedirects:
            botengine.get_logger().info("Too many redirects HTTP error calling POST " + url)
        except Exception as e:
            # Analytics are best-effort: never let a flush failure break the bot.
            return

    def _get_user_id(self, botengine):
        """
        Generate an Amplitude User ID
        To us, this user ID will always have a "bot_" prefix, followed by the bot instance ID.
        :return: (string) Amplitude user ID
        """
        return "bot_{}".format(botengine.bot_instance_id)

    def _get_device_id(self, botengine):
        """
        Get the Device ID
        :param botengine: BotEngine environment
        :return: the bot's bundle ID, used as the Amplitude device ID
        """
        return botengine.get_bundle_id()
|
from __future__ import annotations
from typing import Literal
from prettyqt import core, network
from prettyqt.qt import QtCore, QtNetwork
from prettyqt.utils import InvalidParamError, bidict, mappers
mod = QtNetwork.QAbstractSocket

# Mapping of snake_case names to Qt BindFlag values (and back, via bidict).
BIND_MODE = bidict(
    share_address=mod.BindFlag.ShareAddress,
    dont_share_address=mod.BindFlag.DontShareAddress,
    reuse_address_hint=mod.BindFlag.ReuseAddressHint,
    default_for_platform=mod.BindFlag.DefaultForPlatform,
)

BindModeStr = Literal[
    "share_address",
    "dont_share_address",
    "reuse_address_hint",
    "default_for_platform",
]

NETWORK_LAYER_PROTOCOL = bidict(
    ipv4=mod.NetworkLayerProtocol.IPv4Protocol,
    ipv6=mod.NetworkLayerProtocol.IPv6Protocol,
    any_ip=mod.NetworkLayerProtocol.AnyIPProtocol,
    unknown=mod.NetworkLayerProtocol.UnknownNetworkLayerProtocol,
)

NetworkLayerProtocolStr = Literal["ipv4", "ipv6", "any_ip", "unknown"]

PAUSE_MODES = mappers.FlagMap(
    mod.PauseMode,
    never=mod.PauseMode.PauseNever,
    on_ssl_errors=mod.PauseMode.PauseOnSslErrors,
)

PauseModeStr = Literal["never", "on_ssl_errors"]

# NOTE: removed the no-op self-assignment `mod = mod` that previously sat here.
# NOTE: "diagram_too_large" (sic) is kept as-is: it is part of the public
# string API (also appears in SocketErrorStr below), so fixing the typo to
# "datagram" would break callers.
SOCKET_ERROR = bidict(
    connection_refused=mod.SocketError.ConnectionRefusedError,
    remote_host_closed=mod.SocketError.RemoteHostClosedError,
    host_not_found=mod.SocketError.HostNotFoundError,
    socket_access=mod.SocketError.SocketAccessError,
    socket_resource=mod.SocketError.SocketResourceError,
    socket_timeout=mod.SocketError.SocketTimeoutError,
    diagram_too_large=mod.SocketError.DatagramTooLargeError,
    network=mod.SocketError.NetworkError,
    address_in_use=mod.SocketError.AddressInUseError,
    socket_address_not_available=mod.SocketError.SocketAddressNotAvailableError,
    unsupported_socket_operation=mod.SocketError.UnsupportedSocketOperationError,
    proxy_authentication_required=mod.SocketError.ProxyAuthenticationRequiredError,
    ssl_handshake_failed=mod.SocketError.SslHandshakeFailedError,
    unfinished_socket_operation=mod.SocketError.UnfinishedSocketOperationError,
    proxy_connection_refused=mod.SocketError.ProxyConnectionRefusedError,
    proxy_connection_closed=mod.SocketError.ProxyConnectionClosedError,
    proxy_connection_timeout=mod.SocketError.ProxyConnectionTimeoutError,
    proxy_not_found=mod.SocketError.ProxyNotFoundError,
    proxy_protocol=mod.SocketError.ProxyProtocolError,
    operation=mod.SocketError.OperationError,
    ssl_internal=mod.SocketError.SslInternalError,
    ssl_invalid_user_data=mod.SocketError.SslInvalidUserDataError,
    temporary=mod.SocketError.TemporaryError,
    unknown_socket=mod.SocketError.UnknownSocketError,
)

SocketErrorStr = Literal[
    "connection_refused",
    "remote_host_closed",
    "host_not_found",
    "socket_access",
    "socket_resource",
    "socket_timeout",
    "diagram_too_large",
    "network",
    "address_in_use",
    "socket_address_not_available",
    "unsupported_socket_operation",
    "proxy_authentication_required",
    "ssl_handshake_failed",
    "unfinished_socket_operation",
    "proxy_connection_refused",
    "proxy_connection_closed",
    "proxy_connection_timeout",
    "proxy_not_found",
    "proxy_protocol",
    "operation",
    "ssl_internal",
    "ssl_invalid_user_data",
    "temporary",
    "unknown_socket",
]

SOCKET_OPTION = bidict(
    low_delay=mod.SocketOption.LowDelayOption,
    keep_alive=mod.SocketOption.KeepAliveOption,
    multicast_ttl=mod.SocketOption.MulticastTtlOption,
    multicast_loopback=mod.SocketOption.MulticastLoopbackOption,
    type_of_service=mod.SocketOption.TypeOfServiceOption,
    send_buffer_size_socket=mod.SocketOption.SendBufferSizeSocketOption,
    receive_buffer_size=mod.SocketOption.ReceiveBufferSizeSocketOption,
    path_mtu_socket=mod.SocketOption.PathMtuSocketOption,
)

SocketOptionStr = Literal[
    "low_delay",
    "keep_alive",
    "multicast_ttl",
    "multicast_loopback",
    "type_of_service",
    "send_buffer_size_socket",
    "receive_buffer_size",
    "path_mtu_socket",
]

SOCKET_STATE = bidict(
    unconnected=mod.SocketState.UnconnectedState,
    host_lookup=mod.SocketState.HostLookupState,
    connecting=mod.SocketState.ConnectingState,
    connected=mod.SocketState.ConnectedState,
    bound=mod.SocketState.BoundState,
    closing=mod.SocketState.ClosingState,
    listening=mod.SocketState.ListeningState,
)

SocketStateStr = Literal[
    "unconnected",
    "host_lookup",
    "connecting",
    "connected",
    "bound",
    "closing",
    "listening",
]

SOCKET_TYPE = bidict(
    tcp=mod.SocketType.TcpSocket,
    udp=mod.SocketType.UdpSocket,
    sctp=mod.SocketType.SctpSocket,
    unknown=mod.SocketType.UnknownSocketType,
)

SocketTypeStr = Literal["tcp", "udp", "sctp", "unknown"]

# IP type-of-service precedence values (classic RFC 791 precedence levels).
TYPE_OF_SERVICE = bidict(
    network_control=224,
    internetwork_control=192,
    critic_ecp=160,
    flash_override=128,
    flash=96,
    immediate=64,
    priority=32,
    routine=0,
)

TypeOfServiceStr = Literal[
    "network_control",
    "internetwork_control",
    "critic_ecp",
    "flash_override",
    "flash",
    "immediate",
    "priority",
    "routine",
]

# Re-parent QAbstractSocket so prettyqt's IODevice mixin methods are inherited
# by all socket classes (prettyqt's usual monkeypatching pattern).
QtNetwork.QAbstractSocket.__bases__ = (core.IODevice,)
class AbstractSocket(QtNetwork.QAbstractSocket):
    """Snake_case convenience wrapper around QAbstractSocket.

    String constants (see the module-level Literal aliases) are accepted
    wherever Qt expects an enum/flag value.
    """

    def __repr__(self):
        return f"{type(self).__name__}()"

    def bind_to(
        self,
        address: str | QtNetwork.QHostAddress,
        port: int = 0,
        bind_mode: (
            QtNetwork.QAbstractSocket.BindMode | BindModeStr
        ) = "default_for_platform",
    ) -> bool:
        """Bind to *address*:*port*; *bind_mode* may be a string or a Qt flag."""
        host = QtNetwork.QHostAddress(address) if isinstance(address, str) else address
        mode = BIND_MODE[bind_mode] if bind_mode in BIND_MODE else bind_mode
        return self.bind(host, port, mode)

    def connect_to_host(
        self,
        hostname: str,
        port: int,
        open_mode: (QtCore.QIODevice.OpenMode | core.iodevice.OpenModeStr) = "read_write",
        protocol: (
            QtNetwork.QAbstractSocket.NetworkLayerProtocol | NetworkLayerProtocolStr
        ) = "any_ip",
    ):
        """Open a connection, mapping string constants to the Qt enum values."""
        mode = (
            open_mode
            if isinstance(open_mode, QtCore.QIODevice.OpenMode)
            else core.iodevice.OPEN_MODES[open_mode]
        )
        prot = (
            protocol
            if isinstance(protocol, QtNetwork.QAbstractSocket.NetworkLayerProtocol)
            else NETWORK_LAYER_PROTOCOL[protocol]
        )
        self.connectToHost(hostname, port, mode, prot)

    def get_error(self) -> SocketErrorStr:
        """Return the last socket error as its snake_case string name."""
        return SOCKET_ERROR.inverse[self.error()]

    def set_pause_mode(self, mode: PauseModeStr):
        """Set pause mode.

        Args:
            mode: pause mode

        Raises:
            InvalidParamError: pause mode does not exist
        """
        if mode not in PAUSE_MODES:
            raise InvalidParamError(mode, PAUSE_MODES)
        self.setPauseMode(PAUSE_MODES[mode])

    def get_pause_mode(self) -> PauseModeStr:
        """Return the current pause mode as a string constant."""
        return PAUSE_MODES.inverse[self.pauseMode()]

    def get_proxy(self) -> network.NetworkProxy:
        """Return the socket's proxy wrapped in prettyqt's NetworkProxy."""
        return network.NetworkProxy(self.proxy())

    # def set_socket_option(self, name: str, value):
    #     if name not in SOCKET_OPTION:
    #         raise InvalidParamError(name, SOCKET_OPTION)
    #     self.setSocketOption(SOCKET_OPTION[name], value)

    # def get_socket_option(self, name: str):
    #     return self.socketOption(SOCKET_OPTION[name])

    # def set_type_of_service(self, typ: str):
    #     if typ not in TYPE_OF_SERVICE:
    #         raise InvalidParamError(typ, TYPE_OF_SERVICE)
    #     self.set_socket_option("type_of_service", TYPE_OF_SERVICE[typ])

    # def get_type_of_service(self):
    #     opt = self.get_socket_option("type_of_service")
    #     return TYPE_OF_SERVICE.inverse[opt]

    def get_socket_type(self) -> SocketTypeStr:
        """Return the socket type ('tcp', 'udp', 'sctp' or 'unknown')."""
        return SOCKET_TYPE.inverse[self.socketType()]

    def get_state(self) -> SocketStateStr:
        """Return the connection state as a string constant."""
        return SOCKET_STATE.inverse[self.state()]

    def get_local_address(self) -> network.HostAddress:
        """Return the local address wrapped in prettyqt's HostAddress."""
        return network.HostAddress(self.localAddress())
|
#!/usr/local/bin/python3
# -*- coding: UTF-8 -*-
"""https://adventofcode.com/2018/day/1
--- Day 1: Chronal Calibration (part 1)---
After feeling like you've been falling for a few minutes, you look at the
device's tiny screen. "Error: Device must be calibrated before first use.
Frequency drift detected. Cannot maintain destination lock." Below the message,
the device shows a sequence of changes in frequency (your puzzle input). A value
like +6 means the current frequency increases by 6; a value like -3 means the
current frequency decreases by 3.
For example, if the device displays frequency changes of +1, -2, +3, +1, then
starting from a frequency of zero, the following changes would occur:
Current frequency 0, change of +1; resulting frequency 1.
Current frequency 1, change of -2; resulting frequency -1.
Current frequency -1, change of +3; resulting frequency 2.
Current frequency 2, change of +1; resulting frequency 3.
In this example, the resulting frequency is 3.
Here are other example situations:
+1, +1, +1 results in 3
+1, +1, -2 results in 0
-1, -2, -3 results in -6
Starting with a frequency of zero, what is the resulting frequency after all of
the changes in frequency have been applied?
"""
import pyperclip
from puzzle_inputs import day1_inputs
# Note: puzzle inputs differ by user
# Sum the signed frequency changes (e.g. "+6", "-3"). A generator expression
# avoids materializing an intermediate list the way sum([...]) did.
ans = sum(int(change) for change in day1_inputs)
print(f'The answer is: {ans:,}')
# Copy the answer to the clipboard for quick pasting into the AoC site.
pyperclip.copy(ans)
|
"""System utility functions."""
import errno
import logging
import os
import shutil
import subprocess
import sys
from typing import Optional, Union
import sparv.core.paths as paths
log = logging.getLogger(__name__)
def kill_process(process):
    """Kill a process, and ignore the error if it is already dead."""
    try:
        process.kill()
    except OSError as exc:
        # ESRCH means "no such process": it already exited, which is the
        # outcome we wanted. Any other OSError is a real failure.
        if exc.errno != errno.ESRCH:
            raise
def clear_directory(path):
    """Ensure *path* exists as an empty directory.

    Any existing contents of the directory are deleted first; a missing
    directory is simply created.
    """
    # ignore_errors covers the "doesn't exist yet" case.
    shutil.rmtree(path, ignore_errors=True)
    os.makedirs(path, exist_ok=True)
def call_java(jar, arguments, options=(), stdin="", search_paths=(),
              encoding=None, verbose=False, return_command=False):
    """Call java with a jar file, command line arguments and stdin.

    Returns a pair (stdout, stderr).
    If the verbose flag is True, pipes all stderr output to stderr,
    and an empty string is returned as the stderr component.
    If return_command is set, then the process is returned.

    Bug fix: the mutable default argument ``options=[]`` is replaced by the
    immutable ``()`` (same accepted types, no shared-state pitfall).
    """
    assert isinstance(arguments, (list, tuple))
    assert isinstance(options, (list, tuple))
    jarfile = find_binary(jar, search_paths, executable=False)
    # For WSD: use = instead of space in arguments
    # TODO: Remove when fixed!
    if isinstance(arguments[0], tuple):
        arguments = ["{}={}".format(x, y) for x, y in arguments]
    # JVM options come before -jar; program arguments after the jar file.
    java_args = list(options) + ["-jar", jarfile] + list(arguments)
    return call_binary("java", arguments=java_args, stdin=stdin,
                       search_paths=search_paths, encoding=encoding,
                       verbose=verbose, return_command=return_command)
def call_binary(name, arguments=(), stdin="", raw_command=None, search_paths=(), encoding=None, verbose=False,
                use_shell=False, allow_error=False, return_command=False):
    """Call a binary with arguments and stdin, return a pair (stdout, stderr).

    If the verbose flag is True, pipes all stderr output from the subprocess to
    stderr in the terminal, and an empty string is returned as the stderr component.
    If return_command is set, then the process is returned.
    """
    from subprocess import Popen, PIPE
    assert isinstance(arguments, (list, tuple))
    assert isinstance(stdin, (str, list, tuple))
    binary = find_binary(name, search_paths)
    if raw_command:
        # raw_command is a %-template for a complete shell command line.
        # NOTE(review): arguments are joined into the shell string unquoted --
        # unsafe if any argument could come from untrusted input.
        use_shell = True
        command = raw_command % binary
        if arguments:
            command = " ".join([command] + arguments)
    else:
        command = [binary] + [str(a) for a in arguments]
    if isinstance(stdin, (list, tuple)):
        # A list/tuple of stdin values is fed to the process as separate lines.
        stdin = "\n".join(stdin)
    if encoding is not None and isinstance(stdin, str):
        stdin = stdin.encode(encoding)
    log.info("CALL: %s", " ".join(str(c) for c in command) if not raw_command else command)
    command = Popen(command, shell=use_shell,
                    stdin=PIPE, stdout=PIPE,
                    stderr=(None if verbose else PIPE),
                    close_fds=False)
    if return_command:
        # Caller takes ownership of the still-running process.
        return command
    else:
        stdout, stderr = command.communicate(stdin)
        if not allow_error and command.returncode:
            # Log captured output before failing, to aid debugging.
            if stdout:
                log.info(stdout.decode())
            if stderr:
                log.warning(stderr.decode())
            raise OSError("%s returned error code %d" % (binary, command.returncode))
        if encoding:
            stdout = stdout.decode(encoding)
            # stderr is None when verbose=True (not captured).
            if stderr:
                stderr = stderr.decode(encoding)
        return stdout, stderr
def find_binary(name: Union[str, list], search_paths=(), executable: bool = True, allow_dir: bool = False,
                raise_error: bool = False) -> Optional[str]:
    """Search for the binary for a program.

    Args:
        name: Name of the binary, either a string or a list of strings with alternative names.
        search_paths: List of paths where to look, in addition to the environment variable PATH.
        executable: Set to False to not fail when binary is not executable.
        allow_dir: Set to True to allow the target to be a directory instead of a file.
        raise_error: Raise error if binary could not be found.

    Returns:
        Path to binary, or None if not found.
    """
    if isinstance(name, str):
        name = [name]
    name = list(map(os.path.expanduser, name))
    # Bug fix: os.getenv("PATH") is None when PATH is unset, which used to
    # crash on .split(); default to "" instead.
    search_paths = list(search_paths) + ["."] + [paths.bin_dir] + os.getenv("PATH", "").split(":")
    search_paths = list(map(os.path.expanduser, search_paths))
    # Use 'which' first
    for binary in name:
        if not os.path.dirname(binary) == "":
            # Skip names with a directory component: 'which' only resolves
            # bare names against PATH.
            continue
        path_to_bin = shutil.which(binary)
        if path_to_bin:
            return path_to_bin
    # Look for file in paths
    for directory in search_paths:
        for binary in name:
            path_to_bin = os.path.join(directory, binary)
            if os.path.isfile(path_to_bin) or (allow_dir and os.path.isdir(path_to_bin)):
                if executable and not allow_dir:
                    assert os.access(path_to_bin, os.X_OK), "Binary is not executable: %s" % path_to_bin
                return path_to_bin
    if raise_error:
        # Bug fix: the original joined `binary` (a single string), listing the
        # *characters* of the last candidate; list all candidate names instead.
        raise LookupError("Couldn't find binary: %s\nSearched in: %s\nFor binary names: %s" %
                          (name[0], ", ".join(search_paths), ", ".join(name)))
    else:
        return None
def rsync(local, host, remote=None):
    """Transfer files and/or directories using rsync.

    When syncing directories, extraneous files in destination dirs are deleted.
    """
    if remote is None:
        remote = local
    # The destination's parent directory must exist on the remote host.
    remote_dir = os.path.dirname(remote)
    if os.path.isdir(local):
        log.info("Copying directory: %s => %s", local, remote)
        # Trailing slash: sync the directory's contents; --delete mirrors it.
        args = ["--recursive", "--delete", "%s/" % local]
    else:
        log.info("Copying file: %s => %s", local, remote)
        args = [local]
    subprocess.check_call(["ssh", host, "mkdir -p '%s'" % remote_dir])
    subprocess.check_call(["rsync"] + args + ["%s:%s" % (host, remote)])
|
# Recursive Solution
def keypad(num):
    """Return all keypad letter combinations for the digits of *num*.

    Recursively solves the prefix (num // 10), then appends each character
    mapped to the last digit onto every prefix combination.
    """
    # Base case: no digits left (also covers digit 1, which maps to nothing).
    if num <= 1:
        return [""]
    if num <= 9:
        # Single digit: one combination per keypad character.
        return list(get_characters(num))
    prefix_combos = keypad(num // 10)          # LIST of strings for the prefix
    last_chars = get_characters(num % 10)      # string for the unit's digit
    # Same ordering as the original nested loops: iterate characters of the
    # last digit in the outer position, prefix combinations in the inner.
    return [combo + ch for ch in last_chars for combo in prefix_combos]
|
"""
To run this example, you need to put a VOAuth token with "google" scope in
a file called "TOKEN".
"""
from pyenlone.tasks import Tasks
from pyenlone.v import V
# Read the VOAuth token from disk (see module docstring).
with open("TOKEN") as token:
    tk = token.read().strip()
# Authenticate against both the Tasks and V APIs with the same token.
T = Tasks(voauth=tk)
V = V(token=tk)  # NOTE: rebinds the imported class name to an instance
your_google_id = V.googledata()["gid"]
ops = []
# Collect every operation that has at least one task completion by this user.
for op in T.get_operations():
    for task in op.get_tasks():
        for com in task.done:
            if com["user"] == your_google_id and op not in ops:
                print(op.name)
                ops.append(op)
print("Whoa! You helped in all of this operations? You are awesome!")
|
"""Define the Cluster object and cluster window"""
from __future__ import division
from __future__ import print_function
__authors__ = ['Martin Spacek']
import sys
import time
import random
import numpy as np
from PyQt4 import QtCore, QtGui, QtOpenGL, uic
from PyQt4.QtCore import Qt
getSaveFileName = QtGui.QFileDialog.getSaveFileName
from OpenGL import GL, GLU
from .core import SpykeToolWindow, lstrip, lst2shrtstr, tocontig
from .plot import CLUSTERCOLOURSRGB, GREYRGB, CLUSTERCOLOURRGBDICT
# Cap on spikes sampled per cluster when estimating positions (for speed).
CLUSTERPARAMMAXSAMPLES = 2000
# Initial camera translation distance along -z in the 3D cluster view.
VIEWDISTANCE = 50
class Cluster(object):
    """A container for scaled multidim cluster parameters.
    A Cluster will always correspond to a Neuron"""
    def __init__(self, neuron):
        self.neuron = neuron
        # unnormalized cluster position along each spike parameter dimension
        self.pos = {'x0':0, 'y0':0, 'sx':0, 'sy':0, 'Vpp':0, 'V0':0, 'V1':0,
                    'dt':0, 't':0}
        # cluster normpos are scaled values, suitable for plotting
        self.normpos = {'x0':0, 'y0':0, 'sx':0, 'sy':0, 'Vpp':0, 'V0':0, 'V1':0,
                        'dt':0, 't':0}

    def get_id(self):
        """A cluster's id mirrors its neuron's id."""
        return self.neuron.id

    def set_id(self, id):
        self.neuron.id = id

    id = property(get_id, set_id)

    '''
    # unused:
    def get_colour(self):
        return CLUSTERCOLOURRGBDICT[self.id]

    colour = property(get_colour)
    '''

    def __getstate__(self):
        """Get object state for pickling"""
        d = self.__dict__.copy()
        # don't save any PCA/ICA positions, too transient to be useful:
        pos = self.pos.copy() # don't modify original
        normpos = self.normpos.copy() # don't modify original
        assert sorted(pos) == sorted(normpos) # make sure they have same set of keys
        for key in list(pos): # need list() for snapshot of keys before any are deleted
            # component-analysis dims are named 'c0', 'c1', ... (see update_comppos)
            if key.startswith('c') and key[-1].isdigit():
                del pos[key]
                del normpos[key]
        d['pos'] = pos
        d['normpos'] = normpos
        return d

    def update_pos(self, dims=None, nsamples=CLUSTERPARAMMAXSAMPLES):
        """Update unnormalized and normalized cluster positions for self along specified
        dims. Use median instead of mean to reduce influence of outliers on cluster
        position. Subsample for speed"""
        sort = self.neuron.sort
        spikes = sort.spikes
        if dims is None: # use all of them
            dims = list(self.pos) # some of these might not exist in spikes array
        sids = self.neuron.sids
        nspikes = len(sids)
        if nsamples and nspikes > nsamples: # subsample spikes
            step = nspikes // nsamples + 1
            print('n%d: update_pos() sampling every %d spikes instead of all %d'
                  % (self.id, step, nspikes))
            sids = sids[::step]
            nspikes = len(sids) # update
        # check for pre-calculated spike param means and stds
        try: sort.means
        except AttributeError: sort.means = {}
        try: sort.stds
        except AttributeError: sort.stds = {}
        ## FIXME: some code duplication from sort.get_param_matrix()?
        for dim in dims:
            try:
                spikes[dim]
            except ValueError:
                continue # this dim doesn't exist in spikes record array, ignore it
            # data from all spikes:
            data = spikes[dim]
            # data from neuron's spikes, potentially subsample of them,
            # copied for in-place normalization:
            subdata = np.float64(data[sids].copy())
            # update unnormalized position:
            self.pos[dim] = float(np.median(subdata)) # from np.float64 for clean jsonpickle
            # calculate mean and std for normalization:
            try:
                mean = sort.means[dim]
            except KeyError:
                mean = data.mean()
                sort.means[dim] = float(mean) # save, from np.float for clean jsonpickle
            if dim in ['x0', 'y0'] and sort.probe.ncols > 1: # norm spatial params by x0 std
                try:
                    std = sort.stds['x0']
                except KeyError:
                    std = spikes['x0'].std()
                    sort.stds['x0'] = float(std) # save, from np.float for clean jsonpickle
            else: # normalize all other params by their std
                try:
                    std = sort.stds[dim]
                except KeyError:
                    std = data.std()
                    sort.stds[dim] = float(std) # save, from np.float for clean jsonpickle
            # now do the actual normalization:
            subdata -= mean
            if std != 0:
                subdata /= std
            # update normalized position:
            self.normpos[dim] = float(np.median(subdata)) # from float64 for clean jsonpickle

    def update_comppos(self, X, sids, nsamples=CLUSTERPARAMMAXSAMPLES):
        """Update unnormalized and normalized component analysis (PCA/ICA) values for
        self. Use median instead of mean to reduce influence of outliers on cluster
        position. Subsample for speed"""
        sort = self.neuron.sort
        ncomp = X.shape[1]
        mysids = self.neuron.sids
        # get all sids that belong to self:
        mysids = np.intersect1d(sids, mysids, assume_unique=True)
        nspikes = len(mysids)
        if nsamples and nspikes > nsamples: # subsample spikes
            step = nspikes // nsamples + 1
            print('n%d: update_comppos() sampling every %d spikes instead '
                  'of all %d in last CA' % (self.id, step, nspikes))
            mysids = mysids[::step]
            nspikes = len(mysids) # update
        # row indices into X of this cluster's (subsampled) spikes:
        sidis = sids.searchsorted(mysids)
        subX = X[sidis].copy() # this copy is necessary for in-place operations
        medians = np.median(subX, axis=0)
        # normalize against the mean/std of *all* spikes in X, not just ours:
        mean = X.mean(axis=0)
        std = X.std(axis=0)
        subX -= mean
        subX /= std
        normmedians = np.median(subX, axis=0)
        # write component fields to dicts:
        for compid in range(ncomp):
            dim = 'c%d' % compid
            self.pos[dim] = medians[compid]
            self.normpos[dim] = normmedians[compid]
class ClusterWindow(SpykeToolWindow):
    """Tool window hosting the 3D OpenGL cluster view (GLWidget)."""

    def __init__(self, parent, pos=None, size=None):
        SpykeToolWindow.__init__(self, parent, flags=QtCore.Qt.Tool)
        self.spykewindow = parent
        self.setWindowTitle("Cluster Window")
        # Bug fix: pos and size default to None but were unpacked
        # unconditionally (self.move(*pos) / self.resize(*size)), raising
        # TypeError whenever the defaults were used. Only apply geometry
        # that was explicitly provided.
        if pos is not None:
            self.move(*pos)
        if size is not None:
            self.resize(*size)
        self.glWidget = GLWidget(parent=self)
        self.setCentralWidget(self.glWidget)

    def closeEvent(self, event):
        # Hide rather than destroy, so the window can be reopened instantly.
        self.spykewindow.HideWindow('Cluster')

    def keyPressEvent(self, event):
        key = event.key()
        if key == Qt.Key_F11:
            SpykeToolWindow.keyPressEvent(self, event) # pass it up
        else:
            self.glWidget.keyPressEvent(event) # pass it down

    def keyReleaseEvent(self, event):
        self.glWidget.keyReleaseEvent(event) # pass it down

    def plot(self, X, sids, nids):
        """Plot 3D projection of (possibly clustered) spike params in X"""
        X = tocontig(X) # ensure it's contig
        gw = self.glWidget
        gw.points = X
        gw.npoints = len(X)
        gw.sids = sids
        gw.nids = nids
        gw.colour() # set colours
        gw.updateGL()
class GLWidget(QtOpenGL.QGLWidget):
    def __init__(self, parent=None):
        """Set up view state and the double-buffered GL format."""
        QtOpenGL.QGLWidget.__init__(self, parent)
        self.spw = self.parent().spykewindow
        #self.setMouseTracking(True) # req'd for tooltips purely on mouse motion, slow
        self.lastPressPos = QtCore.QPoint()
        self.lastPos = QtCore.QPoint()
        self.focus = np.float32([0, 0, 0]) # init camera focus
        self.axes = 'both' # display both mini and focal xyz axes by default
        self.selecting = None # True (selecting), False (deselecting), or None
        self.plot_selection = True # plot selection in sort panel? False for quicker selection
        self.update_sigma()
        # re-render the focal axes whenever sigma changes in the main window:
        self.spw.ui.sigmaSpinBox.valueChanged.connect(self.update_focal_axes)
        format = QtOpenGL.QGLFormat()
        format.setDoubleBuffer(True) # req'd for picking
        self.setFormat(format)
def get_sids(self):
return self._sids
def set_sids(self, sids):
"""Set up rgbsids array for later use in self.pick()"""
self._sids = sids
# encode sids in RGB
r = sids // 256**2
rem = sids % 256**2 # remainder
g = rem // 256
b = rem % 256
self.rgbsids = np.zeros((self.npoints, 3), dtype=np.uint8)
self.rgbsids[:, 0] = r
self.rgbsids[:, 1] = g
self.rgbsids[:, 2] = b
sids = property(get_sids, set_sids)
    def colour(self, sids=None, sat=1):
        """Set colours of points corresponding to sids according to their nids, with
        saturation level sat. Caller is responsible for calling self.updateGL()"""
        if sids is None: # init/overwrite self.colours
            nids = self.nids
            # uint8, single unit nids are 1-based:
            self.colours = CLUSTERCOLOURSRGB[nids % len(CLUSTERCOLOURSRGB) - 1] * sat
            # overwrite unclustered/multiunit points with GREYRGB
            self.colours[nids < 1] = GREYRGB * sat
        else: # assume self.colours exists
            # recolour only the points belonging to the given sids:
            sidis = self.sids.searchsorted(sids)
            nids = self.nids[sidis]
            self.colours[sidis] = CLUSTERCOLOURSRGB[nids % len(CLUSTERCOLOURSRGB) - 1] * sat
            self.colours[sidis[nids < 1]] = GREYRGB * sat
    def initializeGL(self):
        """One-time GL state setup: clear colours, depth test, camera pose."""
        # these are the defaults anyway, but just to be thorough:
        GL.glClearColor(0.0, 0.0, 0.0, 1.0)
        GL.glClearDepth(1.0)
        # display points according to occlusion, not order of plotting:
        GL.glEnable(GL.GL_DEPTH_TEST)
        # doesn't seem to work right, proper way to antialiase?:
        #GL.glEnable(GL.GL_POINT_SMOOTH)
        GL.glEnable(GL.GL_LINE_SMOOTH) # works better, makes lines thicker
        #GL.glPointSize(1.5) # truncs to the nearest pixel if antialiasing is off
        # set initial position and orientation of camera
        GL.glTranslate(0, 0, -VIEWDISTANCE)
        GL.glRotate(-45, 0, 0, 1)
        GL.glRotate(-45, 0, 1, 0)
    def reset(self):
        """Stop plotting"""
        # Zero points makes paintGL's glDrawArrays a no-op.
        self.npoints = 0
        self.updateGL()
    def paintGL(self):
        """Render all points from vertex/colour arrays, then overlay the axes."""
        GL.glClear(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)
        # Don't load identity matrix. Do all transforms in place against current matrix
        # and take advantage of OpenGL's state-machineness.
        #GL.glLoadIdentity() # loads identity matrix into top of matrix stack
        GL.glEnableClientState(GL.GL_COLOR_ARRAY)
        GL.glEnableClientState(GL.GL_VERTEX_ARRAY)
        GL.glColorPointerub(self.colours) # should be n x rgb uint8, ie usigned byte
        GL.glVertexPointerf(self.points) # should be n x 3 contig float32
        GL.glDrawArrays(GL.GL_POINTS, 0, self.npoints)
        if self.axes: # paint xyz axes
            GL.glClear(GL.GL_DEPTH_BUFFER_BIT) # make axes paint on top of data points
            if self.axes in ['both', 'mini']:
                self.paint_mini_axes()
            if self.axes in ['both', 'focal']:
                self.paint_focal_axes()
        # doesn't seem to be necessary, even though double-buffered mode is set with the
        # back buffer for RGB sid encoding. In fact, swapBuffers() call seems to cause
        # flickering, so leave disabled:
        #self.swapBuffers()
    def resizeGL(self, width, height):
        """Rebuild the projection matrix for the new widget dimensions."""
        GL.glViewport(0, 0, width, height)
        GL.glMatrixMode(GL.GL_PROJECTION)
        GL.glLoadIdentity()
        # fov (deg) controls amount of perspective, and as a side effect initial apparent size.
        # fov, aspect, nearz & farz clip planes:
        GLU.gluPerspective(45, width/height, 0.0001, 1000)
        GL.glMatrixMode(GL.GL_MODELVIEW)
    def paint_mini_axes(self):
        """Paint mini xyz axes in bottom left of widget"""
        w, h = self.width(), self.height()
        vt = self.getTranslation() # this is in eye coordinates
        GL.glViewport(0, 0, w//8, h//8) # mini viewport at bottom left of widget
        self.setTranslation((0, 0, -3)) # draw in center of this mini viewport
        self.paint_axes()
        self.setTranslation(vt) # restore translation vector to MV matrix
        GL.glViewport(0, 0, w, h) # restore full viewport
def paint_focal_axes(self):
    """Paint xyz axes proportional in size to sigma, at focus"""
    GL.glTranslate(*self.focus) # translate to focus
    self.paint_axes(self.sigma)
    GL.glTranslate(*-self.focus) # translate back (-focus negates elementwise)
def update_focal_axes(self):
    """Called every time sigma is changed in main spyke window"""
    self.update_sigma() # pull the new sigma value from the spin box
    self.updateGL() # redraw so the focal axes reflect the new size
def update_sigma(self):
    """Update self.sigma from the main spyke window's sigma spin box."""
    self.sigma = self.spw.ui.sigmaSpinBox.value()
def paint_axes(self, l=1):
    """Draw x, y, z axis lines of length l from the origin, coloured
    red, green and blue respectively."""
    origin = (0, 0, 0)
    # (colour, tip vertex) per axis, in x, y, z order:
    axes = (((1, 0, 0), (l, 0, 0)),  # red x axis
            ((0, 1, 0), (0, l, 0)),  # green y axis
            ((0, 0, 1), (0, 0, l)))  # blue z axis
    GL.glBegin(GL.GL_LINES)
    for colour, tip in axes:
        GL.glColor3f(*colour)
        GL.glVertex3f(*origin)
        GL.glVertex3f(*tip)
    GL.glEnd()
def get_MV(self):
    """Return the current OpenGL modelview matrix (4x4 doubles)."""
    return GL.glGetDoublev(GL.GL_MODELVIEW_MATRIX)
def set_MV(self, MV):
    """Load MV as the current OpenGL modelview matrix."""
    GL.glLoadMatrixd(MV)
# Expose the OpenGL modelview matrix as a read/write attribute.
MV = property(get_MV, set_MV)
# modelview matrix is column major, so we work on columns instead of rows
def getViewRight(self):
    """View right vector: 1st col of modelview matrix"""
    return self.MV[:3, 0] # top 3 entries of column 0
def getViewUp(self):
    """View up vector: 2nd col of modelview matrix"""
    return self.MV[:3, 1] # top 3 entries of column 1
def getViewNormal(self):
    """View normal vector: 3rd col of modelview matrix"""
    return self.MV[:3, 2] # top 3 entries of column 2
def getTranslation(self):
    """Translation vector: 4th row of modelview matrix"""
    return self.MV[3, :3] # first 3 entries of row 3
def setTranslation(self, vt):
    """Set translation vector (4th row of modelview matrix) to vt."""
    MV = self.MV
    MV[3, :3] = vt
    self.MV = MV # write the modified matrix back to OpenGL
def getDistance(self):
    """Return Euclidean distance from the camera translation to the focus."""
    v = self.getTranslation()
    # for pan and zoom, doesn't seem to matter whether d is from origin or focus
    #return np.sqrt((v**2).sum()) # from data origin
    return np.sqrt(((v-self.focus)**2).sum()) # from focus
def lookDownXAxis(self):
    """Look down x axis: make x, y, z axes point out, right, and up"""
    # overwrite the rotation (top left 3x3) part of the modelview matrix
    m = self.MV
    m[:3, :3] = [[0, 0, 1], [1, 0, 0], [0, 1, 0]]
    self.MV = m
def lookUpXAxis(self):
    """Look up x axis: make x, y, z axes point in, left, and up"""
    # overwrite the rotation (top left 3x3) part of the modelview matrix
    m = self.MV
    m[:3, :3] = [[0, 0, -1], [-1, 0, 0], [0, 1, 0]]
    self.MV = m
def lookDownYAxis(self):
    """Look down y axis: make x, y, z axes point left, out, and up"""
    # overwrite the rotation (top left 3x3) part of the modelview matrix
    m = self.MV
    m[:3, :3] = [[-1, 0, 0], [0, 0, 1], [0, 1, 0]]
    self.MV = m
def lookUpYAxis(self):
    """Look up y axis: make x, y, z axes point right, in, and up"""
    # overwrite the rotation (top left 3x3) part of the modelview matrix
    m = self.MV
    m[:3, :3] = [[1, 0, 0], [0, 0, -1], [0, 1, 0]]
    self.MV = m
def lookDownZAxis(self):
    """Look down z axis: make x, y, z axes point down, right, and out"""
    # overwrite the rotation (top left 3x3) part of the modelview matrix
    m = self.MV
    m[:3, :3] = [[0, -1, 0], [1, 0, 0], [0, 0, 1]]
    self.MV = m
def lookUpZAxis(self):
    """Look up z axis: make x, y, z axes point up, right, and in"""
    # overwrite the rotation (top left 3x3) part of the modelview matrix
    m = self.MV
    m[:3, :3] = [[0, 1, 0], [1, 0, 0], [0, 0, -1]]
    self.MV = m
def rotateXOut(self):
    """Make x axis point out. Work on top left 3x3 subset of MV matrix.
    This was deduced by watching behaviour of MV matrix while manually
    rotating the x axis out. This is what we want, where a**2 + b**2 = 1:
    [0 0 1 *
     a -b 0 *
     b a 0 *
     * * * *]
    """
    MV = self.MV
    MV[:3, 2] = 1, 0, 0 # 3rd col is normal vector, make it point along x axis
    # set top left and top middle values to zero:
    MV[0, 0] = 0
    MV[0, 1] = 0
    b = MV[2, 0] # grab bottom left value
    a = np.sqrt(1 - b**2) # calc new complementary value to get normalized vectors
    # NOTE(review): unlike rotateYRight/rotateZUp, the sign-stabilizing branch
    # below is disabled here -- presumably deliberate, but worth confirming:
    #if MV[1, 0] < 0:
    #    a = -a # keep a -ve, reduce jumping around of axes
    MV[1, 0] = a
    MV[2, 1] = a
    MV[1, 1] = -b # needs to be -ve of MV[2, 0]
    self.MV = MV
def rotateYRight(self):
    """Make y axis point right. Work on top left 3x3 subset of MV matrix.
    This was deduced by watching behaviour of MV matrix while manually
    rotating the y axis right. This is what we want, where a**2 + b**2 = 1:
    [0 a b *
     1 0 0 *
     0 b -a *
     * * * *]
    """
    MV = self.MV
    MV[:3, 0] = 0, 1, 0 # 1st col is right vector, make it point along y axis
    # set middle middle and middle right values to zero:
    MV[1, 1] = 0
    MV[1, 2] = 0
    a = MV[0, 1] # grab top middle value
    b = np.sqrt(1 - a**2) # calc new complementary value to get normalized vectors
    if MV[2, 1] < 0:
        b = -b # keep b -ve, reduce jumping around of axes
    MV[2, 1] = b
    MV[0, 2] = b
    MV[2, 2] = -a # needs to be -ve of MV[0, 1]
    self.MV = MV
def rotateZUp(self):
    """Make z axis point up. Work on top left 3x3 subset of MV matrix.
    This was deduced by watching behaviour of MV matrix while manually
    rotating the z axis up. This is what we want, where a**2 + b**2 = 1:
    [a 0 b *
     b 0 -a *
     0 1 0 *
     * * * *]
    """
    MV = self.MV
    MV[:3, 1] = 0, 0, 1 # 2nd col is up vector, make it point along z axis
    # set bottom left and bottom right z values to zero:
    MV[2, 0] = 0
    MV[2, 2] = 0
    a = MV[0, 0] # grab top left value
    b = np.sqrt(1 - a**2) # calc new complementary value to get normalized vectors
    if MV[1, 0] < 0:
        b = -b # keep b -ve, reduce jumping around of axes
    MV[1, 0] = b
    MV[0, 2] = b
    MV[1, 2] = -a # needs to be -ve of MV[0, 0]
    self.MV = MV
def pan(self, dx, dy):
    """Translate along view right and view up vectors.

    dx, dy: fractional screen-space deltas; scaled by camera distance so
    panning speed is proportional to how far away the camera is.
    """
    d = self.getDistance()
    vr = self.getViewRight()
    vr *= dx*d # scale the (fresh) right vector in place
    GL.glTranslate(vr[0], vr[1], vr[2])
    vu = self.getViewUp()
    vu *= dy*d
    GL.glTranslate(vu[0], vu[1], vu[2])
def zoom(self, dr):
    """Translate along view normal vector; dr is scaled by camera distance
    so zoom speed slows as the camera approaches the focus."""
    d = self.getDistance()
    vn = self.getViewNormal()
    vn *= dr*d
    GL.glTranslate(vn[0], vn[1], vn[2])
def pitch(self, dangle): # aka elevation
    """Rotate dangle degrees around the view right vector, pivoting about
    the focus point."""
    vr = self.getViewRight()
    GL.glTranslate(*self.focus)
    GL.glRotate(dangle, *vr)
    GL.glTranslate(*-self.focus)
def yaw(self, dangle): # aka azimuth
    """Rotate dangle degrees around the view up vector, pivoting about
    the focus point."""
    vu = self.getViewUp()
    GL.glTranslate(*self.focus)
    GL.glRotate(dangle, *vu)
    GL.glTranslate(*-self.focus)
def roll(self, dangle):
    """Rotate dangle degrees around the view normal vector, pivoting about
    the focus point."""
    vn = self.getViewNormal()
    GL.glTranslate(*self.focus)
    GL.glRotate(dangle, *vn)
    GL.glTranslate(*-self.focus)
def panTo(self, p=None):
    """Translate along view right and view up vectors such that data point p is
    centered in the viewport. Not entirely sure why or how this works, figured
    it out using guess and test.

    p: 3-element point (defaults to the current focus).
    """
    # Bug fix: `p == None` compares elementwise when p is a numpy array
    # (self.focus is one), which raises in boolean context; test identity:
    if p is None:
        p = self.focus
    MV = self.MV
    vr = self.getViewRight()
    vu = self.getViewUp()
    p = -p
    x = np.dot(p, vr) # dot product
    y = np.dot(p, vu)
    MV[3, :2] = x, y # set first two entries of 4th row to x, y
    self.MV = MV
def pick(self, x, y, pb=2, multiple=False):
    """Return sid of point at window coords x, y (bottom left origin),
    or first or multiple sids that fall within a square 2*pb+1 pix on a side,
    centered on x, y. pb is the pixel border to include around x, y.

    Works by redrawing all points to the back buffer with each point's colour
    encoding its sid, then reading the pixel block back and decoding.
    Returns None when nothing is hit.
    """
    width = self.size().width()
    height = self.size().height()
    #print('coords: %d, %d' % (x, y))
    # constrain to within border 1 pix smaller than widget, for glReadPixels call
    if not (pb <= x < width-pb and pb <= y < height-pb): # cursor out of range
        return
    if self.npoints > 2**24-2: # the last one is the full white background used as a no hit
        raise OverflowError("Can't pick from more than 2**24-2 sids")
    # draw encoded RGB values to back buffer
    #GL.glDrawBuffer(GL_BACK) # defaults to back
    GL.glClearColor(1.0, 1.0, 1.0, 1.0) # highest possible RGB means no hit
    GL.glClear(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)
    GL.glEnableClientState(GL.GL_COLOR_ARRAY)
    GL.glEnableClientState(GL.GL_VERTEX_ARRAY)
    GL.glColorPointerub(self.rgbsids) # unsigned byte, ie uint8
    GL.glVertexPointerf(self.points) # float32
    GL.glDrawArrays(GL.GL_POINTS, 0, self.npoints) # to back buffer
    GL.glClearColor(0.0, 0.0, 0.0, 1.0) # restore to default black
    # grab back buffer:
    #GL.glReadBuffer(GL.GL_BACK) # defaults to back
    # find rgb at or around cursor coords, decode sid:
    backbuffer = GL.glReadPixels(x=x-pb, y=y-pb, width=2*pb+1, height=2*pb+1,
                                 format=GL.GL_RGB, type=GL.GL_UNSIGNED_BYTE,
                                 array=None, outputType=None)
    # NOTE: outputType kwarg above must be set to something other than str to ensure
    # that an array is returned, instead of a string of bytes
    if (backbuffer == 255).all(): # no hit
        return
    if not multiple:
        sid = self.decodeRGB(backbuffer[pb, pb]) # check center of backbuffer
        if sid is not None: # `is not None`: sid 0 is a valid hit
            #print('hit at exact cursor pos')
            return sid # hit at exact cursor position
    # 2D array with nonzero entries at hits:
    hitpix = (backbuffer != [255, 255, 255]).sum(axis=2)
    if not multiple:
        ri = np.where(hitpix.ravel())[0][0] # get ravelled index of first hit
        # pass the shape positionally: the `dims` keyword was deprecated in
        # NumPy 1.16 (renamed to `shape`) and later removed:
        i, j = np.unravel_index(ri, hitpix.shape) # unravel to 2D index
        #print('Hit at %d, %d' % (i, j))
        return self.decodeRGB(backbuffer[i, j]) # should be a valid sid
    ijs = zip(*np.where(hitpix)) # iterable of ij tuples
    return np.asarray([ self.decodeRGB(backbuffer[i, j]) for i, j in ijs ])
def decodeRGB(self, rgb):
    """Decode an encoded RGB triplet back into a spike ID (sid).

    Colours encode sids in base 256: sid = r*2**16 + g*2**8 + b. The
    all-white value (2**24 - 1) is the background "no hit" marker, for
    which None is returned.
    """
    red, green, blue = rgb
    sid = red * 65536 + green * 256 + blue
    if sid >= 16777215: # 2**24 - 1: background, not a valid sid
        return None
    return sid
def cursorPosQt(self):
    """Get current mouse cursor position in Qt coords (top left origin)"""
    globalPos = QtGui.QCursor.pos()
    pos = self.mapFromGlobal(globalPos) # screen -> widget coords
    return pos.x(), pos.y()
def cursorPosGL(self):
    """Get current mouse cursor position in OpenGL coords (bottom left origin)"""
    globalPos = QtGui.QCursor.pos()
    pos = self.mapFromGlobal(globalPos) # screen -> widget coords
    y = self.size().height() - pos.y() # flip y: Qt top-left -> GL bottom-left
    return pos.x(), y
def GLtoQt(self, x, y):
    """Convert GL screen coords to Qt, return as QPoint"""
    y = self.size().height() - y # flip y axis back to top-left origin
    return QtCore.QPoint(x, y)
def mousePressEvent(self, event):
    """Record mouse position on button press, for use in mouseMoveEvent. On middle
    click, select spikes"""
    #sw = self.spw.windows['Sort']
    buttons = event.buttons()
    if buttons == QtCore.Qt.MiddleButton:
        #sw.on_actionSelectRandomSpikes_triggered()
        #sw.spykewindow.ui.plotButton.click() # same as hitting ENTER in nslist
        self.selecting = True # enter selection mode until button release
        self.setMouseTracking(True) # while selecting
        self.selectPointsUnderCursor()
    # remember both the press position and the running "last" position:
    self.lastPressPos = QtCore.QPoint(event.pos())
    self.lastPos = QtCore.QPoint(event.pos())
def mouseReleaseEvent(self, event):
    """End middle-button selection mode; a right click that didn't move
    selects random spikes in the Sort window."""
    # seems have to use event.button(), not event.buttons(). I guess you can't
    # release multiple buttons simultaneously the way you can press them simultaneously?
    sw = self.spw.windows['Sort']
    button = event.button()
    if button == QtCore.Qt.MiddleButton:
        self.selecting = None
        self.setMouseTracking(False) # done selecting
    elif button == QtCore.Qt.RightButton:
        if QtCore.QPoint(event.pos()) == self.lastPressPos: # mouse didn't move
            sw.on_actionSelectRandomSpikes_triggered()
def mouseDoubleClickEvent(self, event):
    """Clear selection, if any"""
    if event.button() == QtCore.Qt.LeftButton:
        sw = self.spw.windows['Sort']
        sw.clear()
def mouseMoveEvent(self, event):
    """Handle drag gestures: LMB rotates (shift pans, ctrl rolls), RMB zooms
    (shift/ctrl adjusts sigma). Also continues point selection while active."""
    buttons = event.buttons()
    if buttons != Qt.NoButton:
        modifiers = event.modifiers()
        shift = modifiers == Qt.ShiftModifier # only modifier is shift
        ctrl = modifiers == Qt.ControlModifier # only modifier is ctrl
        dx = event.x() - self.lastPos.x()
        dy = event.y() - self.lastPos.y()
        if buttons == QtCore.Qt.LeftButton:
            if shift:
                # NOTE(review): dx/700 truncates to 0 under Python 2 integer
                # division -- presumably runs under true division; confirm.
                self.pan(dx/700, -dy/700) # qt viewport y axis points down
            elif ctrl:
                self.roll(-0.5*dx - 0.5*dy)
            else:
                self.yaw(0.5*dx)
                self.pitch(0.5*dy)
        elif buttons == QtCore.Qt.RightButton:
            if shift or ctrl:
                self.spw.ui.sigmaSpinBox.stepBy(-dy)
            else:
                self.zoom(-dy/500) # qt viewport y axis points down
        self.updateGL()
    self.lastPos = QtCore.QPoint(event.pos())
    if self.selecting != None:
        self.selectPointsUnderCursor()
    '''
    # pop up a tooltip on mouse movement, requires mouse tracking enabled
    if buttons == Qt.NoButton:
        self.showToolTip()
    else:
        QtGui.QToolTip.hideText()
    '''
def wheelEvent(self, event):
    """Mouse wheel: zoom, or adjust sigma when shift/ctrl is held."""
    modifiers = event.modifiers()
    shift = modifiers == Qt.ShiftModifier # only modifier is shift
    ctrl = modifiers == Qt.ControlModifier # only modifier is ctrl
    if shift or ctrl: # modify sigma
        # event.delta() seems to always be a multiple of 120 for some reason:
        # NOTE(review): under Python 3 true division this yields a float;
        # stepBy() may expect an int -- confirm against the Qt binding used.
        self.spw.ui.sigmaSpinBox.stepBy(5 * event.delta() / 120)
    else: # zoom
        self.zoom(event.delta() / 2000)
    self.updateGL()
def keyPressEvent(self, event):
    """Keyboard controls.

    Arrows rotate (shift: pan, ctrl: roll/zoom); 0 resets focus to origin;
    F focuses on the point under the cursor; ctrl+A cycles axes display;
    1/2/3 snap the view to the x/y/z axis (ctrl reverses direction);
    S/D start selecting/deselecting points under the cursor; V shows the
    projection dialog; ENTER plots; most other keys are forwarded to the
    Sort window (or Cluster window for F11).
    """
    key = event.key()
    modifiers = event.modifiers()
    sw = self.spw.windows['Sort']
    shift = modifiers == Qt.ShiftModifier # only modifier is shift
    ctrl = modifiers == Qt.ControlModifier # only modifier is ctrl
    if key == Qt.Key_Left:
        if shift:
            self.pan(-0.05, 0)
        elif ctrl:
            self.roll(5)
        else:
            self.yaw(-5)
    elif key == Qt.Key_Right:
        if shift:
            self.pan(0.05, 0)
        elif ctrl:
            self.roll(-5)
        else:
            self.yaw(5)
    elif key == Qt.Key_Up:
        if shift:
            self.pan(0, 0.05)
        elif ctrl:
            self.zoom(0.05)
        else:
            self.pitch(-5)
    elif key == Qt.Key_Down:
        if shift:
            self.pan(0, -0.05)
        elif ctrl:
            self.zoom(-0.05)
        else:
            self.pitch(5)
    elif key == Qt.Key_Question:
        self.showToolTip()
    elif key == Qt.Key_0: # reset focus to origin
        self.focus = np.float32([0, 0, 0])
        self.panTo() # pan to new focus
    elif key == Qt.Key_F: # reset focus to cursor position
        sid = self.pick(*self.cursorPosGL())
        if sid != None:
            self.focus = self.points[self.sids.searchsorted(sid)]
            self.panTo() # pan to new focus
    elif key == Qt.Key_A and ctrl: # cycle through xyz axes display, A on its own plots
        # cycle False -> 'both' -> 'mini' -> 'focal' -> False
        if self.axes == False:
            self.axes = 'both'
        elif self.axes == 'both':
            self.axes = 'mini'
        elif self.axes == 'mini':
            self.axes = 'focal'
        elif self.axes == 'focal':
            self.axes = False
    elif key == Qt.Key_1: # look along x axis
        if ctrl:
            self.lookUpXAxis()
        else:
            self.lookDownXAxis()
    elif key == Qt.Key_2: # look along y axis
        if ctrl:
            self.lookUpYAxis()
        else:
            self.lookDownYAxis()
    elif key == Qt.Key_3: # look along z axis
        if ctrl:
            self.lookUpZAxis()
        else:
            self.lookDownZAxis()
    #elif key == Qt.Key_X: # make x axis point out
    #    self.rotateXOut()
    #elif key == Qt.Key_Y: # make y axis point right
    #    self.rotateYRight()
    #elif key == Qt.Key_Z: # make z axis point up
    #    self.rotateZUp()
    elif key == Qt.Key_S:
        if event.isAutoRepeat():
            return # event.ignore()?
        if shift:
            self.plot_selection = False # for quicker selection
        # select points under the cursor, if any:
        self.selecting = True
        self.setMouseTracking(True) # while selecting
        self.selectPointsUnderCursor()
    elif key == Qt.Key_D: # deselect points under the cursor, if any
        if event.isAutoRepeat():
            return # event.ignore()?
        self.selecting = False
        self.setMouseTracking(True) # while deselecting
        self.selectPointsUnderCursor()
    elif key == Qt.Key_V: # V for View
        self.showProjectionDialog()
    elif key in [Qt.Key_Enter, Qt.Key_Return]:
        sw.spykewindow.ui.plotButton.click() # same as hitting ENTER in nslist
    elif key == Qt.Key_F11:
        self.parent().keyPressEvent(event) # pass it on to parent Cluster window
    elif key in [Qt.Key_A, Qt.Key_X, Qt.Key_N, Qt.Key_Escape, Qt.Key_Delete, Qt.Key_M,
                 Qt.Key_G, Qt.Key_Equal, Qt.Key_Minus,
                 Qt.Key_Slash, Qt.Key_P, Qt.Key_Backslash, Qt.Key_NumberSign, Qt.Key_R,
                 Qt.Key_Space, Qt.Key_B, Qt.Key_Comma, Qt.Key_Period, Qt.Key_F5,
                 Qt.Key_E, Qt.Key_C, Qt.Key_T, Qt.Key_W]:
        sw.keyPressEvent(event) # pass it on to Sort window
    self.updateGL()
def keyReleaseEvent(self, event):
    """On release of S (select) or D (deselect), leave selection mode and
    restore the plot_selection flag."""
    key = event.key()
    # NOTE: the original also computed event.modifiers()/shift here, but
    # never used them; removed as dead locals.
    if not event.isAutoRepeat() and key in [Qt.Key_S, Qt.Key_D]:
        # stop selecting/deselecting, reset plot_selection flag
        self.selecting = None
        self.setMouseTracking(False)
        self.plot_selection = True
def save(self):
    """Save cluster plot to file"""
    fname = getSaveFileName(self, "Save cluster plot to", 'cluster_plot.png')
    if fname:
        fname = str(fname) # convert from QString
        image = self.grabFrameBuffer() # defaults to withAlpha=False, makes no difference
        try:
            image.save(fname)
        except Exception as e:
            QtGui.QMessageBox.critical(
                self.panel, "Error saving file", str(e),
                QtGui.QMessageBox.Ok, QtGui.QMessageBox.NoButton)
        # NOTE(review): this prints even when image.save() failed above
        print('Cluster plot saved to %r' % fname)
def showToolTip(self):
    """Pop up a nid or sid tooltip at current mouse cursor position"""
    # hide first if you want tooltip to move even when text is unchanged:
    #QtGui.QToolTip.hideText()
    spw = self.spw
    sort = spw.sort
    x, y = self.cursorPosGL()
    sid = self.pick(x, y)
    if sid != None:
        spos = [] # spike position along each plotted dimension
        dims = spw.GetClusterPlotDims()
        for dim in dims:
            if dim.startswith('c') and dim[-1].isdigit(): # it's a CA dim
                # NOTE(review): `lstrip` is called as a free function here,
                # presumably a module-level helper equivalent to
                # dim.lstrip('c') -- confirm it's imported elsewhere in file
                compid = int(lstrip(dim, 'c'))
                sidi = self.sids.searchsorted(sid)
                spos.append(sort.X[sort.Xhash][sidi, compid])
            else: # it's a standard dim stored in spikes array
                spos.append(sort.spikes[sid][dim])
        tip = 'sid: %d' % sid
        tip += '\n%s: %s' % (lst2shrtstr(dims), lst2shrtstr(spos))
        nid = sort.spikes[sid]['nid']
        if nid != 0:
            # also show the cluster (neuron) position along the same dims
            tip += '\nnid: %d' % nid
            cpos = [ sort.neurons[nid].cluster.pos[dim] for dim in dims ]
            tip += '\n%s: %s' % (lst2shrtstr(dims), lst2shrtstr(cpos))
        globalPos = self.mapToGlobal(self.GLtoQt(x, y))
        QtGui.QToolTip.showText(globalPos, tip)
    else:
        QtGui.QToolTip.hideText()
def selectPointsUnderCursor(self):
    """Update point selection with those currently under cursor, within pixel border pb.
    Call this method on S and D down, and on mouse motion when either S or D are down"""
    spw = self.spw
    sw = spw.windows['Sort']
    #if clear:
    #    sw.uslist.clearSelection()
    #    sw.nlist.clearSelection()
    x, y = self.cursorPosGL()
    sids = self.pick(x, y, pb=10, multiple=True)
    if sids is None:
        return
    #t0 = time.time()
    spw.SelectSpikes(sids, on=self.selecting, nslistplot=self.plot_selection)
    #print('SelectSpikes took %.3f sec' % (time.time()-t0))
    # recolour the affected points: desaturate on select, resaturate on deselect
    if self.selecting == True:
        sat = 0.2 # desaturate
    else: # self.selecting == False
        sat = 1 # resaturate
    self.colour(sids, sat=sat)
    self.updateGL()
def showProjectionDialog(self):
    """Get and set OpenGL ModelView matrix and focus.
    Useful for setting two different instances to the exact same projection"""
    dlg = uic.loadUi('multilineinputdialog.ui')
    dlg.setWindowTitle('Get and set OpenGL ModelView matrix and focus')
    precision = 8 # use default precision
    MV_repr = np.array_repr(self.MV, precision=precision)
    focus_repr = np.array_repr(self.focus, precision=precision)
    txt = ("self.MV = \\\n"
           "%s\n\n"
           "self.focus = %s" % (MV_repr, focus_repr))
    dlg.plainTextEdit.insertPlainText(txt)
    dlg.plainTextEdit.selectAll()
    if dlg.exec_(): # returns 1 if OK, 0 if Cancel
        txt = str(dlg.plainTextEdit.toPlainText())
        from numpy import array, float32 # required for exec()
        # SECURITY NOTE(review): exec() of dialog text runs arbitrary code;
        # acceptable only because this is a local, user-driven analysis tool.
        exec(txt) # update self.MV and self.focus, with hopefully no maliciousness
|
# NOTE(review): the names below (something, do, i, some_temp_file,
# some_other_file) are not defined in this file -- this reads as formatter
# test-fixture code whose comment placement is significant; do not reflow.
while True:
    if something.changed:
        do.stuff()  # trailing comment
    # Comment belongs to the `if` block.
    # This one belongs to the `while` block.
    # Should this one, too? I guess so.

# This one is properly standalone now.

for i in range(100):
    # first we do this
    if i % 33 == 0:
        break
    # then we do this
    print(i)
    # and finally we loop around

with open(some_temp_file) as f:
    data = f.read()

try:
    with open(some_other_file) as w:
        w.write(data)
except OSError:
    print("problems")
import sys
# leading function comment
def wat():
    """Stub; body intentionally elided."""
    ...
# trailing function comment
# SECTION COMMENT

# leading 1
@deco1
# leading 2
@deco2(with_args=True)
# leading 3
@deco3
def decorated1():
    """Stub with three decorators and interleaved comments."""
    ...
# leading 1
@deco1
# leading 2
@deco2(with_args=True)
# leading function comment
def decorated1():
    """Stub; note this redefinition shadows the `decorated1` above."""
    ...
# NOTE(review): fixture text below is intentional; do not reflow or move.
# Note: crappy but inevitable. The current design of EmptyLineTracker doesn't
# allow this to work correctly. The user will have to split those lines by
# hand.
some_instruction
# This comment should be split from `some_instruction` by two lines but isn't.
def g():
    """Stub; body intentionally elided."""
    ...
# Script entry point; `main` is expected to be defined elsewhere in the file.
if __name__ == "__main__":
    main()
|
class Config:
    """Mutable holder for runtime configuration, populated after creation."""

    def __init__(self):
        # Credentials / identity -- filled in later by the caller.
        self.api_token = None
        self.creds = None
        # Service settings.
        self.module_name = None
        self.port = None
        self.enable_2fa = None
        # Runtime state.
        self.seen = set()
        self.verbose = False
|
"""
Code generation and import tests.
"""
from hamcrest import (
assert_that,
calling,
equal_to,
is_,
raises,
)
from jsonschematypes.modules import ModuleLoader
from jsonschematypes.registry import Registry
from jsonschematypes.tests.fixtures import schema_for
def test_package_names():
    """
    Package name generation from URI works as expected.
    """
    registry = Registry()
    loader = ModuleLoader(registry.factory, basename="test")
    # (input URI, expected package name), asserted in order:
    expectations = [
        ("http://x.y.z/foo", "test"),
        ("http://x.y.z/foo/bar", "test.foo"),
        ("http://x.y.z/Foo/Bar/Baz", "test.foo.bar"),
        ("foo", "test"),
        ("foo/bar", "test.foo"),
    ]
    for uri, expected in expectations:
        assert_that(
            loader.package_name_for(uri),
            is_(equal_to(expected)),
        )
def test_illegal_package_name():
    """
    Illegal package names are detected.
    """
    registry = Registry()
    loader = ModuleLoader(registry.factory, basename="test")
    # both a version segment and a leading underscore are illegal:
    for bad_uri in ("foo/1.0/bar", "_foo/bar"):
        assert_that(
            calling(loader.package_name_for).with_args(bad_uri),
            raises(ValueError),
        )
def test_keep_part_of_package_name():
    """
    URIs with otherwise illegal package names can be truncated to form legal
    ones by keeping only part of the URI.
    """
    registry = Registry()
    loader = ModuleLoader(registry.factory, basename="test", keep_uri_parts=2)
    for uri, expected in (
        ("foo/bar", "test.foo"),
        ("foo/bar/baz", "test.bar"),
        ("foo/1.0/bar/baz", "test.bar"),
    ):
        assert_that(
            loader.package_name_for(uri),
            is_(equal_to(expected)),
        )
def test_imports():
    """
    Can import a class from a registry.
    """
    registry = Registry()
    registry.load(schema_for("data/name.json"))
    registry.configure_imports()
    # The generated module only exists after configure_imports(), so the
    # import has to happen inside the test body.
    from generated.foo import Name
    washington = Name(first="George", last="Washington")
    washington.validate()
|
from django.contrib import admin
from .models import Fcuser
# Register your models here.
class FcuserAdmin(admin.ModelAdmin):
    """Admin options for the Fcuser model."""

    # Columns shown in the admin changelist.
    list_display = ('email',)
admin.site.register(Fcuser, FcuserAdmin)
|
'''
Experiment: active buzzer
Version: v1.0
Date: 2023.3
Author: 01Studio
Description: make an active buzzer beep repeatedly
Community: www.01studio.org
'''
from gpiozero import Buzzer
import time

# Pin 16; active-low wiring: driving the pin low makes the buzzer sound.
beep = Buzzer(16,active_high=False)

while True:
    # Beep on and off, one second each.
    beep.on()
    time.sleep(1)
    beep.off()
    time.sleep(1)
|
"""
Implement optics algorithms for optical phase tomography using GPU
Michael Chen mchen0405@berkeley.edu
David Ren david.ren@berkeley.edu
October 22, 2018
"""
import numpy as np
import arrayfire as af
import contexttimer
from opticaltomography import settings
from opticaltomography.opticsmodel import MultiTransmittance, MultiPhaseContrast
from opticaltomography.opticsmodel import Defocus, Aberration
from opticaltomography.opticsutil import ImageRotation, calculateNumericalGradient
from opticaltomography.regularizers import Regularizer
# Module-level shorthand for the numpy/arrayfire dtypes configured in settings.
np_complex_datatype = settings.np_complex_datatype
np_float_datatype = settings.np_float_datatype
af_float_datatype = settings.af_float_datatype
af_complex_datatype = settings.af_complex_datatype
class AlgorithmConfigs:
    """
    Class created for all parameters for tomography solver
    """
    def __init__(self):
        self.method = "FISTA" # optimizer name; keys into TomographySolver._algorithms
        self.stepsize = 1e-2
        self.max_iter = 20
        self.error = [] # per-iteration cost history, filled by the solver
        self.reg_term = 0.0 #L2 norm regularization weight
        #FISTA-specific options
        self.fista_global_update = False
        self.restart = False
        #total variation regularization
        self.total_variation = False
        self.reg_tv = 1.0 #lambda (TV weight)
        self.max_iter_tv = 15
        self.order_tv = 1
        self.total_variation_gpu = False
        #lasso regularization
        self.lasso = False
        self.reg_lasso = 1.0
        #positivity constraint: (enabled flag, direction string)
        self.positivity_real = (False, "larger")
        self.positivity_imag = (False, "larger")
        self.pure_real = False
        self.pure_imag = False
        #aberration correction
        self.pupil_update = False
        self.pupil_global_update = False
        self.pupil_step_size = 1.0
        self.pupil_update_method = "gradient"
        #batch gradient update
        self.batch_size = 1
        #random order update
        self.random_order = False
class PhaseObject3D:
    """
    Class created for 3D objects.
    Depending on the scattering model, one of the following quantities will be used:
    - Refractive index (RI)
    - Transmittance function (Trans)
    - PhaseContrast
    - Scattering potential (V)
    shape: shape of object to be reconstructed in (x,y,z), tuple
    voxel_size: size of each voxel in (x,y,z), tuple
    RI_obj: refractive index of object(Optional)
    RI: background refractive index (Optional)
    slice_separation: For multislice algorithms, how far apart are slices separated, array (Optional)
    """
    def __init__(self, shape, voxel_size, RI_obj = None, RI = 1.0, slice_separation = None):
        assert len(shape) == 3, "shape should be 3 dimensional!"
        self.shape = shape
        # default to a uniform background object when no RI map is supplied
        self.RI_obj = RI * np.ones(shape, dtype = np_complex_datatype) if RI_obj is None else RI_obj.astype(np_complex_datatype)
        self.RI = RI
        self.pixel_size = voxel_size[0] # lateral sampling from x (assumes y matches -- TODO confirm)
        self.pixel_size_z = voxel_size[2]
        if slice_separation is not None:
            #for discontinuous slices
            assert len(slice_separation) == shape[2]-1, "number of separations should match with number of layers!"
            self.slice_separation = np.asarray(slice_separation).astype(np_float_datatype)
        else:
            #for continuous slices: uniform z spacing between adjacent layers
            self.slice_separation = self.pixel_size_z * np.ones((shape[2]-1,), dtype = np_float_datatype)

    def convertRItoTrans(self, wavelength):
        """Compute per-slice transmittance from the RI map, into self.trans_obj."""
        k0 = 2.0 * np.pi / wavelength
        self.trans_obj = np.exp(1.0j*k0*(self.RI_obj - self.RI)*self.pixel_size_z)

    def convertRItoPhaseContrast(self):
        """Compute phase contrast (RI minus background), into self.contrast_obj."""
        self.contrast_obj = self.RI_obj - self.RI

    def convertRItoV(self, wavelength):
        """Compute scattering potential from the RI map, into self.V_obj."""
        k0 = 2.0 * np.pi / wavelength
        self.V_obj = k0**2 * (self.RI**2 - self.RI_obj**2)

    def convertVtoRI(self, wavelength):
        """Invert convertRItoV: recover complex RI from self.V_obj, into self.RI_obj."""
        k0 = 2.0 * np.pi / wavelength
        B = -1.0 * (self.RI**2 - self.V_obj.real/k0**2)
        C = -1.0 * (-1.0 * self.V_obj.imag/k0**2/2.0)**2
        # positive root of the quadratic in RI_real**2:
        RI_obj_real = ((-1.0 * B + (B**2-4.0*C)**0.5)/2.0)**0.5
        RI_obj_imag = -0.5 * self.V_obj.imag/k0**2/RI_obj_real
        self.RI_obj = RI_obj_real + 1.0j * RI_obj_imag
class TomographySolver:
"""
Highest level solver object for tomography problem
phase_obj_3d: phase_obj_3d object defined from class PhaseObject3D
fx_illu_list: illumination angles in x, default = [0] (on axis)
fy_illu_list: illumination angles in y
rotation_angle_list: angles of rotation in tomography
propagation_distance_list: defocus distances for each illumination
"""
def __init__(self, phase_obj_3d, fx_illu_list = [0], fy_illu_list = [0], rotation_angle_list = [0], propagation_distance_list = [0], **kwargs):
    """
    phase_obj_3d: PhaseObject3D instance to reconstruct
    fx_illu_list, fy_illu_list: illumination angles in x and y (equal length)
    rotation_angle_list: tomography rotation angles
    propagation_distance_list: defocus distances for each illumination
    kwargs: must contain "wavelength" and "na"; the rest is forwarded to the
            Defocus object and to the scattering model (scat_model_args).
    NOTE(review): the list defaults are mutable and shared across calls --
    safe only as long as they are never mutated; confirm.
    """
    self.phase_obj_3d = phase_obj_3d
    self.wavelength = kwargs["wavelength"]
    #Rotation angles and rotation helper
    self.rot_angles = rotation_angle_list
    self.number_rot = len(self.rot_angles)
    self.rotation_pad = kwargs.get("rotation_pad", True)
    #Illumination angles
    assert len(fx_illu_list) == len(fy_illu_list)
    self.fx_illu_list = fx_illu_list
    self.fy_illu_list = fy_illu_list
    self.number_illum = len(self.fx_illu_list)
    #Aberration object
    self._aberration_obj = Aberration(phase_obj_3d.shape[:2], phase_obj_3d.pixel_size,
                                      self.wavelength, kwargs["na"], pad = False)
    #Defocus distances and object
    self.prop_distances = propagation_distance_list
    self._defocus_obj = Defocus(phase_obj_3d.shape[:2], phase_obj_3d.pixel_size, **kwargs)
    self.number_defocus = len(self.prop_distances)
    #Scattering models and algorithms (both algorithm names map to the same solver)
    self._opticsmodel = {"MultiTrans": MultiTransmittance,
                         "MultiPhaseContrast": MultiPhaseContrast,
                        }
    self._algorithms = {"GradientDescent": self._solveFirstOrderGradient,
                        "FISTA": self._solveFirstOrderGradient
                       }
    self.scat_model_args = kwargs
def setScatteringMethod(self, model = "MultiTrans"):
    """
    Define scattering method for tomography
    model: scattering models, it can be one of the followings:
           "MultiTrans", "MultiPhaseContrast"(Used in the paper)
    Side effects: sets self.scat_model and self._x (the unknowns being
    solved for), optionally builds self._rot_obj, and rebuilds
    self._scattering_obj.
    """
    self.scat_model = model
    if hasattr(self, '_scattering_obj'):
        del self._scattering_obj
    if model == "MultiTrans":
        self.phase_obj_3d.convertRItoTrans(self.wavelength)
        self.phase_obj_3d.convertRItoV(self.wavelength)
        self._x = self.phase_obj_3d.trans_obj
        # rot_angles is a plain list, so this is a simple list inequality
        if np.any(self.rot_angles != [0]):
            # pad_value 1 = identity transmittance outside the object
            self._rot_obj = ImageRotation(self.phase_obj_3d.shape, axis=0, pad = self.rotation_pad, pad_value = 1,
                                          flag_gpu_inout = True, flag_inplace = True)
    elif model == "MultiPhaseContrast":
        if not hasattr(self.phase_obj_3d, 'contrast_obj'):
            self.phase_obj_3d.convertRItoPhaseContrast()
        self._x = self.phase_obj_3d.contrast_obj
        if np.any(self.rot_angles != [0]):
            self._rot_obj = ImageRotation(self.phase_obj_3d.shape, axis=0, pad = self.rotation_pad, pad_value = 0,
                                          flag_gpu_inout = True, flag_inplace = True)
    else:
        # fallback: solve for the scattering potential V
        if not hasattr(self.phase_obj_3d, 'V_obj'):
            self.phase_obj_3d.convertRItoV(self.wavelength)
        self._x = self.phase_obj_3d.V_obj
        if np.any(self.rot_angles != [0]):
            self._rot_obj = ImageRotation(self.phase_obj_3d.shape, axis=0, pad = self.rotation_pad, pad_value = 0,
                                          flag_gpu_inout = True, flag_inplace = True)
    self._scattering_obj = self._opticsmodel[model](self.phase_obj_3d, **self.scat_model_args)
def forwardPredict(self, field = False):
    """
    Uses current object in the phase_obj_3d to predict the amplitude of the exit wave
    Before calling, make sure correct object is contained
    field: if True, return complex fields; otherwise return amplitudes
    Returns the forward prediction (and the backward one as a second value
    when the scattering model back-scatters).
    """
    obj_gpu = af.to_array(self._x)
    with contexttimer.Timer() as timer: # timer value is not used; kept as-is
        forward_scattered_predict= []
        if self._scattering_obj.back_scatter:
            back_scattered_predict = []
        for rot_idx in range(self.number_rot):
            forward_scattered_predict.append([])
            if self._scattering_obj.back_scatter:
                back_scattered_predict.append([])
            if self.rot_angles[rot_idx] != 0:
                self._rot_obj.rotate(obj_gpu, self.rot_angles[rot_idx])
            for illu_idx in range(self.number_illum):
                fx_illu = self.fx_illu_list[illu_idx]
                fy_illu = self.fy_illu_list[illu_idx]
                fields = self._forwardMeasure(fx_illu, fy_illu, obj = obj_gpu)
                if field:
                    forward_scattered_predict[rot_idx].append(np.array(fields["forward_scattered_field"]))
                    if self._scattering_obj.back_scatter:
                        back_scattered_predict[rot_idx].append(np.array(fields["back_scattered_field"]))
                else:
                    forward_scattered_predict[rot_idx].append(np.abs(fields["forward_scattered_field"]))
                    if self._scattering_obj.back_scatter:
                        back_scattered_predict[rot_idx].append(np.abs(fields["back_scattered_field"]))
            if self.rot_angles[rot_idx] != 0:
                # undo the rotation so the next iteration starts unrotated
                self._rot_obj.rotate(obj_gpu, -1.0*self.rot_angles[rot_idx])
    # reorder nested [rot][illu] lists into arrays with rotation last:
    if len(forward_scattered_predict[0][0].shape)==2:
        forward_scattered_predict = np.array(forward_scattered_predict).transpose(2, 3, 1, 0)
    elif len(forward_scattered_predict[0][0].shape)==3:
        forward_scattered_predict = np.array(forward_scattered_predict).transpose(2, 3, 4, 1, 0)
    if self._scattering_obj.back_scatter:
        if len(back_scattered_predict[0][0].shape)==2:
            back_scattered_predict = np.array(back_scattered_predict).transpose(2, 3, 1, 0)
        elif len(back_scattered_predict[0][0].shape)==3:
            back_scattered_predict = np.array(back_scattered_predict).transpose(2, 3, 4, 1, 0)
        return forward_scattered_predict, back_scattered_predict
    else:
        return forward_scattered_predict
def checkGradient(self, delta = 1e-4):
    """
    check if the numerical gradient is similar to the analytical gradient. Only works for 64 bit data type.
    delta: finite-difference step for the numerical gradient
    Picks a random voxel and a random illumination, then prints both gradients.
    """
    assert af_float_datatype == af.Dtype.f64, "This will only be accurate if 64 bit datatype is used!"
    shape = self.phase_obj_3d.shape
    point = (np.random.randint(shape[0]), np.random.randint(shape[1]), np.random.randint(shape[2]))
    illu_idx = np.random.randint(len(self.fx_illu_list))
    fx_illu = self.fx_illu_list[illu_idx]
    fy_illu = self.fy_illu_list[illu_idx]
    x = np.ones(shape, dtype = np_complex_datatype)
    if self._defocus_obj.pad:
        # half-size measurement grid when the model pads -- presumably
        # because padding doubles the grid; confirm against Defocus.
        amplitude = af.randu(shape[0]//2, shape[1]//2, dtype = af_float_datatype)
    else:
        amplitude = af.randu(shape[0], shape[1], dtype = af_float_datatype)
    print("testing the gradient at point : ", point)

    def func(x0):
        # scalar cost: squared residual between predicted and random amplitude
        fields = self._scattering_obj.forward(x0, fx_illu, fy_illu)
        field_scattered = self._aberration_obj.forward(fields["forward_scattered_field"])
        field_measure = self._defocus_obj.forward(field_scattered, self.prop_distances)
        residual = af.abs(field_measure) - amplitude
        function_value = af.sum(residual*af.conjg(residual)).real
        return function_value

    numerical_gradient = calculateNumericalGradient(func, x, point, delta = delta)
    # analytical gradient via the adjoint (backpropagation) path:
    fields = self._scattering_obj.forward(x, fx_illu, fy_illu)
    forward_scattered_field = fields["forward_scattered_field"]
    cache = fields["cache"]
    forward_scattered_field = self._aberration_obj.forward(forward_scattered_field)
    field_measure = self._defocus_obj.forward(forward_scattered_field, self.prop_distances)
    analytical_gradient = self._computeGradient(field_measure, amplitude, cache)[point]
    print("numerical gradient: %5.5e + %5.5e j" %(numerical_gradient.real, numerical_gradient.imag))
    print("analytical gradient: %5.5e + %5.5e j" %(analytical_gradient.real, analytical_gradient.imag))
def _forwardMeasure(self, fx_illu, fy_illu, obj = None):
    """
    From an illumination angle, this function computes the exit wave.
    fx_illu, fy_illu: illumination angle in x and y (scalars)
    obj: object to be passed through (Optional, default pick from phase_obj_3d)

    Returns the dict produced by the scattering object, with the scattered
    field(s) replaced by their aberrated + defocus-propagated versions.
    """
    if obj is None:
        fields = self._scattering_obj.forward(self._x, fx_illu, fy_illu)
    else:
        fields = self._scattering_obj.forward(obj, fx_illu, fy_illu)
    # Apply pupil aberration, then propagate to each defocus plane.
    field_scattered = self._aberration_obj.forward(fields["forward_scattered_field"])
    field_scattered = self._defocus_obj.forward(field_scattered, self.prop_distances)
    fields["forward_scattered_field"] = field_scattered
    # The back-scattered field (if modeled) goes through the same imaging chain.
    if self._scattering_obj.back_scatter:
        field_scattered = self._aberration_obj.forward(fields["back_scattered_field"])
        field_scattered = self._defocus_obj.forward(field_scattered, self.prop_distances)
        fields["back_scattered_field"] = field_scattered
    return fields
def _computeGradient(self, field_measure, amplitude, cache):
    """
    Error backpropagation to return a gradient
    field_measure: exit wave computed in forward model
    amplitude: amplitude measured
    cache: exit wave at each layer, saved previously
    """
    # Residual field for the amplitude-based least-squares cost: replace the
    # estimated amplitude with the measured one while keeping the estimated phase.
    field_bp = field_measure - amplitude*af.exp(1.0j*af.arg(field_measure))
    # Propagate the residual back through defocus and pupil aberration.
    field_bp = self._defocus_obj.adjoint(field_bp, self.prop_distances)
    field_bp = self._aberration_obj.adjoint(field_bp)
    # Backpropagate through the scattering model using the cached per-layer fields.
    gradient = self._scattering_obj.adjoint(field_bp, cache)
    return gradient["gradient"]
def _initialization(self, configs, x_init = None):
    """
    Initialize the object estimate before running an algorithm.

    configs: configs object from class AlgorithmConfigs
    x_init: initial guess of object; when omitted, the estimate is filled
            with the model-appropriate neutral value.
    """
    if x_init is None:
        # BUGFIX: the original compared with `is "MultiTrans"`, an identity
        # check that is not guaranteed to hold for equal strings; use
        # equality instead.
        if self.scat_model == "MultiTrans":
            # Multiplicative (transmittance) model: neutral element is 1.
            self._x[:, :, :] = 1.0
        else:
            # Additive (scattering-potential) models: neutral element is 0.
            self._x[:, :, :] = 0.0
    else:
        self._x[:, :, :] = x_init
def _solveFirstOrderGradient(self, configs, amplitudes, verbose):
    """
    MAIN part of the solver, runs the FISTA algorithm
    configs: configs object from class AlgorithmConfigs
    amplitudes: all measurements
    verbose: boolean variable to print verbosely

    Returns the final object estimate. Per-iteration costs are appended to
    configs.error as a side effect.
    """
    flag_FISTA = False
    if configs.method == "FISTA":
        flag_FISTA = True
    # update multiple angles at a time
    batch_update = False
    if configs.fista_global_update or configs.batch_size != 1:
        # Accumulator for gradients of a whole batch before one update step.
        gradient_batch = af.constant(0.0, self.phase_obj_3d.shape[0],\
                                     self.phase_obj_3d.shape[1],\
                                     self.phase_obj_3d.shape[2], dtype = af_complex_datatype)
        batch_update = True
    # batch_size == 0 means "use all measurements in one batch".
    if configs.fista_global_update:
        configs.batch_size = 0
    #TODO: what if num_batch is not an integer
    if configs.batch_size == 0:
        num_batch = 1
    else:
        # Batch over whichever axis (illumination or rotation) has variety.
        if self.number_rot < 2:
            num_batch = self.number_illum // configs.batch_size
        else:
            num_batch = self.number_rot // configs.batch_size
    stepsize = configs.stepsize
    max_iter = configs.max_iter
    reg_term = configs.reg_term
    configs.error = []
    obj_gpu = af.constant(0.0, self.phase_obj_3d.shape[0],\
                          self.phase_obj_3d.shape[1],\
                          self.phase_obj_3d.shape[2], dtype = af_complex_datatype)
    #Initialization for FISTA update
    if flag_FISTA:
        restart = configs.restart
        y_k = self._x.copy()
        t_k = 1.0
    #Start of iterative algorithm
    with contexttimer.Timer() as timer:
        if verbose:
            print("---- Start of the %5s algorithm ----" %(self.scat_model))
        for iteration in range(max_iter):
            cost = 0.0
            # Work on a GPU copy of the current estimate.
            obj_gpu[:] = af.to_array(self._x)
            if configs.random_order:
                rot_order = np.random.permutation(range(self.number_rot))
                illu_order = np.random.permutation(range(self.number_illum))
            else:
                rot_order = range(self.number_rot)
                illu_order = range(self.number_illum)
            for batch_idx in range(num_batch):
                if batch_update:
                    gradient_batch[:,:,:] = 0.0
                # Select the measurements belonging to this batch.
                if configs.batch_size == 0:
                    rot_indices = rot_order
                    illu_indices = illu_order
                else:
                    if self.number_rot < 2:
                        rot_indices = rot_order
                        illu_indices = illu_order[batch_idx * configs.batch_size : (batch_idx+1) * configs.batch_size]
                    else:
                        illu_indices = illu_order
                        rot_indices = rot_order[batch_idx * configs.batch_size : (batch_idx+1) * configs.batch_size]
                for rot_idx in rot_indices:
                    # Rotate the object
                    if self.rot_angles[rot_idx] != 0:
                        self._rot_obj.rotate(obj_gpu, self.rot_angles[rot_idx])
                        if batch_update:
                            self._rot_obj.rotate(gradient_batch, self.rot_angles[rot_idx])
                    for illu_idx in illu_indices:
                        #forward scattering
                        fx_illu = self.fx_illu_list[illu_idx]
                        fy_illu = self.fy_illu_list[illu_idx]
                        fields = self._forwardMeasure(fx_illu, fy_illu, obj = obj_gpu)
                        field_measure = fields["forward_scattered_field"]
                        cache = fields["cache"]
                        #calculate error (amplitude least squares)
                        amplitude = af.to_array(amplitudes[:,:,:,illu_idx, rot_idx])
                        residual = af.abs(field_measure) - amplitude
                        cost += af.sum(residual*af.conjg(residual)).real
                        #calculate gradient
                        if batch_update:
                            gradient_batch[:, :, :] += self._computeGradient(field_measure, amplitude, cache)
                        else:
                            # Plain SGD step per measurement.
                            obj_gpu[:, :, :] -= stepsize * self._computeGradient(field_measure, amplitude, cache)
                        # Drop references early to free GPU memory.
                        field_measure = None
                        cache = None
                        amplitude = None
                    # Undo the rotation so the next rotation starts from the
                    # canonical orientation.
                    if self.rot_angles[rot_idx] != 0:
                        self._rot_obj.rotate(obj_gpu, -1.0*self.rot_angles[rot_idx])
                        if batch_update:
                            self._rot_obj.rotate_adj(gradient_batch, self.rot_angles[rot_idx])
                if batch_update:
                    # One gradient step for the accumulated batch gradient.
                    obj_gpu[:, :, :] -= stepsize * gradient_batch
                # Divergence guard: NaNs mean the stepsize was too large.
                # NOTE(review): this returns immediately with the halved
                # stepsize unused -- the caller must re-run; confirm intended.
                if np.isnan(obj_gpu).sum() > 0:
                    stepsize *= 0.5
                    print("WARNING: Gradient update diverges! Resetting stepsize to %3.2f" %(stepsize))
                    return obj_gpu
            # L2 regularizer
            obj_gpu[:, :, :] -= stepsize * reg_term * obj_gpu
            #record total error
            configs.error.append(cost + reg_term * af.sum(obj_gpu*af.conjg(obj_gpu)).real)
            if flag_FISTA:
                #check convergence; optionally restart FISTA momentum when
                #the cost increased.
                if iteration > 0:
                    if configs.error[-1] > configs.error[-2]:
                        if restart:
                            t_k = 1.0
                            self._x[:, :, :] = y_k
                            print("WARNING: FISTA Restart! Error: %5.5f" %(np.log10(configs.error[-1])))
                            continue
                        else:
                            print("WARNING: Error increased! Error: %5.5f" %(np.log10(configs.error[-1])))
                #FISTA auxiliary variable (proximal/regularizer step)
                y_k1 = np.array(self._regularizer_obj.applyRegularizer(obj_gpu))
                #FISTA update (Nesterov momentum coefficients)
                t_k1 = 0.5*(1.0 + (1.0 + 4.0*t_k**2)**0.5)
                beta = (t_k - 1.0) / t_k1
                self._x[:, :, :] = y_k1 + beta * (y_k1 - y_k)
                t_k = t_k1
                y_k = y_k1.copy()
            else:
                #check convergence; shrink the stepsize when the cost went up.
                self._x[:, :, :] = np.array(obj_gpu)
                if iteration > 0:
                    if configs.error[-1] > configs.error[-2]:
                        print("WARNING: Error increased! Error: %5.5f" %(np.log10(configs.error[-1])))
                        stepsize *= 0.8
            if verbose:
                print("iteration: %d/%d, error: %5.5f, elapsed time: %5.2f seconds" %(iteration+1, max_iter, np.log10(configs.error[-1]), timer.elapsed))
    return self._x
def solve(self, configs, amplitudes, x_init = None, verbose = True):
    """
    function to solve for the tomography problem
    configs: configs object from class AlgorithmConfigs
    amplitudes: measurements in amplitude not INTENSITY, ordered by (x,y,illumination,defocus,rotation)
    x_init: initial guess for object
    verbose: boolean variable to print verbosely

    NOTE(review): downstream indexing uses amplitudes[:,:,:,illu_idx,rot_idx],
    which implies (x, y, defocus, illumination, rotation) order -- confirm
    against the docstring above.
    """
    self._initialization(configs, x_init)
    # Configure pupil (aberration) updates from the algorithm settings.
    self._aberration_obj.setUpdateParams(flag_update = configs.pupil_update,\
                                         pupil_step_size = configs.pupil_step_size,\
                                         update_method = configs.pupil_update_method,\
                                         global_update = configs.pupil_global_update,\
                                         measurement_num = self.number_illum*self.number_rot)
    self._regularizer_obj = Regularizer(configs, verbose)
    # Pad singleton axes so that amplitudes is always 5-D.
    if self.number_defocus < 2:
        amplitudes = amplitudes[:,:, np.newaxis]
    if self.number_illum < 2:
        amplitudes = amplitudes[:,:,:, np.newaxis]
    if self.number_rot < 2:
        amplitudes = amplitudes[:,:,:,:, np.newaxis]
    # Dispatch to the configured algorithm implementation.
    return self._algorithms[configs.method](configs, amplitudes, verbose)
import csv
import json
# Column headers and the raw book records (Title, Price, Stock, Rating).
colNames = ['Title', 'Price', 'Stock', 'Rating']
dataSet = [
    ['Rip it Up and ...', 35.02, 'In stock', 5],
    ['Our Band Could Be ...', 57.25, 'In stock', 4],
    ['How Music Works', 37.32, 'In stock', 2],
    ['Love Is a Mix ...', 18.03, 'Out of stock', 1],
    ['Please Kill Me: The ...', 31.19, 'In stock', 4],
    ["Kill 'Em and Leave: ...", 45.0, 'In stock', 5],
    ['Chronicles, Vol. 1', 52.60, 'Out of stock', 2],
    ['This Is Your Brain ...', 38.4, 'In stock', 1],
    ['Orchestra of Exiles: The ...', 12.36, 'In stock', 3],
    ['No One Here Gets ...', 20.02, 'In stock', 5],
    ['Life', 31.58, 'In stock', 5],
    ['Old Records Never Die: ...', 55.66, 'Out of Stock', 2],
    ['Forever Rockers (The Rocker ...', 28.80, 'In stock', 3]
]
print(dataSet)

# Write the rows to CSV; `with` guarantees the file is closed even on error.
with open('bookdetails.csv', 'w', newline='', encoding='utf-8') as fileCsv:
    writer = csv.writer(fileCsv)
    writer.writerow(colNames)
    writer.writerows(dataSet)

# Convert each row to a dict keyed by the column names.
finalDataSet = list()  # empty Dataset
for data in dataSet:
    print(dict(zip(colNames, data)))
    finalDataSet.append(dict(zip(colNames, data)))
print(finalDataSet)

with open('bookdetails.json', 'w') as jsonfile:
    json.dump(finalDataSet, jsonfile)

# Read the JSON back and inspect a few records ('r' is enough: read-only).
with open('bookdetails.json', 'r') as jsonfile:
    data = json.load(jsonfile)
print(data)
print(data[0])
# BUGFIX: the records have no 'id'/'price' keys -- the keys are the
# capitalized column names, so the original lookups raised KeyError.
print(data[0]['Title'])
print(data[0]['Price'])
print(data[0:2])
|
from Utils.Serializable import Serializable
from dataclasses import dataclass
from enum import Enum
class RecognitionOutcome(Enum):
    """Possible results of a recognition attempt."""
    NOT_RECOGNIZED = 1
    RECOGNIZED = 2
    UNCERTAIN = 3
    UNKNOWN = 4
@dataclass
class RecognitionResponse(Serializable):
    """Serializable payload describing the result of a recognition request."""
    # Name of the class the recognizer selected.
    recognized_class: str
    # Confidence associated with recognized_class.
    probability: float
    # Outcome label; presumably corresponds to RecognitionOutcome -- TODO confirm.
    outcome: str

    @classmethod
    def deserialize(cls, serialization):
        """
        Rebuild a RecognitionResponse from its serialized form.

        Returns None when the base deserialization does not yield a dict.
        """
        instance = super().deserialize(serialization)
        if isinstance(instance, dict):
            return RecognitionResponse(**instance)
        return None
|
import seaborn as sns
from matplotlib import pyplot as plt
from PyPDF2 import PdfFileMerger, PdfFileReader
import os
import shutil
import math
import warnings
warnings.filterwarnings('ignore')
def MakePlots(df, cat_features, con_features):
    """Generate exploratory plots for *df* and merge them into one PDF.

    df: pandas-like DataFrame (needs .corr(), column indexing, .dropna()).
    cat_features: categorical column names.
    con_features: continuous column names.

    Side effects: recreates a 'Plots' directory, writes one PDF per plot type
    plus 'FinalPlots.pdf', and temporarily chdirs into 'Plots'.
    """
    mergedObject = PdfFileMerger()
    # Start from a clean output directory.
    if os.path.exists('Plots'):
        shutil.rmtree('Plots')
    os.mkdir('Plots')
    os.chdir('Plots')
    # Heat Map
    h, axs = plt.subplots()
    sns.heatmap(df.corr())
    axs.set_title('HeatMap :- ')
    plt.savefig("HeatMap.pdf")
    def CalcuateRows(TotalPlots):
        # Number of subplot rows needed for a 3-column grid (min 2 rows).
        if TotalPlots <= 3:
            return 2
        elif TotalPlots % 3 == 0:
            return (int(TotalPlots / 3))
        else:
            return (TotalPlots // 3 + 1)
    # DistPlot: one distribution plot per continuous feature.
    TotalDistPlots = len(con_features)
    f, axes = plt.subplots(CalcuateRows(TotalDistPlots), 3, figsize=(10, 10))
    column = 0
    row = 0
    while (row < TotalDistPlots):
        for i in con_features:
            if column < 3:
                try:
                    axes[row, column].set_title('DistPlot :- ' + i)
                    sns.distplot(df[i].dropna(), ax=axes[row, column])
                    column += 1
                    if column == 3:
                        row += 1
                        column = 0
                # NOTE(review): bare except silently skips columns that fail
                # to plot -- consider logging the failure.
                except:
                    print("", end="")
        row += 1
    plt.savefig("DistPlots.pdf")
    plt.tight_layout()
    # CountPlot: one count plot per categorical feature.
    TotalCountPlots = len(cat_features)
    f, axes = plt.subplots(CalcuateRows(TotalCountPlots), 3, figsize=(10, 10))
    column = 0
    row = 0
    while (row < TotalCountPlots):
        for i in cat_features:
            if column < 3:
                try:
                    axes[row, column].set_title('CountPlot :- ' + i)
                    sns.countplot(df[i].dropna(), ax=axes[row, column])
                    column += 1
                    if column == 3:
                        row += 1
                        column = 0
                except:
                    print("", end="")
        row += 1
    plt.savefig("CountPlots.pdf")
    plt.tight_layout()
    # Bar Plot: continuous vs categorical pairs.
    TotalBarPlots = len(cat_features)
    f, axes = plt.subplots(CalcuateRows(TotalBarPlots), 3, figsize=(10, 10))
    column = 0
    row = 0
    while (row < TotalBarPlots):
        for i in con_features:
            for j in cat_features:
                if column < 3:
                    try:
                        axes[row, column].set_title('BarPlot :- ' + i + ' Vs ' + j)
                        sns.barplot(x=i, y=j, data=df.dropna(), ax=axes[row, column])
                        column += 1
                        if column == 3:
                            row += 1
                            column = 0
                    except:
                        print("", end="")
        row += 1
    plt.savefig("BarPlots.pdf")
    plt.tight_layout()
    # Box Plot: categorical vs continuous pairs.
    TotalBoxPlots = len(cat_features)
    f, axes = plt.subplots(CalcuateRows(TotalBoxPlots), 3, figsize=(10, 10))
    column = 0
    row = 0
    while (row < TotalBoxPlots):
        for i in con_features:
            for j in cat_features:
                if column < 3:
                    try:
                        axes[row, column].set_title('BoxPlot :- ' + j + ' Vs ' + i)
                        sns.boxplot(x=j, y=i, data=df.dropna(), ax=axes[row, column])
                        column += 1
                        if column == 3:
                            row += 1
                            column = 0
                    except:
                        print("", end="")
        row += 1
    plt.savefig("BoxPlots.pdf")
    plt.tight_layout()
    # Violin Plot: continuous vs categorical pairs.
    TotalViolinPlots = len(cat_features)
    f, axes = plt.subplots(CalcuateRows(TotalViolinPlots), 3, figsize=(10, 10))
    column = 0
    row = 0
    while (row < TotalViolinPlots):
        for i in con_features:
            for j in cat_features:
                if column < 3:
                    try:
                        axes[row, column].set_title('ViolinPlot :- ' + i + ' Vs ' + j)
                        sns.violinplot(x=i, y=j, data=df.dropna(), ax=axes[row, column])
                        column += 1
                        if column == 3:
                            row += 1
                            column = 0
                    except:
                        print("", end="")
        row += 1
    plt.savefig("ViolinPlots.pdf")
    plt.tight_layout()
    # Pair Plot: one overall, plus one per categorical hue.
    a = sns.pairplot(df)
    a.fig.suptitle('PairPlot :- ')
    plt.savefig("PairPlot.pdf")
    for i in cat_features:
        try:
            a = sns.pairplot(df, hue=i)
            a.fig.suptitle("PairPlot :- " + i)
            plt.savefig("PairPlot%s.pdf" % i)
        except:
            continue
    # Merge every generated PDF into a single document.
    CurrentDirectory = os.getcwd()
    AllFiles = os.listdir(CurrentDirectory)
    for file in AllFiles:
        if file.endswith('.pdf'):
            mergedObject.append(PdfFileReader(file, 'rb'))
    mergedObject.write("FinalPlots.pdf")
    os.chdir('../')
|
from __future__ import annotations
import typing
import networkx
def depth_first_search(graph: networkx.DiGraph, root: typing.Any, explored: list, stack: list) -> None:
    """Recursive post-order DFS starting at *root*.

    Appends each newly visited node to *explored* and each finished node to
    *stack*; both lists are mutated in place. Does nothing when *root* was
    already explored.
    """
    if root in explored:
        return
    explored.append(root)
    for successor in graph.neighbors(root):
        depth_first_search(graph, successor, explored, stack)
    stack.append(root)
def kosaraju_algorithm(graph: networkx.DiGraph) -> list[set]:
    """
    Compute the strongly connected components of *graph* with Kosaraju's
    two-pass algorithm.

    Returns a list of components (each a set of nodes) in the order they are
    discovered on the reversed graph.
    """
    stack, explored = [], []
    # First pass: record nodes by DFS finishing time.
    for vertex in graph.nodes:
        depth_first_search(graph, vertex, explored, stack)
    explored = []
    components = []
    # PERF FIX: reverse the graph once. The original called
    # graph.reverse(copy=True) inside the loop, rebuilding the reversed copy
    # for every component root (O(V*E) total instead of O(V+E)).
    reversed_graph = graph.reverse(copy = True)
    # Second pass: DFS on the reversed graph in reverse finishing order;
    # each DFS tree is one strongly connected component.
    for vertex in reversed(stack):
        if vertex not in explored:
            component = []
            depth_first_search(reversed_graph, vertex, explored, component)
            components.append(set(component))
    return components
|
#!/usr/bin/env python
# Copyright 2021 The PDFium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import sys
def AddDirToPathIfNeeded(*path_parts):
    """Join *path_parts*, make the result absolute, and append it to
    sys.path when it is an existing directory not already on the path."""
    # pylint: disable=no-value-for-parameter
    candidate = os.path.abspath(os.path.join(*path_parts))
    if not os.path.isdir(candidate):
        return
    if candidate in sys.path:
        return
    sys.path.append(candidate)
def GetPDFiumDir():
    """Return the absolute path of the pdfium checkout root (cached)."""
    if not GetPDFiumDir.pdfium_dir:
        # Expect |skia_gold_dir| to be .../pdfium/testing/tools/skia_gold.
        skia_gold_dir = os.path.dirname(os.path.realpath(__file__))
        tools_dir = os.path.dirname(skia_gold_dir)
        testing_dir = os.path.dirname(tools_dir)
        # Sanity-check the expected directory layout before trusting it.
        if (os.path.basename(tools_dir) != 'tools' or
            os.path.basename(testing_dir) != 'testing'):
            raise RuntimeError(
                'Confused, can not find pdfium root directory, aborting.')
        GetPDFiumDir.pdfium_dir = os.path.dirname(testing_dir)
    return GetPDFiumDir.pdfium_dir


# Function attribute used as the memoization cache for GetPDFiumDir().
GetPDFiumDir.pdfium_dir = None
|
# Module-level placeholder for the current state; assigned at runtime elsewhere.
state = None
|
import os, datetime
# Class-based application configuration
class ConfigClass(object):
    """ Flask application config """
    # DO NOT use "DEBUG = True" in production environments
    DEBUG = False

    # Flask settings
    # SECURITY: secrets are now read from the environment when available so
    # they need not live in source control. The historical literals remain
    # only as fallbacks, preserving behavior for existing deployments;
    # rotate them and set the environment variables in production.
    SECRET_KEY = os.environ.get('SECRET_KEY', 'bestspangeopfjieaorhjfoiawoicewafoajpoewfjpoaewjfoewjf')

    # Flask-SQLAlchemy settings
    SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL', 'sqlite:///data.db')  # File-based SQL database
    SQLALCHEMY_TRACK_MODIFICATIONS = False  # Avoids SQLAlchemy warning

    # JWT
    # app.config['JWT_AUTH_URL_RULE'] = '/login'
    JWT_EXPIRATION_DELTA = datetime.timedelta(seconds=1800)  # 30 minutes

    # Flask-Mail SMTP server settings
    MAIL_SERVER = 'smtp.gmail.com'
    MAIL_PORT = 465
    MAIL_USE_SSL = True
    MAIL_USE_TLS = False
    MAIL_USERNAME = os.environ.get('MAIL_USERNAME', 'designer@abbok.net')
    MAIL_PASSWORD = os.environ.get('MAIL_PASSWORD', 'Abb0kMyAbb0k!')
    MAIL_DEFAULT_SENDER = '"MyApp" <designer@abbok.net>'

    # Flask-User settings
    USER_APP_NAME = "abbok design"  # Shown in and email templates and page footers
    USER_ENABLE_EMAIL = True  # Enable email authentication
    USER_ENABLE_CONFIRM_EMAIL = True
    USER_ENABLE_MULTIPLE_EMAILS = False
    USER_ENABLE_CHANGE_PASSWORD = True
    USER_ENABLE_FORGOT_PASSWORD = True
    USER_ENABLE_INVITE_USER = False
    USER_ENABLE_USERNAME = True  # Enable username authentication
    USER_EMAIL_SENDER_NAME = USER_APP_NAME
    USER_EMAIL_SENDER_EMAIL = "designer@abbok.net"
    USER_REQUIRE_RETYPE_PASSWORD = True
    # Do not reveal whether an account exists (prevents user enumeration).
    USER_SHOW_EMAIL_DOES_NOT_EXIST = False
    USER_SHOW_USERNAME_DOES_NOT_EXIST = False
    # 2 days (2*24*3600 seconds).
    USER_CONFIRM_EMAIL_EXPIRATION = 172800
    USER_RESET_PASSWORD_EXPIRATION = 172800
    USER_USER_SESSION_EXPIRATION = 3600
    USER_ENABLE_REGISTER = True
    USER_ENABLE_REMEMBER_ME = True
    USER_REGISTER_TEMPLATE = 'flask_user/login_or_register.html'
    USER_LOGIN_TEMPLATE = 'flask_user/login_or_register.html'
    USER_LOGIN_URL = '/user/sign-in'
    USER_LOGOUT_URL = '/user/sign-out'
    USER_MANAGE_EMAILS_URL = '/user/manage-emails'
    USER_REGISTER_URL = '/user/register'
|
# Copyright 2020 Ville Vestman
# This file is licensed under the MIT license (see LICENSE.txt).
import os
import importlib
import torch
from asvtorch.src.settings.settings import Settings
import asvtorch.src.misc.fileutils as fileutils
def load_network(epoch: int, device):
    """Load the x-vector extractor saved for *epoch* onto *device*.

    The network dimensions are recovered from scalar parameters stored inside
    the checkpoint's state dict, so no extra configuration is needed.
    """
    model_filepath = os.path.join(fileutils.get_network_folder(), 'epoch.{}.pt'.format(epoch))
    loaded_states = torch.load(model_filepath, map_location=device)
    state_dict = loaded_states['model_state_dict']
    # Architecture hyperparameters are persisted as parameters in the state dict.
    key1 = 'feat_dim_param'
    key2 = 'n_speakers_param'
    feat_dim = state_dict[key1].item()
    n_speakers = state_dict[key2].item()
    net = initialize_net(feat_dim, n_speakers)
    net.to(device)
    net.load_state_dict(state_dict)
    return net
def save_state(filename, epoch, net, optimizer):
    """Save model + optimizer state to '<base>.<epoch>.pt'."""
    model_dict = {'model_state_dict': net.state_dict(), 'optimizer_state_dict': optimizer.state_dict()}
    # Normalize the name to '<base>.<epoch>.pt' regardless of input extension.
    filename = fileutils.ensure_ext('{}.{}'.format(fileutils.remove_ext(filename, '.pt'), epoch), '.pt')
    torch.save(model_dict, filename)
    print('x-vector extractor model saved to: {}'.format(filename))
def load_state(filename, epoch, net, optimizer, device):
    """Restore model + optimizer state from '<base>.<epoch>.pt' in place."""
    filename = fileutils.ensure_ext('{}.{}'.format(fileutils.remove_ext(filename, '.pt'), epoch), '.pt')
    loaded_states = torch.load(filename, map_location=device)
    net.load_state_dict(loaded_states['model_state_dict'])
    optimizer.load_state_dict(loaded_states['optimizer_state_dict'])
# This allows to select the network class by using the class name in Settings
def initialize_net(feat_dim: int, n_speakers: int):
    """Instantiate the network class named in Settings ('module.ClassName')."""
    module_name, class_name = Settings().network.network_class.rsplit('.', 1)
    network_class = getattr(importlib.import_module(module_name), class_name)
    return network_class(feat_dim, n_speakers)
|
#!/usr/bin/python3
import os
import subprocess
import sys
import time
import boto3
from ruamel.yaml import YAML
# Regions the artifacts are mirrored to (one bucket per region).
AWS_REGIONS = ['us-east-1', 'us-east-2', 'us-west-1', 'us-west-2', 'ap-south-1', 'ap-northeast-3',
               'ap-northeast-2', 'ap-southeast-1', 'ap-southeast-2', 'ap-northeast-1', 'ca-central-1',
               'eu-central-1', 'eu-west-1', 'eu-west-2', 'eu-west-3', 'eu-north-1', 'sa-east-1']
# Bump VERSION on every release; run() refuses to overwrite an existing version.
VERSION = '0.0.1'
BUCKET_BASE = 'logzio-aws-integrations'
NAME = 'aws-cross-accounts'
# NOTE: read eagerly at import time; a KeyError is raised if either is unset.
AWS_ACCESS_KEY = os.environ['AWS_ACCESS_KEY']
AWS_SECRET_KEY = os.environ['AWS_SECRET_KEY']
# S3 object keys, namespaced by integration name and version.
PATH_PREFIX = f'{NAME}/{VERSION}'
LAMBDA_KEY = f'{PATH_PREFIX}/lambda_function.zip'
SAM_MAIN_KEY = f'{PATH_PREFIX}/sam-template-main.yaml'
SAM_DESTINATION_KEY = f'{PATH_PREFIX}/sam-template-destination.yaml'
def run():
    """Zip the lambda and upload it plus both SAM templates to every region.

    Aborts the process on the first failed upload or when the current VERSION
    already exists in a target bucket.
    """
    zip_path = './lambda_function.zip' # TODO: may need to change in the workflow
    zip_lambda_function()
    successful_uploads = 0
    for region in AWS_REGIONS:
        s3_client = boto3.client(
            's3',
            aws_access_key_id=AWS_ACCESS_KEY,
            aws_secret_access_key=AWS_SECRET_KEY,
            region_name=region
        )
        bucket_name = f'{BUCKET_BASE}-{region}'
        # validate that the version number is correct, so we do not override an existing version:
        result = s3_client.list_objects(Bucket=bucket_name, Prefix=f'{PATH_PREFIX}')
        if 'Contents' in result:
            print('ERROR! This version already exists. Please bump version with variable VERSION, and run the script again!')
            os.remove(zip_path)
            exit(1)
        # upload lambda function
        uploaded = upload_file(s3_client, bucket_name, LAMBDA_KEY, zip_path, region)
        if not uploaded:
            sys.exit('Error occurred while uploading lambda zip')
        # handle + upload main sam template
        uploaded = edit_and_upload_template(s3_client, bucket_name, region)
        if not uploaded:
            sys.exit('Error occurred while uploading main sam template')
        # upload destination template
        uploaded = upload_file(s3_client, bucket_name, SAM_DESTINATION_KEY,
                               './sam-templates/sam-template-destination.yaml', region)
        if not uploaded:
            sys.exit('Error occurred while uploading destination sam template')
        successful_uploads += 1
    # Clean up the local zip once all regions are done.
    os.remove(zip_path)
    print(f'Successfully uploaded {successful_uploads} out of {len(AWS_REGIONS)} regions')
def upload_file(s3_client, bucket, key, file_to_upload, region):
    """
    Upload *file_to_upload* to s3://bucket/key with a public-read ACL,
    retrying up to 5 times with a short pause between attempts.

    Returns True on success, False when every attempt failed.
    """
    max_retries = 5
    for attempt in range(max_retries):
        try:
            s3_client.upload_file(file_to_upload, bucket, key, ExtraArgs={'ACL': 'public-read'})
        except Exception as e:
            # Log the failure, back off briefly, and let the loop retry.
            print(e)
            time.sleep(3)
        else:
            # BUGFIX: the success message and `return True` previously sat at
            # loop level after the except clause, so the function returned
            # True even when the upload raised and failures were never
            # retried. They now run only when no exception occurred.
            print(f'Successfully deployed {key} in region: {region}')
            return True
    return False
def zip_lambda_function():
    """Create lambda_function.zip by delegating to the repo's zip.sh script."""
    subprocess.call('./zip.sh')
def edit_and_upload_template(s3_client, bucket_name, region):
try:
base_file_path = './sam-templates/sam-template-main-default.yaml'
file_path = './sam-templates/sam-template-main.yaml'
yaml = YAML()
yaml.preserve_quotes = True
with open(base_file_path, 'r') as template_base:
template_yaml = yaml.load(template_base)
template_yaml['Resources']['LogzioCrossAccountKinesisLambda']['Properties']['CodeUri']['Bucket'] = bucket_name
template_yaml['Resources']['LogzioCrossAccountKinesisLambda']['Properties']['CodeUri']['Key'] = LAMBDA_KEY
with open(file_path, 'w') as template:
yaml.dump(template_yaml, template)
uploaded = upload_file(s3_client, bucket_name, SAM_MAIN_KEY, file_path, region)
os.remove(file_path)
return uploaded
except Exception as e:
print(e)
return False
# Script entry point: mirror the lambda zip and SAM templates to all regions.
if __name__ == '__main__':
    run()
|
# -*- coding: utf-8 -*-
# Package metadata.
__author__ = 'André Roberge'
__email__ = 'andre.roberge@gmail.com'
__version__ = '0.9.2'

# Re-export the implementation module's public API at package level.
from .easygui_qt import *
from .easygui_qt import __all__
|
from django.db import models
class Roles(object):
    """Namespace of the player colours available in a game."""
    WHITE = "white"
    BLACK = "black"


def _role_choices():
    # Build Django-style (value, label) pairs from the public attributes of
    # Roles, sorted by attribute name for a deterministic order.
    pairs = []
    for attr_name, attr_value in sorted(Roles.__dict__.items()):
        if not attr_name.startswith("_"):
            pairs.append((attr_value, attr_value))
    return pairs


ROLE_CHOICES = _role_choices()
class Player(models.Model):
    """A participant in a game, optionally assigned a colour role."""
    nickname = models.CharField(max_length=50)
    # Role stays unset until the game assigns colours.
    role = models.CharField(max_length=50, choices=ROLE_CHOICES, null=True, blank=True)
    # BUGFIX: on_delete is a required argument since Django 2.0; CASCADE is
    # the implicit default of Django < 2.0, so existing behavior is kept.
    game = models.ForeignKey('Game', related_name="players", on_delete=models.CASCADE)
|
# -*- coding: utf-8 -*-
"""Functions for grouping BEL graphs into sub-graphs."""
# Aggregate the grouping sub-modules and re-export their public names.
from . import annotations, provenance
from .annotations import *
from .provenance import *

# Public API: the union of what the sub-modules declare public.
__all__ = (
    annotations.__all__ +
    provenance.__all__
)
|
# Public API; extended only when the optional dependency is installed.
__all__ = []
try:
    # featuretools is optional: expose DFSTransformer only when available.
    from featuretools.wrappers import DFSTransformer
    __all__.append('DFSTransformer')
except ImportError:
    # featuretools not installed; the wrapper is simply unavailable.
    pass
|
from PIL import Image, ImageFilter
# Load the source image and print basic metadata.
img = Image.open('./images/yumi.png')
print(img)
print(img.mode)
print(img.size)
print(img.format)
#! Blur
# NOTE(review): format names are conventionally upper-case ('PNG'); confirm
# the installed Pillow accepts the lowercase 'png' used throughout.
filtered_img = img.filter(ImageFilter.BLUR)
filtered_img.save('./images/converted/yumi_blur.png', 'png')
#! Smooth
filtered_img2 = img.filter(ImageFilter.SMOOTH)
filtered_img2.save('./images/converted/yumi_smooth.png', 'png')
#! Sharpen
filtered_img3 = img.filter(ImageFilter.SHARPEN)
filtered_img3.save('./images/converted/yumi_sharpen.png', 'png')
#! Grey Scale
filtered_img4 = img.convert('L') # 'L' = grey scale
filtered_img4.save('./images/converted/yumi_grey_scale.png', 'png')
#! Show Image
# filtered_img4.show()
#! Rotate (degrees, counter-clockwise)
rotated_img = filtered_img4.rotate(75)
rotated_img.save('./images/converted/yumi_rotated.png', 'png')
#! Resize (ignores aspect ratio; thumbnail below preserves it)
resized_img = img.resize((300, 300))
resized_img.save('./images/converted/yumi_300x300.png', 'png')
#! Crop (left, upper, right, lower)
box = (100, 100, 400, 400)
cropped_img = img.crop(box)
cropped_img.save('./images/converted/yumi_cropped.png', 'png')
#! Thumbnail -- resizes img IN PLACE, keeping the aspect ratio.
img.thumbnail((150 ,150))
img.save('./images/converted/yumi_thumbnail.png')
'''
Given the following parameters:
    cityscapesPath: default is './data/CityDatabase'
    classnames: specify which classes you want to segment with instances.
        *IMPORTANT* If you change this, you have to modify label.py
        and regenerate the '*_gt*_instanceTrainIds.png' ground-truth files.
    MAX_instances: specify the maximum number of instances of each class.
Output: the corresponding ground-truth mask for each '*_gt*_instanceTrainIds.png' file,
    e.g. input file:  aachen_000000_000019_gtFine_instanceTrainIds.png
         output file: aachen_000000_000019_gtFine_mask.png
*NOTE* The output file is a full-size matrix, not sparse!
'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from PIL import Image
import math
import sys
import os
import glob
from scipy import sparse
from scipy.misc import toimage
from scipy.misc import imsave
# Default dataset location; export CITYSCAPES_DATASET beforehand to override.
# os.environ["CITYSCAPES_DATASET"] = "/Users/WY/Downloads/CityDatabase"
os.environ["CITYSCAPES_DATASET"] = "./data/CityDatabase"
def get_file_list(cityscapesPath):
    '''
    Given the data path, find all '*_gt*_instanceTrainIds.png' ground-truth
    files in the gtFine train and val splits, sorted by name.

    Exits the process when no files are found.
    '''
    searchFinetrain = os.path.join( cityscapesPath , "gtFine" , "train" , "*" , "*_gt*_instanceTrainIds.png")
    searchFineval = os.path.join( cityscapesPath , "gtFine" , "val" , "*" , "*_gt*_instanceTrainIds.png")
    filesFinetrain = glob.glob(searchFinetrain)
    filesFineval = glob.glob(searchFineval)
    filesFine = filesFinetrain + filesFineval
    filesFine.sort()
    if not filesFine:
        sys.exit('Did not find any files.')
    print('Got {} instance files. '.format(len(filesFine)))
    return filesFine
def open_gt_file(fname):
    """Load a ground-truth instance image as an int16 numpy array.

    Returns (image, (height, width)).
    """
    raw = Image.open(fname)
    pixels = np.array(raw, dtype=np.int16)
    height, width = np.shape(pixels)
    return pixels, (height, width)
def create_instance_data(instances, classname, image, img_shape):
    '''
    For the given image and one class it may contain, collect per-instance
    pixel coordinates into *instances* (mutated in place).

    instances: dict mapping class label -> {instance id -> record}; each
               record holds 'pixels' (list of (row, col)) and 'pixel_avg'.
    classname: (class_label, class_id) tuple.
    image: np.array whose pixels encode class_id * 1000 + instance id.
    img_shape: (Height, Width).
    '''
    class_label, class_id = classname
    height, width = img_shape
    for r in range(height):
        for c in range(width):
            pixel = image[r][c]
            if pixel == 19:
                # Background pixels carry no instance information.
                continue
            if int(pixel / 1000) != class_id:
                # Pixel belongs to some other class; ignore it.
                continue
            inst_id = pixel % 1000
            record = instances[class_label].get(inst_id)
            if record is None:
                # First sighting of this instance: start a fresh record.
                record = {'pixels': [], 'pixel_avg': [0.0, 0.0]}
                instances[class_label][inst_id] = record
            record['pixels'].append((r, c))
def cal_pixel_avg(instances):
    '''
    Compute the mean pixel coordinate (x_avg, y_avg) for every instance of
    every class, storing it under the 'pixel_avg' key of each record.
    '''
    for records in instances.values():
        for record in records.values():
            record['pixel_avg'] = np.mean(record['pixels'], axis=0)
def sort_instances(instances):
    '''
    For each class, build a list of (inst_id, [x_avg, y_avg]) tuples sorted
    by average pixel coordinate (lexicographically: x first, then y).

    Return: dict mapping class label -> ordered instance list.
    '''
    ordered = {}
    for label, records in instances.items():
        entries = [(inst, rec['pixel_avg'].tolist()) for inst, rec in records.items()]
        entries.sort(key=lambda entry: entry[1])
        ordered[label] = entries
    return ordered
def generate_sparse_mask(instances, class_avg_pixels, MAX_instances, img_shape):
    """Build one labeled instance mask per class and stack them depth-wise.

    Instances are numbered 1..MAX_instances in the spatial order given by
    class_avg_pixels; the pixels of instance k are filled with value k.
    Classes with no instances contribute an all-zero layer.

    NOTE(review): the final stack hard-codes the 'person' and 'car' keys --
    any other configured class would raise KeyError; confirm intended.
    """
    Gt_mask = {}
    Height = img_shape[0]
    Width = img_shape[1]
    class_labels = class_avg_pixels.keys()
    for label in class_labels:
        index = 0
        for index, item in enumerate(class_avg_pixels[label]):
            # Only the first MAX_instances instances are kept.
            if index < MAX_instances:
                inst = item[0]
                pixel_array = np.array(instances[label][inst]['pixels'])
                row = pixel_array[:,0]
                col = pixel_array[:,1]
                # Fill this instance's pixels with its 1-based ordinal.
                fill_data = np.ones(len(row), dtype=np.int8) * (index + 1)
                data = sparse.coo_matrix((fill_data, (row, col)), shape=(Height, Width), dtype=np.int8).tocsc()
                # Instances are disjoint, so summing the per-instance
                # matrices yields the labeled mask.
                if label in Gt_mask:
                    Gt_mask[label] += data
                else:
                    Gt_mask[label] = data
        # If the label produced no instances, emit an all-zero matrix.
        if label not in Gt_mask:
            Gt_mask[label] = sparse.csc_matrix((Height, Width), dtype=np.int8)
    # Convert the per-class sparse masks to dense arrays before stacking.
    for key in iter(Gt_mask):
        Gt_mask[key] = Gt_mask[key].toarray()
    Gt_mask_final = np.dstack((Gt_mask['person'],Gt_mask['car']))
    return Gt_mask_final
def generate_masks(instances, class_avg_pixels, MAX_instances, img_shape):
    '''
    Generate masks for the Ground truth;
    MAX_instances: specify the max number of instances for each class
    Return: stacked masks, car comes first, then person for now

    Each class contributes exactly MAX_instances masks: one sparse COO mask
    per real instance (in the spatial order of class_avg_pixels), padded out
    with dense zero masks.
    '''
    Gt_mask = {}
    Height = img_shape[0]
    Width = img_shape[1]
    class_labels = class_avg_pixels.keys()
    print('in generate, the class_labels are: {}'.format(class_labels))
    for label in class_labels:
        masks = []
        # Keep at most MAX_instances real instances, in sorted order.
        n_real = min(len(class_avg_pixels[label]), MAX_instances)
        for idx in range(n_real):
            inst = class_avg_pixels[label][idx][0]
            pixel_array = np.array(instances[label][inst]['pixels'])
            row = pixel_array[:, 0]
            col = pixel_array[:, 1]
            fill_data = np.ones(len(row), dtype=np.int8)
            masks.append(sparse.coo_matrix((fill_data, (row, col)), shape=(Height, Width), dtype=np.int8))
        # BUGFIX: pad with zero masks up to exactly MAX_instances per class.
        # The original padding logic added MAX_instances extra masks whenever
        # `index` was 0 after the loop, so a class with exactly ONE instance
        # ended up with MAX_instances + 1 masks.
        zero_mask = np.zeros((Height, Width), dtype=np.int8)
        for _ in range(MAX_instances - n_real):
            masks.append(zero_mask)
        Gt_mask[label] = masks
    # The final masks of ground truth: car first, then person.
    # NOTE(review): assumes both 'car' and 'person' are configured -- TODO
    # generalize if other classes are ever used.
    Gt_mask_final = Gt_mask['car'] + Gt_mask['person']
    return Gt_mask_final
def main():
    """Generate a '*_mask.png' ground-truth mask for every instance file."""
    if 'CITYSCAPES_DATASET' in os.environ:
        cityscapesPath = os.environ['CITYSCAPES_DATASET']
    # NOTE(review): cityscapesPath is unbound when the env var is missing --
    # this relies on the module-level os.environ assignment above.
    instances = {}
    # (label, Cityscapes train id) pairs to extract.
    classnames = [('car', 13), ('person', 11)]
    MAX_instances = 30
    files = get_file_list(cityscapesPath)
    progress = 0
    print("Progress: {:>3} %".format( progress * 100 / len(files) ))
    for fname in files:
        # image is np.array, dtype=np.int16, has a shape of img_shape
        (image, img_shape) = open_gt_file(fname)
        for classname in classnames:
            # Reset this class's per-file instance records.
            instances[classname[0]] = {}
            create_instance_data(instances, classname, image, img_shape)
        cal_pixel_avg(instances)
        class_avg_pixels = sort_instances(instances)
        Gt_mask = generate_sparse_mask(instances, class_avg_pixels, MAX_instances, img_shape)
        fname = fname.replace('instanceTrainIds', 'mask')
        height= np.shape(Gt_mask)[0]
        width = np.shape(Gt_mask)[1]
        # Append an all-zero third channel so the stack can be saved as an image.
        stacked = np.zeros((height, width), dtype=np.int8)
        save_mask = np.dstack((Gt_mask, stacked))
        print('Save gt mask to {}.'.format(fname))
        # NOTE(review): scipy.misc.toimage was removed in SciPy >= 1.2, so
        # this requires an old SciPy. TODO: port to PIL.Image directly.
        toimage(save_mask, high=29, low=0, cmin=0, cmax=29).save(fname)
        progress += 1
        print("\rProgress: {:>3} %".format( progress * 100 / len(files) ))
        sys.stdout.flush()
|
import numpy as np
import numba
import numba.cuda as cuda
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.utils import check_array, check_random_state
from sklearn.utils.validation import _check_sample_weight
from scipy.sparse import issparse, csr_matrix, coo_matrix
from enstop.utils import (
normalize,
coherence,
mean_coherence,
log_lift,
mean_log_lift,
standardize_input,
)
from enstop.plsa import plsa_init
from enstop.block_parallel_plsa import log_likelihood_by_blocks
@cuda.jit()
def plsa_e_step(
    block_rows,
    block_cols,
    p_w_given_z_block,
    p_z_given_d_block,
    p_z_given_wd_block,
    e_step_thresh,
):
    """CUDA kernel: pLSA E-step over one (doc-block, word-block) grid cell.

    Each CUDA block handles the sparse sub-matrix (i, j); threads stride
    across its nonzero entries, computing P(z|w,d) proportional to
    P(w|z) * P(z|d), thresholded by ``e_step_thresh`` and normalized over z.
    Results are written in place into ``p_z_given_wd_block``.
    """
    i = cuda.blockIdx.x
    j = cuda.blockIdx.y
    nz_offset = cuda.threadIdx.x
    threads_per_blocks = cuda.blockDim.x
    k = p_z_given_d_block[i].shape[1]
    nnz = block_rows.shape[2]
    # Each thread handles every threads_per_blocks-th nonzero entry.
    n_passes = (nnz // threads_per_blocks) + 1
    for n in range(n_passes):
        nz_idx = n * threads_per_blocks + nz_offset
        if nz_idx < nnz:
            # Negative row index marks the -1 padding past this block's
            # real nonzeros (see block construction in plsa_fit).
            if block_rows[i, j, nz_idx] < 0:
                break
            d = block_rows[i, j, nz_idx]
            w = block_cols[i, j, nz_idx]
            norm = 0.0
            for z in range(k):
                v = p_w_given_z_block[j, z, w] * p_z_given_d_block[i, d, z]
                # Sparsify: drop negligible probabilities below threshold.
                if v > e_step_thresh:
                    p_z_given_wd_block[i, j, nz_idx, z] = v
                    norm += v
                else:
                    p_z_given_wd_block[i, j, nz_idx, z] = 0.0
            # Normalize over topics so probabilities sum to one.
            for z in range(k):
                if norm > 0.0:
                    p_z_given_wd_block[i, j, nz_idx, z] /= norm
@cuda.jit()
def plsa_partial_m_step(
    block_rows,
    block_cols,
    block_vals,
    p_w_given_z_block,
    p_z_given_d_block,
    result_p_w_given_z_block,
    result_p_z_given_d_block,
    p_z_given_wd_block,
    pwz_norms,
):
    """CUDA kernel: per-block partial pLSA M-step accumulation.

    One thread per topic z; each CUDA block (i, j) scans its nonzeros
    serially and accumulates unnormalized updates for P(w|z) and P(z|d)
    plus the per-topic norms used later by the normalization kernels.
    """
    z = cuda.threadIdx.x
    i = cuda.blockIdx.x
    j = cuda.blockIdx.y
    k = p_z_given_d_block[i].shape[1]
    nnz = block_rows.shape[2]
    if z < k:
        # Zero this block's accumulators before summing.
        result_p_w_given_z_block[i, j, z, :] = 0.0
        result_p_z_given_d_block[j, i, :, z] = 0.0
        pwz_norms[i, j, z] = 0.0
        for nz_idx in range(block_rows[i, j].shape[0]):
            # Negative row index marks the padding past real nonzeros.
            if block_rows[i, j, nz_idx] < 0:
                break
            d = block_rows[i, j, nz_idx]
            w = block_cols[i, j, nz_idx]
            x = block_vals[i, j, nz_idx]
            # Weight the E-step posterior by the observed count x.
            s = x * p_z_given_wd_block[i, j, nz_idx, z]
            result_p_w_given_z_block[i, j, z, w] += s
            result_p_z_given_d_block[j, i, d, z] += s
            pwz_norms[i, j, z] += s
@cuda.jit()
def normalize_m_step_p_z_given_d(blocked_next_p_z_given_d, p_z_given_d):
    """CUDA kernel: reduce per-block M-step results into P(z|d) and normalize.

    Sums contributions over word-blocks j for each (doc-block i, document d),
    then divides by the per-document total so each row sums to one.
    """
    d_offset = cuda.threadIdx.x
    i = cuda.blockIdx.x
    threads_per_block = cuda.blockDim.x
    k = p_z_given_d[i].shape[1]
    n_passes = ((p_z_given_d.shape[0] * p_z_given_d.shape[1]) // threads_per_block) + 1
    for n in range(n_passes):
        d = threads_per_block * n + d_offset
        if (
            i < blocked_next_p_z_given_d.shape[1]
            and d < blocked_next_p_z_given_d.shape[2]
        ):
            norm = 0.0
            for z in range(k):
                p_z_given_d[i, d, z] = 0.0
                # Accumulate this document's topic mass across word-blocks.
                for j in range(blocked_next_p_z_given_d.shape[0]):
                    p_z_given_d[i, d, z] += blocked_next_p_z_given_d[j, i, d, z]
                    norm += blocked_next_p_z_given_d[j, i, d, z]
            # NOTE(review): divides without guarding norm == 0 (all-zero
            # padded documents would produce nan/inf) — confirm padding rows
            # are never read back, or add a norm > 0 guard.
            for z in range(k):
                p_z_given_d[i, d, z] /= norm
@cuda.jit()
def normalize_m_step_p_w_given_z(blocked_next_p_w_given_z, p_w_given_z, pwz_norms):
    """CUDA kernel: reduce per-block M-step results into P(w|z) and normalize.

    Per-topic norms are first totalled from ``pwz_norms`` (every thread
    recomputes the same totals into a thread-local array), then each word's
    accumulated mass is summed over doc-blocks and divided by its topic norm.
    """
    w_offset = cuda.threadIdx.x
    i = cuda.blockIdx.x
    threads_per_block = cuda.blockDim.x
    k = p_w_given_z[i].shape[0]
    n_passes = ((p_w_given_z.shape[0] * p_w_given_z.shape[2]) // threads_per_block) + 1
    # Thread-local scratch; fixed size 1024 caps the supported topic count k.
    norms = cuda.local.array(1024, numba.float64)
    for z in range(k):
        norms[z] = 0.0
    # Total the per-(block, block) topic norms across the whole grid.
    for p in range(pwz_norms.shape[0]):
        for q in range(pwz_norms.shape[1]):
            for z in range(k):
                norms[z] += pwz_norms[p, q, z]
    for n in range(n_passes):
        w = n * threads_per_block + w_offset
        if (
            i < blocked_next_p_w_given_z.shape[1]
            and w < blocked_next_p_w_given_z.shape[3]
        ):
            for z in range(k):
                p_w_given_z[i, z, w] = 0.0
                # Accumulate this word's topic mass across doc-blocks.
                for j in range(blocked_next_p_w_given_z.shape[0]):
                    p_w_given_z[i, z, w] += blocked_next_p_w_given_z[j, i, z, w]
            # NOTE(review): no guard against norms[z] == 0 — confirm every
            # topic retains mass after the M-step.
            for z in range(k):
                p_w_given_z[i, z, w] /= norms[z]
def plsa_fit(
    data,
    k,
    n_row_blocks=8,
    n_col_blocks=8,
    init="random",
    n_iter=100,
    n_iter_per_test=10,
    tolerance=0.001,
    e_step_thresh=1e-32,
    random_state=None,
):
    """Fit a pLSA model to a sparse doc/word matrix using CUDA kernels.

    The matrix is padded and partitioned into an n_row_blocks x n_col_blocks
    grid of COO blocks; EM iterations alternate the GPU E-step and M-step
    kernels, testing log-likelihood convergence every ``n_iter_per_test``
    iterations.

    Parameters
    ----------
    data: sparse matrix of shape (n_docs, n_words)
    k: number of topics
    n_row_blocks, n_col_blocks: grid partition of the data matrix
    init: initialization scheme passed to plsa_init
    n_iter: maximum number of EM iterations
    n_iter_per_test: iterations between convergence tests
    tolerance: relative log-likelihood change below which to stop
    e_step_thresh: probabilities below this are zeroed in the E-step
    random_state: seed / RandomState for initialization

    Returns
    -------
    (p_z_given_d, p_w_given_z): dense arrays of shape (n_docs, k) and
    (k, n_words).
    """
    rng = check_random_state(random_state)
    p_z_given_d_init, p_w_given_z_init = plsa_init(data, k, init=init, rng=rng)
    A = data.tocsr().astype(np.float32)
    n = A.shape[0]
    m = A.shape[1]
    # Pad both dimensions up to a multiple of the block counts.
    block_row_size = np.uint16(np.ceil(A.shape[0] / n_row_blocks))
    block_col_size = np.uint16(np.ceil(A.shape[1] / n_col_blocks))
    # P(z|d) laid out as (row_block, doc_in_block, topic).
    p_z_given_d = np.zeros((block_row_size * n_row_blocks, k), dtype=np.float32)
    p_z_given_d[: p_z_given_d_init.shape[0]] = p_z_given_d_init
    p_z_given_d = p_z_given_d.reshape(n_row_blocks, block_row_size, k)
    # P(w|z) laid out as (col_block, topic, word_in_block), C-contiguous.
    p_w_given_z = np.zeros((k, block_col_size * n_col_blocks), dtype=np.float32)
    p_w_given_z[:, : p_w_given_z_init.shape[1]] = p_w_given_z_init
    p_w_given_z = np.transpose(
        p_w_given_z.T.reshape(n_col_blocks, block_col_size, k), axes=[0, 2, 1]
    ).astype(np.float32, order="C")
    # Cut the CSR matrix into a grid of COO blocks, tracking the largest
    # nonzero count so all blocks can share one padded ndarray layout.
    A_blocks = [[0] * n_col_blocks for i in range(n_row_blocks)]
    max_nnz_per_block = 0
    for i in range(n_row_blocks):
        row_start = block_row_size * i
        row_end = min(row_start + block_row_size, n)
        for j in range(n_col_blocks):
            col_start = block_col_size * j
            col_end = min(col_start + block_col_size, m)
            A_blocks[i][j] = A[row_start:row_end, col_start:col_end].tocoo()
            if A_blocks[i][j].nnz > max_nnz_per_block:
                max_nnz_per_block = A_blocks[i][j].nnz
    # Padded block arrays: -1 in rows/cols marks "past the real nonzeros";
    # the kernels break on it.
    block_rows_ndarray = np.full(
        (n_row_blocks, n_col_blocks, max_nnz_per_block), -1, dtype=np.int32
    )
    block_cols_ndarray = np.full(
        (n_row_blocks, n_col_blocks, max_nnz_per_block), -1, dtype=np.int32
    )
    block_vals_ndarray = np.zeros(
        (n_row_blocks, n_col_blocks, max_nnz_per_block), dtype=np.float32
    )
    for i in range(n_row_blocks):
        for j in range(n_col_blocks):
            nnz = A_blocks[i][j].nnz
            block_rows_ndarray[i, j, :nnz] = A_blocks[i][j].row
            block_cols_ndarray[i, j, :nnz] = A_blocks[i][j].col
            block_vals_ndarray[i, j, :nnz] = A_blocks[i][j].data
    n_d_blocks = block_rows_ndarray.shape[0]
    n_w_blocks = block_rows_ndarray.shape[1]
    block_size = block_rows_ndarray.shape[2]
    # Workspace for the E-step posterior P(z|w,d), one slot per nonzero.
    p_z_given_wd_block = np.zeros(
        (n_d_blocks, n_w_blocks, block_size, k), dtype=np.float32
    )
    # Per-block partial M-step accumulators.
    blocked_next_p_w_given_z = np.zeros(
        (
            np.int64(n_d_blocks),
            np.int64(n_w_blocks),
            np.int64(k),
            np.int64(block_col_size),
        ),
        dtype=np.float32,
    )
    blocked_next_p_z_given_d = np.zeros(
        (
            np.int64(n_w_blocks),
            np.int64(n_d_blocks),
            np.int64(block_row_size),
            np.int64(k),
        ),
        dtype=np.float32,
    )
    norms_pwz = np.zeros((n_d_blocks, n_w_blocks, k), dtype=np.float64)
    previous_log_likelihood = log_likelihood_by_blocks(
        block_rows_ndarray,
        block_cols_ndarray,
        block_vals_ndarray,
        p_w_given_z,
        p_z_given_d,
    )
    # Move everything to the GPU once; kernels operate in place on device.
    d_block_rows_ndarray = cuda.to_device(block_rows_ndarray)
    d_block_cols_ndarray = cuda.to_device(block_cols_ndarray)
    d_block_vals_ndarray = cuda.to_device(block_vals_ndarray)
    d_blocked_next_p_w_given_z = cuda.to_device(blocked_next_p_w_given_z)
    d_blocked_next_p_z_given_d = cuda.to_device(blocked_next_p_z_given_d)
    d_p_z_given_wd_block = cuda.to_device(p_z_given_wd_block)
    d_p_w_given_z = cuda.to_device(p_w_given_z)
    d_p_z_given_d = cuda.to_device(p_z_given_d)
    d_norms_pwz = cuda.to_device(norms_pwz)
    n_d = p_z_given_d.shape[1]
    n_w = p_w_given_z.shape[2]
    # Main EM loop: run n_iter_per_test iterations, then test convergence.
    for i in range(n_iter // n_iter_per_test):
        for j in range(n_iter_per_test):
            plsa_e_step[(n_d_blocks, n_w_blocks), 256](
                d_block_rows_ndarray,
                d_block_cols_ndarray,
                d_p_w_given_z,
                d_p_z_given_d,
                d_p_z_given_wd_block,
                e_step_thresh,
            )
            cuda.synchronize()
            plsa_partial_m_step[(n_d_blocks, n_w_blocks), k](
                d_block_rows_ndarray,
                d_block_cols_ndarray,
                d_block_vals_ndarray,
                d_p_w_given_z,
                d_p_z_given_d,
                d_blocked_next_p_w_given_z,
                d_blocked_next_p_z_given_d,
                d_p_z_given_wd_block,
                d_norms_pwz,
            )
            cuda.synchronize()
            normalize_m_step_p_z_given_d[n_d_blocks, 256](
                d_blocked_next_p_z_given_d, d_p_z_given_d
            )
            normalize_m_step_p_w_given_z[n_w_blocks, 256](
                d_blocked_next_p_w_given_z, d_p_w_given_z, d_norms_pwz
            )
            cuda.synchronize()
        # Pull current estimates back to host for the likelihood test.
        p_z_given_d = d_p_z_given_d.copy_to_host()
        p_w_given_z = d_p_w_given_z.copy_to_host()
        current_log_likelihood = log_likelihood_by_blocks(
            block_rows_ndarray,
            block_cols_ndarray,
            block_vals_ndarray,
            p_w_given_z,
            p_z_given_d,
        )
        change = np.abs(current_log_likelihood - previous_log_likelihood)
        if change / np.abs(current_log_likelihood) < tolerance:
            break
        else:
            previous_log_likelihood = current_log_likelihood
    # Run the remaining iterations (n_iter not divisible by n_iter_per_test).
    for i in range(n_iter % n_iter_per_test):
        plsa_e_step[(n_d_blocks, n_w_blocks), 256](
            d_block_rows_ndarray,
            d_block_cols_ndarray,
            d_p_w_given_z,
            d_p_z_given_d,
            d_p_z_given_wd_block,
            e_step_thresh,
        )
        cuda.synchronize()
        plsa_partial_m_step[(n_d_blocks, n_w_blocks), k](
            d_block_rows_ndarray,
            d_block_cols_ndarray,
            d_block_vals_ndarray,
            d_p_w_given_z,
            d_p_z_given_d,
            d_blocked_next_p_w_given_z,
            d_blocked_next_p_z_given_d,
            d_p_z_given_wd_block,
            d_norms_pwz,
        )
        cuda.synchronize()
        normalize_m_step_p_z_given_d[n_d_blocks, 256](
            d_blocked_next_p_z_given_d, d_p_z_given_d
        )
        normalize_m_step_p_w_given_z[n_w_blocks, 256](
            d_blocked_next_p_w_given_z, d_p_w_given_z, d_norms_pwz
        )
        cuda.synchronize()
    p_z_given_d = d_p_z_given_d.copy_to_host()
    p_w_given_z = d_p_w_given_z.copy_to_host()
    # Un-block and strip padding back to the original matrix dimensions.
    p_z_given_d = np.vstack(p_z_given_d)[:n, :]
    p_w_given_z = np.hstack(p_w_given_z)[:, :m]
    return p_z_given_d, p_w_given_z
class GPUPLSA(BaseEstimator, TransformerMixin):
    """Probabilistic Latent Semantic Analysis fitted with CUDA kernels.

    A scikit-learn style estimator wrapping :func:`plsa_fit`. After fitting,
    ``embedding_`` holds P(z|d) per document, ``components_`` holds P(w|z)
    per topic, and ``training_data_`` keeps the (sparse) input matrix.
    """

    def __init__(
        self,
        n_components=10,
        init="random",
        n_row_blocks=8,
        n_col_blocks=8,
        n_iter=100,
        n_iter_per_test=10,
        tolerance=0.001,
        e_step_thresh=1e-32,
        transform_random_seed=42,
        random_state=None,
    ):
        # Store hyperparameters verbatim (scikit-learn convention: no
        # validation or mutation in __init__).
        self.n_components = n_components
        self.init = init
        self.n_row_blocks = n_row_blocks
        self.n_col_blocks = n_col_blocks
        self.n_iter = n_iter
        self.n_iter_per_test = n_iter_per_test
        self.tolerance = tolerance
        self.e_step_thresh = e_step_thresh
        self.transform_random_seed = transform_random_seed
        self.random_state = random_state

    def fit(self, X, y=None, sample_weight=None):
        """Learn the pLSA model for the data X.

        Parameters
        ----------
        X: array or sparse matrix of shape (n_docs, n_words)
            The data matrix pLSA is attempting to fit to.
        y: Ignored
        sample_weight: array of shape (n_docs,)
            Input document weights.
        Returns
        -------
        self
        """
        self.fit_transform(X, sample_weight=sample_weight)
        return self

    def fit_transform(self, X, y=None, sample_weight=None):
        """Learn the pLSA model for the data X and return the document vectors.
        This is more efficient than calling fit followed by transform.
        Parameters
        ----------
        X: array or sparse matrix of shape (n_docs, n_words)
            The data matrix pLSA is attempting to fit to.
        y: Ignored
        sample_weight: array of shape (n_docs,)
            Input document weights.
        Returns
        -------
        embedding: array of shape (n_docs, n_topics)
            An embedding of the documents into a topic space.
        """
        X = check_array(X, accept_sparse="csr")
        X = standardize_input(X)
        if not issparse(X):
            X = csr_matrix(X)
        # NOTE(review): sample_weight is validated here but never passed to
        # plsa_fit below — weights currently have no effect; confirm intent.
        sample_weight = _check_sample_weight(sample_weight, X, dtype=np.float32)
        if np.any(X.data < 0):
            raise ValueError(
                "PLSA is only valid for matrices with non-negative " "entries"
            )
        # All-zero documents cannot be fit; drop them and re-insert zero
        # embeddings afterwards.
        row_sums = np.array(X.sum(axis=1).T)[0]
        good_rows = row_sums != 0
        if not np.all(good_rows):
            zero_rows_found = True
            data_for_fitting = X[good_rows]
        else:
            zero_rows_found = False
            data_for_fitting = X
        U, V = plsa_fit(
            data_for_fitting,
            self.n_components,
            n_row_blocks=self.n_row_blocks,
            n_col_blocks=self.n_col_blocks,
            init=self.init,
            n_iter=self.n_iter,
            n_iter_per_test=self.n_iter_per_test,
            tolerance=self.tolerance,
            e_step_thresh=self.e_step_thresh,
            random_state=self.random_state,
        )
        if zero_rows_found:
            self.embedding_ = np.zeros((X.shape[0], self.n_components))
            self.embedding_[good_rows] = U
        else:
            self.embedding_ = U
        self.components_ = V
        self.training_data_ = X
        return self.embedding_
|
from builtins import getattr
import os
from warnings import warn, showwarning

try:
    from numpy.linalg import inv, pinv, cond, solve
    from scipy.stats import pearsonr
    from sympy import symbols
    from sympy.utilities.lambdify import lambdify
    from sympy.parsing.sympy_parser import parse_expr
    import matplotlib.pyplot as plt
    from mpi4py.MPI import (
        DOUBLE as MPI_DOUBLE, COMM_WORLD as MPI_COMM_WORLD, SUM as MPI_SUM
    )
    import numpy as np
except ImportError:
    # Only catch import failures. The original bare ``except:`` also
    # swallowed KeyboardInterrupt/SystemExit and unrelated errors, hiding
    # the real cause behind the generic "install packages" warning.
    warn('Ensure that all required packages are installed.')
    exit()

# MPI process layout: rank 0 is treated as the manager process throughout
# this module (directory creation, progress printing, ...).
comm = MPI_COMM_WORLD
size = comm.size
rank = comm.rank
is_manager = (rank == 0)
def switch_backend(backend):
    """
    Inputs: backend- the backend to change to

    A wrapper for the matplotlib method that switches the backend.
    """
    plt.switch_backend(backend)
def user_function(user_func_str, var_list):
    """
    Inputs: user_func_str- a string with which responses will be generated
            var_list- the list of variables (which contain the values used
            to create the responses)

    Generates responses for a function based on the corresponding variable
    values. Variables are bound positionally to symbols x0, x1, ... so the
    input string must reference them by those names.
    """
    var_count = len(var_list)
    var_list_symb = [''] * var_count
    # Strip all whitespace from the string, then parse it into a sympy
    # expression.
    user_func_str = parse_expr(''.join((user_func_str.split())))
    for j in range(var_count):
        var_list_symb[j] = symbols(f'x{j}')
    # Compile the expression into a callable taking a single list argument.
    func = lambdify((var_list_symb,), user_func_str, ('numpy', 'sympy'))
    gen_res = func([variable.vals for variable in var_list])
    return {'generated_responses': gen_res}
def get_str_vars(matrix):
    """
    Inputs: matrix- the model matrix of the different interraction terms

    Generates strings that represent the sobol interactions. (i.e. 'x0**2')
    Row 0 of the matrix (the constant term) is skipped; each remaining row
    produces one string such as 'x0^2*x1'.
    """
    n_rows, n_cols = matrix.shape
    labels = []
    for r in range(1, n_rows):
        terms = []
        for c in range(n_cols):
            power = matrix[r][c]
            if power != 0.:
                # Omit the exponent when it is exactly 1.
                if power == 1.:
                    terms.append(f'x{c}')
                else:
                    terms.append(f'x{c}^{int(matrix[r][c])}')
        labels.append('*'.join(terms))
    return labels
def create_total_sobols(var_count, matrix, sobols):
    """
    Inputs: var_count- number of variables
            matrix- the interaction matrix
            sobols- the previously-calculated sobol indices for each interaction

    Using the existing sobol indices, the total sobol index for each variable
    is created. The rows of the interaction matrix are split across MPI ranks
    and the partial sums are combined with an Allreduce, so every rank
    returns the full result.
    """
    comm = MPI_COMM_WORLD
    size = comm.size
    rank = comm.rank
    mat_size = len(matrix)
    # Row 0 is the constant term, so there are mat_size - 1 interactions.
    iter_count = mat_size - 1
    # Distribute rows evenly; the first ``rem`` ranks take one extra row.
    base = iter_count // size
    rem = iter_count % size
    beg = base * rank + (rank >= rem) * rem + (rank < rem) * rank + 1
    count = base + (rank < rem)
    end = beg + count
    total_sobols = np.zeros(var_count)
    temp_sobols = np.zeros(var_count)
    for i in range(beg, end):
        for j in range(var_count):
            # A nonzero entry means variable j participates in interaction i.
            if matrix[i, j] != 0:
                temp_sobols[j] += sobols[i - 1]
    # Sum every rank's partial totals into total_sobols on all ranks.
    comm.Allreduce(
        [temp_sobols, MPI_DOUBLE], [total_sobols, MPI_DOUBLE], op=MPI_SUM
    )
    return total_sobols
def check_directory(directory, verbose):
    """
    Inputs: directory- the directory for the graphs to be placed
            verbose- the verbose flag

    Checks to see if the graph directory exists. If it doesn't exist, the
    folder is created (by the MPI manager rank only). If it does exist, a
    fresh name ``<directory>_<i>`` is chosen and created instead; the chosen
    name is returned.
    """
    if not os.path.isdir(directory):
        # Only the manager rank touches the filesystem.
        if is_manager:
            os.mkdir(directory)
        if verbose:
            print(f'Making directory {directory}\n')
    else:
        # Directory already exists: probe suffixed names until a free one
        # is found.
        directory_exists = True
        i = 0
        while directory_exists:
            i += 1
            if not os.path.isdir(f'{directory}_{i}'):
                directory = f'{directory}_{i}'
                if is_manager:
                    os.mkdir(directory)
                if verbose:
                    print(f'Making directory {directory}\n')
                directory_exists = False
    return directory
def evaluate_points_verbose(func, begin, total_samps, var_list, attr):
    """
    Inputs: func- the lambda function used to generate the data from the
            evaluation vector
            begin- the index to start at in the `attr` array
            total_samps- the total number of samples to generate
            var_list- list of the variables
            attr- the attribute that holds the values to be used in the
            evaluation vector

    From a specified attribute, a lambda function is used to generate
    values that populate the matrix, printing progress at every 10%
    (manager rank only).
    """
    rank = MPI_COMM_WORLD.rank
    var_count = len(var_list)
    term_count = func(np.zeros(var_count)).shape
    if rank == 0:
        # Sample indices at which to report progress (10%, 20%, ..., 100%).
        # NOTE(review): inter_vals is only bound on rank 0; the guard below
        # relies on short-circuiting ``rank == 0`` to avoid a NameError.
        inter_vals = (np.arange(0.1, 1.1, 0.1) * total_samps).astype(int)
    if len(term_count) > 0:
        term_count = term_count[1]  # len(func(np.zeros(var_count)))
    else:
        term_count = 1
    eval_vect = np.zeros([total_samps, var_count])
    matrix = np.zeros([total_samps, term_count])
    end = begin + total_samps
    # Column j of the evaluation vector is the slice [begin:end] of
    # variable j's ``attr`` array.
    for j in range(var_count):
        attr_arr = getattr(var_list[j], attr)
        eval_vect[:, j] = attr_arr[begin:end].T
    for i in range(total_samps):
        matrix[i, :] = func(eval_vect[i, :])
        if rank == 0 and (inter_vals is not None) and (i + 1 + begin in inter_vals):
            print(f'{(i+1)/total_samps:.0%} Complete\n')
    return matrix
def evaluate_points(func, begin, total_samps, var_list, attr):
    """
    Inputs: func- the lambda function used to generate the data from the
            evaluation vector
            begin- the index to start at in the `attr` array
            total_samps- the total number of samples to generate
            var_list- list of the variables
            attr- the attribute that holds the values to be used in the
            evaluation vector

    Identical to evaluate_points_verbose, but doesn't check for a verbose
    option every iteration. This version also deals with indexing only part of
    eval_vect.
    """
    n_vars = len(var_list)
    # Probe the function once with zeros to discover its output width.
    probe_shape = func(np.zeros(n_vars)).shape
    n_terms = probe_shape[1] if len(probe_shape) > 0 else 1
    points = np.zeros([total_samps, n_vars])
    results = np.zeros([total_samps, n_terms])
    stop = begin + total_samps
    # Column idx of the evaluation points is the [begin:stop] slice of the
    # corresponding variable's ``attr`` array.
    for idx, var in enumerate(var_list):
        points[:, idx] = getattr(var, attr)[begin:stop].T
    for row in range(total_samps):
        results[row, :] = func(points[row, :])
    return results
def calc_difference(array_1, array_2):
    """
    Inputs: array_1- the array being subtracted from
            array_2- the array being subtracted

    Returns the element-wise difference ``array_1 - array_2``.
    """
    difference = array_1 - array_2
    return difference
def calc_mean_err(error):
    """
    Inputs: error- an array of error values

    Returns the mean absolute error.
    """
    return np.abs(error).mean()
def uniform_hypercube(low, high, samp_size=1):
    """
    Inputs: low- the low bound of the hypercube
            high- the high bound of the hypercube
            samp_size- the number of samples to generate

    Generates a uniformly-distributed Latin Hypercube: one draw from each of
    ``samp_size`` equal-width strata of [low, high], shuffled into random
    order.
    """
    edges = np.linspace(low, high, samp_size + 1)
    samples = np.zeros(samp_size)
    for idx in range(samp_size):
        # One uniform draw per stratum keeps the sample space-filling.
        samples[idx] = np.random.uniform(low=edges[idx], high=edges[idx + 1])
    np.random.shuffle(samples)
    return samples
def solve_coeffs(var_basis, responses):
    """
    Inputs: var_basis- the variable basis matrix
            responses- the array of responses

    Solves the normal equations (B^T B) c = B^T y for the matrix
    coefficients, warning when the system is poorly conditioned.
    """
    cond_num_thresh = 20
    basis = np.atleast_2d(var_basis)
    gram = np.dot(basis.T, basis)
    cond_num = cond(gram, -np.inf)
    if cond_num > cond_num_thresh:
        warn(
            'The condition number of the matrix used to solve for the matrix '
            f'coefficients is a large value, {cond_num}.'
        )
    return solve(gram, np.dot(basis.T, responses))
def generate_sample_set(var_list, sample_count=1):
    """
    Inputs: var_list- the list of varibles
            sample_count- the number of samples to generate

    Creates and returns a random, standardized value for each variable present.
    Row i of the result holds ``sample_count`` standardized samples of
    variable i.
    """
    points = np.zeros([len(var_list), sample_count])
    for idx, var in enumerate(var_list):
        points[idx, :] = var.standardize_points(var.generate_samples(sample_count))
    return points
def unstandardize_set(var_list, sample_array):
    """
    Inputs: var_list- list of variables
            sample_array- array with one sample row corresponding to each variable

    Takes an array of standardized values, unstandardizes them, and returns the
    array of unstandardized values.
    """
    n_vars, n_samps = sample_array.shape
    out = np.zeros((n_vars, n_samps))
    for idx in range(n_vars):
        out[idx] = var_list[idx].unstandardize_points(sample_array[idx])
    return out
def standardize_set(var_list, sample_array):
    """
    Inputs: var_list- list of variables
            sample_array- array with one sample row corresponding to each variable

    Takes an array of unstandardized values, standardizes them, and returns the
    array of standardized values.
    """
    n_vars, n_samps = sample_array.shape
    out = np.zeros((n_vars, n_samps))
    for idx in range(n_vars):
        out[idx] = var_list[idx].standardize_points(sample_array[idx])
    return out
def check_error_trends(var_list, error, order, thresh=0.5, shift=2):
    """
    Inputs: var_list- list of variables
            error- the array of error
            order- the order to start the error checking at
            thresh- the minimum pearsonr value the correlation must be at to get
            flagged as an error correlation
            shift- how many orders higher than input `order` to test for
            correlations

    Returns the names of the variables that have a pearsonr correlation higher
    than 'thresh.'
    """
    var_cnt = len(var_list)
    shift_order = order + shift
    corr = np.zeros(var_cnt)
    for i in range(var_cnt):
        correlate = np.zeros(shift_order)
        var = var_list[i]
        for exp in range(1, shift_order + 1):
            # Check for correlations between the error and x^N
            pear = pearsonr(error, var.std_vals ** exp)[0]
            if np.abs(pear) > thresh:
                # Flag the (0-based) exponent slot. The original indexed
                # with ``exp - order``, which goes negative when exp < order;
                # it only worked because correlate.any() below ignores where
                # the flag landed.
                correlate[exp - 1] = 1
        # Any flagged exponent marks the variable as correlated with error.
        corr[i] = correlate.any()
    names = [var_list[i].name for i in range(var_cnt) if corr[i]]
    return names
def check_error_magnitude(error):
    """
    Inputs: error- the array of error

    Checks for large outliers in the error: the largest absolute error is
    compared against three times the mean absolute error, and a descriptive
    message is returned.
    """
    outlier_factor = 3
    peak = np.max(np.abs(error))
    mean_abs = np.mean(np.abs(error))
    if peak >= outlier_factor * mean_abs:
        return (
            'The error has large outliers. The order may not be high enough to '
            'capture the interactions.'
        )
    return 'There are no error outliers.'
def _warn(warn_message, *args, **kwargs):
"""
Inputs: warn_message- the warning message
Used to override "warnings.formatwarning" to output only the warning
message.
"""
return f'{warn_message}\n\n'
def calc_sobols(matrix_coeffs, norm_sq):
    """
    Inputs: matrix_coeffs- the matrix coefficients of the model
            norm_sq- the norm squared of the model

    Returns the sobols from the matrix coefficients and norm squared: each
    non-constant term's variance contribution divided by the total variance.
    """
    n_terms = len(matrix_coeffs)
    norms = norm_sq.reshape(n_terms, 1)
    # Skip index 0 (the constant term) when computing the total variance.
    coeffs_sq = np.reshape(matrix_coeffs, (n_terms, 1))[1:] ** 2
    sigma_sq = np.sum(norms[1:] * coeffs_sq)
    sobols = np.zeros(n_terms - 1)
    for idx in range(1, n_terms):
        sobols[idx - 1] = (matrix_coeffs[idx] ** 2 * norms[idx]) / sigma_sq
    return sobols
|
from typing import List
from google.api_core.exceptions import NotFound, GoogleAPICallError
from google.cloud.pubsub import PublisherClient, SubscriberClient
from testframework.exceptions.pubsub_checker_error import PubsubCheckerError
class PubsubChecker:
    """Checks the existence of Google Cloud Pub/Sub topics and subscriptions
    within a project, wrapping API errors in PubsubCheckerError."""

    def __init__(self, project_id, publisher_client: PublisherClient = None, subscriber_client: SubscriberClient = None):
        self.__project_id = project_id
        self.__publisher_client = publisher_client
        if self.__publisher_client is None:
            self.__publisher_client = PublisherClient()
        self.__subscriber_client = subscriber_client
        if self.__subscriber_client is None:
            self.__subscriber_client = SubscriberClient()

    def topic_exists(self, topic_name: str):
        """Return True if the topic exists, False if not found."""
        topic_path = self.topic_path(topic_name)
        try:
            topic = self.__publisher_client.get_topic(topic_path)
            return topic is not None
        except NotFound:
            return False
        except Exception as e:
            raise PubsubCheckerError(e)

    def subscriptions_exist(self, topic_name: str, subscription_names: List[str], topic_project_id: str = None, subscription_project_id: str = None):
        """Return (all_present, missing) for the given subscriptions of a topic."""
        topic_path = self.topic_path(topic_name, topic_project_id)
        subscription_paths = map(lambda x: self.subscription_path(x, subscription_project_id), subscription_names)
        try:
            actual_subscription_names = list(self.__publisher_client.list_topic_subscriptions(topic_path))
            return self._is_subset_of(subscription_paths, actual_subscription_names)
        except GoogleAPICallError as e:
            raise PubsubCheckerError(e)

    def topics_exist(self, topic_names: List[str]) -> (bool, List[str]):
        """Return (all_present, missing) for the given topics in the project."""
        topic_paths = map(lambda x: self.topic_path(x), topic_names)
        try:
            actual_topic_names = map(lambda x: x.name, self.__publisher_client.list_topics(self.__project_id))
            return self._is_subset_of(topic_paths, actual_topic_names)
        except GoogleAPICallError as e:
            raise PubsubCheckerError(e)

    def _is_subset_of(self, subset, full_set):
        """Return (is_subset, missing_elements).

        BUGFIX: callers pass lazy map() iterators as ``full_set``; testing
        membership against a generator consumes it, so every element after
        the first was reported missing. Materialize it as a set first (also
        making each membership test O(1)).
        """
        full = set(full_set)
        missing = [element for element in subset if element not in full]
        return len(missing) == 0, missing

    def topic_path(self, topic_name: str, project_id: str = None):
        """Build the fully-qualified topic path, defaulting to this project."""
        if project_id is None:
            project_id = self.__project_id
        return self.__publisher_client.topic_path(project_id, topic_name)

    def subscription_path(self, subscription_name, project_id):
        """Build the fully-qualified subscription path, defaulting to this project."""
        if project_id is None:
            project_id = self.__project_id
        return self.__subscriber_client.subscription_path(project_id, subscription_name)
|
from pytest_bdd import scenarios, then, when, parsers
from ui_automation_tests.pages.shared import Shared
from ui_automation_tests.pages.application_page import ApplicationPage
scenarios("../features/withdraw_and_surrender_application.feature", strict_gherkin=False)
RADIO_BUTTONS = "[type='radio']"
@when("I click the button 'Withdraw Application'")
def i_click_withdraw_application(driver):
    """Step: press the 'Withdraw Application' button on the application page."""
    ApplicationPage(driver).click_withdraw_application_button()
@when("I click the button 'Surrender Application'")
def i_click_surrender_application(driver):
    """Step: press the 'Surrender Application' button on the application page.

    Renamed from ``i_click_withdraw_application``, which redefined the
    function registered for the Withdraw step just above (flake8 F811);
    pytest-bdd matches steps by the decorator string, so the behavior is
    unchanged.
    """
    ApplicationPage(driver).click_surrender_application_button()
@then("I should see a confirmation page")
def i_should_see_a_confirmation_page(driver):
    """Step: the confirmation page shows exactly the yes/no radio pair."""
    assert len(driver.find_elements_by_css_selector(RADIO_BUTTONS)) == 2
@when("I select the yes radiobutton")
def i_select_the_yes_radiobutton(driver):
    """Step: choose the 'yes' option on the confirmation page."""
    driver.find_element_by_id("choice-yes").click()
@then(parsers.parse('the application will have the status "{status}"'))
def the_application_will_have_status(driver, status):
    """Step: the application page displays the expected status."""
    assert ApplicationPage(driver).get_status() == status
@then("I won't be able to see the withdraw button")
def i_wont_be_able_to_see_the_withdraw_button(driver):
    """Step: the withdraw button is gone but the copy button remains."""
    text = ApplicationPage(driver).get_text_of_case_buttons()
    assert "withdraw" not in text.lower()
    assert "copy" in text.lower()
@then("I won't be able to see the surrender button")
def i_wont_be_able_to_see_the_surrender_button(driver):
    """Step: the surrender button is gone but the copy button remains."""
    text = ApplicationPage(driver).get_text_of_case_buttons()
    assert "surrender" not in text.lower()
    assert "copy" in text.lower()
@then("the edit application button is not present")
def edit_button_not_present(driver):
    """Step: no edit button is offered on the application page."""
    text = ApplicationPage(driver).get_text_of_case_buttons()
    assert "edit" not in text.lower()
@then("the case note text area is not present")
def case_note_text_area_not_present(driver):
    """Step: the case-note form (post/cancel/add-a-note) is not rendered.

    Renamed from ``edit_button_not_present``, which redefined the step
    function just above (flake8 F811); pytest-bdd matches steps by the
    decorator string, so the behavior is unchanged.
    """
    text = Shared(driver).get_text_of_main_content()
    assert "post note" not in text.lower()
    assert "cancel" not in text.lower()
    assert "add a note" not in text.lower()
|
"""
PyAudio-Play_example_01.py
Example to show how to use Play for PyAudio
Reference:
PyAudio, https://people.csail.mit.edu/hubert/pyaudio/
"""
# TODO: This didn't work at first. So do it again.
import pyaudio
import wave
import sys
from os import path
# Prepare the file
#filename = "jackhammer.wav"
filename = "harvard.wav"
dirname = "."
file = path.join(dirname, filename)

# Open the wave file "file" with rb (read, binary)
wf = wave.open(file, 'rb')
sample_width = wf.getsampwidth()
# BUGFIX: the wave module's accessor is getnchannels(); the original called
# a nonexistent getchannels(), which raised AttributeError.
channel = wf.getnchannels()
framerate = wf.getframerate()

# Set up a PyAudio stream.
p = pyaudio.PyAudio()
stream = p.open(format=p.get_format_from_width(sample_width),
                channels=channel,
                rate=framerate,
                output=True)

# Read frames (as much as the CHUNK size) from the wave file
# and write the read data to stream until no data is left.
CHUNK = 1024
data = wf.readframes(CHUNK)
# BUGFIX: readframes() returns bytes, so at EOF it yields b''. The original
# compared against the str '' — always unequal in Python 3 — looping forever.
while data != b'':
    stream.write(data)
    data = wf.readframes(CHUNK)

# Release the file and audio resources.
wf.close()
stream.stop_stream()
stream.close()
p.terminate()
|
import MudCommand
import MudConst
class cmdCredits(MudCommand.MudCommand):
    """MUD command that prints the contents of the credits file to the player."""

    def __init__(self):
        MudCommand.MudCommand.__init__(self)
        self.info['cmdName'] = "credits"
        self.info['helpText'] = '''Displays the MUD credits'''
        self.info['useExample'] = '''credits'''

    def process(self, player, args=''):
        """Send each line of the credits file to the player, then a prompt.

        player: the player object the output is written to
        args: unused; accepted to satisfy the command interface
        """
        # Use a context manager so the handle is closed even if a write
        # fails; the original opened the file and never closed it.
        with open(MudConst.creditFile, 'r') as credit_file:
            for each_line in credit_file:
                player.writePlain(each_line)
        player.writeWithPrompt("")
|
##-*****************************************************************************
##
## Copyright (c) 2009-2011,
## Sony Pictures Imageworks, Inc. and
## Industrial Light & Magic, a division of Lucasfilm Entertainment Company Ltd.
##
## All rights reserved.
##
## Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are
## met:
## * Redistributions of source code must retain the above copyright
## notice, this list of conditions and the following disclaimer.
## * Redistributions in binary form must reproduce the above
## copyright notice, this list of conditions and the following disclaimer
## in the documentation and/or other materials provided with the
## distribution.
## * Neither the name of Sony Pictures Imageworks, nor
## Industrial Light & Magic nor the names of their contributors may be used
## to endorse or promote products derived from this software without specific
## prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
## "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
## LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
## A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
## OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
## SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
## LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
## DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
## THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
## OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
##
##-*****************************************************************************
from maya import cmds as MayaCmds
import maya.OpenMaya as OpenMaya
import os
import unittest
import util
def getObjFromName( nodeName ):
    """Return the Maya MObject for the dependency node named ``nodeName``.

    Raises if the name does not resolve to exactly one node (MSelectionList.add
    fails on missing/ambiguous names).
    """
    selectionList = OpenMaya.MSelectionList()
    selectionList.add( nodeName )
    obj = OpenMaya.MObject()
    # getDependNode fills ``obj`` in place with the node at index 0.
    selectionList.getDependNode(0, obj)
    return obj
class MeshUVsTest(unittest.TestCase):
    """Round-trip test: write a poly cube's UV set to Alembic, read it back,
    and verify the UV values survive unchanged."""

    def setUp(self):
        # Start each test from a fresh, empty Maya scene.
        MayaCmds.file(new=True, force=True)
        self.__files = []

    def tearDown(self):
        # Remove every Alembic file the test wrote.
        for f in self.__files:
            os.remove(f)

    # we only support writing out the most basic uv sets (and static only)
    def testPolyUVs(self):
        MayaCmds.polyCube(name='cube')
        cubeObj = getObjFromName('cubeShape')
        fnMesh = OpenMaya.MFnMesh(cubeObj)

        # get the name of the current UV set
        uvSetName = fnMesh.currentUVSetName()
        uArray = OpenMaya.MFloatArray()
        vArray = OpenMaya.MFloatArray()
        fnMesh.getUVs(uArray, vArray, uvSetName)
        # Overwrite the cube's 14 default UVs with known values (including
        # values outside [0, 1] to exercise non-normalized coordinates).
        newUArray = [0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 2, 2, -1, -1]
        newVArray = [0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 0, 1, 0, 1]
        for i in range(0, 14):
            uArray[i] = newUArray[i]
            vArray[i] = newVArray[i]
        fnMesh.setUVs(uArray, vArray, uvSetName)
        self.__files.append(util.expandFileName('polyUvsTest.abc'))
        MayaCmds.AbcExport(j='-uv -root cube -file ' + self.__files[-1])

        # reading test
        MayaCmds.AbcImport(self.__files[-1], mode='open')
        MayaCmds.select('cube.map[0:13]', replace=True)
        uvs = MayaCmds.polyEditUV(query=True)
        for i in range(0, 14):
            # assertAlmostEqual replaces failUnlessAlmostEqual, a deprecated
            # alias that was removed in Python 3.12.
            self.assertAlmostEqual(newUArray[i], uvs[2 * i], 4,
                                   'map[%d].u is not the same' % i)
            self.assertAlmostEqual(newVArray[i], uvs[2 * i + 1], 4,
                                   'map[%d].v is not the same' % i)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.