content stringlengths 7 1.05M |
|---|
class Products:
    """Asset IDs for the MODIS collection-6 products used by this project.

    The IDs follow the Google Earth Engine catalog naming scheme
    ('MODIS/006/...') -- presumably consumed by an Earth Engine client;
    confirm against the caller. The trailing comment on each entry names
    the index/variable derived from that product.
    """
    # Surface Reflectance 8-day 500m
    modisRefl = 'MODIS/006/MOD09A1' # => NDDI
    # Surface Temperature 8-day 1000m
    modisTemp = 'MODIS/006/MOD11A2' # => T/NDVI
    # fAPAR 8-day 500m
    modisFpar = 'MODIS/006/MOD15A2H' # => fAPAR
    # Evapotranspiration 8-day 500m
    modisEt = 'MODIS/006/MOD16A2' # => ET
    # EVI 16-day 500m
    modisEvi = 'MODIS/006/MOD13A1' # => EVI
|
# 3. Longest Substring Without Repeating Characters
# Runtime: 60 ms, faster than 74.37% of Python3 online submissions for Longest Substring Without Repeating Characters.
# Memory Usage: 14.4 MB, less than 53.07% of Python3 online submissions for Longest Substring Without Repeating Characters.
class Solution:
    """LeetCode 3 -- length of the longest substring without repeats."""

    def lengthOfLongestSubstring(self, s: str) -> int:
        """Sliding-window scan: keep, per character, the index just past
        its most recent occurrence, and never let the window start move
        backwards.
        """
        next_after = {}   # char -> index immediately after its last occurrence
        longest = 0
        window_start = 0
        for pos, ch in enumerate(s):
            if ch in next_after:
                # Jump the window start past the previous occurrence,
                # but never move it left.
                window_start = max(window_start, next_after[ch])
            longest = max(longest, pos - window_start + 1)
            next_after[ch] = pos + 1
        return longest
class MXWarmSpareSettings(object):
    """Wrapper for the Meraki Dashboard API's MX warm spare endpoints.

    `session` is expected to be the library's REST session object exposing
    get/post/put(metadata, resource[, payload]) -- presumably a RestSession;
    confirm against the instantiating code.
    """
    def __init__(self, session):
        super(MXWarmSpareSettings, self).__init__()
        # Shared HTTP session used by every endpoint method below.
        self._session = session
    def swapNetworkWarmSpare(self, networkId: str):
        """
        **Swap MX primary and warm spare appliances**
        https://developer.cisco.com/meraki/api/#!swap-network-warm-spare
        - networkId (string)
        """
        metadata = {
            'tags': ['MX warm spare settings'],
            'operation': 'swapNetworkWarmSpare',
        }
        resource = f'/networks/{networkId}/swapWarmSpare'
        return self._session.post(metadata, resource)
    def getNetworkWarmSpareSettings(self, networkId: str):
        """
        **Return MX warm spare settings**
        https://developer.cisco.com/meraki/api/#!get-network-warm-spare-settings
        - networkId (string)
        """
        metadata = {
            'tags': ['MX warm spare settings'],
            'operation': 'getNetworkWarmSpareSettings',
        }
        resource = f'/networks/{networkId}/warmSpareSettings'
        return self._session.get(metadata, resource)
    def updateNetworkWarmSpareSettings(self, networkId: str, enabled: bool, **kwargs):
        """
        **Update MX warm spare settings**
        https://developer.cisco.com/meraki/api/#!update-network-warm-spare-settings
        - networkId (string)
        - enabled (boolean): Enable warm spare
        - spareSerial (string): Serial number of the warm spare appliance
        - uplinkMode (string): Uplink mode, either virtual or public
        - virtualIp1 (string): The WAN 1 shared IP
        - virtualIp2 (string): The WAN 2 shared IP
        """
        # Merge the named arguments (enabled, networkId, plus incidental
        # locals such as self) into kwargs; the body_params filter below
        # keeps only the documented API fields in the request body.
        kwargs.update(locals())
        metadata = {
            'tags': ['MX warm spare settings'],
            'operation': 'updateNetworkWarmSpareSettings',
        }
        resource = f'/networks/{networkId}/warmSpareSettings'
        body_params = ['enabled', 'spareSerial', 'uplinkMode', 'virtualIp1', 'virtualIp2']
        payload = {k: v for (k, v) in kwargs.items() if k in body_params}
        return self._session.put(metadata, resource, payload)
|
# Party member and chosen four names.
# CHOSEN_FOUR is the visible subset of PARTY_MEMBERS (the four names below
# appear in both tuples); the remaining PARTY_MEMBERS entries are presumably
# temporary/guest members -- confirm against the consuming code.
CHOSEN_FOUR = (
    'NESS',
    'PAULA',
    'JEFF',
    'POO',
)
PARTY_MEMBERS = (
    'NESS',
    'PAULA',
    'JEFF',
    'POO',
    'POKEY',
    'PICKY',
    'KING',
    'TONY',
    'BUBBLE_MONKEY',
    'DUNGEON_MAN',
    'FLYING_MAN_1',
    'FLYING_MAN_2',
    'FLYING_MAN_3',
    'FLYING_MAN_4',
    'FLYING_MAN_5',
    'TEDDY_BEAR',
    'SUPER_PLUSH_BEAR',
)
|
### Compound variables: TUPLES
# NOTE: the triple-quoted strings below are no-op string literals used as
# "disabled" example code; they are evaluated and discarded at import time.
'''print(lanche[2])'''
'''print(lanche[0:2])'''
'''print(lanche[1:])'''
'''print(lanche[-1])'''
'''len(lanche)'''
'''for c in lanche:
print(c)'''
### (tuples are IMMUTABLE)
''''As tuplas são IMUTÁVEIS'''
### Playing around
lanche = ('Hambúrger', 'Suco', 'Pizza', 'Pudím')
a = (2, 5, 4)
b = (5, 8, 1, 2)
# Tuple concatenation: c == (5, 8, 1, 2, 2, 5, 4)
c = b +a
# A tuple can mix types (name, age, sex, weight)
pessoa = ('Jean', 23, 'M', 65.5)
print(lanche[1])
'''for c in lanche:
print(f'Eu vou comer {c}')'''
'''for c in range(0, len(lanche)):
print(f'Eu vou comer {lanche[c]} na posição {c}')'''
'''for pos, c in enumerate(lanche):
print(f'Eu vou comer {c} na posição {pos}')'''
'''print(sorted(lanche))'''
'''print(c.count(5))'''
'''print(c.index(5, 1))'''
'''del pessoa'''
|
# -*- coding: utf-8 -*-
def test_receiving_events(vim):
    """RPC notifications sent from Vim arrive as (type, name, args) messages."""
    vim.command('call rpcnotify(%d, "test-event", 1, 2, 3)' % vim.channel_id)
    event = vim.next_message()
    assert event[1] == 'test-event'
    assert event[2] == [1, 2, 3]
    # An autocommand can notify too; it fires when 'filetype' is set below.
    vim.command('au FileType python call rpcnotify(%d, "py!", bufnr("$"))' %
                vim.channel_id)
    vim.command('set filetype=python')
    event = vim.next_message()
    assert event[1] == 'py!'
    assert event[2] == [vim.current.buffer.number]
def test_sending_notify(vim):
    """Async (notify) commands are processed in order relative to later calls."""
    # notify after notify
    vim.command("let g:test = 3", async_=True)
    cmd = 'call rpcnotify(%d, "test-event", g:test)' % vim.channel_id
    vim.command(cmd, async_=True)
    event = vim.next_message()
    assert event[1] == 'test-event'
    assert event[2] == [3]
    # request after notify: the blocking eval must observe the earlier notify
    vim.command("let g:data = 'xyz'", async_=True)
    assert vim.eval('g:data') == 'xyz'
def test_broadcast(vim):
    """Broadcast notifications (channel 0) reach only subscribed clients."""
    vim.subscribe('event2')
    # 'event1' is not subscribed yet, so only the 'event2' broadcasts arrive.
    vim.command('call rpcnotify(0, "event1", 1, 2, 3)')
    vim.command('call rpcnotify(0, "event2", 4, 5, 6)')
    vim.command('call rpcnotify(0, "event2", 7, 8, 9)')
    event = vim.next_message()
    assert event[1] == 'event2'
    assert event[2] == [4, 5, 6]
    event = vim.next_message()
    assert event[1] == 'event2'
    assert event[2] == [7, 8, 9]
    # Swap subscriptions and verify the unsubscribed event is now dropped.
    vim.unsubscribe('event2')
    vim.subscribe('event1')
    vim.command('call rpcnotify(0, "event2", 10, 11, 12)')
    vim.command('call rpcnotify(0, "event1", 13, 14, 15)')
    msg = vim.next_message()
    assert msg[1] == 'event1'
    assert msg[2] == [13, 14, 15]
|
"""
Constants for common property names
===================================
In order for different parts of the code to have got a common convention for
the names of properties of importance of data points, here in this module,
constants are defined for these names to facilitate the interoperability of
different parts of codes. Also, by using constants in this module, another
advantage is that typos in the property names are able to be captured by the
python interpreter.
Properties for the input of computations
----------------------------------------
.. py:data:: CONFIGURATION
The configuration of the structure being computed. Normally given as a
string for the name of the configuration.
.. py:data:: METHOD
The computational methodology. Usually can be given as a string for the
computational method of the computation. But it can also be more
complicated structure if finer recording of method is what is concentrated.
Properties for the output of computations
-----------------------------------------
.. py:data:: COORDINATES
The atomic coordinates. Usually given as a list of lists of atomic
element symbol followed by three floating point numbers for the
coordinate of the atoms, in Angstrom.
.. py:data:: ELECTRON_ENERGY
The electronic energy of the system.
.. py:data:: ZERO_POINT_CORRECTION
The zero-point correction to the base energy.
.. py:data:: GIBBS_THERMAL_CORRECTION
The thermal correction to the Gibbs free energy.
.. py:data:: COUNTERPOISE_CORRECTION
The counterpoise correction fot the basis set superposition error.
By convention the units for the energies is Hartree.
"""
CONFIGURATION = 'configuration'
METHOD = 'method'
COORDINATES = 'coordinates'
ELECTRON_ENERGY = 'electron_energy'
ZERO_POINT_CORRECTION = 'zero_point_correction'
GIIBS_THERMAL_CORRECTION = 'gibbs_thermal_correction'
COUNTERPOISE_CORRECTION = 'counterpoise_correction'
|
#%% Imports and function declaration
class Node:
    """A single element of a singly linked list."""

    def __init__(self, data):
        self.data = data   # payload stored at this node
        self.next = None   # successor node; None marks the tail
# helper functions for testing purpose
def create_linked_list(arr):
    """Build a linked list holding the items of *arr*; return its head
    (None for an empty input)."""
    if not arr:
        return None
    head = Node(arr[0])
    cursor = head
    for value in arr[1:]:
        cursor.next = Node(value)
        cursor = cursor.next
    return head
def print_linked_list(head):
    """Print the node payloads space-separated, followed by a newline."""
    node = head
    while node is not None:
        print(node.data, end=" ")
        node = node.next
    print()
# Personal Implementation
def swap_nodes(head, left_index, right_index):
    """
    :param: head- head of input linked list
    :param: left_index - indicates position
    :param: right_index - indicates position
    return: head of updated linked list with nodes swapped
    Do not create a new linked list

    Swaps the payloads of the nodes at the two positions (0-based).
    The original walked the list twice and crashed with an unbound-variable
    or attribute error when an index was out of range; this version makes a
    single pass and leaves the list untouched if either index is missing.
    Assumes left_index <= right_index, as the original did.
    """
    left_node = None
    right_node = None
    node = head
    position = 0
    # One traversal is enough: remember the two nodes as we pass them.
    while node is not None and position <= right_index:
        if position == left_index:
            left_node = node
        if position == right_index:
            right_node = node
        position += 1
        node = node.next
    # Robustness: if either position was never reached, change nothing.
    if left_node is not None and right_node is not None:
        left_node.data, right_node.data = right_node.data, left_node.data
    return head
#%% Testing
# Each case builds a fresh list, swaps the values at the two positions and
# prints the result.
# Case 1: swap positions 3 and 4 -> expected output: 3 4 5 6 2 1 9
arr = [3, 4, 5, 2, 6, 1, 9]
left_index = 3
right_index = 4
head = create_linked_list(arr)
print_linked_list(swap_nodes(head=head, left_index=left_index, right_index=right_index))
# Case 2: swap positions 2 and 4 -> expected output: 3 4 6 2 5 1 9
arr = [3, 4, 5, 2, 6, 1, 9]
left_index = 2
right_index = 4
head = create_linked_list(arr)
print_linked_list(swap_nodes(head=head, left_index=left_index, right_index=right_index))
# Case 3: swap positions 0 and 1 -> expected output: 4 3 5 2 6 1 9
arr = [3, 4, 5, 2, 6, 1, 9]
left_index = 0
right_index = 1
head = create_linked_list(arr)
print_linked_list(swap_nodes(head=head, left_index=left_index, right_index=right_index))
class PlotmanError(Exception):
    """Common base of the plotman exception hierarchy.

    Exists purely as a hierarchy root; it is never raised itself.
    """
class UnableToIdentifyPlotterFromLogError(PlotmanError):
    """Raised when a log cannot be matched to any plotter definition."""

    def __init__(self) -> None:
        message = "Failed to identify the plotter definition for parsing log"
        super().__init__(message)
|
class TreeError:
    """One validation finding (error or anomaly) about an individual or
    family record, rendered as a colon-separated line by __str__."""

    # Severity levels
    TYPE_ERROR = 'ERROR'
    TYPE_ANOMALY = 'ANOMALY'
    # Kinds of record a finding can refer to
    ON_INDI = 'INDIVIDUAL'
    ON_FAM = 'FAMILY'

    def __init__(self, err_type, err_on, err_us, err_on_id, err_msg):
        # Fields are stored verbatim; __str__ joins them in this order.
        self.err_type = err_type
        self.err_on = err_on
        self.err_us = err_us
        self.err_on_id = err_on_id
        self.err_msg = err_msg

    def __str__(self):
        fields = (self.err_type, self.err_on, self.err_us,
                  self.err_on_id, self.err_msg)
        return ': '.join(str(field) for field in fields)
|
# Column-header label strings, centralized so the exact wording lives in one
# place -- presumably matched against spreadsheet/report columns elsewhere;
# confirm against the consuming code before editing any text.
budget_header = 'Budget'
forecast_total_header = 'Forecast outturn'
# NOTE(review): the hyphen before "overspend" is part of the literal header.
variance_header = 'Variance -overspend/underspend'
variance_percentage_header = 'Variance %'
year_to_date_header = 'Year to Date Actuals'
budget_spent_percentage_header = '% of budget spent to date'
variance_outturn_header = "Forecast movement"
|
'''
Commands: Basic module to handle all commands. Commands for mops are driven by one-line
exec statements; these exec statements are held in a dictionary.
Model Operations Processing System. Copyright Brian Fairbairn 2009-2010. Licenced under the EUPL.
You may not use this work except in compliance with the Licence. You may obtain a copy of the
Licence at http://ec.europa.eu/idabc/eupl or as attached with this application (see Licence file).
Unless required by applicable law or agreed to in writing, software distributed under the Licence
is distributed on an 'AS IS' basis WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either expressed
or implied. See the Licence governing permissions and limitations under the Licence.
Changes:
Rev 1 Added Params as a parameter to CXSCHD for triMOPS
Added Params as a parameter to ADROUT for default direction
Added Params to LINEUP for passenger car checking
Added Params to LICONS for passenger car checking
'''
def load_commands(commands):
    """This contains a list of commands to be exec'd in the main loop. The key
    is the input command and the value is the line to be executed (generally a
    class method).
    Command structure:
        verb data
    where verb is (almost always) a 6-figure word which translates to a
    class method and data is a set of parameters for the class method
    delimited by semi-colons
    Verb structure
    special verbs are available which equate to specific commands and these
    are described in the user manual or can be seen below.
    general verbs are constructed on the following basis:
    chars 1 + 2   AD - Add a new item
                  CH - Change existing item
                       (additional changes will be C*)
                  DX - Delete item
                  LI - List items (to screen)
                       (additional list versions will be LA, LB etc)
                  LX - unformatted list versions
                  PA - Print items (to file)
                       (additional print versions will be PA, PB etc)
                  PX - unformatted print versions (for .csv files)
                  XX - Supervisor command functions
    chars 3-6 type of data being updated (can also use char 6)
        AREA - Maintain Areas
        USER - Maintain users
        TYPE - Maintain Stax Types
        STAX - Maintain Stax
        PARM - Parameter values

    Every value ends with a trailing newline for consistency (a handful of
    entries were missing it; exec ignores it either way).
    """
    #areas
    commands['ADAREA'] = 'Area.adarea(input_data)\n'
    commands['CHAREA'] = 'Area.charea(input_data)\n'
    commands['DXAREA'] = 'Area.dxarea(input_data)\n'
    commands['LIAREA'] = 'Area.liarea(input_data)\n'
    commands['LXAREA'] = 'Area.dump_to_screen()\n'
    commands['PRAREA'] = 'Area.prarea(input_data, Params)\n'
    commands['PXAREA'] = 'Area.extract_to_file(Params)\n'
    #calendar
    commands['HOLIDX'] = 'Calendar.holidx(input_data)\n'
    commands['LICALX'] = 'Calendar.licalx(input_data)\n'
    #car
    commands['ADCARX'] = 'Car.adcarx(input_data)\n'
    commands['ACARXB'] = 'Car.acarxb(input_data)\n'
    commands['ACARXS'] = 'Car.acarxs(input_data)\n'
    commands['CARXAT'] = 'Car.carxat(input_data)\n'
    commands['CHCARX'] = 'Car.chcarx(input_data)\n'
    commands['CLEANX'] = 'Car.cleanx(input_data)\n'
    commands['MAINTC'] = 'Car.maintc(input_data)\n'
    commands['CXCARS'] = 'Car.cxcars(input_data)\n'
    commands['DXCARX'] = 'Car.dxcarx(input_data)\n'
    commands['LACARS'] = 'Car.lacars(input_data)\n'
    commands['LICARS'] = 'Car.licars(input_data)\n'
    commands['LMTCAR'] = 'Car.lmtcar(input_data)\n'
    commands['LONWAY'] = 'Car.lonway(input_data)\n'
    commands['LXCARS'] = 'Car.dump_to_screen()\n'
    commands['MTYORD'] = 'Car.mtyord(input_data)\n'
    commands['PRCARS'] = 'Car.prcars(input_data, Params)\n'
    commands['PXCARS'] = 'Car.extract_to_file(Params)\n'
    commands['CARXSP'] = 'Car.carxsp(input_data)\n'
    commands['XCARXB'] = 'Car.xcarxb(input_data)\n'
    commands['XCARXS'] = 'Car.xcarxs(input_data)\n'
    #carclass
    commands['ADCLAS'] = 'CarClass.adclas(input_data)\n'
    commands['CHCLAS'] = 'CarClass.chclas(input_data)\n'
    commands['DXCLAS'] = 'CarClass.dxclas(input_data)\n'
    commands['LICLAS'] = 'CarClass.liclas(input_data)\n'
    commands['LXCLAS'] = 'CarClass.dump_to_screen()\n'
    commands['PRCLAS'] = 'CarClass.prclas(input_data, Params)\n'
    commands['PXCLAS'] = 'CarClass.extract_to_file(Params)\n'
    #cartype
    commands['ADCART'] = 'CarType.adcart(input_data)\n'
    commands['CHCART'] = 'CarType.chcart(input_data)\n'
    commands['DXCART'] = 'CarType.dxcart(input_data)\n'
    commands['LICART'] = 'CarType.licart(input_data)\n'
    commands['LXCART'] = 'CarType.dump_to_screen()\n'
    # fixed: stray trailing comma inside the exec'd call
    commands['PRCART'] = 'CarType.prcart(input_data, Params)\n'
    commands['PXCART'] = 'CarType.extract_to_file(Params)\n'
    #commodities
    commands['ADCOMM'] = 'Commodity.adcomm(input_data)\n'
    commands['CHCOMM'] = 'Commodity.chcomm(input_data)\n'
    commands['DXCOMM'] = 'Commodity.dxcomm(input_data)\n'
    commands['LICOMM'] = 'Commodity.licomm(input_data)\n'
    commands['LXCOMM'] = 'Commodity.dump_to_screen()\n'
    commands['PRCOMM'] = 'Commodity.prcomm(input_data, Params)\n'
    commands['PXCOMM'] = 'Commodity.extract_to_file(Params)\n'
    #flash
    commands['FLASHX'] = 'Flash.flashx(input_data, Params)\n'
    #help
    commands['ABOUT'] = 'MOPS_Help.about()\n'
    commands['HELP'] = 'MOPS_Help.help()\n'
    commands['ASSIST'] = 'MOPS_Commands.assist()\n'
    #instruction
    commands['ADINST'] = 'Instruction.adinst(input_data)\n'
    commands['DXINST'] = 'Instruction.dxinst(input_data)\n'
    #loading
    commands['ADLOAD'] = 'Loading.adload(input_data)\n'
    commands['CHLOAD'] = 'Loading.chload(input_data)\n'
    commands['DXLOAD'] = 'Loading.dxload(input_data)\n'
    commands['LILOAD'] = 'Loading.liload(input_data)\n'
    commands['LXLOAD'] = 'Loading.dump_to_screen()\n'
    commands['PRLOAD'] = 'Loading.prload(input_data, Params)\n'
    commands['PXLOAD'] = 'Loading.extract_to_file(Params)\n'
    #loco
    commands['ADLOCO'] = 'Loco.adloco(input_data)\n'
    commands['FUELXX'] = 'Loco.fuelxx(input_data)\n'
    commands['CHLOCO'] = 'Loco.chloco(input_data)\n'
    commands['MAINTL'] = 'Loco.maintl(input_data)\n'
    commands['POWERX'] = 'Loco.powerx(input_data)\n'
    commands['DXLOCO'] = 'Loco.dxloco(input_data)\n'
    commands['LOCOAT'] = 'Loco.locoat(input_data)\n'
    commands['LOCOSP'] = 'Loco.locosp(input_data)\n'
    commands['LILOCO'] = 'Loco.liloco(input_data)\n'
    commands['LSLOCO'] = 'Loco.lsloco(input_data)\n'
    commands['LXLOCO'] = 'Loco.dump_to_screen()\n'
    commands['PRLOCO'] = 'Loco.prloco(input_data, Params)\n'
    commands['PSLOCO'] = 'Loco.psloco(input_data, Params)\n'
    commands['PXLOCO'] = 'Loco.extract_to_file(Params)\n'
    #loco type
    commands['ADLOCT'] = 'LocoType.adloct(input_data)\n'
    commands['CHLOCT'] = 'LocoType.chloct(input_data)\n'
    commands['DXLOCT'] = 'LocoType.dxloct(input_data)\n'
    commands['LILOCT'] = 'LocoType.liloct(input_data)\n'  # added missing \n
    commands['LXLOCT'] = 'LocoType.dump_to_screen()\n'
    commands['PRLOCT'] = 'LocoType.prloct(input_data, Params)\n'
    commands['PXLOCT'] = 'LocoType.extract_to_file(Params)\n'
    #order
    commands['LEMPTY'] = 'Order.lempty(input_data)\n'  # added missing \n
    commands['LORDER'] = 'Order.lorder(input_data)\n'  # added missing \n
    commands['LXORDR'] = 'Order.dump_to_screen()\n'
    commands['DEMPTY'] = 'Order.dempty(input_data)\n'  # added missing \n
    commands['PEMPTY'] = 'Order.pempty(input_data, Params)\n'
    commands['PXORDR'] = 'Order.extract_to_file(Params)\n'
    #parameter
    commands['CHPARM'] = 'Params.chparm(input_data)\n'
    commands['CSPEED'] = 'Params.cspeed(input_data)\n'
    commands['LIPARM'] = 'Params.liparm(input_data)\n'
    commands['PRPARM'] = 'Params.prparm(input_data, Params)\n'
    commands['LXPARM'] = 'Params.dump_to_screen()\n'
    commands['PXPARM'] = 'Params.extract_to_file(Params)\n'
    commands['SETTIM'] = 'Params.settim(input_data)\n'
    commands['XXSTOP'] = 'Params.xxstop()\n'
    #place
    commands['ADPLAX'] = 'Place.adplax(input_data)\n'
    commands['ADINDY'] = 'Place.adindy(input_data)\n'
    commands['CHPLAX'] = 'Place.chplax(input_data)\n'
    commands['CHINDY'] = 'Place.chindy(input_data)\n'
    commands['DXPLAX'] = 'Place.dxplax(input_data)\n'
    commands['DXINDY'] = 'Place.dxindy(input_data)\n'
    commands['LIPLAX'] = 'Place.liplax(input_data)\n'
    commands['LIGEOG'] = 'Place.ligeog(input_data)\n'
    commands['LXPLAX'] = 'Place.dump_to_screen()\n'
    commands['PRPLAX'] = 'Place.prplax(input_data, Params)\n'
    commands['PRGEOG'] = 'Place.prgeog(input_data, Params)\n'
    commands['PXPLAX'] = 'Place.extract_to_file(Params)\n'
    #railroad
    commands['ADRAIL'] = 'Railroad.adrail(input_data)\n'
    commands['CHRAIL'] = 'Railroad.chrail(input_data)\n'
    commands['DXRAIL'] = 'Railroad.dxrail(input_data)\n'
    commands['LIRAIL'] = 'Railroad.lirail(input_data)\n'
    commands['LXRAIL'] = 'Railroad.dump_to_screen()\n'
    commands['PRRAIL'] = 'Railroad.prrail(input_data, Params)\n'
    commands['PXRAIL'] = 'Railroad.extract_to_file(Params)\n'
    #route
    commands['ADROUT'] = 'Route.adrout(input_data, Params)\n' #Rev 1
    commands['CHROUT'] = 'Route.chrout(input_data)\n'
    commands['DXROUT'] = 'Route.dxrout(input_data)\n'
    commands['LIROUT'] = 'Route.lirout(input_data)\n'
    commands['LXROUT'] = 'Route.dump_to_screen()\n'
    commands['PRROUT'] = 'Route.prrout(input_data, Params)\n'
    commands['UNPUBL'] = 'Route.unpubl(input_data)\n'
    commands['PUBLSH'] = 'Route.publsh(input_data)\n'
    commands['PXROUT'] = 'Route.extract_to_file(Params)\n'
    commands['VALIDR'] = 'Route.validr(input_data)\n'
    #routing code
    commands['ADCURO'] = 'Routing.adcuro(input_data)\n'
    commands['CHCURO'] = 'Routing.chcuro(input_data)\n'
    commands['DXCURO'] = 'Routing.dxcuro(input_data)\n'
    commands['LICURO'] = 'Routing.licuro(input_data)\n'
    commands['LXCURO'] = 'Routing.dump_to_screen()\n'
    commands['PRCURO'] = 'Routing.prcuro(input_data, Params)\n'
    commands['PXCURO'] = 'Routing.extract_to_file(Params)\n'
    #schedule
    commands['ADSCHD'] = 'Schedule.adschd(input_data)\n'
    commands['CHSCHD'] = 'Schedule.chschd(input_data)\n'
    commands['CPSCHD'] = 'Schedule.cpschd(input_data)\n'
    commands['DXSCHD'] = 'Schedule.dxschd(input_data)\n'
    commands['CXSCHD'] = 'Schedule.cxschd(input_data, Params)\n'
    commands['XCTIVE'] = 'Schedule.xctive(input_data)\n'
    commands['LISCHD'] = 'Schedule.lischd(input_data)\n'
    commands['LSSCHD'] = 'Schedule.lsschd(input_data)\n'
    commands['LXSCHD'] = 'Schedule.dump_to_screen()\n'
    commands['ACTIVE'] = 'Schedule.active(input_data)\n'
    commands['PRSCHD'] = 'Schedule.prschd(input_data, Params)\n'
    commands['PXSCHD'] = 'Schedule.extract_to_file(Params)\n'
    commands['PRTABL'] = 'Schedule.prtabl(input_data, user_type, user_id)\n'
    commands['PXTABL'] = 'Schedule.pxtabl(input_data, user_type, user_id)\n'
    #section
    commands['ADSECT'] = 'Section.adsect(input_data)\n'
    commands['DXSECT'] = 'Section.dxsect(input_data)\n'
    commands['LXSECT'] = 'Section.dump_to_screen()\n'
    commands['LSROUT'] = 'Section.lsrout(input_data)\n'
    commands['LDROUT'] = 'Section.ldrout(input_data)\n'
    commands['PDROUT'] = 'Section.pdrout(input_data, Params)\n'
    commands['PXSECT'] = 'Section.extract_to_file(Params)\n'
    #stations
    commands['ADSTAX'] = 'Station.adstax(input_data, Params)\n'
    commands['CHSTAX'] = 'Station.chstax(input_data, Params)\n'
    commands['DXSTAX'] = 'Station.dxstax(input_data)\n'
    commands['LISTAX'] = 'Station.listax(input_data)\n'
    commands['LXSTAX'] = 'Station.dump_to_screen()\n'
    commands['PRSTAX'] = 'Station.prstax(input_data, Params)\n'
    commands['PXSTAX'] = 'Station.extract_to_file(Params)\n'
    #station types
    commands['ADSTAT'] = 'StationType.adstat(input_data)\n'
    commands['CHSTAT'] = 'StationType.chstat(input_data)\n'
    commands['DXSTAT'] = 'StationType.dxstat(input_data)\n'
    commands['LISTAT'] = 'StationType.listat(input_data)\n'
    commands['LXSTAT'] = 'StationType.dump_to_screen()\n'
    commands['PRSTAT'] = 'StationType.prstat(input_data, Params)\n'
    commands['PXSTAT'] = 'StationType.extract_to_file(Params)\n'
    #timings
    commands['ADTIMS'] = 'Timings.adtims(input_data)\n'
    commands['CHTIMS'] = 'Timings.chtims(input_data)\n'
    commands['TIMING'] = 'Timings.timing(input_data)\n'
    commands['LDTIMS'] = 'Timings.ldtims(input_data)\n'
    commands['PRTIMS'] = 'Timings.prtims(input_data, Params)\n'
    commands['PXTIMS'] = 'Timings.extract_to_file(Params)\n'
    #train
    commands['UTRAIN'] = 'Train.utrain(input_data, Params)\n'
    commands['STRAIN'] = 'Train.strain(input_data, Params)\n'
    commands['ETRAIN'] = 'Train.etrain(input_data, Params)\n'
    commands['ALOCOT'] = 'Train.alocot(input_data)\n'
    commands['ACARXT'] = 'Train.acarxt(input_data)\n'
    commands['LTRAIN'] = 'Train.ltrain(input_data, Flash, Params)\n'
    commands['LINEUP'] = 'Train.lineup(input_data, Params)\n' #Rev 1
    commands['REPORT'] = 'Train.report(input_data, Params)\n'
    commands['TTRAIN'] = 'Train.ttrain(input_data, Flash, Params)\n'
    commands['XLOCOT'] = 'Train.xlocot(input_data)\n'
    commands['XCARXT'] = 'Train.xcarxt(input_data)\n'
    commands['TRAINS'] = 'Train.trains(input_data)\n'
    commands['LICONS'] = 'Train.licons(input_data, Params)\n' #Rev 1
    commands['PRCONS'] = 'Train.prcons(input_data, Params)\n'
    #users
    commands['ADUSER'] = 'User.aduser(input_data)\n'
    commands['CHPASS'] = 'User.chpass(uid)\n'
    commands['CHUSER'] = 'User.chuser(input_data)\n'
    commands['DXUSER'] = 'User.dxuser(input_data)\n'
    commands['EDUSER'] = 'User.eduser(input_data)\n'
    commands['LIUSER'] = 'User.liuser(input_data)\n'
    commands['LXUSER'] = 'User.dump_to_screen()\n'
    commands['PRUSER'] = 'User.pruser(input_data, Params)\n'
    commands['PXUSER'] = 'User.extract_to_file(Params)\n'
    commands['RESETP'] = 'User.resetp(input_data)\n'
    #warehouses
    commands['ADWARE'] = 'Warehouse.adware(input_data)\n'
    commands['CHWARE'] = 'Warehouse.chware(input_data)\n'
    commands['CPWARE'] = 'Warehouse.cpware(input_data)\n'
    commands['DXWARE'] = 'Warehouse.dxware(input_data)\n'
    commands['LIWARE'] = 'Warehouse.liware(input_data)\n'
    commands['LDWARE'] = 'Warehouse.ldware(input_data)\n'
    commands['LSWARE'] = 'Warehouse.lsware(input_data)\n'
    commands['LXWARE'] = 'Warehouse.dump_to_screen()\n'
    commands['PRWARE'] = 'Warehouse.prware(input_data, Params)\n'
    commands['PXWARE'] = 'Warehouse.extract_to_file(Params)\n'
    return commands
def load_helper(helper):
    """Loads short descriptions about commands into an array
    (a dict keyed by command verb; the values are shown by assist()).
    """
    #areas
    helper['ADAREA'] = 'ADD AREA'
    helper['CHAREA'] = 'CHANGE AREA'
    helper['DXAREA'] = 'DELETE AREA'
    helper['LIAREA'] = 'LIST AREAS'
    helper['LXAREA'] = 'SHOW AREAS FILE'
    helper['PRAREA'] = 'PRINT AREAS'
    helper['PXAREA'] = 'EXPORT AREAS'
    #calendar
    helper['HOLIDX'] = 'SET HOLIDAY'
    helper['LICALX'] = 'LIST NEXT 10 DAYS'
    #car
    helper['ADCARX'] = 'ADD CAR DETAIL'
    helper['ACARXB'] = 'ALLOCATE CAR TO BLOCK'
    helper['ACARXS'] = 'ALLOCATE CAR TO SET'
    helper['CARXAT'] = 'LOCATE CAR AT STATION'
    helper['CHCARX'] = 'CHANGE CAR DETAILS'
    helper['CLEANX'] = 'SET CAR TO EMPTY/CLEAN'
    helper['MAINTC'] = 'CHANGE CAR MAINTENANCE STATE'
    helper['CXCARS'] = 'CHANGE CAR LOCATION'
    helper['DXCARX'] = 'DELETE CAR'
    helper['LICARS'] = 'LIST CARS'
    helper['LACARS'] = 'REPORT CARS BY STATUS'
    helper['LMTCAR'] = 'REPORT UNALLOCATED EMPTY CARS'
    helper['LONWAY'] = 'REPORT EMPTIES EN ROUTE'
    helper['LXCARS'] = 'SHOW CARS FILE'
    helper['MTYORD'] = 'ALLOCATE EMPTY TO ORDER'
    helper['PRCARS'] = 'PRINT CARS'
    helper['PXCARS'] = 'EXPORT CARS'
    helper['CARXSP'] = 'SPOT CAR'
    helper['XCARXB'] = 'REMOVE CAR FROM BLOCK'
    helper['XCARXS'] = 'REMOVE CAR FROM SET'
    #carclass
    helper['ADCLAS'] = 'ADD CAR CLASSIFICATION'
    helper['CHCLAS'] = 'CHANGE CAR CLASSIFICATION'  # fixed CLASSSIFICATION typo
    helper['DXCLAS'] = 'DELETE CAR CLASSIFICATION'
    helper['LICLAS'] = 'LIST CAR CLASSIFICATIONS'
    helper['LXCLAS'] = 'SHOW CAR CLASSIFICATIONS FILE'
    helper['PRCLAS'] = 'PRINT CAR CLASSIFICATIONS'
    helper['PXCLAS'] = 'EXPORT CAR CLASSIFICATIONS'
    #cartype
    helper['ADCART'] = 'ADD CAR TYPE'
    helper['CHCART'] = 'CHANGE CAR TYPE'
    helper['DXCART'] = 'DELETE CAR TYPE'
    helper['LICART'] = 'LIST CAR TYPES'
    helper['LXCART'] = 'SHOW CAR TYPES FILE'
    helper['PRCART'] = 'PRINT CAR TYPES'
    helper['PXCART'] = 'EXPORT CAR TYPES'
    #commodities
    helper['ADCOMM'] = 'ADD COMMODITY'
    helper['CHCOMM'] = 'CHANGE COMMODITY'
    helper['DXCOMM'] = 'DELETE COMMODITY'
    helper['LICOMM'] = 'LIST COMMODITIES'
    helper['LXCOMM'] = 'SHOW COMMODITIES FILE'
    helper['PRCOMM'] = 'PRINT COMMODITIES'
    helper['PXCOMM'] = 'EXPORT COMMODITIES'
    #flash
    helper['FLASHX'] = 'FLASH MESSAGE'
    #help
    helper['HELP'] = 'GENERAL HELP'
    helper['ABOUT'] = 'ABOUT MOPS'
    helper['ASSIST'] = 'LIST AVAILABLE COMMANDS'
    #instruction
    helper['ADINST'] = 'ADD INSTRUCTION'
    helper['DXINST'] = 'DELETE INSTRUCTION'
    #loading
    helper['ADLOAD'] = 'ADD LOADING DEFINITIONS'
    helper['CHLOAD'] = 'CHANGE LOADING DEFINITIONS'
    helper['DXLOAD'] = 'DELETE LOADING DEFINITION'
    helper['LILOAD'] = 'LIST LOADING DEFINITIONS'
    helper['LXLOAD'] = 'SHOW LOADING DEFINITIONS'
    helper['PRLOAD'] = 'PRINT LOADING DEFINITIONS'
    helper['PXLOAD'] = 'EXPORT LOADING DEFINITIONS'
    #loco
    helper['ADLOCO'] = 'ADD LOCOMOTIVE'
    helper['FUELXX'] = 'CHANGE LOCO FUEL STATE'
    helper['CHLOCO'] = 'CHANGE LOCOMOTIVE'
    helper['MAINTL'] = 'CHANGE LOCO MAINTENANCE STATE'
    helper['POWERX'] = 'CHANGE LOCO POWER STATE'
    helper['DXLOCO'] = 'DELETE LOCOMOTIVE'
    helper['LILOCO'] = 'LIST LOCOMOTIVES'
    helper['LOCOAT'] = 'LOCATE LOCO AT STATION'
    helper['LXLOCO'] = 'SHOW LOCOMOTIVE FILE'
    helper['PRLOCO'] = 'PRINT LOCOMOTIVES'
    helper['PXLOCO'] = 'EXPORT LOCOMOTIVES'
    helper['LOCOSP'] = 'SPOT LOCO'
    helper['LSLOCO'] = 'LIST LOCOMOTIVE DETAILS'
    helper['PSLOCO'] = 'PRINT LOCOMOTIVE DETAILS'
    #loco type
    helper['ADLOCT'] = 'ADD LOCOMOTIVE TYPE'
    helper['CHLOCT'] = 'CHANGE LOCOMOTIVE TYPE'
    helper['DXLOCT'] = 'DELETE LOCOMOTIVE TYPE'  # fixed LOCOMOTVE typo
    helper['LILOCT'] = 'LIST LOCOMOTIVE TYPES'
    helper['LXLOCT'] = 'SHOW LOCOMOTIVE TYPES'
    helper['PRLOCT'] = 'PRINT LOCOMOTIVE TYPES'
    helper['PXLOCT'] = 'EXPORT LOCOMOTIVE TYPES'
    #order
    helper['LEMPTY'] = 'LIST EMPTY CAR REQUESTS'
    helper['LORDER'] = 'LIST EMPTY AND WAYBILL REQUESTS'
    helper['DEMPTY'] = 'DETAIL EMPTY CAR REQUESTS'
    helper['LXORDR'] = 'SHOW ORDERS FILE'
    helper['PEMPTY'] = 'REPORT EMPTY CAR REQUESTS'
    helper['PXORDR'] = 'EXPORT ORDERS FILE'
    #parameter
    helper['CHPARM'] = 'CHANGE PARAMETER'
    helper['CSPEED'] = 'SET MOPS CLOCK SPEED'
    helper['LIPARM'] = 'LIST PARAMETERS'
    helper['PRPARM'] = 'REPORT PARAMETERS'
    helper['LXPARM'] = 'SHOW PARAMETERS'
    helper['PXPARM'] = 'EXPORT PARAMETERS'
    helper['SETTIM'] = 'SET MOPS DATE AND TIME'
    helper['XXSTOP'] = 'STOP MOPS'
    #place
    helper['ADPLAX'] = 'ADD PLACE'
    helper['CHPLAX'] = 'CHANGE PLACE'
    helper['DXPLAX'] = 'DELETE PLACE'
    helper['ADINDY'] = 'ADD INDUSTRY'
    helper['CHINDY'] = 'CHANGE INDUSTRY'
    helper['DXINDY'] = 'DELETE INDUSTRY'
    helper['LIPLAX'] = 'LIST PLACES'
    helper['LXPLAX'] = 'SHOW PLACES FILE'
    helper['PRPLAX'] = 'PRINT PLACES'
    helper['PRGEOG'] = 'PRINT GEOGRAPHY'
    # fixed: LI* verbs list to screen (was duplicated 'PRINT GEOGRAPHY')
    helper['LIGEOG'] = 'LIST GEOGRAPHY'
    helper['PXPLAX'] = 'EXPORT PLACES'
    #railroad
    helper['ADRAIL'] = 'ADD RAILROAD'
    helper['CHRAIL'] = 'CHANGE RAILROAD'
    helper['DXRAIL'] = 'DELETE RAILROAD'
    helper['LIRAIL'] = 'LIST RAILROADS'
    helper['LXRAIL'] = 'SHOW RAILROAD FILE'
    helper['PRRAIL'] = 'PRINT RAILROADS'
    helper['PXRAIL'] = 'EXPORT RAILROADS'
    #route
    helper['ADROUT'] = 'ADD ROUTE'
    helper['CHROUT'] = 'CHANGE ROUTE NAME'
    helper['DXROUT'] = 'DELETE ROUTE'
    helper['LIROUT'] = 'LIST ALL ROUTES'
    helper['LXROUT'] = 'SHOW ROUTES FILE'
    helper['PRROUT'] = 'PRINT ALL ROUTES'
    helper['UNPUBL'] = 'SET PUBLISHED ROUTE TO DRAFT'
    helper['PUBLSH'] = 'PUBLISH ROUTE'
    helper['PXROUT'] = 'EXPORT ALL ROUTES'
    helper['VALIDR'] = 'VALIDATE ROUTE'
    #routing code
    helper['ADCURO'] = 'ADD CUSTOMER ROUTING INFORMATION'
    helper['CHCURO'] = 'CHANGE CUSTOMER ROUTING INFORMATION'
    helper['DXCURO'] = 'DELETE CUSTOMER ROUTING INFORMATION'
    helper['LICURO'] = 'LIST CUSTOMER ROUTING INFORMATION'
    helper['LXCURO'] = 'SHOW CUSTOMER ROUTINGS FILE'
    helper['PRCURO'] = 'PRINT CUSTOMER ROUTING INFORMATION'
    helper['PXCURO'] = 'EXPORT ROUTINGS'
    #schedule
    helper['ADSCHD'] = 'ADD SCHEDULE'
    helper['CHSCHD'] = 'CHANGE SCHEDULE'
    helper['CPSCHD'] = 'COPY SCHEDULE'
    helper['DXSCHD'] = 'DELETE SCHEDULE'
    helper['CXSCHD'] = 'CANCEL SCHEDULE'
    helper['ACTIVE'] = 'ACTIVATE SCHEDULE'
    helper['XCTIVE'] = 'SET SCHEDULE INACTIVE'
    helper['LISCHD'] = 'LIST ALL SCHEDULES'
    helper['LSSCHD'] = 'LIST ACTIVE/RUNNING SCHEDULES'
    helper['LXSCHD'] = 'SHOW SCHEDULES FILE'
    helper['PRSCHD'] = 'PRINT ALL SCHEDULES'
    helper['PRTABL'] = 'PRINT TIMETABLE'
    # NOTE(review): no 'PUBLIS' command exists in load_commands (route publish
    # is 'PUBLSH') -- possibly a planned verb; confirm before removing.
    helper['PUBLIS'] = 'PUBLISH SCHEDULE'
    helper['PXSCHD'] = 'EXPORT ALL SCHEDULES'
    helper['PXTABL'] = 'EXPORT TIMETABLE'
    #section
    helper['ADSECT'] = 'ADD ROUTE SECTION'
    helper['DXSECT'] = 'DELETE ROUTE SECTION'
    helper['LDROUT'] = 'LIST DETAIL FOR SELECTED ROUTE'
    helper['LSROUT'] = 'LIST SECTIONS FOR ROUTE'
    helper['LXSECT'] = 'SHOW ALL SECTIONS'
    helper['PDROUT'] = 'PRINT DETAIL FOR SELECTED ROUTE'
    helper['PXSECT'] = 'EXPORT SECTIONS FOR ALL ROUTES'
    #stations
    helper['ADSTAX'] = 'ADD STATION'
    helper['CHSTAX'] = 'CHANGE STATION'
    helper['DXSTAX'] = 'DELETE STATION'
    helper['LISTAX'] = 'LIST STATIONS'
    helper['LXSTAX'] = 'SHOW STATIONS DATA'
    helper['PRSTAX'] = 'PRINT STATIONS'
    helper['PXSTAX'] = 'EXPORT STATIONS'
    #station types
    helper['ADSTAT'] = 'ADD STATION TYPE'
    helper['CHSTAT'] = 'CHANGE STATION TYPE'
    helper['DXSTAT'] = 'DELETE STATION TYPE'
    helper['LISTAT'] = 'LIST STATION TYPES'
    helper['PXSTAT'] = 'EXPORT STATION TYPES'
    helper['LXSTAT'] = 'SHOW STATION TYPE DATA'
    helper['PRSTAT'] = 'PRINT STATION TYPES'
    #timings
    helper['ADTIMS'] = 'ADD SCHEDULE TIMINGS'
    helper['CHTIMS'] = 'CHANGE SCHEDULE TIMINGS'
    helper['TIMING'] = 'LIST TIMINGS FOR SELECTED SCHEDULE'
    helper['LDTIMS'] = 'LIST TIMING RECORD DETAILS FOR SELECTED SCHEDULE'
    helper['PRTIMS'] = 'PRINT TIMINGS FOR SELECTED SCHEDULE'
    helper['PXTIMS'] = 'EXPORT TIMINGS FOR ALL SCHEDULES'
    #train
    helper['UTRAIN'] = 'SET UNSCHEDULED TRAIN'
    helper['STRAIN'] = 'SET SCHEDULED TRAIN'
    helper['ETRAIN'] = 'SET EXTRA TRAIN'
    helper['ACARXT'] = 'ALLOCATE CAR TO TRAIN'
    helper['ALOCOT'] = 'ALLOCATE LOCO TO TRAIN'
    helper['LTRAIN'] = 'START TRAIN AT LATER ORIGIN'
    helper['LINEUP'] = 'REPORT LINE-UP'
    helper['REPORT'] = 'REPORT TRAIN'
    helper['TTRAIN'] = 'TERMINATE TRAIN'
    helper['XLOCOT'] = 'REMOVE LOCO FROM TRAIN'
    helper['XCARXT'] = 'REMOVE CAR FROM TRAIN'
    helper['TRAINS'] = 'LIST RUNNING TRAINS'
    helper['LICONS'] = 'REPORT CONSIST'
    helper['PRCONS'] = 'PRINT CONSIST'
    #users
    helper['ADUSER'] = 'ADD USER'
    helper['CHPASS'] = 'CHANGE PASSWORD'
    helper['CHUSER'] = 'CHANGE USER'
    helper['DXUSER'] = 'DELETE USER'
    helper['EDUSER'] = 'ENABLE/DISABLE USER'
    helper['LIUSER'] = 'LIST USERS'
    helper['LXUSER'] = 'SHOW USER DATA'
    helper['PRUSER'] = 'PRINT USERS'
    helper['PXUSER'] = 'EXPORT USER DATA'
    helper['RESETP'] = 'RESET PASSWORD'
    #warehouses
    helper['ADWARE'] = 'ADD WAREHOUSE'
    helper['CHWARE'] = 'CHANGE WAREHOUSE DETAILS'
    helper['CPWARE'] = 'CHANGE PRODUCTION AT WAREHOUSE'
    helper['DXWARE'] = 'DELETE WAREHOUSE'
    helper['LIWARE'] = 'LIST WAREHOUSES'
    helper['LDWARE'] = 'LIST WAREHOUSE DETAILS'
    helper['LSWARE'] = 'WAREHOUSES AT STATIONS'
    helper['LXWARE'] = 'SHOW WAREHOUSE DATA'
    helper['PRWARE'] = 'PRINT WAREHOUSES'
    helper['PXWARE'] = 'EXPORT WAREHOUSES'
    #action
    helper['QUIT'] = 'EXIT MOPS'
    helper['EXIT'] = 'EXIT MOPS'
    return helper
def assist():
    """Print an alphabetically sorted list of all commands with a short
    description of each, pausing every 23 lines so the output does not
    scroll off the screen.
    """
    shown = 0
    commands = load_commands({})
    helpers = load_helper({})
    for entry in sorted(commands.keys()):
        shown = shown + 1
        if shown > 23:
            # NOTE(review): raw_input is Python 2 only; under Python 3 this
            # must be input() -- confirm the target interpreter.
            raw_input(' ... HIT ENTER TO CONTINUE')
            shown = 0
        # Not every command is guaranteed a helper entry; fall back to the
        # bare command name. (Was a bare except; narrowed to KeyError so
        # unrelated errors are no longer swallowed.)
        try:
            print(entry + ' ' + helpers[entry])
        except KeyError:
            print(entry)
    print(' ... END OF LIST ...')
def get_helper(command):
    """Return the short description (helper text) for *command*, or the
    string 'NOT FOUND' when the command has no helper entry.
    """
    helpers = load_helper({})
    # Narrowed from a bare except: only a missing key means "not found";
    # any other failure should surface instead of being swallowed.
    try:
        return helpers[command]
    except KeyError:
        return 'NOT FOUND'
|
# Задача: Дан массив целых чисел. Вывести максимальную сумму элементов в массиве.
# Суммировать элементы можно только последовательно.
# Пример: [-1, 10, -9, 5, 6, -10]
# Вывод: 11
def max_sum(a):
    """Return the maximum sum of two adjacent elements of *a*.

    For a single-element list the element itself is returned.
    """
    # Bug fix: the original tested `a == 1` (comparing the list itself to
    # the integer 1, always False); the length identifies the 1-element case.
    if len(a) == 1:
        return a[0]
    return max(a[i] + a[i + 1] for i in range(len(a) - 1))
assert max_sum([-1, 10, -9, 5, 6, -10]) == 11
""" base.py: Base class for differentiable neural network layers."""
class Layer(object):
    """Abstract base for a differentiable neural-network layer.

    Concrete layers must implement the forward pass, the backward pass,
    and the parameter update; each abstract method raises
    NotImplementedError here.
    """
    def forward_pass(self, input_):
        """Compute and store the layer output for *input_*."""
        raise NotImplementedError()
    def backward_pass(self, err):
        """Propagate and store the error signal *err*."""
        raise NotImplementedError()
    def update_params(self, learning_rate):
        """Adjust the layer weights using the stored errors."""
        raise NotImplementedError()
|
def get_distance_matrix(orig, edited):
    """Build the full Levenshtein distance matrix between two sequences.

    Cell [r][c] holds the edit distance between the first r elements of
    *orig* and the first c elements of *edited*; the bottom-right cell is
    the total edit distance.
    """
    rows = len(orig) + 1
    cols = len(edited) + 1
    matrix = [[0] * cols for _ in range(rows)]
    # First column/row: distance from/to the empty prefix.
    for r in range(rows):
        matrix[r][0] = r
    for c in range(cols):
        matrix[0][c] = c
    # Fill the remaining cells from their three neighbours.
    for r in range(1, rows):
        for c in range(1, cols):
            cost = 0 if orig[r - 1] == edited[c - 1] else 1
            matrix[r][c] = min(
                matrix[r - 1][c] + 1,         # deletion
                matrix[r][c - 1] + 1,         # insertion
                matrix[r - 1][c - 1] + cost,  # substitution / match
            )
    return matrix
class Compare:
    """Alignment between an original and an edited sequence.

    Uses the Levenshtein distance matrix (via get_distance_matrix) to
    compute the edit distance and, on demand, element-by-element
    alignment lists labelling each position as a deletion ('D'),
    insertion ('I'), substitution ('S') or match (' ').
    """
    def __init__(self, original, edited):
        self.original = original
        self.edited = edited
        self.distance_matrix = get_distance_matrix(original, edited)
        i = len(self.distance_matrix) - 1
        j = len(self.distance_matrix[i]) - 1
        # The bottom-right cell of the matrix holds the edit distance.
        self.edit_distance = self.distance_matrix[i][j]
        self.num_orig_elements = i
    def __repr__(self):
        edited_str = str(self.edited)
        original_str = str(self.original)
        # Bug fix: truncate to the FIRST 10 characters; the original code
        # sliced [10:], which dropped the first 10 instead of keeping them.
        if len(edited_str) > 10:
            edited_str = edited_str[:10] + " ..."
        if len(original_str) > 10:
            original_str = original_str[:10] + " ..."
        return "Compare({}, {})".format(edited_str, original_str)
    def set_alignment_strings(self):
        """Backtrack through the distance matrix and store the aligned
        element lists plus the per-position label list on the instance."""
        original = self.original
        edited = self.edited
        i = self.num_orig_elements
        j = len(self.edited)
        distance_matrix = self.distance_matrix
        num_deletions = 0
        num_insertions = 0
        num_substitutions = 0
        align_orig_elements = []
        align_edited_elements = []
        align_label_str = []
        # Start at the cell containing the edit distance and walk back to
        # (0, 0), classifying each step as a deletion, insertion, or
        # substitution/match.
        while i or j:
            # Deletion. Bug fix: require i > 0 so that the i - 1 index can
            # never wrap around to the LAST row once the first row has been
            # reached and misclassify the remaining steps.
            if i and distance_matrix[i][j] == distance_matrix[i - 1][j] + 1:
                num_deletions += 1
                align_orig_elements.append(original[i - 1])
                align_edited_elements.append(" ")
                align_label_str.append('D')
                i -= 1
            # Insertion (same wrap-around guard for the column index).
            elif j and distance_matrix[i][j] == distance_matrix[i][j - 1] + 1:
                num_insertions += 1
                align_orig_elements.append(" ")
                align_edited_elements.append(edited[j - 1])
                align_label_str.append('I')
                j -= 1
            # Match or substitution (both i > 0 and j > 0 here).
            else:
                orig_element = original[i - 1]
                edited_element = edited[j - 1]
                if orig_element != edited_element:
                    num_substitutions += 1
                    label = 'S'
                else:
                    label = ' '
                align_orig_elements.append(orig_element)
                align_edited_elements.append(edited_element)
                align_label_str.append(label)
                i -= 1
                j -= 1
        # The walk was back-to-front; restore left-to-right order.
        align_orig_elements.reverse()
        align_edited_elements.reverse()
        align_label_str.reverse()
        self.align_orig_elements = align_orig_elements
        self.align_edited_elements = align_edited_elements
        self.align_label_str = align_label_str
    def show_changes(self):
        """Return the edited sequence rendered as a string with every
        insertion or substitution (and every deleted original element)
        wrapped in CORRECTION tags by changed()."""
        if not hasattr(self, 'align_orig_elements'):
            self.set_alignment_strings()
        # (The original had this assertion twice; one copy is enough.)
        assert len(self.align_label_str) == len(self.align_edited_elements) == len(
            self.align_orig_elements), "different number of elements"
        print_string = ''
        for index in range(len(self.align_label_str)):
            label = self.align_label_str[index]
            if label == ' ':
                print_string += self.align_edited_elements[index] + ' '
            elif label == 'S' or label == 'I':
                print_string += changed(self.align_edited_elements[index]) + ' '
            else:
                # A deletion: show what the original contained.
                # NOTE(review): this branch adds no trailing space, unlike
                # the other two -- confirm that is intentional.
                print_string += changed(self.align_orig_elements[index])
        return print_string
def changed(plain_text):
    """Wrap *plain_text* in CORRECTION markup tags."""
    return "<CORRECTION>{}</CORRECTION>".format(plain_text)
|
# Suffix-to-singular replacement table for regular plural endings.
# NOTE(review): presumably consumed in declaration order so the more
# specific endings ('ches', 'shes', ...) win over the bare 's' fallback --
# confirm against the code that iterates this dict.
plural_suffixes = {
    'ches': 'ch',
    'shes': 'sh',
    'ies': 'y',
    'ves': 'fe',
    'oes': 'o',
    'zes': 'z',
    's': ''
}
# Specific whole words mapped directly to their singular form.
plural_words = {
    'pieces': 'piece',
    'bunches': 'bunch',
    'haunches': 'haunch',
    'flasks': 'flask',
    'veins': 'vein',
    'bowls': 'bowl'
}
__author__ = 'jwely'
__all__ = ["fetch_AVHRR"]
def fetch_AVHRR():
    """Fetch AVHRR-pathfinder data via FTP.

    Server: ftp://ftp.nodc.noaa.gov/pub/data.nodc/pathfinder/

    NOTE: still a stub -- it only announces itself and returns None.
    """
    print("this function is an unfinished stub!")
    return
# change this file to add additional keywords
def make_states(TodoState):
    """Build the bidirectional mappings between TodoState members and
    their textual names.

    Returns (STATE_TO_NAME, NAME_TO_STATE). Display names are German;
    NAME_TO_STATE additionally accepts English and colloquial aliases,
    all in lower case.
    """
    # do not remove any of these
    # any of these mappings must be in NAME_TO_STATE as well
    STATE_TO_NAME = {
        TodoState.open: "offen",
        TodoState.waiting: "wartet auf Rückmeldung",
        TodoState.in_progress: "in Bearbeitung",
        TodoState.after: "ab",
        TodoState.before: "vor",
        TodoState.orphan: "verwaist",
        TodoState.done: "erledigt",
        TodoState.rejected: "abgewiesen",
        TodoState.obsolete: "obsolet"
    }
    # the text version has to be in lower case
    # Please don't add something that matches a date
    NAME_TO_STATE = {
        "offen": TodoState.open,
        "open": TodoState.open,
        "wartet auf rückmeldung": TodoState.waiting,
        "wartet": TodoState.waiting,
        "waiting": TodoState.waiting,
        "in bearbeitung": TodoState.in_progress,
        "bearbeitung": TodoState.in_progress,
        "läuft": TodoState.in_progress,
        "in progress": TodoState.in_progress,
        "ab": TodoState.after,
        "erst ab": TodoState.after,
        "nicht vor": TodoState.after,
        "wiedervorlage": TodoState.after,
        "after": TodoState.after,
        "not before": TodoState.after,
        "vor": TodoState.before,
        "bis": TodoState.before,
        "nur vor": TodoState.before,
        "nicht nach": TodoState.before,
        "before": TodoState.before,
        "not after": TodoState.before,
        "verwaist": TodoState.orphan,
        "orphan": TodoState.orphan,
        "orphaned": TodoState.orphan,
        "erledigt": TodoState.done,
        "fertig": TodoState.done,
        "done": TodoState.done,
        "abgewiesen": TodoState.rejected,
        "abgelehnt": TodoState.rejected,
        "passiert nicht": TodoState.rejected,
        "nie": TodoState.rejected,
        "niemals": TodoState.rejected,
        "rejected": TodoState.rejected,
        "obsolet": TodoState.obsolete,
        "veraltet": TodoState.obsolete,
        "zu spät": TodoState.obsolete,
        "obsolete": TodoState.obsolete
    }
    return STATE_TO_NAME, NAME_TO_STATE
def make_state_glyphes(TodoState):
    """Map each TodoState member to the glyph icon name shown for it."""
    glyph_pairs = (
        (TodoState.open, "unchecked"),
        (TodoState.waiting, "share"),
        (TodoState.in_progress, "edit"),
        (TodoState.after, "log-out"),
        (TodoState.before, "log-in"),
        (TodoState.orphan, "modal-window"),
        (TodoState.done, "check"),
        (TodoState.rejected, "remove-circle"),
        (TodoState.obsolete, "ban-circle"),
    )
    return dict(glyph_pairs)
|
# Interactive registry: reads name/age/sex records until the user stops,
# then reports the number of people, the average age, the registered
# women, and everyone older than the average.
lista = list()
dic = dict()
while True:
    dic["nome"] = str(input('Nome: '))
    dic["idade"] = int(input('Idade: '))
    # [0] keeps only the first letter; empty input would raise IndexError.
    dic["sexo"] = str(input('Sexo [F/M]: ')).strip().upper()[0]
    lista.append(dic.copy())
    dic.clear()
    # Robustness fix: the original indexed [0] (crash on empty input) and
    # used the `resp in 'N'` membership trick; test the first letter
    # explicitly instead.
    resp = str(input('Quer continuar? S/N: ')).strip().upper()
    if resp.startswith('N'):
        break
print('-=' * 30)
print(lista)
print('-=' * 30)
print(f'Foram cadastradas {len(lista)} pessoas.')
# The loop above always stores at least one record, so lista is non-empty.
soma = sum(d["idade"] for d in lista)
media = soma / len(lista)
print(f'A média de idade do grupo é {media:.1f} anos.')
print('As mulheres cadastradas foram: ', end='')
for d in lista:
    # Explicit equality instead of `in 'F'` (which is also True for '').
    if d["sexo"] == 'F':
        print(f'{d["nome"]}', end=' ')
print()
print('As pessoas com idade acima da média são ', end='')
for d in lista:
    if d["idade"] > media:
        print(f'{d["nome"]} com {d["idade"]} anos.', end=' ')
print()
print('-=' * 30)
print('<<< PROGRAMA ENCERRADO >>>')
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ============================================================================
# Erfr - One-time pad encryption tool
# Substitution-box core module
# Copyright (C) 2018 by Ralf Kilian
# Distributed under the MIT License (https://opensource.org/licenses/MIT)
#
# Website: http://www.urbanware.org
# GitHub: https://github.com/urbanware-org/erfr
# ============================================================================
__version__ = "4.3.3"
# Forward substitution box of the Rijndael (AES) cipher: a fixed
# permutation of the 256 byte values, listed row-major.
FSB_RIJNDAEL = [
    0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5,
    0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76,
    0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0,
    0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0,
    0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc,
    0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15,
    0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a,
    0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75,
    0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0,
    0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84,
    0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b,
    0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf,
    0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85,
    0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8,
    0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5,
    0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2,
    0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17,
    0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73,
    0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88,
    0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb,
    0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c,
    0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79,
    0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9,
    0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08,
    0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6,
    0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a,
    0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e,
    0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e,
    0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94,
    0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf,
    0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68,
    0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16]
# Inverse substitution box of the Rijndael (AES) cipher: presumably the
# inverse permutation of FSB_RIJNDAEL above -- confirm against FIPS-197.
ISB_RIJNDAEL = [
    0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38,
    0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb,
    0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87,
    0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb,
    0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d,
    0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e,
    0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2,
    0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25,
    0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16,
    0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92,
    0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda,
    0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84,
    0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a,
    0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06,
    0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02,
    0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b,
    0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea,
    0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73,
    0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85,
    0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e,
    0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89,
    0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b,
    0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20,
    0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4,
    0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31,
    0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f,
    0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d,
    0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef,
    0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0,
    0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61,
    0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26,
    0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d]
def get_version():
    """Return the version string of this module."""
    return __version__
# EOF
|
'''
Task "Even elements"

Condition:
Print all even elements of a list. Use a for loop over the list
elements themselves, not over their indices!
'''
tokens = input().split()
# Lists are mutable -- convert every token to int before filtering.
numbers = [int(token) for token in tokens]
for value in numbers:
    if value % 2 == 0:
        print(value, end=' ')
# course developers' variant
s = input()
a = [int(s) for s in s.split()]  # everything is an object; s is rebound
for value in a:
    if int(value) % 2 == 0:
        print(value, end=' ')
# Svetlana Tetyusheva's variant
a = [int(token) for token in input().split()]
for value in a:
    if value % 2 == 0:
        print(value, end=' ')
|
def insert_space(msg, idx):
    """Insert a single space into *msg* before position *idx*;
    print and return the result."""
    result = "{} {}".format(msg[:idx], msg[idx:])
    print(result)
    return result
def reverse(msg, substring):
    """If *substring* occurs in *msg*, remove its first occurrence and
    append the substring reversed; otherwise print "error".
    Prints the (possibly unchanged) message and returns it."""
    if substring not in msg:
        print("error")
        return msg
    result = msg.replace(substring, "", 1) + substring[::-1]
    print(result)
    return result
def change_all(msg, old_substring, new_substring):
    """Replace every occurrence of *old_substring* in *msg* with
    *new_substring*; print and return the result."""
    result = msg.replace(old_substring, new_substring)
    print(result)
    return result
concealed_msg = input()
command = input()
# Process commands of the form "Operation:|:arg1[:|:arg2]" until "Reveal".
while command != "Reveal":
    operation, *args = command.split(":|:")
    if operation == "InsertSpace":
        concealed_msg = insert_space(concealed_msg, int(args[0]))
    elif operation == "Reverse":
        concealed_msg = reverse(concealed_msg, args[0])
    elif operation == "ChangeAll":
        concealed_msg = change_all(concealed_msg, args[0], args[1])
    command = input()
print(f"You have a new text message: {concealed_msg}")
|
class Evaluator(object):
    """
    Evaluates a model on a Dataset, using metrics specific to the Dataset.
    """
    def __init__(self, dataset_cls, model, embedding, data_loader, batch_size, device, keep_results=False):
        # Store everything the evaluation loop needs on the instance.
        self.dataset_cls = dataset_cls
        self.model = model
        self.embedding = embedding
        self.data_loader = data_loader
        self.batch_size = batch_size
        self.device = device
        self.keep_results = keep_results
    def get_sentence_embeddings(self, batch):
        """Embed both sentences of *batch*, swapping axes 1 and 2 of each
        embedded tensor before returning the pair."""
        embedded = (self.embedding(batch.sentence_1),
                    self.embedding(batch.sentence_2))
        first, second = (e.transpose(1, 2) for e in embedded)
        return first, second
    def get_scores(self):
        """
        Get the scores used to evaluate the model.
        Should return ([score1, score2, ..], [score1_name, score2_name, ...]).
        The first score is the primary score used to determine if the model has improved.
        """
        raise NotImplementedError('Evaluator subclass needs to implement get_score')
|
# Time: O(n)
# Space: O(h)
class Solution(object):
    def diameterOfBinaryTree(self, root):
        """
        :type root: TreeNode
        :rtype: int
        """
        # The diameter is the second element of the (height, diameter) pair.
        return self.depth(root, 0)[1]
    def depth(self, root, diameter):
        """Return (height of *root*, best diameter found so far)."""
        if root is None:
            return 0, diameter
        left_height, diameter = self.depth(root.left, diameter)
        right_height, diameter = self.depth(root.right, diameter)
        # The longest path through this node joins both subtree heights.
        height = 1 + max(left_height, right_height)
        return height, max(diameter, left_height + right_height)
|
#
# Copyright (c) Members of the EGEE Collaboration. 2006-2009.
# See http://www.eu-egee.org/partners/ for details on the copyright holders.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors:
# Andrea Ceccanti (INFN)
#
commands_def="""<?xml version="1.0" encoding="UTF-8"?>
<voms-commands>
<command-group
name="User management commands"
shortname="user">
<command
name="list-users">
<description>list-users</description>
<help-string
xml:space="preserve">
Lists the VO users.</help-string>
</command>
<command
name="list-suspended-users">
<description>list-suspended-users</description>
<help-string
xml:space="preserve">
Lists the VO users that are currently suspended. (Requires VOMS Admin server >= 2.7.0)</help-string>
</command>
<command
name="list-expired-users">
<description>list-expired-users</description>
<help-string
xml:space="preserve">
Lists the VO users that are currently expired. (Requires VOMS Admin server >= 2.7.0)</help-string>
</command>
<command
name="count-expired-users">
<description>count-expired-users</description>
<help-string
xml:space="preserve">
Prints how many VO users are currently expired. (Requires VOMS Admin server >= 2.7.0)</help-string>
</command>
<command
name="count-suspended-users">
<description>count-suspended-users</description>
<help-string
xml:space="preserve">
Counts how many VO users are currently suspended. (Requires VOMS Admin server >= 2.7.0)</help-string>
</command>
<command
name="count-users">
<description>count-users</description>
<help-string
xml:space="preserve">
Counts how many users are in the VO. (Requires VOMS Admin server >= 2.7.0)</help-string>
</command>
<command
name="list-user-stats">
<description>list-user-stats</description>
<help-string
xml:space="preserve">
List users statistics for this VO. (Requires VOMS Admin server >= 2.7.0)</help-string>
</command>
<command
name="create-user">
<description>[options] create-user CERTIFICATE.PEM</description>
<help-string
xml:space="preserve">
Registers a new user in VOMS.
Personal information can be specified with the following options:
name, surname, address, institution, phone-number.
(Personal info submission requires VOMS Admin server >= 2.7.0)
All these options must be provided when registering a new user,
or no option regarding personal information should be set.
Besides the personal information, information about user certificate
can be provided specifying a certificate file parameter.
When using the --nousercert option, then four parameters are
required (DN CA CN MAIL) to create the user.
Examples:
voms-admin --vo test --name Andrea --surname Ceccanti --institution IGI \\
--phoneNumber 243 --address "My Address" \\
create-user .globus/usercert.pem
voms-admin --vo test_vo create-user .globus/usercert.pem
voms-admin --nousercert --vo test_vo create-user \
'My DN' 'My CA' 'My CN' 'My Email'</help-string>
<arg
type="X509v2" />
</command>
<command
name="suspend-user">
<description>suspend-user USER REASON</description>
<help-string
xml:space="preserve">
Supends a VOMS user.
USER is either an X509 certificate file in PEM format,
or a DN, CA couple when the --nousercert option is set.
(Requires VOMS Admin server >= 2.7.0)
</help-string>
<arg type="User"/>
<arg type="String"/>
</command>
<command
name="restore-user">
<description>restore-user USER</description>
<help-string
xml:space="preserve">
Restores a VOMS user.
USER is either an X509 certificate file in PEM format,
or a DN, CA couple when the --nousercert option is set.
(Requires VOMS Admin server >= 2.7.0)
</help-string>
<arg type="User"/>
</command>
<command
name="restore-all-suspended-users">
<description>restore-all-suspended-users</description>
<help-string
xml:space="preserve">
Restores all the users currently suspended in the VOMS database. (Requires VOMS Admin server >= 2.7.0)</help-string>
</command>
<command
name="delete-user">
<description>delete-user USER</description>
<help-string
xml:space="preserve">
Deletes a user from VOMS, including all their attributes
and membership information.
USER is either an X509 certificate file in PEM format,
or a DN, CA couple when the --nousercert option is set.
Examples:
voms-admin --vo test_vo delete-user .globus/usercert.pem
voms-admin --nousercert --vo test_vo delete-user \
'My DN' 'MY CA'</help-string>
<arg
type="User" />
</command>
</command-group>
<command-group
name="Role management commands"
shortname="role">
<command
name="list-roles">
<description>list-roles</description>
<help-string
xml:space="preserve">
Lists the roles defined in the VO.</help-string>
</command>
<command
name="create-role">
<description>create-role ROLENAME</description>
<help-string
xml:space="preserve">
Creates a new role</help-string>
<arg
type="Role" />
</command>
<command
name="delete-role">
<description>delete-role ROLENAME</description>
<help-string
xml:space="preserve">
Deletes a role.</help-string>
<arg
type="Role" />
</command>
</command-group>
<command-group
name="Group management commands"
shortname="group">
<command
name="list-groups">
<description>list-groups</description>
<help-string
xml:space="preserve">
Lists all the groups defined in the VO.</help-string>
</command>
<command
name="list-sub-groups">
<description>list-sub-groups GROUPNAME</description>
<help-string
xml:space="preserve">
List the subgroups of GROUPNAME.</help-string>
<arg
type="Group" />
</command>
<command
name="create-group">
<description>[options] create-group GROUPNAME</description>
<help-string xml:space="preserve">
Creates a new group named GROUPNAME.
If the --description option is given, a description is registered
for the group in the VOMS database (requires VOMS Admin server >= 2.7.0).
Note that the vo root group part of the fully qualified group name
can be omitted, i.e., if the group to be created is called /vo/ciccio,
where /vo is the vo root group, this command accepts both the "ciccio"
and "/vo/ciccio" syntaxes.</help-string>
<arg
type="NewGroup" />
</command>
<command
name="delete-group">
<description>delete-group GROUPNAME</description>
<help-string
xml:space="preserve">
Deletes a group.</help-string>
<arg
type="Group" />
</command>
<command
name="list-user-groups">
<description>list-user-groups USER</description>
<help-string xml:space="preserve">
Lists the groups that USER is a member of. USER is either
an X509 certificate file in PEM format, or a DN, CA couple when the
--nousercert option is set.</help-string>
<arg
type="User" />
</command>
</command-group>
<command-group
name="Group membership management commands"
shortname="membership">
<command
name="add-member">
<description>add-member GROUPNAME USER</description>
<help-string xml:space="preserve">
Adds USER to the GROUPNAME group.
USER is either an X509 certificate file in PEM format,
or a DN, CA couple when the --nousercert option is set.</help-string>
<arg
type="Group" />
<arg
type="User" />
</command>
<command
name="remove-member">
<description>remove-member GROUPNAME USER</description>
<help-string xml:space="preserve">
Removes USER from the GROUPNAME group.
USER is either an X509 certificate file in PEM format,
or a DN, CA couple when the --nousercert option is set.</help-string>
<arg
type="Group" />
<arg
type="User" />
</command>
<command
name="list-members">
<description>list-members GROUPNAME</description>
<help-string
xml:space="preserve">
Lists all members of a group.</help-string>
<arg
type="Group" />
</command>
</command-group>
<command-group
name="Role assignment commands"
shortname="role-assign">
<command
name="assign-role">
<description>assign-role GROUPNAME ROLENAME USER</description>
<help-string xml:space="preserve">
Assigns role ROLENAME to user USER in group GROUPNAME.
USER is either an X509 certificate file in PEM format,
or a DN, CA couple when the --nousercert option is set.</help-string>
<arg
type="Group" />
<arg
type="Role" />
<arg
type="User" />
</command>
<command
name="dismiss-role">
<description>dismiss-role GROUPNAME ROLENAME USER
</description>
<help-string xml:space="preserve">
Dismiss role ROLENAME from user USER in group GROUPNAME.
USER is either an X509 certificate file in PEM format,
or a DN, CA couple when the --nousercert option is set.</help-string>
<arg
type="Group" />
<arg
type="Role" />
<arg
type="User" />
</command>
<command
name="list-users-with-role">
<description>list-users-with-role GROUPNAME ROLENAME
</description>
<help-string xml:space="preserve">
Lists all users with ROLENAME in GROUPNAME.</help-string>
<arg
type="Group" />
<arg
type="Role" />
</command>
<command
name="list-user-roles">
<description>list-user-roles USER</description>
<help-string xml:space="preserve">
Lists the roles that USER is assigned.
USER is either an X509 certificate file in PEM format,
or a DN, CA couple when the --nousercert option is set.</help-string>
<arg
type="User" />
</command>
</command-group>
<command-group
name="Attribute class management commands"
shortname="attr-class">
<command
name="create-attribute-class">
<description> create-attribute-class CLASSNAME DESCRIPTION UNIQUE
</description>
<help-string xml:space="preserve">
Creates a new generic attribute class named CLASSNAME, with
description DESCRIPTION.
UNIQUE is a boolean argument. If UNIQUE is true,
attribute values assigned to users for this class are checked for
uniqueness. Otherwise no checks are performed on user attribute values.
</help-string>
<arg
type="String" />
<arg
type="String"
nillable="true" />
<arg
type="Boolean"
nillable="true" />
</command>
<command
name="delete-attribute-class">
<description>delete-attribute-class CLASSNAME
</description>
<help-string xml:space="preserve">
Removes the generic attribute class CLASSNAME. All the
user, group and role attribute mappings will be deleted as well.
</help-string>
<arg
type="String" />
</command>
<command
name="list-attribute-classes">
<description>list-attribute-classes</description>
<help-string xml:space="preserve">
Lists the attribute classes defined for the VO.</help-string>
</command>
</command-group>
<command-group
name="Generic attribute assignment commands"
shortname="attrs">
<command
name="set-user-attribute">
<description> set-user-attribute USER ATTRIBUTE ATTRIBUTE_VALUE
</description>
<help-string xml:space="preserve">
Sets the generic attribute ATTRIBUTE value to
ATTRIBUTE_VALUE for user USER. USER is either an X509 certificate file
in PEM format, or a DN, CA couple when the --nousercert option is set.
</help-string>
<arg
type="User" />
<arg
type="String" />
<arg
type="String" />
</command>
<command
name="delete-user-attribute">
<description>delete-user-attribute USER ATTRIBUTE
</description>
<help-string xml:space="preserve">
Deletes the generic attribute ATTRIBUTE value from user
USER. USER is either an X509 certificate file in PEM format, or a DN,
CA couple when the --nousercert option is set.</help-string>
<arg
type="User" />
<arg
type="String" />
</command>
<command
name="list-user-attributes">
<description>list-user-attributes USER</description>
<help-string xml:space="preserve">
Lists the generic attributes defined for user USER. USER is
either an X509 certificate file in PEM format, or a DN, CA couple when
the --nousercert option is set.</help-string>
<arg
type="User" />
</command>
<command
name="set-group-attribute">
<description> set-group-attribute GROUP ATTRIBUTE ATTRIBUTE_VALUE
</description>
<help-string xml:space="preserve">
Sets the generic attribute ATTRIBUTE value to
ATTRIBUTE_VALUE for group GROUP.</help-string>
<arg
type="Group" />
<arg
type="String" />
<arg
type="String" />
</command>
<command
name="set-role-attribute">
<description> set-role-attribute GROUP ROLE ATTRIBUTE ATTRIBUTE_VALUE
</description>
<help-string xml:space="preserve">
Sets the generic attribute ATTRIBUTE value to
ATTRIBUTE_VALUE for role ROLE in group GROUP.</help-string>
<arg
type="Group" />
<arg
type="Role" />
<arg
type="String" />
<arg
type="String" />
</command>
<command
name="delete-group-attribute">
<description>delete-group-attribute GROUP ATTRIBUTE
</description>
<help-string xml:space="preserve">
Deletes the generic attribute ATTRIBUTE value from group
GROUP.</help-string>
<arg
type="Group" />
<arg
type="String" />
</command>
<command
name="list-group-attributes">
<description>list-group-attributes GROUP
</description>
<help-string xml:space="preserve">
Lists the generic attributes defined for group GROUP.</help-string>
<arg
type="Group" />
</command>
<command
name="list-role-attributes">
<description>list-role-attributes GROUP ROLE
</description>
<help-string xml:space="preserve">
Lists the generic attributes defined for role ROLE in group
GROUP.</help-string>
<arg
type="Group" />
<arg
type="Role" />
</command>
<command
name="delete-role-attribute">
<description> delete-role-attribute GROUP ROLE ATTRIBUTE</description>
<help-string xml:space="preserve">
Deletes the generic attribute ATTRIBUTE value from role
ROLE in group GROUP.</help-string>
<arg
type="Group" />
<arg
type="Role" />
<arg
type="String" />
</command>
</command-group>
<command-group
name="ACL management commands"
shortname="acl">
<command
name="get-ACL">
<description>get-ACL CONTEXT</description>
<help-string xml:space="preserve">
Gets the ACL defined for voms context CONTEXT. CONTEXT may
be either a group (e.g. /groupname ) or a qualified role
(e.g./groupname/Role=VO-Admin).</help-string>
<arg
type="String" />
</command>
<command
name="get-default-ACL">
<description>get-default-ACL GROUP</description>
<help-string xml:space="preserve">
Gets the default ACL defined for group GROUP.</help-string>
<arg
type="Group" />
</command>
<command
name="add-ACL-entry">
<description> add-ACL-entry CONTEXT USER PERMISSION PROPAGATE
</description>
<help-string xml:space="preserve">
Adds an entry to the ACL for CONTEXT assigning PERMISSION
to user/admin USER. If PROPAGATE is true, the entry is
propagated to children contexts.
CONTEXT may be either a group (e.g. /groupname ) or
a qualified role (e.g./groupname/Role=VO-Admin).
USER is either an X509 certificate file in PEM format,
or a DN, CA couple when the --nousercert option is set.
PERMISSION is a VOMS permission expressed using the
VOMS-Admin 2.x format. Allowed permission values are:
ALL
CONTAINER_READ CONTAINER_WRITE
MEMBERSHIP_READ MEMBERSHIP_WRITE
ATTRIBUTES_READ ATTRIBUTES_WRITE
ACL_READ ACL_WRITE ACL_DEFAULT
REQUESTS_READ REQUESTS_WRITE
PERSONAL_INFO_READ PERSONAL_INFO_WRITE
SUSPEND
Multiple permissions can be assigned by combining them
in a comma separated list, e.g.:
"CONTAINER_READ,MEMBERSHIP_READ"
Special meaning DN,CA couples (to be used with
the --nousercert option set) are listed hereafter:
If DN is ANYONE and CA is VOMS_CA, an entry will be created
that assigns the specified PERMISSION to to any
authenticated user (i.e., any client that authenticates
with a certificates signed by a trusted CA).
if CA is GROUP_CA, DN is interpreted as a group and entry
will be assigned to members of such group.
if CA is ROLE_CA, DN is interpreted as a qualified role
(i.e., /test_vo/Role=TestRole), the entry will be assigned
to VO members that have the given role in the given group.
Examples:
voms-admin --vo test_vo add-ACL-entry /test_vo \\
.globus/usercert.pem ALL true
(The above command grants full rights to the user identified by
'.globus/usercert.pem' on the whole VO, since PROPAGATE is true)
voms-admin --nousercert --vo test_vo add-ACL-entry /test_vo \\
'ANYONE' 'VOMS_CA' 'CONTAINER_READ,MEMBERSHIP_READ' true
(The above command grants READ rights on VO structure and membership
to any authenticated user on the whole VO, since PROPAGATE is true)
To get more detailed information about Voms admin AuthZ
framework, either consult the voms-admin user's guide
or type:
voms-admin --help-acl</help-string>
<arg
type="String" />
<arg
type="User" />
<arg
type="Permission" />
<arg
type="Boolean" />
</command>
<command
name="add-default-ACL-entry">
<description> add-default-ACL-entry GROUP USER PERMISSION</description>
<help-string xml:space="preserve">
Adds an entry to the default ACL for GROUP assigning
PERMISSION to user/admin USER.
USER is either an X509 certificate file
in PEM format, or a DN, CA couple when the --nousercert option is set.
PERMISSION is a VOMS permission expressed using the VOMS-Admin 2.x
format.
Allowed permission values are:
ALL
CONTAINER_READ CONTAINER_WRITE
MEMBERSHIP_READ MEMBERSHIP_WRITE
ATTRIBUTES_READ ATTRIBUTES_WRITE
ACL_READ ACL_WRITE ACL_DEFAULT
REQUESTS_READ REQUESTS_WRITE
PERSONAL_INFO_READ PERSONAL_INFO_WRITE
SUSPEND
Multiple permissions can be assigned by combining them
in a comma separated list, e.g.:
"CONTAINER_READ,MEMBERSHIP_READ"
Special meaning DN,CA couples are listed hereafter:
If DN is ANYONE and CA is VOMS_CA, an entry will be created that
assigns the specified PERMISSION to to any authenticated user (i.e.,
any client that authenticates with a certificates signed by
a trusted CA).
if CA is GROUP_CA, DN is interpreted as a group and entry will be
assigned to members of such group.
if CA is ROLE_CA, DN is interpreted as a qualified role
(i.e., /test_vo/Role=TestRole), the entry will be assigned to VO
members that have the given role in the given group.
To get more detailed information about Voms admin AuthZ framework,
either consult the voms-admin user's guide or type:
voms-admin --help-acl</help-string>
<arg
type="Group" />
<arg
type="User" />
<arg
type="Permission" />
</command>
<command
name="remove-ACL-entry">
<description>remove-ACL-entry CONTEXT USER PROPAGATE
</description>
<help-string xml:space="preserve">
Removes the entry from the ACL for CONTEXT for user/admin USER.
If PROPAGATE is true, the entry is removed also from children
contexts.
CONTEXT may be either a group (e.g. /groupname ) or a
qualified role (e.g./groupname/Role=VO-Admin).
USER is either an X509 certificate file
in PEM format, or a DN, CA couple when the --nousercert option is set.
Special meaning DN,CA couples are listed hereafter:
If DN is ANYONE and CA is VOMS_CA, an entry will be created that
assigns the specified PERMISSION to to any authenticated user (i.e.,
any client that authenticates with a certificates signed by
a trusted CA).
if CA is GROUP_CA, DN is interpreted as a group and entry will be
assigned to members of such group.
if CA is ROLE_CA, DN is interpreted as a qualified role
(i.e., /test_vo/Role=TestRole), the entry will be assigned to VO
members that have the given role in the given group.
Examples:
voms-admin --nousercert --vo test_vo remove-ACL-entry \\
/test_vo 'ANYONE' 'VOMS_CA' true
(The above command removes any right on the VO from any authenticated
user)
To get more detailed information about Voms admin AuthZ framework,
either consult the voms-admin user's guide or type:
voms-admin --help-acl</help-string>
<arg
type="String" />
<arg
type="User" />
<arg
type="Boolean" />
</command>
<command
name="remove-default-ACL-entry">
<description>remove-default-ACL-entry GROUP USER
</description>
<help-string xml:space="preserve">
Removes the entry for user/admin USER from the default ACL
for GROUP.
USER is either an X509 certificate file in PEM format, or a DN,
CA couple when the --nousercert option is set.
Special meaning DN,CA couples are listed hereafter:
If DN is ANYONE and CA is VOMS_CA, an entry will be created that
assigns the specified PERMISSION to to any authenticated user (i.e.,
any client that authenticates with a certificates signed by
a trusted CA).
if CA is GROUP_CA, DN is interpreted as a group and entry will be
assigned to members of such group.
if CA is ROLE_CA, DN is interpreted as a qualified role
(i.e., /test_vo/Role=TestRole), the entry will be assigned to VO
members that have the given role in the given group.
To get more detailed information about Voms admin AuthZ framework,
either consult the voms-admin user's guide or type:
voms-admin --help-acl</help-string>
<arg
type="Group" />
<arg
type="User" />
</command>
</command-group>
<command-group
name="Other commands"
shortname="other">
<command
name="get-vo-name">
<description>get-vo-name</description>
<help-string xml:space="preserve">
This command returns the name of the contacted vo.</help-string>
</command>
<command
name="list-cas">
<description>list-cas</description>
<help-string xml:space="preserve">
Lists the certificate authorities accepted by the VO.</help-string>
</command>
</command-group>
<command-group
name="Certificate management commands"
shortname="Certificate"
>
<command
name="add-certificate">
<description>add-certificate USER CERT</description>
<help-string xml:space="preserve">
Binds a certificate to an existing VO user.
This operation may take either two pem certficate files as argument, or,
if the --nousercert option is set, two DN CA couples.
Example:
voms-admin --vo infngrid add-certificate my-cert.pem my-other-cert.pem
voms-admin --vo infngrid --nousercert add-certificate \\
'/C=IT/O=INFN/OU=Personal Certificate/L=CNAF/CN=Andrea Ceccanti' '/C=IT/O=INFN/CN=INFN CA' \\
'/C=IT/ST=Test/CN=user0/Email=andrea.ceccanti@cnaf.infn.it' '/C=IT/ST=Test/L=Bologna/O=Voms-Admin/OU=Voms-Admin testing/CN=Test CA'
</help-string>
<arg type="User"/>
<arg type="User"/>
</command>
<command
name="remove-certificate">
<description>remove-certificate USER</description>
<help-string xml:space="preserve">
Unbinds a certificate from an existing VO user.
This operation takes either a pem certificate as argument, or,
if the --nousercert option is set, a DN CA couple.
Example:
voms-admin --vo infngrid remove-certificate my-cert.pem
voms-admin --vo infngrid --nousercert remove-certificate \\
'/C=IT/O=INFN/OU=Personal Certificate/L=CNAF/CN=Andrea Ceccanti' '/C=IT/O=INFN/CN=INFN CA'
</help-string>
<arg type="User"/>
</command>
<command
name="suspend-certificate">
<description>suspend-certificate USER REASON</description>
<help-string xml:space="preserve">
Suspends a user certificate, and specifies a reason for the suspension.
This operation takes, for the first argument, either a pem certificate as argument, or,
if the --nousercert option is set, a DN CA couple.
Example:
voms-admin --vo infngrid suspend-certificate usercert.pem 'Security incident!'
voms-admin --vo infngrid --nousercert suspend-certificate \\
'/C=IT/O=INFN/OU=Personal Certificate/L=CNAF/CN=Andrea Ceccanti' '/C=IT/O=INFN/CN=INFN CA' \\
'Security incident!'
</help-string>
<arg type="User"/>
<arg type="String"/>
</command>
<command
name="restore-certificate">
<description>restore-certificate USER</description>
<help-string xml:space="preserve">
Restores a user certificate.
This operation takes, for the first argument, either a pem certificate as argument, or,
if the --nousercert option is set, a DN CA couple.
Example:
voms-admin --vo infngrid restore-certificate usercert.pem
voms-admin --vo infngrid --nousercert restore-certificate \\
'/C=IT/O=INFN/OU=Personal Certificate/L=CNAF/CN=Andrea Ceccanti' '/C=IT/O=INFN/CN=INFN CA'
</help-string>
<arg type="User"/>
</command>
<command
name="get-certificates">
<description>get-certificates USER</description>
<help-string xml:space="preserve">
Lists the certificates associated to a user.
This operation takes either a pem certificate as argument, or, if the --nousercert option is set, a DN CA couple.
Example:
voms-admin --vo infngrid get-certificates usercert.pem
voms-admin --vo infngrid --nousercert get-certificates \\
'/C=IT/O=INFN/OU=Personal Certificate/L=CNAF/CN=Andrea Ceccanti' '/C=IT/O=INFN/CN=INFN CA'
</help-string>
<arg
type="User"/>
</command>
</command-group>
</voms-commands>""" |
#!/usr/bin/env python
# coding: utf-8
# # Calculating protein mass, from [Rosalind.info](https://www.rosalind.info)
#
# (Specific exercise can be found at: http://rosalind.info/problems/prtm/)
#
# ## My personal interpretation
#
# 1. The exercise is about calculating the molecular weight of a protein
#
# 2. The protein is represented as an amino acid sequence (a string of letters)
#
# 3. Molecular weights per amino acid are given in a table of monoisotopic masses
#
# 4. The practical side of the exercise comes down to reading the table with masses and then translating the letters from a given sequence into numbers using the table and adding the numbers up.
#
# I think I can do this in three functions:
#
# 1. Read the monoisotopic mass table and convert to a dictionary
#
# 2. Read the text file with the amino acid sequence
#
# 3. Take the amino acid sequence and mass table to calculate the mass
# In[5]:
def read_monoisotopic_mass_table(input_file):
    """
    Read a tab-separated input file with amino acids (as capital letters)
    in the first column and molecular weights (as floating point numbers)
    in the second column.

    Blank lines are skipped (the original crashed on them).

    :param input_file: path to the mass-table file
    :return: dict mapping amino-acid letter -> monoisotopic mass (float)
    """
    mass_dict = {}
    with open(input_file, "r") as read_file:
        for line in read_file:
            elements = line.split()
            if not elements:
                continue  # tolerate blank/whitespace-only lines
            amino_acid = elements[0]
            weight = float(elements[1])
            mass_dict[amino_acid] = weight
    return mass_dict
# In[6]:
# Load the lookup table once; it is reused by every weight calculation below.
mass_dict = read_monoisotopic_mass_table("data/monoisotopic_mass_table.tsv")
print(mass_dict)
# So far so good, now make the second function:
# In[13]:
def read_amino_acid_sequence(input_file):
    """
    Read a text file with an amino acid sequence and return it as a string.

    Each line is stripped of surrounding whitespace (the trailing newline
    would otherwise be interpreted as an amino acid) and the lines are
    concatenated, so a sequence wrapped over several lines is read in full.
    The previous version silently kept only the LAST line of the file;
    single-line files behave exactly as before.

    :param input_file: path to the sequence file
    :return: the amino-acid sequence as a single string
    """
    with open(input_file, "r") as read_file:
        return "".join(line.strip() for line in read_file)
# In[14]:
# Smoke-test the reader on the worked example from the exercise page.
example_protein = read_amino_acid_sequence("data/Example_calculating_protein_mass.txt")
print(example_protein)
# Now that works as well, time to make the final function: the one that converts the amino acid sequence to its weight.
# In[15]:
def calculate_protein_weight(protein, mass_table):
    """
    Sum the per-residue masses of *protein*.

    :param protein: amino-acid sequence (string of one-letter codes)
    :param mass_table: dict mapping amino-acid letter -> mass
    :return: total molecular weight
    """
    return sum(mass_table[residue] for residue in protein)
# In[16]:
# First run: agrees with the example value except for float rounding noise.
calculate_protein_weight(example_protein, mass_dict)
# Now this answer looks good, except the rounding of the decimals is slightly different from the example on rosalind.info... Perhaps I should just round the answer to 3 decimals?
# In[17]:
round(calculate_protein_weight(example_protein, mass_dict), 3)
# Perfect! Now let me just overwrite the function to incorporate the rounding:
# In[18]:
def calculate_protein_weight(protein, mass_table):
    """
    Sum the per-residue masses of *protein*, rounded to 3 decimals
    (matching the precision used by rosalind.info's expected answers).

    :param protein: amino-acid sequence (string of one-letter codes)
    :param mass_table: dict mapping amino-acid letter -> mass
    :return: total molecular weight rounded to 3 decimal places
    """
    total = sum(mass_table[residue] for residue in protein)
    return round(total, 3)
# And let's give the actual exercise a shot with this!
# In[19]:
# Solve the graded Rosalind dataset and print the rounded mass.
test_protein = read_amino_acid_sequence("data/rosalind_prtm.txt")
molecular_weight = calculate_protein_weight(test_protein, mass_dict)
print(molecular_weight)
class PokepayResponse(object):
    """
    Thin wrapper copying the interesting fields off an HTTP response.

    All fields are plain attributes -- access them directly, e.g.
    ``resp.status_code`` or ``resp.body``.

    The previous version also defined accessor methods named ``body``,
    ``elapsed`` etc., but those were shadowed by the same-named instance
    attributes set in ``__init__`` and therefore uncallable dead code
    (``resp.body()`` raised TypeError); they have been removed.  Attribute
    access is unchanged, so existing callers keep working.
    """

    def __init__(self, response, response_body):
        # The (typically parsed) body is supplied separately from the raw
        # response object.
        self.body = response_body
        self.elapsed = response.elapsed
        self.status_code = response.status_code
        self.ok = response.ok
        self.headers = response.headers
        self.url = response.url
|
# Read two strings and exchange their first two characters with each other.
first = input("enter first string:")
second = input("enter second string:")
swapped_first = second[:2] + first[2:]
swapped_second = first[:2] + second[2:]
print("the new string after swapping first two charaters of both string:", (swapped_first + ' ' + swapped_second))
|
class Solution(object):
    # LC 986: intersect two sorted lists of pairwise-disjoint closed intervals
    # with a two-pointer sweep.
    def intervalIntersection(self, firstList, secondList):
        """
        :type firstList: List[List[int]]
        :type secondList: List[List[int]]
        :rtype: List[List[int]]
        """
        result = []
        p = q = 0
        while p < len(firstList) and q < len(secondList):
            # Candidate intersection of the current pair of intervals.
            lo = max(firstList[p][0], secondList[q][0])
            hi = min(firstList[p][1], secondList[q][1])
            # The intervals overlap exactly when the candidate is non-empty.
            if lo <= hi:
                result.append([lo, hi])
            # Advance whichever interval ends first; the other may still
            # intersect later intervals from the opposite list.
            if secondList[q][1] < firstList[p][1]:
                q += 1
            else:
                p += 1
        return result
# Read two integers and report their ordering:
# 1 -> first is larger, 0 -> equal, 2 -> second is larger.
x = int(input())
y = int(input())
if x > y:
    print(1)
elif x == y:
    print(0)
else:
    print(2)
|
# Recipe: build a dict from two parallel lists of keys and values.
keys = ['a', 'b', 'c']
values = [1, 2, 3]
# NOTE: the name "hash" shadows the builtin hash() for the rest of this module.
hash = dict(list(zip(keys, values)))
# Lazily, Python 2.3+, not 3.x (on 3.x zip is already an iterator):
hash = dict(zip(keys, values))
|
class Space():
    """Common definitions shared by observation and action spaces."""

    def sample(self, size=None, null=False):
        """Draw uniformly random element(s) of this space.

        Abstract: concrete subclasses must override this.
        """
        raise NotImplementedError
|
# Read a 3x3 integer matrix from the user, then print it as a grid.
matriz = [[0, 0, 0] for _ in range(3)]
for i in range(3):
    for j in range(3):
        matriz[i][j] = int(input(f'Digite o valor para posição [{i},{j}]: '))
print('-=' * 30)
for i in range(3):
    for j in range(3):
        # One centred, fixed-width cell per column.
        print(f'[{matriz[i][j]:^5}]', end='')
    print()  # line break after each row
def main(request, response):
    """Serve a small HTML page whose Origin-Policy header value is taken
    from the ``header`` query-string parameter (web-platform-tests helper).
    """
    policy = request.GET.first(b"header")
    response.headers.set(b"Origin-Policy", policy)
    response.headers.set(b"Content-Type", b"text/html")
    return u"""
    <!DOCTYPE html>
    <meta charset="utf-8">
    <title>Origin policy bad header subframe</title>
    """
|
# NICOS setup file: four analog input channels read via a Tango ADC server.
description = 'Refsans 4 analog 1 GPIO on Raspberry'
group = 'optional'
# NOTE(review): `setupname` is not defined in this file -- it is presumably
# injected by the NICOS setup loader when this file is executed; confirm.
tango_base = 'tango://%s:10000/test/ads/' % setupname
# Empty visibility tuple: the devices exist but are hidden from listings.
lowlevel = ()
devices = {
    '%s_ch1' % setupname : device('nicos.devices.entangle.Sensor',
        description = 'ADin0',
        tangodevice = tango_base + 'ch1',
        unit = 'V',
        fmtstr = '%.4f',
        visibility = lowlevel,
    ),
    '%s_ch2' % setupname : device('nicos.devices.entangle.Sensor',
        description = 'ADin1',
        tangodevice = tango_base + 'ch2',
        unit = 'V',
        fmtstr = '%.4f',
        visibility = lowlevel,
    ),
    '%s_ch3' % setupname : device('nicos.devices.entangle.Sensor',
        description = 'ADin2',
        tangodevice = tango_base + 'ch3',
        unit = 'V',
        fmtstr = '%.4f',
        visibility = lowlevel,
    ),
    '%s_ch4' % setupname : device('nicos.devices.entangle.Sensor',
        description = 'ADin3',
        tangodevice = tango_base + 'ch4',
        unit = 'V',
        fmtstr = '%.4f',
        visibility = lowlevel,
    ),
}
|
class Solution:
    """
    @param s: a string
    @return: the number of segments in a string
    """
    def countSegments(self, s):
        # split() with no argument collapses all runs of whitespace,
        # so each remaining piece is exactly one segment.
        segments = s.split()
        return len(segments)
# Emit 100 Java statements that populate a 10x10 Jacobi matrix from the
# Allfunc.cgIJ helper functions.
with open('JacobiMatrix.java', 'w') as out:
    for row in range(10):
        for col in range(10):
            out.write(f'jacobiMatrix.setEntry({row}, {col}, Allfunc.cg{row}{col}(currentApprox));\n')
|
#!/usr/bin/python2
# Copy every image whose path contains 'resized' (paths listed one per line
# in lst.txt) into combined/NNNNN.<label>.png, numbered sequentially.
with open('lst.txt', 'r') as listing:
    entries = listing.read().split('\n')
counter = 1
for path in entries:
    if 'resized' not in path:
        continue
    with open(path, 'r') as src:
        data = src.read()
    with open("combined/%05d.%s.png" % (counter, path.split('.')[1]), 'w') as dst:
        dst.write(data)
    counter += 1
|
class Task:
    """Base class for schedulable tasks; subclasses override the hooks."""

    def __init__(self):
        # Stray C-style semicolons removed; values unchanged.
        self.name = ""       # human-readable task name
        self.active = False  # whether the task is currently running

    def activate(self):
        """Hook called when the task starts; default does nothing."""
        pass

    def update(self, dt):
        """Hook called every tick with the elapsed time *dt*."""
        pass

    def is_complete(self):
        """Return whether the task has finished (base class: None)."""
        pass

    def close(self):
        """Hook called when the task is torn down."""
        pass
|
"""Generate AXT release artifacts."""
load("//build_extensions:remove_from_jar.bzl", "remove_from_jar")
load("//build_extensions:add_or_update_file_in_zip.bzl", "add_or_update_file_in_zip")
def axt_release_lib(
        name,
        deps,
        custom_package = None,
        proguard_specs = None,
        proguard_library = None,
        multidex = "off",
        jarjar_rules = "//build_extensions:noJarJarRules.txt",
        keep_spec = None,
        remove_spec = None,
        overlapping_jars = [],
        resource_files = None):
    """Generates release artifacts for a AXT library.

    Resulting output will be two files:
      name_no_deps.jar and name.aar

    Args:
      name: The target name
      deps: The dependencies that make up the library
      custom_package: Optional custom android package to use
      proguard_specs: Proguard to apply when building the jar
      proguard_library: Proguard to bundle with the jar
      multidex: multidex mode passed to the intermediate android_binary
      jarjar_rules: Optional file containing jarjar rules to be applied
      keep_spec: A regex to match items to retain in the jar. This is typically the
        root java namespace of the library.
      remove_spec: A regex to match items to remove from the jar.
      overlapping_jars: jars containing entries to be removed from the main jar.
        This is useful when the library has dependencies whose java package namespaces
        overlap with this jar. See remove_from_jar docs for more details.
      resource_files: res files to include in library
    """

    # The rules here produce a final .aar artifact and jar for external release.
    # It is a six-step pipeline:
    # 1. Produce a placeholder .aar
    # 2. Produce a .jar including all classes and all its dependencies, and optionally proguard it via
    #    proguard_specs
    # 3. Rename classes if necessary via jarjar
    # 4. Strip out external dependencies from .jar
    # 5. Optionally, add in the proguard_library files to be bundled in the .aar
    # 6. Update the classes.jar inside the .aar from step 1 with the .jar from step 3

    # Step 1. Generate initial shell aar. The generated classes.jar will be empty.
    # See
    # https://bazel.build/versions/master/docs/be/android.html#android_library,
    # name.aar
    native.android_library(
        name = "%s_initial" % name,
        manifest = "AndroidManifest.xml",
        resource_files = resource_files,
        visibility = ["//visibility:private"],
        custom_package = custom_package,
        testonly = 1,
        exports = deps,
    )

    # Step 2. Generate jar containing all classes including dependencies.
    native.android_binary(
        name = "%s_all" % name,
        testonly = 1,
        manifest = "AndroidManifest.xml",
        multidex = multidex,
        custom_package = custom_package,
        proguard_specs = proguard_specs,
        deps = [
            ":%s_initial" % name,
        ],
    )
    expected_output = ":%s_all_deploy.jar" % name
    if proguard_specs:
        expected_output = ":%s_all_proguard.jar" % name

    # Step 3. Rename classes via jarjar
    native.java_binary(
        name = "jarjar_bin",
        main_class = "org.pantsbuild.jarjar.Main",
        runtime_deps = ["@maven//:org_pantsbuild_jarjar"],
    )
    native.genrule(
        name = "%s_jarjared" % name,
        srcs = [expected_output],
        outs = ["%s_jarjared.jar" % name],
        cmd = ("$(location :jarjar_bin) process " +
               "$(location %s) '$<' '$@'") % jarjar_rules,
        tools = [
            jarjar_rules,
            ":jarjar_bin",
        ],
    )

    # Step 4. Strip out external dependencies. This produces the final name_no_deps.jar.
    remove_from_jar(
        name = "%s_no_deps" % name,
        jar = ":%s_jarjared.jar" % name,
        keep_spec = keep_spec,
        remove_spec = remove_spec,
        overlapping_jars = overlapping_jars,
    )
    expected_output = ":%s_initial.aar" % name
    if proguard_library:
        expected_output = "%s_with_proguard.aar" % name

        # Step 5. Add the proguard library file to the aar from the first step
        add_or_update_file_in_zip(
            name = "%s_add_proguard" % name,
            src = ":%s_initial.aar" % name,
            out = expected_output,
            update_path = "proguard.txt",
            update_src = proguard_library,
        )

    # Step 6. Update the .aar produced in the first step with the final .jar
    add_or_update_file_in_zip(
        name = name,
        src = expected_output,
        out = "%s.aar" % name,
        update_path = "classes.jar",
        update_src = ":%s_no_deps.jar" % name,
    )
|
'''
define some function to use.
'''
def bytes_to_int(bytes_string, order_type):
    '''
    Convert a byte string to an integer; thin wrapper over int.from_bytes.
    *order_type* is the byte order, 'big' or 'little'.
    '''
    return int.from_bytes(bytes_string, byteorder=order_type)
def bits_to_int(bit_string):
    '''
    Parse a string of '0'/'1' characters as a base-2 integer;
    thin wrapper over int(string, 2).
    '''
    return int(bit_string, 2)
"""dbcfg - Annon configuration
This is mutable object.
"""
# Mutable module-level configuration defaults for the annotation database.
dbcfg = {
    "created_on": None,
    "modified_on": None,
    "timestamp": None,
    "anndb_id": None,
    "rel_id": None,
    "dbname": None,
    "dbid": None,
    "allowed_file_type": ['.txt', '.csv', '.yml', '.json'],
    "allowed_image_type": ['.pdf', '.png', '.jpg', '.jpeg', '.gif'],
    "allowed_video_type": ['.mp4'],
    "dataset": {},
    "load_data_from_file": False,
    "train": [],
    "evaluate": [],
    "predict": [],
    "publish": [],
    "report": [],
    "description": "AI Dataset",
    "files": {},
    "id": "hmd",
    "name": "hmd",
    "problem_id": "hmd",
    "annon_type": "hmd",
    "dataclass": "AnnonDataset",
    "classes": "",
    "classinfo": None,
    "class_ids": None,
    "class_map": None,
    "num_classes": None,
    "splits": None,
    "stats": {},
    "summary": {},
    "metadata": {},
    # Set to a negative value to load all data; '0' loads no data at all.
    "data_read_threshold": -1,
    "db_dir": None,
    "return_hmd": None,
    "train_mode": "training",
    "test_mode": "inference",
    # Retired keys kept for reference:
    # "dnnarch": None,
    # "log_dir": "logs/<dnnarch>",
    # "framework_type": None,
    # "annotations": {"train": "", "val": "", "test": ""},
    # "images": {"train": "", "val": "", "test": ""},
    # "labels": {"train": "", "val": "", "test": ""},
    # "classinfo": {"train": "", "val": "", "test": ""},
}
def _calc_product(series, start_idx, end_idx):
product = 1
for digit in series[start_idx:end_idx + 1]:
product *= int(digit)
return product
def largest_product_in_series(num_digits, series):
largest_product = 0
for i in range(num_digits, len(series) + 1):
product = _calc_product(series, max(0, i - num_digits), i - 1)
largest_product = max(largest_product, product)
return largest_product |
class Solution:
    def countOfAtoms(self, formula: str) -> str:
        """Count the atoms of a chemical formula, e.g. "Mg(OH)2" -> "H2MgO2".

        Output lists elements alphabetically, appending the count only
        when it is greater than 1.
        """
        # Wrap the whole formula in parentheses so the recursive parser can
        # treat the top level like any other group.
        formula = "(" + formula + ")"
        l = len(formula)
        def mmerge(dst, src, xs):
            # Fold src's counts into dst, scaling src by the group multiplier xs.
            for k, v in src.items():
                t = dst.get(k, 0)
                dst[k] = v * xs + t
        def aux(st):
            # Parse one parenthesised group; *st* must index a '('.
            # Returns (index of the matching ')', dict of element counts).
            nonlocal formula, l
            res = {}
            st += 1
            while st < l and formula[st] != ')':
                if formula[st].isupper():
                    # Element symbol: one uppercase letter plus lowercase tail.
                    j = st + 1
                    while j < l and formula[j].islower():
                        j += 1
                    rp = j
                    # Optional multiplicity digits; absent means 1.
                    while j < l and formula[j].isdigit():
                        j += 1
                    x = 1 if j == rp else int(formula[rp: j])
                    x += res.get(formula[st: rp], 0)
                    res[formula[st: rp]] = x
                    st = j
                elif formula[st] == '(':
                    # Nested group: recurse, then read its trailing multiplier.
                    endp, rres = aux(st)
                    j = endp + 1
                    while j < l and formula[j].isdigit():
                        j += 1
                    xs = 1 if j == endp + 1 else int(formula[endp + 1: j])
                    mmerge(res, rres, xs)
                    st = j
            return st, res
        _, ans = aux(0)
        # Canonical output: sorted element names, counts only when > 1.
        lis = sorted(ans.keys())
        aans = []
        for s in lis:
            aans.append(s)
            t = ans[s]
            if t > 1:
                aans.append(str(t))
        return "".join(aans)
"""
N=300
%timeit one_out_product(xs)
201 µs ± 1.57 µs per loop (mean ± std. dev. of 7 runs, 10000 loops each)
%timeit one_out_product_fast(xs)
194 µs ± 1.36 µs per loop (mean ± std. dev. of 7 runs, 10000 loops each)
%timeit bluteforce(xs)
27.2 ms ± 610 µs per loop (mean ± std. dev. of 7 runs, 10 loops each)
N=1000
%timeit one_out_product(xs)
737 µs ± 7.12 µs per loop (mean ± std. dev. of 7 runs, 1000 loops each)
%timeit one_out_product_fast(xs)
707 µs ± 8.64 µs per loop (mean ± std. dev. of 7 runs, 1000 loops each)
%timeit bluteforce(xs)
329 ms ± 5.11 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
N=3000
%timeit one_out_product(xs)
2.31 ms ± 34.2 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)
%timeit one_out_product_fast(xs)
2.14 ms ± 55 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)
%timeit bluteforce(xs)
2.98 s ± 18.2 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
"""
MOD = 10 ** 9 + 7


def one_out_product(xs):
    """Return [product of all xs except xs[i], mod MOD, for each i].

    Builds prefix products (head) and suffix products (tail); the answer
    at position i is head[i-1] * tail[i+1].  The extra sentinel slot at
    index -1 (== n) holds 1 so the boundary cases fall out naturally.
    """
    n = len(xs)
    head = [0] * (n + 1)
    acc = 1
    for i in range(n):
        acc = acc * xs[i] % MOD
        head[i] = acc
    head[-1] = 1  # empty prefix for i == 0
    tail = [0] * (n + 1)
    acc = 1
    for i in reversed(range(n)):
        acc = acc * xs[i] % MOD
        tail[i] = acc
    tail[-1] = 1  # empty suffix for i == n - 1
    return [head[i - 1] * tail[i + 1] % MOD for i in range(n)]
def one_out_product_fast(xs, mod=10 ** 9 + 7):
    """Return [product of all xs except xs[i], mod *mod*, for each i].

    Two passes, no auxiliary prefix/suffix arrays: the first pass stores
    the prefix product in ret[i], the second folds in the suffix product.

    The modulus used to be the module-level constant MOD; it is now a
    keyword parameter defaulting to the same value (10**9 + 7), so the
    function is self-contained and existing callers are unaffected.
    """
    n = len(xs)
    ret = [1] * n
    prod = 1
    for i in range(n):
        # ret[i] = product of xs[:i] after this pass.
        ret[i] = prod
        prod = prod * xs[i] % mod
    prod = 1
    for i in range(n - 1, -1, -1):
        # Fold in the product of xs[i+1:].
        ret[i] = ret[i] * prod % mod
        prod = prod * xs[i] % mod
    return ret
def bluteforce(xs, mod=10 ** 9 + 7):
    """O(n^2) reference implementation of the one-out products (mod *mod*).

    Kept for cross-checking the O(n) versions.  The modulus used to be the
    module-level constant MOD; it is now a keyword parameter with the same
    default, so the function is self-contained and callers are unaffected.
    (The name's spelling is preserved for compatibility.)
    """
    n = len(xs)
    ret = [1] * n
    for i in range(n):
        for j in range(n):
            if i == j:
                continue
            ret[i] = ret[i] * xs[j] % mod
    return ret
# Self-check: all three implementations must agree on the sequence 1..N.
N = 3000
xs = range(1, N + 1)
a = one_out_product(xs)
b = one_out_product_fast(xs)
assert a == b
# The O(N^2) reference is slow at N=3000 but validates the O(N) versions.
c = bluteforce(xs)
assert a == c
|
#Program to be tested
def boarding(seat_number):
    """Map a seat number to its boarding batch.

    Seats 1-25 board in batch 1, 26-100 in batch 2, 101-200 in batch 3;
    any other seat number yields -1.
    """
    if 1 <= seat_number <= 25:
        return 1
    if 26 <= seat_number <= 100:
        return 2
    if 101 <= seat_number <= 200:
        return 3
    return -1
|
# PRE PROCESSING
# PRE PROCESSING
def symmetric_NaN_replacement(dataset):
    """Fill each contiguous NaN window by mirroring the values just before it.

    For a NaN run at rows [ss, es] of a column, row ss+i receives the value
    from row ss-i-1 (reflection around the window start).  Works on a NumPy
    copy of *dataset* and returns a NEW DataFrame -- callers must use the
    return value; the argument is not modified in place.

    NOTE(review): assumes each NaN window has at least as many valid rows
    before it as its own length, and that the first and last rows are not
    NaN (the scan skips row 0 and row len-1) -- confirm with the data.
    """
    np_dataset = dataset.to_numpy()
    for col in range(0,(np_dataset.shape[1])):
        ss_idx = 0
        for row in range(1,(dataset.shape[0]-1)):
            if (np.isnan(np_dataset[row,col]) and (~np.isnan(np_dataset[row-1,col]))): # if a NaN is found, and it is the first one in the range -> start of a window
                ss_idx = row
            if ((ss_idx != 0) and (~np.isnan(np_dataset[row+1,col]))): # end of the window has just be found
                es_idx = row
                # perform symmetric interpolation
                for i in range(0,es_idx-ss_idx+1):
                    np_dataset[ss_idx+i,col] = np_dataset[ss_idx-i-1,col]
                ss_idx = 0
    dataset = pd.DataFrame(np_dataset, columns = dataset.columns)
    return dataset
# Preprocessing driver (expects `dataset`, `np`, `pd` and `inspect_dataframe`
# to be defined earlier in the notebook/script).
# inspect_dataframe(dataset, dataset.columns) # original
# Dealing with flat zones: a run of 5+ identical consecutive values in any
# column is treated as a frozen signal and blanked out.
boolidx = np.ones(dataset['Sponginess'].shape[0])
for col in dataset.columns:
    diff_null = (dataset[col][0:dataset[col].shape[0]].diff() == 0) * 1
    for j in range(0, dataset[col].shape[0]):
        if diff_null[j] >= 1:
            # Turn the 0/1 "same as previous row" flags into run lengths.
            diff_null[j] = diff_null[j - 1] + 1
    boolidx = np.logical_and(boolidx, diff_null < 5)
dataset[~boolidx] = np.NaN
# BUG FIX: the return value was previously discarded, so the NaN windows
# introduced above were never replaced -- symmetric_NaN_replacement works on
# a copy and returns a new DataFrame.
dataset = symmetric_NaN_replacement(dataset)
# Dealing with Meme creativity mean removal
THRESHOLD = 1.3
real_value_idx = np.ones(dataset.shape[0])
real_value_idx = np.logical_and(real_value_idx, dataset['Meme creativity'] > THRESHOLD)
print(np.mean(dataset['Meme creativity']))
print(np.mean(dataset['Meme creativity'][~real_value_idx]))
print(np.mean(dataset['Meme creativity'][real_value_idx]))
# NOTE(review): chained indexing assignment below may hit pandas'
# SettingWithCopy behaviour -- consider .loc if warnings appear.
dataset['Meme creativity'][~real_value_idx] = dataset['Meme creativity'][~real_value_idx] + np.mean(dataset['Meme creativity'][real_value_idx])
inspect_dataframe(dataset, dataset.columns) #pre processed
# Auto-generated placeholder names, all initialised to None.
# NOTE(review): no consumers are visible in this file; confirm these are
# assigned/used elsewhere before changing them.
name0_0_1_0_0_2_0 = None
name0_0_1_0_0_2_1 = None
name0_0_1_0_0_2_2 = None
name0_0_1_0_0_2_3 = None
name0_0_1_0_0_2_4 = None
# -*- coding: utf-8 -*-
# Package metadata (presumably consumed by setup.py / packaging tooling).
__title__ = 'pyginx'
__version__ = '0.1.13.7.7'
__description__ = ''
__author__ = 'wrmsr'
__author_email__ = 'timwilloney@gmail.com'
__url__ = 'https://github.com/wrmsr/pyginx'
# There are N students in a class. Some of them are friends, while some are not.
# Their friendship is transitive in nature. For example, if A is a direct friend of B,
# and B is a direct friend of C, then A is an indirect friend of C.
# And we defined a friend circle is a group of students who are direct or indirect friends.
# Given a N*N matrix M representing the friend relationship between students in the class.
# If M[i][j] = 1, then the ith and jth students are direct friends with each other, otherwise not.
# And you have to output the total number of friend circles among all the students.
# Example 1:
# Input:
# [[1,1,0],
# [1,1,0],
# [0,0,1]]
# Output: 2
# Explanation:The 0th and 1st students are direct friends, so they are in a friend circle.
# The 2nd student himself is in a friend circle. So return 2.
# Example 2:
# Input:
# [[1,1,0],
# [1,1,1],
# [0,1,1]]
# Output: 1
# Explanation:The 0th and 1st students are direct friends, the 1st and 2nd students are direct friends,
# so the 0th and 2nd students are indirect friends. All of them are in the same friend circle, so return 1.
# Note:
# N is in range [1,200].
# M[i][i] = 1 for all students.
# If M[i][j] = 1, then M[j][i] = 1.
class unionFindSet:
    """Disjoint-set (union-find) over the elements of S."""

    def __init__(self, S):
        # Each element starts as the root of its own singleton set.
        self.root = {i: i for i in S}
        self.count = len(S)  # number of distinct components

    def find(self, u):
        """Return the root (representative) of u's set.

        Now applies path halving while walking up, so repeated finds run in
        near-constant amortised time; the representative returned is the
        same as before (only internal parent links are shortened).
        """
        while self.root[u] != u:
            self.root[u] = self.root[self.root[u]]  # path compression
            u = self.root[u]
        return u

    def union(self, u, v):
        """Merge the sets containing u and v.

        Returns False if they were already in the same set, True otherwise.
        """
        pu, pv = self.find(u), self.find(v)
        if pu == pv:
            return False
        self.root[pu] = pv
        self.count -= 1  # one fewer component after a successful merge
        return True
class Solution(object):
    def findCircleNum(self, M):
        """
        :type M: List[List[int]]
        :rtype: int
        """
        # Union-find over the students: merge every pair of direct friends,
        # then the number of remaining components is the number of friend
        # circles.  With near-constant union/find the total cost is the
        # O(n^2) scan of the friendship matrix.
        n = len(M)
        uf = unionFindSet(list(range(n)))
        for i in range(n - 1):
            for j in range(i + 1, n):
                if M[i][j] == 1:
                    uf.union(i, j)
        return uf.count
"""
Exceptions declaration.
"""
# Public API of this module.
__all__ = [
    "PyCozmoException",
    "PyCozmoConnectionError",
    "ConnectionTimeout",
    "Timeout",
]


class PyCozmoException(Exception):
    """ Base class for all PyCozmo exceptions. """


class PyCozmoConnectionError(PyCozmoException):
    """ Base class for all PyCozmo connection exceptions. """


class ConnectionTimeout(PyCozmoConnectionError):
    """ Connection timeout (a connection-level error). """


class Timeout(PyCozmoException):
    """ Generic operation timeout (not tied to the connection hierarchy). """
|
# -*- coding: utf-8 -*-
"""
模板方法:
定义一个操作中算法骨架,而将一些步骤延迟到子类中。模板方法使得子类可以不改变一个算法的结构即可重定义该算法的某些特定步骤
模板方法特点:
1、模板方法是通过把不变行为搬到超类,去除子类中重复代码来体现它的优势
2、当不变的和可变的行为在方法的子类实现中混合在一起的时候,不变的行为就会在子类中重复出现,当使用模板方法模式把这些行为搬移到单一的地方,这样做帮助子类摆况脱重复的不变行为的纠缠。
Created by 相濡HH on 4/10/15.
"""
class TemplateInterface(object):
    """Template-method base class.

    template_method() fixes the algorithm skeleton; subclasses customise
    it by overriding the two primitive operations while the invariant
    steps stay in this superclass.
    """

    def primitive_operation1(self):
        """First customisable step (no-op here; subclasses override)."""

    def primitive_opreation2(self):
        """Second customisable step (no-op here; subclasses override).

        (Method name spelling preserved for compatibility with subclasses.)
        """

    def template_method(self):
        """Run the fixed skeleton: step 1, step 2, then the shared step."""
        self.primitive_operation1()
        self.primitive_opreation2()
        self.common_method()
        print("调用的子类方法么")

    def common_method(self):
        """Invariant step shared by every subclass."""
        print("公共方法:ABC ")
class ConcreteImplA(TemplateInterface):
    """Concrete subclass A: supplies A-specific primitive steps."""

    def primitive_operation1(self):
        """A's implementation of step 1."""
        print("具体类A方法1的实现")

    def primitive_opreation2(self):
        """A's implementation of step 2."""
        print("具体类A方法2的实现")
class ConcreteImplB(TemplateInterface):
    """Concrete subclass B: supplies B-specific primitive steps."""

    def primitive_operation1(self):
        """B's implementation of step 1."""
        print("具体类B方法1的实现")

    def primitive_opreation2(self):
        """B's implementation of step 2."""
        print("具体类B方法2的实现")
if __name__ == '__main__':
    # Demo: the same fixed skeleton runs the steps of each concrete subclass.
    c = ConcreteImplA()
    c.template_method()
    c = ConcreteImplB()
    c.template_method()
class InvalidMeasurement(Exception):
    """Signals that a specified measurement is invalid."""
|
# Swap the case of every line of the file, rewriting it in place.
path = r'c:\users\raibows\desktop\emma.txt'
# Use context managers so the handles are closed even if an I/O error
# occurs mid-way (the manual open/close pairs leaked on exceptions).
with open(path, 'r') as src:
    lines = src.readlines()
swapped = [line.swapcase() for line in lines]
with open(path, 'w') as dst:
    dst.writelines(swapped)
|
class Config(object):
    """Hyper-parameter bundle for training/testing (directories, network
    sizes, learning-rate schedule, debugging cadence)."""

    def __init__(self):
        # directories
        self.save_dir = ''
        self.log_dir = ''
        self.train_data_file = ''
        self.val_data_file = ''
        # input
        self.patch_size = [42, 42, 1]
        self.N = self.patch_size[0] * self.patch_size[1]  # pixels per patch
        # no. of layers
        self.pre_n_layers = 3
        self.pregconv_n_layers = 1
        self.hpf_n_layers = 3
        self.lpf_n_layers = 3
        self.prox_n_layers = 4
        # no. of features
        self.Nfeat = 132  # must be a multiple of 3
        # BUG FIX: use floor division -- under Python 3, "/" yields a float
        # (44.0) but these are feature counts / strides and must be ints.
        # Nfeat is a multiple of 3, so the numeric value is unchanged.
        self.pre_Nfeat = self.Nfeat // 3
        self.pre_fnet_Nfeat = self.pre_Nfeat
        self.prox_fnet_Nfeat = self.Nfeat
        self.hpf_fnet_Nfeat = self.Nfeat
        # gconv params
        self.rank_theta = 11
        self.stride = self.Nfeat // 3
        self.stride_pregconv = self.Nfeat // 3
        self.min_nn = 16 + 8
        # learning
        self.batch_size = 12
        self.grad_accum = 1
        self.N_iter = 400000
        self.starter_learning_rate = 1e-4
        self.end_learning_rate = 1e-5
        self.decay_step = 1000
        # Per-decay-step factor chosen so the LR reaches end_learning_rate
        # exactly after N_iter iterations.
        self.decay_rate = (self.end_learning_rate / self.starter_learning_rate) ** (float(self.decay_step) / self.N_iter)
        self.Ngpus = 2
        # debugging
        self.save_every_iter = 250
        self.summaries_every_iter = 5
        self.validate_every_iter = 100
        self.test_every_iter = 250
        # testing
        self.minisize = 49 * 3  # must be integer multiple of search window
        self.search_window = [49, 49]
        self.searchN = self.search_window[0] * self.search_window[1]
        # noise std
        self.sigma = 25
|
# Column headings for CAS (caster) records -- layout matches NTRIP
# sourcetable fields; confirm against the consuming parser.
CAS_HEADERS = ('Host', 'Port', 'ID', 'Operator',
               'NMEA', 'Country', 'Latitude', 'Longitude',
               'FallbackHost', 'FallbackPort', 'Site', 'Other Details', 'Distance')
# Column headings for NET (network) records.
NET_HEADERS = ('ID', 'Operator', 'Authentication',
               'Fee', 'Web-Net', 'Web-Str', 'Web-Reg', 'Other Details', 'Distance')
# Column headings for STR (stream/mountpoint) records.
STR_HEADERS = ('Mountpoint', 'ID', 'Format', 'Format-Details',
               'Carrier', 'Nav-System', 'Network', 'Country', 'Latitude',
               'Longitude', 'NMEA', 'Solution', 'Generator', 'Compr-Encryp',
               'Authentication', 'Fee', 'Bitrate', 'Other Details', 'Distance')
# pycurl/libcurl error numbers handled specially (CURLE_* codes).
PYCURL_COULD_NOT_RESOLVE_HOST_ERRNO = 6
PYCURL_CONNECTION_FAILED_ERRNO = 7
PYCURL_TIMEOUT_ERRNO = 28
PYCURL_HANDSHAKE_ERRNO = 35
# Timeout in seconds for each multi-curl select() poll.
MULTICURL_SELECT_TIMEOUT = 0.5
|
"""
"""
def _impl(repository_ctx):
    # Repository-rule implementation: symlink the Vulkan SDK into this
    # external repository and emit a BUILD file exposing it.
    sdk_path = repository_ctx.os.environ.get("VULKAN_SDK", None)
    if sdk_path == None:
        print("VULKAN_SDK environment variable not found, using /usr")
        sdk_path = "/usr"
    repository_ctx.symlink(sdk_path, "vulkan_sdk_linux")
    glslc_path = repository_ctx.which("glslc")
    if glslc_path == None:
        fail("Unable to find glslc binary in the system")
    file_content = """
cc_library(
    name = "vulkan_cc_library",
    srcs = ["vulkan_sdk_linux/lib/x86_64-linux-gnu/libvulkan.so"],
    hdrs = glob([
        "vulkan_sdk_linux/include/vulkan/**/*.h",
        "vulkan_sdk_linux/include/vulkan/**/*.hpp",
    ]),
    includes = ["vulkan"],
    visibility = ["//visibility:public"]
)

# FIXME: I cannot actually run this one in _glsl_shader. There is an error
# when running _glsl_shader rule
filegroup(
    name = "glslc",
    srcs = ["vulkan_sdk_linux/bin/glslc"],
    visibility = ["//visibility:public"],
)
""".format(str(glslc_path)[1:])
    # NOTE(review): the template above contains no '{}' placeholder, so the
    # .format(str(glslc_path)[1:]) argument is unused; the glslc filegroup
    # hardcodes "vulkan_sdk_linux/bin/glslc" instead. Presumably the
    # discovered glslc_path was meant to be substituted — confirm intent.
    repository_ctx.file("BUILD.bazel", file_content)

# Local repository rule (inspects the host machine); re-evaluated when the
# VULKAN_SDK environment variable changes.
vulkan_linux = repository_rule(
    implementation = _impl,
    local = True,
    environ = ["VULKAN_SDK"]
)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Jun 27 21:16:00 2020
@author: mints
"""
# Extinction coefficient for the K band (presumably relative to E(B-V);
# confirm against the extinction law used by the consumer of EXTINCTIONS).
A_K = 0.306
# Magnitude column names per photometric survey.
BANDS = {
    'AllWISE': ['W1mag', 'W2mag'],
    'ATLAS': ['%sap3' % s for s in list('UGRIZ')],
    'DES': ['mag_auto_%s' % s for s in list('grizy')],
    'KIDS': ['%smag' % s for s in list('ugri')],
    'LAS': ['p%smag' % s for s in ['y', 'j', 'h', 'k']],
    'LS8': ['dered_mag_%s' % s for s in list('grz')],
    'NSC': ['%smag' % s for s in list('ugrizy') + ['vr']],
    'PS1': ['%skmag' % s for s in list('grizy')],
    'SDSS': ['%smag' % s for s in list('ugriz')],
    'unWISE': ['W1mag', 'W2mag'],
    'VHS': ['%spmag' % s for s in list('YJH') + ['Ks']],
    'VIKING': ['%spmag' % s for s in list('ZYJH') + ['Ks']],
    #--- Simulated:
    'Happy': ['%smag' % s for s in list('ugriz')],
    'Teddy': ['%smag' % s for s in list('ugriz')],
}
# Extinction coefficients per band, aligned index-by-index with BANDS.
EXTINCTIONS = {
    'AllWISE': [0, 0],
    'ATLAS': [0, 0, 0, 0, 0],
    'DES': [3.237, 2.176, 1.595, 1.217, 1.058],
    'KIDS': [4.239, 3.303, 2.285, 1.698],
    'LAS': [1.194957, 0.895497, 0.568943, 0.356779],
    'LS8': [0, 0, 0],
    'NSC': [5.10826797, 3.9170915, 2.73640523, 2.07503268, 1.51035948,
            1.30611111, 2.816129032],
    'PS1': [3.612, 2.691, 2.097, 1.604, 1.336],
    'SDSS': [0, 0, 0, 0, 0],
    'unWISE': [0, 0],
    'VHS': [1.213, 0.891, 0.564, 0.373],
    'VIKING': [1.578, 1.213, 0.891, 0.564, 0.373],
    # -- Simulated:
    'Happy': [0, 0, 0, 0, 0],
    'Teddy': [0, 0, 0, 0, 0],
}
# Survey magnitude limits, aligned index-by-index with BANDS
# (99 effectively disables the limit for simulated surveys).
LIMITS = {
    'AllWISE': [17.1, 15.7],
    'ATLAS': [21.78, 22.71, 22.17, 21.40, 20.23],
    'DES': [24.33, 24.08, 23.44, 22.69, 21.44],
    'KIDS': [24.3, 25.4, 25.2, 24.2],
    'LAS': [20.5, 20.0, 18.8, 18.4],
    'LS8': [24.5, 23.9, 22.9],
    'NSC': [22.6, 23.6, 23.2, 22.8, 22.3, 21.0, 23.3],
    'PS1': [23.3, 23.2, 23.1, 22.3, 21.3],
    'SDSS': [22.0, 22.2, 22.2, 21.3, 20.5],
    'unWISE': [17.93, 16.72],
    'VHS': [23., 21.6, 21.0, 20.2],
    'VIKING': [23.1, 22.3, 22.1, 21.5, 21.2],
    # -- Simulated:
    #'Happy': [22.0, 22.2, 22.2, 21.3, 20.5],
    'Happy': [99, 99, 99, 99, 99],
    'Teddy': [99, 99, 99, 99, 99],
}
def collect(names):
    """Gather magnitude columns, error columns, and limits for surveys.

    :param names: iterable of survey names (keys of BANDS / LIMITS)
    :return: (columns, ecolumns, limits) — parallel lists across all surveys
    """
    columns, ecolumns, limits = [], [], []
    for survey in names:
        prefix = survey.lower()
        for band in BANDS[survey]:
            columns.append('%s_%s' % (prefix, band.lower()))
            ecolumns.append('e_%s_%s' % (prefix, band.lower()))
        limits.extend(LIMITS[survey])
    return columns, ecolumns, limits
|
# -*- coding: UTF-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# Copied from trac/utils.py, ticket-links-trunk branch
def unique(seq):
    """Yield unique elements from sequence of hashables, preserving order.
    (New in 0.13)
    """
    seen = set()

    def _is_first_occurrence(item):
        # True exactly once per distinct value.
        if item in seen:
            return False
        seen.add(item)
        return True

    return (item for item in seq if _is_first_occurrence(item))
|
__version__ = '0.1'
# Alignment input formats the tool accepts.
supported_aln_types = ('blast', 'sam', 'xml')
# Reference database categories (presumably BLAST-style nt/nr etc. — confirm).
supported_db_types = ('nt', 'nr','cds', 'genome', 'none')
# Alignment types for which a consensus can be computed.
consensus_aln_types = ('xml',)
|
def ordinal(n):
    """Translate a 0-based index into a 1-based ordinal, e.g. 0 -> 1st, 1 -> 2nd, etc.

    :param int n: the index to be translated.
    :return: (*str*) -- Ordinal.
    """
    value = n + 1
    suffixes = {1: "st", 2: "nd", 3: "rd"}
    # BUG FIX: 11th/12th/13th are irregular for ANY hundred (111th, 212th,
    # 313th...), so test the last TWO digits; the old `< 20` check broke
    # above 110 (it produced "111st").
    if value % 100 in (11, 12, 13):
        suffix = "th"
    else:
        suffix = suffixes.get(value % 10, "th")
    return str(value) + suffix
|
def for_G():
    """Print a 7-row by 5-column block-letter 'G' built from '*' cells."""
    for r in range(7):
        for c in range(5):
            # Left edge, top/bottom bars, middle bar, and lower-right edge.
            is_star = ((c == 0 and r not in (0, 6))
                       or (r in (0, 6) and c > 0)
                       or (r == 3 and c > 1)
                       or (r > 3 and c == 4))
            if is_star:
                print("*", end=" ")
            else:
                print(end=" ")
        print()
def while_G():
    """Print the same 7x5 block-letter 'G' as for_G(), using while loops."""
    r = 0
    while r < 7:
        c = 0
        while c < 5:
            # Same shape predicate as for_G: edges, bars, and the right stem.
            if ((c == 0 and r not in (0, 6))
                    or (r in (0, 6) and c > 0)
                    or (r == 3 and c > 1)
                    or (r > 3 and c == 4)):
                print("*", end=" ")
            else:
                print(end=" ")
            c += 1
        r += 1
        print()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 26 00:07:09 2017
@author: Nadiar
"""
def iterPower(base, exp):
    '''
    base: int or float.
    exp: int >= 0

    returns: int or float, base^exp
    '''
    result = 1
    # Multiply the accumulator by base exactly exp times; exp == 0 leaves 1.
    for _ in range(exp):
        result *= base
    return result
|
TODO = """
TODO:
/kick - Done
/ban - Done
/hi - Done
/unban - Done
/del - Incomplete
/admin - Incomplete
/pin -no - Incomplete
/unpin - Done -unpin a mesage
/whois - Done. Get the info abouth a member
/members - Done. Tells how many members are in the group
/rules - Done
/py (code) -canceled run py code
Remove redundant code - Incomplete
"""
RULES = """
1. The group is English-only.
2. Be civil, respect each other. Racism and other forms of discrimination are forbidden.
3. No advertisement of any kind without proper permission from the admins, unless you're sharing an open-source project. Mind rule 5 when sharing.
4. No referral links.
5. No spam, no flood.
6. No NSFW content.
7. No contacts, locations sharing allowed in the group.
* Before asking something, google first. If you want Web Development tutorials/courses/books see @CodeWithOlivia.
* Nobody will do your homework for you.
* If you did everything you could and still have to ask, show your advance. If you're getting an error, post full error message and traceback.
* Don't beg too much for help. If some people are willing to help, they will.
"""
HELP = """
Oh hy i am WebDevGuard and i guard the @WebDevChat group\n
/hi - says hello
/help - this text
/bye - send bot to sleep (not implemented)
/rules - WebDevChat rules
/members - member count
/unpin - unpin a message
/todo - what parts are working
/about - About bot
/stupid - Check if you are stupid 😏
/die - Kill a member
/delete - Deletes message
/getid - Get user id
/kick_bot - *Tries* to kick bot
/stupid <user_id> - says if a user is stupid
/kick <user_id> - bans a user
/kill <mgg_id> - delets a mesage
/whois <user_id> - tells info abouth a user
/pardon <user_id> - unban a user
/shutup <user_id - mute a user for 24 hours
"""
ABOUT = """
WDC bot 1.2
Made by arydev anonymous_guy and m3hdi652
"""
STUPID = ['stupid','not stupid :)']
KICK = 'Oh hey you were kiked if you think is a foult contact @Olivia2016 '
BAN = 'Oh hey you were banned/unbaned if you think is a foult contact @Olivia2016 '
DELETE_USAGE = 'Usage: Select message and reply with /delete [no. of message to delete]. If no argument is provided 1 message will be deleted'
WHOIS_USAGE = "type /whois username"
PARDON_USAGE = 'TODO'
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Created by imoyao at 2020/1/15 23:06
NAME = 'yst'
TINY_KEY = ''  # TinyPNG-style API key; presumably loaded from TINY_KEY_FILE at runtime — confirm
TINY_KEY_FILE = 'tiny.key'
# (translated from Chinese) Default suffix appended when the compressed file
# is saved in the current directory.
DEFUALT_TINY_SUFFIX = 'tiny'  # NOTE(review): "DEFUALT" is misspelled, but renaming would break importers
# (translated from Chinese) Default directory name for saved output.
DEFUALT_TINY_DIR = 'tiny'
TINY_SAVE_IN_CURRENT_DIR_SUFFIX = '.'
SUPPORT_IMG_TYPES = ['.jpg', '.png', '.jpeg']  # image extensions accepted
VERSION = '0.0.1'
|
def solution(A, K):
    """Rotate list A to the right by K positions and return the result.

    The original list object itself is returned (unmodified) when the
    rotation is a no-op by construction.
    """
    # Rotating by the full length leaves the list unchanged.
    if K == len(A):
        return A
    # A list of identical items (this includes the empty list) is invariant.
    if all(item == A[0] for item in A):
        return A
    size = len(A)
    shift = K % size
    # Right rotation: the last `shift` items move to the front.
    return A[size - shift:] + A[:size - shift]
#!/usr/bin/env python
#
# Azure Linux extension
#
# Copyright (c) Microsoft Corporation
# All rights reserved.
# MIT License
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the ""Software""), to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above
# copyright notice and this permission notice shall be included in all copies or substantial portions of the
# Software. THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
# LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
# CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
# Get elements from DiagnosticsMonitorConfiguration in LadCfg based on element name
def getDiagnosticsMonitorConfigurationElement(ladCfg, elementName):
    """Look up *elementName* under ladCfg['diagnosticMonitorConfiguration'].

    Returns the element's value, or None when ladCfg is falsy or either key
    is missing.
    """
    if not ladCfg or 'diagnosticMonitorConfiguration' not in ladCfg:
        return None
    monitor_cfg = ladCfg['diagnosticMonitorConfiguration']
    return monitor_cfg[elementName] if elementName in monitor_cfg else None
# Get fileCfg form FileLogs in LadCfg
def getFileCfgFromLadCfg(ladCfg):
    """Return the 'fileLogConfiguration' array from the fileLogs element,
    or None when absent.
    """
    file_logs = getDiagnosticsMonitorConfigurationElement(ladCfg, 'fileLogs')
    if not file_logs:
        return None
    return file_logs.get('fileLogConfiguration')
# Get resource Id from LadCfg
def getResourceIdFromLadCfg(ladCfg):
    """Return metrics['resourceId'] from ladCfg, or None when absent."""
    metrics_cfg = getDiagnosticsMonitorConfigurationElement(ladCfg, 'metrics')
    if not metrics_cfg:
        return None
    return metrics_cfg.get('resourceId')
# Get event volume from LadCfg
def getEventVolumeFromLadCfg(ladCfg):
    """Return the 'eventVolume' element of ladCfg, or None when absent."""
    return getDiagnosticsMonitorConfigurationElement(ladCfg, 'eventVolume')
# Get default sample rate from LadCfg
def getDefaultSampleRateFromLadCfg(ladCfg):
    """Return the top-level 'sampleRateInSeconds' setting, or None."""
    if not ladCfg:
        return None
    return ladCfg.get('sampleRateInSeconds')
def getPerformanceCounterCfgFromLadCfg(ladCfg):
    """
    Return the array of metric definitions
    :param ladCfg:
    :return: array of metric definitions
    """
    counters = getDiagnosticsMonitorConfigurationElement(ladCfg, 'performanceCounters')
    if not counters:
        return None
    return counters.get('performanceCounterConfiguration')
def getAggregationPeriodsFromLadCfg(ladCfg):
    """
    Return an array of aggregation periods as specified. If nothing appears in the config, default PT1H
    :param ladCfg:
    :return: array of ISO 8601 intervals
    :rtype: List(str)

    NOTE(review): when 'metrics'/'metricAggregation' is absent entirely this
    returns [] — the PT1H default only applies per-item. Confirm the
    docstring's "default PT1H" claim against callers.
    """
    periods = []
    metrics_cfg = getDiagnosticsMonitorConfigurationElement(ladCfg, 'metrics')
    if metrics_cfg and 'metricAggregation' in metrics_cfg:
        for aggregation in metrics_cfg['metricAggregation']:
            # Missing per-item period falls back to one hour.
            periods.append(aggregation.get('scheduledTransferPeriod', 'PT1H'))
    return periods
def getSinkList(feature_config):
    """
    Returns the list of sink names to which all data should be forwarded, according to this config
    :param feature_config: The JSON config for a feature (e.g. the struct for "performanceCounters" or "syslogEvents")
    :return: the list of names; might be an empty list
    :rtype: [str]
    """
    if not feature_config:
        return []
    sink_spec = feature_config.get('sinks')
    if not sink_spec:
        return []
    # 'sinks' is a comma-separated string of names.
    return [name.strip() for name in sink_spec.split(',')]
def getFeatureWideSinksFromLadCfg(ladCfg, feature_name):
    """
    Returns the list of sink names to which all data for the given feature should be forwarded
    :param ladCfg: The ladCfg JSON config
    :param str feature_name: Name of the feature. Expected to be "performanceCounters" or "syslogEvents"
    :return: the list of names; might be an empty list
    :rtype: [str]
    """
    feature_config = getDiagnosticsMonitorConfigurationElement(ladCfg, feature_name)
    return getSinkList(feature_config)
class SinkConfiguration:
    """Registry of named data sinks parsed from a sinksConfig JSON object."""

    def __init__(self):
        # Maps sink name -> full sink definition (dict with 'name'/'type').
        self._sinks = {}

    def insert_from_config(self, json):
        """
        Walk through the sinksConfig JSON object and add all sinks within it. Every accepted sink is guaranteed to
        have a 'name' and 'type' element.
        :param json: A hash holding the body of a sinksConfig object
        :return: A string containing warning messages, or an empty string
        """
        warnings = []
        sink_entries = json['sink'] if json and 'sink' in json else []
        for sink in sink_entries:
            if 'name' in sink and 'type' in sink:
                self._sinks[sink['name']] = sink
            else:
                warnings.append('Ignoring invalid sink definition {0}'.format(sink))
        return '\n'.join(warnings)

    def get_sink_by_name(self, sink_name):
        """
        Return the JSON object defining a particular sink.
        :param sink_name: string name of sink
        :return: JSON object or None
        """
        return self._sinks.get(sink_name)

    def get_all_sink_names(self):
        """
        Return the names of all defined sinks (dict keys view).
        """
        return self._sinks.keys()

    def get_sinks_by_type(self, sink_type):
        """
        Return the full definitions of every sink whose 'type' equals
        *sink_type*.
        """
        return [sink for sink in self._sinks.values() if sink['type'] == sink_type]
|
# *###################
# * SYMBOL TABLE
# *###################
class SymbolTable:
    """Nested name->value mapping with lexical-scope lookup via *parent*."""

    def __init__(self, parent=None):
        self.symbols = {}
        self.parent = parent  # enclosing scope, or None for the outermost

    def get(self, name):
        """Return the value bound to *name*, searching enclosing scopes.

        NOTE(review): a value that is literally None is indistinguishable
        from "not found" and falls through to the parent scope.
        """
        local_value = self.symbols.get(name)
        if local_value is None and self.parent:
            return self.parent.get(name)
        return local_value

    def set(self, name, value):
        """Bind *name* to *value* in this scope."""
        self.symbols[name] = value

    def remove(self, name):
        """Delete *name* from this scope; raises KeyError when absent."""
        del self.symbols[name]
#!/usr/bin/env python3
# Interactive demo of try/except/else/finally: asks for a file name and, on
# success, writes the last two lines of the song into that file.
try:
    print('If you provide a legal file name, this program will output the last two lines of the song to that file...')
    print('\nMary had a little lamb,')
    answersnow = input('With fleece as white as (enter your file name): ')
    answersnowobj = open(answersnow, 'w')
# BUG FIX: narrowed from a bare `except:` — that also swallowed
# KeyboardInterrupt/SystemExit and genuine bugs. Only OS-level failures
# (bad file name, permissions) should trigger the error message.
except OSError:
    print('Error with that file name!')
else:
    # Success path: the file opened, so write and close it.
    print('and every where that mary went', file=answersnowobj)
    print('The lamb was sure to go', file=answersnowobj)
    answersnowobj.close()
finally:
    print('Thanks for playing!')
    quit()
|
def solve_knapsack(profits, weights, capacity):
    """0/1 knapsack via a single 1-D DP table.

    :param profits: profit of each item
    :param weights: weight of each item (parallel to profits)
    :param capacity: maximum total weight
    :return: maximum achievable total profit (0 for degenerate input)
    """
    item_count = len(profits)
    if capacity <= 0 or item_count == 0 or len(weights) != item_count:
        return 0

    # dp[c] = best profit achievable within capacity c using the items
    # considered so far.
    dp = [0] * (capacity + 1)

    # Seed the table with the first item alone.
    for c in range(capacity + 1):
        if weights[0] <= c:
            dp[c] = profits[0]

    # Fold in the remaining items. Capacities are visited in DECREASING
    # order so each item is counted at most once.
    for i in range(1, item_count):
        for c in range(capacity, -1, -1):
            take = profits[i] + dp[c - weights[i]] if weights[i] <= c else 0
            dp[c] = max(take, dp[c])
    return dp[capacity]


if __name__ == '__main__':
    print("Total knapsack profit: ", str(solve_knapsack([1, 6, 10, 16], [1, 2, 3, 5], 7)))
    print("Total knapsack profit: ", str(solve_knapsack([1, 6, 10, 16], [1, 2, 3, 5], 6)))
|
# AWS region code -> human-readable region name (labels in Brazilian
# Portuguese).
regions_data = {
    "us-east-2":"Leste dos EUA (Ohio)",
    "us-east-1":"Leste dos EUA (Norte da Virgínia)",
    "us-west-1":"Oeste dos EUA (Norte da Califórnia)",
    "us-west-2":"Oeste dos EUA (Oregon)",
    "ap-east-1": "Ásia-Pacífico (Hong Kong)",
    "ap-south-1":"Ásia Pacífico (Mumbai)",
    "ap-northeast-3":"Ásia-Pacífico (Osaka – Local)",
    "ap-northeast-2":"Ásia-Pacífico (Seul)",
    "ap-southeast-1":"Ásia-Pacífico (Cingapura)",
    "ap-southeast-2":"Ásia-Pacífico (Sydney)",
    "ap-northeast-1":"Ásia-Pacífico (Tóquio)",
    "ca-central-1":"Canadá (Central)",
    "eu-central-1":"Europa (Frankfurt)",
    "eu-west-1":"Europa (Irlanda)",
    "eu-west-2":"Europa (Londres)",
    "eu-west-3":"Europa (Paris)",
    "eu-north-1":"Europa (Estocolmo)",
    "me-south-1":"Oriente Médio (Bahrein)",
    "sa-east-1":"América do Sul (São Paulo)"
}
# Read three words; echo #1 uppercased, #2 lowercased, and #3 uppercased
# with its vowels removed. (Prompts are in Portuguese: "enter word #N".)
input1 = input("Insira a palavra #1")
input2 = input("Insira a palavra #2")
input3 = input("Insira a palavra #3")
# IDIOM: strip all (upper-case) vowels in a single translate() pass instead
# of five chained .replace() calls — identical output.
input3 = input3.upper().translate(str.maketrans('', '', 'AEIOU'))
print(input1.upper())
print(input2.lower())
print(input3)
# A small demo: rebinding a parameter inside a function does not affect the
# caller's variable (ints are immutable).
def f(x):
    # x is a local name; this rebinding is invisible to the caller.
    x = x + 1
    print(x)

y = 10
f(y)       # prints 11
print(y)   # prints 10 — unchanged
#
# @lc app=leetcode id=922 lang=python3
#
# [922] Sort Array By Parity II
#
# @lc code=start
class Solution:
    def sortArrayByParityII(self, a: List[int]) -> List[int]:
        """Rearrange *a* in place so even indices hold even numbers and odd
        indices hold odd numbers; returns the same list.

        Two pointers walk the even and odd index positions; a misplaced odd
        at an even slot always has a misplaced even partner at an odd slot
        (the counts are equal by the problem statement), so a swap fixes
        both at once.
        """
        even_ptr = 0  # next even index that may hold a misplaced value
        odd_ptr = 1   # next odd index that may hold a misplaced value
        n = len(a)
        while even_ptr < n and odd_ptr < n:
            if a[even_ptr] % 2 == 0:
                even_ptr += 2
            elif a[odd_ptr] % 2 == 1:
                odd_ptr += 2
            else:
                # a[even_ptr] is odd AND a[odd_ptr] is even — swap them.
                a[even_ptr], a[odd_ptr] = a[odd_ptr], a[even_ptr]
                even_ptr += 2
                odd_ptr += 2
        return a
# @lc code=end
|
class StringUtil:
    """Small helpers for None/blank string checks."""

    @staticmethod
    def is_empty(string):
        """True when *string* is None or contains only whitespace."""
        return string is None or string.strip() == ""

    @staticmethod
    def is_not_empty(string):
        """Inverse of is_empty."""
        return not StringUtil.is_empty(string)
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# 地址:http://www.runoob.com/python/python-exercise-example15.html
# Prompt (Chinese): "Enter the exam score:".
score = int(input("请输入学习成绩(分数): \n"))
# Letter-grade buckets: >= 90 -> A, 60..89 -> B, below 60 -> C.
if score >= 90:
    grade = "A"
elif score >= 60:
    grade = "B"
else:
    grade = "C"
# Output (Chinese): "<score> belongs to <grade>".
print("%d 属于 %s" % (score, grade))
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Subsetsum by splitting
# c.durr et jill-jênn vie - 2014-2015
# snip{
def part_sum(x, i=0):
    """All subsetsums from x[i:]

    :param x: table of values
    :param int i: index defining suffix of x to be considered
    :iterates: over all values, in arbitrary order
    :complexity: :math:`O(2^{len(x)-i})`
    """
    if i == len(x):
        yield 0
        return
    # Every subset of the suffix either skips x[i] or includes it.
    for partial in part_sum(x, i + 1):
        yield partial
        yield partial + x[i]


def subset_sum(x, R):
    """Subsetsum by splitting (meet-in-the-middle)

    :param x: table of values
    :param R: target value
    :returns bool: if there is a subsequence of x with total sum R
    :complexity: :math:`O(n^{\\lceil n/2 \\rceil})`
    """
    half = len(x) // 2                      # split the input in two halves
    left_sums = sorted(part_sum(x[:half]))
    # For the right half store R - s, so a match with a left sum means the
    # two halves together reach exactly R.
    right_complements = sorted(R - s for s in part_sum(x[half:]))
    # Sorted-merge intersection test between the two lists.
    i = j = 0
    while i < len(left_sums) and j < len(right_complements):
        if left_sums[i] == right_complements[j]:
            return True
        if left_sums[i] < right_complements[j]:
            i += 1                          # advance the smaller side
        else:
            j += 1
    return False
# snip}
|
#!python3
#encoding:utf-8
class Json2Sqlite(object):
    """Value converters between JSON-friendly types and SQLite columns
    (SQLite has no native bool or list types).
    """

    def __init__(self):
        pass

    def BoolToInt(self, bool_value):
        """Map True -> 1 and anything else -> 0.

        Note: `True == bool_value` deliberately also accepts 1, matching the
        original behaviour.
        """
        if True == bool_value:
            return 1
        return 0

    def IntToBool(self, int_value):
        """Map 0 -> False, any non-zero value -> True."""
        return int_value != 0

    def ArrayToString(self, array):
        """Join string items with ','; returns None for None/empty input.

        The stray debug print() calls from the original were removed.
        """
        if None is array or 0 == len(array):
            return None
        return ','.join(array)

    def StringToArray(self, string):
        """Split a comma-joined string back into a list, skipping empty
        segments; returns None for None/empty input.
        """
        if None is string or 0 == len(string):
            return None
        # BUG FIX: the original called string.sprit(',') — a typo that raised
        # AttributeError on every call.
        return [item for item in string.split(',') if 0 < len(item)]
|
"""
@author: David E. Craciunescu
@date: 2021/02/22 (yyyy/mm/dd)
3. Consider a function f(x) which:
- Is known to have a unique local minimum called x0
- At a point in the interval [p1, p2]
- That CAN be p1 or p2.
- Is strictly decreasing between [p1, x0]
- Is strictly increasing between [x0, p2]
Your task is to search, as efficiently as possible, among the points between
p1 and p2 and find that unique local minimum. Formally, if f(x) = k, search
for x ∈ [p1, p2].
To simplify the process, instead of the exact value of each x, a range of
values [α, β] can be indicated, with a β - α < ε, where x is found.
The algorithm data will be the interval [p1, p2], the value k that is being
searched for, and the value ε for the approximation.
"""
# Isn't this just the Indiana Croft exercise written formally? |
# Per-provider request payload templates (values intentionally blank).
# Presumably OAuth sign-in fields: Google supplies an id_token; GitHub an
# authorization code plus CSRF state — confirm against the consumer.
INPUT = {
    "google": {
        "id_token": ""
    },
    "github": {
        "code": "",
        "state": ""
    }
}
|
"""Constants for the Kuna component."""
ATTR_NOTIFICATIONS_ENABLED = "notifications_enabled"
ATTR_SERIAL_NUMBER = "serial_number"
ATTR_VOLUME = "volume"
CONF_RECORDING_INTERVAL = "recording_interval"
CONF_STREAM_INTERVAL = "stream_interval"
CONF_UPDATE_INTERVAL = "update_interval"
DEFAULT_RECORDING_INTERVAL = 7200
DEFAULT_STREAM_INTERVAL = 5
DEFAULT_UPDATE_INTERVAL = 15
DOMAIN = "kuna"
MANUFACTURER = "Kuna Smart Home Security"
|
# Tiny warm-up: area of a triangle.
base = 10
height = 5
area = 1/2*(base*height)
print("Area of our triangle is : ", area)

# Parse one comma-separated row of float features per line of the CSV.
lines = []
# The `with` block now owns the file handle (the original left it open on
# any exception between open() and the loop).
with open("/Users/lipingzhang/Desktop/program/pycharm/seq2seq/MNIST_data/0622_train_features.csv", "r") as my_file:
    for line in my_file:
        feat = [float(value) for value in line.split(',')]
        # BUG FIX: the original appended feat.tolist(), but feat is a plain
        # Python list (no .tolist() method) — AttributeError on line one.
        lines.append(feat)
|
def main():
    """Read a string; print its first two characters, its last two
    characters, and their concatenation.
    """
    text = input("Enter the string")
    head = text[:2]   # first two characters
    tail = text[-2:]  # last two characters
    print(head)
    print(tail)
    print("The new string is ", head + tail)


if (__name__ == '__main__'):
    main()
|
'''
Kattis - memorymatch
Consider the 2 different corner cases and the rest is not too hard.
Time: O(num_opens), Space: O(n)
'''
# n cards on the board; then a list of observed "open two cards" moves.
n = int(input())
num_opens = int(input())
cards = {}          # card label -> set of board positions seen with that label
turned_off = set()  # positions of cards already matched (removed from play)
for i in range(num_opens):
    # x, y: positions opened; cx, cy: the card labels revealed there.
    x, y, cx, cy = input().split()
    x, y = int(x), int(y)
    if not cx in cards:
        cards[cx] = set()
    if not cy in cards:
        cards[cy] = set()
    cards[cx].add(x)
    cards[cy].add(y)
    # Opening two cards with the same label matches them immediately.
    if cx == cy:
        turned_off.add(x)
        turned_off.add(y)
# Corner case where u know at least 1 of every type of card
if len(cards) == n//2:
    min_length = min(len(cards[c]) for c in cards)
    if min_length >= 1:
        print(n//2 - len(turned_off)//2)
        exit()
# Count labels whose BOTH positions are known and still in play
# (the for/else increments only when the inner loop never breaks).
ans = 0
for x in cards:
    if len(cards[x]) == 2:
        for e in cards[x]:
            if e in turned_off:
                break
        else:
            ans += 1
if ans + len(turned_off)//2 == n//2 - 1:
    print(ans + 1) # Corner case where u have n-1 pairs already
    exit()
print(ans)
# Copyright 2020 Tensorforce Team. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
class TensorforceConfig(object):
    """Validated, immutable agent configuration: every attribute is frozen
    after __init__ (assignment/deletion raise NotImplementedError).
    """
    # modify dtype mappings

    def __init__(
        self, *,
        buffer_observe=False,
        create_debug_assertions=False,
        create_tf_assertions=True,
        device='CPU',
        eager_mode=False,
        enable_int_action_masking=True,
        name='agent',
        seed=None,
        tf_log_level=40
    ):
        # __setattr__ is overridden to raise, so all writes must go through
        # object's own setter.
        assign = super().__setattr__

        assert buffer_observe is False or buffer_observe == 'episode' or \
            isinstance(buffer_observe, int) and buffer_observe >= 1
        # False means "observe every timestep": stored as a buffer of 1.
        assign('buffer_observe', 1 if buffer_observe is False else buffer_observe)

        assert isinstance(create_debug_assertions, bool)
        assign('create_debug_assertions', create_debug_assertions)
        assert isinstance(create_tf_assertions, bool)
        assign('create_tf_assertions', create_tf_assertions)
        assert isinstance(eager_mode, bool)
        assign('eager_mode', eager_mode)
        assert isinstance(enable_int_action_masking, bool)
        assign('enable_int_action_masking', enable_int_action_masking)
        assert device is None or isinstance(device, str)  # more specific?
        assign('device', device)
        assert isinstance(name, str)
        assign('name', name)
        assert seed is None or isinstance(seed, int)
        assign('seed', seed)
        assert isinstance(tf_log_level, int) and tf_log_level >= 0
        assign('tf_log_level', tf_log_level)

    def __setattr__(self, name, value):
        # Configuration is read-only after construction.
        raise NotImplementedError

    def __delattr__(self, name):
        raise NotImplementedError
|
'''
11
1 4
3 5
0 6
5 7
3 8
5 9
6 10
8 11
8 12
2 13
12 14
'''
# Read n meetings as "start end" pairs from stdin, then greedily count how
# many can be attended without overlap.
n = int(input())
data = []
for _ in range(n):
    line = list(map(int, input().split()))
    data.append(line)
data.sort()  # sort by start time (then end time)
# debug
# print(n)
# print(data)
x = -1   # start of the currently tracked meeting
y = -1   # end of the currently tracked meeting
nx = -1
ny = -1
cnt = 0  # number of meetings selected
for i in range(n):
    nx, ny = data[i]
    if nx >= y:  # (translated) a meeting may start exactly when the previous one ends
        cnt += 1
        x = nx
        y = ny
    else:
        # Overlap: swap the tracked meeting for this one if it ends earlier
        # (the count stays — this realises the earliest-end greedy choice).
        if ny < y:
            x = nx
            y = ny
        else:
            pass
print(cnt)
|
# Copyright 2020 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" This template deploys SAP HANA and all required infrastructure resources (network, firewall rules, NAT, etc). """
def generate_config(context):
    """Build the Deployment Manager resource list for the SAP HANA PoC:
    a VPC with two subnets, a Cloud NAT gateway, Windows and Linux bastion
    hosts, firewall rules, and either an HA or a standalone HANA deployment
    depending on whether 'secondaryZone' is set in the properties.
    """
    properties = context.properties
    # Creating the network (VPC + subnets) resource
    network = {
        'name': 'sap-poc-vpc',
        'type': 'network.py',
        'properties': {
            'autoCreateSubnetworks': False,
            'subnetworks': [{
                'name': 'subnetwork-1',
                'region': properties['region'],
                'ipCidrRange': '10.0.0.0/24',
            }, {
                'name': 'subnetwork-2',
                'region': properties['region'],
                'ipCidrRange': '192.168.0.0/24',
            }]
        }
    }
    #Create a Cloud NAT Gateway
    cloud_router = {
        'name': 'cloud-nat-gateway',
        'type': 'cloud_router.py',
        'properties': {
            'name': 'cloud-nat-router',
            'network': '$(ref.sap-poc-vpc.name)',
            'region': properties['region'],
            'nats': [{
                'name': 'cloud-nat',
                'sourceSubnetworkIpRangesToNat': 'LIST_OF_SUBNETWORKS',
                'natIpAllocateOption': 'AUTO_ONLY',
                'subnetworks': [{
                    'name': '$(ref.subnetwork-1.selfLink)'
                }]
            }]
        }
    }
    #Create a windows bastion host to be used for installing HANA Studio and connecting to HANA DB
    windows_bastion_host = {
        'name': 'windows-bastion-host',
        'type': 'instance.py',
        'properties': {
            'zone': properties['primaryZone'],
            'diskImage': 'projects/windows-cloud/global/images/family/windows-2019',
            'machineType': 'n1-standard-1',
            'diskType': 'pd-ssd',
            'networks': [{
                'network': "$(ref.sap-poc-vpc.selfLink)",
                'subnetwork': "$(ref.subnetwork-2.selfLink)",
                'accessConfigs': [{
                    'type': 'ONE_TO_ONE_NAT'
                }]
            }],
            'tags': {
                'items': ['jumpserver']
            }
        }
    }
    #Create a linux bastion host which will be used to connect to HANA DB CLI and run commands.
    linux_bastion_host = {
        'name': 'linux-bastion-host',
        'type': 'instance.py',
        'properties': {
            'zone': properties['primaryZone'],
            'diskImage': 'projects/ubuntu-os-cloud/global/images/family/ubuntu-1804-lts',
            'machineType': 'f1-micro',
            'diskType': 'pd-ssd',
            'networks': [{
                'network': '$(ref.sap-poc-vpc.selfLink)',
                'subnetwork': '$(ref.subnetwork-2.selfLink)',
                'accessConfigs': [{
                    'type': 'ONE_TO_ONE_NAT'
                }]
            }],
            'tags': {
                'items': ['jumpserver']
            }
        }
    }
    # Create necessary Firewall rules to allow connectivity to HANA DB from both bastion hosts.
    firewall_rules = {
        'name': 'firewall-rules',
        'type': 'firewall.py',
        'properties': {
            'network': '$(ref.sap-poc-vpc.selfLink)',
            'rules': [{
                'name': 'allow-ssh-and-rdp',
                'allowed':[ {
                    'IPProtocol': 'tcp',
                    'ports': ['22', '3389']
                }],
                'description': 'Allow SSH and RDP from outside to bastion host.',
                'direction': 'INGRESS',
                'targetTags': ["jumpserver"],
                'sourceRanges': [
                    '0.0.0.0/0'
                ]
            }, {
                'name': 'jumpserver-to-hana',
                'allowed': [{
                    'IPProtocol': 'tcp',
                    'ports': ['22', '30015'] # In general,the port to open is 3 <Instance number> 15 to allow HANA Studio to Connect to HANA DB < -- -- -TODO
                }],
                'description': 'Allow SSH from bastion host to HANA instances',
                'direction': 'INGRESS',
                'targetTags': ["hana-db"],
                'sourceRanges': ['$(ref.subnetwork-2.ipCidrRange)']
            }
            ]
        }
    }
    # Choose the HANA deployment flavour: presence of 'secondaryZone' selects
    # the high-availability template, otherwise a standalone instance.
    sap_hana_resource = {}
    if properties.get('secondaryZone'): # HANA HA deployment
        sap_hana_resource = {
            'name': 'sap_hana',
            "type": 'sap_hana_ha.py',
            'properties': {
                'primaryInstanceName': properties['primaryInstanceName'],
                'secondaryInstanceName': properties['secondaryInstanceName'],
                'primaryZone': properties['primaryZone'],
                'secondaryZone': properties['secondaryZone'],
                'sap_vip': '10.1.0.10', #TO DO: improve this by reserving an internal IP address in advance.
            }
        }
    else:
        sap_hana_resource = { #HANA standalone setup
            'name': 'sap_hana',
            "type": 'sap_hana.py',
            'properties': {
                'instanceName': properties['primaryInstanceName'],
                'zone': properties['primaryZone'],
                'sap_hana_scaleout_nodes': 0
            }
        }
    #Add the rest of "common & manadatory" properties
    sap_hana_resource['properties']['dependsOn'] = ['$(ref.cloud-nat-gateway.selfLink)']
    sap_hana_resource['properties']['instanceType'] = properties['instanceType']
    sap_hana_resource['properties']['subnetwork'] = 'subnetwork-1'
    sap_hana_resource['properties']['linuxImage'] = properties['linuxImage']
    sap_hana_resource['properties']['linuxImageProject'] = properties['linuxImageProject']
    sap_hana_resource['properties']['sap_hana_deployment_bucket'] = properties['sap_hana_deployment_bucket']
    sap_hana_resource['properties']['sap_hana_sid'] = properties['sap_hana_sid']
    sap_hana_resource['properties']['sap_hana_instance_number'] = 00  # NOTE(review): the literal 00 is just int 0; consider writing 0 for clarity
    sap_hana_resource['properties']['sap_hana_sidadm_password'] = properties['sap_hana_sidadm_password']
    sap_hana_resource['properties']['sap_hana_system_password'] = properties['sap_hana_system_password']
    sap_hana_resource['properties']['networkTag'] = 'hana-db'
    sap_hana_resource['properties']['publicIP'] = False
    # Define optional properties.
    optional_properties = [
        'serviceAccount',
        'sap_hana_backup_size',
        'sap_hana_double_volume_size',
        'sap_hana_sidadm_uid',
        'sap_hana_sapsys_gid',
        'sap_deployment_debug',
        'post_deployment_script'
    ]
    # Add optional properties if there are any.
    for prop in optional_properties:
        append_optional_property(sap_hana_resource, properties, prop)
    resources = [network, cloud_router, windows_bastion_host, linux_bastion_host, firewall_rules, sap_hana_resource]
    return { 'resources': resources}
def append_optional_property(resource, properties, prop_name):
    """ If the property is set, it is added to the resource. """
    value = properties.get(prop_name)
    # Falsy values (None, '', 0, ...) are treated as "not set" and skipped.
    if not value:
        return
    resource['properties'][prop_name] = value
# John McDonough
# github - movinalot
# Advent of Code 2015
# Advent of Code 2015 day 3 part 1: Santa follows ^>v< moves on a grid;
# count how many distinct houses receive at least one present.
testing = 0
debug = 0

day = "03"
year = "2015"
part = "1"

answer = None

with open("puzzle_data_" + day + "_" + year + ".txt") as f:
    puzzle_data = f.read()

if testing:
    puzzle_data = ">"
    #puzzle_data = "^>v<"
    #puzzle_data = "^v^v^v^v^v"

if debug:
    print(puzzle_data)

houses = {}  # (x, y) -> number of presents delivered there
x = 0
y = 0

# Each move character maps to a (dx, dy) grid offset; anything else
# (e.g. a trailing newline) is a no-op, matching the original behaviour.
MOVES = {'^': (0, 1), '>': (1, 0), 'v': (0, -1), '<': (-1, 0)}


def update_present_count(x, y):
    """Record one present delivered at house (x, y)."""
    houses[(x, y)] = houses.get((x, y), 0) + 1
    if debug:
        print('(', x, ',', y, '):', houses[(x, y)])


update_present_count(x, y)

# IDIOM: iterate the move characters directly instead of indexing the
# string with range(len(...)).
for move in puzzle_data:
    if debug:
        print(move)
    dx, dy = MOVES.get(move, (0, 0))
    x += dx
    y += dy
    update_present_count(x, y)

answer = len(houses)
print("AoC Day: " + day + " Year: " + year + " part " + part + ", this is the answer:", answer)
|
"""
This module defines function that turns ANSI codes into an escaped string
"""
def encode_ansi(*codes: int) -> str:
    """
    Build the ANSI escape sequence selecting the given display codes.
    >>> encode_ansi(30)
    '\\x1b[30m'
    Multiple codes are joined with semicolons:
    >>> encode_ansi(1, 33)
    '\\x1b[1;33m'
    Signs are discarded; every code is used as its absolute value:
    >>> encode_ansi(-31)
    '\\x1b[31m'
    :param codes: ANSI codes
    :return: ANSI escaped sequence
    """
    body = ";".join(str(abs(code)) for code in codes)
    return "\x1b[" + body + "m"
|
# Demonstration of common str methods (each call returns a new string;
# the original `course` is never modified).
course = 'Python for Beginners'
print(f'Original = {course}')
print(course.upper())
print(course.lower())
print(course.find('P'))  # index of the first 'P' (0 here); -1 if absent
print(course.replace('Beginners', 'Absolute Beginners'))
print('Python' in course)  # membership test, prints a bool
|
"""Internet Relay Chat Protocol numerics"""
RPL_WELCOME = 1
RPL_YOURHOST = 2
RPL_TRACELINK = 200
RPL_TRACECONNECTING = 201
RPL_TRACEHANDSHAKE = 202
RPL_TRACEUNKNOWN = 203
RPL_TRACEOPERATOR = 204
RPL_TRACEUSER = 205
RPL_TRACESERVER = 206
RPL_TRACENEWTYPE = 208
RPL_TRACELOG = 261
RPL_STATSLINKINFO = 211
RPL_STATSCOMMANDS = 212
RPL_STATSCLINE = 213
RPL_STATSNLINE = 214
RPL_STATSILINE = 215
RPL_STATSKLINE = 216
RPL_STATSYLINE = 218
RPL_ENDOFSTATS = 219
RPL_STATSLLINE = 241
RPL_STATSUPTIME = 242
RPL_STATSOLINE = 243
RPL_STATSHLINE = 244
RPL_UMODEIS = 221
RPL_LUSERCLIENT = 251
RPL_LUSEROP = 252
RPL_LUSERUNKNOWN = 253
RPL_LUSERCHANNELS = 254
RPL_LUSERME = 255
RPL_ADMINME = 256
RPL_ADMINLOC1 = 257
RPL_ADMINLOC2 = 258
RPL_ADMINEMAIL = 259
RPL_NONE = 300
RPL_USERHOST = 302
RPL_ISON = 303
RPL_AWAY = 301
RPL_UNAWAY = 305
RPL_NOWAWAY = 306
RPL_WHOISUSER = 311
RPL_WHOISSERVER = 312
RPL_WHOISOPERATOR = 313
RPL_WHOISIDLE = 317
RPL_ENDOFWHOIS = 318
RPL_WHOISCHANNELS = 319
RPL_WHOWASUSER = 314
RPL_ENDOFWHOWAS = 369
RPL_LIST = 322
RPL_LISTEND = 323
RPL_CHANNELMODEIS = 324
RPL_NOTOPIC = 331
RPL_TOPIC = 332
RPL_INVITING = 341
RPL_SUMMONING = 342
RPL_VERSION = 351
RPL_WHOREPLY = 352
RPL_ENDOFWHO = 315
RPL_NAMEREPLY = 353
RPL_ENDOFNAMES = 366
RPL_LINKS = 364
RPL_ENDOFLINKS = 365
RPL_BANLIST = 367
RPL_ENDOFBANLIST = 368
RPL_INFO = 371
RPL_ENDOFINFO = 374
RPL_MOTDSTART = 375
RPL_MOTD = 372
RPL_ENDOFMOTD = 376
RPL_YOUREOPER = 381
RPL_REHASHING = 382
RPL_TIME = 391
RPL_USERSSTART = 392
RPL_USERS = 393
RPL_ENDOFUSERS = 394
RPL_NOUSERS = 395
ERR_NOSUCHNICK = 401
ERR_NOSUCHSERVER = 402
ERR_NOSUCHCHANNEL = 403
ERR_CANNOTSENDTOCHAN = 404
ERR_TOOMANYCHANNELS = 405
ERR_WASNOSUCHNICK = 406
ERR_TOOMANYTARGETS = 407
ERR_NOORIGIN = 409
ERR_NORECIPIENT = 411
ERR_NOTEXTTOSEND = 412
ERR_NOTOPLEVEL = 413
ERR_WILDTOPLEVEL = 414
ERR_UNKNOWNCOMMAND = 421
ERR_NOMOTD = 422
ERR_NOADMININFO = 423
ERR_FILEERROR = 424
ERR_NONICKNAMEGIVEN = 431
ERR_ERRONEUSNICKNAME = 432
ERR_NICKNAMEINUSE = 433
ERR_NICKCOLLISION = 436
ERR_NOTONCHANNEL = 442
ERR_USERONCHANNEL = 443
ERR_NOLOGIN = 444
ERR_SUMMONDISABLED = 445
ERR_USERSDISABLED = 446
ERR_NOTREGISTERED = 451
ERR_NEEDMOREPARAMS = 461
ERR_ALREADYREGISTRED = 462
ERR_PASSWDMISMATCH = 464
ERR_YOUREBANNEDCREEP = 465
ERR_KEYSET = 467
ERR_CHANNELISFULL = 471
ERR_UNKNOWNMODE = 472
ERR_INVITEONLYCHAN = 473
ERR_BANNEDFROMCHAN = 474
ERR_BADCHANNELKEY = 475
ERR_NOPRIVILEGES = 481
ERR_CHANOPRIVSNEEDED = 482
ERR_CANTKILLSERVER = 483
ERR_NOOPERHOST = 491
ERR_UMODEUNKNOWNFLAG = 501
ERR_USERSDONTMATCH = 502
|
'''
TACO: Multi-sample transcriptome assembly from RNA-Seq
'''
# Package metadata.
__author__ = "Matthew Iyer, Yashar Niknafs, and Balaji Pandian"
__copyright__ = "Copyright 2012-2018"
__credits__ = ["Matthew Iyer", "Yashar Niknafs", "Balaji Pandian"]
__license__ = "MIT"
__version__ = "0.7.3"
__maintainer__ = "Yashar Niknafs"
__email__ = "yniknafs@umich.edu"
__status__ = "Development"
def single_node_shortest_path_length(node, nbrs):
    '''
    Breadth-first search returning {vertex: hop distance from `node`}.
    Adapted from NetworkX 1.10 source code (networkx.github.io)
    node: source or sink
    nbrs: array of node neighbors (either succs or preds) indexed by node
    '''
    distances = {}
    depth = 0
    frontier = set([node])
    while frontier:
        # Consume the current BFS level and collect the next one.
        current_level = frontier
        frontier = set()
        for vertex in current_level:
            if vertex in distances:
                continue
            distances[vertex] = depth
            frontier.update(nbrs[vertex])
        depth += 1
    return distances
def _plain_bfs(succs, preds, source):
"""
Adapted from Networkx 1.10
A fast BFS node generator
"""
seen = set()
nextlevel = {source}
while nextlevel:
thislevel = nextlevel
nextlevel = set()
for v in thislevel:
if v not in seen:
yield v
seen.add(v)
nextlevel.update(succs[v])
nextlevel.update(preds[v])
class Graph(object):
    """Directed graph specialized for transcript path assembly.

    Nodes are arbitrary hashable objects mapped to dense integer ids;
    adjacency is kept in parallel lists of successor/predecessor id sets
    indexed by node id. Two sentinel nodes, SOURCE and SINK, always occupy
    ids 0 and 1. Removing a node masks its slot with EMPTY instead of
    compacting the arrays, so ids stay stable and are never reused.

    Fix: replaced Python-2-only ``xrange`` with ``range`` (equivalent for
    these iteration-only uses, and required for Python 3).
    """
    # Sentinel node objects.
    SOURCE = -1
    SINK = -2
    # Marker stored in self.nodes for a removed (masked) slot.
    EMPTY = -3
    # Fixed ids for the sentinels; user nodes start at FIRST_ID.
    SOURCE_ID = 0
    SINK_ID = 1
    FIRST_ID = 2

    def __init__(self):
        # node object -> integer id
        self.node_id_map = {Graph.SOURCE: Graph.SOURCE_ID,
                            Graph.SINK: Graph.SINK_ID}
        # id -> node object (Graph.EMPTY marks a removed slot)
        self.nodes = [Graph.SOURCE, Graph.SINK]
        self.succs = [set(), set()]
        self.preds = [set(), set()]
        # number of live user nodes (the two sentinels are not counted)
        self.n = 0

    def __len__(self):
        return self.n

    def __contains__(self, item):
        return item in self.node_id_map

    def nodes_iter(self, source=False, sink=False):
        """Yield live node objects; include sentinels only if requested."""
        if source:
            yield Graph.SOURCE
        if sink:
            yield Graph.SINK
        for i in range(Graph.FIRST_ID, len(self.nodes)):
            if self.nodes[i] == Graph.EMPTY:
                continue
            yield self.nodes[i]

    def node_ids_iter(self, source=False, sink=False):
        """Yield live node ids; include sentinel ids only if requested."""
        if source:
            yield Graph.SOURCE_ID
        if sink:
            yield Graph.SINK_ID
        for i in range(Graph.FIRST_ID, len(self.nodes)):
            if self.nodes[i] == Graph.EMPTY:
                continue
            yield i

    def edges_iter(self):
        """Yield (u_id, v_id) for every edge leaving a live node."""
        for i in range(len(self.nodes)):
            if self.nodes[i] == Graph.EMPTY:
                continue
            for j in self.succs[i]:
                yield i, j

    def has_node(self, node):
        return node in self.node_id_map

    def get_node(self, node_id):
        return self.nodes[node_id]

    def get_node_id(self, node):
        return self.node_id_map[node]

    def add_node(self, node):
        """Add `node` if not present; return its id either way."""
        if node not in self.node_id_map:
            node_id = len(self.nodes)
            self.node_id_map[node] = node_id
            self.nodes.append(node)
            self.succs.append(set())
            self.preds.append(set())
            self.n += 1
        else:
            node_id = self.node_id_map[node]
        return node_id

    def add_path(self, nodes):
        """Add all nodes and consecutive edges of `nodes`; return their ids."""
        if len(nodes) == 0:
            return []
        # add edges in path
        u = self.add_node(nodes[0])
        node_ids = [u]
        for i in range(1, len(nodes)):
            v = self.add_node(nodes[i])
            node_ids.append(v)
            self.succs[u].add(v)
            self.preds[v].add(u)
            u = v
        return node_ids

    def remove_node_id(self, node_id):
        """Mask a node slot and detach it from all neighbors.

        The SOURCE/SINK sentinels can never be removed (silent no-op).
        """
        if node_id == Graph.SOURCE_ID or node_id == Graph.SINK_ID:
            return
        # for each successor, remove from predecessors
        for succ in self.succs[node_id]:
            self.preds[succ].remove(node_id)
        # for each predecessor, remove from successors
        for pred in self.preds[node_id]:
            self.succs[pred].remove(node_id)
        # ids are never reused, so the removed node's own adjacency sets are
        # dead; clear them to release the references (memory fix)
        self.succs[node_id].clear()
        self.preds[node_id].clear()
        # remove from node id map
        del self.node_id_map[self.nodes[node_id]]
        # mask
        self.nodes[node_id] = Graph.EMPTY
        self.n -= 1

    def get_unreachable_nodes(self):
        '''find unreachable kmers from source or sink'''
        allnodes = set(self.node_id_map.values())
        # unreachable from source
        a = set(single_node_shortest_path_length(Graph.SOURCE_ID, self.succs))
        a = allnodes - a
        # unreachable from sink
        b = set(single_node_shortest_path_length(Graph.SINK_ID, self.preds))
        b = allnodes - b
        return a | b

    def remove_unreachable_nodes(self):
        '''
        mask nodes that are unreachable from the source or sink, these occur
        due to fragmentation when k > 1; returns the number of removed nodes
        '''
        unreachable = self.get_unreachable_nodes()
        for i in unreachable:
            self.remove_node_id(i)
        return len(unreachable)

    def is_valid(self):
        '''
        Return True iff a path exists from SOURCE to SINK.
        Adapted from NetworkX 1.10 bidirectional_shortest_path
        '''
        if self.nodes[Graph.SOURCE_ID] == Graph.EMPTY:
            return False
        if self.nodes[Graph.SINK_ID] == Graph.EMPTY:
            return False
        # predecessor and successor maps of the two searches
        pred = {Graph.SOURCE_ID: None}
        succ = {Graph.SINK_ID: None}
        # initialize fringes, start with forward
        forward_fringe = [Graph.SOURCE_ID]
        reverse_fringe = [Graph.SINK_ID]
        while forward_fringe and reverse_fringe:
            # always expand the smaller frontier
            if len(forward_fringe) <= len(reverse_fringe):
                this_level = forward_fringe
                forward_fringe = []
                for v in this_level:
                    for w in self.succs[v]:
                        if w not in pred:
                            forward_fringe.append(w)
                            pred[w] = v
                        if w in succ:
                            return True  # searches met: path found
            else:
                this_level = reverse_fringe
                reverse_fringe = []
                for v in this_level:
                    for w in self.preds[v]:
                        if w not in succ:
                            succ[w] = v
                            reverse_fringe.append(w)
                        if w in pred:
                            return True  # searches met: path found
        return False

    def topological_sort_kahn(self):
        '''
        Kahn's algorithm; returns None when the graph contains a cycle.
        Adapted from NetworkX source code (networkx.github.io)
        networkx.algorithms.dag.topological_sort
        '''
        indegree_map = {}
        zero_indegree = []
        for i in range(len(self.preds)):
            if self.nodes[i] == Graph.EMPTY:
                continue
            indegree = len(self.preds[i])
            if indegree > 0:
                indegree_map[i] = indegree
            else:
                zero_indegree.append(i)
        nodes = []
        while zero_indegree:
            i = zero_indegree.pop()
            for child in self.succs[i]:
                indegree_map[child] -= 1
                if indegree_map[child] == 0:
                    zero_indegree.append(child)
                    del indegree_map[child]
            nodes.append(i)
        # leftover in-degrees mean a cycle was never drained
        if indegree_map:
            return None
        return nodes

    def topological_sort_dfs(self):
        """
        Iterative depth-first post-order topological sort.
        Adapted from NetworkX source code (networkx.github.io)
        networkx.algorithms.dag.topological_sort
        NOTE(review): unlike NetworkX, this adaptation has no cycle check;
        on a cyclic graph it returns an order without raising.
        """
        # nonrecursive version
        order = []
        explored = set()
        for i in range(len(self.nodes)):
            if self.nodes[i] == Graph.EMPTY:
                continue
            if i in explored:
                continue
            fringe = [i]  # nodes yet to look at
            while fringe:
                j = fringe[-1]  # depth first search
                if j in explored:  # already looked down this branch
                    fringe.pop()
                    continue
                # successors not yet fully explored
                new_nodes = [n for n in self.succs[j] if n not in explored]
                if new_nodes:  # add new_nodes to fringe
                    fringe.extend(new_nodes)
                else:  # no new nodes so j is fully explored
                    explored.add(j)
                    order.append(j)
                    fringe.pop()  # done considering this node
        order.reverse()
        return order

    def topological_sort(self):
        """Default topological sort (DFS variant)."""
        return self.topological_sort_dfs()

    def is_topological_sort(self, order):
        """Return True iff `order` is a valid topological order of all nodes."""
        order_indexes = dict((x, i) for i, x in enumerate(order))
        # order must contain exactly the live node ids
        a = set(self.node_id_map.values()).symmetric_difference(order_indexes)
        if len(a) != 0:
            return False
        # every edge must point forward in the order
        for u, v in self.edges_iter():
            ui = order_indexes[u]
            vi = order_indexes[v]
            if ui > vi:
                return False
        return True

    def weakly_connected_components(self):
        """
        Adapted from NetworkX 1.10 (networkx.algorithms.components)
        Returns (num_components, components) where components maps each node
        id to the index of its weakly connected component (-1 for masked ids).
        """
        components = [-1] * len(self.nodes)
        num_components = 0
        seen = set()
        for v in self.node_ids_iter():
            if v not in seen:
                c = set(_plain_bfs(self.succs, self.preds, v))
                for i in c:
                    components[i] = num_components
                num_components += 1
                seen.update(c)
        return num_components, components
|
class Event:
    """Minimal publish/subscribe event.

    Listeners are held in a set, so subscribing the same callable twice
    registers it only once; dispatch order is therefore unspecified.
    """

    def __init__(self):
        # Registered callbacks invoked on every fire.
        self.listeners = set()

    def __call__(self, *args, **kwargs):
        """Fire the event, forwarding all arguments to every listener."""
        for callback in self.listeners:
            callback(*args, **kwargs)

    def subscribe(self, listener):
        """Register `listener` to be invoked whenever the event fires."""
        self.listeners.add(listener)
|
# Substrings to strip from article byline fields so that only the author
# name remains. Currently unused; to be tested and further refined prior to
# final csv handover. NOTE(review): order may matter if these are applied as
# sequential replacements -- do not reorder without testing.
byline_replacementlist = [
"Exclusive ",
" And ",
# job titles and desk names
"National",
"Correspondent",
"Political",
"Health " ,
"Political",
"Education" ,
"Commentator",
"Regional",
"Agencies",
"Defence",
"Fashion",
"Music",
"Social Issues",
"Reporter",
"Chief",
"Business",
"Workplace",
"Editor",
"Indulge",
"Science",
"Sunday",
"Saturdays",
"Writer",
"Food ",
"Dr ",
"Professor ",
# city and bureau names
"Las Vegas",
"Melbourne",
"Canberra",
"Brisbane",
"Sydney",
"Perth",
"Adelaide",
"Chicago",
"Daily Mail, London" ,
"London Correspondent",
"London Daily Mail",
"Buenos Aires" ,
"New York",
"New Delhi",
"London",
", Washington",
"Beijing",
"Health",
# miscellaneous boilerplate and punctuation
"State ",
"Council On The Ageing",
"Words ",
"Text",
"By ",
"With ",
" In ",
" - ",
":",
"Proprietor Realise Personal Training Company",
"B+S Nutritionist",
"My Week",
"Medical",
"Manufacturing",
"Brought To You",
"Film ",
"Reviews ",
"Comment",
"Personal",
"Finance",
"Natural ",
"Solutions ",
"Special ",
"Report ",
"Recipe ",
"Photography ",
"Photo",
"-At-Large",
"Styling ",
"Preparation ",
# one-off author blurbs seen in the data
" - Ian Rose Is A Melbourne Writer." ,
"Cycling Promotion Fund Policy Adviser ",
". Dannielle Miller Is The Head Of Enlighten Education. Enlighten Works With Teenage Girls In High Schools On Developing A Positive Self-Esteem And Healthy Body Image." ]
"""
Model attributes, from https://github.com/kohpangwei/group_DRO/blob/master/models.py
Used for: Waterbirds
"""
model_attributes = {
'bert': {
'feature_type': 'text'
},
'inception_v3': {
'feature_type': 'image',
'target_resolution': (299, 299),
'flatten': False
},
'wideresnet50': {
'feature_type': 'image',
'target_resolution': (224, 224),
'flatten': False
},
'resnet50': {
'feature_type': 'image',
'target_resolution': (224, 224),
'flatten': False
},
'resnet34': {
'feature_type': 'image',
'target_resolution': None,
'flatten': False
},
'raw_logistic_regression': {
'feature_type': 'image',
'target_resolution': None,
'flatten': True,
}
} |
# Count the passwords whose policy character appears between the policy's
# min and max occurrences (inclusive). Input lines look like "1-3 a: abcde".
# Fixes: the file handle was never closed (use `with`), and the policy
# bounds shadowed the builtins `min`/`max`.
with open("day_two_input.txt", 'r') as file:
    # compile to list of lines without trailing newlines
    raw_data = file.read().splitlines()
valid_count = 0
# iterate over all lines
for line in raw_data:
    line_data = line.split()
    # policy bounds, e.g. "1-3" -> lowest/highest allowed count
    required_value_list = line_data[0].split('-')
    lowest = int(required_value_list[0])
    highest = int(required_value_list[1])
    # the character the policy applies to, e.g. "a:" -> "a"
    character = line_data[1].split(':')[0]
    # the password itself
    password = line_data[2]
    # count required character in password
    password_character_count = password.count(character)
    # valid when the count falls within the inclusive policy range
    if lowest <= password_character_count <= highest:
        valid_count += 1
print("Valid Password Count: " + str(valid_count))
|
class DispatchActionConfiguration(object):
    """Slack Dispatch Action Configuration composition object builder.
    For more information, see the following URL:
    https://api.slack.com/reference/block-kit/composition-objects#dispatch_action_config"""
    ON_ENTER_PRESSED = 'on_enter_pressed'
    ON_CHARACTER_ENTERED = 'on_character_entered'
    ALL_DISPATCH_ACTION_CONFIGURATIONS = [
        ON_ENTER_PRESSED,
        ON_CHARACTER_ENTERED,
    ]
    def __init__(self, trigger_actions_on):
        """Validate and store the interaction types that dispatch an action.

        :param trigger_actions_on: one of the strings in
            ALL_DISPATCH_ACTION_CONFIGURATIONS, or a list of one or both.
        :raises ValueError: if the list is empty, too long, or contains an
            unknown action string.
        """
        if not isinstance(trigger_actions_on, list):
            trigger_actions_on = [trigger_actions_on]
        # Bug fix: class-body names are not visible as bare names inside
        # methods, so the original unqualified references raised NameError.
        allowed = DispatchActionConfiguration.ALL_DISPATCH_ACTION_CONFIGURATIONS
        if (
            len(trigger_actions_on) < 1 or
            len(trigger_actions_on) > len(allowed)
        ):
            raise ValueError(
                'trigger_actions_on should be one or both of: '
                f'{DispatchActionConfiguration.ON_ENTER_PRESSED}, '
                f'{DispatchActionConfiguration.ON_CHARACTER_ENTERED}'
            )
        for action in trigger_actions_on:
            if action not in allowed:
                raise ValueError(
                    'trigger_actions_on should be one or both of: '
                    f'{DispatchActionConfiguration.ON_ENTER_PRESSED}, '
                    f'{DispatchActionConfiguration.ON_CHARACTER_ENTERED}'
                )
        # Bug fix: object.__init__() rejects keyword arguments, so forwarding
        # trigger_actions_on to super() raised TypeError. Store it instead.
        # NOTE(review): the original super() call suggests a cooperative base
        # class may exist elsewhere in the project -- confirm before merging.
        super(DispatchActionConfiguration, self).__init__()
        self.trigger_actions_on = trigger_actions_on
|
# Package metadata.
__title__ = 'SQL Python for Deep Learning'
__version__ = '0.1'
__author__ = 'Miguel Gonzalez-Fierro'
__license__ = 'MIT license'
# Synonyms kept as plain-name aliases of the dunder values.
VERSION = __version__
LICENSE = __license__
|
#
# Complete the 'flippingMatrix' function below.
#
# The function is expected to return an INTEGER.
# The function accepts 2D_INTEGER_ARRAY matrix as parameter.
#
def flippingMatrix(matrix):
    """Maximal upper-left-quadrant sum achievable by reversing rows/columns.

    For a 2n x 2n matrix, each upper-left cell (r, c) can be exchanged,
    via row/column reversals, with exactly three mirror cells; the best of
    the four values is always attainable independently per cell.
    """
    last = len(matrix) - 1
    half = len(matrix) // 2
    return sum(
        max(
            matrix[r][c],
            matrix[r][last - c],
            matrix[last - r][c],
            matrix[last - r][last - c],
        )
        for r in range(half)
        for c in range(half)
    )
|
#
# PySNMP MIB module SIAE-UNITYPE-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file://./sm_unitype.mib
# Produced by pysmi-0.3.2 at Fri Jul 19 08:22:05 2019
# On host 0e190c6811ee platform Linux version 4.9.125-linuxkit by user root
# Using Python version 3.7.3 (default, Apr 3 2019, 05:39:12)
#
OctetString, ObjectIdentifier, Integer = mibBuilder.importSymbols("ASN1", "OctetString", "ObjectIdentifier", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsIntersection, ValueSizeConstraint, ValueRangeConstraint, SingleValueConstraint, ConstraintsUnion = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsIntersection", "ValueSizeConstraint", "ValueRangeConstraint", "SingleValueConstraint", "ConstraintsUnion")
siaeMib, = mibBuilder.importSymbols("SIAE-TREE-MIB", "siaeMib")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
MibIdentifier, Counter32, Gauge32, IpAddress, ObjectIdentity, ModuleIdentity, TimeTicks, iso, NotificationType, Integer32, MibScalar, MibTable, MibTableRow, MibTableColumn, Bits, Counter64, Unsigned32 = mibBuilder.importSymbols("SNMPv2-SMI", "MibIdentifier", "Counter32", "Gauge32", "IpAddress", "ObjectIdentity", "ModuleIdentity", "TimeTicks", "iso", "NotificationType", "Integer32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Bits", "Counter64", "Unsigned32")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
# Auto-generated by pysmi from sm_unitype.mib -- prefer regenerating over
# hand-editing. Registers the SIAE unit-type object identities and exports
# them through mibBuilder (assumed to be provided by the pysnmp loader).
unitTypeMib = ModuleIdentity((1, 3, 6, 1, 4, 1, 3373, 1103, 506))
unitTypeMib.setRevisions(('2015-03-04 00:00', '2014-12-01 00:00', '2014-03-19 00:00', '2014-02-07 00:00', '2013-04-16 00:00',))
if mibBuilder.loadTexts: unitTypeMib.setLastUpdated('201503040000Z')
if mibBuilder.loadTexts: unitTypeMib.setOrganization('SIAE MICROELETTRONICA spa')
# Root of the unit-type subtree and the basic unit kinds.
unitType = MibIdentifier((1, 3, 6, 1, 4, 1, 3373, 1103, 6, 3))
unitTypeUnequipped = ObjectIdentity((1, 3, 6, 1, 4, 1, 3373, 1103, 6, 3, 1))
if mibBuilder.loadTexts: unitTypeUnequipped.setStatus('current')
unitTypeODU = ObjectIdentity((1, 3, 6, 1, 4, 1, 3373, 1103, 6, 3, 5))
if mibBuilder.loadTexts: unitTypeODU.setStatus('current')
# ALFO80HD family (arcs 200-203).
unitTypeALFO80HD = ObjectIdentity((1, 3, 6, 1, 4, 1, 3373, 1103, 6, 3, 200))
if mibBuilder.loadTexts: unitTypeALFO80HD.setStatus('current')
unitTypeALFO80HDelectrical = ObjectIdentity((1, 3, 6, 1, 4, 1, 3373, 1103, 6, 3, 201))
if mibBuilder.loadTexts: unitTypeALFO80HDelectrical.setStatus('current')
unitTypeALFO80HDelectricalOptical = ObjectIdentity((1, 3, 6, 1, 4, 1, 3373, 1103, 6, 3, 202))
if mibBuilder.loadTexts: unitTypeALFO80HDelectricalOptical.setStatus('current')
unitTypeALFO80HDoptical = ObjectIdentity((1, 3, 6, 1, 4, 1, 3373, 1103, 6, 3, 203))
if mibBuilder.loadTexts: unitTypeALFO80HDoptical.setStatus('current')
# AGS20 family (arcs 210-225).
unitTypeAGS20ARI1 = ObjectIdentity((1, 3, 6, 1, 4, 1, 3373, 1103, 6, 3, 210))
if mibBuilder.loadTexts: unitTypeAGS20ARI1.setStatus('current')
unitTypeAGS20ARI2 = ObjectIdentity((1, 3, 6, 1, 4, 1, 3373, 1103, 6, 3, 211))
if mibBuilder.loadTexts: unitTypeAGS20ARI2.setStatus('current')
unitTypeAGS20ARI4 = ObjectIdentity((1, 3, 6, 1, 4, 1, 3373, 1103, 6, 3, 212))
if mibBuilder.loadTexts: unitTypeAGS20ARI4.setStatus('current')
unitTypeAGS20DRI4 = ObjectIdentity((1, 3, 6, 1, 4, 1, 3373, 1103, 6, 3, 213))
if mibBuilder.loadTexts: unitTypeAGS20DRI4.setStatus('current')
unitTypeAGS20ARI1TDM2 = ObjectIdentity((1, 3, 6, 1, 4, 1, 3373, 1103, 6, 3, 214))
if mibBuilder.loadTexts: unitTypeAGS20ARI1TDM2.setStatus('current')
unitTypeAGS20ARI1TDM3 = ObjectIdentity((1, 3, 6, 1, 4, 1, 3373, 1103, 6, 3, 215))
if mibBuilder.loadTexts: unitTypeAGS20ARI1TDM3.setStatus('current')
unitTypeAGS20ARI2TDM2 = ObjectIdentity((1, 3, 6, 1, 4, 1, 3373, 1103, 6, 3, 216))
if mibBuilder.loadTexts: unitTypeAGS20ARI2TDM2.setStatus('current')
unitTypeAGS20ARI2TDM3 = ObjectIdentity((1, 3, 6, 1, 4, 1, 3373, 1103, 6, 3, 217))
if mibBuilder.loadTexts: unitTypeAGS20ARI2TDM3.setStatus('current')
unitTypeAGS20ARI4TDM2 = ObjectIdentity((1, 3, 6, 1, 4, 1, 3373, 1103, 6, 3, 218))
if mibBuilder.loadTexts: unitTypeAGS20ARI4TDM2.setStatus('current')
unitTypeAGS20ARI4TDM3 = ObjectIdentity((1, 3, 6, 1, 4, 1, 3373, 1103, 6, 3, 219))
if mibBuilder.loadTexts: unitTypeAGS20ARI4TDM3.setStatus('current')
unitTypeAGS20DRI4TDM2 = ObjectIdentity((1, 3, 6, 1, 4, 1, 3373, 1103, 6, 3, 220))
if mibBuilder.loadTexts: unitTypeAGS20DRI4TDM2.setStatus('current')
unitTypeAGS20DRI4TDM3 = ObjectIdentity((1, 3, 6, 1, 4, 1, 3373, 1103, 6, 3, 221))
if mibBuilder.loadTexts: unitTypeAGS20DRI4TDM3.setStatus('current')
unitTypeAGS20CORE = ObjectIdentity((1, 3, 6, 1, 4, 1, 3373, 1103, 6, 3, 222))
if mibBuilder.loadTexts: unitTypeAGS20CORE.setStatus('current')
unitTypeAGS20ARI1DP = ObjectIdentity((1, 3, 6, 1, 4, 1, 3373, 1103, 6, 3, 223))
if mibBuilder.loadTexts: unitTypeAGS20ARI1DP.setStatus('current')
unitTypeAGS20ARI1TDM2DP = ObjectIdentity((1, 3, 6, 1, 4, 1, 3373, 1103, 6, 3, 224))
if mibBuilder.loadTexts: unitTypeAGS20ARI1TDM2DP.setStatus('current')
unitTypeAGS20ARI1TDM3DP = ObjectIdentity((1, 3, 6, 1, 4, 1, 3373, 1103, 6, 3, 225))
if mibBuilder.loadTexts: unitTypeAGS20ARI1TDM3DP.setStatus('current')
# ALFOplus2 and AGS20 ODU (arcs 230-231).
unitTypeALFOplus2 = ObjectIdentity((1, 3, 6, 1, 4, 1, 3373, 1103, 6, 3, 230))
if mibBuilder.loadTexts: unitTypeALFOplus2.setStatus('current')
unitTypeAGS20ODU = ObjectIdentity((1, 3, 6, 1, 4, 1, 3373, 1103, 6, 3, 231))
if mibBuilder.loadTexts: unitTypeAGS20ODU.setStatus('current')
# Export all identities so other MIB modules can import them by name.
mibBuilder.exportSymbols("SIAE-UNITYPE-MIB", unitTypeALFO80HDelectrical=unitTypeALFO80HDelectrical, unitTypeAGS20ARI1TDM2=unitTypeAGS20ARI1TDM2, unitTypeAGS20ARI4=unitTypeAGS20ARI4, unitTypeAGS20DRI4TDM3=unitTypeAGS20DRI4TDM3, unitTypeAGS20ARI2TDM3=unitTypeAGS20ARI2TDM3, PYSNMP_MODULE_ID=unitTypeMib, unitTypeAGS20ARI2=unitTypeAGS20ARI2, unitTypeMib=unitTypeMib, unitTypeALFO80HD=unitTypeALFO80HD, unitTypeALFOplus2=unitTypeALFOplus2, unitTypeAGS20DRI4TDM2=unitTypeAGS20DRI4TDM2, unitTypeAGS20ODU=unitTypeAGS20ODU, unitTypeAGS20ARI1=unitTypeAGS20ARI1, unitType=unitType, unitTypeAGS20ARI4TDM3=unitTypeAGS20ARI4TDM3, unitTypeAGS20DRI4=unitTypeAGS20DRI4, unitTypeAGS20ARI1DP=unitTypeAGS20ARI1DP, unitTypeAGS20ARI1TDM2DP=unitTypeAGS20ARI1TDM2DP, unitTypeAGS20ARI1TDM3=unitTypeAGS20ARI1TDM3, unitTypeAGS20ARI2TDM2=unitTypeAGS20ARI2TDM2, unitTypeODU=unitTypeODU, unitTypeALFO80HDelectricalOptical=unitTypeALFO80HDelectricalOptical, unitTypeAGS20ARI4TDM2=unitTypeAGS20ARI4TDM2, unitTypeUnequipped=unitTypeUnequipped, unitTypeAGS20ARI1TDM3DP=unitTypeAGS20ARI1TDM3DP, unitTypeALFO80HDoptical=unitTypeALFO80HDoptical, unitTypeAGS20CORE=unitTypeAGS20CORE)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.