content stringlengths 7 1.05M |
|---|
def calculate_pi(n_terms: int) -> float:
    """Approximate pi with the Leibniz series 4/1 - 4/3 + 4/5 - ...

    :param n_terms: number of series terms to sum (0 gives 0.0).
    :return: the partial-sum approximation of pi.
    """
    total = 0.0
    sign = 1.0      # alternates +1 / -1 each term
    denom = 1.0     # odd denominators 1, 3, 5, ...
    for _ in range(n_terms):
        total += sign * (4.0 / denom)
        denom += 2.0
        sign = -sign
    return total
if __name__ == "__main__":
    # Demo: print a 100000-term approximation of pi.
    print(calculate_pi(100000))
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Author: Anne Philipp (University of Vienna)
@Date: March 2018
@License:
(C) Copyright 2014 UIO.
This software is licensed under the terms of the Apache Licence Version 2.0
which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
""" |
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
    def addTwoNumbers(self, l1, l2):
        """Add two non-negative numbers stored as linked lists of digits
        in reverse order; return the sum as a new list in the same form.
        """
        # Dummy head simplifies appending; real answer starts at dummy.next.
        dummy = tail = ListNode(0)
        carry = 0
        # Phase 1: both lists still have digits.
        while l1 and l2:
            carry, digit = divmod(l1.val + l2.val + carry, 10)
            tail.next = ListNode(digit)
            tail = tail.next
            l1, l2 = l1.next, l2.next
        # Phase 2: drain whichever list (if any) is longer.
        rest = l1 or l2
        while rest:
            carry, digit = divmod(rest.val + carry, 10)
            tail.next = ListNode(digit)
            tail = tail.next
            rest = rest.next
        # A leftover carry becomes one final high-order digit.
        if carry != 0:
            tail.next = ListNode(carry)
        return dummy.next
|
class Node:
    """A doubly linked list node: payload plus prev/next links."""

    def __init__(self, data):
        self.data = data   # payload value
        self.next = None   # successor node (None at the tail)
        self.prev = None   # predecessor node (None at the head)
def deleteNode(head_ref, del_):
    """Unlink node `del_` from the doubly linked list starting at
    `head_ref` and return the (possibly new) head.

    Returns None when either argument is None.
    """
    if head_ref is None or del_ is None:
        return
    # Deleting the head node moves the head pointer forward.
    if head_ref == del_:
        head_ref = del_.next
    # Splice the node out by rewiring its neighbours to each other.
    if del_.next is not None:
        del_.next.prev = del_.prev
    if del_.prev is not None:
        del_.prev.next = del_.next
    return head_ref
def deleteNodeAtGivenPos(head_ref, n):
    """Delete the node at 1-based position `n` and return the new head.

    Returns None when the list is empty, n <= 0, or n exceeds the length.
    """
    if head_ref is None or n <= 0:
        return
    # Walk to the n-th node.
    current = head_ref
    i = 1
    while current is not None and i < n:
        current = current.next
        i += 1
    if current is None:
        return
    # BUG FIX: capture deleteNode's return value. The original discarded
    # it, so deleting position 1 returned a stale head (deleteNode's
    # rebinding of head_ref is local to that function).
    head_ref = deleteNode(head_ref, current)
    return head_ref
def push(head_ref, new_data):
    """Prepend a node carrying `new_data` and return the new head."""
    new_node = Node(0)
    new_node.data = new_data
    new_node.prev = None
    # The old head becomes the second node.
    new_node.next = head_ref
    if head_ref is not None:
        head_ref.prev = new_node
    return new_node
def printList(head):
    """Print each node's payload, space-separated, on one line."""
    node = head
    while node is not None:
        print(node.data, end=" ")
        node = node.next
# Build the list 8 -> 4 -> 3 -> 12 -> 6 by pushing each value at the front.
head = None
for value in (6, 12, 4, 3, 8):
    head = push(head, value)
print("Doubly linked list before deletion:")
printList(head)
# Delete the node at position 2 (the 4).
n = 2
head = deleteNodeAtGivenPos(head, n)
print("\nDoubly linked list after deletion:")
printList(head)
|
# Window functions with NLP data.
# NOTE(review): `spark` (SparkSession), `clauses_df`, and `text_df` are assumed
# to be defined earlier / elsewhere, and `split` / `explode` imported from
# pyspark.sql.functions -- confirm against the full notebook.
# A. Load the data
# 1. Load the dataframe of pre-split sentences
df = spark.read.load('sherlock_sentences.parquet')
# Filter and show the first 5 rows
df.where('id > 70').show(5, truncate=False)
# 2. Split and explode text
# Split the clause column into a column called words (array of strings)
split_df = clauses_df.select(split('clause', ' ').alias('words'))
split_df.show(5, truncate=False)
# Explode the words column into a column called word (one row per word)
exploded_df = split_df.select(explode('words').alias('word'))
exploded_df.show(10)
# Count the resulting number of rows in exploded_df
print("\nNumber of rows: ", exploded_df.count())
# B. Moving window analysis
# 1. Creating context window features
# For each row: the word, the previous two, and the subsequent two words.
# LAG/LEAD look backward/forward within each partition ordered by id.
query = """
SELECT
part,
LAG(word, 2) OVER(PARTITION BY part ORDER BY id) AS w1,
LAG(word, 1) OVER(PARTITION BY part ORDER BY id) AS w2,
word AS w3,
LEAD(word, 1) OVER(PARTITION BY part ORDER BY id) AS w4,
LEAD(word, 2) OVER(PARTITION BY part ORDER BY id) AS w5
FROM text
"""
spark.sql(query).where("part = 12").show(10)
# 2. Repartition the data. Ensures that the data for each chapter is contained on the same node (machine)
# Repartition text_df into 12 partitions on 'chapter' column
repart_df = text_df.repartition(12, 'chapter')
# Prove that repart_df has 12 partitions
repart_df.rdd.getNumPartitions()
# C. Finding common word sequences
# 1. Find the top 10 sequences of five words (5-grams) by frequency
query = """
SELECT w1, w2, w3, w4, w5, COUNT(*) AS count FROM (
SELECT word AS w1,
LEAD(word, 1) OVER(PARTITION BY part ORDER BY id) AS w2,
LEAD(word, 2) OVER(PARTITION BY part ORDER BY id) AS w3,
LEAD(word, 3) OVER(PARTITION BY part ORDER BY id) AS w4,
LEAD(word, 4) OVER(PARTITION BY part ORDER BY id) AS w5
FROM text
)
GROUP BY w1, w2, w3, w4, w5
ORDER BY count DESC
LIMIT 10 """
df = spark.sql(query)
df.show()
# 2. Unique 5-tuples sorted in descending lexicographic order
query = """
SELECT DISTINCT w1, w2, w3, w4, w5 FROM (
SELECT word AS w1,
LEAD(word,1) OVER(PARTITION BY part ORDER BY id ) AS w2,
LEAD(word,2) OVER(PARTITION BY part ORDER BY id ) AS w3,
LEAD(word,3) OVER(PARTITION BY part ORDER BY id ) AS w4,
LEAD(word,4) OVER(PARTITION BY part ORDER BY id ) AS w5
FROM text
)
ORDER BY w1 DESC, w2 DESC, w3 DESC, w4 DESC, w5 DESC
LIMIT 10
"""
df = spark.sql(query)
df.show()
# 3. Most frequent 3-tuples (3-grams) per chapter
subquery = """
SELECT chapter, w1, w2, w3, COUNT(*) as count
FROM
(
SELECT
chapter,
word AS w1,
LEAD(word, 1) OVER(PARTITION BY chapter ORDER BY id ) AS w2,
LEAD(word, 2) OVER(PARTITION BY chapter ORDER BY id ) AS w3
FROM text
)
GROUP BY chapter, w1, w2, w3
ORDER BY chapter, count DESC
"""
# Take the output from the subquery and produce a follow-up query:
# ROW_NUMBER ranks 3-grams per chapter; keeping row = 1 selects each
# chapter's single most frequent 3-gram.
query = """
SELECT chapter, w1, w2, w3, count FROM
(
SELECT
chapter,
ROW_NUMBER() OVER (PARTITION BY chapter ORDER BY count DESC) AS row,
w1, w2, w3, count
FROM ( %s )
)
WHERE row = 1
ORDER BY chapter ASC
""" % subquery
spark.sql(query).show()
|
# Lists
# A list is a built-in Python data type.
# With a list you can store several values in one variable and access them
# via an index.
# Here we create an empty list:
neue_liste = []
# Square brackets create a list.
# Now we fill it by appending several elements (append = anhängen):
neue_liste.append('Das')
neue_liste.append('ist')
neue_liste.append('unser')
neue_liste.append('Text')
print(neue_liste)
# We can access a single element like this (square brackets!):
print(neue_liste[0])
# We can also change an element like this:
neue_liste[2] = 'nur'
print(neue_liste)
# Exercise: print the 3rd element (question: at which number does list indexing start?)
# We can also create a list in one step:
zweite_liste = ['Das', 'ist', 'noch', 'mehr', 'Text']
# Exercise: print the second list.
# We can also put numbers into a list:
zahlen_liste = [1, 2, 3, 4, 5]
# Exercise: add 6 to the 4 in the list.
|
'''
The MIT License (MIT)
Copyright (c) 2016 WavyCloud
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
def add_tags(ResourceArns=None, Tags=None):
    """Add the specified tags to the specified resources.

    You can tag Application Load Balancers and target groups. Each tag is
    a key with an optional value; adding a tag whose key already exists on
    a resource updates that tag's value. See DescribeTags / RemoveTags.
    See also: AWS API Documentation.

    :type ResourceArns: list
    :param ResourceArns: [REQUIRED] The Amazon Resource Names (ARN) of the
        resources. (string) --
    :type Tags: list
    :param Tags: [REQUIRED] The tags; each resource can have a maximum of
        10 tags. (dict) -- Key (string, REQUIRED) and Value (string).
    :rtype: dict
    :return: {}
    """
    pass
def can_paginate(operation_name=None):
    """Check if an operation can be paginated.

    :type operation_name: string
    :param operation_name: The operation name, which is the same as the
        client method name. For example, if the method is create_foo
        (invoked as client.create_foo(**kwargs)) and it can be paginated,
        you can call client.get_paginator('create_foo').
    """
    pass
def create_listener(LoadBalancerArn=None, Protocol=None, Port=None, SslPolicy=None, Certificates=None, DefaultActions=None):
    """Create a listener for the specified Application Load Balancer.

    Up to 10 listeners per load balancer. Update with ModifyListener;
    delete with DeleteListener, or DeleteLoadBalancer to remove the load
    balancer and its listeners together. See also: AWS API Documentation,
    "Listeners for Your Application Load Balancers".

    :type LoadBalancerArn: string
    :param LoadBalancerArn: [REQUIRED] The ARN of the load balancer.
    :type Protocol: string
    :param Protocol: [REQUIRED] 'HTTP'|'HTTPS' -- the protocol for
        connections from clients to the load balancer.
    :type Port: integer
    :param Port: [REQUIRED] The port on which the load balancer listens.
    :type SslPolicy: string
    :param SslPolicy: The security policy defining supported ciphers and
        protocols; defaults to the current predefined security policy.
    :type Certificates: list
    :param Certificates: SSL server certificates, each a dict with
        'CertificateArn' (string). Exactly one is required for HTTPS.
    :type DefaultActions: list
    :param DefaultActions: [REQUIRED] The default actions, each a dict with
        'Type' (string, REQUIRED, e.g. 'forward') and 'TargetGroupArn'
        (string, REQUIRED).
    :rtype: dict
    :return: {'Listeners': [{'ListenerArn', 'LoadBalancerArn', 'Port',
        'Protocol', 'Certificates', 'SslPolicy', 'DefaultActions'}, ...]}
    """
    pass
def create_load_balancer(Name=None, Subnets=None, SecurityGroups=None, Scheme=None, Tags=None, IpAddressType=None):
    """Create an Application Load Balancer.

    Security groups, subnets, IP address type, and tags may be set here or
    later via SetSecurityGroups, SetSubnets, SetIpAddressType, and AddTags.
    Use CreateListener to add listeners, DescribeLoadBalancers to inspect,
    and DeleteLoadBalancer to remove. Up to 20 load balancers per region
    per account (limit increase available). See AWS API Documentation.

    :type Name: string
    :param Name: [REQUIRED] The load balancer name: unique per region per
        account, max 32 alphanumeric/hyphen characters, must not begin or
        end with a hyphen.
    :type Subnets: list
    :param Subnets: [REQUIRED] Subnet IDs to attach; one subnet per
        Availability Zone, at least two Availability Zones. (string) --
    :type SecurityGroups: list
    :param SecurityGroups: Security group IDs to assign. (string) --
    :type Scheme: string
    :param Scheme: 'internet-facing'|'internal'. Internet-facing nodes
        have public IPs and route requests from clients over the Internet;
        internal nodes have only private IPs and route requests only from
        clients with VPC access. Default is Internet-facing.
    :type Tags: list
    :param Tags: One or more tags to assign, each a dict with 'Key'
        (string, REQUIRED) and 'Value' (string).
    :type IpAddressType: string
    :param IpAddressType: 'ipv4'|'dualstack' -- subnet IP address types.
        Internal load balancers must use ipv4.
    :rtype: dict
    :return: {'LoadBalancers': [{'LoadBalancerArn', 'DNSName',
        'CanonicalHostedZoneId', 'CreatedTime', 'LoadBalancerName',
        'Scheme', 'VpcId', 'State', 'Type', 'AvailabilityZones',
        'SecurityGroups', 'IpAddressType'}, ...]}
    :returns:
    (string) --
    """
    pass
def create_rule(ListenerArn=None, Conditions=None, Priority=None, Actions=None):
    """Create a rule for the specified listener.

    Each rule has one action and one condition. Rules are evaluated in
    priority order (lowest value first); when a rule's condition is met,
    its action is taken, otherwise the default rule's default action runs.
    See DescribeRules, ModifyRule, SetRulePriorities, DeleteRule, and the
    AWS API Documentation ("Listener Rules").

    :type ListenerArn: string
    :param ListenerArn: [REQUIRED] The ARN of the listener.
    :type Conditions: list
    :param Conditions: [REQUIRED] Each condition is a dict with 'Field'
        ('host-header' or 'path-pattern') and 'Values' (a single value).
        Host names are case insensitive, up to 128 characters; path
        patterns are case sensitive, up to 128 characters. Both allow
        A-Z a-z 0-9 and up to three wildcards (* matches 0 or more
        characters, ? matches exactly 1 character); path patterns also
        allow _ - . $ / ~ ' ' @ : + and & (as amp;).
    :type Priority: integer
    :param Priority: [REQUIRED] The rule priority; a listener cannot have
        multiple rules with the same priority.
    :type Actions: list
    :param Actions: [REQUIRED] Each action is a dict with 'Type' (string,
        REQUIRED, type 'forward') and 'TargetGroupArn' (string, REQUIRED).
    :rtype: dict
    :return: {'Rules': [{'RuleArn', 'Priority', 'Conditions', 'Actions',
        'IsDefault'}, ...]}
    """
    pass
def create_target_group(Name=None, Protocol=None, Port=None, VpcId=None, HealthCheckProtocol=None, HealthCheckPort=None, HealthCheckPath=None, HealthCheckIntervalSeconds=None, HealthCheckTimeoutSeconds=None, HealthyThresholdCount=None, UnhealthyThresholdCount=None, Matcher=None):
    """Create a target group.

    Register targets with RegisterTargets, tune health checks with
    ModifyTargetGroup, monitor with DescribeTargetHealth, route traffic
    via CreateListener/CreateRule actions, delete with DeleteTargetGroup.
    See AWS API Documentation ("Target Groups for Your Application Load
    Balancers").

    :type Name: string
    :param Name: [REQUIRED] Target group name: unique per region per
        account, max 32 alphanumeric/hyphen characters, must not begin or
        end with a hyphen.
    :type Protocol: string
    :param Protocol: [REQUIRED] 'HTTP'|'HTTPS' -- routing protocol.
    :type Port: integer
    :param Port: [REQUIRED] Port on which targets receive traffic (unless
        overridden per target at registration).
    :type VpcId: string
    :param VpcId: [REQUIRED] The VPC identifier.
    :type HealthCheckProtocol: string
    :param HealthCheckProtocol: Health-check protocol (default HTTP).
    :type HealthCheckPort: string
    :param HealthCheckPort: Health-check port (default 'traffic-port',
        i.e. the port each target receives traffic on).
    :type HealthCheckPath: string
    :param HealthCheckPath: Health-check ping path (default /).
    :type HealthCheckIntervalSeconds: integer
    :param HealthCheckIntervalSeconds: Approximate seconds between health
        checks of an individual target (default 30).
    :type HealthCheckTimeoutSeconds: integer
    :param HealthCheckTimeoutSeconds: Seconds of no response that count as
        a failed health check (default 5).
    :type HealthyThresholdCount: integer
    :param HealthyThresholdCount: Consecutive successes before an
        unhealthy target is considered healthy (default 5).
    :type UnhealthyThresholdCount: integer
    :param UnhealthyThresholdCount: Consecutive failures before a target
        is considered unhealthy (default 2).
    :type Matcher: dict
    :param Matcher: HTTP codes indicating a successful health-check
        response. 'HttpCode' (string, REQUIRED): values 200-499, default
        200; accepts multiple values ('200,202') or a range ('200-299').
    :rtype: dict
    :return: {'TargetGroups': [{'TargetGroupArn', 'TargetGroupName',
        'Protocol', 'Port', 'VpcId', 'HealthCheckProtocol',
        'HealthCheckPort', 'HealthCheckIntervalSeconds',
        'HealthCheckTimeoutSeconds', 'HealthyThresholdCount',
        'UnhealthyThresholdCount', 'HealthCheckPath', 'Matcher',
        'LoadBalancerArns'}, ...]}
    :returns:
    (string) --
    """
    pass
def delete_listener(ListenerArn=None):
    """Delete the specified listener.

    Alternatively, a listener is deleted when you delete the load balancer
    it is attached to using DeleteLoadBalancer. See AWS API Documentation.

    :type ListenerArn: string
    :param ListenerArn: [REQUIRED] The ARN of the listener.
    :rtype: dict
    :return: {}
    """
    pass
def delete_load_balancer(LoadBalancerArn=None):
    """Delete the specified Application Load Balancer and its listeners.

    Fails if deletion protection is enabled; succeeds if the load balancer
    does not exist or was already deleted. Registered targets are not
    affected -- EC2 instances keep running and stay registered to their
    target groups. See AWS API Documentation.

    :type LoadBalancerArn: string
    :param LoadBalancerArn: [REQUIRED] The ARN of the load balancer.
    :rtype: dict
    :return: {}
    """
    pass
def delete_rule(RuleArn=None):
    """Delete the specified rule.

    See also: AWS API Documentation.

    :type RuleArn: string
    :param RuleArn: [REQUIRED] The ARN of the rule.
    :rtype: dict
    :return: {}
    """
    pass
def delete_target_group(TargetGroupArn=None):
    """Delete the specified target group.

    Possible only when the group is not referenced by any actions; also
    deletes any associated health checks. See AWS API Documentation.

    :type TargetGroupArn: string
    :param TargetGroupArn: [REQUIRED] The ARN of the target group.
    :rtype: dict
    :return: {}
    """
    pass
def deregister_targets(TargetGroupArn=None, Targets=None):
    """Deregister the specified targets from the specified target group.

    Deregistered targets no longer receive traffic from the load balancer.
    See also: AWS API Documentation.

    :type TargetGroupArn: string
    :param TargetGroupArn: [REQUIRED] The ARN of the target group.
    :type Targets: list
    :param Targets: [REQUIRED] The targets, each a dict with 'Id' (string,
        REQUIRED) and 'Port' (integer). If a port override was specified
        at registration, both the target ID and the port are required.
    :rtype: dict
    :return: {}
    """
    pass
def describe_account_limits(Marker=None, PageSize=None):
    """Describe the current Elastic Load Balancing resource limits for
    your AWS account.

    See "Limits for Your Application Load Balancer" and the AWS API
    Documentation.

    :type Marker: string
    :param Marker: Marker for the next set of results (from a previous call).
    :type PageSize: integer
    :param PageSize: Maximum number of results to return with this call.
    :rtype: dict
    :return: {'Limits': [{'Name': ..., 'Max': ...}, ...],
        'NextMarker': ...}
    :returns: limit names include application-load-balancers,
        listeners-per-application-load-balancer,
        rules-per-application-load-balancer, target-groups,
        targets-per-application-load-balancer.
    """
    pass
def describe_listeners(LoadBalancerArn=None, ListenerArns=None, Marker=None, PageSize=None):
    """Describe the specified listeners or the listeners for the specified
    Application Load Balancer.

    You must specify either a load balancer or one or more listeners.
    See also: AWS API Documentation.

    :type LoadBalancerArn: string
    :param LoadBalancerArn: The ARN of the load balancer.
    :type ListenerArns: list
    :param ListenerArns: The ARNs of the listeners. (string) --
    :type Marker: string
    :param Marker: Marker for the next set of results (from a previous call).
    :type PageSize: integer
    :param PageSize: Maximum number of results to return with this call.
    :rtype: dict
    :return: {'Listeners': [{'ListenerArn', 'LoadBalancerArn', 'Port',
        'Protocol', 'Certificates', 'SslPolicy', 'DefaultActions'}, ...],
        'NextMarker': ...}
    """
    pass
def describe_load_balancer_attributes(LoadBalancerArn=None):
    """Describe the attributes for the specified Application Load Balancer.

    See also: AWS API Documentation.

    :type LoadBalancerArn: string
    :param LoadBalancerArn: [REQUIRED] The ARN of the load balancer.
    :rtype: dict
    :return: {'Attributes': [{'Key': ..., 'Value': ...}, ...]}
    """
    pass
def describe_load_balancers(LoadBalancerArns=None, Names=None, Marker=None, PageSize=None):
    """Describe the specified Application Load Balancers, or all of them.

    Use DescribeListeners for a load balancer's listeners and
    DescribeLoadBalancerAttributes for its attributes. See also:
    AWS API Documentation.

    :type LoadBalancerArns: list
    :param LoadBalancerArns: The ARNs of the load balancers; up to 20 per
        call. (string) --
    :type Names: list
    :param Names: The names of the load balancers. (string) --
    :type Marker: string
    :param Marker: Marker for the next set of results (from a previous call).
    :type PageSize: integer
    :param PageSize: Maximum number of results to return with this call.
    :rtype: dict
    :return: {'LoadBalancers': [{'LoadBalancerArn', 'DNSName',
        'CanonicalHostedZoneId', 'CreatedTime', 'LoadBalancerName',
        'Scheme', 'VpcId', 'State', 'Type', 'AvailabilityZones',
        'SecurityGroups', 'IpAddressType'}, ...], 'NextMarker': ...}
    :returns:
    (string) --
    """
    pass
def describe_rules(ListenerArn=None, RuleArns=None, Marker=None, PageSize=None):
    """Describe the specified rules, or the rules of the specified listener.

    Stub for the Elastic Load Balancing v2 ``DescribeRules`` API call.
    Either a listener ARN or one or more rule ARNs must be specified.

    :type ListenerArn: string
    :param ListenerArn: The ARN of the listener.
    :type RuleArns: list
    :param RuleArns: The ARNs of the rules.
    :type Marker: string
    :param Marker: Pagination marker received from a previous call.
    :type PageSize: integer
    :param PageSize: Maximum number of results to return with this call.
    :rtype: dict
    :return: ``{'Rules': [...], 'NextMarker': 'string'}`` where each rule has
        a ``RuleArn``, ``Priority``, ``Conditions``, forward ``Actions`` and
        an ``IsDefault`` flag.
    """
    return None
def describe_ssl_policies(Names=None, Marker=None, PageSize=None):
    """Describe the specified SSL-negotiation policies, or all of them.

    Stub for the Elastic Load Balancing v2 ``DescribeSSLPolicies`` API call.
    See "Security Policies" in the Application Load Balancers Guide.

    :type Names: list
    :param Names: The names of the policies.
    :type Marker: string
    :param Marker: Pagination marker received from a previous call.
    :type PageSize: integer
    :param PageSize: Maximum number of results to return with this call.
    :rtype: dict
    :return: ``{'SslPolicies': [...], 'NextMarker': 'string'}`` where each
        policy lists its ``SslProtocols``, prioritised ``Ciphers`` and
        ``Name``.
    """
    return None
def describe_tags(ResourceArns=None):
    """Describe the tags of the specified resources.

    Stub for the Elastic Load Balancing v2 ``DescribeTags`` API call.
    Works for Application Load Balancers and target groups.

    :type ResourceArns: list
    :param ResourceArns: [REQUIRED] The ARNs of the resources.
    :rtype: dict
    :return: ``{'TagDescriptions': [{'ResourceArn': ..., 'Tags': [...]}]}``
        with one ``Key``/``Value`` pair per tag.
    """
    return None
def describe_target_group_attributes(TargetGroupArn=None):
    """Describe the attributes of the specified target group.

    Stub for the Elastic Load Balancing v2
    ``DescribeTargetGroupAttributes`` API call.

    :type TargetGroupArn: string
    :param TargetGroupArn: [REQUIRED] The ARN of the target group.
    :rtype: dict
    :return: ``{'Attributes': [{'Key': ..., 'Value': ...}]}``
    """
    return None
def describe_target_groups(LoadBalancerArn=None, TargetGroupArns=None, Names=None, Marker=None, PageSize=None):
    """Describe the specified target groups, or all of them.

    Stub for the Elastic Load Balancing v2 ``DescribeTargetGroups`` API call.
    Filter by load balancer ARN, target group names or target group ARNs;
    with no filter, all target groups are described. Use
    ``DescribeTargetHealth`` for targets and
    ``DescribeTargetGroupAttributes`` for attributes.

    :type LoadBalancerArn: string
    :param LoadBalancerArn: The ARN of the load balancer.
    :type TargetGroupArns: list
    :param TargetGroupArns: The ARNs of the target groups.
    :type Names: list
    :param Names: The names of the target groups.
    :type Marker: string
    :param Marker: Pagination marker received from a previous call.
    :type PageSize: integer
    :param PageSize: Maximum number of results to return with this call.
    :rtype: dict
    :return: ``{'TargetGroups': [...], 'NextMarker': 'string'}`` where each
        entry includes the group ARN/name, protocol, port, VPC, health-check
        configuration, response ``Matcher`` and associated
        ``LoadBalancerArns``.
    """
    return None
def describe_target_health(TargetGroupArn=None, Targets=None):
    """Describe the health of the specified targets, or of all targets.

    Stub for the Elastic Load Balancing v2 ``DescribeTargetHealth`` API call.

    :type TargetGroupArn: string
    :param TargetGroupArn: [REQUIRED] The ARN of the target group.
    :type Targets: list
    :param Targets: The targets; each is a dict with a required ``Id`` and
        an optional ``Port`` the target listens on.
    :rtype: dict
    :return: ``{'TargetHealthDescriptions': [...]}`` — per target, the
        ``Target`` identity, ``HealthCheckPort`` and a ``TargetHealth``
        dict with ``State`` (``initial``/``healthy``/``unhealthy``/
        ``unused``/``draining``), a ``Reason`` code (``Elb.*`` /
        ``Target.*``) and a ``Description``.
    """
    return None
def generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None, HttpMethod=None):
    """Generate a presigned URL for a client method.

    :type ClientMethod: string
    :param ClientMethod: The client method to presign for.
    :type Params: dict
    :param Params: The parameters normally passed to the client method.
    :type ExpiresIn: int
    :param ExpiresIn: Seconds the presigned URL stays valid
        (default one hour, i.e. 3600 seconds).
    :type HttpMethod: string
    :param HttpMethod: HTTP method for the generated URL; defaults to
        whatever the method's model uses.
    """
    return None
def get_paginator(operation_name=None):
    """Create a paginator for an operation.

    :type operation_name: string
    :param operation_name: The operation name — the same name as the client
        method (e.g. for ``client.create_foo(**kwargs)``, a pageable
        operation is obtained via ``client.get_paginator('create_foo')``).
    :rtype: L{botocore.paginate.Paginator}
    """
    return None
def get_waiter():
    """Return a waiter object (stub — no implementation provided)."""
    return None
def modify_listener(ListenerArn=None, Port=None, Protocol=None, SslPolicy=None, Certificates=None, DefaultActions=None):
    """Modify the specified properties of the specified listener.

    Stub for the Elastic Load Balancing v2 ``ModifyListener`` API call.
    Unspecified properties keep their current values. Changing HTTPS to
    HTTP drops the security policy and certificate; changing HTTP to HTTPS
    requires adding them.

    :type ListenerArn: string
    :param ListenerArn: [REQUIRED] The ARN of the listener.
    :type Port: integer
    :param Port: The port for connections from clients to the load balancer.
    :type Protocol: string
    :param Protocol: The protocol for connections from clients.
    :type SslPolicy: string
    :param SslPolicy: The security policy defining supported protocols and
        ciphers (see "Security Policies" in the ALB Guide).
    :type Certificates: list
    :param Certificates: The SSL server certificate; each entry is a dict
        with a ``CertificateArn``.
    :type DefaultActions: list
    :param DefaultActions: The default actions; each entry is a dict with a
        required ``Type`` and ``TargetGroupArn``.
    :rtype: dict
    :return: ``{'Listeners': [...]}`` echoing the modified listener
        (ARN, load balancer ARN, port, protocol, certificates, SSL policy
        and default actions).
    """
    return None
def modify_load_balancer_attributes(LoadBalancerArn=None, Attributes=None):
    """Modify the specified attributes of an Application Load Balancer.

    Stub for the Elastic Load Balancing v2
    ``ModifyLoadBalancerAttributes`` API call. The call fails if any of
    the specified attributes cannot be modified; unmodified attributes
    keep their current values.

    :type LoadBalancerArn: string
    :param LoadBalancerArn: [REQUIRED] The ARN of the load balancer.
    :type Attributes: list
    :param Attributes: [REQUIRED] The attributes, as ``Key``/``Value``
        dicts. Supported keys:

        - ``access_logs.s3.enabled`` — ``true``/``false``; S3 access logs.
        - ``access_logs.s3.bucket`` — S3 bucket for access logs; must be in
          the load balancer's region with a policy granting ELB write
          permission.
        - ``access_logs.s3.prefix`` — location prefix within the bucket
          (bucket root when omitted).
        - ``deletion_protection.enabled`` — ``true``/``false``.
        - ``idle_timeout.timeout_seconds`` — idle timeout, 1-3600 seconds
          (default 60).
    :rtype: dict
    :return: ``{'Attributes': [{'Key': ..., 'Value': ...}]}``
    """
    return None
def modify_rule(RuleArn=None, Conditions=None, Actions=None):
    """Modify the specified rule.

    Stub for the Elastic Load Balancing v2 ``ModifyRule`` API call.
    Unmodified properties keep their current values; use ``ModifyListener``
    to change the default action.

    :type RuleArn: string
    :param RuleArn: [REQUIRED] The ARN of the rule.
    :type Conditions: list
    :param Conditions: The conditions; each is a dict with ``Field``
        (``host-header`` or ``path-pattern``) and ``Values``. A host-header
        value is a single case-insensitive host name, up to 128 characters;
        a path-pattern value is a single case-sensitive path pattern, up to
        128 characters. Either may include up to three wildcards:
        ``*`` (matches 0 or more characters) and ``?`` (matches exactly
        one character).
    :type Actions: list
    :param Actions: The actions; each is a dict with a required ``Type``
        and ``TargetGroupArn``.
    :rtype: dict
    :return: ``{'Rules': [...]}`` echoing the modified rule (ARN, priority,
        conditions, actions and ``IsDefault`` flag).
    """
    return None
def modify_target_group(TargetGroupArn=None, HealthCheckProtocol=None, HealthCheckPort=None, HealthCheckPath=None, HealthCheckIntervalSeconds=None, HealthCheckTimeoutSeconds=None, HealthyThresholdCount=None, UnhealthyThresholdCount=None, Matcher=None):
    """Modify the health checks of the specified target group.

    Stub for the Elastic Load Balancing v2 ``ModifyTargetGroup`` API call.
    Use ``DescribeTargetHealth`` to monitor target health.

    :type TargetGroupArn: string
    :param TargetGroupArn: [REQUIRED] The ARN of the target group.
    :type HealthCheckProtocol: string
    :param HealthCheckProtocol: Protocol used to connect with the target.
    :type HealthCheckPort: string
    :param HealthCheckPort: Port used to connect with the target.
    :type HealthCheckPath: string
    :param HealthCheckPath: Ping path used as the health-check destination.
    :type HealthCheckIntervalSeconds: integer
    :param HealthCheckIntervalSeconds: Approximate seconds between health
        checks of an individual target.
    :type HealthCheckTimeoutSeconds: integer
    :param HealthCheckTimeoutSeconds: Seconds of silence that count as a
        failed health check.
    :type HealthyThresholdCount: integer
    :param HealthyThresholdCount: Consecutive successes required before an
        unhealthy target is considered healthy.
    :type UnhealthyThresholdCount: integer
    :param UnhealthyThresholdCount: Consecutive failures required before a
        target is considered unhealthy.
    :type Matcher: dict
    :param Matcher: HTTP codes counting as a successful response; requires
        ``HttpCode``, values 200-499, default ``'200'``; lists
        (``'200,202'``) and ranges (``'200-299'``) are accepted.
    :rtype: dict
    :return: ``{'TargetGroups': [...]}`` echoing the modified target group
        (ARN, name, protocol, port, VPC, health-check configuration,
        matcher and associated load balancer ARNs).
    """
    return None
def modify_target_group_attributes(TargetGroupArn=None, Attributes=None):
    """Modify the specified attributes of the specified target group.

    Stub for the Elastic Load Balancing v2
    ``ModifyTargetGroupAttributes`` API call.

    :type TargetGroupArn: string
    :param TargetGroupArn: [REQUIRED] The ARN of the target group.
    :type Attributes: list
    :param Attributes: [REQUIRED] The attributes, as ``Key``/``Value``
        dicts. Supported keys:

        - ``deregistration_delay.timeout_seconds`` — wait before a
          deregistering target goes from ``draining`` to ``unused``;
          0-3600 seconds (default 300).
        - ``stickiness.enabled`` — ``true``/``false``.
        - ``stickiness.type`` — the only possible value is ``lb_cookie``.
        - ``stickiness.lb_cookie.duration_seconds`` — how long requests from
          one client stick to one target before the load-balancer cookie
          goes stale; 1 second to 1 week (604800), default 1 day (86400).
    :rtype: dict
    :return: ``{'Attributes': [{'Key': ..., 'Value': ...}]}``
    """
    return None
def register_targets(TargetGroupArn=None, Targets=None):
    """Register the specified targets with the specified target group.

    Stub for the Elastic Load Balancing v2 ``RegisterTargets`` API call.
    By default requests are routed using the target group's protocol and
    port; the port can be overridden per target (registering the same
    target on several ports supports e.g. ECS containers on one instance).
    The target must live in the target group's VPC, and an EC2 instance
    must be in the ``running`` state. Use ``DeregisterTargets`` to remove
    a target.

    :type TargetGroupArn: string
    :param TargetGroupArn: [REQUIRED] The ARN of the target group.
    :type Targets: list
    :param Targets: [REQUIRED] The targets; each is a dict with a required
        ``Id`` and an optional ``Port`` override.
    :rtype: dict
    :return: ``{}``
    """
    return None
def remove_tags(ResourceArns=None, TagKeys=None):
    """Remove the specified tags from the specified resources.

    Stub for the Elastic Load Balancing v2 ``RemoveTags`` API call.
    Use ``DescribeTags`` to list current tags.

    :type ResourceArns: list
    :param ResourceArns: [REQUIRED] The ARNs of the resources.
    :type TagKeys: list
    :param TagKeys: [REQUIRED] The keys of the tags to remove.
    :rtype: dict
    :return: ``{}``
    """
    return None
def set_ip_address_type(LoadBalancerArn=None, IpAddressType=None):
    """Set the IP address type used by the load balancer's subnets.

    Stub for the Elastic Load Balancing v2 ``SetIpAddressType`` API call.

    :type LoadBalancerArn: string
    :param LoadBalancerArn: [REQUIRED] The ARN of the load balancer.
    :type IpAddressType: string
    :param IpAddressType: [REQUIRED] ``ipv4`` (IPv4 only) or ``dualstack``
        (IPv4 and IPv6). Internal load balancers must use ``ipv4``.
    :rtype: dict
    :return: ``{'IpAddressType': 'ipv4'|'dualstack'}``
    """
    return None
def set_rule_priorities(RulePriorities=None):
    """Set the priorities of the specified rules.

    Stub for the Elastic Load Balancing v2 ``SetRulePriorities`` API call.
    Rules may be reordered freely as long as the new order has no priority
    conflicts; unspecified rules keep their current priority.

    :type RulePriorities: list
    :param RulePriorities: [REQUIRED] The rule priorities; each entry is a
        dict with ``RuleArn`` and an integer ``Priority``.
    :rtype: dict
    :return: ``{'Rules': [...]}`` echoing each rule's ARN, priority,
        conditions, actions and ``IsDefault`` flag.
    """
    return None
def set_security_groups(LoadBalancerArn=None, SecurityGroups=None):
    """Associate the specified security groups with a load balancer.

    Stub for the Elastic Load Balancing v2 ``SetSecurityGroups`` API call.
    The specified groups replace any previously associated groups.

    :type LoadBalancerArn: string
    :param LoadBalancerArn: [REQUIRED] The ARN of the load balancer.
    :type SecurityGroups: list
    :param SecurityGroups: [REQUIRED] The IDs of the security groups.
    :rtype: dict
    :return: ``{'SecurityGroupIds': ['string', ...]}``
    """
    return None
def set_subnets(LoadBalancerArn=None, Subnets=None):
    """Enable the Availability Zones of the given subnets for a balancer.

    Stub for the Elastic Load Balancing v2 ``SetSubnets`` API call.
    The specified subnets replace the previously enabled subnets.

    :type LoadBalancerArn: string
    :param LoadBalancerArn: [REQUIRED] The ARN of the load balancer.
    :type Subnets: list
    :param Subnets: [REQUIRED] The subnet IDs — at least two, and at most
        one per Availability Zone.
    :rtype: dict
    :return: ``{'AvailabilityZones': [{'ZoneName': ..., 'SubnetId': ...}]}``
    """
    return None
|
"""This problem was asked by Apple.
Gray code is a binary code where each successive value differs in only one bit,
as well as when wrapping around.
Gray code is common in hardware so that we don't see temporary spurious values during transitions.
Given a number of bits n, generate a possible gray code for it.
For example, for n = 2, one gray code would be [00, 01, 11, 10].
"""
"""
Contains code no longer used but kept for review/further reuse
--- imports need to be re-added ---
"""
def determine_disjuct_modules_alternative(src_rep):
    """Return modules imported in *src_rep* that are not currently loaded.

    Runs the external ``findimports`` tool on the source repository and
    collects the top-level module names it reports, then returns those that
    are absent from ``sys.modules``. Potentially replaces
    ``determine_added_modules`` and ``get_modules_lst()``.

    :param src_rep: path to the source repository to scan.
    :return: list of top-level module names not found in ``sys.modules``
        (order is unspecified, as the names are deduplicated via a set).
    :raises subprocess.CalledProcessError: if ``findimports`` exits non-zero.
    """
    findimports_output = subprocess.check_output(['findimports', src_rep])
    # Lines containing ':' are findimports headers ("file.py: ..."); the
    # remaining lines are imported module names, possibly dotted.
    custom_modules = {
        line.split('.', 1)[0].strip()
        for line in findimports_output.decode('utf-8').splitlines()
        if ':' not in line
    }
    return [module for module in custom_modules if module not in sys.modules]
def determine_added_modules(src_rep, python_version):
    """Split the modules imported in *src_rep* by Python-version membership.

    Determines overlapping and disjunct modules between the ones shipped by
    the specified Python version and the ones found in the source repository.
    Relies on the external ``findimports`` tool for the dependency tree
    (https://github.com/mgedmin/findimports,
    https://pypi.org/project/findimports/).

    :param src_rep: path to the source repository to scan.
    :param python_version: version passed through to ``get_modules_list``.
    :return: tuple ``(not_common, common)`` — top-level module names absent
        from / present in the given Python version's module list (order is
        unspecified, as the names are deduplicated via a set).
    :raises subprocess.CalledProcessError: if ``findimports`` exits non-zero.
    """
    python_modules_lst = get_modules_list(python_version)
    findimports_output = subprocess.check_output(['findimports', src_rep])
    # Skip findimports header lines (they contain ':'); keep only the
    # top-level part of each dotted module name.
    custom_modules = {
        line.split('.', 1)[0].strip()
        for line in findimports_output.decode('utf-8').splitlines()
        if ':' not in line
    }
    not_common = [m for m in custom_modules if m not in python_modules_lst]
    common = [m for m in custom_modules if m in python_modules_lst]
    return not_common, common
class Endereco:
    """A simple value object holding the parts of a (Brazilian) postal address."""

    def __init__(self, rua="", bairro="", numero="", cidade="", estado="", cep=""):
        """Store the address components; every field defaults to an empty string."""
        # Tuple assignment keeps all six fields together.
        (self.rua, self.bairro, self.numero,
         self.cidade, self.estado, self.cep) = rua, bairro, numero, cidade, estado, cep
|
"""
--- Day 6: Memory Reallocation ---
A debugger program here is having an issue: it is trying to repair a memory reallocation routine, but it keeps getting stuck in an infinite loop.
In this area, there are sixteen memory banks; each memory bank can hold any number of blocks. The goal of the reallocation routine is to balance the blocks between the memory banks.
The reallocation routine operates in cycles. In each cycle, it finds the memory bank with the most blocks (ties won by the lowest-numbered memory bank) and redistributes those blocks among the banks. To do this, it removes all of the blocks from the selected bank, then moves to the next (by index) memory bank and inserts one of the blocks. It continues doing this until it runs out of blocks; if it reaches the last memory bank, it wraps around to the first one.
The debugger would like to know how many redistributions can be done before a blocks-in-banks configuration is produced that has been seen before.
For example, imagine a scenario with only four memory banks:
The banks start with 0, 2, 7, and 0 blocks. The third bank has the most blocks, so it is chosen for redistribution.
Starting with the next bank (the fourth bank) and then continuing to the first bank, the second bank, and so on, the 7 blocks are spread out over the memory banks. The fourth, first, and second banks get two blocks each, and the third bank gets one back. The final result looks like this: 2 4 1 2.
Next, the second bank is chosen because it contains the most blocks (four). Because there are four memory banks, each gets one block. The result is: 3 1 2 3.
Now, there is a tie between the first and fourth memory banks, both of which have three blocks. The first bank wins the tie, and its three blocks are distributed evenly over the other three banks, leaving it with none: 0 2 3 4.
The fourth bank is chosen, and its four blocks are distributed such that each of the four banks receives one: 1 3 4 1.
The third bank is chosen, and the same thing happens: 2 4 1 2.
At this point, we've reached a state we've seen before: 2 4 1 2 was already seen. The infinite loop is detected after the fifth block redistribution cycle, and so the answer in this example is 5.
Given the initial block counts in your puzzle input, how many redistribution cycles must be completed before a configuration is produced that has been seen before?
Your puzzle answer was 11137.
--- Part Two ---
Out of curiosity, the debugger would also like to know the size of the loop: starting from a state that has already been seen, how many block redistribution cycles must be performed before that same state is seen again?
In the example above, 2 4 1 2 is seen again after four cycles, and so the answer in that example would be 4.
How many cycles are in the infinite loop that arises from the configuration in your puzzle input?
Your puzzle answer was 1037.
Both parts of this puzzle are complete! They provide two gold stars: **
"""
def part1(puzzle_input):
    """Count redistribution cycles until a configuration repeats.

    Mutates *puzzle_input* in place (the banks are redistributed directly).
    Returns the number of cycles performed when a previously seen
    configuration first reappears.
    """
    banks = puzzle_input
    seen = set()
    cycles = 0
    while True:
        cycles += 1
        # Redistribute: empty the fullest bank (lowest index wins ties)
        # and deal its blocks out one at a time, wrapping around.
        blocks = max(banks)
        pos = banks.index(blocks)
        banks[pos] = 0
        n = len(banks)
        for _ in range(blocks):
            pos = (pos + 1) % n
            banks[pos] += 1
        snapshot = tuple(banks)
        if snapshot in seen:
            return cycles
        seen.add(snapshot)
def part1_redist_cycle(state):
    """Run one redistribution cycle.

    Empties the fullest bank and deals its blocks one at a time to the
    following banks, wrapping around. Mutates `state` in place and also
    returns it.
    """
    blocks = max(state)
    pos = state.index(blocks)  # ties resolved to the lowest index
    state[pos] = 0
    n = len(state)
    for _ in range(blocks):
        pos = (pos + 1) % n
        state[pos] += 1
    return state
def part2(puzzle_input):
    """Return the size of the infinite loop: cycles between the two
    sightings of the first repeated configuration.

    Fix: the input is copied first so the caller's list is not mutated
    by the in-place redistribution.
    """
    seen = {}
    state = list(puzzle_input)
    num_it = 0
    while True:
        num_it += 1
        state = part1_redist_cycle(state)
        frozen_state = tuple(state)
        if frozen_state in seen:
            break
        seen[frozen_state] = num_it  # remember when this state was first seen
    return num_it - seen[frozen_state]
def test_part1_redist_cycle():
    """Worked example from the puzzle: [0, 2, 7, 0] -> [2, 4, 1, 2]."""
    state = [0, 2, 7, 0]
    assert [2, 4, 1, 2] == part1_redist_cycle(state)
def test_part1():
    """Puzzle example: the repeat is detected after 5 cycles."""
    puzzle_input = [0, 2, 7, 0]
    assert 5 == part1(puzzle_input)
def test_part2():
    """Puzzle example: the loop itself is 4 cycles long."""
    puzzle_input = [0, 2, 7, 0]
    assert 4 == part2(puzzle_input)
def main():
    """Read whitespace-separated block counts from input.txt and print both answers."""
    with open('input.txt') as f:
        puzzle_input = [int(n.strip()) for n in f.read().strip().split()]
    print('part1: %s' % part1(puzzle_input))
    print('part2: %s' % part2(puzzle_input))
# Run only when executed as a script (not when imported, e.g. by pytest).
if __name__ == '__main__':
    main()
|
# Question Link : https://leetcode.com/explore/challenge/card/december-leetcoding-challenge/572/week-4-december-22nd-december-28th/3581/
class Solution(object):
    def numDecodings(self, s):
        """
        Count the ways `s` can be decoded with 'A'=1 .. 'Z'=26.

        :type s: str
        :rtype: int

        Fixes over the original:
        - empty input returned via guard instead of raising IndexError;
        - the two-digit case at i == 2 used `dp[i] += 1` because of the
          `i-2 > 0` test, which wrongly counted strings such as "012"
          (leading zero) as decodable; it now uses dp[i-2] whenever it
          exists, falling back to 1 only for i == 1 (empty prefix).
        """
        n = len(s)
        if n == 0:
            return 0
        dp = [0] * n  # dp[i]: decodings of s[:i+1]
        if s[0] != '0':
            dp[0] = 1
        for i in range(1, n):
            # Single-digit decode: s[i] alone (must not be '0').
            if s[i] != '0':
                dp[i] += dp[i - 1]
            # Two-digit decode: s[i-1:i+1] must fall in 10..26.
            if s[i - 1] == '1' or (s[i - 1] == '2' and int(s[i]) < 7):
                dp[i] += dp[i - 2] if i >= 2 else 1
        return dp[-1]
|
# Read the digit grid: one row of single digits per line.
# Fix: use a context manager so the file handle is closed deterministically
# (the original left it to the garbage collector).
with open('input', 'r') as _f:
    input = _f.read().strip()
input = [list(map(int, r)) for r in input.splitlines()]
h, w = len(input), len(input[0])
def neighbours(x, y):
    """Return all in-bounds cells of the 3x3 box around (x, y).

    Note: (x, y) itself is included; callers filter it out via their
    already-flashed set.
    """
    cells = []
    for du in range(-1, 2):
        for dv in range(-1, 2):
            nx, ny = x + du, y + dv
            if 0 <= nx < h and 0 <= ny < w:
                cells.append((nx, ny))
    return cells
def step(m):
    """Advance the energy grid one step; return (number_of_flashes, new_grid)."""
    # Every cell gains one energy (fresh grid; input grid is not mutated).
    m = [[n+1 for n in r] for r in m]
    # Seed the cascade with every cell already above 9.
    flashs, bag = set(), {(x, y) for x, r in enumerate(m) for y, n in enumerate(r) if n > 9}
    while bag:
        flashs |= bag
        expansion = set()
        for x, y in bag:
            for p, q in neighbours(x, y):
                # A cell flashes at most once per step.
                if (p, q) in flashs: continue
                m[p][q] += 1
                expansion.add((p, q))
        # Only cells pushed above 9 by this wave flash in the next wave.
        bag = {(x, y) for x, y in expansion if m[x][y] > 9}
    # Every flashed cell resets to 0.
    for (x, y) in flashs: m[x][y] = 0
    return len(flashs), m
def p1(steps=100):
    """Total number of flashes over the first `steps` steps."""
    total, grid = 0, input
    for _ in range(steps):
        flashed, grid = step(grid)
        total += flashed
    return total
def p2():
    """First step number at which every cell flashes simultaneously."""
    step_no, grid = 0, input
    while True:
        step_no += 1
        flashed, grid = step(grid)
        if flashed == h * w:
            return step_no
# p1() and p2() always return an int, so the original walrus-and-None
# guards were always true; print the answers directly.
part1_answer = p1()
print(part1_answer)
part2_answer = p2()
print(part2_answer)
|
''' Automatically set `current_app` into context based on URL namespace. '''
def namespaced(request):
    ''' Set `current_app` to url namespace.

    Context processor: stores the resolved URL namespace on the request
    so reversing picks the right namespaced app, and adds nothing to the
    template context.

    Fix: `request.resolver_match` is None when URL resolution failed
    (e.g. 404/500 handlers), which previously raised AttributeError.
    '''
    match = getattr(request, 'resolver_match', None)
    request.current_app = match.namespace if match is not None else None
    return {}
|
# *-* coding:utf-8 *-*
"""Module states Amapa"""
def start(st_reg_number):
    """Check the state registration number validity for the Amapa state.

    :param st_reg_number: registration number as a string of digits.
    :returns: True when the final check digit matches the weighted-sum
        rule for the number's range, False otherwise.

    Fixes over the original: the docstring wrongly said "Alagoas", and
    aux == 3000000 fell through every range test leaving control1 and
    control2 unbound (UnboundLocalError); defaults of 0/0 — the same
    values as the aux > 3019022 branch — now cover that gap.
    """
    divisor = 11
    # Must be exactly 9 digits and start with the "03" state prefix.
    if len(st_reg_number) > 9:
        return False
    if len(st_reg_number) < 9:
        return False
    if st_reg_number[0:2] != "03":
        return False
    # aux: the 8-digit body, excluding the trailing check digit.
    aux = int(st_reg_number[0:len(st_reg_number) - 1])
    # Range-dependent correction factors; the ranges are disjoint.
    control1 = 0
    control2 = 0
    if 3000000 < aux < 3017001:
        control1 = 5
        control2 = 0
    elif 3017000 < aux < 3019023:
        control1 = 9
        control2 = 1
    # Weighted digit sum with weights 9 down to 2.
    sum_total = 0
    peso = 9
    for i in range(len(st_reg_number) - 1):
        sum_total = sum_total + int(st_reg_number[i]) * peso
        peso = peso - 1
    sum_total += control1
    rest_division = sum_total % divisor
    digit = divisor - rest_division
    if digit == 10:
        digit = 0
    if digit == 11:
        digit = control2
    return digit == int(st_reg_number[len(st_reg_number) - 1])
|
#!/usr/bin/env python
class AssembleError(Exception):
    """Assembly failure; the message is formatted as '<line_no>: <reason>'."""

    def __init__(self, line_no, reason):
        super(AssembleError, self).__init__('%d: %s' % (line_no, reason))
|
# Boolean demo: bool() of 0, None and '' is False; non-zero numbers are True.
for sample in (0, 1, 1.5, None, ''):
    print(bool(sample))
# Count numbers 1..n whose digits form an arithmetic progression
# (every number below 100 qualifies by definition).
n = int(input())
count = 0
for value in range(1, n + 1):
    if value < 100:
        count += 1
    else:
        digits = str(value)
        # d1 - d0 == d2 - d1  <=>  2*d1 == d0 + d2
        if 2 * int(digits[1]) == int(digits[0]) + int(digits[2]):
            count += 1
print(count)
|
l1 = list(range(10))
# x*x + 2*x + 1 is the expansion of (x + 1)**2.
new_list = [(x + 1) ** 2 for x in l1]
print(l1)
print(new_list)
|
class Solution:
    def lengthOfLIS(self, nums):
        """
        Length of the longest strictly increasing subsequence.

        :type nums: List[int]
        :rtype: int

        Patience-sorting approach replacing the original O(n^2) DP:
        tails[k] holds the smallest possible tail value of an increasing
        subsequence of length k+1, so tails stays sorted and each number
        is placed with a binary search — O(n log n) overall.
        """
        from bisect import bisect_left  # stdlib; local import keeps module-level deps unchanged
        tails = []
        for num in nums:
            pos = bisect_left(tails, num)
            if pos == len(tails):
                # num extends the longest subsequence found so far.
                tails.append(num)
            else:
                # num tightens the tail of a subsequence of length pos+1.
                tails[pos] = num
        return len(tails)
|
# md5 : 2cdb8e874f0950ea17a7135427b4f07d
# sha1 : 73b16f132eb0247ea124b6243ca4109f179e564c
# sha256 : 099b17422e1df0235e024ff5128a60571e72af451e1c59f4d61d3cf32c1539ed
# Export-ordinal -> function-name table. The names (PlaySound*, mci*,
# midi*, mixer*, wave*, timer*) match the multimedia API exported by
# winmm.dll — presumably used to resolve imports by ordinal; TODO
# confirm against the target DLL's export table.
ord_names = {
    3: b'mciExecute',
    4: b'CloseDriver',
    5: b'DefDriverProc',
    6: b'DriverCallback',
    7: b'DrvGetModuleHandle',
    8: b'GetDriverModuleHandle',
    9: b'NotifyCallbackData',
    10: b'OpenDriver',
    11: b'PlaySound',
    12: b'PlaySoundA',
    13: b'PlaySoundW',
    14: b'SendDriverMessage',
    15: b'WOW32DriverCallback',
    16: b'WOW32ResolveMultiMediaHandle',
    17: b'WOWAppExit',
    18: b'aux32Message',
    19: b'auxGetDevCapsA',
    20: b'auxGetDevCapsW',
    21: b'auxGetNumDevs',
    22: b'auxGetVolume',
    23: b'auxOutMessage',
    24: b'auxSetVolume',
    25: b'joy32Message',
    26: b'joyConfigChanged',
    27: b'joyGetDevCapsA',
    28: b'joyGetDevCapsW',
    29: b'joyGetNumDevs',
    30: b'joyGetPos',
    31: b'joyGetPosEx',
    32: b'joyGetThreshold',
    33: b'joyReleaseCapture',
    34: b'joySetCapture',
    35: b'joySetThreshold',
    36: b'mci32Message',
    37: b'mciDriverNotify',
    38: b'mciDriverYield',
    39: b'mciFreeCommandResource',
    40: b'mciGetCreatorTask',
    41: b'mciGetDeviceIDA',
    42: b'mciGetDeviceIDFromElementIDA',
    43: b'mciGetDeviceIDFromElementIDW',
    44: b'mciGetDeviceIDW',
    45: b'mciGetDriverData',
    46: b'mciGetErrorStringA',
    47: b'mciGetErrorStringW',
    48: b'mciGetYieldProc',
    49: b'mciLoadCommandResource',
    50: b'mciSendCommandA',
    51: b'mciSendCommandW',
    52: b'mciSendStringA',
    53: b'mciSendStringW',
    54: b'mciSetDriverData',
    55: b'mciSetYieldProc',
    56: b'mid32Message',
    57: b'midiConnect',
    58: b'midiDisconnect',
    59: b'midiInAddBuffer',
    60: b'midiInClose',
    61: b'midiInGetDevCapsA',
    62: b'midiInGetDevCapsW',
    63: b'midiInGetErrorTextA',
    64: b'midiInGetErrorTextW',
    65: b'midiInGetID',
    66: b'midiInGetNumDevs',
    67: b'midiInMessage',
    68: b'midiInOpen',
    69: b'midiInPrepareHeader',
    70: b'midiInReset',
    71: b'midiInStart',
    72: b'midiInStop',
    73: b'midiInUnprepareHeader',
    74: b'midiOutCacheDrumPatches',
    75: b'midiOutCachePatches',
    76: b'midiOutClose',
    77: b'midiOutGetDevCapsA',
    78: b'midiOutGetDevCapsW',
    79: b'midiOutGetErrorTextA',
    80: b'midiOutGetErrorTextW',
    81: b'midiOutGetID',
    82: b'midiOutGetNumDevs',
    83: b'midiOutGetVolume',
    84: b'midiOutLongMsg',
    85: b'midiOutMessage',
    86: b'midiOutOpen',
    87: b'midiOutPrepareHeader',
    88: b'midiOutReset',
    89: b'midiOutSetVolume',
    90: b'midiOutShortMsg',
    91: b'midiOutUnprepareHeader',
    92: b'midiStreamClose',
    93: b'midiStreamOpen',
    94: b'midiStreamOut',
    95: b'midiStreamPause',
    96: b'midiStreamPosition',
    97: b'midiStreamProperty',
    98: b'midiStreamRestart',
    99: b'midiStreamStop',
    100: b'mixerClose',
    101: b'mixerGetControlDetailsA',
    102: b'mixerGetControlDetailsW',
    103: b'mixerGetDevCapsA',
    104: b'mixerGetDevCapsW',
    105: b'mixerGetID',
    106: b'mixerGetLineControlsA',
    107: b'mixerGetLineControlsW',
    108: b'mixerGetLineInfoA',
    109: b'mixerGetLineInfoW',
    110: b'mixerGetNumDevs',
    111: b'mixerMessage',
    112: b'mixerOpen',
    113: b'mixerSetControlDetails',
    114: b'mmDrvInstall',
    115: b'mmGetCurrentTask',
    116: b'mmTaskBlock',
    117: b'mmTaskCreate',
    118: b'mmTaskSignal',
    119: b'mmTaskYield',
    120: b'mmioAdvance',
    121: b'mmioAscend',
    122: b'mmioClose',
    123: b'mmioCreateChunk',
    124: b'mmioDescend',
    125: b'mmioFlush',
    126: b'mmioGetInfo',
    127: b'mmioInstallIOProcA',
    128: b'mmioInstallIOProcW',
    129: b'mmioOpenA',
    130: b'mmioOpenW',
    131: b'mmioRead',
    132: b'mmioRenameA',
    133: b'mmioRenameW',
    134: b'mmioSeek',
    135: b'mmioSendMessage',
    136: b'mmioSetBuffer',
    137: b'mmioSetInfo',
    138: b'mmioStringToFOURCCA',
    139: b'mmioStringToFOURCCW',
    140: b'mmioWrite',
    141: b'mmsystemGetVersion',
    142: b'mod32Message',
    143: b'mxd32Message',
    144: b'sndPlaySoundA',
    145: b'sndPlaySoundW',
    146: b'tid32Message',
    147: b'timeBeginPeriod',
    148: b'timeEndPeriod',
    149: b'timeGetDevCaps',
    150: b'timeGetSystemTime',
    151: b'timeGetTime',
    152: b'timeKillEvent',
    153: b'timeSetEvent',
    154: b'waveInAddBuffer',
    155: b'waveInClose',
    156: b'waveInGetDevCapsA',
    157: b'waveInGetDevCapsW',
    158: b'waveInGetErrorTextA',
    159: b'waveInGetErrorTextW',
    160: b'waveInGetID',
    161: b'waveInGetNumDevs',
    162: b'waveInGetPosition',
    163: b'waveInMessage',
    164: b'waveInOpen',
    165: b'waveInPrepareHeader',
    166: b'waveInReset',
    167: b'waveInStart',
    168: b'waveInStop',
    169: b'waveInUnprepareHeader',
    170: b'waveOutBreakLoop',
    171: b'waveOutClose',
    172: b'waveOutGetDevCapsA',
    173: b'waveOutGetDevCapsW',
    174: b'waveOutGetErrorTextA',
    175: b'waveOutGetErrorTextW',
    176: b'waveOutGetID',
    177: b'waveOutGetNumDevs',
    178: b'waveOutGetPitch',
    179: b'waveOutGetPlaybackRate',
    180: b'waveOutGetPosition',
    181: b'waveOutGetVolume',
    182: b'waveOutMessage',
    183: b'waveOutOpen',
    184: b'waveOutPause',
    185: b'waveOutPrepareHeader',
    186: b'waveOutReset',
    187: b'waveOutRestart',
    188: b'waveOutSetPitch',
    189: b'waveOutSetPlaybackRate',
    190: b'waveOutSetVolume',
    191: b'waveOutUnprepareHeader',
    192: b'waveOutWrite',
    193: b'wid32Message',
    194: b'wod32Message',
}
"""
Given a linked list, remove the nth node from the end of list and return its head.
For example,
Given linked list: 1->2->3->4->5, and n = 2.
After removing the second node from the end, the linked list becomes 1->2->3->5.
Note:
Given n will always be valid.
Try to do this in one pass.
"""
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
    def removeNthFromEnd(self, head, n):
        """
        Remove the n-th node from the end of the list in one pass.

        :type head: ListNode
        :type n: int
        :rtype: ListNode

        Two-pointer technique: `lead` is advanced n nodes ahead of
        `trail`; when `lead` reaches the last node, `trail` sits just
        before the node to delete.
        """
        lead = head
        trail = head
        # Put `lead` n nodes ahead of `trail`.
        for _ in range(n):
            if lead.next is None:
                # n equals the list length: the head itself is removed.
                return head.next
            lead = lead.next
        # Walk both pointers until `lead` is on the last node.
        while lead is not None and lead.next is not None:
            lead = lead.next
            trail = trail.next
        # Unlink the target node.
        trail.next = trail.next.next
        return head
# Dice game between Luisa and Antonio; rolling a 6 grants another turn.
dados = int(input())  # number of rolls on the first line (read but otherwise unused)
pontos = input()
pontos = pontos.split(' ')
luisa = 0
antonio = 0
pessoa = 0  # turn marker after increment: 1 = Luisa, 2 = Antonio
for vez in pontos:
    pessoa += 1
    if pessoa == 3:
        pessoa = 1
    if pessoa == 1:
        luisa += int(vez)
    elif pessoa == 2:
        antonio += int(vez)
    # A 6 lets the same player roll again: rewind the counter so the next
    # increment lands on the same player; otherwise Antonio's turn resets
    # the counter so Luisa goes next.
    if int(vez) == 6 and pessoa == 1:
        pessoa = 0
    elif int(vez) == 6 and pessoa == 2:
        pessoa = 1
    elif pessoa == 2:
        pessoa = 0
if luisa > antonio:
    print('LUISA {}'.format(luisa))
elif antonio > luisa:
    print('ANTONIO {}'.format(antonio))
else:
    print('EMPATE {}'.format(antonio))
def getCountLetterString(input):
    """Parse a password-policy line of the form '<lo>-<hi> <letter>: <password>'.

    Returns [lo, hi, letter, password]; the password keeps its leading
    space, which callers exploit for the puzzle's 1-based indexing.
    """
    space_index = input.find(" ")
    bounds = input[0:space_index]          # e.g. "1-3"
    hyphen_index = input.find("-")
    lo_text = bounds[0:hyphen_index]
    hi_text = bounds[hyphen_index + 1:]
    colon_index = input.find(":")
    letter = input[space_index + 1:colon_index]
    password = input[colon_index + 1:]     # includes the leading space
    return [int(lo_text), int(hi_text), letter, password]
lines = []
with open('./input') as f:
    lines = f.read().splitlines()

valid_pass_count = 0
for entry in lines:
    count_letter_string = getCountLetterString(entry)
    start = count_letter_string[0]
    end = count_letter_string[1]
    letter = count_letter_string[2]
    password = count_letter_string[3]
    # The password keeps its leading space, so the puzzle's 1-based
    # positions line up directly with Python's 0-based indexing.
    first_matches = password[start] == letter
    second_matches = password[end] == letter
    # Fix: the original compared one-character strings with `is`/`is not`,
    # which relies on CPython string interning. A policy line is valid when
    # exactly one of the two positions holds the letter (XOR).
    if first_matches != second_matches:
        valid_pass_count += 1
print("There are {} valid passwords".format(valid_pass_count))
|
# Global score list shared by the dart helpers below; reset in solution().
l = [0]
def expChar(idx, c):
    """Apply a dart area option to score slot `idx`:
    'S' keeps the score, 'D' squares it, anything else ('T') cubes it.
    """
    global l
    exponent = {'S': 1, 'D': 2}.get(c, 3)
    l[idx] = l[idx] ** exponent
def special(idx, c):
    """Apply a dart bonus option to slot `idx`:
    '*' doubles the current and previous throws, anything else ('#')
    negates the current throw.
    """
    global l
    if c == '*':
        l[idx] = l[idx] * 2
        l[idx - 1] = l[idx - 1] * 2
    else:
        l[idx] = -l[idx]
def solution(dartResult):
    """Compute the total dart score for a result string like '1S2D*3T'."""
    # Sentinel space so the lookahead at i+1 is always valid for the last char.
    dartResult += ' '
    global l
    l = [0]  # l[0] is a dummy slot; throw scores occupy l[1..]
    idx = 0
    for i in range(len(dartResult)-1):
        if dartResult[i].isdigit():  # the two-digit score '10' is the tricky case
            if dartResult[i+1].isdigit():
                # First digit of '10' -- handled when the next digit is reached.
                continue
            if dartResult[i-1].isdigit():
                # Second digit of a two-digit score: append both digits as one number.
                l.append(int(dartResult[i-1:i+1]))
                idx += 1
                continue
            l.append(int(dartResult[i]))
            idx += 1
        elif dartResult[i].isalpha():
            # S/D/T area option for the current throw.
            expChar(idx, dartResult[i])
        else:
            # '*' or '#' bonus option.
            special(idx, dartResult[i])
    answer = sum(l)
    return answer
# mmdetection config: RetinaNet R50-FPN trained on a custom ("own_data") dataset.
_base_ = [
    '../_base_/models/retinanet_r50_fpn.py',
    '../_base_/datasets/own_data.py',
    '../_base_/schedules/schedule_1x_own_data.py', '../_base_/default_runtime.py'
]
# optimizer
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
# Initialize from the COCO-pretrained RetinaNet checkpoint.
load_from = 'https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r50_fpn_1x_coco/retinanet_r50_fpn_1x_coco_20200130-c2398f9e.pth'
# https://leetcode.com/problems/number-of-digit-one/
class Solution:
    def countDigitOne(self, n: int) -> int:
        """Count how many times the digit 1 appears in all integers 1..n.

        For each decimal position, split n into the part above and the
        remainder at that position; the higher part contributes `place`
        ones per full cycle, and the remainder adds a clamped partial run.
        """
        total = 0
        place = 1  # current digit position: 1s, 10s, 100s, ...
        while place <= n:
            higher = n // (place * 10)
            remainder = n % (place * 10)
            total += higher * place
            total += min(max(remainder - place + 1, 0), place)
            place *= 10
        return total
|
#desafio 35: Analisando triângulo V1.0 Para construir um triângulo é necessário que
# a medida de qualquer um dos lados seja menor que a soma das medidas dos outros dois
# e maior que o valor absoluto da diferença entre essas medidas.
# | b - c | < a < b + c
# | a - c | < b < a + c
# | a - b | < c < a + b
# Read the three segment lengths and apply the triangle inequality.
print('\33[1m--=--=-- Analisador de triângulos --=--=--\33[m')
a = float(input('Primeiro segmento: '))
b = float(input('Segundo segmento: '))
c = float(input('Terceiro segmento: '))
print()
# Each side must be shorter than the sum of the other two.
forma_triangulo = a < b + c and b < a + c and c < a + b
if forma_triangulo:
    print(f'Os segmentos {a}, {b} e {c} \33[1;32mPODEM\33[m formar um triângulo.')
else:
    print(f'Os segmentos {a}, {b} e {c} \33[1;31mNÃO\33[m podem formar um triângulo.')
print('__'*30)
|
# BMI calculator: classify body-mass index from height (m) and weight (kg).
height = float(input("enter your height in m: "))
weight = float(input("enter your weight in kg: "))
# BMI = mass (kg) / height (m) squared.
bmi = weight / (height ** 2)
# Fix: the original messages were garbled ("you bmi is {bmi}you have...")
# with a typo and a missing separator before the classification.
if bmi <= 18.5:
    print(f"your BMI is {bmi}, you are underweight")
elif bmi <= 25:
    print(f"your BMI is {bmi}, you have a normal weight")
elif bmi <= 30:
    print(f"your BMI is {bmi}, you are slightly overweight")
elif bmi <= 35:
    print(f"your BMI is {bmi}, you are obese")
else:
    print(f"your BMI is {bmi}, you are clinically obese")
|
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 17 09:27:36 2016
@author: tih
"""
def Accounts(Type=None):
    """Return the [username, password] pair stored for the given service key.

    Raises KeyError for unknown keys (including the default None).
    """
    credentials = {
        'NASA': ['', ''],
        'GLEAM': ['', ''],
        'FTP_WA': ['', ''],
        'MSWEP': ['', ''],
        'VITO': ['', ''],
    }
    return credentials[Type]
|
# Each factory takes an (ignored) `dados` mapping and returns the SQL/seed
# data for the Tipos table.
# NOTE(review): the mutable default `dados={}` is harmless here because the
# argument is never used, but callers should not rely on it.
script_create_table_tipos = lambda dados = {} : """
DROP TABLE IF EXISTS Tipos;
CREATE TABLE Tipos (
id int NOT NULL PRIMARY KEY,
nome text NOT NULL DEFAULT 'pokemon'
);
"""
# Parameterized INSERT; placeholders are bound by the DB driver.
script_insert_table_tipos = lambda dados = {} : """INSERT INTO Tipos (id, nome) VALUES (?, ?);"""
# Default seed rows for the Tipos table.
dados_padrao_tabela_tipos = lambda dados = {} : [(1,'pokemon')]
# Sieve of Eratosthenes variant: print the k-th number erased by the sieve.
n, k = map(int,input().split())
cnt = 0
prime = [True]*(n+1)
for i in range(2,n+1,1):
    if prime[i] == False: continue
    for j in range(i,n+1,i):
        # Erase each still-marked multiple of i, counting every erasure.
        if prime[j] == True: prime[j] = False;cnt+=1
        # k-th erasure found: `break` only exits the inner loop, but cnt
        # can never equal k again, so nothing more is printed.
        if cnt == k: print(j);break
class TrackData():
    """Per-track container: trail data, camera info and appearance features."""
    def __init__(self):
        self.trail_data = None  # common dictionary, which is mutable
        self.camera_id = None  # id of the camera this track belongs to
        self.trail_num = 0  # numeric track identifier
        self.file_name = None  # source file for this track
        self.feat_list = []  # accumulated appearance feature vectors
        self.mean_feat = None  # averaged appearance feature
        self.height = None  # target height estimate
        self.map_time_stamp = []  # recorded map time stamps (may contain duplicates)
        self.target_image_path = None  # path to the target's image crop
        self.record_id = None  # id of the owning record
    def set_record_id(self, record_id):
        """Set record id."""
        self.record_id = record_id
    def get_record_id(self):
        """Get record id."""
        return self.record_id
    def set_image_path(self, image_path):
        """Set image path."""
        self.target_image_path = image_path
    def get_image_path(self):
        """Get image path."""
        return self.target_image_path
    def get_map_time_stamp(self):
        """Get map time stamps, de-duplicated (order not preserved)."""
        return list(set(self.map_time_stamp))
    def add_map_time_stamp(self, map_time_stamp):
        """Add map time stamp."""
        self.map_time_stamp.append(map_time_stamp)
    def get_trail_stamp_list(self):
        """Get trail stamp list (the keys of trail_data)."""
        return list(self.trail_data.keys())
    def set_stamp_data(self, time_stamp, new_stamp_data):
        """Set stamp data."""
        self.trail_data[time_stamp] = new_stamp_data
    def get_stamp_data(self, time_stamp):
        """Get stamp data, or None when the stamp is unknown."""
        try:
            stamp_data = self.trail_data[time_stamp]
        except KeyError:
            stamp_data = None
        return stamp_data
    def set_height(self, height):
        """Set height."""
        self.height = height
    def get_height(self):
        """Get height."""
        return self.height
    def set_trail(self, trail_data):
        """Set trail data."""
        self.trail_data = trail_data
    def get_trail(self):
        """Get trail data."""
        return self.trail_data
    def set_camera_id(self, camera_id):
        """Set camera id."""
        self.camera_id = camera_id
    def get_camera_id(self):
        """Get camera id."""
        return self.camera_id
    def set_trail_num(self, trail_num):
        """Set trail num."""
        self.trail_num = trail_num
    def get_trail_num(self):
        """Get trail num."""
        return self.trail_num
    def set_file_name(self, file_name):
        """Set file_name."""
        self.file_name = file_name
    def get_file_name(self):
        """Get file_name."""
        return self.file_name
    def put_feat_list(self, feat):
        """Append a feature to the feat list."""
        self.feat_list.append(feat)
    def get_feat_list(self):
        """Get feat list."""
        return self.feat_list
    def set_mean_feat(self, mean_feat):
        """Set mean_feat."""
        self.mean_feat = mean_feat
    def get_mean_feat(self):
        """Get mean_feat."""
        return self.mean_feat
class FrameData():
    """To save info for each stamp of a track."""
    def __init__(self):
        self.bbox = None  # bounding box for this stamp
        self.head = None  # head box for this stamp
        self.feat = None  # appearance feature for this stamp
        self.predict_flag = 0  # prediction indicator (semantics set by callers)
        self.world = None  # world coordinates for this stamp
        self.flag = None  # generic status flag (semantics set by callers)
        self.temp_world_dict = {}  # (record_id_a, record_id_b) -> world estimate
        self.camera_bbox = {}  # camera -> bbox
    def set_camera_bbox(self, camera, bbox):
        """Set camera bbox."""
        self.camera_bbox[camera] = bbox
    def get_camera_bbox(self, camera):
        """Get camera bbox, or None when the camera is unknown."""
        try:
            bbox = self.camera_bbox[camera]
            return bbox
        except KeyError:
            return None
    def set_bbox(self, bbox):
        """Set bbox."""
        self.bbox = bbox
    def get_bbox(self):
        """Get bbox."""
        return self.bbox
    def set_head(self, head):
        """Set head."""
        self.head = head
    def get_head(self):
        """Get head."""
        return self.head
    def set_feat(self, feat):
        """Set feat."""
        self.feat = feat
    def get_feat(self):
        """Get feat."""
        return self.feat
    def set_flag(self, flag):
        """Set flag."""
        self.flag = flag
    def get_flag(self):
        """Get flag."""
        return self.flag
    def set_predict_flag(self, flag):
        """Set predict flag."""
        self.predict_flag = flag
    def set_world(self, world):
        """Set world."""
        self.world = world
    def get_world(self):
        """Get world."""
        return self.world
    def put_temp_world_dict(self, record_id_a, record_id_b, world):
        """Store a temporary world estimate for a record-id pair."""
        self.temp_world_dict[(record_id_a, record_id_b)] = world
    def get_temp_world_dict(self):
        """Get the temporary world dict."""
        return self.temp_world_dict
class FrameResultData(FrameData):
    """To save info for each stamp of global id."""
    def __init__(self):
        super(FrameResultData, self).__init__()
        self.camera_bbox = {}  # camera -> bbox
        self.camera_head = {}  # camera -> head box
        self.camera_feat = {}  # camera -> appearance feature
        self.footpoint = None  # foot point for this stamp
    def set_camera_bbox(self, camera, bbox):
        """Set camera bbox."""
        self.camera_bbox[camera] = bbox
    def get_camera_bbox(self, camera):
        """Get camera bbox, or None when the camera is unknown."""
        try:
            bbox = self.camera_bbox[camera]
            return bbox
        except KeyError:
            return None
    def set_camera_head(self, camera, head):
        """Set camera head."""
        self.camera_head[camera] = head
    def get_camera_head(self, camera):
        """Get camera head, or None when the camera is unknown."""
        try:
            head = self.camera_head[camera]
            return head
        except KeyError:
            return None
    def set_footpoint(self, footpoint):
        """Set footpoint."""
        self.footpoint = footpoint
    def get_footpoint(self):
        """Get footpoint."""
        return self.footpoint
    def set_camera_feat(self, camera, feat):
        """Set camera feat."""
        self.camera_feat[camera] = feat
    def get_camera_feat(self, camera):
        """Get camera feat, or None when the camera is unknown."""
        try:
            feat = self.camera_feat[camera]
            return feat
        except KeyError:
            # Fix: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit; only a missing key is expected
            # here (consistent with the sibling getters).
            return None
|
def sum_iter(numbers):
    """Return the sum of `numbers` (0 for an empty iterable)."""
    # The built-in sum() is the idiomatic, C-speed replacement for the
    # manual accumulate-in-a-loop pattern and is behaviorally identical
    # here (it also starts from 0).
    return sum(numbers)
def sum_rec(numbers):
    """Recursively sum a sequence: first element plus the sum of the rest."""
    if not numbers:
        return 0
    head, tail = numbers[0], numbers[1:]
    return head + sum_rec(tail)
|
# Count how many readings are larger than the previous one.
with open('data.txt') as f:
    readings = [int(line.rstrip()) for line in f.readlines()]

increases = 0
previous = None
for reading in readings:
    if previous is None:
        print(readings[0])
    elif previous < reading:
        increases += 1
        print(f"{reading} increase")
    else:
        print(f"{reading}")
    previous = reading
print(f"Loop Total {increases}")

# Same count computed functionally over consecutive pairs.
pair_count = sum(int(b > a) for a, b in zip(readings, readings[1:]))
print(f"Generator count = {pair_count}")
|
#!/usr/bin/python3
def this_fails():
    """Deliberately raise ZeroDivisionError (exception-handling demo)."""
    return 1 / 0
# The handler catches the exception raised deeper in the call stack.
try:
    this_fails()
except ZeroDivisionError as err:
    print('Handling run-time error: ', err)
class TreeNode(object):
    """Binary-tree node holding a value and left/right children."""
    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None
class Solution:
    def mergeTrees(self, t1, t2):
        """Merge two binary trees into a new tree.

        Overlapping nodes are summed; where only one tree has a node,
        that subtree is reused (shared, not copied).
        """
        if t1 is None:
            return t2
        if t2 is None:
            return t1
        merged = TreeNode(t1.val + t2.val)
        merged.left = self.mergeTrees(t1.left, t2.left)
        merged.right = self.mergeTrees(t1.right, t2.right)
        return merged
# Demo: merge the two sample trees and print the merged values
# (expected output: 3, 4, 5, 5, 4, 7).
z = Solution()
# First tree:   1          Second tree:   2
#              / \                       / \
#             3   2                     1   3
#            /                           \   \
#           5                             4   7
a = TreeNode(1)
b = TreeNode(3)
c = TreeNode(2)
d = TreeNode(5)
a.left = b
a.right = c
b.left = d
e = TreeNode(2)
f = TreeNode(1)
g = TreeNode(3)
h = TreeNode(4)
i = TreeNode(7)
e.left = f
e.right = g
f.right = h
g.right = i
res = (z.mergeTrees(a, e))
print(res.val)
print(res.left.val)
print(res.right.val)
print(res.left.left.val)
print(res.left.right.val)
print(res.right.right.val)
|
# -*- coding: utf-8 -*-
class Config(object):
    """Base configuration: safe, non-debug defaults shared by all environments."""
    DEBUG = False
    TESTING = False
    DATABASE_URI = ('postgresql+psycopg2://'
                    'taxo:taxo@localhost:5432/taxonwiki')
    ASSETS_DEBUG = False
    ASSETS_CACHE = True
    ASSETS_MANIFEST = 'json'
    # Minify and mangle JS bundles by default.
    UGLIFYJS_EXTRA_ARGS = ['--compress', '--mangle']
    COMPASS_CONFIG = {
        'output_style': ':compressed'
    }
    # Placeholder only -- overridden per environment (None in production).
    SECRET_KEY = 'I am not safe please do not use me'
class ProductionConfig(Config):
    """Production settings.

    SECRET_KEY is None so deployments must supply a real key instead of
    inheriting the insecure default from Config.
    """
    SECRET_KEY = None
class DevelopmentConfig(Config):
    """Development overrides: debug on, uncached and more readable assets."""
    DEBUG = True
    ASSETS_CACHE = False
    UGLIFYJS_EXTRA_ARGS = ['--compress']
    COMPASS_CONFIG = {
        'output_style': ':extended'
    }
class TestingConfig(Config):
    """Testing overrides: enable the framework's testing mode."""
    TESTING = True
|
# Calculator keypad layout: button index -> [LaTeX label (maths mode), action name].
buttons_dict = {1: [r"$|a|$", "abs"], 2: [r"$\sqrt{a}$", "sqrt"], 3: [r"$\log$", "log"],
                4: [r"$\ln$", "ln"], 5: [r"$a^b$", "power"], 6: [r"$()$", "brackets"],
                7: [r"%", "percent"], 8: [r"$=$", "equals"], 9: [r"$\lfloor{a}\rfloor$", "floor"],
                10: [r"$f(x)$", "func"], 11: [r"$\cot$", "cot"], 12: [r"$\tan$", "tan"],
                13: [r"$7$", "7"], 14: [r"$8$", "8"], 15: [r"$9$", "9"], 16: [r"$\div$", "div"],
                17: [r"$\lceil{a}\rceil$", "ceil"], 18: [r"$\frac{d}{dx}$", "derivative"],
                19: [r"$\sec$", "sec"], 20: [r"$\cos$", "cos"], 21: [r"$4$", "4"], 22: [r"$5$", "5"],
                23: [r"$6$", "6"], 24: [r"$\times$", "times"], 25: [r"$x$", "x"],
                26: [r"$\int$", "integral"], 27: [r"$\csc$", "csc"], 28: [r"$\sin$", "sin"],
                29: [r"$1$", "1"], 30: [r"$2$", "2"], 31: [r"$3$", "3"], 32: [r"$-$", "minus"],
                33: [r"$y$", "y"], 34: [r"$\int^a_b$", "def_integral"], 35: [r"$e$", "e"],
                36: [r"$\pi$", "pi"], 37: [r"$\frac{a}{b}$", "fraction"], 38: [r"$0$", "0"],
                39: [r"$.$", "decimal_point"], 40: [r"$+$", "plus"]}
# these are the buttons in LaTeX maths mode format
# Default ingredient vocabulary. The name suggests it is used as a fallback
# when no custom list is supplied -- confirm with callers. Entries are
# free-form (mixed capitalization, duplicates such as 'hamburger', typos
# like 'avacado' kept verbatim).
default_ingredient_list = ['lentils', 'kale', 'shallots', 'swiss cheese', 'anchovies', 'Quiche', 'cashew nut',
                           'Waffles', 'chicken liver', 'parsley', 'babaganoosh', 'Toast', 'bouillon', 'hamburger',
                           'hoisin sauce', 'chaurice sausage', 'fennel', 'curry', 'clams', 'spaghetti squash',
                           'haiku roll', 'ancho chili peppers', 'flax seed', 'remoulade', 'alfredo sauce', 'avacado',
                           'broccoli', 'moo shu wrappers', 'truffles', 'carne asada', 'Pancakes', 'tomato puree',
                           'steak', 'Guancamole', 'crab', 'bison', 'almond', 'snap peas', 'corn', 'basil', 'barley',
                           'grouper', 'romaine lettuce', 'tarragon', 'Spaghetti', 'edimame', 'Tater tots', 'jambalaya',
                           'amaretto', 'bean sauce', 'lobster', 'granola', 'sour cream', 'yogurt', 'cilantro',
                           'avocados', 'duck', 'dates', 'kumquats', 'spearmint', 'celery seeds', 'cider vinegar',
                           'sardines', 'bacon', 'jack cheese', 'haddock', 'shitakes', 'franks', 'pickles', 'ginger',
                           'ginger ale', 'french fries', 'Irish stew', 'breadfruit', 'dips', 'bass',
                           'potato chips', 'lemons', 'salmon', 'Wine', 'caviar', 'apple butter', 'bard', 'coconut oil',
                           'Cabbage', 'carrots', 'asparagus', 'kiwi', 'chocolate', 'unsweetened chocolate',
                           'tomato sauce', 'oatmeal', 'gumbo', 'panko bread crumbs', 'pancetta', 'Reuben',
                           'condensed milk', 'Pizza', 'curry paste', 'rosemary', 'ketchup', 'cornmeal', 'turkeys',
                           'rice', 'split peas', 'pink beans', 'maraschino cherries', 'dried leeks', 'bruschetta',
                           'molasses', 'spinach', 'cucumbers', 'cupcakes', 'mesclun greens', 'bagels', 'apples',
                           'Bruscetta', 'ice cream', 'asiago cheese', 'tomatoes', 'pistachios', 'eggs', 'vegemite',
                           'corn syrup', 'cake', 'hash browns', 'sazon', 'veal', 'habanero chilies', 'red chili powder',
                           'Tabasco sauce', 'fajita', 'portabella mushrooms', 'Goji berry', 'brazil nuts', 'parsnips',
                           'enchilada', 'Quesadilla', 'hummus', 'chimichangadates', 'sherry', 'bok choy', 'horseradish',
                           'rhubarb', 'quail', 'mint', 'Irish cream liqueur', 'Pepperoni', 'melons', 'pears',
                           'cocoa powder', 'bluefish', 'Mandarin oranges', 'cooking wine', 'tartar sauce', 'papayas',
                           'honey', 'shrimp', 'black olives', 'canola oil', 'cheddar cheese', 'alfalfa', 'cider',
                           'corn flour', 'feta cheese', 'fondu', 'onions', 'water', 'sauerkraut', 'cornstarch',
                           'bourbon', 'cabbage', 'brown rice', 'baguette', 'balsamic vinegar', 'ahi tuna ', 'mushrooms',
                           'pasta', 'chips', 'garlic', 'chicory', 'allspice', 'maple syrup', 'chickpeas', 'chard',
                           'hot dogs', 'baking soda', 'arugala', 'sausages', 'sweet peppers', 'five-spice powder',
                           'thyme', 'chili powder', 'Havarti cheese', 'artichokes', 'beef', 'fish ', 'tuna',
                           'eel sushi', 'sweet potatoes', 'donuts', 'sunflower seeds', 'coconuts', 'salsa', 'celery',
                           'prunes', 'crayfish', 'hamburger ', 'beer', 'jicama', 'rum', 'rice vinegar', 'bean threads',
                           'hazelnuts', 'kidney beans', 'halibut', 'grapes', 'chambord', 'adobo', 'chipotle peppers',
                           'capers', 'Ziti', 'cherries', 'hot sauce', 'eel', 'pico de gallo', 'green onions',
                           'sesame seeds', 'Zucchini', 'French toast', 'chai', 'focaccia', 'guavas', 'raspberries',
                           'huckleberries', 'zinfandel wine', 'croutons', 'mayonnaise', 'barbecue sauce', 'cumin',
                           'pea beans', 'tonic water', 'tortillas', 'squash', 'gorgonzola', 'squid', 'Graham crackers',
                           'brussels sprouts', 'coriander', 'summer squash', 'rose water', 'mustard seeds', 'borscht',
                           'gelatin', 'tofu', 'white beans', 'English muffins', 'jelly', 'cream cheese', 'snapper',
                           'mustard', 'broccoli raab', 'Romano cheese', 'buritto', 'paprika', 'acorn squash ',
                           'snow peas', 'cannellini beans', 'red snapper', 'ham', 'raisins', 'creme fraiche',
                           'watermelons', 'artificial sweetener', 'BBQ', 'Linguine', 'plantains', 'strawberries',
                           'monkfish', 'powdered sugar', 'Spinach', 'cheese', 'buckwheat', 'potatoes', 'goose', 'beets',
                           'lima beans', 'jelly beans', 'huenos rancheros', 'pumpkins', 'salt', 'blueberries',
                           'navy beans', 'graham crackers', 'custard', 'sushi', 'radishes', 'berries',
                           'red pepper flakes', 'ale', 'okra', 'soy sauce', 'tea', 'aioli', 'date sugar', 'pork',
                           'liver', 'cottage cheese', 'limes', 'orange peels', 'vinegar', 'olives', 'cactus', 'Kahlua',
                           'mackerel', 'apricots', 'green beans', 'Garlic', 'black-eyed peas', 'soybeans',
                           'andouille sausage', 'Marsala', 'jam', 'marshmallows', 'walnuts', 'geese', 'flour', 'coffee',
                           'heavy cream', 'red beans', 'lemon juice', 'poultry seasoning', 'Cappuccino Latte',
                           'red cabbage', 'blue cheese', 'chicken', 'Moose', 'Yogurt', 'baked beans', 'cream', 'figs',
                           'dill', 'swordfish', 'rice wine', 'peanuts', 'cayenne pepper', "pig's feet", 'fish sauce',
                           'barley sugar', 'acorn squash', 'rice paper', 'Lasagna', 'applesauce', 'cauliflower',
                           'kabobs', 'sea cucumbers', 'sugar', 'Ostrich', 'asian noodles ', 'zest', 'cinnamon',
                           'Venison', 'chowder', 'butter', 'almond butter', 'cream of tartar', 'dumpling', 'Milk',
                           'cantaloupes', 'apple pie spice', 'brown sugar', 'cod', 'lemon Peel', 'vermouth',
                           'provolone', 'Worcestershire sauce', 'beans', 'breadcrumbs', 'bay leaves', 'garlic powder',
                           'eggrolls', 'jerky', 'water chestnuts', 'scallops', 'Walnuts', 'almond paste', 'wasabi',
                           'cloves', 'marmalade', 'honeydew melons', 'brunoise', 'bread', 'white chocolate', 'chutney',
                           'chestnuts', 'Meatballs', 'baking powder', 'catfish', 'rabbits', 'olive oil', 'poppy seeds',
                           'margarine', 'pecans', 'nectarines', 'milk', 'eggplants', 'sweet chili sauce', 'bisque',
                           'venison', 'buttermilk', 'mascarpone', 'cereal', 'mozzarella', 'ricotta cheese',
                           'pumpkin seeds', 'half-and-half', 'Italian bread', 'dumplings', 'Noodles', 'pinto beans',
                           'jalapeno', 'plum tomatoes', 'curry powder', 'broth', 'Parmesan cheese', 'grapefruits',
                           'pepper', 'bacon grease', 'lettuce', 'crabs', 'plums', 'blackberries', 'pesto', 'cookies',
                           'succotash', 'soymilk', 'gouda', 'oranges', 'pine nuts', 'bean sprouts', 'artichoke',
                           'won ton skins', 'trout', 'pomegranates', 'French dip', 'cremini mushrooms', 'oregano',
                           'pheasants', 'corned beef', 'gnocchi', 'chili sauce', 'turtle', 'almond extract', 'antelope',
                           'lemon grass', 'Avocado roll', 'almonds', 'falafel', 'peanut butter', 'tomato paste',
                           'pineapples', 'wild rice', 'Milkshake', 'tomato juice', 'wine vinegar', 'alligator',
                           'albacore tuna', 'herring', 'mussels', 'lamb', 'cranberries', 'chives', 'onion powder',
                           'leeks', 'peaches', 'Lamb', 'fennel seeds', 'Indian food', 'Canadian bacon', 'prawns',
                           'coconut milk', 'peas', 'couscous', 'Apple juice', 'bananas', 'brandy', 'lobsters', 'sage',
                           'wine', 'prosciutto', 'chili peppers', 'kingfish', 'raw sugar', 'aquavit', 'Porter',
                           'curry leaves', 'black beans', 'vanilla', 'colby cheese', 'passion fruit', 'octopus',
                           'vanilla bean', 'grits', 'flounder', 'arugula', 'turnips', 'macaroni', 'anchovy paste']
|
"""type_traits.py
We need to assess if a string can be converted to int or float.
This module provides simple tests is_<type>.
"""
def is_float(val):
    """Return True when `val` is a number exactly representable as float.

    NOTE(review): despite the module docstring, a *string* argument makes
    `float(val) - val` raise TypeError, so strings always return False;
    this behavior is kept for compatibility.

    Fixes: the bare `except` is narrowed to the exceptions the expression
    can raise, and the unreachable trailing `return False` is removed.
    """
    try:
        return float(val) - val == 0
    except (TypeError, ValueError, OverflowError):
        return False
def is_int(val):
    """Return True when `val` is a number exactly representable as int.

    NOTE(review): as with is_float, string input makes `int(val) - val`
    raise TypeError, so strings always return False; kept for
    compatibility.

    Fixes: bare `except` narrowed; unreachable trailing `return False`
    removed.
    """
    try:
        return int(val) - val == 0
    except (TypeError, ValueError, OverflowError):
        return False
def is_zero(val):
    """Return True when `val` compares equal to 0 (so 0, 0.0 and False all qualify)."""
    return val == 0
|
# coding:utf-8
# example 04: double_linked_list.py
class Node(object):
    """Doubly-linked-list node."""
    def __init__(self, val=None):
        self.val = val    # payload
        self.prev = None  # previous node (or the sentinel root)
        self.next = None  # next node
class DoubleLinkedList(object):
    """Doubly linked list with a sentinel `root` node and a `tailnode` pointer.

    An optional `maxsize` bounds the list; append/appendleft raise when full.
    """
    def __init__(self, maxsize=None):
        self.maxsize = maxsize
        self.root = Node()       # sentinel; root.next is the real head
        self.tailnode = None     # last real node, or None when empty
        self.length = 0
    def __len__(self):
        return self.length
    def iter_node(self):
        """Yield the real nodes from head to tail."""
        if self.length == 0:
            return
        cur = self.root
        for _ in range(self.length):
            cur = cur.next
            yield cur
    def __iter__(self):
        """Iterate over values, head to tail."""
        for node in self.iter_node():
            yield node.val
    def iter_node_reverse(self):
        """Yield the real nodes from tail to head."""
        if self.length == 0:
            return
        cur = self.tailnode
        for _ in range(self.length):
            yield cur
            cur = cur.prev
    def empty(self):
        """True when the list has no real nodes."""
        return self.root.next is None
    def append(self, val):    # O(1)
        """Append `val` at the tail; raises when the list is full."""
        if self.maxsize is not None and self.length == self.maxsize:
            raise Exception("Full")
        node = Node(val)
        if self.length == 0:
            node.prev = self.root
            self.root.next = node
        else:
            node.prev = self.tailnode
            self.tailnode.next = node
        self.tailnode = node
        self.length += 1
    def appendleft(self, val):    # O(1)
        """Insert `val` right after the sentinel (new head)."""
        if self.maxsize is not None and self.length == self.maxsize:
            raise Exception("Full")
        node = Node(val)
        if self.length == 0:
            node.prev = self.root
            self.root.next = node
            self.tailnode = node
        else:
            node.prev = self.root
            node.next = self.root.next
            self.root.next.prev = node
            self.root.next = node
        self.length += 1
    def pop(self):    # O(1)
        """Remove and return the tail value; raises when empty."""
        if self.length == 0:
            raise Exception("pop form empty Double Linked List")
        val = self.tailnode.val
        tailnode = self.tailnode.prev
        tailnode.next = None
        del self.tailnode
        self.length -= 1
        # When the last real node was removed, tailnode would be the
        # sentinel; normalize that back to None.
        self.tailnode = None if self.length == 0 else tailnode
        return val
    def popleft(self):    # O(1)
        """Remove and return the head value; raises when empty."""
        if self.length == 0:
            raise Exception("pop form empty Double Linked List")
        headnode = self.root.next
        val = headnode.val
        self.root.next = headnode.next
        if headnode is self.tailnode:
            self.tailnode = None
        else:
            headnode.next.prev = self.root
        del headnode
        self.length -= 1
        return val
    def find(self, val):    # O(n)
        """Return the index of the first node holding `val`, or -1."""
        for idx, node in enumerate(self.iter_node()):
            if node.val == val:
                return idx
        return -1
    def insert(self, pos, val):    # O(n)
        """Insert `val` at position `pos`; clamps to the ends when out of range."""
        if pos <= 0:
            self.appendleft(val)
        elif self.length - 1 < pos:
            self.append(val)
        else:
            node = Node(val)
            pre = self.root
            # Walk to the node just before position `pos`.
            for _ in range(pos):
                pre = pre.next
            node.prev = pre
            node.next = pre.next
            node.next.prev = node
            pre.next = node
            self.length += 1
    def remove(self, node):    # O(1), node is not value
        """Unlink the given node object (no membership check) and return it."""
        if self.length == 0:
            return
        if self.length == 1:
            self.root.next = None
            self.tailnode = None
        elif node is self.tailnode:
            self.tailnode = node.prev
            self.tailnode.next = None
        else:
            node.prev.next = node.next
            node.next.prev = node.prev
        self.length -= 1
        return node
    def clear(self):
        """Drop every node and reset to the empty state."""
        for node in self.iter_node():
            del node
        self.root.next = None
        self.tailnode = None
        self.length = 0
# use pytest
# NOTE(review): these tests share one module-level list and rely on
# running in definition order (pytest's default); they are a sequential
# scenario, not independent tests.
dll = DoubleLinkedList()
class TestDoubleLinkedList(object):
    """Sequential pytest scenario exercising DoubleLinkedList."""
    def test_append(self):
        dll.append(1)
        dll.append(2)
        dll.append(3)
        assert [node.val for node in dll.iter_node()] == [1, 2, 3]
        assert [node.val for node in dll.iter_node_reverse()] == [3, 2, 1]
    def test_appendleft(self):
        dll.appendleft(0)
        assert list(dll) == [0, 1, 2, 3]
    def test_len(self):
        assert len(dll) == 4
    def test_pop(self):
        tail_val = dll.pop()
        assert tail_val == 3
    def test_popleft(self):
        head_val = dll.popleft()
        assert head_val == 0
    def test_find(self):
        assert dll.find(2) == 1
        assert dll.find(4) == -1
    def test_insert(self):
        dll.insert(1, 5)
        assert [node.val for node in dll.iter_node()] == [1, 5, 2]
    def test_remove(self):
        headnode = dll.root.next
        node = dll.remove(headnode)
        assert node.val == 1
        assert [node.val for node in dll.iter_node()] == [5, 2]
    def test_clear_and_empty(self):
        dll.clear()
        assert dll.empty() is True
|
def trinomial(cfg, i, j, k):
    """Trinomial coefficient (i+j+k)! / (i! * j! * k!).

    ``cfg`` supplies the ``factorial`` implementation (e.g. a config or
    math-like object).  Returns a float, as in the original MATLAB port.
    """
    numerator = cfg.factorial(i + j + k)
    denominator = cfg.factorial(i) * cfg.factorial(j) * cfg.factorial(k)
    return numerator / denominator
|
# Print a letter triangle: row n repeats the n-th uppercase letter n times.
for width, code in enumerate(range(65, 70), start=1):
    print(chr(code) * width)
# Expected pattern:
# A
# BB
# CCC
# DDDD
# EEEEE
# INSS (Brazilian social security, 2020) progressive payroll deduction.
# Brackets: 7.5% up to 1045.00; 9% to 2089.60; 12% to 3134.40; 14% to 6101.06
# (the contribution ceiling).

def calcular_inss(salario):
    """Return the INSS deduction for ``salario`` using the 2020 brackets.

    Extracted from the original module-level script so the calculation can
    be imported and tested; the command-line behaviour is unchanged.
    """
    if salario <= 1045.00:
        return salario * 0.075
    desc = 1045 * 0.075  # first bracket is always fully taxed at 7.5%
    if salario <= 2089.60:
        return desc + (salario - 1045) * 0.09
    desc += (2089.60 - 1045) * 0.09
    if salario <= 3134.40:
        return desc + (salario - 2089.60) * 0.12
    desc += (3134.40 - 2089.60) * 0.12
    if salario <= 6101.06:
        return desc + (salario - 3134.40) * 0.14
    # Contributions are capped at the 6101.06 ceiling.
    return desc + (6101.06 - 3134.40) * 0.14

if __name__ == "__main__":
    salario = float(input('Qual o salário de contribuição? '))
    desc = calcular_inss(salario)
    print(f'O desconto sobre o salário de {salario} é {desc:.2f}.')
|
# -*- coding: utf-8 -*-
# Rez package definition for Intel TBB 2017.0.
name = 'tbb'
version = '2017.0'
def commands():
    # Executed by rez when resolving an environment; ``appendenv`` and
    # ``env`` are injected by rez's package-commands scope, not defined here.
    appendenv('LD_LIBRARY_PATH', '{root}/lib/intel64/gcc4.7')
    env.TBBROOT.set('{root}')
    env.TBB_LIBRARIES.set('{root}/lib/intel64/gcc4.7')
    env.TBB_INCLUDE_DIR.set('{root}/include')
|
#!/usr/bin/env python3
# _*_ coding: utf-8 _*_
"""
---------------------------------
@Time : 2017/12/7 15:22
@Author : yuxy
@File : return_func.py
@Project : PythonSyntax
----------------------------------
"""
def lazy_sum(*args):
    """Return a zero-argument closure that sums *args* when called."""
    def sum():
        total = 0
        for value in args:
            total += value
        return total
    return sum
f = lazy_sum(1, 3, 5, 7, 9)
print(f)    # the closure object itself, not a number
print(f())  # 25: the deferred sum is computed only on call
# Closure-based counter: each call to the returned function yields the
# next integer (1, 2, 3, ...).
def create_counter():
    """Return a callable producing 1, 2, 3, ... on successive calls."""
    state = [0]  # boxed counter so the inner function can mutate it
    def counter():
        state[0] += 1
        return state[0]
    return counter
counterA = create_counter()
print(counterA(), counterA(), counterA(), counterA(), counterA()) # 1 2 3 4 5
# Each closure keeps its own count, so counterB restarts from 1.
counterB = create_counter()
if [counterB(), counterB(), counterB(), counterB()] == [1, 2, 3, 4]:
    print('测试通过!')  # "test passed"
else:
    print('测试失败!')  # "test failed"
# Variant 2: same counter, state held via ``nonlocal`` instead of a list box.
def createCounter():
    """Return a callable producing 1, 2, 3, ... on successive calls."""
    total = 0
    def counter():
        nonlocal total
        total += 1
        return total
    return counter
# Variant 3: the counter is the bound __next__ of an infinite generator.
def createCounter():
    """Return a callable producing 1, 2, 3, ... on successive calls."""
    def naturals():
        value = 1
        while True:
            yield value
            value += 1
    return naturals().__next__
counterA = createCounter()
print(counterA(), counterA(), counterA(), counterA(), counterA()) # 1 2 3 4 5
counterB = createCounter()
if [counterB(), counterB(), counterB(), counterB()] == [1, 2, 3, 4]:
    print('测试通过!')
else:
    print('测试失败!')
# Odd numbers in [1, 20) via filter + lambda.
L = list(filter(lambda n: n % 2 == 1, range(1, 20)))
print(L)  # [1, 3, 5, 7, 9, 11, 13, 15, 17, 19]
|
def plan_vacation(budget, season):
    """Return (destination, info) for the given budget and season.

    Tiers: <=100 Bulgaria, <=1000 Balkans, otherwise Europe.  In summer
    the cheaper "Camp" option replaces the hotel for the first two tiers.
    Extracted from the original module-level script so the logic can be
    imported and tested; command-line behaviour is unchanged.
    """
    if budget <= 100:
        destination = 'Bulgaria'
        if season == 'summer':
            info = f'Camp - {budget * 0.3:.2f}'
        else:
            info = f'Hotel - {budget * 0.7:.2f}'
    elif budget <= 1000:
        destination = 'Balkans'
        if season == 'summer':
            info = f'Camp - {budget * 0.4:.2f}'
        else:
            info = f'Hotel - {budget * 0.8:.2f}'
    else:
        destination = 'Europe'
        info = f'Hotel - {budget * 0.9:.2f}'
    return destination, info

if __name__ == "__main__":
    budget = float(input())
    season = input()
    destination, info = plan_vacation(budget, season)
    print('Somewhere in ' + destination)
    print(info)
|
# -*- coding: utf-8 -*-
# ------------- Cantidad de segundos que has vivido -------------
# Definición de variables
anios = 30
dias_por_anio = 365
horas_por_dia = 24
segundos_por_hora = 60 * 60  # BUG FIX: an hour has 3600 seconds, not 60
# Operación: years * days/year * hours/day * seconds/hour
print (anios * dias_por_anio * horas_por_dia * segundos_por_hora)
|
"""W3C Document Object Model implementation for Python.
The Python mapping of the Document Object Model is documented in the Python Library Reference, in the section on the xml.dom package.
This package contains the following modules:
minidom -- A simple implementation of the Level 1 DOM with namespace
support added (based on the Level 2 specification).
"""
|
# 1. Define a class with a static method.
class Dog(object):
    @staticmethod
    def info_print():
        # The message is Chinese for "this is a static method".
        print('这是一个静态方法')
# 2. Create an instance.
wangcai = Dog()
# 3. A static method can be called on an instance or on the class itself.
wangcai.info_print()
Dog.info_print()
# Memo table of Fibonacci numbers; -1 marks "not computed yet".
# Flag is kept for backward compatibility with the original module API.
FiboList, Flag = [0, 1], True

def fibonacci_1(n):
    """Return the n-th Fibonacci number (F(0)=0, F(1)=1), memoized.

    BUG FIX: the original sized the memo table only on the very first
    call, so any later call with a larger ``n`` raised IndexError.  The
    table now grows on demand.
    """
    global Flag
    Flag = False
    if n >= len(FiboList):
        FiboList.extend([-1] * (n - len(FiboList) + 1))
    if FiboList[n] == -1:
        FiboList[n] = fibonacci_1(n - 1) + fibonacci_1(n - 2)
    return FiboList[n]

print(fibonacci_1(10))
|
# Exercise 6: given the dict below (name -> score),
# 1) print its items ranked by score, highest first.
d = {"chaoqian": 87, "caoxu": 90, "caohuan": 98, "wuhan": 82, "zhijia": 89}
print(sorted(d.items(), key=lambda item: item[1], reverse=True))
|
def solution(movements):
    """AoC 2021 day 2, part 1: product of final horizontal position and depth.

    ``movements`` is an iterable of strings like "forward 5" / "down 3".
    """
    horizontal = 0
    depth = 0
    for command in movements:
        direction, amount = command.split(' ')
        amount = int(amount)
        if direction == "forward":
            horizontal += amount
        elif direction == "down":
            depth += amount
        elif direction == "up":
            depth -= amount
    return horizontal * depth
def solution2(movements):
    """AoC 2021 day 2, part 2: "down"/"up" adjust aim; "forward X" moves
    X and dives aim*X.  Returns horizontal * vertical.

    Cleanup: ``magnitude`` is converted once — the original redundantly
    re-applied int() in the down/up branches.
    """
    horizontal, vertical, aim = 0, 0, 0
    for move in movements:
        direction, magnitude = move.split(' ')
        magnitude = int(magnitude)
        if direction == "forward":
            horizontal += magnitude
            vertical += aim * magnitude
        elif direction == "down":
            aim += magnitude
        elif direction == "up":
            aim -= magnitude
    return horizontal * vertical
# Self-checks against the worked example from the puzzle statement.
assert 150 == solution(["forward 5","down 5","forward 8","up 3","down 8","forward 2"])
assert 900 == solution2(["forward 5","down 5","forward 8","up 3","down 8","forward 2"])

def main():
    """Read the puzzle input and print both answers."""
    with open('input.txt', 'r') as inp:
        input_data = inp.readlines()
    result1 = solution(input_data)
    print(f"Part 1 answer: {result1}")
    result2 = solution2(input_data)
    print(f"Part 2 answer: {result2}")

if __name__ == "__main__":
    # BUG FIX: main() previously ran unconditionally, crashing on import
    # whenever input.txt is absent.
    main()
|
# Extended stop-word list mixing standard English stop words with Nigerian
# Pidgin/Yoruba tokens, Spanish/Portuguese function words, single letters,
# digits and social-media noise ("rt", "amp", "https", ...).
# NOTE(review): presumably used for tweet preprocessing — confirm with the
# caller; the list contains duplicates (e.g. 'don', 'wan', 'e'), which is
# harmless for membership tests.
stop_words = ['dey', 'awfar', 'wey', 'wan', 'shey', 'sabi', 'i', 'me', 'my', 'myself', 'we', 'our', 'ours', 'ourselves',
              'you', 'your', 'yours', 'yourself', 'yourselves', 'he', 'him', 'chop', 'beta', 'molenu',
              'his', 'himself', 'she', 'her', 'hers', 'herself', 'it', 'its','eni', 'duro',
              'itself', 'they', 'them', 'their', 'theirs', 'themselves', 'dia', 'moti', 'gba',
              'what', 'which', 'who', 'whom', 'this', 'that', 'these', 'mo', 'lenu', 'ewo', 'chai',
              'those', 'am', 'is', 'are', 'was', 'were', 'be', 'been', 'e', 'don', 'oshe', 'oshey',
              'being', 'have', 'has', 'had', 'having', 'do', 'does', 'did', 'hussle', 'hashtag',
              'doing', 'a', 'an', 'the', 'and', 'but', 'if', 'or', 'because', 'werey', 'unda', 'khaki',
              'as', 'until', 'while', 'of', 'at', 'by', 'for', 'with', 'about', 'awon', 'ja', 'eja',
              'against', 'between', 'into', 'through', 'during', 'before', 'after', 'tenk', '10x',
              'above', 'below', 'to', 'from', 'up', 'down', 'in', 'out', 'on', 'oshi', 'belle', 'casala',
              'off', 'over', 'under', 'again', 'further', 'then', 'once', 'here',
              'there', 'when', 'where', 'why', 'how', 'all', 'any', 'both', 'each',
              'few', 'more', 'most', 'other', 'some', 'such', 'no', 'nor', 'not',
              'only', 'own', 'same', 'so', 'than', 'too', 'very', 's', 't', 'can',
              'will', 'just', 'don','should', 'now','https',"'s",'...', "whats'",
              "rt","whats","n't","de","'m","un","en","``","dedic","twittermoments",
              "amp","e","y","o","ce","retweet","sur","na","el","1","2","3","4",
              "5","6","7","8","9","0","ca","nao","se","com","los","u","des","-",
              "--","'","''","la","como","con","segundo",'de', 'la', 'que', 'el',
              'en', 'y', 'a', 'los', 'del', 'se', 'las', 'por', 'un', 'para',
              'con', 'no', 'una', 'su', 'al', 'lo', 'como', 'más', 'pero', 'sus',
              'le', 'ya', 'o', "know","time","q","em", 'fent', 'kalamity', 'wine',
              "re","11","isnt","wan","ver","like","'re","m","'ve","bec","n","twt","kca","c","a",
              "b","d","e","f","g","h","i","j","k","l","m","n","o",
              "p","q","r","s","t","u","v","w","x","y","z"]
|
class Solution:
    def combine(self, n: int, k: int) -> List[List[int]]:
        """LeetCode 77: return all k-element combinations of 1..n."""
        if k <= 0 or n < k:
            return []
        res_lst = []
        def dfs(i, curr_lst):
            if len(curr_lst) == k:
                res_lst.append(curr_lst)
                # BUG FIX: stop here — the original kept recursing past a
                # complete combination, building over-long lists that could
                # never match len == k again (wasted exponential work).
                return
            for value in range(i, n+1):
                dfs(value+1, curr_lst+[value])
        dfs(1, [])
        return res_lst
# time complexity : O(C(n, k) * k)
# space complexity : O(k) recursion depth (plus the output)
|
# -*- coding: utf-8 -*-
# Distribution metadata for the pygcgen package.
__version__ = "0.2.4"
__title__ = "pygcgen"
__summary__ = "Automatic changelog generation"
__uri__ = "https://github.com/topic2k/pygcgen"
__author__ = "topic2k"
__email__ = "topic2k+pypi@gmail.com"
__license__ = "MIT"
__copyright__ = "2016-2018 %s" % __author__
|
"""
Default status.html resource
"""
class StatusResource:
def on_get(self, req, resp):
pass |
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 13 13:02:55 2015
@author: magusverma
"""
def reached_limit(current, limit):
    """True when every digit of the mixed-radix counter is at its maximum.

    ``current[i]`` maxes out at ``limit[i] - 1``.
    """
    return all(digit == cap - 1 for digit, cap in zip(current, limit))
def increment(current, limit):
    """Advance the mixed-radix counter ``current`` by one, in place.

    Works like an odometer: the least-significant (rightmost) digit is
    bumped; digits already at ``limit[i] - 1`` roll over to 0 and carry.
    """
    for pos in reversed(range(len(current))):
        if current[pos] < limit[pos] - 1:
            current[pos] += 1
            return
        current[pos] = 0
|
class GraphEdgesMapping:
    """Pairs each graph edge with its two dual-graph edge mappings."""
    def __init__(self, first_dual_edges_mapping, second_dual_edges_mapping):
        # NOTE(review): both arguments appear to be array-likes with a
        # leading edge dimension (``.shape[0]`` is read below) — confirm.
        self._first = first_dual_edges_mapping
        self._second = second_dual_edges_mapping
    @property
    def size(self):
        """Number of mapped edges (rows of the first mapping)."""
        return self._first.shape[0]
    @property
    def first(self):
        """Read-only access to the first dual-edges mapping."""
        return self._first
    @property
    def second(self):
        """Read-only access to the second dual-edges mapping."""
        return self._second
|
class Solution:
    """LeetCode 34: find first and last position of a target in a sorted array.

    Fixes over the original: ``binary_find_last`` was a plain binary search
    returning the index of *any* matching element, and ``searchRange``
    returned that single int instead of the required [first, last] pair
    (the file's own trailing comment admitted the approach was wrong).
    Both searches run in O(log n), as the problem requires.
    """

    def binary_find_first(self, nums, target):
        """Return the index of the first occurrence of target, or -1."""
        low, hight = 0, len(nums) - 1
        found = -1
        while low <= hight:
            mid = (hight - low) // 2 + low
            if nums[mid] >= target:
                if nums[mid] == target:
                    found = mid
                hight = mid - 1  # keep looking further left
            else:
                low = mid + 1
        return found

    def binary_find_last(self, nums, target):
        """Return the index of the last occurrence of target, or -1."""
        low, hight = 0, len(nums) - 1
        found = -1
        while low <= hight:
            mid = (hight - low) // 2 + low
            if nums[mid] <= target:
                if nums[mid] == target:
                    found = mid
                low = mid + 1  # keep looking further right
            else:
                hight = mid - 1
        return found

    def searchRange(self, nums, target: int):
        """Return [first_index, last_index] of target in nums, or [-1, -1]."""
        return [self.binary_find_first(nums, target),
                self.binary_find_last(nums, target)]
# def test_mid_find(nums, target):
# left = 0
# right = len(nums)-1
# index = -1
# while left <= right:
# mid = (right-left)//2+left
# if target <= nums[mid]:
# right = mid-1
# index = mid
# else:
# left = mid+1
# return index
# The commented-out approach above is wrong: it mishandles the first
# element and returns an ambiguous 0 when the target is absent.
def binary_find(nums, target):
    """Classic binary search: index of ``target`` in sorted ``nums``, or -1.

    If the target occurs multiple times, an arbitrary matching index is
    returned.
    """
    lo, hi = 0, len(nums) - 1
    while lo <= hi:
        middle = lo + (hi - lo) // 2
        if nums[middle] < target:
            lo = middle + 1
        elif nums[middle] > target:
            hi = middle - 1
        else:
            return middle
    return -1
if __name__ == "__main__":
nums = [5, 7, 7, 8, 8, 10]
target = 7
sol = Solution()
print(sol.searchRange(nums, target))
# print(test_mid_find(nums, target))
# print(binary_find(nums, target))
# it need complexity must be in the order of O(logn)
# so use midd find
|
#!/usr/bin/env python3
# Advent of Code 2020, day 6: customs declaration forms.
# Cleanup: the file handle is now closed (with-statement), the unused
# ``tota`` variable is gone, and the counting logic is factored into
# testable helpers behind a __main__ guard.

def count_any(group):
    """Questions answered 'yes' by ANYONE in the group (part 1)."""
    return len(set("".join(group)))

def count_all(group):
    """Questions answered 'yes' by EVERYONE in a non-empty group (part 2)."""
    return len(set.intersection(*(set(person) for person in group)))

def main():
    with open("in") as handle:
        raw_groups = handle.read().split("\n\n")
    # Drop empty lines (trailing newlines) and then empty groups; empty
    # groups contributed 0 to both totals in the original.
    groups = [g for g in
              ([line for line in chunk.split("\n") if line]
               for chunk in raw_groups) if g]
    print(sum(count_any(g) for g in groups))
    print(sum(count_all(g) for g in groups))

if __name__ == "__main__":
    main()
|
# Helpers for persisting simple parameters as one-value files
# (openpilot-style /data/params store).
OP_PARAMS_PATH = "/data/params/"

def save_bool_param(param_name, param_value):
    """Persist a boolean parameter as "1" (truthy) or "0" (falsy)."""
    try:
        encoded = 1 if param_value else 0
        with open(OP_PARAMS_PATH + "/" + param_name, "w") as outfile:
            outfile.write(f'{encoded}')
    except IOError:
        print("Failed to save "+param_name+" with value ",param_value)
def load_bool_param(param_name,param_def_value):
    """Read a boolean parameter; create the file with the default if missing.

    Returns True iff the first stored line parses to the integer 1.
    NOTE(review): if the file exists but is empty, the loop body never
    runs and the function implicitly returns None — presumably that never
    happens in practice; confirm.
    """
    try:
        with open(OP_PARAMS_PATH+"/"+param_name, 'r') as f:
            for line in f:
                value_saved = int(line)
                #print("Reading Params ",param_name , "value", value_saved)
                # Return on the first line; remaining lines are ignored.
                return True if value_saved == 1 else False
    except IOError:
        # First access: seed the file with the default so later reads work.
        print("Initializing "+param_name+" with value ",param_def_value)
        save_bool_param(param_name,param_def_value)
        return param_def_value
def save_float_param(param_name, param_value):
    """Persist a numeric parameter as a float's text form."""
    real_param_value = param_value * 1.0  # coerce ints to float formatting
    try:
        with open(OP_PARAMS_PATH + "/" + param_name, "w") as outfile:
            outfile.write(f'{real_param_value}')
    except IOError:
        print("Failed to save "+param_name+" with value ",real_param_value)
def load_float_param(param_name,param_def_value):
    """Read a float parameter; create the file with the default if missing.

    NOTE(review): mirrors load_bool_param — an existing-but-empty file
    makes this return None implicitly; confirm that cannot occur.
    """
    try:
        with open(OP_PARAMS_PATH+"/"+param_name, 'r') as f:
            for line in f:
                value_saved = float(line)
                #print("Reading Params ",param_name , "value", value_saved)
                # Return on the first line; remaining lines are ignored.
                return value_saved * 1.0
    except IOError:
        # First access: seed the file with the default so later reads work.
        print("Initializing "+param_name+" with value ",param_def_value*1.0)
        save_float_param(param_name,param_def_value * 1.0)
        return param_def_value * 1.0
|
# Hello! World!
print("Hello, World!")
# Learning Strings
my_string = "This is a string"
## Make string uppercase
my_string_upper = my_string.upper()
print(my_string_upper)
# Determine data type of string -> <class 'str'>
print(type(my_string))
# Slicing strings [python is zero-based and starts at 0 and not 1]
print(my_string[0:4])   # "This" — the end index is exclusive
print(my_string[:1])    # "T"
print(my_string[0:14])  # "This is a stri"
|
class Solution:
    def combinationSum2(self, candidates: List[int], target: int) -> List[List[int]]:
        """LeetCode 40: unique combinations of candidates summing to target.

        Each candidate may be used at most as many times as it occurs.
        BUG FIX: the original built ``nums`` with ``set(sorted(candidates))``,
        which throws the ordering away (set iteration order is arbitrary)
        and breaks the sorted-order pruning in Backtrack; it must be
        ``sorted(set(candidates))``.
        """
        self.ret = []
        self.counts = collections.Counter(candidates)
        nums = sorted(set(candidates))  # ascending unique values
        self.Backtrack(nums, target, [], 0)
        return self.ret
    def Backtrack(self, nums, target, combination, k):
        """Extend ``combination`` using multiples of each value in nums[k:]."""
        if target == 0:
            self.ret.append(combination)
            return combination
        for i in range(k, len(nums)):
            if target - nums[i] < 0:
                break  # nums is sorted, so every later value overshoots too
            temp_sum = 0
            temp_list = []
            # Take 1, 2, ... up to count copies of nums[i].
            for j in range(self.counts[nums[i]]):
                temp_sum += nums[i]
                temp_list.append(nums[i])
                if target - temp_sum < 0:
                    break
                combination += temp_list
                combination = self.Backtrack(nums, target - temp_sum, combination, i + 1)
                combination = combination[:-len(temp_list)]
        return combination
|
"""
Instructions:
1. Create a class named ReversedString that inherits from StringOperations class
2. Implement the function reverse
3. reverse function should be a one liner function that returns the reverse string to_be_reversed
4. Instantiate the class ReversedString
5. Print to show your function implementation result
"""
class StringOperations:
def reverse(self, *, to_be_reversed: str = None):
raise NotImplemented('This method need to be implemented')
class ReversedString(StringOperations):
def reverse(to_be_reversed):
print ("hello world"[::-1])
ob= ReversedString()
ob.reverse()
|
# Cycle detection in a directed graph: DFS with an "ancestor" set that
# mirrors the current recursion stack — hitting an ancestor means a cycle.
def cycleCheck(vertices, adj):
    """Return True iff the directed graph (vertices 0..n-1, adjacency
    list ``adj``) contains a cycle."""
    visited = set()
    ancestor = set()
    return any(
        vertex not in visited and dfs(vertex, adj, visited, ancestor)
        for vertex in range(vertices)
    )

# Recursive DFS helper: True when a back edge (into the recursion stack)
# is found anywhere below ``vertex``.
def dfs(vertex, adj, visited, ancestor):
    visited.add(vertex)
    ancestor.add(vertex)
    for successor in adj[vertex]:
        if successor not in visited:
            if dfs(successor, adj, visited, ancestor):
                return True
        elif successor in ancestor:
            return True  # back edge -> cycle
    ancestor.remove(vertex)  # done exploring this branch of the stack
    return False
# Interactive driver: reads test cases from stdin and reports cycles.
print('---------------------------------------------------------------------')
print('\tCheck if a cycle exists in a directed graph')
print('---------------------------------------------------------------------\n')
t = int(input("Enter the number of testcases: "))
for _ in range(t):
    print('\n*************** Testcase', _+1, '***************\n')
    vertices, edges = map(int, input("Enter number of vertices & edges: ").split())
    # Create an adjacency list
    adj = [[] for vertex in range(vertices)]
    for edge in range(edges):
        # Each edge is a directed pair "start end".
        start, end = map(int, input("Enter edge: ").split())
        adj[start].append(end)
    if cycleCheck(vertices, adj) == True:
        print('Cycle detected')
    else:
        print('No cycle detected')
def writeHigh(b, value):
    """Return ``b`` with its high nibble replaced by ``value``."""
    return (value << 4) | (b & 0x0f)

def readHigh(b):
    """Return the high nibble (bits 4-7) of ``b``."""
    return (b & 0xf0) >> 4

def writeLow(b, value):
    """Return ``b`` with its low nibble replaced by ``value``.

    Note: ``value`` is not masked, matching the original behaviour.
    """
    return (b & 0xf0) | value

def readLow(b):
    """Return the low nibble (bits 0-3) of ``b``."""
    return 0x0f & b
# Pack two counters into one byte: A in the low nibble, B in the high one.
b = 0x00
# A starts at position 1 (low nibble).
b = writeLow(b, 1)
while readLow(b) <= 9:
    b = writeHigh(b, 1)
    while readHigh(b) <= 9:
        # Print only pairs whose residues mod 3 differ.
        if(readHigh(b) % 3 != readLow(b) % 3):
            print([readLow(b), readHigh(b)])
        b = writeHigh(b, readHigh(b) + 1)
        # print('highbits', readHigh(b))
    b = writeLow(b, readLow(b) + 1)
    # print('lowbits', readLow(b))
|
# DROP TABLES
# Table-name constants shared by every query in this module.
USERS_TABLE = "users"
SONGS_TABLE = "songs"
ARTISTS_TABLE = "artists"
TIME_TABLE = "time"
SONGS_PLAY_TABLE = "songplays"
# Drop statements (IF EXISTS makes them safe to run on a fresh database).
songplay_table_drop = f"DROP TABLE IF EXISTS {SONGS_PLAY_TABLE};"
user_table_drop = f"DROP TABLE IF EXISTS {USERS_TABLE};"
song_table_drop = f"DROP TABLE IF EXISTS {SONGS_TABLE};"
artist_table_drop = f"DROP TABLE IF EXISTS {ARTISTS_TABLE};"
time_table_drop = f"DROP TABLE IF EXISTS {TIME_TABLE};"
# CREATE TABLES
songplay_table_create = """
CREATE TABLE IF NOT EXISTS {} (
songplay_id SERIAL NOT NULL,
start_time bigint NOT NULL,
user_id int NOT NULL,
level text,
song_id text,
artist_id text,
session_id int,
location text,
user_agent text,
PRIMARY KEY(songplay_id),
FOREIGN KEY(user_id) REFERENCES {}(user_id),
FOREIGN KEY(song_id) REFERENCES {}(song_id),
FOREIGN KEY(artist_id) REFERENCES {}(artist_id),
FOREIGN KEY(start_time) REFERENCES {}(start_time)
);
""".format(
SONGS_PLAY_TABLE, USERS_TABLE, SONGS_TABLE, ARTISTS_TABLE, TIME_TABLE
)
user_table_create = """
CREATE TABLE IF NOT EXISTS {} (
user_id int NOT NULL,
first_name text,
last_name text,
gender text,
level text,
PRIMARY KEY(user_id)
);
""".format(
USERS_TABLE
)
song_table_create = """
CREATE TABLE IF NOT EXISTS {} (
song_id text NOT NULL,
title text,
artist_id text,
year int,
duration real,
PRIMARY KEY(song_id)
);
""".format(
SONGS_TABLE
)
artist_table_create = """
CREATE TABLE IF NOT EXISTS {} (
artist_id text NOT NULL,
name text,
location text,
latitude text,
longitude text,
PRIMARY KEY(artist_id)
);
""".format(
ARTISTS_TABLE
)
time_table_create = """
CREATE TABLE IF NOT EXISTS {} (
start_time bigint NOT NULL,
hour int,
day int,
week int,
month int,
year int,
weekday text,
PRIMARY KEY(start_time)
)
""".format(
TIME_TABLE
)
# INSERT RECORDS
songplay_table_insert = """
INSERT INTO {} (start_time, user_id, level, song_id, artist_id, session_id, location, user_agent)
VALUES(%s, %s, %s, %s, %s, %s, %s, %s)
ON CONFLICT(songplay_id) DO NOTHING;
""".format(
SONGS_PLAY_TABLE
)
user_table_insert = """
INSERT INTO {} (user_id, first_name, last_name, gender, level)
VALUES(%s, %s, %s, %s, %s)
ON CONFLICT(user_id) DO UPDATE SET level = excluded.level
""".format(
USERS_TABLE
)
song_table_insert = """
INSERT INTO {} (song_id, title, artist_id, year, duration)
VALUES(%s, %s, %s, %s, %s)
ON CONFLICT(song_id) DO NOTHING;
""".format(
SONGS_TABLE
)
artist_table_insert = """
INSERT INTO {} (artist_id, name, location, latitude, longitude)
VALUES(%s, %s, %s, %s, %s)
ON CONFLICT(artist_id) DO NOTHING;
""".format(
ARTISTS_TABLE
)
time_table_insert = """
INSERT INTO {} (start_time, hour, day, week, month, year, weekday)
VALUES(%s, %s, %s, %s, %s, %s, %s)
ON CONFLICT(start_time) DO NOTHING;
""".format(
TIME_TABLE
)
# FIND SONGS
song_select = """
SELECT songs.song_id, artists.artist_id
FROM songs JOIN artists ON songs.artist_id = artists.artist_id
WHERE songs.title = %s AND artists.name = %s AND songs.duration = %s;
"""
# QUERY LISTS
# Creation order: referenced dimension tables first, then songplays,
# which declares FOREIGN KEYs to all of them.
create_table_queries = [
    user_table_create,
    song_table_create,
    artist_table_create,
    time_table_create,
    songplay_table_create,
]
# BUG FIX: drop order reversed — songplays must be dropped FIRST, because
# dropping a table that other tables reference via FOREIGN KEY fails in
# PostgreSQL with a dependency error.
drop_table_queries = [
    songplay_table_drop,
    user_table_drop,
    song_table_drop,
    artist_table_drop,
    time_table_drop,
]
|
# ********************************************************************************** #
# #
# Project: SkinAnaliticAI #
# Author: Pawel Rosikiewicz #
# Contact: prosikiewicz_gmail.com #
# #
#. This notebook is a part of Skin AanaliticAI development kit, created #
#. for evaluation of public datasets used for skin cancer detection with #
#. large number of AI models and data preparation pipelines. #
# #
# License: MIT #
#. Copyright (C) 2021.01.30 Pawel Rosikiewicz #
# https://opensource.org/licenses/MIT #
# #
# ********************************************************************************** #
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# config, ...........................................................................................
PROJECT_NAME = "Skin_cancer_detection_and_classyfication"
# config, ...........................................................................................
# CLASS_DESCRIPTION — one entry per class in the source dataset:
#. "key" : str, class name used in original dataset downloaded form databse
# "original_name" : str, same as the key, but you can introduce other values in case its necessarly
# "class_full_name" : str, class name used on images, saved data etc, (more descriptive then class names, or sometimes the same according to situation)
# "class_group" : str, group of classes, if the classes are hierarchical,
# "class_description" : str, used as notes, or for class description available for the user/client
# "links" : list, with link to more data, on each class,
# BUG FIX: the 'mel' entry previously used the misspelled key "linkss",
# breaking uniform access to entry["links"] across classes.
# NOTE(review): 'akiec' and 'bcc' carry class_group "Tumour_Benign" while
# their descriptions call them carcinomas — confirm the grouping with the
# dataset owner before relying on it.
CLASS_DESCRIPTION = {
    'akiec':{
        "original_name":'akiec',
        "class_full_name": "squamous_cell_carcinoma", # prevoisly called "Actinic_keratoses" in my dataset, but ths name is easier to find in online resourses, noth names are correct,
        "class_group": "Tumour_Benign",
        "class_description": "Class that contains two subclasses:(A) Actinic_Keratoses or (B) Bowen’s disease. Actinic Keratoses (Solar Keratoses) and Intraepithelial Carcinoma (Bowen’s disease) are common non-invasive, variants of squamous cell carcinoma that can be treated locally without surgery. These lesions may progress to invasive squamous cell carcinoma – which is usually not pigmented. Both neoplasms commonly show surface scaling and commonly are devoid of pigment, Actinic keratoses are more common on the face and Bowen’s disease is more common on other body sites. Because both types are induced by UV-light the surrounding skin is usually typified by severe sun damaged except in cases of Bowen’s disease that are caused by human papilloma virus infection and not by UV. Pigmented variants exist for Bowen’s disease and for actinic keratoses",
        "links":["https://dermoscopedia.org/Actinic_keratosis_/_Bowen%27s_disease_/_keratoacanthoma_/_squamous_cell_carcinoma"]
    },
    'bcc':{
        "original_name":'bcc',
        "class_full_name": "Basal_cell_carcinoma",
        "class_group": "Tumour_Benign",
        "class_description": "Basal cell carcinoma (BCC) is the most common type of skin cancer in the world that rarely metastasizes but grows destructively if untreated. It appears in different morphologic variants (flat, nodular, pigmented, cystic). There are multiple histopathologic subtypes of BCC including superficial, nodular, morpheaform/sclerosing/infiltrative, fibroepithelioma of Pinkus, microcytic adnexal and baso-squamous cell BCC. Each subtype can be clinically pigmented or non-pigmented. It is not uncommon for BCCs to display pigment on dermoscopy with up to 30% of clinically non-pigmented BCCs revealing pigment on dermoscopy. Based on the degree of pigmentation, some BCCs can mimic melanomas or other pigmented skin lesions. Depending on the subtype of BCC and the degree of pigmentation, the clinical differential diagnosis can be quite broad ranging from benign inflammatory lesions to melanoma. Fortunately, the dermoscopic criteria for BCC are visible irrespective of the size of the tumor and can be well distiguished using dermatoscopy",
        "links":["https://dermoscopedia.org/Basal_cell_carcinoma"]
    },
    'bkl':{
        "original_name":'bkl',
        "class_full_name": "Benign_keratosis",
        "class_group": "Tumour_Benign",
        "class_description": "Benign keratosis is a generic group that includes three typesy of non-carcinogenig lesions: (A) seborrheic keratoses (senile wart), (B) solar lentigo - which can be regarded a flat variant of seborrheic keratosis, (C) and lichen-planus like keratoses (LPLK), which corresponds to a seborrheic keratosis or a solar lentigo with inflammation and regression. The three subgroups may look different dermatoscopically, but we grouped them together because they are similar biologically and often reported under the same generic term histopathologically. Briefly: Seborrheic keratoses (A) are benign epithelial lesions that can appear on any part of the body except for the mucous membranes, palms, and soles. The lesions are quite prevalent in people older than 30 years. Early seborrheic keratoses are light - to dark brown oval macules with sharply demarcated borders. As the lesions progress, they transform into plaques with a waxy or stuck-on appearance, often with follicular plugs scattered over their surfaces. The size of the lesions varies from a few millimeters to a few centimeters. Solar lentigines (B) are sharply circumscribed, uniformly pigmented macules that are located predominantly on the sun-exposed areas of the skin, such as the dorsum of the hands, the shoulders, and the scalp. Lentigines are a result of hyperplasia of keratinocytes and melanocytes, with increased accumulation of melanin in the keratinocytes. They are induced by ultraviolet light exposure. Unlike freckles, solar lentigines persist indefinitely. Nearly 90% of Caucasians over the age of 60 years have these lesions. LPLK (C), is one of the common benign neoplasms of the skin, and it is highly variable in its appearance, Some LPKL can show morphologic features mimicking melanoma and are often biopsied or excised for diagnostic reasons",
        "links": ["https://dermoscopedia.org/Solar_lentigines_/_seborrheic_keratoses_/_lichen_planus-like_keratosis"]
    },
    'df': {
        "original_name":'df',
        "class_full_name": "Dermatofibroma",
        "class_group": "Tumour_Benign",
        "class_description": "Dermatofibromas (DFs) are prevalent cutaneous lesions that most frequently affect young to middle-aged adults, with a slight predominance in females. Clinically, dermatofibromas appear as firm, single or multiple papules/nodules with a relatively smooth surface and predilection for the lower extremities. Characteristically, upon lateral compression of the skin surrounding dermatofibromas, the tumors tend to pucker inward producing a dimple-like depression in the overlying skin; a feature known as the dimple or Fitzpatrick’s sign. Dermatofibroma is a benign skin lesion regarded as either a benign proliferation or an inflammatory reaction to minimal trauma. The most common dermatoscopic presentation is reticular lines at the periphery with a central white patch denoting fibrosis",
        "links": ["https://dermoscopedia.org/Dermatofibromas"]
    },
    'nv': {
        "original_name":'nv',
        "class_full_name": "Melanocytic_nevus",
        "class_group": "Tumour_Benign",
        "class_description": "Melanocytic nevi are benign neoplasms of melanocytes and appear in a myriad of variants, which all were included in train data used for diagnosis. The variants may differ significantly from a dermatoscopic point of view. Unlike, melanoma they are usually symmetric with regard to the distribution of color and structure",
        "links":["https://dermoscopedia.org/Benign_Melanocytic_lesions"]
    },
    "mel": {
        "original_name":'mel',
        "class_full_name": "Melanoma",
        "class_group": "Tumour_Malignant",
        "class_description": "Melanoma is a malignant neoplasm derived from melanocytes that may appear in different variants. If excised in an early stage it can be cured by simple surgical excision. Melanomas can be invasive or non-invasive (in situ). Melanomas are usually, albeit not always, chaotic, and some melanoma specific criteria depend on anatomic site, All variants of melanoma including melanoma in situ, except for non-pigmented, subungual, ocular or mucosal melanoma were included in train dataset used for diagnosis",
        "links": ["https://dermoscopedia.org/Melanoma"]
    },
    'vasc':{
        "original_name":'vasc',
        "class_full_name": "Vascular_skin_lesions",
        "class_group": "Vascular_skin_lesions",
        "class_description": "Angiomas are dermatoscopically characterized by red or purple color and solid, well circumscribed structures known as red clods or lacunes.Data Used for training for diagnosis: Vascular skin lesions in the dataset range from cherry angiomas to angiokeratomas and pyogenic granulomas. Hemorrhage is also included in this category",
        "links": ["https://dermoscopedia.org/Vascular_lesions"]
    }
}
class Config(object):
    """Application configuration for a Shopify app.

    NOTE(review): SECRET_KEY and the ngrok HOST are hard-coded in source
    control — move secrets to environment variables before deployment.
    The API key/secret placeholders below must be replaced with real
    Shopify app credentials.
    """
    SECRET_KEY = "CantStopAddictedToTheShinDigChopTopHeSaysImGonnaWinBig"
    HOST = "0a398d5f.ngrok.io"
    SHOPIFY_CONFIG = {
        'API_KEY': '<API KEY HERE>',
        'API_SECRET': '<API SECRET HERE>',
        'APP_HOME': 'http://' + HOST,
        'CALLBACK_URL': 'http://' + HOST + '/install',
        'REDIRECT_URI': 'http://' + HOST + '/connect',
        'SCOPE': 'read_products, read_collection_listings'
    }
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Package metadata.
__version__ = "3.0.0"
__author__ = "Amir Zeldes"
__copyright__ = "Copyright 2015-2019, Amir Zeldes"
__license__ = "Apache 2.0 License"
|
# Count occurrences of the letter 'a' in a phrase (lowercased, trimmed).
frase = str(input('Digite uma frase?').lower().strip())
print('Na Frase há: {} letra A.'.format(frase.count('a')))
# +1 converts the 0-based index to a 1-based position for display.
print('Ela aparece pela primeira vez em {}.'.format(frase.find('a')+1))
print(' Ela aparece pela ultima vez em {}'.format(frase.rfind('a')+1))
|
# Demonstrate rebinding vs. aliasing with mutable objects.
a = 10
b = 20
c = [a]             # c captures the current VALUE of a, not the name
d = c               # d aliases the same list object as c
a = 15              # rebinding a does not affect the list
print(c)
c = [10,20]         # rebinds c to a new list; d still holds the old one
print(d)
print(id(c),id(d))  # different ids after the rebind
# Configuration for the MDF Connect "dev" deployment.
DEV = {
    "SERVER_NAME": "dev-api.materialsdatafacility.org",
    "API_LOG_FILE": "deva.log",
    "PROCESS_LOG_FILE": "devp.log",
    "LOG_LEVEL": "DEBUG",
    "FORM_URL": "https://connect.materialsdatafacility.org/",
    "TRANSFER_DEADLINE": 3 * 60 * 60,  # 3 hours, in seconds
    "INGEST_URL": "https://dev-api.materialsdatafacility.org/ingest",
    "INGEST_INDEX": "mdf-dev",
    "INGEST_TEST_INDEX": "mdf-dev",
    "LOCAL_EP": "ca7550ad-55a9-4762-b558-8f2b15049039",
    # Disable backups
    "BACKUP_EP": False,
    # Petrel
    # "BACKUP_EP": "e38ee745-6d04-11e5-ba46-22000b92c6ec",
    # "BACKUP_PATH": "/MDF/mdf_connect/dev/data/",
    # "BACKUP_HOST": "https://e38ee745-6d04-11e5-ba46-22000b92c6ec.e.globus.org",
    # "BACKUP_FEEDSTOCK": "/MDF/mdf_connect/dev/feedstock/",
    # NCSA
    # "BACKUP_EP": "82f1b5c6-6e9b-11e5-ba47-22000b92c6ec",
    # Backup paths below are only used when BACKUP_EP is enabled above.
    "BACKUP_PATH": "/mdf_connect/dev/data/",
    "BACKUP_HOST": "https://data.materialsdatafacility.org",
    "BACKUP_FEEDSTOCK": "/mdf_connect/dev/feedstock/",
    "DEFAULT_CLEANUP": True,
    "FINAL_CLEANUP": True,
    "DEFAULT_DOI_TEST": True,
    "NUM_DOI_CHARS": 2,  # Characters per section
    "NUM_DOI_SECTIONS": 5,
    "DEFAULT_PUBLISH_COLLECTION": 35,
    "TEST_PUBLISH_COLLECTION": 35,
    "DEFAULT_CITRINATION_PUBLIC": False,
    "DEFAULT_MRR_TEST": True,
    "SQS_QUEUE": "mdfc_dev1.fifo",
    "SQS_GROUP_ID": "mdf_connect_dev",
    "DYNAMO_STATUS_TABLE": "dev-status-alpha-2",
    "DYNAMO_CURATION_TABLE": "dev-curation-alpha-1"
}
|
inputDoc = open("input.txt")
docLines = inputDoc.readlines()
inputDoc.close()
# PART 1
# Find two numbers that add up to 2020 and multiply them
correct1 = []
for line in docLines:
line = int(line.replace("\n", ""))
for lineTwo in docLines:
lineTwo = int(lineTwo.replace("\n", ""))
if line + lineTwo == 2020:
correct1 = [line, lineTwo]
break
if len(correct1) > 0:
break
print(correct1[0] * correct1[1]) # 514579
# PART 2
# Find 3 numbers that add to 2020
correct2 = []
for line in docLines:
line = int(line.replace("\n", ""))
for lineTwo in docLines:
lineTwo = int(lineTwo.replace("\n", ""))
for lineThree in docLines:
lineThree = int(lineThree.replace("\n", ""))
if line + lineTwo + lineThree == 2020:
correct2 = [line, lineTwo, lineThree]
break
if len(correct2) > 0:
break
if len(correct2) > 0:
break
print(correct2[0] * correct2[1] * correct2[2]) # 42275090
# print(next(map(lambda a: a[0] * a[1], [list(map(lambda a: int(a), filter(lambda a: [int(b) for b in docLines if int(a) + int(b) == 2020], docLines)))])))
|
"""Contains the class for a problem instance."""
class AgglutinatingRolls():
    """Define a class for a problem instance."""
    def __init__(self, instance: dict):
        """Initialize an object.

        ``instance`` may supply 'rolls_a', 'rolls_b', 'costs',
        'max_length' and 'max_number_of_rolls'; absent keys fall back
        to the defaults below.
        """
        self.rolls_a = instance.get('rolls_a', [])
        # BUG FIX: this previously read 'rolls_a' again, so rolls_b
        # silently mirrored rolls_a and the real 'rolls_b' data was lost.
        self.rolls_b = instance.get('rolls_b', [])
        # costs = [agglutinate, new_roll, short_roll, unused_roll]
        self.costs = instance.get('costs', [5, 8, 7, 7])
        self.max_length = instance.get('max_length', 10000)
        self.max_number_of_rolls = instance.get('max_number_of_rolls', 10)
|
class InvalidApiKeyError(Exception):
    """Raised when the supplied API key is rejected."""
    def __init__(self):
        self.message = "The API key you inserted isn't valid"
        # Pass the message to Exception so str(e) and tracebacks show it;
        # previously str(e) was an empty string.
        super().__init__(self.message)
class InvalidCityNameError(Exception):
    """Raised when no city matches the given name or code."""
    def __init__(self):
        self.message = "Couldn't find any city with this name or code. Please check again."
        # Pass the message to Exception so str(e) and tracebacks show it;
        # previously str(e) was an empty string.
        super().__init__(self.message)
|
#
# PySNMP MIB module GWPAGERMIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/GWPAGERMIB
# Produced by pysmi-0.3.4 at Wed May 1 13:20:45 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
# NOTE(review): generated code — regenerate from the ASN.1 source rather
# than editing by hand. `mibBuilder` is not defined in this file;
# presumably it is injected by the pysnmp MIB-loading machinery when the
# module is executed — confirm against how this module is loaded.
# --- Symbols resolved through the MIB builder ---
ObjectIdentifier, OctetString, Integer = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "OctetString", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
SingleValueConstraint, ValueSizeConstraint, ConstraintsUnion, ConstraintsIntersection, ValueRangeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "SingleValueConstraint", "ValueSizeConstraint", "ConstraintsUnion", "ConstraintsIntersection", "ValueRangeConstraint")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
MibIdentifier, TimeTicks, ObjectIdentity, IpAddress, iso, Integer32, enterprises, Unsigned32, MibScalar, MibTable, MibTableRow, MibTableColumn, Counter64, NotificationType, NotificationType, Gauge32, Bits, Counter32, ModuleIdentity = mibBuilder.importSymbols("SNMPv2-SMI", "MibIdentifier", "TimeTicks", "ObjectIdentity", "IpAddress", "iso", "Integer32", "enterprises", "Unsigned32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Counter64", "NotificationType", "NotificationType", "Gauge32", "Bits", "Counter32", "ModuleIdentity")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
# --- OID registration: enterprises.novell(23).gateways(2).gwPAGER(43) ---
novell = MibIdentifier((1, 3, 6, 1, 4, 1, 23))
gateways = MibIdentifier((1, 3, 6, 1, 4, 1, 23, 2))
gwPAGER = MibIdentifier((1, 3, 6, 1, 4, 1, 23, 2, 43))
gwPAGERInfo = MibIdentifier((1, 3, 6, 1, 4, 1, 23, 2, 43, 1))
gwPAGERTrapInfo = MibIdentifier((1, 3, 6, 1, 4, 1, 23, 2, 43, 2))
# --- gwPAGERInfo scalars: read-only gateway identity, link state and
# --- traffic/error counters ---
gwPAGERGatewayName = MibScalar((1, 3, 6, 1, 4, 1, 23, 2, 43, 1, 1), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: gwPAGERGatewayName.setStatus('mandatory')
if mibBuilder.loadTexts: gwPAGERGatewayName.setDescription('The GroupWise PAGER Gateway name.')
gwPAGERUptime = MibScalar((1, 3, 6, 1, 4, 1, 23, 2, 43, 1, 2), TimeTicks()).setMaxAccess("readonly")
if mibBuilder.loadTexts: gwPAGERUptime.setStatus('mandatory')
if mibBuilder.loadTexts: gwPAGERUptime.setDescription('Uptime of the GroupWise PAGER Gateway.')
gwPAGERGroupWiseLink = MibScalar((1, 3, 6, 1, 4, 1, 23, 2, 43, 1, 3), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 5))).setMaxAccess("readonly")
if mibBuilder.loadTexts: gwPAGERGroupWiseLink.setStatus('mandatory')
if mibBuilder.loadTexts: gwPAGERGroupWiseLink.setDescription('GroupWise PAGER Gateway Link: UP or DOWN')
gwPAGERFrgnLink = MibScalar((1, 3, 6, 1, 4, 1, 23, 2, 43, 1, 4), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 5))).setMaxAccess("readonly")
if mibBuilder.loadTexts: gwPAGERFrgnLink.setStatus('mandatory')
if mibBuilder.loadTexts: gwPAGERFrgnLink.setDescription('GroupWise PAGER Gateway Foreign Link: UP or DOWN')
gwPAGEROutBytes = MibScalar((1, 3, 6, 1, 4, 1, 23, 2, 43, 1, 5), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: gwPAGEROutBytes.setStatus('mandatory')
if mibBuilder.loadTexts: gwPAGEROutBytes.setDescription('The number of message bytes sent to GroupWise PAGER.')
gwPAGERInBytes = MibScalar((1, 3, 6, 1, 4, 1, 23, 2, 43, 1, 6), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: gwPAGERInBytes.setStatus('mandatory')
if mibBuilder.loadTexts: gwPAGERInBytes.setDescription('The number of message bytes received from GroupWise PAGER.')
gwPAGEROutMsgs = MibScalar((1, 3, 6, 1, 4, 1, 23, 2, 43, 1, 7), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: gwPAGEROutMsgs.setStatus('mandatory')
if mibBuilder.loadTexts: gwPAGEROutMsgs.setDescription('The number of messages sent to GroupWise PAGER.')
gwPAGERInMsgs = MibScalar((1, 3, 6, 1, 4, 1, 23, 2, 43, 1, 8), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: gwPAGERInMsgs.setStatus('mandatory')
if mibBuilder.loadTexts: gwPAGERInMsgs.setDescription('The number of messages received from PAGER.')
gwPAGEROutStatuses = MibScalar((1, 3, 6, 1, 4, 1, 23, 2, 43, 1, 9), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: gwPAGEROutStatuses.setStatus('mandatory')
if mibBuilder.loadTexts: gwPAGEROutStatuses.setDescription('The number of statuses sent to PAGER.')
gwPAGERInStatuses = MibScalar((1, 3, 6, 1, 4, 1, 23, 2, 43, 1, 10), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: gwPAGERInStatuses.setStatus('mandatory')
if mibBuilder.loadTexts: gwPAGERInStatuses.setDescription('The number of statuses received from PAGER.')
gwPAGEROutErrors = MibScalar((1, 3, 6, 1, 4, 1, 23, 2, 43, 1, 11), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: gwPAGEROutErrors.setStatus('mandatory')
if mibBuilder.loadTexts: gwPAGEROutErrors.setDescription('The number of failed transfers to PAGER.')
gwPAGERInErrors = MibScalar((1, 3, 6, 1, 4, 1, 23, 2, 43, 1, 12), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: gwPAGERInErrors.setStatus('mandatory')
if mibBuilder.loadTexts: gwPAGERInErrors.setDescription('The number of failed transfers from PAGER.')
# --- Trap payload scalar and the notification types that carry it ---
gwPAGERTrapTime = MibScalar((1, 3, 6, 1, 4, 1, 23, 2, 43, 2, 1), Integer32())
if mibBuilder.loadTexts: gwPAGERTrapTime.setStatus('mandatory')
if mibBuilder.loadTexts: gwPAGERTrapTime.setDescription('The time the trap occurred. Seconds since Jan 1, 1970 (GMT)')
gwPAGERStartTrap = NotificationType((1, 3, 6, 1, 4, 1, 23, 2, 43, 1) + (0,1)).setObjects(("GWPAGERMIB", "gwPAGERTrapTime"), ("GWPAGERMIB", "gwPAGERGatewayName"))
if mibBuilder.loadTexts: gwPAGERStartTrap.setDescription('GroupWise PAGER Gateway start.')
gwPAGERStopTrap = NotificationType((1, 3, 6, 1, 4, 1, 23, 2, 43, 1) + (0,2)).setObjects(("GWPAGERMIB", "gwPAGERTrapTime"), ("GWPAGERMIB", "gwPAGERGatewayName"))
if mibBuilder.loadTexts: gwPAGERStopTrap.setDescription('GroupWise PAGER Gateway stop.')
gwPAGERRestartTrap = NotificationType((1, 3, 6, 1, 4, 1, 23, 2, 43, 1) + (0,3)).setObjects(("GWPAGERMIB", "gwPAGERTrapTime"), ("GWPAGERMIB", "gwPAGERGatewayName"))
if mibBuilder.loadTexts: gwPAGERRestartTrap.setDescription('GroupWise PAGER Gateway restart.')
gwPAGERGroupWiseLinkTrap = NotificationType((1, 3, 6, 1, 4, 1, 23, 2, 43, 1) + (0,4)).setObjects(("GWPAGERMIB", "gwPAGERTrapTime"), ("GWPAGERMIB", "gwPAGERGatewayName"))
if mibBuilder.loadTexts: gwPAGERGroupWiseLinkTrap.setDescription('GroupWise Link lost by GroupWise PAGER Gateway')
gwPAGERFgnLinkTrap = NotificationType((1, 3, 6, 1, 4, 1, 23, 2, 43, 1) + (0,5)).setObjects(("GWPAGERMIB", "gwPAGERTrapTime"), ("GWPAGERMIB", "gwPAGERGatewayName"))
if mibBuilder.loadTexts: gwPAGERFgnLinkTrap.setDescription('PAGER Link lost by GroupWise PAGER Gateway')
# Export all defined symbols so other MIB modules can import them by name.
mibBuilder.exportSymbols("GWPAGERMIB", gwPAGERTrapInfo=gwPAGERTrapInfo, gwPAGERRestartTrap=gwPAGERRestartTrap, gwPAGERInBytes=gwPAGERInBytes, gwPAGERInfo=gwPAGERInfo, gwPAGERGroupWiseLinkTrap=gwPAGERGroupWiseLinkTrap, novell=novell, gwPAGERInMsgs=gwPAGERInMsgs, gwPAGERTrapTime=gwPAGERTrapTime, gwPAGERInErrors=gwPAGERInErrors, gwPAGERFrgnLink=gwPAGERFrgnLink, gwPAGERUptime=gwPAGERUptime, gwPAGEROutBytes=gwPAGEROutBytes, gwPAGERStartTrap=gwPAGERStartTrap, gwPAGEROutErrors=gwPAGEROutErrors, gwPAGERGatewayName=gwPAGERGatewayName, gwPAGEROutMsgs=gwPAGEROutMsgs, gwPAGER=gwPAGER, gwPAGERGroupWiseLink=gwPAGERGroupWiseLink, gwPAGERStopTrap=gwPAGERStopTrap, gwPAGERFgnLinkTrap=gwPAGERFgnLinkTrap, gateways=gateways, gwPAGERInStatuses=gwPAGERInStatuses, gwPAGEROutStatuses=gwPAGEROutStatuses)
|
class Solution:
    def findErrorNums(self, nums):
        """
        :type nums: List[int]
        :rtype: List[int]

        nums should contain 1..n with one value duplicated and one
        missing; returns [duplicated, missing].
        """
        unique = set(nums)
        n = len(nums)
        # Expected 1..n total minus the de-duplicated sum yields the
        # missing value. Integer // replaces the original float / and
        # int() round-trip (exact for arbitrarily large n).
        missing = n * (n + 1) // 2 - sum(unique)
        # The surplus of the raw sum over the de-duplicated sum is
        # exactly the value that was counted twice.
        duplicated = sum(nums) - sum(unique)
        return [duplicated, missing]
|
# Read three lines and print the diagonal: char 0 of the first line,
# char 1 of the second, char 2 of the third.
lines = [input() for _ in range(3)]
print("".join(line[idx] for idx, line in enumerate(lines)))
|
def evalRPN(tokens: list[str]) -> int:
    """Evaluate a Reverse Polish Notation expression (LeetCode 150).

    Numbers are pushed on a stack; each operator pops the two most
    recent operands (right first, then left) and pushes the result.

    Note: the original annotation used ``List[str]`` without importing
    ``typing.List``, which raised NameError when the module loaded;
    the builtin generic ``list[str]`` needs no import.

    :param tokens: RPN tokens, each an integer literal or one of + - * /.
    :return: the value of the expression.
    """
    stack = []
    for token in tokens:
        # Multi-character tokens like "-11" are not substrings of
        # "+*-/", so this test correctly routes them to the stack.
        if token not in "+*-/":
            stack.append(int(token))
        else:
            right, left = stack.pop(), stack.pop()
            if token == "*":
                stack.append(left * right)
            elif token == "/":
                # int(float(l) / r) truncates toward zero (the problem's
                # contract); plain // would floor, which is wrong for
                # mixed-sign operands.
                stack.append(int(float(left) / right))
            elif token == "+":
                stack.append(left + right)
            else:  # "-"
                stack.append(left - right)
    return stack[-1]
#explanation : https://www.youtube.com/watch?v=3wGTlsLnZE4&list=PLLOxZwkBK52Akgqf4oSWPOQO9ROWS_9rc&index=20
|
# Lv-677.PythonCore
# Small interactive intro: ask for name, age and city, echoing each answer.
user_name = input("Hello. \nWhat is your name? \n")
print("Hello, ", user_name)
user_age = input("How old are you? \n")
print("Your age is: ", user_age)
user_city = input(f"Where do u live {user_name}? \n")
print("You live in ", user_city)
# Royals and suits
jack, queen, king, ace = 11, 12, 13, 14
spades, clubs, hearts, diamonds = 's', 'c', 'h', 'd'
ranks = list(range(2, 11)) + [jack, queen, king, ace]
suits = [spades, clubs, hearts, diamonds]
# Hands (names of the poker hand categories, strongest first)
straight_flush = 'Straight flush'
quads = 'Four of a kind'
full_house = 'Full house'
flush = 'Flush'
straight = 'Straight'
trips = 'Three of a kind'
two_pair = 'Two pair'
pair = 'Pair'
high_card = 'High card'
|
# Read all of stdin and print the most frequent lowercase letter(s);
# letters tied for the maximum count are concatenated in alphabetical order.
s = ''
while True:
    try:
        s += input()
    except EOFError:
        # input() raises EOFError when stdin is exhausted. The original
        # bare `except:` also swallowed KeyboardInterrupt/SystemExit,
        # making the loop impossible to interrupt cleanly.
        break
# Seed with 'a', then scan 'b'..'z' (codes 98-122), keeping the running
# best [letters, count].
r = ['a', s.count('a')]
for i in range(98, 123):
    n = s.count(chr(i))
    if n > r[1]:
        r[0] = chr(i)
        r[1] = n
    elif n == r[1]:
        r[0] += chr(i)
print(r[0])
|
def Run(filepath):
    """Read one depth measurement per line from `filepath` and run both parts."""
    with open(filepath) as source:
        depths = [int(line) for line in source.read().split('\n')]
    print('Day 1')
    Part1(depths)
    Part2(depths)
def Part1(measurements):
    """Count how often a depth measurement increases over its predecessor.

    Prints the count and also returns it (the original returned None,
    which made the logic untestable); also uses `is not None` per PEP 8.
    """
    increases = 0
    previousDepth = None
    for depth in measurements:
        # Increment if this isn't the first measurement and the depth has increased
        if previousDepth is not None and depth > previousDepth:
            increases += 1
        previousDepth = depth
    print('Part 1: ' + str(increases))
    # The result should be 1791 for the real puzzle input
    return increases
def Part2(measurements):
    """Count increases between sums of consecutive 3-measurement windows.

    Prints the count and also returns it (the original returned None).
    Comparing adjacent window sums A+B+C < B+C+D reduces to A < D, so it
    suffices to compare each element with the one three positions later.
    """
    increases = 0
    # One comparison per adjacent window pair: len(measurements) - 3 of
    # them, matching the original sliding-window formulation exactly.
    for index in range(len(measurements) - 3):
        if measurements[index + 3] > measurements[index]:
            increases += 1
    print('Part 2: ' + str(increases))
    # The result should be 1822 for the real puzzle input
    return increases
"""
O(n^min(k, n-k))
"""
class Solution:
    def combine(self, n: int, k: int) -> list[list[int]]:
        """Return all k-element combinations of 1..n (LeetCode 77).

        Note: the original ``List[List[int]]`` annotation raised
        NameError at class-creation time because ``typing.List`` was
        never imported; the builtin generic needs no import.
        """
        result = []
        self.dfs(list(range(1, n + 1)), k, [], result)
        return result
    def dfs(self, arr, k, path, result):
        """Extend `path` with k more values chosen from `arr`, collecting into `result`."""
        if k < 0:
            return
        if k == 0:
            result.append(path)
            # A complete combination cannot be extended; the original
            # fell through and recursed pointlessly with k == -1.
            return
        for i in range(len(arr)):
            self.dfs(arr[i + 1 :], k - 1, path + [arr[i]], result)
|
# SECURITY: hard-coded account credentials committed to source control.
# These should be rotated and loaded from an environment variable or a
# secrets manager instead of living in the code.
my_email = 'twitterlehigh2@gmail.com'
my_password = 'kB5-LXX-T7w-TLG'
# my_email = 'twitterlehigh@gmail.com'
# my_password = 'Lehigh131016'
|
'''
Given an integer, write a function that reverses
the bits (in binary) and returns the integer result.
Understand:
417 --> 267
    267 --> 417
0 --> 0
Plan:
Use bin() to convert the binary into a string.
Use bracket indexing to reverse the order.
Use int() to convert it into a decimal number.
'''
def csReverseIntegerBits(n):
    """Reverse the binary digits of `n` and return the resulting integer."""
    bits = bin(n).replace("0b", "")  # binary digits without the prefix
    return int(bits[::-1], 2)        # read the reversed digits back as base 2
#Author's Answer
def csReverseIntegerBits(n):
    """Reverse the bits of `n` using a single backwards slice."""
    # bin(n)[:1:-1] walks the string from the end down to (not including)
    # index 1, which both reverses the digits and drops the '0b' prefix.
    reversed_bits = bin(n)[:1:-1]
    return int(reversed_bits, 2)
'''
Given a binary string (ASCII encoded), write a function
that returns the equivalent decoded text.
Every eight bits in the binary string represents one
character on the ASCII table.
Understand:
csBinaryToASCII("011011000110000101101101011000100110010001100001") -> "lambda"
"" --> ""
Plan:
Split the string into a list of every 8 digits.
Convert each 8 digit into its ASCII character.
Join the string.
'''
def csBinaryToASCII(binary):
    """Decode an ASCII-encoded binary string: every 8 bits is one character."""
    chars = []
    for start in range(0, len(binary), 8):
        code = int(binary[start:start + 8], 2)  # one byte -> ordinal value
        chars.append(chr(code))
    return "".join(chars)
#Author's Answer
def csBinaryToASCII(binary):
    """Decode 8-bit ASCII chunks in a single comprehension."""
    return "".join(chr(int(binary[i:i + 8], 2)) for i in range(0, len(binary), 8))
'''
Given a number, write a function that converts
that number into a string that contains "raindrop sounds"
corresponding to certain potential factors. A factor
is a number that evenly divides into another number,
leaving no remainder. The simplest way to test if one
number is a factor of another is to use the modulo operator.
Here are the rules for csRaindrop. If the input number:
has 3 as a factor, add "Pling" to the result.
has 5 as a factor, add "Plang" to the result.
has 7 as a factor, add "Plong" to the result.
does not have any of 3, 5, or 7 as a factor, the result
should be the digits of the input number.
Understand:
28 --> "Plong"
30 --> "PlingPlang"
34 --> "34"
Plan:
Using if statements, check if the number has a factor
of 3, 5, or 7. If so, append an empty string with Pling,
Plang, or Plong. Else, return the number as the string.
'''
def csRaindrops(number):
    """Return the raindrop sounds for `number`, or its digits if silent."""
    # Each divisor contributes its sound, in fixed 3 -> 5 -> 7 order.
    sounds = ((3, "Pling"), (5, "Plang"), (7, "Plong"))
    result = "".join(word for factor, word in sounds if number % factor == 0)
    return result if result else str(number)
#Author's Answer
def csRaindrops(number):
    """Raindrop sounds built by multiplying strings with divisibility booleans."""
    pling = "Pling" * (number % 3 == 0)
    plang = "Plang" * (number % 5 == 0)
    plong = "Plong" * (number % 7 == 0)
    # An all-empty concatenation is falsy, so `or` falls back to the digits.
    return (pling + plang + plong) or str(number)
|
class Solution:
    def solve(self, board: list[list[str]]) -> None:
        """LeetCode 130: capture regions surrounded by 'X', in place.

        Every 'O' reachable from the border is flood-marked safe ('S');
        remaining 'O's are surrounded and flip to 'X'; safe cells are
        then restored to 'O'.

        Fixes from the original: the ``List[List[str]]`` annotation
        raised NameError at class creation (typing never imported), and
        a board of empty rows indexed ``board[i][0]`` out of range.
        """
        if not board or not board[0]:
            # Empty grid (or rows of width 0): nothing to capture.
            return
        m, n = len(board), len(board[0])
        def explore(i, j):
            # Depth-first flood fill marking this 'O' region as safe.
            board[i][j] = "S"
            for x, y in ((i - 1, j), (i + 1, j), (i, j - 1), (i, j + 1)):
                if 0 <= x < m and 0 <= y < n and board[x][y] == "O":
                    explore(x, y)
        # Start a fill from every 'O' on the four borders.
        for i in range(max(m, n)):
            if i < m and board[i][0] == "O":
                explore(i, 0)
            if i < m and board[i][n - 1] == "O":
                explore(i, n - 1)
            if i < n and board[0][i] == "O":
                explore(0, i)
            if i < n and board[m - 1][i] == "O":
                explore(m - 1, i)
        # Safe cells revert to 'O'; every other 'O' was surrounded.
        for i in range(m):
            for j in range(n):
                if board[i][j] == "S":
                    board[i][j] = "O"
                elif board[i][j] == "O":
                    board[i][j] = "X"
|
# -*- coding: utf-8 -*-
"""
To-Do application
"""
def add(todos):
    """
    Add a task

    Not implemented yet; presumably appends a new entry to ``todos``
    (the shape of an entry is not defined anywhere in this file yet —
    confirm when implementing).
    """
    pass
def delete(todos, index=None):
    """
    Delete one or all tasks

    Not implemented yet. ``index`` selects a single task; the default
    ``None`` presumably means "delete all" — confirm when implementing.
    """
    pass
def get_printable_todos(todos):
    """
    Get formatted tasks

    Not implemented yet; presumably returns display-ready strings built
    from ``todos`` — confirm the format when implementing.
    """
    pass
def toggle_done(todos, index):
    """
    Toggle a task

    Not implemented yet; presumably flips the done/undone state of the
    task at ``index`` — confirm when implementing.
    """
    pass
def view(todos, index):
    """
    Print tasks

    Currently prints only the list header; the task listing itself is
    still to be written. ``index`` is accepted but not used yet.
    """
    print('\nTo-Do list')
    print('=' * 40)
def main():
    """
    Main function

    Walks through the intended demo scenario; each step is still a TODO
    because the task operations in this module are only stubs so far.
    """
    print('Add New tasks...')
    # TODO Add 3 tasks & print
    print('\nThe Second one is toggled')
    # TODO Toggle the second task & print
    print('\nThe last one is removed')
    # TODO Remove only the third task & print
    print('\nAll the todos are cleaned.')
    # TODO Remove all the tasks & print
# Run the demo only when executed as a script, not when imported.
if __name__ == '__main__':
    main()
|
class SerVivo:
    """Base class for a living being with a simple alive/dead flag.

    The single leading underscore on ``_vivo`` marks it as internal
    state for this class hierarchy (a double underscore would have
    name-mangled it to be private to this class alone).
    """
    def __init__(self):
        self._vivo = True
    def is_vivo(self):
        """Return True while this being is alive."""
        return self._vivo
    def morir(self):
        """Mark this being as dead."""
        self._vivo = False
|
# Car rental bill: flat daily rate (R$60/day) plus a per-km charge (R$0.15/km).
dias = int(input("Quantos dias alugados? "))
km = float(input("Quantos Kms rodados: "))
daily_charge = 60 * dias
distance_charge = 0.15 * km
preco = daily_charge + distance_charge
print(f'O total a pagar é de R${preco:.2f}')
#!/usr/bin/env python3
# Regenerate README.md by splicing the usage text out of main.go into the
# tools/_README.md template wherever the <<<<USAGE>>>> placeholder appears.
# FIXME
with open("main.go", encoding="utf-8") as source:
    go_lines = source.read().split("\n")
usage = "\n".join(go_lines[13:-1])
with open("tools/_README.md", mode="r", encoding="utf-8") as template_file:
    template = template_file.read()
with open("README.md", mode="w", encoding="utf-8") as output:
    output.write(template.replace("<<<<USAGE>>>>", usage))
|
def binary_search(the_list, target):
    """Iterative binary search over a sorted list.

    Returns the index of ``target`` in ``the_list``, or -1 when absent.
    """
    low, high = 0, len(the_list) - 1
    while low <= high:
        mid = (low + high) // 2
        candidate = the_list[mid]
        if candidate == target:
            return mid
        if candidate > target:
            high = mid - 1   # discard the upper half
        else:
            low = mid + 1    # discard the lower half
    return -1
my_list = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
# Demo lookups: present values (10 -> 9, 4 -> 3) and an absent one (33 -> -1).
for target in (10, 4, 33):
    print(binary_search(my_list, target))
|
"""
User ACL
========
"""
_schema = {
# Medlemsnummer
'id': {'type': 'integer',
'readonly': True
},
'acl': {'type': 'dict',
'readonly': False,
'schema': {'groups': {'type': 'list', 'default': [],'schema': {'type': 'objectid'}},
'roles': {'type': 'list', 'default': [],'schema': {'type': 'objectid'}},
},
}
}
definition = {
'item_title': 'users/acl',
'url': 'users/acl',
'datasource': {'source': 'users',
'default_sort': [('id', 1)],
},
'extra_response_fields': ['id'],
'resource_methods': ['GET'], #No post, only internal!!
'item_methods': ['GET'],
#'auth_field': 'id', #This will limit only users who has
'allowed_write_roles': ['superadmin'],
'allowed_item_write_roles': ['superadmin'],
'additional_lookup': {
'url': 'regex("[\d{1,6}]+")',
'field': 'id',
},
'schema': _schema,
}
|
"""
The cost of stock on each day is given in an array A[] of size N.
Find all the days on which you buy and sell the stock
so that in between those days your profit is maximum.
"""
def sellandbuy(a, n):
    """Return [buy_day, sell_day] pairs covering each profitable rising run of a[:n]."""
    intervals = []
    buy = 0
    probe = 1
    while probe < n:
        # Extend the window while the price keeps climbing above both
        # the buy-day price and the previous day's price.
        if a[buy] < a[probe] and a[probe] > a[probe - 1]:
            probe += 1
        else:
            # Close out a window of at least two days, then restart the
            # scan at the current day either way.
            if probe - buy > 1:
                intervals.append([buy, probe - 1])
            buy = probe
            probe += 1
    # A rising run may still be open when the data ends.
    if probe - buy > 1 and a[probe - 1] > a[buy]:
        intervals.append([buy, probe - 1])
    return intervals
# Sample run from the problem statement.
sample_prices = [11, 42, 49, 96, 23, 20, 49, 26,
                 26, 18, 73, 2, 53, 59, 34, 99, 25, 2]
print(sellandbuy(sample_prices, 18))
|
def solution(n):
    """Return the sum of the decimal digits of non-negative integer ``n``.

    Fixes from the original: the leftover debug ``print(list(str(n)))``
    is removed, the builtin ``sum`` is no longer shadowed by a local
    accumulator, and the unused ``enumerate`` index is dropped.
    """
    return sum(int(digit) for digit in str(n))
print(solution(11))
"""
LeetCode Problem: 108. Convert Sorted Array to Binary Search Tree
Link: https://leetcode.com/problems/convert-sorted-array-to-binary-search-tree/
Language: Python
Written by: Mostofa Adib Shakib
Time Complexity: O(N)
Space Complexity: O(N)
"""
class Solution:
    def sortedArrayToBST(self, nums: "list[int]") -> "TreeNode":
        """Build a height-balanced BST from a sorted list (LeetCode 108).

        The annotations are string literals so the class body executes
        even though ``TreeNode`` is declared elsewhere: the original
        bare ``List[int]`` / ``TreeNode`` annotations were evaluated at
        class-creation time and raised NameError (typing never imported,
        TreeNode defined outside this file).
        """
        def helper(left, right):
            if left > right:
                return None
            # always choose left middle node as a root
            p = (left + right) // 2
            # preorder traversal: node -> left -> right
            root = TreeNode(nums[p])
            root.left = helper(left, p - 1)
            root.right = helper(p + 1, right)
            return root
        return helper(0, len(nums) - 1)
class Solution:
    def addBinary(self, a, b):
        """
        :type a: str
        :type b: str
        :rtype: str

        Simulates a ripple-carry adder: each bit string becomes a
        little-endian list of booleans, columns are combined with a full
        adder, and the result is rendered back into a binary string.
        """
        bits_a = [ch == '1' for ch in reversed(a)]
        bits_b = [ch == '1' for ch in reversed(b)]
        # Pad the shorter operand with False (zero) bits so the columns align.
        width = max(len(bits_a), len(bits_b))
        bits_a += [False] * (width - len(bits_a))
        bits_b += [False] * (width - len(bits_b))
        carry = False
        out_bits = []
        for bit_a, bit_b in zip(bits_a, bits_b):
            column_sum, carry = self.full_adder(bit_a, bit_b, carry)
            out_bits.append(column_sum)
        if carry:
            out_bits.append(True)  # final carry-out becomes the top bit
        return ''.join('1' if bit else '0' for bit in reversed(out_bits))
    def half_adder(self, a, b):
        """Return (sum, carry) for two bits."""
        return a ^ b, a & b
    def full_adder(self, a, b, cin):
        """Return (sum, carry-out) for two bits plus a carry-in, via two half adders."""
        s1, c1 = self.half_adder(a, b)
        s2, c2 = self.half_adder(s1, cin)
        return s2, c1 | c2
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.