content stringlengths 7 1.05M |
|---|
n = int(input('Me diga um número qualquer: '))
if (n % 2 == 0):
print('O número {} é PAR'.format(n))
else:
print('O número {} é IMPAR'.format(n))
|
"""
39.61%
return ['Fizz' * (not i % 3) + 'Buzz' * (not i % 5) or str(i) for i in range(1, n+1)]
"""
class Solution(object):
    def fizzBuzz(self, n):
        """Return the FizzBuzz sequence for 1..n.

        :type n: int
        :rtype: List[str]

        Multiples of 3 become 'Fizz', multiples of 5 become 'Buzz',
        multiples of both become 'FizzBuzz', everything else the number.
        """
        out = []
        for value in range(1, n + 1):
            word = ''
            if value % 3 == 0:
                word += 'Fizz'
            if value % 5 == 0:
                word += 'Buzz'
            out.append(word or str(value))
        return out
# AUTOGENERATED BY NBDEV! DO NOT EDIT!
__all__ = ["index", "modules", "custom_doc_links", "git_url"]

# Maps each exported symbol to the notebook that defines it (used by nbdev
# to build documentation cross-links).
index = {"generate": "00_numpy.ipynb",
         "square_root_by_exhaustive": "01_python03.ipynb",
         "square_root_by_binary_search": "01_python03.ipynb",
         "square_root_by_newton": "01_python03.ipynb",
         "search": "01_python03.ipynb",
         "select_sort": "01_python03.ipynb"}

# Generated Python modules belonging to this package.
modules = ["numpycore.py",
           "py03.py"]

doc_url = "https://greyhawk.github.io/pandas_exercises/"
git_url = "https://github.com/greyhawk/pandas_exercises/tree/master/"

# nbdev hook: return a custom documentation URL for `name`, or None to use
# the default link resolution.
def custom_doc_links(name): return None
|
class Solution(object):
    def lengthOfLastWord(self, s):
        """Return the length of the last whitespace-separated word in s.

        :type s: str
        :rtype: int

        Returns 0 when s is empty or contains only whitespace. (The original
        only checked len(s) == 0 and then indexed words[-1], which raised
        IndexError for inputs such as "   " where split() yields [].)
        """
        words = s.split()
        return len(words[-1]) if words else 0
|
# Quiz grading: read the four radio-button answers and build feedback text.
# NOTE(review): `widget_inputs` and `grade_result` are supplied by the hosting
# grader environment — they are not defined in this file; confirm the harness.
answer1 = widget_inputs["radio1"]
answer2 = widget_inputs["radio2"]  # read but never checked below — TODO confirm intended
answer3 = widget_inputs["radio3"]  # read but never checked below — TODO confirm intended
answer4 = widget_inputs["radio4"]

is_correct = False
comments = []

def commentizer(new):
    # Append a feedback message only once, preserving insertion order.
    if new not in comments:
        comments.append(new)

if answer1 == True:
    is_correct = True
else:
    # Always evaluates to False here (is_correct starts False); kept as written.
    is_correct = is_correct and False
    commentizer("Check the first one. Remember, an SVG animation will animate the rotation of an image, as opposed to a gif which is a series of raster images displayed one after another.")

if answer4 == True:
    # Both answers must be right for the quiz to count as correct.
    is_correct = is_correct and True
else:
    is_correct = is_correct and False
    commentizer("Check the second one. Will the image be reused? If so, an external file probably makes more sense.")

if is_correct:
    commentizer("Great job!")
    commentizer(" I love the internet :)")

grade_result["comment"] = "\n\n".join(comments)
grade_result["correct"] = is_correct
# -*- coding: utf-8 -*-
"""Top-level package for botorum."""
# Package metadata consumed by packaging/setup tooling.
__author__ = """JP White"""
__email__ = 'jpwhite3@gmail.com'
__version__ = '0.1.0'
|
def buy():
    """Handler for the 'buy' command (futures long position)."""
    return "期貨作多"

def sell():
    """Handler for the 'sell' command (short position)."""
    return "做空"

def zero():
    """Handler for the '0' command (new options position)."""
    return "選擇權新倉"

def C():
    """Handler for the 'C' command (close position)."""
    return "平倉"

# Command selector: dispatch a command string to its handler.
def switcher(command):
    menu = {
        'buy': buy,
        'sell': sell,
        '0': zero,
        'C': C,
        '4': lambda: print('Error')
    }
    # Bug fix: the original default was the *string* '4', so any unknown
    # command raised "TypeError: 'str' object is not callable" at func().
    # Fall back to the error handler callable instead.
    func = menu.get(command, menu['4'])
    return func()
|
def count_increases(values, window=1):
    """Count positions where the value `window` steps later is larger.

    For window=1 this counts direct increases (puzzle one). For window=3
    it is equivalent to comparing consecutive 3-element sliding-window
    sums (puzzle two), because the two overlapping terms cancel out.
    """
    return sum(1 for i in range(len(values) - window) if values[i + window] > values[i])

if __name__ == "__main__":
    # Open file for reading; `with` guarantees it is closed even on error
    # (the original left the handle open if int() raised).
    with open('input_1', 'rt') as input_file:
        # Put values into array, one integer per line.
        input_values = [int(line) for line in input_file]

    # PUZZLE ONE
    print("Increase Count: ", count_increases(input_values))

    # PUZZLE TWO
    print("Increase Count 2: ", count_increases(input_values, window=3))
|
"""Message type identifiers for Routing."""

# DIDComm message-family URI prefix shared by every routing message type.
MESSAGE_FAMILY = "did:sov:BzCbsNYhMrjHiqZDTUASHg;spec/routing/1.0"

# Fully-qualified message type URIs.
FORWARD = f"{MESSAGE_FAMILY}/forward"
ROUTE_QUERY_REQUEST = f"{MESSAGE_FAMILY}/route-query-request"
ROUTE_QUERY_RESPONSE = f"{MESSAGE_FAMILY}/route-query-response"
ROUTE_UPDATE_REQUEST = f"{MESSAGE_FAMILY}/route-update-request"
ROUTE_UPDATE_RESPONSE = f"{MESSAGE_FAMILY}/route-update-response"

# Python package holding the message classes referenced below.
MESSAGE_PACKAGE = "aries_cloudagent.messaging.routing.messages"

# Maps each message type URI to the dotted path of the class that handles it.
MESSAGE_TYPES = {
    FORWARD: f"{MESSAGE_PACKAGE}.forward.Forward",
    ROUTE_QUERY_REQUEST: f"{MESSAGE_PACKAGE}.route_query_request.RouteQueryRequest",
    ROUTE_QUERY_RESPONSE: f"{MESSAGE_PACKAGE}.route_query_response.RouteQueryResponse",
    ROUTE_UPDATE_REQUEST: f"{MESSAGE_PACKAGE}.route_update_request.RouteUpdateRequest",
    ROUTE_UPDATE_RESPONSE: (
        f"{MESSAGE_PACKAGE}.route_update_response.RouteUpdateResponse"
    ),
}
|
# Read a number and a name, validate the number, and print its successor.
digit = input('Enter number:')
name = input("Name:")
# Bug fix: the original tested `digit.i`, which raises AttributeError
# (str has no attribute `i`); the intended check is str.isdigit().
if not digit.isdigit():
    print("Input must be a digit")
    exit(1)
print(int(digit) + 1)
|
# Ask the user for their 3 favourite dishes and store them as a list.
# (An earlier variant, removed here, asked for each dish with a separate
# input() call and appended it to the list one at a time.)
dishes = input("Jakie są Twoje 3 ulubione dania? Wymień, rozdzielając przecinkiem ")
# NOTE(review): split(',') keeps any spaces following the commas — confirm
# whether each entry should be .strip()ped.
favourite_dishes = dishes.split(',')
print(favourite_dishes)
|
num1 = 11
num2 = 222
# Removed a dead assignment (num3 was first set to 3333333 and immediately
# overwritten); the final values are unchanged.
num3 = 333
num4 = 44444
|
# mock data
# Static LDAP-style attributes merged into every created OP client entry.
OP_STATIC_ATTRS = {
    "objectClass": ["top", "oxAuthClient"],
    "oxAuthScope": [
        "inum=F0C4,ou=scopes,o=gluu",
        "inum=C4F5,ou=scopes,o=gluu",
    ],
    "inum": "w124asdgggAGs",
}

# Arguments passed to the add-OP operation under test.
ADD_OP_TEST_ARGS = {
    "oxAuthLogoutSessionRequired": False,
    "oxAuthTrustedClient": False,
    "oxAuthResponseType": "token",
    "oxAuthTokenEndpointAuthMethod": "client_secret_basic",
    "oxAuthRequireAuthTime": False,
    "oxAccessTokenAsJwt": False,
    "oxPersistClientAuthorizations": True,
    "oxAuthGrantType": "client_credentials",
    "oxAttributes":
        '{"tlsClientAuthSubjectDn":null,"runIntrospectionScriptBeforeAccessTokenAsJwtCreationAndIncludeClaims":false,"keepClientAuthorizationAfterExpiration":false}',
    "oxAuthAppType": "web",
    "oxDisabled": False,
    "oxIncludeClaimsInIdToken": False,
    "oxRptAsJwt": False,
    "displayName": "test-client2",
    "oxAuthClientSecret": "somecoolsecret",
    "oxAuthSubjectType": "pairwise",
}

# Canned LDAP search result: (dn, attributes) with byte-string values, as
# returned by the LDAP client library.
MOCKED_SEARCH_S_VALID_RESPONSE = [(
    "inum=59376804-e84b-411a-9492-653d14e52c24,ou=clients,o=gluu",
    {
        "objectClass": [b"top", b"oxAuthClient"],
        "oxAuthLogoutSessionRequired": [b"false"],
        "oxAuthScope": [
            b"inum=F0C4,ou=scopes,o=gluu",
            b"inum=C4F5,ou=scopes,o=gluu",
        ],
        "oxAuthTrustedClient": [b"false"],
        "oxAuthResponseType": [b"token"],
        "oxAuthTokenEndpointAuthMethod": [b"client_secret_basic"],
        "oxAuthRequireAuthTime": [b"false"],
        "oxAccessTokenAsJwt": [b"false"],
        "oxPersistClientAuthorizations": [b"true"],
        "oxAuthGrantType": [b"client_credentials"],
        "inum": [b"59376804-e84b-411a-9492-653d14e52c24"],
        "oxAttributes": [
            b'{"tlsClientAuthSubjectDn":null,"runIntrospectionScriptBeforeAccessTokenAsJwtCreationAndIncludeClaims":false,"keepClientAuthorizationAfterExpiration":false}'
        ],
        "oxAuthAppType": [b"web"],
        "oxLastLogonTime": [b"20200714072830.011Z"],
        "oxAuthClientSecretExpiresAt": [b"21200623000000.000Z"],
        "oxDisabled": [b"false"],
        "oxIncludeClaimsInIdToken": [b"false"],
        "oxRptAsJwt": [b"false"],
        "displayName": [b"test-client"],
        "oxAuthClientSecret": [b"gWxnjnUdCm8Rpc0WPmm9lQ=="],
        "oxAuthSubjectType": [b"pairwise"],
        "oxLastAccessTime": [b"20200714072830.011Z"],
    },
)]

# Expected (dn, attributes) entry produced when the add-OP operation is run
# with ADD_OP_TEST_ARGS; bound to two names because both are referenced.
OP_ADD_OP_EXPECTED_RETURN = expected_created_op = (
    "inum=w124asdgggAGs,ou=clients,o=gluu",
    {
        "objectClass": [b"top", b"oxAuthClient"],
        "oxAuthLogoutSessionRequired": [b"false"],
        "oxAuthTrustedClient": [b"false"],
        "oxAuthScope":
            [b"inum=F0C4,ou=scopes,o=gluu", b"inum=C4F5,ou=scopes,o=gluu"],
        "oxAuthResponseType": [b"token"],
        "oxAuthTokenEndpointAuthMethod": [b"client_secret_basic"],
        "oxAuthRequireAuthTime": [b"false"],
        "oxAccessTokenAsJwt": [b"false"],
        "oxPersistClientAuthorizations": [b"true"],
        "oxAuthGrantType": [b"client_credentials"],
        "inum": [b"w124asdgggAGs"],
        "oxAttributes": [
            b'{"tlsClientAuthSubjectDn":null,"runIntrospectionScriptBeforeAccessTokenAsJwtCreationAndIncludeClaims":false,"keepClientAuthorizationAfterExpiration":false}'
        ],
        "oxAuthAppType": [b"web"],
        "oxIncludeClaimsInIdToken": [b"false"],
        "oxRptAsJwt": [b"false"],
        "oxDisabled": [b"false"],
        "displayName": [b"test-client2"],
        "oxAuthClientSecret": [b"somecoolsecret"],
        "oxAuthSubjectType": [b"pairwise"],
    },
)
|
# -*- coding: utf-8 -*-
"""
@date: 2020/7/13 4:03 PM
@file: __init__.py.py
@author: zj
@description:
"""
# This line will be programmatically read/written by setup.py.
# Leave it at the bottom of this file and don't touch it.
__version__ = "0.1.9"
|
#!/usr/bin/env python3
# Day 15: Non-overlapping Intervals
#
# Given a collection of intervals, find the minimum number of intervals you
# need to remove to make the rest of the intervals non-overlapping.
#
# Note:
# - You may assume the interval's end point is always bigger than its start
# point.
# - Intervals like [1,2] and [2,3] have borders "touching" but they don't
# overlap each other.
class Solution:
    def eraseOverlapIntervals(self, intervals: [[int]]) -> int:
        """Minimum number of intervals to remove so the rest don't overlap.

        Greedy: process intervals in order of earliest end; any interval
        that starts before the end of the last kept interval must go.
        Touching borders ([1,2] and [2,3]) do not count as overlapping.
        """
        if not intervals:
            return 0
        by_end = sorted(intervals, key=lambda iv: iv[1])
        removed = 0
        kept_end = by_end[0][1]
        for iv in by_end[1:]:
            if iv[0] < kept_end:
                # Overlaps the last kept interval — drop it.
                removed += 1
            else:
                kept_end = iv[1]
        return removed

# Tests
assert Solution().eraseOverlapIntervals([[1,2],[2,3],[3,4],[1,3]]) == 1
assert Solution().eraseOverlapIntervals([[1,2],[1,2],[1,2]]) == 2
assert Solution().eraseOverlapIntervals([[1,2],[2,3]]) == 0
|
# Sum the integers 0..num for a positive user-supplied integer.
aux = 0
num = int(input("Ingrese un numero entero positivo: "))
if num>0:
    for x in range(0,num+1):
        aux = aux + x
    # NOTE(review): indentation was lost in this copy; the print may have
    # originally been inside the loop (printing running totals). Placed
    # after the loop here (prints the final sum) — confirm intent.
    print (aux)
# Memory-game (Advent of Code 2020 day 15 style) — starting numbers.
start = [8,13,1,0,18,9]
# Number spoken on the previous turn (None before the first turn).
last_said = None
# Maps each spoken number -> turn index on which it was last spoken.
history = {}
def say(num, turn_no):
    """Debug helper: log that `num` was spoken on turn `turn_no`.

    Bug fix: the original f-string referenced `i`, which is not defined in
    this function's scope, so any call raised NameError; use the
    `turn_no` parameter instead.
    """
    print(f'turn {turn_no}\tsay {num}')
# Play 30,000,000 turns: speak 0 if the previous number was new, otherwise
# the gap since that number was previously spoken.
for i in range(30000000):
    if i < len(start):
        # Seed turns: speak the starting numbers in order.
        num = start[i]
    else:
        # print(f'turn {i} last said {last_said} {history}')
        if last_said in history:
            # print('in')
            # Gap between the previous turn and the turn it was last spoken.
            num = i - history[last_said] - 1
        else:
            num = 0
    # print(history)
    if last_said is not None:
        # Record the previous number as last spoken on the previous turn.
        history[last_said] = i - 1
    # say(num, i)
    if i % 1000000 == 0: print(i, num)  # progress indicator every 1M turns
    last_said = num
print(i, num)  # final turn index and the last number spoken
"""
******************************************************
Author: Mark Arakaki
October 15, 2017
Personal Practice Use
*****************************************************
Divisors:
Create a program that asks the user for a number and then prints out a list of all the divisors of that number. (If you don't know what a divisor is, it is a number that divides evently into another number. For example, 13 is divisor of 26 because 26 / 13 has no remainder.)
"""
def divisors(n):
    """Return all positive divisors of n in increasing order ([] for n <= 0)."""
    return [d for d in range(1, n + 1) if n % d == 0]

if __name__ == "__main__":
    # Bug fixes vs. the original: input() returns a str (the original divided
    # a str by 2, a TypeError on Python 3), and repeatedly halving the number
    # does not enumerate divisors at all — trial division does.
    number = int(input("Please enter in a number that you want divided: "))
    print("Listed below are the list of possible divisors for the inputted integer: \n")
    print(divisors(number))
|
# String concatenation vs. integer addition demo.
num1 = '100'
num2 = '200'
# While both are strings, + concatenates: 100200
print(num1 + num2)
# Casting - 300
num1, num2 = int(num1), int(num2)
print(num1 + num2)
# Read three integers and report which is largest and which is smallest.
# (Ties produce no output, exactly as in the original elif chains.)
print('Esse programa recebe 3 entradas numericas e diz qual o maior. ')
um = int(input('Digite um numero: '))
dois = int(input('Digite outro numero: '))
tres = int(input('Digite um terceiro numero: '))

# Largest: x > max(others) is equivalent to x > a and x > b.
if um > max(dois, tres):
    print(um,'é o maior')
elif dois > max(um, tres):
    print(dois,'é o maior')
elif tres > max(dois, um):
    print(tres,'é o maior')

# Smallest: x < min(others) is equivalent to x < a and x < b.
if um < min(dois, tres):
    print(um,'é o menor')
elif dois < min(um, tres):
    print(dois,'é o menor')
elif tres < min(dois, um):
    print(tres,'é o menor')

input('Pressione enter pra sair')
|
# Databricks notebook source exported at Sun, 13 Mar 2016 23:07:00 UTC
# MAGIC %md # <img width="300px" src="http://cdn.arstechnica.net/wp-content/uploads/2015/09/2000px-Wikipedia-logo-v2-en-640x735.jpg"/> Clickstream Analysis
# MAGIC
# MAGIC ** Dataset: 3.2 billion requests collected during the month of February 2015 grouped by (src, dest) **
# MAGIC
# MAGIC ** Source: https://datahub.io/dataset/wikipedia-clickstream/ **
# MAGIC
# MAGIC <img width="700px" src="https://databricks-prod-cloudfront.s3.amazonaws.com/docs/images/ny.clickstream.png"/>
# MAGIC
# MAGIC <i>*This notebook requires Spark 1.6+</i>
# COMMAND ----------
# MAGIC %md
# MAGIC This is a copy of the original python notebook by Michael Armburst at Spark Summit East February 2016 (watch later)
# MAGIC
# MAGIC [](https://www.youtube.com/v/35Y-rqSMCCA)
# MAGIC
# MAGIC shared from [https://twitter.com/michaelarmbrust/status/699969850475737088](https://twitter.com/michaelarmbrust/status/699969850475737088)
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC
# MAGIC This is a data analysis workflow developed with [Databricks Community Edition](https://databricks.com/blog/2016/02/17/introducing-databricks-community-edition-apache-spark-for-all.html), a free version of Databricks designed for learning [Apache Spark](https://spark.apache.org/).
# MAGIC
# MAGIC You can [join the wait list](http://go.databricks.com/databricks-community-edition-beta-waitlist) for Community Edition today!
# COMMAND ----------
# MAGIC %scala if (org.apache.spark.BuildInfo.sparkBranch < "1.6") sys.error("Attach this notebook to a cluster running Spark 1.6+")
# COMMAND ----------
# Load the raw dataset stored as a CSV file
# NOTE(review): `sqlContext` is injected by the Databricks/Spark notebook
# runtime — it is not defined in this file.
clickstreamRaw = sqlContext.read \
    .format("com.databricks.spark.csv") \
    .options(header="true", delimiter="\t", mode="PERMISSIVE", inferSchema="true") \
    .load("dbfs:///databricks-datasets/wikipedia-datasets/data-001/clickstream/raw-uncompressed")

# Convert the dataset to a more efficient format (Parquet) to speed up our analysis
clickstreamRaw.write \
    .mode("overwrite") \
    .format("parquet") \
    .save("/datasets/wiki-clickstream")
# COMMAND ----------
# Read back the Parquet copy written in the previous cell.
# NOTE(review): `sqlContext` is injected by the Databricks/Spark runtime.
clicks = sqlContext.read.parquet("/datasets/wiki-clickstream")
# COMMAND ----------
# Bug fix: `clicks.printSchema` only referenced the bound method — in Python
# (unlike Scala) that is a no-op expression; it must be *called* to print.
clicks.printSchema()
# COMMAND ----------
# Percentage of clicks whose prev_id is non-null (i.e. with a recorded
# referring page, per the dataset's column naming).
all_clicks = clicks.selectExpr("sum(n) AS clicks").first().clicks
wiki_clicks = clicks.where("prev_id IS NOT NULL").selectExpr("sum(n) AS clicks").first().clicks
float(wiki_clicks) / all_clicks * 100
# COMMAND ----------
# Make clicks available as a SQL table.
clicks.registerTempTable("clicks")
# COMMAND ----------
# MAGIC %sql
# MAGIC SELECT *
# MAGIC FROM clicks
# MAGIC WHERE
# MAGIC curr_title = 'Donald_Trump' AND
# MAGIC prev_id IS NOT NULL AND prev_title != 'Main_Page'
# MAGIC ORDER BY n DESC
# MAGIC LIMIT 20
# COMMAND ----------
# MAGIC %scala
# MAGIC package d3
# MAGIC // We use a package object so that we can define top level classes like Edge that need to be used in other cells
# MAGIC
# MAGIC import org.apache.spark.sql._
# MAGIC import com.databricks.backend.daemon.driver.EnhancedRDDFunctions.displayHTML
# MAGIC
# MAGIC case class Edge(src: String, dest: String, count: Long)
# MAGIC
# MAGIC case class Node(name: String)
# MAGIC case class Link(source: Int, target: Int, value: Long)
# MAGIC case class Graph(nodes: Seq[Node], links: Seq[Link])
# MAGIC
# MAGIC object graphs {
# MAGIC val sqlContext = SQLContext.getOrCreate(org.apache.spark.SparkContext.getOrCreate())
# MAGIC import sqlContext.implicits._
# MAGIC
# MAGIC def force(clicks: Dataset[Edge], height: Int = 100, width: Int = 960): Unit = {
# MAGIC val data = clicks.collect()
# MAGIC val nodes = (data.map(_.src) ++ data.map(_.dest)).map(_.replaceAll("_", " ")).toSet.toSeq.map(Node)
# MAGIC val links = data.map { t =>
# MAGIC Link(nodes.indexWhere(_.name == t.src.replaceAll("_", " ")), nodes.indexWhere(_.name == t.dest.replaceAll("_", " ")), t.count / 20 + 1)
# MAGIC }
# MAGIC showGraph(height, width, Seq(Graph(nodes, links)).toDF().toJSON.first())
# MAGIC }
# MAGIC
# MAGIC /**
# MAGIC * Displays a force directed graph using d3
# MAGIC * input: {"nodes": [{"name": "..."}], "links": [{"source": 1, "target": 2, "value": 0}]}
# MAGIC */
# MAGIC def showGraph(height: Int, width: Int, graph: String): Unit = {
# MAGIC
# MAGIC displayHTML(s"""
# MAGIC <!DOCTYPE html>
# MAGIC <html>
# MAGIC <head>
# MAGIC <meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
# MAGIC <title>Polish Books Themes - an Interactive Map</title>
# MAGIC <meta charset="utf-8">
# MAGIC <style>
# MAGIC
# MAGIC .node_circle {
# MAGIC stroke: #777;
# MAGIC stroke-width: 1.3px;
# MAGIC }
# MAGIC
# MAGIC .node_label {
# MAGIC pointer-events: none;
# MAGIC }
# MAGIC
# MAGIC .link {
# MAGIC stroke: #777;
# MAGIC stroke-opacity: .2;
# MAGIC }
# MAGIC
# MAGIC .node_count {
# MAGIC stroke: #777;
# MAGIC stroke-width: 1.0px;
# MAGIC fill: #999;
# MAGIC }
# MAGIC
# MAGIC text.legend {
# MAGIC font-family: Verdana;
# MAGIC font-size: 13px;
# MAGIC fill: #000;
# MAGIC }
# MAGIC
# MAGIC .node text {
# MAGIC font-family: "Helvetica Neue","Helvetica","Arial",sans-serif;
# MAGIC font-size: 17px;
# MAGIC font-weight: 200;
# MAGIC }
# MAGIC
# MAGIC </style>
# MAGIC </head>
# MAGIC
# MAGIC <body>
# MAGIC <script src="//d3js.org/d3.v3.min.js"></script>
# MAGIC <script>
# MAGIC
# MAGIC var graph = $graph;
# MAGIC
# MAGIC var width = $width,
# MAGIC height = $height;
# MAGIC
# MAGIC var color = d3.scale.category20();
# MAGIC
# MAGIC var force = d3.layout.force()
# MAGIC .charge(-700)
# MAGIC .linkDistance(180)
# MAGIC .size([width, height]);
# MAGIC
# MAGIC var svg = d3.select("body").append("svg")
# MAGIC .attr("width", width)
# MAGIC .attr("height", height);
# MAGIC
# MAGIC force
# MAGIC .nodes(graph.nodes)
# MAGIC .links(graph.links)
# MAGIC .start();
# MAGIC
# MAGIC var link = svg.selectAll(".link")
# MAGIC .data(graph.links)
# MAGIC .enter().append("line")
# MAGIC .attr("class", "link")
# MAGIC .style("stroke-width", function(d) { return Math.sqrt(d.value); });
# MAGIC
# MAGIC var node = svg.selectAll(".node")
# MAGIC .data(graph.nodes)
# MAGIC .enter().append("g")
# MAGIC .attr("class", "node")
# MAGIC .call(force.drag);
# MAGIC
# MAGIC node.append("circle")
# MAGIC .attr("r", 10)
# MAGIC .style("fill", function (d) {
# MAGIC if (d.name.startsWith("other")) { return color(1); } else { return color(2); };
# MAGIC })
# MAGIC
# MAGIC node.append("text")
# MAGIC .attr("dx", 10)
# MAGIC .attr("dy", ".35em")
# MAGIC .text(function(d) { return d.name });
# MAGIC
# MAGIC //Now we are giving the SVGs co-ordinates - the force layout is generating the co-ordinates which this code is using to update the attributes of the SVG elements
# MAGIC force.on("tick", function () {
# MAGIC link.attr("x1", function (d) {
# MAGIC return d.source.x;
# MAGIC })
# MAGIC .attr("y1", function (d) {
# MAGIC return d.source.y;
# MAGIC })
# MAGIC .attr("x2", function (d) {
# MAGIC return d.target.x;
# MAGIC })
# MAGIC .attr("y2", function (d) {
# MAGIC return d.target.y;
# MAGIC });
# MAGIC d3.selectAll("circle").attr("cx", function (d) {
# MAGIC return d.x;
# MAGIC })
# MAGIC .attr("cy", function (d) {
# MAGIC return d.y;
# MAGIC });
# MAGIC d3.selectAll("text").attr("x", function (d) {
# MAGIC return d.x;
# MAGIC })
# MAGIC .attr("y", function (d) {
# MAGIC return d.y;
# MAGIC });
# MAGIC });
# MAGIC </script>
# MAGIC </html>
# MAGIC """)
# MAGIC }
# MAGIC
# MAGIC def help() = {
# MAGIC displayHTML("""
# MAGIC <p>
# MAGIC Produces a force-directed graph given a collection of edges of the following form:</br>
# MAGIC <tt><font color="#a71d5d">case class</font> <font color="#795da3">Edge</font>(<font color="#ed6a43">src</font>: <font color="#a71d5d">String</font>, <font color="#ed6a43">dest</font>: <font color="#a71d5d">String</font>, <font color="#ed6a43">count</font>: <font color="#a71d5d">Long</font>)</tt>
# MAGIC </p>
# MAGIC <p>Usage:<br/>
# MAGIC <tt>%scala</tt></br>
# MAGIC <tt><font color="#a71d5d">import</font> <font color="#ed6a43">d3._</font></tt><br/>
# MAGIC <tt><font color="#795da3">graphs.force</font>(</br>
# MAGIC <font color="#ed6a43">height</font> = <font color="#795da3">500</font>,<br/>
# MAGIC <font color="#ed6a43">width</font> = <font color="#795da3">500</font>,<br/>
# MAGIC <font color="#ed6a43">clicks</font>: <font color="#795da3">Dataset</font>[<font color="#795da3">Edge</font>])</tt>
# MAGIC </p>""")
# MAGIC }
# MAGIC }
# COMMAND ----------
# MAGIC %scala
# MAGIC // print the help for the graphing library
# MAGIC d3.graphs.help()
# COMMAND ----------
# MAGIC %scala
# MAGIC import d3._
# MAGIC
# MAGIC graphs.force(
# MAGIC height = 800,
# MAGIC width = 1000,
# MAGIC clicks = sql("""
# MAGIC SELECT
# MAGIC prev_title AS src,
# MAGIC curr_title AS dest,
# MAGIC n AS count FROM clicks
# MAGIC WHERE
# MAGIC curr_title IN ('Donald_Trump', 'Bernie_Sanders', 'Hillary_Rodham_Clinton', 'Ted_Cruz') AND
# MAGIC prev_id IS NOT NULL AND NOT (curr_title = 'Main_Page' OR prev_title = 'Main_Page')
# MAGIC ORDER BY n DESC
# MAGIC LIMIT 20""").as[Edge])
# COMMAND ----------
|
# Python - 3.4.3
# Cell values in the maze grid.
WALL = 1
START_POINT = 2
FINISH_POINT = 3

# Movement deltas as (row, col) offsets for each compass direction.
dirs = {
    'W': (0, -1),
    'E': (0, 1),
    'N': (-1, 0),
    'S': (1, 0)
}

def find_maze_start_point(maze):
    """Return the (row, col) of the START_POINT cell, or None if absent."""
    for row_idx, row in enumerate(maze):
        for col_idx, cell in enumerate(row):
            if cell == START_POINT:
                return (row_idx, col_idx)
    return None

def maze_runner(maze, directions):
    """Walk `directions` from the start cell.

    Returns 'Finish' on reaching the finish, 'Dead' on hitting a wall or
    stepping off the map, and 'Lost' if the moves run out anywhere else.
    """
    row, col = find_maze_start_point(maze)
    height, width = len(maze), len(maze[0])
    for step in directions:
        d_row, d_col = dirs[step]
        row, col = row + d_row, col + d_col
        if not (0 <= row < height and 0 <= col < width):
            # Walked off the map.
            return 'Dead'
        if maze[row][col] == WALL:
            # Hit a wall.
            return 'Dead'
        if maze[row][col] == FINISH_POINT:
            return 'Finish'
    # Ran out of moves without reaching the finish.
    return 'Lost'
|
"""This file contains constants used by the Ethereum JSON RPC
interface."""
# Symbolic block tags accepted by JSON-RPC methods in place of an explicit
# block number.
BLOCK_TAG_EARLIEST = "earliest"
BLOCK_TAG_LATEST = "latest"
BLOCK_TAG_PENDING = "pending"
# All valid tags, for membership checks.
BLOCK_TAGS = (BLOCK_TAG_EARLIEST, BLOCK_TAG_LATEST, BLOCK_TAG_PENDING)
|
# ------- FUNCTION BASICS --------
def allotEmail(firstName, surname):
    """Build the company e-mail address `first.last@pythonabc.org`."""
    return f"{firstName}.{surname}@pythonabc.org"
# Prompt for "First Last" and print the resulting company e-mail address.
name = input("Enter your name: ")
# NOTE(review): raises ValueError unless the input is exactly two words.
fName, sName = name.split()
compEmail = allotEmail(fName, sName)
print(compEmail)
def get_sum(*args):
    """Return the sum of all positional arguments (0 when none are given).

    Fix: the original accumulated into a local named `sum`, shadowing the
    builtin, and re-implemented what the builtin already does.
    """
    return sum(args)

print("sum =", get_sum(3,4,5,7))
'''
An approximation of network latency in the Bitcoin network based on the
following paper: https://ieeexplore.ieee.org/document/6688704/.
From the green line in Fig 1, we can approximate the function as:
Network latency (sec) = 19/300 sec/KB * KB + 1 sec
If we assume a transaction is 500 bytes or 1/2 KB, we get the function
Network latency (sec) = 19/600 sec/tx * tx + 1 sec
We use this as a parameter into our exponential delay
'''
SEC_PER_TRANSACTION = 19.0/600  # seconds of propagation delay per transaction

'''
Required depth for longest chain to consider a block to be finalized
'''
FINALIZATION_DEPTH = 6

'''
Transaction rate in transactions/sec used when generating a transaction
dataset
'''
TX_RATE = 1

'''
Transaction size used for computing network latency when broadcasting transactions
'''
TX_SIZE = 1
|
class Spam(object):
    '''A container holding lots of spam.

    Args:
        arg (str): The arg is used for ...
        *args: The variable arguments are used for ...
        **kwargs: The keyword arguments are used for ...

    Attributes:
        arg (str): Where the `arg` constructor argument is stored.
    '''

    def __init__(self, arg, *args, **kwargs):
        # Only `arg` is retained; extra positionals/keywords are ignored.
        self.arg = arg

    def eggs(self, amount, cooked):
        '''We can't have spam without eggs, so here's the eggs.

        Args:
            amount (int): The amount of eggs to return
            cooked (bool): Should the eggs be cooked?

        Raises:
            RuntimeError: Out of eggs

        Returns:
            Eggs: A bunch of eggs
        '''
        pass
|
'''
Given an integer array nums, return the length of the longest strictly increasing subsequence.
A subsequence is a sequence that can be derived from an array by deleting some or no elements without changing the order of the remaining elements. For example, [3,6,2,7] is a subsequence of the array [0,3,1,6,2,2,7].
Example 1:
Input: nums = [10,9,2,5,3,7,101,18]
Output: 4
Explanation: The longest increasing subsequence is [2,3,7,101], therefore the length is 4.
Example 2:
Input: nums = [0,1,0,3,2,3]
Output: 4
Example 3:
Input: nums = [7,7,7,7,7,7,7]
Output: 1
Constraints:
1 <= nums.length <= 2500
-104 <= nums[i] <= 104
Follow up:
Could you come up with the O(n2) solution?
Could you improve it to O(n log(n)) time complexity?
'''
# Bin Search Approach -> Time: O(NlogN), Space: O(n)
class Solution(object):
    # Patience-sorting ("tails array") solution: O(N log N) time, O(N) space.

    def binarySearch(self, temp_arr, low, high, target):
        """Return the index of `target` in the sorted slice, or — when it is
        absent — the insertion position (`low` after the loop ends)."""
        while low <= high:
            mid = low + (high - low) // 2
            value = temp_arr[mid]
            if value == target:
                return mid
            elif value > target:
                high = mid - 1
            else:
                low = mid + 1
        return low

    def lengthOfLIS(self, nums):
        """Length of the longest strictly increasing subsequence of nums."""
        if not nums:
            return 0
        # tails[k] holds the smallest possible tail of an increasing
        # subsequence of length k+1 seen so far.
        tails = [nums[0]]
        for value in nums[1:]:
            if value > tails[-1]:
                tails.append(value)
            else:
                slot = self.binarySearch(tails, 0, len(tails) - 1, value)
                tails[slot] = value
        return len(tails)
# DP Approach -> Time: O(n^2), Space: O(n)
# DP Approach -> Time: O(n^2), Space: O(n)
class Solution(object):
    def lengthOfLIS(self, nums):
        """Classic quadratic DP: dp[i] = LIS length ending at index i."""
        dp = [1] * len(nums)
        for i in range(1, len(nums)):
            for j in range(i):
                if nums[i] > nums[j]:
                    dp[i] = max(dp[i], dp[j] + 1)
        # Empty input yields an empty dp table -> answer 0.
        return max(dp, default=0)
|
# Time: O(m * n)
# Space: O(m * n)
class Solution(object):
    def numDistinctIslands(self, grid):
        """
        :type grid: List[List[int]]
        :rtype: int

        Count islands with distinct shapes. Each island is canonicalized as
        the string of directions attempted during a DFS from its first
        visited cell; identical strings mean identical shapes.
        Note: mutates `grid` in place (visited cells are negated).
        """
        directions = {'l':[-1, 0], 'r':[ 1, 0], \
                      'u':[ 0, 1], 'd':[ 0, -1]}

        def dfs(i, j, grid, island):
            # Stop at out-of-bounds, water, or already-visited (negated) cells.
            if not (0 <= i < len(grid) and \
                    0 <= j < len(grid[0]) and \
                    grid[i][j] > 0):
                return False
            grid[i][j] *= -1  # mark as visited
            # Py3 fix: dict.iteritems() does not exist in Python 3 — use
            # items(); literal insertion order is preserved, so the shape
            # strings are deterministic.
            for k, v in directions.items():
                island.append(k)
                dfs(i+v[0], j+v[1], grid, island)
            return True

        islands = set()
        # Py3 fix: xrange was removed in Python 3 — range is lazy there.
        for i in range(len(grid)):
            for j in range(len(grid[0])):
                island = []
                if dfs(i, j, grid, island):
                    islands.add("".join(island))
        return len(islands)
|
# The DICT data type
# Ways of constructing a dict
dict_temp = {}
print(type(dict_temp), dict_temp)
dict_temp = {'dict1' : 1, 'dict2' : 2.1, 'dict3': 'name', 'dict4':[1,2,3]}
print(type(dict_temp), dict_temp)
dict_temp = dict.fromkeys(['a', 'b'], [12, 2020])
print(type(dict_temp), dict_temp)
dict_temp = dict(brend = 'volvo', volume_engine = 1.5)
print(type(dict_temp), dict_temp)
dict_temp = {a: a**2 for a in range(5)}
print(type(dict_temp), dict_temp)

# Accessing dictionary contents (key 2 exists after the comprehension above)
print(dict_temp[2])

# Functions over dictionaries
print(list(dict_temp.keys()))
print(list(dict_temp.values()))
print(list(dict_temp.items()))
print('____________________________')

# Working with elements
dict_temp[0] = 100
print(type(dict_temp), dict_temp)
dict_temp['name'] = 'Dima'
print(type(dict_temp), dict_temp)

# Methods
# keys, values, items — see above; pop removes a key and returns its value
temp = dict_temp.pop('name')
print(type(temp), temp)

# Iterating over a dictionary
for pair in dict_temp.items():
    print(pair)
for kay, value in dict_temp.items():
    print(kay, value)
for kay in dict_temp.keys():
    print(kay)
for value in dict_temp.values():
    print(value)
|
"""
Given three integer arrays arr1, arr2 and arr3 sorted in strictly increasing order, return a sorted array of only the integers that appeared in all three arrays.
Example 1:
Input: arr1 = [1,2,3,4,5], arr2 = [1,2,5,7,9], arr3 = [1,3,4,5,8]
Output: [1,5]
Explanation: Only 1 and 5 appeared in the three arrays.
Constraints:
1 <= arr1.length, arr2.length, arr3.length <= 1000
1 <= arr1[i], arr2[i], arr3[i] <= 2000
"""
# Hash Map Solution
# Hash Map Solution
class Solution(object):
    def arraysIntersection(self, arr1, arr2, arr3):
        """Values present in all three strictly increasing arrays.

        Each array is strictly increasing, so a value can appear at most
        once per array — a total count of 3 means it is in all three.
        """
        # Local import: `collections` is never imported at file level here.
        import collections
        counts = collections.Counter(arr1 + arr2 + arr3)
        res = []
        # Py3 fix: Counter.iteritems() does not exist in Python 3; items()
        # preserves first-seen order, so the result stays sorted.
        for key, value in counts.items():
            if value == 3:
                res.append(key)
        return res
#Algorithm
#Initiate three pointers p1, p2, p3, and place them at the beginning of arr1, arr2, arr3 by initializing them to 0;
#while they are within the boundaries:
#if arr1[p1] == arr2[p2] && arr2[p2] == arr3[p3], we should store it because it appears three times in arr1, arr2, and arr3;
#else
#if arr1[p1] < arr2[p2], move the smaller one, i.e., p1;
#else if arr2[p2] < arr3[p3], move the smaller one, i.e., p2;
#if neither of the above conditions is met, it means arr1[p1] >= arr2[p2] && arr2[p2] >= arr3[p3], therefore move p3.
class Solution(object):
    def arraysIntersection(self, arr1, arr2, arr3):
        """Values present in all three strictly increasing arrays.

        Three synchronized pointers: on a three-way match record the value
        and advance all pointers; otherwise advance the pointer sitting on
        the smallest value.
        """
        out = []
        p1 = p2 = p3 = 0
        while p1 < len(arr1) and p2 < len(arr2) and p3 < len(arr3):
            v1, v2, v3 = arr1[p1], arr2[p2], arr3[p3]
            if v1 == v2 == v3:
                out.append(v1)
                p1, p2, p3 = p1 + 1, p2 + 1, p3 + 1
            elif v1 < v2:
                p1 += 1
            elif v2 < v3:
                p2 += 1
            else:
                # Here v1 >= v2 and v2 >= v3, so arr3's pointer lags.
                p3 += 1
        return out
|
# -*- coding: utf-8 -*-
"""
Created on Sat Apr 6 19:42:18 2019

@author: rounak
"""
num = int (input("Enter a number: "))
# A value x in 2..num-1 is a divisor when it divides num with no remainder.
proper_divisors = [x for x in range(2, num) if num % x == 0]
for x in proper_divisors:
    print(x)
|
class Solution(object):
    @staticmethod
    def min_steps(candy, n, m):
        """Minimum moves from (0, 0) on an n x n grid to collect m candies.

        candy: collection of (row, col) cells containing a candy.
        n: grid side length.  m: number of candies to collect.

        Exhaustive DFS over non-self-intersecting paths, pruned against the
        best complete path found so far. NOTE(review): moves are down (i+1),
        left (j-1) and right (j+1) only — there is no upward move; confirm
        that restriction is intended.
        """
        min_step = float("inf")

        def dfs(curr, i, j, num_candy, steps):
            # curr: cells already on the current path (revisit guard).
            nonlocal min_step
            if num_candy == m:
                min_step = min(steps, min_step)
            if steps > min_step:
                # Prune: already no better than the best known path.
                return
            if (i, j) in candy:
                num_candy += 1
            if 0 <= i+1 < n and 0 <= j < n and (i+1, j) not in curr:
                curr.append((i+1, j))
                dfs(curr, i+1, j, num_candy, steps+1)
                curr.pop()
            if 0 <= i < n and 0 <= j-1 < n and (i, j-1) not in curr:
                curr.append((i, j-1))
                dfs(curr, i, j-1, num_candy, steps+1)
                curr.pop()
            if 0 <= j+1 < n and 0 <= i < n and (i, j+1) not in curr:
                curr.append((i, j+1))
                dfs(curr, i, j+1, num_candy, steps+1)
                curr.pop()

        dfs([], 0, 0, 0, 0)
        # need to -1 because last one should be exactly has candy, and in current
        # implementation, step+1 then find candy == m
        print(min_step-1)
        return min_step-1
# Example run: 4x4 grid with candy at the four cells below; collect all of them.
candy = [(0, 3), (1, 1), (2, 2), (3, 3)]
m = len(candy)
n = 4
s = Solution()
s.min_steps(candy, n, m)
|
class Employee:
    """Employee record with first/last name and a non-negative monthly salary."""

    # Constructor for Employee
    def __init__(self, first_name, last_name, monthly_salary):
        self._first_name = first_name
        self._last_name = last_name
        # Negative salaries are clamped to 0.
        self._monthly_salary = max(monthly_salary, 0)

    # Getter and setter for first_name
    @property
    def first_name(self):
        return self._first_name

    @first_name.setter
    def first_name(self, new_first_name):
        self._first_name = new_first_name

    # Getter and setter for last_name
    @property
    def last_name(self):
        return self._last_name

    @last_name.setter
    def last_name(self, new_last_name):
        self._last_name = new_last_name

    # Getter and setter for monthly_salary
    @property
    def monthly_salary(self):
        return self._monthly_salary

    @monthly_salary.setter
    def monthly_salary(self, new_monthly_salary):
        # Fix: the original setter skipped the negative-salary check that the
        # constructor enforced, so assignment could break the invariant.
        self._monthly_salary = max(new_monthly_salary, 0)
|
def test_Feeds(flamingo_env):
    # Integration test for the Feeds plugin: configure one Atom feed, write a
    # single content file, and check the site builds without errors.
    # NOTE(review): `flamingo_env` is a project pytest fixture — not
    # constructible here.
    flamingo_env.settings.PLUGINS = ['flamingo.plugins.Feeds']
    flamingo_env.settings.FEEDS_DOMAIN = 'www.example.org'
    flamingo_env.settings.FEEDS = [
        {
            'id': 'www.example.org',
            'title': 'Example.org',
            'type': 'atom',
            'output': 'en/feed.atom.xml',
            'lang': 'en',
            'contents': lambda ctx: ctx.contents,
            'entry-id': lambda content: content['path'],
            'updated': lambda content: '1970-01-01 00:00:00+01:00',
        },
    ]

    # NOTE(review): the original leading whitespace inside this literal was
    # lost in this copy of the file; the content body must match what the
    # plugin's content parser expects — verify against the repository.
    flamingo_env.write('/content/blog-post.html', """
title: blog-post

Blog post
=========
""")

    flamingo_env.build()
|
class Solution:
    def minFallingPathSum(self, arr: List[List[int]]) -> int:
        """Minimum falling path sum with non-zero column shifts
        (each row must pick a different column than the row above).

        Tracks only the smallest (min1) and second-smallest (min2) column
        indices of the previous row, giving O(rows * cols) time and O(1)
        extra space while accumulating sums in `arr` in place.
        NOTE(review): `List` requires `from typing import List`, which is
        not visible in this file — confirm the import exists.
        """
        # Indices of the smallest / second-smallest entries of row 0
        # (-1 acts as "unset" during the scan).
        min1 = min2 = -1
        for j in range(len(arr[0])):
            if min1 == -1 or arr[0][j] < arr[0][min1]:
                min2 = min1
                min1 = j
            elif min2 == -1 or arr[0][j] < arr[0][min2]:
                min2 = j
        for i in range(1, len(arr)):
            currMin1 = currMin2 = -1
            for j in range(len(arr[i])):
                if j == min1:
                    # Same column as the previous row's best — must use the
                    # second-best predecessor instead.
                    arr[i][j] += arr[i - 1][min2]
                else:
                    arr[i][j] += arr[i - 1][min1]
                # Maintain this row's two smallest accumulated sums.
                if currMin1 == -1 or arr[i][j] < arr[i][currMin1]:
                    currMin2 = currMin1
                    currMin1 = j
                elif currMin2 == -1 or arr[i][j] < arr[i][currMin2]:
                    currMin2 = j
            min1, min2 = currMin1, currMin2
        return arr[-1][min1]
|
GENERAL_HELP = '''
Usage:
vt <command> [options]
Commands:
lists Get all lists
list Return items of a specific list
item Return a specific item
show Alias for item
done Mark an item done
complete Alias for done
undone Mark an item undone
uncomplete Alias for undone
modify Modify item by providing a comment
edit Alias for modify
comment Alias for modify
comments Alias for modify
add Create a new item
move Associate an item with a new list
mv Alias for move
categories Return a list of valid categories for a given list
categorize Provide a category for a given item
label Alias for categorize
help Get help on a command
'''
LISTS_HELP = '''
Usage:
vt lists
Description:
Return all lists
'''
LIST_HELP = '''
Usage:
vt list [GUID] [options]
Description:
Return all items of a specified list. GUID may be either the unique identifier of
a list or the name of the list if it is unique. If no GUID is provided, use the
default list defined in the VT_DEFAULT_LIST environment variable.
Options:
-e, --extended Show extended information about items.
-u, --unfinished Only display items that have not been completed yet.
-c, --categories Include item categories in output.
-q, --quiet Quiet mode. Remove any extraneous output.
-W, --no-wrap Do not apply any text wrapping to output.
'''
DONE_HELP = '''
Usage:
vt done [GUID] ...
vt complete [GUID] ...
Description:
Mark an item done. When run without a GUID, display all recently completed items.
'''
UNDONE_HELP = '''
Usage:
vt undone [GUID] ...
vt uncomplete [GUID] ...
Description:
Mark an item undone. When run without a GUID, display all recently completed items.
'''
COMMENT_HELP = '''
Usage:
vt modify GUID [options] [comment]
vt comment GUID [options] [comment]
vt comments GUID [options] [comment]
vt edit GUID [options] [comment]
Description:
Add a comment to the specified item. No comment should be provided when using the -d flag.
Options:
-a, --append Append comment rather than overwriting.
-d, --delete Remove comment from item.
'''
ADD_HELP = '''
Usage:
vt add [GUID] item
Description:
Create a new item. GUID is the unique identifier for the list the item will be placed on.
When GUID is not provided, use the default list defined in VT_DEFAULT_LIST environment variable.
'''
MOVE_HELP = '''
Usage:
vt move ITEM LIST
vt mv ITEM LIST
Description:
Move item to a new list where ITEM is the guid of the item and LIST is the guid of the new list.
'''
CATEGORIES_HELP = '''
Usage:
vt categories [GUID]
Description:
Display the available categories for a list. GUID is the unique identifier for a list.
When GUID is not provided, use the default list defined in VT_DEFAULT_LIST environment variable.
'''
CATEGORIZE_HELP = '''
Usage:
vt categorize GUID CATEGORY
vt label GUID CATEGORY
Description:
Assign CATEGORY to the item specified by GUID.
'''
|
# -*- coding: utf-8 -*-
"""
@author: ashutosh
A simple program to add two numbers.
"""
def main(num1=1.5, num2=4.5):
    """Add two numbers and print the result.

    Generalized: the original hard-coded 1.5 and 4.5 inside the body;
    they are now defaulted parameters, so existing calls are unchanged.

    Parameters
    ----------
    num1, num2 : float, optional
        The addends (default to the original demo values).

    Returns
    -------
    int
        returns integer 0 for safe executions.
    """
    print("Program to add two numbers.\n")
    # float() lets int (or numeric-string) arguments participate too
    sum_val = float(num1) + float(num2)
    # Displaying the result
    print("The sum of given numbers is,")
    print("{n1} + {n2} = {sm}".format(n1=num1, n2=num2, sm=sum_val))
    return 0

if __name__ == "__main__":
    main()
|
# slicing lab
def swap(seq):
    """Return *seq* with its first and last elements exchanged."""
    last = seq[len(seq) - 1:]
    middle = seq[1:len(seq) - 1]
    first = seq[0:1]
    return last + middle + first

assert swap('something') == 'gomethins'
assert swap(tuple(range(10))) == (9, 1, 2, 3, 4, 5, 6, 7, 8, 0)

def rem(seq):
    """Return every other element, starting with the first."""
    return seq[0::2]

assert rem('a word') == 'awr'

def rem4(seq):
    """Drop the first and last four elements, then keep every other one."""
    return seq[4:len(seq) - 4:2]

print(rem4((1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11),))

def reverse(seq):
    """Return *seq* in reverse order."""
    return seq[::-1]

print(reverse('a string'))

def thirds(seq):
    """Rotate *seq* left by one third: middle third, last third, first third."""
    third = len(seq) // 3
    return seq[third:-third] + seq[-third:] + seq[:third]

print(thirds(tuple(range(12))))
|
def minkowski(a, b, p):
    """Return the Minkowski distance of order *p* between points a and b.

    Fix: each coordinate difference must go through abs() before being
    raised to the p-th power. Without it, a negative difference with an
    odd p shrinks the sum (wrong distance), and a negative sum raised to
    a fractional power yields a complex number.
    """
    summ = 0
    n = len(a)
    for i in range(n):
        summ += abs(b[i] - a[i]) ** p
    return summ ** (1 / p)
# Demo: order-3 Minkowski distance between two 4-D points.
a = [0, 3, 4, 5]
b = [7, 6, 3, -1]
p = 3
print(minkowski(a, b, p))
|
# Read an integer and report whether it is even or odd.
numero = int(input('digite um numero: '))
if numero % 2 == 0:
    print('{} é par'.format(numero))
else:
    print('{} é impar'.format(numero))
|
# Read space-separated integers and print their spread (max - min).
tokens = input().split(" ")
values = [int(token) for token in tokens]
values.sort()
print(values[-1] - values[0])
# Created by MechAviv
# ID :: [4000013]
# Maple Road : Inside the Small Forest
# NOTE(review): `sm` is presumably the script-manager object injected by
# the game-server scripting runtime — confirm against the host engine.
sm.showFieldEffect("maplemap/enter/40000", 0)
# Define the time axis (0 to 10 in 0.1 steps) and the time constant.
# NOTE(review): np, plt and plot_alpha_func are assumed to be imported /
# defined earlier (notebook-style cell) — confirm.
t = np.arange(0, 10, .1)
tau = 0.5
# Alpha function: f(t) = t * exp(-t/tau)
f = t * np.exp(-t/tau)
# Write f as a product u(t) * v(t) so the product rule applies.
u_t = t
v_t = np.exp(-t/tau)
# Derivatives of the two factors: du/dt = 1, dv/dt = -(1/tau) e^{-t/tau}
du_dt = 1
dv_dt = -1/tau * np.exp(-t/tau)
# Product rule: df/dt = u * dv/dt + v * du/dt
df_dt = u_t * dv_dt + v_t * du_dt
# Visualize f and its derivative (xkcd plot style)
with plt.xkcd():
    plot_alpha_func(t, f, df_dt)
# Fuel-pump survey: 1 = alcohol, 2 = gasoline, 3 = diesel, 4 = quit.
alcool = 0
gasolina = 0
diesel = 0
codigo = 0
while codigo != 4:
    codigo = int(input())
    if codigo == 1:
        alcool += 1
    elif codigo == 2:
        gasolina += 1
    elif codigo == 3:
        diesel += 1
print('MUITO OBRIGADO')
print('Alcool: {}'.format(alcool))
print('Gasolina: {}'.format(gasolina))
print('Diesel: {}'.format(diesel))
|
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
    def countNodes(self, root: TreeNode) -> int:
        """Count the nodes of a complete binary tree.

        Instead of visiting every node, walks down from the root and
        uses left/right spine depths to add whole perfect subtrees at
        once (depth probes only, so sub-linear in the node count).
        """
        maxDepth = self.findLeftMaxDepth(root)
        if maxDepth <= 1:
            # Empty tree (0) or single node (1).
            return maxDepth
        else:
            curRoot = root
            curDepth = 1
            totalNum = 0
            while True:
                if curRoot.left == None:
                    # Current node is a leaf: count it and stop.
                    totalNum += 1
                    break
                elif curRoot.right == None:
                    # Left child exists but right does not: count both and stop.
                    totalNum += 2
                    break
                elif self.findLeftMaxDepth(curRoot.right) + curDepth == maxDepth:
                    # Right subtree's left spine reaches the bottom level, so
                    # the left subtree is full to the bottom: add the left
                    # subtree plus the current node, then descend right.
                    totalNum += 2 ** (maxDepth-curDepth)
                    curRoot = curRoot.right
                    curDepth += 1
                elif self.findRightMaxDepth(curRoot.left) + curDepth == maxDepth:
                    # Left subtree's right spine reaches the bottom: it appears
                    # the left side is complete through the bottom and the
                    # right side is perfect one level shorter — count both
                    # closed forms and stop. (NOTE(review): closed-form counts
                    # inferred from the exponents; confirm against the
                    # original author's derivation.)
                    totalNum += 2 ** (maxDepth-curDepth-1)
                    totalNum += (2 ** (maxDepth-curDepth) - 1)
                    break
                else:
                    # Bottom level ends inside the left subtree: the right
                    # subtree is perfect one level shorter; count it plus the
                    # current node, then descend left.
                    totalNum += 2 ** (maxDepth-curDepth-1)
                    curRoot = curRoot.left
                    curDepth += 1
            return totalNum

    def findLeftMaxDepth(self, root):
        """Depth of the path that always follows left children."""
        if root == None:
            return 0
        elif root.left == None:
            return 1
        else:
            return 1 + self.findLeftMaxDepth(root.left)

    def findRightMaxDepth(self, root):
        """Depth of the path that always follows right children."""
        if root == None:
            return 0
        elif root.right == None:
            return 1
        else:
            return 1 + self.findRightMaxDepth(root.right)
'''
A library to speed up physics data analysis.
Contains functions for error analysis and calculations
for various physics mechanics values.
''' |
# Creating variables dynamically.
# Robot Framework calls "get_variables" with the arguments given to the
# variable file, and exposes the returned mapping to the caller.
def get_variables(server_uri, start_port):
    """Build three consecutive server URIs starting at *start_port*.

    The order (target, common, security) must match the library order
    in 'server.py'.
    """
    base = int(start_port)
    uris = ["%s:%d" % (server_uri, base + offset) for offset in range(3)]
    return {
        "target_uri": uris[0],
        "common_uri": uris[1],
        "security_uri": uris[2],
    }
|
class StageOutputs:
execute_outputs = {
# Outputs from public Cisco docs:
# https://www.cisco.com/c/en/us/td/docs/routers/asr1000/release/notes/asr1k_rn_rel_notes/asr1k_rn_sys_req.html
'copy running-config startup-config': '''\
PE1#copy running-config startup-config
Destination filename [startup-config]?
%Error opening bootflash:running-config (Permission denied)
''',
'show boot': '''\
starfleet-1#show boot
BOOT variable = bootflash:/cat9k_iosxe.BLD_V173_THROTTLE_LATEST_20200421_032634.SSA.bin;
Configuration Register is 0x102
MANUAL_BOOT variable = no
BAUD variable = 9600
ENABLE_BREAK variable does not exist
BOOTMODE variable does not exist
IPXE_TIMEOUT variable does not exist
CONFIG_FILE variable =
''',
}
parsed_outputs = {
'show boot': {
'active':
{'boot_variable': 'bootflash:/cat9k_iosxe.BLD_V173_THROTTLE_LATEST_20200421_032634.SSA.bin;',
'configuration_register': '0x102'}}
}
config_outputs = {
'no boot system bootflash:/cat9k_iosxe.BLD_V173_THROTTLE_LATEST_20200421_032634.SSA.bin': '',
'boot system bootflash:/cat9k_iosxe.BLD_V173_THROTTLE_LATEST_20200421_032634.SSA.bin': '',
'config-register 0x2102': '',
}
def get_execute_output(arg, **kwargs):
    '''Return the canned CLI output recorded for the given show command.'''
    return StageOutputs.execute_outputs[arg]

def get_parsed_output(arg, **kwargs):
    '''Return the canned parsed (dict) output for the given show command.'''
    return StageOutputs.parsed_outputs[arg]

def get_config_output(arg, **kwargs):
    '''Return the canned output for the given config string.'''
    return StageOutputs.config_outputs[arg]
|
class UnknownCommand(Exception):
    """Raised when a dispatched command name is not recognized."""
    pass

class ModuleNotFound(Exception):
    """Raised when a requested module cannot be located."""
    pass

class VariableError(Exception):
    """Raised for invalid or missing variable definitions."""
    pass

class ModuleError:
    """Container for a module error message.

    NOTE(review): unlike the classes above this is NOT an Exception
    subclass, so it cannot be raised — confirm whether that is intended.
    """
    # Class-level default; shadowed by the instance attribute in __init__.
    error = ""

    def __init__(self, error):
        self.error = error
# Copyright 2017 Bloomberg Finance L.P.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This package provides Python interfaces to Comdb2 databases.
Two different Python submodules are provided for interacting with Comdb2
databases. Both submodules work from Python 2.7+ and from Python 3.5+.
`comdb2.dbapi2` provides an interface that conforms to `the Python Database API
Specification v2.0 <https://www.python.org/dev/peps/pep-0249/>`_. If you're
already familiar with the Python DB-API, or if you intend to use libraries that
expect to be given DB-API compliant connections, this module is likely to be
the best fit for you. Additionally, if a better way of communicating with
a Comdb2 database than ``libcdb2api`` is ever introduced, this module will be
upgraded to it under the hood.
`comdb2.cdb2` provides a thin, pythonic wrapper over cdb2api. If you're more
familiar with ``libcdb2api`` than with the Python DB-API and you don't
anticipate a need to interact with libraries that require DB-API compliant
connections, this module may be simpler to get started with.
"""
__version__ = "1.1.6"
|
# Claim-type discriminators used when indexing claims.
CLAIM_TYPES = {
    'stream': 1,
    'channel': 2,
    'repost': 3
}
# Stream (media) type discriminators, keyed by broad content category.
STREAM_TYPES = {
    'video': 1,
    'audio': 2,
    'image': 3,
    'document': 4,
    'binary': 5,
    'model': 6
}
# 9/21/2020
MOST_USED_TAGS = {
"gaming",
"people & blogs",
"entertainment",
"music",
"pop culture",
"education",
"technology",
"blockchain",
"news",
"funny",
"science & technology",
"learning",
"gameplay",
"news & politics",
"comedy",
"bitcoin",
"beliefs",
"nature",
"art",
"economics",
"film & animation",
"lets play",
"games",
"sports",
"howto & style",
"game",
"cryptocurrency",
"playstation 4",
"automotive",
"crypto",
"mature",
"sony interactive entertainment",
"walkthrough",
"tutorial",
"video game",
"weapons",
"playthrough",
"pc",
"anime",
"how to",
"btc",
"fun",
"ethereum",
"food",
"travel & events",
"minecraft",
"science",
"autos & vehicles",
"play",
"politics",
"commentary",
"twitch",
"ps4live",
"love",
"ps4",
"nonprofits & activism",
"ps4share",
"fortnite",
"xbox",
"porn",
"video games",
"trump",
"español",
"money",
"music video",
"nintendo",
"movie",
"coronavirus",
"donald trump",
"steam",
"trailer",
"android",
"podcast",
"xbox one",
"survival",
"audio",
"linux",
"travel",
"funny moments",
"litecoin",
"animation",
"gamer",
"lets",
"playstation",
"bitcoin news",
"history",
"xxx",
"fox news",
"dance",
"god",
"adventure",
"liberal",
"2020",
"horror",
"government",
"freedom",
"reaction",
"meme",
"photography",
"truth",
"health",
"lbry",
"family",
"online",
"eth",
"crypto news",
"diy",
"trading",
"gold",
"memes",
"world",
"space",
"lol",
"covid-19",
"rpg",
"humor",
"democrat",
"film",
"call of duty",
"tech",
"religion",
"conspiracy",
"rap",
"cnn",
"hangoutsonair",
"unboxing",
"fiction",
"conservative",
"cars",
"hoa",
"epic",
"programming",
"progressive",
"cryptocurrency news",
"classical",
"jesus",
"movies",
"book",
"ps3",
"republican",
"fitness",
"books",
"multiplayer",
"animals",
"pokemon",
"bitcoin price",
"facebook",
"sharefactory",
"criptomonedas",
"cod",
"bible",
"business",
"stream",
"comics",
"how",
"fail",
"nsfw",
"new music",
"satire",
"pets & animals",
"computer",
"classical music",
"indie",
"musica",
"msnbc",
"fps",
"mod",
"sport",
"sony",
"ripple",
"auto",
"rock",
"marvel",
"complete",
"mining",
"political",
"mobile",
"pubg",
"hip hop",
"flat earth",
"xbox 360",
"reviews",
"vlogging",
"latest news",
"hack",
"tarot",
"iphone",
"media",
"cute",
"christian",
"free speech",
"trap",
"war",
"remix",
"ios",
"xrp",
"spirituality",
"song",
"league of legends",
"cat"
}
# Tags that mark content as mature/NSFW for filtering purposes.
MATURE_TAGS = [
    'nsfw', 'porn', 'xxx', 'mature', 'adult', 'sex'
]
# Single-pass character translation: space/hyphen -> "_", "&" -> "and".
_TAG_TRANSLATION = str.maketrans({" ": "_", "&": "and", "-": "_"})

def normalize_tag(tag):
    """Return *tag* rewritten into its normalized, index-friendly form."""
    return tag.translate(_TAG_TRANSLATION)
# Map each frequently-used tag to its normalized (indexable) form.
COMMON_TAGS = {
    tag: normalize_tag(tag) for tag in list(MOST_USED_TAGS)
}
INDEXED_LANGUAGES = [
'none',
'en',
'aa',
'ab',
'ae',
'af',
'ak',
'am',
'an',
'ar',
'as',
'av',
'ay',
'az',
'ba',
'be',
'bg',
'bh',
'bi',
'bm',
'bn',
'bo',
'br',
'bs',
'ca',
'ce',
'ch',
'co',
'cr',
'cs',
'cu',
'cv',
'cy',
'da',
'de',
'dv',
'dz',
'ee',
'el',
'eo',
'es',
'et',
'eu',
'fa',
'ff',
'fi',
'fj',
'fo',
'fr',
'fy',
'ga',
'gd',
'gl',
'gn',
'gu',
'gv',
'ha',
'he',
'hi',
'ho',
'hr',
'ht',
'hu',
'hy',
'hz',
'ia',
'id',
'ie',
'ig',
'ii',
'ik',
'io',
'is',
'it',
'iu',
'ja',
'jv',
'ka',
'kg',
'ki',
'kj',
'kk',
'kl',
'km',
'kn',
'ko',
'kr',
'ks',
'ku',
'kv',
'kw',
'ky',
'la',
'lb',
'lg',
'li',
'ln',
'lo',
'lt',
'lu',
'lv',
'mg',
'mh',
'mi',
'mk',
'ml',
'mn',
'mr',
'ms',
'mt',
'my',
'na',
'nb',
'nd',
'ne',
'ng',
'nl',
'nn',
'no',
'nr',
'nv',
'ny',
'oc',
'oj',
'om',
'or',
'os',
'pa',
'pi',
'pl',
'ps',
'pt',
'qu',
'rm',
'rn',
'ro',
'ru',
'rw',
'sa',
'sc',
'sd',
'se',
'sg',
'si',
'sk',
'sl',
'sm',
'sn',
'so',
'sq',
'sr',
'ss',
'st',
'su',
'sv',
'sw',
'ta',
'te',
'tg',
'th',
'ti',
'tk',
'tl',
'tn',
'to',
'tr',
'ts',
'tt',
'tw',
'ty',
'ug',
'uk',
'ur',
'uz',
've',
'vi',
'vo',
'wa',
'wo',
'xh',
'yi',
'yo',
'za',
'zh',
'zu'
]
|
class Node:
    """Doubly linked list node holding a tag and a valid bit
    (cache-line style bookkeeping)."""

    def __init__(self, tag, valid_bit=1, next=None, previous=None, index=None):
        self.tag = tag
        self.valid_bit = valid_bit
        self.next = next
        self.previous = previous
        # Fix: get_index() read self.index, but it was never assigned,
        # so every call raised AttributeError. Store it (None when the
        # caller does not supply one); the new parameter defaults, so
        # existing call sites are unchanged.
        self.index = index

    def set_next_pointer(self, next):
        """Set the forward link."""
        self.next = next

    def set_previous_pointer(self, previous):
        """Set the backward link."""
        self.previous = previous

    def get_tag(self):
        """Return the stored tag."""
        return self.tag

    def get_index(self):
        """Return the stored index (None if not provided)."""
        return self.index

    def get_next_pointer(self):
        """Return the forward link."""
        return self.next

    def get_previous_pointer(self):
        """Return the backward link."""
        return self.previous
|
# Do not hard code credentials
# SECURITY(review): these literal AWS keys are documentation-style
# placeholders ("EXAMPLE"); real credentials must come from the
# environment, shared config, or an IAM role — never source code.
client = boto3.client(
    's3',
    # Hard coded strings as credentials, not recommended.
    aws_access_key_id='AKIAIO5FODNN7EXAMPLE',
    aws_secret_access_key='ABCDEF+c2L7yXeGvUyrPgYsDnWRRC1AYEXAMPLE'
)
# adding another line
|
#!/usr/bin/env python3
#
## @file
# checkout_humble.py
#
# Copyright (c) 2020, Intel Corporation. All rights reserved.<BR>
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
NO_COMBO = 'A combination named: {} does not exist in the workspace manifest'
|
cuda_code = '''
extern "C" __global__ void my_kernel(float* input_domain, int input_domain_n, int* layer_sizes, int layer_number, float* full_weights,
float* full_biases, float* results_cuda, int max_layer_size, int* activations) {
// Calculate all the bounds, node by node, for each layer. 'new_layer_values' is the current working layer, old layer is the prevoius (first step old layer is the input layer)
int thread_id = blockDim.x * blockIdx.x + threadIdx.x;
if (thread_id >= input_domain_n) return;
int area_start = thread_id * layer_sizes[0] * 2;
float* old_layer_values = new float[max_layer_size * 2]();
float* new_layer_values = new float[max_layer_size * 2]();
// Step 1: copy inputs in 'old_layer_values' ('new_layer_values' is the first hidden layer)
for (int i = 0; i < (2 * layer_sizes[0]); i++) old_layer_values[i] = input_domain[area_start + i];
// Step 2: starting the propagation cycle
int bias_index = 0;
int weights_index = 0;
for (int layer_idx = 0; layer_idx < layer_number - 1; layer_idx ++){
int old_layer_size = layer_sizes[layer_idx];
int new_layer_size = layer_sizes[layer_idx + 1];
for (int new_node_idx = 0; new_node_idx < new_layer_size*2; new_node_idx += 2){
for (int old_node_idx = 0; old_node_idx < old_layer_size*2; old_node_idx += 2){
if(full_weights[weights_index] > 0) {
new_layer_values[new_node_idx] += (old_layer_values[old_node_idx] * full_weights[weights_index]); //lower bound
new_layer_values[new_node_idx + 1] += (old_layer_values[old_node_idx + 1] * full_weights[weights_index]); //upper bound
} else {
new_layer_values[new_node_idx] += (old_layer_values[old_node_idx + 1] * full_weights[weights_index]); //lower bound
new_layer_values[new_node_idx + 1] += (old_layer_values[old_node_idx] * full_weights[weights_index]); //upper bound
}
weights_index += 1;
}
// Adding bias for each layer (including the output)
new_layer_values[new_node_idx] += full_biases[bias_index];
new_layer_values[new_node_idx+1] += full_biases[bias_index];
bias_index += 1;
// Application of the activation function
// ReLU
if (activations[layer_idx] == 1){
if (new_layer_values[new_node_idx] < 0) new_layer_values[new_node_idx] = 0;
if (new_layer_values[new_node_idx+1] < 0) new_layer_values[new_node_idx+1] = 0;
// TanH
} else if (activations[layer_idx] == 2){
new_layer_values[new_node_idx] = ( 1 - pow(2.71828f, -2*new_layer_values[new_node_idx]) ) / ( 1 + pow(2.71828f, -2*new_layer_values[new_node_idx]) );
new_layer_values[new_node_idx+1] = ( 1 - pow(2.71828f, -2*new_layer_values[new_node_idx+1]) ) / ( 1 + pow(2.71828f, -2*new_layer_values[new_node_idx+1]) );
// Sigmoid
} else if (activations[layer_idx] == 3){
new_layer_values[new_node_idx] = 1 / ( 1 + pow(2.71828f, -new_layer_values[new_node_idx]) );
new_layer_values[new_node_idx+1] = 1 / ( 1 + pow(2.71828f, -new_layer_values[new_node_idx+1]) );
}
}
for (int i = 0; i < max_layer_size * 2; i++) old_layer_values[i] = new_layer_values[i];
for (int i = 0; i < max_layer_size * 2; i++) new_layer_values[i] = 0;
}
// Step 3: copy the local output layer in the global 'results_cuda' array
int results_start = thread_id * layer_sizes[layer_number - 1] * 2;
for (int i=0; i < layer_sizes[layer_number - 1] * 2; i++) results_cuda[results_start + i] = old_layer_values[i];
// Free memory
delete[] old_layer_values;
delete[] new_layer_values;
}
''' |
class Point:
    """A 2-D point; coordinates are coerced to float on construction."""

    def __init__(self, x, y):
        self.x = float(x)
        self.y = float(y)

    def __str__(self):
        # Display both coordinates rounded to one decimal place.
        rounded_x = str(round(self.x, 1))
        rounded_y = str(round(self.y, 1))
        return "(" + rounded_x + ', ' + rounded_y + ")"
class Triangle:
    """A triangle described by a sequence of three Point vertices."""

    def __init__(self, points):
        self.points = points

    def get_centroid(self):
        """Return the centroid (average of the vertices) as a new Point."""
        total_x = sum(vertex.x for vertex in self.points)
        total_y = sum(vertex.y for vertex in self.points)
        return Point(total_x / 3, total_y / 3)
def main():
    """Read six whitespace-separated coordinates (x1 y1 x2 y2 x3 y3)
    from stdin and print the centroid of the resulting triangle."""
    tokens = input().split()
    vertices = [Point(tokens[k], tokens[k + 1]) for k in (0, 2, 4)]
    print(Triangle(vertices).get_centroid())

if __name__ == '__main__':
    main()
def func1():
"""
# Model Management
## Introduction
Dataloop's Model Management is here to provide Machine Learning engineers the ability to manage their research and production process.
We want to introduce Dataloop entities to create, manage, view, compare, restore, and deploy training sessions.
Our Model Management gives a separation between Model code, weights and configuration, and the data.
in Offline mode, there is no need to do any code integration with Dataloop - just create a model and snapshots entities and you can start managing your work on the platform create reproducible training:
- same configurations and dataset to reproduce the training
- view project/org models and snapshots in the platform
- view training metrics and results
- compare experiments
NOTE: all functions from the codebase can be used in FaaS and pipelines only with custom functions! User must create a FaaS and expose those functions any way he’d like
Online Mode:
In the online mode, you can train and deploy your models easily anywhere on the platform.
All you need to do is create a Model Adapter class and expose some functions to build an API between Dataloop and your model.
After that, you can easily add model blocks to pipelines, add UI slots in the studio, one-button-training etc
### Model and Snapshot entities
#### Model
The model entity is basically the algorithm, the architecture of the model, e.g Yolov5, Inception, SVM, etc.
- In online it should contain the Model Adapter to create a Dataloop API
#### Snapshot
Using the Model (architecture), Dataset and Ontology (data and labels) and configuration (a dictionary) we can create a Snapshot of a training process.
The Snapshot contains the weights and any other artifact needed to load the trained model
a snapshot can be used as a parent to another snapshot - to start for that point (fine-tune and transfer learning)
#### Buckets and Codebase
1. local
2. item
3. git
4. GCS
### The Model Adapter
The Model Adapter is a python class to create a single API between Dataloop's platform and your Model
1. Train
2. Predict
3. load/save model weights
4. annotation conversion if needed
We enable two modes of work:
in Offline mode, everything is local, you don't have to upload any model code or any weights to platform, which causes the platform integration to be minimal.
For example, you cannot use the Model Management components in a pipeline, can easily create a button interface with your model's inference and more.
In Online mode - once you build an Adapter, our platform can interact with your model and trained snapshots and you can connect buttons and slots inside the platform to create, train, inference etc and connect the model and any train snapshot to the UI or to add to a pipeline
"""
|
# Lesson script: global vs. local scope, and the `global` / `nonlocal`
# keywords. (Comments translated to English; the printed lesson strings
# are left untouched.)
global_var = 10

def func_exemple(local_var_1, local_var_2):
    # Reading a global from inside a function works without any keyword.
    print(local_var_1, local_var_2, global_var)

func_exemple(11, 12)

# Next example: we try to change global_var from inside a function.
print(' След пример, мы пытаемся изменить значение перем Global_var в нутри перемен ')

def func_exemple_1(local_var_1, local_var_2):
    # This creates a NEW local name; the module-level global_var is untouched.
    global_var = 20
    print(local_var_1, local_var_2, global_var, id(global_var))

func_exemple_1(11, 12)
# Did global_var actually change? Let's check.
print(' Поменялось ли значение прем global_var??? Проверим')
print(global_var, id(global_var))
# It did not change: the global (10) and the local (20) are unrelated names.
print('! Не поменялось, потому что Global_Var = 10 (ГЛОБАЛЬНАЯ) и Global_var = 20 (ЛОКАЛЬНАЯ) не имеют НИ ЧЕГО ОБЩЕГО !! ')
print()

# The `global` keyword: rebinds the module-level name from inside a function.
print(' Служебное слово GLOBAL. Для спец. манипупуляц. с переменными ')

def func_exemple_1(local_var_1, local_var_2):
    global global_var
    global_var = 20
    print(local_var_1, local_var_2, global_var, id(global_var))

func_exemple_1(11, 12)
print(global_var, id(global_var))
print()

# There is also `nonlocal`: it rebinds a name from the enclosing
# (non-global) function scope — the classic closure counter.
print(' Так же есть служебное слово - nonlocal - . C помощью nonlocal можно делать переопределение внутренней области. ')

def counter():
    num = 0
    def plus_one():
        nonlocal num
        num += 1
        return num
    return plus_one

count = counter()
# Printing `count` without parentheses shows the function object itself;
# each call to count() increments and returns the captured counter.
print(count)
print('*')
print(count())
print('* * ')
print(count())
class TrackableObject:
    """Per-object tracking state: identity, class id, and the list of
    per-frame observation records ("oids")."""

    def __init__(self, objectID, centroid_frame_timestamp, detection_class_id,
                 centroid, boxoid, bbox_rw_coords):
        self.objectID = objectID
        # Per-frame observation records; one dict appended per sighting.
        self.oids = []
        # Kept for interface compatibility; history lives in self.oids.
        self.centroids = []
        self.boxoids = []
        self.bbox_rw_coords = []
        self.detection_class_id = detection_class_id
        # Whether this object has already been counted.
        self.counted = False
        # Whether the object left the field of view (track complete).
        self.complete = False
        # Record the first observation.
        self.append_boxoid(boxoid)
        self.append_centroid(centroid)
        self.append_oids(centroid_frame_timestamp, detection_class_id,
                         centroid, boxoid, bbox_rw_coords)

    def append_centroid(self, centroid):
        """Intentionally a no-op: centroids are stored inside oid records."""
        pass

    def append_boxoid(self, boxoid):
        """Intentionally a no-op: boxoids are stored inside oid records."""
        pass

    def append_oids(self, centroid_frame_timestamp, detection_class_id,
                    centroid, boxoid, bbox_rw_coords):
        """Append one observation record; a positive class id is sticky —
        later non-positive ids (undetected frames / -1) do not overwrite it."""
        keep_existing_class = (self.detection_class_id > 0
                               and detection_class_id <= 0)
        if not keep_existing_class:
            self.detection_class_id = detection_class_id
        self.oids.append({
            "frame_timestamp": centroid_frame_timestamp,
            "centroid": list(centroid),
            "boxoid": list(boxoid),
            "bbox_rw_coords": bbox_rw_coords,
        })
|
"""
STATEMENT
Given a binary tree, find its minimum depth.
The minimum depth is the number of nodes along the shortest path
from the root node down to the nearest leaf node.
CLARIFICATIONS
- The root is not leaf for trees with levels more than one? Yes.
EXAMPLES
(needs to be drawn)
COMMENTS
- A recursive solution checking the existence left and right subtree should work.
"""
def minDepth(root):
    """
    :type root: TreeNode
    :rtype: int

    Number of nodes along the shortest root-to-leaf path; 0 for an
    empty tree. A node with only one child must recurse into that
    child — the missing side is not a leaf.
    """
    if root is None:
        return 0
    if root.left and root.right:
        return 1 + min(minDepth(root.left), minDepth(root.right))
    # At most one child is present (possibly none).
    return 1 + minDepth(root.left or root.right)
#---------------------------------------
# Selection Sort
#---------------------------------------
def selection_sort(A):
    """Sort list A in place (ascending) by repeatedly swapping the
    minimum of the unsorted suffix into position."""
    size = len(A)
    for front in range(size - 1):
        # Index of the smallest element in A[front:] (first occurrence).
        smallest = min(range(front, size), key=A.__getitem__)
        if smallest != front:
            A[front], A[smallest] = A[smallest], A[front]

A = [5,9,1,2,4,8,6,3,7]
print(A)
selection_sort(A)
print(A)
# Combine two category lists into one and show the result.
games = ["chess", "soccer", "tennis"]
foods = ["chicken", "milk", "fruits"]
favorites = [*games, *foods]
print(favorites)
|
# Given a matrix of m x n elements (m rows, n columns), return all elements of the matrix in spiral order.
# For example,
# Given the following matrix:
# [
# [ 1, 2, 3 ],
# [ 4, 5, 6 ],
# [ 7, 8, 9 ]
# ]
# You should return [1,2,3,6,9,8,7,4,5].
class Solution:
    # @param {integer[][]} matrix
    # @return {integer[]}
    def spiralOrder(self, matrix):
        """Return all elements of a rectangular matrix in clockwise
        spiral order, peeling one boundary layer at a time."""
        if not matrix or not matrix[0]:
            return []
        out = []
        top, bottom = 0, len(matrix) - 1
        left, right = 0, len(matrix[0]) - 1
        while top <= bottom and left <= right:
            # Top row, left to right.
            for col in range(left, right + 1):
                out.append(matrix[top][col])
            top += 1
            # Right column, top to bottom.
            for row in range(top, bottom + 1):
                out.append(matrix[row][right])
            right -= 1
            # Bottom row, right to left (only if a row remains).
            if top <= bottom:
                for col in range(right, left - 1, -1):
                    out.append(matrix[bottom][col])
                bottom -= 1
            # Left column, bottom to top (only if a column remains).
            if left <= right:
                for row in range(bottom, top - 1, -1):
                    out.append(matrix[row][left])
                left += 1
        return out
"""
[2016-05-04] Challenge #265 [Hard] Permutations with repeat
https://www.reddit.com/r/dailyprogrammer/comments/4i3xrm/20160504_challenge_265_hard_permutations_with/
The number of permutations of a list that includes repeats is `(factorial of list length) / (product of factorials of
each item's repeat frequency)`
for the list `0 0 1 2` the permutations in order are
0 0 1 2
0 0 2 1
0 1 0 2
0 1 2 0
0 2 0 1
0 2 1 0
1 0 0 2
1 0 2 0
1 2 0 0
2 0 0 1
2 0 1 0
2 1 0 0
#1. Calculate permutation number of list that may include repeats
The permutation number is similar to Monday and Wednesday's challenge. But only wednesday's approach of calculating it
without generating the full list will work (fast) for the longer inputs. The input varies from previous ones in that
you are provided a list rather than a number to account for possible repeats. If there are no repeats, then the answer
is the same as the part 2 (wednesday) challenge.
**input:**
5 4 3 2 1 0
2 1 0 0
5 0 1 2 5 0 1 2 0 0 1 1 5 4 3 2 1 0
8 8 8 8 8 8 8 8 8 7 7 7 6 5 0 1 2 5 0 1 2 0 0 1 1 5 4 3 2 1 0 6 7 8
**output:** (0 based indexes)
719
11
10577286119
3269605362042919527837624
# 2. retrieve list from permutation number and sorted list
input is in format: permutation_number, sorted list to permute
output format is above part 1 input rows.
**input:**
719, 0 1 2 3 4 5
11, 0 0 1 2
10577286119, 0 0 0 0 0 1 1 1 1 1 2 2 2 3 4 5 5 5
3269605362042919527837624, 0 0 0 0 0 1 1 1 1 1 2 2 2 3 4 5 5 5 6 6 7 7 7 7 8 8 8 8 8 8 8 8 8 8
# bonus
use the above function and wednesday's combination number (optional) to compress/encode a list into a fixed set of
numbers (with enough information to decode it)
**input:**
hello, heely owler world!
You might wish to convert to ascii, then calculate the combination number for the unique ascii codes, then calculate
the permutation number with each letter replaced by contiguous indexes.
"""
def main():
    """Entry point for the challenge; the solution is not implemented yet."""
    # TODO: implement parts 1, 2 and the bonus described in the module
    # docstring at the top of this file.
    pass

if __name__ == "__main__":
    main()
def Sequential_Search(elements, target=None):
    """Linearly scan *elements* for *target*.

    Returns (found, pos): found is True and pos is the index of the
    first occurrence when target is present; otherwise found is False
    and pos equals len(elements) (the number of comparisons made).

    Fix: the original never terminated — its inner ``while`` executed
    ``continue`` when j == i without ever advancing ``pos``, looping
    forever on the very first pass. It also had no search target; one
    is now accepted (defaulted, so the existing call still works).
    """
    pos = 0
    found = False
    while pos < len(elements) and not found:
        if elements[pos] == target:
            found = True
        else:
            pos = pos + 1
    return found, pos
# Demo: run the search over a sample list and print the (found, pos) result.
elements = [1,2,3,4,5,6,6,7,8,9,10]
print(Sequential_Search(elements))
def palcheck(s):
    """Return True if *s* reads the same forwards and backwards."""
    return s == s[::-1]
def cod(s):
    """Try to split *s* into three palindromic pieces.

    On success prints the three pieces and returns 1; otherwise prints
    "Impossible" and returns 0.

    NOTE(review): if no palindromic prefix of length >= 2 exists, `k`
    (and later `t1`) is never bound and the function raises
    NameError/UnboundLocalError — confirm inputs are guaranteed to have
    one, or add a guard. Indentation of the final check was ambiguous in
    the source; it is assumed nested under the prefix match.
    """
    l = len(s)
    # Find the shortest palindromic prefix (length >= 2); k is the rest.
    for i in range(2, l):
        if palcheck(s[:i]):
            t1 = s[:i]
            k = s[i:]
            break
    t = len(k)
    # Find a palindromic prefix of the remainder; the leftover must
    # itself be a palindrome for the 3-way split to succeed.
    for j in range(2, t):
        if palcheck(k[:j]):
            t2 = k[:j]
            k2 = k[j:]
            if palcheck(k2) == True:
                print(t1, t2, k2)
                return 1
    print("Impossible")
    return 0
us = input("Input String\n")
# Only process strings within the allowed length range.
if 1 <= len(us) <= 1000:
    cod(us)
else:
    print("Not Under Limitation")
# pylint: skip-file
# pylint: disable=too-many-instance-attributes
class Subnetwork(GCPResource):
    '''Object to represent a gcp subnetwork'''
    # Deployment-manager resource type string for a subnetwork.
    resource_type = "compute.v1.subnetwork"

    # pylint: disable=too-many-arguments
    def __init__(self,
                 rname,
                 project,
                 zone,
                 ip_cidr_range,
                 region,
                 network,
                ):
        '''constructor for gcp resource

        rname         -- resource name
        project       -- GCP project the subnetwork belongs to
        zone          -- zone passed through to the base resource
        ip_cidr_range -- CIDR range for the subnetwork, e.g. "10.0.0.0/24"
        network       -- name of the network resource; stored as a
                         deployment-manager reference to its selfLink
        region        -- GCP region the subnetwork is created in
        '''
        super(Subnetwork, self).__init__(rname,
                                         Subnetwork.resource_type,
                                         project,
                                         zone)
        self._ip_cidr_range = ip_cidr_range
        self._region = region
        # $(ref.<name>.selfLink) makes deployment manager resolve the
        # created network's URL at deploy time.
        self._network = '$(ref.%s.selfLink)' % network

    @property
    def ip_cidr_range(self):
        '''property for resource ip_cidr_range'''
        return self._ip_cidr_range

    @property
    def region(self):
        '''property for resource region'''
        return self._region

    @property
    def network(self):
        '''property for resource network (a selfLink reference string)'''
        return self._network

    def to_resource(self):
        """ return the resource representation"""
        return {'name': self.name,
                'type': Subnetwork.resource_type,
                'properties': {'ipCidrRange': self.ip_cidr_range,
                               'network': self.network,
                               'region': self.region,
                              }
               }
|
class Solution:
    def mostCommonWord(self, paragraph: str, banned: List[str]) -> str:
        """Return the most frequent non-banned word in *paragraph*.

        Words are the maximal runs of characters between the delimiters
        "!? ';.," and are lower-cased.  Ties are broken in favour of the
        word that reached the winning count first.
        """
        # Tokenise: lower-case every non-delimiter character and turn
        # delimiters into spaces; split only on spaces and drop empties
        # (other whitespace is treated as an ordinary character, exactly
        # as before).
        cleaned = ''.join(' ' if ch in "!? ';.," else ch.lower()
                          for ch in paragraph)
        words = [w for w in cleaned.split(' ') if w]
        blocked = set(banned)
        counts = {}
        best_word, best_count = '', 0
        for word in words:
            if word in blocked:
                continue
            counts[word] = counts.get(word, 0) + 1
            # Strict '>' keeps the earliest word to reach a given count.
            if counts[word] > best_count:
                best_count = counts[word]
                best_word = word
        return best_word
|
"""
ShellSort is a variation of insertion sort.
Sometimes called as "diminishing increment sort".
How does ShellSort improve on the insertion sort algorithm?
By breaking the original list into a number of sub-lists, each sublist is sorted using the insertion sort.
It will move the items nearer to its original index.
Algorithms:
1. Take the list of numbers
2. Find out the gap/incrementor
3. Create the sub-list based on gap and sort them using insertion sort algorithm
4. Reduce gap and repeat step 3.
5. Stop when gap is 0.
"""
def shell_sort(list1):
    """Sort *list1* in place using Shell's method (gapped insertion sort).

    The gap starts at ``len(list1) // 2`` and halves each pass; the final
    pass (gap of 1) is a plain insertion sort over an almost-sorted list.

    Bug fix: the inner loop iterated over ``range(gap, len(index))``
    (a NameError) instead of ``range(gap, len(list1))``.
    """
    gap = len(list1) // 2
    while gap > 0:
        # Gapped insertion sort: every slice list1[k::gap] ends up sorted.
        for index in range(gap, len(list1)):
            current_element = list1[index]
            pos = index
            while pos >= gap and current_element < list1[pos - gap]:
                list1[pos] = list1[pos - gap]
                pos = pos - gap
            list1[pos] = current_element
        gap = gap // 2
|
class Solution:
    def rob(self, nums: List[int]) -> int:
        """Maximum sum over *nums* with no two adjacent elements taken.

        Rolling two-variable form of the house-robber DP: ``prev1`` is
        the best total up to the previous house and ``prev2`` the best
        up to the one before that, so no dp list is needed.
        """
        prev2, prev1 = 0, 0
        for value in nums:
            prev2, prev1 = prev1, max(prev1, prev2 + value)
        return prev1
nums = [2, 1, 1, 2, 4, 6]
|
def foo(x, y):
    """Print a coarse size category for x + y, then the marker "over"."""
    total = x + y
    if total > 10:
        label = "s>10"
    elif total > 5:
        label = "s>5"
    else:
        label = "less"
    print(label)
    print("over")
def bar():
    """Same categorisation as foo, applied to the fixed sum 1 + 2."""
    total = 1 + 2
    if total > 10:
        label = "s>10"
    elif total > 5:
        label = "s>5"
    else:
        label = "less"
    print(label)
    print("over")
|
#coding:utf-8
'''
filename:arabic2roman.py
chap:6
subject:6
conditions:translate Arabic numerals to Roman numerals
solution:class Arabic2Roman
'''
class Arabic2Roman:
    """Convert an Arabic integer (1..3999) to its Roman-numeral string."""
    # Single-symbol values.
    trans = {1:'I',5:'V',10:'X',50:'L',100:'C',500:'D',1000:'M'}
    # Per-digit template: occurrence counts for the fixed symbol order
    # 'I(a) X(b) V(c) I(d)' scaled by the current power of ten,
    # e.g. 4 -> I then V ("IV"), 9 -> I then X ("IX").
    trans_unit = {1:(0,0,0,1),2:(0,0,0,2),3:(0,0,0,3),
                  4:(1,0,1,0),5:(0,0,1,0),
                  6:(0,0,1,1),7:(0,0,1,2),8:(0,0,1,3),
                  9:(1,1,0,0)}
    def __init__(self, digit):
        """digit: integer in 1..3999; the Roman form is computed eagerly."""
        self.digit = digit
        self.roman = self.get_roman()
    def __str__(self):
        return f'{self.digit:4} : {self.roman}'
    def get_roman(self):
        """Return the Roman numeral; raise ValueError outside 1..3999."""
        if self.digit >= 4000 or self.digit <= 0:
            # Fixed the original message's typo ("moust") and wording.
            raise ValueError('Input must be less than 4000 and greater than 0')
        lst = []
        n = self.digit
        for i in (1000, 100, 10, 1):
            q = n // i   # decimal digit at this power of ten
            n = n % i    # remainder for the lower powers
            lst.append(self.get_str(q, i))
        return ''.join(lst)
    def get_str(self, q, i):
        """Roman text for digit q (0..9) at scale i (1, 10, 100 or 1000)."""
        rst = ''
        if not q:
            # Zero contributes nothing.
            return rst
        unit = self.trans_unit[q]
        # Emit symbols in the fixed order I, X, V, I (each scaled by i).
        for s, u in zip((1, 10, 5, 1), unit):
            rst += self.trans.get(s * i, '') * u
        return rst
if __name__ == '__main__':
    # Demo: print the first 119 conversions, then convert user input.
    for i in range(1,120):
        print(Arabic2Roman(i))
    # NOTE(review): no exit path or input validation -- a non-integer
    # raises from int(), an out-of-range value raises from get_roman();
    # the loop only ends via an exception or Ctrl-C.
    while True:
        digit = int(input('Enter an integer :'))
        print(Arabic2Roman(digit))
|
# AARD: function: __main__
# AARD: #1:1 -> #1:2 :: defs: %1 / uses: [@1 5:4-5:10] { call }
# AARD: #1:2 -> #1:3, #1:4 :: defs: / uses: %1 [@1 5:4-5:10]
if test():  # NOTE(review): 'test' is undefined here; this file looks like fixture input for the AARD analysis comments
    # AARD: #1:3 -> #1:4 :: defs: %2 / uses: [@1 7:5-7:12]
    foo = 3  # assigned but never read; presumably exercises def/use tracking
# AARD: #1:4 -> :: defs: %3 / uses: [@1 10:1-10:8] { call }
print()
# AARD: @1 = if2.py
class Node:
    """A node in a general (n-ary) tree with consistent parent/child links."""
    def __init__(self, value):
        self._value = value
        self._parent = None
        self._children = []
    @property
    def value(self):
        """Payload stored in this node."""
        return self._value
    @property
    def children(self):
        """List of direct child nodes."""
        return self._children
    @property
    def parent(self):
        """Parent node, or None for a root."""
        return self._parent
    @parent.setter
    def parent(self, node):
        """Re-parent this node, keeping both sides' links consistent."""
        if self._parent == node:
            return
        if self._parent is not None:
            self._parent.remove_child(self)
        self._parent = node
        if node is not None:
            node.add_child(self)
    def add_child(self, node):
        """Attach *node* as a child (no-op if already attached)."""
        if node not in self._children:
            self._children.append(node)
            node.parent = self
    def remove_child(self, node):
        """Detach *node* (no-op if it is not a child)."""
        if node in self._children:
            self._children.remove(node)
            node.parent = None
    def depth_search(self, value):
        """Depth-first search of this subtree; return the node or None."""
        if self._value == value:
            return self
        for child in self._children:
            node = child.depth_search(value)
            if node is not None:
                return node
        return None
    def breadth_search(self, value):
        """Breadth-first search of this subtree; return the node or None.

        Bug fix: the queue previously started empty, so the loop body
        never executed and every search returned None.  It must be
        seeded with the subtree root (self).
        """
        queue = [self]
        while queue:
            node = queue.pop(0)  # FIFO; a collections.deque would make this O(1)
            if node._value == value:
                return node
            queue.extend(node._children)
        return None
# node1 = Node("root1")
# node2 = Node("root2")
# node3 = Node("root3")
# node3.parent = node1
# node3.parent = node2
# print(node1.children)
# print(node2.children)
# node1 = Node("root1")
# node2 = Node("root2")
# node3 = Node("root3")
# node3.parent = node1
# node3.parent = node2
# print(node1.children)
# print(node2.children) |
class Env:
    """Minimal environment stub (looks like a test double): tracks a single
    'was a wav played' flag and no-ops everything else."""
    def __init__(self):
        # Playback flag, set by wavWasPlayed() and cleared by resetWav().
        self.played = False
    def getTime(self):
        # Stub: returns nothing; a real implementation would supply a clock.
        pass
    def playWavFile(self, file):
        # Stub: intentionally ignores *file*.
        pass
    def wavWasPlayed(self):
        # NOTE(review): despite the query-like name, this SETS the flag
        # (playWavFile never does) -- confirm the naming is intended.
        self.played = True
    def resetWav(self):
        self.played = False
|
# OpenAPI/JSON-schema fragment: a single {name, value} algorithm parameter;
# the value may be a number or a string.
algorithm_parameter = {
    'type': 'object',
    'required': ['name', 'value'],
    'properties': {
        'name': {
            'description': 'Name of algorithm parameter',
            'type': 'string',
        },
        'value': {
            'description': 'Value of algorithm parameter',
            'oneOf': [
                {'type': 'number'},
                {'type': 'string'},
            ],
        },
    },
}
# Schema for a launch request: algorithm name plus either a media query
# string or an explicit list of media IDs, with optional extra parameters.
algorithm_launch_spec = {
    'type': 'object',
    'required': ['algorithm_name'],
    'properties': {
        'algorithm_name': {
            'description': 'Name of the algorithm to execute.',
            'type': 'string',
        },
        'media_query': {
            'description': 'Query string used to filter media IDs. If '
                           'supplied, media_ids will be ignored.',
            'type': 'string',
        },
        'media_ids': {
            'description': 'List of media IDs. Must supply media_query '
                           'or media_ids.',
            'type': 'array',
            'items': {'type': 'integer'},
        },
        'extra_params': {
            'description': 'Extra parameters to pass into the algorithm',
            'type': 'array',
            'items': {'$ref': '#/components/schemas/AlgorithmParameter'},
        },
    },
}
# Schema for the launch response: a message, per-job UUIDs, and the
# UUID of the job group.
algorithm_launch = {
    'type': 'object',
    'properties': {
        'message': {
            'type': 'string',
            'description': 'Message indicating successful launch.',
        },
        'uid': {
            'type': 'array',
            'description': 'A list of uuid strings identifying each job '
                           'started.',
            'items': {'type': 'string'},
        },
        'gid': {
            'type': 'string',
            'description': 'A uuid string identifying the group of jobs '
                           'started.',
        },
    },
}
|
# -*- coding: UTF-8 -*-
"""
此脚本用于实现计数器
"""
def word_count(data):
    """Tally how often each element occurs in *data*.

    Parameters
    ----------
    data : list
        Elements to count (any hashable items).

    Returns
    -------
    dict
        Maps each distinct element to its number of occurrences.
    """
    counts = {}
    for item in data:
        if item in counts:
            counts[item] += 1
        else:
            counts[item] = 1
    return counts
|
class solve_day(object):
    """Advent of Code 2016 day 2: decode keypad codes from U/D/L/R moves."""
    # NOTE(review): the input file is read at class-definition (import) time,
    # so importing this module fails if inputs/day02.txt is absent.
    with open('inputs/day02.txt', 'r') as f:
        data = f.readlines()
    def part1(self):
        """Standard 3x3 keypad; position is [row, col], clamped to 0..2."""
        grid = [[1,2,3],
                [4,5,6],
                [7,8,9]]
        code = []
        ## locations
        # 1 - grid[0][0]
        # 2 - grid[0][1]
        # 3 - grid[0][2]
        # 4 - grid[1][0]
        # 5 - grid[1][1]
        # 6 - grid[1][2]
        # 7 - grid[2][0]
        # 8 - grid[2][1]
        # 9 - grid[2][2]
        position = [0,0]
        for i,d in enumerate(self.data):
            d = d.strip()
            if i == 0:
                # set starting position
                position = [1,1]
            for x in d:
                # Each move applies only when the new index stays in 0..2.
                if x == 'U':
                    position[0] += -1 if position[0]-1 >= 0 and position[0]-1 <= 2 else 0
                if x == 'D':
                    position[0] += 1 if position[0]+1 >= 0 and position[0]+1 <= 2 else 0
                if x == 'R':
                    position[1] += 1 if position[1]+1 >= 0 and position[1]+1 <= 2 else 0
                if x == 'L':
                    position[1] += -1 if position[1]-1 >= 0 and position[1]-1 <= 2 else 0
            code.append(grid[position[0]][position[1]])
        return ''.join([str(x) for x in code])
    def part2(self):
        """Diamond keypad; which rows are valid depends on the column and
        vice versa (edge columns/rows only exist in the middle band)."""
        grid = [['','',1,'',''],
                ['',2,3,4,''],
                [5,6,7,8,9],
                ['','A','B','C',''],
                ['','','D','','']]
        code = []
        position = [0,0]
        for i,d in enumerate(self.data):
            d = d.strip()
            if i == 0:
                # set starting position
                position = [2,0]
            for x in d:
                # For U/D the valid row range depends on the current column;
                # for R/L the valid column range depends on the current row.
                if x == 'U':
                    if position[1] in [0, 4]:
                        pass
                    if position[1] in [1, 3]:
                        position[0] += -1 if position[0]-1 in [1,2,3] else 0
                    if position[1] in [2]:
                        position[0] += -1 if position[0]-1 in [0,1,2,3,4] else 0
                if x == 'D':
                    if position[1] in [0, 4]:
                        pass
                    if position[1] in [1, 3]:
                        position[0] += 1 if position[0]+1 in [1,2,3] else 0
                    if position[1] in [2]:
                        position[0] += 1 if position[0]+1 in [0,1,2,3,4] else 0
                if x == 'R':
                    if position[0] in [0, 4]:
                        pass
                    if position[0] in [1, 3]:
                        position[1] += 1 if position[1]+1 in [1,2,3] else 0
                    if position[0] in [2]:
                        position[1] += 1 if position[1]+1 in [0,1,2,3,4] else 0
                if x == 'L':
                    if position[0] in [0, 4]:
                        pass
                    if position[0] in [1, 3]:
                        position[1] += -1 if position[1]-1 in [1,2,3] else 0
                    if position[0] in [2]:
                        position[1] += -1 if position[1]-1 in [0,1,2,3,4] else 0
            code.append(grid[position[0]][position[1]])
        return ''.join([str(x) for x in code])
if __name__ == '__main__':
    # Instantiating solve_day requires inputs/day02.txt to exist.
    s = solve_day()
    print(f'Part 1: {s.part1()}')
    print(f'Part 2: {s.part2()}')
class IntegerStack(list):
    """A stack of integers built on list; the top of stack is the last element.

    Fix: the original __init__ did ``stack = [] * 128`` -- multiplying an
    *empty* list, which is still just ``[]`` -- and extended self with it,
    a double no-op.  Python lists need no preallocation, so __init__ now
    simply defers to list's own initialiser.
    """
    def __init__(self):
        super().__init__()
    def depth(self):
        """Number of items on the stack."""
        return len(self)
    def tos(self):
        """Top of stack (raises IndexError when empty)."""
        return self[-1]
    def push(self, v):
        """Push *v* onto the stack."""
        self.append(v)
    def dup(self):
        """Duplicate the top item."""
        self.append(self[-1])
    def drop(self):
        """Discard the top item."""
        self.pop()
    def swap(self):
        """Exchange the two topmost items."""
        self[-1], self[-2] = self[-2], self[-1]
|
# Print the first 10 terms of an arithmetic progression (PA), given the
# first term and the common difference (prompts are in Portuguese).
t = int(input('Primeiro termo: '))
r = int(input('Razão da PA: '))
pa = t
cont = 1
while cont <= 10:
    print(f'{pa} -> ', end=' ')
    pa += r
    cont += 1
print('FIM')
|
'''n = int(input('Digite um número de 0 à 9999: '))
print('Unidade: ', n%10)
print('Dezena: ', n%100//10)
print('Centena: ', n%1000//100)
print('Milhar: ', n%10000//1000)'''
# String-indexing variant of the arithmetic version kept above.
# Bug fix: the original assumed exactly four typed digits, so e.g. "7"
# crashed with IndexError on n[3]; zfill(4) left-pads any 0..9999 input
# to four characters (strip() also tolerates stray surrounding spaces).
n = input('Digite um número de 0 à 9999: ').strip().zfill(4)
print('Unidade: ', n[3])
print('Dezena: ', n[2])
print('Centena: ', n[1])
print('Milhar: ', n[0])
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
# Odoo module manifest for the Collaborative Pads (Etherpad) integration.
{
    'name': 'Collaborative Pads',
    'version': '2.0',
    'category': 'Extra Tools',
    'description': """
Adds enhanced support for (Ether)Pad attachments in the web client.
===================================================================
Lets the company customize which Pad installation should be used to link to new
pads (by default, http://etherpad.com/).
""",
    'website': 'https://www.odoo.com/page/notes',
    'depends': ['web', 'base_setup'],
    # Views loaded on installation.
    'data': [
        'views/pad.xml',
        'views/res_config_view.xml',
    ],
    'demo': ['data/pad_demo.xml'],
    'web': True,
    'qweb': ['static/src/xml/pad.xml']
}
|
# Copyright 2019 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
# Rollup rules for Bazel
The Rollup rules run the [rollup.js](https://rollupjs.org/) bundler with Bazel.
## Installation
Add the `@bazel/rollup` npm package to your `devDependencies` in `package.json`. (`rollup` itself should also be included in `devDependencies`, unless you plan on providing it via a custom target.)
### Installing with user-managed dependencies
If you didn't use the `yarn_install` or `npm_install` rule, you'll have to declare a rule in your root `BUILD.bazel` file to execute rollup:
```python
# Create a rollup rule to use in rollup_bundle#rollup_bin
# attribute when using user-managed dependencies
nodejs_binary(
name = "rollup_bin",
entry_point = "//:node_modules/rollup/bin/rollup",
# Point bazel to your node_modules to find the entry point
data = ["//:node_modules"],
)
```
## Usage
The `rollup_bundle` rule is used to invoke Rollup on some JavaScript inputs.
The API docs appear [below](#rollup_bundle).
Typical example:
```python
load("@npm//@bazel/rollup:index.bzl", "rollup_bundle")
rollup_bundle(
name = "bundle",
srcs = ["dependency.js"],
entry_point = "input.js",
config_file = "rollup.config.js",
)
```
Note that the command-line options set by Bazel override what appears in the rollup config file.
This means that typically a single `rollup.config.js` can contain settings for your whole repo,
and multiple `rollup_bundle` rules can share the configuration.
Thus, setting options that Bazel controls will have no effect, e.g.
```javascript
module.exports = {
output: { file: 'this_is_ignored.js' },
}
```
### Output types
You must determine ahead of time whether Rollup will write a single file or a directory.
Rollup's CLI has the same behavior, forcing you to pick `--output.file` or `--output.dir`.
Writing a directory is used when you have dynamic imports which cause code-splitting, or if you
provide multiple entry points. Use the `output_dir` attribute to specify that you want a
directory output.
Each `rollup_bundle` rule produces only one output by running the rollup CLI a single time.
To get multiple output formats, you can wrap the rule with a macro or list comprehension, e.g.
```python
[
rollup_bundle(
name = "bundle.%s" % format,
entry_point = "foo.js",
format = format,
)
for format in [
"cjs",
"umd",
]
]
```
This will produce one output per requested format.
### Stamping
You can stamp the current version control info into the output by writing some code in your rollup config.
See the [stamping documentation](stamping).
By passing the `--stamp` option to Bazel, two additional input files will be readable by Rollup.
1. The variable `bazel_version_file` will point to `bazel-out/volatile-status.txt` which contains
statuses that change frequently; such changes do not cause a re-build of the rollup_bundle.
2. The variable `bazel_info_file` will point to `bazel-out/stable-status.txt` file which contains
statuses that stay the same; any changed values will cause rollup_bundle to rebuild.
Both `bazel_version_file` and `bazel_info_file` will be `undefined` if the build is run without `--stamp`.
> Note that under `--stamp`, only the bundle is re-built, but not the compilation steps that produced the inputs.
> This avoids a slow cascading re-build of a whole tree of actions.
To use these files, you write JS code in your `rollup.config.js` to read from the status files and parse the lines.
Each line is a space-separated key/value pair.
```javascript
/**
* The status files are expected to look like
* BUILD_SCM_HASH 83c699db39cfd74526cdf9bebb75aa6f122908bb
* BUILD_SCM_LOCAL_CHANGES true
* STABLE_BUILD_SCM_VERSION 6.0.0-beta.6+12.sha-83c699d.with-local-changes
* BUILD_TIMESTAMP 1520021990506
*
* Parsing regex is created based on Bazel's documentation describing the status file schema:
* The key names can be anything but they may only use upper case letters and underscores. The
* first space after the key name separates it from the value. The value is the rest of the line
* (including additional whitespaces).
*
* @param {string} p the path to the status file
* @returns a two-dimensional array of key/value pairs
*/
function parseStatusFile(p) {
if (!p) return [];
const results = {};
const statusFile = require('fs').readFileSync(p, {encoding: 'utf-8'});
for (const match of `\n${statusFile}`.matchAll(/^([A-Z_]+) (.*)/gm)) {
// Lines which go unmatched define an index value of `0` and should be skipped.
if (match.index === 0) {
continue;
}
results[match[1]] = match[2];
}
return results;
}
const statuses = parseStatusFile(bazel_version_file);
// Parse the stamp file produced by Bazel from the version control system
let version = '<unknown>';
// Don't assume BUILD_SCM_VERSION exists
if (statuses['BUILD_SCM_VERSION']) {
version = 'v' + statuses['BUILD_SCM_VERSION'];
if (DEBUG) {
version += '_debug';
}
}
```
### Debug and Opt builds
When you use `--compilation_mode=dbg`, Bazel produces a distinct output-tree in `bazel-out/[arch]-dbg/bin`.
Code in your `rollup.config.js` can look in the environment to detect if a debug build is being performed,
and include extra developer information in the bundle that you wouldn't normally ship to production.
Similarly, `--compilation_mode=opt` is Bazel's signal to perform extra optimizations.
You could use this value to perform extra production-only optimizations.
For example you could define a constant for enabling Debug:
```javascript
const DEBUG = process.env['COMPILATION_MODE'] === 'dbg';
```
and configure Rollup differently when `DEBUG` is `true` or `false`.
### Increasing Heap memory for rollup
The `rollup_bin` attribute allows you to customize the rollup.js program we execute,
so you can use `nodejs_binary` to construct your own.
> You can always call `bazel query --output=build [default rollup_bin]` to see what
> the default definition looks like, then copy-paste from there to be sure yours
> matches.
```python
nodejs_binary(
name = "rollup_more_mem",
data = ["@npm//rollup:rollup"],
entry_point = "@npm//:node_modules/rollup/dist/bin/rollup",
templated_args = [
"--node_options=--max-old-space-size=<SOME_SIZE>",
],
)
rollup_bundle(
...
rollup_bin = ":rollup_more_mem",
)
```
"""
load(":rollup_bundle.bzl", _rollup_bundle = "rollup_bundle")
rollup_bundle = _rollup_bundle
|
# Standard phone-keypad mapping (digits 0 and 1 carry no letters).
# Generalized from the original, which only knew '2' and '3'.
digit_mapping = {
    '2': ['a', 'b', 'c'],
    '3': ['d', 'e', 'f'],
    '4': ['g', 'h', 'i'],
    '5': ['j', 'k', 'l'],
    '6': ['m', 'n', 'o'],
    '7': ['p', 'q', 'r', 's'],
    '8': ['t', 'u', 'v'],
    '9': ['w', 'x', 'y', 'z'],
}
def get_letter_strings(number_string):
    """Return every letter string *number_string* can spell on a keypad.

    Recursive: prefix each letter of the first digit onto every string
    spelled by the remaining digits.  Robustness fix: an empty input now
    yields [] (the original returned None, which crashed any caller that
    iterated the result).
    """
    if not number_string:
        return []
    if len(number_string) == 1:
        return digit_mapping[number_string[0]]
    possible_strings = []
    strings_of_rem_nums = get_letter_strings(number_string[1:])
    for letter in digit_mapping[number_string[0]]:
        for suffix in strings_of_rem_nums:
            possible_strings.append(letter + suffix)
    return possible_strings
# Sanity checks: a single digit, and the ordering of two-digit products.
assert get_letter_strings("2") == [
    'a', 'b', 'c']
assert get_letter_strings("23") == [
    'ad', 'ae', 'af', 'bd', 'be', 'bf', 'cd', 'ce', 'cf']
assert get_letter_strings("32") == [
    'da', 'db', 'dc', 'ea', 'eb', 'ec', 'fa', 'fb', 'fc']
|
class Solution:
    def minTaps(self, n: int, A: List[int]) -> int:
        """Fewest taps needed to water the whole garden [0, n], or -1.

        dp[p] = fewest taps opened so far that water [0, p].  Tap i
        covers [i - A[i], i + A[i]]; it either starts a chain (when it
        reaches the garden's left edge) or extends the best chain that
        already covers its left reach, and then relaxes every dp entry
        inside its right reach.
        """
        INF = math.inf
        dp = [INF for _ in range(n + 1)]
        for i in range(n + 1):
            reach_left = i - A[i]
            if reach_left > 0:
                # Chain onto whatever already waters up to reach_left.
                cost = dp[reach_left] + 1
            else:
                # This tap alone covers the garden's start.
                cost = 1
            if cost < dp[i]:
                dp[i] = cost
            right = min(i + A[i] + 1, n + 1)
            for j in range(i, right):
                if cost < dp[j]:
                    dp[j] = cost
        return -1 if dp[n] == INF else dp[n]
|
# Area calculator: reads a figure name in Russian ('прямоугольник' =
# rectangle, 'круг' = circle, 'треугольник' = triangle), then its sizes.
# NOTE(review): the variable shadows the builtin `type`.
type = input()
if (type=='прямоугольник'):
    a = float(input())
    b = float(input())
    print(a * b)
elif (type=='круг'):
    a = float(input())
    # Pi rounded to 3.14, as the exercise expects.
    print(3.14 * (a ** 2))
elif (type=='треугольник'):
    a = float(input())
    b = float(input())
    c = float(input())
    # Heron's formula via the semi-perimeter p.
    p = (a+b+c)/2
    print((p*(p-a)*(p-b)*(p-c))**0.5)
def fibo_recur(n):
    """Naive doubly-recursive Fibonacci (exponential time)."""
    if n == 0:
        return 0
    if n in (1, 2):
        return 1
    return fibo_recur(n - 1) + fibo_recur(n - 2)
def fibo_dp(n, dp=None):
    """Fibonacci with memoised recursion (linear time).

    Bug fix: the memo used to be a *mutable default argument*
    (``dp=dict()``) -- one dict silently shared by every call.  The
    conventional ``None`` sentinel gives each top-level call its own
    memo; passing an explicit dict still works and still acts as a cache.
    """
    if dp is None:
        dp = {}
    if n == 0:
        return 0
    if n == 1 or n == 2:
        return 1
    if n in dp:
        return dp[n]
    dp[n] = fibo_dp(n - 1, dp) + fibo_dp(n - 2, dp)
    return dp[n]
# Compare both implementations on one user-supplied index.
# NOTE(review): fibo_recur is exponential -- a large input will hang here.
a = int(input())
print(fibo_dp(a))
print(fibo_recur(a))
|
class Square:
    """A square described by the length of one side."""
    def __init__(self, sideLength = 0):
        # Side length (defaults to 0, giving zero area and perimeter).
        self.sideLength = sideLength
    def area_square(self):
        """Return the area (side squared)."""
        return self.sideLength * self.sideLength
    def perimeter_square(self):
        """Return the perimeter (four equal sides)."""
        return 4 * self.sideLength
class Triangle:
    """A triangle described by base and height (sufficient for the area)."""
    def __init__(self, base : float, height : float):
        self.base = base
        self.height = height
    def area_triangle(self):
        """Area = base * height / 2."""
        area = (self.base * self.height)/2
        return area
    def perimeter_square(self, hypotenuse = 0):
        # NOTE(review): misleading name (apparently copied from Square),
        # and base*2 + hypotenuse is only a perimeter when the two
        # non-hypotenuse sides both equal the base -- confirm intent.
        perimeter = (self.base * 2) + hypotenuse
        return perimeter
if __name__ == '__main__':
    # Demo: a 10 x 5.5 triangle (perimeter assumes hypotenuse 10) and a
    # square of side 10.
    triangle = Triangle(10, 5.5)
    print('Triangle area: %f' % triangle.area_triangle())
    print('Triangle perimeter: %f' %triangle.perimeter_square(10))
    square = Square(10)
    print('Square area: %f' % square.area_square())
    print('Square perimeter: %f' % square.perimeter_square())
|
#! /usr/bin/python3
def parse():
    """Poll temp.txt for 'S'-delimited status records and fan the changed
    fields out to per-peripheral files (hex displays, LEDs, VGA).

    NOTE(review): busy-wait polling with no sleep; temp.txt is re-read
    and re-split until the record differs from the previous one.
    """
    # Previous record (comma-separated fields) and previous VGA marker.
    prev_data_S = "-1,-1,-1,-1,-1"
    prev_none_vga_data = "0"
    while(1):
        f_read = open("temp.txt","r")
        data = f_read.read()
        f_read.close()
        data_S = data.split('S')
        if(len(data_S)>2):
            # Field after the record, up to 'T' -- presumably a VGA flag.
            none_vga_data = data_S[2].split('T')
            # Spin until the record actually changes.
            # NOTE(review): none_vga_data is NOT refreshed inside this
            # loop, so the comparison uses a stale value -- confirm.
            while ( len(data_S)<=2 or data_S[2] == prev_data_S or (none_vga_data[0] == prev_none_vga_data and data_S[len(data_S)-1]=='0')):
                f_read = open("temp.txt","r")
                data = f_read.read()
                f_read.close()
                data_S = data.split('S')
            temp_data = data_S[2].split(',')
            prev_data = prev_data_S.split(',')
            prev_none_vga_data = none_vga_data[0]
            # Write only the fields that changed since the last record.
            if(temp_data[0]!=prev_data[0]):
                f_hex5_3 = open("hex5_3.txt","w")
                f_hex5_3.write(temp_data[0])
                f_hex5_3.close()
                print ("update hex 5,4,3")
            if(temp_data[1]!=prev_data[1]):
                f_hex2_0 = open("hex2_0.txt","w")
                f_hex2_0.write(temp_data[1])
                f_hex2_0.close()
                print ("update hex 2,1,0")
            if(temp_data[2]!=prev_data[2]):
                f_ledr = open("ledr.txt","w")
                f_ledr.write(temp_data[2])
                f_ledr.close()
                print ("update ledr")
            if(temp_data[5]!=prev_data[5]):
                f_ledr = open("vga_user.txt","w")
                f_ledr.write(temp_data[5])
                f_ledr.close()
                print ("update vga_user")
            prev_data_S = data_S[2]
        else:
            print("Read error and app is parsing again")
def main():
    # Entry point: run the file-polling bridge until interrupted.
    parse()
if __name__=="__main__":
    main()
class ResultKey:
    """
    Key for storing and searching Metrics: a data-set date plus a tag map.
    Equal keys hash identically, so instances work as dict/set keys.
    """
    def __init__(self, data_set_date, tags):
        self.data_set_date = data_set_date
        self.tags = tags
    def __str__(self):
        return "DataSetDate: {}\nTags: {}".format(self.data_set_date, self.tags)
    def __eq__(self, other):
        # Robustness fix: comparing against a non-ResultKey used to raise
        # AttributeError; returning NotImplemented lets Python fall back
        # to the other operand / identity as usual.
        if not isinstance(other, ResultKey):
            return NotImplemented
        # Dict equality subsumes the old manual length + per-key checks.
        return (self.data_set_date == other.data_set_date
                and self.tags == other.tags)
    def __hash__(self):
        # frozenset of items so equal tag dicts hash identically
        # regardless of insertion order.
        return hash((self.data_set_date, frozenset(self.tags.items())))
    def to_json(self):
        """Serialisable dict form (inverse of from_json)."""
        return {
            "DataSetDate": self.data_set_date,
            "Tags": self.tags
        }
    @staticmethod
    def from_json(d):
        """Rebuild a ResultKey from a to_json() dict."""
        return ResultKey(d["DataSetDate"], d["Tags"])
|
"""
3,猴子吃桃问题:猴子第一天摘下若干个桃子,当即吃了一半,还不瘾,又多吃了一个。第二天早上又将剩下的桃子吃掉一半,又多吃了一个。
以后每天早上都吃了前一天剩下的一半零一个。到第10天早上想再吃时,见只剩下一个桃子了。求第一天共摘了多少。
"""
def find_x(day):
    """Closed form for the monkey-and-peaches puzzle: the number of
    peaches picked on day 1 so that exactly one remains on *day*."""
    return 3 * 2 ** (day - 1) - 2
print(find_x(10))
# a10=1
# a9=(a10+1)*2
# a8=(a9+1)*2
# ....
# 数学思想,正常人的思想
# res = 1
# for i in range(9):
# res += 1
# res *= 2
# print(res)
# 计算机的思想
# Computer-style brute force: simulate the 9 mornings (eat half, then one
# more) for each candidate count until exactly one peach remains on day 10.
res01 = 1
while True:
    res = res01
    for _ in range(9):
        res = res / 2 - 1
    if res == 1:
        print(res01)
        break
    res01 += 1
|
class color:
    """ANSI escape sequences for terminal text styling; terminate a
    styled run with color.END to reset the terminal state."""
    PURPLE = '\033[95m'
    CYAN = '\033[96m'
    DARKCYAN = '\033[36m'
    BLUE = '\033[94m'
    GREEN = '\033[92m'
    YELLOW = '\033[93m'
    RED = '\033[91m'
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
    END = '\033[0m'
class DictList(dict):
    """dict that accumulates values: assigning to an existing key turns the
    entry into a list of everything assigned so far.

    NOTE(review): if the *first* value stored under a key is itself a
    list, the next assignment appends into it instead of nesting -- the
    two cases are indistinguishable here; confirm that is acceptable.
    """
    def __setitem__(self, key, value):
        try:
            # Assumes there is a list on the key
            self[key].append(value)
        except KeyError: # If it fails, because there is no key
            super(DictList, self).__setitem__(key, value)
        except AttributeError: # If it fails because it is not a list
            super(DictList, self).__setitem__(key, [self[key], value])
|
"""
For strings S and T, we say "T divides S" if and only if S = T + ... + T (T concatenated with itself 1 or more times)
Return the largest string X such that X divides str1 and X divides str2.
Example 1:
Input: str1 = "ABCABC", str2 = "ABC"
Output: "ABC"
Example 2:
Input: str1 = "ABABAB", str2 = "ABAB"
Output: "AB"
Example 3:
Input: str1 = "LEET", str2 = "CODE"
Output: ""
Note:
1 <= str1.length <= 1000
1 <= str2.length <= 1000
str1[i] and str2[i] are English uppercase letters.
"""
class Solution:
    def gcdOfStrings(self, str1: str, str2: str) -> str:
        """Largest string X that divides both str1 and str2 ('' if none).

        Uses the commutation criterion: a common divisor exists exactly
        when str1 + str2 == str2 + str1, and then the divisor of maximal
        length is the prefix whose length is gcd(len(str1), len(str2)).
        """
        if not str1 or not str2:
            return ''
        if str1 + str2 != str2 + str1:
            return ''
        # Euclid's algorithm on the two lengths.
        a, b = len(str1), len(str2)
        while b:
            a, b = b, a % b
        return str1[:a]
class Solution2:
    """Alternative: Euclid's algorithm applied directly to the strings."""
    def gcdOfStrings(self, str1, str2):
        # Euclidean Algorithm
        if len(str1) < len(str2):
            str1, str2 = str2, str1
        # now can assume len(str1) >= len(str2)
        DIV = True
        while DIV:
            DIV = False
            # n is unused; m is the current divisor's length.
            n, m = len(str1), len(str2)
            # Strip leading copies of str2 off str1 (the "mod" step).
            while(str1[:m] == str2):
                DIV = True
                str1 = str1[m:]
            if not str1: # divisible
                return str2
            else:
                # Continue with (remainder, old divisor), like gcd(b, a % b).
                str1, str2 = str2, str1
        return ""
### Musing: a statement has an action (print), a data source (the literal
### below) and an output device (standard output).  Worth making these
### explicit if this ever grows beyond a demo.
print("Hello, World!\nI'm Ante")
|
# -*- coding: utf-8 -*-
"""
dbmanage Library
~~~~~~~~~~~~~~~~
"""
|
# MMDetection config: SSD-512 detector with a VGG-16 backbone.
input_size = 512
model = dict(
    type='SingleStageDetector',
    # VGG-16 backbone; features are tapped at two stages.
    backbone=dict(
        type='SSDVGG',
        depth=16,
        with_last_pool=False,
        ceil_mode=True,
        out_indices=(3, 4),
        out_feature_indices=(22, 34),
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://vgg16_caffe')),
    # Extra SSD feature levels stacked on the backbone outputs.
    neck=dict(
        type='SSDNeck',
        in_channels=(512, 1024),
        out_channels=(512, 1024, 512, 256, 256, 256, 256),
        level_strides=(2, 2, 2, 2, 1),
        level_paddings=(1, 1, 1, 1, 1),
        l2_norm_scale=20,
        last_kernel_size=4),
    # One anchor-based head over all seven feature levels (COCO: 80 classes).
    bbox_head=dict(
        type='SSDHead',
        in_channels=(512, 1024, 512, 256, 256, 256, 256),
        num_classes=80,
        anchor_generator=dict(
            type='SSDAnchorGenerator',
            scale_major=False,
            input_size=512,
            basesize_ratio_range=(0.1, 0.9),
            strides=[8, 16, 32, 64, 128, 256, 512],
            ratios=[[2], [2, 3], [2, 3], [2, 3], [2, 3], [2], [2]]),
        bbox_coder=dict(
            type='DeltaXYWHBBoxCoder',
            target_means=[0.0, 0.0, 0.0, 0.0],
            target_stds=[0.1, 0.1, 0.2, 0.2])),
    # Training-time assignment and loss settings.
    train_cfg=dict(
        assigner=dict(
            type='MaxIoUAssigner',
            pos_iou_thr=0.5,
            neg_iou_thr=0.5,
            min_pos_iou=0.0,
            ignore_iof_thr=-1,
            gt_max_assign_all=False),
        smoothl1_beta=1.0,
        allowed_border=-1,
        pos_weight=-1,
        neg_pos_ratio=3,
        debug=False),
    # Inference-time NMS and score filtering.
    test_cfg=dict(
        nms_pre=1000,
        nms=dict(type='nms', iou_threshold=0.45),
        min_bbox_size=0,
        score_thr=0.02,
        max_per_img=200))
# Fixed input size, so cudnn autotuning is worthwhile.
cudnn_benchmark = True
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
# Mean subtraction only (std of 1), Caffe-style VGG preprocessing.
img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[1, 1, 1], to_rgb=True)
# Training pipeline: photometric + geometric augmentation, resize to
# 512x512, normalize, random flip.
train_pipeline = [
    dict(type='LoadImageFromFile', to_float32=True),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(
        type='PhotoMetricDistortion',
        brightness_delta=32,
        contrast_range=(0.5, 1.5),
        saturation_range=(0.5, 1.5),
        hue_delta=18),
    dict(
        type='Expand',
        mean=[123.675, 116.28, 103.53],
        to_rgb=True,
        ratio_range=(1, 4)),
    dict(
        type='MinIoURandomCrop',
        min_ious=(0.1, 0.3, 0.5, 0.7, 0.9),
        min_crop_size=0.3),
    dict(type='Resize', img_scale=(512, 512), keep_ratio=False),
    dict(
        type='Normalize',
        mean=[123.675, 116.28, 103.53],
        std=[1, 1, 1],
        to_rgb=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
]
# Test pipeline: single 512x512 scale, no flip augmentation.
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(512, 512),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=False),
            dict(
                type='Normalize',
                mean=[123.675, 116.28, 103.53],
                std=[1, 1, 1],
                to_rgb=True),
            # dict(type='ImageToTensor', keys=['img']),
            dict(type='DefaultFormatBundle'),
            dict(type='Collect', keys=['img'])
        ])
]
# Dataset wiring.  Consistency fix: the per-split pipelines previously
# duplicated the train_pipeline / test_pipeline definitions above
# line-for-line; referencing the variables keeps the copies from drifting.
data = dict(
    samples_per_gpu=8,
    workers_per_gpu=3,
    train=dict(
        type='RepeatDataset',
        times=5,
        dataset=dict(
            type='CocoDataset',
            ann_file='data/coco/annotations/instances_train2017.json',
            img_prefix='data/coco/train2017/',
            pipeline=train_pipeline)),
    val=dict(
        type='CocoDataset',
        ann_file='data/coco/annotations/instances_val2017.json',
        img_prefix='data/coco/val2017/',
        pipeline=test_pipeline),
    test=dict(
        type='CocoDataset',
        ann_file='data/coco/annotations/instances_val2017.json',
        img_prefix='data/coco/val2017/',
        pipeline=test_pipeline))
# Evaluate bbox mAP every epoch.
evaluation = dict(interval=1, metric='bbox')
optimizer = dict(type='SGD', lr=0.002, momentum=0.9, weight_decay=0.0005)
optimizer_config = dict()
# Step LR schedule with linear warmup over the first 500 iterations.
lr_config = dict(
    policy='step',
    warmup='linear',
    warmup_iters=500,
    warmup_ratio=0.001,
    step=[16, 22])
runner = dict(type='EpochBasedRunner', max_epochs=24)
checkpoint_config = dict(interval=1)
log_config = dict(interval=50, hooks=[dict(type='TextLoggerHook')])
custom_hooks = [
    dict(type='NumClassCheckHook'),
    dict(type='CheckInvalidLossHook', interval=50, priority='VERY_LOW')
]
dist_params = dict(backend='nccl')
log_level = 'INFO'
load_from = None
resume_from = None
workflow = [('train', 1)]
work_dir = './work_dirs'
gpu_ids = range(0, 1)
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# Catalog exception hierarchy: four independent Exception subclasses.
class NoSuchTableError(Exception):
    """Raised when a referenced table is not found"""
class NoSuchNamespaceError(Exception):
    """Raised when a referenced name-space is not found"""
class NamespaceNotEmptyError(Exception):
    """Raised when a name-space being dropped is not empty"""
class AlreadyExistsError(Exception):
    """Raised when a table or name-space being created already exists in the catalog"""
|
def operation():
    """Show the calculator menu and read a validated choice.

    :return: an int in 1..5 — the chosen arithmetic operation (1-4) or exit (5).
    """
    menu = ("Choose an option:",
            "[1] Addition (+)",
            "[2] Subtraction (-)",
            "[3] Multiplication (*)",
            "[4] Division (/)",
            "[5] Exit")
    for line in menu:
        print(line)
    while True:
        try:
            choice = int(input("Enter the number of the operation: "))
            # Re-prompt until the choice is inside the menu range.
            while choice < 1 or choice > 5:
                choice = int(input("[ERROR] Enter a number present among the options: "))
            return choice
        except (TypeError, ValueError):
            # Non-numeric input anywhere above lands here and restarts the prompt.
            print("[ERROR] Your option has to be an integer")
# Interactive text-based calculator: reads a starting number, then applies
# the user's chosen operations to a running total until option 5 (Exit).
print("Text-based calculator")
while True:  # Loop until the first number parses as a float
    try:
        tot = float(input("Enter a number: "))
        break
    except (TypeError, ValueError):
        print("[ERROR] The value has to be an integer or a decimal number.")
while True:  # Main loop: one operation per iteration
    operator = operation()
    if operator < 5:  # Options 1-4 are arithmetic; 5 means exit
        while True:  # Loop until the operand parses as a float
            try:
                number = float(input("Enter a number: "))
                break
            except (TypeError, ValueError):
                print("[ERROR] The value has to be an integer or a decimal number")
        if operator == 1:  # Addition
            tot += number
        elif operator == 2:  # Subtraction
            tot -= number
        elif operator == 3:  # Multiplication
            tot *= number
        else:  # Division
            if number == 0:
                # Bug fix: previously crashed with ZeroDivisionError.
                print("[ERROR] Division by zero is not allowed")
                continue
            tot /= number
        # Avoid printing integers as floats (e.g. 6 instead of 6.0)
        if tot == int(tot):
            print(int(tot))
        else:
            print(tot)
    else:
        # Bug fix: option 5 ("Exit") previously did nothing and the loop
        # re-prompted forever; now it actually terminates the program.
        break
|
'''
LEADERS OF AN ARRAY
The task is to find all leaders in an array, where
a leader is an array element which is greater than all the elements
on its right side
'''
print("Enter the size of array : ")
num = int(input())
a = []
print("Enter array elements")
for i in range(0, num):
    a.append(int(input()))
# Scan right-to-left keeping the max seen so far; an element strictly
# greater than everything to its right is a leader.
maximum = a[num - 1]
print("The following are the leaders of array : ")
print(a[num - 1], " ", end = '')  # the last element is always a leader
for i in range(num - 2, -1, -1):
    if (a[i] > maximum):
        # Bug fix: the running maximum was never updated, so elements
        # smaller than a later leader (but larger than the last element)
        # were wrongly reported as leaders, e.g. 5 in [5, 10, 3].
        maximum = a[i]
        print(a[i], " ", end = '')
'''
Input : num = 5
Array = [13, 4, 12, 1, 5]
Output :
The following are the leaders of array :
5 12 13
'''
|
# Read four integers into a tuple, then report on the values 9 and 3
# and list the even numbers entered.
valores = (int(input('Digite um número: ')),
           int(input('Digite outro número: ')),
           int(input('Digite mais um número: ')),
           int(input('Digite o último número: ')))
print(f'Você digitou os valores {valores}')
qtd_nove = valores.count(9)
if qtd_nove:
    print(f'O valor 9 apareceu {qtd_nove} vez(es).')
else:
    print('O valor 9 apareceu 0 vezes.')
if 3 in valores:
    # index() is safe here because membership was checked first.
    print(f'O valor 3 apareceu na {valores.index(3) + 1}ª posição')
else:
    print('O valor 3 não foi digitado em nenhuma posição.')
print('Os valores pares digitados foram: ', end='')
for valor in valores:
    if valor % 2 == 0:
        print(valor, end=' ')
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.