content
stringlengths 7
1.05M
|
|---|
# mmcv-style config: ResNet-50 classifier fine-tuned on a 1% ImageNet label split
# (see ann_file below). Inherits model/dataset/schedule/runtime from _base_.
_base_ = [
    '../../_base_/models/resnet50.py',
    '../../_base_/datasets/imagenet.py',
    '../../_base_/schedules/sgd_steplr-100e.py',
    '../../_base_/default_runtime.py',
]
# model settings: synchronized BatchNorm across GPUs for multi-GPU training
model = dict(backbone=dict(norm_cfg=dict(type='SyncBN')))
# dataset settings
data = dict(
    imgs_per_gpu=64,  # total 64x4=256
    train=dict(
        data_source=dict(ann_file='data/imagenet/meta/train_1percent.txt')))
# optimizer
optimizer = dict(
    type='SGD',
    lr=0.1,
    momentum=0.9,
    weight_decay=5e-4,
    # regex \Ahead. selects the classification-head parameters;
    # lr_mult=1 keeps them at the base learning rate
    paramwise_options={'\\Ahead.': dict(lr_mult=1)})
# learning policy: decay lr by gamma at epochs 12 and 16
lr_config = dict(policy='step', step=[12, 16], gamma=0.2)
# runtime settings
runner = dict(type='EpochBasedRunner', max_epochs=20)
checkpoint_config = dict(interval=10)
log_config = dict(
    interval=10,
    hooks=[dict(type='TextLoggerHook'),
           dict(type='TensorboardLoggerHook')])
|
def test_add_to_basket(browser):
    """Open a product page and assert the add-to-basket button is visible.

    `browser` is expected to be a Selenium WebDriver fixture.
    """
    link = 'http://selenium1py.pythonanywhere.com/catalogue/coders-at-work_207/'
    browser.get(link)
    # NOTE(review): find_element_by_class_name was removed in Selenium 4 —
    # confirm the pinned selenium version, or migrate to
    # browser.find_element(By.CLASS_NAME, 'btn-add-to-basket').
    button = browser.find_element_by_class_name('btn-add-to-basket')
    # Fix: the message was an f-string with no placeholders; plain string suffices.
    assert button.is_displayed(), 'Basket button not found'
|
class TaskAnswer:
    """Accumulates moves as (vertice_source, vertice_destination, moved_value) tuples."""

    def __init__(self):
        # BUG FIX: _steps was a mutable class attribute, so every TaskAnswer
        # instance shared (and appended to) the same list. Make it per-instance.
        self._steps = []

    def get_steps(self) -> list:
        """Return the recorded steps (the internal list itself, not a copy)."""
        return self._steps

    def add_step(self, source: int, destination: int, value: float):
        """Record one move of `value` from vertex `source` to `destination`."""
        self._steps.append((source, destination, value))

    def print(self):
        """Print every recorded step in human-readable form."""
        for source, destination, value in self._steps:
            print("from", source, "to", destination, "move", value)
|
class Solution:
    def getDescentPeriods(self, prices: List[int]) -> int:
        """Count subarrays where each element is exactly one less than the previous.

        A strictly-descending-by-1 run of current length k contributes k new
        periods at its last position, so we accumulate the running run length.
        """
        total = 1
        run = 1
        for prev, cur in zip(prices, prices[1:]):
            run = run + 1 if prev - cur == 1 else 1
            total += run
        return total
|
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
"import pandas as pd\n",
"from splinter import Browser\n",
"from bs4 import BeautifulSoup\n",
"from datetime import datetime \n",
"import os\n",
"import time\n",
"from urllib.parse import urlsplit"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"# Defining path to open nasa site\n",
"executable_path = {'executable_path':\"/usr/local/bin/chromedriver\"}\n",
"brow = Browser('chrome', **executable_path, headless=False)"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [],
"source": [
"# putting in the nasa site\n",
"nasa = \"https://mars.nasa.gov/news\"\n",
"brow.visit(nasa)"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [],
"source": [
"html = brow.html\n",
"soup = BeautifulSoup(html,\"html.parser\")"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"News Title: Mars Now\n",
"Paragraphs: The Red Planet's surface has been visited by eight NASA spacecraft. The ninth will be the first that includes a roundtrip ticket in its flight plan. \n"
]
}
],
"source": [
"title = soup.find(\"div\", class_=\"content_title\").text\n",
"paragraphs = soup.find(\"div\", class_=\"article_teaser_body\").text\n",
"print(f\"News Title: {title}\")\n",
"print(f\"Paragraphs: {paragraphs}\")"
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [],
"source": [
"# Finding images for Mars\n",
"featured_image_url = \"https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars\"\n",
"brow.visit(featured_image_url)"
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"https://www.jpl.nasa.gov/\n"
]
}
],
"source": [
"url = \"{0.scheme}://{0.netloc}/\".format(urlsplit(featured_image_url))\n",
"print(url)"
]
},
{
"cell_type": "code",
"execution_count": 8,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"//*[@id=\"page\"]/section[3]/div/ul/li[1]/a/div/div[2]/img\n"
]
}
],
"source": [
"# path to get image\n",
"xpath = \"//*[@id=\\\"page\\\"]/section[3]/div/ul/li[1]/a/div/div[2]/img\"\n",
"print(xpath)"
]
},
{
"cell_type": "code",
"execution_count": 9,
"metadata": {},
"outputs": [],
"source": [
"results = brow.find_by_xpath(xpath)\n",
"image = results[0]\n",
"image.click()"
]
},
{
"cell_type": "code",
"execution_count": 10,
"metadata": {},
"outputs": [],
"source": [
"# html_image = brow.html\n",
"# img_url = soup.find(\"div\", class_=\"fancybox-image\")[\"src\"]\n",
"# print(img_url)\n",
"# Having a hard time pulling the data however using inspect we can find the url \n",
"img_url = \"/spaceimages/images/largesize/PIA23948_hires.jpg\""
]
},
{
"cell_type": "code",
"execution_count": 11,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"https://www.jpl.nasa.gov//spaceimages/images/largesize/PIA23948_hires.jpg\n"
]
}
],
"source": [
"featured_imgurl = url + img_url \n",
"print(featured_imgurl)"
]
},
{
"cell_type": "code",
"execution_count": 23,
"metadata": {},
"outputs": [],
"source": [
"# Mars Weather\n",
"mars_weather_url = \"https://twitter.com/marswxreport?lang=en\"\n",
"brow.visit(mars_weather_url)"
]
},
{
"cell_type": "code",
"execution_count": 30,
"metadata": {},
"outputs": [],
"source": [
"html_weather = brow.html\n",
"soup2 = BeautifulSoup(html_weather, \"html.parser\")\n",
        "mars_weather = soup2.find(\"p\", {\"class\": \"TweetTextSize TweetTextSize--normal js-tweet-text tweet-text\"})"
]
},
{
"cell_type": "code",
"execution_count": 31,
"metadata": {},
"outputs": [
{
"data": {
"text/html": [
"<div>\n",
"<style scoped>\n",
" .dataframe tbody tr th:only-of-type {\n",
" vertical-align: middle;\n",
" }\n",
"\n",
" .dataframe tbody tr th {\n",
" vertical-align: top;\n",
" }\n",
"\n",
" .dataframe thead th {\n",
" text-align: right;\n",
" }\n",
"</style>\n",
"<table border=\"1\" class=\"dataframe\">\n",
" <thead>\n",
" <tr style=\"text-align: right;\">\n",
" <th></th>\n",
" <th>0</th>\n",
" <th>1</th>\n",
" </tr>\n",
" </thead>\n",
" <tbody>\n",
" <tr>\n",
" <th>0</th>\n",
" <td>Equatorial Diameter:</td>\n",
" <td>6,792 km</td>\n",
" </tr>\n",
" <tr>\n",
" <th>1</th>\n",
" <td>Polar Diameter:</td>\n",
" <td>6,752 km</td>\n",
" </tr>\n",
" <tr>\n",
" <th>2</th>\n",
" <td>Mass:</td>\n",
" <td>6.39 × 10^23 kg (0.11 Earths)</td>\n",
" </tr>\n",
" <tr>\n",
" <th>3</th>\n",
" <td>Moons:</td>\n",
" <td>2 (Phobos & Deimos)</td>\n",
" </tr>\n",
" <tr>\n",
" <th>4</th>\n",
" <td>Orbit Distance:</td>\n",
" <td>227,943,824 km (1.38 AU)</td>\n",
" </tr>\n",
" <tr>\n",
" <th>5</th>\n",
" <td>Orbit Period:</td>\n",
" <td>687 days (1.9 years)</td>\n",
" </tr>\n",
" <tr>\n",
" <th>6</th>\n",
" <td>Surface Temperature:</td>\n",
" <td>-87 to -5 °C</td>\n",
" </tr>\n",
" <tr>\n",
" <th>7</th>\n",
" <td>First Record:</td>\n",
" <td>2nd millennium BC</td>\n",
" </tr>\n",
" <tr>\n",
" <th>8</th>\n",
" <td>Recorded By:</td>\n",
" <td>Egyptian astronomers</td>\n",
" </tr>\n",
" </tbody>\n",
"</table>\n",
"</div>"
],
"text/plain": [
" 0 1\n",
"0 Equatorial Diameter: 6,792 km\n",
"1 Polar Diameter: 6,752 km\n",
"2 Mass: 6.39 × 10^23 kg (0.11 Earths)\n",
"3 Moons: 2 (Phobos & Deimos)\n",
"4 Orbit Distance: 227,943,824 km (1.38 AU)\n",
"5 Orbit Period: 687 days (1.9 years)\n",
"6 Surface Temperature: -87 to -5 °C\n",
"7 First Record: 2nd millennium BC\n",
"8 Recorded By: Egyptian astronomers"
]
},
"execution_count": 31,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# Mars Facts and conversion from html to table\n",
"mars_facts = \"https://space-facts.com/mars/\"\n",
"table = pd.read_html(mars_facts)\n",
"table[0]"
]
},
{
"cell_type": "code",
"execution_count": 33,
"metadata": {},
"outputs": [
{
"data": {
"text/html": [
"<div>\n",
"<style scoped>\n",
" .dataframe tbody tr th:only-of-type {\n",
" vertical-align: middle;\n",
" }\n",
"\n",
" .dataframe tbody tr th {\n",
" vertical-align: top;\n",
" }\n",
"\n",
" .dataframe thead th {\n",
" text-align: right;\n",
" }\n",
"</style>\n",
"<table border=\"1\" class=\"dataframe\">\n",
" <thead>\n",
" <tr style=\"text-align: right;\">\n",
" <th></th>\n",
" <th>Parameter</th>\n",
" <th>Values</th>\n",
" </tr>\n",
" </thead>\n",
" <tbody>\n",
" <tr>\n",
" <th>0</th>\n",
" <td>Equatorial Diameter:</td>\n",
" <td>6,792 km</td>\n",
" </tr>\n",
" <tr>\n",
" <th>1</th>\n",
" <td>Polar Diameter:</td>\n",
" <td>6,752 km</td>\n",
" </tr>\n",
" <tr>\n",
" <th>2</th>\n",
" <td>Mass:</td>\n",
" <td>6.39 × 10^23 kg (0.11 Earths)</td>\n",
" </tr>\n",
" <tr>\n",
" <th>3</th>\n",
" <td>Moons:</td>\n",
" <td>2 (Phobos & Deimos)</td>\n",
" </tr>\n",
" <tr>\n",
" <th>4</th>\n",
" <td>Orbit Distance:</td>\n",
" <td>227,943,824 km (1.38 AU)</td>\n",
" </tr>\n",
" <tr>\n",
" <th>5</th>\n",
" <td>Orbit Period:</td>\n",
" <td>687 days (1.9 years)</td>\n",
" </tr>\n",
" <tr>\n",
" <th>6</th>\n",
" <td>Surface Temperature:</td>\n",
" <td>-87 to -5 °C</td>\n",
" </tr>\n",
" <tr>\n",
" <th>7</th>\n",
" <td>First Record:</td>\n",
" <td>2nd millennium BC</td>\n",
" </tr>\n",
" <tr>\n",
" <th>8</th>\n",
" <td>Recorded By:</td>\n",
" <td>Egyptian astronomers</td>\n",
" </tr>\n",
" </tbody>\n",
"</table>\n",
"</div>"
],
"text/plain": [
" Parameter Values\n",
"0 Equatorial Diameter: 6,792 km\n",
"1 Polar Diameter: 6,752 km\n",
"2 Mass: 6.39 × 10^23 kg (0.11 Earths)\n",
"3 Moons: 2 (Phobos & Deimos)\n",
"4 Orbit Distance: 227,943,824 km (1.38 AU)\n",
"5 Orbit Period: 687 days (1.9 years)\n",
"6 Surface Temperature: -87 to -5 °C\n",
"7 First Record: 2nd millennium BC\n",
"8 Recorded By: Egyptian astronomers"
]
},
"execution_count": 33,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# table clean up \n",
"df_mars_facts = table[0]\n",
"df_mars_facts.columns = [\"Parameter\", \"Values\"]\n",
"df_mars_facts"
]
},
{
"cell_type": "code",
"execution_count": 35,
"metadata": {},
"outputs": [],
"source": [
"mars_facts_html_table = df_mars_facts.to_html()\n",
"mars_facts_html_table = mars_facts_html_table.replace(\"\\n\",\"\")"
]
},
{
"cell_type": "code",
"execution_count": 72,
"metadata": {},
"outputs": [],
"source": [
"# Hemispheres\n",
"url_hemisphere = \"https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars\"\n",
"brow.visit(url_hemisphere)"
]
},
{
"cell_type": "code",
"execution_count": 73,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"https://astrogeology.usgs.gov/\n"
]
}
],
"source": [
"hemi_base_url = \"{0.scheme}://{0.netloc}/\".format(urlsplit(url_hemisphere))\n",
"print(hemi_base_url)"
]
},
{
"cell_type": "code",
"execution_count": 74,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Cerberus Hemisphere Enhanced\n"
]
}
],
"source": [
"# Cerberus Hemisphere\n",
"hemisphere_imgurls = []\n",
"# inspect the link and copy the xpath\n",
"results = brow.find_by_xpath(\"//*[@id='product-section']/div[2]/div[1]/a/img\").click()\n",
"cerberus_open_img = brow.find_by_xpath(\"//*[@id='wide-image']/img\").click()\n",
"cerberus_img = brow.html\n",
"soup = BeautifulSoup(cerberus_img, \"html.parser\")\n",
"cerberus_url = soup.find(\"img\", class_=\"wide-image\")[\"src\"]\n",
"cerberus_imgurl = hemi_base_url + cerberus_url\n",
"cerberus_title = soup.find(\"h2\", class_=\"title\").text\n",
"print(cerberus_title)"
]
},
{
"cell_type": "code",
"execution_count": 80,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"{'image title': 'Cerberus Hemisphere Enhanced', 'image url': 'https://astrogeology.usgs.gov//cache/images/f5e372a36edfa389625da6d0cc25d905_cerberus_enhanced.tif_full.jpg'}\n"
]
}
],
"source": [
"Cerberus = {\"image title\": cerberus_title, \"image url\": cerberus_imgurl}\n",
"hemisphere_imgurls.append(Cerberus)\n",
"print(Cerberus)"
]
},
{
"cell_type": "code",
"execution_count": 95,
"metadata": {},
"outputs": [],
"source": [
"# Return back to home page\n",
"url_hemisphere = \"https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars\"\n",
"brow.visit(url_hemisphere)"
]
},
{
"cell_type": "code",
"execution_count": 96,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"{'image title': 'Schiaparelli Hemisphere Enhanced', 'image url': 'https://astrogeology.usgs.gov//cache/images/3778f7b43bbbc89d6e3cfabb3613ba93_schiaparelli_enhanced.tif_full.jpg'}\n"
]
}
],
"source": [
"# Schiaparelli Hemisphere\n",
"results2 = brow.find_by_xpath(\"//*[@id='product-section']/div[2]/div[2]/a/img\").click()\n",
"open_schiaparelli = brow.find_by_xpath(\"//*[@id='wide-image']\").click()\n",
"schiaparelli_img = brow.html\n",
"soup = BeautifulSoup(schiaparelli_img, \"html.parser\")\n",
"schiaparelli_url = soup.find(\"img\", class_=\"wide-image\")[\"src\"]\n",
"schiaparelli_imgurl = hemi_base_url + schiaparelli_url\n",
"schiaparelli_title = soup.find(\"h2\", class_=\"title\").text\n",
"Schiaparelli = {\"image title\": schiaparelli_title, \"image url\": schiaparelli_imgurl}\n",
"hemisphere_imgurls.append(Schiaparelli)\n",
"print(Schiaparelli)"
]
},
{
"cell_type": "code",
"execution_count": 114,
"metadata": {},
"outputs": [],
"source": [
"# Return back to home page\n",
"url_hemisphere = \"https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars\"\n",
"brow.visit(url_hemisphere)"
]
},
{
"cell_type": "code",
"execution_count": 115,
"metadata": {},
"outputs": [],
"source": [
"# Syrtis Major Hemisphere\n",
"results3 = brow.find_by_xpath(\"//*[@id='product-section']/div[2]/div[3]/a/img\").click()\n",
"syrtis_major_open = brow.find_by_xpath(\"//*[@id='wide-image']\").click()\n",
"syrtis_major_img = brow.html\n",
"soup = BeautifulSoup(syrtis_major_img, \"html.parser\")\n",
"syrtis_major_url = soup.find(\"img\", class_=\"wide-image\")[\"src\"]\n",
"syrtis_major_imgurl = hemi_base_url + syrtis_major_url\n",
"syrtis_major_title = soup.find(\"h2\", class_=\"title\").text\n",
"Syrtis_major = {\"image title\": syrtis_major_title, \"image url\": syrtis_major_imgurl}\n",
"hemisphere_imgurls.append(Syrtis_major)"
]
},
{
"cell_type": "code",
"execution_count": 116,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"{'image title': 'Syrtis Major Hemisphere Enhanced', 'image url': 'https://astrogeology.usgs.gov//cache/images/555e6403a6ddd7ba16ddb0e471cadcf7_syrtis_major_enhanced.tif_full.jpg'}\n"
]
}
],
"source": [
"print(Syrtis_major)"
]
},
{
"cell_type": "code",
"execution_count": 120,
"metadata": {},
"outputs": [],
"source": [
"# Return back to home page\n",
"url_hemisphere = \"https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars\"\n",
"brow.visit(url_hemisphere)"
]
},
{
"cell_type": "code",
"execution_count": 121,
"metadata": {},
"outputs": [],
"source": [
"# Valles Marineris Hemisphere \n",
"results4 = brow.find_by_xpath(\"//*[@id='product-section']/div[2]/div[4]/a/img\").click()\n",
"marineris_open = brow.find_by_xpath(\"//*[@id='wide-image']\").click()\n",
"marineris_image = brow.html\n",
"soup = BeautifulSoup(marineris_image, \"html.parser\")\n",
"marineris_url = soup.find(\"img\", class_=\"wide-image\")[\"src\"]\n",
    "marineris_imgurl = hemi_base_url + marineris_url\n",
"marineris_title = soup.find(\"h2\", class_=\"title\").text\n",
"Valles_marineris = {\"image title\": marineris_title, \"image url\": marineris_imgurl}\n",
"hemisphere_imgurls.append(Valles_marineris)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "PythonData",
"language": "python",
"name": "pythondata"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.7.6"
}
},
"nbformat": 4,
"nbformat_minor": 4
}
|
# Implement es_primo(): return a boolean indicating whether numero is prime.
def es_primo(numero):
    """Return True if `numero` is a prime number, False otherwise.

    BUG FIX: the original returned True for 0, 1 and negatives, because
    range(2, numero) is empty there; primes are defined as integers >= 2.
    Trial division only needs divisors up to sqrt(numero).
    """
    if numero < 2:
        return False
    for i in range(2, int(numero ** 0.5) + 1):
        if numero % i == 0:
            return False
    return True


assert(es_primo(7) == True)
assert(es_primo(6) == False)
|
# Exercise 1: find and print the maximum of the list (expected output: 70).
numbers = [50, 40, 23, 70, 56, 12, 5, 10, 7]
# Using the built-in:
print(max(numbers))

# Without using max(): track the running maximum.
numbers = [50, 40, 23, 70, 56, 12, 5, 10, 7]
lst = numbers[0]
for i in numbers:
    if i > lst:
        lst = i
print(lst)

# Exercise 2: print the list items in reverse order.
# BUG FIX: the original iterated `places` forwards, so nothing was reversed.
places = ["delhi", "gujrat", "rajasthan", "punjab", "kerala"]
for i in reversed(places):
    print(i)

# Exercise 3: palindrome check — does the list read the same both ways?
# name = ['n', 'i', 't', 'i', 'n']
name = ['a', 'b', 'c', 'd', 'e']
rev = name[::-1]
if name == rev:
    print("Palindrome hai")
else:
    print("Palindrome nhi h ")

# List example: total of all marks.
student_marks = [23, 45, 89, 90, 56, 80]
length = len(student_marks)
print(length)
calc = sum(student_marks)  # replaces the manual while-loop accumulation
print(calc)

# List example 2: count marks below 50 (low) and at-or-above 50 (heigh).
student_marks = [23, 45, 67, 89, 90, 54, 34, 21, 34, 23, 19, 28, 10, 45, 86, 87, 9]
low = sum(1 for marks in student_marks if marks < 50)
heigh = len(student_marks) - low
print(heigh)
print(low)
|
print("####################################################")
print("#FILENAME:\t\ta1p1.py\t\t\t #")
print("#ASSIGNMENT:\t\tHomework Assignment 1 Pt. 1#")
print("#COURSE/SECTION:\tCIS 3389.251\t\t #")
print("#DUE DATE:\t\tWednesday, 12.February 2020#")
print("####################################################\n\n\n")

# Repeatedly read three numbers, then report the second largest and the average.
cont = 'y'
while cont.lower() in ('y', 'yes'):
    number1 = float(input("First number:\t"))
    number2 = float(input("Second number:\t"))
    number3 = float(input("Third number:\t"))
    avg = (number1 + number2 + number3) / 3
    # The middle value of the sorted triple is the second largest; this matches
    # the original exhaustive comparison chain, including all tie cases.
    second_largest = sorted((number1, number2, number3))[1]
    print("\n\nSecond largest number entered:\t", second_largest)
    print("Average:\t\t\t", avg, "\n\n")
    cont = input("Would you like to continue? ")
|
#!/usr/bin/env python
# encoding: utf-8
class Solution(object):
    def countBits(self, num):
        """Return a list where index i holds the number of set bits in i, for 0..num.

        FIX: ported from Python 2 — `xrange` and true-division `i/2` break on
        Python 3. `dp[i >> 1]` is the bit count of i without its lowest bit;
        `i & 1` adds that bit back.

        :type num: int
        :rtype: List[int]
        """
        dp = [0] * (num + 1)
        for i in range(1, num + 1):
            dp[i] = dp[i >> 1] + (i & 1)
        return dp
|
# Ponkemon problem: pick half of nums; maximize the number of distinct kinds.
def solution(nums):
    """Return min(number of picks allowed, number of distinct values)."""
    picks = len(nums) // 2
    kinds = len(set(nums))
    return min(picks, kinds)
# 테스트 1 〉 통과 (0.01ms, 10.2MB)
# 테스트 2 〉 통과 (0.01ms, 10.1MB)
# 테스트 3 〉 통과 (0.01ms, 10.2MB)
# 테스트 4 〉 통과 (0.01ms, 10.2MB)
# 테스트 5 〉 통과 (0.01ms, 10.3MB)
# 테스트 6 〉 통과 (0.01ms, 10.1MB)
# 테스트 7 〉 통과 (0.01ms, 10MB)
# 테스트 8 〉 통과 (0.01ms, 10.2MB)
# 테스트 9 〉 통과 (0.01ms, 10.2MB)
# 테스트 10 〉 통과 (0.01ms, 10.2MB)
# 테스트 11 〉 통과 (0.01ms, 10.2MB)
# 테스트 12 〉 통과 (0.09ms, 10.3MB)
# 테스트 13 〉 통과 (0.09ms, 10.3MB)
# 테스트 14 〉 통과 (0.09ms, 10.2MB)
# 테스트 15 〉 통과 (0.05ms, 10.3MB)
# 테스트 16 〉 통과 (1.09ms, 11MB)
# 테스트 17 〉 통과 (0.60ms, 10.6MB)
# 테스트 18 〉 통과 (0.59ms, 10.5MB)
# 테스트 19 〉 통과 (0.39ms, 10.4MB)
# 테스트 20 〉 통과 (0.32ms, 10.3MB)
|
#!/usr/local/bin/python3
# Python Challenge - 1
# http://www.pythonchallenge.com/pc/def/map.html
# Keyword: ocr
def main():
    """Python Challenge #1: decode the rot-2 message and derive the next URL path.

    Hint given by the puzzle: K -> M, O -> Q, E -> G
    ("Everybody thinks twice before solving this.")
    """
    cipher_text = ('g fmnc wms bgblr rpylqjyrc gr zw fylb. rfyrq ufyr amknsrcp'
                   'q ypc dmp. bmgle gr gl zw fylb gq glcddgagclr ylb rfyr\'q '
                   'ufw rfgq rcvr gq qm jmle. sqgle qrpgle.kyicrpylq() gq '
                   'pcamkkclbcb. lmu ynnjw ml rfc spj.')
    print('PLAIN TEXT: {}'.format(caesar_cipher(cipher_text, 2)))
    # Applying the same shift to the page name /map yields the next page (/ocr).
    print('NEW ADDRESS PATH')
    print(caesar_cipher('/map', 2))
    # Challenge 23 was a rot13 ("in the face of what?") — experiments removed.
def caesar_cipher(cipher_text, n):
    """Shift each letter of cipher_text forward by n positions (mod 26).

    Input is lowercased first; non-alphabetic characters pass through unchanged.
    Returns the resulting plain-text string.
    """
    base = ord('a')
    shifted = []
    for ch in cipher_text.lower():
        if 'a' <= ch <= 'z':
            shifted.append(chr((ord(ch) - base + n) % 26 + base))
        else:
            shifted.append(ch)
    return ''.join(shifted)
# Run the challenge solver when executed as a script.
if __name__ == '__main__':
    main()
|
def check_next_num(inp, j):
    """Return the index of the last element of the consecutive (+1 step) run
    in sorted list `inp` that starts at index `j`.

    FIX: the original made a recursive call and discarded its result
    (exponential wasted work) and carried a dead `if j == 0` branch that
    returned the same value as the fall-through; a linear scan is equivalent.
    """
    i = j
    while i + 1 < len(inp) and inp[i + 1] == inp[i] + 1:
        i += 1
    return i
def solution(inp):
    """
    1. Sort inp
    2. Check to see if next in inp slist is the next value
    """
    # Collapses sorted numbers into a comma-separated string in which
    # consecutive runs of 3+ values become "start-end" ranges.
    inp.sort()
    ranges = []
    res = []
    # For every start index, find the last index of its consecutive run.
    for i in range(len(inp)):
        l = check_next_num(inp, i)
        ranges.append([i, l])
        print([i, l])  # debug output
    seen = []
    for r in ranges:
        si = r[0]  # run start index
        ei = r[1]  # run end index
        sv = inp[si]  # run start value
        ev = inp[ei]  # run end value
        # `seen` de-duplicates runs that share the same end index
        # (every suffix of a run was also recorded above).
        if ei not in seen:
            if ev == sv:
                # Single-element run: emit the value alone.
                res.append(str(ev))
                seen.append(ei)
            if (ev - sv) >= 2:
                # Run of three or more: emit "start-end".
                s = str(sv)+'-'+str(ev)
                res.append(s)
                seen.append(ei)
            elif (ev-sv) == 1:
                # Two-element run: values emitted individually; note the end
                # index is NOT marked seen here, so the partner emits itself.
                res.append(str(sv))
    return (",".join(res))
    print(ranges)  # NOTE(review): unreachable — dead code after return


# Indexes of the sample input below, 0..19.
solution([-6,-3,-2,-1,0,1,3,4,5,7,8, 9, 10,11,14,15,17,18,19,20])
"""
[0,0], [1,5], [6,13], [14,15], [17, 19]
[[0, 0], [1, 5], [2, 5], [3, 5], [4, 5], [5, 5], [6, 8], [7, 8], [8, 8], [9, 13], [10, 13], [11, 13], [12, 13], [13, 13], [14, 15], [15, 15], [16, 19]
-6,-3-1,3-5,7-11,14,15,17-20
-6,-3-1,3-5,7-11,14,15,17-20
-6,-3-1,3-5,7-11,14,15,17-20
"""
#Indexes 0 1 2 3 4 5 6 7 8 9
# solution([-3,-2,-1,2,10,15,16,18,19,20])
"""
[0, 2], [3, 3], [4, 4], [5, 5], [6, 6], [7, 9]
[[0, 2], [3, 3], [4, 4], [5, 6], [6, 6], [7, 9]]
[[0, 0], [1, 2], [2, 2], [3, 3], [4, 4], [5, 6], [6, 6], [7, 9], [8, 9], [9, 9]]
-3--1,2,10,15,16,18-20
-3--1,2,10,15,16,18-20
-3,-2,-1,2,10,15,16,18-20
"""
|
def rotate(str, d, mag):
    """Rotate string left ("L") or right ("R") by `mag` positions.

    Returns None for any other direction value.
    NOTE(review): parameter `str` shadows the builtin; kept for interface
    compatibility.
    """
    if d == "L":
        return str[mag:] + str[:mag]
    if d == "R":
        pivot = len(str) - mag
        return str[pivot:] + str[:pivot]
def checkAnagram(str1, str2):
    """Return True if str1 and str2 are anagrams of each other.

    sorted() produces equal lists exactly when the character multisets match;
    the original if/else returning literal True/False was redundant.
    """
    return sorted(str1) == sorted(str2)
def subString(s, n, ans):
    """Return True if some substring of s[:n] is an anagram of `ans`.

    Only substrings of length len(ans) can match, so slide a window of that
    exact length instead of enumerating all O(n^2) substrings and filtering
    by length, as the original did. The anagram test is inlined (sorted
    comparison) so the function is self-contained.
    """
    k = len(ans)
    if k == 0:
        return False  # original matched only non-empty substrings
    target = sorted(ans)
    for i in range(n - k + 1):
        if sorted(s[i:i + k]) == target:
            return True
    return False
# Read everything from one line: "<string> <q> <d1> <m1> <d2> <m2> ...".
# NOTE(review): `str` shadows the builtin name for the rest of the script.
str = input().split(" ")
str1 = str[0]  # the original string
str2 = str1    # working copy that gets rotated
q = int(str[1])  # number of rotation queries
ans = ""       # first character of the string after each rotation
d = list()     # rotation directions ("L"/"R")
mag = list()   # rotation magnitudes
str3 = str[2:]
# Tokens alternate: even positions are directions, odd are magnitudes.
for i in range(len(str3)):
    if (i%2==0):
        d.append(str3[i])
    else:
        mag.append(int(str3[i]))
# Alternative multi-line input form (kept from the original):
#str1 = input()
#str2 = str1
#q = int(input())
#d = list()
#mag = list()
#ans = ""
#for i in range(q):
#    d.append(input())
#    mag.append(int(input()))
# Apply each rotation, collecting the new first character each time.
for i in range(q):
    str2 = rotate(str2,d[i],mag[i])
    ans = ans + str2[0]
# Finally, check whether `ans` occurs in str1 as an anagram of a substring.
if(subString(str1,len(str1),ans)):
    print("YES")
else:
    print("NO")
|
"""
You are asked to ensure that the first and last names of people begin with a capital letter in their passports. For example, alison heck should be capitalised correctly as Alison Heck.
Given a full name, your task is to capitalize the name appropriately.
Input Format
A single line of input containing the full name, s.
Constraints
The string consists of alphanumeric characters and spaces.
Note: in a word only the first character is capitalized. Example 12abc when capitalized remains 12abc.
Output Format
Print the capitalized string, s.
Sample Input
chris alan
Sample Output
Chris Alan
"""
# Complete the solve function below.
def solve(s):
    """Capitalize the first character of every space-separated word in s.

    Words whose first character is not a letter (e.g. "12abc") are left
    unchanged — exactly str.capitalize()'s behavior. Splitting on a single
    space (not whitespace runs) preserves multiple consecutive spaces.
    Removed the dead commented-out replace() variant.
    """
    assert 0 < len(s) < 1000
    return ' '.join(word.capitalize() for word in s.split(' '))
if __name__ == '__main__':
    # Original stdin read kept for reference: s = input()
    sample = 'vyshnav mt cv df'
    print(solve(sample))
|
# Read a number and double it five times, printing each intermediate value
# (the final value is the starting number times 2**5 == 32).
number = int(input("Pick a number? "))
for _ in range(5):
    number *= 2
    print(number)
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Constants for an Azure Container Apps CLI module.

# Validation limits.
MAXIMUM_SECRET_LENGTH = 20
MAXIMUM_CONTAINER_APP_NAME_LENGTH = 40

# Polling intervals in seconds for operation status checks.
SHORT_POLLING_INTERVAL_SECS = 3
LONG_POLLING_INTERVAL_SECS = 10

# Azure resource-provider namespaces.
LOG_ANALYTICS_RP = "Microsoft.OperationalInsights"
CONTAINER_APPS_RP = "Microsoft.App"

MAX_ENV_PER_LOCATION = 2

# Well-known secret setting names for each supported auth provider.
MICROSOFT_SECRET_SETTING_NAME = "microsoft-provider-authentication-secret"
FACEBOOK_SECRET_SETTING_NAME = "facebook-provider-authentication-secret"
GITHUB_SECRET_SETTING_NAME = "github-provider-authentication-secret"
GOOGLE_SECRET_SETTING_NAME = "google-provider-authentication-secret"
MSA_SECRET_SETTING_NAME = "msa-provider-authentication-secret"
TWITTER_SECRET_SETTING_NAME = "twitter-provider-authentication-secret"
APPLE_SECRET_SETTING_NAME = "apple-provider-authentication-secret"

# Allowed enum values for auth configuration.
UNAUTHENTICATED_CLIENT_ACTION = ['RedirectToLoginPage', 'AllowAnonymous', 'RejectWith401', 'RejectWith404']
FORWARD_PROXY_CONVENTION = ['NoProxy', 'Standard', 'Custom']

# Resource type used for certificate name-availability checks.
CHECK_CERTIFICATE_NAME_AVAILABILITY_TYPE = "Microsoft.App/managedEnvironments/certificates"
|
# Author=====>>>Nipun Garg<<<=====
# Problem: given jobs, applicants, and an eligibility matrix, match
# applicants to jobs (maximum bipartite matching, Kuhn's algorithm).
def dfs(graph, applicant, visited, result, nApplicants, nJobs):
    """Try to place `applicant` on some job, recursively evicting holders.

    Returns 1 if an augmenting path was found (applicant placed), else 0.
    `visited` marks jobs already tried on this augmenting-path search;
    `result[j]` is the applicant currently holding job j, or -1.
    """
    for job in range(nJobs):
        if graph[applicant][job] == 1 and not visited[job]:
            visited[job] = 1
            holder = result[job]
            # Job is free, or its holder can be moved to another job.
            if holder < 0 or dfs(graph, holder, visited, result, nApplicants, nJobs):
                result[job] = applicant
                return 1
    return 0
#Return maximum people that can get the job
def bipartite(graph, nApplicants, nJobs):
    """Return the size of the maximum bipartite matching.

    graph[i][j] == 1 iff applicant i is eligible for job j.

    BUG FIX: the per-search `visited` list was sized by nApplicants but is
    indexed by job inside dfs(), raising IndexError whenever
    nJobs > nApplicants. It must be sized by nJobs.
    """
    result = [-1] * nJobs  # result[j] = applicant holding job j, or -1
    matched = 0
    for applicant in range(nApplicants):
        visited = [0] * nJobs  # jobs already tried in this augmenting search
        if dfs(graph, applicant, visited, result, nApplicants, nJobs):
            matched += 1
    return matched
#Main function
if __name__ == '__main__':
    # FIX: in Python 3, input() returns str — the original passed raw strings
    # to range() (TypeError) and stored '1'/'0' strings in the matrix, which
    # would never equal the integer 1 inside dfs(). Convert everything to int.
    nApplicants = int(input("Enter the number of applicants : "))
    nJobs = int(input("Enter the number of jobs : "))
    graph = []
    # Read the eligibility matrix: graph[i][j] == 1 iff applicant i can do job j.
    for i in range(nApplicants):
        print("Enter the status(1/0) for applicant - "+str(i+1)+" for "+str(nJobs)+" Jobs!")
        temp = []
        for j in range(nJobs):
            temp.append(int(input("For job - "+str(j+1)+" : ")))
        graph.append(temp)
    print("Maximum applicants that can have job is : "+str(bipartite(graph, nApplicants, nJobs)))
|
# Read an integer and print its predecessor and successor, twice:
# first via intermediate variables, then inline ("Melhorado" = improved).
print('<=== Antecessor e Sucessor ===>')
numero = int(input('Digite um número:'))
antecessor = numero - 1
sucessor = numero + 1
print('O número que antecede {} é {}.'.format(numero, antecessor))
print('O número que sucede {} é {}.'.format(numero, sucessor))
print('')
print('Melhorado')
print('O número que antecede {} é {}'.format(numero, numero - 1))
print('O número que sucede {} é {}'.format(numero, numero + 1))
|
# 026 - First and last occurrence in a string.
# Reads a phrase, uppercases and trims it, then reports how many times the
# letter A appears and its first/last 1-based positions.
frase = str(input('Digite uma frase: ')).upper().strip()
print(f'A letra A aparece {frase.count("A")}')
# FIX: typo in the user-facing message ("apareeceu" -> "apareceu").
print(f'A primeira letra A apareceu na posicao {frase.find("A")+1}')
print(f'A ultima letra A aparece na posicao {frase.rfind("A")+1}')
|
# -*- coding: utf8 -*-
# @author: yinan
# @time: 2018-07-04 17:52
# @filename: response.py
def return_message(data, msg, code):
    """Wrap an API payload in the standard response envelope dict."""
    response = {
        "code": code,
        "data": data,
        "msg": msg,
    }
    return response
|
# Definition for a binary tree node.
# class TreeNode:
#     def __init__(self, val=0, left=None, right=None):
#         self.val = val
#         self.left = left
#         self.right = right
class Solution:
    def tree2str(self, t: TreeNode) -> str:
        """Preorder-serialize a binary tree as "val(left)(right)".

        Parentheses for a missing right child are omitted; a missing left
        child is rendered as "()" only when a right child exists, so the
        string stays unambiguous.
        """
        if t is None:
            return ""
        pieces = [str(t.val)]
        if t.left is not None or t.right is not None:
            pieces.append("(" + self.tree2str(t.left) + ")")
        if t.right is not None:
            pieces.append("(" + self.tree2str(t.right) + ")")
        return "".join(pieces)
|
# get distinct characters and their count in a String
# Reads a string and prints, for A-Z then a-z, how often each letter occurs.
# FIX: the original duplicated the same hand-rolled counting loop for the
# uppercase and lowercase ranges; one pass over both ranges with str.count
# is equivalent and removes the duplication.
# NOTE(review): `string` shadows the stdlib module name.
string = input("Enter String: ")
for code in list(range(65, 91)) + list(range(97, 123)):
    letter = chr(code)
    c = string.count(letter)
    if c > 0:
        print("", letter, " is ", c, " times.")
|
class SearchPath:
    """Immutable chain of (label, probability) steps explored during search."""

    def __init__(self, path=None):
        self._path = [] if path is None else path

    def branch_off(self, label, p):
        """Return a new SearchPath extended by one (label, p) step."""
        return SearchPath(self._path + [(label, p)])

    @property
    def labels(self):
        """Labels along the path, in order."""
        return [step[0] for step in self._path]

    @property
    def likelihood(self):
        """Product of the step probabilities; 0 for an empty path."""
        if not self._path:
            return 0
        total = 1
        for _, p in self._path:
            total *= p
        return total
class PathBuilder:
    """Maintains the current set of candidate SearchPaths (the beam)."""

    def __init__(self, roots):
        # One single-step path per (label, probability) root.
        self._paths = [SearchPath().branch_off(label, p) for label, p in roots]

    def make_step(self, pmfs):
        """Extend every path with every label of its pmf; keep only the best.

        Raises WrongNumberOfPMFsException if pmfs does not match the beam.
        """
        if len(pmfs) != len(self._paths):
            raise WrongNumberOfPMFsException()
        candidates = []
        for path, pmf in zip(self._paths, pmfs):
            for label, p in enumerate(pmf):
                candidates.append(path.branch_off(label, p))
        self._paths = self._best_paths(candidates, limit=len(pmfs))

    def _best_paths(self, paths, limit):
        """Top `limit` paths ranked by likelihood, best first."""
        ranked = sorted(paths, key=lambda path: path.likelihood, reverse=True)
        return ranked[:limit]

    @property
    def best_path(self):
        """Label sequence of the single most likely path."""
        return self._best_paths(self._paths, limit=1)[0].labels

    @property
    def paths(self):
        """Label sequences for every path currently in the beam."""
        return [path.labels for path in self._paths]
class WrongNumberOfPMFsException(Exception):
    """Raised by PathBuilder.make_step when the number of PMFs passed in
    does not match the number of paths currently in the beam."""
    pass
class StatesKeeper:
    """Maps label paths to decoder states, with a fallback initial state."""

    def __init__(self, initial_state):
        self._paths = {}
        self._initial_state = initial_state

    def store(self, path, state):
        """Remember `state` for `path` (made hashable via tuple())."""
        self._paths[tuple(path)] = state

    def retrieve(self, path):
        """Stored state for `path`; the initial state for an empty path."""
        return self._paths[tuple(path)] if path else self._initial_state
class BaseBeamSearch:
    """Template for beam-search sequence generation.

    Subclasses implement get_initial_state() and decode_next().
    """

    def __init__(self, start_of_seq, end_of_seq, beam_size=3, max_len=150):
        self._sos = start_of_seq
        self._eos = end_of_seq
        self._beam_size = beam_size
        self._max_len = max_len

    def _without_last(self, path):
        """Path minus its final element."""
        return path[:-1]

    def _remove_special(self, path):
        """Strip the leading SOS token and a trailing EOS token, if present."""
        trimmed = path[1:]
        if trimmed[-1] == self._eos:
            trimmed = self._without_last(trimmed)
        return trimmed

    def _split_path(self, path):
        """Split a path into (prefix, last_label)."""
        return self._without_last(path), path[-1]

    def generate_sequence(self):
        """Run beam search until EOS or max_len; return the best label sequence."""
        keeper = StatesKeeper(self.get_initial_state())
        builder = PathBuilder([(self._sos, 1.0)])
        for _ in range(self._max_len):
            pmfs = []
            for path in builder.paths:
                prefix, label = self._split_path(path)
                # Decode one step from the state reached at `prefix`.
                pmf, state = self.decode_next(label, keeper.retrieve(prefix))
                keeper.store(path, state)
                pmfs.append(pmf)
            builder.make_step(pmfs)
            if builder.best_path[-1] == self._eos:
                break
        return self._remove_special(builder.best_path)

    def get_initial_state(self):
        """Subclass hook: return the initial decoder state."""
        raise NotImplementedError

    def decode_next(self, prev_y, prev_state):
        """Subclass hook: return (pmf, next_state) for the next step."""
        raise NotImplementedError
class BeamCandidate:
    """One beam-search hypothesis: the sequence so far, the character that
    produced it, its likelihood and the decoder state reached."""

    def __init__(self, full_sequence, character, likelihood, state):
        self.full_sequence = full_sequence
        self.character = character
        self.likelihood = likelihood
        self.state = state

    def branch_off(self, character, likelihood, state):
        """Return a new candidate that extends this one by `character`."""
        return BeamCandidate(self.full_sequence + character,
                             character, likelihood, state)
# todo: consider better implementation for StatesKeeper
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
# SKR03
# =====
# Dieses Modul bietet Ihnen einen deutschen Kontenplan basierend auf dem SKR03.
# Gemäss der aktuellen Einstellungen ist die Firma nicht Umsatzsteuerpflichtig.
# Diese Grundeinstellung ist sehr einfach zu ändern und bedarf in der Regel
# grundsätzlich eine initiale Zuweisung von Steuerkonten zu Produkten und / oder
# Sachkonten oder zu Partnern.
# Die Umsatzsteuern (voller Steuersatz, reduzierte Steuer und steuerfrei)
# sollten bei den Produktstammdaten hinterlegt werden (in Abhängigkeit der
# Steuervorschriften). Die Zuordnung erfolgt auf dem Aktenreiter Finanzbuchhaltung
# (Kategorie: Umsatzsteuer).
# Die Vorsteuern (voller Steuersatz, reduzierte Steuer und steuerfrei)
# sollten ebenso bei den Produktstammdaten hinterlegt werden (in Abhängigkeit
# der Steuervorschriften). Die Zuordnung erfolgt auf dem Aktenreiter
# Finanzbuchhaltung (Kategorie: Vorsteuer).
# Die Zuordnung der Steuern für Ein- und Ausfuhren aus EU Ländern, sowie auch
# für den Ein- und Verkauf aus und in Drittländer sollten beim Partner
# (Lieferant/Kunde)hinterlegt werden (in Anhängigkeit vom Herkunftsland
# des Lieferanten/Kunden). Die Zuordnung beim Kunden ist 'höherwertig' als
# die Zuordnung bei Produkten und überschreibt diese im Einzelfall.
#
# Zur Vereinfachung der Steuerausweise und Buchung bei Auslandsgeschäften
# erlaubt Odoo ein generelles Mapping von Steuerausweis und Steuerkonten
# (z.B. Zuordnung 'Umsatzsteuer 19%' zu 'steuerfreie Einfuhren aus der EU')
# zwecks Zuordnung dieses Mappings zum ausländischen Partner (Kunde/Lieferant).
# Die Rechnungsbuchung beim Einkauf bewirkt folgendes:
# Die Steuerbemessungsgrundlage (exklusive Steuer) wird ausgewiesen bei den
# jeweiligen Kategorien für den Vorsteuer Steuermessbetrag (z.B. Vorsteuer
# Steuermessbetrag Voller Steuersatz 19%).
# Der Steuerbetrag erscheint unter der Kategorie 'Vorsteuern' (z.B. Vorsteuer
# 19%). Durch multidimensionale Hierarchien können verschiedene Positionen
# zusammengefasst werden und dann in Form eines Reports ausgegeben werden.
#
# Die Rechnungsbuchung beim Verkauf bewirkt folgendes:
# Die Steuerbemessungsgrundlage (exklusive Steuer) wird ausgewiesen bei den
# jeweiligen Kategorien für den Umsatzsteuer Steuermessbetrag
# (z.B. Umsatzsteuer Steuermessbetrag Voller Steuersatz 19%).
# Der Steuerbetrag erscheint unter der Kategorie 'Umsatzsteuer'
# (z.B. Umsatzsteuer 19%). Durch multidimensionale Hierarchien können
# verschiedene Positionen zusammengefasst werden.
# Die zugewiesenen Steuerausweise können auf Ebene der einzelnen
# Rechnung (Eingangs- und Ausgangsrechnung) nachvollzogen werden,
# und dort gegebenenfalls angepasst werden.
# Rechnungsgutschriften führen zu einer Korrektur (Gegenposition)
# der Steuerbuchung, in Form einer spiegelbildlichen Buchung.
# Odoo module manifest for the German SKR03 chart-of-accounts localization.
# (A bare dict literal is the expected format for an Odoo manifest file.)
{
    'name': 'Deutschland SKR03 - Accounting',
    'version': '2.0',
    'author': 'openbig.org',
    'website': 'http://www.openbig.org',
    'category': 'Localization',
    'description': """
Dieses Modul beinhaltet einen deutschen Kontenrahmen basierend auf dem SKR03.
==============================================================================
German accounting chart and localization.
""",
    'depends': ['l10n_de'],
    # Data files loaded on installation: chart of accounts, tax/fiscal
    # positions, then the chart template.
    'data': [
        'data/l10n_de_skr03_chart_data.xml',
        'data/account_tax_fiscal_position_data.xml',
        'data/account_chart_template_data.yml',
    ],
}
|
class Bot:
    '''Abstract interface for a game-playing bot.

    state - state of the game
    returns a move
    '''

    def move(self, state, symbol):
        # Subclasses must choose and return a move for `symbol` in `state`.
        raise NotImplementedError('Abstractaaa')

    def get_name(self):
        # Subclasses must return a display name for the bot.
        raise NotImplementedError('Abstractaaa')
|
__author__ = 'shukkkur'

'''
https://codeforces.com/problemset/problem/581/A
A. Vasya the Hipster

One red plus one blue sock makes a "hipster" day; leftover socks of the
single remaining colour are then worn in same-coloured pairs.
'''

red, blue = map(int, input().split())
# Days wearing one sock of each colour.
hipster_days = min(red, blue)
# Whatever remains is all one colour; pair those up.
leftover = red + blue - 2 * hipster_days
print(hipster_days, leftover // 2)
|
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")

# Macro registering the external dnsmasq source archive for this workspace.
def dnsmasq_dependencies():
    http_archive(
        name = "dnsmasq",
        # NOTE(review): plain-http URL; archive integrity is still
        # protected by the sha256 pin below.
        urls = ["http://www.thekelleys.org.uk/dnsmasq/dnsmasq-2.78.tar.xz"],
        sha256 = "89949f438c74b0c7543f06689c319484bd126cc4b1f8c745c742ab397681252b",
        # BUILD file providing targets for the unpacked sources.
        build_file = "//dnsmasq:BUILD.import",
    )
|
# Advent of Code 2020 day 3: read the tree map, one grid row per line.
# Rows keep their trailing newline; part1 compensates for it.
file = open("input")
lines = file.readlines()
pattern_len = len(lines[0])  # grid width + 1 for the trailing '\n'
def part1(lines, right, down):
    """Count '#' cells hit descending the grid with slope (right, down).

    The pattern repeats horizontally, so the column wraps modulo the grid
    width (each row's trailing newline is excluded from the width).
    The starting cell (0, 0) is never counted.
    """
    width = len(lines[0]) - 1  # drop the trailing newline
    trees = 0
    col = 0
    for row in range(down, len(lines), down):
        col += right
        if lines[row][col % width] == "#":
            trees += 1
    return trees
def part2(lines):
    """Product of the tree counts over the five required slopes."""
    total = 1
    for right, down in ((1, 1), (3, 1), (5, 1), (7, 1), (1, 2)):
        total *= part1(lines, right, down)
    return total
# Print the answers for both puzzle parts.
print("Part 1: " + str(part1(lines, 3, 1)))
print("Part 2: " + str(part2(lines)))
|
"""
Expect Utility
--------------
Regardless of comment type, all tests in this file will be detected. We will
demonstrate that expect can handle several edge cases, accommodate regular
doctest formats, and detect inline tests.
>>> x = 4
>>> x
4
>>> 3+5 # comments
8
>>> 6+x # wrong!
5
>>> is_proper('()')
True
"""
# > () + () => ()
# > [] + [] => []
def is_proper(str):
    """Tests if a set of parentheses are properly closed.

    >>> is_proper('()(())') # regular doctests
    True
    >>> is_proper('(()()') # too many open parens
    False
    >>> is_proper('())') # too many close parens
    False
    >>> is_proper('())(') # parens don't match
    False
    """
    # NOTE(review): `str` shadows the builtin; left as-is because this file
    # (and its inline "# >" comments) is a fixture for the expect tool.
    try:
        parens = []
        for c in str:
            if c == '(':
                parens.append(c) # > [1, 2, 3].pop() => 3
            else:
                parens.pop() # > [1, 2, 3].pop(0) => 3
        # Proper iff every opener was matched and popped.
        return len(parens) == 0
    except IndexError:
        # pop() on an empty list: a ')' appeared with nothing to close.
        return False
|
# Counting sort for values in [0, 100): tally, then emit in order.
n = int(input())  # declared length (consumed but not otherwise needed)
values = [int(tok) for tok in input().strip().split(' ')]
counts = [0] * 100
for v in values:
    counts[v] += 1
# Emit the sorted list as a single line of space-separated elements.
out = []
for v in range(0, 100):
    out.extend([str(v)] * counts[v])
print(' '.join(out))
|
# DB column name -> label of its data-entry widget.
full_dict = {
    'daterecieved': 'entry daterecieved',
    'poploadslip': 'entry poploadslip',
    'count': 'entry 1',
    'tm9_ticket': 'entry tm9_ticket',
    'disposition_fmanum': 'entry disposition_fmanum',
    'owner': 'entry ownerName',
    'haulingcontractor': 'entry hauled by',
    'numpcsreceived': 'entry num of pieces',
    'blocknum': 'entry Block Number'
}
# Column order as stored in the database.
# NOTE(review): 'sampleloads' has no entry in full_dict and
# 'numpcsreceived' is missing here — confirm both are intentional.
DB_list = ['daterecieved',
           'poploadslip',
           'count',
           'sampleloads',
           'tm9_ticket',
           'owner',
           'disposition_fmanum',
           'blocknum',
           'haulingcontractor',
           ]
# Indices into DB_list selecting the sampled columns (skips index 3,
# 'sampleloads', which full_dict does not know about).
indxSample = [0, 1, 2, 4, 6]
keys = [DB_list[i] for i in indxSample]
# Sub-dictionary of full_dict restricted to the sampled keys.
A = {x: full_dict[x] for x in keys}
print(A)
|
"""smp_base/__init__.py
.. todo::
remove actinf tag from base learners
.. todo::
abstract class from andi / smp_control to check for api conformity?
.. todo::
    for module in rlspy igmm kohonen otl ; do git submodule ...
"""
|
# HEAD
# Python Functions - *args
# DESCRIPTION
# Describes
# capturing all arguments as *args (tuple)
#
# RESOURCES
#
# Arguments (any number during invocation) can also be
# caught as a sequence of arguments - tuple using *args
# Order does matter for unnamed arguments list and makes for
# index of argument in list even with *args
# # # Note the * above when passing as argument
# sequence to function
# Can be named args or any name; it does not matter
def printUnnamedArgs(*args):
    """Print the whole args tuple, then each (index, value) pair."""
    # Inside the function, args is an ordinary tuple (no * when accessing).
    print("3. printUnnamedArgs", args)
    for indexed_pair in enumerate(args):
        print(indexed_pair)
# Can pass any number of arguments below now
# Follows order of arguments
# Argument's index is the order of arguments passed
# Demo call: the two lists arrive as args[0] and args[1].
printUnnamedArgs([1, 2, 3], [4, 5, 6])
|
class Luhn:
    """Luhn checksum validation of card-style numbers.

    Spaces are ignored; the number is valid when it has more than one
    digit and its Luhn checksum is divisible by 10.
    """

    def __init__(self, card_num: str):
        # Work right-to-left: reverse once, then slice by position parity.
        digits = card_num.replace(' ', '')[::-1]
        self._reversed_card_num = digits
        self._even_digits = digits[1::2]  # every second digit: these get doubled
        self._odd_digits = digits[::2]

    def valid(self) -> bool:
        """True iff the stripped input is all digits, longer than one
        character, and the checksum is a multiple of 10."""
        stripped = self._reversed_card_num
        if not (stripped.isnumeric() and len(stripped) > 1):
            return False
        return self._sum_card() % 10 == 0

    def _sum_card(self) -> int:
        # Double the even-position digits, subtracting 9 on overflow, and
        # add the untouched odd-position digits.
        doubled_total = 0
        for ch in self._even_digits:
            twice = int(ch) * 2
            doubled_total += twice - 9 if twice > 9 else twice
        return doubled_total + sum(int(ch) for ch in self._odd_digits)
|
"""
Problem name: ThePalindrome
Class: SRM 428, Division II Level One
Description: https://community.topcoder.com/stat?c=problem_statement&pm=10182
"""
def solve(args):
    """Length of the shortest string of which `args` is both a prefix and
    a suffix of its infinite palindromic repetition.

    Equivalent to the original scan: find the smallest index at which the
    remaining suffix is a palindrome; the answer is len(s) + that index.
    (Matching s[c:] against reverse[:len-c] is exactly the palindromic
    suffix test.)
    """
    s = args
    for start in range(len(s)):
        tail = s[start:]
        if tail == tail[::-1]:
            return len(s) + start
if __name__ == "__main__":
    # (input, expected answer) pairs from the problem statement.
    test_cases = [("abab", 5),
                  ("abacaba", 7),
                  ("qwerty", 11),
                  ("abdfhdyrbdbsdfghjkllkjhgfds", 38),
                  ("nnnnoqqpnnpnnpppnopopnqnnpqqpnnnnnppnpnqnnnnnp", 91)
                  ]
    for index, case in enumerate(test_cases):
        output = solve(case[0])
        assert output == case[1], 'Case {} failed: {} != {}'.format(
            index, output, case[1])
    else:
        # for/else: runs once the loop finishes (no break above), so this
        # prints only when every assertion passed.
        print('All tests OK')
|
"""Kata url: https://www.codewars.com/kata/51fc12de24a9d8cb0e000001."""
def valid_ISBN10(isbn: str) -> bool:
    """Validate an ISBN-10 string.

    Rules: exactly ten characters; the first nine are digits; the last is
    a digit or 'X' (worth 10); and sum(value * 1-based position) must be
    divisible by 11.
    """
    if len(isbn) != 10:
        return False
    if not isbn[:-1].isdigit():
        return False
    if not (isbn[-1].isdigit() or isbn[-1] == 'X'):
        return False
    # 'X' -> 'a' so int(x, 16) yields 10 for the check character.
    # BUG FIX: the weighted sum must be reduced modulo 11 — the original
    # `not sum(...)` was False for every non-zero ISBN.
    return sum(
        int(x, 16) * (c + 1) for c, x in enumerate(
            isbn.replace('X', 'a')
        )
    ) % 11 == 0
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Apr 27 11:20:36 2019
@author: melzanaty
"""
####################################################
# Quiz: Check for Prime Numbers
####################################################
# '''
# Write code to check if the numbers provided in the list check_prime are prime numbers.
# 1. If the numbers are prime, the code should print "[number] is a prime number."
# 2. If the number is NOT a prime number, it should print "[number] is not a prime number",
# and a factor of that number, other than 1 and the number itself: "[factor] is a factor of [number]".
# '''
check_prime = [3, 26, 39, 51, 53, 57, 79, 85]

# Check each candidate for a factor in [2, num). The for/else idiom prints
# "prime" only when the inner loop finishes without breaking — this also
# classifies num == 2 correctly, which the old `i == num - 1` test could
# never reach (its range is empty).
for num in check_prime:
    for i in range(2, num):
        # A factor means the number is composite; report it and stop.
        if (num % i) == 0:
            print("{} is NOT a prime number, because {} is a factor of {}".format(num, i, num))
            break
    else:
        print("{} IS a prime number".format(num))
|
# Copyright 2019-present, GraphQL Foundation
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
def title(s):
    '''Capitalize the first character of s.

    Uses s[:1] so an empty string passes through unchanged instead of
    raising IndexError; the rest of the string is untouched.
    '''
    return s[:1].capitalize() + s[1:]
def camel(s):
    '''Lowercase the first character of s.

    Uses s[:1] so an empty string passes through unchanged instead of
    raising IndexError; the rest of the string is untouched.
    '''
    return s[:1].lower() + s[1:]
def snake(s):
    '''Convert from title or camelCase to snake_case.

    Each uppercase letter after the first character becomes an underscore
    followed by its lowercase form.
    '''
    if len(s) < 2:
        return s.lower()
    pieces = [s[0].lower()]
    for ch in s[1:]:
        if ch.isupper():
            pieces.append('_')
            pieces.append(ch.lower())
        else:
            pieces.append(ch)
    return ''.join(pieces)
# GraphQL AST kind renames applied before emitting names, so the output
# matches what the Tarantool cartridge GraphQL executor expects.
changes = {
    'OperationDefinition': 'Operation',
    'IntValue': 'Int',
    'FloatValue': 'Float',
    'StringValue': 'String',
    'BooleanValue': 'Boolean',
    'VariableValue': 'Variable',
    'TypeCondition': 'NamedType',
    'EnumValue': 'Enum',
    'ListValue': 'List',
    'ObjectValue': 'InputObject'
}
def short(s):
    '''Make some substitution to get work default Tarantool cartridge graphQL query executor.

    Applies the `changes` rename (if any) and lowercases the first letter.
    '''
    # Single O(1) dict lookup instead of scanning changes.items() — the
    # old loop compared s against every key on every call.
    s = changes.get(s, s)
    return s[0].lower() + s[1:]
|
class Solution:
    """Generate the A-bit reflected Gray code sequence as decimal ints."""

    def binary_to_decimal(self, n):
        # Interpret a binary string as an integer.
        return int(n, 2)

    def grayCode(self, A):
        """Return the A-bit Gray code (reflect-and-prefix construction):
        mirror the previous sequence, prefix the original half with '0'
        and the mirrored half with '1', repeat A-1 times."""
        codes = [0, 1]
        if A == 1:
            return codes
        for _ in range(1, A):
            mirrored = codes[::-1]
            codes = (["0" + str(c) for c in codes]
                     + ["1" + str(c) for c in mirrored])
        return [self.binary_to_decimal(c) for c in codes]
# Demo: prints the full 16-bit Gray code sequence (65536 entries — large!).
number = 16
s = Solution()
ss = s.grayCode(number)
print(ss)
|
def include_in_html(content_to_include, input_includename, html_filepath):
    """Splice `content_to_include` into `html_filepath`, replacing the
    body of the section delimited by the marker comments
    `<!-- #include NAME -->` ... `<!-- #end ... -->` whose NAME equals
    `input_includename`. The file is rewritten in place.
    """
    with open(html_filepath, "r") as f:
        line_list = f.readlines()
    res = []
    includename = None   # name of the #include section we are inside, if targeted
    initial_spaces = 0   # indent of the opening marker; reused for new content
    for line in line_list:
        line = line.strip("\n")
        # Opening marker (with or without a space after "<!--").
        if line.strip(" ")[:14] == "<!-- #include " or line.strip(" ")[:13] == "<!--#include ":
            if includename != None:
                print("Error, includename != None in new '<!-- #include ' section.")
            res.append(line)
            initial_spaces = line.split("<!-- #include")[0].count(" ")
            includename = line.split("#include ")[-1]
            includename = includename.split("-->")[0].strip(" ")
            # Only track the section we were asked to replace; while
            # includename stays set, old body lines are dropped below.
            if includename != input_includename:
                includename = None
            continue
        elif line.strip(" ")[:9] == "<!-- #end":
            if includename == input_includename:
                # Insert the replacement just before the end marker,
                # indented two spaces past the opening marker.
                lines_to_append = content_to_include.split("\n")
                for el in lines_to_append:
                    if el == "":
                        continue
                    res.append(" "*(2+initial_spaces) + el)
                #res.append(content_to_include)
                includename = None
        # Outside the targeted section lines are kept verbatim (this also
        # keeps the end-marker line, since includename was reset above).
        if includename == None:
            res.append(line)
    with open(html_filepath, "w") as f:
        print("\n".join(res), file=f, end="")
|
# Copyright 2017 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"Defaults for rules_typescript repository not meant to be used downstream"
# Re-import the public rules under private names so the wrappers below can
# re-export them with internal defaults.
load(
    "@build_bazel_rules_typescript//:defs.bzl",
    _karma_web_test = "karma_web_test",
    _karma_web_test_suite = "karma_web_test_suite",
    _ts_library = "ts_library",
    _ts_web_test = "ts_web_test",
    _ts_web_test_suite = "ts_web_test_suite",
)

# We can't use the defaults for ts_library compiler and ts_web_test_suite karma
# internally because the defaults are .js dependencies on the npm packages that are
# published and internally we are building the things themselves to publish to npm
INTERNAL_TS_LIBRARY_COMPILER = "@build_bazel_rules_typescript//internal:tsc_wrapped_bin"
INTERNAL_KARMA_BIN = "@build_bazel_rules_typescript//internal/karma:karma_bin"
# Thin wrappers re-exporting each rule with the internally-built tools as
# its default toolchain; all other attributes pass through unchanged.
def karma_web_test(karma = INTERNAL_KARMA_BIN, **kwargs):
    _karma_web_test(karma = karma, **kwargs)

def karma_web_test_suite(karma = INTERNAL_KARMA_BIN, **kwargs):
    _karma_web_test_suite(karma = karma, **kwargs)

def ts_library(compiler = INTERNAL_TS_LIBRARY_COMPILER, **kwargs):
    _ts_library(compiler = compiler, **kwargs)

def ts_web_test(karma = INTERNAL_KARMA_BIN, **kwargs):
    _ts_web_test(karma = karma, **kwargs)

def ts_web_test_suite(karma = INTERNAL_KARMA_BIN, **kwargs):
    _ts_web_test_suite(karma = karma, **kwargs)
|
class Simple(object):
    """Small demo class: one constructor argument plus a fixed attribute."""

    def __init__(self, x):
        self.x = x
        self.y = 6  # fixed; never varied in the visible code

    def get_x(self):
        # NOTE(review): Java-style getter; plain attribute access (obj.x)
        # is idiomatic Python, but the method is kept for existing callers.
        return self.x
class WithCollection(object):
    """Demo class holding an empty list and dict created per instance."""

    def __init__(self):
        self.l = list()
        self.d = dict()

    def get_l(self):
        # NOTE(review): getter kept for existing callers; obj.l is idiomatic.
        return self.l
|
""" Once in the "ready" state, Huntsman has been initialized successfully and it is safe. The goal
of the ready state is to decide which of the following states to enter next:
- parking
- coarse_focusing
- scheduling
- twilight_flat_fielding
"""
def on_enter(event_data):
    """Choose Huntsman's next state from the "ready" state.

    Priority: coarse_focusing (dark enough and focus required), then
    scheduling (dark enough to observe), otherwise twilight flats,
    scheduling, or parking depending on darkness and time of night.
    event_data.model is the running POCS instance.
    """
    pocs = event_data.model
    # Check if we need to focus.
    if pocs.is_dark(horizon='focus') and pocs.observatory.coarse_focus_required:
        pocs.say("Going to coarse focus the cameras.")
        pocs.next_state = 'coarse_focusing'
    # Check if we should go straight to observing
    elif pocs.is_dark(horizon='observe'):
        pocs.say("It's already dark so going straight to scheduling.")
        pocs.next_state = 'scheduling'
    # Don't need to focus, not dark enough to observe
    else:
        if pocs.observatory.is_past_midnight:
            # Morning side of the night: flats only while within twilight.
            if pocs.is_dark(horizon="twilight_max"):
                pocs.say("Going to take morning flats.")
                pocs.next_state = 'twilight_flat_fielding'
            else:
                # Too bright for morning flats, go to parking
                pocs.say("Too bright for morning flats, going to park.")
                pocs.next_state = 'parking'
        else:
            if pocs.is_dark(horizon='focus'):
                # Evening, don't need to focus but too dark for twilight flats
                pocs.say("Too dark for evening flats, going to scheduling.")
                pocs.next_state = 'scheduling'
            else:
                pocs.say("Going to take evening flats.")
                pocs.next_state = 'twilight_flat_fielding'
|
# Advent of Code 2020 day 21 (part 1): count appearances of ingredients
# that cannot possibly contain any listed allergen.
s = open('input.txt','r').read()
s = [k for k in s.split("\n")]
aller = {}  # allergen -> set of ingredients that could still contain it
count = {}  # ingredient -> total number of appearances
for line in s:
    allergens = line.split("contains ")[1].split(", ")
    allergens[-1] = allergens[-1][:-1]  # drop the trailing ')'
    ing = line.split(" (")[0].split(" ")
    for i in ing:
        count[i] = 1 if i not in count else count[i] + 1
    for allergen in allergens:
        # Candidates for an allergen = intersection of the ingredient
        # lists of every line that mentions the allergen.
        if allergen not in aller:
            aller[allergen] = set(ing)
        else:
            aller[allergen] = aller[allergen].intersection(set(ing))
used = set()
# Repeatedly pin allergens whose candidate set is down to one ingredient,
# removing pinned ingredients from all other candidate sets.
while True:
    found = False
    for allergen in aller:
        aller[allergen] = aller[allergen].difference(used)
        if len(aller[allergen]) == 1:
            used.add(list(aller[allergen])[0])
            found = True
            break
    if not found:break
ans = 0
# Sum the appearances of ingredients never pinned to any allergen.
for x in count:
    if x not in used:
        ans += count[x]
        #print(x,count[x])
print(ans)
|
# For each of t test cases, print the digits of the number reversed
# (int() conversion drops any leading zeros of the reversal).
cases = int(input())
for _ in range(cases):
    digits = input()
    print(int(digits[::-1]))
|
# Read "course : student" lines until "end", then report each course with
# its enrolment count and student list (insertion order preserved).
courses = {}
while True:
    command = input()
    if command != "end":
        command = command.split(" : ")
        # O(1) dict membership instead of the old linear scan over keys.
        if command[0] not in courses:
            courses[command[0]] = [command[1]]
        else:
            courses[command[0]].append(command[1])
    else:
        for j in courses:
            print(f"{j}: {len(courses[j])}")
            for student in courses[j]:
                print(f"-- {student}")
        break
|
#! /usr/bin/env python
# encoding: utf-8
class TimeOutError(Exception):
    """Raised when an operation exceeds its allotted time."""
    pass

class MaxRetryError(Exception):
    """Raised when an operation has been retried too many times."""
    pass

class GodError(Exception):
    """
    custom exception msg class

    Carries a human-readable message plus an HTTP-style status code
    (defaults: "Intern Error", 500).
    """
    def __init__(self, msg="Intern Error", code=500):
        self.msg = msg
        self.code = code

    def __str__(self):
        # str(err) shows just the message.
        return self.msg
|
def main():
    """Prompt for two masses (kg) and a distance (m), then print the
    Newtonian gravitational force F = G * m1 * m2 / r**2 in newtons."""
    object_a_mass = float(input("Object A mass: "))
    object_b_mass = float(input("Object B mass: "))
    distance = float(input("Distance between both: "))
    # BUG FIX: the gravitational constant is 6.67408e-11 N*m^2/kg^2.
    # The original used 6.67408 * (10**11) (exponent sign dropped),
    # inflating every result by a factor of 1e22.
    G = 6.67408e-11
    print(G * (object_a_mass * object_b_mass) / (distance ** 2))


if __name__ == '__main__':
    main()
|
# Tags used to mark nodes/materials created by this add-on so they can be
# recognised (and cleaned up) later.
PB_PACKAGE = __package__
NODE_TAG = 'p_baker_node'
MATERIAL_TAG = 'p_baker_material'
MATERIAL_TAG_VERTEX = 'p_baker_material_vertex'

# Principled BSDF input socket names considered for baking.
NODE_INPUTS = [
    'Color',
    'Subsurface',
    'Subsurface Color',
    'Metallic',
    'Specular',
    'Specular Tint',
    'Roughness',
    'Anisotropic',
    'Anisotropic Rotation',
    'Sheen',
    'Sheen Tint',
    'Clearcoat',
    'Clearcoat Roughness',
    'IOR',
    'Transmission',
    'Transmission Roughness',
    'Emission',
    'Alpha',
    'Normal',
    'Clearcoat Normal',
    'Tangent'
]

# for new material to have images nicely sorted
# (superset of NODE_INPUTS including derived maps like AO, Bump, etc.)
NODE_INPUTS_SORTED = [
    'Color',
    'Ambient Occlusion',
    'Subsurface',
    'Subsurface Radius',
    'Subsurface Color',
    'Metallic',
    'Specular',
    'Specular Tint',
    'Roughness',
    'Glossiness',
    'Anisotropic',
    'Anisotropic Rotation',
    'Sheen',
    'Sheen Tint',
    'Clearcoat',
    'Clearcoat Roughness',
    'IOR',
    'Transmission',
    'Transmission Roughness',
    'Emission',
    'Alpha',
    'Normal',
    'Clearcoat Normal',
    'Tangent',
    'Bump',
    'Displacement',
    'Diffuse',
    'Wireframe',
    'Material ID'
]

# Inputs that carry normal/tangent vectors rather than colors/scalars.
NORMAL_INPUTS = {'Normal', 'Clearcoat Normal', 'Tangent'}

# Bake-map name -> shader node type used for alpha-style bakes.
ALPHA_NODES = {
    # "Alpha":'BSDF_TRANSPARENT',
    "Translucent_Alpha": 'BSDF_TRANSLUCENT',
    "Glass_Alpha": 'BSDF_GLASS'
}

# Shader node types recognised as BSDF shaders.
BSDF_NODES = {
    'BSDF_PRINCIPLED',
    'BSDF_DIFFUSE',
    'BSDF_TOON',
    'BSDF_VELVET',
    'BSDF_GLOSSY',
    'BSDF_TRANSPARENT',
    'BSDF_TRANSLUCENT',
    'BSDF_GLASS'
}

# Blender image file_format identifier -> file extension.
IMAGE_FILE_FORMAT_ENDINGS = {
    "BMP": "bmp",
    "PNG": "png",
    "JPEG": "jpg",
    "TIFF": "tif",
    "TARGA": "tga",
    "OPEN_EXR": "exr",
}

# signs not allowed in file names or paths
NOT_ALLOWED_SIGNS = ['\\', '/', ':', '*', '?', '"', '<', '>', '|']
|
def nrange(start, stop, step=1):
    """Yield start, start+step, ... for as long as the value stays below
    stop (a generator version of range supporting any addable type)."""
    current = start
    while current < stop:
        yield current
        current += step
# NOTE(review): `profile` is injected by kernprof/line_profiler when run
# as `kernprof -l`; running this file with plain python raises NameError.
@profile
def ncall():
    # Drain a ~10^6-element nrange generator so the profiler can time it.
    for i in nrange(1,1000000):
        pass

if __name__ == "__main__":
    ncall()
|
string = "abcdefgabc"

# Explode the string into its individual characters.
string_list = list(string)
print(string_list)

# Distinct characters in alphabetical order.
string_list_no_duplicate = sorted(set(string_list))
print(string_list_no_duplicate)

# Report how often each distinct character occurs.
for letters in string_list_no_duplicate:
    print(f'{letters}, {string_list.count(letters)}')
|
# Created by sarathkaul on 14/11/19
def remove_duplicates(sentence: str) -> str:
    """
    Remove duplicate words from sentence and return them sorted.

    >>> remove_duplicates("Python is great and Java is also great")
    'Java Python also and great is'
    """
    # set() deduplicates in one step and sorted() fixes the order
    # (uppercase letters sort before lowercase — hence 'Java' first).
    return " ".join(sorted(set(sentence.split(" "))))
if __name__ == "__main__":
    # NOTE(review): "INPUT_SENTENCE" is a literal placeholder, not a
    # variable — replace with a real sentence (or input()) to demo.
    print(remove_duplicates("INPUT_SENTENCE"))
|
def load(task_id, file_id, cmds):
    """Agent 'load' task handler: download a module's code and register it
    as a new dynamic command, then queue a success response.

    task_id: id of the tasking message being answered.
    file_id: id of the payload file fetched via reverse_upload.
    cmds: name under which the module is registered in dynfs.
    NOTE(review): reverse_upload, agent, dynfs, encrypt_code and the
    `responses` global are defined elsewhere in this agent — their exact
    contracts are assumed here.
    """
    global responses
    code = reverse_upload(task_id, file_id)
    name = cmds
    # Store the module encrypted when an encryption key is configured.
    if agent.get_Encryption_key() == "":
        dynfs[name] = code
    else:
        dynfs[name] = encrypt_code(code)
    # Response payload telling the server the command was added.
    response = {
        'task_id': task_id,
        "user_output": "Module successfully added",
        'commands': [
            {
                "action": "add",
                "cmd": name
            }
        ],
        'completed': True
    }
    responses.append(response)
    print("\t- Load Done")
    return
|
# For each test case: print a 1-based index of a maximal element that has
# a strictly smaller neighbour, or -1 when all elements are equal.
for _ in range(int(input())):
    n = int(input())  # declared length (consumed; list length is used instead)
    heights = [int(tok) for tok in input().split()]
    tallest = max(heights)
    if heights.count(tallest) == len(heights):
        print(-1)
        continue
    answer = -1
    # Pass 1: maximal elements whose LEFT neighbour is smaller.
    for idx in range(1, len(heights)):
        if heights[idx] == tallest and heights[idx] > heights[idx - 1]:
            answer = idx + 1
    # Pass 2: maximal elements whose RIGHT neighbour is smaller; runs
    # second so it takes precedence, matching the original's output.
    for idx in range(len(heights) - 1):
        if heights[idx] == tallest and heights[idx] > heights[idx + 1]:
            answer = idx + 1
    print(answer)
|
"""
Give Steps to Sort a List
Given a shuffled list l, return a sequence of transpositions which sorts the list (as in sorted(l)).
A transposition is a pair of indices (i, j) representing that l[i] and l[j] be swapped.
Specifically, the output is a list of transpositions to be applied. Transpositions are applied as in:
def apply_transpositions(l, swaps):
for i, j in swaps:
l[i], l[j] = l[j], l[i]
return l
Examples
sorting_steps([5, -5]) ➞ [(0, 1)]
# Swap first and second elements.
sorting_steps([4, 3, 2, 1]) ➞ [(0, 3), (1, 2)] or even [(0, 1), (1, 2), (2, 3), (0, 1), (1, 2), (0, 1)]
sorting_steps([6, 6]) ➞ []
Notes
Output is not unique! A given list may be sorted with varying numbers of transpositions stemming from various sorting techniques.
You need only produce output which works. (This gives the problem algorithimic freedom!)
"""
def sorting_steps(lst):
    """Return a list of transpositions (i, j) that sorts lst.

    Selection/bubble hybrid: for each position i, swap in any smaller
    later element, recording each swap. BUG FIX: the original aliased the
    input (`a = (lst)` is not a copy) and sorted the caller's list in
    place as a side effect; this version works on a copy.
    """
    work = list(lst)
    target = sorted(work)
    swaps = []
    for i in range(len(work)):
        for j in range(i + 1, len(work)):
            if work[i] > work[j]:
                work[j], work[i] = work[i], work[j]
                swaps.append((i, j))
        # Early exit once sorted; further passes could add no swaps anyway.
        if work == target:
            return swaps
    return swaps
# Manual spot-checks (result discarded; see the docstring for expectations).
#sorting_steps([6, 6]) #➞ []
#sorting_steps([5, -5]) #➞ [(0, 1)]
sorting_steps([4, 3, 2, 1]) #➞ [(0, 3), (1, 2)] or even [(0, 1), (1, 2), (2, 3), (0, 1), (1, 2), (0, 1)]
|
# Playfield dimensions in pixels.
WIDTH = 128
HEIGHT = 128

# Must be more than ALIEN_SIZE, used to pad alien rows and columns
ALIEN_BLOCK_SIZE = 8

# Alien constants are global as their spacing is used to separate them
ALIENS_PER_ROW = int(WIDTH / ALIEN_BLOCK_SIZE) - 6
ALIEN_ROWS = int(HEIGHT / (2 * ALIEN_BLOCK_SIZE))

# How often to move the aliens intially, how much to step the alien time down with each shift
ALIEN_START_TIME = 4
ALIEN_TIME_STEP = 0.2
ALIEN_MINIMUM_TIME = 1

# How likely an alien is to fire in a time step
ALIEN_START_FIRE_PROBABILITY = 0.01
ALIEN_FIRE_PROBABILITY_STEP = 0.005
ALIEN_MAXIMUM_FIRE_PROBABILITY = 0.03

# Bunker constants
NUMBER_OF_BUNKERS = 4
BUNKER_WIDTH = 8
BUNKER_HEIGHT = 8
# 8x8 bunker shape; presumably 1 = solid cell and 0 = the hollow notch —
# TODO confirm against the renderer.
BUNKER_MAP = [
    [1, 1, 1, 1, 1, 1, 1, 1],
    [1, 1, 1, 1, 1, 1, 1, 1],
    [1, 1, 1, 1, 1, 1, 1, 1],
    [1, 1, 1, 0, 0, 1, 1, 1],
    [1, 1, 0, 0, 0, 0, 1, 1],
    [1, 1, 0, 0, 0, 0, 1, 1],
    [1, 1, 0, 0, 0, 0, 1, 1],
    [1, 1, 0, 0, 0, 0, 1, 1],
]
|
# Build a byte-offset index of every line in the file, then use it to seek
# back and re-read each line. FIX: wrapped in `with` so the file handle is
# always closed (the original leaked it).
with open('abcd.txt', 'r') as fp:
    line_offset = []
    offset = 0
    # NOTE(review): len(line) counts characters; offsets are only valid
    # seek targets for single-byte (e.g. ASCII) content — confirm input.
    for line in fp:
        line_offset.append(offset)
        offset += len(line)
    print(line_offset)
    for each in line_offset:
        fp.seek(each)
        # [:-1] drops the trailing newline; it also drops a real character
        # if the final line has no newline — kept to match prior behavior.
        print(fp.readline()[:-1])
|
# ----------------------------------------------------------------------------
# Copyright 2019-2022 Diligent Graphics LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# In no event and under no legal theory, whether in tort (including negligence),
# contract, or otherwise, unless required by applicable law (such as deliberate
# and grossly negligent acts) or agreed to in writing, shall any Contributor be
# liable for any damages, including any direct, indirect, special, incidental,
# or consequential damages of any character arising as a result of this License or
# out of the use or inability to use the software (including but not limited to damages
# for loss of goodwill, work stoppage, computer failure or malfunction, or any and
# all other commercial damages or losses), even if such Contributor has been advised
# of the possibility of such damages.
# ----------------------------------------------------------------------------
# Diligent Engine struct type names for which C++ (de)serialization parser
# headers are generated.
CXX_REGISTERED_STRUCT = {
    "Version",
    "RenderTargetBlendDesc",
    "BlendStateDesc",
    "StencilOpDesc",
    "DepthStencilStateDesc",
    "RasterizerStateDesc",
    "InputLayoutDesc",
    "LayoutElement",
    "SampleDesc",
    "ShaderResourceVariableDesc",
    "PipelineResourceDesc",
    "PipelineResourceSignatureDesc",
    "SamplerDesc",
    "ImmutableSamplerDesc",
    "PipelineResourceLayoutDesc",
    "PipelineStateDesc",
    "GraphicsPipelineDesc",
    "RayTracingPipelineDesc",
    "TilePipelineDesc",
    "RenderPassAttachmentDesc",
    "AttachmentReference",
    "ShadingRateAttachment",
    "SubpassDesc",
    "SubpassDependencyDesc",
    "RenderPassDesc",
    "ShaderDesc",
    "ShaderMacro",
    "ShaderResourceDesc",
    "ShaderCreateInfo",
    "RenderDeviceInfo",
    "GraphicsAdapterInfo",
    "DeviceFeatures",
    "AdapterMemoryInfo",
    "RayTracingProperties",
    "WaveOpProperties",
    "BufferProperties",
    "TextureProperties",
    "SamplerProperties",
    "MeshShaderProperties",
    "ShadingRateProperties",
    "ComputeShaderProperties",
    "DrawCommandProperties",
    "SparseResourceProperties",
    "ShadingRateMode",
    "CommandQueueInfo",
    "NDCAttribs",
    "SerializationDeviceD3D11Info",
    "SerializationDeviceD3D12Info",
    "SerializationDeviceVkInfo",
    "SerializationDeviceMtlInfo",
    "SerializationDeviceCreateInfo",
}
# Base struct handled specially: DeviceObjectAttribs contributes its Name
# field to every derived description struct.
# NOTE(review): "REGISTERD" is a typo for "REGISTERED"; the name is kept
# because generator code elsewhere may reference it verbatim.
CXX_REGISTERD_BASE_STRUCT = {
    "DeviceObjectAttribs" : {"name": "Name", 'type': "const char *", "meta": "string"}
}
# Diligent Engine enum type names for which (de)serialization helpers are
# generated. (Duplicate "COLOR_MASK" entry removed — a set literal dedupes
# anyway, so the resulting value is unchanged.)
CXX_REGISTERED_ENUM = {
    "BLEND_FACTOR",
    "BLEND_OPERATION",
    "COLOR_MASK",
    "LOGIC_OPERATION",
    "STENCIL_OP",
    "COMPARISON_FUNCTION",
    "FILL_MODE",
    "CULL_MODE",
    "INPUT_ELEMENT_FREQUENCY",
    "VALUE_TYPE",
    "TEXTURE_FORMAT",
    "PRIMITIVE_TOPOLOGY",
    "RESOURCE_STATE",
    "ACCESS_FLAGS",
    "ATTACHMENT_LOAD_OP",
    "ATTACHMENT_STORE_OP",
    "PIPELINE_TYPE",
    "PIPELINE_STAGE_FLAGS",
    "PIPELINE_SHADING_RATE_FLAGS",
    "PIPELINE_RESOURCE_FLAGS",
    "PSO_CREATE_FLAGS",
    "SAMPLER_FLAGS",
    "FILTER_TYPE",
    "TEXTURE_ADDRESS_MODE",
    "SHADER_TYPE",
    "SHADER_SOURCE_LANGUAGE",
    "SHADER_COMPILER",
    "SHADER_RESOURCE_TYPE",
    "SHADER_RESOURCE_VARIABLE_TYPE",
    "SHADER_RESOURCE_VARIABLE_TYPE_FLAGS",
    "SHADER_VARIABLE_FLAGS",
    "ADAPTER_TYPE",
    "ADAPTER_VENDOR",
    "BIND_FLAGS",
    "CPU_ACCESS_FLAGS",
    "WAVE_FEATURE",
    "RAY_TRACING_CAP_FLAGS",
    "COMMAND_QUEUE_TYPE",
    "SPARSE_RESOURCE_CAP_FLAGS",
    "DRAW_COMMAND_CAP_FLAGS",
    "SHADING_RATE_CAP_FLAGS",
    "SHADING_RATE_COMBINER",
    "SHADING_RATE_TEXTURE_ACCESS",
    "SHADING_RATE_FORMAT",
    "RENDER_DEVICE_TYPE",
    "DEVICE_FEATURE_STATE",
    "SHADING_RATE",
    "SAMPLE_COUNT"
}
CXX_SUFFIX_FILE = "Parser"
CXX_EXTENSION_FILE = "hpp"
CXX_LICENCE = '''/*
* Copyright 2019-2022 Diligent Graphics LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* In no event and under no legal theory, whether in tort (including negligence),
* contract, or otherwise, unless required by applicable law (such as deliberate
* and grossly negligent acts) or agreed to in writing, shall any Contributor be
* liable for any damages, including any direct, indirect, special, incidental,
* or consequential damages of any character arising as a result of this License or
* out of the use or inability to use the software (including but not limited to damages
* for loss of goodwill, work stoppage, computer failure or malfunction, or any and
* all other commercial damages or losses), even if such Contributor has been advised
* of the possibility of such damages.
*/
'''
|
# A program that analyses a name and prints transformations of it.
nome = str(input('Digite seu nome: ')).strip()
print('Analiando seu nome...')
print(f'Seu nome em maiúsculas é {(nome.upper())}')
print(f'Seu nome em minúsculas é {(nome.lower())}')
# Letter count excludes the spaces between name parts.
print(f'Seu nome tem ao todo {(len(nome) - nome.count(" "))} letras')
# Both approaches below count only the letters of the first name.
# NOTE(review): find(" ") returns -1 when the name has no space, so the
# "direct" line would report -1 letters for a single-word name — verify.
print(f'Seu primeiro nome tem {(nome.find(" "))} letras')  # direct approach
separa = nome.split()  # alternative approach
print(f'Seu primeiro nome é {separa[0]} e ele tem {len(separa[0])} letras')
|
#
# PySNMP MIB module Cajun-ROOT (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/Cajun-ROOT
# Produced by pysmi-0.3.4 at Mon Apr 29 17:08:17 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
# Generated pysnmp MIB module (see header): do not hand-edit the OID data.
# NOTE(review): `mibBuilder` is injected by the pysnmp MIB loader at
# import time; this file is not importable as a standalone module.
Integer, ObjectIdentifier, OctetString = mibBuilder.importSymbols("ASN1", "Integer", "ObjectIdentifier", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueRangeConstraint, ConstraintsIntersection, SingleValueConstraint, ConstraintsUnion, ValueSizeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueRangeConstraint", "ConstraintsIntersection", "SingleValueConstraint", "ConstraintsUnion", "ValueSizeConstraint")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
ModuleIdentity, Bits, Counter32, NotificationType, TimeTicks, MibScalar, MibTable, MibTableRow, MibTableColumn, MibIdentifier, ObjectIdentity, Unsigned32, iso, Counter64, enterprises, IpAddress, Integer32, Gauge32 = mibBuilder.importSymbols("SNMPv2-SMI", "ModuleIdentity", "Bits", "Counter32", "NotificationType", "TimeTicks", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "MibIdentifier", "ObjectIdentity", "Unsigned32", "iso", "Counter64", "enterprises", "IpAddress", "Integer32", "Gauge32")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
# Lucent enterprise OID tree (1.3.6.1.4.1.1751) and the Cajun router roots.
lucent = MibIdentifier((1, 3, 6, 1, 4, 1, 1751))
products = MibIdentifier((1, 3, 6, 1, 4, 1, 1751, 1))
mibs = MibIdentifier((1, 3, 6, 1, 4, 1, 1751, 2))
cajunRtrProduct = MibIdentifier((1, 3, 6, 1, 4, 1, 1751, 1, 43))
cajunRtr = ModuleIdentity((1, 3, 6, 1, 4, 1, 1751, 2, 43))
if mibBuilder.loadTexts: cajunRtr.setLastUpdated('9904220000Z')
if mibBuilder.loadTexts: cajunRtr.setOrganization("Lucent's Concord Technology Center (CTC) ")
# Top-level subtrees: system, protocol, management.
cjnSystem = MibIdentifier((1, 3, 6, 1, 4, 1, 1751, 2, 43, 1))
cjnProtocol = MibIdentifier((1, 3, 6, 1, 4, 1, 1751, 2, 43, 2))
cjnMgmt = MibIdentifier((1, 3, 6, 1, 4, 1, 1751, 2, 43, 3))
cjnCli = MibIdentifier((1, 3, 6, 1, 4, 1, 1751, 2, 43, 1, 1))
cjnDload = MibIdentifier((1, 3, 6, 1, 4, 1, 1751, 2, 43, 1, 2))
# Per-protocol identifier nodes.
cjnIpv4 = MibIdentifier((1, 3, 6, 1, 4, 1, 1751, 2, 43, 2, 1))
cjnIpv6 = MibIdentifier((1, 3, 6, 1, 4, 1, 1751, 2, 43, 2, 2))
cjnIpx = MibIdentifier((1, 3, 6, 1, 4, 1, 1751, 2, 43, 2, 3))
cjnAtalk = MibIdentifier((1, 3, 6, 1, 4, 1, 1751, 2, 43, 2, 4))
cjnIpv4Serv = MibIdentifier((1, 3, 6, 1, 4, 1, 1751, 2, 43, 2, 5))
cjnIpv6Serv = MibIdentifier((1, 3, 6, 1, 4, 1, 1751, 2, 43, 2, 6))
cjnIpxServ = MibIdentifier((1, 3, 6, 1, 4, 1, 1751, 2, 43, 2, 7))
cjnAtalkServ = MibIdentifier((1, 3, 6, 1, 4, 1, 1751, 2, 43, 2, 8))
cjnOspf = MibIdentifier((1, 3, 6, 1, 4, 1, 1751, 2, 43, 2, 9))
cjnRip = MibIdentifier((1, 3, 6, 1, 4, 1, 1751, 2, 43, 2, 10))
cjnIgmp = MibIdentifier((1, 3, 6, 1, 4, 1, 1751, 2, 43, 2, 11))
cjnRtm = MibIdentifier((1, 3, 6, 1, 4, 1, 1751, 2, 43, 2, 12))
cjnDvmrp = MibIdentifier((1, 3, 6, 1, 4, 1, 1751, 2, 43, 2, 13))
cjnPimSm = MibIdentifier((1, 3, 6, 1, 4, 1, 1751, 2, 43, 2, 14))
cjnPimDm = MibIdentifier((1, 3, 6, 1, 4, 1, 1751, 2, 43, 2, 15))
cjnRsvp = MibIdentifier((1, 3, 6, 1, 4, 1, 1751, 2, 43, 2, 16))
cjnSnmp = MibIdentifier((1, 3, 6, 1, 4, 1, 1751, 2, 43, 2, 17))
cjnBgp = MibIdentifier((1, 3, 6, 1, 4, 1, 1751, 2, 43, 2, 18))
cjnLrrp = MibIdentifier((1, 3, 6, 1, 4, 1, 1751, 2, 43, 2, 19))
cjnIpxRip = MibIdentifier((1, 3, 6, 1, 4, 1, 1751, 2, 43, 2, 20))
cjnIpxSap = MibIdentifier((1, 3, 6, 1, 4, 1, 1751, 2, 43, 2, 21))
# Management subtrees.
cjnIpIfMgmt = MibIdentifier((1, 3, 6, 1, 4, 1, 1751, 2, 43, 3, 1))
cjnIpxIfMgmt = MibIdentifier((1, 3, 6, 1, 4, 1, 1751, 2, 43, 3, 2))
cjnAtalkIfMgmt = MibIdentifier((1, 3, 6, 1, 4, 1, 1751, 2, 43, 3, 3))
cjnResourceMgr = MibIdentifier((1, 3, 6, 1, 4, 1, 1751, 2, 43, 3, 4))
cjnIpAListMgmt = MibIdentifier((1, 3, 6, 1, 4, 1, 1751, 2, 43, 3, 5))
cjnIpForwardCtlMgt = MibIdentifier((1, 3, 6, 1, 4, 1, 1751, 2, 43, 3, 6))
cjnIpFwdMgmt = MibIdentifier((1, 3, 6, 1, 4, 1, 1751, 2, 43, 3, 7))
# Export every defined symbol so other MIB modules can import them.
mibBuilder.exportSymbols("Cajun-ROOT", cjnIpForwardCtlMgt=cjnIpForwardCtlMgt, cjnSnmp=cjnSnmp, cjnIpv6=cjnIpv6, cjnAtalkServ=cjnAtalkServ, mibs=mibs, cjnIpIfMgmt=cjnIpIfMgmt, cjnDvmrp=cjnDvmrp, PYSNMP_MODULE_ID=cajunRtr, products=products, cjnRsvp=cjnRsvp, cjnIpv6Serv=cjnIpv6Serv, cjnResourceMgr=cjnResourceMgr, cjnIgmp=cjnIgmp, cjnOspf=cjnOspf, cjnBgp=cjnBgp, cjnIpxIfMgmt=cjnIpxIfMgmt, cjnAtalkIfMgmt=cjnAtalkIfMgmt, cjnMgmt=cjnMgmt, cjnRtm=cjnRtm, cajunRtr=cajunRtr, cjnPimSm=cjnPimSm, cjnIpFwdMgmt=cjnIpFwdMgmt, cjnLrrp=cjnLrrp, cjnIpxRip=cjnIpxRip, cjnAtalk=cjnAtalk, cjnIpAListMgmt=cjnIpAListMgmt, cajunRtrProduct=cajunRtrProduct, cjnCli=cjnCli, cjnIpv4Serv=cjnIpv4Serv, cjnPimDm=cjnPimDm, cjnIpxServ=cjnIpxServ, cjnRip=cjnRip, cjnDload=cjnDload, cjnIpx=cjnIpx, cjnProtocol=cjnProtocol, lucent=lucent, cjnIpv4=cjnIpv4, cjnSystem=cjnSystem, cjnIpxSap=cjnIpxSap)
|
# Demos of assigning lambdas to names.
# NOTE(review): PEP 8 (E731) prefers `def` for named functions; kept as
# lambdas because demonstrating them is the point of this file.
# Add 5 to number
add5 = lambda n : n + 5
print(add5(2))
print(add5(7))
print()
# Square number
sqr = lambda n : n * n
print(sqr(2))
print(sqr(7))
print()
# Next integer (truncate toward zero, then add one)
nextInt = lambda n : int(n) + 1
print(nextInt(2.7))
print(nextInt(7.2))
print()
# Previous integer of half
prevInt = lambda n : int(n // 2)
print(prevInt(2.7))
print(prevInt(7.2))
print()
# Division lambda — curried: div(divisor)(dividend)
div = lambda dvsr : lambda dvdn : dvdn / dvsr
print(div(5)(10))
print(div(3)(27))
|
class Solution(object):
    def preorder(self, root):
        """Return the pre-order traversal of an N-ary tree.

        :type root: Node
        :rtype: List[int]
        """
        result = []
        if not root:
            return result
        self.visit(root, result)
        return result

    def visit(self, root, values):
        """Append root's value, then recurse into each child left-to-right."""
        values.append(root.val)
        for child in root.children:
            self.visit(child, values)
|
# Paths and hyper-parameters for a CoNLL-format perceptron tagger experiment.
small_train_path = "../data/small_dataset/train.conll"  # small dataset - training set
small_dev_path = "../data/small_dataset/dev.conll"  # small dataset - dev set
big_train_path = "../data/big_dataset/train"  # big dataset - training set
big_dev_path = "../data/big_dataset/dev"  # big dataset - dev set
big_test_path = "../data/big_dataset/test"  # big dataset - test set
result_path = "../result/small_data_w.txt"  # path of the result file
small_dataset = True  # dataset scale selector (True = small dataset)
shuffle = True  # whether to shuffle the training data
max_iterations = 100  # maximum number of training epochs
max_no_rise = 10  # stop when dev accuracy has not improved for this many epochs
averaged_perceptron = False  # use accumulated weights (True: v, False: w)
|
files = {
"server.py":"""#Import all your routes here
from {}.routes import router
from fastapi import FastAPI
app = FastAPI()
app.include_router(router)
""",
"settings.py": """#configuration for database""",
"test.py":"""#implement your test here""",
"models.py": """#implement your models here
from pydantic import BaseModel""",
"views.py":"""#implement your views here
async def homeView():
return {"Welcome":"To HomePage"}
#You can also create your method withou async keyword
# def homeView():
# return {"Welcome":"To HomePage"}
""",
"routes.py":"""#implement here your routes
from fastapi import APIRouter
from {}.views import homeView
router = APIRouter()
@router.get("/")
async def homePage():
return await homeView()
#You can also create your method withou async keywork then you can call your method withou await
# @router.get("/")
# def homePage():
# return homeView()"""
}
app_files = {
"server.py ":" ",
"settings.py":" ",
"models.py": """#implement your models here from pydantic import BaseModel""",
"views.py":"""#implement your views here""",
"routes.py":"""#implement your routes here""",
"test.py":"""#implement your test here""",
}
|
# Puzzle input: initial bit string and the disk size to fill
# (presumably Advent of Code 2016 day 16, part 2 -- the 35651584 size).
state = '10011111011011001'
disk_length = 35651584
def mutate(a):
    """One dragon-curve expansion: a -> a + '0' + (reversed, bit-flipped a)."""
    flipped = ''.join('1' if bit == '0' else '0' for bit in reversed(a))
    return a + '0' + flipped
def checksum(a):
    """Collapse adjacent pairs (equal -> '1', different -> '0'),
    repeating until the result has odd length."""
    result = ''.join('1' if x == y else '0' for x, y in zip(a[::2], a[1::2]))
    if len(result) % 2 == 0:
        result = checksum(result)
    return result
# Grow the data with the dragon-curve step until it covers the disk,
# truncate to exactly disk_length characters, then print its checksum.
while len(state) < disk_length:
    state = mutate(state)
state = state[:disk_length]
print(checksum(state))
|
# Print a 9x9 multiplication table, 4 characters per cell.
for x in range(1, 10):
    for y in range(1, 10):
        print("%4d" % (x * y), end="")
    print()
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2011 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Ying Liu, Cisco Systems, Inc.
#
def get_view_builder(req):
    """Return a ViewBuilder rooted at the request's application URL."""
    return ViewBuilder(req.application_url)
class ViewBuilder(object):
    """ViewBuilder for Credential, derived from quantum.views.networks."""

    def __init__(self, base_url):
        """Initialize builder.

        :param base_url: url of the root wsgi application
        """
        self.base_url = base_url

    def build(self, credential_data, is_detail=False):
        """Generic method used to generate a credential entity."""
        builder = self._build_detail if is_detail else self._build_simple
        return builder(credential_data)

    def _build_simple(self, credential_data):
        """Return a simple description of credential."""
        return {'credential': {'id': credential_data['credential_id']}}

    def _build_detail(self, credential_data):
        """Return a detailed description of credential."""
        return {'credential': {'id': credential_data['credential_id'],
                               'name': credential_data['user_name'],
                               'password': credential_data['password']}}
|
def saisie_liste():
    """Read integers until a non-number is entered; print the running
    average after each entry, then the minimum and maximum.

    Bug fixes vs. the original:
    * the first number is now counted in the average (the old code divided
      by the count of *subsequent* entries only, overstating the mean);
    * any non-numeric entry ends the input gracefully instead of only the
      empty string (``int("abc")`` used to raise an unhandled ValueError);
    * no longer shadows the builtins ``min``/``max``.
    """
    premier_nombre = input("Entrer un nombre : ")
    somme = int(premier_nombre)
    minimum = somme
    maximum = somme
    compteur = 1  # count of numbers included in somme (first one counts!)
    while True:
        nombre = input("Entrer un nombre : ")
        try:
            valeur = int(nombre)
        except ValueError:
            print("Ce n'est pas un nombre")
            break
        somme += valeur
        compteur += 1
        minimum = min(minimum, valeur)
        maximum = max(maximum, valeur)
        # int() truncation kept to match the original display behaviour.
        print("Moyenne actuelle : " + str(int(somme / compteur)))
    print("Min : " + str(minimum))
    print("Max : " + str(maximum))
saisie_liste()
|
# Looks like a stand-in/stub for the standard ``socket`` module (values for
# AF_INET/SOCK_* match the Linux ABI; SOL_SOCKET=4095 matches lwIP, which
# suggests an embedded port -- TODO confirm the target platform).
AF_INET = 2  # IPv4 address family
AF_INET6 = 10  # IPv6 address family
IPPROTO_IP = 0
IPPROTO_TCP = 6
IPPROTO_UDP = 17
IP_ADD_MEMBERSHIP = 3
SOCK_DGRAM = 2  # datagram (UDP) socket type
SOCK_RAW = 3
SOCK_STREAM = 1  # stream (TCP) socket type
SOL_SOCKET = 4095
SO_REUSEADDR = 4
def getaddrinfo():
    """Stub: no-op placeholder for socket.getaddrinfo."""
    pass
def socket():
    """Stub: no-op placeholder for the socket constructor."""
    pass
|
# One-off fix-up: negate the score in column 5 of a whitespace-separated
# run file, then rewrite the same file tab-separated.
fixed_rows = []
with open('runs/expert/baseline_pass_full_doc_rerank', 'r') as fi:
    for line in fi:
        line = line.strip().split()
        if line:  # skip blank lines
            fixed_score = -float(line[4])
            line[4] = str(fixed_score)
            fixed_rows.append('\t'.join(line))
# Write the corrected rows back over the original file.
with open('runs/expert/baseline_pass_full_doc_rerank', 'w') as fo:
    for row in fixed_rows:
        fo.write(row + '\n')
|
def insertion_sort(l):
    """Sort list *l* in place in ascending order using insertion sort.

    Bug fix: the bounds guard ``j >= 0`` must be evaluated *before*
    ``l[j] > key``.  The original checked the comparison first, so once
    ``j`` reached -1 it read ``l[-1]`` (the last element), which corrupts
    the list or raises IndexError for some inputs (e.g. [3, 1, 2]).
    """
    for i in range(1, len(l)):
        key = l[i]
        j = i - 1
        # Shift larger elements one slot to the right.
        while j >= 0 and l[j] > key:
            l[j + 1] = l[j]
            j -= 1
        l[j + 1] = key
# Demo: sort a reversed list in place and print the result.
numbers = [10, 9, 8, 7, 6, 5, 4, 3, 2, 1]
insertion_sort(numbers)
print(numbers)
|
# Question: https://projecteuler.net/problem=120
# The coefficients of a^(odd) cancel out, so there might be a pattern ...
# n | X_n = (a-1)^n + (a+1)^n | mod a^2
#-----|----------------------------|--------
# 1 | 2a | 2a
# 2 | 2a^2 + 2 | 2
# 3 | 2a^3 + 6a | 6a
# 4 | 2a^4 + 6a^2 + 2 | 2
# 5 | 2a^5 + 20a^3 + 10a | 10a
# 6 | 2a^6 + 30a^4 + 30a^2 + 2 | 2
# 7 | 2a^7 + 42a^5 + 70a^3 + 14a | 14a
# So, if n is even, X^n = 2 (mod a^2)
# if n is odd, X^n = 2na (mod a^2)
# For a given 'a', what is the maximum x such that 2na = x (mod a^2) where n is an abitrary positive integer?
# We know that 2na is even, so if a if odd, the highest possible value of x is a^2 - 1
# if a is even, the highest possible value of x is a^2 - 2
# If a is even, then there exists k such that a = 2k. pick n = k, we have 2na = 2ka = a^2 = 0 (mod a^2)
# n = k - 1, we have 2na = a^2 - 2a (mod a^2)
# n = k - 2, we have 2na = a^2 - 4a (mod a^2)
# ...
# n = k - k, we have 2na = a^2 - 2ka = a^2 - a^2 = 0 (mod a^2)
# so the modulo group is {0, a^2 - 2ka}
# If a is odd, then there exists k such that a = 2k + 1. Pick n = 2k+1, then 2na = 2(2k+1)a = 2a^2 = 0 (mod a^2)
# ...
# n = k+2, then 2na = 2(k+2)a = (2k+1)a + 3a = a^2 + 3a = 3a = a^2 - a^2 + 3a = a^2 - (2k-2)a (mod a^2)
# n = k+1, then 2na = 2(k+1)a = (2k+1)a + a = a^2 + a = a = a^2 - (2k)a (mod a^2)
# start here -> n = k, then 2na = 2ka = (2k+1)a - a = a^2 - a (mod a^2)
# n = k-1, then 2na = 2(k-1)a = (2k+1)a - 3a = a^2 - 3a (mod a^2)
# n = k-2, then 2na = 2(k-2)a = (2k+1)a - 5a = a^2 - 5a (mod a^2)
# ...
# n = k-k, then 2na = 0 (mod a^2)
# so the modulo group is {0, a^2 - ka}
# So, if 'a' is odd, r_max = max(2, a^2 - a). Since a >= 3, r_max = a^2 - a
# if 'a' is even, r_max = max(2, a^2 - 2a). Since a >= 3, r_max = a^2 - 2a
# So, sum_{3,n}(r_max) = [sum_{1,n}(a^2-a)] - [sum_{3<=a<=n, 'a' even} (a)] - {a=1}(a^2-a) - {a=2}(a^2-a)
# = [sum_{1,n}(a^2-a)] - (2*[sum_{1<=i<=floor(n/2)} (i)] - 2) - {a=1}(a^2-a) - {a=2}(a^2-a)
# = 1/6 * n * (n+1) * (2n+1) - 1/2 * n * (n+1) - (2*n/2*(n/2+1) - 2) - 0 - 2
# = 1/3 * (n-1) * n * (n+1) - 1/4*n*(n+2)
# Sum r_max over a = 3..N using the per-parity closed form derived above:
# odd a -> a^2 - a, even a -> a^2 - 2a (same value as the single formula).
N = 1000
result = sum(a * a - a if a % 2 else a * a - 2 * a for a in range(3, N + 1))
print(result)
|
# Staircase: print a right-aligned triangle of '#' of height n.
n = int(input().strip())
for row in range(1, n + 1):
    print(' ' * (n - row) + '#' * row)
|
def linha():
    """Print a separator: blank line, 80-character rule, blank line."""
    print()
    print(80 * '=')
    print()
# Multiplication-table REPL: ask for a number, print its 1..10 table;
# entering 0 exits the loop.
linha()
while True:
    num = int(input('Digite um número para ver sua tabuada [0 para sair]: '))
    print()
    if num == 0:
        break
    for c in range(1, 11):
        print(f'{num} x {c:>2} = {num*c:>2}')
    print()
    linha()
|
class Node:
    """Singly-linked-list node holding a value and a ``next`` pointer."""

    def __init__(self, value):
        self.value = value
        self.next = None
class Stack:
    """Linked-list LIFO stack.

    Empty-stack pops/peeks return a message string (kept for backward
    compatibility with the original API).
    """

    def __init__(self):
        self.top = None  # head node; None means the stack is empty

    def push(self, value):
        """Push *value* onto the top of the stack."""
        node = Node(value)
        node.next = self.top  # works for both empty and non-empty stacks
        self.top = node

    def pop(self):
        """Remove and return the top value; message string if empty.

        Bug fix: replaces the bare ``except`` (which swallowed every
        exception type) with an explicit empty check.
        """
        if self.top is None:
            return "This is empty stack"
        deleted_value = self.top.value
        self.top = self.top.next
        return deleted_value

    def peek(self):
        """Return the top value without removing it; message string if empty."""
        if self.top is None:
            return "This is empty stack"
        return self.top.value

    def isEmpty(self):
        """Return True when the stack is empty.

        Bug fix: the original logic was inverted (it returned False for an
        empty stack and True otherwise).
        """
        return self.top is None
class Queue:
    """Linked-list FIFO queue.

    Empty-queue dequeues/peeks return a message string (kept for backward
    compatibility with the original API).
    """

    def __init__(self):
        self.front = None
        self.rear = None
        # Bug fix: dequeue() referenced self.size, which was never
        # initialised, so its bare except always fired *after* the front
        # pointer had already been advanced (losing the node and returning
        # the empty-queue message).  Maintain the size explicitly.
        self.size = 0

    def enqueue(self, value):
        """Append *value* at the rear of the queue."""
        node = Node(value)
        if self.front is None:
            self.front = node
        else:
            self.rear.next = node
        self.rear = node
        self.size += 1

    def dequeue(self):
        """Remove and return the front value; message string if empty."""
        if self.front is None:
            return "The Queue is empty"
        removed = self.front
        self.front = removed.next
        if self.front is None:
            # Bug fix: keep rear consistent when the queue becomes empty,
            # otherwise isEmpty() stayed False after draining the queue.
            self.rear = None
        self.size -= 1
        return removed.value

    def peek(self):
        """Return the front value without removing it; message string if empty."""
        if self.front is None:
            return "This is Empty queue"
        return self.front.value

    def isEmpty(self):
        """Return True when the queue holds no elements."""
        return self.front is None and self.rear is None

    def length(self):
        """Return the number of elements.

        Bug fix: the original walked ``self.front`` forward while counting,
        destroying the queue as a side effect.
        """
        return self.size
# if __name__=="__main__" :
# pass
# q = Queue()
# q.enqueue(4)
# q.enqueue(4)
# print(q.dequeue())
|
def scorify_library(library):
    """
    The aim is to give the libraries a score, that will enable to order them later on
    """
    # Assumed library tuple layout: (num_books, signup_days, books_per_day,
    # book_ids) -- TODO confirm against the code that builds these tuples.
    NB = library[0]  # number of books in the library
    BD = library[2]  # books shippable per day
    SB = library_total_book_score(library)  # summed score of its books
    DR = library[1]  # signup duration in days
    # D is a module-level global (total days available, defined elsewhere).
    library_scoring = (D - DR) * BD * (SB/NB)
    return library_scoring
def library_total_book_score(library):
    """Sum the scores (looked up in the global BL table) of every book id
    listed in library[3]."""
    return sum(BL[book_id] for book_id in library[3])
def compute_available_days():
    """Pop (library_id, score) entries off the global ``scores`` stack and
    keep the libraries whose cumulative signup time fits within D days.

    Relies on module globals: ``scores`` (consumed/emptied by this call),
    ``LL`` (library tuples, index 1 = signup duration) and ``D`` (deadline).
    NOTE(review): a library rejected for exceeding D still has its signup
    days added to ``availability_day`` -- confirm that is intended.
    """
    available_libraries = []
    availability_day = 0
    while len(scores)>0:
        library_id_score = scores.pop()
        library_id = library_id_score[0]
        DR = LL[library_id][1]  # signup duration for this library
        availability_day += DR
        if availability_day > D:
            continue
        else:
            entry = (library_id,availability_day)
            available_libraries.append(entry)
    return available_libraries
|
class Node:
    """AST node for a ``while`` loop: *condition* and *body* are visitable
    sub-nodes."""

    def __init__(self, condition, body):
        self.condition = condition
        self.body = body

    def visit(self, context):
        """Run the loop; return the last body result (None if never run)."""
        result = None
        while True:
            if not self.condition.visit(context):
                return result
            result = self.body.visit(context)
|
""" Quiz: Enumerate
Use enumerate to modify the cast list so that each element contains the name followed by the character's corresponding height. For example, the first element of cast should change from "Barney Stinson" to "Barney Stinson 72".
"""
cast = [
"Barney Stinson",
"Robin Scherbatsky",
"Ted Mosby",
"Lily Aldrin",
"Marshall Eriksen",
]
heights = [72, 68, 72, 66, 76]
# write your for loop here
for i, height in enumerate(heights):
cast[i] += " {}".format(height)
print(cast)
""" Solution """
# for i, character in enumerate(cast):
# cast[i] = character + " " + str(heights[i])
# print(cast)
|
# Public API of this package: names re-exported for ``from pkg import *``.
__all__ = [
    'arch_blocks',
    'get_mask',
    'get_param_groups',
    'logger',
    'losses',
    'lr_schedulers',
    'optimizers_L1L2',
    'tensorflow_logger',
]
|
class Solution:
    def solve(self, courses):
        """Return True iff the prerequisite graph has no cycle.

        *courses* is an adjacency list: courses[i] lists the courses
        reachable from course i.  Uses three-colour DFS.
        """
        UNSEEN, IN_PROGRESS, DONE = 0, 1, 2
        state = [UNSEEN] * len(courses)

        def has_cycle(node):
            # A back-edge to an IN_PROGRESS node closes a cycle.
            state[node] = IN_PROGRESS
            for nxt in courses[node]:
                if state[nxt] == IN_PROGRESS:
                    return True
                if state[nxt] == UNSEEN and has_cycle(nxt):
                    return True
            state[node] = DONE
            return False

        for course in range(len(courses)):
            if state[course] == UNSEEN and has_cycle(course):
                return False
        return True
|
# Decode a message by shifting every character down one Unicode code point
# (a Caesar-style shift of -1).
chipper = input('Input Message: ')
plain = ''
for alphabet in chipper:
    temp = ord(alphabet)-1  # previous code point
    plain += chr(temp)
print(plain)
|
"""
[2016-09-26] Challenge #285 [Easy] Cross Platform/Language Data Encoding part 1
https://www.reddit.com/r/dailyprogrammer/comments/54lu54/20160926_challenge_285_easy_cross/
We will make a binary byte oriented encoding of data that is self describing and extensible, and aims to solve the
following problems:
* portability between 32 and 64 (and any other) bit systems, and languages, and endian-ness.
* type system independent of underlying language.
* Allow heterogeneous arrays (differing types of array elements) where the underlying language has poor support for
them.
* leverage power of homogeneous arrays in a language.
* support records regardless of underlying language (array of records is homogeneous, even though a record is a
heterogeneous list of fields)
* Allow ragged arrays (a table where each row is a list, but the rows do not have a uniform size (or shape))
* Provide basic in memory compression. Allow deferred decoding of partial data.
# 1. base64 encoding (used in later challenges)
To read and write binary data on reddit, we will use base64 encoding,
https://www.reddit.com/r/dailyprogrammer/comments/4xy6i1/20160816_challenge_279_easy_uuencoding/
# 2. Extendible byte base.
Any size integer can be coded into a variable byte array by using the maximum byte value as a marker to add the next
byte value to decode the total.
This is useful for coding numbers that you think can be limited to around 255 or close to it, without being "hard
constrained" by that limit. "256 possible op codes (or characters) ought to be enough for everyone forever thinking"
**unsigned byte input**
12
255
256
510
512 44 1024
last input is a list of 3 integers to encode
**sample outputs**
12
255 0
255 1
255 255 0
255 255 2 44 255 255 255 255 4
every element that is not 255 marks the end of "that integer" in a list. You should also write a decoder that
transforms output into input.
# 3. multibyte and variable byte encodings
Instead of a single byte target encoding, 2,4,8 and variable defined byte sizes are also desirable to cover integers
with larger ranges. An account balance might have a 40 bit practical limit, but you might not guarantee it forever.
64 bits might not be enough for Zimbabwe currency balances for example.
For compressing a list of numbers, often it is useful to set the whole list to one "byte size". Other choices include,
* setting an enum/table of possible byte size codings of 1 2 4 8 sizes, and then encoding, the number of elements, the
table/enum size and definition, and then 2 lists (enum key, data items)
* interleave bytesize, data
The latter will often be longer for long lists, but does not encode the table so is simpler to encode/decode.
**Encoding format for table definition:**
1. 4 bytes: first 30 bits - length of list. last 2 bits: key into 1 2 4 8. If first 30 bits are max value, then
following 4 bytes are added to count until a non-max value is taken. Similar to challenge #2.
2. list of byte lengths defined by key in 1. If last 2 bits of 1 are 3 (signifies up to 8 distinct integer sizes),
then this list has 8 items. If there only 6 distinct integer size codings, then the last 2 items in this list would be
ignored and set to 0. Values over 255 are encoded as in challenge 2.
3. list of ordered data encodings in boolean form, if there are more than 1. 1 bit for 2, 2 bits for 4, 3 bits for 8.
4. list of data elements.
**challenges**
encode list of integers from 0 to 1025 using 8 or 16 bit variable encoding. With the shortest encoding that will
contain the number. Just print the sum of all the bytes as result for output brevity.
**solution**
1. first 4 bytes are (1025 * 4) + 1 (leading 0 bytes for smaller than "full size" numbers)
2. 2 byte list: 1 2
3. 0 for first 256 bits, 1 for remaining bits (total 1032 bits long with padding)
4. 256 + (769 * 2) bytes long encoding of the numbers.
# 4. balanced signed numbers
Some numbers are negative. The common computer encoding for signed number ranges is to subtract half the max power of
2 from the value. A signed byte has range -128 to 127, where a 0 value corresponds to -128 (in our encoding).
For numbers outside this range encoded in a single byte, the process is to take the first byte to determine the sign,
and then following bytes add or subtract up to 255 per byte until a non 255 value is reached.
# 5. unbalanced signed numbers
Instead of the midpoint marking 0, a byte can encode a value within any defined range.
Another important application is to use "negative" numbers as codes of some sort. These include:
* An expectation that negative numbers are less frequent and smaller relative to 0
* coding special values such as null, infinity, undeterminable (0/0)
* Using codes to hint at extended byte encodings and sign of the number, or even data type
**sample 0 index codes** (for 16 reserved codes) (new paragraph for multiline explained codes)
Null
Infinity
Negative Infinity
Negative 1 byte
Negative 2 bytes
Negative 4 bytes
Negative 8 bytes
Negative custom byte length (value is encoded into 2 numbers. First is byte length (in 255 terminated bytes, followed
by that number of bytes to represent the number)
Positive 1 byte (first number indicates range of 468 to 723). 467 could have been encoded as 255 254 without this
special code.
Positive 2 byte
Positive 4 byte
Positive 8 byte
Positive 16 byte
Positive 64 byte
Positive custom byte length (3 to 262 excluding other defined lengths)
Positive custom 2 byte length (16 bit unsigned number defines byte length of number, followed by encoded number)
**sample inputs**
10
123123
-55
Null
**sample output**
26
9 123123
3 54 (minimum range value is -1)
0
**challenge input**
192387198237192837192837192387123817239182737 _44 981237123
array of 3 numbers (_44 is -44) to be encoded
"""
def main():
    """Placeholder: the encoding challenge described above is not implemented."""
    pass
if __name__ == "__main__":
    main()
|
def dec1(def1):
    """Decorator: print a message before and after calling *def1*."""
    def wrapper():
        print("Executing now")
        def1()
        print("Executed")
    return wrapper
@dec1
def who_is_sandy():
    """Demo function; dec1 wraps it with before/after messages."""
    print("Sandy is good programmer")
# Equivalent manual form: who_is_sandy = dec1(who_is_sandy)
who_is_sandy()
|
"""Aiohwenergy errors."""
class AiohwenergyException(Exception):
"""Base error for aiohwenergy."""
class RequestError(AiohwenergyException):
"""Unable to fulfill request.
Raised when host or API cannot be reached.
"""
class InvalidStateError(AiohwenergyException):
"""Raised when the device is not in the correct state."""
class UnsupportedError(AiohwenergyException):
"""Raised when the device is not supported from this library."""
class DisabledError(AiohwenergyException):
"""Raised when device API is disabled. User has to enable API in app."""
|
# container with most water
# https://leetcode.com/problems/container-with-most-water/
# the function maxArea -> take in a list of integers and return an integer
# 3 variables to keep track of the current max area, left and right pointers
# left pointer initialized to the first elements of the list
# right pointer initialized to the last elements of the list
# current max area initialized to 0
# height will be the lower of the two elements at the left and right pointers
# width will be the difference between the right pointer and left pointer
# compute the area between the 2 pointer and compare result with current max area, if result is greater than current max area, update current max area to result
# compare the height of the 2 pointer and shift the pointer that is shorter
# [to compensate for the reduction in width, we want to move the pointer that is shorter to a taller line]
# recompute current max area
class Solution:
    def maxArea(self, height: list[int]) -> int:
        """Two-pointer scan for the largest container-with-most-water area."""
        lo, hi = 0, len(height) - 1
        best = 0
        while lo < hi:
            shorter = min(height[lo], height[hi])
            best = max(best, shorter * (hi - lo))
            # Only moving the shorter side inward can improve the area.
            if height[lo] < height[hi]:
                lo += 1
            else:
                hi -= 1
        return best
# Smoke test with the classic LeetCode example (expected output: 49).
a = [1,8,6,2,5,4,8,3,7]
sol = Solution()
print(sol.maxArea(a))
|
"""
Copyright (c) Contributors to the Open 3D Engine Project.
For complete copyright and license terms please see the LICENSE at the root of this distribution.
SPDX-License-Identifier: Apache-2.0 OR MIT
Settings file for the TestRailImporter tool.
"""
# Maps human-readable result names to TestRail numeric status ids.
TESTRAIL_STATUS_IDS = {  # For more info see http://docs.gurock.com/testrail-api2/reference-statuses
    'pass_id': 1,
    'block_id': 2,
    'untested_id': 3,
    'retest_id': 4,
    'fail_id': 5,
    'na_id': 6,
    'pass_issues_id': 7,
    'failed_nonblocker_id': 8,
    'punted_id': 9,
}
|
print("------------------------------------")
print("********* Woorden switchen *********")
print("------------------------------------")
# Input temperatuur in Celsius
woord1 = input("Woord 1: ")
woord2 = input("Woord 2: ")
# Output
print()
print("Woord 1: " + woord1.upper())
print("Woord 2: " + woord2.upper())
print()
# Switchen van woorden
woord1, woord2 = woord2, woord1
# Output
print()
print("Woord 1: " + woord1.upper())
print("Woord 2: " + woord2.upper())
print()
# Workaround wachten tot enter
input("Druk op Enter om door te gaan...")
|
# NiFi ExecuteScript snippet: exercise every log level with a FlowFile
# attribute, then route the FlowFile to success.
# ``session``, ``log`` and ``REL_SUCCESS`` are injected by NiFi at runtime.
flowFile = session.get()
if flowFile != None:
    test = flowFile.getAttribute("greeting")
    log.debug(test + ": Debug")
    log.info(test + ": Info")
    log.warn(test + ": Warn")
    log.error(test + ": Error")
session.transfer(flowFile, REL_SUCCESS)
|
# * =======================
# *
# * Author: Matthew Moccaro
# * File: Network_Programming.py
# * Type: Python Source File
# *
# * Creation Date: 1/2/19
# *
# * Description: Python
# * source file for the
# * network programming
# * project.
# *
# * ======================
print("Network Programming For Python")
|
__author__ = 'mstipanov'
class ApiRequestErrorDetails(object):
    """Details of a single service exception returned by the API."""

    def __init__(self, text=""):
        # Instance attributes instead of shared class attributes; only
        # ``text`` is settable via the constructor, matching the original.
        self.messageId = ""
        self.text = text
        self.variables = ""
        self.additionalDescription = ""

    def __str__(self):
        return (
            'ApiRequestErrorDetails: {'
            'messageId = "' + str(self.messageId) + '", '
            'text = "' + str(self.text) + '", '
            'variables = "' + str(self.variables) + '", '
            'additionalDescription = "' + str(self.additionalDescription) + '"'
            '}'
        )


class ApiRequestError(object):
    """Error envelope: correlator id plus the service exception details."""

    def __init__(self, clientCorrelator="", serviceException=None):
        # Bug fix: the original default ``serviceException=
        # ApiRequestErrorDetails()`` was evaluated once at definition time,
        # so every instance built with the default shared (and could
        # mutate) the same details object.  Create a fresh one per call.
        self.clientCorrelator = clientCorrelator
        if serviceException is None:
            serviceException = ApiRequestErrorDetails()
        self.serviceException = serviceException

    def __str__(self):
        return (
            'ApiRequestError: {'
            'clientCorrelator = "' + str(self.clientCorrelator) + '", '
            'serviceException = ' + str(self.serviceException) +
            '}'
        )


class ApiException(Exception):
    """Exception carrying a structured ApiRequestError."""

    def __init__(self, requestError=None):
        # Bug fix: same shared-mutable-default problem as above.
        super().__init__()
        if requestError is None:
            requestError = ApiRequestError()
        self.requestError = requestError

    def __str__(self):
        return (
            'ApiException: {'
            'requestError = ' + str(self.requestError) +
            '}'
        )
|
"""
Aim: Given an undirected graph and an integer M. The task is to determine if
the graph can be colored with at most M colors such that no two adjacent
vertices of the graph are colored with the same color.
Intuition: We consider all the different combinations of the colors for the
given graph using backtacking.
"""
def isSafe(graph, v, n, temp, color):
    """Return True if node *n* may take colour *temp*: no neighbour among
    the *v* nodes already holds that colour."""
    return all(not (graph[n][i] == 1 and color[i] == temp) for i in range(v))
def check(graph, m, v, n, color):
    """Backtracking: try colours 1..m for node *n*, recursing over the
    remaining nodes; True once every node is coloured."""
    if n == v:  # base case: all nodes coloured
        return True
    for colour in range(1, m + 1):
        if not isSafe(graph, v, n, colour, color):
            continue
        color[n] = colour
        if check(graph, m, v, n + 1, color):
            return True
        color[n] = 0  # backtrack
    return False
def graphcoloring(graph, M, V):
    """Return True if *graph* (V x V adjacency matrix) is M-colourable."""
    color = [0] * (V + 1)  # colour per node, 0 = uncoloured (extra slot unused)
    return check(graph, M, V, 0, color)
# ------------------------DRIVER CODE ------------------------
def main():
    """Read T test cases (V, M, E then a flat 1-based edge list) from
    stdin; print 1 if the graph is M-colourable, else 0."""
    for _ in range(int(input())):
        V = int(input())  # number of vertices
        M = int(input())  # number of colours available
        E = int(input())  # number of edges
        # NOTE(review): ``list`` shadows the builtin; kept as-is here.
        list = [int(x) for x in input().strip().split()]
        graph = [[0 for i in range(V)] for j in range(V)]
        cnt = 0
        for i in range(E):
            # Edges come as consecutive 1-based pairs; mark both
            # directions (the graph is undirected).
            graph[list[cnt] - 1][list[cnt + 1] - 1] = 1
            graph[list[cnt + 1] - 1][list[cnt] - 1] = 1
            cnt += 2
        if graphcoloring(graph, M, V) == True:
            print(1)
        else:
            print(0)
if __name__ == "__main__":
    main()
"""
Sample Input:
2
4
3
5
1 2 2 3 3 4 4 1 1 3
3
2
3
1 2 2 3 1 3
Sample Output:
1
0
"""
|
# -*- coding: utf-8 -*-
# Jupyter Extension points
def _jupyter_nbextension_paths():
return [
dict(
section="notebook",
# the path is relative to the `my_fancy_module` directory
src="resources/nbextension",
# directory in the `nbextension/` namespace
dest="nbsafety",
# _also_ in the `nbextension/` namespace
require="nbsafety/index",
)
]
def load_jupyter_server_extension(nbapp):
    """Server-extension entry point; nothing to initialise for this package."""
    pass
|
'''
Вероятно, вы помните задачу про школьницу Вику, которая в свой день рождения
принесла в школу N шоколадных конфет, чтобы отпраздновать вместе с одноклассниками.
За день до столь знаменательного праздника Вика пошла в магазин,
чтобы купить N конфет, однако обнаружила, что поштучно их купить нельзя.
Конфеты, которые больше всего на свете любит Вика, продаются в пачках.
В каждой пачке ровно M конфет. Определите, сколько пачек конфет купила Вика.
Примечание: Вика твердо решила, что принесет в школу N конфет, поэтому меньше
конфет она купить не может, однако может переплатить при необходимости и
купить лишние конфеты.
Формат входных данных
Два натуральных числа N и M - количество конфет, которое Вика решила принести в школу, и количество конфет в одной пачке.
Формат выходных данных
Выведите одно число - количество пачек конфет, которое купит Вика.
Sample Input 1:
19
5
Sample Output 1:
4
Sample Input 2:
30
6
Sample Output 2:
5
'''
# Read the required number of candies and the pack size, then print how
# many whole packs Vika must buy: ceiling division (she may over-buy but
# never bring fewer than n candies).
n_konf = int(input())
m_item_pack = int(input())
print((n_konf + m_item_pack - 1) // m_item_pack)
|
class BatteryAndInverter:
    """Battery-plus-inverter component for the energy simulation.

    ``params`` hold static configuration; ``states`` are mutated once per
    simulation tick by :meth:`run_step`.
    """
    name = "battery and inverter"
    # Static configuration entries (values marked "FAKE" are placeholders).
    params = [
        {
            "key": "capacity_dc_kwh",
            "label": "",
            "units": "kwh",
            "private": False,
            "value": 4000,
            "confidence": 0,
            "notes": "",
            "source": "FAKE"
        },
        {
            "key": "capacity_dc_kw",
            "label": "",
            "units": "kw",
            "private": False,
            "value": 4000,
            "confidence": 0,
            "notes": "",
            "source": "FAKE"
        },
        {
            "key": "roundtrip_efficiency",
            "label": "",
            "units": "decimal percent",
            "private": False,
            "value": 0.95,
            "confidence": 0,
            "notes": "",
            "source": "FAKE"
        },
        {
            "key": "wh_per_kg",
            "label": "",
            "units": "Wh/kg",
            "private": False,
            "value": 200,
            "confidence": 0,
            "notes": "",
            "source": "FAKE"
        },
        {
            "key": "m3_per_kwh",
            "label": "",
            "units": "m3/kWh",
            "private": False,
            "value": 0.0001,
            "confidence": 0,
            "notes": "",
            "source": "FAKE"
        }
    ]
    # Mutable per-simulation state entries.
    states = [
        {
            "key": "available_dc_kwh",
            "label": "",
            "units": "kwh",
            "private": False,
            "value": 4000,
            "confidence": 0,
            "notes": "",
            "source": ""
        },
        {
            "key": "generated_dc_kwh",
            "label": "",
            "units": "kwh",
            "private": False,
            "value": 0,
            "confidence": 0,
            "notes": "The is the way that generators send kwh to battery",
            "source": ""
        },
        {
            "key": "mass",
            "label": "",
            "units": "kg",
            "private": True,
            "value": 0,
            "confidence": 0,
            "notes": "",
            "source": ""
        },
        {
            "key": "volume",
            "label": "",
            "units": "m3",
            "private": True,
            "value": 0,
            "confidence": 0,
            "notes": "",
            "source": ""
        }
    ]
    @staticmethod
    def run_step(states, params, utils):
        # Lazily derive mass and volume on the first tick (mass starts 0).
        if states.mass == 0:
            inverter_mass = 0 # TODO: Incorporate inverter mass
            states.mass = 1 / ( params.wh_per_kg / 1000) * params.capacity_dc_kwh + inverter_mass
            states.volume = params.m3_per_kwh * params.capacity_dc_kwh
        if states.available_dc_kwh < 0:
            utils.terminate_sim_with_error("available_dc_kwh was negative")
        if states.available_dc_kwh == 0:
            utils.log_warning("Available AC kWh is zero!")
        # Due to current limitations in modeling setup
        # Apply the full round trip battery efficiency for
        # energy added to the battery instead of part when added in
        # and part when added out
        states.available_dc_kwh += states.generated_dc_kwh * params.roundtrip_efficiency
        # TODO: Check whether this should be ac or dc
        if states.available_dc_kwh > params.capacity_dc_kwh:
            states.available_dc_kwh = params.capacity_dc_kwh
        # Reset the input DC bus so PV etc can be added in next sim tick
        states.generated_dc_kwh = 0
        # Hack for clipping by max available power
        # NOTE(review): this compares an energy (kWh) against a power
        # rating (kW) -- units look mismatched; confirm whether a
        # timestep conversion is missing.
        states.available_dc_kwh = min(states.available_dc_kwh, params.capacity_dc_kw)
|
# Generate a mask for the upper triangle
# Bug fix: ``np.bool`` was deprecated in NumPy 1.20 and removed in 1.24;
# the builtin ``bool`` is the correct dtype to use.
mask = np.zeros_like(corr, dtype=bool)
mask[np.triu_indices_from(mask)] = True
# Set up the matplotlib figure
f, ax = plt.subplots(figsize=(20, 18))
# Draw the heatmap with the mask and correct aspect ratio
sns.heatmap(corr, mask=mask, cmap="YlGnBu", vmax=.30, center=0,
            square=True, linewidths=.5, cbar_kws={"shrink": .5})
|
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
class LbaasToBigIP(object):
def __init__(self, benchmark, benchmark_filter):
self.benchmark_name = None
self.benchmark = None
self.benchmark_filter = None
self.benchmark_projects = None
self.subject_name = None
self.subject = None
self.subject_filter = None
self.subject_projects = None
self.validate_subject(benchmark)
self.init_benchmark(benchmark, benchmark_filter)
def compare_to(self, subject, subject_filter):
self.validate_subject(subject)
self.init_subject(subject, subject_filter)
def validate_subject(self, subject):
if not isinstance(subject, dict):
raise Exception("Comparator must be a dcit type")
if len(subject) != 1:
raise Exception("Only one Comparator should be "
"provided at a time")
def init_subject(self, subject, subject_filter):
self.subject_name = subject.keys()[0]
self.subject = subject.values()[0]
self.subject_filter = subject_filter
projects = self.subject.get_projects_on_device()
self.subject_projects = self.subject_filter.get_ids(
projects
)
def init_benchmark(self, benchmark, benchmark_filter):
self.benchmark_name = benchmark.keys()[0]
self.benchmark = benchmark.values()[0]
self.benchmark_filter = benchmark_filter
projects = \
self.benchmark.get_projects_on_device()
self.benchmark_projects = set(projects)
def get_common_resources_diff(self, bm_resources,
sub_method,
resource_type=None):
sub_resources = []
bm_res = self.benchmark_filter.get_resources(
bm_resources)
bm_ids = set(bm_res.keys())
for project in self.subject_projects:
sub_resources += sub_method(
project
)
sub_ids = self.subject_filter.get_ids(
sub_resources)
diff = bm_ids - sub_ids
result = self.benchmark_filter.convert_common_resources(
diff, bm_res, resource_type=resource_type
)
return result
def get_missing_projects(self):
res = self.benchmark_projects - self.subject_projects
diff = self.benchmark_filter.convert_projects(
res
)
return diff
def get_missing_loadbalancers(self):
lb_resources = []
sub_resources = []
missing = []
converted_lb = {}
for project in self.benchmark_projects:
lb_resources += self.benchmark.get_agent_project_loadbalancers(
project
)
for project in self.subject_projects:
sub_resources += self.subject.get_project_loadbalancers(
project
)
bigip_lbs = self.subject_filter.filter_loadbalancers(sub_resources)
for lb in lb_resources:
if lb.id not in bigip_lbs:
converted_lb = self.benchmark_filter.convert_loadbalancers(
lb, ""
)
missing.append(converted_lb)
else:
bigip_ip = bigip_lbs[lb.id]
if lb.vip_address != bigip_ip:
converted_lb = self.benchmark_filter.convert_loadbalancers(
lb, bigip_ip
)
missing.append(converted_lb)
return missing
def get_missing_listeners(self):
lb_resources = []
for project in self.benchmark_projects:
lb_resources += self.benchmark.get_agent_project_loadbalancers(
project
)
ls_resources = []
lb_ids = [lb.id for lb in lb_resources]
ls_resources += self.benchmark.get_listeners_by_lb_ids(lb_ids)
sub_method = self.subject.get_project_listeners
diff = self.get_common_resources_diff(
ls_resources, sub_method, "listener"
)
return diff
def get_missing_pools(self):
    """Return pools known to the benchmark but absent on the subject."""
    agent_lbs = []
    for proj in self.benchmark_projects:
        agent_lbs.extend(
            self.benchmark.get_agent_project_loadbalancers(proj))
    # Pools are looked up by the ids of all benchmark loadbalancers.
    pools = self.benchmark.get_pools_by_lb_ids(
        [lb.id for lb in agent_lbs])
    return self.get_common_resources_diff(
        pools, self.subject.get_project_pools, "pool"
    )
def get_missing_members(self):
    """Return converted pool members present on the benchmark but not the subject.

    Members of a pool entirely absent from the subject are all reported;
    otherwise each member is reported only when its "address_port" is not
    among the subject-side entries for that pool (the member dict is
    tagged with the subject entries under 'bigip_ips' before conversion).
    """
    agent_lbs = []
    for proj in self.benchmark_projects:
        agent_lbs.extend(
            self.benchmark.get_agent_project_loadbalancers(proj))
    agent_pools = self.benchmark.get_pools_by_lb_ids(
        [lb.id for lb in agent_lbs])
    # pool_id -> members, on each side.
    agent_members = self.benchmark_filter.filter_pool_members(agent_pools)
    device_pools = []
    for proj in self.subject_projects:
        device_pools.extend(self.subject.get_project_pools(proj))
    device_members = self.subject_filter.filter_pool_members(device_pools)
    missing = []
    for pool_id, members in agent_members.items():
        if pool_id not in device_members:
            # Whole pool absent on the subject: report all its members.
            if members:
                missing.extend(
                    self.benchmark_filter.convert_members(pool_id, members))
            continue
        on_device = device_members[pool_id]
        for member in members:
            if member["address_port"] not in on_device:
                # Tag the member with what the subject actually has,
                # then report it individually.
                member['bigip_ips'] = on_device
                missing.extend(
                    self.benchmark_filter.convert_members(pool_id, [member]))
    return missing
|
# -*- coding: utf-8 -*-
# API - cs
# FileName: default.py
# Version: 1.0.0
# Create: 2018-10-24
# Modify: 2018-10-27
"""
Default settings and values
"""

# --- global ---
BUCKET_NAME = 'BUCKET_NAME'  # bucket name placeholder
CODING = 'utf-8'  # default text encoding
DOMAIN = 'DOMAIN'  # endpoint placeholder
INTERNAL_DOMAIN = 'oss-cn-beijing-internal.aliyuncs.com'  # internal OSS endpoint
PREFIX = 'mosdb/user/{0}/data/app/cs/'  # object key prefix; {0} is the user id
RESERVE_CHAR = ','  # reserved character
# (pattern, replacement) pair for stripping the reserved character.
# '' is the idiomatic empty string (was str()).
RESERVE_CHAR_REPLACE = (RESERVE_CHAR, '')

# --- auth --- (empty by default)
AK_ID = ''
AK_SECRET = ''

# --- upload ---
UPLOAD_URL_EXPIRES = 60  # signed upload URL lifetime, seconds
FILE_ID_LEN = 32  # length of generated file ids
FOLDER_ID_LEN = 8  # length of generated folder ids
FOLDER_RECORD_MAX_LEN = 16384  # max length of a folder record

# --- download ---
DOWNLOAD_URL_EXPIRES_RANGE = (0, 7200)  # allowed expiry range, seconds
|
# Declaring the gotopt2 dependencies
load("@bazel_gazelle//:deps.bzl", "gazelle_dependencies", "go_repository")
load("@bazel_tools//tools/build_defs/repo:git.bzl", "git_repository")
load("@bazel_bats//:deps.bzl", "bazel_bats_dependencies")
# Include this into any dependencies that want to compile gotopt2 from source.
# This declaration must be updated every time the dependencies in the workspace
# change.
def gotopt2_dependencies():
    """Declares the external repositories needed to build gotopt2 from source."""
    # Go module dependencies, each pinned to a commit or release tag for
    # reproducible fetches.
    go_repository(
        name = "com_github_golang_glog",
        commit = "23def4e6c14b",
        importpath = "github.com/golang/glog",
    )
    go_repository(
        name = "com_github_google_go_cmp",
        importpath = "github.com/google/go-cmp",
        tag = "v0.2.0",
    )
    go_repository(
        name = "in_gopkg_check_v1",
        commit = "20d25e280405",
        importpath = "gopkg.in/check.v1",
    )
    go_repository(
        name = "in_gopkg_yaml_v2",
        importpath = "gopkg.in/yaml.v2",
        tag = "v2.2.2",
    )
    # bazel-bats test support, pinned by commit; shallow_since allows a
    # shallow (faster) git fetch.
    git_repository(
        name = "bazel_bats",
        remote = "https://github.com/filmil/bazel-bats",
        commit = "78da0822ea339bd0292b5cc0b5de6930d91b3254",
        shallow_since = "1569564445 -0700",
    )
    # Pull in bazel-bats' own dependency declarations.
    bazel_bats_dependencies()
|
# -*- coding: utf-8 -*-
"""
Created on 2018/5/20
@author: susmote
"""
# Tally how many times each character occurs across the stripped lines
# of the input file.
kv_dict = {}
with open('../right_code.txt') as f:
    for line in f:
        for ch in line.strip():
            kv_dict[ch] = kv_dict.get(ch, 0) + 1

print(kv_dict.keys())
print(len(kv_dict))
|
def leiaInt(msg=''):
    """Read an integer from the user, re-prompting until the input is valid.

    Args:
        msg: prompt shown to the user.

    Returns:
        The integer typed by the user, or 0 if the user aborts with
        Ctrl+C (KeyboardInterrupt).
    """
    while True:
        try:
            valor = int(input(msg))
        except KeyboardInterrupt:
            # ANSI red highlights the abort notice.
            print("\033[31mO usuário preferiu não digitar esse número.\033[m")
            return 0
        except (ValueError, TypeError):
            # Message fix: added the missing space after "ERRO!".
            print("\033[31mERRO! Digite um número inteiro válido\033[m")
        else:
            return valor
def leiaFloat(msg=''):
    """Read a float from the user, re-prompting until the input is valid.

    Args:
        msg: prompt shown to the user.

    Returns:
        The float typed by the user, or 0 if the user aborts with
        Ctrl+C (KeyboardInterrupt).
    """
    while True:
        try:
            valor = float(input(msg))
        except KeyboardInterrupt:
            # ANSI red highlights the abort notice.
            print("\033[31mO usuário preferiu não digitar esse número.\033[m")
            return 0
        except (ValueError, TypeError):
            # Message fix: this reader accepts real numbers ("número real"),
            # not integers — the original text was copy-pasted from leiaInt.
            print("\033[31mERRO! Digite um número real válido\033[m")
        else:
            return valor
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.