text
stringlengths 8
6.05M
|
|---|
import json
import requests
def get_book_by_isbn(isbn):
    """Fetch a book's metadata from the Google Books API by ISBN.

    Returns a dict with title, thumbnails, description, published date,
    authors and the first category. Falls back to the search snippet when no
    description exists and to a placeholder image when no cover is available.

    Raises requests.HTTPError on a failed HTTP request, and KeyError /
    IndexError when the ISBN is unknown to the API (no ``items`` key).
    """
    # A timeout keeps a stuck API call from hanging the caller forever.
    response = requests.get(
        'https://www.googleapis.com/books/v1/volumes?q=isbn:' + isbn,
        timeout=10,
    )
    # Fail loudly on HTTP errors instead of a confusing KeyError below.
    response.raise_for_status()
    json_data = response.json()
    book_info = json_data["items"][0]["volumeInfo"]

    if "description" in book_info:
        description = book_info["description"]
    else:
        # Some volumes only carry a short search snippet.
        description = json_data["items"][0]["searchInfo"]["textSnippet"]

    placeholder = "https://s.gr-assets.com/assets/nophoto/book/50x75-a91bf249278a81aabab721ef782c4a74.png"
    # .get() also tolerates a partially-populated imageLinks entry.
    image_links = book_info.get("imageLinks", {})
    thumbnail = image_links.get("thumbnail", placeholder)
    smallThumbnail = image_links.get("smallThumbnail", placeholder)

    return {
        "title": book_info["title"],
        "thumbnail": thumbnail,
        "smallThumbnail": smallThumbnail,
        "description": description,
        "published_date": book_info["publishedDate"],
        "authors": book_info["authors"],
        "categories": book_info["categories"][0],
    }
|
import dash
import copy
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
import dash_table
import pandas as pd
from datetime import datetime as dt, date
import dash_table.FormatTemplate as FormatTemplate
from dash_table.Format import Format, Scheme, Sign, Symbol
import calendar
import os
def date_to_int(input_date):
    """Map a date/datetime to its proleptic Gregorian ordinal."""
    # toordinal() counts days since 0001-01-01, which is day 1.
    ordinal_day = input_date.toordinal()
    return ordinal_day
def check_month_end(input_date):
    """Return True when *input_date* is the last day of its month.

    monthrange()[1] is the number of days in that month, so comparing it to
    the day-of-month is the whole check; the original ``if/else`` returning
    boolean literals was redundant.
    """
    return calendar.monthrange(input_date.year, input_date.month)[1] == input_date.day
def getMarks(start_date, end_date, spacing=30):
    """Build RangeSlider marks: both endpoints, plus every month-end lying at
    least 14 days away from either endpoint (so labels never collide).

    Returns a dict mapping day ordinals to dash mark dicts.
    NOTE(review): ``spacing`` is accepted but unused; kept for interface
    compatibility — confirm whether it was meant to drive the 14-day window.

    Fixes: the endpoint ordinals were recomputed via date_to_int() on every
    loop iteration (loop-invariant), and the mark payload was built twice.
    """
    lo = start_date.toordinal()
    hi = end_date.toordinal()

    def mark(d):
        # Shared label/style payload for one slider tick.
        return {'label': d.strftime('%d %b %y'),
                'style': {'text-align': 'center',
                          'margin': 'auto'}}

    result = {}
    for i in range(lo, hi + 1):
        date_i = date.fromordinal(i)
        if i == lo or i == hi:
            # Endpoints are always labelled.
            result[i] = mark(date_i)
        elif lo + 14 <= i <= hi - 14 and \
                calendar.monthrange(date_i.year, date_i.month)[1] == date_i.day:
            # Month-ends, only when clear of the endpoints.
            result[i] = mark(date_i)
    return result
# --- Data load -------------------------------------------------------------
# Columns used below: 'date', 'value', 'plot_type', 'spot'.
csv_file_path = os.path.join('data/spx_test.csv')
api_data = pd.read_csv(csv_file_path)
# Drop the 'spot' and 't' reference series; only real risk types are plotted.
api_data = api_data[(api_data.plot_type != 'spot') & (api_data.plot_type != 't')]
api_data['date'] = api_data['date'].apply(lambda x: dt.strptime(x, "%Y-%m-%d").date())
api_data['value'] = pd.to_numeric(api_data['value'])
# Distinct risk types drive the radio buttons in the layout below.
plot_type = api_data['plot_type'].unique()

# Base Plotly layout; figure callbacks deep-copy this before customising it.
layout = dict(
    autosize=True,
    automargin=True,
    margin=dict(l=30, r=30, b=20, t=40),
    hovermode="closest",
    plot_bgcolor="#F9F9F9",
    paper_bgcolor="#F9F9F9",
    legend=dict(font=dict(size=10), orientation="h"),
    title="Satellite Overview"
)
# lower = datetime.date(2020, 5, 30)
# upper = datetime.date(2020, 6, 8)
# Slider bounds come straight from the data.
min_date = min(api_data['date'])
max_date = max(api_data['date'])
# Shared styling for the data-table header.
colors = {
    'background': 'rgb(230, 230, 230)',
    'text': '#7FDBFF',
    'text-align': 'center',
    'fontWeight': 'bold',
    'font-family': 'Arial',
    'font-size': '15'
}
app = dash.Dash(__name__, assets_folder='assets')
server = app.server  # exposed for WSGI servers (e.g. gunicorn)
# Layout components exist before callbacks validate, so skip upfront checks.
app.config.suppress_callback_exceptions = True
# Page layout: a header row (logo / title / Learn-More button), a controls
# column beside the graph + range slider, and a filterable table underneath.
app.layout = html.Div(
    [
        html.Div(
            [
                html.Div(
                    [
                        html.Img(
                            src=app.get_asset_url("dash-logo.png"),
                            id="plotly-image",
                        ),
                    ],
                    className="one-third column",
                ),
                html.Div(
                    [
                        html.H1(
                            'Performance Plot',
                        ),
                    ],
                    id='title',
                    className="one-half column",
                ),
                html.Div(
                    [
                        html.A(
                            html.Button("Learn More", id="learn-more-button"),
                            href="https://github.com/yjthay/ib_dashboard",
                        )
                    ],
                    className="one-third column",
                    id="button",
                ),
            ],
            id="header",
            className="row header",
            style={"margin-bottom": "25px"},
        ),
        html.Div(
            [
                html.Div(
                    [
                        html.P(
                            "Risk Type to Plot on Graph",
                            className="control_label",
                        ),
                        html.Div(
                            [
                                html.P('Granularity of Spot'),
                                # Spot-column spacing consumed by simple_dash_table.
                                dcc.Dropdown(
                                    id="input_gap",
                                    options=[{'value': i, 'label': i} for i in
                                             range(1, 11)],
                                    value=10
                                ),
                            ],
                        ),
                    ],
                    id="info-container_2",
                    className="pretty_container four columns",
                ),
                html.Div(
                    [
                        html.Div(
                            [
                                # One radio button per risk type found in the CSV.
                                dcc.RadioItems(
                                    id='radio_y_axis',
                                    options=[{'value': c, 'label': c} for c in plot_type],
                                    value='value',
                                    className="mini_container radiobutton-group",
                                ),
                                # html.Div(
                                #     [html.H6(id="delta_text"), html.P("Delta")],
                                #     id="delta",
                                #     className="mini_container",
                                # ),
                                # html.Div(
                                #     [html.H6(id="gamma_text"), html.P("Gamma")],
                                #     id="gamma",
                                #     className="mini_container",
                                # ),
                                # html.Div(
                                #     [html.H6(id="theta_text"), html.P("Theta")],
                                #     id="theta",
                                #     className="mini_container",
                                # ),
                                # html.Div(
                                #     [html.H6(id="vega_text"), html.P("Vega")],
                                #     id="vega",
                                #     className="mini_container",
                                # ),
                                # html.Div(
                                #     [html.H6(id="pnl_text"), html.P("P&L")],
                                #     id="pnl",
                                #     className="mini_container",
                                # ),
                            ],
                            id="info-container",
                            className="row container-display",
                        ),
                        html.Div(
                            [
                                html.Div(
                                    [
                                        # html.P('Date reference'),
                                        # Two-handle slider over day ordinals;
                                        # marks come from getMarks() above.
                                        dcc.RangeSlider(
                                            id='date_slider',
                                            min=date_to_int(min_date),
                                            max=date_to_int(max_date),
                                            value=[date_to_int(min_date),
                                                   date_to_int(max_date)],
                                            marks=getMarks(min_date, max_date),
                                            included=True,
                                            allowCross=False,
                                            updatemode='drag',
                                        )
                                    ], className='date-slider'
                                ),
                                dcc.Graph(
                                    id='graph_dynamic',
                                    animate=True,
                                ),
                                # Textual echo of the selected date range.
                                html.Div(id='output-container-range-slider',
                                         style={'text-align': 'left'}),
                            ],
                            id="graph_dynamicContainer",
                            className="pretty_container"
                        ),
                    ],
                    id="right-column",
                    className="eight columns",
                ),
            ],
            className="row flex-display",
        ),
        html.Div(
            [
                html.Div(
                    [
                        html.Div(
                            id='output_date_picker'
                        ),
                        # Columns/data/styles are filled in by simple_dash_table.
                        dash_table.DataTable(
                            id='performance_table',
                            filter_action="native",
                            style_header=colors,
                            style_as_list_view=True,
                            style_table={'overflowX': 'scroll'}
                        ),
                    ],
                    className="eight columns",
                ),
            ],
            id='lower-dash_table',
            className="row flex-display",
        ),
    ],
    id="mainContainer",
    style={"display": "flex", "flex-direction": "column"},
)
# @app.callback(
# [
# Output("delta_text", "children"),
# Output("gamma_text", "children"),
# Output("theta_text", "children"),
# Output("vega_text", "children"),
# Output("pnl_text", "children"),
# ],
# [
# Input('y_axis', 'value'),
# Input('date_slider', 'value')
# ],
# )
# def update_mini_containers(y_axis, date_slider):
# start_date = date.fromordinal(min(date_slider))
# end_date = date.fromordinal(max(date_slider))
# df = api_data.loc[(api_data['date'] == start_date) & (api_data['plot_type'] == y_axis)]
# ref_df = api_data.loc[(api_data['date'] == end_date) & (api_data['plot_type'] == y_axis)]
# join_df = ref_df.merge(right=df, how='left', on=['spot', 'plot_type'], suffixes=['_end', '_start'])
#
# return data[0] + " mcf", data[1] + " bbl", data[2] + " bbl"
@app.callback(Output('output-container-range-slider', 'children'),
              [Input('date_slider', 'value')])
def update_output(date_slider):
    """Describe the slider's currently selected date range in plain text."""
    first = date.fromordinal(min(date_slider))
    last = date.fromordinal(max(date_slider))
    return 'Examining data from {} to {}'.format(first.strftime('%d-%b-%y'),
                                                 last.strftime('%d-%b-%y'))
@app.callback(Output('graph_dynamic', 'figure'),
              [Input('radio_y_axis', 'value'),
               Input('date_slider', 'value')])
def graph_against_spot(y_axis, date_slider):
    """Plot the selected risk type against spot for the slider's date range.

    Produces three traces: the start-date curve, the end-date curve, and a
    bar series holding start-minus-end per spot.
    """
    # Never mutate the shared base layout.
    layout_main_graph = copy.deepcopy(layout)
    start_date = date.fromordinal(min(date_slider))
    end_date = date.fromordinal(max(date_slider))
    df = api_data.loc[(api_data['date'] == start_date) & (api_data['plot_type'] == y_axis)]
    ref_df = api_data.loc[(api_data['date'] == end_date) & (api_data['plot_type'] == y_axis)]
    # Align start and end values on the same spot so they can be differenced.
    join_df = ref_df.merge(right=df, how='left', on=['spot', 'plot_type'], suffixes=['_end', '_start'])
    colors = []
    # NOTE(review): one colour per integer in [min(spot), max(spot)] — this
    # only matches the bar count when spots are consecutive integers; confirm.
    for i in range(min(join_df.spot), max(join_df.spot) + 1):
        colors.append("rgb(123, 199, 255)")
    data = [
        dict(
            type='bar',
            x=join_df['spot'],
            y=(join_df.value_start - join_df.value_end).astype(int),
            name='Start-End',
            marker=dict(color=colors),
        ),
        dict(
            x=df['spot'],
            y=df['value'],
            mode='lines',
            name='Start',
        ),
        dict(
            x=ref_df['spot'],
            y=ref_df['value'],
            mode='lines',
            name='End',
        ),
    ]
    layout_main_graph['title'] = 'Graph vs spot'
    layout_main_graph["showlegend"] = True
    layout_main_graph["autosize"] = True
    layout_main_graph["hovermode"] = 'compare'
    figure = dict(data=data, layout=layout_main_graph)
    return figure
@app.callback([Output('performance_table', 'columns'), Output('performance_table', 'data'),
               Output('performance_table', 'style_data_conditional')],
              [Input('input_gap', 'value'), Input('date_slider', 'value')])
def simple_dash_table(input_gap, date_slider):
    """Pivot value-by-spot for the slider's two endpoint dates into the table.

    Only every ``input_gap``-th spot column is shown; negative numeric cells
    are coloured red and odd rows get a striped background.
    """
    # input_date = dt.strptime(re.split('T| ', input_date)[0], '%Y-%m-%d').date()
    start_date = date.fromordinal(min(date_slider))
    end_date = date.fromordinal(max(date_slider))
    df = api_data[(api_data['date'] == start_date) | (api_data['date'] == end_date)]
    # Rows: (date, plot_type); columns: spot; cells: summed value.
    pt = df.pivot_table(columns=['spot'], values='value', index=['date', 'plot_type'],
                        aggfunc=sum).reset_index()
    # pt = pt[(pt.plot_type != 'spot') & (pt.plot_type != 't')]
    for i in pt.columns:
        if isinstance(i, int):
            pt[i] = pd.to_numeric(pt[i])
    columns = [{'id': 'date', 'name': 'date', 'type': 'text'}] + \
              [{'id': 'plot_type', 'name': 'plot_type', 'type': 'text'}] + \
              [{'id': str(i),
                'name': str(i),
                'type': 'numeric',
                # NOTE: 'parantheses' is the (misspelled) identifier that
                # dash_table's Format API itself exposes — do not "fix" it.
                'format': Format(
                    nully='N/A',
                    precision=2,
                    scheme=Scheme.fixed,
                    sign=Sign.parantheses
                )}
               for i in pt.columns if isinstance(i, int) and i % int(input_gap) == 0]
    data = pt[[i for i in pt.columns if isinstance(i, str) or i % int(input_gap) == 0]].to_dict('records')
    # Stripe odd rows; paint negative numeric cells red via a filter query.
    style_data_conditional = [{'if': {'row_index': 'odd'}, 'backgroundColor': '#e2f2f6'}] + \
                             [{'if': {'column_id': col['id'], 'filter_query': '{}<0.0'.format("{" + col['id'] + "}")},
                               'color': 'red'}
                              for col in columns if col['type'] == 'numeric']
    return columns, data, style_data_conditional
if __name__ == '__main__':
    # app.run_server(host='0.0.0.0', port=8080, debug=True, use_reloader=False)
    # Local development server: debug=True enables hot reload and tracebacks.
    app.run_server(debug=True)
|
from django.shortcuts import render, redirect
from django.template import loader
from django.contrib.auth import authenticate, login as log_in
from django.contrib.auth.decorators import login_required
from .forms import DocumentForm, ItemForm
from .models import Document, Item, Settings
from django.http import HttpResponse
import io as BytesIO
from xhtml2pdf import pisa
from django.template.loader import get_template
from django.template import Context
from django.http import HttpResponse
from cgi import escape
def index(request):
    """Render the site's base page."""
    rendered = render(request, 'forms/base.html')
    return HttpResponse(rendered)
def login(request):
    """Authenticate posted credentials; on success start the session.

    Successful logins land on the base page; failures (and plain GETs)
    re-render the login form.
    """
    if request.method == "POST":
        username = request.POST['username']
        password = request.POST['password']
        user = authenticate(request, username=username, password=password)
        if user is not None:
            log_in(request, user)
            return HttpResponse(render(request, 'forms/base.html'))
        # BUG FIX: a failed authentication previously fell through and the
        # view returned None, which Django rejects with a ValueError.
        # Show the login form again instead.
        return HttpResponse(render(request, 'forms/../templates/registration/login.html'))
    else:
        return HttpResponse(render(request, 'forms/../templates/registration/login.html'))
@login_required()
def logout(request):
    """Log the current user out and show the login page.

    BUG FIX: the view previously called itself (``logout(request)``), which
    recursed until RecursionError and never ended the session. Delegate to
    Django's auth logout, which actually flushes the session.
    """
    from django.contrib.auth import logout as auth_logout
    auth_logout(request)
    return HttpResponse(render(request, 'forms/../templates/registration/login.html'))
@login_required()
def documents(request):
    """List all documents, newest first."""
    docs = Document.objects.all().order_by('-created_date')
    template = loader.get_template('forms/documents.html')
    ctx = {'documents': docs}
    return HttpResponse(template.render(ctx, request))
@login_required()
def new_document(request):
    """Show the new-document form; on a valid POST create the document and
    jump straight to its item list."""
    if request.method != 'POST':
        form = DocumentForm()
        return HttpResponse(render(request, 'forms/new_document.html', {'form': form}))
    form = DocumentForm(request.POST)
    if not form.is_valid():
        # Re-render with the bound form so validation errors are shown.
        return HttpResponse(render(request, 'forms/new_document.html', {'form': form}))
    document = form.save()
    return redirect(document_items, series=document.series, number=document.number)
@login_required()
def document_items(request, series, number):
    """Show a document's item list and handle adding one item via POST."""
    document = Document.objects.get(series=series, number=number)
    if request.method == 'POST':
        item_form = ItemForm(request.POST)
        if item_form.is_valid():
            # Attach the new item to this document before saving.
            item = item_form.instance
            item.document = document
            item.save()
        # NOTE(review): an invalid form falls through silently and is replaced
        # by a blank form below, so validation errors are never shown — confirm.
    # NOTE(review): filters by the raw ``number`` rather than the Document
    # instance fetched above; this only matches if Document's pk equals
    # ``number`` — verify against the model definition.
    items = Item.objects.filter(document=number)
    item_form = ItemForm()
    template = loader.get_template('forms/document_items.html')
    context = {
        'document': document,
        'items': items,
        'form': item_form
    }
    return HttpResponse(template.render(context, request))
def render_to_pdf(template_src, context_dict):
    """Render a Django template to a PDF HttpResponse via xhtml2pdf/pisa.

    On a pisa error, returns the escaped HTML so the problem can be seen.
    """
    template = get_template(template_src)
    html = template.render(context_dict)
    result = BytesIO.BytesIO()
    pdf = pisa.pisaDocument(BytesIO.BytesIO(html.encode("UTF-8")), result)
    if not pdf.err:
        return HttpResponse(result.getvalue(), content_type='application/pdf')
    # BUG FIX: ``cgi.escape`` was deprecated and removed in Python 3.8;
    # ``html.escape`` is the stdlib replacement (imported locally because the
    # module-level name ``html`` is shadowed nowhere here, but the local
    # variable above is — ``from html import escape`` is unaffected).
    from html import escape
    return HttpResponse('We had some errors<pre>%s</pre>' % escape(html))
def get_pdf(request, document_number):
    """Build the printable PDF for one document: its items, the commission
    settings, the commission members and the requesting user."""
    settings = Settings.objects.first()
    context = {
        'pagesize': 'A4',
        'document': Document.objects.get(number=document_number),
        'items': Item.objects.filter(document=document_number),
        'settings': settings,
        'members': settings.commission_members.all(),
        'user': request.user,
    }
    return render_to_pdf('pdf_template.html', context)
|
# 优先队列
# 左边进就加,右边出就删
# 高度变了就加到结果数组
# 做过了,优先队列问题
from sortedcontainers import SortedList
class Solution:
    # Sweep-line over building edges, keeping a sorted multiset of the
    # heights of the currently "alive" buildings (stored negated so that
    # index 0 is always the tallest).
    def getSkyline(self, buildings: "List[List[int]]") -> "List[List[int]]":
        """Return the skyline key points for *buildings* (LeetCode 218).

        BUG FIX: the original annotated the signature with ``List`` without
        importing it from ``typing``, so executing the class body raised
        NameError; string annotations avoid the lookup entirely. The
        third-party SortedList is replaced with the stdlib ``bisect`` module,
        which gives the same sorted-insert / remove / min semantics.
        """
        import bisect
        events = []
        for left, right, height in buildings:
            events.append((left, -height))   # entering edge: add the height
            events.append((right, height))   # leaving edge: remove the height
        # Sort by x; at equal x, entering edges (negative) come first and
        # taller buildings are processed before shorter ones.
        events.sort()

        alive = [0]      # negated heights of active buildings, ascending
        prev_max = 0     # height of the skyline before the current event
        keypoints = []
        for x, h in events:
            if h < 0:
                bisect.insort(alive, h)
            else:
                alive.remove(-h)
            curr_max = -alive[0]
            # Record a key point only when the visible height changes.
            if curr_max != prev_max:
                keypoints.append([x, curr_max])
                prev_max = curr_max
        return keypoints
|
import os
from twitchio.ext import commands
# set up the bot
# set up the bot
# All credentials/configuration come from environment variables so nothing
# secret is committed: TMI_TOKEN (IRC OAuth token), CLIENT_ID, BOT_NICK,
# BOT_PREFIX (command prefix, e.g. '!'), CHANNEL (channel to join).
bot = commands.Bot(
    irc_token=os.environ['TMI_TOKEN'],
    client_id=os.environ['CLIENT_ID'],
    nick=os.environ['BOT_NICK'],
    prefix=os.environ['BOT_PREFIX'],
    initial_channels=[os.environ['CHANNEL']]
)
@bot.event
async def event_ready():
    """Called once when the bot goes online."""
    print(f"{os.environ['BOT_NICK']} is online!")
    # NOTE(review): reaches into the private websocket because channel objects
    # are not available yet inside event_ready — this depends on twitchio
    # internals; re-verify after any twitchio upgrade.
    ws = bot._ws  # this is only needed to send messages within event_ready
    await ws.send_privmsg(os.environ['CHANNEL'], f"/me has landed!")
@bot.event
async def event_message(ctx):
    """Runs every time a message is sent in chat.

    Ignores the bot's own messages, lets command handling run first, then
    sends a canned reply for every keyword group found in the message (a
    single message can trigger several replies, as before).
    """
    # make sure the bot ignores itself and the streamer
    if ctx.author.name.lower() == os.environ['BOT_NICK'].lower():
        return
    await bot.handle_commands(ctx)
    # await ctx.channel.send(ctx.content)
    # Hoisted: the original recomputed ctx.content.lower() for every keyword.
    content = ctx.content.lower()
    if 'hello' in content:
        await ctx.channel.send(f"Hi, @{ctx.author.name}!")
    # Keyword groups -> canned replies, checked in the original order.
    keyword_replies = [
        (('scooter', 'scottie'), "Scooter bad!!! AHAHAHAAH XD XD"),
        (('kappa',), "Kappa"),
        (('pog',), "PogChamp"),
        (('nick',), "Champion Ocean Best Player PogChamp OMG"),
        (('cade',), "Wow coding genius and god on his quinary role wow BloodTrail Champion Ocean"),
        (('anthony',), "10000 IQ Draven trash BibleThump"),
        (('marshall',), "Wow great streamer sexy beard man"),
    ]
    for keywords, reply in keyword_replies:
        if any(k in content for k in keywords):
            await ctx.channel.send(reply)
@bot.command(name='test')
async def test(ctx):
    """Chat command 'test' (prefix taken from BOT_PREFIX): liveness check."""
    await ctx.send('test passed!')
if __name__ == "__main__":
    # Start the bot's event loop; blocks until the process is stopped.
    bot.run()
|
import os
import urllib
import shutil
import os
import urllib
import urllib.request
from urllib.error import *
def baseName(url):
    """Return the final path component (file name) of *url*."""
    url_path = urllib.parse.urlparse(url).path
    return os.path.basename(url_path)
def download(url, filename):
    """Download *url* to *filename*, creating missing parent directories.

    Variable legend (original obfuscated names kept):
      v6 - final destination path handed to urlretrieve
      v0 - working copy of the path, later its '/'-split components
      v4 - accumulated list of ancestor directories to create, shallow first
      v1, v3 - loop indices; v2 - prefix path rebuilt component by component
      v5 - current ancestor directory being created
    Exits the whole process via quit() on any validation or I/O error.
    """
    try:
        if filename == "":
            print("Error: Empty destination file name")
            quit()
        v6 = filename
        if os.path.isdir(filename):
            print("Error: '%s' is a directory" % filename)
            quit()
        if "/" in filename:  # a (in)valid path
            v4 = []
            v0 = filename
            if v0[-2:] == "//":
                print("Error: invalid path")
                quit()
            if os.name == "nt":
                if v0[-1] == "/":  # trim unnecessary last '/'
                    v0 = v0[:-1]
                    # NOTE(review): this drops one character beyond the
                    # trailing slash — looks suspicious; confirm intent.
                    v6 = v0[:-1]
                v0 = v0.split("/")
                del v0[-1]
                if "" in v0:
                    print("Error: invalid path")
                    quit()
                else:
                    # Build every ancestor prefix: a, a/b, a/b/c, ...
                    for v1 in range(0, len(v0)):
                        v2 = v0[0]
                        for v3 in range(1, v1+1):
                            v2 += "/" + v0[v3]
                        v4.append(v2)
            else:
                if v0 != "/" and v0[-1] == "/":  # trim unnecessary last "/"
                    v0 = v0[:-1]
                    # NOTE(review): same suspicious extra trim as above.
                    v6 = v0[:-1]
                v0 = v0.split("/")
                del v0[-1]
                if v0[0] == "":  # if is absolute path
                    for v1 in range(0, len(v0)):
                        v2 = ""
                        for v3 in range(1, v1 + 1):
                            v2 += "/" + v0[v3]
                        v4.append(v2)
                    v4[0] = "/"
                elif "" in v0:
                    print("Error: invalid path")
                    quit()
                else:
                    # Relative path: same prefix construction as above.
                    for v1 in range(0, len(v0)):
                        v2 = v0[0]
                        for v3 in range(1, v1+1):
                            v2 += "/" + v0[v3]
                        v4.append(v2)
            # Create each missing ancestor directory, shallow to deep.
            for v5 in v4:
                if os.path.isfile(v5):
                    print("Error: %s is a file - cannot create a directory there" % v5)
                    quit()
                elif not os.path.exists(v5):
                    os.mkdir(v5)
    except PermissionError:
        print("Error: Permission denied to download file as '%s'" % v6)
        quit()
    try:
        urllib.request.urlretrieve(url, v6)
    except PermissionError:
        print("Error: Permission denied to download file as '%s'" % v6)
        quit()
    except URLError as v0:
        print("Network error: " + str(v0))
        quit()
def multi_download(urls):
    """Download every URL in *urls*, naming each file after its URL basename."""
    for target in urls:
        download(target, baseName(target))
def readlines(filename):
    """Return the lines of *filename* with surrounding whitespace stripped.

    Idiom fix: iterate the file directly inside the ``with`` block instead of
    materialising it via ``f.readlines()`` and stripping in a second pass.
    """
    with open(filename) as f:
        return [line.strip() for line in f]
# NOTE(review): duplicate of the baseName defined earlier in this file — this
# redefinition shadows it with identical behaviour; consider removing one.
def baseName(url):
    """Return the final path component (file name) of *url*."""
    return os.path.basename(urllib.parse.urlparse(url).path)
def move(old, new):
    """Move file/dir *old* into directory *new*, creating *new* if missing
    and replacing any same-named entry already inside it.

    Variable legend (original obfuscated names kept):
      v0 - destination path the moved entry will occupy
      v1 - working copy / '/'-split components of *new*
      v2 - accumulated ancestor directories to create, shallow first
      v3 - prefix path rebuilt component by component
      v4, v5 - loop indices; v6 - current ancestor directory
    Exits the whole process via quit() on validation/permission errors.
    """
    v0 = "%s/%s" % (new, old.split("/")[-1])
    try:
        if not os.path.exists(old):
            print("Error: %s doesn't exist" % old)
            quit()
        if os.name == "nt":  # Windows
            # Refuse to move a drive root such as "C:/".
            if old[1] == ":" and len(old) < 4:
                print("Error: file to move cannot be root")
                quit()
        else:
            if(old == "/"):
                print("Error: file to move cannot be root")
                quit()
        if not os.path.isdir(new):
            # Destination doesn't exist yet: build its ancestor chain.
            v1 = new
            v2 = []
            if os.name == "nt":  # Windows
                if new[-1] == "/":
                    v1 = new[:-1]
                v1 = v1.split("/")
                if v1 == [""]:
                    print("Error: destination directory cannot be empty")
                    quit()
                elif "" in v1:
                    print("Error: invalid path")
                    quit()
                else:
                    # Build every ancestor prefix: a, a/b, a/b/c, ...
                    for v4 in range(0, len(v1)):
                        v3 = v1[0]
                        for v5 in range(1, v4+1):
                            v3 += "/" + v1[v5]
                        v2.append(v3)
            else:
                if new != "/" and new[-1] == "/":
                    v1 = new[:-1]
                v1 = v1.split("/")
                if v1 == [""]:
                    print("Error: destination directory cannot be empty")
                    quit()
                elif v1[0] == "":  # e.g. /wef/we
                    for v4 in range(0, len(v1)):
                        v3 = v1[0]
                        for v5 in range(1, v4+1):
                            v3 += "/" + v1[v5]
                        v2.append(v3)
                    v2[0] = "/"
                elif "" in v1:
                    print("Error: invalid path")
                    quit()
                else:
                    for v4 in range(0, len(v1)):
                        v3 = v1[0]
                        for v5 in range(1, v4+1):
                            v3 += "/" + v1[v5]
                        v2.append(v3)
            # Create the missing ancestors, shallow to deep, then move.
            for v6 in v2:
                if os.path.isfile(v6):
                    print("Error: %s is a file - cannot create a directory there" % v6)
                    quit()
                elif not os.path.exists(v6):
                    os.mkdir(v6)
            shutil.move(old, new)
        elif os.path.exists(v0):
            # Destination already holds a same-named entry: remove it first
            # (os.remove for files, rmtree for directories).
            try:
                os.remove(v0)
            except OSError:
                shutil.rmtree(v0)
            shutil.move(old, new)
        else:
            shutil.move(old, new)
    except PermissionError:
        print("Error: Permission denied to move '%s' to '%s'" % (old, new))
        quit()
# Batch driver: read URLs (one per line) from download_list.txt, download
# each file next to the script, then move them all into ./downloaded.
filename = "download_list.txt"
lines = readlines(filename)
print("starting download")
multi_download(lines)
print("finished download")
for line in lines:
    base = baseName(line)
    move(base, "downloaded")
|
import daemon
from walscraper import main

# BUG FIX: `print daemon.__file__` is Python 2 statement syntax and a
# SyntaxError on Python 3; use the print() function. Shows which 'daemon'
# module was actually imported (useful because several packages ship one).
print(daemon.__file__)
# with daemon.DaemonContext():
#     main()
|
# Generated by Django 2.1 on 2018-09-14 09:44
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add optional contact details (text, email, phone, phone prefix) to the
    organization model."""

    dependencies = [
        ('mainapp', '0002_organization'),
    ]

    operations = [
        # All four fields are nullable and blank-able, so existing rows need
        # no default value when the columns are added.
        migrations.AddField(
            model_name='organization',
            name='contact_text',
            field=models.CharField(blank=True, max_length=128, null=True),
        ),
        migrations.AddField(
            model_name='organization',
            name='email',
            field=models.CharField(blank=True, max_length=256, null=True),
        ),
        migrations.AddField(
            model_name='organization',
            name='phone',
            field=models.CharField(blank=True, max_length=64, null=True),
        ),
        migrations.AddField(
            model_name='organization',
            name='phone_prefix',
            field=models.CharField(blank=True, max_length=10, null=True),
        ),
    ]
|
from collections import deque
from math import inf
def distanza(insieme_1, insieme_2, grafo):
    """Return the length in edges of the shortest path joining any node of
    *insieme_1* to any node of *insieme_2* in *grafo* (an adjacency dict
    keyed by consecutive integers 0..n-1). Returns ``inf`` when no node of
    *insieme_2* is reachable.
    """

    def bfs(source, graph):
        # Standard unweighted BFS; -1 marks "not reached yet".
        dist = [-1] * len(graph)
        dist[source] = 0
        frontier = deque([source])
        while frontier:
            current = frontier.popleft()
            for neighbour in graph[current]:
                if dist[neighbour] < 0:
                    dist[neighbour] = dist[current] + 1
                    frontier.append(neighbour)
        return dist

    # Attach a virtual source node to every node of insieme_1; one BFS from
    # it then yields (distance to insieme_1) + 1 for every node at once.
    virtual = len(grafo)
    augmented = dict(grafo)
    augmented[virtual] = [n for n in grafo if n in insieme_1]
    dist = bfs(virtual, augmented)

    best = inf
    for n in grafo:
        if n in insieme_2 and 0 <= dist[n] < best:
            best = dist[n]
    # Subtract the virtual hop; inf - 1 is still inf for the unreachable case.
    return best - 1
|
from .codelength import codelength
from . import grassberger
from .read_file import read_links, read_tree
__all__ = ["codelength", "grassberger", "read_links", "read_tree"]
|
from django.conf.urls import url
from .views import IssuesByDayForRepo
urlpatterns = [
    # Routes /issues-by-day/<repository_id>/ to the per-repo issues view.
    # NOTE(review): the regex has no trailing '$', so any suffix after the
    # trailing slash also matches — confirm this is intended.
    url(r'^issues-by-day/(?P<repository_id>\d+)/', IssuesByDayForRepo.as_view(), name='issues_by_day_for_repo'),
]
|
from django.contrib import admin
from . import models
# Register your models here.
# class GTINBaseModelAdmin(admin.ModelAdmin):
# model = models.GTINBaseData
# list_display = [f.name for f in models.GTINBaseData._meta.fields]
#
class GTINInformationAdmin(admin.ModelAdmin):
    """Admin configuration that lists every concrete model field as a column."""
    model = models.GTINInformation
    # Show all fields in the change list, picked up from the model's _meta.
    list_display = [f.name for f in models.GTINInformation._meta.fields]
# Register your models here.
admin.site.register(models.GTINInformation, GTINInformationAdmin)
# admin.site.register(models.GTINBaseData, GTINBaseModelAdmin)
|
from django.urls import path
from owner import views
urlpatterns=[
    # Maps /numbertostring to the converter view. NOTE(review): no trailing
    # slash in the route — confirm APPEND_SLASH behaviour is acceptable.
    path("numbertostring", views.Num_to_str, name="Numtostrconverter"),
]
|
# -*- coding: utf-8 -*-
import sys
from django.views.decorators.csrf import csrf_exempt
from django.http import HttpResponse,HttpResponseRedirect
from django.template import RequestContext
from datetime import datetime
from django.shortcuts import render_to_response
from emart_models.models import Customers,Commodities,CommoditiesClass,ShoppingCarts,Orders
from config import class_map
try:
import simplejson as json
except ImportError:
import json
# view --> redirect to the login page
def login(request):
return render_to_response('login.html',{},
context_instance=RequestContext(request))
# view --> handle login cases
def handle_login(request):
if 'username' in request.POST and 'password' in request.POST:
customer_name = request.POST['username']
customer_pass = request.POST['password']
ret = Customers.objects.filter(CustomerName=customer_name,
CustomerPass=customer_pass)
rec_list = Commodities.objects.filter(Recommend='Y')
newup_list = Commodities.objects.filter(NewUpload='Y')
if ret:
request.session['user_id'] = ret[0].id
chart_count = get_chart_item_count(ret[0].id)
order_count = get_order_count(ret[0].id)
return render_to_response('index.html',{'username':ret[0].CustomerName,
'user_id':ret[0].id,'rec_list':rec_list,'newup_list':newup_list,
'chart_count':chart_count,'order_count':order_count},
context_instance=RequestContext(request))
else:
return render_to_response('index.html',{},
context_instance=RequestContext(request))
# view --> sign up a new user
def signup(request):
return render_to_response('signup.html',{},
context_instance=RequestContext(request))
# view --> view the detail info of commoditie(id = commoditie_id)
def detail(request,commoditie_id):
ret = Commodities.objects.select_related().get(id=commoditie_id)
commoditie = {'CommoditieName':ret.CommoditieName,'CommoditiePrice':ret. \
CommoditiePrice,'CommoditieImageURL':ret.CommoditieImageURL, \
'CommoditieDescrText':ret.CommoditieDescrText,'CommoditieClass': \
ret.CommoditieClass,'id':ret.id}
if request.session.get('user_id',False):
chart_count = get_chart_item_count(request.session.get('user_id'))
order_count = get_order_count(request.session.get('user_id'))
username = Customers.objects.filter(id=request.session['user_id'])[0].CustomerName
return render_to_response('detail.html',{'commoditie':commoditie,
'username':username,'chart_count':chart_count,'order_count':order_count},
context_instance=RequestContext(request))
else:
return render_to_response('detail.html',{'commoditie':commoditie},
context_instance=RequestContext(request))
# view --> redirect to the home page
def home(request):
newup_list = Commodities.objects.filter(NewUpload='Y')[0:4]
rec_list = Commodities.objects.filter(Recommend='Y')
if request.session.get('user_id',False):
chart_count = get_chart_item_count(request.session.get('user_id'))
order_count = get_order_count(request.session.get('user_id'))
username = Customers.objects.filter(id=request.session['user_id'])[0].CustomerName
return render_to_response('index.html',
{'rec_list':rec_list,'newup_list':newup_list,'username':username,
'chart_count':chart_count,'order_count':order_count},
context_instance=RequestContext(request))
else:
return render_to_response('index.html',
{'rec_list':rec_list,'newup_list':newup_list},
context_instance=RequestContext(request))
# view --> view commodities by commoditie classs
def view_commodities_by_class(request,commoditie_class):
commoditie_class_id = CommoditiesClass.objects.filter(CommoditieClassName=\
class_map[commoditie_class])[0].id
commoditie_list = Commodities.objects.filter(CommoditieClass_id=commoditie_class_id)
if request.session.get('user_id',False):
chart_count = get_chart_item_count(request.session.get('user_id'))
order_count = get_order_count(request.session.get('user_id'))
username = Customers.objects.filter(id=request.session['user_id'])[0].CustomerName
return render_to_response('view_commodities_by_class.html',
{'username':username,'commoditie_list':commoditie_list,
'commoditie_class_name':class_map[commoditie_class],
'commoditie_class':commoditie_class,'chart_count':chart_count,
'order_count':order_count},
context_instance=RequestContext(request))
else:
return render_to_response('view_commodities_by_class.html',{'commoditie_class_name': \
class_map[commoditie_class],'commoditie_class':commoditie_class,
'commoditie_list':commoditie_list},
context_instance=RequestContext(request))
# view --> view customer's chart
def my_chart(request):
# if is login
if not request.session.get('user_id',False):
return render_to_response('my_chart.html',{},
context_instance=RequestContext(request))
chart_list = ShoppingCarts.objects.filter(Customer_id=request.session['user_id'])
username = Customers.objects.get(id=request.session['user_id']).CustomerName
chart_count = get_chart_item_count(request.session.get('user_id'))
order_count = get_order_count(request.session.get('user_id'))
total_price = 0.0
for item in chart_list:
total_price += float(item.Commoditie.CommoditiePrice)
return render_to_response('my_chart.html',
{'username':username,'chart_list':chart_list,
'chart_count':chart_count,'total_price':total_price,'order_count': \
order_count},
context_instance=RequestContext(request))
# method --> get chart_list's count
def get_chart_item_count(customer_id):
count = 0
try:
if customer_id:
count = ShoppingCarts.objects.filter(Customer_id=customer_id).count()
except:
return 0
return count
# method --> get order's count
def get_order_count(customer_id):
count = 0
try:
if customer_id:
count = Orders.objects.filter(Customer_id=customer_id).count()
except:
return 0
return count
# method --> AJAX RESTFUL API add_to_chart
@csrf_exempt
def add_to_chart(request):
# if it is a ajax request
if not request.is_ajax():
return HttpResponse(json.dumps({'status':'error0','message':'not a ajax request'}))
# if user is login
if False == request.session.get('user_id',False):
return HttpResponse(json.dumps({'status':'error1','message':'please sign in'}))
try:
commoditie_id = request.POST.get('commoditie_id',None)
# check if it is in chart
ret = ShoppingCarts.objects.filter(Commoditie_id=commoditie_id).count()
if ret > 0:
return HttpResponse(json.dumps({'status':'error2','message':'already in chart'}))
customer_id = request.session.get('user_id',False)
if commoditie_id:
customer_instance = Customers.objects.get(id=customer_id)
commoditie_instance = Commodities.objects.get(id=commoditie_id)
shoppingcart_instance = ShoppingCarts(Customer=customer_instance,
Commoditie=commoditie_instance)
shoppingcart_instance.save()
chart_count = get_chart_item_count(customer_id)
except:
return HttpResponse(json.dumps({'status':str(sys.exc_info()[0]) + str(sys.exc_info()[1])}))
return HttpResponse(json.dumps({'status':'success','message':'success','commoditie_id':commoditie_id,\
'chart_count':chart_count}))
# method --> generate order by chart
def generate_order(request):
if False == request.session.get('user_id',False):
return render_to_response('login.html',{},
context_instance=RequestContext(request))
try:
commoditie_list = ''
ret = ShoppingCarts.objects.filter(Customer_id=request.session['user_id'])
for item in ret:
if commoditie_list == '':
commoditie_list = str(item.Commoditie_id)
else:
commoditie_list += '@'
commoditie_list += str(item.Commoditie_id)
customer_instance = Customers.objects.get(id=request.session['user_id'])
order_instance = Orders(Customer=customer_instance,CommoditieList=commoditie_list,
OrderTime=datetime.now())
order_instance.save()
except:
return render_to_response('my_orders.html',{'total_price':str(sys.exc_info()[0]) + str(sys.exc_info()[1])},
context_instance=RequestContext(request))
#clear the chart for user_id after genarate the order
ShoppingCarts.objects.filter(Customer_id=request.session['user_id']).delete()
#assert False,commoditie_list
return my_orders(request)
# view --> buy the commoditie right now
def buy_now(request,commoditie_id):
if False == request.session.get('user_id',False):
return render_to_response('buy_now.html',{'commoditie_id':commoditie_id},
context_instance=RequestContext(request))
username = Customers.objects.get(id=request.session['user_id']).CustomerName
customer_instance = Customers.objects.get(id=request.session['user_id'])
order_instance = Orders(Customer=customer_instance,CommoditieList=commoditie_id,
OrderTime=datetime.now())
order_instance.save()
#assert False,order_instance
return HttpResponseRedirect('/my_orders/')
# view --> view customer's orders
def my_orders(request):
if request.session.get('user_id',False):
username = Customers.objects.get(id=request.session['user_id']).CustomerName
chart_count = get_chart_item_count(request.session['user_id'])
order_count = get_order_count(request.session.get('user_id'))
order_list = []
order_item = {}
commoditie_list = []
order_id = 0
commoditie = {}
comm_list = Orders.objects.filter(Customer_id=request.session['user_id'])
price = 0.0
#assert False,comm_id
try:
for item in comm_list:
order_id = item.id
for comm_id in item.CommoditieList.split('@'):
#assert False,comm_id
commoditie['id'] = comm_id
commoditie['CommoditieName'] = Commodities.objects.get(id=comm_id).CommoditieName
commoditie['CommoditiePrice'] = Commodities.objects.get(id=comm_id).CommoditiePrice
commoditie['CommoditieImageURL'] = Commodities.objects.get(id=comm_id).CommoditieImageURL
price += float(commoditie['CommoditiePrice'])
commoditie_list.append(commoditie)
commoditie = {}
order_item['order_id'] = order_id
order_item['commoditie_list'] = commoditie_list
commoditie_list = []
order_item['order_total_price'] = price
order_item['order_time'] = str(comm_list[0].OrderTime)
order_list.append(order_item)
order_item = {}
# assert False,order_list
price = 0.0
except:
return render_to_response('my_orders.html',
{'username':username,'chart_count':chart_count},
context_instance=RequestContext(request))
#assert False,order_list
return render_to_response('my_orders.html',
{'username':username,'chart_count':chart_count,'order_list':order_list,\
'order_count':order_count},
context_instance=RequestContext(request))
return render_to_response('my_orders.html',{},
context_instance=RequestContext(request))
# view --> delete item from chart_list
def delete_item(request, commoditie_id):
    """Remove one commodity from the logged-in customer's cart, then
    re-render the cart page with refreshed counts and total price."""
    # if is login
    if not request.session.get('user_id', False):
        return render_to_response('my_chart.html', {},
                                  context_instance=RequestContext(request))
    # BUG FIX: restrict the delete to *this* customer's cart row (the old
    # code deleted the first row matching the commodity for any customer),
    # and tolerate the row already being gone (double click / stale page)
    # instead of raising IndexError.
    matches = ShoppingCarts.objects.filter(Commoditie_id=commoditie_id,
                                           Customer_id=request.session['user_id'])
    if matches:
        matches[0].delete()
    chart_list = ShoppingCarts.objects.filter(Customer_id=request.session['user_id'])
    username = Customers.objects.get(id=request.session['user_id']).CustomerName
    chart_count = get_chart_item_count(request.session.get('user_id'))
    order_count = get_order_count(request.session.get('user_id'))
    total_price = 0.0
    for item in chart_list:
        total_price += float(item.Commoditie.CommoditiePrice)
    return render_to_response('my_chart.html',
                              {'username': username, 'chart_list': chart_list,
                               'chart_count': chart_count, 'total_price': total_price,
                               'order_count': order_count},
                              context_instance=RequestContext(request))
# view --> log out and redirect to the home page
def logout(request):
    # Drop all session state (user_id etc.) so the user is anonymous again.
    request.session.clear()
    return HttpResponseRedirect('/home/')
|
from bs4 import BeautifulSoup
from urllib.request import urlopen
# Month abbreviation -> zero-padded month number (NOTE(review): shadows the builtin `dict`).
dict={'Jan':'01','Feb':'02','Mar':'03','Apr':'04','May':'05','Jun':'06','Jul':'07','Aug':'08','Sep':'09','Oct':'10','Nov':'11','Dec':'12'}
import csv
sz=0
# https://www.ndtv.com/page/topic-load-more?+type=news&page=60&query=kolkata
# Topic slugs to scrape from indiatoday.in; `so` indexes the current topic.
assmebl=['delhi','chennai','indore']
so=0
with open('1_'+assmebl[so]+'.csv','a') as out:
    writer=csv.writer(out)
    writer.writerow(['story_url','heading','desc','published_date','auther','timestamp','img_link'])
    for ina in range(1,2):
        if ina==1:
            usl='https://www.indiatoday.in/topic/'+assmebl[so]
            print(usl)
        # else:
        # usl='https://www.indiatoday.in/topic/'+assmebl[so]+'/'+str(ina)
        print(usl)
        html = urlopen(usl).read()
        bs = BeautifulSoup(html, 'html.parser')
        # print(bs)
        ul=bs.findAll('div',{'class':'view-content'})
        # print(ul)
        a=[]
        b=[]
        z=0
        # Odd rows of the topic listing: open each story and collect its headline tags.
        for i in bs.findAll('li',{'class':'views-row-odd'}):
            w1=i.find('a')
            w2=w1['href']
            # print(w2)
            # print("^^^^^^^^^^^^^^^^^^^^^^^^^")
            html1 = urlopen(w2).read()
            bs1 = BeautifulSoup(html1, 'html.parser')
            ul1=bs1.findAll('div',{'class':'story-section'})
            for i1 in ul1:
                a.append(i1.findAll('h1',{'itemprop':'headline'}))
            # print("@@@@@@@@@")
            # print(".................................................")
        # Even rows: same procedure, collected into `b`.
        for i in bs.findAll('li',{'class':'views-row-even'}):
            w1=i.find('a')
            w2=w1['href']
            # print(w2)
            # print("^^^^^^^^^^^^^^^^^^^^^^^^^")
            html1 = urlopen(w2).read()
            bs1 = BeautifulSoup(html1, 'html.parser')
            ul1=bs1.findAll('div',{'class':'story-section'})
            for i1 in ul1:
                b.append(i1.findAll('h1',{'itemprop':'headline'}))
            # print("@@@@@@@@@")
            # print(".................................................")
        print("uuuuuuuuuuuuuuu")
        # NOTE(review): pairs odd/even headlines by position; assumes len(a) >= len(b).
        for i in range(0,len(b)):
            print(a[i])
            print(b[i])
        # for r in range(0,len(a)):
        # li=ul[0].findAll('span',{'class':'field-content'})
        # # for i in li:
        # try:
        # print(sz)
        # sz+=1
        # per='https://www.indiatoday.in/'+i.find('a')['href']
        # html = urlopen(per).read()
        # bs2 = BeautifulSoup(html, 'html.parser')
        # h1=bs2.find("span" ,{"class":"title"})
        # heading=h1.contents[0]
        # print(h1.contents[0])
        # print("-------------------------------------------------------------")
        # story=bs2.find("div" ,{"class":"section1"})
        # story1=story.find("div")
        # s=''
        # for sto in story1:
        # if(str(sto)[0]!='<' and str(sto)[0]!=' ' and str(sto)[0]!='\n'):
        # s+=str(sto)
        # print(s.strip())
        # print("----------------------------------------------------------")
        # time=bs2.find("span", {"class":"time_cptn"})
        # time2=time.findAll("span")
        # exact_time=''
        # if(str(time2[2].contents[0])[0]=='U'):
        # exact_time=(time2[2].contents[0][9:-4])
        # else:
        # exact_time=(time2[2].contents[0][:-4])
        # print(exact_time)
        # auther=time2[1].contents[0]
        # print(time2[1].contents[0])
        # sls=exact_time.split(",")
        # sls1=sls[0].split(" ")
        # sls1.append(sls[1])
        # sls1.append(sls[2])
        # sls1[0]=dict[sls1[0]]
        # sls1[3]+=':00.0'
        # string=sls1[1].strip()+'/'+sls1[0]+'/'+sls1[2].strip()+' '+sls1[3].strip()
        # print(string)
        # import time
        # import datetime
        # element = datetime.datetime.strptime(string,"%d/%m/%Y %H:%M:%S.%f")
        # tuple1 = element.timetuple()
        # timestamp = time.mktime(tuple1)*1000
        # print(timestamp)
        # print("----------------------------------------------------------")
        # image=bs2.find("section", {"class":"highlight"} )
        # img_link='https://www.indiatoday.in'+image.find("img")['src']
        # print(img_link)
        # print(usl)
        # print("******************************"+assmebl[so]+str(sz)+"*****************************")
        # output=[]
        # output.append(per)
        # output.append(heading)
        # output.append(s.strip())
        # output.append(exact_time)
        # output.append(auther)
        # output.append(timestamp)
        # output.append(img_link)
        # writer.writerow(output)
        # except Exception as e:
        # print(sz,"-",str(e))
        # continue
so+=1
# Redundant: the `with` block above already closed the file; close() on a
# closed file is a harmless no-op.
out.close()
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Helper functions useful when writing scripts that are run from GN's
exec_script function."""
import sys
class GNException(Exception):
    """Raised when a Python value cannot be represented in GN syntax."""
    pass
# Computes ASCII code of an element of encoded Python 2 str / Python 3 bytes.
# (Iterating bytes yields ints on Python 3 but 1-char strs on Python 2.)
_Ord = ord if sys.version_info.major < 3 else lambda c: c
def _TranslateToGnChars(s):
    """Yield the GN-escaped character sequence for the UTF-8 encoding of s."""
    for raw in s.encode('utf-8'):  # str in Python 2, bytes in Python 3.
        point = _Ord(raw)  # int
        if point in (34, 36, 92):  # '"', '$' and '\\' need a backslash escape.
            yield '\\' + chr(point)
        elif 32 <= point < 127:  # printable ASCII passes through unchanged.
            yield chr(point)
        else:  # everything else becomes a $0xXX hex escape.
            yield '$0x%02X' % point
def ToGNString(value, allow_dicts=True):
    """Returns a stringified GN equivalent of a Python value.

    allow_dicts indicates if this function will allow converting dictionaries
    to GN scopes. This is only possible at the top level, you can't nest a
    GN scope in a list, so this should be set to False for recursive calls.

    Raises GNException for strings with newlines, nested/non-str-keyed dicts
    and unsupported types."""
    # BUG FIX: `unicode` does not exist on Python 3; keep the py2-only check
    # behind a version test (short-circuit keeps the name unevaluated on py3).
    if isinstance(value, str) or (
            sys.version_info.major < 3 and isinstance(value, unicode)):
        if value.find('\n') >= 0:
            raise GNException("Trying to print a string with a newline in it.")
        return '"' + ''.join(_TranslateToGnChars(value)) + '"'
    if isinstance(value, list):
        return '[ %s ]' % ', '.join(ToGNString(v, False) for v in value)
    if isinstance(value, dict):
        if not allow_dicts:
            raise GNException("Attempting to recursively print a dictionary.")
        result = ""
        for key in value:
            if not isinstance(key, str):
                raise GNException("Dictionary key is not a string.")
            result += "%s = %s\n" % (key, ToGNString(value[key], False))
        return result
    # BUG FIX: bools must be checked before ints (bool subclasses int) --
    # GN booleans are bare lowercase true/false, not "True"/"False".
    if isinstance(value, bool):
        return 'true' if value else 'false'
    if isinstance(value, int):
        return str(value)
    raise GNException("Unsupported type %s (value %s) when printing to GN." %
                      (type(value), value))
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Ed Mountjoy
#
import sys
import os
import argparse
import gzip
from pprint import pprint
def main():
    """Find pairs of (study, index variant) whose tag-variant sets overlap
    within a +/-5Mb window, and write the overlap counts to args.outf
    (gzipped TSV).  Finemapping tag sets take precedence over LD tag sets."""
    # Parse args
    args = parse_args()
    window = 5 * 1e6 # plus/minus 5Mb
    only_save_overlapping = True
    #
    # Prepare data -------------------------------------------------------------
    #
    # Load set of valid (study, index) pairs
    study_index_set = set([])
    with gzip.open(args.top_loci, 'r') as in_h:
        in_h.readline() # Skip header
        for line in in_h:
            study_id, chrom, pos, ref, alt = line.decode().rstrip().split('\t')[:5]
            index_var = '_'.join([chrom, pos, ref, alt])
            study_index_set.add((study_id, index_var))
    # Load finemap data: (study, index_var) -> set of tag variant ids
    print('Loading finemap...')
    tag_dict_finemap = {}
    with gzip.open(args.finemap, 'r') as in_h:
        in_h.readline() # Skip header
        for line in in_h:
            line = line.decode()
            (
                study_id,
                index_chrom,
                index_pos,
                index_ref,
                index_alt,
                tag_chrom,
                tag_pos,
                tag_ref,
                tag_alt
            ) = line.rstrip().split('\t')[:9]
            index_var = '_'.join([index_chrom, index_pos, index_ref, index_alt])
            tag_var = '_'.join([tag_chrom, tag_pos, tag_ref, tag_alt])
            key = (study_id, index_var)
            # Skip (stid, index) pairs that are not in the top loci table
            if not key in study_index_set:
                continue
            # Add to dict
            try:
                tag_dict_finemap[key].add(tag_var)
            except KeyError:
                tag_dict_finemap[key] = set([tag_var])
    # Load LD data, same shape as the finemap dict
    print('Loading LD...')
    tag_dict_ld = {}
    with gzip.open(args.ld, 'r') as in_h:
        in_h.readline() # Skip header
        for line in in_h:
            line = line.decode()
            (
                study_id,
                index_chrom,
                index_pos,
                index_ref,
                index_alt,
                tag_chrom,
                tag_pos,
                tag_ref,
                tag_alt,
                r2
            ) = line.rstrip().split('\t')[:10]
            index_var = '_'.join([index_chrom, index_pos, index_ref, index_alt])
            tag_var = '_'.join([tag_chrom, tag_pos, tag_ref, tag_alt])
            key = (study_id, index_var)
            # Skip (stid, index) pairs that are not in the top loci table
            if not key in study_index_set:
                continue
            # Skip low R2
            if not r2 or float(r2) < args.min_r2:
                continue
            # Add to dict
            try:
                tag_dict_ld[key].add(tag_var)
            except KeyError:
                tag_dict_ld[key] = set([tag_var])
    # Merge finemap and LD. This will select finemapping over LD if available.
    print('Merging finemap and LD...')
    tag_dict = {}
    for d in [tag_dict_finemap, tag_dict_ld]:
        for key in d:
            if not key in tag_dict:
                tag_dict[key] = d[key]
    #
    # Find overlaps ------------------------------------------------------------
    #
    # Find overlaps
    print('Finding overlaps...')
    overlap_data = []
    header = ['study_id_A',
              'index_variantid_b37_A',
              'study_id_B',
              'index_variantid_b37_B',
              'set_type',
              'distinct_A',
              'overlap_AB',
              'distinct_B']
    # set_types = {'finemapping': tag_dict_finemap,
    #              'ld_eur': tag_dict_ld,
    #              'combined': tag_dict}
    set_types = {'combined': tag_dict}
    # Process each set type separately
    for set_key in set_types:
        set_dict = set_types[set_key]
        # Partition by chromosome to speed things up
        set_dict_chroms = {}
        for key in set_dict:
            chrom, _ = parse_chrom_pos(key[1])
            try:
                set_dict_chroms[chrom][key] = set_dict[key]
            except KeyError:
                set_dict_chroms[chrom] = {}
                set_dict_chroms[chrom][key] = set_dict[key]
        # Run each chromosome separately.  NOTE(review): the all-pairs loop
        # below also compares each key with itself, so self-overlap rows are
        # written -- confirm downstream expects that.
        c = 0
        for chrom in set_dict_chroms:
            set_dict_chrom = set_dict_chroms[chrom]
            for study_A, var_A in set_dict_chrom.keys():
                if c % 1000 == 0:
                    print(' processing {0} {1} of {2}...'.format(set_key, c, len(set_dict)))
                c += 1
                for study_B, var_B in set_dict_chrom.keys():
                    if varids_overlap_window(var_A, var_B, window):
                        # Find overlap in sets
                        distinct_A = set_dict_chrom[(study_A, var_A)].difference(set_dict_chrom[(study_B, var_B)])
                        overlap_AB = set_dict_chrom[(study_A, var_A)].intersection(set_dict_chrom[(study_B, var_B)])
                        distinct_B = set_dict_chrom[(study_B, var_B)].difference(set_dict_chrom[(study_A, var_A)])
                        # Save result
                        if len(overlap_AB) > 0 or only_save_overlapping == False:
                            out_row = [study_A,
                                       var_A,
                                       study_B,
                                       var_B,
                                       set_key,
                                       len(distinct_A),
                                       len(overlap_AB),
                                       len(distinct_B)]
                            overlap_data.append(out_row)
    # Write results
    with gzip.open(args.outf, 'w') as out_h:
        # Write header
        out_h.write(('\t'.join(header) + '\n').encode())
        for row in overlap_data:
            out_h.write(('\t'.join([str(x) for x in row]) + '\n').encode())
def parse_chrom_pos(varid):
    ''' Gets chrom and pos from a variant ID (chrom_pos_ref_alt)
    Returns:
        (chrom, pos) as strings
    '''
    parts = varid.split('_')
    return parts[0], parts[1]
def varids_overlap_window(var_A, var_B, window):
    ''' Extracts chrom:pos info from two variant IDs and checks if they are
    within a certain window of each other
    Args:
        var_A (chr_pos_a1_a2)
        var_B (chr_pos_a1_a2)
        window (int): bp window to consider an overlap
    '''
    chrom_a, pos_a = var_A.split('_')[:2]
    chrom_b, pos_b = var_B.split('_')[:2]
    # Different chromosomes can never overlap.
    if chrom_a != chrom_b:
        return False
    # Overlap when the positions are no more than `window` bp apart.
    return abs(int(pos_a) - int(pos_b)) <= window
def parse_args():
    """ Load command line args """
    parser = argparse.ArgumentParser()
    # All five options are mandatory; declare them table-driven.
    for flag, meta, typ in [('--top_loci', '<file>', str),
                            ('--ld', '<file>', str),
                            ('--finemap', '<file>', str),
                            ('--min_r2', '<float>', float),
                            ('--outf', '<str>', str)]:
        parser.add_argument(flag, metavar=meta, type=typ, required=True)
    return parser.parse_args()
# Script entry point (argument parsing happens inside main()).
if __name__ == '__main__':
    main()
|
from enum import Enum
class MedalType(Enum):
    """Closed set of medal sources/categories used elsewhere in the app."""
    DUNGEON = "dungeon"
    DUNGEON_X2 = "dungeon_x2"
    CC = "cc"
    GUILD_SHOP = "guild_shop"
    REQUEST = "request"
    MM = "mm"
    DEFAULT = "default"
|
#!/usr/bin/python3
from rank_api import api
# Start the API server only when executed directly (api is presumably a
# Flask-style app object -- confirm in the rank_api package).
if __name__ == "__main__":
    api.run()
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.shortcuts import render
# Create your views here.
from django.http import HttpResponse
def index(request):
    """Plain-text placeholder view for the polls index page."""
    message = "Hello, world. You're at the polls index."
    return HttpResponse(message)
from django.urls import reverse_lazy
from django.views import generic
from .models import Post
class PostList(generic.ListView):
    """Generic list view over all Post objects."""
    model = Post
class PostDetail(generic.DetailView):
    """Generic detail view for a single Post."""
    model = Post
class PostCreate(generic.CreateView):
    """Create a Post from all model fields, then redirect to the list."""
    model = Post
    fields = '__all__'
    success_url = reverse_lazy('myapp:post_list')
class PostUpdate(generic.UpdateView):
    """Edit a Post (all model fields), then redirect to the list."""
    model = Post
    fields = '__all__'
    success_url = reverse_lazy('myapp:post_list')
class PostDelete(generic.DeleteView):
    """Delete a Post, then redirect to the list."""
    model = Post
    success_url = reverse_lazy('myapp:post_list')
|
import sys
import frida
# Frida script: hook MainActivity.onClick in top.q0o0p.q0o0p_six and replace
# its return value.  The JavaScript payload below is runtime data and is left
# byte-identical (including its original comments).
jscode = """
Java.perform(function(){
var MainActivity = Java.use('top.q0o0p.q0o0p_six.MainActivity'); //获得MainActivity类
MainActivity.onClick.implementation = function(){ //Hook testFrida函数,用js自己实现
send('Statr! Hook!'); //发送信息,用于回调python中的函数
return 'wow q0o0p wow!' //劫持返回值,修改为我们想要返回的字符串
}
});
"""
def on_message(message,data): # callback invoked for send() messages coming from the JS side
    print(message)
process = frida.get_remote_device().attach('top.q0o0p.q0o0p_six') # attach to the target process (get_usb_device kept failing to find the device, so get_remote_device is used instead)
script = process.create_script(jscode) # create the JS script
script.on('message',on_message) # register the Python callback for JS send()
script.load() # inject and run the script
sys.stdin.read() # block forever so the process stays alive and the hook persists
|
# -*- coding: utf-8; mode: python -*-
#
# This is the project specific sphinx-build configuration, which is loaded from
# the base configuration file (``../conf.py``). About config values consult:
#
# * http://www.sphinx-doc.org/en/stable/config.html
#
# While setting values here, please take care to not overwrite common needed
# configurations. This means, do not *overwrite* composite values (e.g. the
# list- or dictionary-value of "latex_elements" resp. "extensions") by
# thoughtless assignments. Manipulate composite values always by *update*
# (dict-values) or extend (list-values). Nevertheless, if you know what you are
# doing, you are free to *overwrite* values to your needs.
#
# useful preset names:
#
# * BASE_FOLDER: the folder where the top conf.py is located
# * main_name: the basename of this project-folder
# ------------------------------------------------------------------------------
# General configuration
# ------------------------------------------------------------------------------
project = u'POC'
copyright = u'2016, Linux documentation authors'
author = u'Linux contributors'
# `intersphinx_mapping` is created by the base ../conf.py; extend it here
# rather than overwriting it (see the header notes about composite values).
intersphinx_mapping['linuxdoc'] = ('https://return42.github.io/linuxdoc', None)
intersphinx_mapping['dbxml2rst'] = ('https://return42.github.io/dbxml2rst', None)
intersphinx_mapping['linux'] = ('https://h2626237.stratoserver.net/kernel/linux_src_doc/', None)
intersphinx_mapping['kernel-doc'] = ('https://h2626237.stratoserver.net/kernel/books/kernel-doc-HOWTO', None)
intersphinx_mapping['template-book'] = ('https://h2626237.stratoserver.net/kernel/books/template-book/', None)
# ------------------------------------------------------------------------------
# Options for HTML output
# ------------------------------------------------------------------------------
# Output file base name for HTML help builder.
# `main_name` is preset by the base configuration (basename of this folder).
htmlhelp_basename = main_name
|
__author__ = 'zhengxiaoyu'
import numpy as np
import sys
#first step : convert the input value to binary
def toBinary(num, bit):
    '''
    Convert a non-negative integer (0..255) to a fixed-width binary list.

    :param num: the input num
    :param bit: n-bit width of the result
    :return: the binary list in n-bit (MSB first), or None if num does not fit

    >>> toBinary(6,3)
    [1, 1, 0]
    >>> toBinary(8,2)
    bit too small
    >>> toBinary(16, 8)
    [0, 0, 0, 1, 0, 0, 0, 0]
    '''
    # Reject values outside 0..255 or too wide for the requested bit count.
    if num > 255 or num < 0 or pow(2, bit) <= num:
        # Typo fix: was "bit too samll" (doctest above updated to match).
        print("bit too small")
        return
    binaryList = []
    for _ in range(bit):
        # Peel off the least-significant bit each pass (idiomatic divmod).
        num, remainder = divmod(num, 2)
        binaryList.append(remainder)
    binaryList.reverse()  # collected LSB-first; flip to MSB-first
    return binaryList
# Second step: determine the rule table.
def setRule(binaryList):
    '''Return the list of eight (neighborhood, output) rules.

    input: the binary list (eight output bits, neighborhood 111 first)
    >>> setRule([1,0,1,0,1,0,0,0])
    [([1, 1, 1], 1), ([1, 1, 0], 0), ([1, 0, 1], 1), ([1, 0, 0], 0), ([0, 1, 1], 1), ([0, 1, 0], 0), ([0, 0, 1], 0), ([0, 0, 0], 0)]
    '''
    return [(toBinary(7 - n, 3), binaryList[n]) for n in range(8)]
# Third step: build the starting row -- a single live cell in the centre.
def start(columns):
    '''
    input: the number of columns read from the command line
    :return: the first row of the automaton (width 2*columns+1)
    >>> start(7)
    [0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0]
    '''
    padding = [0] * columns
    return padding + [1] + padding
def process(columns, ruleList):
    '''
    :return: the result grid (columns+1 rows, dead-cell boundary)
    input: columns and the rule list
    >>> process(4,[([1, 1, 1], 0), ([1, 1, 0], 0), ([1, 0, 1], 0), ([1, 0, 0], 1), ([0, 1, 1], 1), ([0, 1, 0], 1), ([0, 0, 1], 1), ([0, 0, 0], 0)])
    [[0, 0, 0, 0, 1, 0, 0, 0, 0], [0, 0, 0, 1, 1, 1, 0, 0, 0], [0, 0, 1, 1, 0, 0, 1, 0, 0], [0, 1, 1, 0, 1, 1, 1, 1, 0], [1, 1, 0, 0, 1, 0, 0, 0, 1]]
    '''
    grid = [start(columns)]
    width = columns * 2 + 1
    for _ in range(columns):
        # Pad the previous row with a dead cell on both sides, then apply
        # the rule to every 3-cell window.
        padded = [0] + grid[-1] + [0]
        grid.append([applyRule(ruleList, padded[j:j + 3]) for j in range(width)])
    return grid
def processWolfram(columns, ruleList):
    '''
    input: columns and the rule list
    :return: the wolfram result (right boundary padded with a live cell)
    >>> processWolfram(4,[([1, 1, 1], 0), ([1, 1, 0], 0), ([1, 0, 1], 0), ([1, 0, 0], 1), ([0, 1, 1], 1), ([0, 1, 0], 1), ([0, 0, 1], 1), ([0, 0, 0], 0)])
    [[0, 0, 0, 0, 1, 0, 0, 0, 0], [0, 0, 0, 1, 1, 1, 0, 0, 1], [0, 0, 1, 1, 0, 0, 1, 1, 1], [0, 1, 1, 0, 1, 1, 1, 0, 0], [1, 1, 0, 0, 1, 0, 0, 1, 1]]
    >>> processWolfram(4,setRule(toBinary(139, 8)))
    [[0, 0, 0, 0, 1, 0, 0, 0, 0], [1, 1, 1, 1, 0, 0, 1, 1, 1], [1, 1, 1, 0, 0, 1, 1, 1, 1], [1, 1, 0, 0, 1, 1, 1, 1, 1], [1, 0, 0, 1, 1, 1, 1, 1, 1]]
    '''
    grid = [start(columns)]
    width = columns * 2 + 1
    for _ in range(columns):
        # Unlike process(): dead cell on the left, LIVE cell on the right.
        padded = [0] + grid[-1] + [1]
        grid.append([applyRule(ruleList, padded[j:j + 3]) for j in range(width)])
    return grid
def applyRule(ruleList, input):
    '''
    apply the rule: look up the 3-cell neighborhood and return its output
    cell (None when no rule matches).
    input: the rule list and the list with three elements
    >>> applyRule([([1, 1, 1], 0), ([1, 1, 0], 0), ([1, 0, 1], 0), ([1, 0, 0], 1), ([0, 1, 1], 1), ([0, 1, 0], 1), ([0, 0, 1], 1), ([0, 0, 0], 0)],[0,0,0])
    0
    '''
    for pattern, output in ruleList:
        if pattern == input:
            return output
# Show the output (plain-text PBM "P1" image) on stdout.
def show(result, columns):
    '''
    print the result
    input: the result from process() or processWolfram(), columns
    >>> show(process(4,setRule(toBinary(30,8))),4)
    P1 9 5
    000010000
    000111000
    001100100
    011011110
    110010001
    '''
    width = columns * 2 + 1
    # PBM header: magic number, width, height.
    print("P1 " + str(width) + " " + str(columns + 1))
    for row in result:
        print("".join(repr(cell) for cell in row))
def is_p(s):
    """Recursively test whether s is a palindrome.

    BUG FIX: the original indexed s[0] on the empty string, so every
    even-length palindrome crashed with IndexError (e.g. is_p("abba")
    recursed to is_p("")).  Length 0 and 1 are both palindromes.
    """
    if len(s) <= 1:
        return True
    return s[0] == s[-1] and is_p(s[1:-1])
def double(x): return x*2
def mystery(arg):
    """Return a function that applies its callable argument to arg."""
    def apply_to_arg(f):
        return f(arg)
    return apply_to_arg
def writeFile(fileName, result, columns):
    '''
    generate the PBM (P1) image file
    input: the image name, result, columns

    Fixes: close the handle via `with` even on error, stop shadowing the
    (Python 2) builtin name `file`, and terminate the header with a newline
    so the first pixel row does not run into it -- matching show()'s output.
    '''
    with open(fileName, "w") as out:
        out.write("P1 " + str(columns * 2 + 1) + " " + str(columns + 1) + "\n")
        for line in result:
            out.writelines(str(i) for i in line)
            out.write('\n')
# Module-level accumulator: remembers the largest n seen so the inner
# climbing recursion knows where to stop.  NOTE(review): this global state
# makes the function non-reentrant -- confirm single-threaded use.
count =0
def interleaved_sum(n, odd_term, even_term):
    """Sum odd_term over odd k and even_term over even k for k = 1..n,
    by first recursing n down to 0/1, then climbing back up in steps of 2."""
    global count
    #count = n
    # count = 0
    # Record the top of the range on the first (outermost) call.
    if n > count:
        count = n
    def sum_together(n ,term_1, term_2):
        # Climb from n to `count` in steps of 2, alternating the two terms.
        global count
        if n == count:
            count = 0  # reset for the next top-level call
            return term_1(n)
        else:
            return term_1(n)+term_2(n+1)+sum_together(n+2, term_1, term_2)
    if n==0:
        # Start the climb at 0 but cancel the spurious even_term(0) term.
        return sum_together(0, even_term, odd_term) - even_term(0)
    elif n == 1:
        return sum_together(1, odd_term, even_term)
    else:
        # Reduce to the base parity case; `count` keeps the original n.
        return interleaved_sum(n-2,odd_term,even_term)
# Usage: prog <rule 0-255> <columns> [Wolfram] [> <outfile>]
if __name__ == '__main__':
    import doctest
    doctest.testmod()
    if len(sys.argv) <3 :
        print("need more parameter")
        exit()
    if len(sys.argv)==3 or len(sys.argv) == 5:
        result = process(int(sys.argv[2]),setRule(toBinary(int(sys.argv[1]),8)))
    elif len(sys.argv) == 4 or len(sys.argv) == 6:
        if(sys.argv[3] == 'Wolfram'):
            result = processWolfram(int(sys.argv[2]),setRule(toBinary(int(sys.argv[1]),8)))
        else:
            # NOTE(review): `result` stays undefined on this path, so the
            # show() call below raises NameError -- confirm intended.
            print("do you mean Wolfram ?")
    show(result, int(sys.argv[2]))
    #len(sys.argv) == 6 or len(sys.argv) == 5:
    # Optional shell-style redirection arguments: "> filename" writes a file.
    if len(sys.argv) > 4:
        if(sys.argv[len(sys.argv)-2] == '>'):
            writeFile(sys.argv[len(sys.argv)-1], result, int(sys.argv[2]))
|
import socket

HOST = 'chitturi'              # The remote host
PORT = 50007                   # The same port as used by the server

# Fixes: use the HOST/PORT constants instead of repeating the literals in
# connect(), close the socket even on error via a context manager, send
# bytes and use the print() function so the script runs on Python 3
# (the original used the Python-2 print statement and sent a str).
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
    s.connect((HOST, PORT))
    s.sendall(b'Hi good morning')
    data = s.recv(200)
print('Received', str(data))
|
# -*- coding: utf-8 -*-
"""
Created on Sat Aug 29 19:21:34 2020

@author: HP
"""
# Read two integers from stdin and print their sum.
number1 = int(input("enter the first number:"))
number2 = int(input("enter the second number:"))
# Renamed from `sum` to avoid shadowing the builtin sum().
total = number1 + number2
print("the sum is", total)
|
# Two equal bytearrays are still distinct objects: `is` compares identity,
# so the last line prints False even though `t1 == t2` would be True.
t1=bytearray([12,20])
t2=bytearray([12,20])
print(id(t1))  # different ids: two separate allocations
print(id(t2))
print(t1 is t2)
|
def solve(s):
    """Return the maximum value among the consonant substrings of s.

    Each maximal run of consonants scores the sum of its letter values
    (a=1 .. z=26); vowels (aeiou) terminate a run.
    """
    scores = []
    run = 0
    last = len(s) - 1
    for pos, ch in enumerate(s):
        if ch not in 'aeiou':
            run += ord(ch) - 96
            if pos == last:
                scores.append(run)
            continue
        # Vowel: flush the current consonant run (possibly 0).
        scores.append(run)
        run = 0
    return max(scores)
'''
A consonant is any letter of the alphabet except a, e, i, o, u. The consonant
substrings in the word "zodiacs" are z, d, cs. Assuming a = 1, b = 2 ... z = 26,
the values of these substrings are 26, 4, 22 because z = 26, d = 4, cs = 3+19 = 22.
The maximum value of these substrings is 26. Therefore, solve("zodiacs") = 26.
Given a lowercase string that has alphabetic characters only and no spaces,
return the highest value of consonant substrings.
'''
|
from metaL import Object
class Web(Object):
    """Empty subclass of metaL's Object; all behavior is inherited unchanged."""
    pass
|
from math import ceil, floor
from cs50 import get_string

# CS50 "credit": validate a card number via Luhn's checksum, then classify
# it by its prefix.
num = get_string("Number: ")
# Reject anything outside 13..16 digits.
if int(num) < pow(10, 12) or int(num) > pow(10, 16) - 1:
    print("INVALID")
    exit(1)
n = len(num)
evendigits = floor(n/2)
odddigits = ceil(n/2)
x = 0
y = 0
# Luhn part 1: double every second digit from the right and sum the digits
# of each product (tens digit + ones digit).
for i in range(1, evendigits + 1):
    x += (floor(2 * int(num[n - (2 * i)]) / 10)) + ((2 * int(num[n - (2 * i)])) % 10)
# Luhn part 2: sum the remaining digits.
for i in range(odddigits):
    y += int(num[n - (2 * i) - 1])
if ((x + y) % 10) == 0:
    if num.startswith(("34", "37")):
        print("AMEX")
        exit(0)
    elif num.startswith(("51", "52", "53", "54", "55")):
        print("MASTERCARD")
        exit(0)
    elif num.startswith("4"):
        print("VISA")
        exit(0)
    # NOTE(review): a passing checksum with an unrecognized prefix prints
    # nothing at all -- confirm whether INVALID was intended here too.
else:
    print("INVALID")
    exit(0)
|
# Compare per-kilogram prices: A currency units buy X kg of one good,
# B units buy Y kg of the other; print both unit prices and their ratio.
X = float(input("X= "))
Y = float(input("Y= "))
A = float(input("A= "))
B = float(input("B= "))

price_per_kg_first = A / X
price_per_kg_second = B / Y
ratio = price_per_kg_first / price_per_kg_second

print(price_per_kg_first)
print(price_per_kg_second)
print(ratio)
|
import pytest
import time
from requests.exceptions import HTTPError
import pandas as pd
from pydodo import (
reset_simulation,
all_positions,
list_route,
)
from pydodo.bluebird_connect import ping_bluebird
# test if can connect to BlueBird; used below to skip when it is down
bb_resp = ping_bluebird()

@pytest.mark.skipif(not bb_resp, reason="Can't connect to bluebird")
def test_route_waypoints(upload_test_sector_scenario):
    """
    Test list_route(), direct_to_waypoint()
    """
    # Start from a clean simulator state.
    cmd = reset_simulation()
    assert cmd == True
    upload_test_sector_scenario()
    # Get the position
    position = all_positions()
    acid1, acid2 = position.index
    route1 = list_route(acid1)
    route2 = list_route(acid2)
    assert route1["aircraft_id"] == acid1
    assert route2["aircraft_id"] == acid2
    # Expected values come from the uploaded test sector scenario fixture.
    assert route1["next_waypoint"] == 'FIYRE'
    assert route1["route_name"] == 'ASCENSION'
    assert len(route1["route_waypoints"]) == 5
    assert route2["next_waypoint"] == 'SPIRT'
    assert route2["route_name"] == 'FALLEN'
    assert len(route2["route_waypoints"]) == 5
    # The two routes traverse the same five waypoints in opposite directions,
    # so reversing one in place should make them match pairwise.
    route2["route_waypoints"].reverse()
    assert all([
        wp1 == wp2 for wp1, wp2
        in zip(route1["route_waypoints"], route2["route_waypoints"])
    ])
|
import ray
import logging
from torch.utils.data import Dataset, IterableDataset
from torch.utils.data._utils.collate import default_collate
from typing import Callable
logger = logging.getLogger(__name__)
class IterableDataLoader:
    """Batch loader where element i of every batch is drawn from its own
    independently-initialized dataset (one dataset per batch slot).

    Iteration stops as soon as any of the per-slot iterators is exhausted.
    """

    def __init__(
        self,
        dataset: Dataset = None,
        dataset_init_fn: Callable = None,
        batch_size: int = 1,
        collate_fn: Callable = None,
        **kwargs
    ):
        self.dataset = dataset
        self.dataset_init_fn = dataset_init_fn
        self.batch_size = batch_size
        # FIX: test against None rather than truthiness, so a falsy-but-valid
        # callable is not silently replaced by default_collate.
        self.collate_fn = collate_fn if collate_fn is not None else default_collate
        # we independently initialize each iterator
        if dataset_init_fn is not None:
            self.datasets = [dataset_init_fn(index=i, **kwargs) for i in range(batch_size)]
        # NOTE(review): when dataset_init_fn is None, self.datasets is never
        # set and __iter__/__len__ raise AttributeError -- confirm callers
        # always pass dataset_init_fn.
        # if not ray.is_initialized():
        #     ray.init()
        #     logger.warning("Ray is initialized!")

    def __iter__(self):
        # iterators = ray.util.iter.from_iterators(self.datasets).gather_sync()
        iterators = [iter(item) for item in self.datasets]
        while True:
            try:
                # TODO: extend it with Ray
                batch = [next(iterators[i]) for i in range(self.batch_size)]
            except StopIteration:
                break
            # FIX: removed the bare `except: breakpoint()` debug leftover --
            # collate errors now propagate instead of dropping into the
            # debugger (which hangs non-interactive runs).
            yield self.collate_fn(batch)

    def __len__(self):
        # Every per-slot dataset has the same length; report the first one's.
        return len(self.datasets[0])
|
#!/usr/bin/env python
# https://www.postgresqltutorial.com/postgresql-python/
# -----------------------------------------------------------------------
# database.py
# Author: Sophie Li, Jayson Wu, Connie Xu
# -----------------------------------------------------------------------
import os
import psycopg2
from sys import stderr
import time
from datetime import datetime
from database_files import booklistings, bookbag, purchases, listingphotos
# -----------------------------------------------------------------------
class Database:
    """Thin psycopg2 wrapper for the ReBook book-listing database.

    Handles connecting (Heroku DATABASE_URL or a local pgAdmin instance),
    schema creation, generic command execution, and the buyer/seller/search
    queries used by the web app.
    """

    def __init__(self):
        # Lazily populated by connect().
        self._connection = None

    def connect(self):
        # Prefer the Heroku-style DATABASE_URL env var; fall back to the
        # local development database when it is not set.
        try:
            # connect to database
            # heroku
            try:
                # in your terminal, type: export DATABASE_URL="postgres://postgres:rebook2021@localhost:5432/rebook"
                DATABASE_URL = os.environ['DATABASE_URL']
                self._connection = psycopg2.connect(
                    DATABASE_URL, sslmode='require')
            # pgadmin local
            except:
                self._connection = psycopg2.connect(host='localhost', port=5432,
                                                    user='postgres', password='rebook2021', database='rebook')
            print('Connected to database...')
        except (Exception, psycopg2.DatabaseError) as error:
            print(error)

    def disconnect(self):
        # Close the underlying connection (no-op safety is NOT provided:
        # calling this before connect() raises AttributeError).
        self._connection.close()
        print('Disconnected from database')

    # method to create table schema
    # return True if command properly executed, False if error occurred
    def _create_tables(self):
        # Delegate per-table DDL to the database_files helper modules.
        booklistings.create(self)
        bookbag.create(self)
        purchases.create(self)
        listingphotos.create(self)

    # helper method to execute insert, update, delete row methods
    def _execute_command(self, command, info, return_id=False):
        # NOTE(review): on failure the cursor stays open and the transaction
        # is not rolled back -- confirm callers recover/reconnect.
        cursor = self._connection.cursor()
        try:
            cursor.execute(command, tuple(info))
            print("EXECUTED: " + str(command))
            # get the generated serial id back
            if return_id:
                listing_id = cursor.fetchone()[0]
        except Exception as e:
            print(str(e), file=stderr)
            return False
        cursor.close()
        self._connection.commit()
        if return_id:
            return listing_id
        else:
            return True

    # takes as input a net_id
    # returns dict with lists of active, pending, completed listing_ids
    def get_buyer_bookbag(self, net_id):
        cursor = self._connection.cursor()
        QUERY_OTHER = 'SELECT listing_id FROM bookbag ' + \
            'WHERE buyer = %s AND (listing_status = \'removed\' OR listing_status = \'taken\' OR listing_status = \'completed\')'
        QUERY_ACTIVE = 'SELECT listing_id FROM bookbag ' + \
            'WHERE buyer = %s AND listing_status = \'active\''
        QUERY_PENDING = 'SELECT listing_id FROM purchases ' + \
            'WHERE buyer = %s AND buyer_status = \'pending\''
        QUERY_COMPLETED = 'SELECT listing_id FROM purchases ' + \
            'WHERE buyer = %s AND buyer_status = \'completed\''
        cursor.execute(QUERY_OTHER, (net_id,))
        other_listings = self._get_list(cursor)
        cursor.execute(QUERY_ACTIVE, (net_id,))
        active_listings = self._get_list(cursor)
        cursor.execute(QUERY_PENDING, (net_id,))
        pending_listings = self._get_list(cursor)
        cursor.execute(QUERY_COMPLETED, (net_id,))
        completed_listings = self._get_list(cursor)
        cursor.close()
        buyer_listings = {"other": other_listings, "active": active_listings,
                          "pending": pending_listings, "completed": completed_listings}
        return buyer_listings

    # helper method for getting listing ids
    def _get_list(self, cursor):
        # Collect the first column of every fetched row into a list.
        listings = []
        row = cursor.fetchone()
        while row:
            listings.append(row[0])
            row = cursor.fetchone()
        return listings

    # takes as input a net_id
    # returns dict with lists of active, pending, completed listing_ids
    def get_seller_station(self, net_id):
        # cursor = self._connection.cursor()
        # QUERY_ACTIVE = 'SELECT listing_id FROM booklistings ' + \
        #     'WHERE seller = %s AND seller_status = \'active\''
        # QUERY_PENDING = 'SELECT listing_id FROM booklistings ' + \
        #     'WHERE seller = %s AND seller_status = \'pending\''
        # QUERY_COMPLETED = 'SELECT listing_id FROM booklistings ' + \
        #     'WHERE seller = %s AND seller_status = \'completed\''
        # cursor.execute(QUERY_ACTIVE, (net_id,))
        # active_listings = self._get_list(cursor)
        # cursor.execute(QUERY_PENDING, (net_id,))
        # pending_listings = self._get_list(cursor)
        # cursor.execute(QUERY_COMPLETED, (net_id,))
        # completed_listings = self._get_list(cursor)
        # cursor.close()
        active_listings = booklistings.get_active_listings(self, net_id)
        pending_listings = booklistings.get_pending_listings(self, net_id)
        completed_listings = booklistings.get_completed_listings(self, net_id)
        seller_listings = {"active": active_listings,
                           "pending": pending_listings, "completed": completed_listings}
        return seller_listings

    def _add_querystring_price(self, params, filter_price):
        # Append parameterized price-range clauses; filter_price is a
        # (lower, upper) pair where either bound may be None or ''.
        query_string = ''
        if filter_price is not None:
            lower = filter_price[0]
            upper = filter_price[1]
            if lower is not None and lower != '':
                query_string += ' AND CAST(price as double precision) >= %s'
                params.append(str(lower))
            if upper is not None and upper != '':
                query_string += ' AND CAST(price as double precision) <= %s'
                params.append(str(upper))
        return query_string

    # AUTHORS ILIKE AND price AND condition AND seller_status OR AUTHORS ILIKE AND price AND condition
    def _get_querystring(self, params, filter_price, filter_condition, append_string, query, user):
        # Build one field's clause: field match + price range + active-listing
        # + not-own-listing filters, OR-repeated once per condition filter.
        # Values go into `params` (mutated in place) for parameterized
        # execution -- never interpolated into the SQL string itself.
        QUERY_STRING = append_string
        QUERY_STRING += self._add_querystring_price(params, filter_price)
        QUERY_STRING += ' AND seller_status = \'active\' AND NOT seller = %s'
        params.append(user)
        if filter_condition is not None:
            for i in range(len(filter_condition)):
                QUERY_STRING += ' AND condition = %s'
                params.append(filter_condition[i])
                if i + 1 < len(filter_condition):
                    QUERY_STRING += ' OR ' + append_string
                    params.append(query)
                    QUERY_STRING += self._add_querystring_price(
                        params, filter_price)
                    QUERY_STRING += ' AND seller_status = \'active\' AND NOT seller = %s'
                    params.append(user)
        return QUERY_STRING

    # takes as input an author name, cousenum, book title, isbn
    # returns list of dicts containing relevant listing info (isbn, price, condition, title, authors, coursenum, description, listing_id)
    def search(self, query, user, filter_price=None, filter_condition=None, sort=None):
        # create a cursor
        cursor = self._connection.cursor()
        querypercent = '%' + query + '%'
        params = []
        QUERY_STRING = 'SELECT listing_id ' + \
            'FROM "booklistings" WHERE '
        # Match the query against authors, coursenum and title (ILIKE), each
        # with the same price/condition/ownership filters.
        params.append(querypercent)
        QUERY_STRING += self._get_querystring(
            params, filter_price, filter_condition, 'authors ILIKE %s', querypercent, user)
        params.append(querypercent)
        QUERY_STRING += ' OR ' + \
            self._get_querystring(
                params, filter_price, filter_condition, 'coursenum ILIKE %s', querypercent, user)
        params.append(querypercent)
        QUERY_STRING += ' OR ' + \
            self._get_querystring(
                params, filter_price, filter_condition, 'title ILIKE %s', querypercent, user)
        try:
            # int() here just validates that the query is numeric before
            # adding an exact ISBN clause (queryint itself is unused).
            queryint = int(query)
            params.append(query)
            QUERY_STRING += ' OR ' + \
                self._get_querystring(
                    params, filter_price, filter_condition, 'isbn = %s', query, user)
        except:
            pass
        # Sort order: default and 'time_most_recent' are newest-first.
        if sort is None:
            QUERY_STRING += ' ORDER BY time_created DESC'
        else:
            if sort == 'price_low_to_high':
                QUERY_STRING += ' ORDER BY CAST(price AS double precision) ASC'
            elif sort == 'price_high_to_low':
                QUERY_STRING += ' ORDER BY CAST(price AS double precision) DESC'
            elif sort == 'time_most_recent':
                QUERY_STRING += ' ORDER BY time_created DESC'
            elif sort == 'time_most_old':
                QUERY_STRING += ' ORDER BY time_created ASC'
        print(QUERY_STRING)
        print(params)
        cursor.execute(QUERY_STRING, tuple(params))
        # fetch all data
        rows = []
        row = cursor.fetchone()
        while row:
            rows.append(row[0])
            row = cursor.fetchone()
        # close cursor
        cursor.close()
        return rows
# -----------------------------------------------------------------------
# For testing:
if __name__ == '__main__':
    # Smoke test: open a connection, (re)create the schema, and close it.
    # The large body of commented-out ad-hoc insert/update/delete/search
    # scaffolding that used to live here was removed as dead code; it is
    # preserved in version-control history if needed again.
    database = Database()
    database.connect()
    # generating tables
    database._create_tables()
    database.disconnect()
|
import xmlrpclib
from pprint import pprint as pp
# Normalize tire SKUs read from inventory-a.txt into the "U-PLT-..." form.
# Line-length heuristics: lengths 4 and 16 are malformed; 2, 3, 14 and 17
# are usable. (Removed the unused `file`/`location` variables; `file` also
# shadowed a builtin.)
import requests
import json
inventory = []
lines = [line.strip() for line in open('inventory-a.txt')]
for line in lines:
    if len(line) in [2, 3]:
        # already-normalized short codes pass through unchanged
        inventory.append(line)
    elif len(line) == 4:
        inventory.append(line + " BAD LINE")
    elif len(line) == 14:
        # e.g. B-B-155-80-R13 -> U-PLT-B-155-80-R13
        parts = line.split("-")
        converted = "U-PLT-{}-{}-{}-{}".format(parts[0], parts[2], parts[3], parts[4])
        inventory.append(converted)
    elif len(line) == 16:
        inventory.append(line + " BAD SKU")
    elif len(line) == 17:
        # e.g. 0005-C-155-80-R13 -> U-PLT-C-155-80-R13
        parts = line.split("-")
        if len(parts) == 4:
            # grade and first size field fused with a dot, e.g. C.155
            fused = parts[1].split(".")
            converted = "U-PLT-{}-{}-{}-{}".format(fused[0], fused[1], parts[2], parts[3])
        else:
            converted = "U-PLT-{}-{}-{}-{}".format(parts[1], parts[2], parts[3], parts[4])
        inventory.append(converted)
pp(inventory)
|
#!/usr/bin/env python
import pymongo
# pymongo.Connection was deprecated in PyMongo 2.4 and removed in 3.0;
# MongoClient is the drop-in replacement and accepts the same host arg.
conn = pymongo.MongoClient('localhost')
db = conn.TripShare
|
from django import forms
from .models import Review
class ReviewForm(forms.ModelForm):
    """ModelForm for creating/editing Review instances.

    NOTE(review): the original line was truncated mid-definition
    (`class ReviewForm(forms.Mod)`) and did not parse; this is the most
    plausible reconstruction -- confirm the intended fields list.
    """
    class Meta:
        model = Review
        fields = '__all__'
|
'''
File name: AES_modeECB.py
Author: Emely da Mata
Title: TP03
Python Version: 3.8.5
'''
from PIL import BmpImagePlugin
import hashlib
from itertools import cycle
import os
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
from cryptography.hazmat.backends import default_backend
backend = default_backend()
# Generate a random 256-bit AES key.
key = os.urandom(32)
# (Removed a stray bare `key` expression statement left over from a
# notebook session -- it had no effect in a script.)
# Read the input BMP image and grab its raw pixel bytes.
input_image = BmpImagePlugin.BmpImageFile("teste.bmp")
image_data = input_image.tobytes()
# Encrypt the pixel data with AES in ECB mode (the original comment said
# "CBC" but the code uses ECB).
# NOTE: ECB is insecure for real use -- identical plaintext blocks yield
# identical ciphertext blocks; this demo uses it deliberately to make that
# weakness visible in the output image.
aes = algorithms.AES(key)
ecb = modes.ECB()
cipher = Cipher(aes, ecb, backend=backend)
encryptor = cipher.encryptor()
# assumes len(image_data) is a multiple of 16 (the AES block size) -- TODO confirm
ct = encryptor.update(image_data) + encryptor.finalize()
# Write the encrypted bytes back into a copy of the image and save it.
output_image = input_image.copy()
output_image.frombytes(ct)
output_image.save("testeecb4.bmp")
|
# Given an integer number n, return the difference between the product
# of its digits and the sum of its digits.
class Solution:
    """LeetCode 1281: difference between product and sum of digits."""

    def subtractProductAndSum(self, n):
        """Return product(digits of n) - sum(digits of n)."""
        product, total = 1, 0
        for ch in str(n):
            digit = int(ch)
            product *= digit
            total += digit
        return product - total
if __name__ == '__main__':
    test_input = 4421
    # Instantiate Solution instead of passing the class object as `self`.
    print(Solution().subtractProductAndSum(test_input))
|
import os
import logging
import numpy as np
import xml.etree.ElementTree as ET
from PIL import Image
from configs.paths import DATASETS_DIR
from utils.utils_general import make_list, read_textfile
from utils.utils_bbox import draw_bbox
log = logging.getLogger()
# The 20 Pascal VOC object categories, with '__background__' at index 0 so
# category ids line up with the usual detection convention (1..20 = objects).
VOC_CATS = ['__background__', 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle',
            'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse',
            'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train',
            'tvmonitor']
class VOCLoader():
    """Data manager for Pascal VOC detection/segmentation annotations.

    Can merge several VOC releases (e.g. 2007 + 2012), filter categories,
    and optionally serve segmentation ground truth.
    """
    def __init__(self, year, split, segmentation=False, augmented_seg=False,
                 cats_exclude=[], cats_include=[], subsets=None, is_training=False,
                 gt_seg=False, cut_bad_names=True, names_file=''):
        # year/split may be scalars or lists: one dataset per entry.
        # NOTE(review): cats_exclude=[] / cats_include=[] are mutable default
        # arguments; they are only read here, never mutated, so this works,
        # but None-defaults would be safer.
        self.dataset = 'voc'
        self.is_training = is_training
        self.gt_seg = gt_seg
        self.segmentation = segmentation
        self.augmented_seg = augmented_seg
        self.set_up_internal_cats(cats_include, cats_exclude)
        # creating cats' names
        cats = VOC_CATS
        self.cats_to_ids = dict(map(reversed, enumerate(cats)))
        self.ids_to_cats = dict(enumerate(cats))
        self.num_classes = len(self.cats_include)
        self.categories = cats[1:]
        # Processing possibly many datasets
        year, split = map(make_list, [year, split])
        if not subsets:
            subsets = ['all'] * len(year)
        else:
            for s in subsets:
                assert s in ['all', 'pos', 'neg'],\
                    'Wrong subset must be in [all, pos, neg]'
        self.splits = split
        self.years = year
        self.roots = [os.path.join(DATASETS_DIR, 'VOCdevkit/VOC20%s/' % year)
                      for year in self.years]
        for s in self.splits:
            assert s in ['train', 'val', 'trainval', 'test', 'custom']
        assert len(year) == len(split) == len(subsets),\
            'Different number of components'
        if names_file:
            self.create_filenames_from_file(names_file)
        else:
            self.create_filenames(subsets)
        if cut_bad_names:
            self.cut_bad_names_out()
        # deleting from the filenames images that
        # don't contain cats_include instances
        if cats_exclude != [] and is_training is True:
            self.filter_filenames_by_cat()
    def set_up_internal_cats(self, cats_include, cats_exclude):
        """Defines a set of classes supported by this loader"""
        assert len(cats_include) * len(cats_exclude) == 0, """Only one of cats_include or
        cats_exclude could be set, not together"""
        # splitting cats used and not supported by the loader
        if len(cats_include) > 0:
            self.cats_include = sorted(list(set([0] + cats_include)))
            self.cats_exclude = sorted(list(set(list(range(0, 21))) -
                                            set(self.cats_include)))
        else:
            self.cats_exclude = cats_exclude
            self.cats_include = sorted(list(set(list(range(0, 21))) -
                                            set(cats_exclude)))
        # Defining mappings from internal cats to general and back
        #TODO swap the names of these two dicts
        self.cats_to_cats_include = dict(enumerate(self.cats_include))
        self.cats_include_to_cats = dict(map(reversed,
                                             enumerate(self.cats_include)))
    def cut_bad_names_out(self):
        """Throws away samples with inaccurate annotations"""
        bad_names = ['2007_002403', '2011_000834',
                     '2010_000748', '2009_005069']
        self.filenames = [f for f in self.filenames if f not in bad_names]
    def create_filenames(self, subsets):
        """Loads a file with filenames in memory
        Filters out unwanted names. In case of several datasets merges their
        names together while keeping track of each root"""
        self.filenames = []
        self.name2root = dict()
        for year, root, split, subset in zip(self.years, self.roots,
                                             self.splits, subsets):
            # VOC2012 keeps segmentation splits in a separate list file.
            if self.segmentation and year == '12':
                filelist = 'ImageSets/Segmentation/%s.txt'
            else:
                filelist = 'ImageSets/Main/%s.txt'
            filenames = read_textfile(os.path.join(root, filelist % split))
            name2root = dict(zip(filenames, [root] * len(filenames)))
            self.name2root.update(name2root)
            info_message = 'Created a loader VOC%s %s with %i images' \
                % (year, split, len(filenames))
            # filtering our names that don't belong to the subset
            if subset != 'all':
                pos_names, neg_names = self.split_filenames(filenames)
                filenames = pos_names if subset == 'pos' else neg_names
                info_message = ('%s, after selecting %s subset %i '
                                'images is left') % (info_message, subset, len(filenames))
            log.info(info_message)
            self.filenames += filenames
        self.name2root = {f: self.name2root[f] for f in self.filenames}
    def create_filenames_from_file(self, names_file):
        """ Initializes loader image names support from a file
        Creates self.filenames and self.name2root
        """
        self.filenames = []
        self.name2root = dict()
        self.filenames = read_textfile(names_file)
        # All names from an explicit file are assumed to live in the first root.
        self.name2root = dict(zip(self.filenames,
                                  [self.roots[0]] * len(self.filenames)))
        info_message = 'Created a VOC loader with %i images from file %s' \
            % (len(self.filenames), names_file)
        log.info(info_message)
    def filter_filenames_by_cat(self):
        """Filters out filenames that don't contain cats in self.cats_include
        """
        new_filenames = []
        for name in self.filenames:
            # map_cats=False: compare against the global (unmapped) category ids
            cats = self.read_annotations(name, map_cats=False)[2]
            cats_presence = list(set(cats))
            for cat_in in cats_presence:
                if cat_in in self.cats_include:
                    new_filenames.append(name)
                    break
        message = ('Filtered by cat: After deleting {} cats, '
                   ' {} images remained out of {}').format(
                       [c for c in self.cats_exclude],
                       len(new_filenames),
                       len(self.filenames))
        print(message)
        log.info(message)
        self.filenames = new_filenames
    def get_image_path(self, name):
        # JPEG image location for an internal sample name.
        root = self.name2root[name]
        path = '%sJPEGImages/%s.jpg' % (root, name)
        return path
    def get_seg_path(self, name):
        # Segmentation-mask PNG location for an internal sample name.
        seg_file = os.path.join(self.name2root[name], 'SegmentationClass/',
                                name + '.png')
        return seg_file
    def split_filenames(self, filenames):
        """Splits all filenames in positive and negative"""
        pos_names, neg_names = [], []
        for f in filenames:
            cats = self.read_annotations(f)[2]
            pos_names.append(f) if len(cats) > 0 else neg_names.append(f)
        return pos_names, neg_names
    def get_filenames(self, _type='all'):
        """Returns requested set of filenames
        all - all, pos - positive, neg - negative
        """
        if _type == 'all':
            return self.filenames
        assert _type in ['pos', 'neg'], 'Wrong filenames type: %s' % _type
        # Lazily compute and cache the pos/neg split on first request.
        try:
            self.pos_names
        except AttributeError:
            self.pos_names, self.neg_names \
                = self.split_filenames(self.filenames)
        if _type == 'pos':
            return self.pos_names
        if _type == 'neg':
            return self.neg_names
    def load_image(self, name, given_path=None):
        """Loads an image given it's internal name
        The format is suitable for feeding to the network
        (float32 array scaled to [0, 1])
        """
        path = given_path if given_path else self.get_image_path(name)
        im = Image.open(path)
        im = np.array(im) / 255.0
        im = im.astype(np.float32)
        return im
    def read_annotations(self, name, map_cats=True):
        """Loads sample annotations from the disk.
        Args:
            name (str): internal sample name
            map_cats (bool): if the values of categories are mapped to the
            internal ones
        Returns:
            A tuple containing Object bounding boxes, Image segmentation mask,
            Object categories, Image width, Image height, Object difficulty.
        """
        bboxes = []
        cats = []
        tree = ET.parse('%sAnnotations/%s.xml' % (self.name2root[name], name))
        root = tree.getroot()
        width = int(root.find('size/width').text)
        height = int(root.find('size/height').text)
        difficulty = []
        for obj in root.findall('object'):
            cat = self.cats_to_ids[obj.find('name').text]
            difficult = (int(obj.find('difficult').text) != 0)
            difficulty.append(difficult)
            cats.append(cat)
            bbox_tag = obj.find('bndbox')
            # VOC stores corners; convert to (x, y, w, h)
            x = int(bbox_tag.find('xmin').text)
            y = int(bbox_tag.find('ymin').text)
            w = int(bbox_tag.find('xmax').text)-x
            h = int(bbox_tag.find('ymax').text)-y
            bboxes.append((x, y, w, h))
        gt_cats = np.array(cats)
        gt_bboxes = np.array(bboxes).reshape((len(bboxes), 4))
        difficulty = np.array(difficulty)
        if map_cats:
            # throwing away boxes with exclude_cats and mapping their values
            # from global to internal
            inds = np.ones(gt_cats.shape, dtype=bool)
            if self.cats_exclude != []:
                for i, cat in enumerate(gt_cats):
                    inds[i] = cat not in self.cats_exclude
                assert np.any(inds) or not self.is_training, \
                    'No positives in an image left'
            gt_cats = gt_cats[inds]
            gt_bboxes = gt_bboxes[inds]
            difficulty = difficulty[inds]
            # mapping old categories to new cats (cats_include)
            for i, gt_cat in enumerate(gt_cats):
                gt_cats[i] = self.cats_include_to_cats[gt_cat]
        seg_gt = self.read_segmentations(name, height, width)
        output = gt_bboxes, seg_gt, gt_cats, width, height, difficulty
        return output
    def read_segmentations(self, name, height=None, width=None):
        """Loads segmentation annotation from the disk"""
        seg_path = self.get_seg_path(name)
        if self.segmentation and os.path.exists(seg_path):
            seg_map = Image.open(seg_path)
            segmentation = np.array(seg_map, dtype=np.uint8)
        else:
            # if there is no segmentation for a particular image
            # we fill the mask with zeros to keep the same amount
            # of tensors but don't learn from it
            assert height is not None, ('In this case h and w '
                                        'have to be passed: %s') % name
            segmentation = np.zeros([height, width], dtype=np.uint8)
            # in case the file doesn't exist we don't know what could be inside
            # so it's better to stay not certain
            if (not os.path.exists(seg_path) and self.gt_seg):
                segmentation += 255
        return segmentation
    def get_sample(self, name):
        """Outputs a training sample with different types of annotations"""
        gt_bboxes, seg_gt, gt_cats, w, h, difficulty \
            = self.read_annotations(name)
        # normalize boxes to [0, 1] relative coordinates
        gt_bboxes = np.clip(gt_bboxes / np.reshape([w, h, w, h], (1, 4)), 0, 1)
        diff = np.array(difficulty, dtype=np.int32)
        image = self.load_image(name)
        assert (h, w) == image.shape[:2]
        assert len(gt_cats) == 0 or max(gt_cats) <= len(self.categories),\
            'gt_cats: {}, len(cats): {}'.format(max(gt_cats),
                                                len(self.categories))
        out = {'img': image,
               'gt_bboxes': gt_bboxes,
               'gt_cats': gt_cats,
               'seg_gt': seg_gt[:, :, None],
               'diff': diff}
        return out
    def visualize(self, name, draw=True, seg=False):
        """Makes visualization of a training sample with its annotations"""
        sample = self.get_sample(name)
        im = np.uint8(sample['img'] * 255)
        h, w = im.shape[:2]
        # denormalize boxes back to pixel coordinates for drawing
        bboxes = sample['gt_bboxes'] * np.reshape([w, h, w, h], (1, 4))
        cats = sample['gt_cats']
        img = (draw_bbox(im, bboxes=bboxes, cats=cats) if draw
               else Image.fromarray(np.uint8(im)))
        if seg:
            from my_utils import array2palette
            # place the image and its palette-colored mask side by side
            seg = array2palette(np.squeeze(sample['seg_gt'])).convert('RGB')
            new_img = Image.new('RGB', (2*w, h))
            new_img.paste(img, (0, 0))
            new_img.paste(seg, (w, 0))
            img = new_img
        return img
|
# -*- coding: utf-8 -*-
# Rosalind MRNA: number of RNA strings a protein could have been translated
# from, modulo 1,000,000. (Fixes: the table no longer shadows the builtin
# `dict`, the Python-2 print statement is now a function call matching the
# rest of the file, and the input file is closed via `with`.)
# Codon multiplicities per amino acid; '*' = the 3 stop codons.
CODON_COUNTS = {'A': 4, 'R': 6, 'N': 2, 'D': 2, 'C': 2, 'Q': 2, 'E': 2,
                'G': 4, 'H': 2, 'I': 3, 'L': 6, 'K': 2, 'M': 1, 'F': 2,
                'P': 4, 'S': 6, 'T': 4, 'W': 1, 'Y': 2, 'V': 4, '*': 3}


def mrna_count(protein):
    """Return (#codon combinations for *protein*) * 3 stop codons, mod 1e6."""
    prod = 1
    for aa in protein:
        # reduce as we go so the product never grows unboundedly
        prod = prod * CODON_COUNTS[aa] % 1000000
    return prod * 3 % 1000000


if __name__ == "__main__":
    with open('rosalind_mrna.txt', 'r') as fid:
        s = fid.readline().strip()
    print(mrna_count(s))
|
from channels.routing import route
from sleep.consumers import ws_message, ws_connect, ws_disconnect
# Channels routing for the per-graph live-values WebSocket: every
# connect/receive/disconnect on /graph/<id>/values/ goes to the matching
# consumer in sleep.consumers; <id> is captured as a named group.
channel_routing = [
    route("websocket.connect", ws_connect, path=r'^/graph/(?P<id>[^/]+)/values/$'),
    route("websocket.receive", ws_message, path=r'^/graph/(?P<id>[^/]+)/values/$'),
    route("websocket.disconnect", ws_disconnect, path=r'^/graph/(?P<id>[^/]+)/values/$')
]
|
from decouple import config
class Config:
    """Base Flask configuration shared by all environments."""
    # NOTE(review): hard-coded secret key committed to source control --
    # should be loaded from the environment (decouple's config()) instead.
    SECRET_KEY = 'adsi'
class DevelopmentConfig(Config):
    """Development settings: local MySQL database plus Gmail SMTP."""
    # NOTE(review): database and mail credentials are hard-coded and
    # committed to source control -- they should come from the environment
    # and be rotated.
    motor = 'mysql://'
    user_db = 'sena:'
    password_db = 'sena123@'
    server = 'localhost/'
    name_db = 'project_web'
    DEBUG = True
    # Assembled from the pieces above: mysql://sena:sena123@localhost/project_web
    SQLALCHEMY_DATABASE_URI = motor + user_db + password_db + server + name_db
    SQLALCHEMY_TRACK_MODIFICATIONS = False
    MAIL_SERVER = 'smtp.googlemail.com'
    MAIL_PORT = 587
    MAIL_USE_TLS =True
    MAIL_USERNAME = 'sdonapp@gmail.com'
    MAIL_PASSWORD = 'M%%cfe$DavidC.calleHdhBNF3ji2'
# Registry mapping environment names to configuration classes.
# NOTE(review): this rebinds the name `config` imported from decouple at the
# top of the file (which is otherwise unused) -- consider renaming one.
config = {
    'development': DevelopmentConfig,
    'default' : DevelopmentConfig
}
|
# Generated by Django 3.0.8 on 2021-01-15 10:44
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated initial migration creating the DestinationCompany
    model; avoid hand-editing generated migrations."""
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='DestinationCompany',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=150)),
                ('image', models.ImageField(upload_to='pictures')),
                ('name', models.CharField(blank=True, max_length=150, null=True)),
                ('dsm', models.BooleanField(default=False)),
            ],
        ),
    ]
|
import os
import csv
import datetime
def _confirm_dest(default_name, suffix):
    """If *default_name* exists, ask whether to overwrite; otherwise prompt
    for a replacement name. Returns the file name to write to."""
    if os.path.isfile(default_name):
        ifdel = input('是否覆盖重命名文件(y/n:)')
        if ifdel in ['y', 'yes', 'YES', 'Y']:
            os.remove(default_name)
        else:
            filename = input("请输入新的文件名")
            if filename == '':
                print('新文件名为空,可以输出结果但是可能无法打开,请先从系统中重命名后再次打开。')
            default_name = filename + suffix
    return default_name


def _write_header(output, curs):
    """Write the column names from the cursor description as the CSV header."""
    output.writerow([col[0] for col in curs.description])


def print_csv(curs, tbnm, printsql, count):
    """Export the result of *printsql* to one or more CSV files.

    curs: an open DB cursor; tbnm: base name for the output file(s);
    printsql: the SELECT statement to run; count: sequence whose first
    element is the total row count, used to decide how many files to
    split the export into (one file per `interval` rows).

    Fixes over the original: eval() on raw user input replaced with int()
    (arbitrary-code-execution risk), the single-file branch no longer
    leaks its file handle, '{:2f}' corrected to '{:.2f}', and the
    row-interval variable is no longer reused for the elapsed time.
    """
    start_time = datetime.datetime.now()
    # output each table content to a separate CSV file
    print("当前路径:{}".format(os.getcwd()))
    filepath = input("请输入路径:输入为空则使用上述当前路径")
    if filepath == '':
        filepath = os.getcwd()
    elif not os.path.isdir(filepath):
        os.makedirs(filepath)
    else:
        os.chdir(filepath)
        print("修改后路径:{}".format(filepath))
    rr = curs.execute(printsql)
    interval = int(input("请输入每个文件存放的行数:"))
    if count[0] < interval:
        # Everything fits in a single file.
        csv_file_dest = _confirm_dest(tbnm + ".csv", '.csv')
        print("Starting Printing to csv ... \n Path : {0}//{1}".format(filepath, csv_file_dest))
        with open(csv_file_dest, 'w', newline='') as outputfile:
            output = csv.writer(outputfile, dialect='excel')
            _write_header(output, curs)
            for row_data in curs:  # add table rows
                output.writerow(row_data)
    else:
        # Split across ceil(count/interval) files named <tbnm>-<i>.csv.
        for i in range(1, (count[0] // interval) + 2):
            suffix = '-' + str(i) + ".csv"
            csv_file_dest = _confirm_dest(tbnm + suffix, suffix)
            print("Starting Printing to csv ... \n Path : {0}//{1}".format(filepath, csv_file_dest))
            with open(csv_file_dest, 'w', newline='') as outputfile:
                output = csv.writer(outputfile, dialect='excel')
                _write_header(output, curs)
                if i == (count[0] // interval) + 1:
                    # last chunk: whatever rows remain
                    for row_data in rr.fetchall():
                        output.writerow(row_data)
                else:
                    for row_data in rr.fetchmany(numRows=interval):
                        output.writerow(row_data)
    elapsed = (datetime.datetime.now() - start_time).seconds
    if elapsed > 60:
        print("Print to CSV complete success! \n Time : {:.2f} min".format(elapsed / 60.0))
    else:
        print("Print to CSV complete success! \n Time : {:.2f} s".format(elapsed))
|
import myutil
import random
def choose(next_move_types, next_moves, last_move_type, model, cards_left1, cards_left2, player_id, net, playerecord):
    """Select a move for player *player_id* using the strategy named by *model*.

    model is one of "random", "little_smart", "mcts", "DQN" or "manual".
    Returns whatever the chosen strategy helper returns; an unknown model
    name falls through and returns None (unchanged from the original).
    """
    from mcts import MCTSModel
    # Orient the two remaining hands relative to the current player.
    if player_id == 0:
        my_cards, enemy_cards = cards_left1, cards_left2
    else:
        my_cards, enemy_cards = cards_left2, cards_left1
    if model == "random":
        return myutil.choose_random(next_move_types, next_moves, last_move_type)
    if model == "little_smart":
        return myutil.choose_with_little_smart(next_move_types, next_moves, last_move_type)
    if model == "mcts":
        # ~20% of the time play a random move for exploration.
        # BUG FIX: the original computed choose_random() here but discarded
        # the result instead of returning it, so the branch had no effect.
        if random.randint(1, 100) > 79:
            return myutil.choose_random(next_move_types, next_moves, last_move_type)
        if len(next_move_types) == 0:
            return "yaobuqi", []
        mc = MCTSModel()
        return mc.choose_with_mcts(next_moves, next_move_types, last_move_type, my_cards, enemy_cards, player_id)
    if model == "DQN":
        return myutil.choose_DQN(next_move_types, next_moves, last_move_type, my_cards, net, playerecord, len(enemy_cards))
    if model == "manual":
        return myutil.choose_manual(next_move_types, next_moves, last_move_type, my_cards)
|
from bs4 import BeautifulSoup
from urllib.request import urlopen
import bs4
# Scrape the "most read" article headlines from two Korean news sites.
url1 = "https://www.mk.co.kr/news/bestclick/"
url2 = "https://www.hankyung.com/ranking"
html1 = urlopen(url1)
html2 = urlopen(url2)
bs_obj1 = bs4.BeautifulSoup(html1.read(),"html.parser")  # parse the HTML into a navigable tree
bs_obj2 = bs4.BeautifulSoup(html2.read(),"html.parser")
# Maeil Business: headlines live in <dt class="tit"> inside the list area.
div = bs_obj1.find("div",{"class":"list_area"})
dts = div.findAll("dt",{"class":"tit"})
#print(dts)
#for i in dts:
#    print(i.text)  # print only the text
# Hankyung: headlines are the <h2> elements of the ranking list.
ul = bs_obj2.find("ul",{"class":"down_rank_news"})
divs = ul.findAll("h2")
print(divs)
for i in divs:
    print(i.text)  # print only the text of each headline
|
import time
from function_scheduling_distributed_framework import task_deco,BrokerEnum
@task_deco('queue_test_step1',qps=0.5,broker_kind=BrokerEnum.LOCAL_PYTHON_QUEUE)
def step1(x):
    """Stage-1 task: when seeded with x == 0 it fans out 299 more step1
    tasks; for every x it also enqueues 10 step2 tasks, then sleeps."""
    print(f'x 的值是 {x}')
    if x == 0:
        for i in range(1, 300):
            step1.pub(dict(x=x + i))
    for j in range(10):
        step2.push(x * 100 + j)  # push sends plain arguments directly; pub publishes a dict
    time.sleep(10)
@task_deco('queue_test_step2',qps=3,broker_kind=BrokerEnum.LOCAL_PYTHON_QUEUE)
def step2(y):
    """Stage-2 task: logs the received value, throttled to 3 tasks/second."""
    print(f'y 的值是 {y}')
    time.sleep(10)
# step1.clear()
# Seed the pipeline with x=0 and start consuming both queues.
step1.pub({'x': 0})
step1.consume()
step2.consume()
|
def changeMessage(packet):
    """For RTSP traffic to port 8554 with a payload, append a space to every
    'PLAY' line of the decoded payload and re-encode it in place; all other
    packets pass through untouched. Returns the (possibly modified) packet.
    """
    if(packet["TCP"]["dstport"] == 8554 and packet["TCP"]["payload"]):
        try:
            payload = packet["TCP"]["payload"].decode()
        except UnicodeDecodeError:
            # Narrowed from a bare `except:` -- only decoding is expected to
            # fail here; anything else should surface, not be swallowed.
            print("can't decode packet")
            return packet
        lines = payload.split("\r\n")
        for i, line in enumerate(lines):
            if "PLAY" in line:
                lines[i] = line + " "
        packet["TCP"]["payload"] = "\r\n".join(lines).encode()
        print('sending ', packet["TCP"]["payload"])
    return packet
|
# Generated by Django 3.1.1 on 2020-09-04 12:37
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: adds the gender choice field to Student.

    NOTE(review): default=0 is an int while the choices are the strings
    '0'/'1' -- the default should probably be '0'. Since this migration has
    likely been applied, fix it in the model plus a new migration rather
    than editing this generated file.
    """
    dependencies = [
        ('student', '0003_student_password'),
    ]
    operations = [
        migrations.AddField(
            model_name='student',
            name='gender',
            field=models.CharField(choices=[('0', 'Female'), ('1', 'male')], default=0, max_length=20),
        ),
    ]
|
from django.urls import path
from .views import home, product_detail, contact
app_name = 'stock'
# URL routes for the stock app.
# NOTE(review): 'produc/' and name='prodict_detail' look like typos for
# 'product/' / 'product_detail' -- renaming would break existing links and
# any reverse()/{% url %} callers, so this is flagged rather than changed.
urlpatterns = [
    path('', home, name='home'),
    path('produc/<int:product_id>', product_detail, name='prodict_detail'),
    path('contact/', contact, name='contact'),
]
|
import shelve
# Read one entry back from the shelf; the context manager guarantees the
# shelf is closed even if the key is missing.
with shelve.open('test_shelf') as shelf:
    existing = shelf['key1']

print(existing)
# {'int': 10, 'float': 3.4, 'string': 'sample data'}
|
# Generated by Django 2.1 on 2018-09-24 06:34
from django.db import migrations, models
import django.db.models.deletion
import tinymce.models
class Migration(migrations.Migration):
dependencies = [
('mainapp', '0014_auto_20180924_0632'),
]
operations = [
migrations.AlterField(
model_name='category',
name='desc',
field=tinymce.models.HTMLField(blank=True, null=True, verbose_name='Описание'),
),
migrations.RemoveField(
model_name='category',
name='related',
),
migrations.AddField(
model_name='category',
name='related',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, to='mainapp.Category', verbose_name='Сопутствующие категории'),
),
]
|
# -*- coding: utf-8 -*-
# Interactively collect key/value pairs until the user types "end", then
# report whether a search key is present in the collected mapping.
entries = {}
while True:
    print("Key:", end="")
    key_text = input()
    if key_text == "end":
        break
    print("Value:", end="")
    entries[key_text] = input()
print("Search key:", end="")
wanted = input()
print("True" if wanted in entries else "False")
|
from handlers import CeleryHandler
class BaseRequester(object):
    """Template for request dispatchers backed by a pluggable handler.

    Subclasses supply `handler_cls` and implement `is_invalid`.
    """
    handler_cls = NotImplemented  # must be overridden by subclasses

    def __init__(self, host):
        self.handler = self.handler_cls(host)

    def request(self, key):
        """Reject invalid keys; otherwise delegate the lookup to the handler."""
        if not self.is_invalid(key):
            return self.handler.get(key)
        return 'key is invalid'

    def is_invalid(self, key):
        # Validation policy is subclass-specific.
        raise NotImplementedError
class CeleryRequester(BaseRequester):
    """Requester that accepts only non-empty, purely numeric keys."""
    handler_cls = CeleryHandler

    def is_invalid(self, key):
        # De Morgan of the original: invalid when empty/None or non-numeric.
        return not (key and key.isdigit())
|
"""1Forge REST API Class Wrapper"""
import os
import requests
class OneForge(object):
    """1Forge REST API Class Wrapper"""
    ONEFORGE_URL = 'https://forex.1forge.com/1.0.3'
    def __init__(self, api_key=None):
        """Wrapper for 1Forge REST API
        Keyword Arguments:
            api_key {str} -- 1Forge api key. (default: {ONEFORGE_API_KEY environment variable})
        Raises:
            RuntimeError -- If the api key is not provided via parameter or environment variable.
        """
        if api_key is None:
            api_key = os.getenv('ONEFORGE_API_KEY')
        if api_key is None:
            # (grammar fixed: the original message was missing "or set")
            raise RuntimeError('Invalid API KEY. Either provide the api key as a parameter or set the ONEFORGE_API_KEY environment variable')
        self.api_key = api_key
    def _simple_request(self, resource, **params):
        """GET /<resource> with the api key plus any extra query params.

        Generalized (backward-compatibly) to accept query parameters so
        convert() and quotes() can reuse it instead of duplicating the
        request / raise_for_status / json boilerplate.
        """
        url = f'{self.ONEFORGE_URL}/{resource}'
        payload = {'api_key': self.api_key, **params}
        res = requests.get(url, params=payload)
        res.raise_for_status()
        return res.json()
    def convert(self, src, dst, qty=1):
        """Convert *qty* units of currency *src* into currency *dst*."""
        # 'from' is a Python keyword, so the params are passed as a dict.
        return self._simple_request('convert', **{'from': src, 'to': dst, 'quantity': qty})
    def market_status(self):
        """Check if the market is open"""
        return self._simple_request('market_status')
    def quota(self):
        """Check your current usage and remaining quota"""
        return self._simple_request('quota')
    def quotes(self, pairs):
        """Get quotes for specific currency pair(s)"""
        return self._simple_request('quotes', pairs=','.join(pairs))
    def symbols(self):
        """Get a list of symbols"""
        return self._simple_request('symbols')
|
import platform
import re
from asciinema import __version__
from asciinema.urllib_http_adapter import URLLibHttpAdapter
from asciinema.http_adapter import HTTPConnectionError
class APIError(Exception):
    """Raised when the asciinema API rejects a request or is unreachable."""
    pass
class Api:
    """Client for the asciinema server's auth and upload HTTP endpoints."""

    def __init__(self, url, user, token, http_adapter=None):
        self.url = url
        self.user = user
        self.token = token
        if http_adapter is None:
            http_adapter = URLLibHttpAdapter()
        self.http_adapter = http_adapter

    def auth_url(self):
        """URL the user visits to tie this recorder token to their account."""
        return f"{self.url}/connect/{self.token}"

    def upload_url(self):
        return f"{self.url}/api/asciicasts"

    def upload_asciicast(self, path):
        """POST the recording at *path*; return (response body, Warning header)."""
        with open(path, 'rb') as f:
            try:
                status, headers, body = self.http_adapter.post(
                    self.upload_url(),
                    files={"asciicast": ("asciicast.json", f)},
                    headers=self._headers(),
                    username=self.user,
                    password=self.token
                )
            except HTTPConnectionError as e:
                raise APIError(str(e))
        if status not in (200, 201):
            self._handle_error(status, body)
        return body, headers.get('Warning')

    def _headers(self):
        return {'User-Agent': self._user_agent()}

    def _user_agent(self):
        # e.g. "Linux-5.4-x86_64" -> "Linux/5.4-x86_64"
        platform_str = re.sub('([^-]+)-(.*)', '\\1/\\2', platform.platform())
        return 'asciinema/%s %s/%s %s' % (
            __version__,
            platform.python_implementation(),
            platform.python_version(),
            platform_str
        )

    def _handle_error(self, status, body):
        """Translate an HTTP error status into a human-readable APIError."""
        known = {
            400: "Invalid request: %s" % body,
            401: "Invalid or revoked recorder token",
            404: "API endpoint not found. This asciinema version may no longer be supported. Please upgrade to the latest version.",
            413: "Sorry, your asciicast is too big.",
            422: "Invalid asciicast: %s" % body,
            503: "The server is down for maintenance. Try again in a minute."
        }
        message = known.get(status)
        if message is None:
            if status >= 500:
                message = "The server is having temporary problems. Try again in a minute."
            else:
                message = "HTTP status: %i" % status
        raise APIError(message)
|
from rply import ParserGenerator
from ast import NegationEliminationDef, HypotesisDef, PremisseDef
from formule import BinaryFormule, NegationFormule, AthomFormule
from symbol_table import SymbolTable
class Parser():
    """Natural-deduction proof parser built on rply.

    Recognizes numbered proof steps (premises, hypotheses and
    negation-elimination steps) and re-checks each referenced rule once
    the whole program has been parsed.
    """
    def __init__(self, state):
        self.state = state
        self.pg = ParserGenerator(
            # A list of all token names accepted by the parser.
            ['NUM', 'DOT', 'COMMA', 'OPEN_PAREN', 'CLOSE_PAREN', 'NOT', 'IMPLIE',
            'AND', 'OR', 'IMP_INTROD', 'AND_INTROD', 'BOTTOM', 'OPEN_BRACKET',
            'NEG_INTROD', 'NEG_ELIM', 'HYPOTESIS', 'PREMISE', 'ATHOM', 'CLOSE_BRACKET'],
            precedence=[
                ('left', ['NOT','AND', 'OR', 'IMPLIE']),
            ]
        )
        self.symbol_table = SymbolTable()
    def parse(self):
        """Register all grammar productions; call get_parser() afterwards."""
        @self.pg.production('program : steps')
        def program(p):
            # After parsing every step, validate the rule attached to each formule.
            formules = p[0]
            for i in range(0, len(formules)):
                formule = formules[i]
                rule = self.symbol_table.get_rule(formule.toString())
                if(isinstance(rule, PremisseDef) ):
                    pass
                elif(isinstance(rule, HypotesisDef)):
                    pass
                elif(isinstance(rule, NegationEliminationDef)):
                    # Look up the two proof lines the rule references and re-check it.
                    formule1 = self.symbol_table.lookup_formule_by_line(rule.formule, rule.reference1)
                    formule2 = self.symbol_table.lookup_formule_by_line(rule.formule, rule.reference2)
                    if(rule.eval(formule1, formule2) == 4):
                        print('Deu certo')
                    else:
                        print('Algo deu errado')
        @self.pg.production('steps : steps step')
        @self.pg.production('steps : step')
        def steps(p):
            # Accumulate parsed steps into a flat list.
            if len(p) == 1:
                return [p[0]]
            else:
                p[0].append(p[1])
                return p[0]
        @self.pg.production('step : NUM DOT formule PREMISE')
        def Premisse(p):
            # e.g. "1. A premise" -- registers a premise at line p[0].
            formule = p[2]
            premisse = PremisseDef(p[0].value, formule)
            self.symbol_table.insert(premisse)
            return formule
        @self.pg.production('step : NUM DOT formule HYPOTESIS')
        @self.pg.production('step : NUM DOT formule HYPOTESIS OPEN_BRACKET')
        def Hypotesis(p):
            # An opening bracket starts a new hypothesis scope.
            if len(p) > 4:
                self.symbol_table.add_scope()
            formule = p[2]
            hypotesis = HypotesisDef(p[0].value, formule)
            self.symbol_table.insert(hypotesis)
            return formule
        @self.pg.production('step : NUM DOT formule NEG_ELIM NUM COMMA NUM')
        @self.pg.production('step : NUM DOT formule NEG_ELIM NUM COMMA NUM CLOSE_BRACKET')
        def Negation_elim(p):
            # Negation elimination referencing two earlier lines (p[4], p[6]);
            # a closing bracket ends the current hypothesis scope.
            formule = p[2]
            negationElimination = NegationEliminationDef(p[0].value, formule, p[4].value, p[6].value)
            self.symbol_table.insert(negationElimination)
            if len(p) == 8:
                self.symbol_table.end_scope()
            return formule
        @self.pg.production('formule : NOT formule')
        @self.pg.production('formule : ATHOM')
        @self.pg.production('formule : BOTTOM')
        @self.pg.production('formule : formule AND formule')
        @self.pg.production('formule : formule OR formule')
        @self.pg.production('formule : formule IMPLIE formule')
        def formule(p):
            # Build the AST node for atomic, negated or binary formulas.
            if len(p) < 3:
                if p[0].gettokentype() == 'ATHOM':
                    return AthomFormule(key=p[0].value)
                elif p[0].gettokentype() == 'BOTTOM':
                    return AthomFormule(key=p[0].value)
                elif p[0].gettokentype() == 'NOT':
                    return NegationFormule(key=p[1])
            else:
                return BinaryFormule(key=p[1].value, left=p[0], right=p[2])
        @self.pg.production('formule : OPEN_PAREN formule CLOSE_PAREN')
        def paren_formule(p):
            # Parentheses only group; the inner formule is returned as-is.
            return p[1]
        @self.pg.error
        def error_handle(token):
            raise ValueError(token)
    def get_parser(self):
        return self.pg.build()
|
def main(src="Before.txt", dst="After.txt"):
    """Copy *src* to *dst*, upper-casing every line.

    The defaults preserve the original hard-coded filenames; making them
    parameters keeps the transform reusable and testable.
    """
    # 'with' guarantees both handles are closed even on error (the original
    # leaked them if an exception occurred between open() and close()).
    with open(src, "r") as bef, open(dst, "w") as aft:
        for line in bef:
            # print() appends its own newline after the line's existing one,
            # exactly as the original did.
            print(line.upper(), file=aft)


if __name__ == "__main__":
    main()
|
import sys
from Btree import BTree
from dropdown import dropdown
class File(object):
    """Fixed-format record-file helper (Python 2).

    Wraps two data files (`ruta1`, `ruta2`) whose layout is: one line of
    column types, one line of column names, one line with the record count
    (optionally "&"-separated header info), then one "|"-separated record
    per line.  Also merges selected columns of both files into
    "mergeFile.qls".
    """
    def __init__(self, ruta1=None, ruta2=None):
        # ruta1/ruta2: paths of the two source files; buffer: pending records.
        self.ruta1 = ruta1
        self.ruta2 = ruta2
        self.buffer = []
    def remove_chars(self, lista, cadena):
        """Return `cadena` with every character listed in `lista` removed."""
        for char in lista:
            cadena = cadena.replace(char, "")
        return cadena
    def write_tree(self, BTree):
        """Dump the string form of the index tree to Index_Tree.txt."""
        B_tree = BTree
        with open("Index_Tree.txt", "w+") as f:
            f.write(str(B_tree))
        #print (B_tree)
    def getRecords(self):
        """Read all records of file 1, letting the user pick (via a dropdown
        dialog) which column becomes the key; that column is swapped into
        position 0 of the names, the types and every record.

        Returns (records, [types, names, count]).
        """
        lista_records = []
        with open(self.ruta1, "r+")as file:
            lista_tipos = file.readline()
            lista_tipos = self.remove_chars(["[","]","'","\n"," "],lista_tipos)
            lista_tipos = lista_tipos.split(",")
            lista_nombres = file.readline()
            lista_nombres = self.remove_chars(["[","]","'","\n"," "],lista_nombres)
            lista_nombres = lista_nombres.split(",")
            cant = int(file.readline())
            windo = dropdown(lista_nombres)
            windo.show()
            if windo.exec_():
                valor = windo.reto()
            # NOTE(review): if the dialog is cancelled, `valor` is unbound
            # here and the next line raises NameError -- confirm intended.
            indice = int(lista_nombres.index(valor))
            aux = lista_nombres[0]
            lista_nombres[0]= lista_nombres[indice]
            lista_nombres[indice]= aux
            aux = lista_tipos[0]
            lista_tipos[0]= lista_tipos[indice]
            lista_tipos[indice]= aux
            for i in range(cant):
                cadena = file.readline()
                cadena = cadena.replace("\n","")
                cadena = cadena.split("|")
                # Swap the chosen key column into position 0 of the record.
                aux = cadena[0]
                cadena[0]= cadena[indice]
                cadena[indice]= aux
                lista_records.append(cadena)
        return lista_records , [lista_tipos,lista_nombres,cant]
    def getTypeF1(self):
        """Return the column-type list from the first line of file 1."""
        file = open(self.ruta1, "r+")
        tipos = file.readline()
        tipos = self.remove_chars(["[", "]", "'", "\n"], tipos)
        tipos = tipos.split(",")
        file.close()
        return tipos
    def getTypeF2(self):
        """Return the column-type list from the first line of file 2."""
        file = open(self.ruta2, "r+")
        tipos = file.readline()
        tipos = self.remove_chars(["[", "]", "'", "\n"], tipos)
        tipos = tipos.split(",")
        file.close()
        return tipos
    def getCeldasF1(self):
        """Return the column-name list from the second line of file 1."""
        file = open(self.ruta1, "r+")
        campos = file.readline()
        campos = file.readline()
        campos = self.remove_chars(["[", "]", "'", "\n"], campos)
        campos = campos.split(",")
        file.close()
        return campos
    def getCeldasF2(self):
        """Return the column-name list from the second line of file 2."""
        file = open(self.ruta2, "r+")
        campos = file.readline()
        campos = file.readline()
        campos = self.remove_chars(["[", "]", "'", "\n"], campos)
        campos = campos.split(",")
        file.close()
        return campos
    def getCantFile1(self):
        """Return the record count from line 3 of file 1 (text before '&')."""
        file = open(self.ruta1, "r+")
        file.readline()
        file.readline()
        cant= file.readline()
        cant= cant.split("&")
        #totales,head = aux[0],aux[1]
        print "FILE 1",cant[0]
        return int(cant[0])
    def getCantFile2(self):
        """Return the record count from line 3 of file 2 (text before '&')."""
        file = open(self.ruta2, "r+")
        file.readline()
        file.readline()
        cant= file.readline()
        cant = cant.split("&")
        print "FILE 2",cant[0]
        return int(cant[0])
    def createNewFile(self,tipos, campos, cantidad):
        """Create mergeFile.qls with the given types, names and record count.

        The count is written right-aligned in a 10-character field followed
        by "&<head>" where the initial head value is -1.
        """
        self.tipos= tipos
        self.campos= campos
        self.cant= cantidad
        file = open("mergeFile.qls", "w+")
        file.write(str(self.tipos)+"\n")
        file.write(str(self.campos)+"\n")
        # '%10d' -> count right-aligned in a fixed 10-char field so it can be
        # patched in place later (see write_entry's seek(-22, 1)).
        par = '%' + str(10) + 'd'
        new_size = (par % self.cant)
        file.write(str(new_size)+"&"+str(000000-1)+"\n")
        file.close()
    def write_in_newFile(self, indexList1, indexList2):
        """Merge the selected columns (by index) of both source files into
        mergeFile.qls, one "|"-separated record per line."""
        self.indexList1 = indexList1
        self.indexList2= indexList2
        print "-------"
        print self.indexList1
        print self.indexList2
        file = open("mergeFile.qls", "r+")
        # Skip the three header lines of every file involved.
        file.readline()
        file.readline()
        file.readline()
        file1 = open(self.ruta1, "r+")
        file2 = open(self.ruta2, "r+")
        file1.readline()
        file1.readline()
        file1.readline()
        file2.readline()
        file2.readline()
        file2.readline()
        lista_temp = []
        print("este es cant" +str(self.cant))
        for k in range(self.cant):
            temp=[]
            #print(k)
            cadena1 = file1.readline()
            cadena1 = cadena1.split("|")
            cadena2 = file2.readline()
            cadena2 = cadena2.split("|")
            for j in range(len(self.indexList1)):
                #print(str(cadena)+"--------------------i")
                temp.append(cadena1[self.indexList1[j]]+"|")
            print(len(self.indexList2))
            for i in range(len(self.indexList2)):
                #print(str(cadena)+"--------------------j")
                # Last selected column gets no trailing separator.
                if i==len(self.indexList2)-1:
                    temp.append(cadena2[self.indexList2[i]])
                else:
                    temp.append(cadena2[self.indexList2[i]]+"|")
            lista_temp.append(str(temp))
        print "LENGTH", len(lista_temp)
        for i in range(len(lista_temp)):
            print(i)
            print lista_temp[i]
        print("escribire entry")
        self.write_Entrys(lista_temp)
        print("ya")
        # NOTE(review): `file` is closed twice and `file1` is never closed.
        file.close()
        file2.close()
        file.close()
    def write_Entrys(self, registros):
        """Append the already-stringified merged records to mergeFile.qls,
        stripping the list punctuation left behind by str(list)."""
        file = open("mergeFile.qls", "r+")
        file.readline()
        file.readline()
        file.readline()
        for i in range(len(registros)):
            cadena = registros[i]
            cadena = self.remove_chars(["[", "]", "'", ","], cadena)
            file.write(str(cadena)+"\n")
        print "FUCK YA-----------"
    def write_entry(self,buffer):
        """Append the buffered records to file 1 and patch the size/head
        field on its third header line in place.

        The seek(-22, 1) assumes the fixed-width "%10d&%10d\\n" layout
        written by createNewFile -- confirm before changing that format.
        """
        self.buffer = buffer
        for registro in self.buffer:
            with open(self.ruta1, "a+") as file:
                cadena = ""
                for i in range(len(registro)):
                    if i == len(registro)-1:
                        cadena += str(registro[i])+"\n"
                    else:
                        cadena += str(registro[i])+"|"
                file.write(cadena)
        with open(self.ruta1, "r+") as file:
            aux = ""
            for i in range(3):
                aux = file.readline()
            par = '%' + str(10) + 'd'
            total = int(aux.split("&")[0])+len(self.buffer)
            new_size = (par % total)
            end = (par%int(aux.split("&")[1]))
            file.seek(-22, 1)
            file.write(new_size+"&"+end+"\n")
        self.buffer = []
|
#
# Developed by sujayVittal; Sat Mar 11 01:05:34 IST 2017
#
###############
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
from tensorflow.examples.tutorials.mnist import input_data
import tensorflow as tf
FLAGS = None
def weight_variable(shape):
    """Return a tf.Variable initialised from a truncated normal (stddev 0.1)."""
    return tf.Variable(tf.truncated_normal(shape, stddev=0.1))
def bias_variable(shape):
    """Return a tf.Variable filled with the constant 0.1 in the given shape."""
    return tf.Variable(tf.constant(0.1, shape=shape))
def conv2d(x, W):
    """2-D convolution of x with kernel W, stride 1, SAME padding."""
    unit_stride = [1, 1, 1, 1]
    return tf.nn.conv2d(x, W, strides=unit_stride, padding='SAME')
def max_pool_2x2(x):
    """2x2 max-pool with stride 2 and SAME padding (halves each spatial dim)."""
    window = [1, 2, 2, 1]
    return tf.nn.max_pool(x, ksize=window, strides=window, padding='SAME')
def main(_):
    """Train a two-conv-layer MNIST CNN and print train/test accuracy.

    `_` receives argv from tf.app.run and is unused.
    NOTE(review): FLAGS.data_dir (parsed below) is ignored -- the dataset
    path 'MNIST_data' is hard-coded here.
    """
    # Import data
    mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
    # Create the model
    x = tf.placeholder(tf.float32, shape=[None, 784])
    y_ = tf.placeholder(tf.float32, shape=[None, 10])
    W = tf.Variable(tf.zeros([784,10]))
    b = tf.Variable(tf.zeros([10]))
    # Define loss and optimizer
    # NOTE(review): this linear softmax model `y` is defined but never
    # trained; only the CNN (`y_conv`) below is optimised.
    y = tf.matmul(x,W) + b
    sess = tf.InteractiveSession()
    tf.global_variables_initializer().run()
    # Train
    #layer 1 of CNN: We can now implement our first layer. It will consist of convolution, followed by max pooling. The convolution will compute 32 features for each 5x5 patch. Its weight tensor will have a shape of [5, 5, 1, 32]. The first two dimensions are the patch size, the next is the number of input channels, and the last is the number of output channels. We will also have a bias vector with a component for each output channel.
    W_conv1 = weight_variable([5, 5, 1, 32])
    b_conv1 = bias_variable([32])
    # Reshape flat 784-vectors to NHWC image batches (28x28, 1 channel).
    x_image = tf.reshape(x, [-1,28,28,1])
    h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
    h_pool1 = max_pool_2x2(h_conv1)
    #layer 2 CNN: In order to build a deep network, we stack several layers of this type. The second layer will have 64 features for each 5x5 patch.
    W_conv2 = weight_variable([5, 5, 32, 64])
    b_conv2 = bias_variable([64])
    h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
    h_pool2 = max_pool_2x2(h_conv2)
    # densely connected layer : Now that the image size has been reduced to 7x7, we add a fully-connected layer with 1024 neurons to allow processing on the entire image. We reshape the tensor from the pooling layer into a batch of vectors, multiply by a weight matrix, add a bias, and apply a ReLU.
    W_fc1 = weight_variable([7 * 7 * 64, 1024])
    b_fc1 = bias_variable([1024])
    h_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*64])
    h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
    # dropout
    keep_prob = tf.placeholder(tf.float32)
    h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
    # Readout layer
    W_fc2 = weight_variable([1024, 10])
    b_fc2 = bias_variable([10])
    y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2
    cross_entropy = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y_conv))
    train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
    correct_prediction = tf.equal(tf.argmax(y_conv,1), tf.argmax(y_,1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    # NOTE(review): re-initialising here wipes the earlier initialisation;
    # harmless but redundant.
    sess.run(tf.global_variables_initializer())
    for i in range(20000):
        batch = mnist.train.next_batch(50)
        if i%100 == 0:
            # keep_prob 1.0: disable dropout when measuring accuracy.
            train_accuracy = accuracy.eval(feed_dict={
                x:batch[0], y_: batch[1], keep_prob: 1.0})
            print("step %d, training accuracy %g"%(i, train_accuracy))
        train_step.run(feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5})
    print("test accuracy %g"%accuracy.eval(feed_dict={
        x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0}))
    # Test trained model
    # NOTE(review): this second evaluation uses `y`, the untrained linear
    # model above, so the printed percentage reflects random weights --
    # likely leftover tutorial code; confirm before relying on it.
    correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
    accuracy_prob = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    accuracy = accuracy_prob*100
    print(sess.run(accuracy, feed_dict={x: mnist.test.images,
                                        y_: mnist.test.labels}))
if __name__ == '__main__':
    # NOTE(review): --data_dir is parsed into FLAGS but main() reads the
    # hard-coded 'MNIST_data' path instead -- confirm which is intended.
    parser = argparse.ArgumentParser()
    parser.add_argument('--data_dir', type=str, default='/tmp/tensorflow/mnist/input_data',
                        help='Directory for storing input data')
    FLAGS, unparsed = parser.parse_known_args()
    tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
|
from django.shortcuts import render, redirect
def home(request):
    """Redirect every request for the site root to the Django admin."""
    admin_url = '/admin/'
    return redirect(admin_url)
|
from sage.all import *
def diophantine_solver(a, b, c = None):
    """Solve a*x + b*y = c over the non-negative integers (Sage, Python 2).

    Runs the extended Euclidean algorithm inside a 2x4 integer matrix,
    scales the resulting Bezout coefficients by c (default c = a*b), then
    enumerates the finitely many k for which both parametric coordinates
    stay >= 0.  Returns a list of (x, y) pairs; [] when b > a or either
    input is zero.
    """
    if b > a or not a or not b: return []
    a = Integer(a)
    b = Integer(b)
    # it should be sufficient to not swap `a' with `b' if `b > a'
    if c is None: c = a*b
    quo, rem = a.quo_rem(b)
    # Each row tracks (remainder, x-coefficient, y-coefficient, quotient)
    # for the last two steps of the Euclidean algorithm.
    extended_gcd_matrix = matrix(ZZ,[
        [a, 1, 0, -1],
        [b, 0, 1, quo] ])
    while True:
        row = [rem, extended_gcd_matrix[0, 1] -quo*extended_gcd_matrix[1, 1],
            extended_gcd_matrix[0, 2] -quo*extended_gcd_matrix[1, 2], None]
        if not row[0]: break
        quo, rem = extended_gcd_matrix[1,0].quo_rem(row[0])
        row[-1] = quo
        # Shift: the newer row becomes the older one, the fresh row enters.
        extended_gcd_matrix[0, :] = extended_gcd_matrix[1,:]
        extended_gcd_matrix[1, :] = matrix([row])
        if rem == 0: break
    # Particular solution scaled from the Bezout coefficients.
    x_zero = c * extended_gcd_matrix[-1, 1]
    y_zero = c * extended_gcd_matrix[-1, 2]
    k = var('k')
    ab_gcd = gcd(a,b)
    # General solution family: x0 + k*b/g and y0 - k*a/g.
    x_equation = (x_zero + k*b/ab_gcd)
    y_equation = (y_zero - k*a/ab_gcd)
    # Bounds on k that keep both coordinates non-negative.
    x_sol = solve(x_equation >= 0, k)[0][0].rhs()
    y_sol = solve(y_equation >= 0, k)[0][0].rhs()
    x_function = x_equation.function(k)
    y_function = y_equation.function(k)
    print x_function, y_function
    solutions = [(x_function(k), y_function(k)) for k in range(ceil(x_sol), floor(y_sol) + 1)]
    print solutions
    return solutions
#print x_equation, y_equation
#print x_sol, y_sol
#diophantine_solver(Integer(31), Integer(21))
def build_matrix(n=20):
    """Return an n x n Sage matrix whose (i, j) entry is the number of
    non-negative solutions reported by diophantine_solver(i, j)."""
    return matrix(n, lambda i,j: len(diophantine_solver(i,j)))
|
import dash_bootstrap_components as dbc
from dash import html
# Demo layout: two Pagination widgets showing how fully_expanded=False
# collapses a long page list with an ellipsis, while a short list that
# would not save space is left fully expanded.
pagination = html.Div(
    [
        html.Div("Collapse long pagination objects using ellipsis"),
        dbc.Pagination(max_value=10, fully_expanded=False),
        html.Div("If space won't be saved, it won't be collapsed"),
        dbc.Pagination(max_value=5, fully_expanded=False),
    ]
)
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import webapp2
import random
import logging
import jinja2
import os
jinja_environment = jinja2.Environment(
loader=jinja2.FileSystemLoader(
os.path.dirname(__file__)))
def is_palindrome(word):
    """Return a message saying whether *word* reads the same both ways.

    The original split the word with `/`, which yields a float under
    Python 3 and makes the subsequent slicing raise TypeError; comparing
    the string against its reverse is equivalent and version-safe.
    """
    if word == word[::-1]:
        return word + ' is a palindrome!'
    else:
        return word + ' is not a palindrome...'
class TemplateHandler(webapp2.RequestHandler):
    """Render the palindrome check for 'kayak' through palindrome.html."""
    def get(self):
        palindrome_output = is_palindrome('kayak')
        template = jinja_environment.get_template('palindrome.html')
        self.response.write(template.render(
            {'palindrome': palindrome_output}))
class MainHandler(webapp2.RequestHandler):
    """Write the plain-text palindrome check for 'kayak'."""
    def get(self):
        palindrome_output = is_palindrome('kayak')
        self.response.write(palindrome_output)
class FortuneHandler(webapp2.RequestHandler):
    """Render a randomly chosen fortune via fortune.html."""
    def get(self):
        fortunes = [
            'something good',
            'something bad',
            'something mysterious',
        ]
        rand_fortune = random.choice(fortunes)
        template = (
            jinja_environment.get_template('fortune.html'))
        self.response.write(template.render(
            {
                'fortune': rand_fortune
            }))
class SumHandler(webapp2.RequestHandler):
    """Render the sum of two random integers in [0, 100] via sum.html."""
    def get(self):
        first_number = random.randint(0, 100)
        second_number = random.randint(0, 100)
        template = jinja_environment.get_template('sum.html')
        self.response.write(template.render({
            'num1' : first_number,
            'num2' : second_number,
            'sum' : first_number + second_number
        }))
# Route table for the WSGI app.  SumHandler was defined above but never
# routed, leaving it unreachable; '/sum' is wired up here.
app = webapp2.WSGIApplication([
    ('/', MainHandler),
    ('/template', TemplateHandler),
    ('/fortune', FortuneHandler),
    ('/sum', SumHandler)
], debug=True)
|
from django.urls import reverse
from rest_framework.test import APITestCase
from rest_framework import status
from tickets import models, factories
class TestEndpoints(APITestCase):
    """Smoke tests for the ticketing API: list/create movies, showing
    rooms, showings and orders through the DRF router endpoints."""
    def test_movie_flow(self):
        """Listing movies returns 200 once some exist."""
        for i in range(10):
            factories.MovieFactory()
        response = self.client.get(reverse("movies-list"), follow=True)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
    def test_create_movie(self):
        """POSTing a movie creates exactly one record with the given name."""
        data = {'name': 'Batman', 'description': 'Another Good Movie'}
        response = self.client.post(reverse("movies-list"), data, format='json')
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        self.assertEqual(models.Movie.objects.count(), 1)
        self.assertEqual(models.Movie.objects.get().name, 'Batman')
    def test_showing_rooms(self):
        """POST a showing room, verify persistence, then list rooms."""
        room = {'showing_room_name': 'Argentina', 'capacity': '40'}
        response = self.client.post(reverse("rooms-list"), room, format='json')
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        self.assertEqual(models.ShowingRoom.objects.count(), 1)
        self.assertEqual(models.ShowingRoom.objects.get().showing_room_name, 'Argentina')
        response = self.client.get(reverse("rooms-list"))
        self.assertEqual(response.status_code, status.HTTP_200_OK)
    def test_new_showings(self):
        """POST a showing tied to factory-made movie/room, then list it."""
        movie = factories.MovieFactory()
        room = factories.ShowingRoomFactory()
        showing = {
            "price_per_ticket": "10",
            "movie": movie.id,
            "showing_room": room.id,
            "start": "2019-10-25 14:30",
            "end": "2019-10-25 15:30"
        }
        response = self.client.post(reverse("showings-list"), showing, format='json')
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        response = self.client.get(reverse("showings-list"))
        self.assertEqual(response.status_code, status.HTTP_200_OK)
    def test_get_orders(self):
        """Listing orders returns 200 when at least one order exists."""
        factories.OrderFactory()
        response = self.client.get(reverse("order-list"))
        self.assertEqual(response.status_code, status.HTTP_200_OK)
    def test_post_new_order(self):
        """POSTing an order for an existing showing returns 201."""
        showing = factories.ShowingFactory()
        order = {
            "email": "test@test.com",
            "showing": showing.id,
            "quantity": '1',
        }
        response = self.client.post(reverse("order-list"), order, format='json')
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
|
# Test of fcsreader.py
# (cc) 2017 Ali Rassolie
# Formagna
import fcsreader
import matplotlib.pyplot as plt
# Parse the FCS flow-cytometry file and grab its event table.
process = fcsreader.fcsReader("data.fcs")
s = process.data()  # presumably a pandas DataFrame (has .columns/.plot) -- TODO confirm
print(s.columns)
# Scatter side-scatter area vs forward-scatter area (typical gating view).
s.plot(x = "SSC-A", y="FSC-A", kind="scatter")
plt.show()
|
#coding:gb2312
# Print each friend's name together with a greeting message.
names=['cys','ljy','ft','hl','jy']
message=","+"I'm very glad to meet you"+"!"
# One loop replaces five copy-pasted print calls (the last of which rebuilt
# the greeting inline instead of reusing `message`). Output is identical.
for name in names:
    print(name.title()+message)
|
## This file contains all of the required code to set up a new database on the
## system.
import databasefunctions as dbf
# Connection parameters for the local PostgreSQL instance.
database_name = "StudioTest"
password = "password"
port = 5432
cnxn = dbf.connecting_to_postgresql(database_name, password, port)
# Create the tables, seed the menu, then echo both tables for verification.
dbf.create_customer_table(cnxn)
dbf.create_menu_table(cnxn)
dbf.add_items_to_menu(cnxn)
dbf.print_table(cnxn, "Customer_Info")
dbf.print_table(cnxn, "Menu")
|
int_data = 1 # integer literal
float_data = 3.14 # float literal
complex_data = 1+5j # complex-number literal
str_data1 = 'I love Python' # string literal (English)
str_data2 = "반갑습니다." # string literal (Korean: "Nice to meet you")
list_data=[1,2,3] # list literal
tuple_data=(1,2,3) # tuple literal
dict_data = {0:'False', 1:'True'} # dict literal
|
# Read four numbers and report the largest.  Starting from None instead of
# 0 fixes the bug where an all-negative input wrongly reported 0.
large = None
for i in range(0, 4):
    userinput = input("Number please...")
    usernum = int(userinput)
    if large is None or large < usernum:
        large = usernum
print("The largest number is: " + str(large))
|
#!/usr/bin/env python
# encoding: utf-8
"""
Created by 'bens3' on 2013-06-21.
Copyright (c) 2013 'bens3'. All rights reserved.
"""
import ckanapi
from ke2mongo.log import log
from ke2mongo import config
import luigi
class APITask(luigi.Task):
    """
    Base CKAN API Task
    """
    # Date to process
    date = luigi.IntParameter()
    # Date of the last full KE EMu export, read from config at class
    # creation time (so a missing config key fails at import).
    full_export_date = config.get('keemu', 'full_export_date')
    def __init__(self, *args, **kwargs):
        # If a date parameter has been passed in, we'll just use that
        # Otherwise, loop through the files and get all dates
        super(APITask, self).__init__(*args, **kwargs)
        # Authenticated client for the configured CKAN site.
        self.remote_ckan = ckanapi.RemoteCKAN(config.get('ckan', 'site_url'), apikey=config.get('ckan', 'api_key'))
|
import os
import cv2
import numpy as np
import shutil
###########################Display image##################################################################
def show(Im,Name="img"):
    """Display `Im` (cast to uint8) in an OpenCV window titled `Name` and
    block until any key is pressed; then close all windows."""
    cv2.imshow(Name,Im.astype(np.uint8))
    cv2.waitKey()
    cv2.destroyAllWindows()
##################################################################################################################################################################
#Split binary mask correspond to a singele segment into connected components
def GetConnectedSegment(Seg):
    """Split a binary mask into its connected components.

    Returns (Mask, BBox, Sz, N) where N is the component count excluding
    the background, Mask[i] is the boolean mask of component i, BBox[i]
    its (x, y, w, h) bounding box and Sz[i] its pixel count.

    Fix: arrays are now sized N; the original allocated NumCCmp rows and
    left a trailing, always-zero row because cv2 counts the background
    (label 0) as a component.
    """
    [NumCCmp, CCmpMask, CCompBB, CCmpCntr] = cv2.connectedComponentsWithStats(Seg.astype(np.uint8)) # apply connected component
    Mask = np.zeros([NumCCmp - 1, Seg.shape[0], Seg.shape[1]], dtype=bool)
    BBox = np.zeros([NumCCmp - 1, 4])
    Sz = np.zeros([NumCCmp - 1], np.uint32)
    for i in range(1, NumCCmp):  # label 0 is the background; skip it
        Mask[i-1] = (CCmpMask == i)
        BBox[i-1] = CCompBB[i][:4]
        Sz[i-1] = CCompBB[i][4] #segment Size
    return Mask, BBox, Sz, NumCCmp - 1
############################################################################################################################
##############################################################################################
MainDir=r"C:\Users\Sagi\Desktop\CHEMSCAPE\ChemLabScapeDataset\TestAnnoatations\\"
# For every annotation folder: take each vessel instance mask, subtract the
# filled-liquid and vapor regions, and save every sufficiently large
# connected remainder as an "empty region" instance image.
for AnnDir in os.listdir(MainDir):
    VesDir = MainDir + "/" + AnnDir + r"//Vessel//"
    SemDir = MainDir + "/" + AnnDir + r"//Semantic//"
    EmptyDir = MainDir + "/" + AnnDir + r"//EmptyRegions//"
    Img = cv2.imread(MainDir +"/"+ AnnDir + "/Image.png")
    # Recreate the output folder from scratch on every run.
    if os.path.isdir(EmptyDir): shutil.rmtree(EmptyDir)
    os.mkdir(EmptyDir)
#___________________________________________________________________________________________________________________
    NumEmptyInst=0
    IsFilled=False
    IsVapor=False
    if os.path.exists(SemDir+"16_Filled.png"):
        Filled=cv2.imread(SemDir+"16_Filled.png")[:,:,0]>0
        IsFilled = True
    # NOTE(review): this existence check lacks the ".png" suffix that the
    # imread below uses -- the vapor branch may never trigger; confirm.
    if os.path.exists(SemDir+"14_Vapor"):
        Vapor=cv2.imread(SemDir+"14_Vapor.png")[:,:,0]>0
        IsVapor = True
    for Name in os.listdir(VesDir):
        path=VesDir+Name
        if not os.path.exists(path): continue
        Ves=cv2.imread(path)
        Ves[:,:,1]*=0
        # If no pixel carries label 7, clear the red channel and rewrite
        # the mask file in place.
        if not 7 in Ves:
            Ves[:,:,2]*=0
            cv2.imwrite(path,Ves)
            print(path)
        # show(Ves*30)
        Ves[:, :, 1]=Ves[:, :, 0]
        Mask=Ves[:, :, 0]>0
        # Remove filled/vapor areas so only the empty part remains.
        if IsFilled:
            Mask[Filled]=0
        if IsVapor:
            Mask[Vapor]=0
        # show((Img/2+Ves*100).astype(np.uint8))
        #show(Ves * 100, str(NumEmptyInst)+"ALL VESSEL")
        Mask,BBox,Sz,NumCCmp=GetConnectedSegment(Mask.astype(np.uint8))
        for i in range(NumCCmp):
            # Skip components smaller than 1200 pixels.
            if Mask[i].sum()<1200: continue
            Inst=Ves.copy()
            Inst[:, :, 0]=Inst[:, :, 1]*Mask[i]
            # NumEmptyInst+=1
            # NOTE(review): the increment above is commented out, so every
            # instance is written to "0.png" and overwrites the previous
            # one -- confirm whether that is intended.
            cv2.imwrite(EmptyDir+"//"+str(NumEmptyInst)+".png",Inst)
            # show(Inst[:,:,0]*30,str(NumEmptyInst))
        # if 7 in Ves:
        #     print("444")
        #     show(Ves*20)
        #     show((Ves==7).astype(np.uint8)*100)
|
# Passing command-line arguments to the program
import sys
print(sys.argv)
# name = sys.argv[1]
# print('Welcome %s !!!'%name) # the value of name can be supplied from outside the program
# List comprehensions
a = [i for i in range(1, 8)]
print(a)
b = [6 for i in range(1, 8)]
print(b)
c = [i for i in range(10) if i % 2 == 0]
print(c)
d = [i for i in range(3) for j in range(2)]
print(d)
e = [(i, j) for i in range(3) for j in range(2)]
print(e)
f = [(i, j, k) for i in range(3) for j in range(2) for k in range(2)]
print(f)
aa = (11,22,33,44,55,11,22,33)
bb = [11,22,33,11,22,33]
print(aa)
print(bb)
print(type(aa))
cc = set(bb) # set() also removes duplicates
print(cc)
dd = list(cc)
print(dd)
|
# Generated by Django 2.0.2 on 2019-03-08 13:03
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated initial migration: creates the EnterPrice table.
    # NOTE(review): do not edit an applied migration. 'enterprice_desc'
    # reuses the verbose_name of 'product_desc' -- likely a copy-paste slip
    # in the model; fix it there with a follow-up migration if needed.
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='EnterPrice',
            fields=[
                ('create_time', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')),
                ('update_time', models.DateTimeField(auto_now=True, verbose_name='更新时间')),
                ('is_delete', models.BooleanField(default=False, verbose_name='删除标记')),
                ('id', models.AutoField(primary_key=True, serialize=False, verbose_name='企业ID')),
                ('name', models.CharField(max_length=50, verbose_name='企业名称')),
                ('product_desc', models.TextField(verbose_name='产品介绍')),
                ('enterprice_desc', models.TextField(verbose_name='产品介绍')),
                ('enterprice_gm', models.CharField(default='50-99人', max_length=20)),
                ('enterprice_type', models.CharField(default='移动互联网', max_length=20, verbose_name='所属行业')),
                ('finance_stage', models.CharField(default='不需要融资', max_length=10, verbose_name='融资阶段')),
                ('logo', models.ImageField(upload_to='enterprice/%Y/%m', verbose_name='logo')),
                ('address', models.CharField(max_length=150, verbose_name='企业地址')),
                ('city', models.CharField(max_length=20, verbose_name='城市')),
            ],
            options={
                'verbose_name': '企业',
                'verbose_name_plural': '企业',
            },
        ),
    ]
|
'''
Created on 2017年2月6日
@author: admin
'''
#The class to be tested
class Widget:
    """Minimal widget used as the system under test in unit-test examples."""

    def __init__(self, size=(40,40)):
        self._size = size

    def getSize(self):
        """Return the current (width, height) tuple."""
        return self._size

    def resize(self, width, height):
        """Set a new size; negative dimensions are rejected."""
        if min(width, height) < 0:
            raise ValueError("illegal size")
        self._size = (width, height)

    def dispose(self):
        """Release resources (no-op in this stub)."""
        pass
|
def isYearLeap(year):
    """Return True for Gregorian leap years: divisible by 4, except that
    century years must also be divisible by 400."""
    return year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)
def daysInMonth(year, month):
    """Return the number of days in the given month, or None for
    out-of-range input (year < 1500 or month outside 1..12)."""
    if year < 1500 or month < 1 or month > 12:
        return None
    if month == 2:
        # Leap-year test inlined (same rule as isYearLeap).
        leap = year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)
        return 29 if leap else 28
    return 31 if month in (1, 3, 5, 7, 8, 10, 12) else 30
# Tiny regression harness: known (year, month) pairs and expected day counts.
testYears = [1900, 2000, 2016, 1987]
testMonths = [2, 2, 1, 11]
testResults = [28, 29, 31, 30]
for i in range(len(testYears)):
    yr = testYears[i]
    mo = testMonths[i]
    print(yr, mo, "->", end="")
    result = daysInMonth(yr, mo)
    if result == testResults[i]:
        print("OK")
    else:
        print("Failed")
|
#========================================
# author: Changlong.Zang
# mail: zclongpop123@163.com
# time: Tue Sep 19 14:44:30 2017
#========================================
import pymel.core as pm
import maya.OpenMaya as OpenMaya
import dag
#--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
def rig_joint(joint):
    '''Build a ctl/cth/ctg/grp transform chain for one joint, snap it to the
    joint's transform, constrain the joint to the controller and give the
    controller a circle shape.  Returns (ctl, cth, ctg, grp).
    NOTE(review): node names use the literal "XXnamespaceXX" placeholder --
    presumably substituted elsewhere; confirm.
    '''
    #- create
    ctl = pm.createNode('transform', name='XXnamespaceXX_ctl_0')
    cth = pm.createNode('transform', name='XXnamespaceXX_cth_0')
    ctg = pm.createNode('transform', name='XXnamespaceXX_ctg_0')
    grp = pm.createNode('transform', name='XXnamespaceXX_grp_0')
    #- parent
    pm.parent(ctl, cth)
    pm.parent(cth, ctg)
    pm.parent(ctg, grp)
    #- match positions (constrain then delete = one-shot snap)
    pm.delete(pm.parentConstraint(joint, grp))
    #- constraint
    pm.parentConstraint(ctl, joint)
    #- control shape: steal the circle's shape node, then discard the circle
    circle = pm.circle(nr=(1, 0, 0), ch=False)[0]
    pm.parent(circle.getShape(), ctl, s=True, r=True)
    pm.delete(circle)
    return ctl, cth, ctg, grp
def rig_joint_tree(root):
    '''Rig every joint yielded by dag.get_dag_tree(root) with rig_joint(),
    parenting each control group under its parent joint's controller so the
    joint hierarchy is mirrored.  Controls are cached by joint UUID.
    Returns True.
    '''
    controls = dict()
    for i, jnt in enumerate(list(dag.get_dag_tree(root))):
        jnt_mfn = OpenMaya.MFnDagNode(jnt)
        #- build the control chain for this joint
        jnt_ctl = rig_joint(jnt_mfn.fullPathName())
        #- if the parent is a joint, nest this group under the parent's
        #  controller (falling back to the parent node itself)
        jnt_pnt = jnt_mfn.parent(0)
        if jnt_pnt.apiType() == OpenMaya.MFn.kJoint:
            jnt_pnt_mfn = OpenMaya.MFnDagNode(jnt_pnt)
            jnt_pnt_ctl = controls.get(jnt_pnt_mfn.uuid().asString(), [jnt_pnt_mfn.fullPathName()])
            pm.parent(jnt_ctl[-1], jnt_pnt_ctl[0])
        #- remember this joint's controls keyed by UUID
        jnt_uuid = jnt_mfn.uuid().asString()
        controls[jnt_uuid] = jnt_ctl
    return True
|
# class Solution(object):
# def findKthLargest(self, nums, k):
# """
# :type nums: List[int]
# :type k: int
# :rtype: int
# """
# l = nums[:k]
#
# for i in range(k, len(nums)):
# if nums[i] > min(l):
# l.remove(min(l))
# l.append(nums[i])
# return min(l)
#
#
#
class Solution(object):
    def findKthLargest(self, nums, k):
        """
        :type nums: List[int]
        :type k: int
        :rtype: int
        Heap variant: heapify nums in place, discard the smallest
        len(nums) - k values, and the next pop is the k-th largest.
        (Mutates nums, exactly like the original.)
        """
        import heapq
        heapq.heapify(nums)
        for _ in range(len(nums) - k):
            heapq.heappop(nums)
        return heapq.heappop(nums)
class Solution(object):
    def findKthLargest(self, nums, k):
        """
        :type nums: List[int]
        :type k: int
        :rtype: int
        Quickselect: the k-th largest is the (len(nums)+1-k)-th smallest.
        """
        return self.findKthLargestHelper(nums, 0, len(nums) - 1, len(nums) + 1 - k)
    def findKthLargestHelper(self, nums, p, r, i):
        """Iteratively select the i-th smallest element of nums[p..r]."""
        while p < r:
            q = self.randomPartation(nums, p, r)
            rank = q - p + 1
            if i == rank:
                return nums[q]
            if i < rank:
                r = q - 1
            else:
                p = q + 1
                i -= rank
        return nums[p]
    def randomPartation(self, nums, p, r):
        """Swap a uniformly chosen pivot into position r, then partition."""
        import random
        pivot_index = random.randint(p, r)
        nums[r], nums[pivot_index] = nums[pivot_index], nums[r]
        return self.partation(nums, p, r)
    def partation(self, nums, p, r):
        """Lomuto partition around nums[r]; return the pivot's final index."""
        pivot = nums[r]
        boundary = p
        for idx in range(p, r):
            if nums[idx] <= pivot:
                nums[boundary], nums[idx] = nums[idx], nums[boundary]
                boundary += 1
        nums[boundary], nums[r] = nums[r], nums[boundary]
        return boundary
if __name__ == '__main__':
    # Quick manual check: the 2nd largest of [1, 5, 6, 4, 2, 3] should be 5.
    s = Solution()
    l = [1, 5, 6, 4, 2, 3]
    t = s.findKthLargest(l, 2)
    print(t)
|
def factors(n):
    """Print every positive divisor of n in ascending order."""
    for candidate in range(1, n + 1):
        if n % candidate == 0:
            print(candidate)
# Script entry: read a number and list its factors.
n=int(input("Enter the number"))
print("Factors of %d are"%n)
factors(n)
|
import os
import sqlite3
db_filename = 'dhcp_snooping.db'
schema_filename = 'dhcp_snooping_schema.sql'


def create_db(db_filename, schema_filename):
    """Create the SQLite database from *schema_filename* if it is absent.

    If the database file already exists, the schema script is not re-run.
    Fixes: the connection was never closed (leaked handle) and the message
    misspelled "already".
    """
    db_exists = os.path.exists(db_filename)
    conn = sqlite3.connect(db_filename)
    try:
        if not db_exists:
            print('Creating schema...')
            with open(schema_filename, 'r') as f:
                schema = f.read()
            # executescript commits implicitly before running the script.
            conn.executescript(schema)
            print('Done')
        else:
            print('Database already exists.')
    finally:
        conn.close()


if __name__ == '__main__':
    create_db(db_filename, schema_filename)
|
# coding: utf-8
# In[3]:
1.#Basic arithmatic operation
# Read two integers and print the four basic operations.
# NOTE: `/` is true (float) division under Python 3, so Division may be
# fractional even for integer inputs.
a=int(input("Input first number"))
b=int(input("Input second number"))
Addition=a+b
Subtraction=a-b
Multiplication=a*b
Division=a/b
print ("Addition is:",Addition)
print ("Subtraction is:",Subtraction)
print ("Multiplication is:",Multiplication)
print ("Division is:",Division)
# In[6]:
2. #Biggest of 3 numbers
a=int(input("Input first number"))
b=int(input("Input second number"))
c=int(input("Input third number"))
# Fixed comparison logic: the original `(a>b) and (b>c)` missed inputs such
# as a=3, b=1, c=2, where a is biggest but b>c is false, so c was reported.
if a >= b and a >= c:
    biggest_number = a
elif b >= c:
    biggest_number = b
else:
    biggest_number = c
print ("Biggest of 3 numbers is:",biggest_number)
# In[11]:
3. #Even or Odd number
# int(a/2)*2 reconstructs a only when a is even: a parity test without %.
a=int(input("Input number"))
b=int(a/2)*2
if(b==a):
    print("This Number is Even")
else:
    print("This Number is Odd")
# In[15]:
4. #Prime number
def primenum(Number):
    """Return the list of all primes in [2, Number].

    Trial division now stops at sqrt(candidate) and breaks on the first
    divisor found; the original kept testing every smaller number even
    after compositeness was established.
    """
    primenumber = []
    for Primes in range(2, Number + 1):
        isPrime = True
        for num in range(2, int(Primes ** 0.5) + 1):
            if Primes % num == 0:
                isPrime = False
                break
        if isPrime:
            primenumber.append(Primes)
    return primenumber
# In[ ]:
5. #Read string and print each character separately
# NOTE(review): this exercise cell was left unimplemented in the notebook.
|
# xmltools.py
import sys
import xml.sax.saxutils as sux
def pprint_xml(node, indent="", f=sys.stdout):
    """ Pretty-print an ElementTree XML node. (Does not handle attributes.) """
    # Element.getchildren() was deprecated since Python 3.2 and removed in
    # 3.9; iterating the element (list(node)) is the supported equivalent.
    children = list(node)
    if children:
        f.write("%s<%s>\n" % (indent, node.tag))
        for child in children:
            pprint_xml(child, indent+"  ", f)
        f.write("%s</%s>\n" % (indent, node.tag))
    else:
        # Leaf: open tag, escaped text (None -> ""), close tag on one line.
        f.write(indent)
        f.write("<%s>" % node.tag)
        f.write(sux.escape(node.text or ""))
        f.write("</%s>\n" % node.tag)
|
from tipo_questao import *
from lockable import *
from questao import *
from questao_avaliacao import *
from fonte import *
from filtro_questao import *
from multipla_escolha import *
from path_utils import *
|
import pandas as pd
import numpy as np
from sklearn import linear_model
from sklearn import metrics
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler, Normalizer, MinMaxScaler
pd.set_option('display.float_format', lambda x: '%.5f' % x)
# pd.set_option('display.max_columns', None)
pd.set_option('display.width', 120)
pd.set_option('display.max_rows', 100)
pd.set_option('display.min_rows', 30)
# Load cleaned apartment sales plus per-departement median m2 prices.
df = pd.read_csv("data/immobilier/data_clean/appartements.csv", index_col=0, dtype={"code_departement":str, "code_commune":str})
departement_prices_df = pd.read_csv("data/immobilier/data_clean/m2_appartement_price_per_departement.csv", index_col=0)
df = df.dropna()
# drop outre-mer for more metropolitan precision
df = df[~df["code_departement"].isin(["971", "972", "973", "974"])]
df = df.merge(departement_prices_df["2019_median"], how="inner", left_on="code_departement", right_on="code_departement", validate="many_to_one")
X = df[["surface_reelle_bati", "nombre_pieces_principales", "2019_median"]]
Y = df["valeur_fonciere"]
# scikit-learn scalers work column-wise on the underlying array; assigning
# back through the column index preserves the DataFrame structure.
# Fixed two NameErrors from the original: `preprocessing` was never
# imported (MinMaxScaler is imported directly above) and the scaler
# variable was misspelled (`min_max_sclaer` vs the `min_max_scaler` used
# two lines later).
min_max_scaler = MinMaxScaler()
X_transformed = X.copy()
X_transformed[X_transformed.columns] = min_max_scaler.fit_transform(X_transformed)
# scaler = StandardScaler()
# scaler.fit(X)
# X = scaler.transform(X)
# normaliser1 = Normalizer()
# normaliser1.fit(X)
# X = normaliser1.transform(X)
# X = X.values #returns a numpy array
# X_scaled = min_max_scaler.fit_transform(X)
# X = pd.DataFrame(X_scaled)
X_train, X_test, Y_train, Y_test = train_test_split(X, Y)
lr1 = linear_model.LinearRegression()
lr1.fit(X_train, Y_train)
Y_predict = lr1.predict(X_test)
print(lr1.coef_)
print(lr1.intercept_)
print(metrics.r2_score(Y_test, Y_predict))
print(metrics.mean_squared_error(Y_test, Y_predict, squared=False))
print(metrics.mean_absolute_error(Y_test, Y_predict))
print(metrics.mean_absolute_percentage_error(Y_test, Y_predict))
|
a = 20
b = 15
# Tuple unpacking swaps in one step; the original used a temporary
# variable `c`, which it then left dangling in the module namespace.
a, b = b, a
print(a)
print(b)
|
#http://www.codeskulptor.org/#user43_38xv9eBr3U_4.py
"""
Monte Carlo Tic-Tac-Toe Player
"""
import random
import poc_ttt_gui
import poc_ttt_provided as provided
# Constants for Monte Carlo simulator
# You may change the values of these constants as desired, but
# do not change their names.
NTRIALS = 200 # Number of trials to run
SCORE_CURRENT = 2.0 # Score for squares played by the current player
SCORE_OTHER = 1.5 # Score for squares played by the other player
# Add your functions here.
def mc_trial(board, player):
    '''Play one game to completion with uniformly random moves.

    Starts with the given player and alternates after every move. The
    board is modified in place until check_win() reports a result, so
    nothing is returned.'''
    finished = False
    while not finished:
        row, col = random.choice(board.get_empty_squares())
        board.move(row, col, player)
        player = provided.switch_player(player)
        # check_win() returns None while the game is still in progress.
        finished = board.check_win() is not None
def update_temp_scores_me_win(board_dim, board, temp_scores, player):
    '''Fill temp_scores for a game the machine player won.

    The winner's squares get +SCORE_CURRENT, the opponent's get
    -SCORE_OTHER, and empty squares stay at 0.'''
    opponent = provided.switch_player(player)
    for r_idx in range(board_dim):
        for c_idx in range(board_dim):
            occupant = board.square(r_idx, c_idx)
            if occupant == player:
                temp_scores[r_idx][c_idx] = SCORE_CURRENT
            elif occupant == opponent:
                temp_scores[r_idx][c_idx] = -SCORE_OTHER
            elif occupant == provided.EMPTY:
                temp_scores[r_idx][c_idx] = 0
def update_temp_scores_you_win(board_dim, board, temp_scores, player):
    '''Fill temp_scores for a game the machine player lost.

    Mirror image of the winning case: the machine player's squares get
    -SCORE_CURRENT, the opponent's get +SCORE_OTHER, empties stay 0.'''
    opponent = provided.switch_player(player)
    for r_idx in range(board_dim):
        for c_idx in range(board_dim):
            occupant = board.square(r_idx, c_idx)
            if occupant == player:
                temp_scores[r_idx][c_idx] = -SCORE_CURRENT
            elif occupant == opponent:
                temp_scores[r_idx][c_idx] = SCORE_OTHER
            elif occupant == provided.EMPTY:
                temp_scores[r_idx][c_idx] = 0
def mc_update_scores(scores, board, player):
    '''Score a completed board and accumulate the result into scores.

    Draws contribute nothing; wins and losses delegate to the two
    helpers above. The scores grid is updated in place, so nothing is
    returned.'''
    dim = board.get_dim()
    trial_scores = [[0] * dim for _dummy_row in range(dim)]
    winner = board.check_win()
    # A draw (or an unfinished board) leaves trial_scores all zero.
    if winner == player:
        update_temp_scores_me_win(dim, board, trial_scores, player)
    elif winner == provided.switch_player(player):
        update_temp_scores_you_win(dim, board, trial_scores, player)
    for row in range(dim):
        for col in range(dim):
            scores[row][col] += trial_scores[row][col]
def get_best_move(board, scores):
    '''Return a (row, column) move for the current board.

    Chooses uniformly at random among the empty squares whose score is
    maximal. Calling this with a full board is an error; here it logs a
    message and returns None (that case is never exercised by the game).
    '''
    empty_squares = board.get_empty_squares()
    if not empty_squares:
        # print() with a single argument behaves identically under
        # Python 2 and Python 3, unlike the old print statement.
        print('error in get_best_move: there is no empty square to move')
        return None
    # One pass to find the best score, then gather every square achieving
    # it (replaces the fragile `idx == 0` seeding of the original loop).
    best_score = max(scores[row][col] for (row, col) in empty_squares)
    best_moves = [(row, col) for (row, col) in empty_squares
                  if scores[row][col] == best_score]
    return random.choice(best_moves)
def mc_move(board, player, trials):
    '''Run `trials` Monte Carlo games from the current position and
    return the best (row, column) move for the machine player.'''
    dim = board.get_dim()
    scores = [[0] * dim for _dummy_row in range(dim)]
    for _dummy_trial in range(trials):
        # Each trial plays a random game on a fresh clone so the real
        # board is never touched.
        trial_board = board.clone()
        mc_trial(trial_board, player)
        mc_update_scores(scores, trial_board, player)
    return get_best_move(board, scores)
# Test game with the console or the GUI. Uncomment whichever
# you prefer. Both should be commented out when you submit
# for testing to save time.
####################################
# # my test
# # func1: mc_trial(board, player)
# print 'func1: mc_trial(board, player)'
# board = provided.TTTBoard(3)
# board.move(1,1,provided.PLAYERX)
# board.move(0,2,provided.PLAYERO)
# print board
# board_clone = board.clone()
# mc_trial(board_clone, provided.PLAYERX)
# print board_clone
# # func2: mc_update_scores(scores, board, player)
# print 'func2: mc_update_scores(scores, board, player)'
# board_dim = board.get_dim()
# scores = [[0 for dummycol in range(board_dim)]
# for dummyrow in range(board_dim)]
# mc_update_scores(scores,board_clone,provided.PLAYERX)
# print scores
# board_clone = board.clone()
# mc_trial(board_clone, provided.PLAYERX)
# print board_clone
# mc_update_scores(scores,board_clone,provided.PLAYERX)
# print scores
# # func3: get_best_move(board, scores)
# print 'func3: get_best_move(board, scores)'
# print get_best_move(board, scores)
# # func4: mc_move(board, player, trials)
# print 'func4: mc_move(board, player, trials)'
# mc_move(board,provided.PLAYERX,1000)
####################################
#provided.play_game(mc_move, NTRIALS, False)
#poc_ttt_gui.run_gui(3, provided.PLAYERX, mc_move, NTRIALS, False)
|
# Variadic parameters: *numbers packs any number of positional arguments into a tuple.
def calcu(*numbers):
    """Return the sum of the squares of all positional arguments."""
    return sum(value * value for value in numbers)
print(calcu(0,1,3))
# Keyword arguments (**kw) collect any extra named arguments into a dict,
# in contrast to *args which collects extra positional ones.
def person(name,age,**kw):
    """Print the name, age and any extra keyword information."""
    print('name',name,'age',age,'others:',kw)
person('zhao',12,city=5)
# Named keyword arguments are passed dict-style and must be given by key.
L=range(100)
for i in L[::5]:
    print(i)# slicing with a step of 5 picks every 5th value
# Dictionaries are also iterable in Python.
# Use collections.abc.Iterable to test whether an object is iterable.
f=calcu
print(f(1,2,3))
# Functions can be assigned to variables: a function name is itself a variable.
# After f=10, f is no longer a function but an int variable holding 10.
f=abs
def add(x,y,f):
    """Apply f to x and y and return the sum of the two results."""
    return f(x)+f(y)
print(add(1,3,f))# passing a function as an argument
s=[-1,-2]
for n in map(abs,s):
    print(n) # map yields abs applied to each element (an iterator in Python 3)
# reduce() lives in functools in Python 3
|
"""
this file will show kitti lidar point cloud data
in sequence continuous
"""
import numpy as np
import open3d
from open3d import *
import os
import glob
from mayavi import mlab
import pcl
import vtk
# Root of one KITTI raw-data drive; camera images and velodyne scans live beneath it.
kitti_seq_dir = '/media/jintain/sg/permanent/datasets/KITTI/videos/2011_09_26/2011_09_26_drive_0009_sync'
image_02_dir = os.path.join(kitti_seq_dir, 'image_02/data')
velo_dir = os.path.join(kitti_seq_dir, 'velodyne_points/data')
# Frame ids are the velodyne file stems; build matching image/scan path lists.
all_idx = [i.split('.')[0] for i in os.listdir(velo_dir)]
all_images = [os.path.join(image_02_dir, '{}.png'.format(i)) for i in all_idx]
all_velos = [os.path.join(velo_dir, '{}.bin'.format(i)) for i in all_idx]
assert len(all_images) == len(all_velos), \
'images and velos are not equal. {} vs {}'.format(len(all_images), len(all_velos))
def load_pc(pc_f):
    """Read a KITTI velodyne scan (flat float32 binary file) and return
    an (N, 4) array whose rows are (x, y, z, reflectance)."""
    raw = np.fromfile(pc_f, dtype=np.float32)
    return raw.reshape(-1, 4)
def show_pc():
    """Load the first frame's velodyne scan.

    The mayavi rendering below is intentionally kept disabled; enabling
    it draws the points in a 3-D window.
    """
    image_path = all_images[0]
    scan_path = all_velos[0]
    points = load_pc(scan_path)
    # fig = mlab.figure(figure=None, bgcolor=(0, 0, 0),
    #                   fgcolor=None, engine=None, size=(500, 500))
    # mlab.points3d(points[:, 0], points[:, 1], points[:, 3], mode='sphere',
    #               colormap='gnuplot', scale_factor=0.1, figure=fig)
    # mlab.show()
# Entry point: load (and optionally display) the first frame.
if __name__ == '__main__':
    show_pc()
|
# Exercise 5.25 — from the book | Not yet solved
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from typing import List, Optional
from gym import spaces
from examples.bandit import BanditEnv # type: ignore[import]
from mtenv.utils import seeding
from mtenv.utils.types import TaskObsType, TaskStateType
from mtenv.wrappers.env_to_mtenv import EnvToMTEnv
class MTBanditWrapper(EnvToMTEnv):
    """Expose BanditEnv as a multi-task env.

    The task observation/state is the bandit's per-arm reward-probability
    vector, so setting a task re-parameterises the wrapped bandit.
    """
    def set_task_observation(self, task_obs: TaskObsType) -> None:
        self._task_obs = task_obs
        # Re-parameterise the wrapped bandit with the new probabilities.
        self.env.reward_probability = self._task_obs
        # Installing a task by observation invalidates any prior task seed.
        self._is_task_seed_set = False
    def get_task_state(self) -> TaskStateType:
        # For this env, task state and task observation coincide.
        return self._task_obs
    def set_task_state(self, task_state: TaskStateType) -> None:
        self._task_obs = task_state
        self.env.reward_probability = self._task_obs
    def sample_task_state(self) -> TaskStateType:
        """Sample a `task_state` that contains all the information needed to revert to any
        other task. For examples, refer to TBD"""
        return self.observation_space["task_obs"].sample()
    def seed_task(self, seed: Optional[int] = None) -> List[int]:
        """Set the seed for task information"""
        self._is_task_seed_set = True
        _, seed = seeding.np_random(seed)
        # Seeding the task_obs sub-space makes sample_task_state deterministic.
        self.observation_space["task_obs"].seed(seed)
        return [seed]
    def assert_task_seed_is_set(self) -> None:
        """Check that the task seed is set."""
        assert self._is_task_seed_set, "please call `seed_task()` first"
def run() -> None:
    """Demo: cycle through a few sampled tasks, taking random actions."""
    n_arms = 5
    env = MTBanditWrapper(
        env=BanditEnv(n_arms),
        task_observation_space=spaces.Box(low=0.0, high=1.0, shape=(n_arms,)),
    )
    env.seed(1)
    env.seed_task(seed=2)
    for task in range(3):
        print("=== task " + str(task))
        # NOTE(review): reset_task_state() comes from the EnvToMTEnv base
        # class — presumably it samples and installs a fresh task state;
        # confirm against the wrapper implementation.
        env.reset_task_state()
        print(env.reset())
        for _ in range(5):
            action = env.action_space.sample()
            print(env.step(action))
            print(f"reward_probability: {env.unwrapped.reward_probability}")
if __name__ == "__main__":
    run()
|
# Write a function that takes an array of postive integer and returns the
# max sum of its non-adjacent numbers of that array.
# Example: I/P: [75, 105, 120, 75, 90, 135]
# Output: 330 (75, 120, 135)
def maxSubsetSumNoAdjacent(array):
    """Return the maximum sum obtainable from `array` (positive integers)
    without picking two adjacent elements.

    Classic DP: track the best sum that includes the current element
    (`incl`) and the best that excludes it (`excl`).
    O(n) time, O(1) space. Returns 0 for an empty array.

    >>> maxSubsetSumNoAdjacent([75, 105, 120, 75, 90, 135])
    330
    """
    incl = 0  # best sum where the previous element was taken
    excl = 0  # best sum where the previous element was skipped
    for num in array:
        incl, excl = excl + num, max(incl, excl)
    return max(incl, excl)
|
import constants
import json
import requests
import logging
from urlparse import urljoin
class LightspeedAPIException(Exception):
    """Base class for all Lightspeed API client errors."""
    pass
class LightspeedAPIUnavailable(LightspeedAPIException):
    """Raised when the API endpoint cannot be reached at all."""
    def __init__(self, url, message=None):
        self.message = message if message else "API is unavailable"
        self.url = url
    def __str__(self):
        return self.message
class LightspeedAPIRequestError(LightspeedAPIException):
    """Raised when the API responds with an error or unparseable data."""
    def __init__(self, message):
        super(LightspeedAPIRequestError, self).__init__(message)
class LightspeedAPI:
    """
    Lightspeed api client

    Thin wrapper over the Lightspeed Retail HTTP API using `requests`.
    All calls are account-scoped: the constructor resolves the account id
    from the auth token and builds the base URL from it.
    NOTE(review): `retry_count` is instance-wide mutable state, so this
    client is not safe for concurrent use; it also only resets after the
    retry budget is exhausted, never after a success — confirm intended.
    """
    def __init__(self, auth_token, log_level=logging.DEBUG):
        # Resolve the account and build the account-scoped base URL.
        # NOTE: this performs a network call during construction.
        self.retry_count = 0
        self.MAX_RETRIES = constants.MAX_RETRIES
        self.AUTH_TOKEN = auth_token
        self.HEADER = {"Authorization": "Bearer {}".format(self.AUTH_TOKEN)}
        self.ACCOUNT_ID = self.get_account().get('accountID')
        self.URL = constants.BASE_URL.format(ACCOUNT_ID=self.ACCOUNT_ID)
        logging.basicConfig(level=log_level)
    def get_account(self):
        """Fetch the Account record associated with the auth token."""
        return self._get_response(None, constants.ACCOUNT_URL, {}).get('Account')
    def _create_request(self, action, url=None, **kwargs):
        """Build (absolute_url, query_params) for an API call.

        `url` overrides the action->path lookup in constants.URL_MAP;
        unknown actions fall back to '/'.
        """
        datas = {}
        datas.update(kwargs)
        req_url = url or constants.URL_MAP.get(action, '/')
        request_url = self._get_url(req_url)
        return request_url, datas
    def _get_url(self, pathname):
        """ For each pathname returns corresponding URL """
        return urljoin(self.URL, pathname)
    def _get_response(self, action, request_url, datas):
        """Perform json-request to API
        :param action: method action (name of the function in API)
        :type action: str
        :param request_url: Absolute url to API.
        :type request_url: str
        :param headers: header contents to API.
        :type request_url: dict
        :param datas: Datas to send to the API.
        :type datas: dict
        :return: json response
        :rtype: dict
        """
        json_response = {}
        try:
            req = requests.get(
                request_url,
                params=datas,
                headers=self.HEADER)
            content = req.content
            logging.debug(content)
        except requests.exceptions.RequestException as e:
            """ API not available """
            raise LightspeedAPIUnavailable(self.URL, str(e))
        try:
            json_response = req.json()
        except Exception as e:
            # Non-JSON body (e.g. HTML error page) ends up here.
            raise LightspeedAPIRequestError(e)
        return json_response
    def _handle_error_response(self, action, request_url, json_response, **kwargs):
        """Raise LightspeedAPIRequestError for empty or error responses."""
        if not json_response:
            raise LightspeedAPIRequestError("Response contains no data")
        if json_response.get('errorClass'):
            logging.warning(
                "Error response received, "
                "Action:%s, Request"
                "Url:%s, kwargs:%s, error:%s" % (
                    action, request_url,
                    kwargs, json_response.get(
                        'message')))
            raise LightspeedAPIRequestError(
                "Response contains error data: {!r}".format(json_response)
            )
    def _retry_request(self, action, url, request_url, json_response, **kwargs):
        """Re-issue a failed GET up to MAX_RETRIES times, then give up.

        Returns {} once the retry budget is exhausted (callers treat an
        empty dict as "no data").
        """
        if self.retry_count <= self.MAX_RETRIES:
            self.retry_count += 1
            logging.warning(
                "Error found in request, action:%s, Request"
                "Url:%s, retry count:%s, kwargs:%s, error:%s" % (
                    action, request_url, self.retry_count,
                    kwargs, json_response.get(
                        'error') or json_response.get('errors')))
            return self.request(action, url=url, **kwargs)
        elif self.retry_count > self.MAX_RETRIES:
            logging.warning(
                "Request aborting, max retryes"
                "completed. Action:%s, Request"
                "Url:%s, retry count:%s ,kwargs:%s, error:%s" % (
                    action, request_url, self.retry_count,
                    kwargs, json_response.get(
                        'error') or json_response.get('errors')))
            self.retry_count = 0
            return {}
    def request(self, action, url=None, **kwargs):
        """Create and perform json-request to API
        :param action: method action (name of the function in API)
        :type action: str
        :param url: If this is a specific url, url should be given.
        :type action: str
        :return: json response
        :rtype: dict
        """
        request_url, datas = self._create_request(
            action, url, **kwargs)
        json_response = {}
        try:
            json_response = self._get_response(
                action, request_url, datas)
        except LightspeedAPIUnavailable:
            # Connectivity failures are retried; API-level errors are not.
            return self._retry_request(
                action, url, request_url, json_response, **kwargs)
        self._handle_error_response(action, request_url, json_response, **kwargs)
        return json_response
    def post_request(self, action, url=None, post_json=None, **kwargs):
        """Create and perform json-request to API
        :param action: method action (name of the function in API)
        :type action: str
        :param url: If this is a specific url, url should be given.
        :type action: str
        :return: json response
        :rtype: dict
        """
        request_url, datas = self._create_request(
            action, url, **kwargs)
        json_response = {}
        try:
            json_response = self._get_response_post(
                action, request_url, datas, post_json)
        except LightspeedAPIUnavailable:
            # NOTE(review): the retry re-issues a GET via self.request, so a
            # failed POST is retried as a GET without the body — confirm.
            return self._retry_request(
                action, url, request_url, json_response, **kwargs)
        self._handle_error_response(action, request_url, json_response, **kwargs)
        return json_response
    def _get_response_post(self, action, request_url, datas, post_json):
        """Perform json-request to API
        :param action: method action (name of the function in API)
        :type action: str
        :param request_url: Absolute url to API.
        :type request_url: str
        :param headers: header contents to API.
        :type request_url: dict
        :param datas: Datas to send to the API.
        :type datas: dict
        :return: json response
        :rtype: dict
        """
        json_response = {}
        try:
            req = requests.post(
                request_url,
                params=datas,
                json=post_json,
                headers=self.HEADER)
            content = req.json()
            logging.debug(content)
        except requests.exceptions.RequestException as e:
            """ API not available """
            raise LightspeedAPIUnavailable(self.URL, str(e))
        try:
            json_response = req.json()
        except Exception as e:
            raise LightspeedAPIRequestError(e)
        return json_response
    def _get_response_put(self, action, request_url, datas, post_json):
        """Perform json-request to API
        :param action: method action (name of the function in API)
        :type action: str
        :param request_url: Absolute url to API.
        :type request_url: str
        :param headers: header contents to API.
        :type request_url: dict
        :param datas: Datas to send to the API.
        :type datas: dict
        :return: json response
        :rtype: dict

        NOTE(review): unlike _get_response_post (which sends post_json
        as-is), this json.loads(post_json) — so PUT callers must pass a
        JSON *string*, not a dict. Confirm callers honour that.
        """
        json_response = {}
        try:
            req = requests.put(
                request_url,
                params=datas,
                json=json.loads(post_json),
                headers=self.HEADER)
            content = req.json()
            logging.debug(content)
        except requests.exceptions.RequestException as e:
            """ API not available """
            raise LightspeedAPIUnavailable(self.URL, str(e))
        try:
            json_response = req.json()
        except Exception as e:
            raise LightspeedAPIRequestError(e)
        return json_response
    def post_request_xml(self, action, url=None, post_xml=None, **kwargs):
        """Create and perform json-request to API
        :param action: method action (name of the function in API)
        :type action: str
        :param url: If this is a specific url, url should be given.
        :type action: str
        :return: json response
        :rtype: dict
        """
        request_url, datas = self._create_request(
            action, url, **kwargs)
        xml_response = ''
        try:
            xml_response, status_code = self._get_response_post_xml(
                action, request_url, datas, post_xml)
        # NOTE(review): bare except swallows everything (incl. KeyboardInterrupt);
        # any failure surfaces only as the generic "no data" error below.
        except:
            pass
        if not xml_response:
            raise LightspeedAPIRequestError("Response contains no data")
        return xml_response, status_code
    def _get_response_post_xml(self, action, request_url, datas, post_xml):
        """Perform xml to API
        :param action: method action (name of the function in API)
        :type action: str
        :param request_url: Absolute url to API.
        :type request_url: str
        :param headers: header contents to API.
        :type headers: dict
        :param datas: Datas to send to the API.
        :type datas: dict
        :return: xml response
        :rtype: xml
        """
        xml_response = ''
        try:
            req = requests.post(
                request_url,
                params=datas,
                data=post_xml,
                headers=self.HEADER)
        except requests.exceptions.RequestException as e:
            """ API not available """
            raise LightspeedAPIUnavailable(self.URL, str(e))
        try:
            xml_response = req.content
        except Exception as e:
            raise LightspeedAPIRequestError(e)
        else:
            return xml_response, req.status_code
    def put_request(self, action, url=None, put_json=None, **kwargs):
        """Create and perform json-request to API
        :param action: method action (name of the function in API)
        :type action: str
        :param url: If this is a specific url, url should be given.
        :type action: str
        :return: json response
        :rtype: dict
        """
        request_url, datas = self._create_request(
            action, url, **kwargs)
        json_response = {}
        try:
            json_response = self._get_response_put(
                action, request_url, datas, put_json)
        except LightspeedAPIUnavailable:
            # No retry for PUT: an unreachable API yields the "no data" error.
            pass
        self._handle_error_response(action, request_url, json_response, **kwargs)
        return json_response
    def leaf_categories(self):
        """
        :return: Level 1 categories in lightspeed.
        """
        querystring = {'nodeDepth': '1'}
        leaf_categories = self.request(action='category', **querystring)
        return leaf_categories if 'Category' in leaf_categories else None
    def categories(self):
        """
        :return: All of the categories in lightspeed.
        """
        categories = self.request(action='category')
        return categories if 'Category' in categories else None
    def manufacturers(self, offset=0):
        """
        :return: all the manufacturers.
        """
        querystring = {'offset': offset}
        manufacturers = self.request(action='manufacturer', **querystring)
        return manufacturers if 'Manufacturer' in manufacturers else None
    def product_detail(self, item_id):
        """
        :param item_id: Item id in lightspeed.
        :return: Returns product details.
        """
        querystring = {'itemID': item_id, 'load_relations': '["Images", "ItemShops", "Manufacturer", "CustomFieldValues", "ItemVendorNums"]'}
        product = self.request(action='item', **querystring)
        return product if 'Item' in product else None
    def list_products(self, cat_list, tag=None, offset=0, limit=60, reorder_level=None, shop_id=None):
        """
        :return: Returns products under a category.

        Only items with a positive reorder level or positive quantity on
        hand are included (the 'or' filter below).
        """
        querystring = {'categoryID': "IN,{0}".format(cat_list), 'offset': offset,
                       'load_relations': '["Images", "Manufacturer", "ItemShops", "Tags"]', 'limit': limit,
                       'orderby': 'createTime', 'orderby_desc': '1'}
        querystring['or'] = 'ItemShops.reorderLevel=>,0|ItemShops.qoh=>,0'
        if shop_id is not None:
            querystring['ItemShops.shopID'] = shop_id
        if tag is not None:
            querystring['tag'] = tag
        products = self.request(action='item', **querystring)
        return products if 'Item' in products else None
    def get_products(self, item_ids):
        """
        :param item_ids: list containing item_id of products.
        :return: Return products corresponding to the passed ids.
        """
        item_ids = ','.join(str(item_id) for item_id in item_ids)
        querystring = {'load_relations': '["Images", "ItemShops", "Manufacturer"]', 'itemID': 'IN,[%s]' %(item_ids)}
        products = self.request(action='item', **querystring)
        return products if 'Item' in products else None
    def search_products(self, search_query, offset=0, limit=60, reorder_level=None, shop_id=None, cat_list=[]):
        """
        :param search_query: search query.
        :return: products matching the search query.

        NOTE(review): cat_list=[] is a mutable default argument — harmless
        here because it is never mutated, but worth changing to None.
        """
        search_query = '~,%{0}%'.format(search_query)
        querystring = {'description': search_query,
                       'load_relations': '["Images", "Manufacturer", "ItemShops", "Category", "Tags"]',
                       'limit': limit, 'offset': offset, 'orderby': 'createTime', 'orderby_desc': '1'}
        if len(cat_list):
            querystring.update({'categoryID': "IN,{}".format(cat_list)})
        # querystring['ItemShops.reorderLevel'] = '>=,{0}'.format(reorder_level)
        querystring['or'] = 'ItemShops.reorderLevel=>,0|ItemShops.qoh=>,0'
        if shop_id is not None:
            querystring['ItemShops.shopID'] = shop_id
        products = self.request(action='item', **querystring)
        return products if 'Item' in products else None
    def search_products_brand(self, brand, offset=0, limit=60, reorder_level=None, shop_id=None, cat_list=[]):
        """
        :param search_query: search query.
        :return: products matching the search query.

        Same as search_products but filters by manufacturer id instead of
        description text.
        """
        querystring = {'Manufacturer.manufacturerID': brand,
                       'load_relations': '["Images", "Manufacturer", "ItemShops", "Category", "Tags"]',
                       'limit': limit, 'offset': offset, 'orderby': 'createTime', 'orderby_desc': '1'}
        if len(cat_list):
            querystring.update({'categoryID': "IN,{}".format(cat_list)})
        querystring['or'] = 'ItemShops.reorderLevel=>,0|ItemShops.qoh=>,0'
        if shop_id is not None:
            querystring['ItemShops.shopID'] = shop_id
        products = self.request(action='item', **querystring)
        return products if 'Item' in products else None
    def get_price_levels(self):
        """
        Fetch the price level.
        :return: returns the price levels.
        """
        querystring = {}
        price_levels = self.request(action='price_level', **querystring)
        return price_levels if 'PriceLevel' in price_levels else None
    def get_customers(self, email_id):
        """
        Fetches Customer matching the given email id
        :param email: email id of the Customer
        :return: returns Customers having given email id.
        """
        querystring = {'load_relations': '["Contact"]', 'Contact.email': email_id}
        customers = self.request(action='customer', **querystring)
        return customers if 'Customer' in customers else None
    def create_customer(self, customer_dict):
        """
        :param customer_dict: Dictionary containing customer data to post.
        :return:
        """
        created = self.post_request(action='customer', post_json=customer_dict)
        return created if 'Customer' in created else None
    def get_sales(self, customer_id, ordering='1'):
        """
        :param customer_id: CustomerID retrieved from lightspeed.
        :return: returns Sales records of the customer.
        """
        querystring = {'load_relations': 'all',
                       'Customer.customerID': customer_id,
                       'orderby': 'timeStamp', 'orderby_desc': ordering}
        sales = self.request(action='sale', **querystring)
        return sales if 'Sale' in sales else None
    def generate_sale(self, sale_xml):
        """Create a sale by posting raw XML; returns (response_xml, status)."""
        sale_xml, status_code = self.post_request_xml(action='sale_xml', post_xml=sale_xml)
        return sale_xml, status_code
    def generate_coupon(self, coupon_xml):
        """Create a coupon item by posting raw XML; returns (response_xml, status)."""
        sale_xml, status_code = self.post_request_xml(action='item-xml', post_xml=coupon_xml)
        return sale_xml, status_code
    def update_customer(self, customer_id, customer_dict):
        """
        :param customer_id:
        :param customer_dict:
        :return:

        NOTE(review): this goes through put_request/_get_response_put,
        which calls json.loads on the payload — customer_dict must be a
        JSON string here, despite the name. Confirm callers.
        """
        request_url = 'Customer/{}.json'.format(customer_id)
        updated = self.put_request(action='customer', url=request_url, put_json=customer_dict)
        return updated if 'Customer' in updated else None
    def get_customer_using_id(self, customer_id):
        """Fetch a customer record by its Lightspeed customerID."""
        querystring = {'customerID': customer_id}
        customers = self.request(action='customer', **querystring)
        return customers if 'Customer' in customers else None
    def get_vendors(self, offset=0):
        """Fetch one page of vendors starting at `offset`."""
        querystring = {'offset': offset}
        vendors = self.request(action='vendor', **querystring)
        return vendors if 'Vendor' in vendors else None
    def fetch_all_products(self, offset=0, limit=100, reorder_level=None, shop_id=None):
        """
        :return: Returns all the products

        Pages through items (newest first) with every relation needed for
        a full export loaded in one request.
        """
        relations = [
            "Category",
            "TaxClass",
            "Department",
            "ItemAttributes",
            "ItemAttributes.ItemAttributeSet",
            "Manufacturer",
            "Note",
            "Season",
            "Images",
            "ItemShops",
            "ItemShelfLocations",
            "ItemShelfLocations.ShelfLocation",
            "ItemVendorNums",
            "ItemComponents",
            "ItemECommerce",
            "TagRelations",
            "TagRelations.Tag",
            "CustomFieldValues",
            "CustomFieldValues.value",
            "ItemPrices"
        ]
        querystring = {
            'offset': offset,
            'load_relations': json.dumps(relations),
            'limit': limit,
            'orderby': 'createTime',
            'orderby_desc': '1',
            'ItemPrices.price': '>,0'
        }
        products = self.request(action='item', **querystring)
        return products if 'Item' in products else None
    def fetch_all_customers(self, offset=0, limit=100):
        """
        :return: Returns all the customers
        """
        relations = [
            "Contact",
            "CustomerType",
            "Tags"
        ]
        querystring = {
            'offset': offset,
            'load_relations': json.dumps(relations),
            'limit': limit
        }
        customers = self.request(action='customer', **querystring)
        return customers if 'Customer' in customers else None
|
# Python 2 script: read n (digit count) and t (divisor) and print an
# n-digit number divisible by t, or -1 when none fits.
inp = map(int,raw_input().split())
n = inp[0]
t = inp[1]
number = 0
# Smallest n-digit number, 10^(n-1).
temp = 10**(n-1)
# Python 2 integer division: floor of temp/t.
fraction = temp/t
if (temp%t == 0):
    # NOTE(review): when temp itself is divisible, temp would already be a
    # valid answer; using temp + t instead presumably matches an extra
    # problem constraint — confirm against the original statement.
    number = temp + t
else:
    # Round up to the next multiple of t.
    number = (fraction+1)*t
number1 = str(number)
# NOTE(review): answers ending in '0' are bumped to the next multiple —
# again presumably a problem-specific constraint; verify.
if number1[-1] == '0':
    number += t
lis = list(str(number))
# The adjustments above can push past n digits; report -1 in that case.
if len(lis) == n:
    print number
else:
    print '-1'
|
#!/usr/bin/env python
from __future__ import print_function
import fastjet as fj
import fjcontrib
import fjext
import tqdm
import argparse
import os
import numpy as np
from heppy.pythiautils import configuration as pyconf
import pythia8
import pythiafjext
import pythiaext
def groom(jet, jetR, zcut, beta):
    """Soft-drop groom `jet` and return the grooming result.

    The GroomerShop is a temporary, so the returned object references
    structure owned by a shop that dies when this call returns.
    """
    shop = fjcontrib.GroomerShop(jet)
    return shop.soft_drop(beta, zcut, jetR)
def groom_copy(jet, jetR, zcut, beta):
    """Soft-drop groom `jet` and return a detached copy of the result.

    Unlike groom(), the .copy() makes the result safe to use after the
    temporary GroomerShop has been destroyed.
    """
    shop = fjcontrib.GroomerShop(jet)
    return shop.soft_drop(beta, zcut, jetR).copy()
def main():
    """Generate pythia8 events, cluster jets on the fly and exercise the
    GroomerShop soft-drop interface (with and without result copies)."""
    parser = argparse.ArgumentParser(description='pythia8 fastjet on the fly', prog=os.path.basename(__file__))
    pyconf.add_standard_pythia_args(parser)
    parser.add_argument('--ignore-mycfg', help="ignore some settings hardcoded here", default=False, action='store_true')
    args = parser.parse_args()
    # print the banner first
    fj.ClusterSequence.print_banner()
    print()
    # set up our jet definition and a jet selector
    jet_R0 = 0.4
    jet_def = fj.JetDefinition(fj.antikt_algorithm, jet_R0)
    jet_selector = fj.SelectorPtMin(100.0) & fj.SelectorAbsEtaMax(1)
    print(jet_def)
    all_jets = []
    # Hard-process pT cut so the selected 100 GeV jets are populated.
    mycfg = ['PhaseSpace:pThatMin = 100']
    if args.ignore_mycfg:
        mycfg = []
    pythia = pyconf.create_and_init_pythia_from_args(args, mycfg)
    if not pythia:
        print("[e] pythia initialization failed.")
        return
    if args.nev < 10:
        args.nev = 10
    for i in tqdm.tqdm(range(args.nev)):
        if not pythia.next():
            continue
        attach_pythia_particle_info = True
        parts = pythiafjext.vectorize_select(pythia, [pythiafjext.kFinal], attach_pythia_particle_info)
        jets = jet_selector(jet_def(parts))
        # Need at least a dijet to demonstrate the shop lifetime issue.
        if len(jets) < 2:
            continue
        gsetting = {'sd': [0.1, 0]}
        g1 = groom(jets[0], 0.4, 0.1, 0)
        print(' |-> GroomerShop::soft_drop 1 no copy', g1.as_string())
        g2 = groom(jets[1], 0.4, 0.1, 0)
        print(' |-> GroomerShop::soft_drop 2 no copy', g2.as_string())
        g1_copy = groom_copy(jets[0], 0.4, 0.1, 0)
        g2_copy = groom_copy(jets[1], 0.4, 0.1, 0)
        print(' |-> GroomerShop::soft_drop 1 w/ copy', g1_copy.as_string())
        print(' |-> GroomerShop::soft_drop 1 no copy', g1.as_string(), ' * this can be a hard to debug runtime error *')
        print(' |-> GroomerShop::soft_drop 2 w/ copy', g2_copy.as_string())
        print(' |-> GroomerShop::soft_drop 2 no copy', g2.as_string())
        # Keeping the shops alive in a list avoids the dangling-result issue.
        gshops = [fjcontrib.GroomerShop(j, 0.4, fj.antikt_algorithm) for j in jets]
        for ij, jj in enumerate(jets):
            print('orig jet:', jj, 'from gshop:', gshops[ij].jet(), 'SD:', gshops[ij].soft_drop(0.0, 0.1, 0.4).as_string())
            print(gshops[ij].lund_generator().description(), '\n number of splits:', len(gshops[ij].lund_splits()))
        # Same exercise with Cambridge/Aachen reclustering.
        gshops = [fjcontrib.GroomerShop(j, 0.4, fj.cambridge_algorithm) for j in jets]
        for ij, jj in enumerate(jets):
            print('orig jet:', jj, 'from gshop:', gshops[ij].jet(), 'SD:', gshops[ij].soft_drop(0.0, 0.1, 0.4).as_string())
            print(gshops[ij].lund_generator().description(), '\n number of splits:', len(gshops[ij].lund_splits()))
        # print (' xxx ')
        # fjcontrib.setGroomer(jets[0], 0.4)
        # fjcontrib.setGroomer(jets[0], 0.4)
        # fjcontrib.setGroomer(jets[0], 0.4)
        # print (' xxx ', fj.cambridge_algorithm)
        # algo = fj.cambridge_algorithm
        # this is removed from the library - GroomerShopUI
        # [print(' |-> Groomed SD', fjcontrib.groom(j).soft_drop(0.0, 0.1, 0.4).as_string()) for j in jets]
        # [print(' |-> Groomed max-tf', fjcontrib.groom(j).max_tf().as_string()) for j in jets]
        print('---')
    pythia.stat()
if __name__ == '__main__':
    main()
|
from spectree import SpecTree
# Smoke test: constructing SpecTree with the "quart" backend verifies the
# optional quart plugin can be imported; any ImportError aborts before the print.
SpecTree("quart")
print("=> passed quart plugin import test")
|
import json
# Check whether the username already exists in the store.
def is_user_exists(username):
    """Return True if `username` is already stored (one name per line) in
    username.json, False otherwise; prints a message and returns None
    when the file does not exist yet."""
    filename = "username.json"
    try:
        with open(filename, 'r') as file:
            for stored_name in file:
                if stored_name.rstrip() == username:
                    return True
            return False
    except FileNotFoundError:
        print("The file is not found!")
# Append the new username to the store file.
def get_new_username(username):
    """Append `username` (plus a newline) to username.json, creating the
    file if needed; prints a message if the write fails."""
    filename = "username.json"
    try:
        with open(filename, "a") as file:
            file.write(username + "\n")
    except Exception:
        print("File not found!")
# Greet the user: welcome back known names, register new ones.
def greet_user():
    """Prompt for a name; greet known users, register and welcome new ones."""
    username=input("Please input your name:\n")
    if is_user_exists(username):
        print("Hello,"+username)
    else:
        # Unknown (or file missing, which returns None): record the name.
        get_new_username(username)
        print("We will wait you come back,"+username.title())
greet_user();
|
def calcular_Distancia(s1, s2):
    """Return the Levenshtein (edit) distance between s1 and s2.

    Standard dynamic programming over a dict keyed by (i, j): tbl[i, j] is
    the distance between the first i chars of s1 and the first j of s2.
    O(len(s1) * len(s2)) time and space.
    """
    m = len(s1) + 1
    n = len(s2) + 1
    tbl = {}
    for i in range(m):
        tbl[i, 0] = i  # deleting i characters
    for j in range(n):
        tbl[0, j] = j  # inserting j characters
    for i in range(1, m):
        for j in range(1, n):
            cost = 0 if s1[i-1] == s2[j-1] else 1
            tbl[i, j] = min(tbl[i, j-1]+1, tbl[i-1, j]+1, tbl[i-1, j-1]+cost)
    # Index explicitly: the original returned tbl[i, j] using leftover loop
    # variables, which gave the right answer only by coincidence when a
    # string was empty.
    return tbl[m - 1, n - 1]
# Read N dictionary words and M words to analyse; for each analysed word,
# collect the dictionary words within edit distance 2.
N, M = [int(i) for i in input().split()]
p_dicionario = [0 for i in range(N)]
p_analizar = [0 for i in range(M)]
# One bucket of matches per word to analyse.
in_dicionario = [[] for i in range(len(p_analizar))]
for i in range(N):
    linha_n = input().lower()
    # Words longer than 20 characters stop the reading loop entirely.
    if (len(linha_n)) > 20:
        break
    p_dicionario[i] = linha_n
for j in range(M):
    linha_m = input().lower()
    if (len(linha_m)) > 20:
        break
    p_analizar[j] = linha_m
print()
for k in p_dicionario:
    for l in p_analizar:
        # NOTE(review): `temp` is reset to 0 on every inner iteration, so the
        # increment below never has any effect.
        temp = 0
        if calcular_Distancia(k, l) <= 2:
            # NOTE(review): this looks broken — in_dicionario[temp] is a list,
            # and indexing it with the string `l` raises TypeError. Presumably
            # the intent was in_dicionario[index_of(l)].append(k); confirm.
            in_dicionario[temp][l].append(k)
            temp +=1
for i in in_dicionario:
    if i == []:
        print()
        print()
    else:
        for j in i:
            print(j, end=" ")
|
# Example 1: pick the multiples of 3 out of a list.
a = [1, 2, 3, 4, 5, 6, 7, 8, 9]
newlist = list(filter(lambda value: value % 3 == 0, a))
print(newlist)
|
from functions import sieve
def factors(n, primes):
    """Return the distinct primes from `primes` that divide n, in order.

    Each prime is tried exactly once; n is reduced by a single factor when
    a prime divides it, so prime powers are still reported only once.
    """
    found = []  # renamed: the original local shadowed the function name
    for p in primes:
        if n % p == 0:
            # Integer division: the original `n /= i` degraded n to float
            # under Python 3, risking precision loss for large inputs.
            n //= p
            found.append(p)
    return found
def main():
    """Search for the first integer j such that j..j+3 each have exactly
    four distinct prime factors (Project Euler 47 style)."""
    primes = sieve(20000)
    for j in range(0,1000000):
        # Progress indicator every 1000 candidates.
        if j%1000 == 0:
            print(j)
        if all(len(factors(j+i,primes))==4 for i in range(4)):
            print(f'----------->{j}<-------------')
            break
if __name__ == '__main__':
    main()
|
from django.urls import path
from aliados.views import ListarAliados, InsertarAliado, EditarAliado, BorrarAliado
# CRUD routes for the "aliados" app.
urlpatterns=[
    path('aliados', ListarAliados.as_view(), name='aliados_list'),
    path('aliados/new', InsertarAliado.as_view(), name='insertar_aliado'),
    # NOTE(review): there is no separator between "edit"/"delete" and the pk,
    # so these match URLs like "aliados/edit7" — confirm that is intended.
    path('aliados/edit<int:pk>', EditarAliado.as_view(), name='editar_aliado'),
    path('aliados/delete<int:pk>', BorrarAliado.as_view(), name='borrar_aliado'),
]
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.