blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1df75ea249e9b902876129b1f377f9baad11d651 | 0210dc6652a9abd37d8200b1338e0db177d2b199 | /model/transforms/autoregressive/ar_linear.py | e6a998a0c34555211f6eb3e208946e72e391fc78 | [
"MIT"
] | permissive | dihjiang/argmax_flows | 77fdb303eb6e508f07cd5b2b7b84855e37c4511b | 4ffff4bd6f7b25e20292eff6bad2bf5a962e8d39 | refs/heads/main | 2023-05-14T07:03:24.527960 | 2021-06-01T12:10:18 | 2021-06-01T12:10:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,269 | py | import torch
from survae.utils import sum_except_batch
from .ar import AutoregressiveBijection
class AdditiveAutoregressiveBijection(AutoregressiveBijection):
    """Autoregressive bijection that only shifts its input.

    Forward map: z = x + params. A pure shift is volume preserving, so the
    log |det Jacobian| returned alongside the output is identically zero.
    """

    def _num_params(self):
        # A single parameter (the shift) per element.
        return 1

    def _forward(self, x, params):
        shifted = x + params
        log_det = x.new_zeros(x.shape[0])  # zero ldj: shift preserves volume
        return shifted, log_det

    def _element_inverse(self, z, element_params):
        # Undo the element-wise shift.
        return z - element_params
class AffineAutoregressiveBijection(AutoregressiveBijection):
    """Autoregressive bijection applying an element-wise affine map.

    Forward map: z = exp(log_scale) * x + shift, where the trailing axis of
    ``params`` holds (log_scale, shift) per element.
    """

    def _num_params(self):
        # Two parameters per element: unconstrained log-scale and shift.
        return 2

    def _forward(self, x, params):
        assert params.shape[-1] == self._num_params()
        log_scale, shift = self._split_params(params)
        z = torch.exp(log_scale) * x + shift
        # log |det J| is the sum of the log-scales over all non-batch dims.
        ldj = sum_except_batch(log_scale)
        return z, ldj

    def _element_inverse(self, z, element_params):
        assert element_params.shape[-1] == self._num_params()
        log_scale, shift = self._split_params(element_params)
        # Invert the affine map: x = (z - shift) / scale.
        return (z - shift) / torch.exp(log_scale)

    def _split_params(self, params):
        # Trailing-axis layout: index 0 -> log-scale, index 1 -> shift.
        return params[..., 0], params[..., 1]
| [
"didrik.nielsen@gmail.com"
] | didrik.nielsen@gmail.com |
ec9449b8fc562621af4968040ac96cc6fe147d4b | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_67/87.py | c30dc2a35d202661326157975327a7f60af30452 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,790 | py | #! /usr/bin/env python
# vim: set et ts=4 sw=4 ci cino=(0:
import sys
import os
import math
import binascii
def main():
    """Google Code Jam solver (Python 2).

    Reads the input file named on the command line and prints one
    ``Case #i: t`` line per test case, where ``t`` counts how many sweep
    passes run before no visited cell above the scanned diagonals is set.
    """
    f = open(sys.argv[1])
    ntest = int(f.readline().strip())
    for nt in xrange(ntest):
        nrect = int(f.readline().strip())
        rects = []
        maxx = 0
        maxy = 0
        for r in xrange(nrect):
            # Each rectangle arrives as two corner points; normalize so that
            # (xa, ya) is the lower-left and (xb, yb) the upper-right corner.
            l = [ int(x) for x in f.readline().strip().split() ]
            if len(l) != 4:
                print "Error ", l
                sys.exit(1)
            xa = l[0]
            ya = l[1]
            xb = l[2]
            yb = l[3]
            if xa > xb:
                xa,xb = xb,xa
            if ya > yb:
                ya,yb = yb,ya
            if xb > maxx:
                maxx= xb
            if yb > maxy:
                maxy = yb
            rects.append( (xa, ya, xb, yb))
        # Occupancy grid with one spare row on top so grid[y+1] stays valid.
        grid = [[ 0 for x in xrange(maxx)] for y in xrange(maxy+2) ]
        for rec in rects:
            xa, ya, xb, yb = rec
            for x in xrange(xa-1, xb):
                for y in xrange(ya, yb+1):
                    grid[y][x] = 1
        # for xx in grid:
        #     print xx
        found = True
        t = 0
        # Repeat passes until no visited cell above a diagonal remains set.
        while found:
            found = False
            t += 1
            # Walk every up-right diagonal: first those starting on row maxy
            # (columns maxx-1 .. 1), then those starting in column 0.
            sy = maxy
            for sx in xrange(maxx-1, 0, -1):
                p = 0
                x = sx
                y = sy
                while True:
                    n = grid[y][x]
                    # Two consecutive equal cells along the diagonal write
                    # their common value into the cell above the current one.
                    if p == 1 and n == 1:
                        grid[y+1][x] = 1
                    elif p == 0 and n == 0:
                        grid[y+1][x] = 0
                    if grid[y+1][x] == 1:
                        found = True
                    p = n
                    x += 1
                    y -= 1
                    if x >= maxx or y < 0:
                        break
            sx = 0
            # Same scan for the diagonals starting in column 0, rows maxy..0.
            for sy in xrange(maxy, -1, -1):
                p = 0
                x = sx
                y = sy
                while True:
                    n = grid[y][x]
                    if p == 1 and n == 1:
                        grid[y+1][x] = 1
                    elif p == 0 and n == 0:
                        grid[y+1][x] = 0
                    if grid[y+1][x] == 1:
                        found = True
                    p = n
                    x += 1
                    y -= 1
                    if x >= maxx or y < 0:
                        break
            # found = doit( sx, sy, grid, grid, maxx )
        # print "New"
        # for xx in grid:
        #     print xx
        print "Case #%d: %d" % (nt + 1, t)


if __name__ == "__main__":
    main()
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
388b5093f3eea31cab989db5d839937c077d237f | df2cbe914f463ad050d7ed26194424afbe3a0a52 | /addons/stock_account/tests/test_account_move.py | 7b2607004c5d051787e4e778dcda9115697a8446 | [
"Apache-2.0"
] | permissive | SHIVJITH/Odoo_Machine_Test | 019ed339e995be980606a2d87a63312ddc18e706 | 310497a9872db7844b521e6dab5f7a9f61d365a4 | refs/heads/main | 2023-07-16T16:23:14.300656 | 2021-08-29T11:48:36 | 2021-08-29T11:48:36 | 401,010,175 | 0 | 0 | Apache-2.0 | 2021-08-29T10:13:58 | 2021-08-29T10:13:58 | null | UTF-8 | Python | false | false | 5,741 | py | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo.addons.account.tests.common import AccountTestInvoicingCommon
from odoo.addons.stock_account.tests.test_stockvaluation import _create_accounting_data
from odoo.tests.common import tagged, Form
@tagged("post_install", "-at_install")
class TestAccountMove(AccountTestInvoicingCommon):
@classmethod
def setUpClass(cls, chart_template_ref=None):
super().setUpClass(chart_template_ref=chart_template_ref)
(
cls.stock_input_account,
cls.stock_output_account,
cls.stock_valuation_account,
cls.expense_account,
cls.stock_journal,
) = _create_accounting_data(cls.env)
cls.product_A = cls.env["product.product"].create(
{
"name": "Product A",
"type": "product",
"default_code": "prda",
"categ_id": cls.env.ref("product.product_category_all").id,
"taxes_id": [(5, 0, 0)],
"supplier_taxes_id": [(5, 0, 0)],
"lst_price": 100.0,
"standard_price": 10.0,
"property_account_income_id": cls.company_data["default_account_revenue"].id,
"property_account_expense_id": cls.company_data["default_account_expense"].id,
}
)
cls.product_A.categ_id.write(
{
"property_stock_account_input_categ_id": cls.stock_input_account.id,
"property_stock_account_output_categ_id": cls.stock_output_account.id,
"property_stock_valuation_account_id": cls.stock_valuation_account.id,
"property_stock_journal": cls.stock_journal.id,
"property_valuation": "real_time",
"property_cost_method": "standard",
}
)
def test_standard_perpetual_01_mc_01(self):
rate = self.currency_data["rates"].sorted()[0].rate
move_form = Form(self.env["account.move"].with_context(default_move_type="out_invoice"))
move_form.partner_id = self.partner_a
move_form.currency_id = self.currency_data["currency"]
with move_form.invoice_line_ids.new() as line_form:
line_form.product_id = self.product_A
line_form.tax_ids.clear()
invoice = move_form.save()
self.assertAlmostEqual(self.product_A.lst_price * rate, invoice.amount_total)
self.assertAlmostEqual(self.product_A.lst_price * rate, invoice.amount_residual)
self.assertEqual(len(invoice.mapped("line_ids")), 2)
self.assertEqual(len(invoice.mapped("line_ids.currency_id")), 1)
invoice._post()
self.assertAlmostEqual(self.product_A.lst_price * rate, invoice.amount_total)
self.assertAlmostEqual(self.product_A.lst_price * rate, invoice.amount_residual)
self.assertEqual(len(invoice.mapped("line_ids")), 4)
self.assertEqual(len(invoice.mapped("line_ids").filtered("is_anglo_saxon_line")), 2)
self.assertEqual(len(invoice.mapped("line_ids.currency_id")), 2)
def test_fifo_perpetual_01_mc_01(self):
self.product_A.categ_id.property_cost_method = "fifo"
rate = self.currency_data["rates"].sorted()[0].rate
move_form = Form(self.env["account.move"].with_context(default_move_type="out_invoice"))
move_form.partner_id = self.partner_a
move_form.currency_id = self.currency_data["currency"]
with move_form.invoice_line_ids.new() as line_form:
line_form.product_id = self.product_A
line_form.tax_ids.clear()
invoice = move_form.save()
self.assertAlmostEqual(self.product_A.lst_price * rate, invoice.amount_total)
self.assertAlmostEqual(self.product_A.lst_price * rate, invoice.amount_residual)
self.assertEqual(len(invoice.mapped("line_ids")), 2)
self.assertEqual(len(invoice.mapped("line_ids.currency_id")), 1)
invoice._post()
self.assertAlmostEqual(self.product_A.lst_price * rate, invoice.amount_total)
self.assertAlmostEqual(self.product_A.lst_price * rate, invoice.amount_residual)
self.assertEqual(len(invoice.mapped("line_ids")), 4)
self.assertEqual(len(invoice.mapped("line_ids").filtered("is_anglo_saxon_line")), 2)
self.assertEqual(len(invoice.mapped("line_ids.currency_id")), 2)
def test_average_perpetual_01_mc_01(self):
self.product_A.categ_id.property_cost_method = "average"
rate = self.currency_data["rates"].sorted()[0].rate
move_form = Form(self.env["account.move"].with_context(default_move_type="out_invoice"))
move_form.partner_id = self.partner_a
move_form.currency_id = self.currency_data["currency"]
with move_form.invoice_line_ids.new() as line_form:
line_form.product_id = self.product_A
line_form.tax_ids.clear()
invoice = move_form.save()
self.assertAlmostEqual(self.product_A.lst_price * rate, invoice.amount_total)
self.assertAlmostEqual(self.product_A.lst_price * rate, invoice.amount_residual)
self.assertEqual(len(invoice.mapped("line_ids")), 2)
self.assertEqual(len(invoice.mapped("line_ids.currency_id")), 1)
invoice._post()
self.assertAlmostEqual(self.product_A.lst_price * rate, invoice.amount_total)
self.assertAlmostEqual(self.product_A.lst_price * rate, invoice.amount_residual)
self.assertEqual(len(invoice.mapped("line_ids")), 4)
self.assertEqual(len(invoice.mapped("line_ids").filtered("is_anglo_saxon_line")), 2)
self.assertEqual(len(invoice.mapped("line_ids.currency_id")), 2)
| [
"36736117+SHIVJITH@users.noreply.github.com"
] | 36736117+SHIVJITH@users.noreply.github.com |
4abeb645d33ce6f4cf795c8c9e123787d71aec95 | b427fc41375fd95d6da5c7168a823f1b286bb250 | /imageAugmentation/noise.py | dcf11f01bcfd107942d802d3774aa805d2e5191e | [] | no_license | heixialeeLeon/DeepFundation | 40b9cc14ed639d1b9cd34dad32c9b497c9e23927 | f42560cc25e447f812bdfabd24d426bd9b49bb94 | refs/heads/master | 2020-04-18T06:11:05.222490 | 2019-08-02T01:09:21 | 2019-08-02T01:09:21 | 167,309,579 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 743 | py | import numpy as np
from numpy import random
import torch
from torchvision import transforms
import cv2
from PIL import Image, ImageFilter
import skimage
def RandomNoise(img, noise):
    """Corrupt an image with a randomly chosen noise model.

    One of gaussian, speckle or salt-and-pepper noise is applied; ``noise``
    is the variance for the first two models and the amount for s&p.
    Returns a uint8 array of the same shape as ``img``.
    """
    scaled = img.astype(np.float32) / 255.0
    noisers = [
        lambda arr: skimage.util.random_noise(arr, 'gaussian', mean=0, var=noise),
        lambda arr: skimage.util.random_noise(arr, 'speckle', mean=0, var=noise),
        lambda arr: skimage.util.random_noise(arr, 's&p', amount=noise),
    ]
    # numpy's random.choice picks one noiser uniformly at random.
    noised = random.choice(noisers)(scaled)
    return (noised * 255).astype(np.uint8)
if __name__ == "__main__":
    # Quick visual check: show the original test image next to a noised copy.
    img = cv2.imread("../test/1.jpg")
    cv2.imshow("raw", img)
    img_blur = RandomNoise(img, 0.01)
    cv2.imshow("cv2", img_blur)
    cv2.waitKey(-1)
"lipeizhao@em-data.com.cn"
] | lipeizhao@em-data.com.cn |
2e2f683e57de6ef5a99d838c90b3c50d17ab40b9 | d2c4934325f5ddd567963e7bd2bdc0673f92bc40 | /tests/artificial/transf_None/trend_PolyTrend/cycle_7/ar_12/test_artificial_1024_None_PolyTrend_7_12_0.py | c929841014abe7e46526d945e3cdc89788a4419e | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | jmabry/pyaf | 797acdd585842474ff4ae1d9db5606877252d9b8 | afbc15a851a2445a7824bf255af612dc429265af | refs/heads/master | 2020-03-20T02:14:12.597970 | 2018-12-17T22:08:11 | 2018-12-17T22:08:11 | 137,104,552 | 0 | 0 | BSD-3-Clause | 2018-12-17T22:08:12 | 2018-06-12T17:15:43 | Python | UTF-8 | Python | false | false | 265 | py | import pyaf.Bench.TS_datasets as tsds
import pyaf.tests.artificial.process_artificial_dataset as art
# Run the artificial-dataset benchmark: 1024 daily points, polynomial trend,
# cycle length 7, no transform, zero noise, AR order 12, no exogenous series.
art.process_dataset(N = 1024 , FREQ = 'D', seed = 0, trendtype = "PolyTrend", cycle_length = 7, transform = "None", sigma = 0.0, exog_count = 0, ar_order = 12);
"antoine.carme@laposte.net"
] | antoine.carme@laposte.net |
611d72e952623f37ad15946469334fd8729f472b | 25310086303e892b29754ad3287ede45ee189f4c | /torcms/script/script_sitemap.py | bf194a786e2fea0b7465ee171ec65f7baa38fa68 | [
"MIT"
] | permissive | CW0606/TorCMS | 41d65431a439bba0360305291f34fb4ed1b32b9c | 23ddc4d2c27bda890ef2a7998770888857dc6a61 | refs/heads/master | 2021-05-15T13:42:40.041532 | 2017-10-17T00:40:21 | 2017-10-17T00:40:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,206 | py | # -*- coding: utf-8 -*-
'''
Generating sitemap.
'''
import os
from torcms.model.post_model import MPost
from torcms.model.wiki_model import MWiki
from config import router_post, SITE_CFG
def gen_post_map(file_name, ext_url=''):
    '''
    Append the urls of all posts to ``file_name``.

    :param file_name: output file (opened in append mode).
    :param ext_url: extra path segment before the uid
                    (``'_edit'`` for the edit map, empty for the sitemap).
    :return: None
    '''
    with open(file_name, 'a') as outfile:
        for kind_key in router_post:
            for post in MPost.query_all(kind=kind_key, limit=1000000):
                url = os.path.join(SITE_CFG['site_url'],
                                   router_post[post.kind],
                                   ext_url,
                                   post.uid)
                outfile.write(f'{url}\n')
def gen_wiki_map(file_name, ext_url=''):
    '''
    Append the urls of all wiki articles and pages to ``file_name``.

    :param file_name: output file (opened in append mode).
    :param ext_url: when non-empty, edit urls (``…/_edit/…``) are generated
                    instead of the plain view urls.
    :return: None
    '''
    # BUGFIX: the suffix conditional must be parenthesized. The original
    # ``'wiki' + '/_edit' if len(ext_url) else ''`` binds as
    # ``('wiki' + '/_edit') if … else ''`` and silently dropped the whole
    # 'wiki'/'page' segment from the plain sitemap urls.
    wiki_recs = MWiki.query_all(limit=10000, kind='1')
    with open(file_name, 'a') as fileout:
        for rec in wiki_recs:
            # Wiki articles are addressed by title.
            url = os.path.join(SITE_CFG['site_url'],
                               'wiki' + ('/_edit' if ext_url else ''),
                               rec.title)
            fileout.write('{url}\n'.format(url=url))
    ## page.
    page_recs = MWiki.query_all(limit=10000, kind='2')
    with open(file_name, 'a') as fileout:
        for rec in page_recs:
            # Pages are addressed by uid.
            url = os.path.join(SITE_CFG['site_url'],
                               'page' + ('/_edit' if ext_url else ''),
                               rec.uid)
            fileout.write('{url}\n'.format(url=url))
def run_sitemap(_):
    '''
    Rebuild the sitemap file from scratch.

    :param _: unused CLI arguments.
    :return: None
    '''
    out_file = 'xx_sitemap.txt'
    # Start clean so the append-mode generators do not duplicate entries.
    if os.path.exists(out_file):
        os.remove(out_file)
    gen_wiki_map(out_file)
    gen_post_map(out_file)
def run_editmap(_):
    '''
    Rebuild the file of edit urls from scratch.

    :param _: unused CLI arguments.
    :return: None
    '''
    out_file = 'xx_editmap.txt'
    # Start clean so the append-mode generators do not duplicate entries.
    if os.path.exists(out_file):
        os.remove(out_file)
    gen_wiki_map(out_file, ext_url='_edit')
    gen_post_map(out_file, ext_url='_edit')
| [
"bukun@osgeo.cn"
] | bukun@osgeo.cn |
5c822d163aa8bd895275596a4e7a02de83a3ff35 | b7f3edb5b7c62174bed808079c3b21fb9ea51d52 | /third_party/blink/web_tests/external/wpt/cookies/resources/dropSecure.py | 9820295697d22e258b885da239db9fd828b64d3c | [
"LGPL-2.0-or-later",
"GPL-1.0-or-later",
"MIT",
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer",
"LGPL-2.1-only",
"GPL-2.0-only",
"LGPL-2.0-only",
"BSD-2-Clause",
"LicenseRef-scancode-other-copyleft",
"BSD-3-Clause"
] | permissive | otcshare/chromium-src | 26a7372773b53b236784c51677c566dc0ad839e4 | 64bee65c921db7e78e25d08f1e98da2668b57be5 | refs/heads/webml | 2023-03-21T03:20:15.377034 | 2020-11-16T01:40:14 | 2020-11-16T01:40:14 | 209,262,645 | 18 | 21 | BSD-3-Clause | 2023-03-23T06:20:07 | 2019-09-18T08:52:07 | null | UTF-8 | Python | false | false | 483 | py | from helpers import makeDropCookie, setNoCacheAndCORSHeaders
def main(request, response):
    """Respond to `/cookie/drop/secure` by expiring the two cookies set by
    `setSecureTestCookies()` and returning a JSON success payload."""
    headers = setNoCacheAndCORSHeaders(request, response)
    # Expire both test cookies (the boolean marks them as non-secure drops).
    for cookie_name in (b"alone_secure", b"alone_insecure"):
        headers.append(makeDropCookie(cookie_name, False))
    return headers, b'{"success": true}'
| [
"commit-bot@chromium.org"
] | commit-bot@chromium.org |
ace3fa96b99c770cfc4abe0e6bbc691329863df0 | 3433314089e976a121e0a4ff7320d1214faabc8b | /autoarray/util/mapper_util.py | 71ba884a15c17b9107ebb5ccd6c466f43e568f46 | [
"MIT"
] | permissive | Sketos/PyAutoArray | ab7a63543a35401560ee575c4a8ede7a2561d743 | 72dc7e8d1c38786915f82a7e7284239e5ce87624 | refs/heads/master | 2021-02-12T19:06:17.247806 | 2020-04-10T13:15:00 | 2020-04-10T13:15:00 | 244,619,959 | 0 | 0 | MIT | 2020-03-03T17:21:03 | 2020-03-03T11:35:40 | Python | UTF-8 | Python | false | false | 7,865 | py | import numpy as np
from autoarray import decorator_util
@decorator_util.jit()
def mapping_matrix_from_pixelization_1d_index_for_sub_mask_1d_index(
    pixelization_1d_index_for_sub_mask_1d_index,
    pixels,
    total_mask_pixels,
    mask_1d_index_for_sub_mask_1d_index,
    sub_fraction,
):
    """Computes the mapping matrix, by iterating over the known mappings between the sub-grid and pixelization.

    Parameters
    -----------
    pixelization_1d_index_for_sub_mask_1d_index : ndarray
        The mappings between the observed grid's sub-pixels and pixelization's pixels.
    pixels : int
        The number of pixels in the pixelization.
    total_mask_pixels : int
        The number of data pixels in the observed data and thus on the grid.
    mask_1d_index_for_sub_mask_1d_index : ndarray
        The mappings between the observed grid's sub-pixels and observed grid's pixels.
    sub_fraction : float
        The fractional area each sub-pixel takes up in a pixel.
    """

    # One row per data pixel, one column per pixelization pixel.
    mapping_matrix = np.zeros((total_mask_pixels, pixels))

    # Each sub-pixel adds ``sub_fraction`` to the entry linking its host data
    # pixel to the pixelization pixel it maps to.
    for sub_mask_1d_index in range(mask_1d_index_for_sub_mask_1d_index.shape[0]):

        mapping_matrix[
            mask_1d_index_for_sub_mask_1d_index[sub_mask_1d_index],
            pixelization_1d_index_for_sub_mask_1d_index[sub_mask_1d_index],
        ] += sub_fraction

    return mapping_matrix
@decorator_util.jit()
def pixelization_1d_index_for_voronoi_sub_mask_1d_index_from_grids_and_geometry(
    grid,
    nearest_pixelization_1d_index_for_mask_1d_index,
    mask_1d_index_for_sub_mask_1d_index,
    pixelization_grid,
    pixel_neighbors,
    pixel_neighbors_size,
):
    """ Compute the mappings between a set of sub-grid pixels and pixelization pixels, using information on \
    how the pixels hosting each sub-pixel map to their closest pixelization pixel on the image-plane pix-grid \
    and the pixelization's pixel centres.

    To determine the complete set of sub-pixel to pixelization pixel mappings, we must pair every sub-pixel to \
    its nearest pixel. Using a full nearest neighbor search to do this is slow, thus the pixel neighbors (derived via \
    the Voronoi grid) are used to localize each nearest neighbor search by using a graph search.

    Parameters
    ----------
    grid : Grid
        The grid of (y,x) arc-second coordinates at the centre of every unmasked pixel, which has been traced to \
        to an irgrid via lens.
    nearest_pixelization_1d_index_for_mask_1d_index : ndarray
        A 1D array that maps every grid pixel to its nearest pix-grid pixel (as determined on the unlensed \
        2D array).
    pixelization_grid : (float, float)
        The (y,x) centre of every Voronoi pixel in arc-seconds.
    pixel_neighbors : ndarray
        An array of length (voronoi_pixels) which provides the index of all neighbors of every pixel in \
        the Voronoi grid (entries of -1 correspond to no neighbor).
    pixel_neighbors_size : ndarray
        An array of length (voronoi_pixels) which gives the number of neighbors of every pixel in the \
        Voronoi grid.
    """

    pixelization_1d_index_for_sub_mask_1d_index = np.zeros((grid.shape[0]))

    for sub_mask_1d_index in range(grid.shape[0]):

        # Seed the search with the pixelization pixel already paired to the
        # data pixel hosting this sub-pixel.
        nearest_pixelization_1d_index = nearest_pixelization_1d_index_for_mask_1d_index[
            mask_1d_index_for_sub_mask_1d_index[sub_mask_1d_index]
        ]

        # Greedy graph descent: hop to the closest Voronoi neighbor until no
        # neighbor is closer than the current pixel (squared distances are
        # compared, which preserves the ordering).
        while True:

            nearest_pixelization_pixel_center = pixelization_grid[
                nearest_pixelization_1d_index
            ]

            sub_pixel_to_nearest_pixelization_distance = (
                (grid[sub_mask_1d_index, 0] - nearest_pixelization_pixel_center[0]) ** 2
                + (grid[sub_mask_1d_index, 1] - nearest_pixelization_pixel_center[1])
                ** 2
            )

            # Sentinel larger than any realistic squared separation.
            closest_separation_from_pixelization_to_neighbor = 1.0e8

            for neighbor_pixelization_1d_index in range(
                pixel_neighbors_size[nearest_pixelization_1d_index]
            ):

                neighbor = pixel_neighbors[
                    nearest_pixelization_1d_index, neighbor_pixelization_1d_index
                ]

                separation_from_neighbor = (
                    (grid[sub_mask_1d_index, 0] - pixelization_grid[neighbor, 0]) ** 2
                    + (grid[sub_mask_1d_index, 1] - pixelization_grid[neighbor, 1]) ** 2
                )

                if (
                    separation_from_neighbor
                    < closest_separation_from_pixelization_to_neighbor
                ):

                    closest_separation_from_pixelization_to_neighbor = (
                        separation_from_neighbor
                    )
                    closest_neighbor_pixelization_1d_index = (
                        neighbor_pixelization_1d_index
                    )

            neighboring_pixelization_1d_index = pixel_neighbors[
                nearest_pixelization_1d_index, closest_neighbor_pixelization_1d_index
            ]
            sub_pixel_to_neighboring_pixelization_distance = (
                closest_separation_from_pixelization_to_neighbor
            )

            # Stop when the current pixel beats (or ties) its best neighbor.
            if (
                sub_pixel_to_nearest_pixelization_distance
                <= sub_pixel_to_neighboring_pixelization_distance
            ):

                pixelization_1d_index_for_sub_mask_1d_index[
                    sub_mask_1d_index
                ] = nearest_pixelization_1d_index
                break

            else:

                nearest_pixelization_1d_index = neighboring_pixelization_1d_index

    return pixelization_1d_index_for_sub_mask_1d_index
@decorator_util.jit()
def adaptive_pixel_signals_from_images(
    pixels,
    signal_scale,
    pixelization_1d_index_for_sub_mask_1d_index,
    mask_1d_index_for_sub_mask_1d_index,
    hyper_image,
):
    """Compute the (hyper) signal in each pixel, where the signal is the sum of its data-pixel fluxes. \
    These pixel-signals are used to compute the effective regularization weight of each pixel.

    The pixel signals are computed in the following ways:

    1) Divided by the number of data pixels in the pixel, to ensure all pixels have the same \
    'relative' signal (i.e. a pixel with 10 pixels doesn't have x2 the signal of one with 5).

    2) Divided by the maximum pixel-signal, so that all signals vary between 0 and 1. This ensures that the \
    regularization weights are defined identically for any data unit or signal-to-noise ratio.

    3) Raised to the power of the hyper-galaxy parameter *signal_scale*, so the method can control the relative \
    contribution of regularization in different regions of the pixelization.

    Parameters
    -----------
    pixels : int
        The total number of pixels in the pixelization the regularization scheme is applied to.
    signal_scale : float
        A factor which controls how rapidly the smoothness of regularization varies from high signal regions to \
        low signal regions.
    regular_to_pix : ndarray
        A 1D array mapping every pixel on the grid to a pixel on the pixelization.
    hyper_image : ndarray
        The image of the galaxy which is used to compute the weighted pixel signals.
    """

    pixel_signals = np.zeros((pixels,))
    pixel_sizes = np.zeros((pixels,))

    # Accumulate each pixelization pixel's total flux and its sub-pixel count.
    for sub_mask_1d_index in range(len(pixelization_1d_index_for_sub_mask_1d_index)):
        mask_1d_index = mask_1d_index_for_sub_mask_1d_index[sub_mask_1d_index]
        pixel_signals[
            pixelization_1d_index_for_sub_mask_1d_index[sub_mask_1d_index]
        ] += hyper_image[mask_1d_index]
        pixel_sizes[pixelization_1d_index_for_sub_mask_1d_index[sub_mask_1d_index]] += 1

    # Avoid division by zero for pixelization pixels with no mapped sub-pixels.
    pixel_sizes[pixel_sizes == 0] = 1
    pixel_signals /= pixel_sizes
    pixel_signals /= np.max(pixel_signals)

    return pixel_signals ** signal_scale
| [
"james.w.nightingale@durham.ac.uk"
] | james.w.nightingale@durham.ac.uk |
e2f2035ebe825909c200b197f392cd98c62b4dd1 | 93ad28897161a30027a2ac1d10c7235f6d66b2f8 | /docker_registry_client_async/formattedsha256.py | c788ae5c66bd8f8ada4732201c212cd1574d8e51 | [
"Apache-2.0"
] | permissive | GitHK/docker-registry-client-async | 0ce117374eb094c3705510d72c081d24c316bada | 384b1b7f7abcda55258028d930b45054ab03f6c4 | refs/heads/main | 2023-05-11T10:50:55.432955 | 2021-03-16T18:57:25 | 2021-03-16T18:57:25 | 371,687,021 | 0 | 0 | Apache-2.0 | 2021-05-28T12:03:12 | 2021-05-28T12:03:11 | null | UTF-8 | Python | false | false | 1,292 | py | #!/usr/bin/env python
"""Utility classes."""
import hashlib
class FormattedSHA256(str):
    """A SHA256 hash value carrying the canonical ``sha256:`` prefix.

    Instances are plain strings of the form ``sha256:<64 hex chars>`` and
    additionally expose the bare digest via the ``sha256`` attribute.
    """

    def __new__(cls, sha256: str):
        # Strip any embedded "sha256:" markers before validating the length.
        digest = sha256.replace("sha256:", "") if sha256 else sha256
        if not digest or len(digest) != 64:
            raise ValueError(digest)
        instance = super().__new__(cls, f"sha256:{digest}")
        instance.sha256 = digest  # bare digest without the prefix
        return instance

    @staticmethod
    def parse(digest: str) -> "FormattedSHA256":
        """
        Initializes a FormattedSHA256 from a given SHA256 digest value.

        Args:
            digest: A SHA256 digest value in form sha256:<digest value>.

        Returns:
            The newly initialized object.
        """
        well_formed = bool(digest) and digest.startswith("sha256:") and len(digest) == 71
        if not well_formed:
            raise ValueError(digest)
        return FormattedSHA256(digest[7:])

    @staticmethod
    def calculate(data: bytes) -> "FormattedSHA256":
        """
        Calculates the digest value for given data.

        Args:
            data: The data for which to calculate the digest value.

        Returns:
            The FormattedSHA256 containing the corresponding digest value.
        """
        return FormattedSHA256(hashlib.sha256(data).hexdigest())
| [
"crashvb@gmail.com"
] | crashvb@gmail.com |
1ec4029f94f10baeb145b9970801a547ff3b09f6 | de9cb0ae2f80832c0573cba63330983ed23fa0d0 | /dict/insert_word.py | fcd1770448fd942442c4bf8c2d079c3408bd233f | [] | no_license | brooot/Aid_Files | 3fa25ccf3ac23a8ca69fdefb3ab1089534ff9d81 | 11cdbc5037cddf042b857558902fdc04513335be | refs/heads/master | 2023-03-27T15:29:24.534347 | 2021-03-25T13:02:52 | 2021-03-25T13:02:52 | 200,647,728 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 867 | py | import pymysql
# One-shot loader: read "word<whitespace>meaning" lines from dict.txt and
# insert them into the ``words`` table of the MySQL ``dict`` database.
# NOTE(review): the actual INSERT/commit is commented out below, so the
# script currently only prints the first parsed pair.
import re

f = open('dict.txt')
db = pymysql.connect('localhost', 'root',
                     'xzl1122', 'dict')
cur = db.cursor()

sql = 'insert into words (word,interpret) \
VALUES (%s,%s)'
a = 1  # flag: print only the first parsed tuple as a sanity check
for line in f:
    # Extract the (word, meaning) tuple from the current line.
    tup = re.findall(r'(\w+)\s+(.*)', line)[0]
    '''要加[0]是因为返回的是查找到的子组的tupple组成的数组,
    如[(子组1,子组2),(),...,()]
    findall在有子组的情况下会只会返回子组匹配到的内容,
    当存在两个及以上的子组的时候便会将每次匹配到的子组放在一个元组内返回,
    组成一个列表的元素'''
    # (The note above explains, in Chinese, that findall returns a list of
    # subgroup tuples — hence the [0] to take the first match. A line with
    # no match would raise IndexError here.)
    if a:
        print(tup)
        a = 0
    try:
        pass
        # cur.execute(sql, tup)
        # db.commit()
    except Exception:
        db.rollback()

f.close()
cur.close()
db.close()
| [
"1442704671@qq.com"
] | 1442704671@qq.com |
8947cf626fb6b113322a37e8f0468da4d664e36b | ea393959886a5cd13da4539d634f2ca0bbcd06a2 | /tensorflow/cs224d_tf_tutorial_example.py | c6b21c628c02abe4a105916a220217b28b672dce | [] | no_license | zhangchizju2012/LeetCode | f605f35b82f16282559af71e4e61ec2629a90ebc | 0c4c38849309124121b03cc0b4bf39071b5d1c8c | refs/heads/master | 2020-04-05T12:12:14.810639 | 2018-08-09T10:24:52 | 2018-08-09T10:24:52 | 81,021,830 | 7 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,732 | py | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Sun Jan 28 16:58:40 2018
@author: zhangchi
"""
#import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
#import seaborn
# Define input data
# Toy linear regression with TensorFlow 1.x graph-mode APIs
# (tf.placeholder / tf.Session — removed in TF 2.x).

# Synthetic data: y = 20x + 10 on x in [0, 100) with step 0.1 (1000 points).
X_data = np.arange(100, step=.1)
y_data = 20 * X_data + 10 #X_data + 20 * np.sin(X_data/10)

# Plot input data
#plt.scatter(X_data, y_data)

# Define data size and batch size
n_samples = 1000
batch_size = 100

# Tensorflow is finicky about shapes, so resize to column vectors
X_data = np.reshape(X_data, (n_samples,1))
y_data = np.reshape(y_data, (n_samples,1))

# Define placeholders for one minibatch of input
X = tf.placeholder(tf.float32, shape=(batch_size, 1))
y = tf.placeholder(tf.float32, shape=(batch_size, 1))

with tf.variable_scope("linear-regression"):
    W = tf.get_variable("weights", (1, 1),
                        initializer=tf.random_normal_initializer())
    b = tf.get_variable("bias", (1,),
                        initializer=tf.constant_initializer(0.0))
    y_pred = tf.matmul(X, W) + b
    # Squared error summed over the batch, scaled by 1/n_samples.
    loss = tf.reduce_sum((y - y_pred)**2/n_samples)

# Define optimizer operation (Adam on random minibatches, lr = 0.1)
opt_operation = tf.train.AdamOptimizer(0.1).minimize(loss)

with tf.Session() as sess:
    # Initialize Variables in graph
    # (tf.initialize_all_variables is the deprecated TF 1.x spelling of
    # tf.global_variables_initializer.)
    sess.run(tf.initialize_all_variables())
    # Minibatch gradient descent loop for 10000 steps
    for _ in range(10000):
        # Select random minibatch
        indices = np.random.choice(n_samples, batch_size)
        X_batch, y_batch = X_data[indices], y_data[indices]
        # Do gradient descent step and fetch the current loss / parameters
        _, loss_val, W_, b_ = sess.run([opt_operation, loss, W, b], feed_dict={X: X_batch, y: y_batch})
        #print loss_val
        print(str(loss_val)+ " "+ str(W_)+" "+str(b_))
"zhangchizju2012@zju.edu.cn"
] | zhangchizju2012@zju.edu.cn |
b7ce35e05be0a2aeebc41796f49d3ef7d711954c | 24a38619bfb7458e9124c4eddb86e67f46a7cdcd | /weo/dates.py | 1b39ad9c71ad183e5cf6f79f06437c4734f4f8e2 | [] | no_license | FinTrek/weo-reader | 8a976b08bb8d4b606ea0930507bf57529d9d094d | 8a9f6a51e19ca38caeaa35d2c814dc73a7b1388d | refs/heads/master | 2023-04-07T09:24:33.690258 | 2021-04-15T08:12:49 | 2021-04-15T08:12:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,177 | py | import os
from dataclasses import dataclass
from datetime import datetime
from enum import Enum
from pathlib import Path
from typing import List, Optional, Tuple, Union
import requests
__all__ = [
"download",
"all_releases",
"make_url_countries",
"make_url_commodities",
"Date",
]
def cur_year() -> int:
    """Return the current calendar year."""
    today = datetime.today()
    return today.year


def cur_month() -> int:
    """Return the current calendar month (1-12)."""
    today = datetime.today()
    return today.month
class Release(Enum):
    """WEO publication season: Spring (April) or Fall (September/October)."""

    Spring = 1
    Fall = 2


@dataclass
class Date:
    """A WEO release identified by year and release number.

    In practice *release* is passed as a plain int (1 = spring, 2 = fall),
    which keeps the tuple comparisons below well-defined.
    """

    year: int
    release: Release

    def __lt__(self, other):
        return (self.year, self.release) < (other.year, other.release)

    def __le__(self, other):
        return (self < other) | (self == other)

    def __gt__(self, other):
        return not (self <= other)
def succ(d: Date) -> Date:
    """Return the release immediately following *d* (fall -> next spring)."""
    if d.release == 2:
        return Date(d.year + 1, 1)
    return Date(d.year, 2)


def first() -> Date:
    """Earliest supported WEO release (October 2007)."""
    return Date(2007, 2)


def current() -> Date:
    """Most recent WEO release as of today's date."""
    this_year = cur_year()
    this_month = cur_month()
    if this_month >= 10:
        return Date(this_year, 2)
    if this_month >= 4:
        return Date(this_year, 1)
    # January-March: the latest release is last year's fall edition.
    return Date(this_year - 1, 2)


def month(d: Date) -> int:
    """Publication month of *d* (September 2011 is the one irregular fall release)."""
    if d == Date(2011, 2):
        return 9
    return 10 if d.release == 2 else 4


def month_str(d: Date) -> str:
    """Three-letter month abbreviation used in WEO download filenames."""
    lookup = {4: "Apr", 9: "Sep", 10: "Oct"}
    return lookup[month(d)]


def name(d: Date) -> str:
    """Human-readable dataset label, e.g. ``2020-Oct WEO dataset``."""
    return f"{d.year}-{month_str(d)} WEO dataset"


def period_str(d: Date) -> str:
    """Zero-padded release number, e.g. ``01`` or ``02``."""
    return str(d.release).zfill(2)
base_url = "https://www.imf.org/-/media/Files/Publications/WEO/WEO-Database"
def filename(year, month, prefix):
    """Build a WEO spreadsheet filename, e.g. ``WEOOct2020all.xls``."""
    return f"WEO{month}{year}{prefix}.xls"


def url_after_2020(base_url, year, month, period_marker, prefix):
    """URL layout used from October 2020 onwards (includes the period folder)."""
    return f"{base_url}/{year}/{period_marker}/{filename(year, month, prefix)}"


def url_before_2020(base_url, year, month, period_marker, prefix):
    """URL layout used before October 2020.

    *period_marker* is unused here; it is kept so both layouts share one
    call signature.
    """
    return f"{base_url}/{year}/{filename(year, month, prefix)}"
def make_url(d: Date, prefix: str, base_url: str = base_url):
    """Assemble the IMF download URL for release *d*.

    April 2021 is served from a hand-written ``.ashx`` address; releases from
    October 2020 onwards use the layout with a period folder, earlier ones
    the flat per-year layout.
    """
    if d == Date(2021, 1):
        return "https://www.imf.org/-/media/Files/Publications/WEO/WEO-Database/2021/WEOApr2021all.ashx"
    url_args = (base_url, d.year, month_str(d), period_str(d), prefix)
    if d >= Date(2020, 2):
        return url_after_2020(*url_args)
    return url_before_2020(*url_args)


def make_url_countries(d: Date):
    """URL of the by-country dataset (``all`` file) for release *d*."""
    return make_url(d, prefix="all")


def make_url_commodities(d: Date):
    """URL of the commodity dataset (``alla`` file) for release *d*."""
    return make_url(d, prefix="alla")
def yield_dates():
    """Iterate over every release from the first up to and including the current one."""
    d = first()
    stop = current()
    while d <= stop:
        yield d
        d = succ(d)


def all_releases() -> List[Tuple[int, int]]:
    """Provide all (year, release) pairs to use in bulk download."""
    return [(d.year, d.release) for d in yield_dates()]


def is_future(d: Date):
    """True when *d* is a release that has not happened yet."""
    return d > current()


def is_ancient(d: Date):
    """True when *d* predates the first supported release."""
    return d < first()
class DateError(ValueError):
    """Raised when a WEO release date lies outside the supported range."""


def validate(d: Date):
    """Raise :class:`DateError` unless *d* is between October 2007 and today."""
    if is_ancient(d):
        raise DateError(f"Cannot work with date earlier than October 2007, got {d}")
    if is_future(d):
        raise DateError(f"The date is in the future: {d}")
def get_season(tag: Union[int, str]) -> int:
    """Normalize a release identifier to 1 (spring) or 2 (fall).

    Accepts 1/2 directly, the month numbers 4/9/10, or month names such as
    'Apr', 'October', 'Sep' (case-insensitive; only the first three letters
    matter). Raises :class:`DateError` for anything else.
    """
    key = tag.lower()[:3] if isinstance(tag, str) else tag
    if key in (1, 2):
        return key
    if key in ("apr", 4):
        return 1
    if key in ("oct", "sep", 9, 10):
        return 2
    raise DateError(key)
def default_filename(d: Date):
    """Canonical CSV filename for a release, e.g. ``weo_2020_2.csv``."""
    return f"weo_{d.year}_{d.release}.csv"


def get_date(year: int, release: Union[int, str]):
    """Build and validate a :class:`Date` from user-supplied year/release."""
    d = Date(year, get_season(release))
    validate(d)
    return d
def locate(d: Date, filename: Optional[str] = None, directory: Optional[str] = None):
    """Resolve the normalized local path for release *d*.

    Falls back to :func:`default_filename` when *filename* is omitted, and
    to the bare filename when *directory* is omitted.
    """
    fname = default_filename(d) if filename is None else filename
    raw_path = fname if directory is None else os.path.join(directory, fname)
    return os.path.normpath(raw_path)
def curl(path: str, url: str):
    """Stream *url* to the local file *path* in 1 KiB chunks and return *path*."""
    r = requests.get(url, stream=True)
    iterable = r.iter_content(chunk_size=1024)
    with open(path, "wb") as f:
        for chunk in iterable:
            if chunk:  # filter out keep-alive new chunks
                f.write(chunk)
    # Report where the file landed and how large it is.
    print(path, size_str(path))
    return path
def accept(
    year: int,
    release: Union[int, str],
    filename: Optional[str] = None,
    directory: Optional[str] = None,
):
    """Resolve a (year, release) request to (Date, local path, download URL)."""
    d = get_date(year, release)
    path = locate(d, filename, directory)
    url = make_url_countries(d)
    return d, path, url
def download(
    year: int,
    release: Union[int, str],
    filename: Optional[str] = None,
    directory: str = ".",
    fetch=curl,
):
    """Download dataset from IMF WEO website by release.
    from weo import download
    download(2020, 'Oct', 'weo.csv')
    Equivalent to:
    curl -o weo.csv https://www.imf.org/-/media/Files/Publications/WEO/WEO-Database/2020/02/WEOOct2020all.xls
    To download all releases (folder 'weo_data' must exist):
    from weo import all_releases
    for (year, release) in all_releases():
        download(year, release, directory='weo_data')
    Parameters
    ----------
    year : int
        Year of WEO release.
    release : int or str
        For spring WEO release use 1 or 'Apr'
        For fall WEO release use 2, 'Oct' or (in 2011) - 'Sep'.
    filename : str
        Filename where to save file.
    directory:
        Directory where to write file.
    fetch: callable, optional
        Used for testing.
    Returns
    -------
    path, url
    """
    d = get_date(year, release)
    path = locate(d, filename, directory)
    url = make_url_countries(d)
    # Skip the network round-trip when the file already exists on disk.
    if os.path.exists(path):
        print("Already downloaded", name(d), "at", path)
    else:
        fetch(path, url)
        print("Downloaded", name(d))
    return path, url
def mb(bytes: int):
    """Express bytes in Mb (binary megabytes, 2**20), rounded to one decimal."""
    # NOTE: the parameter name shadows the builtin `bytes`; kept for API
    # compatibility with existing keyword callers.
    megabyte = 1 << 20
    return round(bytes / megabyte, 1)
def size(path: str) -> int:
    """Return the size of the file at *path* in bytes."""
    return Path(path).stat().st_size
def size_str(path: str) -> str:
    """Return the file size formatted for humans, e.g. '1.5Mb'."""
    x = mb(size(path))
    return f"{x}Mb"
| [
"e.pogrebnyak@gmail.com"
] | e.pogrebnyak@gmail.com |
daa5e171eace2bc81c2aa7d6a425aaead522bd20 | 6b551bec528a1d6544201d3c6d86835e885343b5 | /deep_privacy/config/__init__.py | e890ce1d31413b02d716c5a49fc2d643da63d24f | [
"MIT",
"Apache-2.0"
] | permissive | hukkelas/DeepPrivacy | 9471c8e9389828aa09330905081205b061161d81 | 5ee3f1b0608f03ac54d5694b6421f6132cb63f0e | refs/heads/master | 2023-08-16T00:41:02.366235 | 2023-03-28T06:23:34 | 2023-03-28T06:23:34 | 206,106,232 | 1,288 | 194 | MIT | 2021-08-18T08:21:33 | 2019-09-03T15:08:27 | Python | UTF-8 | Python | false | false | 188 | py | import argparse
from .base import Config
def default_parser() -> argparse.ArgumentParser:
    """Create the default CLI parser with a single positional `config_path` argument."""
    cli = argparse.ArgumentParser()
    cli.add_argument("config_path")
    return cli
| [
"hakon.hukkelas@ntnu.no"
] | hakon.hukkelas@ntnu.no |
437e08dbe2b55f9fa56c1e729dcc134bed63ab13 | 58f38f1d69d4bfc650ad18e0045c36ae29c9d84a | /Django基础部分代码/chapter11/authenticate_demo/front/models.py | fb099528d5cfdb0ff7e815a2b0d07bce8ee2011e | [] | no_license | zjf201811/DjangoWebProject | 0670c61b89387901089bf67cf2423d9341f69913 | fab15784fb326ba4517951e180418ea54de03afe | refs/heads/master | 2020-04-18T12:03:08.798484 | 2019-05-06T03:59:46 | 2019-05-06T03:59:46 | 167,522,193 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,094 | py | from django.db import models
from django.contrib.auth.models import AbstractUser,BaseUserManager
from django.dispatch import receiver
from django.db.models.signals import post_save
from django.contrib.auth.base_user import AbstractBaseUser
from django.contrib.auth.models import PermissionsMixin
from django.contrib.auth import get_user_model
# 如果模型是一个代理模型
# 那么就不能在这个模型中添加新的Field
# User.objects.all()
# Person.objects.all()
# 以上两种写法是等价的
# class Person(User):
# telephone = models.CharField(max_length=11)
# class Meta:
# proxy = True
#
# @classmethod
# def get_blacklist(cls):
# return cls.objects.filter(is_active=False)
# class UserExtension(models.Model):
# user = models.OneToOneField(User,on_delete=models.CASCADE,related_name='extension')
# telephone = models.CharField(max_length=11)
# school = models.CharField(max_length=100)
#
# @receiver(post_save,sender=User)
# def handler_user_extension(sender,instance,created,**kwargs):
# if created:
# UserExtension.objects.create(user=instance)
# else:
# instance.extension.save()
class UserManager(BaseUserManager):
    """Manager for the custom User model: accounts are keyed by telephone number."""
    def _create_user(self,telephone,username,password,**kwargs):
        # Shared implementation for create_user/create_superuser.
        if not telephone:
            raise ValueError('必须要传递手机号码!')
        if not password:
            raise ValueError('必须要传递密码!')
        user = self.model(telephone=telephone,username=username,**kwargs)
        user.set_password(password)  # stores the hashed password, never plaintext
        user.save()
        return user
    def create_user(self,telephone,username,password,**kwargs):
        # Regular account: force is_superuser off.
        kwargs['is_superuser'] = False
        return self._create_user(telephone=telephone,username=username,password=password,**kwargs)
    def create_superuser(self,telephone,username,password,**kwargs):
        # Superuser account: force is_superuser on.
        kwargs['is_superuser'] = True
        return self._create_user(telephone=telephone,username=username,password=password,**kwargs)
# class User(AbstractUser):
#     telephone = models.CharField(max_length=11,unique=True)
#     school = models.CharField(max_length=100)
#
#     USERNAME_FIELD = 'telephone'
#
#     objects = UserManager()
class User(AbstractBaseUser,PermissionsMixin):
    """Fully custom user model; authentication uses the telephone number."""
    telephone = models.CharField(max_length=11,unique=True)
    email = models.CharField(max_length=100,unique=True)
    username = models.CharField(max_length=100)
    is_active = models.BooleanField(default=True)
    # Log in with the telephone field; no extra fields required by createsuperuser.
    USERNAME_FIELD = 'telephone'
    REQUIRED_FIELDS = []
    objects = UserManager()
    def get_full_name(self):
        return self.username
    def get_short_name(self):
        return self.username
# Remember: if you customize the User model, it must be created
# before the first `migrate` command is ever run.
class Article(models.Model):
    """Demo article model used to exercise custom model permissions."""
    title = models.CharField(max_length=100)
    content = models.TextField()
    author = models.ForeignKey(get_user_model(),on_delete=models.CASCADE)
    class Meta:
        permissions = [
            ('view_article','看文章的权限!')
        ]
"thor201105@163.com"
] | thor201105@163.com |
b32ecefa6149e3e45d3aa903f5f8953541a5d1af | a9ac3c537fc778b34cb32d4528e2d1190e65e19e | /shabanipy/version.py | 964651219a1dc54208efb4ad35ddff70d2eb6d1e | [
"MIT"
] | permissive | wms269/shabanipy | 9f770cfdf113ca8e8af69cd793be2f8bf9b0141a | 1e751631e031c528e18d5e0d8ff4fa1457f4107e | refs/heads/master | 2022-09-23T15:43:43.875608 | 2020-04-09T17:49:24 | 2020-04-09T17:49:24 | 265,638,022 | 1 | 0 | MIT | 2020-05-20T17:25:40 | 2020-05-20T17:25:39 | null | UTF-8 | Python | false | false | 1,508 | py | # -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright 2018 by ShabaniPy Authors, see AUTHORS for more details.
#
# Distributed under the terms of the MIT license.
#
# The full license is in the file LICENCE, distributed with this software.
# -----------------------------------------------------------------------------
"""The version information for this release of ShabaniPy.
"""
from collections import namedtuple
# The major release number. Differences in the major number indicate
# possibly large differences in API.
MAJOR = 0
# The minor release number. Differences in the minor number indicate
# possibly small differences in the API, but these changes will come
# backwards compatibility support when possible. Minor releases are
# typically used for large feature additions.
MINOR = 1
# The micro release number. The micro release number is incremented
# for bug fix releases and small feature additions.
MICRO = 2
# The status indicate if this is a development or pre-release version
STATUS = ''
#: A namedtuple of the version info for the current release.
version_info = namedtuple('version_info', 'major minor micro status')
version_info = version_info(MAJOR, MINOR, MICRO, STATUS)
# Remove everything but the 'version_info' from this module.
del namedtuple, MAJOR, MINOR, MICRO, STATUS
__version__ = ('{0}.{1}.{2}'.format(*version_info) if not version_info.status
else '{0}.{1}.{2}.{3}'.format(*version_info))
| [
"marul@laposte.net"
] | marul@laposte.net |
93225b0a14654568b4b5d579a6201445d80399e2 | cd5746f8cc7aee1f20606a65b4fae0d5e8ee78dc | /Python Books/scikit-learn-Cookbook/code/chapter1/swiss_roll.py | c73c474a387fc3b5967f5bbab6fa6a4afc785319 | [] | no_license | theGreenJedi/Path | df24fca355590efef0c6cb5c52e7216c6b5d2464 | b5ed2805dbb046480929e49e550bfd8af5bb4d6f | refs/heads/master | 2023-07-27T14:23:37.694546 | 2021-07-16T01:38:55 | 2021-07-16T01:38:55 | 87,686,563 | 8 | 2 | null | 2023-07-11T22:49:03 | 2017-04-09T05:57:30 | Jupyter Notebook | UTF-8 | Python | false | false | 429 | py | import sklearn.datasets as d
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import pyplot as plt
import numpy as np
swiss_roll = d.make_swiss_roll()
f = plt.figure(figsize=(8, 4))
ax = f.add_subplot(111, projection='3d')
ax.set_title("A swiss roll with Y flattened.")
colors = np.array(['r', 'g', 'b'])
X = swiss_roll[0]
ax.scatter(X[:, 0], np.zeros_like(X[:, 1]), X[:, 2], alpha=0.75)
f.savefig("swiss_roll.png")
| [
"GreenJedi@protonmail.com"
] | GreenJedi@protonmail.com |
88d76acedf188298f224f8ab1e5713387b2cc660 | 8dde6f201657946ad0cfeacab41831f681e6bc6f | /digit_sum.py | 0325ac29e4631c9bb8dd93c9c6043d1056304b3f | [] | no_license | peraktong/LEETCODE_Jason | c5d4a524ba69b1b089f18ce4a53dc8f50ccbb88c | 06961cc468211b9692cd7a889ee38d1cd4e1d11e | refs/heads/master | 2022-04-12T11:34:38.738731 | 2020-04-07T21:17:04 | 2020-04-07T21:17:04 | 219,398,022 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 977 | py | import collections
def getCounter(i):
# if i < 10:
# return collections.Counter(range(i+1))
threshold = 10
counters = [collections.Counter({0: 1})]
while i >= threshold:
cur_counter = counters[-1]
next_counter = collections.Counter()
for num in cur_counter:
for leading in range(10):
next_counter[num + leading] += cur_counter[num]
counters.append(next_counter)
threshold *= 10
threshold //= 10
res = collections.Counter()
leading = 0
i += 1
while i:
max_digit = i // threshold
cur = counters.pop()
for num in cur:
for digit in range(max_digit):
res[leading + digit + num] += cur[num]
leading += max_digit
i %= threshold
threshold //= 10
return res
def waysToChooseSum(i, j):
c = getCounter(j) - getCounter(i - 1)
s = max(c.values())
return s, list(c.values()).count(s) | [
"caojunzhi@caojunzhisMBP3.fios-router.home"
] | caojunzhi@caojunzhisMBP3.fios-router.home |
99c7253a3a82a9b457f75291657f219af610974a | 7baef4f7dad8e6538096d42c4e03e6d531fbf0bf | /day03/pymysql回顾.py | 8024a2823e0fbd29a0872938bafc1ab8e1a1cd6e | [] | no_license | q737645224/spyder | a16a2bdbdab12ca6b2546f81363d3571d1358a3e | bae94180ef6953a21a2491da378cce738312afc7 | refs/heads/master | 2020-04-07T20:03:30.966825 | 2018-11-23T04:29:34 | 2018-11-23T04:29:34 | 158,673,753 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 521 | py | # 创建一个库 testspider
# 创建一张表 t1 (id int)
# 在表中插入一条记录 id=1
import pymysql
# 创建数据库连接对象
db = pymysql.connect("localhost","root","123456",
charset="utf8")
# 创建游标对象ss
cursor = db.cursor()
cursor.execute("create database if not exists testspider;")
cursor.execute("use testspider;")
cursor.execute("create table if not exists t1(id int);")
cursor.execute("insert into t1 values(1);")
db.commit()
cursor.close()
db.close()
| [
"764375224@qq.com"
] | 764375224@qq.com |
c73811aca9f9efdecf49e96164b6a636150efd2a | 4e02d5b0b1b0739553fd40bbbdfb0d02c9830350 | /128_Longest_Consecutive_Sequence.py | c9b07f448a3b08cc5afae8e2d2900b8f23681a6f | [] | no_license | bingli8802/leetcode | b039ab6af62f0c8992463393f561caafd21056e6 | a509b383a42f54313970168d9faa11f088f18708 | refs/heads/master | 2023-03-29T03:11:45.801090 | 2021-03-23T22:55:16 | 2021-03-23T22:55:16 | 279,321,659 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 500 | py | class Solution(object):
# 先消重再排序
def longestConsecutive(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
if not nums:
return 0
nums = list(set(nums))
nums.sort()
n = len(nums)
dp = [1] * n
res = 1
# print nums
for i in range(1, n):
if nums[i] == nums[i-1] + 1:
dp[i] = dp[i-1] + 1
res = max(res, dp[i])
return res
| [
"noreply@github.com"
] | bingli8802.noreply@github.com |
a94cec00255f5040df4c55fb1849dca6fed62f52 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/423/usersdata/310/90249/submittedfiles/mdc.py | a77c841ed2e89333d50d419a258627beefa4aba9 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 228 | py | # -*- coding: utf-8 -*-
import math
n1 = int(input('Digite n1: '))
n2 = int(input('Digite n2: '))
i = 1
while true:
i+=1
(n1%i)== (n2%i)==0
if i == n1 or i==n2:
break
print (i)
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
f21d03a72329d8ac5a16de1eab795c0d58849ca1 | ef11cb7a2ee550e4fb95be46cd4d67d6cc230787 | /python/Top Interview Questions - Medium/Sorting and Searching/tests/test_peakelement.py | d127805ee2d84e333d8f1e8d744f4cc27a3513b7 | [] | no_license | Hilldrupca/LeetCode | 44b32161743ba982ea5e3fe593ff8a27c96e9350 | c6d600bc74afd14e00d4f0ffed40696192b229c3 | refs/heads/master | 2023-03-31T22:21:17.967663 | 2021-04-07T16:18:17 | 2021-04-07T16:18:17 | 288,544,724 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 699 | py | import unittest, sys
sys.path.append('..')
from peakelement import Solution
class TestPeakElement(unittest.TestCase):
def setUp(self):
self.s = Solution()
def test_find_peak_element(self):
case_one = [1,2,3,1]
self.assertEqual(self.s.findPeakElement(case_one), 2)
case_two = [1,2,1,3,5,6,4]
self.assertIn(self.s.findPeakElement(case_two), [1,5])
case_three = [1,2]
self.assertEqual(self.s.findPeakElement(case_three), 1)
case_four = [2,1]
self.assertEqual(self.s.findPeakElement(case_four), 0)
if __name__ == '__main__':
unittest.main()
| [
"hilldrupca@gmail.com"
] | hilldrupca@gmail.com |
304249226458b0590377088be01d33c50df7627a | 4a5c67c87150955ce0164b4263458dbcb563bbda | /api_venv/bin/sqlformat | ce4f3d3fe4f575bd89d0ab0ad3ace1e2db24b0c2 | [] | no_license | madhav06/python-rest-api | 24715a1b8f0f58ab5ba45d03e8f2fc9eb355a5f0 | fb49b308bfa478ed53817e1d0a504099a1317e96 | refs/heads/master | 2023-02-07T05:30:07.347111 | 2020-12-19T09:02:16 | 2020-12-19T09:02:16 | 294,404,015 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 259 | #!/Users/madhavnandan/python-rest-api/api_venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from sqlparse.__main__ import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"mnandan06@gmail.com"
] | mnandan06@gmail.com | |
f4574ec50464cb4a665ca662d90ca13be44605c6 | 75f0580af1734b9edb9e06bfadfe48f45b057872 | /2017/16/sol.py | 835caece5131fdce96b033253e6823d0875f54c4 | [] | no_license | penteract/adventofcode | 5bb317f8093f60c1d776d0983016a5288d059603 | 7b7344708ef1d58caa339a32a13f3390556b664c | refs/heads/master | 2023-01-29T16:08:13.541190 | 2023-01-16T20:21:02 | 2023-01-16T20:21:02 | 160,901,373 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,444 | py | from functools import *
from itertools import *
from collections import defaultdict
import sys
sys.setrecursionlimit(100000)
f = open("input")
l=[x for x in f]
d=defaultdict(int)
#r=list(map(int,l[0].split()))
#l=["s1,x3/4,pe/b"]
l = l[0].split(",")
p = [chr(x) for x in range(ord("a"),ord("p")+1)]
#p=list("abcde")
print("".join(p))
d={"".join(p):0}
aa=0
for i in range(2000):
for ins in l:
if ins[0]=="s":
n=int(ins[1:])
p=p[-n:]+p[:-n]
elif ins[0]=="x":
a,b=list(map(int,ins[1:].split("/")))
x=p[a]
p[a]=p[b]
p[b]=x
elif ins[0]=="p":
x,y = ins[1:].split("/")
a=p.index(x)
b=p.index(y)
x=p[a]
p[a]=p[b]
p[b]=x
else:
print(ins)
k=("".join(p))
if k in d:
print(k)
break
aa+=1
d[k]=aa
perm = [ord(x)-ord("a") for x in "dcmlhejnifpokgba"]
def ap(l, p):
    """Compose: take the elements of l at the positions listed in p."""
    return [l[idx] for idx in p]
def apply(prm, n):
    """Raise permutation prm to the n-th power by binary exponentiation (n >= 1)."""
    if n == 1:
        return prm
    if n % 2 == 0:
        # Even power: square the permutation and halve the exponent.
        return apply(ap(prm, prm), n // 2)
    # Odd power: peel off one application.
    return ap(apply(prm, n - 1), prm)
def disp(s):
    """Render a permutation of 0..25 as a string of lowercase letters."""
    return "".join(chr(code + ord("a")) for code in s)
# Apply the per-dance index permutation one billion times.
# NOTE(review): the result is discarded at top level — presumably this was run
# interactively; wrap in print(...) to see the answer when run as a script.
disp(apply(perm,10**9))
#wrong:
#jlmenhdafcbkgoip doing x wrong (moving from the front)
#pmbdaelhgonkjcif still doing x wrong
#wrong pt2
#dcmljghfinpokeba (permutation nonsense)
#legnajicfkmdobph (adding 1 after break statement)
| [
"tcathcartburn@gmail.com"
] | tcathcartburn@gmail.com |
db49ab033224ca4e10e045059a3acc5df038ce33 | f445450ac693b466ca20b42f1ac82071d32dd991 | /generated_tempdir_2019_09_15_163300/generated_part002442.py | 5629f88f997a1d7f910f1dc3a272cbf65af41175 | [] | no_license | Upabjojr/rubi_generated | 76e43cbafe70b4e1516fb761cabd9e5257691374 | cd35e9e51722b04fb159ada3d5811d62a423e429 | refs/heads/master | 2020-07-25T17:26:19.227918 | 2019-09-15T15:41:48 | 2019-09-15T15:41:48 | 208,357,412 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,955 | py | from sympy.abc import *
from matchpy.matching.many_to_one import CommutativeMatcher
from matchpy import *
from matchpy.utils import VariableWithCount
from collections import deque
from multiset import Multiset
from sympy.integrals.rubi.constraints import *
from sympy.integrals.rubi.utility_function import *
from sympy.integrals.rubi.rules.miscellaneous_integration import *
from sympy import *
class CommutativeMatcher58448(CommutativeMatcher):
    """Machine-generated matchpy many-to-one matcher for commutative Mul subjects.

    NOTE(review): auto-generated code — regenerate instead of hand-editing.
    The `# State NNNNN` comments mark states of the generated matching automaton.
    """
    _instance = None
    patterns = {
        0: (0, Multiset({}), [
            (VariableWithCount('i2.2.1.2.1.0', 1, 1, None), Mul),
            (VariableWithCount('i2.2.1.2.1.0_1', 1, 1, S(1)), Mul)
        ]),
        1: (1, Multiset({}), [
            (VariableWithCount('i2.4.1.0', 1, 1, None), Mul),
            (VariableWithCount('i2.4.1.0_1', 1, 1, S(1)), Mul)
        ]),
        2: (2, Multiset({0: 1}), [
            (VariableWithCount('i2.2.1.2.1.0', 1, 1, S(1)), Mul)
        ])
    }
    subjects = {}
    subjects_by_id = {}
    bipartite = BipartiteGraph()
    associative = Mul
    max_optional_count = 1
    anonymous_patterns = set()
    def __init__(self):
        self.add_subject(None)
    @staticmethod
    def get():
        # Lazily created process-wide singleton.
        if CommutativeMatcher58448._instance is None:
            CommutativeMatcher58448._instance = CommutativeMatcher58448()
        return CommutativeMatcher58448._instance
    @staticmethod
    def get_match_iter(subject):
        # Generator yielding (pattern index, substitution) for Pow subjects x**n.
        subjects = deque([subject]) if subject is not None else deque()
        subst0 = Substitution()
        # State 58447
        if len(subjects) >= 1 and isinstance(subjects[0], Pow):
            tmp1 = subjects.popleft()
            subjects2 = deque(tmp1._args)
            # State 73934
            if len(subjects2) >= 1:
                tmp3 = subjects2.popleft()
                subst1 = Substitution(subst0)
                try:
                    subst1.try_add_variable('i2.2.1.2.1.1', tmp3)
                except ValueError:
                    pass
                else:
                    pass
                    # State 73935
                    if len(subjects2) >= 1:
                        tmp5 = subjects2.popleft()
                        subst2 = Substitution(subst1)
                        try:
                            subst2.try_add_variable('i2.2.1.2.1.2', tmp5)
                        except ValueError:
                            pass
                        else:
                            pass
                            # State 73936
                            if len(subjects2) == 0:
                                pass
                                # State 73937
                                if len(subjects) == 0:
                                    pass
                                    # 0: x**n
                                    yield 0, subst2
                        subjects2.appendleft(tmp5)
                subjects2.appendleft(tmp3)
            subjects.appendleft(tmp1)
        return
        yield
from collections import deque | [
"franz.bonazzi@gmail.com"
] | franz.bonazzi@gmail.com |
4404f045b07bb343172c518668f96bba1f6a6075 | 3a99586e226f0f23ace22fcc30af7b190e90dd08 | /esp32/tools/get.py | 51be563a24a667b244c4f3d8cf43666479955974 | [] | no_license | thewenneur/esp32-1 | 19a1095b0b619c0bb9cb9eaa86abd2a9d9678269 | d683123af8cc39fe2dfc281ac44444023186960f | refs/heads/master | 2021-01-20T08:36:48.001717 | 2017-08-29T16:39:49 | 2017-08-29T16:39:49 | 101,567,493 | 0 | 0 | null | 2017-08-29T16:44:49 | 2017-08-27T17:06:58 | C | UTF-8 | Python | false | false | 5,065 | py | #!/usr/bin/env python
# This script will download and extract required tools into the current directory.
# Tools list is obtained from package/package_esp8266com_index.template.json file.
# Written by Ivan Grokhotkov, 2015.
#
from __future__ import print_function
import os
import shutil
import errno
import os.path
import hashlib
import json
import platform
import sys
import tarfile
import zipfile
import re
if sys.version_info[0] == 3:
from urllib.request import urlretrieve
else:
# Not Python 3 - today, it is most likely to be Python 2
from urllib import urlretrieve
if 'Windows' in platform.system():
import requests
current_dir = os.path.dirname(os.path.realpath(__file__))
dist_dir = current_dir + '/dist/'
def sha256sum(filename, blocksize=65536):
    """Return the hex SHA-256 digest of the file at *filename*, read in chunks."""
    digest = hashlib.sha256()
    with open(filename, "rb") as f:
        while True:
            chunk = f.read(blocksize)
            if not chunk:  # EOF
                break
            digest.update(chunk)
    return digest.hexdigest()
def mkdir_p(path):
    """Create *path* like `mkdir -p`: no error when the directory already exists."""
    try:
        os.makedirs(path)
    except OSError as exc:
        # Swallow only the "already exists as a directory" case; re-raise the rest.
        if exc.errno == errno.EEXIST and os.path.isdir(path):
            return
        raise
def report_progress(count, blockSize, totalSize):
    """urlretrieve reporthook: rewrite the current line with download percentage."""
    percent = int(count*blockSize*100/totalSize)
    percent = min(100, percent)  # the last block can overshoot 100%
    sys.stdout.write("\r%d%%" % percent)
    sys.stdout.flush()
def unpack(filename, destination):
    """Extract a .tar.gz or .zip archive into *destination* and strip the
    version suffix from the archive's top-level directory name."""
    dirname = ''
    print('Extracting {0}'.format(os.path.basename(filename)))
    sys.stdout.flush()
    if filename.endswith('tar.gz'):
        tfile = tarfile.open(filename, 'r:gz')
        tfile.extractall(destination)
        dirname= tfile.getnames()[0]
    elif filename.endswith('zip'):
        zfile = zipfile.ZipFile(filename)
        zfile.extractall(destination)
        dirname = zfile.namelist()[0]
    else:
        raise NotImplementedError('Unsupported archive type')
    # a little trick to rename tool directories so they don't contain version number
    rename_to = re.match(r'^([a-z][^\-]*\-*)+', dirname).group(0).strip('-')
    if rename_to != dirname:
        print('Renaming {0} to {1}'.format(dirname, rename_to))
        if os.path.isdir(rename_to):
            shutil.rmtree(rename_to)  # replace any stale copy
        shutil.move(dirname, rename_to)
def get_tool(tool):
    """Download one tool described by a package-index entry (unless cached) and unpack it."""
    sys_name = platform.system()
    archive_name = tool['archiveFileName']
    local_path = dist_dir + archive_name
    url = tool['url']
    #real_hash = tool['checksum'].split(':')[1]
    if not os.path.isfile(local_path):
        print('Downloading ' + archive_name);
        sys.stdout.flush()
        if 'CYGWIN_NT' in sys_name:
            # NOTE(review): `ssl` is used here but never imported in this file —
            # this branch would raise NameError on Cygwin; confirm and add `import ssl`.
            ctx = ssl.create_default_context()
            ctx.check_hostname = False
            ctx.verify_mode = ssl.CERT_NONE
            urlretrieve(url, local_path, report_progress, context=ctx)
        elif 'Windows' in sys_name:
            r = requests.get(url)
            f = open(local_path, 'wb')
            f.write(r.content)
            f.close()
        else:
            urlretrieve(url, local_path, report_progress)
        sys.stdout.write("\rDone\n")
        sys.stdout.flush()
    else:
        print('Tool {0} already downloaded'.format(archive_name))
        sys.stdout.flush()
    #local_hash = sha256sum(local_path)
    #if local_hash != real_hash:
    #    print('Hash mismatch for {0}, delete the file and try again'.format(local_path))
    #    raise RuntimeError()
    unpack(local_path, '.')
def load_tools_list(filename, platform):
    """Read the package-index JSON and keep only tools that ship a build for *platform*."""
    tools_info = json.load(open(filename))['packages'][0]['tools']
    tools_to_download = []
    for t in tools_info:
        tool_platform = [p for p in t['systems'] if p['host'] == platform]
        if len(tool_platform) == 0:
            continue  # no build of this tool for our host
        tools_to_download.append(tool_platform[0])
    return tools_to_download
def identify_platform():
    """Map (OS name, pointer size) to the Arduino host-platform identifier string."""
    arduino_platform_names = {'Darwin' : {32 : 'i386-apple-darwin', 64 : 'x86_64-apple-darwin'},
                              'Linux' : {32 : 'i686-pc-linux-gnu', 64 : 'x86_64-pc-linux-gnu'},
                              'LinuxARM': {32 : 'arm-linux-gnueabihf', 64 : 'aarch64-linux-gnu'},
                              'Windows' : {32 : 'i686-mingw32', 64 : 'i686-mingw32'}}
    bits = 32
    if sys.maxsize > 2**32:  # 64-bit interpreter
        bits = 64
    sys_name = platform.system()
    sys_platform = platform.platform()
    print('System: %s, Info: %s' % (sys_name, sys_platform))
    if 'Linux' in sys_name and sys_platform.find('arm') > 0:
        sys_name = 'LinuxARM'
    if 'CYGWIN_NT' in sys_name:
        sys_name = 'Windows'
    return arduino_platform_names[sys_name][bits]
if __name__ == '__main__':
    # Download every tool listed for this host platform into the current directory.
    identified_platform = identify_platform()
    print('Platform: {0}'.format(identified_platform))
    tools_to_download = load_tools_list(current_dir + '/../package/package_esp32_index.template.json', identified_platform)
    mkdir_p(dist_dir)
    for tool in tools_to_download:
        get_tool(tool)
    print('Done')
| [
"root@localhost.localdomain"
] | root@localhost.localdomain |
583dd181b2b7cd621ebd9e4f11ad5c3aaa71c632 | db69a3e20ec69bd8a08ed14ec6193a08e543965d | /mars/dataframe/window/expanding/tests/test_expanding.py | 2b03e57dbb175028f762828d25976440b7f1e046 | [
"BSD-3-Clause",
"CC0-1.0",
"ISC",
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause",
"MIT"
] | permissive | Fernadoo/mars | dbf62cd6f3ff82e3c399f1c06c6da1681b462856 | f8e93edeecbe184b018cd1d0d948b3a2ba74bee6 | refs/heads/master | 2023-08-12T12:48:33.726883 | 2021-09-29T14:29:18 | 2021-09-29T14:29:18 | 377,359,795 | 0 | 1 | Apache-2.0 | 2021-09-29T14:29:19 | 2021-06-16T03:29:08 | Python | UTF-8 | Python | false | false | 2,337 | py | # Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pandas as pd
import pytest
from ..... import dataframe as md
from .....core import tile
def test_expanding():
    """Constructor checks: unsupported options raise; repr/attribute access mirrors pandas."""
    df = pd.DataFrame(np.random.rand(4, 3), columns=list('abc'))
    df2 = md.DataFrame(df)
    # center=True and axis=1 are not implemented for mars expanding windows.
    with pytest.raises(NotImplementedError):
        _ = df2.expanding(3, center=True)
    with pytest.raises(NotImplementedError):
        _ = df2.expanding(3, axis=1)
    r = df2.expanding(3, center=False)
    expected = df.expanding(3, center=False)
    assert repr(r) == repr(expected)
    assert 'b' in dir(r)
    # Unknown columns must fail both as attributes and as keys.
    with pytest.raises(AttributeError):
        _ = r.d
    with pytest.raises(KeyError):
        _ = r['d']
    with pytest.raises(KeyError):
        _ = r['a', 'd']
    # Column selection narrows which attributes remain visible.
    assert 'a' not in dir(r.a)
    assert 'c' not in dir(r['a', 'b'])
def test_expanding_agg():
    """Aggregation keeps shape, index, columns and dtypes — before and after tiling."""
    df = pd.DataFrame(np.random.rand(4, 3), columns=list('abc'))
    df2 = md.DataFrame(df, chunk_size=3)
    r = df2.expanding(3).agg('max')
    expected = df.expanding(3).agg('max')
    assert r.shape == df.shape
    assert r.index_value is df2.index_value
    pd.testing.assert_index_equal(r.columns_value.to_pandas(),
                                  expected.columns)
    pd.testing.assert_series_equal(r.dtypes, df2.dtypes)
    r = tile(r)
    # Each chunk mirrors the metadata of its input chunk.
    for c in r.chunks:
        assert c.shape == c.inputs[0].shape
        assert c.index_value is c.inputs[0].index_value
        pd.testing.assert_index_equal(c.columns_value.to_pandas(),
                                      expected.columns)
        pd.testing.assert_series_equal(c.dtypes, expected.dtypes)
    aggs = ['sum', 'count', 'min', 'max', 'mean', 'var', 'std']
    for a in aggs:
        r = getattr(df2.expanding(3), a)()
        assert r.op.func == [a]
| [
"noreply@github.com"
] | Fernadoo.noreply@github.com |
58ed44fde6b4a7efde10c71105ac6a59cce696d1 | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /HaxQfQTEpo7BFE5rz_22.py | ad90b5d4387874d64b88c56f046176ed2d7ab37e | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 636 | py |
def alternate_pos_neg(lst):
Length = len(lst)
if (0 in lst):
return False
if (Length < 2):
return True
Group_A = []
Group_B = []
Counter = 0
Length = len(lst)
while (Counter < Length):
Item = lst[Counter]
if (Counter % 2 == 0):
Group_A.append(Item)
Counter += 1
else:
Group_B.append(Item)
Counter += 1
Test_A1 = min(Group_A)
Test_A2 = max(Group_A)
Test_B1 = min(Group_B)
Test_B2 = max(Group_B)
if (Test_A1 > 0) and (Test_B2 < 0):
return True
elif (Test_B1 > 0) and (Test_A2 < 0):
return True
else:
return False
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
1f04965e75982f1b5afc12d0b634c0c48480c05a | 356b5455a4fb86c49c800a6181323b7fabef2b08 | /ppci/cli/java.py | fb06d54a7bd02da0b0ff2377aad9a91c0f00bb8b | [
"BSD-2-Clause"
] | permissive | obround/ppci | be7d1ce7832513629ee1301e7b67c0ceda38d668 | ba0840bc5f4ffd889f882a814fb26f88cd854379 | refs/heads/master | 2023-02-11T13:47:35.439871 | 2021-01-05T22:33:08 | 2021-01-05T22:33:08 | 327,131,704 | 0 | 0 | BSD-2-Clause | 2021-01-05T22:08:23 | 2021-01-05T22:08:23 | null | UTF-8 | Python | false | false | 2,137 | py | """ Java handling utility.
"""
import argparse
import sys
from .base import base_parser, march_parser, LogSetup, get_arch_from_args
from .compile_base import compile_parser, do_compile
from ..arch.jvm import read_class_file, class_to_ir, print_class_file
from ..arch.jvm import read_jar
from ..irutils import print_module
# Top-level parser: one sub-command per operation (compile / javap / jar).
parser = argparse.ArgumentParser(description=__doc__, parents=[base_parser])
subparsers = parser.add_subparsers(
    title="commands", description="possible commands", dest="command"
)
java_compile_parser = subparsers.add_parser(
    "compile",
    help="Compile a java class file ahead of time.",
    parents=[compile_parser, march_parser],
)
java_compile_parser.add_argument(
    "class_file",
    metavar="java class file",
    type=argparse.FileType("rb"),
    help="class file to compile",
)
dis_parser = subparsers.add_parser(
    "javap", help="Disassemble (javap) a java class."
)
dis_parser.add_argument(
    "class_file",
    metavar="java class file",
    type=argparse.FileType("rb"),
    help="class file to inspect",
)
jar_parser = subparsers.add_parser("jar", help="Explore jar file.")
jar_parser.add_argument(
    "jarfile", metavar="java jar file", help="jar file to inspect"
)
def java(args=None):
    """ Java command line utility. """
    args = parser.parse_args(args)
    with LogSetup(args) as log_setup:
        if args.command == "compile":
            # Ahead-of-time compile: class file -> IR -> target machine code.
            march = get_arch_from_args(args)
            class_file = read_class_file(args.class_file, verbose=True)
            args.class_file.close()
            ir_module = class_to_ir(class_file)
            print_module(ir_module, verify=False)
            ir_modules = [ir_module]
            do_compile(ir_modules, march, log_setup.reporter, log_setup.args)
        elif args.command == "javap":
            class_file = read_class_file(args.class_file)
            args.class_file.close()
            print_class_file(class_file)
        elif args.command == "jar":
            read_jar(args.jarfile)
        else:  # pragma: no cover
            # No sub-command given: show usage and exit with an error code.
            parser.print_usage()
            sys.exit(1)
if __name__ == "__main__":
    java()
| [
"windel@windel.nl"
] | windel@windel.nl |
a6a0f7b4beb4b4725dd0e18f9b8fc2ea3da17868 | ecc5e006b8336a444678ac0db9ef63fffc1307d3 | /backend/affinity_24197/wsgi.py | 862ceeb3ae2ce5ecf19d749a9131ca9f323b9128 | [] | no_license | crowdbotics-apps/affinity-24197 | f280bff6a9352c86e4df6306167cd322f1e798d4 | 699558a60e58f52c53b50650200b9c0ff27292ed | refs/heads/master | 2023-02-27T06:40:05.898471 | 2021-02-01T20:55:17 | 2021-02-01T20:55:17 | 335,081,472 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 405 | py | """
WSGI config for affinity_24197 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at this project's settings before the application is built.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'affinity_24197.settings')
application = get_wsgi_application()
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
487a87eccdc9fded9c01a8c11fc065a68e28b11e | 87140007e96872d3611f0778eb0eebe5799616d7 | /runs/src8-tgt4/par-bro-iter03000.cfg.py | 9b417d6b0b220f0e5ea3402e7eca3723d83b49d6 | [
"MIT"
] | permissive | janpawellek/broeval | 49499fa302abff916ffced201034d3b9394503cd | 57e31aa6e354d0bba88103b44910483e8d982d00 | refs/heads/master | 2021-01-11T12:19:13.619220 | 2016-12-20T16:23:27 | 2016-12-20T16:23:27 | 76,468,134 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 825 | py |
# Write results to this file
OUTFILE = 'runs/src8-tgt4/par-bro-iter03000.result.csv'
# Source computers for the requests
SOURCE = ['10.0.0.11', '10.0.0.12', '10.0.0.13', '10.0.0.14', '10.0.0.31', '10.0.0.32', '10.0.0.33', '10.0.0.34']
# Should Bro be enabled on the source machines?
SOURCE_BRO = [True, True, True, True, True, True, True, True]
# Target machines for the requests (aka server)
TARGET = ['10.0.0.21', '10.0.0.22', '10.0.0.23', '10.0.0.24']
# Should Bro be enabled on the target machines?
TARGET_BRO = [True, True, True, True]
# Connection mode (par = parallel, seq = sequential)
MODE = 'par'
# Number of evaluation repetitions to run
EPOCHS = 100
# Number of iterations to be run in each evaluation repetition
ITER = 3000
# Size of the file to be downloaded from target (in Bytes * 10^SIZE)
SIZE = 5
| [
"pawellek@stud.uni-heidelberg.de"
] | pawellek@stud.uni-heidelberg.de |
914826d139e292b9eaad569990e10a927b19d38f | f445450ac693b466ca20b42f1ac82071d32dd991 | /generated_tempdir_2019_09_15_163300/generated_part000375.py | aa7700fe091caac398fee48064c6a60790bbfb21 | [] | no_license | Upabjojr/rubi_generated | 76e43cbafe70b4e1516fb761cabd9e5257691374 | cd35e9e51722b04fb159ada3d5811d62a423e429 | refs/heads/master | 2020-07-25T17:26:19.227918 | 2019-09-15T15:41:48 | 2019-09-15T15:41:48 | 208,357,412 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,308 | py | from sympy.abc import *
from matchpy.matching.many_to_one import CommutativeMatcher
from matchpy import *
from matchpy.utils import VariableWithCount
from collections import deque
from multiset import Multiset
from sympy.integrals.rubi.constraints import *
from sympy.integrals.rubi.utility_function import *
from sympy.integrals.rubi.rules.miscellaneous_integration import *
from sympy import *
class CommutativeMatcher54899(CommutativeMatcher):
_instance = None
patterns = {
0: (0, Multiset({}), [
(VariableWithCount('i2.2.1.2.2.2.1.0', 1, 1, None), Mul),
(VariableWithCount('i2.2.1.2.2.2.1.0_1', 1, 1, S(1)), Mul)
])
}
subjects = {}
subjects_by_id = {}
bipartite = BipartiteGraph()
associative = Mul
max_optional_count = 1
anonymous_patterns = set()
def __init__(self):
self.add_subject(None)
@staticmethod
def get():
if CommutativeMatcher54899._instance is None:
CommutativeMatcher54899._instance = CommutativeMatcher54899()
return CommutativeMatcher54899._instance
@staticmethod
def get_match_iter(subject):
subjects = deque([subject]) if subject is not None else deque()
subst0 = Substitution()
# State 54898
return
yield
from collections import deque | [
"franz.bonazzi@gmail.com"
] | franz.bonazzi@gmail.com |
aae86dec0874ccba8a31b92ae911f0ffd56c20b4 | e233f9bf52ad0f88416962edd957a3c866c19b78 | /reagent/workflow/spark_utils.py | fbd43d68752885e10e28a6381fa830d1d76ccb3b | [
"BSD-3-Clause"
] | permissive | dwtcourses/ReAgent | 38c99dfe47adf1471620419f744cb4145f4f4151 | b9b54d4f30ff65cf1c54dc0cf90c938b48c44f90 | refs/heads/master | 2022-04-26T15:35:46.109984 | 2020-04-29T05:38:14 | 2020-04-29T05:40:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,232 | py | #!/usr/bin/env python3
import logging
import os
import pprint
from os.path import abspath, dirname, join
from typing import Dict, Optional
import reagent
from pyspark.sql import SparkSession
from pyspark.sql.functions import col
logger = logging.getLogger(__name__)
# This is where Scala preprocessing (i.e TimelineOperator) is located
SPARK_JAR_FROM_ROOT_DIR = "preprocessing/target/rl-preprocessing-1.1.jar"
"""
SPARK_JAR is abspath to the above jar file.
Assume file structure
ReAgent/
preprocessing/...
reagent/...
"""
SPARK_JAR = join(dirname(reagent.__file__), os.pardir, SPARK_JAR_FROM_ROOT_DIR)
DEFAULT_SPARK_CONFIG = {
"spark.master": "local[*]",
"spark.app.name": "ReAgent",
"spark.sql.session.timeZone": "UTC",
"spark.sql.warehouse.dir": abspath("spark-warehouse"),
# Set shuffle partitions to a low number, e.g. <= cores * 2 to speed
# things up, otherwise the tests will use the default 200 partitions
# and it will take a lot more time to complete
"spark.sql.shuffle.partitions": "12",
"spark.sql.execution.arrow.enabled": "true",
# For accessing timeline operator
"spark.driver.extraClassPath": SPARK_JAR,
# Same effect as builder.enableHiveSupport() [useful for test framework]
"spark.sql.catalogImplementation": "hive",
}
def get_spark_session(config: Optional[Dict[str, str]] = DEFAULT_SPARK_CONFIG):
logger.info(f"Building with config: \n{pprint.pformat(config)}")
spark = SparkSession.builder.enableHiveSupport()
if config is not None:
for k, v in config.items():
spark = spark.config(k, v)
spark = spark.getOrCreate()
spark.sparkContext.setLogLevel("ERROR")
return spark
def get_table_url(table_name: str) -> str:
spark = get_spark_session()
url = (
spark.sql(f"DESCRIBE FORMATTED {table_name}")
.filter((col("col_name") == "Location"))
.select("data_type")
.toPandas()
.astype(str)["data_type"]
.values[0]
)
# unfortunately url is file:/... or hdfs:/... not file:///...
# so we need to insert '//'
assert url.count(":") == 1, f"{url} has more than one :"
schema, path = url.split(":")
return f"{schema}://{path}"
| [
"facebook-github-bot@users.noreply.github.com"
] | facebook-github-bot@users.noreply.github.com |
4ea1512ea43feb40f0800c9b8e9a1a44af519500 | fac37d77a8d00e3d13106bcd728d51a455dd16f2 | /aspc.py | 42705d3d34343e8607e14290df10a67f66313486 | [] | no_license | anu-bioinfo/rosalind-4 | c6a628bba94f647cf4a34bdf505f1527af4346a9 | 3ddc659d44298f4dd4b5dde66d7833b4d27a2580 | refs/heads/master | 2020-03-25T13:47:39.521215 | 2014-09-14T02:30:54 | 2014-09-14T02:30:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 466 | py | #!/usr/bin/env python
from __future__ import print_function, division
import os
from math import factorial
def sum_of_combinations(n, m):
return sum(factorial(n) // (factorial(k) * factorial(n - k))
for k in range(m, n + 1)) % 1000000
if __name__ == "__main__":
with open(os.path.join('data', 'rosalind_aspc.txt')) as dataset:
n, m = [int(r) for r in dataset.readline().rstrip().split()]
print(int(sum_of_combinations(n, m)))
| [
"luiz.irber@gmail.com"
] | luiz.irber@gmail.com |
5e792aae5764fc35b1a8a29c85694f13c0e7eb99 | 53181572c4b22df4b569a9901bcd5347a3459499 | /ceit_191116/py200307/function_1_def.py | 7356a48cd8e325c795895d041c04ae7aae902bff | [] | no_license | edu-athensoft/ceit4101python_student | 80ef067b77421fce76d04f778d5c6de8b12f676c | 33cfa438c062d45e8d246b853e93d3c14b92ff2d | refs/heads/master | 2020-07-30T01:04:21.084384 | 2020-07-27T02:21:57 | 2020-07-27T02:21:57 | 210,027,310 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 327 | py | """
function definition, syntax
without parameters
def function_name(*parameters):
'''docstring'''
statement(s)
"""
# example 1 - my print a line
# function name: myprint
# parameters: ()
# define - user-defined
def myprint():
print("===")
print()
print("===")
# call a function
myprint()
| [
"lada314@gmail.com"
] | lada314@gmail.com |
d17f822579e1bc3a3eb2590493618026d10cf984 | 1a82a96e860d69d4f1ce862c1fa86919f356a7a8 | /finalcif/gui/new_key_dialog.py | 9ca94cbda85fd2c273bbcbb20e78edb0b3b9babc | [
"Beerware"
] | permissive | dkratzert/FinalCif | f9d27aaf4a5da2620c000e75cf7a59c78646c2dc | f97ccec1e8be9ce4034784fa52bbc5257d9a9e7d | refs/heads/master | 2023-08-31T10:24:08.258193 | 2023-08-31T10:22:20 | 2023-08-31T10:22:20 | 191,889,707 | 21 | 1 | NOASSERTION | 2023-05-21T15:07:51 | 2019-06-14T06:41:16 | Python | UTF-8 | Python | false | false | 1,569 | py | import re
from PyQt5 import QtWidgets
from PyQt5.QtCore import pyqtSignal
from finalcif.cif import all_cif_dicts
from finalcif.gui.new_key_dialog_ui import Ui_AddKeyWindow
class NewKey(QtWidgets.QMainWindow, Ui_AddKeyWindow):
new_key_added = pyqtSignal(str)
def __init__(self, parent=None):
super().__init__(parent=parent)
self.setupUi(self)
self.searchLineEdit.textChanged.connect(self.search)
self.addKeyPushButton.clicked.connect(self.add_keys)
self.cancelPushButton.clicked.connect(lambda: self.close())
self.keysListWidget.addItems(all_cif_dicts.cif_all_dict.keys())
for num in range(self.keysListWidget.count()):
item = self.keysListWidget.item(num)
helptext = all_cif_dicts.cif_all_dict.get(item.text())
item.setToolTip(helptext)
def add_keys(self):
for item in self.keysListWidget.selectedItems():
self.new_key_added.emit(item.text())
def search(self, searchtext: str):
self.keysListWidget.clear()
cif_keys = all_cif_dicts.cif_all_dict.keys()
if searchtext:
searchpattern = re.compile(f'.*{searchtext}.*', re.IGNORECASE)
searched = [x for x in cif_keys if searchpattern.match(x)]
self.keysListWidget.addItems(searched)
else:
self.keysListWidget.addItems(cif_keys)
if __name__ == '__main__':
import sys
from PyQt5.QtWidgets import QApplication
app = QApplication(sys.argv)
nk = NewKey()
nk.show()
sys.exit(app.exec_())
| [
"dkratzert@gmx.de"
] | dkratzert@gmx.de |
aef336f70ef99ce4f5996661124bb6ba969bbb5d | 5118ee8e91da7e76949cbb8d401462d5f402723d | /python_work_fs01/2018/0329/test3.py | 819908059479c589e80f0bf5257e3b48cb5e76da | [] | no_license | nakanishi-akitaka/python_backup | ad87941396e84cacd8540d66033b560025c4de92 | d11e5b9cf4dd57f51938d3638ff74c102380e624 | refs/heads/master | 2020-06-30T08:22:39.696697 | 2019-12-21T23:18:55 | 2019-12-21T23:18:55 | 200,776,513 | 0 | 8 | null | null | null | null | UTF-8 | Python | false | false | 3,751 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
from pymatgen import Composition, Element
from numpy import zeros, mean
trainFile = open("bandgapDFT.csv","r").readlines()
# input: pymatgen NO Composition object
# output: SOSEI vector
def naiveVectorize(composition):
vector = zeros((MAX_Z))
for element in composition:
# element HA GENSI. fraction HA SONO GENSI GA SOSEI NI HUKUMARERU WARIAI
fraction = composition.get_atomic_fraction(element)
vector[element.Z - 1] = fraction
return(vector)
materials = []
bandgaps = []
naiveFeatures = []
MAX_Z = 100
for line in trainFile:
split = str.split(line, ',')
material = Composition(split[0])
materials.append(material) # KAGAKUSIKI
naiveFeatures.append(naiveVectorize(material)) # TOKUCHORYO
bandgaps.append(float(split[1])) # band gap NO YOMIKOMI
baselineError = mean(abs(mean(bandgaps) - bandgaps))
print("Mean Absolute Error : " + str(round(baselineError, 3)) + " eV")
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import ShuffleSplit
from sklearn import metrics
from sklearn.linear_model import LinearRegression
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn import neural_network
# sklearn NO random forest KAIKI
lr = LinearRegression()
dtr = DecisionTreeRegressor()
rfr = RandomForestRegressor()
nn = neural_network.MLPRegressor(max_iter=1000)
estimators = {'LR ':lr,'DTR':dtr,'RFR':rfr,'NN ':nn}
# for k, v in estimators.items():
# print(k,v,type(k),type(v))
# KOUSA KENSHO SIMASU
cv = ShuffleSplit(n_splits=10, test_size=0.1, random_state=0)
for k,v in estimators.items():
sc = cross_val_score( v, naiveFeatures, bandgaps, cv=cv, scoring='r2')
print("R2 by "+k+" with composition data: "+ str(round(abs(mean(sc)), 3)) + " ")
pf= [[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[]]
npf=4
npf+=1
for material in materials:
theseFeatures = []
feature = [[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[]]
for element in material:
feature[ 1].append(float(element.Z))
feature[ 2].append(material.get_atomic_fraction(element)*material.num_atoms)
feature[ 3].append(float(element.group))
feature[ 4].append(float(element.row))
feature[ 5].append(element.X)
feature[ 6].append(float(element.max_oxidation_state))
feature[ 7].append(float(element.min_oxidation_state))
feature[ 8].append(float(str(element.atomic_mass).split("a")[0]))
feature[ 9].append(float(element.mendeleev_no))
feature[10].append(float(str(element.melting_point).split("K")[0]))
feature[11].append(float(str(element.molar_volume).split("c")[0]))
feature[12].append(float(str(element.thermal_conductivity).split("W")[0]))
feature[13].append(element.is_noble_gas)
feature[14].append(element.is_transition_metal)
feature[15].append(element.is_rare_earth_metal)
feature[16].append(element.is_metalloid)
feature[17].append(element.is_alkali)
feature[18].append(element.is_alkaline)
feature[19].append(element.is_halogen)
feature[20].append(element.is_chalcogen)
feature[21].append(element.is_lanthanoid)
feature[22].append(element.is_actinoid)
for i in range(1,npf):
theseFeatures.extend(feature[i])
pf[i].append(theseFeatures[:])
for k,v in estimators.items():
for i in range(1,npf):
sc = cross_val_score( v, pf[i], bandgaps, cv=cv, scoring='r2')
print("R2 by "+k+" with physical ", i, " data: ave ", round(sc.mean(), 3)," std ", round(sc.std(), 3))
| [
"noreply@github.com"
] | nakanishi-akitaka.noreply@github.com |
0463e4ee319c4bf4ebffe5cd815ab8f85b45adef | c6759b857e55991fea3ef0b465dbcee53fa38714 | /tools/nntool/nntool/importer/tflite2/handlers/backend/transpose.py | 23234c75127cac797ca8c1adf4ffb04f15ab2673 | [
"AGPL-3.0-or-later",
"AGPL-3.0-only",
"GPL-1.0-or-later",
"LicenseRef-scancode-other-copyleft",
"Apache-2.0"
] | permissive | GreenWaves-Technologies/gap_sdk | 1b343bba97b7a5ce62a24162bd72eef5cc67e269 | 3fea306d52ee33f923f2423c5a75d9eb1c07e904 | refs/heads/master | 2023-09-01T14:38:34.270427 | 2023-08-10T09:04:44 | 2023-08-10T09:04:44 | 133,324,605 | 145 | 96 | Apache-2.0 | 2023-08-27T19:03:52 | 2018-05-14T07:50:29 | C | UTF-8 | Python | false | false | 2,705 | py | # Copyright (C) 2020 GreenWaves Technologies, SAS
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import numpy as np
from nntool.graph.dim import Dim
from nntool.graph.types import ConstantInputNode, NNEdge, TransposeNode
from nntool.importer.common.constant_mixin import ConstantMixin
from nntool.importer.common.provisional_dim import ProvisionalDim
from nntool.importer.tflite2.common import LOG
from nntool.importer.tflite2.common.tflite_node import TFLiteNode
from ..backend_handler import BackendHandler
from ..handler import tflite_op
@tflite_op("TRANSPOSE")
class Transpose(BackendHandler, ConstantMixin):
@classmethod
def _common(cls, node: TFLiteNode, **kwargs):
G = kwargs['G']
opts = kwargs['opts']
all_nodes = kwargs['all_nodes']
inputs = [all_nodes[t] for t in node.input]
x = inputs[0]
x_shape = x[2].shape
new_axes = {}
for idx, dim in enumerate(x_shape):
if dim is not None:
new_axes[idx] = len(new_axes)
ptranspose = cls._verify_constant(inputs[1])
pout_shape = [x_shape[dim] for dim in ptranspose]
transpose = [new_axes[axis] for axis in ptranspose if x_shape[axis] is not None]
node.input[1].used = True
if cls.is_constant(x):
LOG.info("reducing %s to a constant", node.name)
val = np.transpose(cls.get_constant(x), ptranspose)
params = ConstantInputNode(node.name, value=np.transpose(val, ptranspose),
dims=Dim.unnamed(val.shape))
else:
params = TransposeNode(node.name, transpose=transpose)
if opts.get('load_quantization'):
G.quantization[params.name] = cls.load_tf_quantization([node.input[0]], node.output)
G.add_edge(NNEdge(from_node=x[0], to_node=params, from_idx=x[1], to_idx=0))
all_nodes[node.output[0]] = (params, 0, ProvisionalDim(pout_shape))
return params
@classmethod
def version_1(cls, node: TFLiteNode, **kwargs):
return cls._common(node, **kwargs)
| [
"yao.zhang@greenwaves-technologies.com"
] | yao.zhang@greenwaves-technologies.com |
aa65c39966c5c3996b0e4ccc5cd54969b654a3b4 | b3d552675b36cb88a1388fcfc531e497ad7cbee9 | /day8/form_validater/cms/models.py | bd100b3fda14c14a849c139d0223a34e3b01935a | [] | no_license | gaohj/1902_django | 3cea1f0935fd983f25c6fd832b103ac5165a2e30 | 822af7b42120c6edc699bf97c800887ff84f5621 | refs/heads/master | 2022-12-11T10:02:50.233398 | 2019-11-26T08:33:38 | 2019-11-26T08:33:38 | 209,241,390 | 2 | 0 | null | 2022-12-08T07:28:24 | 2019-09-18T07:05:48 | Python | UTF-8 | Python | false | false | 179 | py | from django.db import models
# Create your models here.
class User(models.Model):
username = models.CharField(max_length=100)
telephone = models.CharField(max_length=100) | [
"gaohj@126.com"
] | gaohj@126.com |
582cd839c799c316ee8471223a0c1ea010ab379b | 971e0efcc68b8f7cfb1040c38008426f7bcf9d2e | /tests/artificial/transf_Quantization/trend_LinearTrend/cycle_0/ar_12/test_artificial_1024_Quantization_LinearTrend_0_12_0.py | 77be5894f2be385b71feb3c48605ac0b18920a78 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | antoinecarme/pyaf | a105d172c2e7544f8d580d75f28b751351dd83b6 | b12db77cb3fa9292e774b2b33db8ce732647c35e | refs/heads/master | 2023-09-01T09:30:59.967219 | 2023-07-28T20:15:53 | 2023-07-28T20:15:53 | 70,790,978 | 457 | 77 | BSD-3-Clause | 2023-03-08T21:45:40 | 2016-10-13T09:30:30 | Python | UTF-8 | Python | false | false | 270 | py | import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 1024 , FREQ = 'D', seed = 0, trendtype = "LinearTrend", cycle_length = 0, transform = "Quantization", sigma = 0.0, exog_count = 0, ar_order = 12); | [
"antoine.carme@laposte.net"
] | antoine.carme@laposte.net |
598ac98ebf7a8341d74571338f3b6e48b6e28ed2 | 708074835900ae623239ce3c0d1e6f948b799fd0 | /ftp-2/ftp_client/ftp_client.py | 0656e0224631f01bd97e806116aa4fd738f037a1 | [] | no_license | hukeyy/learn_python | 66688bcbaa43d79775030d2876979bbda08892ef | c71a37da88b089316536587ed47d32405bd987a3 | refs/heads/master | 2020-03-21T11:07:24.049328 | 2018-12-25T11:59:17 | 2018-12-25T11:59:17 | 138,490,613 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 874 | py | #!/usr/bin/python3
# -*- coding: utf-8 -*-
# Author: hkey
import os, sys
import socket
class MyClient:
def __init__(self, ip_port):
self.client = socket.socket()
self.ip_port = ip_port
def connect(self):
self.client.connect(self.ip_port)
def start(self):
self.connect()
while True:
print('注册(register)\n登录(login)')
auth_type = input('>>>').strip()
if auth_type == 'register' or auth_type == 'login':
user = input('用户名:')
pwd = input('密码:')
auth_info = '%s:%s:%s' %(auth_type, user, pwd)
self.client.sendall(auth_info.encode())
status_code = self.client.recv(1024)
else:
print('\033[31;1m输入错误,请重新输入.\033[0m')
| [
"mickey.20@qq.com"
] | mickey.20@qq.com |
4bcc563980a7cf202366282871a65a1d905a2c74 | 143e7c46515697d009bdb0bb4825db18942db002 | /movies/forms.py | a59290839cee1ac462126f093ba2d4be41f147bc | [] | no_license | yoonwoo123/project_08 | e171aec3729b5d2686b3d10769c2bbd84a2b90ad | b978e86696b84c7e505263ad0fa776edb93d0a58 | refs/heads/master | 2020-09-01T14:18:29.392806 | 2019-04-04T05:15:16 | 2019-04-04T05:15:16 | 218,978,077 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 458 | py | from django import forms
from .models import Movie
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Submit
class MovieForm(forms.ModelForm):
class Meta:
model = Movie
fields = '__all__'
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.form_method = 'POST'
self.helper.add_input(Submit('submit', 'Submit')) | [
"lkkjasd@korea.ac.kr"
] | lkkjasd@korea.ac.kr |
9d503b9b1c3674a28181a2570002021ac4478d1d | 210ecd63113ce90c5f09bc2b09db3e80ff98117a | /AbletonLive9_RemoteScripts/Axiom_49_61_Classic/Axiom.py | 0ab7f4b3308a768b95753f436555c09b9f96c849 | [] | no_license | ajasver/MidiScripts | 86a765b8568657633305541c46ccc1fd1ea34501 | f727a2e63c95a9c5e980a0738deb0049363ba536 | refs/heads/master | 2021-01-13T02:03:55.078132 | 2015-07-16T18:27:30 | 2015-07-16T18:27:30 | 38,516,112 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,330 | py | #Embedded file name: /Users/versonator/Jenkins/live/Binary/Core_Release_64_static/midi-remote-scripts/Axiom_49_61_Classic/Axiom.py
from _Axiom.consts import *
from _Axiom.Transport import Transport
from _Axiom.Pads import Pads
from _Axiom.Encoders import Encoders
from SliderSection import SliderSection
import Live
import MidiRemoteScript
class Axiom:
""" A controller script for the M-Audio Axiom Keyboard/Controller series """
def __init__(self, c_instance):
self.__c_instance = c_instance
self.__current_track = self.song().view.selected_track
self.__current_device = self.__current_track.view.selected_device
self.song().add_visible_tracks_listener(self.__tracks_changed)
self.__transport_unit = Transport(self)
self.__encoder_unit = Encoders(self, True)
self.__slider_unit = SliderSection(self)
self.__pad_unit = Pads(self)
def application(self):
"""returns a reference to the application that we are running in
"""
return Live.Application.get_application()
def song(self):
"""returns a reference to the Live song instance that we do control
"""
return self.__c_instance.song()
def disconnect(self):
"""Live -> Script
Called right before we get disconnected from Live.
"""
self.song().remove_visible_tracks_listener(self.__tracks_changed)
self.__encoder_unit.disconnect()
def can_lock_to_devices(self):
return True
def suggest_input_port(self):
"""Live -> Script
Live can ask the script for an input port name to find a suitable one.
"""
return str('USB Axiom')
def suggest_output_port(self):
"""Live -> Script
Live can ask the script for an output port name to find a suitable one.
"""
return str('USB Axiom')
def suggest_map_mode(self, cc_no, channel):
"""Live -> Script
Live can ask the script for a suitable mapping mode for a given CC.
"""
suggested_map_mode = Live.MidiMap.MapMode.absolute
if cc_no in AXIOM_ENCODERS:
suggested_map_mode = Live.MidiMap.MapMode.relative_smooth_binary_offset
return suggested_map_mode
def show_message(self, message):
self.__c_instance.show_message(message)
def supports_pad_translation(self):
return True
def connect_script_instances(self, instanciated_scripts):
"""Called by the Application as soon as all scripts are initialized.
You can connect yourself to other running scripts here, as we do it
connect the extension modules (MackieControlXTs).
"""
pass
def request_rebuild_midi_map(self):
"""Script -> Live
When the internal MIDI controller has changed in a way that you need to rebuild
the MIDI mappings, request a rebuild by calling this function
This is processed as a request, to be sure that its not too often called, because
its time-critical.
"""
self.__c_instance.request_rebuild_midi_map()
def send_midi(self, midi_event_bytes):
"""Script -> Live
Use this function to send MIDI events through Live to the _real_ MIDI devices
that this script is assigned to.
"""
self.__c_instance.send_midi(midi_event_bytes)
def refresh_state(self):
"""Live -> Script
Send out MIDI to completely update the attached MIDI controller.
Will be called when requested by the user, after for example having reconnected
the MIDI cables...
"""
pass
def build_midi_map(self, midi_map_handle):
"""Live -> Script
Build DeviceParameter Mappings, that are processed in Audio time, or
forward MIDI messages explicitly to our receive_midi_functions.
Which means that when you are not forwarding MIDI, nor mapping parameters, you will
never get any MIDI messages at all.
"""
script_handle = self.__c_instance.handle()
self.__transport_unit.build_midi_map(script_handle, midi_map_handle)
self.__encoder_unit.build_midi_map(script_handle, midi_map_handle)
self.__slider_unit.build_midi_map(script_handle, midi_map_handle)
self.__pad_unit.build_midi_map(script_handle, midi_map_handle)
self.__c_instance.set_pad_translation(PAD_TRANSLATION)
def update_display(self):
"""Live -> Script
Aka on_timer. Called every 100 ms and should be used to update display relevant
parts of the controller
"""
if self.__transport_unit:
self.__transport_unit.refresh_state()
def receive_midi(self, midi_bytes):
"""Live -> Script
MIDI messages are only received through this function, when explicitly
forwarded in 'build_midi_map'.
"""
if midi_bytes[0] & 240 == CC_STATUS:
channel = midi_bytes[0] & 15
cc_no = midi_bytes[1]
cc_value = midi_bytes[2]
if list(AXIOM_TRANSPORT).count(cc_no) > 0:
self.__transport_unit.receive_midi_cc(cc_no, cc_value)
elif list(AXIOM_BUTTONS).count(cc_no) > 0:
self.__slider_unit.receive_midi_cc(cc_no, cc_value, channel)
elif list(AXIOM_ENCODERS).count(cc_no) > 0:
self.__encoder_unit.receive_midi_cc(cc_no, cc_value, channel)
elif list(AXIOM_PADS).count(cc_no) > 0:
self.__pad_unit.receive_midi_cc(cc_no, cc_value, channel)
elif midi_bytes[0] == 240:
pass
def lock_to_device(self, device):
self.__encoder_unit.lock_to_device(device)
def unlock_from_device(self, device):
self.__encoder_unit.unlock_from_device(device)
def set_appointed_device(self, device):
self.__encoder_unit.set_appointed_device(device)
def __tracks_changed(self):
self.request_rebuild_midi_map()
def bank_changed(self, new_bank):
if self.__encoder_unit.set_bank(new_bank):
self.request_rebuild_midi_map()
def restore_bank(self, bank):
self.__encoder_unit.restore_bank(bank)
self.request_rebuild_midi_map()
def instance_identifier(self):
return self.__c_instance.instance_identifier() | [
"admin@scoopler.com"
] | admin@scoopler.com |
5f1b314f8c6a6fa4da53ae06524ff29cf5a0f199 | ecd9b7e440f002a00d3a8a8e62de208508fadb75 | /flask/docker_flask_simple/app.py | cb6628b3c9dbfca90bb81b8a092b41c7c253c91b | [] | no_license | enderst3/challenges | ce2248eabc36e64dbd84dffad3fea176108bc3f3 | 0b13195cc53d5fc25f7bedd3dad1c14051e216cc | refs/heads/master | 2023-01-09T00:47:51.129159 | 2020-04-20T22:09:00 | 2020-04-20T22:09:00 | 77,492,197 | 0 | 1 | null | 2023-01-03T22:44:04 | 2016-12-28T01:08:14 | null | UTF-8 | Python | false | false | 180 | py | from flask import Flask
app = Flask(__name__)
@app.route('/')
def hello_world():
return 'Flask Docerized'
if __name__ == '__main__':
app.run(debug=True, host='0.0.0.0')
| [
"enderst3@gmail.com"
] | enderst3@gmail.com |
e76a7e53420bb1d7554e59aa283fc3ad1b29a39a | 268568ff2d483f39de78a5b29d941ce499cace33 | /external-deps/python-language-server/pyls/_utils.py | b1a3bd96c8db094dcff95ae71fcefcf25cd50da1 | [
"MIT",
"Python-2.0"
] | permissive | MarkMoretto/spyder-master | 61e7f8007144562978da9c6adecaa3022758c56f | 5f8c64edc0bbd203a97607950b53a9fcec9d2f0b | refs/heads/master | 2023-01-10T16:34:37.825886 | 2020-08-07T19:07:56 | 2020-08-07T19:07:56 | 285,901,914 | 2 | 1 | MIT | 2022-12-20T13:46:41 | 2020-08-07T19:03:37 | Python | UTF-8 | Python | false | false | 6,670 | py | # Copyright 2017 Palantir Technologies, Inc.
import functools
import inspect
import logging
import os
import sys
import threading
import jedi
PY2 = sys.version_info.major == 2
JEDI_VERSION = jedi.__version__
if PY2:
import pathlib2 as pathlib
else:
import pathlib
log = logging.getLogger(__name__)
def debounce(interval_s, keyed_by=None):
"""Debounce calls to this function until interval_s seconds have passed."""
def wrapper(func):
timers = {}
lock = threading.Lock()
@functools.wraps(func)
def debounced(*args, **kwargs):
call_args = inspect.getcallargs(func, *args, **kwargs)
key = call_args[keyed_by] if keyed_by else None
def run():
with lock:
del timers[key]
return func(*args, **kwargs)
with lock:
old_timer = timers.get(key)
if old_timer:
old_timer.cancel()
timer = threading.Timer(interval_s, run)
timers[key] = timer
timer.start()
return debounced
return wrapper
def find_parents(root, path, names):
"""Find files matching the given names relative to the given path.
Args:
path (str): The file path to start searching up from.
names (List[str]): The file/directory names to look for.
root (str): The directory at which to stop recursing upwards.
Note:
The path MUST be within the root.
"""
if not root:
return []
if not os.path.commonprefix((root, path)):
log.warning("Path %s not in %s", path, root)
return []
# Split the relative by directory, generate all the parent directories, then check each of them.
# This avoids running a loop that has different base-cases for unix/windows
# e.g. /a/b and /a/b/c/d/e.py -> ['/a/b', 'c', 'd']
dirs = [root] + os.path.relpath(os.path.dirname(path), root).split(os.path.sep)
# Search each of /a/b/c, /a/b, /a
while dirs:
search_dir = os.path.join(*dirs)
existing = list(filter(os.path.exists, [os.path.join(search_dir, n) for n in names]))
if existing:
return existing
dirs.pop()
# Otherwise nothing
return []
def match_uri_to_workspace(uri, workspaces):
if uri is None:
return None
max_len, chosen_workspace = -1, None
path = pathlib.Path(uri).parts
for workspace in workspaces:
try:
workspace_parts = pathlib.Path(workspace).parts
except TypeError:
# This can happen in Python2 if 'value' is a subclass of string
workspace_parts = pathlib.Path(unicode(workspace)).parts
if len(workspace_parts) > len(path):
continue
match_len = 0
for workspace_part, path_part in zip(workspace_parts, path):
if workspace_part == path_part:
match_len += 1
if match_len > 0:
if match_len > max_len:
max_len = match_len
chosen_workspace = workspace
return chosen_workspace
def list_to_string(value):
return ",".join(value) if isinstance(value, list) else value
def merge_dicts(dict_a, dict_b):
"""Recursively merge dictionary b into dictionary a.
If override_nones is True, then
"""
def _merge_dicts_(a, b):
for key in set(a.keys()).union(b.keys()):
if key in a and key in b:
if isinstance(a[key], dict) and isinstance(b[key], dict):
yield (key, dict(_merge_dicts_(a[key], b[key])))
elif b[key] is not None:
yield (key, b[key])
else:
yield (key, a[key])
elif key in a:
yield (key, a[key])
elif b[key] is not None:
yield (key, b[key])
return dict(_merge_dicts_(dict_a, dict_b))
def format_docstring(contents):
"""Python doc strings come in a number of formats, but LSP wants markdown.
Until we can find a fast enough way of discovering and parsing each format,
we can do a little better by at least preserving indentation.
"""
contents = contents.replace('\t', u'\u00A0' * 4)
contents = contents.replace(' ', u'\u00A0' * 2)
return contents
def clip_column(column, lines, line_number):
"""
Normalise the position as per the LSP that accepts character positions > line length
https://microsoft.github.io/language-server-protocol/specification#position
"""
max_column = len(lines[line_number].rstrip('\r\n')) if len(lines) > line_number else 0
return min(column, max_column)
def position_to_jedi_linecolumn(document, position):
"""
Convert the LSP format 'line', 'character' to Jedi's 'line', 'column'
https://microsoft.github.io/language-server-protocol/specification#position
"""
code_position = {}
if position:
code_position = {'line': position['line'] + 1,
'column': clip_column(position['character'],
document.lines,
position['line'])}
return code_position
if os.name == 'nt':
import ctypes
kernel32 = ctypes.windll.kernel32
PROCESS_QUERY_INFROMATION = 0x1000
def is_process_alive(pid):
"""Check whether the process with the given pid is still alive.
Running `os.kill()` on Windows always exits the process, so it can't be used to check for an alive process.
see: https://docs.python.org/3/library/os.html?highlight=os%20kill#os.kill
Hence ctypes is used to check for the process directly via windows API avoiding any other 3rd-party dependency.
Args:
pid (int): process ID
Returns:
bool: False if the process is not alive or don't have permission to check, True otherwise.
"""
process = kernel32.OpenProcess(PROCESS_QUERY_INFROMATION, 0, pid)
if process != 0:
kernel32.CloseHandle(process)
return True
return False
else:
import errno
def is_process_alive(pid):
"""Check whether the process with the given pid is still alive.
Args:
pid (int): process ID
Returns:
bool: False if the process is not alive or don't have permission to check, True otherwise.
"""
if pid < 0:
return False
try:
os.kill(pid, 0)
except OSError as e:
return e.errno == errno.EPERM
else:
return True
| [
"mark.moretto@forcepoint.com"
] | mark.moretto@forcepoint.com |
205911f940c06e8ae528b745e89315ebdc625763 | 3546dd5dbcffc8509440c820faa7cf28080c5df7 | /python35/Lib/site-packages/scipy/spatial/_procrustes.py | e94af255f13055b789b6289a7e0f6872ad2a0b60 | [
"Apache-2.0",
"MIT",
"BSD-3-Clause",
"LGPL-2.1-only"
] | permissive | Matchoc/python_env | 55ad609c8270cc6148eda22d37f36709d73b3652 | 859d84d1717a265a4085ad29706b12c19c62d36f | refs/heads/master | 2022-02-13T11:05:51.825544 | 2020-06-05T02:42:08 | 2020-06-05T02:42:08 | 75,793,921 | 0 | 1 | Apache-2.0 | 2018-12-14T07:30:28 | 2016-12-07T03:06:13 | Python | UTF-8 | Python | false | false | 4,498 | py | """
This module provides functions to perform full Procrustes analysis.
This code was originally written by Justin Kucynski and ported over from
scikit-bio by Yoshiki Vazquez-Baeza.
"""
from __future__ import absolute_import, division, print_function
import numpy as np
from scipy.linalg import orthogonal_procrustes
__all__ = ['procrustes']
def procrustes(data1, data2):
r"""Procrustes analysis, a similarity test for two data sets.
Each input matrix is a set of points or vectors (the rows of the matrix).
The dimension of the space is the number of columns of each matrix. Given
two identically sized matrices, procrustes standardizes both such that:
- :math:`tr(AA^{T}) = 1`.
- Both sets of points are centered around the origin.
Procrustes ([1]_, [2]_) then applies the optimal transform to the second
matrix (including scaling/dilation, rotations, and reflections) to minimize
:math:`M^{2}=\sum(data1-data2)^{2}`, or the sum of the squares of the
pointwise differences between the two input datasets.
This function was not designed to handle datasets with different numbers of
datapoints (rows). If two data sets have different dimensionality
(different number of columns), simply add columns of zeros the smaller of
the two.
Parameters
----------
data1 : array_like
Matrix, n rows represent points in k (columns) space `data1` is the
reference data, after it is standardised, the data from `data2` will be
transformed to fit the pattern in `data1` (must have >1 unique points).
data2 : array_like
n rows of data in k space to be fit to `data1`. Must be the same
shape ``(numrows, numcols)`` as data1 (must have >1 unique points).
Returns
-------
mtx1 : array_like
A standardized version of `data1`.
mtx2 : array_like
The orientation of `data2` that best fits `data1`. Centered, but not
necessarily :math:`tr(AA^{T}) = 1`.
disparity : float
:math:`M^{2}` as defined above.
Raises
------
ValueError
If the input arrays are not two-dimensional.
If the shape of the input arrays is different.
If the input arrays have zero columns or zero rows.
See Also
--------
scipy.linalg.orthogonal_procrustes
Notes
-----
- The disparity should not depend on the order of the input matrices, but
the output matrices will, as only the first output matrix is guaranteed
to be scaled such that :math:`tr(AA^{T}) = 1`.
- Duplicate data points are generally ok, duplicating a data point will
increase its effect on the procrustes fit.
- The disparity scales as the number of points per input matrix.
References
----------
.. [1] Krzanowski, W. J. (2000). "Principles of Multivariate analysis".
.. [2] Gower, J. C. (1975). "Generalized procrustes analysis".
Examples
--------
>>> from scipy.spatial import procrustes
The matrix ``b`` is a rotated, shifted, scaled and mirrored version of
``a`` here:
>>> a = np.array([[1, 3], [1, 2], [1, 1], [2, 1]], 'd')
>>> b = np.array([[4, -2], [4, -4], [4, -6], [2, -6]], 'd')
>>> mtx1, mtx2, disparity = procrustes(a, b)
>>> round(disparity)
0.0
"""
mtx1 = np.array(data1, dtype=np.double, copy=True)
mtx2 = np.array(data2, dtype=np.double, copy=True)
if mtx1.ndim != 2 or mtx2.ndim != 2:
raise ValueError("Input matrices must be two-dimensional")
if mtx1.shape != mtx2.shape:
raise ValueError("Input matrices must be of same shape")
if mtx1.size == 0:
raise ValueError("Input matrices must be >0 rows and >0 cols")
# translate all the data to the origin
mtx1 -= np.mean(mtx1, 0)
mtx2 -= np.mean(mtx2, 0)
norm1 = np.linalg.norm(mtx1)
norm2 = np.linalg.norm(mtx2)
if norm1 == 0 or norm2 == 0:
raise ValueError("Input matrices must contain >1 unique points")
# change scaling of data (in rows) such that trace(mtx*mtx') = 1
mtx1 /= norm1
mtx2 /= norm2
# transform mtx2 to minimize disparity
R, s = orthogonal_procrustes(mtx1, mtx2)
mtx2 = np.dot(mtx2, R.T) * s
# measure the dissimilarity between the two datasets
disparity = np.sum(np.square(mtx1 - mtx2))
return mtx1, mtx2, disparity
| [
"matchoc@hotmail.com"
] | matchoc@hotmail.com |
99a4eea6eda5b5fb118f8699d09cd04ae35731c3 | 2f898bb332097d11f321186207e94f6d156587f3 | /audio/generar_lista.py | 19b54a1a93b7ab07278abcc704016addeba3b756 | [
"MIT"
] | permissive | miltonsarria/teaching | ad2d07e9cfbfcf272c4b2fbef47321eae765a605 | 7a2b4e6c74d9f11562dfe34722e607ca081c1681 | refs/heads/master | 2022-01-05T05:58:13.163155 | 2019-05-02T20:45:46 | 2019-05-02T20:45:46 | 102,375,690 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 372 | py | import os
data_dir='/home/sarria/python/audio/audio'
######################
# frequency range to plot
archivo=open('lista.txt','w')
for dirpath, dirnames, filenames in os.walk(data_dir):
for name in filenames:
if name.split('.')[1]=='wav':
fullname = os.path.join(dirpath,name)
archivo.write(fullname+'\n')
archivo.close()
| [
"miltonsarria@gmail.com"
] | miltonsarria@gmail.com |
6e2543b569d01c6fb691b474f00c0e8cba92b412 | f305f84ea6f721c2391300f0a60e21d2ce14f2a5 | /22_专题/字典序列删除/No.205 マージして辞書順最小.py | eaa9b544e0e0d282d781269d4968c7757f805bb3 | [] | no_license | 981377660LMT/algorithm-study | f2ada3e6959338ae1bc21934a84f7314a8ecff82 | 7e79e26bb8f641868561b186e34c1127ed63c9e0 | refs/heads/master | 2023-09-01T18:26:16.525579 | 2023-09-01T12:21:58 | 2023-09-01T12:21:58 | 385,861,235 | 225 | 24 | null | null | null | null | UTF-8 | Python | false | false | 586 | py | # https://yukicoder.me/problems/no/205
from heapq import heapify, heappop, heappush
from typing import List
def minLexMerge(words: List[str]) -> str:
"""字典序最小的合并字符串"""
pq = [w + chr(200) for w in words]
heapify(pq)
res = []
while pq:
min_ = heappop(pq)
res.append(min_[0])
min_ = min_[1:]
if len(min_) >= 2:
heappush(pq, min_)
return "".join(res)
if __name__ == "__main__":
N = int(input())
words = [input() for _ in range(N)]
print(minLexMerge(words))
| [
"lmt2818088@gmail.com"
] | lmt2818088@gmail.com |
04354364843fb4756d535de602e0fdfbd92a9c56 | 09e57dd1374713f06b70d7b37a580130d9bbab0d | /benchmark/startQiskit_Class592.py | 0d982aed897b3624ea008b4b08f71e31f6e89d1f | [
"BSD-3-Clause"
] | permissive | UCLA-SEAL/QDiff | ad53650034897abb5941e74539e3aee8edb600ab | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | refs/heads/main | 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,619 | py | # qubit number=3
# total number=16
import numpy as np
from qiskit import QuantumCircuit, execute, Aer, QuantumRegister, ClassicalRegister, transpile, BasicAer, IBMQ
import networkx as nx
from qiskit.visualization import plot_histogram
from typing import *
from pprint import pprint
from math import log2
from collections import Counter
from qiskit.test.mock import FakeVigo, FakeYorktown
kernel = 'circuit/bernstein'
def make_circuit(n:int) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n,"qc")
prog = QuantumCircuit(input_qubit)
prog.h(input_qubit[1]) # number=2
prog.h(input_qubit[2]) # number=3
prog.h(input_qubit[3]) # number=4
prog.y(input_qubit[3]) # number=5
for edge in E:
k = edge[0]
l = edge[1]
prog.cp(-2 * gamma, input_qubit[k-1], input_qubit[l-1])
prog.p(gamma, k)
prog.p(gamma, l)
prog.rx(2 * beta, range(len(V)))
prog.swap(input_qubit[1],input_qubit[0]) # number=6
prog.swap(input_qubit[1],input_qubit[0]) # number=7
prog.cx(input_qubit[0],input_qubit[1]) # number=13
prog.x(input_qubit[1]) # number=14
prog.cx(input_qubit[0],input_qubit[1]) # number=15
prog.cx(input_qubit[0],input_qubit[1]) # number=10
prog.x(input_qubit[1]) # number=11
prog.cx(input_qubit[0],input_qubit[1]) # number=12
# circuit end
return prog
if __name__ == '__main__':
n = 4
V = np.arange(0, n, 1)
E = [(0, 1, 1.0), (0, 2, 1.0), (1, 2, 1.0), (3, 2, 1.0), (3, 1, 1.0)]
G = nx.Graph()
G.add_nodes_from(V)
G.add_weighted_edges_from(E)
step_size = 0.1
a_gamma = np.arange(0, np.pi, step_size)
a_beta = np.arange(0, np.pi, step_size)
a_gamma, a_beta = np.meshgrid(a_gamma, a_beta)
F1 = 3 - (np.sin(2 * a_beta) ** 2 * np.sin(2 * a_gamma) ** 2 - 0.5 * np.sin(4 * a_beta) * np.sin(4 * a_gamma)) * (
1 + np.cos(4 * a_gamma) ** 2)
result = np.where(F1 == np.amax(F1))
a = list(zip(result[0], result[1]))[0]
gamma = a[0] * step_size
beta = a[1] * step_size
prog = make_circuit(4)
sample_shot =3962
writefile = open("../data/startQiskit_Class592.csv", "w")
# prog.draw('mpl', filename=(kernel + '.png'))
backend = BasicAer.get_backend('statevector_simulator')
circuit1 = transpile(prog, FakeYorktown())
prog = circuit1
info = execute(prog,backend=backend, shots=sample_shot).result().get_counts()
print(info, file=writefile)
print("results end", file=writefile)
print(circuit1.depth(), file=writefile)
print(circuit1, file=writefile)
writefile.close()
| [
"wangjiyuan123@yeah.net"
] | wangjiyuan123@yeah.net |
0db7239c4f03d3370f18ba124dbfcb684296c2b2 | 962bc309985d39c115948dc788027dd860491ec8 | /src/bioservices/tcga.py | 67281041e4d623bedabba038cd7b7b6e13f0a6f3 | [] | no_license | kp14/bioservices | 6d03808ca5d14c84b708a819c8e4ad3ba3cb8931 | a0e7f0f7aee3c8afc41ebebeb70d04dd02155e4f | refs/heads/master | 2020-12-25T20:43:15.568664 | 2015-11-04T15:46:04 | 2015-11-04T15:46:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,865 | py | # -*- python -*-
#
# This file is part of bioservices software
#
# Copyright (c) 2013-2014 - EBI-EMBL
#
# File author(s):
# Thomas Cokelaer <cokelaer@ebi.ac.uk>
#
#
# Distributed under the GPLv3 License.
# See accompanying file LICENSE.txt or copy at
# http://www.gnu.org/licenses/gpl-3.0.html
#
# website: https://github.com/cokelaer/bioservices
# documentation: http://packages.python.org/bioservices
#
##############################################################################
# $Id$
"""Interface to the quickGO interface
.. topic:: What is quickGO
:URL: http://www.ebi.ac.uk/QuickGO/
:Service: http://www.ebi.ac.uk/QuickGO/WebServices.html
.. highlights::
"QuickGO is a fast web-based browser for Gene Ontology terms and
annotations, which is provided by the UniProt-GOA project at the EBI. "
-- from QuickGO home page, Dec 2012
"""
from __future__ import print_function
from bioservices.services import REST
__all__ = ["TCGA"]
class TCGA(REST):
"""Interface to the `TCGA`_ service
DRAFT in progress
https://wiki.nci.nih.gov/display/TCGA/TCGA+Annotations+Web+Service+User%27s+Guide
"""
def __init__(self, verbose=False, cache=False):
""".. rubric:: Constructor
:param bool verbose: print informative messages.
"""
super(TCGA,
self).__init__(url="http://tcga-data.nci.nih.gov",
name="TCGA", verbose=verbose, cache=cache)
def search_annotations(self, item=None, annotationId):
"""Obtain Term information
"""
params = {'item':item, 'annotationId': annotationId}
res = self.http_get("annotations/resources/searchannotations/json",
frmt="json", params=params)
return res
def view_annotations(self):
raise NotImplementedError
| [
"cokelaer@gmail.com"
] | cokelaer@gmail.com |
78e539bc4df5bd03fd0482b41756fceec1fe2904 | 0aedbdf98c00ff0c74a6fa759d4991563b957cde | /TwitterStream.py | 5061256bb6e4ebb9d7cc5fee8bd48a203c55c4fe | [] | no_license | seethakris/ASPP2018 | b953b0cc6684f9a9c667e85b6a37de73a2f0f41a | 44aca0d6f3d3a1dac3bd9348477d23adc7b53f43 | refs/heads/master | 2021-09-04T11:07:51.423377 | 2018-01-18T04:24:54 | 2018-01-18T04:24:54 | 117,678,504 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,805 | py | # Import the necessary methods from tweepy library
from tweepy.streaming import StreamListener
from tweepy import OAuthHandler
from tweepy import Stream
from time import sleep
from collections import deque # list-like container with fast appends and pops on either end
# Variables that contains the user credentials to access Twitter API
access_token = "YOURKEYHERE"
access_token_secret = "YOURKEYHERE"
consumer_key = "YOURKEYHERE"
consumer_secret = "YOURKEYHERE"
# This is a basic listener that just prints received tweets to stdout.
class StdOutListener(StreamListener):
"""
Class with functions to stream tweets
"""
def __init__(self, api=None, maxlength=int(1e5)):
super(StdOutListener, self).__init__()
self.queue = deque(maxlen=maxlength)
def on_status(self, status):
if status.lang.find('en') < 0:
return
if status.coordinates is not None and status.entities.get('hashtags') != []:
self.queue.append(status)
def on_error(self, status_code):
print('Error:', status_code)
return False
def gettweets(maxlength=int(1e5), wait_time=0.001):
"""
Tweets are streamed and stored in a queue. The queue is popped from the left during function call
:param maxlength: maximum length of the queue
wait_time: time to wait for a new tweet
"""
listener = StdOutListener()
auth = OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
stream = Stream(auth, listener)
stream.filter(locations=[-180, -90, 180, 90], async=True) # This listens to tweets from all over the world
while True:
if len(listener.queue) > 0:
yield listener.queue.popleft()
else:
sleep(wait_time)
| [
"seethakris@gmail.com"
] | seethakris@gmail.com |
5481f6f58c583a5b277d9510ddf791c355253b36 | 4510bbf54e2ca619c3a863f5ca03df6584585402 | /tfx/components/__init__.py | c41bd948dd4197767f194a8a52a9a827be681b74 | [
"Apache-2.0"
] | permissive | Mdlglobal-atlassian-net/tfx | e55f38336d1989ac970b5069c7128097ed86b422 | 37cbbb95c65e1a891045dd13232a7f2a293a7b70 | refs/heads/master | 2022-10-02T07:44:41.180873 | 2020-06-01T18:49:15 | 2020-06-01T18:49:53 | 268,607,840 | 0 | 1 | Apache-2.0 | 2020-06-01T19:01:51 | 2020-06-01T19:01:50 | null | UTF-8 | Python | false | false | 2,189 | py | # Lint as: python2, python3
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Subpackage for TFX components."""
import tensorflow as tf
# For component user to direct use tfx.components.[...] as an alias.
from tfx.components.bulk_inferrer.component import BulkInferrer
from tfx.components.common_nodes.importer_node import ImporterNode
from tfx.components.common_nodes.resolver_node import ResolverNode
from tfx.components.evaluator.component import Evaluator
from tfx.components.example_gen.big_query_example_gen.component import BigQueryExampleGen
from tfx.components.example_gen.component import FileBasedExampleGen
from tfx.components.example_gen.csv_example_gen.component import CsvExampleGen
from tfx.components.example_gen.import_example_gen.component import ImportExampleGen
from tfx.components.example_validator.component import ExampleValidator
from tfx.components.infra_validator.component import InfraValidator
from tfx.components.model_validator.component import ModelValidator
from tfx.components.pusher.component import Pusher
from tfx.components.schema_gen.component import SchemaGen
from tfx.components.statistics_gen.component import StatisticsGen
from tfx.components.trainer.component import Trainer
from tfx.components.transform.component import Transform
# Prevents double logging: TFX and TF uses `tf.logging` but Beam uses standard
# logging, both logging modules add its own handler. Following setting disables
# tf.logging to propagate up to the parent logging handlers. This is a global
# behavior (perhaps thread hostile) which affects all code that uses component
# libaray.
tf.get_logger().propagate = False
| [
"tensorflow-extended-team@google.com"
] | tensorflow-extended-team@google.com |
fbfcabf6cfb11d516401f5429f766accc46becaf | 7a09af404f29389504742a3d5f1727bfbe562750 | /TrekBot2_WS/build/tf2_tools/catkin_generated/pkg.installspace.context.pc.py | f8537ec21dba2e692b461229e9f52497428867f2 | [
"MIT"
] | permissive | Rafcin/TrekBot | 4baa2ed93b90920b36adba0b72384ac320d2de01 | d3dc63e6c16a040b16170f143556ef358018b7da | refs/heads/master | 2020-03-30T02:15:35.361254 | 2018-12-14T03:30:25 | 2018-12-14T03:30:25 | 150,622,252 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 402 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "tf2;tf2_msgs;tf2_ros".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "tf2_tools"
PROJECT_SPACE_DIR = "/xavier_ssd/TrekBot/TrekBot2_WS/install"
PROJECT_VERSION = "0.6.3"
| [
"Rafcin.s@gmail.com"
] | Rafcin.s@gmail.com |
34104a73ede55a038ffc76a358918c958c9d3b49 | b36c05bfa929cac8f956feebba1e7cb2fc550387 | /d5/d5p2.py | ef18fab1691f09b791969af3089469d440ed7028 | [] | no_license | jabadia/advent-of-code-2017 | ddc5dd2e141e0620ec7d017d0345cc3807debfbf | 9e595bf7d074073dde0f85353fe060e6bf147969 | refs/heads/master | 2021-09-01T20:15:39.211708 | 2017-12-25T22:33:12 | 2017-12-25T22:33:12 | 115,634,618 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,315 | py | TEST_INPUT = """
0
3
0
1
-3
"""
INPUT = """
0
1
0
0
1
-3
0
0
2
-2
-6
-3
2
-5
-6
-3
-3
0
-8
-12
1
-9
-12
-9
0
-7
-17
-6
-18
-7
-6
-21
-28
-14
-23
-14
-17
-5
-35
-17
-26
-14
1
-27
-19
-40
-32
-44
2
-14
-15
-12
-35
0
-49
-12
-7
-46
-47
-32
-33
-47
-7
-62
-20
-35
-4
-35
-8
-3
-61
-38
-63
-27
-33
-57
-48
-66
-68
-11
-61
-50
-34
-31
-36
-79
-49
-71
1
-34
-65
-61
-91
-12
-21
-82
-85
-51
-89
0
-83
-53
-44
-7
1
-19
-39
-27
-94
-36
-31
-35
-97
-45
-90
-15
-106
-30
-79
-18
-25
-105
-30
-63
-109
-32
-91
-96
-87
-121
-116
-103
-71
-1
-113
-10
-47
-109
-107
-38
-66
-26
-8
-38
-31
-129
-42
-91
-89
-107
-125
-75
-118
-81
-45
-111
-27
-63
-106
-110
-64
-63
-80
-44
-33
-130
-55
-90
-144
-15
-132
-122
-155
-122
-94
-159
-5
-89
-6
-97
-129
-159
-15
-44
-156
-124
-113
-154
-95
-96
-29
-121
-30
-73
-118
-57
-76
-141
-138
-108
-185
-56
-136
-161
-138
-192
2
-126
-12
-39
-60
-125
-149
-193
-146
-116
-101
-16
-207
-122
-92
-204
-42
-112
-28
-93
-96
-57
-136
-19
-36
-107
-170
-19
-20
-96
-229
-59
-172
-58
-89
-31
-57
-223
-37
-189
-43
-135
-90
-150
-22
-152
-243
-37
-231
-112
-57
-168
-30
-77
-162
-181
-176
-202
-138
-206
-183
-190
-257
-181
-47
-23
-248
-114
-98
-77
-143
-168
-166
-30
-155
-237
-51
-113
-243
-41
-142
-231
-139
-20
-190
-262
-142
-238
-200
-270
-113
-35
-296
-146
-205
-129
-198
-68
-139
-56
-196
-133
-16
-229
-258
-91
-63
-249
-274
-156
-273
-182
-166
-115
-154
-296
-115
-89
-120
-201
-44
-287
-8
1
-260
-297
-282
-114
-323
-326
-166
-241
-109
-21
-236
-280
-19
-80
-77
-271
-292
-340
-300
-206
-308
-99
-156
-277
-245
-132
-56
-172
-53
-271
-32
-5
-235
-329
-1
-150
-247
-268
-133
-341
-221
-2
-43
-229
-190
-337
-40
-71
-72
-149
-25
-253
-44
-113
-164
-370
-284
-235
-9
-234
-291
1
-152
-302
-393
-47
-289
-75
-140
-349
-140
-353
-298
-27
-292
-380
-55
-62
-208
-221
-41
-316
-411
-367
-220
-248
-59
-177
-372
-55
-241
-240
-140
-315
-297
-42
-118
-141
-70
-183
-153
-30
-63
-306
-110
-8
-356
-80
-314
-323
-41
-176
-165
-41
-230
-132
-222
-2
-404
-38
-130
2
-16
-141
-136
-336
-245
-6
-348
-172
-267
-208
-291
-285
-67
-219
-216
-136
-325
-27
-382
-242
-50
-284
-149
-454
-336
-346
-293
-402
-76
-324
-219
-336
-24
-446
-123
-185
-196
-295
-173
-400
-137
-414
-14
-104
-62
-252
-17
-398
-490
-440
-89
-347
-101
-142
-228
-301
-396
-320
-52
-508
-122
-436
-311
-344
-240
-434
-220
-197
-31
-295
-44
-452
-269
-430
-373
-409
-438
-365
-13
-241
-418
-20
-24
-141
-1
-148
-307
-63
-423
-254
-8
-438
-326
-19
-135
-109
-394
2
-398
-273
-158
-453
-346
-86
-431
-536
-549
-379
-483
-85
-476
-483
-104
-87
-462
-249
-540
-164
-360
-100
-238
-45
-390
-59
-156
-248
-257
-150
-164
-160
-545
-520
-364
-384
-237
-456
-28
-366
-147
0
-303
-583
-420
-370
-299
-154
-380
-188
-491
-258
-598
-429
-349
-333
-569
-4
-556
-421
-182
-441
-407
-542
-364
-370
-384
1
-529
-45
-319
-395
-279
-160
-575
-193
-25
-565
-548
-445
-266
-304
-361
-348
-303
-159
-39
-75
-437
-608
-622
-556
-108
-343
-283
-68
-632
-393
-68
-140
-126
-531
-87
-519
-334
-56
-70
-275
-247
-370
-439
-118
-497
-630
-594
-612
-541
-161
-646
-397
-100
-284
-313
0
-59
-200
-601
-663
-529
-676
-610
-7
-228
-50
-494
-382
-250
-306
-274
-163
-110
-375
-124
-237
-98
-645
-692
-495
-593
-647
-178
-531
-336
-697
-646
-671
-633
-542
-461
-200
-658
-525
-389
-643
-258
-329
-656
-400
-692
-557
-506
-594
-67
-623
-113
-459
-211
-713
-115
-602
-131
-181
-30
-227
-53
-719
-631
-641
-434
-552
-716
-368
-19
-439
-443
-552
-85
-79
-449
-254
-620
-474
-121
-210
-285
-608
-456
-513
-496
-13
-418
-399
-437
-258
-15
-623
-178
-336
-379
-721
-299
-729
-742
-64
-13
-438
-603
-666
-278
-767
-200
-686
-497
-256
-541
-491
-360
-615
-326
-682
-759
-524
-580
-323
-578
-793
-478
-107
-440
-657
-790
-605
-21
-163
-392
-560
-336
-430
-613
-182
-15
-782
-607
-281
-269
-25
-699
-89
-593
-280
-269
-438
-103
-359
-387
-157
-747
-619
-176
-772
-500
-735
-691
-797
-612
-573
-36
-617
-630
-357
-718
-210
-48
-185
-20
-556
-206
-722
-559
-416
-578
-745
-564
-273
-62
-300
-218
-711
-744
-805
-277
-522
-346
-280
-762
-438
-381
-379
-198
-737
-555
-466
-218
-511
-334
-353
-259
-225
-675
-350
-585
-647
-52
-395
-324
-106
-826
-279
-81
-396
-611
-312
-529
-291
-129
-594
-437
-188
-649
-820
-237
-673
-6
-387
-195
-503
-350
-83
-88
-626
-30
-313
-13
-633
-403
-319
-832
-185
-146
-839
-9
-557
-799
-841
-700
-465
-669
-769
-235
-849
-863
-819
-76
-912
-931
-909
-762
-607
-522
-64
-769
-377
-133
-414
-772
-206
-746
-730
-393
-901
-72
-33
-811
-372
-298
-835
-637
-302
-481
-958
-878
-867
-25
-260
-448
-21
-930
-903
-581
-547
-664
-843
-140
-337
-383
-513
-368
-221
-474
-169
-673
-728
-266
-862
-753
-815
-647
-106
-15
-728
-912
-147
-828
-6
-694
-434
-737
-335
-183
-732
-841
-364
-155
-116
-966
-822
-65
-22
-853
-208
-326
-826
-472
-491
-436
-771
-1009
-98
-401
-915
-275
-574
-313
-884
-648
-935
-94
-326
-553
-744
-723
-782
-719
-175
-868
-190
-153
-48
-218
-414
-721
-715
-995
-991
-575
-264
-70
-366
-381
-130
-409
-817
-258
-1028
-552
-878
-449
-138
-900
-45
-119
-677
-844
-869
-985
-1019
-60
-649
-915
-93
-1053
-121
-631
-156
-332
-193
"""
def solve(input):
code = [int(n.strip()) for n in input.strip().split('\n')]
pc = 0
count = 0
while 0 <= pc < len(code):
next_pc = pc + code[pc]
code[pc] += 1
count += 1
pc = next_pc
return count
res = solve(TEST_INPUT)
assert (res == 5)
print(solve(INPUT))
| [
"javi.abadia@gmail.com"
] | javi.abadia@gmail.com |
90d1ba7d7f1065e6b4287dc4f9bdf753dd042573 | de56d8ffb98bf4ef8336a89f7dea2ba4e4797134 | /blog/migrations/0005_auto_20200619_0217.py | 4f935d59ebd5bda644ffa14190f257db3d6f39a6 | [] | no_license | topdev38/agecny-cms | 9bb5280646d498592738936599ceb13c87899428 | ff9f76b06e2e41b82cc2ebf54fbd27a9a22994f3 | refs/heads/master | 2023-01-01T23:15:50.436794 | 2020-10-18T03:51:17 | 2020-10-18T03:51:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 840 | py | # Generated by Django 3.0.5 on 2020-06-19 09:17
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('home', '0009_auto_20200619_0205'),
('blog', '0004_auto_20200618_0812'),
]
operations = [
migrations.AddField(
model_name='blogdetailpage',
name='navigation_bar',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='home.NavigationSnippet'),
),
migrations.AddField(
model_name='blogpage',
name='navigation_bar',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='home.NavigationSnippet'),
),
]
| [
"gabriel.sabadac@yahoo.ro"
] | gabriel.sabadac@yahoo.ro |
425f90eb8552b8eddac49d0942fe278eeb209643 | 85e078ee3ceda5091624233ca19ba42f78747499 | /LeetCode/binary_tree_tilt.py | da123fed23b66a5d89eb908cb09913176514a842 | [] | no_license | papayetoo/StudyinPython | d5e6ec0cff0e97fcc4afc8d846e3658c06eb67c2 | f686b6e08720ad4d7d57b41d24c63c4bfa64dd90 | refs/heads/master | 2021-07-22T04:05:38.993123 | 2021-02-03T14:12:26 | 2021-02-03T14:12:26 | 240,009,852 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 516 | py | # Definition for a binary tree node.
class TreeNode:
def __init__(self, val=0, left=None, right=None):
self.val = val
self.left = left
self.right = right
class Solution:
def findTilt(self, root: TreeNode) -> int:
def postorder(node: TreeNoe):
if not node:
return 0
l = postorder(node.left)
r = postorder(node.right)
return abs(l - r)
result = postorder(root)
print(result)
return 0
| [
"rhkdgus0826@gmail.com"
] | rhkdgus0826@gmail.com |
fc14e4aaa42ca1b1f9774ae8c9c96ace464ac8e0 | 0760fb4901a75766921a205b55686d6d6f049b30 | /python/ray/train/xgboost/xgboost_predictor.py | 1b319b93b299bc02a5b83a2f1cdcfa1e8fab6e8e | [
"MIT",
"BSD-3-Clause",
"Apache-2.0"
] | permissive | ray-project/ray | a4bb6940b08b59a61ef0b8e755a52d8563a2f867 | edba68c3e7cf255d1d6479329f305adb7fa4c3ed | refs/heads/master | 2023-08-31T03:36:48.164405 | 2023-08-31T03:20:38 | 2023-08-31T03:20:38 | 71,932,349 | 29,482 | 5,669 | Apache-2.0 | 2023-09-14T21:48:14 | 2016-10-25T19:38:30 | Python | UTF-8 | Python | false | false | 5,718 | py | from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union
import pandas as pd
import xgboost
from ray.air.constants import TENSOR_COLUMN_NAME
from ray.air.data_batch_type import DataBatchType
from ray.air.util.data_batch_conversion import _unwrap_ndarray_object_type_if_needed
from ray.train.predictor import Predictor
from ray.train.xgboost import XGBoostCheckpoint
from ray.util.annotations import PublicAPI
if TYPE_CHECKING:
from ray.data.preprocessor import Preprocessor
@PublicAPI(stability="beta")
class XGBoostPredictor(Predictor):
"""A predictor for XGBoost models.
Args:
model: The XGBoost booster to use for predictions.
preprocessor: A preprocessor used to transform data batches prior
to prediction.
"""
def __init__(
self, model: xgboost.Booster, preprocessor: Optional["Preprocessor"] = None
):
self.model = model
super().__init__(preprocessor)
def __repr__(self):
return (
f"{self.__class__.__name__}(model={self.model!r}, "
f"preprocessor={self._preprocessor!r})"
)
@classmethod
def from_checkpoint(cls, checkpoint: XGBoostCheckpoint) -> "XGBoostPredictor":
"""Instantiate the predictor from a Checkpoint.
This is a helper constructor that instantiates the predictor from a
framework-specific XGBoost checkpoint.
Args:
checkpoint: The checkpoint to load the model and preprocessor from.
"""
model = checkpoint.get_model()
preprocessor = checkpoint.get_preprocessor()
return cls(model=model, preprocessor=preprocessor)
def predict(
self,
data: DataBatchType,
feature_columns: Optional[Union[List[str], List[int]]] = None,
dmatrix_kwargs: Optional[Dict[str, Any]] = None,
**predict_kwargs,
) -> DataBatchType:
"""Run inference on data batch.
The data is converted into an XGBoost DMatrix before being inputted to
the model.
Args:
data: A batch of input data.
feature_columns: The names or indices of the columns in the
data to use as features to predict on. If None, then use
all columns in ``data``.
dmatrix_kwargs: Dict of keyword arguments passed to ``xgboost.DMatrix``.
**predict_kwargs: Keyword arguments passed to ``xgboost.Booster.predict``.
Examples:
.. testcode::
import numpy as np
import xgboost as xgb
from ray.train.xgboost import XGBoostPredictor
train_X = np.array([[1, 2], [3, 4]])
train_y = np.array([0, 1])
model = xgb.XGBClassifier().fit(train_X, train_y)
predictor = XGBoostPredictor(model=model.get_booster())
data = np.array([[1, 2], [3, 4]])
predictions = predictor.predict(data)
# Only use first and second column as the feature
data = np.array([[1, 2, 8], [3, 4, 9]])
predictions = predictor.predict(data, feature_columns=[0, 1])
.. testcode::
import pandas as pd
import xgboost as xgb
from ray.train.xgboost import XGBoostPredictor
train_X = pd.DataFrame([[1, 2], [3, 4]], columns=["A", "B"])
train_y = pd.Series([0, 1])
model = xgb.XGBClassifier().fit(train_X, train_y)
predictor = XGBoostPredictor(model=model.get_booster())
# Pandas dataframe.
data = pd.DataFrame([[1, 2], [3, 4]], columns=["A", "B"])
predictions = predictor.predict(data)
# Only use first and second column as the feature
data = pd.DataFrame([[1, 2, 8], [3, 4, 9]], columns=["A", "B", "C"])
predictions = predictor.predict(data, feature_columns=["A", "B"])
Returns:
Prediction result.
"""
return Predictor.predict(
self,
data,
feature_columns=feature_columns,
dmatrix_kwargs=dmatrix_kwargs,
**predict_kwargs,
)
def _predict_pandas(
self,
data: "pd.DataFrame",
feature_columns: Optional[Union[List[str], List[int]]] = None,
dmatrix_kwargs: Optional[Dict[str, Any]] = None,
**predict_kwargs,
) -> "pd.DataFrame":
dmatrix_kwargs = dmatrix_kwargs or {}
feature_names = None
if TENSOR_COLUMN_NAME in data:
data = data[TENSOR_COLUMN_NAME].to_numpy()
data = _unwrap_ndarray_object_type_if_needed(data)
if feature_columns:
# In this case feature_columns is a list of integers
data = data[:, feature_columns]
elif feature_columns:
# feature_columns is a list of integers or strings
data = data[feature_columns].to_numpy()
# Only set the feature names if they are strings
if all(isinstance(fc, str) for fc in feature_columns):
feature_names = feature_columns
else:
feature_columns = data.columns.tolist()
data = data.to_numpy()
if all(isinstance(fc, str) for fc in feature_columns):
feature_names = feature_columns
if feature_names:
dmatrix_kwargs["feature_names"] = feature_names
matrix = xgboost.DMatrix(data, **dmatrix_kwargs)
df = pd.DataFrame(self.model.predict(matrix, **predict_kwargs))
df.columns = (
["predictions"]
if len(df.columns) == 1
else [f"predictions_{i}" for i in range(len(df.columns))]
)
return df
| [
"noreply@github.com"
] | ray-project.noreply@github.com |
6e6667cf463f0e2376456bda7fa413e8cc8c9d58 | 45b8e141f762b95edec36ce40809ea4b89e3d287 | /mahkalastore/home/views.py | 23c7f82f9021101fb873e62508a747c6d244c294 | [] | no_license | nimanoori22/mys | 73d7a0ad141e1c6208e776a15d079a2599c46a7f | 0122586a4d69f80219ad25e42ef89f3052f5cb81 | refs/heads/master | 2022-11-28T22:24:44.947703 | 2020-08-13T14:52:19 | 2020-08-13T14:52:19 | 279,652,903 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,800 | py | from django.shortcuts import render, HttpResponse, HttpResponsePermanentRedirect
from product.models import Product, Category, Images, Comment
from home.models import Setting, ContactForm, ContactMessage
from .forms import SearchForm
from django.contrib import messages
from django.http import HttpResponseRedirect
import json
# Create your views here.
def index(request):
page = "home"
category = Category.objects.all()
setting = Setting.objects.get(pk=1)
products_slider = Product.objects.all().order_by('id')[:4] #first 4 products
products_latest= Product.objects.all().order_by('-id')[:4] #last 4 products
products_picked = Product.objects.all().order_by('?')[:4] #randomly picked
context = {'setting': setting,
'page': page,
'products_slider': products_slider,
'products_latest': products_latest,
'products_picked': products_picked,
'category': category}
return render(request, 'index.html', context=context)
def aboutus(request):
setting = Setting.objects.get(pk=1)
context = {'setting': setting}
return render(request, 'about.html', context=context)
def contact(request):
if request.method == 'POST':
form = ContactForm(request.POST)
if form.is_valid():
data = ContactMessage()
data.name = form.cleaned_data['name']
data.email = form.cleaned_data['email']
data.subject = form.cleaned_data['subject']
data.message = form.cleaned_data['message']
data.ip = request.META.get('REMOTE_ADDR')
data.save()
messages.success(request, 'your message has been sent, thank you <3')
HttpResponseRedirect('/contact')
setting = Setting.objects.get(pk=1)
form = ContactForm
context = {'setting': setting, 'form': form,}
return render(request, 'contact.html', context=context)
def category_product(request, id, slug):
category = Category.objects.all()
products = Product.objects.filter(category_id=id)
context = {
'products': products,
'category': category,
}
return render(request, 'category_product.html', context=context)
def search(request):
    """Handle the product search form; non-POST requests are sent back home."""
    if request.method == 'POST':
        form = SearchForm(request.POST)
        if form.is_valid():
            query = form.cleaned_data['query']
            catid = form.cleaned_data['catid']
            # catid == 0 means "search all categories".
            if catid == 0:
                products = Product.objects.filter(name__icontains=query)
            else:
                products = Product.objects.filter(name__icontains=query, category_id=catid)
            category = Category.objects.all()
            context = {
                'products': products,
                'query': query,
                'category': category,
            }
            return render(request, 'search_products.html', context=context)
    # GET requests and invalid forms fall through to a redirect to the home page.
    return HttpResponsePermanentRedirect('/')
def search_auto(request):
    """AJAX autocomplete endpoint: return product names matching ?term= as JSON.

    Non-AJAX requests get the literal body 'fail' (still served with the
    JSON content type, as before).
    """
    if request.is_ajax():
        q = request.GET.get('term', '')
        # The response is just a flat list of matching product names.
        # (The dead `product_json = {}` assignment was removed.)
        results = [rs.name for rs in Product.objects.filter(name__icontains=q)]
        data = json.dumps(results)
    else:
        data = 'fail'
    mimetype = 'application/json'
    # The second positional argument of HttpResponse is content_type.
    return HttpResponse(data, mimetype)
def product_detail(request, id, slug):
    """Render a single product page with its extra images and approved comments."""
    category = Category.objects.all()
    product = Product.objects.get(pk=id)
    images = Images.objects.filter(product_id=id)
    # Only comments that passed moderation (status == 'True') are shown.
    comments = Comment.objects.filter(product_id=id, status='True')
    context = {
        'product': product,
        'category': category,
        'mypics': images,
        'comments': comments,
    }
    return render(request, 'product_detail.html', context=context)
| [
"nimanoori000@gmail.com"
] | nimanoori000@gmail.com |
d9f06fb6f20f3f1270aaab0aff1acbd04c6ce096 | d4c82eb9ae3037cf8742c3fc8c31cf4a80f5d21c | /examples/Python/examples/Tools/scripts/finddiv.py | 7f483e293126378754d76f484901c26e6bae0d42 | [] | no_license | gzhu-team-509/programming-knowledge-base | 68132b1a669f208dab94dcf2401ce39d89ebe53b | 3f3d026927157b7fdf210da195cb912366975e75 | refs/heads/master | 2021-05-05T12:17:12.686569 | 2017-11-04T07:30:28 | 2017-11-04T07:30:28 | 104,754,220 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,517 | py | #! /usr/bin/python2.6
"""finddiv - a grep-like tool that looks for division operators.
Usage: finddiv [-l] file_or_directory ...
For directory arguments, all files in the directory whose name ends in
.py are processed, and subdirectories are processed recursively.
This actually tokenizes the files to avoid false hits in comments or
strings literals.
By default, this prints all lines containing a / or /= operator, in
grep -n style. With the -l option specified, it prints the filename
of files that contain at least one / or /= operator.
"""
import os
import sys
import getopt
import tokenize
def main():
    # Parse the command line: -l lists only filenames, -h prints the module help.
    try:
        opts, args = getopt.getopt(sys.argv[1:], "lh")
    except getopt.error, msg:
        usage(msg)
        return 2
    if not args:
        usage("at least one file argument is required")
        return 2
    listnames = 0
    for o, a in opts:
        if o == "-h":
            print __doc__
            return
        if o == "-l":
            listnames = 1
    # Process every argument, remembering whether any of them failed.
    exit = None
    for filename in args:
        x = process(filename, listnames)
        exit = exit or x
    return exit
def usage(msg):
    # Print a getopt-style error message plus a usage hint to stderr.
    sys.stderr.write("%s: %s\n" % (sys.argv[0], msg))
    sys.stderr.write("Usage: %s [-l] file ...\n" % sys.argv[0])
    sys.stderr.write("Try `%s -h' for more information.\n" % sys.argv[0])
def process(filename, listnames):
    # Tokenize one file (or recurse into a directory) and report / and /= tokens.
    # Tokenizing avoids false hits inside comments and string literals.
    if os.path.isdir(filename):
        return processdir(filename, listnames)
    try:
        fp = open(filename)
    except IOError, msg:
        sys.stderr.write("Can't open: %s\n" % msg)
        return 1
    g = tokenize.generate_tokens(fp.readline)
    lastrow = None
    for type, token, (row, col), end, line in g:
        if token in ("/", "/="):
            if listnames:
                # -l mode: the filename alone is enough; stop at the first hit.
                print filename
                break
            if row != lastrow:
                # Print each matching line only once, grep -n style.
                lastrow = row
                print "%s:%d:%s" % (filename, row, line),
    fp.close()
def processdir(dir, listnames):
    # Recursively process every *.py file and subdirectory under dir.
    try:
        names = os.listdir(dir)
    except os.error, msg:
        sys.stderr.write("Can't list directory: %s\n" % dir)
        return 1
    files = []
    for name in names:
        fn = os.path.join(dir, name)
        if os.path.normcase(fn).endswith(".py") or os.path.isdir(fn):
            files.append(fn)
    # Sort case-insensitively so the output order is stable across platforms.
    files.sort(lambda a, b: cmp(os.path.normcase(a), os.path.normcase(b)))
    exit = None
    for fn in files:
        x = process(fn, listnames)
        exit = exit or x
    return exit
# Run as a script: propagate main()'s status code to the shell (None -> 0).
if __name__ == "__main__":
    sys.exit(main())
| [
"lightyears1998@hotmail.com"
] | lightyears1998@hotmail.com |
cc1d8630c5d911d03efe32fa35c8639a4a387cea | 31648f7ba9eab4841eae211b36a5ea025570ba78 | /exam_16_08_2020/project/software/express_software.py | 23896589c3ea9c3f097ae03b4565a7edec1c9828 | [
"MIT"
] | permissive | ivan-yosifov88/python_oop_june_2021 | d7c4d3ba93f3085f019a4409c33b8ae9739de372 | 7ae6126065abbcce7ce97c86d1150ae307360249 | refs/heads/main | 2023-07-04T21:12:07.592730 | 2021-08-18T15:12:50 | 2021-08-18T15:12:50 | 385,363,143 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 440 | py | from project.software.software import Software
class ExpressSoftware(Software):
    """Express software component: unchanged capacity demand, double the memory demand."""
    type = "Express"
    # Multipliers applied to the raw consumption figures before int() truncation.
    coefficient_changing_capacity = 1
    coefficient_changing_memory = 2
    def __init__(self, name, capacity_consumption, memory_consumption):
        super().__init__(name, self.type, int(capacity_consumption * self.coefficient_changing_capacity),
                         int(memory_consumption * self.coefficient_changing_memory))
"ivan.yosifov88gmail.com"
] | ivan.yosifov88gmail.com |
9fb28b5884cad2119f5b0d25c46f453739e1002b | 5b93930ce8280b3cbc7d6b955df0bfc5504ee99c | /nodes/Bisong19Building/G_PartVI/H_Chapter37/B_StackedAutoencoders/index.py | 5ea3ea09bf83cb054a2ed818f98e32b291b162f3 | [] | no_license | nimra/module_gen | 8749c8d29beb700cac57132232861eba4eb82331 | 2e0a4452548af4fefd4cb30ab9d08d7662122cf4 | refs/heads/master | 2022-03-04T09:35:12.443651 | 2019-10-26T04:40:49 | 2019-10-26T04:40:49 | 213,980,247 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,914 | py | # Lawrence McAfee
# ~~~~~~~~ import ~~~~~~~~
from modules.node.HierNode import HierNode
from modules.node.LeafNode import LeafNode
from modules.node.Stage import Stage
from modules.node.block.CodeBlock import CodeBlock as cbk
from modules.node.block.ImageBlock import ImageBlock as ibk
from modules.node.block.MarkdownBlock import MarkdownBlock as mbk
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Chapter 37 Autoencoders
#
#
#
#
# Figure 37-2. Stacked or deep autoencoder. The hidden layers are added
# symmetrically at both the Encoder and Decoder
#
# Stacked Autoencoders with TensorFlow 2.0
# The code example in this section shows how to implement an autoencoder network
# using TensorFlow 2.0. For simplicity, the MNIST handwriting dataset is used to create
# reconstructions of the original images. In this example, a stacked autoencoder is
# implemented with the original and reconstructed image shown in Figure 37-3. The code
# listing is presented in the following, and corresponding notes on the code are shown
# thereafter.
#
# # import TensorFlow 2.0 with GPU
# !pip install -q tf-nightly-gpu-2.0-preview
#
# # import packages
# import tensorflow as tf
#
# 477
#
# Chapter 37 Autoencoders
#
# import numpy as np
# import matplotlib.pyplot as plt
#
# # import dataset
# (x_train, _), (x_test, _) = tf.keras.datasets.mnist.load_data()
#
# # change datatype to float
# x_train = x_train.astype('float32')
# x_test = x_test.astype('float32')
#
# # scale the dataset from 0 -> 255 to 0 -> 1
# x_train /= 255
# x_test /= 255
#
# # flatten the 28x28 images into vectors of size 784
# x_train = x_train.reshape((len(x_train), np.prod(x_train.shape[1:])))
# x_test = x_test.reshape((len(x_test), np.prod(x_test.shape[1:])))
#
# # create the autoencoder model
# def model_fn():
# model_input = tf.keras.layers.Input(shape=(784,))
# encoded = tf.keras.layers.Dense(units=512, activation='relu')(model_input)
# encoded = tf.keras.layers.Dense(units=128, activation='relu')(encoded)
# encoded = tf.keras.layers.Dense(units=64, activation='relu')(encoded)
# coding_layer = tf.keras.layers.Dense(units=32)(encoded)
# decoded = tf.keras.layers.Dense(units=64, activation='relu')(coding_layer)
# decoded = tf.keras.layers.Dense(units=128, activation='relu')(decoded)
# decoded = tf.keras.layers.Dense(units=512, activation='relu')(decoded)
# decoded_output = tf.keras.layers.Dense(units=784)(decoded)
#
# # the autoencoder model
# autoencoder_model = tf.keras.Model(inputs=model_input, outputs=decoded_output)
#
# # compile the model
# autoencoder_model.compile(optimizer='adam',
# loss='binary_crossentropy',
# metrics=['accuracy'])
#
# return autoencoder_model
#
#
# 478
#
# Chapter 37 Autoencoders
#
# # build the model
# autoencoder_model = model_fn()
#
# # print autoencoder model summary
# autoencoder_model.summary()
#
# # train the model
# autoencoder_model.fit(x_train, x_train, epochs=1000, batch_size=256,
# shuffle=True, validation_data=(x_test, x_test))
#
# # visualize reconstruction
# sample_size = 6
# test_image = x_test[:sample_size]
# # reconstruct test samples
# test_reconstruction = autoencoder_model.predict(test_image)
#
# plt.figure(figsize = (8,25))
# plt.suptitle('Stacked Autoencoder Reconstruction', fontsize=16)
# for i in range(sample_size):
# plt.subplot(sample_size, 2, i*2+1)
# plt.title('Original image')
#
# plt.imshow(test_image[i].reshape((28, 28)), cmap="Greys",
# interpolation="nearest", aspect='auto')
# plt.subplot(sample_size, 2, i*2+2)
# plt.title('Reconstructed image')
#
# plt.imshow(test_reconstruction[i].reshape((28, 28)), cmap="Greys",
# interpolation="nearest", aspect='auto')
# plt.show()
#
# From the preceding code listing, take note of the following:
# • Observe the arrangement of the encoder layers and the decoder
# layers of the stacked autoencoder. Specifically note how the
# corresponding layer arrangement of the encoder and the decoder has
# the same number of neurons.
# • The loss error measures the squared difference between the inputs
# into the autoencoder network and the decoder output.
# The image in Figure 37-3 contrasts the reconstructed images from the autoencoder
# network with the original images in the dataset.
# 479
#
# Chapter 37 Autoencoders
#
#
#
#
# Figure 37-3. Stacked autoencoder reconstruction. Left: Original image. Right:
# Reconstructed image.
# 480
#
# Chapter 37 Autoencoders
#
#
# D
# enoising Autoencoders
# Denoising autoencoders add a different type of constraint to the network by imputing
# some Gaussian noise into the inputs. This noise injection forces the autoencoder to
# learn the uncorrupted form of the input features; by doing so, the autoencoder learns the
# internal representation of the dataset without memorizing the inputs.
# Another way a denoising autoencoder constrains the input is by deactivating some
# input neurons in a similar fashion to the Dropout technique. Denoising autoencoders
# use an overcomplete network architecture. This means that the dimensions of the
# hidden Encoder and Decoder layers are not restricted; hence, they are overcomplete. An
# illustration of a denoising autoencoder architecture is shown in Figure 37-4.
#
#
#
#
# Figure 37-4. Denoising autoencoder. Constraint is applied by either adding
# Gaussian noise or by switching off some a random selection of the input neurons.
#
#
#
# 481
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class Content(LeafNode):
    """Leaf node for the "Stacked Autoencoders with TensorFlow 2.0" section."""
    def __init__(self):
        super().__init__(
            "Stacked Autoencoders with TensorFlow 2.0",
            # Stage.REMOVE_EXTRANEOUS,
            # Stage.ORIG_BLOCKS,
            # Stage.CUSTOM_BLOCKS,
            # Stage.ORIG_FIGURES,
            # Stage.CUSTOM_FIGURES,
            # Stage.CUSTOM_EXERCISES,
        )
        # The section body currently consists of a single markdown heading block.
        self.add(mbk("# Stacked Autoencoders with TensorFlow 2.0"))
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class StackedAutoencoders(HierNode):
    """Hierarchy node wrapping this section's Content leaf."""
    def __init__(self):
        super().__init__("Stacked Autoencoders with TensorFlow 2.0")
        self.add(Content())
# eof
| [
"lawrence.mcafee@gmail.com"
] | lawrence.mcafee@gmail.com |
d5484b50a845a80c7c8d7c731acd7778724d8da8 | b21abd3873c76739ceefd1b4613a343ba2b454d1 | /jwst/assign_wcs/util.py | 97140961df5adc10fe0b8c84bfb6276f03a866ae | [
"BSD-2-Clause"
] | permissive | rij/jwst | 96a7baf95de953c51bbe67f3cdd459c114c47eef | 1d3acecb28d9a3dcb44b993e451b69da9856187d | refs/heads/master | 2020-12-24T09:56:21.784342 | 2016-06-09T19:17:01 | 2016-06-09T19:17:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,154 | py | """
Utility function for WCS
"""
import logging
import math
import copy
import functools
import numpy as np
from numpy import arctan2
from astropy.utils.misc import isiterable
from astropy.io import fits
from astropy.modeling.core import Model
from astropy.modeling.parameters import Parameter, InputParameterError
from astropy.modeling import projections
from astropy.modeling import models as astmodels
from gwcs import WCS
from gwcs import utils as gwutils
from gwcs.wcstools import wcs_from_fiducial
log = logging.getLogger(__name__)
log.setLevel(logging.DEBUG)
def reproject(wcs1, wcs2, origin=0):
    """Build a pixel-to-pixel mapping between two WCS objects.

    The returned callable applies the forward transform of ``wcs1`` followed
    by the inverse transform of ``wcs2``; i.e. it maps pixel positions defined
    in ``wcs1`` onto the corresponding pixel positions in ``wcs2``.

    Parameters
    ----------
    wcs1, wcs2 : `~gwcs.wcs.WCS`
        Source and target WCS objects.

    Returns
    -------
    callable
        Function of ``(x, y)`` in ``wcs1`` returning ``(x, y)`` in ``wcs2``.
    """
    def _pixel_to_pixel(x, y):
        return wcs2.backward_transform(*wcs1.forward_transform(x, y))
    return _pixel_to_pixel
def wcs_from_footprints(wcslist, refwcs=None, transform=None, domain=None):
    """
    Create a WCS from a list of WCS objects.
    A fiducial point in the output coordinate frame is created from the
    footprints of all WCS objects. For a spatial frame this is the center
    of the union of the footprints. For a spectral frame the fiducial is in
    the beginning of the footprint range.
    If ``refwcs`` is not specified, the first WCS object in the list is considered
    a reference. The output coordinate frame and projection (for celestial frames)
    is taken from ``refwcs``.
    If ``transform`` is not suplied, a compound transform comprised of
    scaling and rotation is copied from ``refwcs``.
    If ``domain`` is not supplied, the domain of the new WCS is computed
    from the domains of all input WCSs
    Parameters
    ----------
    wcslist : list of `~gwcs.wcs.WCS`
        A list of WCS objects.
    refwcs : `~gwcs.wcs.WCS`, optional
        Reference WCS. The output coordinate frame, the projection and a
        scaling and rotation transform is created from it. If not supplied
        the first WCS in the list is used as ``refwcs``.
    transform : `~astropy.modeling.core.Model`, optional
        A transform, passed to :class_method:`~gwcs.WCS.wcs_from_fiducial`
        If not supplied Scaling | Rotation is computed from ``refwcs``.
    domain : list of dicts, optional
        Domain of the new WCS.
        If not supplied it is computed from the domain of all inputs.
    """
    # Validate inputs: every element must be a gwcs WCS, as must refwcs.
    if not isiterable(wcslist):
        raise ValueError("Expected 'wcslist' to be an iterable of WCS objects.")
    if not all([isinstance(w, WCS) for w in wcslist]):
        raise TypeError("All items in wcslist are expected to be instances of gwcs.WCS.")
    if refwcs is None:
        refwcs = wcslist[0]
    else:
        if not isinstance(refwcs, WCS):
            raise TypeError("Expected refwcs to be an instance of gwcs.WCS.")
    fiducial = compute_fiducial(wcslist, domain)
    # Find the first projection model in the reference forward transform, if any.
    prj = np.array([isinstance(m, projections.Projection) for m \
                    in refwcs.forward_transform]).nonzero()[0]
    if prj:
        # TODO: Fix the compound model indexing with numpy integers in astropy.
        # Remove the work around this issues from here.
        prj = refwcs.forward_transform[int(prj[0])]
    else:
        prj = None
    # Rebuild a Scale (& Scale ...) | AffineTransformation2D chain from refwcs.
    trans = []
    scales = [m for m in refwcs.forward_transform if isinstance(m, astmodels.Scale)]
    if scales:
        trans.append(functools.reduce(lambda x, y: x & y, scales))
    rotation = [m for m in refwcs.forward_transform if \
                isinstance(m, astmodels.AffineTransformation2D)]
    if rotation:
        trans.append(rotation[0])
    if trans:
        tr = functools.reduce(lambda x, y: x | y, trans)
    else:
        tr = None
    out_frame = getattr(refwcs, getattr(refwcs, 'output_frame'))
    wnew = wcs_from_fiducial(fiducial, coordinate_frame=out_frame,
                             projection=prj, transform=tr)
    #domain_bounds = np.hstack([gwutils._domain_to_bounds(d) for d in [w.domain for w in wcslist]])
    # Map every input footprint into the new frame and shift each axis to start at 0.
    domain_footprints = [w.footprint() for w in wcslist]
    domain_bounds = np.hstack([wnew.backward_transform(*f) for f in domain_footprints])
    for axs in domain_bounds:
        axs -= axs.min()
    # The combined domain covers the union of all input footprints, inclusively.
    domain = []
    for axis in out_frame.axes_order:
        axis_min, axis_max = domain_bounds[axis].min(), domain_bounds[axis].max()
        domain.append({'lower': axis_min, 'upper': axis_max,
                       'includes_lower': True, 'includes_upper': True})
    wnew.domain = domain
    return wnew
def compute_fiducial(wcslist, domain=None):
    """
    For a celestial footprint this is the center.
    For a spectral footprint, it is the beginning of the range.
    This function assumes all WCSs have the same output coordinate frame.
    """
    output_frame = getattr(wcslist[0], 'output_frame')
    axes_types = getattr(wcslist[0], output_frame).axes_type
    # Boolean masks selecting the spatial and spectral axes of the frame.
    spatial_axes = np.array(axes_types) == 'SPATIAL'
    spectral_axes = np.array(axes_types) == 'SPECTRAL'
    footprints = np.hstack([w.footprint(domain=domain) for w in wcslist])
    spatial_footprint = footprints[spatial_axes]
    spectral_footprint = footprints[spectral_axes]
    fiducial = np.empty(len(axes_types))
    if (spatial_footprint).any():
        # Average the footprint points as unit vectors on the sphere to get a
        # center that is safe across the RA wrap, then convert back to lon/lat.
        lon, lat = spatial_footprint
        lon, lat = np.deg2rad(lon), np.deg2rad(lat)
        x_mean = np.mean(np.cos(lat) * np.cos(lon))
        y_mean = np.mean(np.cos(lat) * np.sin(lon))
        z_mean = np.mean(np.sin(lat))
        lon_fiducial = np.rad2deg(np.arctan2(y_mean, x_mean)) % 360.0
        lat_fiducial = np.rad2deg(np.arctan2(z_mean, np.sqrt(x_mean**2 + y_mean\
            **2)))
        fiducial[spatial_axes] = lon_fiducial, lat_fiducial
    if (spectral_footprint).any():
        # Spectral fiducial: the start (minimum) of the spectral range.
        fiducial[spectral_axes] = spectral_footprint.min()
    return fiducial
def is_fits(input):
    """
    Returns
    --------
    isFits: tuple
        An ``(isfits, fitstype)`` tuple.  The values of ``isfits`` and
        ``fitstype`` are specified as:
         - ``isfits``: True|False
         - ``fitstype``: if True, one of 'waiver', 'mef', 'simple'; if False, None
    Notes
    -----
    Input images which do not have a valid FITS filename will automatically
    result in a return of (False, None).
    In the case that the input has a valid FITS filename but runs into some
    error upon opening, this routine will raise that exception for the calling
    routine/user to handle.
    """
    isfits = False
    fitstype = None
    names = ['fits', 'fit', 'FITS', 'FIT']
    #determine if input is a fits file based on extension
    # Only check type of FITS file if filename ends in valid FITS string
    f = None
    fileclose = False
    # An already-open HDUList is accepted directly and is never closed here.
    if isinstance(input, fits.HDUList):
        isfits = True
        f = input
    else:
        isfits = True in [input.endswith(l) for l in names]
    # if input is a fits file determine what kind of fits it is
    #waiver fits len(shape) == 3
    if isfits:
        if not f:
            try:
                f = fits.open(input, mode='readonly')
                fileclose = True
            except Exception as e:
                # Close any partially-opened handle, then re-raise for the caller.
                if f is not None:
                    f.close()
                raise
        data0 = f[0].data
        if data0 is not None:
            try:
                if isinstance(f[1], fits.TableHDU):
                    fitstype = 'waiver'
            except IndexError:
                # No extension HDU at all: a simple (primary-only) FITS file.
                fitstype = 'simple'
            # NOTE(review): if f[1] exists but is not a TableHDU, fitstype stays
            # None even though data0 is present — confirm this is intended.
        else:
            fitstype = 'mef'
        if fileclose:
            f.close()
    return isfits, fitstype
def not_implemented_mode(input_model, ref):
    """Placeholder WCS builder for unsupported exposure types.

    Logs a critical message naming the EXP_TYPE and returns None instead of
    raising, so callers receive "no WCS" rather than an exception.
    """
    message = "WCS for EXP_TYPE of {0} is not implemented.".format(
        input_model.meta.exposure.type)
    log.critical(message)
    return None
| [
"jhunkeler@gmail.com"
] | jhunkeler@gmail.com |
e2f4c086c474a87cb50581f634658462740af143 | 65ad03b8f4d975585776a5ba3d6a6ee1750ebad4 | /03-First Class Functions/Before/__main__.py | ba99256cf1a798f52d97ca90cd1eeff148e7ba1c | [] | no_license | afettouhi/PythonFP | e51351d5901a28f8ecd938d8213fcb5982c51e62 | c6a2319bc053f26cfbe70102a2dd7c1a4bcbbd57 | refs/heads/master | 2020-05-31T18:53:49.373967 | 2019-06-06T07:53:30 | 2019-06-06T07:53:30 | 190,445,909 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 890 | py | from order import Order
from customer import Customer
def main():
    """Build two demo customers with one order each, then print the expedited-order reports."""
    heart_of_gold = Customer()
    heart_of_gold.name = 'Heart of Gold'
    heart_of_gold.address = 'The Milky Way Galaxy'
    heart_of_gold.enterprise = False

    milliways = Customer()
    milliways.name = 'Milliways Restaurant'
    milliways.address = 'Magrathea'
    milliways.enterprise = True

    regular_order = Order()
    regular_order.customer = heart_of_gold
    regular_order.expedited = False
    regular_order.shipping_address = 'Infinitely Improbable'

    expedited_order = Order()
    expedited_order.customer = milliways
    expedited_order.expedited = True
    expedited_order.shipping_address = 'Magrathea'

    # Register both orders on the class-level list used by the reports.
    Order.orders = [regular_order, expedited_order]

    # All three reports are exercised through the first order instance.
    for name in regular_order.get_expedited_orders_customer_names():
        print(name)
    for address in regular_order.get_expedited_orders_customer_addresses():
        print(address)
    for address in regular_order.get_expedited_orders_shipping_addresses():
        print(address)


main()
| [
"A.Fettouhi@gmail.com"
] | A.Fettouhi@gmail.com |
1d542e307d523389d60ab5251e07e5c7fc776881 | 18d51ac0a6ca14c8221c26f0dacd8d3721ca28e9 | /hun31.py | 1d7fbd0f0f8cb874ba79e0399716f3e93eb88345 | [] | no_license | mahakalai/mahak | 05f96d52880ed7b2e5eb70dd1dbf14fc533236e8 | 613be9df7743ef59b1f0e07b7df987d29bb23ec7 | refs/heads/master | 2020-04-15T05:01:58.541930 | 2019-07-15T16:28:32 | 2019-07-15T16:28:32 | 164,406,486 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 143 | py | n=int(input())
# Competitive-programming snippet: read a list of ints, then print the maximum
# running product of l[1:].  l[0] is deliberately skipped — presumably per the
# problem statement (TODO: confirm intent against the original task).
l=[int(x) for x in input().split()]
l1=[]
mul=1
for i in range(1,len(l)):
    mul=mul*l[i]
    l1.append(mul)
print(max(l1))
| [
"noreply@github.com"
] | mahakalai.noreply@github.com |
d5689f706ada50f514498638a501965f892b556d | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/verbs/_worded.py | b372693f29f593edd5f1838b4d766e0aac3745bb | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 226 | py |
from xai.brain.wordbase.verbs._word import _WORD
#calss header
class _WORDED(_WORD, ):
    """Generated wordbase entry for "worded", an inflected form of the verb "word"."""
    def __init__(self,):
        _WORD.__init__(self)
        # Entry metadata; jsondata starts empty and is populated elsewhere.
        self.basic = "word"
        self.name = "WORDED"
        self.specie = 'verbs'
        self.jsondata = {}
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
545bbda1adda90ddac80fd555953d3811e4d7e98 | 7a4c6fe3802b740b928136fc7a5497c473386e2b | /credentials.py | 6078e9e0769e2cd330defc5649428f5dca5d6788 | [] | no_license | sirrotich/Password--Locker | 4cb882d15a2a1659c48e33227b2b46e5d44a9456 | b2f4dfd330a7812675b71d6a3311e44139f2ae94 | refs/heads/master | 2020-05-05T03:55:41.040277 | 2019-04-08T15:56:22 | 2019-04-08T15:56:22 | 179,690,522 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,938 | py | import pyperclip #This allows you copy and paste a given text-unit
class Credentials:
    """
    Holds one account's credentials and keeps a class-level registry of
    every saved Credentials instance.
    """
    credentials_list = []  # registry of saved Credentials objects

    def __init__(self, credentials_name, usr_name, password):
        self.credentials_name = credentials_name
        self.usr_name = usr_name
        self.password = password

    def save_credentials(self):
        '''
        save_credentials method saves credentials objects into credentials_list
        '''
        Credentials.credentials_list.append(self)

    @classmethod
    def delete_credentials(cls, name):
        '''
        delete_credentials method deletes a saved credentials from the credentials_list

        Args:
            name: account name of the credentials to remove
        '''
        # BUG FIX: the original was decorated @classmethod but declared (self)
        # and referenced undefined `cls`/`name`, so every call raised.
        for credentials in cls.credentials_list:
            if credentials.credentials_name == name:
                cls.credentials_list.remove(credentials)
                return

    @classmethod
    def find_by_name(cls, name):
        '''
        Return the first saved credentials whose account name matches, else None.
        '''
        for credentials in cls.credentials_list:
            if credentials.credentials_name == name:
                return credentials

    @classmethod
    def credentials_exist(cls, name):
        '''
        Method that checks if a credentials exists from the credentials list.
        Args:
            name: Acc name to search if it exists
        Returns :
            The matching credentials (truthy) or False if none exists
        '''
        # BUG FIX: the original compared the *password* to the account name,
        # so lookups by account name always failed.
        for credentials in cls.credentials_list:
            if credentials.credentials_name == name:
                return credentials
        return False

    @classmethod
    def display_credentials(cls):
        '''
        method that returns the credentials list
        '''
        return cls.credentials_list

    @classmethod
    def copy_usr_name(cls, number):
        '''
        Copy the username of the credentials whose account name is *number*
        to the system clipboard.
        '''
        # BUG FIX: the original referenced undefined `name`/`contact_found`;
        # look the entry up and copy from the object actually returned.
        credentials_found = Credentials.find_by_name(number)
        pyperclip.copy(credentials_found.usr_name)
| [
"rotichtitus12@gmail.com"
] | rotichtitus12@gmail.com |
a616707b079d8e5fa123ef8a00425c270eaf8257 | c003c5faf5b442fa4bf3010eae46370ebcf39040 | /voseq/genbank_fasta/tests/test_utils.py | b89cfb300f5c60393db249b3155c9fd3863a95d5 | [] | no_license | mezarino/VoSeq | 35d1fb3bb1de85f50bc39bc7ac6aefcad310e120 | bf75f1f91176a57ee23465e520d27df021576712 | refs/heads/master | 2021-01-18T05:28:44.318625 | 2015-02-15T15:40:12 | 2015-02-15T15:40:12 | 29,065,916 | 0 | 0 | null | 2015-01-10T18:04:33 | 2015-01-10T18:04:32 | null | UTF-8 | Python | false | false | 1,722 | py | from django.test import TestCase
from django.core.management import call_command
from core.utils import get_gene_codes
from core.utils import get_voucher_codes
from public_interface.models import TaxonSets
from public_interface.models import GeneSets
from public_interface.models import Genes
from genbank_fasta import utils
class TestGenBankFastaUtils(TestCase):
    """Tests for the genbank_fasta utilities, run against a migrated test database."""
    def setUp(self):
        # Load the test database dump before every test.
        args = []
        opts = {'dumpfile': 'test_db_dump.xml', 'verbosity': 0}
        cmd = 'migrate_db'
        call_command(cmd, *args, **opts)
        # Form-style input shared by the voucher/gene-code helpers under test.
        gs = GeneSets.objects.get(geneset_name='2genes')
        g = Genes.objects.get(gene_code='COI')
        g2 = Genes.objects.get(gene_code='16S')
        ts = TaxonSets.objects.get(taxonset_name='Erebia')
        self.cleaned_data = {
            'gene_codes': [g, g2],
            'taxonset': ts,
            'voucher_codes': 'CP200-10\r\nCP100-11',
            'geneset': gs,
        }
    def test_get_gene_codes(self):
        expected = 3
        result = get_gene_codes(self.cleaned_data)
        self.assertEqual(expected, len(result))
    def test_dataset_reading_frame_2(self):
        # The second line of the protein output should start with this motif.
        res = utils.Results(['CP100-10', 'CP100-11'], ['COI'])
        res.get_datasets()
        self.assertEqual('WAGMIGTSLSLIIRTELGNP', res.protein.splitlines()[1][0:20])
    def test_get_voucher_codes(self):
        expected = 3
        result = get_voucher_codes(self.cleaned_data)
        self.assertEqual(expected, len(result))
    def test_get_voucher_codes_dropped(self):
        # Codes prefixed with "--" are treated as dropped vouchers.
        self.cleaned_data['voucher_codes'] = 'CP100-10\r\n--CP100-11\r\nCP100-12'
        expected = 2
        result = get_voucher_codes(self.cleaned_data)
        self.assertEqual(expected, len(result))
| [
"mycalesis@gmail.com"
] | mycalesis@gmail.com |
6f9362635f191a549b5555c279d7bffca6d697f5 | 11130633fe59b222da0696dc05e72ac30871a573 | /Problem_Solving/leetcode/Sequential(Non-Linear)_data_structure/Tree/543_Diameter_of_Binary_Tree/diameterOfBinaryTree.py | 07a99675edeb2bce7241276d6b2a9954688e53f8 | [] | no_license | Jin-SukKim/Algorithm | 024aa77c6bf63666910a1eb03407e808a05307ec | 5f2a14fe1f64032126df55b1eadc1580a32735f3 | refs/heads/master | 2023-09-01T20:50:13.150780 | 2023-09-01T07:54:32 | 2023-09-01T07:54:32 | 263,555,962 | 4 | 0 | null | 2023-02-14T14:36:38 | 2020-05-13T07:24:51 | C++ | UTF-8 | Python | false | false | 576 | py | class TreeNode:
    def __init__(self, val=0, left=None, right=None):
        """Binary-tree node: a value plus optional left/right children."""
        self.val = val
        self.left = left
        self.right = right
def diameterOfBinaryTree(root: TreeNode) -> int:
    """Return the diameter of the tree: the longest path between any two nodes, in edges.

    A post-order DFS computes each subtree's height and, at every node, updates
    the best "left height + right height + 2" path passing through that node.
    """
    longest: int = 0

    def dfs(node: TreeNode) -> int:
        # BUG FIX: the original called max() with no arguments (TypeError) and
        # assigned to a dfs-local `longest`, so the answer was never tracked.
        nonlocal longest
        if not node:
            return -1  # height of an empty subtree
        left = dfs(node.left)
        right = dfs(node.right)
        # A path through this node uses both child heights plus two edges.
        longest = max(longest, left + right + 2)
        # Height of this subtree.
        return max(left, right) + 1

    dfs(root)
    return longest
"99wlstjr@gmail.com"
] | 99wlstjr@gmail.com |
446b8e28e38c08382df586afe3e8b6076aabc31c | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_136/3170.py | b6dc51d486002f5afd37165abc48d1430d695198 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 497 | py | #!/usr/bin/python
import sys
# Code Jam "Cookie Clicker Alpha"-style task: for each case, choose how many
# farms to buy (cost C, each adds rate F) to minimize the time to reach X,
# starting from a base rate of 2 per second.
N = int(sys.stdin.readline())
for case in xrange(N):
    C, F, X = map(float,sys.stdin.readline().split())
    # Closed-form estimate of the optimal farm count — presumably derived from
    # the break-even condition for buying one more farm (TODO: confirm).
    n_farms_f = (F*X - 2*C)/(C*F) - 1.
    n_farms = int((F*X - 2*C)/(C*F) - 1.)
    if n_farms_f > 0:
        # Time to buy farms 0..n_farms at the successively increasing rates...
        t1 = 0.
        for i in xrange(n_farms+1):
            t1 += 1./(2.+i*F) # 2 comes from initial rate
        # ...plus the time to accumulate X at the final rate.
        t = C * t1 + (X / (2. + ((n_farms+1) * F)))
    else:
        # Buying is never worth it: just wait at the base rate.
        t = X / 2.
    print "Case #%d: %.7f" % (case+1, t)
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
8ed4bb03243b16d9453a9c48d83128a0f7695c57 | e63ab09f227459380c317aa1694cffd04255c807 | /cheshire3/graph/selector.py | 1fdbfff0492435f2acdc4cbfc817d76c3192b8b3 | [
"ICU",
"X11"
] | permissive | bitwhite/cheshire3 | 91a0d2f8d2e79ac277ac4f7a3bea9efa911ce3d6 | ca27bc2600d217e36a429ccfe064f11d9b200193 | refs/heads/master | 2021-05-27T03:50:09.456813 | 2013-10-10T13:47:16 | 2013-10-10T13:47:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 862 | py |
from cheshire3.selector import SimpleSelector
from cheshire3.exceptions import C3ObjectTypeError
from cheshire3.graph.record import GraphRecord
class SparqlSelector(SimpleSelector):
    """Selector that runs its configured SPARQL queries against a GraphRecord."""
    def __init__(self, session, config, parent):
        SimpleSelector.__init__(self, session, config, parent)
    def process_record(self, session, record):
        """Run every configured query against *record*.

        Raises C3ObjectTypeError for non-GraphRecord input; otherwise returns
        a list with one ``selected`` result per configured query.
        """
        if not isinstance(record, GraphRecord):
            raise C3ObjectTypeError("{0.__class__.__name__} can only process GraphRecords; {1.__class__.__name__} supplied".format(self, record))
        else:
            vals = []
            for src in self.sources:
                for xp in src:
                    # this will be a SparqlQueryResult object
                    mv = record.process_sparql(session, xp['string'], xp['maps'])
                    vals.append(mv.selected)
            return vals
| [
"info@cheshire3.org"
] | info@cheshire3.org |
c9aee74c7e8a5fbe9602ecf231180839e4630013 | d17bfe4c8e16ed0727ce5bf893db6d287045a6ec | /M3/kcb/db/__init__.py | 52d0428c6210fc981d00752dafb4aa1f4f416803 | [] | no_license | 248808194/python | de81da3779399c4647a8bc7d803b63cd2eb59dea | da44c4949ab921a7822f891a2901c08b487b3de6 | refs/heads/master | 2020-03-08T01:42:19.838894 | 2018-04-19T01:05:46 | 2018-04-19T01:05:46 | 127,838,082 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 696 | py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
# Author: Zhoutao
#create_date:2017-02-14-13:49
# Python 3.5
#
# import pickle
#
# import os,sys,time,datetime,pickle,json
# # sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
# # from core.schools import School
# # # with open('/home/zt/PycharmProjects/51cto_python_homework/M3/课程表/db/sh_school', 'rb') as f:
# # # shcool_obj = pickle.load(f)
# #
# #
# #
# #
# # with open('/home/zt/PycharmProjects/51cto_python_homework/M3/kcb/db/bj_school','rb') as f:
# # shcool_obj = pickle.load(f)
# # print(shcool_obj.SCHOOL_TECHER )
# # print(shcool_obj.SCHOOL_CLASS )
# # print(shcool_obj.SCHOOL_LESSON ) | [
"zhoutao@zhoutao.name"
] | zhoutao@zhoutao.name |
9d6f29cf29bdbe519853ad91f64c4a882a7ba5a5 | 7b6377050fba4d30f00e9fb5d56dfacb22d388e1 | /brownies/LLNL/bin/bdflsToFluxes.py | 58e8cbb093f0893f40ddcd99f615430519929bc6 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | LLNL/fudge | 0a4fe8e3a68b66d58e42d1f4d209ea3f713c6370 | 6ba80855ae47cb32c37f635d065b228fadb03412 | refs/heads/master | 2023-08-16T21:05:31.111098 | 2023-08-01T22:09:32 | 2023-08-01T22:09:32 | 203,678,373 | 21 | 4 | NOASSERTION | 2023-06-28T20:51:02 | 2019-08-21T23:22:20 | Python | UTF-8 | Python | false | false | 2,604 | py | #! /usr/bin/env python3
# <<BEGIN-copyright>>
# Copyright 2022, Lawrence Livermore National Security, LLC.
# See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: BSD-3-Clause
# <<END-copyright>>
import os
import argparse
from brownies.legacy.endl import bdfls as bdflsModule
from xData import XYs1d as XYs1dModule
addFluxExec = os.path.join( os.path.dirname( os.path.dirname( os.path.dirname( os.path.dirname( os.path.realpath( __file__ ) ) ) ) ), "bin", "addFlux.py" )
parser = argparse.ArgumentParser( description = "Converts one or more bdfls fluxesin a bdfls file into the new GNDS flux structure and outputs results to a file." )
parser.add_argument( 'bdfls', help = "The name of the bdfls file to extract flux data from." )
parser.add_argument( "output", help = "The name of the outputted flux file." )
parser.add_argument( "input", nargs = "?", default = None, help = "The file to read existing flux data from." )
parser.add_argument( '-f', '--fids', action = 'append', help = "Append the fid to the bdfls flux id to convert. If absent, all are converted (e.g., -f 1)." )
parser.add_argument( "--override", action = "store_true", help = "If label exists and option present, replace flux with new one; otherwise, execute a raise." )
if( __name__ == '__main__' ) :
args = parser.parse_args( )
bdfls = bdflsModule.bdfls( template = args.bdfls )
if( args.fids is None ) :
fids = [ flux.id for flux in bdfls.f ]
else :
fids = [ int( fid ) for fid in args.fids ]
override = ""
if( args.override ) : override = " --override"
input = ''
if( args.input is not None ) : input = args.input
for bdflsFlux in bdfls.f :
if( bdflsFlux.id in fids ) :
orders = []
grid = XYs1dModule.XYs1d( ) # Only used to get a comment energy grid.
for order in bdflsFlux.EF_l :
flux = XYs1dModule.XYs1d( order )
orders.append( flux )
grid += flux
flux = [ ]
for energy, dummy in grid :
energyLegendreCoefficients = '%s' % energy
for order in orders : energyLegendreCoefficients += ' %s' % order.evaluate( energy )
flux.append( energyLegendreCoefficients )
os.system( """%s %s --values %s "%s" %s %s""" % ( addFluxExec, override, bdflsFlux.name, "; ".join( flux ), args.output, input ) )
input = ''
| [
"mattoon1@llnl.gov"
] | mattoon1@llnl.gov |
7aef9623504a9ae338b05888b4715693e16c82be | abe0dd7786f8d0731ba871425bf07e3215391b68 | /part1/LabRab/labrab-02/01.py | 9c61e9666cb43d3e93adef69f534f58c61ea08df | [] | no_license | Alekceyka-1/algopro21 | aff1cef4f1ac9a80ee6d569ecb6a78d5c9fb1f32 | f82e4582c1017c8043f399480104b3e7af4867ca | refs/heads/main | 2023-08-25T17:31:36.682000 | 2021-10-19T08:14:57 | 2021-10-19T08:14:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 823 | py | '''
Пользователь вводит два целых числа из диапазона [-100, +100].
Программа выводит последовательно по возрастанию
все нечётные числа от минимального, введённого пользьзователем, до максимального (включительно).
Пользователь может ввести сначала большее число, потом меньшее - программа должна работать корректно и в этом случае.
'''
a = int(input('Введите первое число - '))
b = int(input('Введите второе число - '))
if a > b:
a, b = b, a
for num in range(a, b):
if num % 2 != 0: # mod
print(num)
| [
"ttxiom@gmail.com"
] | ttxiom@gmail.com |
132daf0f45e263b277451e8d817a095e13b03485 | 9fa71d5834dae1c8900b3444f564b11326374d36 | /packages/tools/compras/rotinas_envio/anl_processos.py | 7c1b7f954066f2e225850a19e8a5f3f326a2da80 | [] | no_license | JoaoPauloLeal/toolbox | a85e726cfeb74603cb64d73c4af64757a9a60db7 | 924c063ba81395aeddc039a51f8365c02e527963 | refs/heads/master | 2023-06-07T02:17:42.069985 | 2021-06-28T19:06:40 | 2021-06-28T19:06:40 | 381,128,045 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 976 | py | import bth.db_connector as db
tipo_registro = 'anl_processo_correcao'
def iniciar_processo_busca(param_exec, ano):
entidade_dsk = str(input('Entidade do desktop : '))
sql = f'''SELECT i_anl_processo as id , i_anl_processo, i_processo, i_ano_proc
FROM compras.anl_processos ap
WHERE i_responsaveis_atos IS NULL
AND i_entidades = {entidade_dsk}
ORDER BY i_ano_proc, i_processo, i_anl_processo'''
# x = db.consulta_sql(sql, index_col='i_anl_processo')
# print(x)
for x in db.consulta_sql(sql, index_col='id').to_dict('records'):
print(f"Anulação {x['i_anl_processo']} do processo {x['i_processo']}/{x['i_ano_proc']}")
correcao = str(input('Realizar a correção automática ? '))
if correcao in 'sSyYSIMsimYESyes1':
query = db.get_consulta(param_exec, f'{tipo_registro}.sql')
db.execute_sql(query)
elif correcao in 'nNnaoNAOnãoNÃO0':
return 'x'
| [
"joao.leal@betha.com.br"
] | joao.leal@betha.com.br |
41037a8a5daa9ae01366e348de9873ba9e6d665a | 39b35326534d6efa8a60344ef59eac3d8cea562f | /formpj/form/views.py | 927372f3f8aa02c6898a0501e4b2e54334d1208c | [] | no_license | Hyo-gyeong/Django_review | 8635e8311111cab56066c6b87429c7f57c5e42c3 | 8b59d717c0c8c4404230c8eaa42e6074cacdd712 | refs/heads/master | 2021-01-03T08:32:06.706689 | 2020-08-31T04:55:59 | 2020-08-31T04:55:59 | 240,000,924 | 0 | 0 | null | 2020-08-17T19:21:30 | 2020-02-12T11:53:19 | Python | UTF-8 | Python | false | false | 3,486 | py | from django.shortcuts import render, get_object_or_404, redirect
from django.utils import timezone
from django.core.paginator import Paginator
from .models import Blog
from .form import BlogPost
def index(request):
blog = Blog.objects.all()
#페이지 3개로 자르기
paginator = Paginator(blog, 3)
#request된 페이지 변수에 담음
page = request.GET.get('page')
posts = paginator.get_page(page)
return render(request, 'index.html', {'blogs':blog, 'posts':posts})
def detail(request, detail_id):
blog_detail = get_object_or_404(Blog, pk = detail_id)
return render(request, 'detail.html', {'detail':blog_detail})
# def new(request):
# return render(request, 'new.html')
# def create(request):
# blog = Blog()
# blog.title = request.POST['title']
# blog.body = request.POST['body']
# blog.photo = request.FILES['photo']
# blog.pub_date = timezone.datetime.now()
# blog.save()
# return redirect('/')
def new(request):
#1.입력된 내용을 처리하는 기능:POST
if request.method == 'POST':
form = BlogPost(request.POST, request.FILES) #POST로써 들어온 내용을 form에 담아줌
if form.is_valid():#잘 입력되어는지 확인
blogpost = form.save(commit = False)#블로그 객체를 가져오되 아직은 저장하지 말아라
#blogpost는 Blog객체가 됨
blogpost.pub_date = timezone.datetime.now()
blogpost.save()
return redirect('/detail/' + str(blogpost.id))
#2.빈페이지를 띄워주는 기능:GET
else:
form = BlogPost()
return render(request, 'new.html', {'form':form})
#처음 new.html에 들어갔을 때 빈 입력공간을 띄우기 : GET : form이라는것을 갖다줘야하니까 if
#이용자가 뭘 입력하면 그 입력값들을 처리하기 : POST : 데이터를 처리해줘 else
#정의한 모든 모델을 입력받고싶지 않을수 있음, 자동으로 입력되게 하고싶은것도 있음(예, 날짜)
#form.save(commit = False)#일단 저장하지 말고 form객체 말고 model객체에 접근, commit = False라는 인자때문에 model객체가 저장되지 않고 반환됨
#model객체 안의 date변수에 접근, 수정
#model객체 저장
# def update(request, blog_id):
# forms = get_object_or_404(Blog, pk=blog_id)
# if request.method == 'POST':
# forms.title = request.POST['title'] #name=title인 애한테 담긴 내용 저장
# forms.body = request.POST['body'] #name=body인 애한테 담긴내용 저장
# forms.save()
# return redirect('/blog/'+str(blog_id))
# else: #수정사항을 입력하려고 페이지에 접속하면
# return render(request, 'new.html', {'forms':forms})
def updateform(request, blog_id):
blog = get_object_or_404(Blog, pk = blog_id)
if request.method == 'POST':#이렇게 해야 기존 내용을 불러올 수 있어
form = BlogPost(request.POST, request.FILES, instance = blog)
if form.is_valid():
post = form.save(commit = False)
post.pub_date = timezone.now()#수정한 날짜로 저장
post.save()
return redirect('/detail/'+str(blog.id))
else:#"
form = BlogPost(instance = blog)#"
return render(request, 'new.html', {'form':form})
| [
"cdkrcd8@gmail.com"
] | cdkrcd8@gmail.com |
6329aed37c3a9bf860eb343334aa54620697c20c | caa457f7cfa221ac70eb1c67753d6d1aacc33ede | /is_multiple.py | ebec9eeae996a440cf9b417434ade23a1cb3d3c2 | [] | no_license | ibnahmadCoded/how_to_think_like_a_computer_scientist_Chapter_4 | 0b609aedb7581ef5e825b7e8fe5cb5bcf96d522a | 4de3ea919432dc92a604e7ed2c0ace368c57328c | refs/heads/master | 2021-05-21T05:25:54.616256 | 2020-04-02T21:16:21 | 2020-04-02T21:16:21 | 252,565,473 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 210 | py | def is_multiple(m, n):
"""Checks if n is a multiple of m"""
if is_factor(n, m):
return True
return False
def is_factor(f, n):
"""checks if f is a factor of n"""
return (n % f) == 0
| [
"alegeaa@yahoo.com"
] | alegeaa@yahoo.com |
b1e911efb08abf5dc32d96522d6c397ec0742951 | 06e0c89781ae9c07a55090c43d8609e9dfefbb6f | /School_13/School_13/wsgi.py | efd7f899673f2e8ee018251af6f0faed3d133fcc | [] | no_license | mfarzamalam/django | d6d4302910301ae3e135a95a9982f3bd01218260 | 935a60d3ac874b7adb4287b4c2d172b89c6551b9 | refs/heads/master | 2023-04-10T21:06:11.601436 | 2021-04-27T21:31:47 | 2021-04-27T21:31:47 | 345,129,992 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 395 | py | """
WSGI config for School_13 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'School_13.settings')
application = get_wsgi_application()
| [
"mfarzamalam@gmail.com"
] | mfarzamalam@gmail.com |
e206dfba614433ba86c5a71556ab64ab0bdb2fba | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03326/s671553368.py | 2d00464fe05662acb9aa093915e6109294176329 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 592 | py | n, m = map(int, input().split())
XYZ = []
JKL = []
Seihu = [-1, 1]
for i in range(n):
x, y, z = map(int, input().split())
XYZ.append([x,y,z])
for j in Seihu:
for k in Seihu:
for l in Seihu:
tot = x*j + y*k + z*l
XYZ[i].append(tot)
for j in Seihu:
for k in Seihu:
for l in Seihu:
JKL.append([j,k,l])
ans = 0
for i in range(8):
jkl = JKL[i]
XYZ = sorted(XYZ, key=lambda x:x[i+3], reverse=True)
score = 0
for x in range(m):
score += XYZ[x][i+3]
ans = max(ans, score)
print(ans) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
6b9f29c30d1fd0ae9fe3d68b9873beeff2a97383 | b26ea6b32e5214c5ae32707ba00f5c441ba32da3 | /Biblioteca/settings-prod.py | 716224130602637a23fc30f9ae3f9fc037c2458b | [] | no_license | Karlosnat/https-github.com-rctorr-Biblioteca | 7818486b958998e7515bba140222c8d8da884248 | 828408b7ac4b06815e9e6137854345a74eb0d022 | refs/heads/master | 2020-06-18T08:45:57.928152 | 2019-07-04T02:58:08 | 2019-07-04T02:58:08 | 196,239,472 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,281 | py | """
Django settings for Biblioteca project.
Generated by 'django-admin startproject' using Django 2.2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'd-1qj@c5%d%a+ib=!krcwjr%_x4a0t@rz062pd9=fiqtw^tnrj'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ["rctorr.pythonanywhere.com"]
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'catalogo',
'rest_framework',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'Biblioteca.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'Biblioteca.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'es-MX'
TIME_ZONE = 'America/Mexico_City'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = "/home/rctorr/Biblioteca/static/"
# Se define la URL para login
LOGIN_URL = "/login/"
| [
"rictor@cuhrt.com"
] | rictor@cuhrt.com |
567f9beb3f0603e65a69f91b2c15d5f1de5f34b4 | ceeaf1a4c22e82b344fff6f8aaf2f3d4f4ab4521 | /suppliers/models.py | 82ee5b34c5d38601f3583e9bc9fe3f22c43686a2 | [
"MIT"
] | permissive | CzechInvest/ciis | a14dc23c87fda473be0b6aaeee9e12251c4ce041 | c6102598f564a717472e5e31e7eb894bba2c8104 | refs/heads/master | 2023-03-22T08:48:35.168956 | 2020-05-15T13:27:31 | 2020-05-15T13:27:31 | 110,870,857 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,046 | py | from django.db import models
from contacts.models import ContactPerson as MyContactPerson
from contacts.models import Organisation as MyOrganisation
from django.utils.translation import ugettext_lazy as _
from django.contrib.gis.db import models as gis_models
# Create your models here.
class Supplier(models.Model):
id = models.IntegerField(default=-1, primary_key=True)
name = models.TextField(
help_text=_("Name"), blank=True)
address = models.TextField(
help_text=_("Adresa"),
blank=True)
ico = models.TextField(
help_text=_("IČO"),
blank=True)
url = models.URLField(
help_text=_("URL"),
blank=True)
core_business = models.TextField(
help_text=_("Core business"),
blank=True)
geom = gis_models.PointField(
help_text=_("Bod"),
blank=True)
def __str__(self):
return self.name
class Meta():
managed = False
db_table = 'domino\".\"suppliers'
| [
"jachym.cepicky@gmail.com"
] | jachym.cepicky@gmail.com |
4fd7678e74d8b33f6305ad4d406a856ac171f8e8 | 72af42076bac692f9a42e0a914913e031738cc55 | /01, 특강_210705_0706/02, source/CookData(2021.01.15)/Ex05-02.py | d57ee7fa6f09cf4eacaab57d083ddf0f1fbb88cf | [] | no_license | goareum93/Algorithm | f0ab0ee7926f89802d851c2a80f98cba08116f6c | ec68f2526b1ea2904891b929a7bbc74139a6402e | refs/heads/master | 2023-07-01T07:17:16.987779 | 2021-08-05T14:52:51 | 2021-08-05T14:52:51 | 376,908,264 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,050 | py | ## 클래스와 함수 선언 부분 ##
class Node2() :
def __init__ (self) :
self.plink = None # 앞쪽 링크
self.data = None
self.nlink = None # 뒤쪽 링크
def printNodes(start):
current = start
if current.nlink == None :
return
print("정방향 --> ", end=' ')
print(current.data, end=' ')
while current.nlink != None:
current = current.nlink
print(current.data, end=' ')
print()
print("역방향 --> ", end=' ')
print(current.data, end=' ')
while current.plink != None:
current = current.plink
print(current.data, end=' ')
## 전역 변수 선언 부분 ##
memory = []
head, current, pre = None, None, None
dataArray = ["다현", "정연", "쯔위", "사나", "지효"]
## 메인 코드 부분 ##
if __name__ == "__main__" :
node = Node2() # 첫 번째 노드
node.data = dataArray[0]
head = node
memory.append(node)
for data in dataArray[1:] : # 두 번째 이후 노드
pre = node
node = Node2()
node.data = data
pre.nlink = node
node.plink = pre
memory.append(node)
printNodes(head)
| [
"goareum7@gmail.com"
] | goareum7@gmail.com |
7f67e437dfbb9b5a80dde706cb6652c1645f976b | 6dcaec1ea2c227eb84bfa02219e5a4ba5553c47c | /loja_template/apps/galerias/urls.py | 6711636ac6a68ddfbf2c0882ff89d85a7928203d | [] | no_license | silviolucenajunior/store-template | 8f5319c178d82142e3a4e179aca5fc12a6622a3b | 2dd7ffe8dbd894258225fef2b8b1e4b982a36260 | refs/heads/master | 2020-12-30T09:26:11.065198 | 2013-08-16T18:53:51 | 2013-08-16T18:53:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,499 | py | #-*- coding: utf-8 -*-
from django.conf.urls.defaults import *
#plugin tiny_mce
urlpatterns = patterns("galerias.views",
(r'^cadastrar-foto-plugin/$', 'cadastrar_foto_plugin_view'),
(r'^cadastrar-foto-plugin-ajax/$', 'cadastrar_foto_plugin_ajax'),
)
urlpatterns += patterns("galerias.views",
(r'^adicionar_foto_galeria/(?P<id_foto>.*)/(?P<id_galeria>.*)/$', 'adicionar_foto_galeria_ajax'),
(r'^cadastrar_galeria/$', 'cadastrar_galeria'),
(r'^cropar_foto/(?P<foto>.*)/$', 'manipular_foto_view'),
(r'^deletar_foto/(?P<id_foto>.*)/$', 'deletar_foto_ajax'),
(r'^fotos_json/$', 'fotos_json'),
(r'^gerenciar/$', 'gerenciador_view'),
(r'^listar_galerias/$', 'listar_galerias_view'),
(r'^manipular_foto/redimensionar/(?P<foto>.*)/$', 'redimensionar_foto_view'),
(r'^manipular_foto/(?P<foto>.*)/$', 'manipular_foto_view'),
(r'^visualizar_galeria/categoria/(?P<categoria_slug>.*)/(?P<autor_username>.*)/$', 'visualizar_galerias_por_categoria_autor_view'),
(r'^visualizar_galeria/categoria/(?P<categoria_slug>.*)/$', 'visualizar_galerias_por_categoria_view'),
(r'^visualizar_galeria/(?P<galeria_id>.*)/$', 'visualizar_galeria_view'),
(r'^visualizar_galeria/autor/(?P<autor_username>.*)/$', 'visualizar_galerias_por_autor_view'),
(r'^previa_galeria/(?P<galeria_id>.*)/$', 'visualizar_galeria_previa_view'),
(r'^setar_ordem_foto/(?P<id_foto>.*)/(?P<nova_ordem>.*)/$', 'setar_ordem_foto_ajax'),
(r'^upload_progress/$', 'upload_progress')
) | [
"root@localhost.localdomain"
] | root@localhost.localdomain |
ef79405a7d678f52d4a84fa7839c19a313c6db8b | 1a573f905f074a2135e82a91acbc9ae1d417d50b | /python/netograph/__init__.py | 0465509828e0e73f1b5758d11578ad7c2dbf3a3b | [] | no_license | mhils/netograph-api | f72a7d44773f6902f48f82d6a5cd166799cd3478 | 57425919637d8c7237b160561f181b06ab187bb5 | refs/heads/master | 2021-07-05T12:48:01.319568 | 2018-09-18T08:45:44 | 2018-09-18T08:45:44 | 152,243,259 | 0 | 0 | null | 2018-10-09T11:58:51 | 2018-10-09T11:58:50 | null | UTF-8 | Python | false | false | 888 | py | import grpc
from netograph.dsetapi import dset_pb2_grpc
from netograph.userapi import user_pb2_grpc
def connect_dset(token):
channel = grpc.secure_channel(
'grpc.netograph.io:443',
grpc.composite_channel_credentials(
grpc.ssl_channel_credentials(),
grpc.access_token_call_credentials(token),
),
options=[
('grpc.ssl_target_name_override', "grpc.netograph.io"),
]
)
return dset_pb2_grpc.DsetStub(channel)
def connect_user(token):
channel = grpc.secure_channel(
'grpc.netograph.io:443',
grpc.composite_channel_credentials(
grpc.ssl_channel_credentials(),
grpc.access_token_call_credentials(token),
),
options=[
('grpc.ssl_target_name_override', "grpc.netograph.io"),
]
)
return user_pb2_grpc.UserStub(channel) | [
"aldo@nullcube.com"
] | aldo@nullcube.com |
f03c704be6facd8abf5ad3c327fbff2e43f889a9 | 8a25ada37271acd5ea96d4a4e4e57f81bec221ac | /usr/share/pyshared/ajenti/plugins/dashboard/updater.py | 7c6e008bcb22774ad8b2cf6f8f28d50f2db594fa | [
"Apache-2.0"
] | permissive | lupyuen/RaspberryPiImage | 65cebead6a480c772ed7f0c4d0d4e08572860f08 | 664e8a74b4628d710feab5582ef59b344b9ffddd | refs/heads/master | 2021-01-20T02:12:27.897902 | 2016-11-17T17:32:30 | 2016-11-17T17:32:30 | 42,438,362 | 7 | 8 | null | null | null | null | UTF-8 | Python | false | false | 908 | py | import gevent
from ajenti.api import *
from ajenti.plugins.packages.api import PackageManager, PackageInfo
@plugin
class AjentiUpdater (BasePlugin):
AJENTI_PACKAGE_NAME = 'ajenti'
def run_update(self, packages):
packages = packages or [self.AJENTI_PACKAGE_NAME]
actions = []
mgr = PackageManager.get()
for name in packages:
p = PackageInfo()
p.name, p.action = name, 'i'
actions.append(p)
mgr.do(actions)
def check_for_updates(self, callback):
try:
mgr = PackageManager.get()
except NoImplementationsError:
return
def worker():
mgr.refresh()
r = []
for p in mgr.upgradeable:
if p.name.startswith(self.AJENTI_PACKAGE_NAME):
r.append(p.name)
callback(r)
gevent.spawn(worker)
| [
"lupyuen@gmail.com"
] | lupyuen@gmail.com |
7bbd018f9a34d91619ca2f6cf87822698dc58c22 | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /QdEAMeXNJAivcTMiT_1.py | 9bdb30c42b07d4edb70a8ec3942a35405ac86063 | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 287 | py |
def boxes(weights):
arr = []
rem = 10
t = []
for x in weights:
if rem - x >= 0:
rem -= x
t.append(x)
else :
arr.append(t)
t = []
rem = 10
rem -= x
t.append(x)
if len(t) > 0:
arr.append(t)
print(arr)
return len(arr)
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
c3a8d0bc33ebcec0952e8c6bbd6c4036f56842d8 | 26d6c34df00a229dc85ad7326de6cb5672be7acc | /msgraph-cli-extensions/beta/education_beta/azext_education_beta/vendored_sdks/education/operations/_education_users_classes_operations.py | 72c60d3f3410415743d9385b687fc2adff41a49a | [
"MIT"
] | permissive | BrianTJackett/msgraph-cli | 87f92471f68f85e44872939d876b9ff5f0ae6b2c | 78a4b1c73a23b85c070fed2fbca93758733f620e | refs/heads/main | 2023-06-23T21:31:53.306655 | 2021-07-09T07:58:56 | 2021-07-09T07:58:56 | 386,993,555 | 0 | 0 | NOASSERTION | 2021-07-17T16:56:05 | 2021-07-17T16:56:05 | null | UTF-8 | Python | false | false | 4,304 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, List, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class EducationUsersClassesOperations(object):
"""EducationUsersClassesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~education.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def delta(
self,
education_user_id, # type: str
**kwargs # type: Any
):
# type: (...) -> List["models.MicrosoftGraphEducationClass"]
"""Invoke function delta.
Invoke function delta.
:param education_user_id: key: id of educationUser.
:type education_user_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: list of MicrosoftGraphEducationClass, or the result of cls(response)
:rtype: list[~education.models.MicrosoftGraphEducationClass]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[List["models.MicrosoftGraphEducationClass"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
accept = "application/json"
# Construct URL
url = self.delta.metadata['url'] # type: ignore
path_format_arguments = {
'educationUser-id': self._serialize.url("education_user_id", education_user_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('[MicrosoftGraphEducationClass]', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
delta.metadata = {'url': '/education/users/{educationUser-id}/classes/microsoft.graph.delta()'} # type: ignore
| [
"japhethobalak@gmail.com"
] | japhethobalak@gmail.com |
c83a0ea898a3f56450e1df946b05e71b733734e3 | de7a0984af8ae2bb9706e256c79eb12af65f70f0 | /contexts/rtscontext.py | e155026e0a9acbe9b2a885b98d0a9c0c73e52529 | [
"MIT"
] | permissive | grecoe/amlsdummy | 3370eefdb42c6c6b5847d732d991c145384e0c3f | f83ade8f75bf972f574834eae2535cfda6e2711b | refs/heads/master | 2021-08-07T15:35:26.997351 | 2021-01-09T15:34:37 | 2021-01-09T15:34:37 | 237,675,078 | 1 | 2 | MIT | 2020-02-28T11:13:00 | 2020-02-01T20:42:17 | Python | UTF-8 | Python | false | false | 6,324 | py | import shutil
from scripts.azure_utils import *
from contexts.basecontext import BaseContext
class RealTimeScoringContext(BaseContext):
'''
Model file and scoring script. These are constants and
probably no need to update them.
The remainder of the needed configuration comes from
the program arguments parsed in general_utils.py
'''
model_file = "model.pkl"
scoring_script_name = "./scoring.py"
scoring_script = "./paths/realtime/scoring/scoring.py"
'''
Contains the context needed to perform the tasks.
'''
def __init__(self, programArgs, userAuthorization, job_log = None):
super().__init__(programArgs, userAuthorization, job_log)
self.containerImage = None
self.computeTarget = None
self.webservice = None
self.webserviceapi = {}
def generateModel(self):
'''
Get an existing model by name or create new
'''
self.model = getOrRegisterModel(
self.workspace,
self.experiment,
self.programArguments.model_name,
RealTimeScoringContext.model_file,
self.job_log
)
if not self.model:
raise Exception("Model Creation Failed")
def generateImage(self):
'''
Generates a docker image, get name and version using:
print(image.name, image.version)
Logs here:
image.image_build_log_uri
Move the scoring script to the execution directory (which is a requirement for creating an image)
When done, remove the copy.
'''
shutil.copyfile(RealTimeScoringContext.scoring_script, RealTimeScoringContext.scoring_script_name)
self.containerImage = createImage(
self.workspace,
RealTimeScoringContext.scoring_script_name,
self.model,
self.programArguments.image_name,
self.job_log)
if not self.containerImage:
raise Exception("Container Image Creation Failed")
print("Container Creation Log: ", self.containerImage.image_build_log_uri)
def loadImage(self):
'''
In testing, I did NOT want to keep generating a model and generating an image,
if it loads then we've already done that step.
'''
if not self.containerImage:
self.containerImage = getExistingContainerImage(self.workspace, self.programArguments.image_name, self.job_log )
if self.containerImage != None:
'''
With CMK testing, we really need to check this....it's possible an image
was attempted but the actual build failed as it happens on ACR. This means
that AMLS will record that it has an image, but the image state comes back
failed.
'''
if self.containerImage.creation_state == "Failed":
raise Exception("Image exists but state is failed, terminating process...")
return self.containerImage != None
def generateComputeTarget(self, cluster_name = None, resource_group = None):
'''
Caller has to figure out if they are going to attach an existing cluster
or create a new one. Decided based on parameters
'''
if self.computeTarget:
return self.computeTarget
if cluster_name is None and resource_group is None:
print("Option is to create new compute target....")
self.computeTarget = getOrCreateComputeCluster(
self.workspace,
self.programArguments.region,
self.programArguments.aks_compute_name,
self.programArguments.aks_vm_size,
self.programArguments.aks_node_count,
self.programArguments.aks_non_prod,
self.job_log
)
else:
print("Option is to attach existing compute target....")
self.computeTarget = attachExistingCluster(
self.workspace,
cluster_name,
resource_group,
self.programArguments.aks_compute_name,
self.programArguments.aks_non_prod,
self.job_log
)
if not self.computeTarget:
raise Exception("Cannot create compute target.")
def deleteWebservice(self):
if not self.webservice:
raise Exception("No web service loaded")
print("Deleting web service...")
self.job_log.addInfo("Deleting web service")
self.webservice.delete()
self.webservice = None
self.job_log.addInfo("Web service deleted")
def loadWebservice(self):
'''
Retrieve an existing web service, used for deletion purposes.
'''
if not self.workspace:
raise Exception("You must load the workspace first")
if not self.containerImage:
raise Exception("You must load the conatiner image first")
if not self.webservice:
self.webservice = getExistingWebService(
self.workspace,
self.containerImage,
self.programArguments.aks_service_name,
self.job_log
)
return self.webservice != None
def generateWebService(self):
'''
Generate the web service
'''
if not self.webservice:
self.webservice = getOrCreateWebservice(
self.workspace,
self.containerImage,
self.programArguments.aks_service_name,
self.programArguments.aks_num_replicas,
self.programArguments.aks_cpu_cores,
self.computeTarget,
self.job_log
)
if not self.webservice:
raise Exception("Could not create the web service.")
self.webserviceapi["url"] = self.webservice.scoring_uri
self.webserviceapi["key"] = self.webservice.get_keys()[0]
def testWebService(self):
if self.webservice:
prediction = self.webservice.run(json.dumps({"name": "Dave"}))
print(prediction)
| [
"grecoe@microsoft.com"
] | grecoe@microsoft.com |
8d829cc9490f7662027662a0f9511a6e6e50f951 | ec85250addb7357dfe7bb3e0680d53fc7b0fd8fb | /python_modules/dagster-test/dagster_test/toys/asset_reconciliation/eager_reconciliation.py | 5a49890cf839aa67d14aa36c0c71124187f55cb5 | [
"Apache-2.0"
] | permissive | dagster-io/dagster | 6adb5deee8bcf3ea1866a6a64f2ed81e1db5e73a | fe21995e0402878437a828c6a4244025eac8c43b | refs/heads/master | 2023-09-05T20:46:08.203794 | 2023-09-05T19:54:52 | 2023-09-05T19:54:52 | 131,619,646 | 8,565 | 1,154 | Apache-2.0 | 2023-09-14T21:57:37 | 2018-04-30T16:30:04 | Python | UTF-8 | Python | false | false | 602 | py | from dagster import (
AutoMaterializePolicy,
Definitions,
asset,
load_assets_from_current_module,
)
@asset
def root1():
...
@asset
def root2():
...
@asset
def diamond_left(root1):
...
@asset
def diamond_right(root1):
...
@asset
def diamond_sink(diamond_left, diamond_right):
...
@asset
def after_both_roots(root1, root2):
...
defs = Definitions(
assets=load_assets_from_current_module(
group_name="eager_reconciliation",
key_prefix="eager_reconciliation",
auto_materialize_policy=AutoMaterializePolicy.eager(),
),
)
| [
"noreply@github.com"
] | dagster-io.noreply@github.com |
e29b726c54adb05c37f2093deda259bec151ea6e | 30fe7671b60825a909428a30e3793bdf16eaaf29 | /.metadata/.plugins/org.eclipse.core.resources/.history/63/9010f07eaeea00161174a93fd5908e78 | 392d2088e7c6f05709c2717070f065f8bf4e2b58 | [] | no_license | abigdream84/PythonStudy | 0fc7a3b6b4a03a293b850d0ed12d5472483c4fb1 | 059274d3ba6f34b62ff111cda3fb263bd6ca8bcb | refs/heads/master | 2021-01-13T04:42:04.306730 | 2017-03-03T14:54:16 | 2017-03-03T14:54:16 | 79,123,274 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 243 | #!/usr/bin/env python
#coding:UTF-8
import threading
import time
num = 0
def run():
time.sleep(1)
global num
num += 1
time.sleep(1)
print(num)
for i in range(100):
t = threading.Thread(target=run)
t.start()
| [
"abigdream@hotmail.com"
] | abigdream@hotmail.com | |
ae1e84aa9f819ff4d88b735878bf3a153afc31e3 | aeeaf40350a652d96a392010071df8a486c6e79f | /archive/python/Python/unsorted/235.lowest-common-ancestor-of-a-binary-search-tree.py | 2fe98c1dd49f5015b1f1abd8ba318f5bf60c1b0b | [
"MIT"
] | permissive | linfengzhou/LeetCode | 11e6c12ce43cf0053d86437b369a2337e6009be3 | cb2ed3524431aea2b204fe66797f9850bbe506a9 | refs/heads/master | 2021-01-23T19:34:37.016755 | 2018-04-30T20:44:40 | 2018-04-30T20:44:40 | 53,916,868 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 721 | py | # Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def lowestCommonAncestor(self, root, p, q):
"""
:type root: TreeNode
:type p: TreeNode
:type q: TreeNode
:rtype: TreeNode
"""
if not root or root == p or root == q:
return root
left = self.lowestCommonAncestor(root.left, p, q)
right = self.lowestCommonAncestor(root.right, p, q)
if left and right:
return root
if left:
return left
if right:
return right
return None
| [
"luke.zlf@gmail.com"
] | luke.zlf@gmail.com |
aabc884e84fcfc59e35fc4a13e9012ad096792c4 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03464/s278760514.py | 08616e0b58be34c3cb585dbb67b7ff087e21dabd | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 322 | py | n=int(input())
arr=list(map(int,input().split()))
arr=arr[::-1]
if arr[0]!=2:
print(-1)
else:
arr=arr[1:]
l=2
r=3
for val in arr:
if (l<=val*(l//val)<=r) or (l<=val*(r//val)<=r):
if l%val!=0:
l=val*(l//val+1)
r=val*(r//val+1)-1
else:
print(-1)
break
else:
print(l,r) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
09202c09b7522bbd53167ab063440db35dd0fec8 | 44eb40bf7bbd006f441b22d149dbb06eebe97506 | /src/chap02/04_XOR_gate.py | 566ded8dc57e75ea16bfa6a035fade75e11729bb | [] | no_license | hoonest/Deep_Learning | 56939f983c81e75b79d5474c11649dd57bf7107b | dd94f46ff886f20a47b09a54593e5fd2d53f0ed4 | refs/heads/master | 2020-04-19T22:52:03.640247 | 2019-02-19T03:34:16 | 2019-02-19T03:34:16 | 168,481,590 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 944 | py | import numpy as np
# from 02_AND_bias import AND
# AND gate
def AND(x1, x2):
x = np.array([x1, x2])
w = np.array([0.5, 0.5])
b = -0.7
tmp = np.sum(w * x) + b
if tmp <= 0:
return 0
else:
return 1
# NAND Gate
def NAND(x1, x2):
x = np.array([x1, x2])
w = np.array([-0.5, -0.5])
bias = 0.7
tmp = np.sum(w * x) + bias
if tmp <= 0:
return 0
else:
return 1
# OR Gate
def OR(x1, x2):
w = np.array([0.5, 0.5]) # AND와는 모두 같은 구조의 피셉트론
x = np.array([x1, x2])
b = -0.2
tmp = np.sum(w * x) + b
if tmp <= 0:
return 0
else:
return 1
# XOR Gate
def XOR(x1, x2):
s1 = NAND(x1, x2)
s2 = OR(x1, x2)
y = AND(s1, s2)
return y
if __name__ == "__main__":
x = XOR(0, 0) # 0
print(x)
x = XOR(0, 1) # 1
print(x)
x = XOR(1, 0) # 1
print(x)
x = XOR(1, 1) # 0
print(x)
| [
"hoonest01@gmail.com"
] | hoonest01@gmail.com |
ae349ac80acca792276ae618b437905cce9acb03 | 945d957bde025c0aa96df08d151252c3d2b682cb | /dynamicNetworkConfig/transport/wsgi/middleware/__init__.py | ffd513f3f042ae197e107ffc17b10c682bbcccae | [
"Apache-2.0"
] | permissive | ClockwerksSoftware/dynamicNetworkConfig | 85d8c16d44dbcd8361dba17fe01d5474d2e3a7c5 | c785f437edb912be0915822184d3967c71225849 | refs/heads/master | 2021-01-19T12:51:56.336000 | 2017-02-25T19:36:37 | 2017-02-25T19:36:37 | 82,368,281 | 0 | 1 | null | 2017-02-25T19:36:38 | 2017-02-18T07:00:42 | null | UTF-8 | Python | false | false | 190 | py | from dynamicNetworkConfig.transport.wsgi.middleware.auth import (
AuthenticationMiddleware
)
from dynamicNetworkConfig.transport.wsgi.middleware.context import (
ContextMiddleware
)
| [
"bm_witness@yahoo.com"
] | bm_witness@yahoo.com |
b4b1aae7471ab0b781047b9e07c70747713c5a23 | 2635edb96afa8117d4584a470061e447b79adc6e | /mybook/urls.py | e264e1f09a359e34ec1c24f8104f5d3d73414430 | [] | no_license | Mark-Seaman/Sensei-2018 | 673609731ecb5ebb782dab94b2cf3d7c22940424 | 06b02892cfe1bf1d25cb4224e86eb693c82b0f29 | refs/heads/master | 2022-02-18T19:14:10.343093 | 2022-01-15T20:06:21 | 2022-01-15T20:06:21 | 158,728,468 | 0 | 0 | null | 2022-01-16T21:06:09 | 2018-11-22T16:51:55 | HTML | UTF-8 | Python | false | false | 1,778 | py | from django.conf.urls import url
from django.contrib.auth import login, logout
from .seaman import DocFileIndex, DocList, Leverage, MarkSeaman, PrivateDoc, SeamansLog
from .guide import SeamansGuide
from .views import *
from .spiritual import SpiritualDoc, SpiritualSelect
urlpatterns = [
# Documents
url(r'^$', DocRoot.as_view()),
url(r'^(?P<title>[\w/\-_.]*)/Missing$', DocMissing.as_view()),
url(r'^(?P<title>[\w/\-_.]*)/Random$', DocRandom.as_view()),
url(r'^(?P<title>[\w/\-_.]*)/List$', DocList.as_view()),
url(r'^(?P<title>[\w/\-_.]*)/Files$', DocFileIndex.as_view()),
# Authentication
# url(r'^login', login, name='login'),
# url(r'^logout$', logout, {'next_page': '/login'}),
# MarkSeaman
#url(r'^MarkSeaman/booknotes/(?P<title>[\w/\-.]*)$', BookNotes.as_view()),
url(r'MarkSeaman/(?P<title>[\w/\-.]*)$', MarkSeaman.as_view()),
# Guide
url(r'^guide/(?P<title>[\w/\-_.]*)$', SeamansGuide.as_view()),
# Private Pages
url(r'^info/(?P<title>[\w/\-_.]*)$', PrivateDoc.as_view()),
# Seaman's Log
url(r'^seamanslog$', SeamansLog.as_view()),
url(r'^seamanslog/(?P<title>[\w/\-_.]*)$', SeamansLog.as_view()),
# Shrinking World
url(r'shrinkingworld/Leverage/(?P<title>[\w/\-.]*)$', Leverage.as_view()),
# Spiritual
url(r'^spiritual/Index$', SpiritualDoc.as_view()),
url(r'^spiritual/(?P<title>[\w\-_.]*)$', SpiritualSelect.as_view()),
url(r'^spiritual/(?P<title>[\w/\-_.]*)$', SpiritualDoc.as_view()),
# Documents
url(r'^(?P<title>[\w/\-_.]*)$', DocDisplay.as_view()),
]
| [
"Mark.Seaman@imac.net"
] | Mark.Seaman@imac.net |
8d2fd166695b224a0c4a3383603705671eaf20b8 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03767/s405465871.py | e340c69019eb1ab19c7c4ffec2a97b3f548737f3 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 166 | py | n = int(input())
a = sorted(map(int,input().split()),reverse=True)
ans = 0
t = (n*3)//3
cnt = 0
i = 1
while cnt<t:
cnt += 1
ans += a[i]
i += 2
print(ans) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
7f93026775a2e9e95f6b22976a0c2f2e247b946a | 930309163b930559929323647b8d82238724f392 | /DSL_3_C.py | 12248c6e6506b9376befc0c7bcdb09205d16bb8f | [] | no_license | GINK03/atcoder-solvers | 874251dffc9f23b187faa77c439b445e53f8dfe1 | b1e7ac6e9d67938de9a85df4a2f9780fb1fbcee7 | refs/heads/master | 2021-11-07T14:16:52.138894 | 2021-09-12T13:32:29 | 2021-09-12T13:32:29 | 11,724,396 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,398 | py |
class Segtree:
n = 1
size = 1
log = 2
d = [0]
op = None
e = 10 ** 15
def __init__(self, V: "List", OP: "function", E: "基底"):
self.n = len(V)
self.op = OP
self.e = E
self.log = (self.n - 1).bit_length()
self.size = 1 << self.log
self.d = [E for i in range(2 * self.size)]
for i in range(self.n):
self.d[self.size + i] = V[i]
for i in range(self.size - 1, 0, -1):
self.update(i)
def set(self, p, x): # 1
assert 0 <= p and p < self.n
p += self.size
self.d[p] = x
[self.update(p >> i) for i in range(1, self.log + 1)]
def get(self, p): # 2
assert 0 <= p and p < self.n
return self.d[p + self.size]
def prod(self, l, r): # 3
assert 0 <= l and l <= r and r <= self.n
sml = smr = self.e
l += self.size; r += self.size
while l < r:
if l & 1:
sml = self.op(sml, self.d[l])
l += 1
if r & 1:
smr = self.op(self.d[r - 1], smr)
r -= 1
l >>= 1; r >>= 1
return self.op(sml, smr)
def all_prod(self): # 4
return self.d[1]
def max_right(self, l, f): # 5
assert 0 <= l and l <= self.n
assert f(self.e)
if l == self.n:
return self.n
l += self.size
sm = self.e
while 1:
while l % 2 == 0:
l >>= 1
if not (f(self.op(sm, self.d[l]))):
while l < self.size:
l = 2 * l
if f(self.op(sm, self.d[l])):
sm = self.op(sm, self.d[l])
l += 1
return l - self.size
sm = self.op(sm, self.d[l])
l += 1
if (l & -l) == l:
break
return self.n
def min_left(self, r, f): # 6
assert 0 <= r and r < self.n
assert f(self.e)
if r == 0:
return 0
r += self.size
sm = self.e
while 1:
r -= 1
while r > 1 & (r % 2):
r >>= 1
if not (f(self.op(self.d[r], sm))):
while r < self.size:
r = 2 * r + 1
if f(self.op(self.d[r], sm)):
sm = self.op(self.d[r], sm)
r -= 1
return r + 1 - self.size
sm = self.op(self.d[r], sm)
if (r & -r) == r:
break
return 0
def update(self, k): # 7
self.d[k] = self.op(self.d[2 * k], self.d[2 * k + 1])
N,Q=map(int,input().split())
*A,=map(int,input().split())
*X,=map(int,input().split())
'''
stree = Segtree(V=A, OP=lambda x,y:x+y, E=0)
import collections
cnt = collections.defaultdict(int)
for left in range(N):
for right in range(left, N):
p = stree.prod(left, right+1)
cnt[p] += 1
for x in X:
print(sum([f for p, f in cnt.items() if p <= x]))
'''
def shakutori(X):
left, right = 0,0 # 動かす変数
ans = 0
for left in range(N):
tmp = 0
right = left
while right < N and tmp + A[right] <= X:
tmp += A[right]
right += 1
ans += right - left
#ans += right - left + 1
print(ans)
for x in X:
shakutori(x)
| [
"gim.kobayashi@gmail.com"
] | gim.kobayashi@gmail.com |
853e439c56d563a22f2a948eb8b7b9eed9488bda | b6a84594f8c29d968014faaddd49abeb7537a5fc | /python/349.intersection-of-two-arrays.py | 7a451d2011ff6765e416ab3f944dd25ec1210da3 | [] | no_license | nickyfoto/lc | 8a6af3df114e693e265d0ede03f4d4e1283e010e | 3633b4df3e24968057c7d684689b931c5a8032d3 | refs/heads/master | 2020-09-16T19:23:07.765917 | 2020-06-07T17:18:06 | 2020-06-07T17:18:06 | 223,866,098 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,496 | py | #
# @lc app=leetcode id=349 lang=python3
#
# [349] Intersection of Two Arrays
#
# https://leetcode.com/problems/intersection-of-two-arrays/description/
#
# algorithms
# Easy (54.07%)
# Total Accepted: 216.7K
# Total Submissions: 398.2K
# Testcase Example: '[1,2,2,1]\n[2,2]'
#
# Given two arrays, write a function to compute their intersection.
#
# Example 1:
#
#
# Input: nums1 = [1,2,2,1], nums2 = [2,2]
# Output: [2]
#
#
#
# Example 2:
#
#
# Input: nums1 = [4,9,5], nums2 = [9,4,9,8,4]
# Output: [9,4]
#
#
# Note:
#
#
# Each element in the result must be unique.
# The result can be in any order.
#
#
#
#
#
class Solution:
# def intersection(self, nums1: List[int], nums2: List[int]) -> List[int]:
def intersection(self, nums1, nums2):
d1 = {}
d2 = {}
for i in nums1:
if i not in d1:
d1[i] = 0
for i in nums2:
if i not in d2:
d2[i] = 0
l1, l2 = len(d1), len(d2)
if l1 < l2:
for k in d1:
if k in d2:
d1[k] = 1
return [k for (k, v) in d1.items() if d1[k]]
else:
for k in d2:
if k in d1:
d2[k] = 1
return [k for (k, v) in d2.items() if d2[k]]
# s = Solution()
# nums1 = [1,2,2,1]
# nums2 = [2,2]
# print(s.intersection(nums1, nums2))
# nums1 = [4,9,5]
# nums2 = [9,4,9,8,4]
# print(s.intersection(nums1, nums2))
| [
"nickyfoto@gmail.com"
] | nickyfoto@gmail.com |
c1ebd8b8d228c003d0a4407c67ebe600f881f249 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02733/s121858438.py | 185f83ced0156e8f9333a88aeced5f9f6a733be6 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 231 | py | h,*s=open(0)
h,w,k,*m=map(int,h.split())
b=w
while b:
b-=1;r=t=j=0;d=[0]*h
while w-j:
i=c=0;j+=1
while h-i:
d[c]+=s[i][~j]>'0'
if d[c]>k:d=[0]*h;f=t<j;r-=h*w*~-f-1;t=j;j-=f;break
c+=b>>i&1;i+=1
m+=r+c,
print(min(m)) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
bab41f705343005511627b25159e282df8e0bf4c | 466ac2f617422572ee37a1b0eac9825a141e26fa | /thormang3/display_result.py | 0e9c2b648da72fbd5bbeef54c99eecfdfc421d9a | [] | no_license | ahanjaya/Classifying-3D-objects-with-CNN | d667bd15dbbdbb5533033d33e93ccc24899788e6 | 03a5edd1ef5aca759e49c49af476470d9e2e37cb | refs/heads/master | 2020-09-28T10:41:24.090638 | 2020-04-17T07:38:19 | 2020-04-17T07:38:19 | 226,761,422 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,884 | py | #!/usr/bin/env python3
import os
import sys
import yaml
import pickle
import itertools
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import LabelEncoder
class Results:
def __init__(self):
self.n_folder = 9
self.res_folder = "result/{}".format(self.n_folder)
plt.style.use('seaborn-deep')
plt.rcParams.update({'font.size': 22})
def plot_single_graph(self, mode, vcnn1, voxnet, mvcnn):
plt.style.use('seaborn-deep')
plt.rcParams.update({'font.size': 22})
fig, ax = plt.subplots(1, 1, figsize=(12,8))
# mode = 'val_loss' or 'val_accuracy' or 'accuracy' or 'loss'])
if 'loss' in mode:
val_vcnn1 = np.min( np.array(vcnn1.history[mode]) )
val_voxnet = np.min( np.array(voxnet.history[mode]) )
val_mvcnn1 = np.min( np.array(mvcnn.history[mode]) )
else:
val_vcnn1 = np.max( np.array(vcnn1.history[mode]) )
val_voxnet = np.max( np.array(voxnet.history[mode]) )
val_mvcnn1 = np.max( np.array(mvcnn.history[mode]) )
epochs = range(1,self.epochs + 1)
ax.plot(epochs, vcnn1.history [mode], 'r', label='VCNN1 - {0:.2f}' .format(val_vcnn1))
ax.plot(epochs, voxnet.history[mode], 'b', label='VoxNet - {0:.2f}'.format(val_voxnet))
ax.plot(epochs, mvcnn.history[mode], 'g', label='MVCNN - {0:.2f}'.format(val_mvcnn1))
ax.legend()
ax.grid()
ax.set_xlabel('Epochs')
ax.set_ylabel(mode)
return fig
def plot_double_graph(self, mode, vcnn1, voxnet, mvcnn):
fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(12,8))
# mode = 'val_loss' or 'val_accuracy' or 'accuracy' or 'loss'])
if 'loss' in mode:
train_vcnn1 = np.min( np.array(vcnn1. history['loss']) )
train_voxnet = np.min( np.array(voxnet.history['loss']) )
train_mvcnn1 = np.min( np.array(mvcnn.history['loss']) )
val_vcnn1 = np.min( np.array(vcnn1. history['val_loss']) )
val_voxnet = np.min( np.array(voxnet.history['val_loss']) )
val_mvcnn1 = np.min( np.array(mvcnn.history['val_loss']) )
ax1.set_ylabel('Training Loss')
ax2.set_ylabel('Validation Loss')
else:
train_vcnn1 = np.max( np.array(vcnn1. history['accuracy']) )
train_voxnet = np.max( np.array(voxnet.history['accuracy']) )
train_mvcnn1 = np.max( np.array(mvcnn.history['accuracy']) )
val_vcnn1 = np.max( np.array(vcnn1. history['val_accuracy']) )
val_voxnet = np.max( np.array(voxnet.history['val_accuracy']) )
val_mvcnn1 = np.max( np.array(mvcnn.history['val_accuracy']) )
ax1.set_ylabel('Training Accuracy')
ax2.set_ylabel('Validation Accuracy')
epochs = range(1,self.epochs + 1)
ax1.plot(epochs, vcnn1.history [mode], 'r', label='VCNN1 - {0:.2f}' .format(train_vcnn1))
ax1.plot(epochs, voxnet.history[mode], 'b', label='VoxNet - {0:.2f}'.format(train_voxnet))
ax1.plot(epochs, mvcnn.history[mode], 'g', label='MVCNN - {0:.2f}'.format(train_mvcnn1))
ax2.plot(epochs, vcnn1.history ['val_'+mode], 'r', label='VCNN1 - {0:.2f}' .format(val_vcnn1))
ax2.plot(epochs, voxnet.history['val_'+mode], 'b', label='VoxNet - {0:.2f}'.format(val_voxnet))
ax2.plot(epochs, mvcnn.history['val_'+mode], 'g', label='MVCNN - {0:.2f}'.format(val_mvcnn1))
ax1.legend()
ax2.legend()
ax1.grid()
ax2.grid()
ax1.set_xlabel('Epochs')
ax2.set_xlabel('Epochs')
fig.tight_layout()
def plot_confusion_matrix(self, name, cm, classes, normalize=False, cmap=plt.cm.Blues):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
title='Normalized confusion matrix'
else:
title='Confusion matrix'
# plt.figure(self.plt_num, figsize=(7.5, 6))
# plt.figure(plt_num, figsize=(12, 8))
fig, ax = plt.subplots(1, 1, figsize=(12,8))
# plt.imshow(cm, interpolation='nearest', cmap=cmap)
# plt.title(title)
# plt.colorbar()
im = ax.imshow(cm, interpolation='nearest', cmap=cmap)
fig.colorbar(im, ax=ax)
tick_marks = np.arange(len(classes))
ax.set_xticks(tick_marks)
ax.set_xticklabels(classes)
plt.xticks(rotation=45)
ax.set_yticks(tick_marks)
ax.set_yticklabels(classes)
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
ax.text(j, i, format(cm[i, j], fmt), horizontalalignment="center", color="white" if cm[i, j] > thresh else "black")
fig.tight_layout()
ax.set_ylabel('True label')
ax.set_xlabel('Predicted label')
# fig_name = '{}/{}_{}.png'.format(self.res_folder, self.n_folder, name)
# fig.savefig(fig_name, dpi=fig.dpi)
def run(self):
# load pickle
self.pickle_file = "{}/{}_history.p".format(self.res_folder, self.n_folder)
with open(self.pickle_file, 'rb') as filehandle:
data = pickle.load(filehandle)
cm = data['cm']
# classes = [ 'big_suitcase', 'black_chair', 'blue_chair', 'small_suitcase', 'table']
classes = [ 'big_suitcase', 'black_chair', 'blue_chair', 'small_suitcase']
self.plot_confusion_matrix('MVCNN', cm, classes=classes)
plt.show(block=False)
input('Close: ')
plt.close('all')
if __name__ == '__main__':
res = Results()
res.run() | [
"hanjaya_mandala@yahoo.com"
] | hanjaya_mandala@yahoo.com |
bd4492e32b8c961efd74ceda5ee7197a84dbcbcc | 2ae24d0c6d91df960a2ca68a0b7a754a69d4fe18 | /web/exmr/apps/merchant_tools/migrations/0011_multipayment.py | d15d79ad3a8e90ae65584c71692fb62d2c618350 | [] | no_license | exmrcoin/project-gcps.io | 2fc2a0a207ce1282d616a8a680aef938fbcf5352 | c0071e63406845a5f3dbbe33ae65673cacc271f8 | refs/heads/master | 2023-01-04T02:08:47.893847 | 2020-10-29T17:03:34 | 2020-10-29T17:03:34 | 121,253,614 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 962 | py | # Generated by Django 2.0.2 on 2018-07-19 09:17
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('coins', '0007_auto_20180718_1131'),
('merchant_tools', '0010_posqrmaker'),
]
operations = [
migrations.CreateModel(
name='MultiPayment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('paid_amount', models.CharField(max_length=512)),
('eq_usd', models.CharField(blank=True, max_length=512, null=True)),
('paid_unique_id', models.CharField(blank=True, max_length=512, null=True)),
('transaction_id', models.CharField(max_length=64)),
('paid_in', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='coins.Coin')),
],
),
]
| [
"nijap@techversantinfotech.com"
] | nijap@techversantinfotech.com |
133204167552658953dec762fb75f1d33e371f32 | 768058e7f347231e06a28879922690c0b6870ed4 | /venv/lib/python3.7/site-packages/cytoolz/tests/test_curried_toolzlike.py | 04da221e7a6c2da495482c4d0ec0f832cf4c4132 | [] | no_license | jciech/HeisenbergSpinChains | 58b4238281d8c158b11c6c22dd0da82025fd7284 | e43942bbd09f6675e7e2ff277f8930dc0518d08e | refs/heads/master | 2022-12-18T08:04:08.052966 | 2020-09-29T12:55:00 | 2020-09-29T12:55:00 | 258,476,448 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,439 | py | import cytoolz
import cytoolz.curried
import types
from dev_skip_test import dev_skip_test
# Note that the tests in this file assume `toolz.curry` is a class, but we
# may some day make `toolz.curry` a function and `toolz.Curry` a class.
@dev_skip_test
def test_toolzcurry_is_class():
import toolz
assert isinstance(toolz.curry, type) is True
assert isinstance(toolz.curry, types.FunctionType) is False
@dev_skip_test
def test_cytoolz_like_toolz():
import toolz
import toolz.curried
for key, val in vars(toolz.curried).items():
if isinstance(val, toolz.curry):
if val.func is toolz.curry: # XXX: Python 3.4 work-around!
continue
assert hasattr(cytoolz.curried, key), (
"cytoolz.curried.%s does not exist" % key
)
assert isinstance(getattr(cytoolz.curried, key), cytoolz.curry), (
"cytoolz.curried.%s should be curried" % key
)
@dev_skip_test
def test_toolz_like_cytoolz():
import toolz
import toolz.curried
for key, val in vars(cytoolz.curried).items():
if isinstance(val, cytoolz.curry):
assert hasattr(toolz.curried, key), (
"cytoolz.curried.%s should not exist" % key
)
assert isinstance(getattr(toolz.curried, key), toolz.curry), (
"cytoolz.curried.%s should not be curried" % key
)
| [
"jan@multiply.ai"
] | jan@multiply.ai |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.