Dataset schema (one record per source file):

| column | dtype | range / cardinality |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 3–288 |
| content_id | string | length 40 |
| detected_licenses | list | length 0–112 |
| license_type | string | 2 classes |
| repo_name | string | length 5–115 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 684 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 – 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 – 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 – 2023-09-06 01:08:06 |
| github_id | int64 | 4.92k – 681M (nullable) |
| star_events_count | int64 | 0 – 209k |
| fork_events_count | int64 | 0 – 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us] | 2012-06-04 01:52:49 – 2023-09-14 21:59:50 (nullable) |
| gha_created_at | timestamp[us] | 2008-05-22 07:58:19 – 2023-08-21 12:35:19 (nullable) |
| gha_language | string | 147 classes |
| src_encoding | string | 25 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 128 – 12.7k |
| extension | string | 142 classes |
| content | string | length 128 – 8.19k |
| authors | list | length 1 |
| author_id | string | length 1–132 |
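The records below follow this schema, one per source file. The sketch that follows shows how rows with these columns could be filtered, assuming each record is exposed as a plain Python dict keyed by the column names above; the loading mechanism itself is not part of this dump, so the `rows` iterable and the helper name are hypothetical.

```python
from typing import Any, Dict, Iterable, Iterator

def permissive_python_rows(rows: Iterable[Dict[str, Any]],
                           max_bytes: int = 8192) -> Iterator[Dict[str, Any]]:
    """Yield records that carry a permissive license, are neither vendored
    nor generated, and stay under a size threshold (hypothetical helper)."""
    for row in rows:
        if row.get("license_type") != "permissive":
            continue
        if row.get("is_vendor") or row.get("is_generated"):
            continue
        if row.get("length_bytes", 0) > max_bytes:
            continue
        yield row

# Example with one record transcribed from the table below:
example_row = {
    "repo_name": "uw-biomedical-ml/tilelit",
    "path": "/tilelit/tests/test_tilelit.py",
    "license_type": "permissive",
    "is_vendor": False,
    "is_generated": False,
    "length_bytes": 792,
}
for match in permissive_python_rows([example_row]):
    print(match["repo_name"], match["path"])
```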
62225756896d2b4614a6f4732d1f5e155553fb11
|
f931249f3766bd871eede76a950484701915c32d
|
/collective_decision/tests/test_models.py
|
aa2bd906a88f75c7dc22581ed4b16733fcad544b
|
[] |
no_license
|
cleliofavoccia/Share
|
aa509c9cfa1aa3789237b411b2b94d952d848322
|
cf0b982a6df2b8b4318d12d344ef0827394eedfd
|
refs/heads/main
| 2023-07-11T08:29:59.016252
| 2021-08-09T10:13:53
| 2021-08-09T10:13:53
| 373,621,787
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,125
|
py
|
"""Tests of collective_decision django models"""
from django.test import TestCase
from django.db import IntegrityError
from user.models import User
from group.models import Group
from group_member.models import GroupMember
from product.models import Product
from ..models import Estimation, Decision
class EstimationModelTest(TestCase):
"""Tests on Estimation object"""
@classmethod
def setUp(cls):
"""Set up a context to test Estimation object"""
user = User.objects.create_user(
username='Frodon',
email='frodon@gmail.com',
password='sam'
)
group = Group.objects.create(name="La communauté de l'anneau ")
cls.group_member = GroupMember.objects.create(user=user, group=group)
cls.product = Product.objects.create(
name='PS5',
group=group
)
cls.estimation = Estimation.objects.create(
cost=3,
group_member=cls.group_member,
product=cls.product
)
def test_estimation_has_group_member(self):
"""Test Estimation object has relation
with GroupMember object"""
user = User.objects.get(username='Frodon')
estimation_group_member = Estimation.objects.get(
group_member=GroupMember.objects.get(
user=user
)
)
self.assertEqual(self.estimation, estimation_group_member)
def test_estimation_has_product(self):
"""Test Estimation object has relation
with Product object"""
product = Product.objects.get(name='PS5')
estimation_product = Estimation.objects.get(product=product)
self.assertEqual(self.estimation, estimation_product)
def test_delete_estimation_not_delete_group_member_and_product(self):
"""Test if Estimation object is deleted, GroupMember and Product
objects are not deleted"""
estimation = self.estimation
estimation.delete()
group_member = self.group_member
product = self.product
self.assertTrue(group_member)
self.assertTrue(product)
def test_constraints_one_estimation_per_product_and_group_member(self):
"""Test if constraint of one Estimation object between
product and group_member is respected"""
try:
estimation_two = Estimation.objects.create(
cost=6,
group_member=self.group_member,
product=self.product
)
estimation_two.save()
except IntegrityError:
estimation_two = 'IntegrityError'
self.assertEqual(estimation_two, 'IntegrityError')
class DecisionModelTest(TestCase):
"""Tests on Decision object"""
@classmethod
def setUp(cls):
"""Set up a context to test Decision object"""
user = User.objects.create_user(
username='Frodon',
email='frodon@gmail.com',
password='sam'
)
cls.group = Group.objects.create(name="La communauté de l'anneau")
cls.group_member = GroupMember.objects.create(
user=user,
group=cls.group
)
cls.decision = Decision.objects.create(
delete_group_vote=True,
modify_group_vote=False,
delete_member_vote=True,
group_member=cls.group_member,
group=cls.group
)
def test_decision_has_group_member(self):
"""Test Decision object has relation
with GroupMember object"""
user = User.objects.get(username='Frodon')
decision_group_member = Decision.objects.get(
group_member=GroupMember.objects.get(
user=user
)
)
self.assertEqual(self.decision, decision_group_member)
def test_decision_has_group(self):
"""Test Decision object has relation
with Group object"""
group = Group.objects.get(name="La communauté de l'anneau")
decision_group = Decision.objects.get(group=group)
self.assertEqual(self.decision, decision_group)
def test_delete_decision_not_delete_group_member_and_group(self):
"""Test if Decision object is deleted, GroupMember and Group
objects are not deleted"""
decision = self.decision
decision.delete()
group_member = self.group_member
group = self.group
self.assertTrue(group_member)
self.assertTrue(group)
def test_constraints_one_decision_per_group_and_group_member(self):
"""Test if constraint of one Decision object between
group and group_member is respected"""
try:
decision_two = Decision.objects.create(
delete_group_vote=True,
modify_group_vote=True,
delete_member_vote=True,
group_member=self.group_member,
group=self.group
)
decision_two.save()
except IntegrityError:
decision_two = 'IntegrityError'
self.assertEqual(decision_two, 'IntegrityError')
|
[
"favoccia.c@live.fr"
] |
favoccia.c@live.fr
|
be6cbbd046838c6b3ea18f1e70cb6f08612db43e
|
0c1d6b8dff8bedfffa8703015949b6ca6cc83f86
|
/lib/worklists/operator/CT/v3.0/business/ADSL/test/data.py
|
011c359518dc97400db6bdb1e079f6fba7538eb2
|
[] |
no_license
|
samwei8/TR069
|
6b87252bd53f23c37186c9433ce4d79507b8c7dd
|
7f6b8d598359c6049a4e6cb1eb1db0899bce7f5c
|
refs/heads/master
| 2021-06-21T11:07:47.345271
| 2017-08-08T07:14:55
| 2017-08-08T07:14:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 456
|
py
|
#coding:utf-8
# -----------------------------doc--------------------------
# Work order description
WORKLIST_DOC = """ """
# -----------------------------args--------------------------
# Work order parameters
WORKLIST_ARGS = {
"ConnectionType": ("IP_Routed", "1"),
"ServiceList": ("INTERNET", "2"),
"Username": ("admin", "3"),
"Password": ("admin", "4"),
"Enable": ("true", "5")
}
|
[
"zhaojunhhu@gmail.com"
] |
zhaojunhhu@gmail.com
|
47ea0ceecd6c900370bb212ceedfa5d6e9398339
|
2b1b4b92b5b6dadc6dfb556e867b042c97a4d738
|
/CMGTools/H2TauTau/python/proto/plotter/HistDrawer.py
|
afd02f94b7ea7778cb69f5c3d0a53fba6df59986
|
[] |
no_license
|
emanueledimarco/cmg-cmssw
|
65f6d20ccd9bfec74856397407b96dee9830afc6
|
b54ebd03ab8bad9c7694d867e3f3c54a1ad910c4
|
refs/heads/CMGTools_from7412_monox
| 2021-01-18T17:57:17.833303
| 2016-02-24T15:27:58
| 2016-02-24T15:27:58
| 21,276,131
| 1
| 0
| null | 2017-10-25T21:50:09
| 2014-06-27T13:11:29
|
C++
|
UTF-8
|
Python
| false
| false
| 6,022
|
py
|
import re
import os
import copy
from math import log10, floor
from ROOT import TCanvas, TPaveText, TBox, gStyle
from CMGTools.RootTools.DataMC.Stack import Stack
from CMGTools.H2TauTau.proto.plotter.CMS_lumi import CMS_lumi
from CMGTools.H2TauTau.proto.plotter.officialStyle import officialStyle
officialStyle(gStyle)
def ensureDir(directory):
if not os.path.exists(directory):
os.makedirs(directory)
class HistDrawer:
ocan = None
can = None
pad = None
padr = None
@classmethod
def buildCanvas(cls):
can = cls.can
pad = cls.pad
padr = cls.padr
if not can:
can = cls.can = TCanvas('can', '', 800, 800)
can.Divide(1, 2, 0.0, 0.0)
pad = cls.pad = can.GetPad(1)
padr = cls.padr = can.GetPad(2)
# Set Pad sizes
pad.SetPad(0.0, 0.32, 1., 1.0)
padr.SetPad(0.0, 0.00, 1., 0.34)
pad.SetTopMargin(0.08)
pad.SetLeftMargin(0.16)
pad.SetBottomMargin(0.03)
pad.SetRightMargin(0.05)
padr.SetBottomMargin(0.35)
padr.SetLeftMargin(0.16)
padr.SetRightMargin(0.05)
can.cd()
can.Draw()
pad.Draw()
padr.Draw()
return can, pad, padr
@classmethod
def buildCanvasSingle(cls):
ocan = TCanvas('ocan', '', 600, 600)
ocan.cd()
ocan.Draw()
return ocan
@staticmethod
def datasetInfo(plot):
year = ''
year = '2015'
lumi = plot.lumi if hasattr(plot, 'lumi') else 0.
unit = plot.lumi_unit if hasattr(plot, 'lumi_unit') else 'pb'
energy = plot.com_energy if hasattr(plot, 'com_energy') else 13
return year, lumi, energy, unit
@staticmethod
def CMSPrelim(plot, pad, channel, legend='right'):
pad.cd()
year, lumi, energy, unit = HistDrawer.datasetInfo(plot)
theStr = '{lumi:3.3} {unit}^{{-1}} ({energy:d} TeV)'.format(year=year, unit=unit, lumi=lumi, energy=energy)
CMS_lumi(pad, theStr, iPosX=0)
lowY = 0.77
r = pad.GetRightMargin()
l = pad.GetLeftMargin()
posX = l + 0.045*(1-l-r)
posXhigh = 0.25
if legend == 'left':
posX = 1. - r - 0.08
posXhigh = 1. - r - 0.02
plot.chan = TPaveText(posX, lowY, posXhigh, lowY+0.18, "NDC")
plot.chan.SetBorderSize(0)
plot.chan.SetFillStyle(0)
plot.chan.SetTextAlign(12)
plot.chan.SetTextSize(0.6*pad.GetTopMargin()) # To have it the same size as CMS_lumi
plot.chan.SetTextFont(42)
plot.chan.AddText(channel)
plot.chan.Draw('same')
unitpat = re.compile('.*\((.*)\)\s*$')
keeper = []
@staticmethod
def draw(plot, do_ratio=True, channel='TauMu', plot_dir='plots',
plot_name=None, SetLogy=0, mssm=False,
blindxmin=None, blindxmax=None, unit=None):
print plot
Stack.STAT_ERRORS = True
can = pad = padr = None
if do_ratio:
can, pad, padr = HistDrawer.buildCanvas()
else:
can = HistDrawer.buildCanvasSingle()
pad.cd()
pad.SetLogy(SetLogy)
plot.DrawStack('HIST')
h = plot.supportHist
h.GetXaxis().SetLabelColor(1)
# h.GetXaxis().SetLabelSize(1)
unitsperbin = h.GetXaxis().GetBinWidth(1)
ytitle = 'Events'
if unit:
round_to_n = lambda x, n: round(x, -int(floor(log10(abs(x)))) + (n - 1))
ytitle += ' / {} {}'.format(round_to_n(unitsperbin, 3), unit)
h.GetYaxis().SetTitle(ytitle)
h.GetYaxis().SetTitleOffset(1.0)
h.GetXaxis().SetTitleOffset(2.0)
if do_ratio:
padr.cd()
ratio = copy.deepcopy(plot)
ratio.legendOn = False
if blindxmin or blindxmax:
if not blindxmin:
blindxmin = 0
if not blindxmax:
blindxmax = plot.GetXaxis().GetXmax()
if do_ratio:
ratio.Blind(blindxmin, blindxmax, True)
plot.Blind(blindxmin, blindxmax, False)
if do_ratio:
ratio.DrawDataOverMCMinus1(-0.5, 0.5)
hr = ratio.dataOverMCHist
# ytp_ratio = float(pad.YtoPixel(0.)/padr.YtoPixel(0))
# print 'YTP ratio', ytp_ratio
# xtp_ratio = float(pad.XtoPixel(0.)/padr.XtoPixel(0))
# print 'XTP ratio', xtp_ratio
ytp_ratio = 2.
xtp_ratio = 2.
# hr.GetYaxis().SetNdivisions(4)
hr.GetYaxis().SetTitleSize(h.GetYaxis().GetTitleSize() * xtp_ratio)
hr.GetXaxis().SetTitleSize(h.GetXaxis().GetTitleSize() * ytp_ratio)
hr.GetYaxis().SetTitleOffset(h.GetYaxis().GetTitleOffset() / xtp_ratio)
hr.GetXaxis().SetTitleOffset(h.GetXaxis().GetTitleOffset() / ytp_ratio)
hr.GetYaxis().SetLabelSize(h.GetYaxis().GetLabelSize() * xtp_ratio)
hr.GetXaxis().SetLabelSize(h.GetXaxis().GetLabelSize() * ytp_ratio)
h.GetXaxis().SetLabelColor(0)
h.GetXaxis().SetLabelSize(0)
padr.Update()
# blinding
if blindxmin or blindxmax:
pad.cd()
max = plot.stack.totalHist.GetMaximum()
box = TBox(blindxmin, 0, blindxmax, max)
box.SetFillColor(1)
box.SetFillStyle(3004)
box.Draw()
HistDrawer.keeper.append(box)
print channel
if channel == 'TauMu':
HistDrawer.CMSPrelim(plot, pad, '#tau_{#mu}#tau_{h}', legend=plot.legendPos)
elif channel == 'TauEle':
HistDrawer.CMSPrelim(plot, pad, '#tau_{e}#tau_{h}', legend=plot.legendPos)
can.cd()
plotname = plot_dir + '/'
ensureDir(plot_dir)
plotname += plot_name if plot_name else plot.name
can.SaveAs(plotname + '.png')
pad.SetLogy(0)
return ratio
drawRatio = draw
|
[
"jan.steggemann@cern.ch"
] |
jan.steggemann@cern.ch
|
8f54020865428b31e51ea2f36018f18b202ee386
|
b46dff381a329b5456359d9edcbb279e7c710999
|
/tilelit/tests/test_tilelit.py
|
f6f7297882115d058d6e9d9ba83007ba751485e7
|
[
"MIT"
] |
permissive
|
uw-biomedical-ml/tilelit
|
d8c48cfaf95ea90c23fe4dbc7862d76747e00628
|
36062279e321a30a194982dc61df6946614e7e22
|
refs/heads/master
| 2021-09-17T22:08:43.761931
| 2018-07-05T22:27:58
| 2018-07-05T22:27:58
| 114,414,633
| 3
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 792
|
py
|
from __future__ import absolute_import, division, print_function
import os.path as op
import numpy as np
import pandas as pd
import numpy.testing as npt
import tilelit as ti
import skimage.data as skd
from skimage import transform as tf
def test_tile():
static = skd.coins()
ty = np.random.randn() * 0.05 * static.shape[0]
tx = np.random.randn() * 0.05 * static.shape[1]
rot = np.random.randn() * np.pi/20
affine = tf.AffineTransform(rotation=rot, translation=(tx, ty))
moving = tf.warp(static, affine.inverse)
img_initial, aff_initial = \
ti.initial_alignment(static, moving, gaussian_blur=5)
img_final, aff_final = \
ti.fine_alignment(static, img_initial, starting_affine=aff_initial)
npt.assert_almost_equal(aff_final, affine.params)
|
[
"arokem@gmail.com"
] |
arokem@gmail.com
|
06196dc14bd12901227bf7210937a9df76502363
|
4a8c1f7d9935609b780aff95c886ef7781967be0
|
/topcoder/Rookie SRM 1/OverallScores.py
|
d32100bb4d37fadd8b01c0ab9f613199978513d7
|
[] |
no_license
|
recuraki/PythonJunkTest
|
d5e5f5957ac5dd0c539ef47759b1fe5ef7a2c52a
|
2556c973d468a6988d307ce85c5f2f8ab15e759a
|
refs/heads/master
| 2023-08-09T17:42:21.875768
| 2023-07-18T23:06:31
| 2023-07-18T23:06:31
| 13,790,016
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,359
|
py
|
# -*- coding: utf-8 -*-
import math,string,itertools,fractions,heapq,collections,re,array,bisect
class OverallScores:
def findWinner(self, N, scores):
dat = [0] * N
for i in range(len(scores) // N):
for j in range(N):
dat[j] += scores[j + i * N]
maind = -1
maval = -1
for i in range(N):
if dat[i] > maval:
maind = i
maval = dat[i]
return maind
# CUT begin
# TEST CODE FOR PYTHON {{{
import sys, time, math
def tc_equal(expected, received):
try:
_t = type(expected)
received = _t(received)
if _t == list or _t == tuple:
if len(expected) != len(received): return False
return all(tc_equal(e, r) for (e, r) in zip(expected, received))
elif _t == float:
eps = 1e-9
d = abs(received - expected)
return not math.isnan(received) and not math.isnan(expected) and d <= eps * max(1.0, abs(expected))
else:
return expected == received
except:
return False
def pretty_str(x):
if type(x) == str:
return '"%s"' % x
elif type(x) == tuple:
return '(%s)' % (','.join( (pretty_str(y) for y in x) ) )
else:
return str(x)
def do_test(N, scores, __expected):
startTime = time.time()
instance = OverallScores()
exception = None
try:
__result = instance.findWinner(N, scores);
except:
import traceback
exception = traceback.format_exc()
elapsed = time.time() - startTime # in sec
if exception is not None:
sys.stdout.write("RUNTIME ERROR: \n")
sys.stdout.write(exception + "\n")
return 0
if tc_equal(__expected, __result):
sys.stdout.write("PASSED! " + ("(%.3f seconds)" % elapsed) + "\n")
return 1
else:
sys.stdout.write("FAILED! " + ("(%.3f seconds)" % elapsed) + "\n")
sys.stdout.write(" Expected: " + pretty_str(__expected) + "\n")
sys.stdout.write(" Received: " + pretty_str(__result) + "\n")
return 0
def run_tests():
sys.stdout.write("OverallScores (400 Points)\n\n")
passed = cases = 0
case_set = set()
for arg in sys.argv[1:]:
case_set.add(int(arg))
with open("OverallScores.sample", "r") as f:
while True:
label = f.readline()
if not label.startswith("--"): break
N = int(f.readline().rstrip())
scores = []
for i in range(0, int(f.readline())):
scores.append(int(f.readline().rstrip()))
scores = tuple(scores)
f.readline()
__answer = int(f.readline().rstrip())
cases += 1
if len(case_set) > 0 and (cases - 1) in case_set: continue
sys.stdout.write(" Testcase #%d ... " % (cases - 1))
passed += do_test(N, scores, __answer)
sys.stdout.write("\nPassed : %d / %d cases\n" % (passed, cases))
T = time.time() - 1614260880
PT, TT = (T / 60.0, 75.0)
points = 400 * (0.3 + (0.7 * TT * TT) / (10.0 * PT * PT + TT * TT))
sys.stdout.write("Time : %d minutes %d secs\n" % (int(T/60), T%60))
sys.stdout.write("Score : %.2f points\n" % points)
if __name__ == '__main__':
run_tests()
# }}}
# CUT end
|
[
"kanai@wide.ad.jp"
] |
kanai@wide.ad.jp
|
2a722ec1fe9fcda7804a6102c78858b391aa6dfe
|
377dc973a58d30154cf485de141223d7ca5424dd
|
/havok_classes/hkaSkeletonLocalFrameOnBone.py
|
24e0e76c53f139086cdd47e4f2731f50d18ed9ba
|
[
"MIT"
] |
permissive
|
sawich/havok-reflection
|
d6a5552f2881bb4070ad824fb7180ad296edf4c4
|
1d5b768fb533b3eb36fc9e42793088abeffbad59
|
refs/heads/master
| 2021-10-11T12:56:44.506674
| 2019-01-25T22:37:31
| 2019-01-25T22:37:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 582
|
py
|
from .hkLocalFrame import hkLocalFrame
import struct
class hkaSkeletonLocalFrameOnBone(object):
localFrame: any
boneIndex: int
def __init__(self, infile):
self.localFrame = any(infile) # TYPE_POINTER:TYPE_STRUCT
self.boneIndex = struct.unpack('>h', infile.read(2)) # TYPE_INT16:TYPE_VOID
def __repr__(self):
return "<{class_name} localFrame={localFrame}, boneIndex={boneIndex}>".format(**{
"class_name": self.__class__.__name__,
"localFrame": self.localFrame,
"boneIndex": self.boneIndex,
})
|
[
"kevin@turtlerockweb.com"
] |
kevin@turtlerockweb.com
|
0953423a781578e7260493e3b17df08f46cb8ffa
|
0f9a97d48a9f0179bcf1e3d80c08340096eb561e
|
/ДЗ-5. Функции и рекурсия/C. Периметр треугольника.py
|
218540cdf273e3aecc918c967a3df0f90da0aa53
|
[] |
no_license
|
dmitryokh/python
|
96d8ec8c3f2d3428b90d510a1003aecf102b13d0
|
8efe761412779bed9a7516832d3152843088fa43
|
refs/heads/master
| 2020-04-24T03:08:42.865813
| 2019-02-20T11:41:52
| 2019-02-20T11:41:52
| 171,661,235
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 315
|
py
|
from math import sqrt
def distance(x1, y1, x2, y2):
return(sqrt((x2 - x1)**2 + (y2 - y1)**2))
x1 = int(input())
y1 = int(input())
x2 = int(input())
y2 = int(input())
x3 = int(input())
y3 = int(input())
a = distance(x1, y1, x2, y2)
b = distance(x1, y1, x3, y3)
c = distance(x3, y3, x2, y2)
print(a + b + c)
|
[
"noreply@github.com"
] |
dmitryokh.noreply@github.com
|
46b795561c7cd2ef9084ec78d43889d97b023575
|
747f759311d404af31c0f80029e88098193f6269
|
/addons/stock/wizard/stock_location_product.py
|
1f9777a72c1d926463d1fd0cf913ed81086e71dc
|
[] |
no_license
|
sgeerish/sirr_production
|
9b0d0f7804a928c0c582ddb4ccb7fcc084469a18
|
1081f3a5ff8864a31b2dcd89406fac076a908e78
|
refs/heads/master
| 2020-05-19T07:21:37.047958
| 2013-09-15T13:03:36
| 2013-09-15T13:03:36
| 9,648,444
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,423
|
py
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from osv import fields, osv
class stock_location_product(osv.osv_memory):
_name = "stock.location.product"
_description = "Products by Location"
_columns = {
'from_date': fields.datetime('From'),
'to_date': fields.datetime('To'),
}
def action_open_window(self, cr, uid, ids, context=None):
""" To open location wise product information specific to given duration
@param self: The object pointer.
@param cr: A database cursor
@param uid: ID of the user currently logged in
@param ids: An ID or list of IDs if we want more than one
@param context: A standard dictionary
@return: Invoice type
"""
mod_obj = self.pool.get('ir.model.data')
for location_obj in self.read(cr, uid, ids, ['from_date', 'to_date']):
return {
'name': False,
'view_type': 'form',
'view_mode': 'tree,form',
'res_model': 'product.product',
'type': 'ir.actions.act_window',
'context': {'location': context['active_id'],
'from_date': location_obj['from_date'],
'to_date': location_obj['to_date'],
},
'domain': [('qty_available', '>', 0)],
}
stock_location_product()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
[
"geerish@omerp.net"
] |
geerish@omerp.net
|
e4def48c4cf436a2b2d384bd315d99b9a0f1b56a
|
09e57dd1374713f06b70d7b37a580130d9bbab0d
|
/benchmark/startQiskit_noisy1310.py
|
f6c89f3e4f5a2d30ec73a1b5f825e843ad9d4a87
|
[
"BSD-3-Clause"
] |
permissive
|
UCLA-SEAL/QDiff
|
ad53650034897abb5941e74539e3aee8edb600ab
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
refs/heads/main
| 2023-08-05T04:52:24.961998
| 2021-09-19T02:56:16
| 2021-09-19T02:56:16
| 405,159,939
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,424
|
py
|
# qubit number=5
# total number=56
import cirq
import qiskit
from qiskit.providers.aer import QasmSimulator
from qiskit.test.mock import FakeVigo
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2,floor, sqrt, pi
import numpy as np
import networkx as nx
def build_oracle(n: int, f) -> QuantumCircuit:
# implement the oracle O_f^\pm
# NOTE: use U1 gate (P gate) with \lambda = 180 ==> CZ gate
# or multi_control_Z_gate (issue #127)
controls = QuantumRegister(n, "ofc")
oracle = QuantumCircuit(controls, name="Zf")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.h(controls[n])
if n >= 2:
oracle.mcu1(pi, controls[1:], controls[0])
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n,"qc")
classical = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classical)
prog.h(input_qubit[0]) # number=3
prog.h(input_qubit[1]) # number=4
prog.h(input_qubit[2]) # number=50
prog.cz(input_qubit[4],input_qubit[2]) # number=51
prog.h(input_qubit[2]) # number=52
prog.h(input_qubit[2]) # number=5
prog.h(input_qubit[3]) # number=6
prog.h(input_qubit[4]) # number=21
Zf = build_oracle(n, f)
repeat = floor(sqrt(2 ** n) * pi / 4)
for i in range(repeat):
prog.append(Zf.to_gate(), [input_qubit[i] for i in range(n)])
prog.h(input_qubit[0]) # number=1
prog.h(input_qubit[1]) # number=2
prog.h(input_qubit[2]) # number=7
prog.h(input_qubit[3]) # number=8
prog.h(input_qubit[0]) # number=28
prog.z(input_qubit[3]) # number=42
prog.cz(input_qubit[1],input_qubit[0]) # number=29
prog.h(input_qubit[0]) # number=30
prog.h(input_qubit[0]) # number=43
prog.cz(input_qubit[1],input_qubit[0]) # number=44
prog.h(input_qubit[0]) # number=45
prog.cx(input_qubit[1],input_qubit[0]) # number=35
prog.h(input_qubit[0]) # number=53
prog.cz(input_qubit[1],input_qubit[0]) # number=54
prog.h(input_qubit[0]) # number=55
prog.x(input_qubit[0]) # number=39
prog.cx(input_qubit[1],input_qubit[0]) # number=40
prog.cx(input_qubit[1],input_qubit[0]) # number=37
prog.h(input_qubit[0]) # number=46
prog.cz(input_qubit[1],input_qubit[0]) # number=47
prog.h(input_qubit[0]) # number=48
prog.cx(input_qubit[1],input_qubit[0]) # number=27
prog.x(input_qubit[1]) # number=10
prog.x(input_qubit[2]) # number=11
prog.x(input_qubit[3]) # number=12
if n>=2:
prog.mcu1(pi,input_qubit[1:],input_qubit[0])
prog.x(input_qubit[0]) # number=13
prog.cx(input_qubit[0],input_qubit[1]) # number=22
prog.y(input_qubit[2]) # number=41
prog.x(input_qubit[1]) # number=23
prog.cx(input_qubit[0],input_qubit[1]) # number=24
prog.rx(1.0398671683382215,input_qubit[2]) # number=31
prog.x(input_qubit[2]) # number=15
prog.x(input_qubit[3]) # number=16
prog.h(input_qubit[0]) # number=17
prog.h(input_qubit[1]) # number=18
prog.h(input_qubit[2]) # number=19
prog.h(input_qubit[3]) # number=20
# circuit end
for i in range(n):
prog.measure(input_qubit[i], classical[i])
return prog
if __name__ == '__main__':
key = "00000"
f = lambda rep: str(int(rep == key))
prog = make_circuit(5,f)
backend = FakeVigo()
sample_shot =7924
info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
backend = FakeVigo()
circuit1 = transpile(prog,backend,optimization_level=2)
writefile = open("../data/startQiskit_noisy1310.csv","w")
print(info,file=writefile)
print("results end", file=writefile)
print(circuit1.depth(),file=writefile)
print(circuit1,file=writefile)
writefile.close()
|
[
"wangjiyuan123@yeah.net"
] |
wangjiyuan123@yeah.net
|
9ad89e902ed284ef9edf8837805fb2c2d6c96dce
|
f0f2a548457e345d5432c8ddcd37daa25ee56957
|
/apml-client/show-stats.py
|
c989e874560d2414b6478337566c10ae269a4f02
|
[
"MIT"
] |
permissive
|
blxlrsmb/myap.ml
|
edf2703df34de27282dbf0d7821f83b629e1ccc8
|
b21270d54b453cd53e160a017f21d52e444ff62b
|
refs/heads/master
| 2021-01-24T20:52:38.420997
| 2016-06-19T06:19:44
| 2016-06-19T06:19:44
| 36,968,412
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 450
|
py
|
#!/usr/bin/env python2
# -*- coding: UTF-8 -*-
# File: show-stats.py
# Date: Sat Jun 06 20:16:34 2015 +0800
# Author: Yuxin Wu <ppwwyyxxc@gmail.com>
from logger import EventLogger
ev = EventLogger()
pkgs = ev.packages
names = set()
cnt = 0
for p in pkgs:
names = names.union(set(p['key'].keys()))
cnt += sum(p['key'].itervalues())
names = names.union(set(p['mouse'].keys()))
cnt += sum(p['mouse'].itervalues())
print names
print cnt
|
[
"ppwwyyxxc@gmail.com"
] |
ppwwyyxxc@gmail.com
|
bc68bd06515bf96079aa0ed7e1770b5be7c25b5c
|
b50ff836c3010b42821624e79cb21e4d9594f588
|
/selfdrive/car/vin.py
|
76fd34ca6d80e8aecc040376a6369041889c537f
|
[
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
dnv26/openpilot
|
5fe441eb9ef93a56b5a2522ace58c3ae415c7813
|
b1db54ea553ccc41cfb54bdf66a1c31ab52432f2
|
refs/heads/HKG_6.5
| 2020-09-09T06:37:33.909558
| 2019-11-14T21:47:55
| 2019-11-14T21:47:55
| 221,376,282
| 8
| 31
|
MIT
| 2020-05-09T02:30:46
| 2019-11-13T05:01:02
|
C++
|
UTF-8
|
Python
| false
| false
| 3,245
|
py
|
#!/usr/bin/env python3
import selfdrive.messaging as messaging
from selfdrive.boardd.boardd import can_list_to_can_capnp
VIN_UNKNOWN = "0" * 17
# sanity checks on response messages from vin query
def is_vin_response_valid(can_dat, step, cnt):
if len(can_dat) != 8:
# ISO-TP messages are all 8 bytes
return False
if step == 0:
# VIN does not fit in a single message and it's 20 bytes of data
if can_dat[0] != 0x10 or can_dat[1] != 0x14:
return False
if step == 1 and cnt == 0:
# first response after a CONTINUE query is sent
if can_dat[0] != 0x21:
return False
if step == 1 and cnt == 1:
# second response after a CONTINUE query is sent
if can_dat[0] != 0x22:
return False
return True
class VinQuery():
def __init__(self, bus):
self.bus = bus
# works on standard 11-bit addresses for diagnostic. Tested on Toyota and Subaru;
# Honda uses the extended 29-bit addresses, and unfortunately only works from OBDII
self.query_ext_msgs = [[0x18DB33F1, 0, b'\x02\x09\x02'.ljust(8, b"\x00"), bus],
[0x18DA10f1, 0, b'\x30'.ljust(8, b"\x00"), bus]]
self.query_nor_msgs = [[0x7df, 0, b'\x02\x09\x02'.ljust(8, b"\x00"), bus],
[0x7e0, 0, b'\x30'.ljust(8, b"\x00"), bus]]
self.cnts = [1, 2] # number of messages to wait for at each iteration
self.step = 0
self.cnt = 0
self.responded = False
self.never_responded = True
self.dat = b""
self.vin = VIN_UNKNOWN
def check_response(self, msg):
# have we got a VIN query response?
if msg.src == self.bus and msg.address in [0x18daf110, 0x7e8]:
self.never_responded = False
# basic sanity checks on ISO-TP response
if is_vin_response_valid(msg.dat, self.step, self.cnt):
self.dat += bytes(msg.dat[2:]) if self.step == 0 else bytes(msg.dat[1:])
self.cnt += 1
if self.cnt == self.cnts[self.step]:
self.responded = True
self.step += 1
def send_query(self, sendcan):
# keep sending the VIN query if the ECU isn't responding.
# sendcan is probably not ready due to the zmq slow joiner syndrome
if self.never_responded or (self.responded and self.step < len(self.cnts)):
sendcan.send(can_list_to_can_capnp([self.query_ext_msgs[self.step]], msgtype='sendcan'))
sendcan.send(can_list_to_can_capnp([self.query_nor_msgs[self.step]], msgtype='sendcan'))
self.responded = False
self.cnt = 0
def get_vin(self):
# only report vin if procedure is finished
if self.step == len(self.cnts) and self.cnt == self.cnts[-1]:
self.vin = self.dat[3:].decode('utf8')
return self.vin
def get_vin(logcan, sendcan, bus, query_time=1.):
vin_query = VinQuery(bus)
frame = 0
# 1s max of VIN query time
while frame < query_time * 100:
a = messaging.recv_one(logcan)
for can in a.can:
vin_query.check_response(can)
vin_query.send_query(sendcan)
frame += 1
return vin_query.get_vin()
if __name__ == "__main__":
from selfdrive.services import service_list
logcan = messaging.sub_sock(service_list['can'].port)
sendcan = messaging.pub_sock(service_list['sendcan'].port)
print(get_vin(logcan, sendcan, 0))
|
[
"user@comma.ai"
] |
user@comma.ai
|
8fd9e5477942786209d08d62c024179c66489083
|
6b2a8dd202fdce77c971c412717e305e1caaac51
|
/solutions_5640146288377856_0/Python/trapped/a_small.py
|
d6f1fa475144b794563c801431c358bd4fcbb062
|
[] |
no_license
|
alexandraback/datacollection
|
0bc67a9ace00abbc843f4912562f3a064992e0e9
|
076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf
|
refs/heads/master
| 2021-01-24T18:27:24.417992
| 2017-05-23T09:23:38
| 2017-05-23T09:23:38
| 84,313,442
| 2
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 538
|
py
|
T = int(raw_input())
def solve(r, c, w):
if w == 1: return r*c
if w == c: return (r-1) + c
# following only for small case: r=1
if w > c//2: return w+1
if w == c//2: return w+1+(c%2)
return 1+ solve(r, c-w, w)
'''if c%2 == 1:
ans = 1 + (c//2)//w
ans += solve(r, c//2, w)
else:
ans = 1 + (c//2 - 1)//w
ans += solve(r, c//2, w)
return ans'''
for t in range(1,T+1):
r, c, w = map(int, raw_input().strip().split())
print "Case #%d: %d"%(t, solve(r,c,w))
|
[
"eewestman@gmail.com"
] |
eewestman@gmail.com
|
c8d8db5bcecf909d177c3caf44457684046f456e
|
e5606385fc77c1fbb30be223637b2b62372d6635
|
/news/urls.py
|
20fbb45caaed199e495d3c073c27917d6aaccb6c
|
[] |
no_license
|
bhoj001/DJ_Test_App
|
6b2f116523af72a15370e303af745d7df7d7951c
|
a20d5ae9049a5d432ba219eb96036953c5512bf5
|
refs/heads/master
| 2020-03-24T00:25:57.714156
| 2018-07-25T11:29:15
| 2018-07-25T11:29:15
| 142,290,934
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 202
|
py
|
from django.conf.urls import url
from tango_with_django import views
urlpatterns = [
url(r'^home$', views.home, name='home'),
url(r'^$', views.index, name='index'),
]
|
[
"nishantkarki305@gmail.com"
] |
nishantkarki305@gmail.com
|
1abb6b3b06ad3d28a1bc8ffef2a528725d178bbd
|
9214a22c37f9751f66005d9033c68eadca43d6ee
|
/app/.history/datasets/spotify_fetch_20201212160615.py
|
2d534dc61d069d9f1b881c5e1a14e0cc57cb80ae
|
[] |
no_license
|
bsalgado98/lockdown-music
|
0800922ea25c9b07ea4b034297e67725776de070
|
a407265b7caa83667c92ee13c2adec1645acdafa
|
refs/heads/main
| 2023-01-29T09:55:38.137972
| 2020-12-17T01:01:49
| 2020-12-17T01:01:49
| 322,133,071
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 687
|
py
|
import spotipy
from spotipy.oauth2 import SpotifyClientCredentials
import json
import csv
auth_manager = SpotifyClientCredentials()
sp = spotipy.Spotify(auth_manager=auth_manager)
def get_artist_genre(artist):
results = sp.search(q='artist:' + artist, type='artist')
# print(json.dumps(results, indent=4, sort_keys=True))
artists = results['artists']
items = artists['items']
print(artists)
grammys = []
with open('the_grammy_awards.csv', mode='r') as csv_file:
csv_reader = csv.DictReader(csv_file)
line_count = 0
for row in csv_reader:
grammys.append(row)
artist = grammys[0].get('artist')
genres = get_artist_genre(artist)
# print(genres)
|
[
"bsalgado@stevens.edu"
] |
bsalgado@stevens.edu
|
e58b99b41a4f3a5a38ecf00da40037bd8fa7eae3
|
5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d
|
/alipay/aop/api/response/AlipayPcreditLoanHousemortgageRealtychainNotifyResponse.py
|
c8ce7d57930cb013c46e118af02437d5099697ec
|
[
"Apache-2.0"
] |
permissive
|
alipay/alipay-sdk-python-all
|
8bd20882852ffeb70a6e929038bf88ff1d1eff1c
|
1fad300587c9e7e099747305ba9077d4cd7afde9
|
refs/heads/master
| 2023-08-27T21:35:01.778771
| 2023-08-23T07:12:26
| 2023-08-23T07:12:26
| 133,338,689
| 247
| 70
|
Apache-2.0
| 2023-04-25T04:54:02
| 2018-05-14T09:40:54
|
Python
|
UTF-8
|
Python
| false
| false
| 506
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class AlipayPcreditLoanHousemortgageRealtychainNotifyResponse(AlipayResponse):
def __init__(self):
super(AlipayPcreditLoanHousemortgageRealtychainNotifyResponse, self).__init__()
def parse_response_content(self, response_content):
response = super(AlipayPcreditLoanHousemortgageRealtychainNotifyResponse, self).parse_response_content(response_content)
|
[
"jiandong.jd@antfin.com"
] |
jiandong.jd@antfin.com
|
146ee16ca9aaaf47c04e5aa73ebb2fc961973fc6
|
3ec8ade4cedc4601a1dea1933182675bc35f543a
|
/demo/models.py
|
4376f3134a7a3e28177d207c2299c172e8bd3723
|
[] |
no_license
|
artrey/drf-comments
|
d9fce0b14c28ef9810cc17caeb03b2435cc075b6
|
ee875f820dc691df984ef8f292e8f9e532ca4689
|
refs/heads/master
| 2023-07-20T12:31:14.879634
| 2021-08-20T17:12:12
| 2021-08-20T17:12:12
| 398,344,295
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 270
|
py
|
from django.core.validators import MaxValueValidator
from django.db import models
class Comment(models.Model):
author = models.CharField(max_length=100)
text = models.TextField()
rating = models.PositiveSmallIntegerField(validators=[MaxValueValidator(5)])
|
[
"oz.sasha.ivanov@gmail.com"
] |
oz.sasha.ivanov@gmail.com
|
a7323120d71625d329147a0ad58c8fbe841c5d7e
|
edcd74f8f65119bdbe737360c2ca33b4a6da160a
|
/python/test-multiprocessing/multiprocessing_simple.py
|
f23b32c4e12ab97388c5ee1cb1354982163bc9f4
|
[] |
no_license
|
hyunjun/practice
|
72e83de6a1d5e04ddcd16526f16110ea2dd00373
|
5376dd48b1cefb4faba9d2ef6a8a497b6b1d6c67
|
refs/heads/master
| 2023-08-31T07:00:37.320351
| 2023-08-17T07:29:24
| 2023-08-17T07:29:24
| 2,704,126
| 3
| 2
| null | 2022-12-14T20:25:07
| 2011-11-03T18:28:44
|
Python
|
UTF-8
|
Python
| false
| false
| 276
|
py
|
# https://pymotw.com/3/multiprocessing/basics.html
import multiprocessing
def worker():
"""worker function"""
print('Worker')
if __name__ == '__main__':
jobs = []
for i in range(5):
p = multiprocessing.Process(target=worker)
jobs.append(p)
p.start()
|
[
"morpheus.0@kakaocorp.com"
] |
morpheus.0@kakaocorp.com
|
49fd37eab918232db078265207ad4f60b6097bb1
|
b80bd48e12f2361ea51284fba0a08ea61673cfd4
|
/drip/models.py
|
beec587cc63ff535831ebb3038532e3041054c3d
|
[] |
no_license
|
pdc/caption
|
25829912241224ec93697537900f6199fb14121b
|
6677de9f0bf40d5f331da8a3fb1fb45c7404d704
|
refs/heads/master
| 2016-09-06T16:55:30.264130
| 2013-02-28T20:01:55
| 2013-02-28T20:01:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 751
|
py
|
# -*-coding: UTF-8-*-
from django.db import models
from django.contrib.auth.models import User
from articles.models import Article
class DripAuthor(models.Model):
uid = models.IntegerField(editable=False, primary_key=True)
name = models.CharField(max_length=60, unique=True)
mail = models.EmailField(max_length=64, blank=True)
def __unicode__(self):
return u'{0} ({1})'.format(self.name, self.uid)
class DripNode(models.Model):
article = models.OneToOneField(Article, null=False, editable=False, related_name='drip')
author = models.ForeignKey(DripAuthor)
nid = models.IntegerField(editable=False, primary_key=True)
def __unicode__(self):
return u'{0} ({1})'.format(self.article.title, self.nid)
|
[
"pdc@alleged.org.uk"
] |
pdc@alleged.org.uk
|
7964d51dadf0214da7cc67756d0945ff15a2053c
|
f30e9ca8c5777196bf8e56d82ecddb80418d21ba
|
/test_fena/v1_12/test_blocks.py
|
7f1ca52716e4415e719a8986dcf5cb915ef2c9b7
|
[
"MIT"
] |
permissive
|
Aquafina-water-bottle/Fena
|
029a1d6be64bdf1bd75d647d83e524821dfc7a3c
|
00a2b1dbc6f8abd968c46c637d6ad30d5fcde919
|
HEAD
| 2018-10-22T13:36:24.838025
| 2018-07-19T18:16:17
| 2018-07-19T18:16:17
| 96,073,393
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,790
|
py
|
from test_fena.test_common import test_block
def test_blocks():
# block states as simple values
test_block("stonebrick[0]", "minecraft:stonebrick 0")
test_block("stonebrick[1]", "minecraft:stonebrick 1")
test_block("stonebrick[-1]", "minecraft:stonebrick -1")
test_block("stonebrick[*]", "minecraft:stonebrick *")
test_block("stonebrick[]", "minecraft:stonebrick *")
test_block("stonebrick", "minecraft:stonebrick *")
test_block("minecraft:stonebrick[0]", "minecraft:stonebrick 0")
test_block("minecraft:stonebrick[1]", "minecraft:stonebrick 1")
test_block("minecraft:stonebrick[-1]", "minecraft:stonebrick -1")
test_block("minecraft:stonebrick[*]", "minecraft:stonebrick *")
test_block("minecraft:stonebrick[]", "minecraft:stonebrick *")
test_block("minecraft:stonebrick", "minecraft:stonebrick *")
# actual block states
test_block("stonebrick[variant=mossy_stonebrick]", "minecraft:stonebrick variant=mossy_stonebrick")
test_block("stone_brick_stairs[facing=south,half=top]", "minecraft:stone_brick_stairs facing=south,half=top")
test_block("stone_brick_stairs[facing=south, half=top]", "minecraft:stone_brick_stairs facing=south,half=top")
test_block("stone_brick_stairs[facing = south, half = top]", "minecraft:stone_brick_stairs facing=south,half=top")
test_block("stone_brick_stairs [ facing = south, half = top ] ", "minecraft:stone_brick_stairs facing=south,half=top")
test_block("minecraft:stonebrick[variant=mossy_stonebrick]", "minecraft:stonebrick variant=mossy_stonebrick")
test_block("minecraft:stone_brick_stairs[facing=south,half=top]", "minecraft:stone_brick_stairs facing=south,half=top")
test_block("minecraft:stone_brick_stairs[facing=south, half=top]", "minecraft:stone_brick_stairs facing=south,half=top")
test_block("minecraft:stone_brick_stairs[facing = south, half = top]", "minecraft:stone_brick_stairs facing=south,half=top")
test_block("minecraft:stone_brick_stairs [ facing = south, half = top ] ", "minecraft:stone_brick_stairs facing=south,half=top")
# nbt tags
test_block(r'chest[0]{}', r'minecraft:chest 0 replace {}')
test_block(r'chest[1]{}', r'minecraft:chest 1 replace {}')
test_block(r'chest[-1]{}', r'minecraft:chest -1 replace {}')
test_block(r'chest[*]{}', r'minecraft:chest * replace {}')
test_block(r'chest[]{}', r'minecraft:chest * replace {}')
test_block(r'chest{}', r'minecraft:chest * replace {}')
test_block(r'minecraft:chest[0]', r'minecraft:chest 0')
test_block(r'minecraft:chest[1]', r'minecraft:chest 1')
test_block(r'minecraft:chest[-1]', r'minecraft:chest -1')
test_block(r'minecraft:chest[*]', r'minecraft:chest *')
test_block(r'minecraft:chest[]', r'minecraft:chest *')
test_block(r'minecraft:chest', r'minecraft:chest *')
test_block(r'chest[facing=west]{}', r'minecraft:chest facing=west replace {}')
test_block(r'chest[facing=west]{Lock:"lol"}', r'minecraft:chest facing=west replace {Lock:"lol"}')
test_block(r'minecraft:chest[facing=west]{}', r'minecraft:chest facing=west replace {}')
test_block(r'minecraft:chest[facing=west]{Lock:"lol"}', r'minecraft:chest facing=west replace {Lock:"lol"}')
test_block(r'chest[0]{Lock:"lol"}', r'minecraft:chest 0 replace {Lock:"lol"}')
test_block(r'chest[1]{Lock:"lol"}', r'minecraft:chest 1 replace {Lock:"lol"}')
test_block(r'chest[-1]{Lock:"lol"}', r'minecraft:chest -1 replace {Lock:"lol"}')
test_block(r'chest[*]{Lock:"lol"}', r'minecraft:chest * replace {Lock:"lol"}')
test_block(r'chest[]{Lock:"lol"}', r'minecraft:chest * replace {Lock:"lol"}')
test_block(r'chest{Lock:"lol"}', r'minecraft:chest * replace {Lock:"lol"}')
test_block(r'minecraft:chest[0]{Lock:"lol"}', r'minecraft:chest 0 replace {Lock:"lol"}')
test_block(r'minecraft:chest[1]{Lock:"lol"}', r'minecraft:chest 1 replace {Lock:"lol"}')
test_block(r'minecraft:chest[-1]{Lock:"lol"}', r'minecraft:chest -1 replace {Lock:"lol"}')
test_block(r'minecraft:chest[*]{Lock:"lol"}', r'minecraft:chest * replace {Lock:"lol"}')
test_block(r'minecraft:chest[]{Lock:"lol"}', r'minecraft:chest * replace {Lock:"lol"}')
test_block(r'minecraft:chest{Lock:"lol"}', r'minecraft:chest * replace {Lock:"lol"}')
# errors
test_block('not_a_block', expect_error=True)
test_block("wrong_id:stonebrick", expect_error=True)
test_block("stonebrick:stonebrick", expect_error=True)
test_block('stonebrick[0,1]', expect_error=True)
test_block('stonebrick[0=0,1=1]', expect_error=True)
test_block('stonebrick[asdf=0,nou=1]', expect_error=True)
test_block("stone_brick_stairs[=]", expect_error=True)
test_block("stone_brick_stairs[facing]", expect_error=True)
test_block("stone_brick_stairs[facing=]", expect_error=True)
test_block("stone_brick_stairs[facing=south=north]", expect_error=True)
test_block("stone_brick_stairs[facing=south half=top]", expect_error=True)
test_block('minecraft:not_a_block', expect_error=True)
test_block('minecraft:stonebrick[0,1]', expect_error=True)
test_block('minecraft:stonebrick[0=0,1=1]', expect_error=True)
test_block('minecraft:stonebrick[asdf=0,nou=1]', expect_error=True)
test_block("minecraft:stone_brick_stairs[=]", expect_error=True)
test_block("minecraft:stone_brick_stairs[facing]", expect_error=True)
test_block("minecraft:stone_brick_stairs[facing=]", expect_error=True)
test_block("minecraft:stone_brick_stairs[facing=south=north]", expect_error=True)
test_block("minecraft:stone_brick_stairs[facing=south half=top]", expect_error=True)
|
[
"glowing.locker@gmail.com"
] |
glowing.locker@gmail.com
|
2015f26b503e42442766db3c50645d21d6ecccab
|
2635edb96afa8117d4584a470061e447b79adc6e
|
/test/booknotes_test.py
|
3682f6f5b88bb82f90d549216fa699601c455763
|
[] |
no_license
|
Mark-Seaman/Sensei-2018
|
673609731ecb5ebb782dab94b2cf3d7c22940424
|
06b02892cfe1bf1d25cb4224e86eb693c82b0f29
|
refs/heads/master
| 2022-02-18T19:14:10.343093
| 2022-01-15T20:06:21
| 2022-01-15T20:06:21
| 158,728,468
| 0
| 0
| null | 2022-01-16T21:06:09
| 2018-11-22T16:51:55
|
HTML
|
UTF-8
|
Python
| false
| false
| 273
|
py
|
from os import environ
from os.path import join
from bin.shell import file_tree_list, shell, check_shell_lines
def booknotes_list_test():
return shell('x booknotes list')
def booknotes_lines_test():
return check_shell_lines('x booknotes content', 2700, 2800)
|
[
"Mark.Seaman@imac.net"
] |
Mark.Seaman@imac.net
|
8ab6f785b6f34c9e36b99994af2bb9339cfbb8db
|
9b64f0f04707a3a18968fd8f8a3ace718cd597bc
|
/huaweicloud-sdk-iam/huaweicloudsdkiam/v3/model/show_user_mfa_device_request.py
|
e917b2945cce19343061600282e1586bd29f038b
|
[
"Apache-2.0"
] |
permissive
|
jaminGH/huaweicloud-sdk-python-v3
|
eeecb3fb0f3396a475995df36d17095038615fba
|
83ee0e4543c6b74eb0898079c3d8dd1c52c3e16b
|
refs/heads/master
| 2023-06-18T11:49:13.958677
| 2021-07-16T07:57:47
| 2021-07-16T07:57:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,931
|
py
|
# coding: utf-8
import re
import six
class ShowUserMfaDeviceRequest:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'user_id': 'str'
}
attribute_map = {
'user_id': 'user_id'
}
def __init__(self, user_id=None):
"""ShowUserMfaDeviceRequest - a model defined in huaweicloud sdk"""
self._user_id = None
self.discriminator = None
self.user_id = user_id
@property
def user_id(self):
"""Gets the user_id of this ShowUserMfaDeviceRequest.
ID of the IAM user to query. For how to obtain it, see [Obtaining a User ID](https://support.huaweicloud.com/api-iam/iam_17_0002.html).
:return: The user_id of this ShowUserMfaDeviceRequest.
:rtype: str
"""
return self._user_id
@user_id.setter
def user_id(self, user_id):
"""Sets the user_id of this ShowUserMfaDeviceRequest.
ID of the IAM user to query. For how to obtain it, see [Obtaining a User ID](https://support.huaweicloud.com/api-iam/iam_17_0002.html).
:param user_id: The user_id of this ShowUserMfaDeviceRequest.
:type: str
"""
self._user_id = user_id
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
import simplejson as json
return json.dumps(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ShowUserMfaDeviceRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
[
"hwcloudsdk@huawei.com"
] |
hwcloudsdk@huawei.com
|
d5476304cb3aab788f50565a72061d73b9a6a5ed
|
5aaa7bdc5085987f925748e6b9afc694a3e9b908
|
/y63p.py
|
f9fbeee2e07e974b15c4185a22fd4c85e17a50fb
|
[] |
no_license
|
swarnanjali/pythonproject
|
e086af8e2b03f8dae9e155f958c99c4ebb722445
|
10932592b601c7b27ce7eebeeb59e3876ac8fafb
|
refs/heads/master
| 2020-06-22T10:31:43.354583
| 2019-07-10T14:47:26
| 2019-07-10T14:47:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 129
|
py
|
t=input()
s=''
n=[]
for i in t:
if i not in n:
s+=i
n.append(i)
elif i in n:
break
print(len(n))
|
[
"noreply@github.com"
] |
swarnanjali.noreply@github.com
|
cd4860ec2a8cda26dffcb523f58de782eb9b010c
|
53181572c4b22df4b569a9901bcd5347a3459499
|
/ceit_191116/py200111/output_2_format.py
|
67686883b4583dec92ce8b20bb65683a5a040e91
|
[] |
no_license
|
edu-athensoft/ceit4101python_student
|
80ef067b77421fce76d04f778d5c6de8b12f676c
|
33cfa438c062d45e8d246b853e93d3c14b92ff2d
|
refs/heads/master
| 2020-07-30T01:04:21.084384
| 2020-07-27T02:21:57
| 2020-07-27T02:21:57
| 210,027,310
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 349
|
py
|
"""
output formatting
string format()
positional arguments
"""
name1 = 'Obama'
name2 = 'Helen'
name3 = 'Marie'
name4 = "Cindy"
greeting1 = "morning!"
greeting2 = "afternoon!"
greeting3 = "evening!"
print("Good {1}, {0}! Long time no see".format(name3, greeting3))
print("Good {2}, {0} and {1} ! Long time no see".format(name3, name4, greeting3))
|
[
"lada314@gmail.com"
] |
lada314@gmail.com
|
730f11cd40ad83877318044f913b110a4afb45f1
|
5a75325ffdc14ca1eb08fa32225ba2b5b1347010
|
/compose/django/cull_backups.py
|
a05a182ed41edc94c1e0e8a196199c79d301f5c9
|
[
"MIT"
] |
permissive
|
teamhero/hawc
|
cdc3f058aeef104c9305af67a54579f7f6362111
|
528f4ec605e8f406e0d147ba240a6a604f80a713
|
refs/heads/master
| 2021-05-23T06:09:01.679227
| 2019-03-11T14:30:39
| 2019-03-11T14:30:39
| 94,796,508
| 0
| 2
|
NOASSERTION
| 2020-04-03T23:47:21
| 2017-06-19T16:14:52
|
Python
|
UTF-8
|
Python
| false
| false
| 792
|
py
|
#!/usr/bin/env python3
import os
from datetime import date, datetime
today = date.today()
print('Checking db backups for removal: {}'.format(today))
root = '/data/backups'
for fn in os.listdir(root):
if '.sql.gz' not in fn:
continue
d1 = datetime.strptime(fn, 'hawc-%Y-%m-%dT%H_%M.sql.gz').date()
days = (today - d1).days
if days <= 14:
# keep all <= 14 days
keep = True
elif days <= 90:
# keep one weekly for 3 months (or first of month)
keep = (d1.day == 1 or d1.weekday() == 0)
else:
# keep only the first of the month
keep = (d1.day == 1)
if not keep:
fn = os.path.join(root, fn)
print('Removing %s' % fn)
os.system('rm {}'.format(fn))
print('db backup removal complete')
|
[
"shapiromatron@gmail.com"
] |
shapiromatron@gmail.com
|
1359004d9f80c9154062c624edc743d5280a1316
|
ecd27923efba50703a7bfbfa2ba37a8cc78560ea
|
/automatic_scraper/config/bid/liriqing/liaoning_huludao_ggzy_config.py
|
6d9fe5faedd7b9732ff28010a2a073fb58b59ae8
|
[] |
no_license
|
yougecn/work
|
fb691b072a736731083777e489712dee199e6c75
|
1b58525e5ee8a3bdecca87fdee35a80e93d89856
|
refs/heads/master
| 2022-03-03T19:14:17.234929
| 2018-04-17T12:29:19
| 2018-04-17T12:29:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,379
|
py
|
# coding: utf-8
import time
import logging
import re
logger = logging.getLogger(__name__)
author = "liriqing"
web_title = u"葫芦岛市公共资源交易"
data_source = 'http://www.hldggzyjyzx.com.cn'
start_urls = [
# Tender announcements
"http://www.hldggzyjyzx.com.cn/Front/jyxx/071001/071001001/",
"http://www.hldggzyjyzx.com.cn/Front/jyxx/071002/071002001/",
"http://www.hldggzyjyzx.com.cn/Front/jyxx/071002/071002002/",
# Correction announcements
"http://www.hldggzyjyzx.com.cn/Front/jyxx/071002/071002003/",
# Winning-bid candidates
"http://www.hldggzyjyzx.com.cn/Front/jyxx/071001/071001002/",
# Winning-bid results
"http://www.hldggzyjyzx.com.cn/Front/jyxx/071001/071001003/",
"http://www.hldggzyjyzx.com.cn/Front/jyxx/071002/071002004/"
]
db_config = {
'host': '127.0.0.1',
'port': 3306,
'user': 'root',
'password': 'asd123',
'database': 'bid_data',
'table': 'zhaotoubiao'
}
# List-page template
index_pattern = {
"_list": {'pattern': "//tr[@height='15']", 'type': 'xpath', 'target': 'html', 'custom_func_name': ''},
"_next_page": {'pattern': "//td[text()='下页 >' and @onclick]", 'type': 'xpath', 'target': 'html', 'custom_func_name': ''},
"title": {'pattern': "//a", 'type': 'xpath', 'target': 'text', 'custom_func_name': ''},
}
# 详情页模板
detail_pattern = {
"sc": {'pattern': "//table[@bgcolor='#ffffff']", 'type': 'xpath', 'target': 'html', 'custom_func_name': ''},
"_issue_time": {'pattern': "//font[@class='webfont']", 'type': 'xpath', 'target': 'text', 'custom_func_name': ''},
# "pub_date_fmt": "%Y-%m-%d %H:%M:%S",
}
def init(item):
"""Executed at initialization"""
logger.info(u'init item: %s', item)
item['_web_title'] = item['web_title']
del item['web_title']
item['region']=u'辽宁-葫芦岛市'
item['_delay_between_pages'] = 3
def process_list_item(list_element, item):
"""Process a list-page element
:param list_element: html element parsed out by the _list template
:param item:
Runs after the list page is fetched, once for each detail html snippet extracted by the _list template.
Some fields are already available on the list page and can be handled here, e.g.:
item['pub_date'] = pq(list_element).find('span').text()
"""
#item['issue_time'] = int(time.mktime(time.strptime(item['issue_time'], "%Y-%m-%d")))
if '071001001' in item['_current_start_url'] or '071002001' in item['_current_start_url'] or '071002002' in item['_current_start_url']:
item['bid_type']=1
elif '071001002' in item['_current_start_url']:
item['bid_type']=4
elif '071002003' in item['_current_start_url']:
item['bid_type'] = 2
elif '071001003' in item['_current_start_url'] or '071002004' in item['_current_start_url']:
item['bid_type'] = 0
# Stop paginating
# if item['_current_page'] == 10:
# item['_click_next'] = False
def process_detail_item(item):
"""Process the detail page
:param item:
Runs after the detail-page data is fetched and stored in item.
Handle here whatever the framework cannot handle on its own,
e.g. when the publish time cannot be parsed from the detail page and must be extracted from content with a regular expression.
"""
time_data=re.search(r'(\d+)/(\d+)/(\d+)',item['_issue_time'].strip(),re.S).group()
item['issue_time'] = int(time.mktime(time.strptime(time_data, "%Y/%m/%d")))
if len(item['sc']) > 0:
item['is_get'] = 1
else:
item['is_get'] = 0
|
[
"iwechen123@gmail.com"
] |
iwechen123@gmail.com
|
dfa29b530d8cfec3c71ef5b903b2669318bf08a0
|
8f7615603d4d923fd2cda41a2105b85b596ab4c5
|
/leetcode/easy/605-Can_place_flowers.py
|
0768c8acf6ccd14529365904c51987083054bec6
|
[
"MIT"
] |
permissive
|
shubhamoli/solutions
|
e7ec922047c16cfdc10070aa5b884a278b12d8c5
|
5a24fdeb6e5f43b821ef0510fe3b343ddda18f22
|
refs/heads/master
| 2021-01-05T04:13:35.302613
| 2020-06-27T18:28:18
| 2020-06-27T18:28:18
| 240,875,585
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 820
|
py
|
"""
Leetcode #605
"""
from typing import List
class Solution:
def canPlaceFlowers(self, flowerbed: List[int], n: int) -> bool:
if not flowerbed:
return False
# the idea here is to check for a 0 and, if the cells to its left and right are also 0,
# then plant there
N = len(flowerbed)
i = 0
count = 0
while i < N:
if (flowerbed[i] == 0 and (i == 0 or flowerbed[i-1] == 0) and (i == N - 1 or flowerbed[i+1] == 0)):
flowerbed[i] = 1
count += 1
if count >= n:
return True
i += 1
return False
if __name__ == "__main__":
solution = Solution()
assert solution.canPlaceFlowers([1,0,0,0,1], 1) == True
assert solution.canPlaceFlowers([1,0,0,0,1], 2) == False
|
[
"oli.shubham@gmail.com"
] |
oli.shubham@gmail.com
|
fce93dd6490570c79d10bd6acd01a91479394c8c
|
7ae20e08e736e6df546cb5a80df2baf067686b52
|
/tasks/sprint-5/Финал B - Remove Node/my_solution.py
|
4cbe757f866c016c572fa865b916b06f74304bf0
|
[] |
no_license
|
Grey2k/yandex.praktikum-alghoritms
|
faf466374c932733cc1c5049a2df719d8fd33ac7
|
97b1b4858265b44266a33b834e1e9a1349739048
|
refs/heads/master
| 2023-08-28T02:46:16.502298
| 2021-09-28T19:08:35
| 2021-09-28T19:08:35
| 334,646,281
| 10
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,728
|
py
|
# from node import Node
def remove(root, key):
if root is None:
return root
node_to_remove, parent_of_node_to_remove = find_node(root, None, key)
# If nothing to remove, return the original root
if node_to_remove is None:
return root
# Removing found node
return remove_node(root, parent_of_node_to_remove, node_to_remove)
def remove_node(root, node_parent, node):
left = node.left
right = node.right
# 0 Case - Node is root without children
if node is root and left is None and right is None:
root = None
return root
# 1 Case - Node is root and has children
if node is root:
if left is None:
root = right
return root
else:
max_node = find_max_and_remove(left)
root = max_node
# if maximum is not left part itself
if max_node is not left:
max_node.left = left
max_node.right = right
return root
# 2 Case - Node has no children
if left is None and right is None:
if node_parent.left is node:
node_parent.left = None
else:
node_parent.right = None
return root
# 2 Case - Node has children
if left is None:
if node_parent.left is node:
node_parent.left = right
else:
node_parent.right = right
else:
max_node = find_max_and_remove(left)
if node_parent.left is node:
node_parent.left = max_node
else:
node_parent.right = max_node
# if maximum is not left part itself
if max_node is not left:
max_node.left = left
max_node.right = right
return root
def find_max_and_remove(node):
parent = node
if parent.right is None:
return parent
while True:
if parent.right.right is None:
found = parent.right
parent.right = None
return found
parent = parent.right
def find_node(root, parent, key):
if root is None:
return None, None
if root.value == key:
return root, parent
if root.value < key:
return find_node(root.right, root, key)
if root.value > key:
return find_node(root.left, root, key)
# noinspection Assert
def test():
node1 = Node(None, None, 2)
node2 = Node(node1, None, 3)
node3 = Node(None, node2, 1)
node4 = Node(None, None, 6)
node5 = Node(node4, None, 8)
node6 = Node(node5, None, 10)
node7 = Node(node3, node6, 5)
new_head = remove(node7, 10)
assert new_head.value == 5
assert new_head.right is node5
assert new_head.right.value == 8
|
[
"grey2k@gmail.com"
] |
grey2k@gmail.com
|
9afc47d3742c9bd1433e0d1f78a7a3bc61f0e45d
|
65b6e3dada91a0acb589d25358badb02c845b67f
|
/examples/ex_graph.py
|
dafeb6b4aa33fafca13629a3f11db2e1e0c8e08c
|
[
"BSD-3-Clause"
] |
permissive
|
polestar37/src
|
5b9bacb1e1e5118835d47c0f44fa5ad78fb0ad04
|
5dd54527356901b24b6ea3caff644a26aeb3c3c6
|
refs/heads/master
| 2021-01-15T22:39:44.730414
| 2017-06-22T23:06:15
| 2017-06-22T23:06:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,635
|
py
|
# -----------------------------------------------------------------------
# This is an example illustrating how to use the user graphing functionality
# in Python
# (c) Hex-Rays
#
from idaapi import *
class GraphCloser(action_handler_t):
def __init__(self, graph):
action_handler_t.__init__(self)
self.graph = graph
def activate(self, ctx):
self.graph.Close()
def update(self, ctx):
return AST_ENABLE_ALWAYS
class ColorChanger(action_handler_t):
def __init__(self, graph):
action_handler_t.__init__(self)
self.graph = graph
def activate(self, ctx):
self.graph.color = self.graph.color ^ 0xffffff
self.graph.Refresh()
return 1
def update(self, ctx):
return AST_ENABLE_ALWAYS
class MyGraph(GraphViewer):
def __init__(self, funcname, result):
self.title = "call graph of " + funcname
GraphViewer.__init__(self, self.title)
self.funcname = funcname
self.result = result
self.color = 0xff00ff
def OnRefresh(self):
self.Clear()
id = self.AddNode((self.funcname, self.color))
for x in self.result.keys():
callee = self.AddNode((x, self.color))
self.AddEdge(id, callee)
return True
def OnGetText(self, node_id):
return self[node_id]
def Show(self):
if not GraphViewer.Show(self):
return False
# graph closer
actname = "graph_closer:%s" % self.title
register_action(action_desc_t(actname, "Close: %s" % self.title, GraphCloser(self)))
attach_action_to_popup(self.GetTCustomControl(), None, actname)
# color changer
actname = "color_changer:%s" % self.title
register_action(action_desc_t(actname, "Change colors: %s" % self.title, ColorChanger(self)))
attach_action_to_popup(self.GetTCustomControl(), None, actname)
return True
def show_graph():
f = idaapi.get_func(here())
if not f:
print "Must be in a function"
return
# Iterate through all function instructions and take only call instructions
result = {}
for x in [x for x in FuncItems(f.startEA) if idaapi.is_call_insn(x)]:
for xref in XrefsFrom(x, idaapi.XREF_FAR):
if not xref.iscode: continue
t = GetFunctionName(xref.to)
if not t:
t = hex(xref.to)
result[t] = True
g = MyGraph(GetFunctionName(f.startEA), result)
if g.Show():
return g
else:
return None
g = show_graph()
if g:
print "Graph created and displayed!"
|
[
"Arnaud Diederen arnaud@hex-rays.com"
] |
Arnaud Diederen arnaud@hex-rays.com
|
5441634d49305645e4e9a6c7fbaffd964e776814
|
ad2251276aec376b480a31373165cf5b63a109c0
|
/input/litdata/bouwens2017.py
|
bbd597895d8fcf211e9ab49763d96822106933fe
|
[
"LicenseRef-scancode-warranty-disclaimer",
"MIT"
] |
permissive
|
mirochaj/ares
|
1b6bbe39c423762eb3051d65718394e8e839cd14
|
f323300b56ae61fab56eda1e5179cfc991eaa74f
|
refs/heads/main
| 2023-08-19T04:14:29.928747
| 2022-11-09T22:46:08
| 2022-11-09T22:46:08
| 249,536,610
| 16
| 13
|
MIT
| 2023-08-28T23:00:00
| 2020-03-23T20:25:55
|
Python
|
UTF-8
|
Python
| false
| false
| 1,534
|
py
|
"""
Bouwens et al., 2017, ApJ, 843, 129
Table 4 and volume estimate from text.
"""
info = \
{
'reference': 'Bouwens et al., 2017, ApJ, 843, 129',
'data': 'Table 5',
'label': 'Bouwens+ (2017)'
}
import numpy as np
redshifts = [6.]
wavelength = 1600. # I think?
ULIM = -1e10
tmp_data = {}
tmp_data['lf'] = \
{
6.0: {'M': list(np.arange(-20.75, -12.25, 0.5)),
'phi': [0.0002, 0.0009, 0.0007, 0.0018, 0.0036,
0.0060, 0.0071, 0.0111, 0.0170, 0.0142,
0.0415, 0.0599, 0.0817, 0.1052, 0.1275,
0.1464, 0.1584],
'err': [(0.0002, 0.0002), (0.0004, 0.0004),
(0.0004, 0.0004), (0.0006, 0.0006),
(0.0009, 0.0009), (0.0012, 0.0012),
(0.0066, 0.0014), (0.0101, 0.0022),
(0.0165, 0.0039), (0.0171, 0.0054),
(0.0354, 0.0069), (0.0757, 0.0106),
(0.1902, 0.0210), (0.5414, 0.0434),
(1.6479, 0.0747), (5.4369, 0.1077),
(19.8047, 0.1343)],
},
}
units = {'lf': 1.}
data = {}
data['lf'] = {}
for key in tmp_data['lf']:
#mask = np.array(tmp_data['lf'][key]['err']) == ULIM
N = len(tmp_data['lf'][key]['M'])
mask = np.array([tmp_data['lf'][key]['err'][i] == ULIM for i in range(N)])
data['lf'][key] = {}
data['lf'][key]['M'] = np.ma.array(tmp_data['lf'][key]['M'], mask=mask)
data['lf'][key]['phi'] = np.ma.array(tmp_data['lf'][key]['phi'], mask=mask)
data['lf'][key]['err'] = tmp_data['lf'][key]['err']
|
[
"mirochaj@gmail.com"
] |
mirochaj@gmail.com
|
e3b7c0253fc3545cc8614c084a9fd36fb3322e26
|
e2a8c529c7e92108c11f01cbef168b5b9137521a
|
/ChromeGo/XX-Net/code/default/launcher/post_update.py
|
d05d8c507d1b2ec9887e5be04d71ed6fe7e89b0b
|
[
"BSD-2-Clause"
] |
permissive
|
glxdegit/fanqiang
|
c28a514988eaaf6485c682bc346aa276e8b0c0b3
|
85691ef7a5ec99fbd81f8bdb09db7b60379e3c38
|
refs/heads/master
| 2022-07-07T06:41:24.681743
| 2020-05-16T02:25:22
| 2020-05-16T02:25:22
| 264,363,999
| 3
| 2
| null | 2020-05-16T05:11:06
| 2020-05-16T05:11:06
| null |
UTF-8
|
Python
| false
| false
| 2,603
|
py
|
import os
import sys
import re
import stat
import shutil
current_path = os.path.dirname(os.path.abspath(__file__))
root_path = os.path.abspath(os.path.join(current_path, os.pardir))
top_path = os.path.abspath(os.path.join(root_path, os.pardir, os.pardir))
from xlog import getLogger
xlog = getLogger("launcher")
import config
def check():
import update_from_github
current_version = update_from_github.current_version()
last_run_version = config.get(["modules", "launcher", "last_run_version"], "0.0.0")
if last_run_version == "0.0.0":
postUpdateStat = "isNew"
elif last_run_version != current_version:
postUpdateStat = "isPostUpdate"
run(last_run_version)
else:
return
config.set(["update", "postUpdateStat"], postUpdateStat)
config.set(["modules", "launcher", "last_run_version"], current_version)
config.save()
def older_or_equal(version, reference_version):
try:
p = re.compile(r'([0-9]+)\.([0-9]+)\.([0-9]+)')
m1 = p.match(version)
m2 = p.match(reference_version)
v1 = map(int, map(m1.group, [1, 2, 3]))
v2 = map(int, map(m2.group, [1, 2, 3]))
return v1 <= v2
except:
xlog.warn("older_or_equal fail: %s, %s" % (version, reference_version)) # e.g. "get_version_fail" when post_update.run(last_run_version), "last_run_version" in \data\launcher\config.yaml
return False # is not older
def run(last_run_version):
if config.get(["modules", "launcher", "auto_start"], 0):
import autorun
autorun.enable()
if os.path.isdir(os.path.join(top_path, 'launcher')):
shutil.rmtree(os.path.join(top_path, 'launcher')) # launcher is for auto-update from 2.X
if older_or_equal(last_run_version, '3.0.4'):
xlog.info("migrating to 3.x.x")
for filename in os.listdir(top_path):
filepath = os.path.join(top_path, filename)
if os.path.isfile(filepath):
if sys.platform != 'win32' and filename == 'start':
st = os.stat(filepath)
os.chmod(filepath, st.st_mode | stat.S_IEXEC)
if filename in ['start.sh', 'start.command', 'start.lnk', 'LICENSE.txt', 'download.md', 'version.txt', 'xxnet', 'xxnet.bat', 'xxnet.vbs']:
os.remove(filepath)
else:
if filename in ['goagent', 'python27', 'gae_proxy', 'php_proxy', 'x_tunnel', 'python3', 'Python3', 'lib', 'SwitchySharp']:
shutil.rmtree(filepath)
|
[
"banned.ebook@gmail.com"
] |
banned.ebook@gmail.com
|
45aab77aeb697bf1ad193b75e81fb1b11904a171
|
2995ab9f4d8e4292763f215709bd3da266c812d4
|
/proyecto_ibis/ibis/users/migrations/0003_auto_20210611_1201.py
|
696f1309a1b6218a8098ff72531e3d4b7f7bf16a
|
[] |
no_license
|
mrinxx/Ibis
|
a45c84184c03c5983cfb67ba303235162f22abfb
|
4435fad66bf8082eb9a3b41b0b2d415607cd207a
|
refs/heads/main
| 2023-06-01T19:40:16.092648
| 2021-06-14T20:48:10
| 2021-06-14T20:48:10
| 376,138,283
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 634
|
py
|
# Generated by Django 3.2.4 on 2021-06-11 12:01
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('users', '0002_remove_guardian_image'),
]
operations = [
migrations.RemoveField(
model_name='alumn',
name='address',
),
migrations.RemoveField(
model_name='alumn',
name='city',
),
migrations.RemoveField(
model_name='alumn',
name='last_name',
),
migrations.RemoveField(
model_name='alumn',
name='name',
),
]
|
[
"mrinxx5@gmail.com"
] |
mrinxx5@gmail.com
|
51703a989853c75d13edcea9e53aa6ece050f724
|
2324dea2cb3003c8ab7e8fd80588d44973eb8c77
|
/Euler_1_12.py
|
5734c779f4c0af8eadd33888278b66b26ca706d5
|
[] |
no_license
|
MikeOcc/MyProjectEulerFiles
|
5f51bc516cb6584732dc67bb2f9c7fd9e6d51e56
|
4d066d52380aade215636953589bf56d6b88f745
|
refs/heads/master
| 2021-01-16T18:45:44.133229
| 2015-05-27T18:28:43
| 2015-05-27T18:28:43
| 5,876,116
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,070
|
py
|
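# Search for the first triangle number with more than 500 divisors; rows divisible by
# 2, 3 or 5 are checked by trial division up to the largest possible proper divisor.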
sum=0
x = 500*499/2
maxum = 0
maxsum = 0
bestrow = 0
rstart = 12375
estart = 12377
showall = False
for r in range(rstart,estart):
trinum = r*(r+1)/2
#trinum = r
sum=0
if trinum%2==0:
endrange = (trinum/2) + 1
elif trinum%3==0:
endrange = (trinum/3) + 1
elif trinum%5==0:
endrange = (trinum/5) + 1
# elif trinum%7==0:
# endrange = (trinum/7) + 1
# elif trinum%9==0:
# endrange = (trinum/9) + 1
else:
continue
print r
for i in range(1,endrange):
#z=float(trinum)/float(i)
#print i, "z = ",z
if trinum%i==0:
sum = sum + 1
#print i,float(trinum)/float(i)
sum = sum + 1
#print "maxsum ", maxsum
if showall == True or sum > maxsum:
print "\nRow:", r, ",triangle number :",trinum, ", number of factors:",sum
if sum > maxsum:
maxsum = sum
maxum = trinum
bestrow = r
if maxsum > 500:break;
print "\nFor r =", bestrow, ", the number of factors =",maxsum, "for triangle number =", maxum
#6720 - 384, 6546
|
[
"mike.occhipinti@mlsassistant.com"
] |
mike.occhipinti@mlsassistant.com
|
9c47603886a4c6ed2a21d6996118f3276f77d34b
|
770f7b7155c33d2f8c27846b93b9b73db45b2e2a
|
/gofedinfra/system/helpers/artefactkeygenerator/golangprojectdistributioncontracts.py
|
fa997f75dbfb7f4e5d02dc12ec4815bd664b40c5
|
[] |
no_license
|
gofed/infra
|
b0f6186486e8aa7c8c640411ee92d6648cbc77ec
|
2f402bbdf1e5fa7cb68262cc3408a2fc1436269f
|
refs/heads/master
| 2022-10-16T02:46:09.226939
| 2018-06-07T23:16:44
| 2018-06-08T11:31:37
| 48,703,326
| 1
| 5
| null | 2022-10-11T11:17:16
| 2015-12-28T17:08:28
|
Python
|
UTF-8
|
Python
| false
| false
| 503
|
py
|
from infra.system.core.meta.metaartefactkeygenerator import MetaArtefactKeyGenerator
import logging
class GolangProjectDistributionContractsKeyGenerator(MetaArtefactKeyGenerator):
def generate(self, data, delimiter = ":"):
# return a list of fields
keys = []
for key in ["artefact", "product", "distribution", "build", "rpm"]:
if key not in data:
raise ValueError("golang-project-distribution-contracts: %s key missing" % key)
keys.append(self.truncateKey(data[key]))
return keys
|
[
"jchaloup@redhat.com"
] |
jchaloup@redhat.com
|
badfda65db90fba0d138df86bdf89c553f163981
|
1ec1e418fc5c9aac055c9218f1074332adf1e720
|
/rand_param_envs/gym/envs/classic_control/mountain_car.py
|
fd564b1a6e7c7322b34b749b615ac33353677373
|
[] |
no_license
|
CHEN-yongquan/mier_public
|
344e34137343aa564b261c7125edac3b3ff10eb0
|
af56fa84811dc7a697feb1b9dff01836d2148810
|
refs/heads/master
| 2022-10-15T13:21:35.198458
| 2020-06-12T08:22:16
| 2020-06-12T08:22:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,400
|
py
|
"""
https://webdocs.cs.ualberta.ca/~sutton/MountainCar/MountainCar1.cp
"""
import math
from rand_param_envs import gym
from rand_param_envs.gym import spaces
from rand_param_envs.gym.utils import seeding
import numpy as np
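# Discrete-action MountainCar environment: actions 0/1/2 push left, do nothing, push right;
# an episode ends when the car reaches the goal position.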
class MountainCarEnv(gym.Env):
metadata = {
'render.modes': ['human', 'rgb_array'],
'video.frames_per_second': 30
}
def __init__(self):
self.min_position = -1.2
self.max_position = 0.6
self.max_speed = 0.07
self.goal_position = 0.5
self.low = np.array([self.min_position, -self.max_speed])
self.high = np.array([self.max_position, self.max_speed])
self.viewer = None
self.action_space = spaces.Discrete(3)
self.observation_space = spaces.Box(self.low, self.high)
self._seed()
self.reset()
def _seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def _step(self, action):
assert self.action_space.contains(action), "%r (%s) invalid" % (action, type(action))
position, velocity = self.state
velocity += (action - 1) * 0.001 + math.cos(3 * position) * (-0.0025)
velocity = np.clip(velocity, -self.max_speed, self.max_speed)
position += velocity
position = np.clip(position, self.min_position, self.max_position)
if (position == self.min_position and velocity < 0): velocity = 0
done = bool(position >= self.goal_position)
reward = -1.0
self.state = (position, velocity)
return np.array(self.state), reward, done, {}
def _reset(self):
self.state = np.array([self.np_random.uniform(low=-0.6, high=-0.4), 0])
return np.array(self.state)
def _height(self, xs):
return np.sin(3 * xs) * .45 + .55
def _render(self, mode='human', close=False):
if close:
if self.viewer is not None:
self.viewer.close()
self.viewer = None
return
screen_width = 600
screen_height = 400
world_width = self.max_position - self.min_position
scale = screen_width / world_width
carwidth = 40
carheight = 20
if self.viewer is None:
from rand_param_envs.gym.envs.classic_control import rendering
self.viewer = rendering.Viewer(screen_width, screen_height)
xs = np.linspace(self.min_position, self.max_position, 100)
ys = self._height(xs)
xys = list(zip((xs - self.min_position) * scale, ys * scale))
self.track = rendering.make_polyline(xys)
self.track.set_linewidth(4)
self.viewer.add_geom(self.track)
clearance = 10
l, r, t, b = -carwidth / 2, carwidth / 2, carheight, 0
car = rendering.FilledPolygon([(l, b), (l, t), (r, t), (r, b)])
car.add_attr(rendering.Transform(translation=(0, clearance)))
self.cartrans = rendering.Transform()
car.add_attr(self.cartrans)
self.viewer.add_geom(car)
frontwheel = rendering.make_circle(carheight / 2.5)
frontwheel.set_color(.5, .5, .5)
frontwheel.add_attr(rendering.Transform(translation=(carwidth / 4, clearance)))
frontwheel.add_attr(self.cartrans)
self.viewer.add_geom(frontwheel)
backwheel = rendering.make_circle(carheight / 2.5)
backwheel.add_attr(rendering.Transform(translation=(-carwidth / 4, clearance)))
backwheel.add_attr(self.cartrans)
backwheel.set_color(.5, .5, .5)
self.viewer.add_geom(backwheel)
flagx = (self.goal_position - self.min_position) * scale
flagy1 = self._height(self.goal_position) * scale
flagy2 = flagy1 + 50
flagpole = rendering.Line((flagx, flagy1), (flagx, flagy2))
self.viewer.add_geom(flagpole)
flag = rendering.FilledPolygon([(flagx, flagy2), (flagx, flagy2 - 10), (flagx + 25, flagy2 - 5)])
flag.set_color(.8, .8, 0)
self.viewer.add_geom(flag)
pos = self.state[0]
self.cartrans.set_translation((pos - self.min_position) * scale, self._height(pos) * scale)
self.cartrans.set_rotation(math.cos(3 * pos))
return self.viewer.render(return_rgb_array=mode == 'rgb_array')
|
[
"russellm@berkeley.edu"
] |
russellm@berkeley.edu
|
07e5017f2acb0a1965c7ecaf4bc866b332dd9c41
|
09d3599c8e53b38104e96d479b2b40ac549d0bef
|
/Rakesh/merge-sorting/merge_sort_code.py
|
fdcfc5be250ad399af146275c3c7b1928f480b15
|
[] |
no_license
|
rakeshsukla53/interview-preparation
|
54764c83c86a52c566899ec87d74dad84216764d
|
09355094c85496cc42f8cb3241da43e0ece1e45a
|
refs/heads/master
| 2016-09-06T02:08:50.436414
| 2016-02-01T00:31:52
| 2016-02-01T00:31:52
| 40,916,511
| 9
| 3
| null | 2015-12-31T05:00:55
| 2015-08-17T17:59:55
|
HTML
|
UTF-8
|
Python
| false
| false
| 831
|
py
|
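# Classic top-down merge sort: recursively split the list in half, sort each half,
# then merge the two sorted halves back into the original list.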
def mergeSort(alist):
print("Splitting ",alist)
if len(alist)>1:
mid = len(alist)//2
lefthalf = alist[:mid]
righthalf = alist[mid:]
mergeSort(lefthalf)
mergeSort(righthalf)
i = 0
j = 0
k = 0
while i < len(lefthalf) and j < len(righthalf):
if lefthalf[i] < righthalf[j]:
alist[k]=lefthalf[i]
i=i+1
else:
alist[k]=righthalf[j]
j=j+1
k=k+1
while i < len(lefthalf):
alist[k]=lefthalf[i]
i=i+1
k=k+1
while j < len(righthalf):
alist[k]=righthalf[j]
j=j+1
k=k+1
print("Merging ",alist)
alist = [54, 26, 93, 17, 77, 31, 44, 55, 20]
mergeSort(alist)
print(alist)
|
[
"rakesh.sukla53@gmail.com"
] |
rakesh.sukla53@gmail.com
|
9fe70e6e9311515a57fe7f9f89e914f226914708
|
09cead98874a64d55b9e5c84b369d3523c890442
|
/py200912b_python2m6/day14_201212/homework/kevin/stem1402b_python_homework_12_KEvin (1).py
|
df9bca9af96dc347be56451b3a84d2e749a06e03
|
[] |
no_license
|
edu-athensoft/stem1401python_student
|
f12b404d749286036a090e941c0268381ce558f8
|
baad017d4cef2994855b008a756758d7b5e119ec
|
refs/heads/master
| 2021-08-29T15:01:45.875136
| 2021-08-24T23:03:51
| 2021-08-24T23:03:51
| 210,029,080
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,274
|
py
|
"""
1. Read an HTML file, and copy all content into a new file
2. Read a CSV file, and copy all content into a new file
3. Read a CSV file, and copy its first half lines into a new file
"""
# Question 1.
file = open("myweb.html")
content = file.read()
file.close()
file = open("myweb.txt", 'w')
file.write(content)
file.close()
print("Content has been written")
# Question 2.
file = open("business-price-indexes-september-2020-quarter-corrections-to-previously-published-statistics.csv")
content = file.read()
file.close()
file = open("business-price-indexes-september-2020-quarter-corrections-to-previously-published-statistics.txt", 'w')
file.write(content)
file.close()
print("Content has been written")
# Question 3.
file = open("business-price-indexes-september-2020-quarter-corrections-to-previously-published-statistics.csv")
num = len(file.readlines())
if num % 2 == 0:
line = int(num / 2)
print(line)
elif num % 2 == 1:
line = int(num + 1)
line = int(line / 2)
print(line)
file.seek(0)
file2 = open("csvmodified.txt", 'w')
file2.close()
for i in range(line):
content = file.readline()
file3 = open("csvmodified.txt", 'a')
file3.write(content)
file3.close()
file.close()
print("Content has been written")
|
[
"lada314@gmail.com"
] |
lada314@gmail.com
|
3698d09b3ef585be930e2cdaae23a99451a3fe60
|
6a95b330e1beec08b917ff45eccfd6be3fd4629f
|
/kubernetes/client/models/v1beta1_pod_disruption_budget.py
|
1f4a7e680204daa09f3226b789c49eddb7556fd5
|
[
"Apache-2.0"
] |
permissive
|
TokkoLabs/client-python
|
f4a83d6540e64861b59e322c951380a670578d7f
|
f1ad9c6889105d8510472606c98f8d3807f82020
|
refs/heads/master
| 2023-07-14T01:36:46.152341
| 2017-12-21T21:32:11
| 2017-12-21T21:32:11
| 115,042,671
| 0
| 0
|
Apache-2.0
| 2021-08-06T03:29:17
| 2017-12-21T20:05:15
|
Python
|
UTF-8
|
Python
| false
| false
| 7,321
|
py
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.8.2
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1beta1PodDisruptionBudget(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'api_version': 'str',
'kind': 'str',
'metadata': 'V1ObjectMeta',
'spec': 'V1beta1PodDisruptionBudgetSpec',
'status': 'V1beta1PodDisruptionBudgetStatus'
}
attribute_map = {
'api_version': 'apiVersion',
'kind': 'kind',
'metadata': 'metadata',
'spec': 'spec',
'status': 'status'
}
def __init__(self, api_version=None, kind=None, metadata=None, spec=None, status=None):
"""
V1beta1PodDisruptionBudget - a model defined in Swagger
"""
self._api_version = None
self._kind = None
self._metadata = None
self._spec = None
self._status = None
self.discriminator = None
if api_version is not None:
self.api_version = api_version
if kind is not None:
self.kind = kind
if metadata is not None:
self.metadata = metadata
if spec is not None:
self.spec = spec
if status is not None:
self.status = status
@property
def api_version(self):
"""
Gets the api_version of this V1beta1PodDisruptionBudget.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources
:return: The api_version of this V1beta1PodDisruptionBudget.
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""
Sets the api_version of this V1beta1PodDisruptionBudget.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources
:param api_version: The api_version of this V1beta1PodDisruptionBudget.
:type: str
"""
self._api_version = api_version
@property
def kind(self):
"""
Gets the kind of this V1beta1PodDisruptionBudget.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
:return: The kind of this V1beta1PodDisruptionBudget.
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""
Sets the kind of this V1beta1PodDisruptionBudget.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
:param kind: The kind of this V1beta1PodDisruptionBudget.
:type: str
"""
self._kind = kind
@property
def metadata(self):
"""
Gets the metadata of this V1beta1PodDisruptionBudget.
:return: The metadata of this V1beta1PodDisruptionBudget.
:rtype: V1ObjectMeta
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""
Sets the metadata of this V1beta1PodDisruptionBudget.
:param metadata: The metadata of this V1beta1PodDisruptionBudget.
:type: V1ObjectMeta
"""
self._metadata = metadata
@property
def spec(self):
"""
Gets the spec of this V1beta1PodDisruptionBudget.
Specification of the desired behavior of the PodDisruptionBudget.
:return: The spec of this V1beta1PodDisruptionBudget.
:rtype: V1beta1PodDisruptionBudgetSpec
"""
return self._spec
@spec.setter
def spec(self, spec):
"""
Sets the spec of this V1beta1PodDisruptionBudget.
Specification of the desired behavior of the PodDisruptionBudget.
:param spec: The spec of this V1beta1PodDisruptionBudget.
:type: V1beta1PodDisruptionBudgetSpec
"""
self._spec = spec
@property
def status(self):
"""
Gets the status of this V1beta1PodDisruptionBudget.
Most recently observed status of the PodDisruptionBudget.
:return: The status of this V1beta1PodDisruptionBudget.
:rtype: V1beta1PodDisruptionBudgetStatus
"""
return self._status
@status.setter
def status(self, status):
"""
Sets the status of this V1beta1PodDisruptionBudget.
Most recently observed status of the PodDisruptionBudget.
:param status: The status of this V1beta1PodDisruptionBudget.
:type: V1beta1PodDisruptionBudgetStatus
"""
self._status = status
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, V1beta1PodDisruptionBudget):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
|
[
"mehdy@google.com"
] |
mehdy@google.com
|
2f7b470ae7b0fec585de036aeb4572515b45c7cc
|
3fd8fd35d61d997b586e40ed8d938805ce5fdf3b
|
/Nonsense/sqlite3_select_variations.py
|
0e0b6abd418f5905d994320c035d6cff5d79e1f4
|
[] |
no_license
|
ChocolatePadmanaban/Cooking_Scheduler
|
8afd967cd5128b15c9865aa44ae3d298ee3027ad
|
3cd91009e68064f92408fb5bba55519ba77767c3
|
refs/heads/master
| 2023-01-03T10:55:25.306425
| 2020-11-01T07:13:50
| 2020-11-01T07:13:50
| 260,551,843
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 772
|
py
|
import sqlite3
db_filename = 'todo.db'
with sqlite3.connect(db_filename) as conn :
cursor = conn.cursor()
cursor.execute("""
select name, description, deadline from project
where name = 'pymotw'
""")
name, description, deadline = cursor.fetchone()
print('\nProject details for {} ({}) \n due {}'.format(
description, name, deadline
))
cursor.execute("""
select id, priority, details, status, deadline from task
where project = 'pymotw' order by deadline
""")
print('\nNext 5 tasks')
for row in cursor.fetchmany(5):
task_id, priority, details, status, deadline = row
print('{:2d} [{:d}] {:<25} [{:<8}] ({})'.format(
task_id, priority, details, status, deadline
))
|
[
"pradeeppadmanaban7@gmail.com"
] |
pradeeppadmanaban7@gmail.com
|
b1b0836558fca7fbe061c4eb80f07882c4fc2efe
|
abfa0fcab2bc9a9c3cccbc3a8142cdd4b2a66ee9
|
/171-excel sheet column number.py
|
ecf0f4089ecebf1bb3c5d9513e327b54f6b2dd8b
|
[] |
no_license
|
JinnieJJ/leetcode
|
20e8ccf3f8919028c53e0f0db86bcc2fbc7b6272
|
26c6ee936cdc1914dc3598c5dc74df64fa7960a1
|
refs/heads/master
| 2021-04-15T09:18:08.450426
| 2021-03-06T01:53:27
| 2021-03-06T01:53:27
| 126,275,814
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 304
|
py
|
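# Convert an Excel column title such as 'AB' to its column number by treating it as a
# base-26 number with 'A' = 1 ... 'Z' = 26.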
class Solution(object):
def titleToNumber(self, s):
"""
:type s: str
:rtype: int
"""
base = ord('A') - 1
n = len(s)
result = 0
for i in range(n):
result += (ord(s[n - 1 - i]) - base) * pow(26, i)
return result
|
[
"noreply@github.com"
] |
JinnieJJ.noreply@github.com
|
2085ad269f187efe33fbf9c09c32fd5e88473c84
|
60d5b5b1f1c912d1655de3884efc09dfddd8d132
|
/sites/kotourism/interop/admin.py
|
6386d5b15eb775e20b5d54615fdec0aabf661348
|
[] |
no_license
|
alexgula/django_sites
|
15033c739401f24603e957c5a034d63652f0d21f
|
038834c0f544d6997613d61d593a7d5abf673c70
|
refs/heads/master
| 2016-09-05T11:02:43.838095
| 2014-07-07T11:36:07
| 2014-07-07T11:36:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 304
|
py
|
# coding=utf-8
from django.contrib import admin
from .models import Log
class LogAdmin(admin.ModelAdmin):
list_display = ('action_date', 'success', 'results', )
search_fields = ('results', )
date_hierarchy = 'action_date'
list_filter = ('success', )
admin.site.register(Log, LogAdmin)
|
[
"alexgula@gmail.com"
] |
alexgula@gmail.com
|
2b5c20e9871a400e3e7f65eacec72607e68aa9f6
|
5de0c9652b13fc57009626bfdec4c6c761ec450c
|
/test/test_app.py
|
af68128fa5a510cf84ced78968e43c33f2f9cfa3
|
[] |
no_license
|
Quastrado/project_o_mail_service
|
7e845e5c6a70db0a3c32a1dde52e26dc8f1450f0
|
4de6e6e52d6fa1955372fbf2ba9408bad8955863
|
refs/heads/master
| 2023-02-05T08:26:27.726227
| 2020-03-18T14:45:12
| 2020-03-18T14:45:12
| 196,257,710
| 2
| 0
| null | 2023-02-02T06:38:38
| 2019-07-10T18:40:00
|
Python
|
UTF-8
|
Python
| false
| false
| 540
|
py
|
import pytest
from flask_sqlalchemy import SQLAlchemy
from owl_mail import create_app
from owl_mail.models import User
@pytest.fixture
def app():
app=create_app()
return app
def test_app(client):
response = client.get('/menu')
assert response.status_code == 200
@pytest.fixture(scope='module')
def init_database():
db.create_all()
user1 = User('Ghost', 'invisible', 'admin')
user2 = User('Vagabound', 'danger', 'user')
db.session.add(user1)
db.session.add(user2)
yield db
db.drop_all()
|
[
"you@example.com"
] |
you@example.com
|
3e39a7286d81302fbf3055a8e211ba990ae3791b
|
eb9f655206c43c12b497c667ba56a0d358b6bc3a
|
/python/testData/inspections/PyStringFormatInspection/NewStylePackedFunctionCall.py
|
9975f8d86d2a4696b19a165fee177feccb261cce
|
[
"Apache-2.0"
] |
permissive
|
JetBrains/intellij-community
|
2ed226e200ecc17c037dcddd4a006de56cd43941
|
05dbd4575d01a213f3f4d69aa4968473f2536142
|
refs/heads/master
| 2023-09-03T17:06:37.560889
| 2023-09-03T11:51:00
| 2023-09-03T12:12:27
| 2,489,216
| 16,288
| 6,635
|
Apache-2.0
| 2023-09-12T07:41:58
| 2011-09-30T13:33:05
| null |
UTF-8
|
Python
| false
| false
| 132
|
py
|
def f():
return dict(foo=0)
'{foo}'.format(**f())
<warning descr="Too few arguments for format string">"{}"</warning>.format()
|
[
"valentina.kiryushkina@jetbrains.com"
] |
valentina.kiryushkina@jetbrains.com
|
675c93c2b83444bcbb09534e5f4c2034726bac37
|
86f22c67e65438948b982663f8b72a29090504a2
|
/fem-sim/utils/compute_tf_jacobian_models.py
|
e81972fcb7bfee25c67fd9c3d6446709ca2b27b8
|
[] |
no_license
|
itsvismay/research-experiments
|
2738270859db259d917e2baf8a6af4115c195d8f
|
4e49063f9fa53eda156e5cd5ded9c1caf45170ca
|
refs/heads/master
| 2021-09-03T23:01:40.704813
| 2018-01-11T22:37:54
| 2018-01-11T22:37:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,404
|
py
|
import numpy
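# Load a frozen TensorFlow graph, add ops that compute the Jacobian of the decoder output
# with respect to its input, and write the resulting subgraph back to disk.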
def generate_jacobian_for_tf_model(model_input_path, jacobian_output_path):
import tensorflow as tf
from tensorflow_forward_ad import forward_gradients
from tensorflow.python.platform import gfile
from tensorflow.core.protobuf import saved_model_pb2
from tensorflow.python.util import compat
from tensorflow.python.framework import graph_util
from tensorflow.python.framework import graph_io
def body(y, x, i):
n = tf.shape(y)[0]
loop_vars = [
tf.constant(0, tf.int32),
tf.TensorArray(tf.float32, size=n),
]
_, jacobian = tf.while_loop(
lambda j, _: j < n,
lambda j, result: (j+1, result.write(j, tf.gradients(y[j], x)[0][i])),
loop_vars)
return jacobian.stack()
def tf_jacobian(y, x, n):
loop_vars = [
tf.constant(0, tf.int32),
tf.TensorArray(tf.float32, size=n),
]
_, jacobian = tf.while_loop(
lambda i, _: i < n,
lambda i, result: (i+1, result.write(i, body(y[i], x, i))),
loop_vars)
return jacobian.stack()
with gfile.FastGFile(model_input_path, 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
tf.import_graph_def(graph_def, name='')
sess = tf.Session()
input_node = sess.graph.get_tensor_by_name("decoder_input:0")
output_node = sess.graph.get_tensor_by_name("output_node0:0")
jacobians = tf_jacobian(output_node, input_node, 1)
# tf.train.write_graph(jacobians.as_graph_def(), "./", "test_jac")
# from tensorflow.python.framework import graph_util
# from tensorflow.python.framework import graph_io
# # print("pred_node_names", pred_node_names)
# constant_graph = graph_util.convert_variables_to_constants(sess, sess.graph.as_graph_def(), jacobians.name)
# graph_io.write_graph(constant_graph, output_fld, output_path, as_text=False)
# print('saved the freezed graph (ready for inference) at: ', osp.join(output_fld, output_path))
#print(sess.graph.as_graph_def())
subgraph = tf.graph_util.extract_sub_graph(sess.graph.as_graph_def(), ["decoder_input", jacobians.name[:-2]])
graph_io.write_graph(subgraph, "./", jacobian_output_path, as_text=False)
# print(subgraph)
# print(jacobians.name)
# print(output_node.name)
|
[
"lawsonfulton@gmail.com"
] |
lawsonfulton@gmail.com
|
918a53cc6d7de7ff0e39d6779f5f5f5f0b340572
|
27c27208a167f089bb8ce4027dedb3fcc72e8e8a
|
/ProjectEuler/UC solutions/Successful 101-150/Q107.py
|
be8c9a5ca40b89e684d9ebaf533ae69bec056b9f
|
[] |
no_license
|
stankiewiczm/contests
|
fd4347e7b84c8c7ec41ba9746723036d86e2373c
|
85ed40f91bd3eef16e02e8fd45fe1c9b2df2887e
|
refs/heads/master
| 2021-05-10T16:46:41.993515
| 2018-02-16T09:04:15
| 2018-02-16T09:04:15
| 118,587,223
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,639
|
py
|
from Numeric import *
#NET = [zeros(40)]*40;
TOT = 0; netses = []; NETS = [];
for line in file("../TXTdata/network.txt"):
netses.append(line);
for line in netses:
sq = list(); N = 0;
for ch in line:
if (ch in ['0','1','2','3','4','5','6','7','8','9']):
N = 10*N+int(ch);
if (ch == ','):
if (N == 0):
sq.append(0);
else:
sq.append(N);
TOT += N;
N = 0;
if (N != 0):
sq.append(N);
TOT += N;
NETS.append(sq);
print NETS
Cnt = 0;
CNC = [];
for A in arange(len(NETS)):
CNC.append([]);
for B in arange(len(NETS)):
CNC[A].append(0);
CNC[A][A] = 1;
LAST = 1; DoneL = list(); Good = True; NEWL = 0;
while (Cnt < len(NETS)-1):
MIN = 10000; Mi = 0; Mj = 0;
for i in arange(len(NETS)):
for j in arange(i):
if (NETS[i][j] < MIN) and (NETS[i][j] >= LAST):
if (100*i+j) not in DoneL:
MIN = NETS[i][j];
Mi = i;
Mj = j;
if (CNC[Mi][Mj] == 0):
CNC[Mi][Mj] = 1; CNC[Mj][Mi] = 1;
print Cnt,":", Mi,Mj,MIN;
Cnt += 1;
DoneL.append(100*Mi+Mj)
NEWL += MIN;
for a in arange(len(NETS[Mi])):
for b in arange(len(NETS[Mj])):
if (CNC[a][Mi] == 1) and (CNC[b][Mj] == 1):
CNC[a][b] = 1; CNC[b][a] = 1;
else:
print "Completed a failed pass", MIN, Mi, Mj
DoneL.append(100*Mi+Mj)
print TOT/2, NEWL, TOT/2-NEWL;
|
[
"mstankiewicz@gmail.com"
] |
mstankiewicz@gmail.com
|
459168961ff18e611eb3a20384429e8466f547ec
|
ecf0d106831b9e08578845674a457a166b6e0a14
|
/programming_basics/EXAM_PREPARATION/6_baking_competition.py
|
9552197aa4b84f14c2e538ac66c099c7e7be2450
|
[] |
no_license
|
ivo-bass/SoftUni-Solutions
|
015dad72cff917bb74caeeed5e23b4c5fdeeca75
|
75612d4bdb6f41b749e88f8d9c512d0e00712011
|
refs/heads/master
| 2023-05-09T23:21:40.922503
| 2021-05-27T19:42:03
| 2021-05-27T19:42:03
| 311,329,921
| 8
| 5
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,187
|
py
|
sweets_prices = {"cookies": 1.50, "cakes": 7.80, "waffles": 2.30}
sweets_count_total = {"cookies": 0, "cakes": 0, "waffles": 0}
participants_count = int(input())
for _ in range(participants_count):
sweets_per_participant = {"cookies": 0, "cakes": 0, "waffles": 0}
baker = input()
sweets_type = input()
while sweets_type != "Stop baking!":
sweets_count = int(input())
sweets_per_participant[sweets_type] += sweets_count
sweets_count_total[sweets_type] += sweets_count
sweets_type = input()
print(f'{baker} baked '
f'{sweets_per_participant["cookies"]} cookies, '
f'{sweets_per_participant["cakes"]} cakes and '
f'{sweets_per_participant["waffles"]} waffles.')
total_sold = sweets_count_total["cookies"] + \
sweets_count_total["cakes"] + \
sweets_count_total["waffles"]
total_sum = sweets_count_total["cookies"] * sweets_prices["cookies"] + \
sweets_count_total["cakes"] * sweets_prices["cakes"] + \
sweets_count_total["waffles"] * sweets_prices["waffles"]
print(f"All bakery sold: {total_sold}")
print(f"Total sum for charity: {total_sum:.2f} lv.")
|
[
"ivailo.ignatoff@gmail.com"
] |
ivailo.ignatoff@gmail.com
|
0792edfcdab56134a64173a4679ac2670b36c589
|
e9973cbb3ceef6941ed6bc9744b0619d1cbac157
|
/blender/arm/logicnode/action_set_visible.py
|
1cefa30d81bd176229e2378961fc0e677cd855db
|
[
"Zlib",
"GPL-2.0-only"
] |
permissive
|
ForestCSharp/armory
|
5da5551a9ef344ad7ea44f55b6c52c07fa6302d5
|
89586682154e5fcef1529b2123c5a2d48ac7e982
|
refs/heads/master
| 2020-12-15T04:09:12.063333
| 2020-03-07T14:44:02
| 2020-03-07T14:44:02
| 234,990,289
| 1
| 0
|
Zlib
| 2020-03-07T14:44:03
| 2020-01-20T00:42:21
|
Python
|
UTF-8
|
Python
| false
| false
| 565
|
py
|
import bpy
from bpy.props import *
from bpy.types import Node, NodeSocket
from arm.logicnode.arm_nodes import *
class SetVisibleNode(Node, ArmLogicTreeNode):
'''Set visible node'''
bl_idname = 'LNSetVisibleNode'
bl_label = 'Set Visible'
bl_icon = 'QUESTION'
def init(self, context):
self.inputs.new('ArmNodeSocketAction', 'In')
self.inputs.new('ArmNodeSocketObject', 'Object')
self.inputs.new('NodeSocketBool', 'Bool')
self.outputs.new('ArmNodeSocketAction', 'Out')
add_node(SetVisibleNode, category='Action')
|
[
"lubos.lenco@gmail.com"
] |
lubos.lenco@gmail.com
|
191db65073f52a890b3b84415c335f3be5793331
|
b4484e6309bc538f87ca9f4692b6e5c47da4c449
|
/stacks-and-queues/stack-and-queue-NEW/dynamicArrayQueue/main.py
|
df83210062c4b75d4a6dc7195976c17f5f418188
|
[] |
no_license
|
teknofage/CS-1.3-Core-Data-Structures
|
5178077c5995655bfecfe5e2c29d4405b1dabc6b
|
c2a439188113c0fc4175ff99cebba831ac5960d5
|
refs/heads/master
| 2022-12-22T18:17:50.463788
| 2020-10-07T23:58:29
| 2020-10-07T23:58:29
| 291,888,423
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 410
|
py
|
from Queue import Queue
'''#CREATE a queue
#front at index 0
#back at index n - 1
my_queue = []
#UPDATE, ADD
#enqueue
my_queue.append("A")
my_queue.append("B")
my_queue.append("C")
#DELETE
#dequeue
my_queue.pop(0)
#READ
#front
print(my_queue[0])'''
#CREATE
my_queue = Queue()
my_queue.enqueue("A")
#["A"]
my_queue.enqueue("B")
#["A", "B"]
my_queue.enqueue("C")
#["A", "B", "C"]
print(my_queue.front())
|
[
"teknofage@gmail.com"
] |
teknofage@gmail.com
|
b7215d422a5dfcf2055f1a7256eecebe08f2f804
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02934/s538390286.py
|
0244438040ed16b0988ea23f2cb1e3db34324d0a
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 323
|
py
|
#!/usr/bin/env python3
import sys
read = sys.stdin.buffer.read
readline = sys.stdin.buffer.readline
readlines = sys.stdin.buffer.readlines
def main():
N = int(readline())
As = map(int, readline().split())
ans = 0
for a in As:
ans += 1 / a
print(1/ans)
if __name__ == '__main__':
main()
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
e98dbe14df42a16853ab45a34a4f32dbb1f2e053
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2638/60836/314330.py
|
dd3bfd48d6ff027cf4fab8a595b1773601811862
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,345
|
py
|
"""
第一行包含两个正整数N、M,分别表示数列中实数的个数和操作的个数
第二行包含N个实数,其中第i个实数表示数列的第i项
接下来M行,每行为一条操作,格式为以下两种之一:
操作1:1 x y k ,表示将第x到第y项每项加上k,k为一实数
操作2:2 x y ,表示求出第x到第y项这一子数列的平均数
操作3:3 x y ,表示求出第x到第y项这一子数列的方差
5 5
1 5 4 2 3
2 1 4
3 1 5
1 1 1 1
1 2 2 -1
3 1 5
"""
NM=[int(m) for m in str(input()).split(" ")]
N=NM[0]
M=NM[1]
arr=[int(m) for m in str(input()).split(" ")]
instruction=[]
for i in range(M):
instruction.append([int(m) for m in str(input()).split(" ")])
for i in range(M):
if(instruction[i][0]==1):
x=instruction[i][1]-1
y=instruction[i][2]-1
k=instruction[i][3]
while(x<=y):
arr[x]+=k
x+=1
if(instruction[i][0]==2):
x = instruction[i][1] - 1
y = instruction[i][2] - 1
print('%.4f' % (sum(arr[x:y+1])/float(y-x+1)))
if(instruction[i][0]==3):
x = instruction[i][1] - 1
y = instruction[i][2] - 1
s = y - x + 1
E=sum(arr[x:y+1])/float(s)
first=0
while(x<=y):
first+=pow(arr[x]-E,2)
x+=1
print('%.4f' % (first/s))
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
42fe82c8265bc7a52b94883083b19770d75793da
|
006341ca12525aa0979d6101600e78c4bd9532ab
|
/CMS/Zope-3.2.1/Dependencies/zope.app-Zope-3.2.1/zope.app/applicationcontrol/zopeversion.py
|
85daa3ab7e652fa583597c1d4a5e1a8224c1ba05
|
[
"ZPL-2.1",
"Python-2.0",
"ICU",
"LicenseRef-scancode-public-domain",
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference",
"ZPL-2.0"
] |
permissive
|
germanfriday/code-examples-sandbox
|
d0f29e20a3eed1f8430d06441ac2d33bac5e4253
|
4c538584703754c956ca66392fdcecf0a0ca2314
|
refs/heads/main
| 2023-05-30T22:21:57.918503
| 2021-06-15T15:06:47
| 2021-06-15T15:06:47
| 377,200,448
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,288
|
py
|
##############################################################################
#
# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Utility to retrieve the Zope version.
$Id: zopeversion.py 26884 2004-08-03 21:31:09Z fdrake $
"""
__docformat__ = 'restructuredtext'
import os
import re
import zope.app
from zope.app.applicationcontrol.interfaces import IZopeVersion
from zope.interface import implements
class ZopeVersion(object):
implements(IZopeVersion)
__entries = re.compile(r'(url|revision)\s*=\s*"([^"]+)"')
__tags = re.compile(r'/(tags|branches)/([^/]+)/')
def __init__(self, path=None):
if path is None:
path = os.path.dirname(os.path.abspath(zope.app.__file__))
self.path = path
self.result = None
def getZopeVersion(self):
"""See zope.app.applicationcontrol.interfaces.IZopeVersion"""
if self.result is not None:
return self.result
self.result = "Development/Unknown"
# is this a SVN checkout?
svndir = os.path.join(self.path, ".svn")
if os.path.isdir(svndir):
self.__setSVNVersion(svndir)
else:
# try to get official Zope release information
versionfile = os.path.join(self.path, "version.txt")
if os.path.isfile(versionfile):
f = file(versionfile)
self.result = f.readline().strip() or self.result
f.close()
return self.result
def __setSVNVersion(self, svndir):
entriesfile = os.path.join(svndir, "entries")
# get the version information
if os.path.isfile(entriesfile):
f = file(entriesfile)
url, revision = "", ""
for line in f:
match = self.__entries.search(line)
if match is not None:
name, value = match.group(1, 2)
if name == "url":
url = value
elif name == "revision":
revision = value
if url and revision:
break
f.close()
if revision and url:
match = self.__tags.search(url)
tag = ""
if match is not None:
type, value = match.group(1, 2)
if type == "tags":
tag = "/Tag: %s" % value
elif type == "branches":
tag = "/Branch: %s" % value
self.result = "Development/Revision: %s%s" \
% (revision, tag)
ZopeVersionUtility = ZopeVersion()
|
[
"chris@thegermanfriday.com"
] |
chris@thegermanfriday.com
|
109e8140c1ba99a672fa1b549b8707bd4b670ffe
|
8df5df20ac10a8dc81f7ac6e21e835553a8f5e2d
|
/src/sleekapps/graphql/threads/schemas/post.py
|
8c40cb5a4ae534582fcb193dc02a176600fe4dd5
|
[] |
no_license
|
adepeter/sleekforum
|
7be71907d26623c43cd78a6da77a2398c1c25e26
|
35385e648974cdf009732af4c50b69a1825f7fda
|
refs/heads/master
| 2022-09-18T02:45:42.522128
| 2021-10-23T06:41:44
| 2021-10-23T06:41:44
| 208,669,487
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 185
|
py
|
from ..queries.post import PostQuery
from ..mutations.post import PostMutation
class PostQuerySchema(PostQuery):
pass
class PostMutationSchema(PostMutation):
pass
|
[
"adepeter26@gmail.com"
] |
adepeter26@gmail.com
|
8b15af25bd12ee28dd60bfa50ef64233eade6b3e
|
503d2f8f5f5f547acb82f7299d86886691966ca5
|
/atcoder/abc170_d.py
|
b793c3b0e59bf00a5329e943ebdaf51604ca831c
|
[] |
no_license
|
Hironobu-Kawaguchi/atcoder
|
3fcb649cb920dd837a1ced6713bbb939ecc090a9
|
df4b55cc7d557bf61607ffde8bda8655cf129017
|
refs/heads/master
| 2023-08-21T14:13:13.856604
| 2023-08-12T14:53:03
| 2023-08-12T14:53:03
| 197,216,790
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 398
|
py
|
# https://atcoder.jp/contests/abc170/tasks/abc170_d
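# Sieve-style counting: for each value mark all of its multiples; an element counts toward
# the answer only if nothing else in A divides it and it appears exactly once (cnt[x] == 1).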
import numpy as np
import sys
input = sys.stdin.buffer.readline
N = int(input())
A = list(map(int, (input().split())))
A.sort()
cnt = np.zeros(10**6+10, dtype=np.int32)
for x in A:
if cnt[x] != 0:
cnt[x] = 2
continue
cnt[x::x] += 1
ans = 0
for x in A:
if cnt[x] == 1:
ans += 1
print(ans)
|
[
"hironobukawaguchi3@gmail.com"
] |
hironobukawaguchi3@gmail.com
|
ecbffecc0cc2e41903c8ea05f722a15622f58247
|
16bcda63a6bba61af0c835aabd73b0467696be4a
|
/itertools.permutations()/new 1.py
|
fc66ce8640fb164b234f6d40cbf4f58b749a3cac
|
[] |
no_license
|
ZirvedaAytimur/HackerRank_Python
|
8ea7d629e75b4db31e04c64d102bf7a0a0b5632b
|
175017ed3d8ff11385e12e926b94da97797fc094
|
refs/heads/master
| 2023-03-13T13:00:42.225364
| 2021-03-10T20:36:18
| 2021-03-10T20:36:18
| 288,210,152
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 130
|
py
|
from itertools import permutations
S, N = input().split()
print(*[''.join(i) for i in permutations(sorted(S), int(N))], sep="\n")
|
[
"zirvedaytimur@gmail.com"
] |
zirvedaytimur@gmail.com
|
fe770d37c3f012383470ba567f00d7c563d70240
|
c37e63a902106bbde63fb5517f420e2c043c7f93
|
/6.第六類/PYD608.py
|
f2831263799476d02d6107d8dda3737c6e08f4e2
|
[] |
no_license
|
junyi1997/TQC_Python
|
35b1320ca7a1b2f8eee4b9e8d4f1b9d7f4a5c02f
|
befe177880a7034f37848ff404bb7d33f9a07ff9
|
refs/heads/master
| 2020-04-25T04:05:57.508858
| 2019-03-24T17:03:07
| 2019-03-24T17:03:07
| 172,499,205
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 571
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 7 20:16:26 2018
@author: user
Index of the largest and smallest values
"""
data=[]
d_max=0
d_min=99999
for i in range(3):
data.append([])
for j in range(3):
num=eval(input())
data[i].append(num)
if num > d_max:
d_max=num
d_ind=(i,j)
if num < d_min:
d_min=num
da_ind=(i,j)
print("Index of the largest number {:} is: ({:}, {:})".format(d_max,d_ind[0],d_ind[1]))
print("Index of the smallest number {:} is: ({:}, {:})".format(d_min,da_ind[0],da_ind[1]))
|
[
"q5896799@gmail.com"
] |
q5896799@gmail.com
|
b46c0d94a3c555f1f2593bb99544a3614bf282bd
|
4bf3aaf77c309a489100b98a8c03532632df152c
|
/Python/동빈북/그리디/무지의먹방라이브.py
|
c99f1764e90f9f5916ae8413225428dd15fcbe6a
|
[] |
no_license
|
murane/PS
|
7fbfc54d962231949efc67f1a35c4b0119de0780
|
e938c6c503aeac08bf65e1e66709172b0e5da6ef
|
refs/heads/master
| 2023-05-06T22:51:54.105811
| 2021-05-30T03:34:53
| 2021-05-30T03:34:53
| 293,699,199
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 674
|
py
|
import heapq
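# Greedy simulation with a min-heap: foods with the smallest remaining time are finished first;
# once the elapsed time would reach k, the next food is chosen from the survivors ordered by index.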
def solution(food_times, k):
heap=[]
if sum(food_times)<=k:
return -1
for idx,time in enumerate(food_times):
heapq.heappush(heap,(time,idx+1))
tot_cnt=0
cur_cnt=0
while heap:
tmp_time,tmp_idx=heapq.heappop(heap)
tot_cnt+=(tmp_time-cur_cnt)*(len(heap)+1)
cur_cnt=tmp_time
if tot_cnt>=k:
heapq.heappush(heap,(tmp_time,tmp_idx))
break
heap.sort(key=lambda x: x[1])
return heap[(k-tot_cnt)%len(heap)][1]
if __name__ == '__main__':
food_times=[4,2,3,6,7,1,5,8]
k=27
#food_times=[3,1,2]
#k=1
answer = 5
print(solution(food_times, k))
|
[
"murane@naver.com"
] |
murane@naver.com
|
2edf5053669ffa0143f87c4e40cfcf93088f2495
|
c753216f44c4c5f34d50763a02d720e064ed5d13
|
/OPSI/web2/test/test_log.py
|
5909fce6269025ae6359143e64908bd32be8d23e
|
[] |
no_license
|
mpice-mn/python-opsi
|
7fefcd590213a5b698022323b166710e8cbf5641
|
76dcd4e38100e019f64731539b31be6e8af60af7
|
refs/heads/stable
| 2023-05-02T05:25:31.478822
| 2020-02-05T21:16:50
| 2020-02-05T21:16:50
| 104,738,074
| 0
| 0
| null | 2017-09-25T10:49:13
| 2017-09-25T10:49:13
| null |
UTF-8
|
Python
| false
| false
| 4,489
|
py
|
# Copyright (c) 2001-2007 Twisted Matrix Laboratories.
# See LICENSE for details.
from OPSI.web2 import log, resource, http
from OPSI.web2.test.test_server import BaseCase, BaseTestResource
from twisted.python import log as tlog
class BufferingLogObserver(log.BaseCommonAccessLoggingObserver):
"""
A web2 log observer that buffers messages.
"""
messages = []
def logMessage(self, message):
self.messages.append(message)
class SetDateWrapperResource(resource.WrapperResource):
"""
A resource wrapper which sets the date header.
"""
def hook(self, req):
def _filter(req, resp):
resp.headers.setHeader('date', 0.0)
return resp
_filter.handleErrors = True
req.addResponseFilter(_filter, atEnd=True)
class NoneStreamResource(resource.Resource):
"""
A basic empty resource.
"""
def render(self, req):
return http.Response(200)
class TestLogging(BaseCase):
def setUp(self):
self.blo = BufferingLogObserver()
tlog.addObserver(self.blo.emit)
# some default resource setup
self.resrc = BaseTestResource()
self.resrc.child_emptystream = NoneStreamResource()
self.root = SetDateWrapperResource(log.LogWrapperResource(self.resrc))
def tearDown(self):
tlog.removeObserver(self.blo.emit)
def assertLogged(self, **expected):
"""
Check that the logged messages match the expected format.
"""
if 'date' not in expected:
epoch = log.BaseCommonAccessLoggingObserver().logDateString(0)
expected['date'] = epoch
if 'user' not in expected:
expected['user'] = '-'
if 'referer' not in expected:
expected['referer'] = '-'
if 'user-agent' not in expected:
expected['user-agent'] = '-'
if 'version' not in expected:
expected['version'] = '1.1'
if 'remotehost' not in expected:
expected['remotehost'] = 'remotehost'
messages = self.blo.messages[:]
del self.blo.messages[:]
expectedLog = ('%(remotehost)s - %(user)s [%(date)s] "%(method)s '
'%(uri)s HTTP/%(version)s" %(status)d %(length)d '
'"%(referer)s" "%(user-agent)s"')
if expected.get('logged', True):
# Ensure there weren't other messages hanging out
self.assertEquals(len(messages), 1, "len(%r) != 1" % (messages, ))
self.assertEquals(messages[0], expectedLog % expected)
else:
self.assertEquals(len(messages), 0, "len(%r) != 0" % (messages, ))
def test_logSimpleRequest(self):
"""
Check the log for a simple request.
"""
uri = 'http://localhost/'
method = 'GET'
def _cbCheckLog(response):
self.assertLogged(method=method, uri=uri, status=response[0],
length=response[1].getHeader('content-length'))
d = self.getResponseFor(self.root, uri, method=method)
d.addCallback(_cbCheckLog)
return d
def test_logErrors(self):
"""
Test the error log.
"""
def test(_, uri, method, **expected):
expected['uri'] = uri
expected['method'] = method
def _cbCheckLog(response):
self.assertEquals(response[0], expected['status'])
self.assertLogged(
length=response[1].getHeader('content-length'), **expected)
return self.getResponseFor(self.root,
uri,
method=method).addCallback(_cbCheckLog)
uri = 'http://localhost/foo' # doesn't exist
method = 'GET'
d = test(None, uri, method, status=404, logged=True)
# no host. this should result in a 400 which doesn't get logged
uri = 'http:///'
d.addCallback(test, uri, method, status=400, logged=False)
return d
def test_logNoneResponseStream(self):
"""
Test the log of an empty resource.
"""
uri = 'http://localhost/emptystream'
method = 'GET'
def _cbCheckLog(response):
self.assertLogged(method=method, uri=uri, status=200,
length=0)
d = self.getResponseFor(self.root, uri, method=method)
d.addCallback(_cbCheckLog)
return d
|
[
"j.schneider@uib.de"
] |
j.schneider@uib.de
|
f722f47de9df0ad22072198f7ee90be74d106206
|
1dd0e0cbe481e965b1c9886a3d44ca3a122ae2af
|
/medikit/pipeline.py
|
59c97241d4e208de7b4bbea3d0301dc62eb5f615
|
[] |
no_license
|
sei-nicolas/medikit
|
7dffc0a2866cba54023e19f5ebcd7c70b57359e8
|
e06fe8bc80cfa9af6fdeedf1f5fc2ef2f662dd9b
|
refs/heads/master
| 2020-07-07T00:02:18.040553
| 2019-06-30T08:33:42
| 2019-06-30T08:33:42
| 203,179,919
| 0
| 0
| null | 2019-08-19T13:36:52
| 2019-08-19T13:36:52
| null |
UTF-8
|
Python
| false
| false
| 2,278
|
py
|
"""
Pipelines are a way to describe a simple step-by-step process, for example the release process.
"""
import datetime
import json
import logging
logger = logging.getLogger(__name__)
class Pipeline:
"""
Class to configure a pipeline.
"""
def __init__(self):
self.steps = []
def add(self, step):
self.steps.append(step)
return self
def remove(self, identity):
for i in range(len(self.steps)):
if identity == get_identity(self.steps[i]):
del self.steps[i]
break
def get_identity(step):
return str(step)
class ConfiguredPipeline:
"""
Used to actually load, run and persist a configured pipeline.
"""
def __init__(self, name, pipeline, config=None):
self.name = name
self.steps = pipeline.steps
self.meta = {"created": str(datetime.datetime.now())}
self.config = config
def init(self):
for step in self.steps:
step.init()
def next(self):
for step in self.steps:
if not step.complete:
return step
raise StopIteration("No step left.")
@property
def current(self):
for i, step in enumerate(self.steps):
if not step.complete:
return i + 1
return len(self)
def __len__(self):
return len(self.steps)
def abort(self):
for step in self.steps:
step.abort()
def serialize(self):
return json.dumps(
{
"meta": {**self.meta, "updated": str(datetime.datetime.now())},
"steps": [[get_identity(step), step.get_state()] for step in self.steps],
},
indent=4,
)
def unserialize(self, serialized):
serialized = json.loads(serialized)
self.meta = serialized.get("meta", {})
steps = serialized.get("steps", [])
if len(steps) != len(self.steps):
raise IOError("Invalid pipeline state storage.")
for (identity, state), step in zip(steps, self.steps):
if get_identity(step) != identity:
raise IOError("Mismatch on step identity.")
step.set_state(state)
step.config = self.config
|
[
"romain@dorgueil.net"
] |
romain@dorgueil.net
|
784081e775bbe964c1190f6cd858499aab6d04fd
|
dc3b3615e0e94e730508229c0fc758fc09390856
|
/src_nlp/tensorflow/toward_control/model/discriminator.py
|
3b73306d82b4046fa5b0514ff421f7b6998eda2a
|
[
"MIT"
] |
permissive
|
ashishpatel26/finch
|
63993c208e0fc0ed52512420d38d03ce456116c1
|
bf2958c0f268575e5d51ad08fbc08b151cbea962
|
refs/heads/master
| 2020-04-20T21:59:25.292770
| 2018-12-25T01:31:50
| 2018-12-25T01:31:50
| 169,125,127
| 0
| 1
|
MIT
| 2019-02-04T18:17:06
| 2019-02-04T18:17:06
| null |
UTF-8
|
Python
| false
| false
| 4,199
|
py
|
from base import BaseModel
from configs import args
import tensorflow as tf
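# Two text-classifier discriminators: _Discriminator pools token embeddings with a learned
# attention weight, Discriminator is a CNN with kernel sizes 3/4/5; soft_inp=True feeds soft
# one-hot distributions through the embedding matrix instead of token ids.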
class _Discriminator(BaseModel):
def __init__(self, build_graph=True):
super().__init__('Discriminator')
with tf.variable_scope(self._scope):
self.embedding = tf.get_variable('lookup_table', [args.vocab_size, args.embed_dims])
self.dropout_embed = tf.layers.Dropout(args.discriminator_dropout_rate)
self.attn_proj = tf.layers.Dense(1, tf.tanh)
self.output_proj = tf.layers.Dense(args.n_class)
if build_graph:
self.build_train_graph(dataloader)
self.build_predict_graph(dataloader)
def __call__(self, inputs, is_training, soft_inp=False):
with tf.variable_scope(self._scope):
if soft_inp:
_inputs = tf.reshape(inputs, [-1, args.vocab_size])
x = tf.matmul(_inputs, self.embedding)
batch_sz = tf.shape(inputs)[0]
x = tf.reshape(x, [batch_sz, args.max_len, args.embed_dims])
else:
x = tf.nn.embedding_lookup(self.embedding, inputs)
x = self.dropout_embed(x, training=is_training)
align = tf.squeeze(self.attn_proj(x), -1)
align = tf.expand_dims(tf.nn.softmax(align), -1)
x = tf.squeeze(tf.matmul(x, align, transpose_a=True), -1)
logits = self.output_proj(x)
return logits
class Discriminator(BaseModel):
def __init__(self, build_graph=True):
super().__init__('Discriminator')
with tf.variable_scope(self._scope):
self.embedding = tf.get_variable('lookup_table', [args.vocab_size, args.embed_dims])
self.dropout_embed = tf.layers.Dropout(args.discriminator_dropout_rate)
self.conv_k3 = tf.layers.Conv1D(args.n_filters, 3, activation=tf.nn.relu)
self.conv_k4 = tf.layers.Conv1D(args.n_filters, 4, activation=tf.nn.relu)
self.conv_k5 = tf.layers.Conv1D(args.n_filters, 5, activation=tf.nn.relu)
self.dropout_feat = tf.layers.Dropout(args.discriminator_dropout_rate)
self.hidden_proj = tf.layers.Dense(args.n_filters, tf.nn.relu)
self.output_proj = tf.layers.Dense(args.n_class)
if build_graph:
self.build_train_graph(dataloader)
self.build_predict_graph(dataloader)
def __call__(self, inputs, is_training, soft_inp=False):
with tf.variable_scope(self._scope):
if soft_inp:
_inputs = tf.reshape(inputs, [-1, args.vocab_size])
x = tf.matmul(_inputs, self.embedding)
batch_sz = tf.shape(inputs)[0]
x = tf.reshape(x, [batch_sz, args.max_len, args.embed_dims])
else:
x = tf.nn.embedding_lookup(self.embedding, inputs)
x = self.dropout_embed(x, training=is_training)
feat_map = []
for conv in [self.conv_k3, self.conv_k4, self.conv_k5]:
_x = conv(x)
_x = tf.layers.max_pooling1d(_x, _x.get_shape().as_list()[1], 1)
_x = tf.reshape(_x, (tf.shape(x)[0], args.n_filters))
feat_map.append(_x)
x = tf.concat(feat_map, -1)
x = self.dropout_feat(x, training=is_training)
x = self.hidden_proj(x)
logits = self.output_proj(x)
return logits
def build_train_graph(self, dataloader):
X_batch, y_batch = dataloader.train_iterator.get_next()
logits = self.forward(X_batch, is_training=True)
self.ops['global_step'] = tf.Variable(0, trainable=False)
self.ops['loss'] = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=logits, labels=y_batch))
self.ops['train'] = tf.train.AdamOptimizer().minimize(
self.ops['loss'], global_step=self.ops['global_step'])
def build_predict_graph(self, dataloader):
self.ops['pred_logits'] = self.forward(dataloader.predict_iterator.get_next(),
is_training=False)
|
[
"zhedongzheng@outlook.com"
] |
zhedongzheng@outlook.com
|
5149a29ec22475fb3f959d83ecc335c7692de590
|
d48a10791e607b1690eea9c6c7191416cd60dc2d
|
/src/tlog/receiver/parse/syslog.py
|
1691afeb50010f7356e35dd4e226981e5d97f225
|
[] |
no_license
|
thomaserlang/TLog
|
d84d1f51d5454598539fa8ab56b26292a13899f9
|
3f331c7169c90d1fac0d1922b011b56eebbd086a
|
refs/heads/master
| 2020-06-02T19:26:41.600474
| 2014-08-23T19:00:34
| 2014-08-23T19:00:34
| 12,624,537
| 2
| 0
| null | 2013-09-13T16:00:48
| 2013-09-05T17:52:29
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 7,392
|
py
|
import logging
import re
import dateutil.parser
from tlog.receiver.parse.parsed import Parsed
class Syslog(object):
'''
Class for parsing different syslog standards.
Parse a syslog message:
Syslog.parse('some syslog').
Returns None if the log format is not supported.
Supported standards:
* RFC 3164
* RFC 5424
'''
# PRI TIMESTAMP HOST MESSAGE
regex_3164 = re.compile('^<([0-9]+)>([a-z ]+ [0-9]+ [0-9:]+) ([^ ]+) (.*)$', re.IGNORECASE)
# PRI VERSION TIME HOST APP PROCID MSGID STRUC-DATA MESSAGE
regex_5424 = re.compile('^<([0-9]+)>([0-9]+) ([^ ]+) ([^ ]+) ([^- ]+|-) ([^- ]+|-) ([^- ]+|-) (\[.*\]|-) (.*)$', re.IGNORECASE)
@classmethod
def parse_message(cls, message):
'''
Removes BOM from the message.
BOM indicates that the message is encoded with utf-8.
:param message: str
:returns: str
'''
if message[:3] == 'BOM':
if isinstance(message, unicode):
return message[3:]
return message[3:].decode('utf-8')
return message
@classmethod
def parse_process_info_3164(cls, message):
'''
Returns process info from a message and the message,
where the process info has been removed.
Example:
su[123]: 'su root' failed for lonvick on /dev/pts/8
:param message: str
:returns: (str, dict)
str:
'su root' failed for lonvick on /dev/pts/8
dict:
{
'app-name': 'su',
'procid': 123
}
'''
i = 0
value = ''
data = {}
prev_s = ''
if message[:1] == ':':
return (message[1:], data)
for s in message:
i += 1
if s == ' ' and prev_s == ':':
if 'app-name' in data:
return (message[i:], data)
elif s == ' ':
return (message, data)
elif s in ('[', ':'):
if 'app-name' not in data:
data['app-name'] = value
value = ''
prev_s = s
continue
elif s == ']':
data['procid'] = int(value)
continue
value = value + s
prev_s = s
return (message, data)
@classmethod
def parse_structured_data(cls, structured_data):
'''
Parses a structured-data as specified in: http://tools.ietf.org/html/rfc5424#section-6.3
Example:
[exampleSDID@32473 iut="3" eventSource="Application \\"[test\\]\\"" eventID="1011"][examplePriority@32473 class="high"]
:param structured_data: str
http://tools.ietf.org/html/rfc5424#section-6.3
:returns: dict
{
'exampleSDID@32473': {
'iut': '3',
'eventSource': 'Application "[test]"',
'eventID': '1011'
},
'examplePriority@32473': {
'class': 'high'
}
}
'''
def remove_escaped(value):
# http://tools.ietf.org/html/rfc5424#section-6.3.3
value = value.replace(u'\\"', u'"')
value = value.replace(u'\\]', ']')
value = value.replace(u'\\\\', '\\')
return value
if isinstance(structured_data, str):
structured_data = structured_data.decode('utf-8')
parsed = {}
d = parsed
key = u''
find_key = True
find_field = False
value = u''
in_string = False
prev_s = u''
for s in structured_data:
if not in_string:
if s == u'[':
find_key = True
find_field = False
d = parsed
continue
if s in (u' ', u']'): # key found
if not key:
continue
parsed[key] = {}
d = parsed[key]
find_field = True
key = ''
continue
if s == u'=':# key found and value start
find_field = False
in_string = False
continue
            if s == u'"' and prev_s != u'\\':
if not in_string:
in_string = True
continue
# value found
d[key] = remove_escaped(value)
value = ''
key = ''
find_field = True
in_string = False
continue
if not in_string:
key = key + s
else:
value = value + s
prev_s = s
return parsed
@classmethod
def parse_3164(cls, log):
'''
:returns: ``Parsed``
'''
match = cls.regex_3164.match(log)
if match:
pri = int(match.group(1))
severity = pri % 8
message, process_info = cls.parse_process_info_3164(match.group(4))
data = {
'message': cls.parse_message(message),
'priority': pri,
'facility': pri / 8,
'severity': severity,
'timestamp': dateutil.parser.parse(match.group(2)),
}
data.update(process_info)
return Parsed(
hostname=match.group(3),
level=severity,
data=data,
standard=u'Syslog 3164',
)
return None
@classmethod
def parse_5424(cls, log):
'''
:returns: ``Parsed``
'''
match = cls.regex_5424.match(log)
if match:
pri = int(match.group(1))
severity = pri % 8
data = {
'message': cls.parse_message(match.group(9)),
'priority': pri,
'facility': pri / 8,
'severity': severity,
'timestamp': dateutil.parser.parse(match.group(3)),
}
            if match.group(5) != '-':
                data['app-name'] = match.group(5)
            if match.group(6) != '-':
                data['procid'] = match.group(6)
            if match.group(7) != '-':
                data['msgid'] = match.group(7)
            if match.group(8) != '-':
data['structured-data'] = cls.parse_structured_data(match.group(8))
return Parsed(
hostname=match.group(4),
level=severity,
data=data,
standard=u'Syslog 5424',
)
return None
@classmethod
def parse(cls, log):
'''
Tries the different log standards.
Returns none if the log format is not supported.
:returns: Parsed
'''
s_3164 = cls.parse_3164(log)
if s_3164:
return s_3164
s_5424 = cls.parse_5424(log)
if s_5424:
return s_5424
return None
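# Minimal usage sketch (editor's note; assumes the Parsed class exposes its
# constructor keywords as attributes; the sample line is the RFC 3164 example
# already quoted in the docstring above):
#   parsed = Syslog.parse("<34>Oct 11 22:14:15 mymachine su[123]: 'su root' failed for lonvick on /dev/pts/8")
#   if parsed:
#       print(parsed.hostname, parsed.standard, parsed.data['severity'])
# Syslog.parse() returns a Parsed instance for RFC 3164/5424 messages and None
# for anything it cannot recognise.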
|
[
"thomas@erlang.dk"
] |
thomas@erlang.dk
|
6930b3b01ee36c759829bd4f634118dc666a853f
|
09e57dd1374713f06b70d7b37a580130d9bbab0d
|
/benchmark/startQiskit2987.py
|
472b1cd797ec2131dde4ee210490114eef1d7309
|
[
"BSD-3-Clause"
] |
permissive
|
UCLA-SEAL/QDiff
|
ad53650034897abb5941e74539e3aee8edb600ab
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
refs/heads/main
| 2023-08-05T04:52:24.961998
| 2021-09-19T02:56:16
| 2021-09-19T02:56:16
| 405,159,939
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,147
|
py
|
# qubit number=4
# total number=39
import cirq
import qiskit
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2
import numpy as np
import networkx as nx
def bitwise_xor(s: str, t: str) -> str:
length = len(s)
res = []
for i in range(length):
res.append(str(int(s[i]) ^ int(t[i])))
return ''.join(res[::-1])
def bitwise_dot(s: str, t: str) -> str:
length = len(s)
res = 0
for i in range(length):
res += int(s[i]) * int(t[i])
return str(res % 2)
def build_oracle(n: int, f) -> QuantumCircuit:
# implement the oracle O_f
# NOTE: use multi_control_toffoli_gate ('noancilla' mode)
# https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html
# https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates
# https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate
controls = QuantumRegister(n, "ofc")
target = QuantumRegister(1, "oft")
oracle = QuantumCircuit(controls, target, name="Of")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
oracle.mct(controls, target[0], None, mode='noancilla')
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n,"qc")
classical = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classical)
prog.h(input_qubit[3]) # number=31
prog.cz(input_qubit[0],input_qubit[3]) # number=32
prog.h(input_qubit[3]) # number=33
prog.h(input_qubit[3]) # number=30
prog.cx(input_qubit[0],input_qubit[3]) # number=36
prog.x(input_qubit[3]) # number=37
prog.cx(input_qubit[0],input_qubit[3]) # number=38
prog.h(input_qubit[3]) # number=13
prog.cz(input_qubit[0],input_qubit[3]) # number=14
prog.h(input_qubit[1]) # number=18
prog.cz(input_qubit[3],input_qubit[1]) # number=19
prog.z(input_qubit[3]) # number=25
prog.x(input_qubit[3]) # number=35
prog.h(input_qubit[1]) # number=20
prog.rx(-3.141592653589793,input_qubit[3]) # number=26
prog.h(input_qubit[3]) # number=15
prog.h(input_qubit[1]) # number=2
prog.h(input_qubit[2]) # number=3
prog.h(input_qubit[2]) # number=17
prog.h(input_qubit[3]) # number=4
prog.h(input_qubit[0]) # number=5
oracle = build_oracle(n-1, f)
prog.append(oracle.to_gate(),[input_qubit[i] for i in range(n-1)]+[input_qubit[n-1]])
prog.h(input_qubit[1]) # number=6
prog.h(input_qubit[2]) # number=7
prog.h(input_qubit[3]) # number=8
prog.h(input_qubit[0]) # number=9
prog.h(input_qubit[0]) # number=27
prog.cz(input_qubit[1],input_qubit[0]) # number=28
prog.h(input_qubit[0]) # number=29
prog.cx(input_qubit[1],input_qubit[0]) # number=22
prog.cx(input_qubit[2],input_qubit[1]) # number=34
prog.x(input_qubit[1]) # number=23
prog.x(input_qubit[1]) # number=24
# circuit end
for i in range(n):
prog.measure(input_qubit[i], classical[i])
return prog
if __name__ == '__main__':
a = "111"
b = "0"
f = lambda rep: bitwise_xor(bitwise_dot(a, rep), b)
prog = make_circuit(4,f)
backend = BasicAer.get_backend('qasm_simulator')
sample_shot =8000
info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
backend = FakeVigo()
circuit1 = transpile(prog,backend,optimization_level=2)
writefile = open("../data/startQiskit2987.csv","w")
print(info,file=writefile)
print("results end", file=writefile)
print(circuit1.__len__(),file=writefile)
print(circuit1,file=writefile)
writefile.close()
|
[
"wangjiyuan123@yeah.net"
] |
wangjiyuan123@yeah.net
|
bab103f06638d1b892180096303b9bf0e6475feb
|
92a7d8b7cef98a8af9e11c3a3e32537e42a5909f
|
/temp.py
|
bb3d3707f91aad9a5bb630f9d7575eccac34a46d
|
[] |
no_license
|
shanto268/focus-stacking
|
3b8f74ec7338f2664ac35f88f32bcdc304c011ff
|
e3e9dba2adfdb4a85a3a5b4a0c4f9f40e4240a9a
|
refs/heads/master
| 2023-04-11T04:36:16.236913
| 2021-04-22T20:08:43
| 2021-04-22T20:08:43
| 360,668,996
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,086
|
py
|
import os
import matplotlib.pyplot as plt
import numpy as np
def mkdir_p(dir_name):
    # minimal stand-in for the project's helper (assumption: it only needs to create the directory)
    os.makedirs(dir_name, exist_ok=True)
def getNeatTomograms(csvFile):
I_matrices_reshaped = np.loadtxt(csvFile)
filters = [
'none', 'nearest', 'bilinear', 'bicubic', 'spline16', 'spline36',
'hanning', 'hamming', 'hermite', 'kaiser', 'quadric', 'catrom',
'gaussian', 'bessel', 'mitchell', 'sinc', 'lanczos'
]
for j in range(len(filters)):
dir_name = "/Users/sshanto/hep/hep_daq/CAMAC/focus-stacking/images/mystery_same_axis/{}".format(
filters[j])
mkdir_p(dir_name)
k = 0
print("Using {} filter".format(filters[j]))
for i in I_matrices_reshaped:
i = i.reshape(21, 21)
fig = plt.figure(frameon=False)
ax = plt.Axes(fig, [0., 0., 1., 1.])
ax.set_axis_off()
fig.add_axes(ax)
imshowobj = ax.imshow(np.flip(i),
aspect='auto',
                              interpolation=filters[j])
imshowobj.set_clim(0.9, 1.2)
fname = "{}/img{}.png".format(dir_name, k)
fig.savefig(fname)
k += 1
|
[
"sadman-ahmed.shanto@ttu.edu"
] |
sadman-ahmed.shanto@ttu.edu
|
b1efdec2c8d1b55d5b698c8a63e0cd204b58a672
|
7950c4faf15ec1dc217391d839ddc21efd174ede
|
/problems/0947.0_Most_Stones_Removed_with_Same_Row_or_Column.py
|
cd63b6a1fee276fbeb5a6d85dbc93e57564f6f69
|
[] |
no_license
|
lixiang2017/leetcode
|
f462ecd269c7157aa4f5854f8c1da97ca5375e39
|
f93380721b8383817fe2b0d728deca1321c9ef45
|
refs/heads/master
| 2023-08-25T02:56:58.918792
| 2023-08-22T16:43:36
| 2023-08-22T16:43:36
| 153,090,613
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,275
|
py
|
'''
Runtime: 355 ms, faster than 64.84% of Python3 online submissions for Most Stones Removed with Same Row or Column.
Memory Usage: 14.6 MB, less than 72.25% of Python3 online submissions for Most Stones Removed with Same Row or Column.
'''
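# Approach (editor's note): treat every stone as a node and union stones that
# share a row or a column; each connected component of size k allows k - 1
# removals, so the answer is n - number_of_components.
# e.g. removeStones([[0,0],[0,1],[1,0],[1,2],[2,1],[2,2]]) -> 5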
from collections import defaultdict
from itertools import chain
from typing import List
class UF:
def __init__(self, n):
self.p = list(range(n))
self.set_count = n
def find(self, x):
if x == self.p[x]:
return x
self.p[x] = self.find(self.p[x])
return self.p[x]
def union(self, x, y):
px, py = self.find(x), self.find(y)
if px != py:
self.p[px] = py
self.set_count -= 1
class Solution:
def removeStones(self, stones: List[List[int]]) -> int:
# same y; x list -> index for stones
x_indice = defaultdict(list)
# same x; y list -> index for stones
y_indice = defaultdict(list)
n = len(stones)
for i, (x, y) in enumerate(stones):
x_indice[y].append(i)
y_indice[x].append(i)
uf = UF(n)
for _, indice in chain(x_indice.items(), y_indice.items()):
i = indice[0]
for j in indice:
uf.union(i, j)
return n - uf.set_count
|
[
"laoxing201314@outlook.com"
] |
laoxing201314@outlook.com
|
60348d5d0dcc591878cc15c5daf0973aebeb18b0
|
b15ccd04d3edfb4d6278a055422610be09c3916c
|
/4615_재미있는 오셀로 게임/sol3.py
|
1c09cf931c77a05419f6e8447e49b0e62a0ad716
|
[] |
no_license
|
hksoftcorn/Algorithm
|
d0f3a1a6009f47e4f391e568b29a3b51d6095d33
|
81b067b8105ba305172dd8271787c19f04d170ba
|
refs/heads/master
| 2023-05-12T21:15:34.668580
| 2021-06-08T07:57:04
| 2021-06-08T07:57:04
| 337,121,489
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,049
|
py
|
import sys
sys.stdin = open('sample_input.txt', 'r')
def check(col, row, color):
    # placeholder left unfinished in the original submission; not used below
    global arr, N
    pass
def reversi(col, row, color):
    global arr
    # up/down/left/right and the four diagonals
    dx = [-1, 1, 0, 0, -1, -1, 1, 1]
    dy = [0, 0, -1, 1, -1, 1, -1, 1]
    # place the stone, then flip every run of opponent stones enclosed between
    # it and another stone of the same colour (editor's completion of the
    # unfinished loop; coordinates are used exactly as read, as in the original)
    arr[col][row] = color
    for i in range(8):
        d_x = dx[i]
        d_y = dy[i]
        change_list = []
        nc, nr = col + d_x, row + d_y
        while 0 <= nc < N and 0 <= nr < N and arr[nc][nr] not in (0, color):
            change_list.append((nc, nr))
            nc, nr = nc + d_x, nr + d_y
        if 0 <= nc < N and 0 <= nr < N and arr[nc][nr] == color:
            for c, r in change_list:
                arr[c][r] = color
T = int(input())
for tc in range(1, T+1):
N, M = map(int, input().split())
arr = [[0] * N for _ in range(N)]
    # black : 1, white : 2
arr[N // 2][N // 2 - 1] = 1
arr[N // 2 - 1][N // 2] = 1
arr[N // 2][N // 2] = 2
arr[N // 2 - 1][N // 2 - 1] = 2
for _ in range(M):
col, row, color = map(int, input().split())
reversi(col, row, color)
print(arr)
black = 0
white = 0
for i in range(len(arr)):
for j in range(len(arr[0])):
if arr[i][j] == 1:
black += 1
elif arr[i][j] == 2:
white += 1
print('#{} {} {}'.format(tc, black, white))
|
[
"hksoftcorn.dev@gmail.com"
] |
hksoftcorn.dev@gmail.com
|
0c9583a3811439fcc5cda71bf42497a2544f7d7b
|
3c27b86f0165ab24e6b04d505e8471e032594f0b
|
/pythonAnimations/pyOpenGLChess/engineDirectory/oglc-env/lib/python2.7/site-packages/OpenGL/GLES1/IMG/user_clip_plane.py
|
b8f7827c07884c9fba7dd0ac8e86eab155bfd9a7
|
[
"LicenseRef-scancode-warranty-disclaimer",
"GPL-1.0-or-later",
"LicenseRef-scancode-other-copyleft",
"LGPL-2.1-or-later",
"GPL-3.0-only",
"LGPL-2.0-or-later",
"GPL-3.0-or-later",
"MIT"
] |
permissive
|
alexus37/AugmentedRealityChess
|
8b9ccdfffc8aee93a86a44b8ef53c034ec6a10d1
|
7f600ad153270feff12aa7aa86d7ed0a49ebc71c
|
refs/heads/master
| 2020-12-24T13:29:21.967833
| 2020-02-27T09:38:50
| 2020-02-27T09:38:50
| 31,264,034
| 1
| 1
|
MIT
| 2020-02-27T09:38:52
| 2015-02-24T14:36:34
|
Python
|
UTF-8
|
Python
| false
| false
| 938
|
py
|
'''OpenGL extension IMG.user_clip_plane
This module customises the behaviour of the
OpenGL.raw.GLES1.IMG.user_clip_plane to provide a more
Python-friendly API
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/IMG/user_clip_plane.txt
'''
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GLES1 import _types, _glgets
from OpenGL.raw.GLES1.IMG.user_clip_plane import *
from OpenGL.raw.GLES1.IMG.user_clip_plane import _EXTENSION_NAME
def glInitUserClipPlaneIMG():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
glClipPlanefIMG=wrapper.wrapper(glClipPlanefIMG).setInputArraySize(
'eqn', 4
)
glClipPlanexIMG=wrapper.wrapper(glClipPlanexIMG).setInputArraySize(
'eqn', 4
)
### END AUTOGENERATED SECTION
|
[
"alexlelidis@gmx.de"
] |
alexlelidis@gmx.de
|
1b93ddc9c6c1399eaa6bec5b56290bfafa63f575
|
f07a42f652f46106dee4749277d41c302e2b7406
|
/Data Set/bug-fixing-2/b93d3b23f51ef282c90dcf924475eb1fcaa4151d-<__getitem__>-fix.py
|
5adbfa85f57ff1603ef0188a487be24d8bd31338
|
[] |
no_license
|
wsgan001/PyFPattern
|
e0fe06341cc5d51b3ad0fe29b84098d140ed54d1
|
cc347e32745f99c0cd95e79a18ddacc4574d7faa
|
refs/heads/main
| 2023-08-25T23:48:26.112133
| 2021-10-23T14:11:22
| 2021-10-23T14:11:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,046
|
py
|
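# Editor's note: this accessor exposes a window of self.data between self.start
# and self.end; int, slice, ndarray and list keys are shifted by self.start,
# bounds-checked against self.end, and self.normalizer (when set) is applied to
# the selected rows before they are returned.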
def __getitem__(self, key):
if isinstance(key, slice):
(start, stop) = (key.start, key.stop)
if (start is None):
start = 0
if (stop is None):
stop = self.data.shape[0]
if ((stop + self.start) <= self.end):
idx = slice((start + self.start), (stop + self.start))
else:
raise IndexError
elif isinstance(key, int):
if ((key + self.start) < self.end):
idx = (key + self.start)
else:
raise IndexError
elif isinstance(key, np.ndarray):
if ((np.max(key) + self.start) < self.end):
idx = (self.start + key).tolist()
else:
raise IndexError
elif isinstance(key, list):
if ((max(key) + self.start) < self.end):
idx = [(x + self.start) for x in key]
else:
raise IndexError
else:
raise IndexError
if (self.normalizer is not None):
return self.normalizer(self.data[idx])
else:
return self.data[idx]
|
[
"dg1732004@smail.nju.edu.cn"
] |
dg1732004@smail.nju.edu.cn
|
62162d4ab0cfb57a6d6fefa1e484bb594dd92685
|
ec15546fbcf6a2523f08950f0da01ade4451524f
|
/example_project/testapp/streams.py
|
dd63d45ce22d39cd8fdddc679a4b03ef35dece2e
|
[
"BSD-3-Clause"
] |
permissive
|
seaw688/django-activity-stream
|
5869f3716e8d261296d520455f6412cbeca3c2db
|
b4e470444bcccb666bfb3711c0bced7863936ff9
|
refs/heads/master
| 2020-12-18T10:57:24.140620
| 2020-01-21T15:39:07
| 2020-01-21T15:39:07
| 235,354,434
| 0
| 0
|
BSD-3-Clause
| 2020-01-21T13:55:13
| 2020-01-21T13:55:12
| null |
UTF-8
|
Python
| false
| false
| 378
|
py
|
from datetime import datetime
from actstream.managers import ActionManager, stream
class MyActionManager(ActionManager):
@stream
def testfoo(self, obj, time=None):
if time is None:
time = datetime.now()
return obj.actor_actions.filter(timestamp__lte = time)
@stream
def testbar(self, verb):
return self.filter(verb=verb)
|
[
"justquick@gmail.com"
] |
justquick@gmail.com
|
3e0281fe6bf7c9011f21574c13f9ca744ce5653c
|
8efe56ee34c455a6b1336897f6d457acbc9c10f9
|
/examples/torch/pearl_half_cheetah_vel.py
|
0e468a052eb49096c983e7c4462f42d069776847
|
[
"MIT"
] |
permissive
|
neurips2020submission11699/metarl
|
ab18d11e708bf569d76cb2fab2bcce089badd111
|
ae4825d21478fa1fd0aa6b116941ea40caa152a5
|
refs/heads/master
| 2022-10-15T22:03:09.948673
| 2020-06-11T19:22:55
| 2020-06-11T19:30:58
| 268,410,657
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,501
|
py
|
#!/usr/bin/env python3
"""PEARL HalfCheetahVel example."""
import click
from metarl import wrap_experiment
from metarl.envs import MetaRLEnv, normalize
from metarl.envs.mujoco import HalfCheetahVelEnv
from metarl.experiment import LocalRunner
from metarl.experiment.deterministic import set_seed
from metarl.experiment.task_sampler import SetTaskSampler
from metarl.sampler import LocalSampler
from metarl.torch import set_gpu_mode
from metarl.torch.algos import PEARL
from metarl.torch.algos.pearl import PEARLWorker
from metarl.torch.embeddings import MLPEncoder
from metarl.torch.policies import ContextConditionedPolicy
from metarl.torch.policies import TanhGaussianMLPPolicy
from metarl.torch.q_functions import ContinuousMLPQFunction
@click.command()
@click.option('--num_epochs', default=500)
@click.option('--num_train_tasks', default=100)
@click.option('--num_test_tasks', default=30)
@click.option('--encoder_hidden_size', default=200)
@click.option('--net_size', default=300)
@click.option('--num_steps_per_epoch', default=2000)
@click.option('--num_initial_steps', default=2000)
@click.option('--num_steps_prior', default=400)
@click.option('--num_extra_rl_steps_posterior', default=600)
@click.option('--batch_size', default=256)
@click.option('--embedding_batch_size', default=100)
@click.option('--embedding_mini_batch_size', default=100)
@click.option('--max_path_length', default=200)
@wrap_experiment
def pearl_half_cheetah_vel(ctxt=None,
seed=1,
num_epochs=500,
num_train_tasks=100,
num_test_tasks=30,
latent_size=5,
encoder_hidden_size=200,
net_size=300,
meta_batch_size=16,
num_steps_per_epoch=2000,
num_initial_steps=2000,
num_tasks_sample=5,
num_steps_prior=400,
num_extra_rl_steps_posterior=600,
batch_size=256,
embedding_batch_size=100,
embedding_mini_batch_size=100,
max_path_length=200,
reward_scale=5.,
use_gpu=False):
"""Train PEARL with HalfCheetahVel environment.
Args:
ctxt (metarl.experiment.ExperimentContext): The experiment
configuration used by LocalRunner to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
num_epochs (int): Number of training epochs.
num_train_tasks (int): Number of tasks for training.
num_test_tasks (int): Number of tasks for testing.
latent_size (int): Size of latent context vector.
encoder_hidden_size (int): Output dimension of dense layer of the
context encoder.
net_size (int): Output dimension of a dense layer of Q-function and
value function.
meta_batch_size (int): Meta batch size.
num_steps_per_epoch (int): Number of iterations per epoch.
num_initial_steps (int): Number of transitions obtained per task before
training.
num_tasks_sample (int): Number of random tasks to obtain data for each
iteration.
num_steps_prior (int): Number of transitions to obtain per task with
z ~ prior.
num_extra_rl_steps_posterior (int): Number of additional transitions
to obtain per task with z ~ posterior that are only used to train
the policy and NOT the encoder.
batch_size (int): Number of transitions in RL batch.
embedding_batch_size (int): Number of transitions in context batch.
embedding_mini_batch_size (int): Number of transitions in mini context
batch; should be same as embedding_batch_size for non-recurrent
encoder.
max_path_length (int): Maximum path length.
reward_scale (int): Reward scale.
use_gpu (bool): Whether or not to use GPU for training.
"""
set_seed(seed)
encoder_hidden_sizes = (encoder_hidden_size, encoder_hidden_size,
encoder_hidden_size)
# create multi-task environment and sample tasks
env_sampler = SetTaskSampler(lambda: MetaRLEnv(
normalize(HalfCheetahVelEnv())))
env = env_sampler.sample(num_train_tasks)
test_env_sampler = SetTaskSampler(lambda: MetaRLEnv(
normalize(HalfCheetahVelEnv())))
runner = LocalRunner(ctxt)
# instantiate networks
augmented_env = PEARL.augment_env_spec(env[0](), latent_size)
qf = ContinuousMLPQFunction(env_spec=augmented_env,
hidden_sizes=[net_size, net_size, net_size])
vf_env = PEARL.get_env_spec(env[0](), latent_size, 'vf')
vf = ContinuousMLPQFunction(env_spec=vf_env,
hidden_sizes=[net_size, net_size, net_size])
inner_policy = TanhGaussianMLPPolicy(
env_spec=augmented_env, hidden_sizes=[net_size, net_size, net_size])
pearl = PEARL(
env=env,
policy_class=ContextConditionedPolicy,
encoder_class=MLPEncoder,
inner_policy=inner_policy,
qf=qf,
vf=vf,
num_train_tasks=num_train_tasks,
num_test_tasks=num_test_tasks,
latent_dim=latent_size,
encoder_hidden_sizes=encoder_hidden_sizes,
test_env_sampler=test_env_sampler,
meta_batch_size=meta_batch_size,
num_steps_per_epoch=num_steps_per_epoch,
num_initial_steps=num_initial_steps,
num_tasks_sample=num_tasks_sample,
num_steps_prior=num_steps_prior,
num_extra_rl_steps_posterior=num_extra_rl_steps_posterior,
batch_size=batch_size,
embedding_batch_size=embedding_batch_size,
embedding_mini_batch_size=embedding_mini_batch_size,
max_path_length=max_path_length,
reward_scale=reward_scale,
)
set_gpu_mode(use_gpu, gpu_id=0)
if use_gpu:
pearl.to()
runner.setup(algo=pearl,
env=env[0](),
sampler_cls=LocalSampler,
sampler_args=dict(max_path_length=max_path_length),
n_workers=1,
worker_class=PEARLWorker)
runner.train(n_epochs=num_epochs, batch_size=batch_size)
pearl_half_cheetah_vel()
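# Example invocation (editor's sketch; the flags map to the click options above,
# and the file name is assumed to match this example's path):
#   python pearl_half_cheetah_vel.py --num_epochs 100 --num_train_tasks 50 --net_size 300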
|
[
"neurips2020submission11699@gmail.com"
] |
neurips2020submission11699@gmail.com
|
684cc61e7ed14e6b451bd27e39aa48200081c2b3
|
d7ec9d21fd426f4037908d4cd1b0532d3ea4288d
|
/simdb/settings.py
|
21040d8caed58785045b8d33aebbfca021820f57
|
[] |
no_license
|
simmons-tech/simdb
|
53fdbfeb999ad2f31a442d31b90205ac1529ead9
|
be1938fb2139ee394dd9cfbcba8fd762b58c78d9
|
refs/heads/master
| 2020-04-22T10:06:28.846310
| 2016-10-13T03:36:51
| 2016-10-13T03:36:51
| 67,450,667
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,096
|
py
|
"""
Django settings for simdb project.
Generated by 'django-admin startproject' using Django 1.10.1.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
from . import database
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.getenv(
'DJANGO_SECRET_KEY',
'mh_!(!r7%^ysphmfhz2cmoshto3p#yjwa$!14&2r&pn#&9pi^w'
)
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
# Libraries
'oidc_auth',
# Apps
'home',
'people',
'sdb',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
AUTHENTICATION_BACKENDS = (
'oidc_auth.auth.OpenIDConnectBackend',
'django.contrib.auth.backends.ModelBackend',
)
LOGIN_URL = '/oidc/login/'
LOGIN_REDIRECT_URL = '/'
OIDC_AUTH = {
'DEFAULT_PROVIDER': {
'issuer': 'https://oidc.mit.edu/',
'authorization_endpoint': 'https://oidc.mit.edu/authorize',
'token_endpoint': 'https://oidc.mit.edu/token',
'userinfo_endpoint': 'https://oidc.mit.edu/userinfo',
'jwks_uri': 'https://oidc.mit.edu/jwk',
'signing_alg': 'RS256',
'client_id': os.getenv('OIDC_CLIENT_ID'),
'client_secret': os.getenv('OIDC_CLIENT_SECRET'),
},
'SCOPES': ('openid', 'profile', 'email'),
}
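# Editor's note: the OIDC client credentials are read from the environment
# (OIDC_CLIENT_ID, OIDC_CLIENT_SECRET), as is DJANGO_SECRET_KEY above; unset
# values fall back to None / the hard-coded development key.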
ROOT_URLCONF = 'simdb.urlsdummy'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(BASE_DIR, 'templates'),
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'simdb.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': database.config(),
'sdb': database.config('SDB'),
}
DATABASE_ROUTERS = ['sdb.utils.SdbRouter']
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'static'),
]
|
[
"ammubhave@gmail.com"
] |
ammubhave@gmail.com
|
b36824f0be40160f9c5f18035f4391791f1f4e17
|
2f122071e3cee6a150392edee2e4a2b0770309bb
|
/src/apps/search/views.py
|
e68ce7f9bef692437b25cd0939469393610d31bc
|
[] |
no_license
|
snicoper/ofervivienda
|
4ca63472ca50406469977057f53b91a81c30f57f
|
44b8d2934105ccbf02ff6c20896aa8c2b1746eaa
|
refs/heads/master
| 2020-04-01T09:30:31.640803
| 2018-10-15T08:23:59
| 2018-10-15T08:23:59
| 153,077,955
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,490
|
py
|
from django.shortcuts import Http404
from django.views.generic import FormView, ListView
from anuncios.mixins.views import ClassFromCategoryNameMixin
from anuncios.models import Anuncio
from anuncios.settings import ANUNCIO_PAGINATE_BY
from favorites.mixins.views import FavoriteListContextMixin
from utils.text import ucfirst
from .forms import BaseSearchForm, SearchAddressForm
class IndexSearchView(FormView):
    """Search form on the home page.
    When the user clicks search, SearchFiltersView processes it.
    """
template_name = 'search/index.html'
form_class = SearchAddressForm
class SearchFiltersView(FavoriteListContextMixin, ListView):
    """Form with search filters."""
template_name = 'search/search_filters.html'
paginate_by = ANUNCIO_PAGINATE_BY
context_object_name = 'anuncio_list'
model = Anuncio
    def dispatch(self, request, *args, **kwargs):
        """Check that the category from the URLConf exists.
        Two URLConf entries point to this view, so when a category is given
        it has to be checked for existence.
        If there is no category, 'piso' is shown by default.
        """
self.category = 'piso'
if self.kwargs.get('category'):
self.category = self.kwargs.get('category')
if not hasattr(self.model, self.category.upper()):
raise Http404
return super().dispatch(request, *args, **kwargs)
    def get_form(self):
        """Get the form based on self.category."""
form = BaseSearchForm.get_form_class(self.category)(self.request.GET or None)
form.initial['category'] = self.category.upper()
return form
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['form'] = self.get_form()
context['category_name'] = ucfirst(self.category)
return context
    def get_queryset(self, **kwargs):
        """Queryset for searches made from the filter form."""
form = self.get_form()
queryset = self.model.objects.none()
if form.is_valid() and form.cleaned_data.get('q'):
category = form.cleaned_data.get('category').upper()
type_anuncio = form.cleaned_data.get('type_anuncio')
metros_cuadrados = form.cleaned_data.get('metros_cuadrados', 0)
habitaciones = form.cleaned_data.get('habitaciones', 0)
banos = form.cleaned_data.get('banos', 0)
precio = form.cleaned_data.get('precio', 0)
genero = form.cleaned_data.get('genero', None)
fumar_piso = form.cleaned_data.get('permite_fumar_piso', False)
fumar_habitacion = form.cleaned_data.get('permite_fumar_habitacion', False)
internet = form.cleaned_data.get('internet', False)
address = form.cleaned_data.get('q')
queryset = Anuncio.get_model_class(category).objects.published(
location_string__search=address
)
if queryset.exists():
                # Filters applied to the search.
if type_anuncio:
queryset = queryset.filter(type_anuncio=type_anuncio)
if habitaciones and habitaciones > 0:
queryset = queryset.filter(habitaciones__gte=habitaciones)
if banos and banos > 0:
queryset = queryset.filter(banos__gte=banos)
if metros_cuadrados and metros_cuadrados > 0:
queryset = queryset.filter(metros_cuadrados__gte=metros_cuadrados)
if genero:
queryset = queryset.filter(genero=genero)
if fumar_piso:
queryset = queryset.filter(permite_fumar_piso=fumar_piso)
if fumar_habitacion:
queryset = queryset.filter(permite_fumar_habitacion=fumar_habitacion)
if internet:
queryset = queryset.filter(internet=internet)
if precio and precio > 0:
queryset = queryset.filter(precio__lte=precio)
return queryset.select_related('owner').prefetch_related('image_anuncio')
class SearchMapView(ClassFromCategoryNameMixin, FormView):
    """Advanced search form; shows a Google Maps map."""
template_name = 'search/search_map.html'
form_class = None
model = Anuncio
    def get(self, request, *args, **kwargs):
        """Check that the category exists in the URLConf."""
if not self.category or not hasattr(Anuncio, self.category):
raise Http404
return super().get(request, *args, **kwargs)
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
category_list = {k: v for k, v in Anuncio.CATEGORY_CHOICES}
context['title'] = category_list.get(self.category)
context['category_name'] = self.category
return context
    def get_form(self):
        """Get the form according to the category."""
form_class = BaseSearchForm.get_form_class(self.category)
initial = {'category': self.category}
if self.request.user.is_authenticated and self.request.user.user_location:
initial['latitude'] = self.request.user.user_location.latitude
initial['longitude'] = self.request.user.user_location.longitude
form = form_class(self.request.GET or None, initial=initial)
        # Remove the q field
del form.fields['q']
return form
|
[
"snicoper@gmail.com"
] |
snicoper@gmail.com
|
a8c9730f594067c302297e85bf27f363f28b7440
|
220535ba153c2fca757c264cab56d2ff835f4fd4
|
/02_K-近邻算法KNN/KNN.py
|
12d160bc82b9aaca1311f5b885db5491a719ccfc
|
[] |
no_license
|
cp4011/Machine-Learning
|
d41b5b8533b8d2210d1e69b944f8ea997c2488b4
|
7f4576278990f8a2de302e69eb6373d169f9fbc8
|
refs/heads/master
| 2020-04-04T15:41:53.912450
| 2019-03-26T15:37:35
| 2019-03-26T15:37:35
| 156,048,459
| 2
| 0
| null | 2018-11-04T06:14:57
| 2018-11-04T04:16:03
|
Python
|
UTF-8
|
Python
| false
| false
| 9,066
|
py
|
from numpy import *
from os import listdir
"""k-Nearest Neighbors (kNN)
Pros: high accuracy, insensitive to outliers, no assumptions about the input data.
Cons: high computational complexity, high space complexity.
Applicable data types: numeric and nominal values.
Pseudocode:
For every point of unknown class in the data set, do the following:
(1) compute the distance between the current point and every point in the labelled data set;
(2) sort the distances in increasing order;
(3) take the k points closest to the current point;
(4) count how often each class occurs among those k points;
(5) return the most frequent of those classes as the predicted class of the current point.
"""
# k-nearest neighbors classifier
def classify0(inputX, dataSet, labels, k):
    """
    Function: classify an input vector with the k-nearest neighbors rule
    Args:   inputX: input vector to classify (1xN)
            dataSet: training sample set (MxN)
            labels: label vector (1xM vector)
            k: number of neighbors to compare against (should be an odd number)
    Returns:    sortedClassCount[0][0]: predicted class
    """
    # dataSet.shape[0]: number of rows of dataSet; dataSet.shape[1]: number of columns; dataSet.shape: (rows, columns) tuple
    dataSetSize = dataSet.shape[0]
    # tile(A, B): repeat A B times (B may be an int or a tuple); this subtracts every row of dataSet from the vector inputX
    diffMat = tile(inputX, (dataSetSize, 1)) - dataSet  # numpy.tile() copies
    # sqDiffMat.sum(axis=0): sum each column
    # sqDiffMat.sum(axis=1): sum each row
    # sqDiffMat.sum(): sum the whole matrix
    distances = (diffMat**2).sum(axis=1)**0.5  # Euclidean distance ((x1-x2)^2 + (y1-y2)^2)^0.5
    sortedDistances = distances.argsort()  # indices that sort the distances in increasing order  # row-wise sum: sum(a,axis=1) or a.sum(axis=1)
    classCount = {}
    for i in range(k):      # tally the labels of the k nearest neighbors
        y = labels[sortedDistances[i]]      # dictionary key
        classCount[y] = classCount.get(y, 0) + 1        # dict.get(key, default=None)
    result = sorted(classCount.items(), key=lambda x: x[1], reverse=True)    # d.items() returns a list of pairs, e.g. [('a', 74), ('b', 90)]
    # an iterable of (key, value) tuples sorted by count, e.g. [('Google', 3), ('taobao', 2), ('Runoob', 1)]
    return result[0][0]     # note: return result[0] would be wrong, it returns the whole (label, count) tuple
# Example data set
def createDataSet():
group = array([[1.0, 1.1], [1.0, 1.0], [0, 0], [0, 0.1]])
labels = ['A', 'A', 'B', 'B']
return group, labels
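# Quick sanity check (editor's note; values follow from the toy data above):
#   group, labels = createDataSet()
#   classify0([0, 0], group, labels, 3)   # -> 'B'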
# Prepare data: parse records from a text file
def file2matrix(filename):
    f = open(filename)
    arrayOLines = f.readlines()
    numberOfLines = len(arrayOLines)
    returnMat = zeros((numberOfLines, 3))       # NumPy matrix to return
    classLabelVector = []       # list of class labels to return
    index = 0
    for line in arrayOLines:
        line = line.strip()     # strip leading/trailing newlines and whitespace
        listFromLine = line.split('\t')     # split the whole line on tab characters into a list
        returnMat[index] = listFromLine[0:3]        # keep the first three elements as the feature row
        classLabelVector.append(int(listFromLine[-1]))      # note: classLabelVector = listFromLine[-1] would be wrong
        index += 1
    return returnMat, classLabelVector      # return the training feature matrix and the class label vector
# Normalize feature values: newValue = (oldValue - min)/(max - min)
def autoNorm(dataSet):
    # column-wise minimum
    minValues = dataSet.min(0)      # the argument 0 takes the minimum of each column, giving a 1*3 vector
    maxVelues = dataSet.max(0)
    ranges = maxVelues - minValues
    m = len(dataSet)
    minValueMatrix = tile(minValues, (m, 1))        # replicate the 1*3 vector minValues into m rows
    rangesMatrix = tile(ranges, (m, 1))
    returnNorm = zeros(shape(dataSet))      # create the output matrix normDataSet
    returnNorm = dataSet - minValueMatrix
    normDataSet = returnNorm / rangesMatrix     # note: matrix division in NumPy linear algebra is linalg.solve(matA, matB)
    # return the normalized matrix, the range vector and the minimum-value vector
    return normDataSet, ranges, minValues       # the last two are needed later to normalize an input inputX
# Test harness for the dating-site classifier
def datingClassTest():
    ratio = 0.1     # use 10% of the data to test the classifier
    datingDataMat, datingLabels = file2matrix('datingTestSet2.txt')
    normMat, ranges, minVals = autoNorm(datingDataMat)      # normalize the feature values
    m = normMat.shape[0]
    num = int(m * ratio)        # number of test vectors
    errorCount = 0.0
    for i in range(num):        # classify the test set, printing each result
        # wrong: classifierResult = classify0(normMat[i, :], normMat[num, :], datingLabels[num, :], 3)
        # pass to the classifier; only the first argument (the test sample) changes between iterations
        classifierResult = classify0(normMat[i, :], normMat[num:m, :], datingLabels[num:m], 3)
        print("the classifier came back with: %d, the real answer is: %d" % (classifierResult, datingLabels[i]))
        if classifierResult != datingLabels[i]:
            errorCount += 1.0
    print("the errorCount is %d" % errorCount)
    print("the total error rate is: %f" % (errorCount / float(num)))        # note %f: with %d, 0.066 would print as 0
# Use kNN to predict how much someone will like a person on the dating site
def classifyPerson():
    resultList = ['not at all', 'in small doses', 'in large doses']
    percentTats = float(input("percentage of time spent playing video games?\n"))
    ffMiles = float(input("frequent flier miles earned per year?\n"))
    iceCream = float(input("liters of ice cream consumed per year?\n"))
    inArr = array([percentTats, ffMiles, iceCream])     # note the enclosing []
    datingDataMat, datingLabels = file2matrix('datingTestSet2.txt')
    normMat, ranges, minValues = autoNorm(datingDataMat)
    result = classify0((inArr-minValues)/ranges, normMat, datingLabels, 3)      # classify, normalizing the input inArr first
    print("You will probably like this person:", resultList[result - 1])        # note the - 1
# test
classifyPerson()
# kNN recognition of the handwritten digit data set (each 32*32 image becomes a 1*1024 vector)
def img2vector(filename):
    returnVect = zeros((1, 1024))       # note the double parentheses
    f = open(filename)
    for i in range(32):     # two nested loops over the 32*32 grid
        lineStr = f.readline()
        for j in range(32):
            returnVect[0, 32*i+j] = int(lineStr[j])     # store into the 1*1024 NumPy array; note int() and the 0 in [0, 32*i+j]
    return returnVect       # return the 1*1024 output vector
def handWritingClassTest():
    trainingFileList = listdir('trainingDigits')        # os.listdir() returns the names of the entries in the given directory (alphabetical order)
    m = len(trainingFileList)       # number of training files
    trainingMat = zeros((m, 1024))      # initialize the training matrix
    hwLabels = []
    for i in range(m):      # build the training set
        fileNameStr = trainingFileList[i]       # take each file name from the list in turn
        trainingMat[i, :] = img2vector('trainingDigits/%s' % fileNameStr)
        a = fileNameStr.split('.')[0]       # parse the class digit out of the file name
        b = int(a.split('_')[0])        # remember to convert the string with int()
        hwLabels.append(b)      # store the parsed class digit in the labels
    testFileList = listdir('testDigits')
    mTest = len(testFileList)
    errorCount = 0.0
    for i in range(mTest):
        fileNameStr = testFileList[i]
        vectorUnderTest = img2vector('testDigits/%s' % fileNameStr)
        a = fileNameStr.strip('.')[0]
        b = int(a.strip('_')[0])
        classifierResult = classify0(vectorUnderTest, trainingMat, hwLabels, 3)     # pass to the classifier
        # print("the classifier came back with: %d, the real answer is: %d" % (classifierResult, b))
        if classifierResult != b:
            errorCount += 1.0
    print("the total number of errors is: %d" % errorCount)
    print("the total error rate is: %f" % (errorCount / float(mTest)))
if __name__ == "__main__":
handWritingClassTest()
|
[
"957628963@qq.com"
] |
957628963@qq.com
|
4aea3afea6a9199732f404cf7c84b91ca28615e2
|
0ed050cd8d23bf4679860694bbc5d6f2e45f229a
|
/src/q13es/tests.py
|
0056b741b65be1b6294b2fc08682a819429d57ee
|
[] |
no_license
|
dsysme/hackforisrael
|
5404563d57f0034b1cefe09586abb6d65f8d5442
|
6081533fb8fbdecf28b4a4d85b7e9822d8969ca4
|
refs/heads/master
| 2020-12-13T20:54:55.744381
| 2013-11-03T23:34:07
| 2013-11-03T23:34:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,672
|
py
|
from django import forms
from django.contrib.auth import get_user_model
from django.test import TestCase
from django.utils.translation import gettext as _
from q13es.forms import create_form, split_form_file, parse_field, parse_form
from q13es.models import Answer
import os.path
User = get_user_model()
class Q13esTest(TestCase):
# CHOICES1 = (
# (1, 'One'),
# (2, 'Two'),
# (3, 'Three'),
# )
#
# def setUp(self):
# self.u = User.objects.create_user("foobar")
def test_split_form_file(self):
s = "Shalom!\n[abc]\nfoo\n[def?]\nbar"
result = split_form_file(s)
self.assertEquals(
('Shalom!', [('abc', True, 'foo'), ('def', False, 'bar')]),
result)
def test_split_form_file_no_fields(self):
s = "Shalom!"
result = split_form_file(s)
self.assertEquals(('Shalom!', []), result)
def test_split_form_file_empty(self):
s = ""
result = split_form_file(s)
self.assertEquals(('', []), result)
#
#
def test_parse_field_default(self):
s = "foo"
result = parse_field(s)
self.assertEquals((None, {'label': 'foo', 'help_text': ''}), result)
def test_parse_field_text(self):
s = "bar\n\ntext"
result = parse_field(s)
self.assertEquals(('text', {'label': 'bar', 'help_text': ''}), result)
def test_parse_field_text_with_help(self):
s = "bar\n\ntext\n\ncontent\n123\n\nfoo\n\nbar"
result = parse_field(s)
self.assertEquals(('text', {'help_text': 'content 123\nfoo\nbar', 'label': 'bar'}), result)
def test_parse_field(self):
s = """
What is your favourite color?
radio:
* red
* green
* blue
Please choose your
favourite color.
You can choose only one
"""
result = parse_field(s)
expected = 'radio', {
'label': 'What is your favourite color?',
'help_text': 'Please choose your favourite color.\nYou can choose only one',
'choices': [
('red', 'red'),
('green', 'green'),
('blue', 'blue')],
}
self.assertEquals(expected, result)
# def test_build_field(self):
#
# args = {
# 'label': 'What is your favourite color?',
# 'help_text': 'Please choose your favourite color.\nYou can choose only one',
# 'choices': [
# ('red', 'red'),
# ('green', 'green'),
# ('blue', 'blue')],
# }
#
# result = build_field(_('radio'), args)
# self.assertIsInstance(result, forms.ChoiceField)
# self.assertIsInstance(result.widget, forms.RadioSelect)
# self.assertEquals(args['label'], result.label)
# self.assertEquals(args['help_text'], result.help_text)
# self.assertEquals(args['choices'], result.choices)
#
# def test_build_field_simple(self):
#
# result = build_field(None, {'label': 'Foo'})
# self.assertIsInstance(result, forms.CharField)
# self.assertEquals('Foo', result.label)
# def test_simple_q13e(self):
# """
# Tests that a form can be saved in an answer
# """
# class BasicForm(forms.Form):
# title = forms.CharField()
# notes = forms.CharField(widget=forms.Textarea)
# optional_charfield = forms.CharField(required=False)
# vote = forms.IntegerField(min_value=1, max_value=5)
# optional_intfield = forms.IntegerField(required=False)
# # choices = forms.MultipleChoiceField(choices=self.CHOICES1)
#
# data = {
# 'title': 'a\nb\nc',
# 'notes': 'a\nb\nc',
# 'vote': '1',
# 'choices': '12',
# 'foo': 'bar',
# 'foo': 'bar',
# }
#
# f = BasicForm(data)
# self.assertTrue(f.is_valid())
#
# # print f.cleaned_data
#
# a = Answer.objects.create(q13e_slug='basic', user=self.u, data=f.cleaned_data)
#
# self.assertEquals(1, len(self.u.answers.all()))
#
# self.assertEquals(['basic'], [o.q13e_slug for o in self.u.answers.all()])
def test_create_form(self):
"""
Tests that a form can be created from field definitions
"""
info = (
('title', (forms.CharField, {})),
('description', (forms.CharField, {
'widget': forms.Textarea,
})),
('flavour', (forms.ChoiceField, {
'widget': forms.RadioSelect,
'choices': (
(1, "A"),
(2, "B"),
)
})),
)
form_class = create_form(info)
self.assertIn(forms.BaseForm, form_class.__bases__)
form = form_class({})
self.assertEquals(3, len(form.fields))
self.assertEquals(3, len(form.errors))
form = form_class({
'title': ':-)',
'description': 'foo',
'flavour': '3'
})
self.assertEquals(1, len(form.errors))
s = forms.ChoiceField.default_error_messages['invalid_choice'] % {'value': '3'}
self.assertEquals(s.decode('utf8'), form.errors['flavour'][0])
form = form_class({
'title': ':-)',
'description': 'foo',
'flavour': '1'
})
self.assertEquals(0, len(form.errors))
self.assertEquals(form.cleaned_data, {
'title': ':-)',
'description': 'foo',
'flavour': '1'
})
def test_parse_form(self):
with open(os.path.join(os.path.dirname(__file__), 'test_form.txt')) as f:
text = f.read()
form_class = parse_form(text)
self.assertIn(forms.BaseForm, form_class.__bases__)
form = form_class()
self.assertEquals(4, len(form.fields))
|
[
"udioron@gmail.com"
] |
udioron@gmail.com
|
8cdbd7afe50b400f3a05da8118e35216bc8e369c
|
fa0d15b56a0d2bfd33a3bc5ca48c02d9b389af3d
|
/ggrc-core/src/ggrc/app.py
|
794c4eb71bb29963b8b85425288d1423f465950a
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
Bryan-Guner-Backup/WULF
|
5f50ca4069f267e2ce0b1b6537f340ead5d78984
|
59d70ce2d4fe148e78ed112746d6963eeccaa6e9
|
refs/heads/master
| 2023-08-22T13:15:51.457279
| 2021-10-29T08:16:03
| 2021-10-29T08:16:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,667
|
py
|
# Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Sets up Flask app."""
import re
from flask import Flask
from flask.ext.sqlalchemy import get_debug_queries
from flask.ext.sqlalchemy import SQLAlchemy
from tabulate import tabulate
from ggrc import contributions # noqa: imported so it can be used with getattr
from ggrc import db
from ggrc import extensions
from ggrc import notifications
from ggrc import settings
app = Flask('ggrc', instance_relative_config=True) # noqa: valid constant name
app.config.from_object(settings)
if "public_config" not in app.config:
app.config.public_config = {}
for key in settings.exports:
app.config.public_config[key] = app.config[key]
# Configure Flask-SQLAlchemy for app
db.app = app
db.init_app(app)
@app.before_request
def _ensure_session_teardown():
"""Ensure db.session is correctly removed
Occasionally requests are terminated without calling the teardown methods,
namely with DeadlineExceededError on App Engine.
"""
if db.session.registry.has():
db.session.remove()
def init_models(app_):
import ggrc.models
ggrc.models.init_app(app_)
def configure_flask_login(app_):
import ggrc.login
ggrc.login.init_app(app_)
def configure_webassets(app_):
"""Add basic webassets configuration."""
from ggrc import assets
app_.jinja_env.add_extension('webassets.ext.jinja2.assets')
app_.jinja_env.assets_environment = assets.environment
def configure_jinja(app_):
"""Add basic jinja configuration."""
app_.jinja_env.add_extension('jinja2.ext.autoescape')
app_.jinja_env.autoescape = True
app_.jinja_env.add_extension('jinja2.ext.with_')
app_.jinja_env.add_extension('hamlpy.ext.HamlPyExtension')
def init_services(app_):
import ggrc.services
ggrc.services.init_all_services(app_)
def init_views(app_):
import ggrc.views
ggrc.views.init_all_views(app_)
def init_extension_blueprints(app_):
for extension_module in extensions.get_extension_modules():
if hasattr(extension_module, 'blueprint'):
app_.register_blueprint(extension_module.blueprint)
def init_indexer():
import ggrc.fulltext
ggrc.indexer = ggrc.fulltext.get_indexer()
def init_permissions_provider():
from ggrc.rbac import permissions
permissions.get_permissions_provider()
def init_extra_listeners():
from ggrc.automapper import register_automapping_listeners
register_automapping_listeners()
def _enable_debug_toolbar():
"""Enable flask debug toolbar for benchmarking requests."""
if getattr(settings, "FLASK_DEBUGTOOLBAR", False):
from flask_debugtoolbar import DebugToolbarExtension
DebugToolbarExtension(app)
def _enable_jasmine():
"""Set jasmine sources and specs if it's enabled.
Jasmine is used for javascript tests and is not installed on the production
environment, that is why we must check if it enabled before tying to import
it.
"""
if getattr(settings, "ENABLE_JASMINE", False):
from flask.ext.jasmine import Asset
from flask.ext.jasmine import Jasmine
# Configure Flask-Jasmine, for dev mode unit testing
jasmine = Jasmine(app)
jasmine.sources(
Asset("dashboard-js"),
Asset("dashboard-js-spec-helpers"),
Asset("dashboard-js-templates"))
jasmine.specs(
Asset("dashboard-js-specs"))
def _display_sql_queries():
"""Set up display database queries
This function makes sure we display the sql queries if the record setting is
enabled.
"""
report_type = getattr(settings, "SQLALCHEMY_RECORD_QUERIES", False)
valid_types = ('count', 'slow', 'all')
if report_type:
if report_type not in valid_types:
raise Exception("""Invalid SQLALCHEMY_RECORD_QUERIES value specified.
Possible options: {}""".format(', '.join(valid_types)))
# pylint: disable=unused-variable
@app.after_request
def display_queries(response):
"""Display database queries
Prints out SQL queries, EXPLAINs for queries above slow_threshold, and
a final count of queries after every HTTP request
"""
slow_threshold = 0.5 # EXPLAIN queries that ran for more than 0.5s
queries = get_debug_queries()
app.logger.info("Total queries: %s", len(queries))
if report_type == 'count':
return response
# We have to copy the queries list below otherwise queries executed
# in the for loop will be appended causing an endless loop
for query in queries[:]:
if report_type == 'slow' and query.duration < slow_threshold:
continue
app.logger.info(
"%.8f %s\n%s\n%s",
query.duration,
query.context,
query.statement,
query.parameters)
is_select = bool(re.match('SELECT', query.statement, re.I))
if query.duration > slow_threshold and is_select:
try:
statement = "EXPLAIN " + query.statement
engine = SQLAlchemy().get_engine(app)
result = engine.execute(statement, query.parameters)
app.logger.info(tabulate(result.fetchall(), headers=result.keys()))
except Exception as err: # pylint: disable=broad-except
app.logger.warning("Statement failed: %s", statement)
app.logger.exception(err)
return response
init_models(app)
configure_flask_login(app)
configure_webassets(app)
configure_jinja(app)
init_services(app)
init_views(app)
init_extension_blueprints(app)
init_indexer()
init_permissions_provider()
init_extra_listeners()
notifications.register_notification_listeners()
_enable_debug_toolbar()
_enable_jasmine()
_display_sql_queries()
|
[
"bgoonz4212@gmail.com"
] |
bgoonz4212@gmail.com
|
1da2545df8d545a097b32fdc32594028de41246c
|
d8346eaf1c910ff02c7b243692a2766b8b089f06
|
/for-post/python-standard-library-threading/s2-timer-loop/loop.py
|
fb38dc289d93392d4877804a4926fc8e4cb27eda
|
[] |
no_license
|
dustinpfister/examples-python
|
55304c99ba3af82cd8784ee98745546632155c68
|
a9910ee05d4df524f951f61b6d9778531a58ccbf
|
refs/heads/master
| 2023-03-06T21:10:18.888654
| 2021-02-26T20:32:52
| 2021-02-26T20:32:52
| 318,595,538
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 203
|
py
|
import threading
def printMess():
    print('hello')
def loop(func, sec):
    # run func every `sec` seconds by re-arming a one-shot Timer on each tick
    def wrapper():
        loop(func, sec)
        func()
    t = threading.Timer(sec, wrapper)
    t.start()
    return t
# note: the returned Timer only controls the first tick; each tick schedules a new one
t = loop(printMess, 1)
|
[
"dustin.pfister@gmail.com"
] |
dustin.pfister@gmail.com
|
915ce9e4b26a80de0c7696c731b048a00f6e8272
|
adbcfc92d71e9be39ccef49b3aee0d06cdcb0af1
|
/alembic/versions/48e2185b4a81_added_foreign_key_to_the_line_stops.py
|
50b33102a1ea440040fdc9b085a9132fe3c8f2b6
|
[] |
no_license
|
linxaddict/mpkscraper
|
67e5cf5806974d4fe78f1c892f12870008951a91
|
a3b48a16f5db448692751c7de1f443dc3b0b30e7
|
refs/heads/master
| 2020-06-15T03:33:19.935314
| 2016-12-17T21:07:20
| 2016-12-17T21:07:20
| 75,334,083
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 541
|
py
|
"""added foreign key to the line_stops
Revision ID: 48e2185b4a81
Revises: 44516a7495c7
Create Date: 2016-12-17 21:50:48.779742
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = '48e2185b4a81'
down_revision = '44516a7495c7'
branch_labels = None
depends_on = None
def upgrade():
op.create_foreign_key('fk_stop_line_stop', 'line_stops', 'stops', ['line_id'], ['id'])
def downgrade():
    op.drop_constraint('fk_stop_line_stop', 'line_stops', type_='foreignkey')
|
[
"mprzepiorkowski@gmail.com"
] |
mprzepiorkowski@gmail.com
|
75809450259f12afa6cc106612dff03bb1da556b
|
6958566f07393cc0dbdc3d8cefdcc741153f721b
|
/flask0518/utils/settings.py
|
678992a18fc5ba456baa786cf55a1bc20675649f
|
[] |
no_license
|
iversongit/20180518
|
6ffeb7f91c7b66f07c9f4f0df7616accec473ba1
|
9ae2fb839815155c4784936a8c099cc98ebc15cf
|
refs/heads/master
| 2020-03-17T19:55:33.897986
| 2018-05-19T02:11:41
| 2018-05-19T02:11:41
| 133,884,415
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 796
|
py
|
import os
from utils.functions import get_db_url
# Holds all configuration-related settings, similar to Django's settings.py
# Base path
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Page templates
templates_dir = os.path.join(BASE_DIR,"templates")
# Static files
static_dir = os.path.join(BASE_DIR,"static")
# Database connection settings
DATABASE = {
    # user
    'USER':'root',
    # password
    'PASSWORD':'5201314',
    # port
    'PORT':'3306',
    # host, 127.0.0.1
    'HOST':'127.0.0.1',
    # database backend
    'DB':'mysql',
    # driver
    # pymysql --> driver -- Python 3 has no MySQLdb, so pymysql is used as the bridge to MySQL
    'DRIVER':'pymysql',
    # database name
    'NAME': 'flask_20180518'
}
# Database connection URL
SQLALCHEMY_DATABASE_URI = get_db_url(DATABASE)
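# Editor's note (assumption about utils.functions.get_db_url): settings like these
# are typically assembled into an SQLAlchemy URL of the form
#   mysql+pymysql://USER:PASSWORD@HOST:PORT/NAME
# e.g. mysql+pymysql://root:5201314@127.0.0.1:3306/flask_20180518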
|
[
"1564329410@qq.com"
] |
1564329410@qq.com
|
fde93cf368188861818c16d04c7084c0c3f7a84a
|
52b5773617a1b972a905de4d692540d26ff74926
|
/.history/fizz_20200607124449.py
|
500600f0a3fd3de68b394839af6489449e8e04aa
|
[] |
no_license
|
MaryanneNjeri/pythonModules
|
56f54bf098ae58ea069bf33f11ae94fa8eedcabc
|
f4e56b1e4dda2349267af634a46f6b9df6686020
|
refs/heads/master
| 2022-12-16T02:59:19.896129
| 2020-09-11T12:05:22
| 2020-09-11T12:05:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 445
|
py
|
def fizz(num):
    newNumber = []
    for i in range(1, num + 1):
        newNumber.append(i)
    for j in range(len(newNumber)):
        # the combined 3-and-5 case must be checked first, otherwise it is
        # shadowed by the plain %3 branch
        if newNumber[j] % 3 == 0 and newNumber[j] % 5 == 0:
            newNumber[j] = "FizzBuzz"
        elif newNumber[j] % 3 == 0:
            newNumber[j] = "Fizz"
        elif newNumber[j] % 5 == 0:
            newNumber[j] = "Buzz"
        else:
            newNumber[j] = newNumber[j]
    print(newNumber)
fizz(8)
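# fizz(8) prints: [1, 2, 'Fizz', 4, 'Buzz', 'Fizz', 7, 8]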
|
[
"mary.jereh@gmail.com"
] |
mary.jereh@gmail.com
|
4965dbda8f463902986b1c3f3580791ed62f75cd
|
99c4d4a6592fded0e8e59652484ab226ac0bd38c
|
/code/batch-1/vse-naloge-brez-testov/DN4-M-96.py
|
e73ef8577fed38746d0fd9c7193bff1d7ac9d17f
|
[] |
no_license
|
benquick123/code-profiling
|
23e9aa5aecb91753e2f1fecdc3f6d62049a990d5
|
0d496d649247776d121683d10019ec2a7cba574c
|
refs/heads/master
| 2021-10-08T02:53:50.107036
| 2018-12-06T22:56:38
| 2018-12-06T22:56:38
| 126,011,752
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,382
|
py
|
# Write your functions here:
from math import *
def koordinate(ime, kraji):
    s = ()
    for i, x, y in kraji:
        if ime == i:
            s = s + (x, y)
            return s
    # only give up after every place has been checked (the original returned
    # None as soon as the first name did not match)
    return None
def razdalja_koordinat(x1, y1, x2, y2):
razdalja = sqrt((x1 - x2) ** 2 + (y1 - y2) ** 2)
return razdalja
def razdalja(ime1, ime2, kraji):
xprva, yprva = koordinate(ime1, kraji)
xdruga, ydruga = koordinate(ime2, kraji)
return razdalja_koordinat(xprva, yprva, xdruga, ydruga)
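# e.g. (hypothetical data): kraji = [('Brnik', 10, 20), ('Lipica', 13, 24)]
# razdalja('Brnik', 'Lipica', kraji) -> 5.0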
def v_dometu(ime, domet, kraji):
z = []
for imeTest, xTest, yTest in kraji:
if(ime != imeTest):
if razdalja(ime, imeTest, kraji) <= domet:
z.append(imeTest)
return z
def najbolj_oddaljeni(ime, imena, kraji):
najvecjaTest = 0
for ime1 in imena:
razTest = razdalja(ime, ime1, kraji)
if(razTest > najvecjaTest):
najvecjaTest = razTest
izpis = ime1
return izpis
def zalijemo(ime, domet, kraji):
imena = v_dometu(ime, domet, kraji)
return najbolj_oddaljeni(ime, imena, kraji)
def presek(s1, s2):
u = []
for a in s1:
for b in s2:
if a == b:
u.append(a)
return u
def skupno_zalivanje(ime1, ime2, domet, kraji):
c = v_dometu(ime1, domet, kraji)
d = v_dometu(ime2, domet, kraji)
e = presek(c, d)
return e
|
[
"lenart.motnikar@gmail.com"
] |
lenart.motnikar@gmail.com
|
f6f0b4e89f6ced5ee2a6039dd64dee6a3c957630
|
6a2b1b1d6092a8d2492a6677b6fd19d27b0f461f
|
/14-Python-Advance/06-python-regex/04-Match-Object/02-Match-Attributes/02-Match-pos-endpos-attribute.py
|
f9cd514798dea81ea6dd5771c3af7ef8e2ded8ef
|
[] |
no_license
|
Uttam1982/PythonTutorial
|
3cfbe237199e048967502f3d0c1936f2b878cb87
|
8e28cc5c4be5826a011059db66f6952871248c82
|
refs/heads/master
| 2022-12-17T18:47:28.397383
| 2020-09-22T08:55:23
| 2020-09-22T08:55:23
| 288,524,784
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,358
|
py
|
# match.pos
# match.endpos
#--------------------------------------------------------------------------------------------
# Contain the effective values of <pos> and <endpos> for the search.
# Remember that some methods, when invoked on a compiled regex,
# accept optional <pos> and <endpos> arguments that limit the search
# to a portion of the specified search string. These values are accessible
# from the match object with the .pos and .endpos attributes
#--------------------------------------------------------------------------------------------
import re
re_obj = re.compile(r'\d+')
m = re_obj.search('foo123bar', 2, 7)
# Output : <re.Match object; span=(3, 6), match='123'>
print(m)
# Output : 2,7
print(m.pos, m.endpos)
# If the <pos> and <endpos> arguments aren’t included in the call,
# either because they were omitted or because the function in question
# doesn’t accept them, then the .pos and .endpos attributes effectively
# indicate the start and end of the string:
#--------------------------------------------------------------------------------------------
re_obj = re.compile(r'\d+')
m = re_obj.search('foo123bar')
# Output : <re.Match object; span=(3, 6), match='123'>
print(m)
# Output : 0,9
print(m.pos, m.endpos)
#--------------------------------------------------------------------------------------------
|
[
"uttampat@gmail.com"
] |
uttampat@gmail.com
|
de72767d2ff972eb773a57d625072fad3a34062a
|
fb605733922e6e65c3a8537e0eddca18a98bd161
|
/bin/sql-to-markdown
|
bb2563e15394de6c4e827ed47f792398a8323fff
|
[] |
no_license
|
EarthCubeGeochron/Sparrow-docs
|
5f3c73851d8a0a4ddb3b019c81b7e6066d3988a9
|
ad6ad2e337e98170fb33a5384640ff01c9462f03
|
refs/heads/master
| 2020-04-29T07:02:42.555203
| 2019-04-18T22:25:32
| 2019-04-18T22:25:32
| 175,939,002
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 626
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import fileinput
import pyparsing
from textwrap import dedent
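# Usage sketch (editor's note): fileinput reads the files named on the command
# line, or stdin when none are given, e.g.
#   sql-to-markdown schema.sql > schema.md
# Code outside /* ... */ blocks is wrapped in ~~~sql fences; block comments
# become plain markdown between the fences.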
print("~~~sql")
in_doc = False
docs = ""
for line in fileinput.input():
docstart = "/*" in line
docend = "*/" in line
if docstart:
in_doc = True
if not in_doc:
print(line, end="")
continue
if in_doc:
docs += line
if docend:
in_doc = False
if not in_doc and docs != "":
docs = (dedent(docs)
.replace("/*","\n~~~\n\n")
.replace("*/","\n\n~~~sql\n"))
print(dedent(docs), end="")
docs = ""
print("~~~")
|
[
"dev@davenquinn.com"
] |
dev@davenquinn.com
|
|
2c3d5b4e9124467eb3bf4af0a2652746057a9351
|
45ba55b4fbdaf1657fde92beaeba4f173265afcd
|
/tests/b.py
|
f646816b0c9a3e696aef98558f548199055989fd
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
strawberry-graphql/strawberry
|
af96afd4edd1788c59e150597a12501fbc7bf444
|
6d86d1c08c1244e00535840d9d87925431bc6a1c
|
refs/heads/main
| 2023-08-30T03:34:12.929874
| 2023-08-24T12:01:09
| 2023-08-24T12:01:09
| 162,690,887
| 3,408
| 529
|
MIT
| 2023-09-14T21:49:44
| 2018-12-21T08:56:55
|
Python
|
UTF-8
|
Python
| false
| false
| 779
|
py
|
from __future__ import annotations
from typing import TYPE_CHECKING, Optional
from typing_extensions import Annotated
import strawberry
if TYPE_CHECKING:
from tests.a import A
@strawberry.type
class B:
id: strawberry.ID
@strawberry.field
async def a(self) -> Annotated[A, strawberry.lazy("tests.a"), object()]:
from tests.a import A
return A(id=self.id)
@strawberry.field
async def optional_a(
self,
) -> Annotated[A, strawberry.lazy("tests.a"), object()] | None:
from tests.a import A
return A(id=self.id)
@strawberry.field
async def optional_a2(
self,
) -> Optional[Annotated[A, strawberry.lazy("tests.a"), object()]]:
from tests.a import A
return A(id=self.id)
|
[
"noreply@github.com"
] |
strawberry-graphql.noreply@github.com
|
43e50a203f2e925342a83dcf7976dde81970e1e1
|
bc441bb06b8948288f110af63feda4e798f30225
|
/ucpro_sdk/model/notify/operation_log_pb2.pyi
|
fc93c0734ad12ed730d7c1a374e421a9a3cbc2ca
|
[
"Apache-2.0"
] |
permissive
|
easyopsapis/easyops-api-python
|
23204f8846a332c30f5f3ff627bf220940137b6b
|
adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0
|
refs/heads/master
| 2020-06-26T23:38:27.308803
| 2020-06-16T07:25:41
| 2020-06-16T07:25:41
| 199,773,131
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,962
|
pyi
|
# @generated by generate_proto_mypy_stubs.py. Do not edit!
import sys
from google.protobuf.descriptor import (
Descriptor as google___protobuf___descriptor___Descriptor,
)
from google.protobuf.internal.containers import (
RepeatedCompositeFieldContainer as google___protobuf___internal___containers___RepeatedCompositeFieldContainer,
RepeatedScalarFieldContainer as google___protobuf___internal___containers___RepeatedScalarFieldContainer,
)
from google.protobuf.message import (
Message as google___protobuf___message___Message,
)
from google.protobuf.struct_pb2 import (
Struct as google___protobuf___struct_pb2___Struct,
)
from typing import (
Iterable as typing___Iterable,
Optional as typing___Optional,
Text as typing___Text,
Union as typing___Union,
)
from typing_extensions import (
Literal as typing_extensions___Literal,
)
from ucpro_sdk.model.notify.app_pb2 import (
App as ucpro_sdk___model___notify___app_pb2___App,
)
from ucpro_sdk.model.notify.deploy_info_pb2 import (
DeployInfo as ucpro_sdk___model___notify___deploy_info_pb2___DeployInfo,
)
from ucpro_sdk.model.notify.device_pb2 import (
Device as ucpro_sdk___model___notify___device_pb2___Device,
)
builtin___bool = bool
builtin___bytes = bytes
builtin___float = float
builtin___int = int
if sys.version_info < (3,):
builtin___buffer = buffer
builtin___unicode = unicode
class OperationLog(google___protobuf___message___Message):
DESCRIPTOR: google___protobuf___descriptor___Descriptor = ...
system = ... # type: typing___Text
topic = ... # type: typing___Text
event_id = ... # type: typing___Text
parent_event_id = ... # type: typing___Text
event = ... # type: typing___Text
status = ... # type: typing___Text
operator = ... # type: typing___Text
target_name = ... # type: typing___Text
target_id = ... # type: typing___Text
target_category = ... # type: typing___Text
notifiers = ... # type: google___protobuf___internal___containers___RepeatedScalarFieldContainer[typing___Text]
trigger = ... # type: typing___Text
memo = ... # type: typing___Text
app_id = ... # type: typing___Text
cluster_id = ... # type: typing___Text
package_id = ... # type: typing___Text
package_name = ... # type: typing___Text
version_id = ... # type: typing___Text
version_name = ... # type: typing___Text
content = ... # type: typing___Text
data_name = ... # type: typing___Text
ip = ... # type: typing___Text
ip_list = ... # type: google___protobuf___internal___containers___RepeatedScalarFieldContainer[typing___Text]
subject = ... # type: typing___Text
mtime = ... # type: builtin___int
ctime = ... # type: builtin___int
@property
def parent_event(self) -> OperationLog: ...
@property
def device_list(self) -> google___protobuf___internal___containers___RepeatedCompositeFieldContainer[ucpro_sdk___model___notify___device_pb2___Device]: ...
@property
def app_list(self) -> google___protobuf___internal___containers___RepeatedCompositeFieldContainer[ucpro_sdk___model___notify___app_pb2___App]: ...
@property
def ext_info(self) -> google___protobuf___struct_pb2___Struct: ...
@property
def deploy_info(self) -> ucpro_sdk___model___notify___deploy_info_pb2___DeployInfo: ...
def __init__(self,
*,
system : typing___Optional[typing___Text] = None,
topic : typing___Optional[typing___Text] = None,
event_id : typing___Optional[typing___Text] = None,
parent_event_id : typing___Optional[typing___Text] = None,
parent_event : typing___Optional[OperationLog] = None,
event : typing___Optional[typing___Text] = None,
status : typing___Optional[typing___Text] = None,
device_list : typing___Optional[typing___Iterable[ucpro_sdk___model___notify___device_pb2___Device]] = None,
operator : typing___Optional[typing___Text] = None,
target_name : typing___Optional[typing___Text] = None,
target_id : typing___Optional[typing___Text] = None,
target_category : typing___Optional[typing___Text] = None,
app_list : typing___Optional[typing___Iterable[ucpro_sdk___model___notify___app_pb2___App]] = None,
ext_info : typing___Optional[google___protobuf___struct_pb2___Struct] = None,
notifiers : typing___Optional[typing___Iterable[typing___Text]] = None,
trigger : typing___Optional[typing___Text] = None,
memo : typing___Optional[typing___Text] = None,
app_id : typing___Optional[typing___Text] = None,
cluster_id : typing___Optional[typing___Text] = None,
package_id : typing___Optional[typing___Text] = None,
package_name : typing___Optional[typing___Text] = None,
version_id : typing___Optional[typing___Text] = None,
version_name : typing___Optional[typing___Text] = None,
deploy_info : typing___Optional[ucpro_sdk___model___notify___deploy_info_pb2___DeployInfo] = None,
content : typing___Optional[typing___Text] = None,
data_name : typing___Optional[typing___Text] = None,
ip : typing___Optional[typing___Text] = None,
ip_list : typing___Optional[typing___Iterable[typing___Text]] = None,
subject : typing___Optional[typing___Text] = None,
mtime : typing___Optional[builtin___int] = None,
ctime : typing___Optional[builtin___int] = None,
) -> None: ...
if sys.version_info >= (3,):
@classmethod
def FromString(cls, s: builtin___bytes) -> OperationLog: ...
else:
@classmethod
def FromString(cls, s: typing___Union[builtin___bytes, builtin___buffer, builtin___unicode]) -> OperationLog: ...
def MergeFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
def CopyFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
def HasField(self, field_name: typing_extensions___Literal[u"deploy_info",b"deploy_info",u"ext_info",b"ext_info",u"parent_event",b"parent_event"]) -> builtin___bool: ...
def ClearField(self, field_name: typing_extensions___Literal[u"app_id",b"app_id",u"app_list",b"app_list",u"cluster_id",b"cluster_id",u"content",b"content",u"ctime",b"ctime",u"data_name",b"data_name",u"deploy_info",b"deploy_info",u"device_list",b"device_list",u"event",b"event",u"event_id",b"event_id",u"ext_info",b"ext_info",u"ip",b"ip",u"ip_list",b"ip_list",u"memo",b"memo",u"mtime",b"mtime",u"notifiers",b"notifiers",u"operator",b"operator",u"package_id",b"package_id",u"package_name",b"package_name",u"parent_event",b"parent_event",u"parent_event_id",b"parent_event_id",u"status",b"status",u"subject",b"subject",u"system",b"system",u"target_category",b"target_category",u"target_id",b"target_id",u"target_name",b"target_name",u"topic",b"topic",u"trigger",b"trigger",u"version_id",b"version_id",u"version_name",b"version_name"]) -> None: ...
|
[
"service@easyops.cn"
] |
service@easyops.cn
|
010368ccaa1fe15b704c69b83731653d4d069dff
|
9709da49cf74e0f1248848e029c5084af22e4e27
|
/e2yun_addons/odoo12/wx_tools/models/res_partner.py
|
56975b035d93ea387a4aa843381cea659c9a17e1
|
[] |
no_license
|
guwenfeng/filelib
|
27ff44ecac988bbf1ed30e7e4d33bdfbc790393f
|
bc9003b47f06d7f10c0dc93034179d0fafccc683
|
refs/heads/master
| 2020-06-30T21:27:49.204212
| 2019-08-06T09:58:32
| 2019-08-06T09:58:32
| 200,955,551
| 0
| 1
| null | 2019-08-07T02:17:12
| 2019-08-07T02:17:12
| null |
UTF-8
|
Python
| false
| false
| 3,851
|
py
|
# coding=utf-8
import logging
from geopy.distance import vincenty
from odoo import models, fields, api
_logger = logging.getLogger(__name__)
class WXResPartner(models.Model):
_inherit = 'res.partner'
wxcorp_user_id = fields.Many2one('wx.corpuser', '关联企业号用户')
wx_user_id = fields.Many2one('wx.user', '微信公众用户')
wxlatitude = fields.Float('纬度', digits=(10, 7))
wxlongitude = fields.Float('经度', digits=(10, 7))
wxprecision = fields.Float('位置精度', digits=(10, 7))
location_write_date = fields.Datetime("更新时间", readonly=True)
wx_address = fields.Char(u'地址', compute='_get_address')
near_team = fields.Char(u'附近门店', compute='_get_near_team')
@api.one
def _get_near_team(self):
_logger.info(self)
@api.one
def _get_address(self):
        # Get the user's location
from ..controllers import amapapi
if self.wxlatitude and self.wxlongitude:
wx_location = '%s,%s' % (self.wxlongitude, self.wxlatitude)
convert_location = amapapi.coordinateconvert(self, wx_location)
            location = convert_location.split(';')[0]  # the user's actual location
formatted_address = amapapi.geocoderegeo(self, location)
if formatted_address:
self.wx_address = formatted_address
newport_ri = (location.split(',')[1], location.split(',')[0])
crm_team_pool = self.env['crm.team'].search([])
search_read_new = []
for crm_team in crm_team_pool:
                    if crm_team.longitude != 0.0 or crm_team.latitude != 0.0:
cleveland_oh = (crm_team.latitude, crm_team.longitude)
pos_kilometers = vincenty(newport_ri, cleveland_oh).kilometers
crm_team.distance = pos_kilometers
search_read_new.append(crm_team)
                        # _logger.info("Distance between store and user: %s" % pos_kilometers)
if search_read_new:
min_distance = (min(search_read_new, key=lambda dict: dict['distance']))
self.near_team = '%s:距离%s公里' % (min_distance.street, min_distance.distance)
_logger.info("获取门店信息")
# def _compute_im_status(self):
# super(WXResPartner, self)._compute_im_status()
def send_corp_msg(self, msg):
from ..rpc import corp_client
entry = corp_client.corpenv(self.env)
mtype = msg["mtype"]
if mtype == "text":
entry.client.message.send_text(entry.current_agent, self.wxcorp_user_id.userid, msg["content"])
if mtype == "card":
entry.client.message.send_text_card(entry.current_agent, self.wxcorp_user_id.userid, msg['title'],
msg['description'], msg['url'], btntxt=msg.get("btntxt", "详情"))
elif mtype == 'image':
ret = entry.client.media.upload(mtype, msg['media_data'])
entry.client.message.send_image(entry.current_agent, self.wxcorp_user_id.userid, ret['media_id'])
elif mtype == 'voice':
ret = entry.client.media.upload(mtype, msg['media_data'])
entry.client.message.send_voice(entry.current_agent, self.wxcorp_user_id.userid, ret['media_id'])
def get_corp_key(self):
if self.wxcorp_user_id:
return self.wxcorp_user_id.userid
def get_wx_key(self):
if self.wx_user_id:
return self.wx_user_id.openid
@api.multi
def write(self, vals):
resusers = super(WXResPartner, self).write(vals)
if vals.get('wx_user_id') and self.user_ids.wx_user_id.id != vals.get('wx_user_id'):
self.user_ids.wx_user_id = vals.get('wx_user_id')
self.user_ids.wx_id = self.user_ids.wx_user_id.openid
return resusers
|
[
"hepeng1@163.com"
] |
hepeng1@163.com
|
ac6d3308b07574f96e885441150f069ca1b51439
|
29db04da44804aae807f113dc0bb9134563b084e
|
/Python2-Core/src/main/prompto/statement/MethodCall.py
|
b483b2baa4a61ce28a56d38c5583e9fdd8129d0d
|
[] |
no_license
|
prompto/prompto-python2
|
0f84e6e6f0f6889a4c866fc300d351fd7ee8a6b1
|
f929df1dd55e0b5d8ec9f39398a924503fce4019
|
refs/heads/master
| 2022-12-07T12:41:27.297404
| 2022-11-27T17:37:43
| 2022-11-27T17:37:43
| 32,623,630
| 3
| 1
| null | 2019-05-04T11:01:41
| 2015-03-21T07:17:05
|
Python
|
UTF-8
|
Python
| false
| false
| 8,090
|
py
|
from prompto.declaration.IDeclaration import IDeclaration
from prompto.declaration.AbstractMethodDeclaration import AbstractMethodDeclaration
from prompto.declaration.ArrowDeclaration import ArrowDeclaration
from prompto.error.PromptoError import PromptoError
from prompto.error.SyntaxError import SyntaxError
from prompto.grammar.ArgumentList import ArgumentList
from prompto.statement.SimpleStatement import SimpleStatement
from prompto.declaration.ConcreteMethodDeclaration import ConcreteMethodDeclaration
from prompto.runtime.Context import MethodDeclarationMap
from prompto.runtime.MethodFinder import MethodFinder
from prompto.declaration.ClosureDeclaration import ClosureDeclaration
from prompto.type.MethodType import MethodType
from prompto.type.VoidType import VoidType
from prompto.value.ArrowValue import ArrowValue
from prompto.value.ClosureValue import ClosureValue
from prompto.value.BooleanValue import BooleanValue
from prompto.parser.Dialect import Dialect
from prompto.utils.CodeWriter import CodeWriter
class MethodCall(SimpleStatement):
def __init__(self, selector, arguments=None):
super(MethodCall, self).__init__()
self.selector = selector
self.arguments = arguments
def __str__(self):
suffix = str(self.arguments) if self.arguments is not None else ""
return str(self.selector) + suffix
def check(self, context):
finder = MethodFinder(context, self)
declaration = finder.findBest(False)
if declaration is None:
return VoidType.instance
if declaration.isAbstract():
self.checkAbstractOnly(context, declaration)
return VoidType.instance if declaration.returnType is None else declaration.returnType
else:
local = context if self.isLocalClosure(context) else self.selector.newLocalCheckContext(context, declaration)
return self.checkDeclaration(declaration, context, local)
def checkAbstractOnly(self, context, declaration):
if declaration.isReference: # parameter or variable populated from a method call
return
if declaration.memberOf is not None: # the category could be subclassed (if constructor called on abstract, that would raise an error anyway)
return
# if a global method, need to check for runtime dispatch
finder = MethodFinder(context, self)
potential = finder.findPotential()
if potential.all(lambda decl: decl.isAbstract()):
raise SyntaxError("Cannot call abstract method")
def checkReference(self, context):
finder = MethodFinder(context, self)
method = finder.findBest(False)
if method is not None:
return MethodType(method)
else:
return None
def isLocalClosure(self, context):
if self.selector.parent is not None:
return False
decl = context.getLocalDeclaration(IDeclaration, self.selector.name)
return isinstance(decl, MethodDeclarationMap)
def checkDeclaration(self, declaration, parent, local):
if isinstance(declaration, ConcreteMethodDeclaration) and declaration.mustBeBeCheckedInCallContext(parent):
return self.fullCheck(declaration, parent, local)
else:
return self.lightCheck(declaration, local)
def lightCheck(self, declaration, local):
declaration.registerParameters(local)
return declaration.check(local)
def fullCheck(self, declaration, parent, local):
try:
arguments = self.makeArguments(parent, declaration)
declaration.registerParameters(local)
for argument in arguments:
expression = argument.resolve(local, declaration, True)
value = argument.getParameter().checkValue(parent, expression)
local.setValue(argument.getName(), value)
return declaration.check(local)
except PromptoError as e:
raise SyntaxError(e.message)
def makeArguments(self, context, declaration):
if self.arguments is None:
return ArgumentList()
else:
return self.arguments.makeArguments(context, declaration)
def interpret(self, context):
finder = MethodFinder(context, self)
declaration = finder.findBest(True)
if declaration is None:
raise SyntaxError("No such method: " + str(self))
local = self.selector.newLocalContext(context, declaration)
declaration.registerParameters(local)
self.assignArguments(context, local, declaration)
return declaration.interpret(local)
def assignArguments(self, context, local, declaration):
arguments = self.makeArguments(context, declaration)
for argument in arguments:
expression = argument.resolve(local, declaration, True)
parameter = argument.getParameter()
value = parameter.checkValue(context, expression)
if value is not None and parameter.mutable and not value.mutable:
from prompto.error.NotMutableError import NotMutableError
raise NotMutableError()
local.setValue(argument.getName(), value)
def interpretReference(self, context):
declaration = self.findDeclaration(context)
return ClosureValue(context, MethodType(declaration))
def interpretAssert(self, context, testMethodDeclaration):
value = self.interpret(context)
if isinstance(value, BooleanValue):
return value.value
else:
writer = CodeWriter(self.dialect, context)
self.toDialect(writer)
raise SyntaxError("Cannot test '" + str(writer) + "'")
def findDeclaration(self, context):
method = self.findRegistered(context)
if method is not None:
return method
else:
finder = MethodFinder(context, self)
return finder.findBest(True)
def findRegistered(self, context):
if self.selector.getParent() is None:
try:
o = context.getValue(self.selector.getName())
if isinstance(o, ClosureValue):
return self.getClosureDeclaration(context, o)
elif isinstance(o, ArrowValue):
return ArrowDeclaration(o)
except PromptoError:
pass
return None
def getClosureDeclaration(self, context, closure):
decl = closure.itype.method
if decl.memberOf is not None:
# the closure references a member method (useful when a method reference is needed)
# in which case we may simply want to return that method to avoid spilling context into method body
# this is only true if the closure comes straight from the method's instance context
# if the closure comes from an accessible context that is not the instance context
# then it is a local variable that needs the closure context to be interpreted
declaring = context.contextForValue(self.selector.getName())
if declaring == closure.context:
return decl
return ClosureDeclaration(closure)
def toDialect(self, writer):
if self.requiresInvoke(writer):
writer.append("invoke: ")
self.selector.toDialect(writer, False)
if self.arguments is not None:
self.arguments.toDialect(writer)
elif writer.dialect is not Dialect.E:
writer.append("()")
def requiresInvoke(self, writer):
if writer.dialect is not Dialect.E or (self.arguments is not None and len(self.arguments) > 0):
return False
try:
finder = MethodFinder(writer.context, self)
declaration = finder.findBest(False)
# if method is a reference, need to prefix with invoke
return declaration.isAbstract() or declaration.closureOf is not None
except:
pass
# ok
return False
|
[
"eric.vergnaud@wanadoo.fr"
] |
eric.vergnaud@wanadoo.fr
|
3bfb51f0aa7f3004baf108569efcc7120a220413
|
d60ee49abaee6c74c5b777f8f112a7f75f71f029
|
/genome/variants2/filter/VCF/somatic/genes/indels/rare_variants.py
|
6062b31fd198971fea91fd9fa2620ed59197a345
|
[] |
no_license
|
ak352/melanomics
|
41530f623b4bfdbd5c7b952debcb47622d1a8e88
|
fc5e6fdb1499616fb25a8dc05259add8a65aeca0
|
refs/heads/master
| 2020-12-24T16:14:42.271416
| 2015-08-06T12:48:52
| 2015-08-06T12:48:52
| 18,439,919
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,593
|
py
|
import sys
from common_rsid import get_non_flagged
#Creates a dictionary of fields pointing to column numbers, makes the code more readable
def ParseFields(line):
fields = {}
var = line[:-1].split("\t")
for x in range(0, len(var)):
fields[var[x]] = x
return fields
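# For example, a header line "chrom\tpos\tref\n" yields {"chrom": 0, "pos": 1, "ref": 2},
# so columns can then be looked up by name (the column names here are just an illustration).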
def read_tsv(infile):
with open(infile) as f:
var = ParseFields(next(f))
for line in f:
record = {}
line = line[:-1].split("\t")
for x in var:
record[x] = line[var[x]]
record["all"] = "\t".join(line)
yield record
def report(line, log):
for s in sys.stderr, log:
s.write(line)
if __name__ == "__main__":
infile = sys.argv[1]
outfile = sys.argv[2]
logfile = outfile + ".log"
percent_threshold = 0.01
out, log = [open(x, "w") for x in [outfile, logfile]]
annotations = [line[:-1] for line in open("frequency_annotations")]
non_flagged = get_non_flagged()
report("Input: %s\n" % infile, log)
report("Output: %s\n" % outfile, log)
report("Log file: %s\n" % logfile, log)
report("Population frequency threshold = %1.2f\n" % percent_threshold, log)
rare, num_common, total = 0,0,0
""" Output the header """
out.write(next(open(infile)))
count = {}
for anno in annotations:
count[anno] = 0
count["dbsnp"] = 0
count["avsnp"] = 0
""" Filter the variants """
for record in read_tsv(infile):
is_common = False
for annotation in annotations:
if record[annotation]:
if float(record[annotation]) >= percent_threshold:
is_common = True
count[annotation] += 1
if record['snp138'] in non_flagged:
is_common=True
count['dbsnp'] += 1
if record['avsnp138'] in non_flagged:
is_common=True
count['avsnp'] += 1
total += 1
if is_common:
num_common += 1
else:
rare += 1
out.write(record["all"] + "\n")
report("STATISTICS:\n", log)
report("Total variants = %d\n" % total, log)
report("Common variants = %d (%2.2f%%)\n" % (num_common, float(num_common)*100/float(total)), log)
report("Of which:\n", log)
for annotation in annotations+['dbsnp', 'avsnp']:
report("\tVariants found in %s = %d (%2.2f%%)\n" % (annotation, count[annotation], float(count[annotation])*100/float(total)), log)
report("Rare variants = %d\n" % rare, log)
|
[
"ak@uni.fake"
] |
ak@uni.fake
|
b1aa5b9263b56dd44182ac4f6d3ae9f712c62267
|
5b2590bc39cf0cb44b7257f2be3be72a0e58d068
|
/torchkbnufft/nufft/sparse_interp_mat.py
|
4f2ef88a46f98241db14e644a685efbb30416c0f
|
[
"MIT"
] |
permissive
|
zaccharieramzi/torchkbnufft
|
efc59f4bed6675173b8064d5dec7ec9eeb43b0f2
|
37e5808ab73ddb52cbd4655f3d7fd6273b3dd89a
|
refs/heads/master
| 2020-12-26T08:17:44.180049
| 2020-08-03T17:17:40
| 2020-08-03T17:17:40
| 273,199,030
| 0
| 0
|
MIT
| 2020-06-18T09:39:00
| 2020-06-18T09:39:00
| null |
UTF-8
|
Python
| false
| false
| 5,225
|
py
|
import itertools
import numpy as np
import torch
from ..math import complex_mult, conj_complex_mult
from .interp_functions import calc_coef_and_indices
def get_interpob(model):
"""Retrieves the interpolation dictionary from model.
Different nufft objects use different interpolation objects. This function
only extracts the minimum amount necessary for sparse matrix
precomputation.
Args:
model (TorchKbNufft object): A TorchKbNufft object with attributes for
forming a KbNufft interpolation dictionary.
Returns:
dict: A dictionary with interpolation parameters.
"""
interpob = dict()
interpob['table'] = []
for i in range(len(model.table)):
interpob['table'].append(getattr(model, 'table_tensor_' + str(i)))
interpob['grid_size'] = model.grid_size_tensor
interpob['numpoints'] = model.numpoints_tensor
interpob['table_oversamp'] = model.table_oversamp_tensor
return interpob
def compute_forw_mat(dims, table, numpoints, Jlist, L, tm):
"""Compute a forward Kaiser-Bessel interpolation sparse matrix.
Args:
dims (tensor): A list of sizes of each dimension.
table (tensor): A list of interpolation tables.
numpoints (tensor): A list of numbers of nearest neighbors for each
dimension.
Jlist (tensor): A list of nearest neighbor configurations.
L (tensor): A list of table sizes for each dimension.
tm (tensor): An array of normalized frequency locations.
Returns:
tuple: A 2-length tuple with a sparse interpolation matrix in each
element. The first matrix has the real coefficients; the second
has the imaginary.
"""
dtype = table[0].dtype
device = table[0].device
int_type = torch.long
nJ = Jlist.shape[1]
# center of tables
centers = torch.floor(numpoints * L / 2).to(dtype=int_type)
# offset from k-space to first coef loc
kofflist = 1 + torch.floor(tm - numpoints.unsqueeze(1) / 2.0)
# do a bit of type management - ints for faster index comps
dims = dims.to(dtype=int_type)
kofflist = kofflist.to(dtype=int_type)
Jlist = Jlist.to(dtype=int_type)
# initialize the sparse matrices
coef_mat_real = torch.sparse.FloatTensor(
tm.shape[-1], torch.prod(dims)).to(dtype=dtype, device=device)
coef_mat_imag = torch.sparse.FloatTensor(
tm.shape[-1], torch.prod(dims)).to(dtype=dtype, device=device)
# loop over offsets and take advantage of broadcasting
for Jind in range(nJ):
coef, arr_ind = calc_coef_and_indices(
tm, kofflist, Jlist[:, Jind], table, centers, L, dims)
sparse_coords = torch.stack(
(
torch.arange(
arr_ind.shape[0],
dtype=arr_ind.dtype,
device=arr_ind.device
),
arr_ind
)
)
coef_mat_real = coef_mat_real + torch.sparse.FloatTensor(
sparse_coords,
coef[0],
torch.Size((arr_ind.shape[0], torch.prod(dims)))
)
coef_mat_imag = coef_mat_imag + torch.sparse.FloatTensor(
sparse_coords,
coef[1],
torch.Size((arr_ind.shape[0], torch.prod(dims)))
)
return coef_mat_real, coef_mat_imag
def precomp_sparse_mats(om, model):
"""Precompute sparse interpolation matrices.
Args:
om (tensor): The k-space trajectory in radians/voxel.
model (TorchKbNufft object): A KbNufft type object with attributes for
creating a KbNufft interpolation object.
Returns:
tuple: A 2-length tuple with lists of sparse interpolation matrices in
each element. The first matrix has the real coefficient matrices;
the second has the imaginary.
"""
interpob = get_interpob(model)
dtype = interpob['table'][0].dtype
device = interpob['table'][0].device
# extract interpolation params and match device and dtype to input
table = interpob['table']
grid_size = interpob['grid_size']
numpoints = interpob['numpoints']
table_oversamp = interpob['table_oversamp']
ndims = om.shape[1]
M = om.shape[2]
# convert to normalized freq locs
tm = torch.zeros(size=om.shape, dtype=dtype, device=device)
Jgen = []
for i in range(ndims):
gam = (2 * np.pi / grid_size[i])
tm[:, i, :] = om[:, i, :] / gam
Jgen.append(range(np.array(numpoints[i].cpu(), dtype=np.int)))
# build an iterator for going over all J values
Jgen = list(itertools.product(*Jgen))
coef_real_mats = []
coef_imag_mats = []
for norm_traj in tm:
coef_mat_real, coef_mat_imag = compute_forw_mat(
grid_size.to(dtype=dtype, device=device),
table,
numpoints,
torch.tensor(
np.transpose(np.array(Jgen)),
dtype=dtype,
device=device
),
table_oversamp,
norm_traj
)
coef_real_mats.append(coef_mat_real)
coef_imag_mats.append(coef_mat_imag)
return coef_real_mats, coef_imag_mats
|
[
"matt.muckley@gmail.com"
] |
matt.muckley@gmail.com
|
445094ac9443d4043d0a46684d8716d700979415
|
55c250525bd7198ac905b1f2f86d16a44f73e03a
|
/Python/Scripts/pyinstaller/tests/old_suite/interactive/test_pygame.py
|
e3c84c9dfc7af1c578e2a92a953046ea790ce1d4
|
[] |
no_license
|
NateWeiler/Resources
|
213d18ba86f7cc9d845741b8571b9e2c2c6be916
|
bd4a8a82a3e83a381c97d19e5df42cbababfc66c
|
refs/heads/master
| 2023-09-03T17:50:31.937137
| 2023-08-28T23:50:57
| 2023-08-28T23:50:57
| 267,368,545
| 2
| 1
| null | 2022-09-08T15:20:18
| 2020-05-27T16:18:17
| null |
UTF-8
|
Python
| false
| false
| 128
|
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:3026e58b95b7ac74eb29a2e0e663c0a7e96472b2bf98d1be45c60efdd0b9458b
size 922
|
[
"nateweiler84@gmail.com"
] |
nateweiler84@gmail.com
|
50fa5a76956a5e9f87fc8b39fcc81f05fde6feeb
|
365967082720f3fda31afccfc237b7a67e8ffc07
|
/math/UglyNumber.py
|
80cd2786b22e2532548ebd0cad9f942395e607c6
|
[] |
no_license
|
hulaba/geekInsideYou
|
ec68dee3fa24d63f5470aa40b600ef34d37c5da1
|
72c1f1b4fbf115db91c908a68c9ac3ca4cb22a4f
|
refs/heads/master
| 2022-12-11T11:11:03.149336
| 2020-09-12T16:12:40
| 2020-09-12T16:12:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 422
|
py
|
def divide(no, div):
while no % div == 0:
no = no / div
return no
def isUgly(num):
num = divide(num, 2)
num = divide(num, 3)
num = divide(num, 5)
return 1 if num == 1 else 0
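# An "ugly" number has no prime factors other than 2, 3 and 5.
# Worked example: isUgly(12) -> divide(12, 2) = 3 -> divide(3, 3) = 1 -> divide(1, 5) = 1,
# so 12 is ugly; isUgly(14) leaves the factor 7 behind and returns 0.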
def nthUgly(n):
count = 1
i = 1
while n > count:
i += 1
if isUgly(i):
count += 1
return i
def main():
print(nthUgly(15))
if __name__ == '__main__':
main()
|
[
"nainamehta2110@gmail.com"
] |
nainamehta2110@gmail.com
|
552b509615b8838ca1c4f36eb4f2afc80db01eb6
|
377d86194fd6d23c8ef3df3e6f7d90092dd8f9b4
|
/workout_tracker/auth/urls.py
|
81a75b085386e9757309abff228e83dd0d38bc59
|
[
"MIT"
] |
permissive
|
e-dang/Workout-Tracker
|
f20f44b012e895244bad413a46103415ffae5732
|
00a27597ea628cff62b320d616f56b2df4f344a0
|
refs/heads/master
| 2022-12-28T07:49:34.179307
| 2020-10-12T20:48:28
| 2020-10-12T20:48:28
| 293,937,958
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 334
|
py
|
from django.urls import path, include
from rest_auth.views import PasswordResetConfirmView
urlpatterns = [
path('', include('rest_auth.urls')),
path('register/', include('rest_auth.registration.urls')),
path('password-reset-confirm/<uidb64>/<token>/', PasswordResetConfirmView.as_view(), name='password_reset_confirm')
]
|
[
"edang830@gmail.com"
] |
edang830@gmail.com
|
82c411312abd5617b27f1cfbea9d327ce60f4dc8
|
51b6d2fc53d5c632fcf01319842baebf13901e84
|
/atcoder.jp/abc196/abc196_d/Main.py
|
08662bac5d300ada3cd847e151140d34667825e1
|
[] |
no_license
|
mono-0812/procon
|
35db3b2c21eff74fbd7b52db07f249380f6834ef
|
68a4b53880a228a0164052b23d1326363efcbc20
|
refs/heads/master
| 2023-05-30T17:02:58.935074
| 2021-06-27T12:15:10
| 2021-06-27T12:15:10
| 345,896,553
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,249
|
py
|
import bisect,collections,copy,heapq,itertools,math,string,sys,queue
def I(): return input()
def IS(): return input().split()
def II(): return int(input())
def IIS(): return map(int,input().split())
def LIIS(): return list(map(int,input().split()))
def ZER(N): return [False for _ in range(N)]
INF=float("inf")
MOD=10**9+7
def make_divisors(n):
lower_divisors , upper_divisors = [], []
i = 1
while i*i <= n:
if n % i == 0:
lower_divisors.append(i)
if i != n // i:
upper_divisors.append(n//i)
i += 1
return lower_divisors + upper_divisors[::-1]
##############################################################################
h,w,a,b=IIS()
used=[[0]*w for _ in range(h)]
res=0
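# dfs enumerates tilings cell by cell in row-major order: (i, j) is the current cell,
# a is the number of 1x2 tiles left, b the number of 1x1 tiles left; `used` marks
# cells already covered and the global `res` counts completed tilings.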
def dfs(i,j,a,b):
global res
if a<0 or b<0:
return
if j==w:
j=0
i+=1
if i==h:
res+=1
return
if used[i][j]==1:
return dfs(i,j+1,a,b)
used[i][j]=1
dfs(i,j+1,a,b-1)
if j+1 < w and used[i][j+1]==0:
used[i][j+1]=1
dfs(i,j+1,a-1,b)
used[i][j+1]=0
if i+1<h and used[i+1][j]==0:
used[i+1][j]=1
dfs(i,j+1,a-1,b)
used[i+1][j]=0
used[i][j]=0
return res
print(dfs(0,0,a,b))
|
[
"frisk02.jar@gmail.com"
] |
frisk02.jar@gmail.com
|
263f48233234deeabc43b9fed764e143d380396d
|
dd63082fe701392a132af4c52fba3ce2fb76e84c
|
/virtual/bin/django-admin.py
|
52ff4c4bd001d26a8f0e187c372daeff74962e1b
|
[] |
no_license
|
CollinsMuiruri/multivendor
|
b53bfbe773e86d1793d09d87e4b8ecada79a7823
|
cf68cb3e9dbca8040dc781e173712f8e043cb42f
|
refs/heads/main
| 2023-04-11T07:54:04.488449
| 2021-04-19T23:05:40
| 2021-04-19T23:05:40
| 359,611,231
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 707
|
py
|
#!/home/collins/Documents/proxy-services/multivendor/virtual/bin/python3
# When the django-admin.py deprecation ends, remove this script.
import warnings
from django.core import management
try:
from django.utils.deprecation import RemovedInDjango40Warning
except ImportError:
raise ImportError(
'django-admin.py was deprecated in Django 3.1 and removed in Django '
'4.0. Please manually remove this script from your virtual environment '
'and use django-admin instead.'
)
if __name__ == "__main__":
warnings.warn(
'django-admin.py is deprecated in favor of django-admin.',
RemovedInDjango40Warning,
)
management.execute_from_command_line()
|
[
"wanyekicollins@gmail.com"
] |
wanyekicollins@gmail.com
|
ea5160d5e285bd0f2ad8621191591fd191836803
|
ebf934fb6fd4e0ebbd870db857897fbb9d8022b7
|
/pyspark/ml/other/spark_ml_02.py
|
4e0ecd29cb3ae2cc49e494d27fa5864a6c628830
|
[] |
no_license
|
AidenLong/ai
|
6ce2bcf5928f8350ba8b440e9032ea4c39dd69ec
|
0901e6010bbb51a165680e52d9adaeec7e510dc1
|
refs/heads/master
| 2020-05-03T13:27:38.698490
| 2019-08-22T03:18:09
| 2019-08-22T03:18:09
| 178,653,209
| 3
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,667
|
py
|
# --encoding:utf-8 --
import os
from pyspark.mllib.stat import Statistics
from pyspark import SparkConf, SparkContext
if 'SPARK_HOME' not in os.environ:
    os.environ['SPARK_HOME'] = r'D:\syl\dev\spark-1.6.1-bin-2.5.0-cdh5.3.6'
# Create the Spark context
conf = SparkConf() \
.setMaster('local') \
.setAppName('spark ml 02')
sc = SparkContext(conf=conf)
# Build an RDD of vectors
vector_rdd = sc.parallelize([
[0, 2, 3],
[4, 8, 16],
[-7, 8, -9],
[10, -10, 12]
])
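# The RDD above holds 4 samples with 3 features each; Statistics.colStats
# aggregates column-wise, so the per-feature results below are length-3 vectors.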
# 1. Summary statistics
summary = Statistics.colStats(vector_rdd)
print("汇总对象类型:{}".format(type(summary)))
print("各个特征属性的均值:{}".format(summary.mean()))
print("各个特征属性的方差:{}".format(summary.variance()))
print("样本数据量:{}".format(summary.count()))
print("各个特征属性的最大特征值:{}".format(summary.max()))
print("各个特征属性的最小特征值:{}".format(summary.min()))
print("特征值不等于0的样本数量:{}".format(summary.numNonzeros()))
print("各个特征的L1范式值:{}".format(summary.normL1()))
print("各个特征的L2范式值:{}".format(summary.normL2()))
# 2. Correlation statistics (correlations between features)
x = sc.parallelize([1.0, 1.5, 0.9, 0, 0.85, 0.95, 0.5])
y = sc.parallelize([2.0, 2.1, 0, 2.0, 0, 2.21, 0])
print("x和y的相关性指标值为:{}".format(Statistics.corr(x, y)))
# method selects how the correlation is computed: the default is 'pearson' (Pearson correlation coefficient); 'spearman' (Spearman rank correlation) is also available
feature_corr = Statistics.corr(vector_rdd, method='pearson')
print("RDD对象中特征属性与特征属性之间的相关性指标为:\n{}".format(feature_corr))
|
[
"807108578@qq.com"
] |
807108578@qq.com
|
d9ad09a9d01aacf6b03f7ada5b2712a773f66e3c
|
9b20743ec6cd28d749a4323dcbadb1a0cffb281b
|
/11_Time_Series_Forecasting_with_Python/02/features_lag1.py
|
ec593a95a20499aae3844259bf5a6e6eb9850287
|
[] |
no_license
|
jggrimesdc-zz/MachineLearningExercises
|
6e1c7e1f95399e69bba95cdfe17c4f8d8c90d178
|
ee265f1c6029c91daff172b3e7c1a96177646bc5
|
refs/heads/master
| 2023-03-07T19:30:26.691659
| 2021-02-19T08:00:49
| 2021-02-19T08:00:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 357
|
py
|
# create a lag feature
from pandas import DataFrame
from pandas import concat
from pandas import read_csv
series = read_csv('daily-minimum-temperatures.csv', header=0, index_col=0, parse_dates=True, squeeze=True)
temps = DataFrame(series.values)
dataframe = concat([temps.shift(1), temps], axis=1)
dataframe.columns = ['t', 't+1']
print(dataframe.head(5))
|
[
"jgrimes@jgrimes.tech"
] |
jgrimes@jgrimes.tech
|
a6f64d216a61ec6b1ffd4aea86d1986532e794ab
|
9edaf93c833ba90ae9a903aa3c44c407a7e55198
|
/autosar/models/data_transformation_subtypes_enum.py
|
a50f8760e3b21482a2c78717041e0d8b56a05c4d
|
[] |
no_license
|
tefra/xsdata-samples
|
c50aab4828b8c7c4448dbdab9c67d1ebc519e292
|
ef027fe02e6a075d8ed676c86a80e9647d944571
|
refs/heads/main
| 2023-08-14T10:31:12.152696
| 2023-07-25T18:01:22
| 2023-07-25T18:01:22
| 222,543,692
| 6
| 1
| null | 2023-06-25T07:21:04
| 2019-11-18T21:00:37
|
Python
|
UTF-8
|
Python
| false
| false
| 166
|
py
|
from enum import Enum
__NAMESPACE__ = "http://autosar.org/schema/r4.0"
class DataTransformationSubtypesEnum(Enum):
DATA_TRANSFORMATION = "DATA-TRANSFORMATION"
|
[
"tsoulloftas@gmail.com"
] |
tsoulloftas@gmail.com
|
8e6929fa89c74d13a0c75c8d23b87b2a2effc313
|
b7125b27e564d2cc80a2ce8d0a6f934aa22c8445
|
/.history/display_board_20201107211818.py
|
025bebad29b34bbd089707fa7af0e9cfb8956bf5
|
[] |
no_license
|
JensVL96/Puzzle-solver-for-fun
|
4c15dcd570c3705b7ac555efb56b52913e81083c
|
6d8a4378a480372213a596a336a4deca727a00fc
|
refs/heads/master
| 2021-07-15T05:19:42.185495
| 2020-11-08T13:59:49
| 2020-11-08T13:59:49
| 224,855,888
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,807
|
py
|
from config import *
import pygame as pg
class Display_board():
def __init__(self, screen):
self.screen = screen
self.font_num = pg.font.SysFont("comicsans", NUMBER_SIZE)
self.font_cdt = pg.font.SysFont("comicsans", CANDIDATE_SIZE)
def draw_val(self, val, x, y):
text1 = self.font_num.render(str(val), 1, BLACK)
self.screen.blit(text1, (x * BLOCK_SIZE + 15, y * BLOCK_SIZE + 15))
def draw_cdt(self, val, x, y):
text1 = self.font_cdt.render(str(val), 1, BLACK)
self.screen.blit(text1, (x * BLOCK_SIZE + 1, y * BLOCK_SIZE + 1))
def on_mouse_press(self, x, y, symbol, modifier):
pass
def draw(self, grid, cell):
for i in range (9):
for j in range (9):
if grid[i][j] != 0:
if type(grid[i][j]) != int:
                        self.draw_candidates(grid, cell, i, j)
else:
text1 = self.font_num.render(str(grid[i][j]), 1, BLACK)
self.screen.blit(text1, (TOP_LX + i * BLOCK_SIZE + 15, TOP_LY + j * BLOCK_SIZE + 14))
size = 0
x_coord = TOP_LX
y_coord = TOP_LY
for i in range(10):
if i % 3 == 0:
print("thick")
thick = 7
else:
print("thin")
thick = 1
if (i + 2) % 3 == 0:
print("increasing size: ", i)
size += 7
# else:
# size = 0
check_diff = x_coord
x_coord += BLOCK_SIZE + size
# thick Thick thin thin thick thin thin thick thin thin thick
# TOP_LX -> TOP_RX & ( TOP_LY -> BS + 7 -> BS -> BS -> BS + 7 -> BS -> BS -> BS + 7 -> BS -> BS)
print("line: ", i, size, "block size: ", BLOCK_SIZE)
print("Start horizontal: ", check_diff, "end: ", x_coord)
print("Start vertical: ", i * BLOCK_SIZE + size, "end: ", 405 + i * BLOCK_SIZE + size)
pg.draw.line(self.screen, BLACK, (TOP_LX,
TOP_LY + i * BLOCK_SIZE + size),
(TOP_RX,
TOP_RY + i * BLOCK_SIZE+ size), thick)
pg.draw.line(self.screen, BLACK, (TOP_LX + i * BLOCK_SIZE+ size,
TOP_LY),
(BOT_LX + i * BLOCK_SIZE+ size,
BOT_LY), thick)
# For candidate placement
# if i % 3 == 0:
# print(BLOCK_SIZE)
# pg.draw.line(self.screen, BLACK, (cell[0],
# cell[1] + i * (cell[2] / 9)),
# ((cell[0] + cell[2]),
# cell[1] + i * (cell[2] / 9)), 1)
# pg.draw.line(self.screen, BLACK, (cell[0] + i * (cell[3] / 9),
# cell[1]),
# (cell[0] + i * (cell[3] / 9),
# cell[1] + cell[3]), 1)
    def draw_candidates(self, grid, cell, i, j):
new_line = 1
iteration = 1
indent = 15
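        # Candidates are laid out in a 3x3 mini-grid inside the selected cell:
        # `iteration` walks the columns (1..3) and `new_line` the rows, each
        # offset by `indent` pixels from the cell's top-left corner.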
for number in grid[i][j]:
if iteration % 3 == 1: # Checking if first in line: 1, 4, 7
text1 = self.font_cdt.render(str(number), 1, BLACK)
self.screen.blit(text1, (cell[0] + 3, cell[1] + ((new_line - 1) * indent) + 2))
else:
text1 = self.font_cdt.render(str(number), 1, BLACK)
self.screen.blit(text1, (cell[0] + ((iteration - 1) * indent) + 3, cell[1] + ((new_line - 1) * indent) + 2))
if iteration % 3 == 0: # checking if last in line: 3, 6
new_line += 1
iteration = 0
iteration += 1
def update(self, grid, row, col, blk):
font_val = pg.font.SysFont("comicsans", BOLD)
if row != (-1, -1):
# Remove old number
text1 = self.font_num.render(str(grid[row[0]][row[1]]), 1, WHITE)
self.screen.blit(text1, (TOP_LX + row[0] * BLOCK_SIZE + 15, TOP_LY + row[1] * BLOCK_SIZE + 15))
# Rewrite in bigger font
text1 = font_val.render(str(grid[row[0]][row[1]]), 1, BLACK)
self.screen.blit(text1, (TOP_LX + row[0] * BLOCK_SIZE + 14, TOP_LY + row[1] * BLOCK_SIZE + 10))
if col != (-1, -1):
# Remove old number
text1 = self.font_num.render(str(grid[col[0]][col[1]]), 1, WHITE)
self.screen.blit(text1, (TOP_LX + col[0] * BLOCK_SIZE + 15, TOP_LY + col[1] * BLOCK_SIZE + 15))
# Rewrite in bigger font
text1 = font_val.render(str(grid[col[0]][col[1]]), 1, BLACK)
self.screen.blit(text1, (TOP_LX + col[0] * BLOCK_SIZE + 14, TOP_LY + col[1] * BLOCK_SIZE + 10))
if blk != (-1, -1):
# Remove old number
text1 = self.font_num.render(str(grid[blk[0]][blk[1]]), 1, WHITE)
self.screen.blit(text1, (TOP_LX + blk[0] * BLOCK_SIZE + 15, TOP_LY + blk[1] * BLOCK_SIZE + 15))
# Rewrite in bigger font
text1 = font_val.render(str(grid[blk[0]][blk[1]]), 1, BLACK)
self.screen.blit(text1, (TOP_LX + blk[0] * BLOCK_SIZE + 14, TOP_LY + blk[1] * BLOCK_SIZE + 10))
def find_cell(self, x, y):
# Only applies glow when a cell is selected
if x == -1 and y == -1:
return
width = BLOCK_SIZE
height = BLOCK_SIZE
# Adjustment in size if bordering a thick line
if x % 3 == 0: # If thick line on the left
start_pos_x = TOP_LX + x * BLOCK_SIZE + 4
width = BLOCK_SIZE# - 4
else:
start_pos_x = TOP_LX + x * BLOCK_SIZE + 1
if (x + 1) % 3 == 0: # If thick line on the right
width = BLOCK_SIZE# - 3.5
if y % 3 == 0: # If thick line on the top
start_pos_y = TOP_LY + y * BLOCK_SIZE + 4
height = BLOCK_SIZE# - 4
else:
start_pos_y = TOP_LY + y * BLOCK_SIZE + 1
if (y + 1) % 3 == 0: # If thick line on the bottom
height = BLOCK_SIZE# - 3.5
return (start_pos_x, start_pos_y, width, height)
def blink(self, alpha, a_change):
if a_change:
alpha += BLINK_SPEED
if alpha >= 175:
a_change = False
elif a_change == False:
alpha += -BLINK_SPEED
if alpha <= 30:
a_change = True
return (alpha, a_change)
|
[
"jle040@uit.no"
] |
jle040@uit.no
|
8be82ede930331667dd607f674a2cefd5c1b9b0c
|
dbd8180d9c02c22b42baa5227437714ff352fd8e
|
/1-100/L237.py
|
2ab3f5f5b9eb4f3c53c14f04d1fca60d0a23b65e
|
[] |
no_license
|
k8godzilla/-Leetcode
|
92953dfffc0f06907fa7bd0beea7bc27b16f9efa
|
58d5384155f481b1d1b0a7ca69566245dd779554
|
refs/heads/master
| 2020-06-12T15:35:43.380979
| 2019-08-07T11:14:49
| 2019-08-07T11:14:49
| 194,348,264
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,636
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Fri Jul 26 11:27:50 2019
@author: admin
"""
'''
Write a function to delete a given (non-tail) node from a singly linked list;
you are only given access to the node that should be deleted.
Given a linked list -- head = [4,5,1,9], it can be represented as:
Example 1:
Input: head = [4,5,1,9], node = 5
Output: [4,1,9]
Explanation: you are given the second node, whose value is 5; after calling your
function the list should become 4 -> 1 -> 9.
Example 2:
Input: head = [4,5,1,9], node = 1
Output: [4,5,9]
Explanation: you are given the third node, whose value is 1; after calling your
function the list should become 4 -> 5 -> 9.
Source: LeetCode (力扣)
Link: https://leetcode-cn.com/problems/delete-node-in-a-linked-list
Copyright belongs to LeetCode (领扣网络). Contact them for authorization before
commercial reuse; cite the source for non-commercial reuse.
'''
# Definition for singly-linked list.
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
class Solution:
def deleteNode(self, node):
"""
:type node: ListNode
:rtype: void Do not return anything, modify node in-place instead.
"""
while node.next is not None:
node.val = node.next.val
if node.next.next is None:
node.next = None
break
node = node.next
|
[
"k8sunyin@126.com"
] |
k8sunyin@126.com
|
7e33ad8bbc4f98c052b5f081b07505b5b17aa3df
|
f305f84ea6f721c2391300f0a60e21d2ce14f2a5
|
/9_排序和搜索/经典题/826. 安排工作以达到最大收益.py
|
ec2e9799c526bbb8a42b79b5b4cd4c785cc7eb7f
|
[] |
no_license
|
981377660LMT/algorithm-study
|
f2ada3e6959338ae1bc21934a84f7314a8ecff82
|
7e79e26bb8f641868561b186e34c1127ed63c9e0
|
refs/heads/master
| 2023-09-01T18:26:16.525579
| 2023-09-01T12:21:58
| 2023-09-01T12:21:58
| 385,861,235
| 225
| 24
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,380
|
py
|
from typing import List
from bisect import bisect_left
# difficulty[i] is the difficulty of job i, profit[i] is its profit.
# worker[i] is the ability of worker i, i.e. that worker can only do jobs with difficulty <= worker[i].
# !Each worker can take at most one job, but one job may be completed any number of times.
# What is the maximum total profit we can obtain? (i.e. pick which "monster" each worker fights to earn the most coins)
# 1 <= n, m <= 10^4
# Plain binary search is not enough: a harder job is not guaranteed to pay more.
# Summary:
# !Sort jobs and workers, sweep with a pointer,
# !and keep the best profit seen so far for each worker.
class Solution:
def maxProfitAssignment(
self, difficulty: List[int], profit: List[int], worker: List[int]
) -> int:
worker.sort()
jobs = sorted(zip(difficulty, profit), key=lambda x: x[0])
res, ji, maxPro = 0, 0, 0
for w in worker:
while ji < len(jobs) and jobs[ji][0] <= w:
maxPro = max(maxPro, jobs[ji][1])
ji += 1
res += maxPro
return res
print(
Solution().maxProfitAssignment(
difficulty=[2, 4, 6, 8, 10], profit=[10, 20, 30, 40, 50], worker=[4, 5, 6, 7]
)
)
# Output: 100
# Explanation: the workers are assigned jobs of difficulty [4,4,6,6] and earn [20,20,30,30] respectively
|
[
"lmt2818088@gmail.com"
] |
lmt2818088@gmail.com
|
3d5c8745030e29f443d5aefebb7a4c6d895d30ec
|
18e886106d2d84c978e12aa29691f5c0269409ca
|
/src/biotite/sequence/search.py
|
975603f8edc57daec4a299306d9c5f47044335c4
|
[
"BSD-3-Clause"
] |
permissive
|
avestamh/biotite
|
fed37fb4733b4b064cad06b0fccb0193f295c6a2
|
d2066e1011bffa7ef83db9fddf6207da8834cc79
|
refs/heads/master
| 2020-08-06T06:59:21.326038
| 2019-09-27T17:03:06
| 2019-09-27T17:03:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,119
|
py
|
# This source code is part of the Biotite package and is distributed
# under the 3-Clause BSD License. Please see 'LICENSE.rst' for further
# information.
__author__ = "Patrick Kunzmann"
__all__ = ["find_subsequence", "find_symbol", "find_symbol_first",
"find_symbol_last"]
import numpy as np
def find_subsequence(sequence, query):
"""
Find a subsequence in a sequence.
Parameters
----------
sequence : Sequence
The sequence to find the subsequence in.
query : Sequence
The potential subsequence. Its alphabet must extend the
`sequence` alphabet.
Returns
-------
match_indices : ndarray
The starting indices in `sequence`, where `query` has been
found. The array is empty if no match has been found.
Raises
------
ValueError
If the `query` alphabet does not extend the `sequence` alphabet.
Examples
--------
>>> main_seq = NucleotideSequence("ACTGAATGA")
>>> sub_seq = NucleotideSequence("TGA")
>>> print(find_subsequence(main_seq, sub_seq))
[2 6]
"""
if not sequence.get_alphabet().extends(query.get_alphabet()):
raise ValueError("The sequences alphabets are not equal")
match_indices = []
frame_size = len(query)
for i in range(len(sequence) - frame_size + 1):
sub_seq_code = sequence.code[i : i + frame_size]
if np.array_equal(query.code, sub_seq_code):
match_indices.append(i)
return np.array(match_indices)
def find_symbol(sequence, symbol):
"""
Find a symbol in a sequence.
Parameters
----------
sequence : Sequence
The sequence to find the symbol in.
symbol : object
The symbol to be found in `sequence`.
Returns
-------
match_indices : ndarray
The indices in `sequence`, where `symbol` has been found.
"""
code = sequence.get_alphabet().encode(symbol)
return np.where(sequence.code == code)[0]
def find_symbol_first(sequence, symbol):
"""
    Find the first occurrence of a symbol in a sequence.
Parameters
----------
sequence : Sequence
The sequence to find the symbol in.
symbol : object
The symbol to be found in `sequence`.
Returns
-------
first_index : int
The first index of `symbol` in `sequence`. If `symbol` is not in
`sequence`, -1 is returned.
"""
match_i = find_symbol(sequence, symbol)
if len(match_i) == 0:
return -1
return np.min(match_i)
def find_symbol_last(sequence, symbol):
"""
    Find the last occurrence of a symbol in a sequence.
Parameters
----------
sequence : Sequence
The sequence to find the symbol in.
symbol : object
The symbol to be found in `sequence`.
Returns
-------
    last_index : int
The last index of `symbol` in `sequence`. If `symbol` is not in
`sequence`, -1 is returned.
"""
match_i = find_symbol(sequence, symbol)
if len(match_i) == 0:
return -1
return np.max(match_i)
|
[
"patrick.kunzm@gmail.com"
] |
patrick.kunzm@gmail.com
|
ba108f845d88a211249cea333a6ee45a47788350
|
0fa517fb36fbffd3ffcc9c1a016f1812a8139102
|
/numpy_01_arrays.py
|
90971c221829f79b1720b2bb2e27d2a8362b7e47
|
[] |
no_license
|
sooyoungkim/flipped-python-modules
|
2e7ed4d8590edcb87695d9eb3f5a096f7c362006
|
cfa5074b22429340e8586cb49b6a3c25df61e6f3
|
refs/heads/master
| 2020-03-09T10:57:45.480915
| 2018-04-09T10:01:18
| 2018-04-09T10:01:18
| 128,749,437
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,497
|
py
|
import numpy as np
########################################################
# Arrays
# - The central feature of NumPy is the array object class.
# - An array is similar to a Python list, but all of its elements share one type,
#   usually float or int.
# - Very large numeric data sets can be processed quickly and efficiently.
# - Arrays can be multidimensional.
# - Creating an array: numpy.array(the list, the type)
########################################################
########################################################
# a one-dimensional array
one = np.array([1, 4, 5, 8], float)
print(one)
print(type(one))
# Array elements can be accessed, sliced and manipulated just like a Python list
one[:2]
one[3]
one[0]
########################################################
########################################################
# a two-dimensional array
two = np.array([[1, 2, 3], [4, 5, 6]], float)
print(two)
two[0,0]
two[0,1]
two[0,2]
two[1,0]
two[1,1]
two[1,2]
two[1, :]
two[:, 2]
two[-1:, -2:]
print(two.shape)  # returns a (rows, cols) tuple -> (2, 3)
print(two.dtype)  # returns the type of the values stored in the array
# float64 : roughly equivalent to Python's float type;
# NumPy handles real numbers in double precision (8 bytes, 64 bits)
len(two)  # number of rows
2 in two  # does the value 2 occur anywhere in the array?
0 in two
########################################################
########################################################
# a one-dimensional array -> a two-dimensional array
arr = np.array(range(10), float)
print(arr)
# [ 0. 1. 2. 3. 4. 5. 6. 7. 8. 9.]
arr = arr.reshape((5,2))  # reshape the 1-D array of 10 values into a new 2-D (5, 2) array
print(arr)
#[[ 0. 1.]
# [ 2. 3.]
# [ 4. 5.]
# [ 6. 7.]
# [ 8. 9.]]
print(arr.shape) # (5,2)
########################################################
########################################################
a = np.array([1, 2, 3], float)
b = a             # b references the same array as a
c = a.copy()      # a detached copy in new memory
a[0] = 0          # affects a and b (which references a)
print(a)          # [0. 2. 3.]
print(b)
print(c)
a.tolist()        # convert the array to a list: [0.0, 2.0, 3.0]
list(a)           # convert the array to a list: [0.0, 2.0, 3.0]
s = a.tostring()  # convert to a binary string (i.e., not in human-readable form)
print(s)
print(np.fromstring(s))  # build an array back from that binary string
a.fill(0)
print(a)
########################################################
########################################################
# Transposed
a = np.array(range(6), float).reshape((2,3))
print(a)
#[[ 0. 1. 2.]
# [ 3. 4. 5.]]
print(a.transpose())
#[[ 0. 3.]
# [ 1. 4.]
# [ 2. 5.]]
print(a)  # a itself is unchanged
########################################################
########################################################
# a two-dimensional array -> a one-dimensional array
a = np.array([[1,2,3], [4,5,6]], float)
print(a)
#[[ 1. 2. 3.]
# [ 4. 5. 6.]]
print(a.flatten())
#[ 1. 2. 3. 4. 5. 6.]
print(a)  # a itself is unchanged
########################################################
########################################################
# a one-dimensional array
# Two or more arrays can be joined into one.
a = np.array([1,2], float)
b = np.array([3,4,5,6], float)
c = np.array([7,8,9], float)
print( np.concatenate((a,b,c)) )
# a two-dimensional array
# Arrays with two or more dimensions can be concatenated as well.
a = np.array([[1, 2], [3, 4]], float)
b = np.array([[5, 6], [7,8]], float)
print(np.concatenate((a,b)))  # same as axis=0
print(np.concatenate((a,b), axis=0))
#[[ 1. 2.]
# [ 3. 4.]
# [ 5. 6.]
# [ 7. 8.]]
print(np.concatenate((a,b), axis=1))
#[[ 1. 2. 5. 6.]
# [ 3. 4. 7. 8.]]
########################################################
########################################################
# np.newaxis
# Increases the number of dimensions of an array by one.
# Convenient for building arrays of the right dimensionality for vector and matrix work.
a = np.array([1, 2, 3], float)
print(a[:, np.newaxis])
#[[ 1.]
# [ 2.]
# [ 3.]]
print(a[:, np.newaxis].shape)  # 1-D array -> 2-D column vector, shape (3, 1)
print(a[np.newaxis, :])
# [[ 1. 2. 3.]]
print(a[np.newaxis, :].shape) # (1,3)
########################################################
########################################################
|
[
"suri.kim@daumkakao.com"
] |
suri.kim@daumkakao.com
|
7dd305bcfab4bc04364c22ec193187aa5f1f6479
|
5390e730defb07660fb6928f5de5970db80b5457
|
/docs/conf.py
|
c8a64641f11e45a5d5e7a0132d35e129d865d969
|
[
"BSD-2-Clause"
] |
permissive
|
DrMeers/plata
|
a732831f001bdc25ab425d6e3c3f70c4d3d303fb
|
eedfdadda573e9f7b28beddf9ec578c91903355a
|
refs/heads/master
| 2021-01-18T10:32:01.027409
| 2013-06-28T07:42:40
| 2013-06-28T07:42:40
| 771,395
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,605
|
py
|
# -*- coding: utf-8 -*-
#
# Plata documentation build configuration file
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
import PIL.Image
sys.modules['Image'] = PIL.Image
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.append(os.path.abspath('.'))
sys.path.append(os.path.abspath('..'))
sys.path.append(os.path.abspath('../tests/'))
os.environ['DJANGO_SETTINGS_MODULE'] = 'tests.settings'
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.txt'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Plata'
copyright = u'2010, Feinheit GmbH and contributors'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
sys.path.insert(0, os.path.dirname(os.path.dirname(__file__)))
import plata
version = '.'.join(map(str, plata.VERSION))
# The full version, including alpha/beta/rc tags.
release = plata.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme_path = ['_theme']
html_theme = 'nature'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'Platadoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
latex_paper_size = 'a4'
# The font size ('10pt', '11pt' or '12pt').
latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Plata.tex', u'Plata Documentation',
u'Feinheit GmbH and contributors', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
|
[
"mk@spinlock.ch"
] |
mk@spinlock.ch
|
df82af94f16968ac4caa95eaf4b55ce03d574d45
|
cd1ac53e02cf8879e092bdc4a625e465e241d6e0
|
/apps/goods/views.py
|
b763376cb7524115ff1f1d04353e31927bc680fd
|
[] |
no_license
|
furuiyang0715/dailyfresh
|
a150d472237822730f6c372620c5da19da514590
|
98c678c34d5b26eda6b99f7a8d1ca74b6f4399d3
|
refs/heads/master
| 2022-12-18T17:38:22.200364
| 2020-09-17T08:49:30
| 2020-09-17T08:49:30
| 286,921,141
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,199
|
py
|
import json
from django.http import JsonResponse
from django.shortcuts import render
from django.views import View
from django_redis import get_redis_connection
from goods.models import GoodsType, IndexGoodsBanner, IndexPromotionBanner, IndexTypeGoodsBanner
class IndexView(View):
    """Home page."""

    def get(self, request):
        """Render the home page."""
        # (1) Fetch all goods categories
        types = GoodsType.objects.all()

        # (2) Fetch the carousel (banner) goods.
        # They must be ordered by display position:
        #   "index"  sorts ascending (the default)
        #   "-index" sorts descending
        goods_banners = IndexGoodsBanner.objects.all().order_by("index")

        # (3) Fetch the promotion banners shown on the home page
        goods_promotion = IndexPromotionBanner.objects.all().order_by("index")

        # (4) Fetch the per-category goods displayed on the home page
        goods_type = IndexTypeGoodsBanner.objects.all()
        for type in types:
            # Goods shown as images for this category
            image_goods_lst = IndexTypeGoodsBanner.objects.filter(type=type, display_type=1)
            # Goods shown as text for this category
            text_goods_lst = IndexTypeGoodsBanner.objects.filter(type=type, display_type=0)
            type.image_goods_lst = image_goods_lst
            type.text_goods_lst = text_goods_lst

        # (5) Get the number of items in the user's cart
        '''
        When is a cart record added?
            When the user clicks "add to cart".
        When do the cart records need to be read?
            Whenever the cart data is used, e.g. when the cart page is visited.
        Where are the cart records stored?
            In redis.
        How are the records laid out?
            One user's cart is kept as a single redis hash:
                "cart_<user id>": {"sku_id1": count, "sku_id2": count, ...}
            hlen then returns the number of distinct items in the cart.
        '''
        user = request.user
        cart_count = 0
        '''
        Test data can be added in redis with:
            hmset cart_39 1 3 2 5 3 2
        '''
        if user.is_authenticated:
            # print("user is logged in")
            conn = get_redis_connection('default')
            cart_key = "cart_{}".format(user.id)
            cart_count = conn.hlen(cart_key)

        # Build the template context
        context = {
            "types": types,
            "goods_banners": goods_banners,
            "goods_promotion": goods_promotion,
            "goods_type": goods_type,
            "cart_count": cart_count,
        }
        return render(request, "index.html", context)
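# Hedged sketch (not part of the original views.py): the notes above say a cart
# record is written when the user clicks "add to cart". Using the same redis hash
# layout ("cart_<user id>" -> {sku_id: count}), such a view could look roughly
# like this; the response codes and the "sku_id"/"count" POST parameters are
# assumptions for illustration only.
class AddCartView(View):
    """Add a product to the current user's cart (illustrative sketch)."""

    def post(self, request):
        user = request.user
        if not user.is_authenticated:
            return JsonResponse({"res": 0, "errmsg": "please log in first"})

        sku_id = request.POST.get("sku_id")
        count = request.POST.get("count")
        try:
            count = int(count)
        except (TypeError, ValueError):
            return JsonResponse({"res": 1, "errmsg": "invalid item count"})

        conn = get_redis_connection("default")
        cart_key = "cart_{}".format(user.id)
        # hincrby creates the field when it is missing, so a single call covers
        # both "add a new item" and "increase the count of an existing item".
        conn.hincrby(cart_key, sku_id, count)
        return JsonResponse({"res": 5, "cart_count": conn.hlen(cart_key)})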
'''
The home page is a good candidate for static page generation:
if a page is visited frequently but changes rarely, or the moment it changes
can be detected, the page can be pre-rendered into a static file.

Traditional Django rendering:
    query the database --> render data + template --> return to the client

With static page generation:
    when the underlying data changes, a new static page is regenerated
    asynchronously, and users are served the generated file directly by nginx.
'''
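# Hedged sketch (not part of the original views.py): one way to realise the static
# page idea above is a task, typically run asynchronously from a task queue, that
# re-renders the home page into a plain HTML file for nginx to serve. The output
# path and the template name "static_index.html" are assumptions for illustration.
def generate_static_index_html():
    """Re-render the home page into a static file whenever its data changes."""
    import os
    from django.conf import settings
    from django.template import loader

    types = GoodsType.objects.all()
    goods_banners = IndexGoodsBanner.objects.all().order_by("index")
    goods_promotion = IndexPromotionBanner.objects.all().order_by("index")
    context = {
        "types": types,
        "goods_banners": goods_banners,
        "goods_promotion": goods_promotion,
    }

    # Render the template to a string and write it where nginx can pick it up.
    html = loader.render_to_string("static_index.html", context)
    save_path = os.path.join(settings.BASE_DIR, "static", "index.html")
    with open(save_path, "w") as f:
        f.write(html)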
|
[
"furuiyang0715@gmail.com"
] |
furuiyang0715@gmail.com
|