blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
b58d8677ccb0a7cdfe14ea57afee51438b6116fa
|
6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4
|
/nb836onw9bek4FPDt_16.py
|
69b55a2efffa5340071d2d3f67af082fc7f2d0f0
|
[] |
no_license
|
daniel-reich/ubiquitous-fiesta
|
26e80f0082f8589e51d359ce7953117a3da7d38c
|
9af2700dbe59284f5697e612491499841a6c126f
|
refs/heads/master
| 2023-04-05T06:40:37.328213
| 2021-04-06T20:17:44
| 2021-04-06T20:17:44
| 355,318,759
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 239
|
py
|
def count_same_ends(txt):
    """Count the words in *txt* whose first and last letters match.

    The comparison is case-insensitive, "!" and "." are stripped before
    splitting on whitespace, and one-letter words are never counted.
    """
    cleaned = txt.lower().replace("!", "").replace(".", "")
    return sum(
        1
        for word in cleaned.split()
        if len(word) > 1 and word[0] == word[-1]
    )
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
7388a331af4567a46dc4b438a6216cfde308fd11
|
edd8ad3dcb6ee9b019c999b712f8ee0c468e2b81
|
/Python 300/11. Class/284.py
|
cbe6087088e53713cea580554af042f416b7832d
|
[] |
no_license
|
narinn-star/Python
|
575cba200de35b9edf3832c4e41ccce657075751
|
14eba211cd3a9e9708a30073ba5b31d21d39eeef
|
refs/heads/master
| 2023-05-25T22:57:26.079294
| 2021-06-07T15:29:39
| 2021-06-07T15:29:39
| 331,647,462
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 386
|
py
|
# Class inheritance demo (comments translated from Korean).
class car:
    """Base vehicle holding a wheel count and a price."""

    def __init__(self, wheel, price):
        self.wheel = wheel
        self.price = price


class bike(car):
    """A bike is a car plus a drivetrain component."""

    def __init__(self, wheel, price, 구동계):
        # Same effect as car.__init__(self, wheel, price)
        super().__init__(wheel, price)
        self.구동계 = 구동계


bicycle = bike(2, 100, "시마노")
print(bicycle.구동계)
print(bicycle.wheel)
|
[
"skfls2618@naver.com"
] |
skfls2618@naver.com
|
91eaecf9d7e01d9e1e6daaa5154fc75664696fdd
|
34b9b39442bde1a3c8fa670ef60bcc84d772a067
|
/Assignment 6-Pandas A-Deadline Oct 31 2017/Assigment6_Marrugo/Assignment6_step1_Marrugo.py
|
18084c7507efa63f8f69b9823d9f478cea967cc6
|
[] |
no_license
|
bnajafi/Scientific_Python_Assignments_POLIMI_EETBS
|
b398fc2754b843d63cd06d517235c16177a87dcf
|
8da926e995dcaf02a297c6bb2f3120c49d6d63da
|
refs/heads/master
| 2021-05-07T22:36:14.715936
| 2018-01-16T21:12:33
| 2018-01-16T21:12:33
| 107,265,075
| 38
| 86
| null | 2018-01-16T21:12:34
| 2017-10-17T12:24:04
|
Python
|
UTF-8
|
Python
| false
| false
| 2,890
|
py
|
# -*- coding: utf-8 -*-
# Assigment 6 Calculation of the example D using pandas
# NOTE(review): this file uses Python 2 print statements and will not run
# under Python 3 without conversion.
print "Assigment 5 Calculation of the example D using pandas\n"
# import library
import pandas as pd
#Convention resistance [Heat transfer coefficient, area]
Resistances_names=["R1in","R2","R3","R4","R5","R6","R7","R8out"]#Resistances names of the wall
# Column meanings: Type (conv/cond), Config (Series/Parallel), L length [m],
# H heat-transfer coefficient [W/m2K], K conductivity [W/mK], A area [m2],
# RValue thermal resistance [K/W] (filled in below).
Resistances_columns=["Type","Config","L","H","K","A","RValue"]
# Definition of resistances
R1in=["conv","Series",None,10,None,0.25,0]
R2=["cond","Series",0.03,None,0.026,0.25,0]
R3=["cond","Series",0.02,None,0.22,0.25,0]
R4=["cond","Parallel",0.16,None,0.22,0.015,0]
R5=["cond","Parallel",0.16,None,0.22,0.015,0]
R6=["cond","Parallel",0.16,None,0.72,0.22,0]
R7=["cond","Series",0.02,None,0.22,0.25,0]
R8out=["conv","Series",None,25,None,0.25,0]
#Creation of a 2D array
Resistances_Df=pd.DataFrame([R1in,R2,R3,R4,R5,R6,R7,R8out],index=Resistances_names,columns=Resistances_columns)
#Resistances_RValues= np.array(np.zeros(8))# Variable for store the resistances values
#Calculation of the conductive resistances: R = L / (K * A)
# NOTE(review): chained indexing df[col][mask] = ... raises
# SettingWithCopyWarning on modern pandas; df.loc[mask, col] is the safe form.
Resistances_Df["RValue"][Resistances_Df["Type"]=="cond"] = (Resistances_Df["L"][Resistances_Df["Type"]=="cond"])/((Resistances_Df["K"][Resistances_Df["Type"]=="cond"])*(Resistances_Df["A"][Resistances_Df["Type"]=="cond"]))
#Calculation of the convective resistances: R = 1 / (H * A)
Resistances_Df["RValue"][Resistances_Df["Type"]=="conv"] = 1.0 / ((Resistances_Df["A"][Resistances_Df["Type"]=="conv"])*(Resistances_Df["H"][Resistances_Df["Type"]=="conv"]))
#Total convection resistance
Resistances_convection=Resistances_Df["RValue"][Resistances_Df["Type"]=="conv"].sum()
#Total conduction resistances in series
Resistances_Series_conduction=(Resistances_Df["RValue"][Resistances_Df["Config"]=="Series"][Resistances_Df["Type"]=="cond"].sum())
#Calculation of the parallel resistances (invert each, sum, invert back)
Resistances_Df["RValue"][Resistances_Df["Config"]=="Parallel"]=1/(Resistances_Df["RValue"][Resistances_Df["Config"]=="Parallel"])
#Total conduction resistances in parallel
Resistances_Parallel_conduction=1/(Resistances_Df["RValue"][Resistances_Df["Config"]=="Parallel"].sum())
#Total resistance
R_total=Resistances_Series_conduction + Resistances_Parallel_conduction + Resistances_convection
wall=[20,-10,3,5,0.25]#wall inputs [Temperature in, Temperature out, high, wide, area ]
Qb=(wall[0]-wall[1])/R_total# Rate of heat transfer of one brick in [W]
Nb=(wall[2]*wall[3])/wall[4]# Number of bricks in the wall
Qtotal=Qb*Nb# Rate of heat tranfer of the wall in [W]
print "The total convenction resistance is ",Resistances_convection,"ºC/W \n"
print "The total conduction resistance in series is ",Resistances_Series_conduction,"ºC/W \n"
print "The total conduction resistance in parallel is ",Resistances_Parallel_conduction,"ºC/W \n"
print "The total thermal resistance is ",R_total,"ºC/W \n"
print "The heat transfer through the wall is "+str(Qtotal)+" W"
|
[
"b.najafi87@gmail.com"
] |
b.najafi87@gmail.com
|
8e71bfb294c0824e57c51307c343248ff48ae18a
|
addbf46c371f7d3cb51dfce6d118da8e0fd8c1f2
|
/nathan-programming-puzzles-10-2-a.py
|
5d53a2e9e28239d972ee3fd494170d25d1341682
|
[] |
no_license
|
nrhint/python-class
|
2a5af35fc887eeb6668278d40b86b5be872ee4c4
|
c70940b5e03b1858d2f6f16be6807206ec3e22bb
|
refs/heads/master
| 2020-12-07T00:37:52.907516
| 2016-12-07T00:50:52
| 2016-12-07T00:50:52
| 67,438,180
| 0
| 0
| null | 2016-09-05T17:09:27
| 2016-09-05T17:09:27
| null |
UTF-8
|
Python
| false
| false
| 154
|
py
|
import pickle

# Set of favourite topics to persist on disk.
faves = {
    "food", "games", "programming"
}

# NOTE: the filename spelling "favorits.dat" is kept exactly as written,
# since anything reading the file back depends on it.
with open("favorits.dat", 'wb') as favesFile:
    pickle.dump(faves, favesFile)
|
[
"nathan@hintonclan.org"
] |
nathan@hintonclan.org
|
27d53f6361992e5a1a8759acb0c55160f5ab5cb1
|
07c3034f7b6ef88e08430b8c908613ea0091f1b6
|
/Homework/HW1/hc2324_hw1_q3.py
|
8c4604900546d1f073f205bf4eca98d9a5734f9d
|
[] |
no_license
|
HelalChow/Data-Structures
|
494b8fabcdf1cac20db78055547ce4160ad6a018
|
3b3401cbd23e01b2d7d95dfc3b95451ca179cee9
|
refs/heads/master
| 2021-10-19T00:24:23.589224
| 2019-02-15T22:34:05
| 2019-02-15T22:34:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 358
|
py
|
def a_sum_square(n):
    """Return the sum of i**2 for i in [0, n).

    Fixed: the accumulator was named ``sum``, shadowing the builtin;
    renamed to ``total``. Behavior is unchanged.
    """
    total = 0
    for i in range(n):
        total += i ** 2
    return total
def b_sum_square(n):
    """Return the sum of squares of 0..n-1 via a generator expression."""
    return sum(i * i for i in range(n))
def c_sum_square(n):
    """Return the sum of i**2 over the odd i in [0, n).

    Fixed: the accumulator was named ``sum``, shadowing the builtin;
    renamed to ``total``. Behavior is unchanged.
    """
    total = 0
    for i in range(n):
        if i % 2 != 0:  # only odd i contribute
            total += i ** 2
    return total
def d_sum_square(n):
    """Return the sum of squares of the odd integers below n."""
    # range(1, n, 2) enumerates exactly the odd i in [0, n).
    return sum(i * i for i in range(1, n, 2))
|
[
"noreply@github.com"
] |
HelalChow.noreply@github.com
|
964100020297da1c3078fb8f0be88a105eaf54a7
|
46128b87bf516e34c2844b7a2de37606c1381319
|
/backend/apps/crowd_bt/types.py
|
78e1a4852e6817f06d399bb0ee8f67c8685f3450
|
[] |
no_license
|
nxxtlib/votai
|
0f9848ef64375ee2beb07e38d009bdf8360c63ed
|
b8907b23190c1e164d0130538e90356a11f43534
|
refs/heads/master
| 2022-11-24T18:44:51.416348
| 2020-06-01T22:01:08
| 2020-06-01T22:01:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,631
|
py
|
from typing import NewType, NamedTuple

# A relevance score s is modelled as Gaussian-distributed: s ~ N(mu, sigma^2).
Mu = NewType("Mu", float)  # mean of a relevance score
SigmaSquared = NewType("SigmaSquared", float)  # variance of a relevance score

# The probability eta that an annotator agrees with the true pairwise
# preference is modelled as Beta-distributed: eta ~ Beta(alpha, beta).
Alpha = NewType("Alpha", float)
Beta = NewType("Beta", float)

# Normalization constant used to regulate the expected information gain.
C = NewType("C", float)  # pylint: disable=invalid-name
class RelevanceScore(NamedTuple):
    """Relevance score (s) of an item according to annotators.

    Represented as a Gaussian-distributed random variable with mean ``mu``
    and variance ``sigma_squared``: s ~ N(mu, sigma^2).
    """

    mu: Mu
    sigma_squared: SigmaSquared
class AnnotatorConfidence(NamedTuple):
    """Annotator confidence (eta).

    Probability that an annotator agrees with the true pairwise preference,
    represented as a Beta-distributed random variable: eta ~ Beta(alpha, beta).
    """

    alpha: Alpha
    beta: Beta
|
[
"gustavomaronato@gmail.com"
] |
gustavomaronato@gmail.com
|
31fbf2cea496cbee545c55ace24d1fbf333cb2ee
|
52e7f32f1d9cff522d76583036735ddd27cd9f7a
|
/pjs/scoping.py
|
1127f1febebe38098d14ec04302346b69397eb87
|
[] |
no_license
|
niallo/PJs
|
5f8167610a312f249d5f6d64287ee244be29dcf3
|
e0adb313559774fb1798f56f03aea2e3f0abfd3b
|
refs/heads/master
| 2021-01-16T20:56:57.175927
| 2013-07-13T21:11:54
| 2013-07-13T21:11:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,421
|
py
|
import ast
from converter import register as converts, PJsNotImplemented
import utils
# JS text templates filled in with ``TEMPLATE % dct`` by the converters below.
# Placeholders: left/rname = assignment target / resolved name, dec_front /
# dec_back = decorator call wrappers, special = extra $def() arguments,
# contents = converted body.
FUNC_TEMPLATE = '''\
%(left)s = %(dec_front)s$def(%(special)sfunction $_%(name)s(%(args)s) { // %(lineno)d
%(contents)s
})%(dec_back)s;
%(rname)s.__module__ = _.__name__;
%(rname)s.__name__ = $b.str("%(name)s");
'''

# Lambdas become an anonymous $def with an expression body.
LAMBDA_TEMPLATE = '''\
$def(%(special)sfunction $_lambda(%(args)s) {return %(contents)s;})'''

# Classes build their namespace object __N inside an IIFE and pass it to Class().
CLASS_TEMPLATE = '''\
%(left)s = %(dec_front)sClass('%(name)s', [%(bases)s], (function(){
var __%(lnum)s = {};
%(contents)s
return __%(lnum)s;
}()))%(dec_back)s;
%(rname)s.__module__ = _.__name__;
'''
@converts(ast.FunctionDef)
def functiondef(conv, node, scope):
    """Convert an ast.FunctionDef into JS text via FUNC_TEMPLATE.

    NOTE(review): reads ``arg.id`` (via function_args/function_special),
    which is the Python 2 AST layout — py3 arg nodes use ``.arg``.
    """
    dct = {
        'name': node.name,
        'lineno': node.lineno,
        'special': function_special(conv, node, scope),
        'left': utils.lhand_assign(node.name, scope),
        'rname': utils.resolve(node.name, scope),
    }
    args = function_args(conv, node, scope)
    dct['args'] = ', '.join(args)
    # Decorators wrap the $def(...) expression: dec1(dec2($def(...)))
    dct['dec_front'] = ''
    dct['dec_back'] = ''
    for dec in node.decorator_list:
        dct['dec_front'] += conv.convert_node(dec, scope) + '('
        dct['dec_back'] += ')'
    # Child scope: args become locals, implicit local declarations allowed.
    scope = scope.copy()
    scope.explicit_locals = False
    scope.locals += args
    dct['contents'] = utils.fix_undef(conv.convert_block(node.body, scope), scope)
    return FUNC_TEMPLATE % dct
def function_args(conv, node, scope):
    """Collect positional argument names, then *args / **kwargs if present."""
    names = [arg.id for arg in node.args.args]
    for extra in (node.args.vararg, node.args.kwarg):
        if extra:
            names.append(extra)
    return names
def function_special(conv, node, scope):
    """Render the leading "special" argument chunk passed to $def(...).

    Emits defaults, then a lowercase vararg flag and a kwarg flag, omitting
    trailing pieces that are not needed (empty string when nothing applies).
    """
    defaults = function_defaults(conv, node, scope)
    if node.args.kwarg:
        vararg_flag = str(bool(node.args.vararg)).lower()
        return '%s, %s, true, ' % (defaults, vararg_flag)
    if node.args.vararg:
        return defaults + ', true, '
    if defaults != '{}':
        return defaults + ', '
    return ''
def function_defaults(conv, node, scope):
    """Build a JS object literal mapping trailing arg names to default exprs.

    Defaults align with the last arguments, hence the reversed pairing.
    """
    arg_names = [arg.id for arg in node.args.args]
    pairs = []
    for default, name in zip(reversed(node.args.defaults), reversed(arg_names)):
        pairs.append("'%s': %s" % (name, conv.convert_node(default, scope)))
    return '{' + ', '.join(pairs) + '}'
@converts(ast.Lambda)
def lambdadef(conv, node, scope):
    """Convert an ast.Lambda into JS text via LAMBDA_TEMPLATE.

    Same scope handling as functiondef, but the body is a single expression
    and there is no name, decorators, or module attribute assignment.
    """
    dct = {
        'special': function_special(conv, node, scope),
    }
    args = function_args(conv, node, scope)
    dct['args'] = ', '.join(args)
    # Child scope: args become locals, implicit local declarations allowed.
    scope = scope.copy()
    scope.explicit_locals = False
    scope.locals += args
    dct['contents'] = utils.fix_undef(conv.convert_node(node.body, scope), scope)
    return LAMBDA_TEMPLATE % dct
@converts(ast.ClassDef)
def classdef(conv, node, scope):
    """Convert an ast.ClassDef into JS text via CLASS_TEMPLATE.

    Fixed: removed the unused local ``imports = []`` (never read or
    returned). Everything else is unchanged.

    NOTE(review): ``base.id`` assumes bases are plain Name nodes — attribute
    bases (e.g. ``mod.Base``) would raise AttributeError.
    """
    dct = {
        'name': node.name,
        'bases': ', '.join(utils.resolve(base.id, scope) for base in node.bases),
        'left': utils.lhand_assign(node.name, scope),
        'rname': utils.resolve(node.name, scope),
    }
    # Decorators wrap the Class(...) expression: dec1(dec2(Class(...)))
    dct['dec_front'] = ''
    dct['dec_back'] = ''
    for dec in node.decorator_list:
        dct['dec_front'] += conv.convert_node(dec, scope) + '('
        dct['dec_back'] += ')'
    # Class bodies use explicit locals (assignments go on the __N namespace).
    scope = scope.copy()
    scope.explicit_locals = True
    dct['contents'] = utils.fix_undef(conv.convert_block(node.body, scope), scope)
    dct['lnum'] = len(scope.parent_locals)
    return CLASS_TEMPLATE % dct
# vim: et sw=4 sts=4
|
[
"jared@jaredforsyth.com"
] |
jared@jaredforsyth.com
|
3cdece2e48e8bed2a644484e138ec349ae54e1ab
|
82bab97dc70cad2e8a64c9563eb36694899683b0
|
/launcher.py
|
3d579fd84cf1d0a949f0dba7a5fb6f329e90bd83
|
[] |
no_license
|
lubusax/ras_1901
|
a2a0297c5751d9bd2cc10d5790a67df76779580c
|
7f4a590f7e2b9b70c47e2c4ec7615f2aca91f57a
|
refs/heads/master
| 2020-04-16T02:59:00.048554
| 2019-01-20T18:40:49
| 2019-01-20T18:40:49
| 165,215,773
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,501
|
py
|
#! /usr/bin/python3.5
# Terminal launcher: wires up the hardware peripherals and the Odoo backend,
# then (at the bottom of the file) runs the main loop.
import os, sys, time
from dicts.ras_dic import WORK_DIR, PinsBuzzer, PinsDown, PinsOK
from lib import Display, CardReader, PasBuz, Button
from lib import Odooxlm, Tasks

# Hardware peripherals (buzzer, display, RFID reader, two push buttons).
Buz = PasBuz.PasBuz( PinsBuzzer )
Disp = Display.Display()
Reader = CardReader.CardReader()
B_Down = Button.Button( PinsDown )
B_OK = Button.Button( PinsOK )
Hardware = [ Buz, Disp, Reader, B_Down, B_OK]
Odoo = Odooxlm.Odooxlm() # communicate via xlm
# NOTE: this rebinding shadows the imported Tasks module with the instance;
# from here on `Tasks` refers to the Tasks object.
Tasks = Tasks.Tasks( Odoo, Hardware )
def ask_twice():
    """Confirm a destructive option: ask 'sure?' and wait for OK or Down.

    OK a second time runs the selected task; Down cancels with a buzz.
    Uses the module-level Buz/Disp/B_OK/B_Down hardware objects.
    """
    Buz.Play('OK')
    Disp.display_msg('sure?')
    B_OK.pressed = False  # avoid false positives
    B_Down.pressed = False
    time.sleep(0.4)  # allow time to take the finger away from the button
    while not ( B_OK.pressed or B_Down.pressed):  # wait for an answer
        B_Down.scanning()
        B_OK.scanning()
    if B_OK.pressed:  # OK pressed for a second time
        Tasks.selected()  # The selected Task is run.
        # When the Admin Card is swiped the program returns here again.
    else:
        Buz.Play('down')  # Down pressed: cancel
    time.sleep(0.4)  # allow time to take the finger away from the button
    B_OK.pressed = False  # avoid false positives
    B_Down.pressed = False
def main_loop():
    """Menu loop of the terminal; only ends when the reboot option is chosen.

    In all the Tasks, when the Admin Card is swiped the program returns to
    this loop, where a new Task can be selected with the OK / Down buttons.
    """
    Disp.initial_display()
    if not Tasks.wifi_active():  # make sure that the Terminal is
        Tasks.reset_wifi()       # connected to a WiFi
    if not Odoo.uid:             # make sure that we have
        Tasks.reset_odoo()       # access to an odoo db
    Tasks.selected()  # on power-up go straight to the predefined Task
    while not ( Tasks.reboot == True ):
        Disp.display_msg( Tasks.option_name() )
        if B_OK.pressed:
            # NOTE: Tasks.ask_twice here is a collection of option names on
            # the Tasks object; the local ask_twice() below is the prompt.
            if (Tasks.option_name() in Tasks.ask_twice):
                ask_twice()
            else:
                Tasks.selected()
        elif B_Down.pressed:
            Tasks.down()
        B_Down.scanning()  # If no Button was Pressed
        B_OK.scanning()    # continue scanning
    Disp.display_msg('shut_down')
    time.sleep(1.5)
    Disp.clear_display()
    # os.system('sudo reboot')

main_loop()
|
[
"lu.bu.sax@gmail.com"
] |
lu.bu.sax@gmail.com
|
d8ea0a8e9fe2cd0625dc5ac26cddd66c3fed3058
|
00c6ded41b84008489a126a36657a8dc773626a5
|
/.history/Sizing_Method/ConstrainsAnalysis/DesignPointSelectStrategy_20210714184448.py
|
c1f95465cbf3857a24980aee211e2520759b0265
|
[] |
no_license
|
12libao/DEA
|
85f5f4274edf72c7f030a356bae9c499e3afc2ed
|
1c6f8109bbc18c4451a50eacad9b4dedd29682bd
|
refs/heads/master
| 2023-06-17T02:10:40.184423
| 2021-07-16T19:05:18
| 2021-07-16T19:05:18
| 346,111,158
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,588
|
py
|
# author: Bao Li #
# Georgia Institute of Technology #
import sys
import os
sys.path.insert(0, os.getcwd())
import numpy as np
import matplotlib.pylab as plt
import Sizing_Method.Other.US_Standard_Atmosphere_1976 as atm
import Sizing_Method.Aerodynamics.ThrustLapse as thrust_lapse
import Sizing_Method.Aerodynamics.Aerodynamics as ad
import Sizing_Method.ConstrainsAnalysis.ConstrainsAnalysis as ca
import Sizing_Method.ConstrainsAnalysis.ConstrainsAnalysisPD as ca_pd
import Sizing_Method.ConstrainsAnalysis.ConstrainsAnalysisPDP1P2 as ca_pd_12
from scipy.optimize import curve_fit
"""
The unit use is IS standard
"""
class Design_Point_Select_Strategy:
"""This is a design point select strategy from constrains analysis"""
    def __init__(self, altitude, velocity, beta, method, p_turbofan_max, p_motorfun_max, n=12):
        """Design-point selection from constraints analysis.

        :param altitude: m x 1 matrix of altitudes
        :param velocity: m x 1 matrix of velocities
        :param beta: P_motor/P_total, m x 1 matrix
        :param method: 1 selects the Mattingly method, anything else Gudmundsson
        :param p_turbofan_max: maximum turbofan propulsion power (threshold)
        :param p_motorfun_max: maximum motor propulsion power (threshold)
        :param n: number of motors (NOTE: stored as n_motor; self.n is
            reassigned to 100 below as the hp-grid resolution)

        The first flight condition (index 0) is the stall-speed condition and
        is used to fix the wing loading w_s.
        """
        self.h = altitude
        self.v = velocity
        self.beta = beta
        self.n_motor = n
        self.p_turbofan_max = p_turbofan_max
        self.p_motorfun_max = p_motorfun_max
        # initialize the p_w, w_s, hp, n, m
        self.n = 100  # resolution of the hybridization-parameter grid
        self.m = len(self.h)  # number of flight conditions
        self.hp = np.linspace(0, 1, self.n)
        self.hp_threshold = self.p_motorfun_max / (self.p_motorfun_max + self.p_turbofan_max)
        # method1 = Mattingly_Method, method2 = Gudmundsson_Method
        if method == 1:
            self.method1 = ca_pd_12.ConstrainsAnalysis_Mattingly_Method_with_DP_turbofun
            self.method2 = ca_pd_12.ConstrainsAnalysis_Mattingly_Method_with_DP_electric
        else:
            self.method1 = ca_pd_12.ConstrainsAnalysis_Gudmundsson_Method_with_DP_turbofun
            self.method2 = ca_pd_12.ConstrainsAnalysis_Gudmundsson_Method_with_DP_electric
        # NOTE(review): `self.method` is never assigned (only method1/method2
        # above), so this line raises AttributeError at runtime — it likely
        # should call self.method1 or self.method2. TODO confirm intent.
        problem = self.method(self.h[0], self.v[0], self.beta[0], 6000, self.hp_threshold)
        self.w_s = problem.allFuncs[0](problem)
    def p_w_compute(self):
        """Fill self.p_w with combined power loading over conditions x hp grid.

        Rows are flight conditions, columns the hybridization parameter hp.
        NOTE(review): iterates a hard-coded range(1, 8) rather than self.m —
        assumes exactly 8 flight conditions; row 0 (stall) stays zero.
        """
        self.p_w = np.zeros([self.m, self.n])  # m x n matrix
        for i in range(1, 8):
            for j in range(self.n):
                problem1 = self.method1(self.h[i], self.v[i],
                                        self.beta[i], self.w_s, self.hp[j])
                problem2 = self.method2(self.h[i], self.v[i],
                                        self.beta[i], self.w_s, self.hp[j])
                if i >= 5:
                    # conditions 5..7 are climb constraints with decreasing
                    # rate of climb: 15, 10, 5
                    p_w_1 = problem1.allFuncs[-1](problem1, roc=15 - 5 * (i - 5))
                    p_w_2 = problem2.allFuncs[-1](problem2, roc=15 - 5 * (i - 5))
                else:
                    p_w_1 = problem1.allFuncs[i](problem1)
                    p_w_2 = problem2.allFuncs[i](problem2)
                # Penalize infeasible points that exceed the power thresholds.
                # NOTE(review): the elif means only one of the two penalties
                # can apply per point — possibly intended as two ifs; confirm.
                if p_w_1 > self.p_turbofan_max:
                    p_w_1 = 100000
                elif p_w_2 > self.p_motorfun_max:
                    p_w_2 = 100000
                self.p_w[i, j] = p_w_1 + p_w_2
        return self.p_w
def strategy(self):
#find the min p_w for difference hp for each flight condition:
for i in range
|
[
"libao@gatech.edu"
] |
libao@gatech.edu
|
b93fcfdfac7b65cb81f229e57d46e240ab834093
|
920ab19b73a7cba21d340a49d9d24e2d1eeabf3d
|
/idps/lib/python3.7/site-packages/identify/extensions.py
|
e7aa969e273222d1c0e97ee9afa785c23c756811
|
[
"MIT"
] |
permissive
|
DTrafford/IDPS
|
5fa2b73f2c47cbf50b90a1a786c10f7d69c995b4
|
1eaccfc218adcb7231e64271731c765f8362b891
|
refs/heads/master
| 2022-12-16T16:28:34.801962
| 2020-03-30T18:08:09
| 2020-03-30T18:08:09
| 234,163,829
| 0
| 0
|
MIT
| 2020-09-10T06:26:02
| 2020-01-15T20:10:09
|
Python
|
UTF-8
|
Python
| false
| false
| 6,615
|
py
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import unicode_literals
EXTENSIONS = {
'apinotes': {'text', 'apinotes'},
'asar': {'binary', 'asar'},
'bash': {'text', 'shell', 'bash'},
'bat': {'text', 'batch'},
'bmp': {'binary', 'image', 'bitmap'},
'bz2': {'binary', 'bzip2'},
'c': {'text', 'c'},
'cc': {'text', 'c++'},
'cu': {'text', 'cuda'},
'cfg': {'text'},
'cmake': {'text', 'cmake'},
'cnf': {'text'},
'coffee': {'text', 'coffee'},
'conf': {'text'},
'cpp': {'text', 'c++'},
'crt': {'text', 'pem'},
'cs': {'text', 'c#'},
'cson': {'text', 'cson'},
'css': {'text', 'css'},
'csv': {'text', 'csv'},
'cxx': {'text', 'c++'},
'dart': {'text', 'dart'},
'def': {'text', 'def'},
'dtd': {'text', 'dtd'},
'ear': {'binary', 'zip', 'jar'},
'ejs': {'text', 'ejs'},
'eot': {'binary', 'eot'},
'eps': {'binary', 'eps'},
'erb': {'text', 'erb'},
'exe': {'binary'},
'eyaml': {'text', 'yaml'},
'feature': {'text', 'gherkin'},
'fish': {'text', 'fish'},
'gemspec': {'text', 'ruby'},
'gif': {'binary', 'image', 'gif'},
'go': {'text', 'go'},
'gotmpl': {'text', 'gotmpl'},
'gpx': {'text', 'gpx', 'xml'},
'gradle': {'text', 'groovy'},
'groovy': {'text', 'groovy'},
'gyb': {'text', 'gyb'},
'gyp': {'text', 'gyp', 'python'},
'gypi': {'text', 'gyp', 'python'},
'gz': {'binary', 'gzip'},
'h': {'text', 'header', 'c', 'c++'},
'hpp': {'text', 'header', 'c++'},
'htm': {'text', 'html'},
'html': {'text', 'html'},
'hxx': {'text', 'header', 'c++'},
'icns': {'binary', 'icns'},
'ico': {'binary', 'icon'},
'ics': {'text', 'icalendar'},
'idl': {'text', 'idl'},
'inc': {'text', 'inc'},
'ini': {'text', 'ini'},
'j2': {'text', 'jinja'},
'jade': {'text', 'jade'},
'jar': {'binary', 'zip', 'jar'},
'java': {'text', 'java'},
'jenkinsfile': {'text', 'groovy'},
'jinja': {'text', 'jinja'},
'jinja2': {'text', 'jinja'},
'jpeg': {'binary', 'image', 'jpeg'},
'jpg': {'binary', 'image', 'jpeg'},
'js': {'text', 'javascript'},
'json': {'text', 'json'},
'jsonnet': {'text', 'jsonnet'},
'jsx': {'text', 'jsx'},
'key': {'text', 'pem'},
'kml': {'text', 'kml', 'xml'},
'kt': {'text', 'kotlin'},
'less': {'text', 'less'},
'lua': {'text', 'lua'},
'm': {'text', 'c', 'objective-c'},
'manifest': {'text', 'manifest'},
'map': {'text', 'map'},
'markdown': {'text', 'markdown'},
'md': {'text', 'markdown'},
'mib': {'text', 'mib'},
'mk': {'text', 'makefile'},
'mm': {'text', 'c++', 'objective-c++'},
'modulemap': {'text', 'modulemap'},
'ngdoc': {'text', 'ngdoc'},
'nim': {'text', 'nim'},
'nims': {'text', 'nim'},
'nimble': {'text', 'nimble'},
'nix': {'text', 'nix'},
'otf': {'binary', 'otf'},
'p12': {'binary', 'p12'},
'patch': {'text', 'diff'},
'pdf': {'binary', 'pdf'},
'pem': {'text', 'pem'},
'php': {'text', 'php'},
'php4': {'text', 'php'},
'php5': {'text', 'php'},
'phtml': {'text', 'php'},
'pl': {'text', 'perl'},
'plantuml': {'text', 'plantuml'},
'png': {'binary', 'image', 'png'},
'po': {'text', 'pofile'},
'pp': {'text', 'puppet'},
'properties': {'text', 'java-properties'},
'proto': {'text', 'proto'},
'purs': {'text', 'purescript'},
'py': {'text', 'python'},
'pyi': {'text', 'pyi'},
'pyx': {'text', 'cython'},
'pxd': {'text', 'cython'},
'pxi': {'text', 'cython'},
'r': {'text', 'r'},
'rb': {'text', 'ruby'},
'rs': {'text', 'rust'},
'rst': {'text', 'rst'},
's': {'text', 'asm'},
'sbt': {'text', 'sbt', 'scala'},
'sc': {'text', 'scala'},
'scala': {'text', 'scala'},
'scss': {'text', 'scss'},
'sh': {'text', 'shell'},
'sls': {'text', 'salt'},
'so': {'binary'},
'sol': {'text', 'solidity'},
'spec': {'text', 'spec'},
'styl': {'text', 'stylus'},
'sql': {'text', 'sql'},
'svg': {'text', 'svg'},
'swf': {'binary', 'swf'},
'swift': {'text', 'swift'},
'swiftdeps': {'text', 'swiftdeps'},
'tac': {'text', 'twisted', 'python'},
'tar': {'binary', 'tar'},
'tgz': {'binary', 'gzip'},
'thrift': {'text', 'thrift'},
'tiff': {'binary', 'image', 'tiff'},
'toml': {'text', 'toml'},
'tf': {'text', 'terraform'},
'ts': {'text', 'ts'},
'tsx': {'text', 'tsx'},
'ttf': {'binary', 'ttf'},
'txt': {'text', 'plain-text'},
'vdx': {'text', 'vdx'},
'vim': {'text', 'vim'},
'vue': {'text', 'vue'},
'war': {'binary', 'zip', 'jar'},
'wav': {'binary', 'audio', 'wav'},
'wkt': {'text', 'wkt'},
'whl': {'binary', 'wheel', 'zip'},
'woff': {'binary', 'woff'},
'woff2': {'binary', 'woff2'},
'wsgi': {'text', 'wsgi', 'python'},
'xml': {'text', 'xml'},
'xq': {'text', 'xquery'},
'xql': {'text', 'xquery'},
'xqm': {'text', 'xquery'},
'xqu': {'text', 'xquery'},
'xquery': {'text', 'xquery'},
'xqy': {'text', 'xquery'},
'xsd': {'text', 'xml', 'xsd'},
'xsl': {'text', 'xml', 'xsl'},
'yaml': {'text', 'yaml'},
'yang': {'text', 'yang'},
'yin': {'text', 'xml', 'yin'},
'yml': {'text', 'yaml'},
'zig': {'text', 'zig'},
'zip': {'binary', 'zip'},
'zsh': {'text', 'shell', 'zsh'},
}
EXTENSIONS_NEED_BINARY_CHECK = {
'plist': {'plist'},
}
NAMES = {
'.babelrc': {'text', 'json', 'babelrc'},
'.bowerrc': {'text', 'json', 'bowerrc'},
'.coveragerc': {'text', 'ini', 'coveragerc'},
'.dockerignore': {'text', 'dockerignore'},
'.editorconfig': {'text', 'editorconfig'},
'.gitattributes': {'text', 'gitattributes'},
'.gitignore': {'text', 'gitignore'},
'.gitmodules': {'text', 'gitmodules'},
'.jshintrc': {'text', 'json', 'jshintrc'},
'.mailmap': {'text', 'mailmap'},
'.mention-bot': {'text', 'json', 'mention-bot'},
'.npmignore': {'text', 'npmignore'},
'.yamllint': {'text', 'yaml', 'yamllint'},
'AUTHORS': EXTENSIONS['txt'],
'CMakeLists.txt': EXTENSIONS['cmake'],
'COPYING': EXTENSIONS['txt'],
'Dockerfile': {'text', 'dockerfile'},
'Gemfile': EXTENSIONS['rb'],
'Jenkinsfile': {'text', 'groovy'},
'LICENSE': EXTENSIONS['txt'],
'MAINTAINERS': EXTENSIONS['txt'],
'Makefile': EXTENSIONS['mk'],
'NOTICE': EXTENSIONS['txt'],
'PATENTS': EXTENSIONS['txt'],
'Pipfile': EXTENSIONS['toml'],
'Pipfile.lock': EXTENSIONS['json'],
'README': EXTENSIONS['txt'],
'Rakefile': EXTENSIONS['rb'],
'setup.cfg': EXTENSIONS['ini'],
}
|
[
"d.trafford@outlook.com"
] |
d.trafford@outlook.com
|
66e2bb71af508d27ce94ce064013eb5f466c0f3e
|
88e06bab1989c81a2dd649bb09b144fa7c958f89
|
/leet_construct_binary_tree_from_preorder_and_inorder.py
|
8cb4b670c2f1f3cd6c7fe8901450b6acf460ad69
|
[] |
no_license
|
VaibhavD143/Coding
|
4499526b22ee4ef13f66c3abcea671c80a8f748a
|
5de3bae8891c7d174cbc847a37c3afb00dd28f0e
|
refs/heads/master
| 2023-08-06T21:56:44.934954
| 2021-10-09T18:31:29
| 2021-10-09T18:31:29
| 263,890,286
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 924
|
py
|
"""
To understand base:
take example
[8,5,2,3,4,6,7,9]
[3,2,4,5,7,6,9,8]
[3,9,20,15,7]
[9,3,15,20,7]
"""
# Definition for a binary tree node.
class TreeNode:
    """Binary-tree node: a value plus optional left/right children."""

    def __init__(self, val=0, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right
class Solution:
    # NOTE(review): `List` requires `from typing import List`; on LeetCode the
    # judge harness injects it, so this file fails as-is outside that harness.
    def buildTree(self, preorder: List[int], inorder: List[int]) -> TreeNode:
        """Rebuild a binary tree from its preorder and inorder traversals.

        Assumes all node values are distinct (required for the `ind` lookup).
        Runs in O(n): each call is O(1) thanks to the index map.
        """
        ind = {val:i for i,val in enumerate(inorder)}
        def rec(ps,pe,base):
            # Build the subtree for preorder[ps..pe]; `base` is the inorder
            # index where this subtree's slice begins.
            if ps == pe:
                return TreeNode(preorder[ps])  # single-node subtree
            if ps>pe:
                return None  # empty subtree
            root = TreeNode(preorder[ps])
            i = ind[preorder[ps]] #position in inorder
            diff = i-base #number of elements in left tree of root
            root.left = rec(ps+1,ps+diff,base)
            root.right = rec(ps+diff+1,pe,i+1)
            return root
        return rec(0,len(preorder)-1,0)
|
[
"vaibhav.dodiya143vd@gmail.com"
] |
vaibhav.dodiya143vd@gmail.com
|
774ed71b32ee8d05b954da997e212641803eb3da
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/otherforms/_inkier.py
|
178f6bd2151eec1fdf19af11edad7fd9981faaa8
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 216
|
py
|
#calss header
class _INKIER():
def __init__(self,):
self.name = "INKIER"
self.definitions = inky
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['inky']
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
615cfacbf839f0821c7d576ef7d3c6b6b6f562ad
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03209/s218432315.py
|
e2d669ba01785a86ae398f08408031d285e65950
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 329
|
py
|
# Level-N layered-burger problem: count patties in the bottom X layers.
N, X = map(int, input().split())

# siz[k]: total layers of a level-k burger; pat[k]: patties it contains.
# A level-k burger is bun + level-(k-1) + patty + level-(k-1) + bun.
siz = [1]
pat = [1]
for _ in range(N):
    siz.append(2 * siz[-1] + 3)
    pat.append(2 * pat[-1] + 1)

def rec(n, x):
    """Patties among the bottom x layers of a level-n burger."""
    if n == 0:
        return int(x > 0)  # a level-0 burger is a single patty
    if x <= 1 + siz[n - 1]:
        # still inside the bottom bun + lower sub-burger
        return rec(n - 1, x - 1)
    # whole lower half eaten: its patties, the middle patty, then recurse up
    return pat[n - 1] + 1 + rec(n - 1, x - 2 - siz[n - 1])

print(rec(N, X))
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
cf17ba00092630b465a26dc9a485c9062396af08
|
9c4a70475f48b81b7b0d895e07b012dd8aca2c2d
|
/backend/remp_28495/urls.py
|
364caaa46eb9dc964112a11701b2e8b40501fda0
|
[] |
no_license
|
crowdbotics-apps/remp-28495
|
81c5963490654cf5c7a977936a62b816ff967e5f
|
f2430f3b7dd53d9ff43465fe12da1dd28925e773
|
refs/heads/master
| 2023-06-10T21:07:46.400603
| 2021-07-06T16:45:51
| 2021-07-06T16:45:51
| 383,537,825
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,187
|
py
|
"""remp_28495 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include, re_path
from django.views.generic.base import TemplateView
from allauth.account.views import confirm_email
from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
# URL routing for the remp app. Order matters: Django uses the first match,
# and the catch-all SPA fallback at the bottom must stay last.
urlpatterns = [
    path("", include("home.urls")),
    path("accounts/", include("allauth.urls")),
    path("modules/", include("modules.urls")),
    path("api/v1/", include("home.api.v1.urls")),
    path("admin/", admin.site.urls),
    path("users/", include("users.urls", namespace="users")),
    path("rest-auth/", include("rest_auth.urls")),
    # Override email confirm to use allauth's HTML view instead of rest_auth's API view
    path("rest-auth/registration/account-confirm-email/<str:key>/", confirm_email),
    path("rest-auth/registration/", include("rest_auth.registration.urls")),
]

# Admin site branding.
admin.site.site_header = "remp"
admin.site.site_title = "remp Admin Portal"
admin.site.index_title = "remp Admin"

# swagger — OpenAPI schema + browsable docs (authenticated users only).
api_info = openapi.Info(
    title="remp API",
    default_version="v1",
    description="API documentation for remp App",
)
schema_view = get_schema_view(
    api_info,
    public=True,
    permission_classes=(permissions.IsAuthenticated,),
)
urlpatterns += [
    path("api-docs/", schema_view.with_ui("swagger", cache_timeout=0), name="api_docs")
]
# SPA fallback routes.
# NOTE(review): the root path "" is already routed to home.urls above, so this
# TemplateView for "" is unreachable — presumably intentional fallback layering.
urlpatterns += [path("", TemplateView.as_view(template_name='index.html'))]
urlpatterns += [re_path(r"^(?:.*)/?$",
                        TemplateView.as_view(template_name='index.html'))]
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
b9ff2167f40f7d7d526d185d522bbd4d81142ef8
|
ec3362fe2ef1f23a9b1fad9469b6a2ec89beda3a
|
/hey-brether.py
|
f1125635a446a6c1bfc49963cb83ca9edc32f4fd
|
[
"MIT"
] |
permissive
|
tomcola512/hey-brether
|
dcbee74d55c5a2c6cadb188d450af0862fc9041f
|
42fb0114662476ffd8f3b091950bce6cbe836047
|
refs/heads/master
| 2020-03-24T19:21:29.053260
| 2018-07-30T19:53:16
| 2018-07-30T19:53:16
| 142,921,495
| 0
| 0
|
MIT
| 2018-07-30T19:51:26
| 2018-07-30T19:51:25
| null |
UTF-8
|
Python
| false
| false
| 504
|
py
|
#!/usr/bin/env python3
import sys
from typing import List
def form_letter(letter: str) -> tuple:
parts = [f':z_{letter}_{i}:' for i in range(4)]
return ''.join(parts[:2]), ''.join(parts[2:])
def form_word(word: str) -> str:
lines = [' '.join([form_letter(s)[i] for s in word]) for i in range(2)]
return '\n'.join(lines)
def hey_brether(words: List[str]) -> str:
return '\n\n'.join([form_word(w) for w in words])
if __name__ == "__main__":
print(hey_brether(sys.argv[1:]))
|
[
"austin@austinpray.com"
] |
austin@austinpray.com
|
776e156d066891997db6fdf2b1cfc8af363bc051
|
d07b91e42e32b0a0642254a460bc56a546f60a63
|
/source/lambdas/sns/handler.py
|
c8c6345aff30656a63532726508335396e977ac4
|
[
"LicenseRef-scancode-unknown-license-reference",
"Python-2.0",
"Apache-2.0",
"BSD-3-Clause",
"MIT"
] |
permissive
|
emmanuellim/improving-forecast-accuracy-with-machine-learning
|
81a30674f24d8249b7a55d6cce4fabe4f8fb4fdf
|
2470b13c4b23861907c326cb2c3fdb6fbf4b2397
|
refs/heads/master
| 2023-01-14T13:41:42.978184
| 2020-11-24T19:07:35
| 2020-11-24T19:07:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,068
|
py
|
# #####################################################################################################################
# Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. #
# #
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance #
# with the License. A copy of the License is located at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# or in the 'license' file accompanying this file. This file is distributed on an 'AS IS' BASIS, WITHOUT WARRANTIES #
# OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions #
# and limitations under the License. #
# #####################################################################################################################
import json
import os
from shared.Dataset.dataset_file import DatasetFile
from shared.helpers import get_sns_client
from shared.logging import get_logger
logger = get_logger(__name__)
def topic_arn():
"""
Get the SNS topic ARN from environment variable
:return: The SNS topic ARN
"""
return os.environ["SNS_TOPIC_ARN"]
def prepare_forecast_ready_message(event: dict):
"""
Prepare a message to notify users that forecasts are ready.
:param file: the DatasetFile that was updated to trigger this message
:return: message or none
"""
dataset_group = event.get("dataset_group_name")
message = f"Forecast for {dataset_group} is ready!"
return message
def build_message(event):
"""
Build a message for SNS to publish
:param event: the lambda event containing the message
:return: the message to publish
"""
message = ""
error = None
file = DatasetFile(event.get("dataset_file"), event.get("bucket"))
forecast_for = event.get("dataset_group_name", file.prefix)
if "statesError" in event.keys():
logger.info("State error message encountered")
message += f"There was an error running the forecast for {forecast_for}\n\n"
error = event.get("statesError")
if "serviceError" in event.keys():
logger.info("Service error message encountered")
message += (
f"There was a service error running the forecast for {forecast_for}\n\n"
)
error = event.get("serviceError")
if error:
error_type = error.get("Error", "Unknown")
error_cause = json.loads(error.get("Cause", "{}"))
error_message = error_cause.get("errorMessage")
stack_trace = error_cause.get("stackTrace")
message += f"Message: {error_message}\n\n"
if error_type == "DatasetsImporting":
message = f"Update for forecast {forecast_for}\n\n"
message += error_message
else:
message += f"Details: (caught {error_type})\n\n"
if stack_trace:
message += f"\n".join(stack_trace)
else:
message = prepare_forecast_ready_message(event)
return message
def sns(event, context):
"""
Send an SNS message
:param event: Lambda event
:param context: Lambda context
:return: None
"""
cli = get_sns_client()
message = build_message(event)
if message:
logger.info("Publishing message for event: %s" % event)
cli.publish(TopicArn=topic_arn(), Message=message)
else:
logger.info("No message to publish for event: %s" % event)
|
[
"ssdzd@amazon.com"
] |
ssdzd@amazon.com
|
6a6c9040cc8399e78b2e31e4a7d73c082ce17201
|
15e8a393f6c71ba77094a1718f4f89050409c7ae
|
/accounts/views.py
|
aa64a6b91daa4cef6e6f338622c3f42048dcea75
|
[] |
no_license
|
emilte/johansson
|
21a3e20208c67725776af0f94de4c29150935b50
|
d16bdde26e840814562f668904b2f5588c0a13ad
|
refs/heads/master
| 2023-07-23T21:01:32.830302
| 2021-09-05T14:56:01
| 2021-09-05T14:56:01
| 390,360,563
| 0
| 0
| null | 2021-08-30T00:42:49
| 2021-07-28T13:26:31
|
SCSS
|
UTF-8
|
Python
| false
| false
| 5,049
|
py
|
# imports
import math
from openpyxl import Workbook
from django.views import View
from django.urls import reverse
from django.conf import settings
from django.http import HttpResponse, HttpResponseRedirect, FileResponse
from django.utils import timezone
from django.contrib import messages
from django.db.models import Q, Avg, Count, Min, Sum
from django.shortcuts import render, redirect, get_object_or_404
from django.contrib.auth import get_user_model; User = get_user_model()
from django.contrib.auth import views as auth_views
from django.contrib.auth import authenticate, login, logout, update_session_auth_hash
from django.utils.decorators import method_decorator
from django.contrib.auth.forms import UserCreationForm, AuthenticationForm, UserChangeForm, PasswordChangeForm
from django.contrib.auth.decorators import login_required
from django.contrib.auth.decorators import login_required, permission_required
from django.contrib.admin.views.decorators import staff_member_required, user_passes_test
from accounts import forms as account_forms
from accounts import models as account_models
# End: imports -----------------------------------------------------------------
profile_dec = [
]
@method_decorator(profile_dec, name='dispatch')
class ProfileView(View):
template = "accounts/profile.html"
def get(self, request, *args, **kwargs):
return render(request, self.template, {
})
@method_decorator(profile_dec, name='dispatch')
class EditProfileView(View):
template = "accounts/edit_profile.html"
form_class = account_forms.EditUserForm
def get(self, request, *args, **kwargs):
form = self.form_class(instance=request.user)
return render(request, self.template, {'form': form})
def post(self, request, *args, **kwargs):
form = self.form_class(data=request.POST, instance=request.user)
if form.is_valid():
form.save()
messages.add_message(request, messages.SUCCESS, f"Profilen din har blitt oppdatert")
return redirect('accounts:profile')
else:
return render(request, self.template, {'form': form})
class SignUpView(View):
template = "accounts/registration_form.html"
form_class = account_forms.SignUpForm
def get(self, request, *args, **kwargs):
form = self.form_class()
return render(request, self.template, {'form': form})
def post(self, request, *args, **kwargs):
form = self.form_class(request.POST)
if form.is_valid():
user = form.save()
login(request, user)
try:
code = form.cleaned_data['code']
group = account_models.PermissionCode.objects.get(secret=code).group
user.groups.add(group)
messages.add_message(request, messages.SUCCESS, f"Med koden '{code}' har du blitt lagt til i avdeling: {group.name}")
except:
messages.add_message(request, messages.INFO, f"Koden '{code}' tilsvarer ingen avdeling. Ta kontakt med admin")
return redirect('home')
else:
return render(request, self.template, {'form': form})
@method_decorator(profile_dec, name='dispatch')
class DeleteUserView(View):
def get(self, request, *args, **kwargs):
request.user.delete()
logout(request)
messages.add_message(request, messages.SUCCESS, f"Brukeren din har blitt slettet fra systemet")
return redirect('home')
# Should use built in template AuthenticationForm
class LoginView(View):
template = "accounts/login.html"
def get(self, request, *args, **kwargs):
return render(request, self.template)
def post(self, request, *args, **kwargs):
email = request.POST['email']
password = request.POST['password']
user = authenticate(request, username=email, password=password)
error = None
if user is not None:
login(request, user)
return redirect('accounts:profile')
else:
error = "Feil"
return render(request, self.template, {'error': error})
@method_decorator(profile_dec, name='dispatch')
class LogoutView(View):
def get(self, request, *args, **kwargs):
logout(request)
return redirect('accounts:login')
@method_decorator(profile_dec, name='dispatch')
class ChangePasswordView(View):
template = "accounts/change_password.html"
form_class = account_forms.CustomPasswordChangeForm
#form_class = PasswordChangeForm
def get(self, request, *args, **kwargs):
form = self.form_class(request=request)
return render(request, self.template, {'form': form})
def post(self, request, *args, **kwargs):
form = self.form_class(data=request.POST, request=request)
if form.is_valid():
user = form.save()
update_session_auth_hash(request, user) # Important!
return redirect("accounts:profile")
return render(request, self.template, {'form': form})
|
[
"emil.telstad@gmail.com"
] |
emil.telstad@gmail.com
|
e3f4ebd0f7997eb84b8e98df2d8ea435590b9e7d
|
b049a961f100444dde14599bab06a0a4224d869b
|
/sdk/python/pulumi_azure_native/appplatform/v20230501preview/__init__.py
|
1dd362384bee7b86480bef71d3f3221e6b6714a0
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
pulumi/pulumi-azure-native
|
b390c88beef8381f9a71ab2bed5571e0dd848e65
|
4c499abe17ec6696ce28477dde1157372896364e
|
refs/heads/master
| 2023-08-30T08:19:41.564780
| 2023-08-28T19:29:04
| 2023-08-28T19:29:04
| 172,386,632
| 107
| 29
|
Apache-2.0
| 2023-09-14T13:17:00
| 2019-02-24T20:30:21
|
Python
|
UTF-8
|
Python
| false
| false
| 2,442
|
py
|
# coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
from ... import _utilities
import typing
# Export this package's modules as members:
from ._enums import *
from .api_portal import *
from .api_portal_custom_domain import *
from .apm import *
from .app import *
from .application_accelerator import *
from .application_live_view import *
from .binding import *
from .build_service_agent_pool import *
from .build_service_build import *
from .build_service_builder import *
from .buildpack_binding import *
from .certificate import *
from .config_server import *
from .configuration_service import *
from .container_registry import *
from .custom_domain import *
from .customized_accelerator import *
from .deployment import *
from .dev_tool_portal import *
from .gateway import *
from .gateway_custom_domain import *
from .gateway_route_config import *
from .get_api_portal import *
from .get_api_portal_custom_domain import *
from .get_apm import *
from .get_app import *
from .get_app_resource_upload_url import *
from .get_application_accelerator import *
from .get_application_live_view import *
from .get_binding import *
from .get_build_service_agent_pool import *
from .get_build_service_build import *
from .get_build_service_build_result_log import *
from .get_build_service_builder import *
from .get_build_service_resource_upload_url import *
from .get_buildpack_binding import *
from .get_certificate import *
from .get_config_server import *
from .get_configuration_service import *
from .get_container_registry import *
from .get_custom_domain import *
from .get_customized_accelerator import *
from .get_deployment import *
from .get_deployment_log_file_url import *
from .get_deployment_remote_debugging_config import *
from .get_dev_tool_portal import *
from .get_gateway import *
from .get_gateway_custom_domain import *
from .get_gateway_route_config import *
from .get_monitoring_setting import *
from .get_service import *
from .get_service_registry import *
from .get_storage import *
from .list_apm_secret_keys import *
from .list_build_service_builder_deployments import *
from .list_service_globally_enabled_apms import *
from .list_service_test_keys import *
from .monitoring_setting import *
from .service import *
from .service_registry import *
from .storage import *
from ._inputs import *
from . import outputs
|
[
"github@mikhail.io"
] |
github@mikhail.io
|
5ebd7f4f17ff38cdf95fc4df4a4fb4883473f0cf
|
929886272e269e59596cf559e1c4fb26b6897e0c
|
/clinicstation/models.py
|
cc62f17a07ef387d7596027b7a816e80d1533442
|
[
"Apache-2.0"
] |
permissive
|
vedpr612/tscharts
|
aab287478407d64449d00c2f021611128a085c74
|
09a482622fc0f6cccc56b688aea81370ab137160
|
refs/heads/master
| 2020-03-09T01:23:19.170052
| 2018-04-05T02:56:36
| 2018-04-05T02:56:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,033
|
py
|
#(C) Copyright Syd Logan 2016
#(C) Copyright Thousand Smiles Foundation 2016
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#
#You may obtain a copy of the License at
#http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
from __future__ import unicode_literals
from django.db import models
from clinic.models import Clinic
from station.models import Station
from patient.models import Patient
'''
A clinic has stations. There are two models:
Station: simply a named location in the clinic. These records in
the database define the universe of all possible stations that a
clinic can be made up of. A station represents a class.
ClinicStation: defines an actual station for a particular clinic.
The station can be marked active or inactive. If inactive, it is
currently not seeing a patient, and the activepatient field
should be set to null (or None in Python). If active is True,
then the activepatient field should contain the ID of the patient
currently being seen. The station can "checkout" the activepatient
and that will cause the activepatient field to be set to NULL,
and the active field to be set to False.
The nextpatient field contains the ID of the next patient to be
seen by a station. When the station is not active, this patient
can be "checked in". When the patient is checked in, the station's
active field is set to True, and the activepatient field will be
assigned the nextpatient value. Then, nextpatient will be set to
the id of the patient next in the queue for this station.
away, awaytime, and willreturn are all used to indicate if the
station is currently manned (or not, perhaps the doctor is at
lunch).
'''
class ClinicStation(models.Model):
name = models.CharField(max_length=64)
station = models.ForeignKey(Station)
clinic = models.ForeignKey(Clinic)
active = models.BooleanField(default=False) # set to True if a patient is being seen
level = models.IntegerField(default=1) # relative importance to scheduler
away = models.BooleanField(default=True) # set to True when station is out to lunch
awaytime = models.IntegerField(default=30) # default minutes when station goes to away state before clinic is returned to (informational only)
willreturn = models.DateTimeField(auto_now_add=True) # estimated time of returen, computed when away is set to True, using the awaytime value
activepatient = models.ForeignKey(Patient, null=True, related_name='nextpatient') # if active, patient of null
nextpatient = models.ForeignKey(Patient, null=True, related_name="activepatient") # next patient to be seen or null
name_es = models.CharField(max_length=64)
|
[
"slogan621@gmail.com"
] |
slogan621@gmail.com
|
b8068d18d1dfcb398cb0e4564f4460bd7017fa22
|
46667df8344db58698838d677bdae377b3c3c53c
|
/Data Manipulation with Pandas/Part 2/24.downsampling-data.py
|
066a85b0f5cfb6d72099f5c86d7950dd93723f2b
|
[] |
no_license
|
bennysetiawan/DQLab-Career-2021
|
278577cdddb3852c57f799cd1207b4ff45962960
|
0822d15e3b24cf0146c23456d4b65b0fb00a53fc
|
refs/heads/master
| 2023-06-06T13:24:21.289929
| 2021-06-23T17:09:14
| 2021-06-23T17:09:14
| 379,657,598
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 792
|
py
|
import pandas as pd
# Load dataset https://dqlab-dataset.s3-ap-southeast-1.amazonaws.com/LO4/global_air_quality_4000rows.csv
gaq = pd.read_csv('https://dqlab-dataset.s3-ap-southeast-1.amazonaws.com/LO4/global_air_quality_4000rows.csv')
gaq['timestamp'] = pd.to_datetime(gaq['timestamp'])
gaq = gaq.set_index('timestamp')
print('Dataset sebelum di-downsampling (5 teratas):\n', gaq.head())
# [1] Downsampling dari daily to weekly dan kita hitung maksimum untuk seminggu
gaq_weekly = gaq.resample('W').max()
print('Downsampling daily to weekly - max (5 teratas):\n', gaq_weekly.head())
# [2] Downsampling dari daily to quaterly dan kita hitung minimumnya untuk tiap quarter
gaq_quaterly = gaq.resample('Q').min()
print('Downsampling daily to quaterly - min (5 teratas):\n', gaq_quaterly.head())
|
[
"setiawanb25@gmail.com"
] |
setiawanb25@gmail.com
|
aae8ed71564aa67c6ec5384655345127b605987a
|
3b2940c38412e5216527e35093396470060cca2f
|
/top/api/rest/CrmGrademktMemberDetailCreateRequest.py
|
46b878b46708a1903474d989185d6c8580933eda
|
[] |
no_license
|
akingthink/goods
|
842eb09daddc2611868b01ebd6e330e5dd7d50be
|
ffdb5868a8df5c2935fc6142edcdf4c661c84dca
|
refs/heads/master
| 2021-01-10T14:22:54.061570
| 2016-03-04T09:48:24
| 2016-03-04T09:48:24
| 45,093,302
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 365
|
py
|
'''
Created by auto_sdk on 2015-01-20 12:44:31
'''
from top.api.base import RestApi
class CrmGrademktMemberDetailCreateRequest(RestApi):
def __init__(self,domain='gw.api.taobao.com',port=80):
RestApi.__init__(self,domain, port)
self.feather = None
self.parameter = None
def getapiname(self):
return 'taobao.crm.grademkt.member.detail.create'
|
[
"yangwenjin@T4F-MBP-17.local"
] |
yangwenjin@T4F-MBP-17.local
|
6e7ab0d6a2cb4966bcce0938269d82f81bbe5888
|
134267f2244954d48c65daae0b58051aba757fed
|
/lucky.py
|
6d7541f6ac39af4ddc31c088dfa1988c61387f8c
|
[] |
no_license
|
mobin-zaman/misc_python
|
47fe836d1eae154210912b8b353f241303523e6b
|
7a22329ae38b2d5ee9cd9ce29d995686759f5f87
|
refs/heads/master
| 2020-04-28T00:48:06.774434
| 2019-07-24T15:28:15
| 2019-07-24T15:28:15
| 174,829,343
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 494
|
py
|
#! python3
#lucky.py - opens several google search results
import requests, sys, webbrowser, bs4
print('Googling....')
res=requests.get('http://google.com/search?q=' + ' '.join(sys.argv[1:]))
res.raise_for_status()
#TODO: Retrive top search result links.
soup=bs4.BeautifulSoup(res.text)
#TODO: Open a browser tab for each result
linkElems = soup.select('.r a')
numOpen=min(5,len(linkElems))
for i in range(numOpen):
webbrowser.open('http://google.com'+linkElems[i].get('href'))
|
[
"mobin_zaman@hotmail.com"
] |
mobin_zaman@hotmail.com
|
54e679afc65bea0590837f01b49bdf2be09aece1
|
58cd392c642ac9408349f03dc72927db6abcce55
|
/team2/src/Without_Doubt_Project/venv/lib/python3.6/site-packages/tbears/libs/icx_signer.py
|
8e3c035124d75dddc7b9979edb461cd6fd3fbba1
|
[] |
no_license
|
icon-hackathons/201902-dapp-competition-bu
|
161226eb792425078351c790b8795a0fe5550735
|
f3898d31a20f0a85637f150d6187285514528d53
|
refs/heads/master
| 2020-04-24T07:48:18.891646
| 2019-04-18T01:47:21
| 2019-04-18T01:47:21
| 171,809,810
| 3
| 11
| null | 2019-04-18T01:47:23
| 2019-02-21T06:01:04
|
Python
|
UTF-8
|
Python
| false
| false
| 2,922
|
py
|
# -*- coding: utf-8 -*-
# Copyright 2018 ICON Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import hashlib
from eth_keyfile import extract_key_from_keyfile
from secp256k1 import PrivateKey
from tbears.tbears_exception import KeyStoreException
def key_from_key_store(file_path: str, password: (bytes, str)) -> bytes:
"""Get private key from keystore file.
:param file_path: keystore file path.
:param password: password of keystore file.
:return: private key
"""
try:
with open(file_path, 'rb') as file:
private_key = extract_key_from_keyfile(file, password)
except ValueError:
raise KeyStoreException('Invalid password.')
except Exception as e:
raise KeyStoreException(f'keystore file error.{e}')
else:
return private_key
class IcxSigner:
"""Class for creating a recoverable ECDSA signature using a private key."""
def __init__(self, private_key: bytes):
self._private_key = private_key
self._private_key_object = PrivateKey(self._private_key)
def sign_recoverable(self, msg_hash):
"""Make a recoverable signature using message hash data.
We can extract public key from recoverable signature.
:param msg_hash: Hash data of message. type(bytes)
:return:
type(tuple)
type(bytes): 65 bytes data, type(int): recovery id
"""
private_key_object = self._private_key_object
recoverable_signature = private_key_object.ecdsa_sign_recoverable(msg_hash, raw=True)
return private_key_object.ecdsa_recoverable_serialize(recoverable_signature)
def sign(self, msg_hash) -> bytes:
"""Make base64-encoded string of recoverable signature data.
:param msg_hash: Hash data of message. type(bytes)
:return: base64-encoded string of recoverable signature data
"""
# 'msg_hash' argument must be 256 bits (made by hashlib.sha256() method)
signature, recovery_id = self.sign_recoverable(msg_hash)
recoverable_sig = bytes(bytearray(signature) + recovery_id.to_bytes(1, 'big'))
return base64.b64encode(recoverable_sig)
@property
def public_key(self) -> bytes:
return self._private_key_object.pubkey.serialize(compressed=False)
@property
def address(self) -> bytes:
return hashlib.sha3_256(self.public_key[1:]).digest()[-20:]
|
[
"41354736+sojinkim-icon@users.noreply.github.com"
] |
41354736+sojinkim-icon@users.noreply.github.com
|
56d1b915aa3f5e63035a47824c207613f2bf5480
|
3b84c4b7b16ccfd0154f8dcb75ddbbb6636373be
|
/google-cloud-sdk/lib/googlecloudsdk/third_party/apis/gameservices/v1beta/resources.py
|
101446b0908fca46ce29693f795216e4eeaf8876
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
twistedpair/google-cloud-sdk
|
37f04872cf1ab9c9ce5ec692d2201a93679827e3
|
1f9b424c40a87b46656fc9f5e2e9c81895c7e614
|
refs/heads/master
| 2023-08-18T18:42:59.622485
| 2023-08-15T00:00:00
| 2023-08-15T12:14:05
| 116,506,777
| 58
| 24
| null | 2022-02-14T22:01:53
| 2018-01-06T18:40:35
|
Python
|
UTF-8
|
Python
| false
| false
| 2,945
|
py
|
# -*- coding: utf-8 -*- #
# Copyright 2015 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Resource definitions for cloud platform apis."""
import enum
BASE_URL = 'https://gameservices.googleapis.com/v1beta/'
DOCS_URL = 'https://cloud.google.com/solutions/gaming/'
class Collections(enum.Enum):
"""Collections for all supported apis."""
PROJECTS = (
'projects',
'projects/{projectsId}',
{},
['projectsId'],
True
)
PROJECTS_LOCATIONS = (
'projects.locations',
'{+name}',
{
'':
'projects/{projectsId}/locations/{locationsId}',
},
['name'],
True
)
PROJECTS_LOCATIONS_GAMESERVERDEPLOYMENTS = (
'projects.locations.gameServerDeployments',
'{+name}',
{
'':
'projects/{projectsId}/locations/{locationsId}/'
'gameServerDeployments/{gameServerDeploymentsId}',
},
['name'],
True
)
PROJECTS_LOCATIONS_GAMESERVERDEPLOYMENTS_CONFIGS = (
'projects.locations.gameServerDeployments.configs',
'{+name}',
{
'':
'projects/{projectsId}/locations/{locationsId}/'
'gameServerDeployments/{gameServerDeploymentsId}/configs/'
'{configsId}',
},
['name'],
True
)
PROJECTS_LOCATIONS_OPERATIONS = (
'projects.locations.operations',
'{+name}',
{
'':
'projects/{projectsId}/locations/{locationsId}/operations/'
'{operationsId}',
},
['name'],
True
)
PROJECTS_LOCATIONS_REALMS = (
'projects.locations.realms',
'{+name}',
{
'':
'projects/{projectsId}/locations/{locationsId}/realms/'
'{realmsId}',
},
['name'],
True
)
PROJECTS_LOCATIONS_REALMS_GAMESERVERCLUSTERS = (
'projects.locations.realms.gameServerClusters',
'{+name}',
{
'':
'projects/{projectsId}/locations/{locationsId}/realms/'
'{realmsId}/gameServerClusters/{gameServerClustersId}',
},
['name'],
True
)
def __init__(self, collection_name, path, flat_paths, params,
enable_uri_parsing):
self.collection_name = collection_name
self.path = path
self.flat_paths = flat_paths
self.params = params
self.enable_uri_parsing = enable_uri_parsing
|
[
"gcloud@google.com"
] |
gcloud@google.com
|
dfeb592665b2b0016aab64fdaa9c63d96cf44147
|
33f752443cbb38d3cb4d9d40982b2a2d824acb81
|
/demo/django/api/migrations/0001_initial.py
|
6227ca3a23ca0dd634450cc0af400b99742f755b
|
[
"MIT"
] |
permissive
|
denisroldan/django-angular-dynamic-forms
|
b03b4f20751c609733356bea1a7141da29f9de54
|
f50de1c74db727e565756f40344c29bbab1b3910
|
refs/heads/master
| 2020-04-03T10:11:41.776970
| 2019-08-20T09:28:30
| 2019-08-20T09:28:30
| 155,186,503
| 0
| 0
| null | 2018-10-29T09:38:19
| 2018-10-29T09:38:19
| null |
UTF-8
|
Python
| false
| false
| 1,193
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-09-24 10:43
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Address',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('street', models.CharField(blank=True, max_length=100, null=True)),
('number', models.CharField(blank=True, max_length=100, null=True)),
],
),
migrations.CreateModel(
name='City',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
],
),
migrations.AddField(
model_name='address',
name='city',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='api.City'),
),
]
|
[
"miroslav.simek@vscht.cz"
] |
miroslav.simek@vscht.cz
|
fa45b11c2a495772e67bb46f3588e19d43441dc4
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/158/usersdata/264/68784/submittedfiles/imc.py
|
f8dd970b68f227049dd18bb352f6bc264eb43c0c
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 205
|
py
|
# -*- coding: utf-8 -*-
#Entrada:
peso= float(input('Digite o valor do peso em Kg:'))
altura= float(input('Digite o valor da altura em metros:'))
#Processamento:
imc= ((peso)/(altura**2))
#Saída:
if (imc>
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
5b93b4356e42e2778ef25836eb0d5835a1ef480c
|
25f9afe371c59612e02e561a6b35b8d8bafad20a
|
/tests/suite/test_smoke.py
|
c67d4d9576cb7e6ed4820df54347905d68d8793c
|
[
"Apache-2.0"
] |
permissive
|
HubBucket-Team/kubernetes-ingress
|
2b8bf4ac293fb2e9aa51e18037c34029ed66a8be
|
99663386df7cea013489a88b9471eb18be4c9e77
|
refs/heads/master
| 2020-07-07T23:18:34.157361
| 2019-08-13T13:13:39
| 2019-08-20T15:23:24
| 203,502,783
| 1
| 1
|
Apache-2.0
| 2019-08-21T03:58:05
| 2019-08-21T03:58:04
| null |
UTF-8
|
Python
| false
| false
| 2,734
|
py
|
import requests
import pytest
from suite.fixtures import PublicEndpoint
from suite.resources_utils import create_secret_from_yaml, delete_secret, \
ensure_connection_to_public_endpoint, create_items_from_yaml, \
delete_items_from_yaml, create_example_app, delete_common_app, \
wait_until_all_pods_are_ready, ensure_response_from_backend
from suite.yaml_utils import get_first_ingress_host_from_yaml
from settings import TEST_DATA
paths = ["backend1", "backend2"]
class SmokeSetup:
"""
Encapsulate the Smoke Example details.
Attributes:
public_endpoint (PublicEndpoint):
ingress_host (str):
"""
def __init__(self, public_endpoint: PublicEndpoint, ingress_host):
self.public_endpoint = public_endpoint
self.ingress_host = ingress_host
@pytest.fixture(scope="class", params=["standard", "mergeable"])
def smoke_setup(request, kube_apis, ingress_controller_endpoint, ingress_controller, test_namespace) -> SmokeSetup:
print("------------------------- Deploy Smoke Example -----------------------------------")
secret_name = create_secret_from_yaml(kube_apis.v1, test_namespace, f"{TEST_DATA}/smoke/smoke-secret.yaml")
create_items_from_yaml(kube_apis, f"{TEST_DATA}/smoke/{request.param}/smoke-ingress.yaml", test_namespace)
ingress_host = get_first_ingress_host_from_yaml(f"{TEST_DATA}/smoke/{request.param}/smoke-ingress.yaml")
create_example_app(kube_apis, "simple", test_namespace)
wait_until_all_pods_are_ready(kube_apis.v1, test_namespace)
ensure_connection_to_public_endpoint(ingress_controller_endpoint.public_ip,
ingress_controller_endpoint.port,
ingress_controller_endpoint.port_ssl)
def fin():
print("Clean up the Smoke Application:")
delete_common_app(kube_apis, "simple", test_namespace)
delete_items_from_yaml(kube_apis, f"{TEST_DATA}/smoke/{request.param}/smoke-ingress.yaml",
test_namespace)
delete_secret(kube_apis.v1, secret_name, test_namespace)
request.addfinalizer(fin)
return SmokeSetup(ingress_controller_endpoint, ingress_host)
@pytest.mark.smoke
class TestSmoke:
@pytest.mark.parametrize("path", paths)
def test_response_code_200_and_server_name(self, smoke_setup, path):
req_url = f"https://{smoke_setup.public_endpoint.public_ip}:{smoke_setup.public_endpoint.port_ssl}/{path}"
ensure_response_from_backend(req_url, smoke_setup.ingress_host)
resp = requests.get(req_url, headers={"host": smoke_setup.ingress_host}, verify=False)
assert resp.status_code == 200
assert f"Server name: {path}" in resp.text
|
[
"tellet.tat@gmail.com"
] |
tellet.tat@gmail.com
|
6ea349337b8084df78a521f35798fd7e7555a5c5
|
c5514643dd1601661abce5449674cc796247b66a
|
/src/event_configuration.py
|
a370720faf27ec54a7e64737991585f2e0fd77c9
|
[] |
no_license
|
happeninghq/happening-comments
|
0e456d593608cc1ff63e3df2109e039b8d80f921
|
d330c3fcfb648e02be9466640570764f623945f0
|
refs/heads/master
| 2021-01-20T19:06:42.744164
| 2017-04-05T13:36:25
| 2017-04-05T13:36:25
| 59,932,570
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 342
|
py
|
"""Event Configuration."""
from happening import configuration
from happening import plugins
class CommentOnGroups(configuration.BooleanField):
"""Can members comment on groups."""
@property
def settable(self):
"""Only enable if the groups plugin is enabled."""
return plugins.plugin_enabled("plugins.groups")
|
[
"jonathan@jscott.me"
] |
jonathan@jscott.me
|
3424b73d226dbd1d83678cf172706f34105222d2
|
52b5773617a1b972a905de4d692540d26ff74926
|
/.history/frogjump_20200717123008.py
|
e4f45f8cc2fdaa932a6cbf2ca3a79c65a2b5a589
|
[] |
no_license
|
MaryanneNjeri/pythonModules
|
56f54bf098ae58ea069bf33f11ae94fa8eedcabc
|
f4e56b1e4dda2349267af634a46f6b9df6686020
|
refs/heads/master
| 2022-12-16T02:59:19.896129
| 2020-09-11T12:05:22
| 2020-09-11T12:05:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 399
|
py
|
# use the brute force approach then optimize
# and test edge cases
# o(n)
def jump(X, Y, D):
    """Return the minimal number of fixed-size jumps from X to reach at least Y.

    X -- starting position
    Y -- target position (frog must land on or beyond it)
    D -- jump distance per jump

    Returns 0 when already at/past the target or when D is not positive.

    Fixes the original, which had an unclosed parenthesis (syntax error) and
    printed instead of returning the answer.
    """
    if X >= Y:
        return 0  # already at or beyond the target
    if D < 1:
        return 0  # guard against a non-positive jump distance
    # Ceiling division without floats: jumps needed to cover Y - X.
    return -(-(Y - X) // D)


print(jump(10, 85, 30))
|
[
"mary.jereh@gmail.com"
] |
mary.jereh@gmail.com
|
bac232b847d7bdec1b84d7b26513593e311f39e1
|
a46d135ba8fd7bd40f0b7d7a96c72be446025719
|
/packages/python/plotly/plotly/validators/choropleth/colorbar/_outlinewidth.py
|
f67ddb9933f89083f1ad283ae70d3c68d77de79f
|
[
"MIT"
] |
permissive
|
hugovk/plotly.py
|
5e763fe96f225d964c4fcd1dea79dbefa50b4692
|
cfad7862594b35965c0e000813bd7805e8494a5b
|
refs/heads/master
| 2022-05-10T12:17:38.797994
| 2021-12-21T03:49:19
| 2021-12-21T03:49:19
| 234,146,634
| 0
| 0
|
MIT
| 2020-01-15T18:33:43
| 2020-01-15T18:33:41
| null |
UTF-8
|
Python
| false
| false
| 480
|
py
|
import _plotly_utils.basevalidators
class OutlinewidthValidator(_plotly_utils.basevalidators.NumberValidator):
    # Auto-generated Plotly validator for the numeric
    # ``choropleth.colorbar.outlinewidth`` property.
    def __init__(
        self, plotly_name="outlinewidth", parent_name="choropleth.colorbar", **kwargs
    ):
        super(OutlinewidthValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "colorbars"),  # default edit class
            min=kwargs.pop("min", 0),  # outline width cannot be negative
            **kwargs
        )
|
[
"noreply@github.com"
] |
hugovk.noreply@github.com
|
2a5c757d67b5a4fcd73e07832a8a1b762878d752
|
be317396416134fc18d598934403906b9b1a7583
|
/word_data_gen.py
|
b738412179d82da71185cc5407c91359cbaad683
|
[
"Apache-2.0"
] |
permissive
|
Guhaifudeng/zhihukankan
|
26d5c40638035c9a13b0e24b789afd11c6eb157f
|
ccb216458a74d85bf048b0da11146716026b7ce3
|
refs/heads/master
| 2020-06-19T07:36:54.996472
| 2017-07-08T09:09:28
| 2017-07-08T09:09:28
| 94,181,591
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,447
|
py
|
#encoding:utf-8
import codecs
import word_util
def gen_word_key(word_embedding_file, word_key_file, has_head=False):
    """Extract the first token (the word) from every line of an embedding file
    and write all words tab-joined on a single line of ``word_key_file``.

    word_embedding_file -- UTF-8 "word v1 v2 ..." text file
    word_key_file       -- output path; receives one tab-joined line of words
    has_head            -- skip the first line (word2vec-style header) if True

    Fixes two defects in the original: the output file handle was never
    closed, and the words were accumulated by repeated string concatenation
    (quadratic) instead of a single join.
    """
    words = []
    count = 0
    with codecs.open(word_embedding_file, 'r', 'utf-8') as w_read:
        for line_no, line in enumerate(w_read):
            if has_head and line_no == 0:
                continue  # skip the "<vocab_size> <dim>" header line
            count += 1
            if count % 1000 == 0:
                print('load word count %d' % count)
            # The word is the first space-separated token of the line.
            words.append(line.strip().split(" ")[0])
    with codecs.open(word_key_file, 'w', 'utf-8') as w_write:
        w_write.write('\t'.join(words) + '\n')
    print("count of words in word_embedding_file %d" % count)
    print("finished !")
def gen_word_key_after_removed(word_idf_map, word_key_after_removed_file):
    """Prune words with (numerically) zero IDF from ``word_idf_map`` and write
    the surviving words, tab-separated, to ``word_key_after_removed_file``.

    NOTE: mutates ``word_idf_map`` in place — callers rely on the pruned map.

    Fix: the original called ``w_write.close()`` inside the ``with`` block,
    which is redundant (the context manager already closes the file).
    """
    # Collect keys first: a dict must not change size while being iterated.
    rm_list = [key for key, idf in word_idf_map.items() if float(idf) < 1e-6]
    for key in rm_list:
        word_idf_map.pop(key)
    with codecs.open(word_key_after_removed_file, 'w', 'utf-8') as w_write:
        w_write.write('\t'.join(word_idf_map.keys()) + '\n')
def gen_word_tfidf_after_removed(word_keys_tfidf_after_removed_file, word_tfidf_map, word_keys):
    """Write the entries of ``word_tfidf_map`` whose key appears in
    ``word_keys`` as two tab-separated lines (keys, then tf-idf values).

    Bug fix: the original immediately rebound the ``word_keys`` parameter to
    an empty list, so the membership test never matched and both output lines
    were always empty.  The filter now honours the caller's key list, and a
    set gives O(1) membership checks.
    """
    allowed = set(word_keys)
    kept_keys = []
    kept_tfidf = []
    for key, tfidf in word_tfidf_map.items():
        if key in allowed:
            kept_keys.append(key)
            kept_tfidf.append(tfidf)
    with codecs.open(word_keys_tfidf_after_removed_file, 'w', 'utf-8') as w_tfidf_write:
        w_tfidf_write.write('\t'.join(kept_keys) + '\n')
        w_tfidf_write.write('\t'.join(kept_tfidf) + '\n')
if __name__ == '__main__':
    # Build the per-partition tf-idf file from the global tf-idf scores,
    # restricted to the post-pruning vocabulary.
    #gen_word_key('../data/word_embedding.txt','../out/word_keys.txt',True)
    word_key_tfidf_after_removed_file = '../out/partition_tfidf.txt'
    # word_idf_map = word_util.build_word_idf_hashmap('../out/global_idf.txt')
    # gen_word_key_after_removed(word_idf_map, word_key_after_removed_file)
    word_tfidf_map = word_util.build_word_tfidf_hashmap('../out/global_tfidf.txt')  # word -> tf-idf score
    word_keys = word_util.build_word_keys_hashmap('../out/word_keys_rmd.txt')  # surviving vocabulary
    gen_word_tfidf_after_removed(word_key_tfidf_after_removed_file,word_tfidf_map,word_keys)
    print('finished')
|
[
"="
] |
=
|
d4483efe86d7062fd477ca674becd6f9d965816e
|
fd8d33572656edf9e1133a72ad4e2fa090f90a5f
|
/packages/OpenWeatherMap/nodes/OpenWeatherMap___BreakTemp0/OpenWeatherMap___BreakTemp0___METACODE.py
|
389970e2b1ffbdae5d0a55956bfa72961c159947
|
[
"MIT"
] |
permissive
|
ChristianHohlfeld/Ryven
|
a01c2eafa79a80883a9490efb5f043fd35f53484
|
53bf7e57a7b0fa25a704cd0d2214a7f76096d4dd
|
refs/heads/master
| 2022-12-12T22:03:57.122034
| 2020-08-31T13:45:45
| 2020-08-31T13:45:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,445
|
py
|
from NIENV import *
# API METHODS
# self.main_widget <- access to main widget
# Ports
# self.input(index) <- access to input data
# self.set_output_val(self, index, val) <- set output data port value
# self.exec_output(index) <- executes an execution output
# self.create_new_input(type_, label, widget_name=None, widget_pos='under', pos=-1)
# self.delete_input(index or input)
# self.create_new_output(type_, label, pos=-1)
# self.delete_output(index or output)
# Logging
# mylog = self.new_log('Example Log')
# mylog.log('I\'m alive!!')
# self.log_message('hello global!', 'global')
# self.log_message('that\'s not good', 'error')
# ------------------------------------------------------------------------------
from pyowm.utils.measurables import kelvin_to_celsius, kelvin_to_fahrenheit
class %NODE_TITLE%_NodeInstance(NodeInstance):
    # Ryven METACODE template: %NODE_TITLE% is substituted with the node's
    # title when the package is generated, so this file is not directly
    # importable Python.
    #
    # The node takes an OpenWeatherMap temperature dict on input 0 (values in
    # kelvin) and a unit name on input 1 ('kelvin', 'celsius' or
    # 'fahrenheit'), converts the values, and emits five scalar outputs.
    def __init__(self, parent_node: Node, flow, configuration=None):
        super(%NODE_TITLE%_NodeInstance, self).__init__(parent_node, flow, configuration)
        # self.special_actions['action name'] = {'method': M(self.action_method)}
        # ...
        self.initialized()
    # don't call self.update_event() directly, use self.update() instead
    def update_event(self, input_called=-1):
        temp_dict = self.input(0)
        if self.input(1) != 'kelvin':
            # Convert entry by entry: the dict can contain None values, which
            # the bulk kelvin_dict_to() helper cannot handle (see note below).
            for key in list(temp_dict.keys()):
                item = temp_dict[key]
                if item is not None:
                    if self.input(1) == 'celsius':
                        temp_dict[key] = kelvin_to_celsius(item)
                    elif self.input(1) == 'fahrenheit':
                        temp_dict[key] = kelvin_to_fahrenheit(item)
        # temp_dict = kelvin_dict_to(temp_dict, self.input(1)) doesn't work with NoneType values -.- which happen to persist
        temp = temp_dict['temp']
        temp_kf = temp_dict['temp_kf']
        temp_max = temp_dict['temp_max']
        temp_min = temp_dict['temp_min']
        feels_like = temp_dict['feels_like']
        self.set_output_val(0, temp)
        self.set_output_val(1, temp_kf)
        self.set_output_val(2, temp_min)
        self.set_output_val(3, temp_max)
        self.set_output_val(4, feels_like)
    def get_data(self):
        # No per-instance state to persist yet.
        data = {}
        # ...
        return data
    def set_data(self, data):
        pass # ...
    def remove_event(self):
        pass
|
[
"leon.thomm@gmx.de"
] |
leon.thomm@gmx.de
|
b093ca38f2f191bd61045b53e218e509e9ee9255
|
372eefa7d896d3cee8c1c1befd8d3baec4eb0188
|
/infra/services/cicd/artifacts.py
|
73cce818ce93475357036f6865901e922b7e813b
|
[] |
no_license
|
dr-natetorious/aws-homenet
|
5c17f4c3e1fcd60f50d22b5b94453f1d965d4ca0
|
d5382c7ada2c9bd5dc0b3687d57d47282791ed40
|
refs/heads/master
| 2023-06-21T17:40:08.721233
| 2021-07-22T18:08:50
| 2021-07-22T18:08:50
| 307,004,525
| 1
| 0
| null | 2021-06-27T18:05:01
| 2020-10-25T01:45:42
|
Python
|
UTF-8
|
Python
| false
| false
| 1,131
|
py
|
from infra.interfaces import ILandingZone
from aws_cdk import (
core,
aws_codeartifact as art,
aws_route53 as r53,
)
class ArtifactsConstruct(core.Construct):
    """
    Represents a code artifact repository.

    Provisions a CodeArtifact domain named after the landing zone, a
    'pypi-store' repository with an external connection to public PyPI, a
    default repository for the zone, and a Route53 CNAME
    'artifacts.<zone>' pointing at the domain.
    """
    def __init__(self, scope: core.Construct, id: str, landing_zone:ILandingZone,zone:r53.IHostedZone, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)
        core.Tags.of(self).add('Construct',ArtifactsConstruct.__name__)
        # CodeArtifact domain that owns both repositories below.
        self.domain = art.CfnDomain(self,'Domain',
            domain_name=landing_zone.zone_name)
        # Proxy repository connected to the public PyPI index.
        self.repo = art.CfnRepository(self,'PyPi',
            domain_name=self.domain.domain_name,
            repository_name='pypi-store',
            description='PyPI connector',
            external_connections=['public:pypi'])
        # NOTE(review): this rebinds self.repo, so the 'PyPi' repository handle
        # above is discarded — confirm no caller needs the first object.
        self.repo = art.CfnRepository(self,'DefaultRepo',
            repository_name=landing_zone.zone_name,
            domain_name= self.domain.domain_name,
            #upstreams=['pypi-store'],
            description='Artifacts for '+zone.zone_name)
        r53.CnameRecord(self,'DnsRecord',
            zone=zone,
            record_name='artifacts.{}'.format(zone.zone_name),
            domain_name=self.domain.domain_name)
|
[
"nate@bachmeier"
] |
nate@bachmeier
|
02ce7282c2e5418148795d72cfafea3df7b36c38
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_129/ch82_2019_06_03_22_51_02_844625.py
|
26ab896be3fc59c6151a93badfc0832f10734bda
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 190
|
py
|
def primeiras_ocorrencias(palavra):
    """Return a dict mapping each character of ``palavra`` to the number of
    times it occurs in the string.

    Fixes the original's invalid statement ``dic[e] = +=1`` (a syntax error)
    by incrementing with ``dic[e] += 1``.
    """
    dic = {}
    for e in palavra:
        if e not in dic:
            dic[e] = 1
        else:
            dic[e] += 1
    return dic
|
[
"you@example.com"
] |
you@example.com
|
1fa3c7c2af1235677a3196c7fa0ba42253cf7339
|
e82b761f53d6a3ae023ee65a219eea38e66946a0
|
/All_In_One/addons/Renamer.py
|
97190580daf2e8848a7d452f77642f9dba9bf729
|
[] |
no_license
|
2434325680/Learnbgame
|
f3a050c28df588cbb3b14e1067a58221252e2e40
|
7b796d30dfd22b7706a93e4419ed913d18d29a44
|
refs/heads/master
| 2023-08-22T23:59:55.711050
| 2021-10-17T07:26:07
| 2021-10-17T07:26:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,910
|
py
|
######################################################################################################
# A tool to easely renamer objects, materials,... #
# Actualy partly uncommented - if you do not understand some parts of the code, #
# please see further version or contact me #
# Author: Lapineige #
# License: GPL v3 #
######################################################################################################
############# Add-on description (used by Blender)
bl_info = {
"name": "Renamer",
"description": '',
"author": "Lapineige",
"version": (1, 0),
"blender": (2, 71, 0),
"location": "3D View > Toolself > Rename (tab)",
"warning": "",
"wiki_url": "",
"tracker_url": "http://le-terrier-de-lapineige.over-blog.com/contact",
"category": "Learnbgame",
}
import bpy
bpy.types.Scene.source_name = bpy.props.StringProperty()
bpy.types.Scene.new_name = bpy.props.StringProperty()
bpy.types.Scene.rename_mode = bpy.props.EnumProperty(items =[('objects','Object',"",1),('materials','Material',"",2),('textures','Texture',"",3),('meshes','Mesh',"",4),('lamps','Lamp',"",5),('scenes','Scene',"",6),('worlds','World',"",7)])
bpy.types.Scene.only_selection= bpy.props.BoolProperty(default=False)
class Rename(bpy.types.Operator):
    """Rename every datablock of the selected kind whose name contains
    scene.source_name, replacing that substring with scene.new_name."""
    bl_idname = "scene.rename"
    bl_label = "Rename"

    @classmethod
    def poll(cls, context):
        return True

    def execute(self, context):
        source_name = context.scene.source_name
        new_name = context.scene.new_name
        # Replaces the original string-built exec() call: bpy.data.<mode> is
        # just an attribute lookup, so getattr is both safer and clearer.
        datablocks = getattr(bpy.data, context.scene.rename_mode)
        # "Only Selected" applies solely to objects; other datablock kinds
        # carry no selection state.
        restrict_to_selection = (context.scene.rename_mode == 'objects'
                                 and context.scene.only_selection)
        for block in datablocks:
            if source_name not in block.name:
                continue
            if restrict_to_selection and not block.select:
                continue
            start = block.name.index(source_name)
            block.name = (block.name[:start] + new_name
                          + block.name[start + len(source_name):])
        return {'FINISHED'}
class SwitchName(bpy.types.Operator):
    """ """
    # Swap the contents of the source-name and new-name scene fields.
    bl_idname = "scene.switch_name"
    bl_label = "Switch source/new name"

    @classmethod
    def poll(cls, context):
        return True

    def execute(self, context):
        scene = context.scene
        scene.new_name, scene.source_name = scene.source_name, scene.new_name
        return {'FINISHED'}
class RenamePanel(bpy.types.Panel):
    """ """
    # UI panel in the 3D View toolshelf ("Rename" tab) exposing the mode
    # selector, the source/new name fields, and the two operators above.
    bl_label = "Rename"
    bl_space_type = 'VIEW_3D'
    bl_region_type = 'TOOLS'
    bl_category = "Rename"

    def draw(self, context):
        layout = self.layout
        layout.prop(context.scene, "rename_mode", text="Mode")
        # Selection filtering only makes sense for objects.
        if context.scene.rename_mode == 'objects':
            layout.prop(context.scene, 'only_selection', text='Only Selected')
        layout.prop(context.scene, "source_name")
        layout.prop(context.scene, "new_name")
        row = layout.row(align=True)
        row.operator("scene.rename", icon="FILE_TEXT")
        row.operator("scene.switch_name", text='', icon="ARROW_LEFTRIGHT")
def register():
    # Register all add-on classes with Blender (order preserved).
    for cls in (RenamePanel, Rename, SwitchName):
        bpy.utils.register_class(cls)
def unregister():
    # Unregister all add-on classes from Blender (order preserved).
    for cls in (RenamePanel, Rename, SwitchName):
        bpy.utils.unregister_class(cls)
if __name__ == "__main__":
register()
|
[
"root@localhost.localdomain"
] |
root@localhost.localdomain
|
1f80f5d806fd487bb6af162e32e82e2567da9491
|
08427cf6764cb646fdd37eb239dc6dde0be68ad7
|
/python/leetcode.153.py
|
8f2d4f9af6fd521c1079ee04cb577821d613a829
|
[] |
no_license
|
CalvinNeo/LeetCode
|
9d8fa71a1da8c926b5f39659a1befcfa06608945
|
02ebe56cd92b9f4baeee132c5077892590018650
|
refs/heads/master
| 2020-12-31T00:41:14.031066
| 2020-11-06T04:41:59
| 2020-11-06T04:41:59
| 80,634,710
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 916
|
py
|
def se(arr, fr, to):
    """Binary search for the minimum of the rotated sorted slice arr[fr:to+1].

    Fix: the original used ``/`` for the midpoint, which yields a float under
    Python 3 and breaks the index arithmetic; floor division ``//`` behaves
    identically under Python 2 and works under Python 3.
    """
    if to == fr:
        return arr[fr]
    elif to - fr == 1:
        return min(arr[to], arr[fr])
    mid = (fr + to) // 2
    if arr[fr] > arr[mid]:
        # Rotation point (the minimum) lies in the left half.
        return se(arr, fr, mid)
    elif arr[mid + 1] > arr[to]:
        # Rotation point lies in the right half.
        return se(arr, mid + 1, to)
    else:
        return arr[mid + 1]
class Solution(object):
    def findMin(self, nums):
        """
        :type nums: List[int]
        :rtype: int

        Return the minimum of a possibly-rotated sorted list.  A linear scan
        detects whether a rotation point exists; if the list is fully sorted
        its first element is the minimum, otherwise the binary search ``se``
        locates the rotation point in O(log n).

        Fix: ``range`` replaces the Python-2-only ``xrange`` builtin; the
        iteration behaviour is identical on both interpreters.
        """
        rotated = False
        for i in range(1, len(nums)):
            if nums[i] < nums[i - 1]:
                rotated = True
                break
        if rotated:
            return se(nums, 0, len(nums) - 1)
        return nums[0]
# Ad-hoc smoke checks (Python 2 print statements; run under python2).
sln = Solution()
print sln.findMin([1,2,3,4,5,6])
print sln.findMin([4,5,6,1,2,3])
print sln.findMin([2,3,1])
print sln.findMin([3,1,2])
print sln.findMin([2,1])
print sln.findMin([3,4,1,2])
print sln.findMin([2,3,4,1])
|
[
"calvinneo1995@gmail.com"
] |
calvinneo1995@gmail.com
|
b784b3f3d2161fdf8531127e08a4cb750cc38d02
|
80fe5bd6413fb6366efba5f7a5d75edd7bca5295
|
/snake_game/scoreboard.py
|
1807cafea2c95dceb667bdecb07d2cb21b7d63e2
|
[] |
no_license
|
toastding/collections
|
37893167ca178a289b6d88b585cc056488726691
|
9c70d3ecaec211fa68d8de598af59f53f7dcbc1e
|
refs/heads/master
| 2023-07-14T08:01:37.851618
| 2021-08-31T13:42:43
| 2021-08-31T13:42:43
| 360,877,739
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,028
|
py
|
from turtle import Turtle
ALIGNMENT = "center"
FONT = ("Verdana", 20, "normal")
class Scoreboard(Turtle):
    """Turtle that displays the snake game's current score and the high score
    persisted in ``data.txt`` across sessions."""
    def __init__(self):
        super().__init__()
        self.score = 0
        # Load the persisted high score; assumes data.txt exists and contains
        # an integer — TODO confirm first-run behaviour.
        with open("data.txt") as data:
            self.high_score = int(data.read())
        self.penup()
        self.goto(0, 270)
        self.pencolor("white")
        self.hideturtle()
        self.update_scoreboard()

    def update_scoreboard(self):
        # Redraw the score line from scratch each time.
        self.clear()
        self.write(f"Score: {self.score} High Score: {self.high_score}", align=ALIGNMENT, font=FONT)

    def reset(self):
        # Persist a new high score before zeroing the current run's score.
        if self.score > self.high_score:
            self.high_score = self.score
            with open("data.txt", mode="w") as data:
                data.write(f"{self.high_score}")
        self.score = 0
        self.update_scoreboard()

    # def game_over(self):
    #     self.goto(0, 0)
    #     self.write("GAME OVER", align=ALIGNMENT, font=FONT)

    def increase_score(self):
        self.score += 1
        self.update_scoreboard()
|
[
"ding02211995@gmail.com"
] |
ding02211995@gmail.com
|
049e187c03f97db786c9e2c1f574457db6e103ed
|
f87f51ec4d9353bc3836e22ac4a944951f9c45c0
|
/.history/HW02_20210630154801.py
|
30b513b1fbb089d9ef3ed3c11ddca365506bce60
|
[] |
no_license
|
sanjayMamidipaka/cs1301
|
deaffee3847519eb85030d1bd82ae11e734bc1b7
|
9ddb66596497382d807673eba96853a17884d67b
|
refs/heads/main
| 2023-06-25T04:52:28.153535
| 2021-07-26T16:42:44
| 2021-07-26T16:42:44
| 389,703,530
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,560
|
py
|
"""
Georgia Institute of Technology - CS1301
HW02 - Conditionals and Loops
Collaboration Statement:
"""
#########################################
"""
Function Name: snackBar()
Parameters: snack (str), ingredient (str), yourMoney (float)
Returns: whether you can get the snack (bool)
"""
#########################################
########## WRITE FUNCTION HERE ##########
#########################################
def snackBar(snack, ingredient, yourMoney):
    """Decide whether the caller can buy ``snack``.

    snack      -- menu item name
    ingredient -- an ingredient the caller must avoid
    yourMoney  -- money available

    Returns True when the snack does not contain the forbidden ingredient and
    the caller can afford it, False otherwise.  Returns None (implicitly) for
    an unknown snack, matching the original behaviour.

    The four copy-pasted branches are collapsed into one price/ingredient
    table.  NOTE(review): 'Diary' is kept verbatim from the original — it
    looks like a typo for 'Dairy'; confirm against the assignment spec.
    """
    menu = {
        'Hotdog': (5.99, ('Gluten', 'Meat')),
        'Veggie Burger': (5.99, ('Gluten',)),
        'Chili Bowl': (3.99, ('Meat',)),
        'Chili Cheese Fries': (4.99, ('Meat', 'Diary')),
    }
    if snack in menu:
        price, contains = menu[snack]
        return ingredient not in contains and yourMoney >= price
"""
Function Name: waterGames()
Parameters: gameName (str), numPlayers (int), totalFriends (int)
Returns: None (NoneType)
"""
#########################################
########## WRITE FUNCTION HERE ##########
#########################################
def waterGames(gameName, numPlayers, totalFriends):
    """Print how (or whether) the group will play ``gameName``, based on the
    fraction of friends joining.  Prints nothing when at least 75% of the
    group plays; always returns None."""
    fraction = numPlayers / totalFriends
    if fraction < 0.3:
        print('Let’s choose something else.')
    elif fraction < 0.75:
        print('We will {} for a little bit!'.format(gameName))
"""
Function Name: summerShopping()
Parameters: clothingItem (str), size (str)
Returns: None (NoneType)
"""
#########################################
########## WRITE FUNCTION HERE ##########
#########################################
"""
Function Name: stopGame()
Parameters: initialPrice (float), finalPrice (float), percentGrowth (float)
Returns: numberOfDays (int)
"""
#########################################
########## WRITE FUNCTION HERE ##########
#########################################
"""
Function Name: adventure()
Parameters: startDay (int), stopDay (int), hikeLimit(int)
Returns: None (NoneType)
"""
#########################################
########## WRITE FUNCTION HERE ##########
#########################################
|
[
"sanjay.mamidipaka@gmail.com"
] |
sanjay.mamidipaka@gmail.com
|
6981f306ac39b5d7c1aa2ca6983e0fbd6357d408
|
eeb3d7c9ff4c882ac913ee8e00b2201bcfdd300f
|
/string/38.count-and-say.py
|
612a90ec86afcaa5842e1c6f1a6a0e87aac53369
|
[] |
no_license
|
naseeihity/leetcode-daily
|
f89888328a1181e0592f09e90fea105d1568af99
|
4992a967ddccd05ab777dad69ce2f832dae26ae5
|
refs/heads/master
| 2023-04-23T06:08:35.473663
| 2021-05-10T15:19:33
| 2021-05-10T15:19:33
| 235,041,055
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 588
|
py
|
#
# @lc app=leetcode id=38 lang=python3
#
# [38] Count and Say
#
# @lc code=start
class Solution:
    """LeetCode 38 — Count and Say.

    Bug fix: the original nested both ``say`` and a second ``countAndSay``
    inside the outer ``countAndSay``, so the method defined two inner
    functions and always returned None.  The helpers are now proper methods
    and ``countAndSay`` returns the n-th term of the sequence.
    """

    def say(self, ans):
        """Return the run-length "spoken" form of ``ans`` (e.g. "21" -> "1211")."""
        spoken = ""
        run_start, length = 0, len(ans)
        for i in range(1, length + 1):
            # A run ends at the string's end or where the character changes.
            if i == length or ans[run_start] != ans[i]:
                spoken += str(i - run_start) + ans[run_start]
                run_start = i
        return spoken

    def countAndSay(self, n: int) -> str:
        """Return the n-th (1-indexed) term of the count-and-say sequence."""
        ans = "1"
        for _ in range(1, n):
            ans = self.say(ans)
        return ans
# @lc code=end
|
[
"naseeihility@163.com"
] |
naseeihility@163.com
|
2b6b23388fe62b64f777be4d9d1c785a09b4fd7c
|
09e57dd1374713f06b70d7b37a580130d9bbab0d
|
/benchmark/startQiskit_Class1958.py
|
690424946f0df1baa082d94199b57207c409b8f0
|
[
"BSD-3-Clause"
] |
permissive
|
UCLA-SEAL/QDiff
|
ad53650034897abb5941e74539e3aee8edb600ab
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
refs/heads/main
| 2023-08-05T04:52:24.961998
| 2021-09-19T02:56:16
| 2021-09-19T02:56:16
| 405,159,939
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,922
|
py
|
# qubit number=4
# total number=27
import cirq
import qiskit
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2
import numpy as np
import networkx as nx
def bitwise_xor(s: str, t: str) -> str:
    """XOR two equal-length bit strings character-wise.

    Note: the result is reversed relative to the inputs, matching the
    original implementation's ``res[::-1]``.
    """
    bits = [str(int(s[i]) ^ int(t[i])) for i in range(len(s))]
    return ''.join(bits[::-1])
def bitwise_dot(s: str, t: str) -> str:
    """Dot product of two bit strings modulo 2, returned as '0' or '1'."""
    total = sum(int(s[i]) * int(t[i]) for i in range(len(s)))
    return str(total % 2)
def build_oracle(n: int, f) -> QuantumCircuit:
    # implement the oracle O_f
    # NOTE: use multi_control_toffoli_gate ('noancilla' mode)
    # https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html
    # https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates
    # https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate
    controls = QuantumRegister(n, "ofc")
    target = QuantumRegister(1, "oft")
    oracle = QuantumCircuit(controls, target, name="Of")
    # For every n-bit input x with f(x) == "1", flip the target qubit: wrap a
    # multi-controlled Toffoli in X gates so the controls match x's bit
    # pattern (X converts a required 0 into an active 1-control and back).
    for i in range(2 ** n):
        rep = np.binary_repr(i, n)
        if f(rep) == "1":
            for j in range(n):
                if rep[j] == "0":
                    oracle.x(controls[j])
            oracle.mct(controls, target[0], None, mode='noancilla')
            for j in range(n):
                if rep[j] == "0":
                    oracle.x(controls[j])
    # oracle.barrier()
    return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
    # Auto-generated benchmark circuit: a Deutsch-Jozsa-style H / oracle / H
    # sandwich on the first n-1 qubits, interleaved with extra numbered gates
    # inserted by the benchmark generator (the "# number=k" tags track
    # insertion order and are kept for traceability).
    # circuit begin
    input_qubit = QuantumRegister(n,"qc")
    classical = ClassicalRegister(n, "qm")
    prog = QuantumCircuit(input_qubit, classical)
    prog.cx(input_qubit[0],input_qubit[3]) # number=24
    prog.x(input_qubit[3]) # number=25
    prog.cx(input_qubit[0],input_qubit[3]) # number=26
    prog.rx(-1.9352210746113125,input_qubit[3]) # number=14
    prog.cx(input_qubit[1],input_qubit[2]) # number=22
    prog.h(input_qubit[1])  # number=2
    prog.h(input_qubit[2])  # number=3
    prog.h(input_qubit[3])  # number=4
    prog.y(input_qubit[2]) # number=13
    prog.h(input_qubit[0])  # number=5
    # Oracle acts on the first n-1 qubits with the last qubit as target.
    oracle = build_oracle(n-1, f)
    prog.append(oracle.to_gate(),[input_qubit[i] for i in range(n-1)]+[input_qubit[n-1]])
    prog.h(input_qubit[1])  # number=6
    prog.h(input_qubit[2])  # number=7
    prog.h(input_qubit[3])  # number=8
    prog.h(input_qubit[0])  # number=9
    prog.rx(-1.9069467407290044,input_qubit[2]) # number=20
    prog.h(input_qubit[3]) # number=21
    prog.y(input_qubit[2]) # number=10
    prog.h(input_qubit[1]) # number=17
    prog.cz(input_qubit[3],input_qubit[1]) # number=18
    prog.h(input_qubit[1]) # number=19
    prog.y(input_qubit[2]) # number=11
    prog.cx(input_qubit[1],input_qubit[0]) # number=15
    prog.cx(input_qubit[1],input_qubit[0]) # number=16
    prog.z(input_qubit[3]) # number=23
    # circuit end
    return prog
if __name__ == '__main__':
    # Benchmark driver: build the circuit for the oracle f(x) = (a.x) xor b,
    # simulate its exact statevector, and dump per-basis-state probabilities
    # plus the transpiled circuit to a CSV report.
    a = "111"
    b = "0"
    f = lambda rep: bitwise_xor(bitwise_dot(a, rep), b)
    prog = make_circuit(4,f)
    backend = BasicAer.get_backend('statevector_simulator')
    sample_shot =8000
    info = execute(prog, backend=backend).result().get_statevector()
    qubits = round(log2(len(info)))
    # Convert complex amplitudes to probabilities, keyed by bit string.
    info = {
        np.binary_repr(i, qubits): round((info[i]*(info[i].conjugate())).real,3)
        for i in range(2 ** qubits)
    }
    backend = FakeVigo()
    circuit1 = transpile(prog,backend,optimization_level=2)
    writefile = open("../data/startQiskit_Class1958.csv","w")
    print(info,file=writefile)
    print("results end", file=writefile)
    print(circuit1.__len__(),file=writefile)
    print(circuit1,file=writefile)
    writefile.close()
|
[
"wangjiyuan123@yeah.net"
] |
wangjiyuan123@yeah.net
|
48306c72966c1c9ee9c538e95b126fab6e9107cb
|
430bd6e15ce181fdbce4dd769cdfc971b43e9d5b
|
/doughnuts/doughnuts.py
|
c2437f1b3be0adb4fa3a15bd6216eb97023c4981
|
[
"MIT"
] |
permissive
|
kodosan/Doughnuts
|
171e58415804af12cc54ed34b5b8510823dda70f
|
e246707390fb9c708241d35517a7d773858dbca7
|
refs/heads/master
| 2023-04-01T04:00:12.200820
| 2021-04-01T06:42:46
| 2021-04-01T06:42:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,169
|
py
|
import builtins
from os import path
from sys import argv
from json import loads, JSONDecodeError
from helpmenu import register_helpmenu
from libs.app import Loop_init, run_loop
from libs.config import gset, gget, custom_set, color
from libs.runtime_config import CONFIG
from libs.myapp import banner
builtins.ic = lambda *a, **kw: None
if (CONFIG["DEV"]):
try:
from icecream import ic
builtins.ic = ic
except ImportError:
pass
def main(print_banner: bool = True):
    """Initialise global state and start the interactive Doughnuts loop.

    Loads the bundled user-agent list, registers the help menu, restores
    saved variables from variables.config (when it is readable JSON), then
    enters the REPL.
    """
    if (print_banner):
        banner()
    gset("root_path", path.split(path.realpath(__file__))[0])
    with open(path.join(gget("root_path"), "auxiliary", "user_agents", "ua.txt"), "r") as f:
        gset("user_agents", f.readlines())
    register_helpmenu()
    try:
        with open(path.join(gget("root_path"), "variables.config"), "r") as f:
            try:
                # Restore each saved variable through custom_set so any
                # validation logic runs.
                for key, value in loads(f.read()).items():
                    custom_set(key=key, value=value)
                print(
                    f"\n{color.green('Variable(s) loaded successfully from file variables.config')}\n")
            except JSONDecodeError:
                print(
                    f"\n{color.yellow('Variable(s) could not be read correctly')}\n")
    except FileNotFoundError:
        # No saved variables yet — nothing to restore.
        pass
    except IOError:
        print(f"\n{color.red('Permission denied to read variables.config')}\n")
    run_loop(My_Loop_init(), leave_message="Bye! Doughnuts:)")
class My_Loop_init(Loop_init):
    """Doughnuts REPL configuration: platform-to-plugin-package map and prompts."""

    def set_platforms(self) -> dict:
        platform_packages = {
            "main": "main_plugins",
            "webshell": "webshell_plugins",
            "general": "general",
            "encode": "encode",
        }
        return platform_packages

    def set_prompts(self) -> dict:
        return {"main": "doughnuts > ", "webshell": "> "}
if __name__ == "__main__":
    argc = len(argv)
    if (argc > 1):
        # CLI shortcuts: "generate/gen <args>" builds a webshell without
        # entering the REPL; "connect/c ..." queues a connect command that
        # the REPL runs on startup.
        if (argv[1].lower() in ["generate", "gen"] and 1 < argc < 8):
            gset("outside", True)
            from main_plugins.generate import outside_generate as generate
            generate(*argv[2:])
        elif (argv[1] in ["connect", "c"]):
            gset("preload_command", " ".join(argv[1:]))
            main(False)
    else:
        main()
|
[
"1162410187@qq.com"
] |
1162410187@qq.com
|
cb98b4186be427666468060d4c7ba090787f0417
|
b70eb5577099f88ae9f684f2c87647f98e26d42b
|
/hpc-historias-clinicas/historias/migrations/0012_auto_20150425_1937.py
|
6e8d224cdc4208b57deb58837147f39f9868e9f2
|
[] |
no_license
|
btenaglia/hpc-historias-clinicas
|
be1a392a119a72055ba643fba9c9a09b740aef47
|
649d8660381381b1c591667760c122d73071d5ec
|
refs/heads/master
| 2020-06-03T19:05:17.910077
| 2015-06-10T23:05:31
| 2015-06-10T23:05:31
| 32,827,786
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,145
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
class Migration(migrations.Migration):
    # Auto-generated by Django (makemigrations); avoid hand-editing the
    # operations below.

    dependencies = [
        ('historias', '0011_auto_20150425_1936'),
    ]

    operations = [
        migrations.AlterField(
            model_name='historias',
            name='dpto_cirugia',
            # Field becomes required; default pk 1 backfills existing rows.
            field=models.ForeignKey(default=1, verbose_name='M\xe9dico Responsable', to='historias.DptosCirugiaGeneral'),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name='historias',
            name='fecha_ingreso',
            # NOTE(review): the default is the datetime captured when this
            # migration was generated, not "now" at insert time.
            field=models.DateField(default=datetime.datetime(2015, 4, 25, 19, 37, 9, 146737), help_text='Formato: dd/mm/yyyy', verbose_name='Fecha de Ingreso'),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='historias',
            name='hora_ingreso',
            field=models.TimeField(default=datetime.datetime(2015, 4, 25, 19, 37, 9, 146689), help_text='Formato: hh:mm', verbose_name='Hora de Ingreso'),
            preserve_default=True,
        ),
    ]
|
[
"brunomartintenaglia@gmail.com"
] |
brunomartintenaglia@gmail.com
|
0f1e5f7680d8e48b6c5d8230307c0b7c1c017512
|
3f5bf0ed01ff34036b0476e82acdcdd646f66859
|
/visualocean/__init__.py
|
08c3257e01c6c8fe7351a67a26d464ab08f8a2c6
|
[] |
no_license
|
cpsarason/visualoceanpy
|
9e64590f16659d61b6fefd9fc912a6c868175226
|
b712e73849226dbdebf1c8da57bf00098ed1f4df
|
refs/heads/master
| 2021-03-31T01:15:26.751240
| 2018-03-13T23:45:18
| 2018-03-13T23:45:18
| 125,122,884
| 0
| 0
| null | 2018-03-13T22:31:16
| 2018-03-13T22:31:15
| null |
UTF-8
|
Python
| false
| false
| 219
|
py
|
from __future__ import (absolute_import,
division,
print_function,
unicode_literals)
__author__ = 'Landung Setiawan'
__all__ = ['core', 'utils']
|
[
"landungs@uw.edu"
] |
landungs@uw.edu
|
7d153215cdaf6e880f5ca74101116bc24be6340e
|
b2e9e3db0202a6bd06b5d1f4c4fd3369b5260261
|
/python/p032.py
|
2e8202df7f17ac73122932747adf0ec4d9e10ad0
|
[] |
no_license
|
jackmoody11/project-euler-solutions
|
66e7128cae130499ce518c2008e5df91a6883a68
|
8b6e00bfac7855f5c892f5b3094415935358cb98
|
refs/heads/master
| 2020-04-12T23:52:57.347142
| 2020-01-10T00:23:16
| 2020-01-10T00:23:16
| 162,831,576
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 976
|
py
|
from utils import is_pandigital
def is_nine_pandigital_product(a, b):
    """Return True when the concatenation of a, b and a*b uses nine digits
    and is pandigital (per ``is_pandigital`` from utils).

    Idiom fix: the original spelled ``if cond: return True else: return
    False`` — the boolean expression is returned directly instead.
    """
    digits = str(a) + str(b) + str(a * b)
    # The length check rules out wrong-sized concatenations before the
    # (more expensive) pandigital test runs.
    return len(digits) == 9 and is_pandigital(digits)
def compute():
    """Project Euler 32: sum of all products whose multiplicand/multiplier/
    product concatenation is 1-9 pandigital."""
    # 1 will never return a pandigital product (will repeat digits)
    pandigitals = set()  # distinct products only — several a*b pairs can share one
    # sqrt(987654321) = 31426.96, so we know this is the
    # upper limit for our larger number (we define b as the larger number here)
    for a in range(2, 31426):
        for b in range(a, 31426):
            mult = a * b
            if len(str(a) + str(b) + str(mult)) > 9:
                # Once the concatenation of a, b, and a + b gives
                # a string of length > 9, we can skip to the next
                # value for a
                break
            if is_nine_pandigital_product(a, b) and mult not in pandigitals:
                pandigitals.add(mult)
    return sum(pandigitals)
if __name__ == "__main__":
print(compute())
|
[
"jackmoody@unc.edu"
] |
jackmoody@unc.edu
|
4f504c0716dacadf713fabd2507a80603e3b8c13
|
d2b53b3568890dd805575035d09635c422c6bc4d
|
/python/ray/autoscaler/util.py
|
d436be426411827e6fe4f2b19e777c1de368ccc8
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
mehrdadn/ray
|
939deda7099eb30371cbb920a9725b314c58c0b5
|
3506910c5da257215d38d02f424acc4f419ddbaf
|
refs/heads/master
| 2020-09-03T15:33:35.578248
| 2020-07-31T21:33:27
| 2020-07-31T21:33:27
| 219,498,150
| 2
| 1
|
Apache-2.0
| 2019-11-04T12:37:23
| 2019-11-04T12:37:22
| null |
UTF-8
|
Python
| false
| false
| 6,188
|
py
|
import collections
import hashlib
import json
import jsonschema
import os
import threading
from typing import Any, Dict
import ray
import ray.services as services
from ray.autoscaler.node_provider import get_default_config
from ray.autoscaler.docker import dockerize_if_needed
REQUIRED, OPTIONAL = True, False
RAY_SCHEMA_PATH = os.path.join(
os.path.dirname(ray.autoscaler.__file__), "ray-schema.json")
# Internal kv keys for storing debug status.
DEBUG_AUTOSCALING_ERROR = "__autoscaling_error"
DEBUG_AUTOSCALING_STATUS = "__autoscaling_status"
class ConcurrentCounter:
    """Thread-safe counter keyed by arbitrary hashable keys."""

    def __init__(self):
        self._lock = threading.RLock()
        self._counter = collections.defaultdict(int)

    def inc(self, key, count):
        """Add ``count`` to ``key``; return the new total across all keys."""
        with self._lock:
            self._counter[key] += count
            # RLock is re-entrant, so reading .value while holding it is safe.
            return self.value

    def dec(self, key, count):
        """Subtract ``count`` from ``key``; a key's count may never go negative."""
        with self._lock:
            self._counter[key] -= count
            assert self._counter[key] >= 0, "counter cannot go negative"
            return self.value

    def breakdown(self):
        """Return a plain-dict snapshot of the per-key counts."""
        with self._lock:
            return dict(self._counter)

    @property
    def value(self):
        """Total count summed over all keys."""
        with self._lock:
            return sum(self._counter.values())
def validate_config(config: Dict[str, Any]) -> None:
    """Required Dicts indicate that no extra fields can be introduced.

    Validates ``config`` against the bundled autoscaler JSON schema.
    Raises ValueError when ``config`` is not a dict, and
    jsonschema.ValidationError when schema validation fails.
    """
    if not isinstance(config, dict):
        raise ValueError("Config {} is not a dictionary".format(config))

    with open(RAY_SCHEMA_PATH) as f:
        schema = json.load(f)
    try:
        jsonschema.validate(config, schema)
    except jsonschema.ValidationError as e:
        # Re-raise with the message only; "from None" drops the verbose
        # validator context chain from the traceback.
        raise jsonschema.ValidationError(message=e.message) from None
def prepare_config(config):
    """Return a fully-populated config: provider defaults filled in, shared
    setup commands merged into the head/worker lists, docker settings applied."""
    with_defaults = fillout_defaults(config)
    merge_setup_commands(with_defaults)
    dockerize_if_needed(with_defaults)
    return with_defaults
def fillout_defaults(config: Dict[str, Any]) -> Dict[str, Any]:
    """Overlay ``config`` on the provider's default config (user keys win)
    and guarantee an "auth" dict is present."""
    defaults = get_default_config(config["provider"])
    defaults.update(config)
    defaults["auth"] = defaults.get("auth", {})
    return defaults
def merge_setup_commands(config):
    """Prefix the shared setup_commands onto both the head- and
    worker-specific command lists, mutating and returning ``config``."""
    shared = config["setup_commands"]
    config["head_setup_commands"] = shared + config["head_setup_commands"]
    config["worker_setup_commands"] = shared + config["worker_setup_commands"]
    return config
def with_head_node_ip(cmds):
    """Return copies of ``cmds`` with RAY_HEAD_IP exported to this node's IP
    address before each command runs."""
    head_ip = services.get_node_ip_address()
    out = []
    for cmd in cmds:
        out.append("export RAY_HEAD_IP={}; {}".format(head_ip, cmd))
    return out
def hash_launch_conf(node_conf, auth):
    """Stable SHA-1 fingerprint of a node's launch config plus auth settings.

    sort_keys makes the JSON serialization (and hence the hash) deterministic
    regardless of dict insertion order.
    """
    payload = json.dumps([node_conf, auth], sort_keys=True).encode("utf-8")
    return hashlib.sha1(payload).hexdigest()
# Cache the file hashes to avoid rescanning it each time. Also, this avoids
# inadvertently restarting workers if the file mount content is mutated on the
# head node.  Keyed on the serialized (file_mounts, extra_objs) config.
_hash_cache = {}


def hash_runtime_conf(file_mounts,
                      cluster_synced_files,
                      extra_objs,
                      generate_file_mounts_contents_hash=False):
    """Returns two hashes, a runtime hash and file_mounts_content hash.

    The runtime hash is used to determine if the configuration or file_mounts
    contents have changed. It is used at launch time (ray up) to determine if
    a restart is needed.

    The file_mounts_content hash is used to determine if the file_mounts or
    cluster_synced_files contents have changed. It is used at monitor time to
    determine if additional file syncing is needed.  It is ``None`` when the
    runtime hash was served from the cache and no contents hash was requested.
    """
    runtime_hasher = hashlib.sha1()
    contents_hasher = hashlib.sha1()

    def add_content_hashes(path, allow_non_existing_paths: bool = False):
        # Fold the contents of a file, or of every file under a directory,
        # into contents_hasher.
        def add_hash_of_file(fpath):
            with open(fpath, "rb") as f:
                # Stream in 1 MiB chunks so large files are not held in memory.
                for chunk in iter(lambda: f.read(2**20), b""):
                    contents_hasher.update(chunk)

        path = os.path.expanduser(path)
        if allow_non_existing_paths and not os.path.exists(path):
            return
        if os.path.isdir(path):
            dirs = []
            for dirpath, _, filenames in os.walk(path):
                dirs.append((dirpath, sorted(filenames)))
            # Sort directory entries so the digest is independent of
            # os.walk's traversal order.
            for dirpath, filenames in sorted(dirs):
                contents_hasher.update(dirpath.encode("utf-8"))
                for name in filenames:
                    contents_hasher.update(name.encode("utf-8"))
                    fpath = os.path.join(dirpath, name)
                    add_hash_of_file(fpath)
        else:
            add_hash_of_file(path)

    conf_str = (json.dumps(file_mounts, sort_keys=True).encode("utf-8") +
                json.dumps(extra_objs, sort_keys=True).encode("utf-8"))

    # Only generate a contents hash if generate_contents_hash is true or
    # if we need to generate the runtime_hash
    if conf_str not in _hash_cache or generate_file_mounts_contents_hash:
        for local_path in sorted(file_mounts.values()):
            add_content_hashes(local_path)
        head_node_contents_hash = contents_hasher.hexdigest()

        # Generate a new runtime_hash if its not cached
        # The runtime hash does not depend on the cluster_synced_files hash
        # because we do not want to restart nodes only if cluster_synced_files
        # contents have changed.
        if conf_str not in _hash_cache:
            runtime_hasher.update(conf_str)
            runtime_hasher.update(head_node_contents_hash.encode("utf-8"))
            _hash_cache[conf_str] = runtime_hasher.hexdigest()

        # Add cluster_synced_files to the file_mounts_content hash
        if cluster_synced_files is not None:
            for local_path in sorted(cluster_synced_files):
                # For cluster_synced_files, we let the path be non-existant
                # because its possible that the source directory gets set up
                # anytime over the life of the head node.
                add_content_hashes(local_path, allow_non_existing_paths=True)
        file_mounts_contents_hash = contents_hasher.hexdigest()
    else:
        file_mounts_contents_hash = None

    return (_hash_cache[conf_str], file_mounts_contents_hash)
|
[
"noreply@github.com"
] |
mehrdadn.noreply@github.com
|
eeeb8737f788a99b63bc14fa4ee601c37472ba24
|
28dbe47aba287ed94ef7bba734203736bcc06249
|
/.history/run_dmac_20200702162638.py
|
87a34ccf19b64f8739c194e0fbccc0dad71a4485
|
[] |
no_license
|
ntung88/Trading_Algorithms
|
242fd816b19df95e02e9fcd8c5c91c862d2ede40
|
d96488b1754e3751f739d9c3f094a8f8dc54a0a9
|
refs/heads/master
| 2022-11-19T16:04:07.800344
| 2020-07-17T21:14:10
| 2020-07-17T21:14:10
| 276,239,640
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 549
|
py
|
'''
Script for running dmac on current data. Outputs decision for paper trading since I don't have the resources
to trade electronically :(((
'''
import dmac
import yfinance as yf
import numpy as np
import sys
def main():
    """Download price history for the tickers given on the command line and
    print the DMAC moving-average periods found by the optimizer."""
    # Fix: `pd` was used below but pandas was never imported anywhere in
    # this module; import it locally so the script can run.
    import pandas as pd

    args = sys.argv[1:]
    tickers = ' '.join(args)
    data = yf.download(tickers, period='max', group_by='ticker')
    # NOTE(review): 'TSLA' is hard-coded even though the tickers come from
    # argv — confirm whether this should use the requested symbols instead.
    dirty = pd.DataFrame(data['TSLA'])
    # Currently using only closing prices.
    # NOTE(review): clean_data/optimize are unqualified names not defined in
    # this file — presumably they live in the imported dmac module
    # (dmac.clean_data / dmac.optimize); verify before relying on this.
    frame = clean_data(dirty)['Close']
    periods = optimize(frame)
    print(periods)
if __name__ == "__main__":
    # Fix: the original read `main(_`, a syntax error; call main() properly.
    main()
|
[
"nathantung@Nathans-MacBook-Pro.local"
] |
nathantung@Nathans-MacBook-Pro.local
|
97d790031b2efb4f1acf57eee08a3880d3106887
|
0bbd11c91de6ed2315a463809cb1094d6523ca02
|
/proj03/lattice5/step-afm.py
|
690ed8ae14b16b5ae6e3f4b02c7c33b88e22ddfe
|
[] |
no_license
|
impurity80/emto
|
b232048829002f2ba721019c45df420696f48973
|
0a7a0d2fcdf41e7763bb4de4244d6598a74ab270
|
refs/heads/master
| 2021-01-18T19:46:39.102514
| 2017-02-20T04:04:42
| 2017-02-20T04:04:42
| 69,660,962
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,175
|
py
|
import csv
import os
from ase import Atom, Atoms
from ase.lattice.cubic import *
from ase.visualize import view
from numpy import *
from emto import *
from ase.utils.eos import EquationOfState
import matplotlib.pyplot as plt
from ase.lattice import bulk
# Delta-energy calculation for antiferromagnetic (AFM) Fe-based alloys:
# for every composition row in mole.csv, sweep the lattice constant,
# run an EMTO total-energy calculation per point and fit an equation of
# state.  Written for Python 2 (print statements).
name = 'afm'

curr_dir = os.getcwd()
os.system('mkdir eos')
os.system('mkdir result')

# Per-alloy log file and one-line-per-alloy summary CSV; stale copies
# from a previous run are removed first.
result = '{0}/result/result-{1}.txt'.format(curr_dir,name)
os.system('rm {0}'.format(result))
result_all = '{0}/result/result_summary-{1}.csv'.format(curr_dir,name)
os.system('rm {0}'.format(result_all))

save(result, 'delta calculation {0}'.format(name))
save(result_all, 'delta calculation {0}'.format(name))

# Each CSV row: id, carbon, then alloying fractions for Mn, Ni, Cr, Al,
# Si, Mo, Co, Cu, Nb, Ti, V, W.  Fractions are snapped to even multiples
# so they can be split equally over the two AFM sublattices.
csvfile = open('mole.csv', 'rb')
buffer = csv.reader(csvfile, delimiter=',', quoting=csv.QUOTE_NONNUMERIC)

for row in buffer:
    id = int(row[0])
    c = row[1]
    mn = round(row[2]/2.0, 3 )*2.0
    ni = round(row[3]/2.0, 3 )*2.0
    cr = round(row[4]/2.0, 3 )*2.0
    al = round(row[5]/2.0, 3 )*2.0
    si = round(row[6]/2.0, 3 )*2.0
    mo = round(row[7]/2.0, 3 )*2.0
    co = round(row[8]/2.0, 3 )*2.0
    cu = round(row[9]/2.0, 3 )*2.0
    nb = round(row[10]/2.0, 3 )*2.0
    ti = round(row[11]/2.0, 3 )*2.0
    v = round(row[12]/2.0, 3 )*2.0
    w = round(row[13]/2.0, 3 )*2.0

    print row
    print mn, ni, cr
    # Iron takes whatever composition fraction remains.
    fe = 1-mn-ni-cr-al-si-mo-co-cu-nb-ti-v-w

    save(result, 'alloy id {0}'.format(id))

    # Scan +/-2% around the reference lattice constant (9 points).
    OPTIONS = np.linspace(0.98, 1.02, 9)

    volumes = []
    energies = []
    save(result, 'nonmagnetic calculate {0}'.format(id))
    for opt in OPTIONS:
        # NOTE: `c` (lattice parameter) shadows the carbon fraction read
        # from the CSV above; the carbon value is unused afterwards.
        l = 3.59 * opt
        a = l / sqrt(2)
        c = l
        # Two-atom tetragonal cell; the two sites carry opposite spins.
        atoms = Atoms('Fe2',
                      scaled_positions=[
                          (0.0, 0.0, 0),
                          (0.5, 0.5, 0.5)],
                      cell=[a, a, c],
                      pbc=(1, 1, 1))
        atoms.set_tags([1, 2])

        # Sublattice 1 gets spin +1, sublattice 2 spin -1 for every species.
        alloys = []
        alloys.append(Alloy(1, 'Fe', fe, 1.0))
        alloys.append(Alloy(2, 'Fe', fe, -1.0))
        if mn > 1e-7:
            alloys.append(Alloy(1, 'Mn', mn, 1.0))
            alloys.append(Alloy(2, 'Mn', mn, -1.0))
        if ni > 1e-7:
            alloys.append(Alloy(1, 'Ni', ni, 1.0))
            alloys.append(Alloy(2, 'Ni', ni, -1.0))
        if cr > 1e-7:
            alloys.append(Alloy(1, 'Cr', cr, 1.0))
            alloys.append(Alloy(2, 'Cr', cr, -1.0))
        if al > 1e-7:
            alloys.append(Alloy(1, 'Al', al, 1.0))
            alloys.append(Alloy(2, 'Al', al, -1.0))
        if si > 1e-7:
            alloys.append(Alloy(1, 'Si', si, 1.0))
            alloys.append(Alloy(2, 'Si', si, -1.0))
        if mo > 1e-7:
            alloys.append(Alloy(1, 'Mo', mo, 1.0))
            alloys.append(Alloy(2, 'Mo', mo, -1.0))
        if co > 1e-7:
            alloys.append(Alloy(1, 'Co', co, 1.0))
            alloys.append(Alloy(2, 'Co', co, -1.0))
        if cu > 1e-7:
            alloys.append(Alloy(1, 'Cu', cu, 1.0))
            alloys.append(Alloy(2, 'Cu', cu, -1.0))
        if nb > 1e-7:
            alloys.append(Alloy(1, 'Nb', nb, 1.0))
            alloys.append(Alloy(2, 'Nb', nb, -1.0))
        if ti > 1e-7:
            alloys.append(Alloy(1, 'Ti', ti, 1.0))
            alloys.append(Alloy(2, 'Ti', ti, -1.0))
        if v > 1e-7:
            alloys.append(Alloy(1, 'V', v, 1.0))
            alloys.append(Alloy(2, 'V', v, -1.0))
        if w > 1e-7:
            alloys.append(Alloy(1, 'W', w, 1.0))
            alloys.append(Alloy(2, 'W', w, -1.0))

        calc = EMTO()
        calc.set(dir='work/{1}/alloy-{2}/opt-{0:0.4f}'.format(opt, name, id),
                 lat=6,
                 ncpa=20,
                 amix=0.05,
                 afm='F',
                 kpts=[13, 13, 13]
                 )
        calc.set_alloys(alloys)
        atoms.set_calculator(calc)

        nm_e = atoms.get_potential_energy()/atoms.get_number_of_atoms()
        nm_v = atoms.get_volume()/atoms.get_number_of_atoms()
        # Only keep converged points (energy meaningfully below zero).
        if nm_e < -0.001:
            volumes.append(nm_v)
            energies.append(nm_e)
        save(result, '{3} result : {0} {1} {2}'.format(opt, nm_v, nm_e, name))

    print volumes, energies

    # Keep only points within a sane energy window of the first point
    # before fitting the equation of state.  NOTE: the loop variables
    # v and e shadow the vanadium fraction and (later) energies entry.
    temp_volumes = []
    temp_energies = []
    pivot = energies[0]
    for v, e in zip(volumes, energies):
        if e-pivot > -0.04 and e-pivot < 0.01:
            temp_volumes.append(v)
            temp_energies.append(e)

    eos = EquationOfState(temp_volumes, temp_energies)
    v0, e0, B = eos.fit()
    eos.plot('eos/{1}-{0}.png'.format(id,name))
    save(result, '{0} {1} {2} {3}'.format(v0, e0, B, (4.0 * v0) ** (1.0 / 3.0)))
    save(result, OPTIONS)
    save(result, volumes)
    save(result, energies)
    save(result, '------------------------')
    save(result_all, '{0}, {1}, {2}, {3}, {4}, {5}'.format(id, e0, v0, B, volumes, energies ))
    # save(result_all, '{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}, {8}, {9}, {10}, {11}, {12}, {13}, {14}, {15}, {16}, {17}, {18}, {19}, {20}, {21}, {22} '.format(id, hcp_e0-bct_e0, hcp_e0-fcc_e0, hcp_e0-fccf_e0, fcc_e0-bct_e0, fccf_e0-bct_e0, row, fcc_v0, fcc_e0, fcc_B, fccf_v0, fccf_e0, fccf_B, bct_v0, bct_e0, bct_B, hcp_v0, hcp_e0, hcp_B, fcc_energies, fccf_energies, bct_energies, hcp_energies))
|
[
"impurity@postech.ac.kr"
] |
impurity@postech.ac.kr
|
f9a8acabb60a466dfe29c4ce1b191ff815101635
|
574c640c4adf212db0bcc1f93e9ca48d2296ad72
|
/backend/delivery_order/migrations/0001_initial.py
|
479852031897524fe8d7963958bdc69b06220b1b
|
[] |
no_license
|
crowdbotics-apps/test2-27795
|
15331bc54c504607a5cb97369f557e08b1a28343
|
593c571eb9fff06e48e392b4fbcd2c3cb9f82151
|
refs/heads/master
| 2023-05-12T02:36:09.348868
| 2021-06-07T08:28:50
| 2021-06-07T08:28:50
| 374,590,711
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,609
|
py
|
# Generated by Django 2.2.20 on 2021-06-07 08:28
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the delivery_order app: Bill, PaymentMethod and
    Order models.  Auto-generated by Django; applied migrations should not
    be hand-edited."""

    initial = True

    dependencies = [
        ('delivery_user_profile', '0001_initial'),
        ('menu', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Bill',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('total_amount', models.FloatField()),
                ('timestamp_created', models.DateTimeField(auto_now_add=True)),
                ('contact_info', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='bill_contact_info', to='delivery_user_profile.ContactInfo')),
                ('profile', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='bill_profile', to='delivery_user_profile.Profile')),
            ],
        ),
        migrations.CreateModel(
            name='PaymentMethod',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255)),
                ('detail', models.TextField()),
            ],
        ),
        migrations.CreateModel(
            name='Order',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('quantity', models.IntegerField()),
                ('total_price', models.FloatField()),
                ('status', models.CharField(max_length=20)),
                ('notes', models.TextField()),
                ('timestamp_created', models.DateTimeField(auto_now_add=True)),
                ('bill', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='order_bill', to='delivery_order.Bill')),
                ('item_variant', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='order_item_variant', to='menu.ItemVariant')),
                ('payment_method', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='order_payment_method', to='delivery_order.PaymentMethod')),
                ('profile', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='order_profile', to='delivery_user_profile.Profile')),
            ],
        ),
    ]
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
b92a4b791f5e98e824c6bf9f13238386d080e65d
|
996003d0ee6444480f98eeb496793be6420c9b63
|
/tests/mongodb/events/test_database_events.py
|
bc5eb31811161a6ce52e572e2f77568a27c43dfe
|
[
"BSD-3-Clause",
"BSD-3-Clause-Clear"
] |
permissive
|
Wytamma/fasteve
|
fb8b54cd6d8c8bb7c058a04543dfa3781a3a869c
|
0d28a50dc6b6e017bbae5ff82150d081e3ad818e
|
refs/heads/master
| 2023-06-08T04:52:02.510353
| 2023-06-01T07:32:07
| 2023-06-01T07:32:07
| 199,584,609
| 41
| 1
|
BSD-3-Clause-Clear
| 2022-03-17T12:04:28
| 2019-07-30T05:58:32
|
Python
|
UTF-8
|
Python
| false
| false
| 1,136
|
py
|
from typing import Optional
from fasteve import Fasteve, MongoModel, Resource, MongoObjectId
from starlette.testclient import TestClient
from pydantic import Field
class People(MongoModel):
    """Mongo-backed person document; the Mongo ``_id`` is exposed as ``id``."""
    id: Optional[MongoObjectId] = Field(alias="_id")
    name: Optional[str]
# Expose /people with both collection-level and item-level CRUD methods.
people = Resource(
    name="people",
    model=People,
    resource_methods=["GET", "POST", "DELETE"],
    item_methods=["GET", "DELETE", "PUT", "PATCH"],
)
resources = [people]

app = Fasteve(resources=resources)
@app.on_event("after_read_resource")
async def after_read_resource_callback(name, response):
    # Record that the collection-level read hook fired.  `events` is defined
    # below; the name is resolved at call time, not definition time.
    events.append("after_read_resource")


@app.on_event("after_read_item")
async def after_read_item_callback(name, response):
    # Record that the item-level read hook fired.
    events.append("after_read_item")


# Hook names collected by the callbacks above; asserted on by the test.
events = []
def test_database_events():
    """Read the collection and a single item, then check both read hooks ran."""
    with TestClient(app) as test_client:
        response = test_client.get("/people")
        data = {"name": "Curie"}
        response = test_client.post("/people", json=data)
        # Fetch the freshly created item by its returned _id.
        response = test_client.get(f"/people/{response.json()['_data'][0]['_id']}")
        assert "after_read_resource" in events
        assert "after_read_item" in events
|
[
"wytamma.wirth@me.com"
] |
wytamma.wirth@me.com
|
5c7c7fdbcb35fd6878837d4f230c5bc598b4168c
|
9adc810b07f7172a7d0341f0b38088b4f5829cf4
|
/experiments/vitchyr/disentanglement/mix_vectorized_and_single_reward/pnp_first_try.py
|
740841243f26fa33669cbc0f7de986c37b36b3b4
|
[
"MIT"
] |
permissive
|
Asap7772/railrl_evalsawyer
|
7ee9358b5277b9ddf2468f0c6d28beb92a5a0879
|
baba8ce634d32a48c7dfe4dc03b123e18e96e0a3
|
refs/heads/main
| 2023-05-29T10:00:50.126508
| 2021-06-18T03:08:12
| 2021-06-18T03:08:12
| 375,810,557
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,205
|
py
|
import torch.nn.functional as F
import rlkit.misc.hyperparameter as hyp
from rlkit.launchers.experiments.disentanglement.contextual_encoder_distance_launcher import (
encoder_goal_conditioned_sac_experiment
)
from rlkit.launchers.launcher_util import run_experiment
if __name__ == "__main__":
    # Base hyperparameter configuration; individual keys are overridden by
    # the sweep defined in search_space below.
    variant = dict(
        env_id='OneObjectPickAndPlace2DEnv-v0',
        disentangled_qf_kwargs=dict(
            encode_state=True,
        ),
        qf_kwargs=dict(
            hidden_sizes=[400, 300],
        ),
        policy_kwargs=dict(
            hidden_sizes=[400, 300],
        ),
        policy_using_encoder_settings=dict(
            encode_state=False,
            encode_goal=False,
            detach_encoder_via_goal=False,
            detach_encoder_via_state=False,
        ),
        sac_trainer_kwargs=dict(
            reward_scale=1,
            discount=0.99,
            soft_target_tau=1e-3,
            target_update_period=1,
            single_loss_weight=0.5,
            use_automatic_entropy_tuning=True,
        ),
        num_presampled_goals=5000,
        max_path_length=100,
        algo_kwargs=dict(
            batch_size=256,
            num_epochs=300,
            num_eval_steps_per_epoch=1000,
            num_expl_steps_per_train_loop=1000,
            num_trains_per_train_loop=1000,
            min_num_steps_before_training=1000,
            # Smaller debug settings, kept for quick local iteration:
            # num_epochs=10,
            # num_eval_steps_per_epoch=100,
            # num_expl_steps_per_train_loop=100,
            # num_trains_per_train_loop=100,
            # min_num_steps_before_training=100,
        ),
        replay_buffer_kwargs=dict(
            fraction_future_context=0.5,
            fraction_distribution_context=0.5,
            max_size=int(1e6),
        ),
        save_debug_video=True,
        debug_visualization_kwargs=dict(
            save_period=20,
            initial_save_period=2,
        ),
        save_video=True,
        save_video_kwargs=dict(
            save_video_period=20,
            rows=2,
            columns=3,
            subpad_length=1,
            subpad_color=127,
            pad_length=1,
            pad_color=0,
            num_columns_per_rollout=5,
        ),
        evaluation_goal_sampling_mode='random',
        exploration_goal_sampling_mode='random',
        exploration_policy_kwargs=dict(
            exploration_version='occasionally_repeat',
            repeat_prob=0.5,
        ),
        encoder_cnn_kwargs=dict(
            kernel_sizes=[3, 3, 3],
            n_channels=[8, 16, 32],
            strides=[1, 1, 1],
            paddings=[0, 0, 0],
            pool_type='none',
            hidden_activation='relu',
        ),
        use_image_observations=True,
        env_renderer_kwargs=dict(
            width=12,
            height=12,
            output_image_format='CHW',
        ),
        video_renderer_kwargs=dict(
            width=48,
            height=48,
            output_image_format='CHW',
        ),
        debug_renderer_kwargs=dict(
            width=48,
            height=48,
            output_image_format='CHW',
        ),
        use_separate_encoder_for_policy=True,
        skip_encoder_mlp=False,
        encoder_kwargs=dict(
            hidden_sizes=[],
        ),
        distance_scatterplot_save_period=20,
        distance_scatterplot_initial_save_period=2,
    )

    # Each key here overrides the matching (dotted) path in `variant`; the
    # sweeper enumerates the cross product of all listed values.
    search_space = {
        'reward_type': [
            'encoder_distance',
        ],
        'encoder_kwargs.output_size': [
            8,
        ],
        'max_path_length': [
            20,
        ],
        'encoder_kwargs.hidden_sizes': [
            [],
            # [64],
            # [64, 64],
        ],
        'replay_buffer_kwargs.fraction_future_context': [
            0.5,
        ],
        'disentangled_qf_kwargs.architecture': [
            # 'single_head_match_many_heads',
            'many_heads',
        ],
        'sac_trainer_kwargs.single_loss_weight': [
            1.0,
            0.9,
            0.5,
            0.1,
            0.0,
        ]
    }
    sweeper = hyp.DeterministicHyperparameterSweeper(
        search_space, default_parameters=variant,
    )

    # Local debug launch settings (immediately overridden below).
    n_seeds = 1
    mode = 'local'
    exp_name = 'dev-{}'.format(
        __file__.replace('/', '-').replace('_', '-').split('.')[0]
    )

    # Actual launch settings used for the sweep.
    n_seeds = 2
    mode = 'sss'
    exp_name = 'pnp-img-obs-enc-d-rew-many-heads--sweep-single-loss-weight'

    for exp_id, variant in enumerate(sweeper.iterate_hyperparameters()):
        for seed in range(n_seeds):
            variant['exp_id'] = exp_id
            # NOTE(review): the seed is not written into the variant, so both
            # seed iterations launch with identical settings.
            # variant['seed'] = seed
            run_experiment(
                encoder_goal_conditioned_sac_experiment,
                exp_name=exp_name,
                mode=mode,
                variant=variant,
                use_gpu=True,
                num_exps_per_instance=3,
                # slurm_config_name='cpu_co',
                gcp_kwargs=dict(
                    zone='us-east1-c',
                    gpu_kwargs=dict(
                        gpu_model='nvidia-tesla-k80',
                        num_gpu=1,
                    )
                ),
                time_in_mins=int(2.5*24*60),
            )
|
[
"alexanderkhazatsky@gmail.com"
] |
alexanderkhazatsky@gmail.com
|
a78915eef8809887752870022b28b634ec01beb0
|
aea74a8c1d4ad17eb65b7c70da5342c01fd1a930
|
/websites_postgres/scraper_topwatch.py
|
9889b1522ed08d8759a96633ee4c81cc9235c4e1
|
[] |
no_license
|
savusebastian/angular_project
|
4e6d8b398e17ca91842d7579d8f4da8650e7a13a
|
9c28c25e4b9875abf346f7e9a7e8baa34bc3f9ee
|
refs/heads/main
| 2023-04-17T07:03:32.016850
| 2021-05-09T09:07:55
| 2021-05-09T09:07:55
| 365,710,891
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,037
|
py
|
from bs4 import BeautifulSoup
import psycopg2
import requests
def topwatch_DB():
    """Scrape product data from topwatch.ro and upsert it into the
    ``product`` table.

    Every ``<loc>`` link found on the entry page is fetched and its
    schema.org ``itemprop`` attributes are parsed; rows matched on
    ``model`` get a price update, unknown models are inserted.
    Failing links are printed and skipped (best-effort).
    """
    con = psycopg2.connect(
        host='localhost',
        database='postgres',
        user='postgres',
        # NOTE(review): hard-coded credentials; move to env vars/config.
        password='winding1127!'
    )
    cur = con.cursor()

    URL = 'https://www.topwatch.ro/fossil-fs4735'
    # e.g. 'topwatch' from 'www.topwatch.ro'.
    shop = URL.split('/')[2].split('.')[1]
    page = requests.get(URL)
    soup = BeautifulSoup(page.content, 'html.parser')
    available_data = soup.find_all('loc')
    links = [item.get_text() for item in available_data]

    for link in links:
        try:
            web_page = requests.get(link)
            web_soup = BeautifulSoup(web_page.content, 'html.parser')
            schemaorg_data = web_soup.find_all(itemprop=True)
            data = {}
            exists_name = False
            exists_image = False
            for item in schemaorg_data:
                prop = item.get('itemprop')
                # Only the first 'name' and 'image' on the page are kept.
                if prop == 'name' and not exists_name:
                    data[prop] = item.get_text().strip()
                    exists_name = True
                if prop in ('priceCurrency', 'price'):
                    data[prop] = item.get('content')
                if prop == 'model':
                    data[prop] = item.get_text()
                if prop == 'image' and not exists_image:
                    data[prop] = item.get('src')
                    exists_image = True

            # Security fix: use parameterized queries instead of string
            # interpolation — the values come from scraped, untrusted HTML.
            cur.execute("SELECT model FROM product WHERE model = %s",
                        (data['model'],))
            result = cur.fetchall()

            if result != []:
                # print('Update', link)
                cur.execute("UPDATE product SET price = %s WHERE model = %s",
                            (data['price'], data['model']))
                con.commit()
            else:
                # print('Insert', link)
                cur.execute(
                    "INSERT INTO product"
                    "(url, shop, product_name, image, model, price,"
                    " price_currency) VALUES (%s, %s, %s, %s, %s, %s, %s)",
                    (link, shop, data['name'], data['image'], data['model'],
                     data['price'], data['priceCurrency']))
                con.commit()
        except Exception:
            # Replaces the original bare `except:`; still best-effort per
            # link, but no longer swallows SystemExit/KeyboardInterrupt.
            print(link)

    # for item in data:
    # 	print(item, ':', data[item])
    cur.close()
    con.close()
# Run the scraper only when executed as a script, not on import.
if __name__ == '__main__':
    topwatch_DB()
|
[
"savusebastianf@gmail.com"
] |
savusebastianf@gmail.com
|
63724cd5abaef45153bdee596d8bdd703ee2dcdc
|
9edaf93c833ba90ae9a903aa3c44c407a7e55198
|
/crossref/models/gov/nih/nlm/ncbi/jats1/table_wrap_group_orientation.py
|
ec33b15cb988fe0f5c36ce8ce7d99b32f3e84539
|
[] |
no_license
|
tefra/xsdata-samples
|
c50aab4828b8c7c4448dbdab9c67d1ebc519e292
|
ef027fe02e6a075d8ed676c86a80e9647d944571
|
refs/heads/main
| 2023-08-14T10:31:12.152696
| 2023-07-25T18:01:22
| 2023-07-25T18:01:22
| 222,543,692
| 6
| 1
| null | 2023-06-25T07:21:04
| 2019-11-18T21:00:37
|
Python
|
UTF-8
|
Python
| false
| false
| 170
|
py
|
from enum import Enum
__NAMESPACE__ = "http://www.ncbi.nlm.nih.gov/JATS1"
class TableWrapGroupOrientation(Enum):
    """Allowed ``orientation`` attribute values for a JATS
    <table-wrap-group> element."""
    LANDSCAPE = "landscape"
    PORTRAIT = "portrait"
|
[
"tsoulloftas@gmail.com"
] |
tsoulloftas@gmail.com
|
e3feccc9956618a66353c2c0564ac4e266a9b46c
|
a9c359681631e8344f55163a2d69018ed02c0a90
|
/openr/py/openr/cli/commands/tech_support.py
|
2193ec40af9b74d4d2d772c4d554f30c0e0d3e8f
|
[
"MIT",
"LicenseRef-scancode-proprietary-license"
] |
permissive
|
facebook/openr
|
66c82707ae47fa5ed711c20f0355ad7100a3cf1c
|
8e4c6e553f0314763c1595dd6097dd578d771f1c
|
refs/heads/main
| 2023-09-03T02:55:03.399114
| 2023-07-26T16:46:46
| 2023-07-26T16:46:46
| 108,306,129
| 936
| 295
|
MIT
| 2023-08-31T23:03:31
| 2017-10-25T17:59:53
|
C++
|
UTF-8
|
Python
| false
| false
| 4,893
|
py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import subprocess
import sys
from builtins import object
from openr.cli.commands import config, decision, fib, kvstore, lm, monitor, openr, perf
from openr.cli.utils.utils import parse_nodes
from openr.utils.consts import Consts
class TechSupportCmd(object):
    """Collects an Open/R debug report by running a battery of breeze
    sub-commands, printing each section under a title banner, and exiting
    non-zero if any section failed."""

    def __init__(self, cli_opts):
        """initialize the tech support command"""

        self.cli_opts = cli_opts
        # Keep short timeout
        self.cli_opts.timeout = 1000
        # Print routes or not
        self.print_routes = False

    def run(self, routes):
        # `routes`: when True, also dump the (potentially large) route tables.
        self.print_routes = routes
        funcs = [
            ("openr config file", self.print_config_file),
            ("openr runtime params", self.print_runtime_params),
            ("openr version", self.print_openr_version),
            ("openr config", self.print_config),
            ("breeze lm links", self.print_lm_links),
            ("breeze kvstore peers", self.print_kvstore_peers),
            ("breeze kvstore nodes", self.print_kvstore_nodes),
            ("breeze kvstore prefixes", self.print_kvstore_prefixes),
            ("breeze kvstore keys --ttl", self.print_kvstore_keys),
            ("breeze decision adj", self.print_decision_adjs),
            ("breeze decision validate", self.print_decision_validate),
            ("breeze decision routes", self.print_decision_routes),
            ("breeze fib validate", self.print_fib_validate),
            ("breeze fib unicast-routes", self.print_fib_unicast_routes),
            ("breeze fib mpls-routes", self.print_fib_mpls_routes),
            ("breeze fib routes-installed", self.print_fib_routes_installed),
            ("breeze perf fib", self.print_perf_fib),
            ("breeze monitor counters", self.print_monitor_counters),
            ("breeze monitor logs", self.print_monitor_logs),
        ]
        failures = []
        for title, func in funcs:
            self.print_title(title)
            try:
                func()
            except Exception as e:
                # Keep going so one broken section doesn't abort the report;
                # failed section titles are summarized at the end.
                failures.append(title)
                print(e, file=sys.stderr)
        if failures:
            self.print_title("openr-tech-support failures")
            print("\n".join(failures))
        ret = 1 if failures else 0
        sys.exit(ret)

    def print_title(self, title):
        print("\n-------- {} --------\n".format(title))

    def print_config_file(self):
        if not os.path.isfile(Consts.OPENR_CONFIG_FILE):
            print("Missing Config File")
            return
        with open(Consts.OPENR_CONFIG_FILE) as f:
            print(f.read())

    def print_runtime_params(self):
        # `pgrep -a openr` shows the running process and its full command line.
        output = subprocess.check_output(
            ["pgrep", "-a", "openr"], stderr=subprocess.STDOUT
        )
        print(output)

    def print_openr_version(self):
        openr.VersionCmd(self.cli_opts).run(False)

    def print_config(self):
        config.ConfigLinkMonitorCmd(self.cli_opts).run()
        config.ConfigPrefixManagerCmd(self.cli_opts).run()

    def print_lm_links(self):
        lm.LMLinksCmd(self.cli_opts).run(False, False)

    def print_kvstore_peers(self):
        kvstore.PeersCmd(self.cli_opts).run()

    def print_kvstore_nodes(self):
        kvstore.NodesCmd(self.cli_opts).run()

    def print_kvstore_prefixes(self):
        kvstore.PrefixesCmd(self.cli_opts).run(["all"], False)

    def print_kvstore_keys(self):
        kvstore.KeysCmd(self.cli_opts).run(False, "", originator=None, ttl=True)

    def print_decision_adjs(self):
        decision.DecisionAdjCmd(self.cli_opts).run({"all"}, {"all"}, True, False)

    def print_decision_validate(self):
        decision.DecisionValidateCmd(self.cli_opts).run()

    def print_decision_routes(self):
        # Route dumps are gated behind --routes since they can be very large.
        if not self.print_routes:
            return
        nodes = parse_nodes(self.cli_opts, "")
        decision.DecisionRoutesComputedCmd(self.cli_opts).run(nodes, [], [], False)

    def print_fib_validate(self):
        fib.FibValidateRoutesCmd(self.cli_opts).run()

    def print_fib_unicast_routes(self):
        if not self.print_routes:
            return
        fib.FibUnicastRoutesCmd(self.cli_opts).run([], False, False)

    def print_fib_mpls_routes(self):
        if not self.print_routes:
            return
        fib.FibMplsRoutesCmd(self.cli_opts).run([], False)

    def print_fib_routes_installed(self):
        if not self.print_routes:
            return
        fib.FibRoutesInstalledCmd(self.cli_opts).run([])

    def print_perf_fib(self):
        perf.ViewFibCmd(self.cli_opts).run()

    def print_monitor_counters(self):
        monitor.CountersCmd(self.cli_opts).run()

    def print_monitor_logs(self):
        monitor.LogCmd(self.cli_opts).run()
|
[
"facebook-github-bot@users.noreply.github.com"
] |
facebook-github-bot@users.noreply.github.com
|
c8c60f4d6c2defd5798062f40691b3b44f82ac3c
|
d66818f4b951943553826a5f64413e90120e1fae
|
/hackerearth/Basic Programming/Implementation/Basics of Implementation/Bear and Medals/test.py
|
eef855d601ec9351deb2295e35c080702103ba53
|
[
"MIT"
] |
permissive
|
HBinhCT/Q-project
|
0f80cd15c9945c43e2e17072416ddb6e4745e7fa
|
19923cbaa3c83c670527899ece5c3ad31bcebe65
|
refs/heads/master
| 2023-08-30T08:59:16.006567
| 2023-08-29T15:30:21
| 2023-08-29T15:30:21
| 247,630,603
| 8
| 1
|
MIT
| 2020-07-22T01:20:23
| 2020-03-16T06:48:02
|
Python
|
UTF-8
|
Python
| false
| false
| 619
|
py
|
import io
import unittest
from contextlib import redirect_stdout
from unittest.mock import patch
class TestQ(unittest.TestCase):
    """Runs solution.py against the sample input by mocking builtins.input
    and capturing everything it prints."""

    @patch('builtins.input', side_effect=[
        '2',
        '4',
        '0 0 2',
        '1 2 1',
        '2 0 0',
        '0 2 0',
        '1',
        '0 1000 0',
    ])
    def test_case_0(self, input_mock=None):
        """Importing `solution` executes it; compare its stdout to the
        expected answers for the two test cases."""
        text_trap = io.StringIO()
        with redirect_stdout(text_trap):
            import solution
        self.assertEqual(text_trap.getvalue(),
                         '4\n' +
                         '1000\n')
# Allow running this test module directly with `python test.py`.
if __name__ == '__main__':
    unittest.main()
|
[
"hbinhct@gmail.com"
] |
hbinhct@gmail.com
|
374819e09c7095e180257d044a6656442ae72ef5
|
5fcc3fd608a794d260368318c62547f74d4c1416
|
/lindenmayer.py
|
db17a6e4335ac8dda5edf69ca1b53747b3ec0257
|
[] |
no_license
|
ds-gurukandhamoorthi/intro-python-exs
|
241fb9158096479a100ef378f291ba83e1a7d5d4
|
68c386e51c13d0f31e273016eefc4e29ddecdc04
|
refs/heads/master
| 2022-02-25T22:28:41.061722
| 2019-10-22T18:36:46
| 2019-10-22T18:36:46
| 103,829,521
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 471
|
py
|
def lindenmayer(frm, production_rules, nb_transf=1):
    """Apply an L-system's production rules to *frm*.

    Each of the *nb_transf* passes rewrites every character through
    *production_rules*; characters without a rule are kept unchanged.
    For ``nb_transf < 1`` the input string is returned untouched.

    >>> lindenmayer('AB', {'A': 'AB', 'B': 'A'}, 2)
    'ABAAB'
    """
    if nb_transf < 1:
        return frm
    transformed = ''.join(production_rules.get(c, c) for c in frm)
    if nb_transf == 1:
        return transformed
    # Fix: removed the unreachable bare `return` that followed this line.
    return lindenmayer(transformed, production_rules, nb_transf - 1)
if __name__ == "__main__":
    # Two iterations of the Hilbert-curve L-system.
    hilb = lindenmayer('L', {'L':'+RF-LFL-FR+', 'R':'-LF+RFR+FL-'}, 2)
    print(hilb)
    # Two iterations of the Koch-curve L-system.
    koch = lindenmayer('F', {'F':'F+F-F-F+F'},2)
    print(koch)
|
[
"ds.gurukandhamoorthi@gmail.com"
] |
ds.gurukandhamoorthi@gmail.com
|
c0862bd436f7908175d607f2dbb549efe9d45c55
|
e5504d8c4880993b82d5583a11c5cc4623e0eac2
|
/LeetCode/30-day-challenge/June/june 8th - june 14th/randomizedSet.py
|
d1ab1fc8c9c3c8e3c1c1da13ad85aa74f7ab096b
|
[] |
no_license
|
noorulameenkm/DataStructuresAlgorithms
|
e5f87f426fc444d18f830e48569d2a7a50f5d7e0
|
7c3bb89326d2898f9e98590ceb8ee5fd7b3196f0
|
refs/heads/master
| 2023-06-08T19:29:42.507761
| 2023-05-28T16:20:19
| 2023-05-28T16:20:19
| 219,270,731
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,402
|
py
|
import random
class RandomizedSet:
    """A set supporting O(1) average-time insert, remove and getRandom.

    Values live in a list so a uniformly random element can be drawn;
    a companion dict maps each value to its list index so removal can
    swap the victim with the final element and pop in constant time.
    """

    def __init__(self):
        """Initialize your data structure here."""
        self._values = []
        self._pos = {}

    def insert(self, val: int) -> bool:
        """Inserts a value to the set. Returns true if the set did not already contain the specified element."""
        if val in self._pos:
            return False
        self._pos[val] = len(self._values)
        self._values.append(val)
        return True

    def remove(self, val: int) -> bool:
        """Removes a value from the set. Returns true if the set contained the specified element."""
        if val not in self._pos:
            return False
        idx = self._pos.pop(val)
        tail = self._values[-1]
        # Move the last element into the vacated slot, then shrink the list.
        self._values[idx] = tail
        if tail != val:
            self._pos[tail] = idx
        self._values.pop()
        return True

    def getRandom(self) -> int:
        """Get a random element from the set."""
        return random.choice(self._values)
# Your RandomizedSet object will be instantiated and called as such:
# (demo: runs on import as well as when executed directly)
obj = RandomizedSet()
print(f'Answer is {obj.insert(1)}')
print(f'Answer is {obj.remove(2)}')
print(f'Answer is {obj.getRandom()}')
|
[
"noorul.km@people10.com"
] |
noorul.km@people10.com
|
7a36658ae593bde936c367f2ef8c46229d4f76b0
|
22fbe9c0fc8cc366123111f54f103e3c109bce7a
|
/zeabus_vision/src/read_exposure.py
|
fcc70b006aa286c7a67b094dcef463adeab7568c
|
[] |
no_license
|
zeabusTeam/zeabus_software_ros1
|
3730021eb3eb6d98df585d172a44c4d6176e8963
|
86a07f4da03457bad3ce9b0c63b3867403780bc0
|
refs/heads/master
| 2020-03-25T23:02:22.816144
| 2019-03-19T17:30:37
| 2019-03-19T17:30:37
| 144,256,396
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,364
|
py
|
#!/usr/bin/python2.7
"""
File name: read_exposure.py
Author: zeabus
Date created: 2018/10/16
Python Version: 2.7
"""
import rospy
import cv2 as cv
import numpy as np
import matplotlib.pyplot as plt
from statistic import Statistic
from sensor_msgs.msg import CompressedImage
from dynamic_reconfigure.client import Client
import time
# bgr = None
# sub_sampling = 0.4
# stat = Statistic()
# def image_callback(msg):
# global bgr, sub_sampling
# arr = np.fromstring(msg.data, np.uint8)
# bgr = cv.resize(cv.imdecode(arr, 1), (0, 0),
# fx=sub_sampling, fy=sub_sampling)
def read_exposure():
    """Repeatedly push an exposure value to the ueye camera node via
    dynamic_reconfigure, timing each round-trip, and print min/max/mean
    durations once ROS shuts down."""
    time_avg = []
    while not rospy.is_shutdown():
        start = time.time()
        client_name = "ueye_cam_nodelet_front"
        client = Client(client_name)
        params = {"exposure": 33.0}
        client.update_configuration(params)
        # Read the value back (the result is intentionally discarded; only
        # the call's duration matters here).
        # Fix: the parameter path was missing the '/' separator before
        # "exposure" ("/ueye_cam_nodelet_frontexposure" never exists).
        rospy.get_param("/" + str(client_name) + "/exposure", None)
        stop = time.time()
        duration = stop - start
        print(duration)
        time_avg.append(duration)
    print("Duration between call exposure API: ")
    time_avg = np.array(time_avg)
    print('max:', time_avg.max())
    print('min:', time_avg.min())
    print('mean:', time_avg.mean())
# Register the ROS node, then run the measurement loop until shutdown.
if __name__ == '__main__':
    rospy.init_node('read_exposure', anonymous=False)
    read_exposure()
|
[
"supakit.kr@gmail.com"
] |
supakit.kr@gmail.com
|
985e9efd8ab0fefb39ec3e631887fb7ce65a29f5
|
811ee1e3bba45419e6c17068027d54bf6c8d4f07
|
/python/gpmp_utils/plotPlanarMobile2Arms.py
|
e6aa3a802cbaf75ae3a9859c50c99e102c449c40
|
[
"BSD-3-Clause"
] |
permissive
|
kalyanvasudev/gpmp2
|
7dfe19873c72a7b9202c06eb794ef779c2917032
|
1ee99c743d978ab20dc804c8cd9cfa7813084957
|
refs/heads/master
| 2021-12-23T13:01:27.320270
| 2020-05-03T00:44:59
| 2020-05-03T00:44:59
| 227,194,751
| 0
| 0
|
NOASSERTION
| 2019-12-10T19:01:00
| 2019-12-10T19:00:59
| null |
UTF-8
|
Python
| false
| false
| 1,366
|
py
|
import numpy as np
from gtsam import *
from gpmp2 import *
def plotPlanarMobile2Arms(figure, axis, marm, p, vehsize, color, width):
    """Draw a planar mobile base carrying two arms onto *axis*.

    Args:
        figure: matplotlib figure (kept for interface compatibility; unused).
        axis: matplotlib axes to draw on.
        marm: mobile two-arm model exposing ``forwardKinematicsPosition``
            and ``arm1.dof``.
        p: configuration object; ``p.pose()`` yields the base Pose2.
        vehsize: (length, width) of the vehicle footprint.
            NOTE(review): the original mixed MATLAB 1-based indices
            (vehsize[1]/vehsize[2]); fixed here to the two valid entries.
        color: arm line color, (r, g, b) with components in [0, 1].
        width: arm line width.
    """
    pose = p.pose()

    # Vehicle footprint corners in the world frame.
    # Fix: `vehsiz` typo and out-of-range vehsize[2] replaced by vehsize[0/1].
    corner1 = pose.transform_from(Point2(vehsize[0] / 2, vehsize[1] / 2))
    corner2 = pose.transform_from(Point2(-vehsize[0] / 2, vehsize[1] / 2))
    corner3 = pose.transform_from(Point2(-vehsize[0] / 2, -vehsize[1] / 2))
    corner4 = pose.transform_from(Point2(vehsize[0] / 2, -vehsize[1] / 2))

    # Closed outline of the vehicle base in black.
    # Fix: added the commas the MATLAB-style lists were missing.
    axis.plot([corner1.x(), corner2.x(), corner3.x(), corner4.x(), corner1.x()],
              [corner1.y(), corner2.y(), corner3.y(), corner4.y(), corner1.y()],
              'k-')

    # Joint positions of both arms: row 0 is x, row 1 is y.
    position = marm.forwardKinematicsPosition(p)
    position = position[0:2, :]

    dof1 = marm.arm1.dof
    ncols = position.shape[1]
    # The second arm shares the base point (column 0) with the first arm.
    # Fix: replaced the invalid MATLAB `end` slices with explicit indices.
    arm2_cols = [0] + list(range(dof1 + 1, ncols))

    axis.plot(position[0, 0:dof1 + 1], position[1, 0:dof1 + 1],
              color=color, linewidth=width)
    axis.plot(position[0, arm2_cols], position[1, arm2_cols],
              color=color, linewidth=width)
    axis.plot(position[0, 0:dof1 + 1], position[1, 0:dof1 + 1],
              'k.', markersize=5)
    axis.plot(position[0, dof1 + 1:], position[1, dof1 + 1:],
              'k.', markersize=5)
|
[
"kalyan051993@gmail.com"
] |
kalyan051993@gmail.com
|
2fec9430049b72f2e0ef566a4a08ec641022877e
|
926c7a9760702d3c56adfa3eec0e164cb2c766b6
|
/gunnery/gunnery/settings/development.py
|
8dcd148202883fecfc670ba753d381b3add3c197
|
[
"Apache-2.0"
] |
permissive
|
hunslater/gunnery
|
7d947942f0c9db56d4102e68758b95b4292efbc3
|
2b9457ef899f7367dc07ba28cc1b7e4ff2c47d8e
|
refs/heads/master
| 2021-01-17T23:14:38.387308
| 2014-06-11T20:43:51
| 2014-06-11T20:43:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 579
|
py
|
from .common import *
ENVIRONMENT = 'development'
DEBUG = True
TEMPLATE_DEBUG = True
INSTALLED_APPS += (
'debug_toolbar',
'django_nose',
)
MIDDLEWARE_CLASSES += (
'debug_toolbar.middleware.DebugToolbarMiddleware',
)
from fnmatch import fnmatch
class glob_list(list):
def __contains__(self, key):
for elt in self:
if fnmatch(key, elt): return True
return False
INTERNAL_IPS = glob_list(['127.0.0.1', '10.0.*.*'])
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
|
[
"pawel.olejniczak@gmail.com"
] |
pawel.olejniczak@gmail.com
|
bdfc04a2ac39f2bc9a169b4e3ae7b83d76b39078
|
c9e616e6f5146805c6d9c19d35220e9b76c93aa6
|
/박현채/비타알고 시즌2/19년9월1주차/시공의 폭풍속으로.py
|
d5b58e5858d58444c9471d2b902b1d7e5b4f7e13
|
[] |
no_license
|
inje-illab/Algorithmer
|
2d29244d38a2aeec07ad83e47e69016269ff4e88
|
ed5c67d0a1b0c720e5a8ce8fe5bafba4bb0f36b8
|
refs/heads/master
| 2023-03-16T05:46:41.795826
| 2020-01-05T14:59:36
| 2020-01-05T14:59:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 391
|
py
|
TEAM_SELECT_HERO = []
USER_SELECT_HERO = []
USEABLE_HERO = []
TEAM_SELECT_HERO.extend(map(int, input().split()))
USER_SELECT_HERO.extend(map(int, input().split()))
USEABLE_HERO = list(set(USER_SELECT_HERO) - set(TEAM_SELECT_HERO)) # 사용자 선택 영웅의 리스트 집합 - 팀 선택 영웅의 리스트 집합
print(len(USEABLE_HERO)) # 사용가능 영웅 리스트 길이 출력
|
[
"park19996@naver.com"
] |
park19996@naver.com
|
af9ab97508d3762e24f80f6c7f04143f5b825c27
|
bbf3a1b2f2f4ec3fa468a089c042643ec8243c15
|
/ML/research/object_detection/webcam_detection.py
|
35ee40f148009ce924d3b470369ca8ad01da9298
|
[
"Apache-2.0"
] |
permissive
|
lasarox/Code
|
94aa9b3d816016a171e4a3babd9127cb01a6cd03
|
2c04be4e7a066340f1cf2b45bec18298d010312a
|
refs/heads/master
| 2023-04-05T16:51:46.159055
| 2021-05-01T06:24:02
| 2021-05-01T06:24:02
| 361,516,400
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,099
|
py
|
import numpy as np
import os
import six.moves.urllib as urllib
import sys
import tarfile
import tensorflow as tf
import zipfile
from collections import defaultdict
from io import StringIO
from matplotlib import pyplot as plt
from PIL import Image
from utils import label_map_util
from utils import visualization_utils as vis_util
import cv2
cap = cv2.VideoCapture(0) # if you have multiple webcams change the value to the correct one
# Any model exported using the `export_inference_graph.py` tool can be loaded here simply by changing `PATH_TO_CKPT` to point to a new .pb file.
#
# By default we use an "SSD with Mobilenet" model here. See the [detection model zoo](https://github.com/tensorflow/models/blob/master/object_detection/g3doc/detection_model_zoo.md) for a list of other models that can be run out-of-the-box with varying speeds and accuracies.
# In[4]:
# What model to download.
MODEL_NAME = 'ssd_mobilenet_v1_coco_11_06_2017'
MODEL_FILE = MODEL_NAME + '.tar.gz'
DOWNLOAD_BASE = 'http://download.tensorflow.org/models/object_detection/'
# Path to frozen detection graph. This is the actual model that is used for the object detection.
PATH_TO_CKPT = MODEL_NAME + '/frozen_inference_graph.pb'
# List of the strings that is used to add correct label for each box.
PATH_TO_LABELS = os.path.join('data', 'mscoco_label_map.pbtxt')
NUM_CLASSES = 90
# ## Download Model
# In[5]:
opener = urllib.request.URLopener()
opener.retrieve(DOWNLOAD_BASE + MODEL_FILE, MODEL_FILE)
tar_file = tarfile.open(MODEL_FILE)
for file in tar_file.getmembers():
file_name = os.path.basename(file.name)
if 'frozen_inference_graph.pb' in file_name:
tar_file.extract(file, os.getcwd())
# ## Load a (frozen) Tensorflow model into memory.
# In[6]:
detection_graph = tf.Graph()
with detection_graph.as_default():
od_graph_def = tf.GraphDef()
with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
# ## Loading label map
# Label maps map indices to category names, so that when our convolution network predicts `5`, we know that this corresponds to `airplane`. Here we use internal utility functions, but anything that returns a dictionary mapping integers to appropriate string labels would be fine
# In[7]:
label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES, use_display_name=True)
category_index = label_map_util.create_category_index(categories)
# ## Helper code
# In[8]:
def load_image_into_numpy_array(image):
(im_width, im_height) = image.size
return np.array(image.getdata()).reshape(
(im_height, im_width, 3)).astype(np.uint8)
# # Detection
# In[9]:
# For the sake of simplicity we will use only 2 images:
# image1.jpg
# image2.jpg
# If you want to test the code with your images, just add path to the images to the TEST_IMAGE_PATHS.
PATH_TO_TEST_IMAGES_DIR = 'test_images'
TEST_IMAGE_PATHS = [ os.path.join(PATH_TO_TEST_IMAGES_DIR, 'image{}.jpg'.format(i)) for i in range(1, 3) ] # change this value if you want to add more pictures to test
# Size, in inches, of the output images.
IMAGE_SIZE = (12, 8)
# In[10]:
with detection_graph.as_default():
with tf.Session(graph=detection_graph) as sess:
while True:
ret, image_np = cap.read()
# Expand dimensions since the model expects images to have shape: [1, None, None, 3]
image_np_expanded = np.expand_dims(image_np, axis=0)
image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
# Each box represents a part of the image where a particular object was detected.
boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
# Each score represent how level of confidence for each of the objects.
# Score is shown on the result image, together with the class label.
scores = detection_graph.get_tensor_by_name('detection_scores:0')
classes = detection_graph.get_tensor_by_name('detection_classes:0')
num_detections = detection_graph.get_tensor_by_name('num_detections:0')
# Actual detection.
(boxes, scores, classes, num_detections) = sess.run(
[boxes, scores, classes, num_detections],
feed_dict={image_tensor: image_np_expanded})
# Visualization of the results of a detection.
vis_util.visualize_boxes_and_labels_on_image_array(
image_np,
np.squeeze(boxes),
np.squeeze(classes).astype(np.int32),
np.squeeze(scores),
category_index,
use_normalized_coordinates=True,
line_thickness=8)
cv2.imshow('object detection', cv2.resize(image_np, (800, 600)))
if cv2.waitKey(25) & 0xFF == ord('q'):
cv2.destroyAllWindows()
break
|
[
"you@example.com"
] |
you@example.com
|
27a2bfac21348a6fb2463f4306fb6d253e6c0790
|
1c343f610133030fbe160a1cd864bfc29be84fa8
|
/tests/test_topicmod_visualize.py
|
8d7321d8a7c0276326733f8c7338eda06ac194ec
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
mcooper/tmtoolkit
|
56e585c71a553b3344c05a9a1e77adfa5044b29a
|
cdfbaf7f20095ea45edbdf9e773544e3bb63089d
|
refs/heads/master
| 2020-04-29T01:09:42.137637
| 2019-03-15T23:37:06
| 2019-03-15T23:37:06
| 175,721,140
| 0
| 0
|
Apache-2.0
| 2019-03-15T00:35:56
| 2019-03-15T00:35:56
| null |
UTF-8
|
Python
| false
| false
| 3,586
|
py
|
import os
import six
import PIL
from tmtoolkit.topicmod import model_io, visualize
try:
from wordcloud import WordCloud
def test_generate_wordclouds_for_topic_words():
py3file = '.py3' if six.PY3 else ''
data = model_io.load_ldamodel_from_pickle('tests/data/tiny_model_reuters_5_topics%s.pickle' % py3file)
model = data['model']
vocab = data['vocab']
phi = model.topic_word_
assert phi.shape == (5, len(vocab))
topic_word_clouds = visualize.generate_wordclouds_for_topic_words(phi, vocab, 10)
assert len(topic_word_clouds) == 5
assert set(topic_word_clouds.keys()) == set('topic_%d' % i for i in range(1, 6))
assert all(isinstance(wc, PIL.Image.Image) for wc in topic_word_clouds.values())
topic_word_clouds = visualize.generate_wordclouds_for_topic_words(phi, vocab, 10,
which_topics=('topic_1', 'topic_2'),
return_images=False,
width=640, height=480)
assert set(topic_word_clouds.keys()) == {'topic_1', 'topic_2'}
assert all(isinstance(wc, WordCloud) for wc in topic_word_clouds.values())
assert all(wc.width == 640 and wc.height == 480 for wc in topic_word_clouds.values())
def test_generate_wordclouds_for_document_topics():
py3file = '.py3' if six.PY3 else ''
data = model_io.load_ldamodel_from_pickle('tests/data/tiny_model_reuters_5_topics%s.pickle' % py3file)
model = data['model']
doc_labels = data['doc_labels']
theta = model.doc_topic_
assert theta.shape == (len(doc_labels), 5)
doc_topic_clouds = visualize.generate_wordclouds_for_document_topics(theta, doc_labels, 3)
assert len(doc_topic_clouds) == len(doc_labels)
assert set(doc_topic_clouds.keys()) == set(doc_labels)
assert all(isinstance(wc, PIL.Image.Image) for wc in doc_topic_clouds.values())
which_docs = doc_labels[:2]
assert len(which_docs) == 2
doc_topic_clouds = visualize.generate_wordclouds_for_document_topics(theta, doc_labels, 3,
which_documents=which_docs,
return_images=False,
width=640, height=480)
assert set(doc_topic_clouds.keys()) == set(which_docs)
assert all(isinstance(wc, WordCloud) for wc in doc_topic_clouds.values())
assert all(wc.width == 640 and wc.height == 480 for wc in doc_topic_clouds.values())
def test_write_wordclouds_to_folder(tmpdir):
path = tmpdir.mkdir('wordclouds').dirname
py3file = '.py3' if six.PY3 else ''
data = model_io.load_ldamodel_from_pickle('tests/data/tiny_model_reuters_5_topics%s.pickle' % py3file)
model = data['model']
vocab = data['vocab']
phi = model.topic_word_
assert phi.shape == (5, len(vocab))
topic_word_clouds = visualize.generate_wordclouds_for_topic_words(phi, vocab, 10)
visualize.write_wordclouds_to_folder(topic_word_clouds, path, 'cloud_{label}.png')
for label in topic_word_clouds.keys():
assert os.path.exists(os.path.join(path, 'cloud_{label}.png'.format(label=label)))
except:
# wordcloud module not found
pass
|
[
"markus.konrad@wzb.eu"
] |
markus.konrad@wzb.eu
|
fa557f36f229ffae04fde795ddaad2491c3d8cb8
|
fe6cbc51ef5043ff2f953fd2202540fd0f7d7cbc
|
/mnist_deploy.py
|
025830d8ddccfeeac54cc4347bec752d88be5c3a
|
[] |
no_license
|
Tveek/caffe_learning
|
f87c9abecb879a9807368b733772d669315cca41
|
e841abb2d0f92c5e0f9f558fbdd9e128c526f1b2
|
refs/heads/master
| 2021-01-22T07:32:57.173028
| 2016-09-26T13:32:45
| 2016-09-26T13:32:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,212
|
py
|
# -*- coding: utf-8 -*-
from caffe import layers as L,params as P,to_proto
root='./'
deploy=root+'mnist/deploy.prototxt' #文件保存路径
def create_deploy():
#少了第一层,data层
conv1=L.Convolution(bottom='data', kernel_size=5, stride=1,num_output=20, pad=0,weight_filler=dict(type='xavier'))
pool1=L.Pooling(conv1, pool=P.Pooling.MAX, kernel_size=2, stride=2)
conv2=L.Convolution(pool1, kernel_size=5, stride=1,num_output=50, pad=0,weight_filler=dict(type='xavier'))
pool2=L.Pooling(conv2, pool=P.Pooling.MAX, kernel_size=2, stride=2)
fc3=L.InnerProduct(pool2, num_output=500,weight_filler=dict(type='xavier'))
relu3=L.ReLU(fc3, in_place=True)
fc4 = L.InnerProduct(relu3, num_output=10,weight_filler=dict(type='xavier'))
#最后没有accuracy层,但有一个Softmax层
prob=L.Softmax(fc4)
return to_proto(prob)
def write_deploy():
with open(deploy, 'w') as f:
f.write('name:"Lenet"\n')
f.write('input:"data"\n')
f.write('input_dim:1\n')
f.write('input_dim:3\n')
f.write('input_dim:28\n')
f.write('input_dim:28\n')
f.write(str(create_deploy()))
if __name__ == '__main__':
write_deploy()
|
[
"857332641@qq.com"
] |
857332641@qq.com
|
c0c907c9480629a7fb74fc8e8f851f465c9ed21a
|
5b85703aa0dd5a6944d99370a5dde2b6844517ec
|
/03.Python/13.XML1_Find_the_Score.py
|
a6dcf1e4cc24fe1e036199a2dd8a7b3941d3931c
|
[] |
no_license
|
alda07/hackerrank
|
255329196e6a4b9d598c3f51790caf4a99a755bc
|
a09091f859e87462c95ee856cbbd0ad9b5992159
|
refs/heads/master
| 2021-10-24T07:38:34.795632
| 2019-03-23T17:29:32
| 2019-03-23T17:29:32
| 90,329,292
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 418
|
py
|
import sys
import xml.etree.ElementTree as etree
def get_attr_number(node):
# your code goes here
count_atribs = 0
for element in node.iter():
count_atribs += len(element.attrib)
return count_atribs
if __name__ == '__main__':
sys.stdin.readline()
xml = sys.stdin.read()
tree = etree.ElementTree(etree.fromstring(xml))
root = tree.getroot()
print(get_attr_number(root))
|
[
"hanh.vo.programmer@gmail.com"
] |
hanh.vo.programmer@gmail.com
|
f31fee6475c7cf0e9200fc4f6762bf92a3f9cdb1
|
7dd88d4ae218b8de8fe54780fb48884ef92c0b5c
|
/python/leetcode/search.py
|
0e887d9acf7a2cc74431826670bfe92836cdbc67
|
[] |
no_license
|
BenThomas33/practice
|
c98654ec3bb38740d7f69a21ea5832782abdb4f8
|
5dffbdfbdb65f959a534ed2e2ec7773ab4bc7ed9
|
refs/heads/master
| 2021-01-17T23:26:11.538707
| 2014-10-18T20:49:12
| 2014-10-18T20:49:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 139
|
py
|
class Solution:
# @param A a list of integers
# @param target an integer
# @return a boolean
def search(self, A, target):
|
[
"xinzhou918@gmail.com"
] |
xinzhou918@gmail.com
|
45a39d564c5ccfcb158db6c41322736bf97d4f25
|
830b34e369fcfb94a8eaa855c918ab66ed2050b2
|
/gui/layouts/grid.py
|
4725707f04c497a09f57f383344ad657c886ec53
|
[] |
no_license
|
treinaweb/treinaweb-kivy-framework-python
|
78e8ab1087a49e8463ebf4ecafca80fe41286cb7
|
2ddf0a881f28209a118ec893019c179bc39e75fc
|
refs/heads/master
| 2020-03-27T16:47:07.452396
| 2019-10-15T19:04:14
| 2019-10-15T19:04:14
| 146,805,958
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 259
|
py
|
from kivy.app import App
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.gridlayout import GridLayout
from kivy.uix.widget import Widget
class TelaApp(GridLayout):
pass
class Grid(App):
def build(self):
return TelaApp()
Grid().run()
|
[
"fagnerpinheirosantos@gmail.com"
] |
fagnerpinheirosantos@gmail.com
|
446864b08c4d7945ec228b68519031d1bbce51c0
|
5be79d6cbc8a55f0b6518b28fb748c34316b385d
|
/sentinel_api/__init__.py
|
01de33cdaec552e63d559aea414ed9ab6e1d9c99
|
[
"MIT"
] |
permissive
|
jonas-eberle/esa_sentinel
|
1b9aa57a78972d93a20d03bbf0875c35f7bee4b2
|
c9498e8835ae0a585068cfd6be953319ea34ca29
|
refs/heads/master
| 2022-06-24T18:44:42.726012
| 2022-06-06T16:15:52
| 2022-06-06T16:15:52
| 47,712,138
| 51
| 16
|
MIT
| 2019-10-17T15:45:06
| 2015-12-09T18:57:35
|
Python
|
UTF-8
|
Python
| false
| false
| 238
|
py
|
from .sentinel_api import SentinelDownloader
from pkg_resources import get_distribution, DistributionNotFound
try:
__version__ = get_distribution(__name__).version
except DistributionNotFound:
# package is not installed
pass
|
[
"john.truckenbrodt@uni-jena.de"
] |
john.truckenbrodt@uni-jena.de
|
630fa7c67e54aef67a51ee9d03be97d872a64cc5
|
ec1f8cdbf52bcc5516a833e02ac99301a1664ed9
|
/setup.py
|
0e962f9c6b7ca5a5352113a317f655c549c3cf3b
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
senarvi/theanolm
|
8fe85dcf07358a331807b9002a56b6089d5f0ff3
|
9904faec19ad5718470f21927229aad2656e5686
|
refs/heads/master
| 2023-06-24T10:39:21.985241
| 2023-06-12T06:55:26
| 2023-06-12T06:55:26
| 42,454,187
| 95
| 37
|
Apache-2.0
| 2020-11-05T11:22:31
| 2015-09-14T14:35:54
|
Python
|
UTF-8
|
Python
| false
| false
| 2,563
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""This setup script can be used to run unit tests, manually install the
package, and upload the package to PyPI.
python3 setup.py --help - Display help.
python3 setup.py test - Execute unit tests.
python3 setup.py install - Install the package.
python3 setup.py sdist upload - Upload the project to PyPI.
"""
from os import path
from setuptools import setup, find_packages
SCRIPT_DIR = path.dirname(path.realpath(__file__))
VERSION_PATH = path.join(SCRIPT_DIR, 'theanolm', 'version.py')
# Don't import theanolm, as the user may not have the dependencies installed
# yet. This will import __version__.
with open(VERSION_PATH, 'r') as version_file:
exec(version_file.read())
VERSION = __version__ #@UndefinedVariable
LONG_DESCRIPTION = 'TheanoLM is a recurrent neural network language modeling ' \
'toolkit implemented using Theano. Theano allows the user ' \
'to customize and extend the neural network very ' \
'conveniently, still generating highly efficient code ' \
'that can utilize multiple GPUs or CPUs for parallel ' \
'computation. TheanoLM allows the user to specify ' \
'arbitrary network architecture. New layer types and ' \
'optimization methods can be easily implemented.'
KEYWORDS = 'theano neural network language modeling machine learning research'
CLASSIFIERS = ['Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Programming Language :: Python :: 3',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Topic :: Scientific/Engineering']
setup(name='TheanoLM',
version=VERSION,
author='Seppo Enarvi',
author_email='seppo2019@marjaniemi.com',
url='https://github.com/senarvi/theanolm',
download_url='https://github.com/senarvi/theanolm/tarball/v' + VERSION,
description='Toolkit for neural network language modeling using Theano',
long_description=LONG_DESCRIPTION,
license='Apache License, Version 2.0',
keywords=KEYWORDS,
classifiers=CLASSIFIERS,
packages=find_packages(exclude=['tests']),
package_data={'theanolm': ['architectures/*.arch']},
scripts=['bin/theanolm', 'bin/wctool'],
install_requires=['numpy', 'Theano', 'h5py'],
test_suite='tests')
|
[
"seppo.git@marjaniemi.com"
] |
seppo.git@marjaniemi.com
|
aa4a3ec5872f3bd9c81e30dbe94c795cb732bc34
|
f82757475ea13965581c2147ff57123b361c5d62
|
/gi-stubs/repository/GstVideo/VideoAggregatorConvertPadPrivate.py
|
52c003b39ff29287c99298ff8a25c4d0fb82cb6b
|
[] |
no_license
|
ttys3/pygobject-stubs
|
9b15d1b473db06f47e5ffba5ad0a31d6d1becb57
|
d0e6e93399212aada4386d2ce80344eb9a31db48
|
refs/heads/master
| 2022-09-23T12:58:44.526554
| 2020-06-06T04:15:00
| 2020-06-06T04:15:00
| 269,693,287
| 8
| 2
| null | 2020-06-05T15:57:54
| 2020-06-05T15:57:54
| null |
UTF-8
|
Python
| false
| false
| 4,481
|
py
|
# encoding: utf-8
# module gi.repository.GstVideo
# from /usr/lib64/girepository-1.0/GstVideo-1.0.typelib
# by generator 1.147
"""
An object which wraps an introspection typelib.
This wrapping creates a python module like representation of the typelib
using gi repository as a foundation. Accessing attributes of the module
will dynamically pull them in and create wrappers for the members.
These members are then cached on this introspection module.
"""
# imports
import gi as __gi
import gi.overrides.GObject as __gi_overrides_GObject
import gi.repository.Gst as __gi_repository_Gst
import gi.repository.GstBase as __gi_repository_GstBase
import gobject as __gobject
class VideoAggregatorConvertPadPrivate(__gi.Struct):
# no doc
def __delattr__(self, *args, **kwargs): # real signature unknown
""" Implement delattr(self, name). """
pass
def __dir__(self, *args, **kwargs): # real signature unknown
""" Default dir() implementation. """
pass
def __eq__(self, *args, **kwargs): # real signature unknown
""" Return self==value. """
pass
def __format__(self, *args, **kwargs): # real signature unknown
""" Default object formatter. """
pass
def __getattribute__(self, *args, **kwargs): # real signature unknown
""" Return getattr(self, name). """
pass
def __ge__(self, *args, **kwargs): # real signature unknown
""" Return self>=value. """
pass
def __gt__(self, *args, **kwargs): # real signature unknown
""" Return self>value. """
pass
def __hash__(self, *args, **kwargs): # real signature unknown
""" Return hash(self). """
pass
def __init_subclass__(self, *args, **kwargs): # real signature unknown
"""
This method is called when a class is subclassed.
The default implementation does nothing. It may be
overridden to extend subclasses.
"""
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
def __le__(self, *args, **kwargs): # real signature unknown
""" Return self<=value. """
pass
def __lt__(self, *args, **kwargs): # real signature unknown
""" Return self<value. """
pass
@staticmethod # known case of __new__
def __new__(*args, **kwargs): # real signature unknown
""" Create and return a new object. See help(type) for accurate signature. """
pass
def __ne__(self, *args, **kwargs): # real signature unknown
""" Return self!=value. """
pass
def __reduce_ex__(self, *args, **kwargs): # real signature unknown
""" Helper for pickle. """
pass
def __reduce__(self, *args, **kwargs): # real signature unknown
""" Helper for pickle. """
pass
def __repr__(self, *args, **kwargs): # real signature unknown
""" Return repr(self). """
pass
def __setattr__(self, *args, **kwargs): # real signature unknown
""" Implement setattr(self, name, value). """
pass
def __sizeof__(self, *args, **kwargs): # real signature unknown
""" Size of object in memory, in bytes. """
pass
def __str__(self, *args, **kwargs): # real signature unknown
""" Return str(self). """
pass
def __subclasshook__(self, *args, **kwargs): # real signature unknown
"""
Abstract classes can override this to customize issubclass().
This is invoked early on by abc.ABCMeta.__subclasscheck__().
It should return True, False or NotImplemented. If it returns
NotImplemented, the normal algorithm is used. Otherwise, it
overrides the normal algorithm (and the outcome is cached).
"""
pass
def __weakref__(self, *args, **kwargs): # real signature unknown
pass
__class__ = None # (!) real value is "<class 'gi.types.StructMeta'>"
__dict__ = None # (!) real value is "mappingproxy({'__info__': StructInfo(VideoAggregatorConvertPadPrivate), '__module__': 'gi.repository.GstVideo', '__gtype__': <GType void (4)>, '__dict__': <attribute '__dict__' of 'VideoAggregatorConvertPadPrivate' objects>, '__weakref__': <attribute '__weakref__' of 'VideoAggregatorConvertPadPrivate' objects>, '__doc__': None})"
__gtype__ = None # (!) real value is '<GType void (4)>'
__info__ = StructInfo(VideoAggregatorConvertPadPrivate)
|
[
"ttys3@outlook.com"
] |
ttys3@outlook.com
|
13bda21c18d48ad3eb96b1e6efb52ec7823dc23b
|
81fbac614ad0f6795960a7a1f615c1a7d2938fa8
|
/setup.py
|
cc4ce16119bf94cc444d7050bfea2abeedc051df
|
[
"MIT"
] |
permissive
|
Rue-Foundation/eth-bloom
|
f0c4a0fc4b41b16cb1ed103c693a44c22464d805
|
930b740267992fc7c2fbc7f38eed8c1ea3c79d40
|
refs/heads/master
| 2021-08-20T09:03:18.794271
| 2017-11-28T17:50:35
| 2017-11-28T17:50:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,234
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
setup(
name='eth-bloom',
# *IMPORTANT*: Don't manually change the version here. Use the 'bumpversion' utility.
version='0.5.2',
description="""Python implementation of the Ethereum Trie structure""",
long_description_markdown_filename='README.md',
author='Piper Merriam',
author_email='pipermerriam@gmail.com',
url='https://github.com/ethereum/eth-bloom',
include_package_data=True,
py_modules=['eth_bloom'],
setup_requires=['setuptools-markdown'],
install_requires=[
"pysha3>=0.3",
],
license="MIT",
zip_safe=False,
keywords='ethereum blockchain evm trie merkle',
packages=find_packages(exclude=["tests", "tests.*"]),
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
)
|
[
"pipermerriam@gmail.com"
] |
pipermerriam@gmail.com
|
017b467592469746e83158bfd8f35d935f1207e6
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_385/ch44_2019_04_22_23_24_01_514593.py
|
3006efd7bceca5e33b58e34e164af9c84824b29a
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 148
|
py
|
def soma_valores(soma):
lista = []
i=0
soma=0
while i<len(lista):
soma+= lista (i)
i+=1
return soma
|
[
"you@example.com"
] |
you@example.com
|
97b909e4b45e90041f2deb5ee471e5666c2c7ab2
|
b5f05426d811303c0bc2d37a7ebff67cc369f536
|
/python/crawl/study_crawl.py
|
fb7bde6519e53d66be9f5bca6de409ba80581f1b
|
[] |
no_license
|
chenwangwww/paddlehub
|
54a310c2b627868aa22e6172497d60ddd2291d24
|
8583a705af6f82512ea5473f3d8961a798852913
|
refs/heads/master
| 2023-03-13T10:17:55.589558
| 2021-03-01T02:35:43
| 2021-03-01T02:35:43
| 293,667,091
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,088
|
py
|
import requests
from bs4 import BeautifulSoup
from dbCtr import ctr
from funcs import listToStr
rooturl = 'http://www.cn56.net.cn/diming/'
def insertFullData(tb, data):
for item in data:
result = {
'name': item.get_text(),
'link': item.get('href'),
}
ctr.insertData(tb, result)
def crtb(tb, datas):
ctr.createTb(tb)
for data in datas:
insertFullData(tb, data)
def crawlUrl(suburl):
url = rooturl + suburl
strhtml = requests.get(url)
strhtml.encoding = 'gbk'
soup = BeautifulSoup(strhtml.text, 'lxml')
search_list = soup.select('#page_left > div.wrpn > a')
search_name = listToStr(search_list, '_')
if len(search_list) == 1:
selecter = '#page_left > table:nth-child(4) > tr > td:nth-child(1) > strong > a'
else:
selecter = '#page_left > div.infotree > table > tr > td:nth-child(1) > strong > a'
data = soup.select(selecter)
if len(data) > 0 and len(search_list) <= 2:
print(search_name)
crtb(search_name, [data])
for item in data:
subtempurl = item.get('href').split('/')[-1]
crawlUrl(subtempurl)
# strhtml = requests.get(rooturl)
# strhtml.encoding = 'gbk'
# soup = BeautifulSoup(strhtml.text, 'lxml')
# search_name = '中国'
# selecter1 = 'body > div:nth-child(6) > div.w650 > div > li:nth-child(1) > a'
# data1 = soup.select(selecter1)
# selecter2 = 'body > div:nth-child(6) > div.w650 > div > li > b > a'
# data2 = soup.select(selecter2)
#如果不存在,创建数据表
# ctr.createTb(search_name)
#往数据表插入数据
# for item in data1:
# result = {
# 'name': item.get_text(),
# 'link': item.get('href'),
# }
# ctr.insertData(search_name, result)
# for item in data2:
# result = {
# 'name': item.get_text(),
# 'link': item.get('href'),
# }
# ctr.insertData(search_name, result)
queryData = ctr.queryData('中国')
for search_item in queryData:
_, suburl = search_item
suburl = suburl.split('/')[-1]
crawlUrl(suburl)
|
[
"chenwangwww@sina.com"
] |
chenwangwww@sina.com
|
2b88fd7b2e949c24551cc1cf034ead697fef65d5
|
f5a53f0f2770e4d7b3fdace83486452ddcc996e1
|
/netbox/netbox/tests/test_api.py
|
0ee2d78dc1d0d9df2df846ce9c7d29f4c43c5347
|
[
"Apache-2.0"
] |
permissive
|
fireman0865/PingBox
|
35e8fc9966b51320d571b63967e352a134022128
|
0f00eaf88b88e9441fffd5173a1501e56c13db03
|
refs/heads/master
| 2023-01-20T07:55:59.433046
| 2020-03-15T13:36:31
| 2020-03-15T13:36:31
| 247,466,832
| 1
| 0
|
Apache-2.0
| 2022-12-26T21:30:32
| 2020-03-15T12:59:16
|
Python
|
UTF-8
|
Python
| false
| false
| 298
|
py
|
from django.urls import reverse
from utilities.testing import APITestCase
class AppTest(APITestCase):
def test_root(self):
url = reverse('api-root')
response = self.client.get('{}?format=api'.format(url), **self.header)
self.assertEqual(response.status_code, 200)
|
[
"fireman0865@gmail.com"
] |
fireman0865@gmail.com
|
e40a4cc665597fd71aa392ec795fc50fe1fd605a
|
8e8e4becd0ccf35a4d2397eac05c46741941a3f2
|
/examples/e2e/cli/04ignore/commands.py
|
981edeb546ef06f9b1979175c86be2be29e5cb76
|
[] |
no_license
|
podhmo/monogusa
|
13469c59e3a366f11e2d0b1d649991aceed40092
|
1129249cbfbf2d7925f69e484f1488799d2f637d
|
refs/heads/master
| 2020-09-30T18:28:27.215942
| 2020-02-29T15:17:05
| 2020-02-29T15:17:05
| 227,347,182
| 0
| 0
| null | 2020-02-29T15:17:06
| 2019-12-11T11:13:47
|
Python
|
UTF-8
|
Python
| false
| false
| 137
|
py
|
from monogusa import ignore
def hello() -> None:
pass
def byebye() -> None:
pass
@ignore
def ignore_me() -> None:
pass
|
[
"noreply@github.com"
] |
podhmo.noreply@github.com
|
947eeef7fb19a7b211cedd5dbc26edf741e2fb26
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/nouns/_rut.py
|
cfbcb5070a3a50c4c9cb663637014cb5d57b50f5
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 499
|
py
|
#calss header
class _RUT():
def __init__(self,):
self.name = "RUT"
self.definitions = [u'a deep, narrow mark made in soft ground especially by a wheel', u'the period of the year during which particular male animals, especially deer and sheep, are sexually active: ', u'(of particular male animals) sexually excited']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'nouns'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
0c4043442316a2cf94c4f5be142ff5d603430e8d
|
4b3ae6048ced0d7f88a585af29fa3a7b15005749
|
/Python/Django/dojo_ninjas/apps/books_authors/models.py
|
69cda65e65a601d28283fe6149c273df57bf038c
|
[] |
no_license
|
ajag408/DojoAssignments
|
a6320856466ac21d38e8387bdcbbe2a02009e418
|
03baa0ff5261aee6ffedf724657b3a8c7cdffe47
|
refs/heads/master
| 2022-12-11T15:50:46.839881
| 2021-06-07T20:57:17
| 2021-06-07T20:57:17
| 79,872,914
| 0
| 0
| null | 2022-12-08T00:35:09
| 2017-01-24T02:58:15
|
Python
|
UTF-8
|
Python
| false
| false
| 915
|
py
|
from __future__ import unicode_literals
from django.db import models
# Create your models here.
class Book(models.Model):
name = models.CharField(max_length = 255)
desc = models.TextField()
created_at = models.DateTimeField(auto_now_add = True)
updated_at = models.DateTimeField(auto_now = True)
def __repr__(self):
return "<Book object: {} {}>".format(self.name, self.desc)
class Author(models.Model):
first_name = models.CharField(max_length = 255)
last_name = models.CharField(max_length = 255)
email = models.CharField(max_length = 255)
created_at = models.DateTimeField(auto_now_add = True)
updated_at = models.DateTimeField(auto_now = True)
books = models.ManyToManyField(Book, related_name = 'authors')
notes = models.TextField()
def __repr__(self):
return "<Author object: {} {} {}>".format(self.first_name, self.last_name, self.email)
|
[
"akashjagannathan408@gmail.com"
] |
akashjagannathan408@gmail.com
|
73e8001de0324ba056ab3490a6008921eeda2852
|
f07a42f652f46106dee4749277d41c302e2b7406
|
/Data Set/bug-fixing-5/efc5dac52cdfab25c3b163958830325f6898d3b6-<exec_command>-fix.py
|
c96b8e5b71acfcfaf0ad856b372cd9ccc0252b3f
|
[] |
no_license
|
wsgan001/PyFPattern
|
e0fe06341cc5d51b3ad0fe29b84098d140ed54d1
|
cc347e32745f99c0cd95e79a18ddacc4574d7faa
|
refs/heads/main
| 2023-08-25T23:48:26.112133
| 2021-10-23T14:11:22
| 2021-10-23T14:11:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,477
|
py
|
def exec_command(self, cmd, in_data=None, sudoable=True):
' run a command on the local host '
super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
display.debug('in local.exec_command()')
executable = (C.DEFAULT_EXECUTABLE.split()[0] if C.DEFAULT_EXECUTABLE else None)
display.vvv('EXEC {0}'.format(cmd), host=self._play_context.remote_addr)
display.debug('opening command with Popen()')
if isinstance(cmd, (text_type, binary_type)):
cmd = to_bytes(cmd)
else:
cmd = map(to_bytes, cmd)
p = subprocess.Popen(cmd, shell=isinstance(cmd, (text_type, binary_type)), executable=executable, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
display.debug('done running command with Popen()')
if (self._play_context.prompt and sudoable):
fcntl.fcntl(p.stdout, fcntl.F_SETFL, (fcntl.fcntl(p.stdout, fcntl.F_GETFL) | os.O_NONBLOCK))
fcntl.fcntl(p.stderr, fcntl.F_SETFL, (fcntl.fcntl(p.stderr, fcntl.F_GETFL) | os.O_NONBLOCK))
become_output = ''
while ((not self.check_become_success(become_output)) and (not self.check_password_prompt(become_output))):
(rfd, wfd, efd) = select.select([p.stdout, p.stderr], [], [p.stdout, p.stderr], self._play_context.timeout)
if (p.stdout in rfd):
chunk = p.stdout.read()
elif (p.stderr in rfd):
chunk = p.stderr.read()
else:
(stdout, stderr) = p.communicate()
raise AnsibleError(('timeout waiting for privilege escalation password prompt:\n' + become_output))
if (not chunk):
(stdout, stderr) = p.communicate()
raise AnsibleError(('privilege output closed while waiting for password prompt:\n' + become_output))
become_output += chunk
if (not self.check_become_success(become_output)):
p.stdin.write((to_bytes(self._play_context.become_pass, errors='surrogate_or_strict') + b'\n'))
fcntl.fcntl(p.stdout, fcntl.F_SETFL, (fcntl.fcntl(p.stdout, fcntl.F_GETFL) & (~ os.O_NONBLOCK)))
fcntl.fcntl(p.stderr, fcntl.F_SETFL, (fcntl.fcntl(p.stderr, fcntl.F_GETFL) & (~ os.O_NONBLOCK)))
display.debug('getting output with communicate()')
(stdout, stderr) = p.communicate(in_data)
display.debug('done communicating')
display.debug('done with local.exec_command()')
return (p.returncode, stdout, stderr)
|
[
"dg1732004@smail.nju.edu.cn"
] |
dg1732004@smail.nju.edu.cn
|
3c0bf67436cd0e0e3ae2dbe2ecd91f8ab58dff95
|
047d6c1f1097e1a6055b4408e3bf80a9e01c7e5d
|
/avrae/misc/rspell.py
|
09a575c4bea6b09ebe476924d931863a592b0eec
|
[] |
no_license
|
countpauper/countpauper
|
274246f50e297a9ec1cd8d7842149e0ef1da53bd
|
efb1eea44152e9a55aed1ee1478e29df447c24c3
|
refs/heads/master
| 2023-07-23T00:35:58.619290
| 2023-07-08T14:09:06
| 2023-07-08T14:09:06
| 20,813,292
| 4
| 1
| null | 2021-02-15T08:48:50
| 2014-06-13T18:11:51
|
C
|
UTF-8
|
Python
| false
| false
| 358
|
py
|
!alias rspell <drac2>
spell_db=spell_list=load_json(get_gvar('13dc3e0a-a230-40ca-8fb3-a39846300b18'))
args=argparse(&ARGS&)
levels=args.get('l',type_=int) or range(10)
spells=[n for n,p in spell_db.items() if p.level in levels]
if not spells:
return f'echo No spells {levels}'
spell=spells[randint(len(spells))]
return f'spell "{spell}"'
</drac2>
|
[
"countpauper@gmail.com"
] |
countpauper@gmail.com
|
c205af018c3d6e98d0415f1a316565f2cdd8032e
|
d799ab92fff30ec3b4efc5aa079628971451c17a
|
/coilmq/tests/functional/test_basic.py
|
e4d4f999aa12ffc165ce4254075aa8d103028381
|
[] |
no_license
|
LucaLanziani/coilmq
|
cf87a3daed400ccc64548873827f148097d7d780
|
dce6254801617b5612816dc8d95c3249a284e99a
|
refs/heads/master
| 2021-01-15T16:00:07.231608
| 2014-12-18T12:29:30
| 2014-12-18T12:29:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,156
|
py
|
# -*- coding: utf-8 -*-
"""
Functional tests that use the default memory-based storage backends and default
scheduler implementations.
"""
import zlib
from coilmq.auth.simple import SimpleAuthenticator
from coilmq.tests.functional import BaseFunctionalTestCase
__authors__ = ['"Hans Lellelid" <hans@xmpl.org>']
__copyright__ = "Copyright 2009 Hans Lellelid"
__license__ = """Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License."""
class BasicTest(BaseFunctionalTestCase):
"""
Functional tests using default storage engine, etc.
"""
def test_connect(self):
""" Test a basic (non-auth) connection. """
c = self._new_client()
def test_connect_auth(self):
""" Test connecting when auth is required. """
self.server.authenticator = SimpleAuthenticator(store={'user': 'pass'})
c1 = self._new_client(connect=False)
c1.connect()
r = c1.received_frames.get(timeout=1)
assert r.command == 'ERROR'
assert 'Auth' in r.body
c2 = self._new_client(connect=False)
c2.connect(headers={'login': 'user', 'passcode': 'pass'})
r2 = c2.received_frames.get(timeout=1)
print r2
assert r2.command == 'CONNECTED'
c3 = self._new_client(connect=False)
c3.connect(headers={'login': 'user', 'passcode': 'pass-invalid'})
r3 = c3.received_frames.get(timeout=1)
print r3
assert r3.command == 'ERROR'
def test_send_receipt(self):
c1 = self._new_client()
c1.send('/topic/foo', 'A message', extra_headers={'receipt': 'FOOBAR'})
r = c1.received_frames.get(timeout=1)
assert r.command == "RECEIPT"
assert r.receipt_id == "FOOBAR"
def test_subscribe(self):
c1 = self._new_client()
c1.subscribe('/queue/foo')
c2 = self._new_client()
c2.subscribe('/queue/foo2')
c2.send('/queue/foo', 'A message')
assert c2.received_frames.qsize() == 0
r = c1.received_frames.get()
assert r.command == 'MESSAGE'
assert r.body == 'A message'
def test_disconnect(self):
"""
Test the 'polite' disconnect.
"""
c1 = self._new_client()
c1.connect()
c1.disconnect()
assert c1.received_frames.qsize() == 0
def test_send_binary(self):
"""
Test sending binary data.
"""
c1 = self._new_client()
c1.subscribe('/queue/foo')
# Read some random binary data.
# (This should be cross-platform.)
message = 'This is the message that will be compressed.'
c2 = self._new_client()
compressed = zlib.compress(message)
print '%r' % compressed
c2.send('/queue/foo', zlib.compress(message))
r = c1.received_frames.get()
assert r.command == 'MESSAGE'
print '%r' % r.body
assert zlib.decompress(r.body) == message
def test_send_utf8(self):
"""
Test sending utf-8-encoded strings.
"""
c1 = self._new_client()
c1.subscribe('/queue/foo')
unicodemsg = u'我能吞下玻璃而不伤身体'
utf8msg = unicodemsg.encode('utf-8')
print "len(unicodemsg) = %d" % len(unicodemsg)
print "len(utf8msg) = %d" % len(utf8msg)
c2 = self._new_client()
print '%r' % utf8msg
c2.send('/queue/foo', utf8msg)
r = c1.received_frames.get()
assert r.command == 'MESSAGE'
print '%r' % r.body
assert r.body == utf8msg
|
[
"hans@xmpl.org"
] |
hans@xmpl.org
|
ec540c52386303d5ed7435b2de48a96f7ed7af0b
|
ff5eea95bb0827cb086c32f4ec1c174b28e5b82d
|
/gammapy/background/template.py
|
73305041b2c8006fd38496bb6d09cdbd15753079
|
[] |
no_license
|
pflaumenmus/gammapy
|
4830cc5506a4052658f30077fa4e11d8c685ede0
|
7b5caf832c9950c886528ca107203ce9b83c7ebf
|
refs/heads/master
| 2021-01-15T23:27:46.521337
| 2013-09-25T14:23:35
| 2013-09-25T14:23:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 199
|
py
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Template background estimation
Reference: http://adsabs.harvard.edu/abs/2003A%26A...410..389R
"""
from __future__ import division
|
[
"Deil.Christoph@gmail.com"
] |
Deil.Christoph@gmail.com
|
cb9b245593a93be30c210ad1b70214a87685c0a0
|
fa32f7fe4068323b719725558423927ad307cc4b
|
/build_isolated/rostopic/catkin_generated/pkg.develspace.context.pc.py
|
531b3ad0a23091aaac1729f6620ee17b90c89e2e
|
[] |
no_license
|
CJohnson5136/ros_catkin_ws
|
d07ee8c20bc1ebe6c05abdea24ef1f5dab14954b
|
05193a7e587ab82e696c66176b151c43d2bcef82
|
refs/heads/master
| 2021-05-09T03:05:12.373334
| 2018-01-28T03:13:33
| 2018-01-28T03:13:33
| 119,227,181
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 389
|
py
|
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "rostopic"
PROJECT_SPACE_DIR = "/home/pi/ros_catkin_ws/devel_isolated/rostopic"
PROJECT_VERSION = "1.13.5"
|
[
"cody.johnson@ucollege.edu"
] |
cody.johnson@ucollege.edu
|
0313e4040f7e129dd7f7dc51cb61c6c53b03576d
|
3dcc44bf8acd3c6484b57578d8c5595d8119648d
|
/casp9_scripts/truncate_rosetta_files.py
|
57c62df38219834ef0e52eba352a85cd322a04aa
|
[] |
no_license
|
rhiju/rhiju_python
|
f0cab4dfd4dd75b72570db057a48e3d65e1d92c6
|
eeab0750fb50a3078a698d190615ad6684dc2411
|
refs/heads/master
| 2022-10-29T01:59:51.848906
| 2022-10-04T21:28:41
| 2022-10-04T21:28:41
| 8,864,938
| 0
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,352
|
py
|
#!/usr/bin/python
from os import popen,system
from os.path import exists,dirname,basename,expanduser
import sys
import string
from glob import glob
indir = sys.argv[1]
outdir = sys.argv[2]
PYDIR = expanduser('~rhiju')+'/python/'
assert( exists( PYDIR ) )
inputres = 0
if len(sys.argv)>4:
startseq = int(sys.argv[3])
endseq = int(sys.argv[4])
inputres = 1
newprefix = 'truncate_termini_'
if len(sys.argv)>5:
newprefix = sys.argv[5]
command = 'mkdir '+outdir
print(command)
system(command)
if not inputres:
secstructprobfile = glob(indir+'/*.secstructprob')
outfile = outdir+'/truncate_sequence.txt'
assert(len(secstructprobfile)>0)
command = PYDIR+'/decide_termini_truncate.py '+secstructprobfile[0]+ ' ' + outfile
print(command)
system(command)
assert( exists( outfile))
line = open(outfile).readlines()
cols = string.split(line[0])
startseq = int(cols[0])
endseq = int(cols[1])
print
print 'Using start and end residues: ',startseq,endseq
print
infile = glob(indir+'/*.pdb')
if(len(infile)>0): # PDB file is optional.
infile = infile[0]
outfile = outdir + '/'+newprefix+basename(infile)
command = PYDIR+'/termini_truncate_pdb.py %s %d %d %s' % \
(infile,startseq,endseq,outfile)
print(command)
system(command)
else:
print 'COULD NOT FIND PDB FILE BUT THAT IS OK IF YOU ARE DOING CASP.'
infile = glob(indir+'/*.fasta*')
assert(len(infile)>0)
infile = infile[0]
outfile = outdir + '/'+newprefix+basename(infile)
command = PYDIR+'/termini_truncate_fasta.py %s %d %d %s' % \
(infile,startseq,endseq,outfile)
print(command)
system(command)
infile = glob(indir+'/*.psipred_ss2*')
assert(len(infile)>0)
infile = infile[0]
outfile = outdir + '/'+newprefix+basename(infile)
command = PYDIR+'/termini_truncate_psipred_ss2.py %s %d %d %s' % \
(infile,startseq,endseq,outfile)
print(command)
system(command)
infiles = glob(indir+'/*v1_3*')
assert(len(infiles)>1)
for infile in infiles:
outfile = outdir + '/'+newprefix+basename(infile)
if basename(infile)[:6] == 'boinc_': # A special case.
outfile = outdir + '/boinc_'+newprefix + basename(infile)[6:]
command = PYDIR+'/termini_truncate_fragfile.py %s %d %d %s' % \
(infile,startseq,endseq,outfile)
print(command)
system(command)
|
[
"rhiju@stanford.edu"
] |
rhiju@stanford.edu
|
fe461e7e82c4c955bb78b8eb572cb70236f500b7
|
9508879fcf1cff718f3fe80502baff8b82c04427
|
/misc/divide_and_conquer/max_subarray.py
|
cb0fa14f92663cf7a62457fc0285511c67efa967
|
[] |
no_license
|
davidozhang/hackerrank
|
e37b4aace7d63c8be10b0d4d2bffb4d34d401d55
|
bdc40d6ff3e603949eb294bbc02a1e24a4ba5b80
|
refs/heads/master
| 2021-05-04T11:31:59.110118
| 2017-11-15T09:17:27
| 2017-11-15T09:17:27
| 47,906,672
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 846
|
py
|
#!/usr/bin/python
def max_subarray(l, left, right):
if left == right:
return l[left]
else:
middle = (left+right)/2
l1 = max_subarray(l, left, middle)
l2 = max_subarray(l, middle+1, right)
return max(l1, l2, max_crossing(l, left, middle, right))
def max_crossing(l, left, middle, right):
left_sum, right_sum = None, None
left_temp = middle
while left_temp>=left:
if not left_sum:
left_sum = l[left_temp]
else:
left_sum = max(left_sum, left_sum+l[left_temp])
left_temp -= 1
right_temp = middle+1
while right_temp<=right:
if not right_sum:
right_sum = l[right_temp]
else:
right_sum = max(right_sum, right_sum+l[right_temp])
right_temp += 1
return left_sum + right_sum
def main():
l = map(int, raw_input().split())
print max_subarray(l, 0, len(l)-1)
if __name__ == '__main__':
main()
|
[
"davzee@hotmail.com"
] |
davzee@hotmail.com
|
b1265bbbf4df82bff42bdc625d6cc5ff1f518356
|
e5838acd890b711d53fa7b37e0405c236dd52bb2
|
/trails/feeds/emergingthreatscip.py
|
63efdf9f7f92e5e3dc6e6c4843a8a14c2c631072
|
[
"MIT"
] |
permissive
|
stamparm/maltrail
|
34c40fe593f82c5f78d511c21a1cbe049aa04856
|
21422d7acbdfd1157c0b2188b5050f74d0adecbb
|
refs/heads/master
| 2023-08-31T20:08:48.881765
| 2023-08-31T19:39:55
| 2023-08-31T19:39:55
| 27,561,102
| 5,663
| 1,193
|
MIT
| 2023-08-14T03:13:03
| 2014-12-04T21:33:46
|
Python
|
UTF-8
|
Python
| false
| false
| 655
|
py
|
#!/usr/bin/env python
"""
Copyright (c) 2014-2023 Maltrail developers (https://github.com/stamparm/maltrail/)
See the file 'LICENSE' for copying permission
"""
from core.common import retrieve_content
__url__ = "https://rules.emergingthreats.net/open/suricata/rules/compromised-ips.txt"
__info__ = "compromised (suspicious)"
__reference__ = "emergingthreats.net"
def fetch():
retval = {}
content = retrieve_content(__url__)
for line in content.split('\n'):
line = line.strip()
if not line or line.startswith('#') or '.' not in line:
continue
retval[line] = (__info__, __reference__)
return retval
|
[
"miroslav.stampar@gmail.com"
] |
miroslav.stampar@gmail.com
|
7f953edebb9ebf147f2adabc9dbc217311cbcf9e
|
c3b766858bacfec396b839fd881f719db5ef5fc5
|
/setup.py
|
ef9419515da41c4cfa21ea315f99c875c440ad3e
|
[
"MIT"
] |
permissive
|
ferchaure/spikesorters
|
2e20dcdeac67c4e5b442628fadc851c38fc090d5
|
8577572c63c531a239452cdb48f631ec1f490121
|
refs/heads/master
| 2021-06-16T07:49:31.758572
| 2021-04-24T15:22:11
| 2021-04-24T15:22:11
| 254,942,645
| 0
| 0
|
MIT
| 2020-04-11T19:44:04
| 2020-04-11T19:44:03
| null |
UTF-8
|
Python
| false
| false
| 990
|
py
|
from setuptools import setup, find_packages
d = {}
exec(open("spikesorters/version.py").read(), None, d)
version = d['version']
long_description = open("README.md").read()
pkg_name = "spikesorters"
setup(
name=pkg_name,
version=version,
author="Alessio Buccino, Cole Hurwitz, Samuel Garcia, Jeremy Magland, Matthias Hennig",
author_email="alessiop.buccino@gmail.com",
description="Python wrappers for popular spike sorters",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/SpikeInterface/spikesorters",
packages=find_packages(),
package_data={},
include_package_data=True,
install_requires=[
'numpy',
'spikeextractors>=0.9.4',
'spiketoolkit>=0.7.3',
'requests'
],
classifiers=(
"Programming Language :: Python :: 3",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
)
)
|
[
"alejoe9187@gmail.com"
] |
alejoe9187@gmail.com
|
b44795123712f010089fc09f81ce2ff9435eb6cb
|
c849188f25de5cb87d9278aa7cfd0772c698c870
|
/account_report/hooks.py
|
4916696af10ec7bf8ad6c2c28c86dfe43bf6f541
|
[
"MIT"
] |
permissive
|
dineshpanchal93/helpremove
|
19c36131dc2d057ddfaf316c5f964cd211878e1b
|
37e03e922645d52a7bc5d293fa936b0b82017715
|
refs/heads/master
| 2020-03-27T10:35:04.729818
| 2019-02-09T07:24:45
| 2019-02-09T07:24:45
| 146,430,359
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,109
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from . import __version__ as app_version
app_name = "account_report"
app_title = "Account Report"
app_publisher = "Scantech Laser"
app_description = "Account Report"
app_icon = "octicon octicon-file-directory"
app_color = "grey"
app_email = "it@scantechlaser.com"
app_license = "MIT"
# Includes in <head>
# ------------------
# include js, css files in header of desk.html
app_include_css = "/assets/account_report/css/account_report.css"
app_include_js = "/assets/account_report/js/account_report.js"
# include js, css files in header of web template
app_include_css = "/assets/account_report/css/account_report.css"
web_include_js = "/assets/account_report/js/account_report.js"
# include js in page
# page_js = {"page" : "public/js/file.js"}
# include js in doctype views
# doctype_js = {"doctype" : "public/js/doctype.js"}
# doctype_list_js = {"doctype" : "public/js/doctype_list.js"}
# doctype_tree_js = {"doctype" : "public/js/doctype_tree.js"}
# doctype_calendar_js = {"doctype" : "public/js/doctype_calendar.js"}
# Home Pages
# ----------
# application home page (will override Website Settings)
# home_page = "login"
# website user home page (by Role)
# role_home_page = {
# "Role": "home_page"
# }
# Website user home page (by function)
# get_website_user_home_page = "account_report.utils.get_home_page"
# Generators
# ----------
# automatically create page for each record of this doctype
# website_generators = ["Web Page"]
# Installation
# ------------
# before_install = "account_report.install.before_install"
# after_install = "account_report.install.after_install"
# Desk Notifications
# ------------------
# See frappe.core.notifications.get_notification_config
# notification_config = "account_report.notifications.get_notification_config"
# Permissions
# -----------
# Permissions evaluated in scripted ways
# permission_query_conditions = {
# "Event": "frappe.desk.doctype.event.event.get_permission_query_conditions",
# }
#
# has_permission = {
# "Event": "frappe.desk.doctype.event.event.has_permission",
# }
# Document Events
# ---------------
# Hook on document methods and events
# doc_events = {
# "*": {
# "on_update": "method",
# "on_cancel": "method",
# "on_trash": "method"
# }
# }
# Scheduled Tasks
# ---------------
# scheduler_events = {
# "all": [
# "account_report.tasks.all"
# ],
# "daily": [
# "account_report.tasks.daily"
# ],
# "hourly": [
# "account_report.tasks.hourly"
# ],
# "weekly": [
# "account_report.tasks.weekly"
# ]
# "monthly": [
# "account_report.tasks.monthly"
# ]
# }
# Testing
# -------
# before_tests = "account_report.install.before_tests"
# Overriding Whitelisted Methods
# ------------------------------
#
# override_whitelisted_methods = {
# "frappe.desk.doctype.event.event.get_events": "account_report.event.get_events"
# }
website_context = {
"favicon": "/assets/account_report/images/logo.png",
"splash_image": "/assets/account_report/images/logo.png"
}
email_brand_image = "/assets/account_report/images/logo.png"
|
[
"root@localhost.localdomain"
] |
root@localhost.localdomain
|
ef64daf27156245233a072e330507895eb46631f
|
50948d4cb10dcb1cc9bc0355918478fb2841322a
|
/azure-mgmt-storage/azure/mgmt/storage/v2018_02_01/models/immutability_policy_py3.py
|
443d619aebb4752e5b7a550f3121f3b8f72c4077
|
[
"MIT"
] |
permissive
|
xiafu-msft/azure-sdk-for-python
|
de9cd680b39962702b629a8e94726bb4ab261594
|
4d9560cfd519ee60667f3cc2f5295a58c18625db
|
refs/heads/master
| 2023-08-12T20:36:24.284497
| 2019-05-22T00:55:16
| 2019-05-22T00:55:16
| 187,986,993
| 1
| 0
|
MIT
| 2020-10-02T01:17:02
| 2019-05-22T07:33:46
|
Python
|
UTF-8
|
Python
| false
| false
| 2,824
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .azure_entity_resource_py3 import AzureEntityResource
class ImmutabilityPolicy(AzureEntityResource):
"""The ImmutabilityPolicy property of a blob container, including Id, resource
name, resource type, Etag.
Variables are only populated by the server, and will be ignored when
sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Fully qualified resource Id for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
:vartype id: str
:ivar name: The name of the resource
:vartype name: str
:ivar type: The type of the resource. Ex-
Microsoft.Compute/virtualMachines or Microsoft.Storage/storageAccounts.
:vartype type: str
:ivar etag: Resource Etag.
:vartype etag: str
:param immutability_period_since_creation_in_days: Required. The
immutability period for the blobs in the container since the policy
creation, in days.
:type immutability_period_since_creation_in_days: int
:ivar state: The ImmutabilityPolicy state of a blob container, possible
values include: Locked and Unlocked. Possible values include: 'Locked',
'Unlocked'
:vartype state: str or
~azure.mgmt.storage.v2018_02_01.models.ImmutabilityPolicyState
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'etag': {'readonly': True},
'immutability_period_since_creation_in_days': {'required': True},
'state': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
'immutability_period_since_creation_in_days': {'key': 'properties.immutabilityPeriodSinceCreationInDays', 'type': 'int'},
'state': {'key': 'properties.state', 'type': 'str'},
}
def __init__(self, *, immutability_period_since_creation_in_days: int, **kwargs) -> None:
super(ImmutabilityPolicy, self).__init__(**kwargs)
self.immutability_period_since_creation_in_days = immutability_period_since_creation_in_days
self.state = None
|
[
"lmazuel@microsoft.com"
] |
lmazuel@microsoft.com
|
6d1ebf844e5baa83b39344681b4d72082c9febf4
|
673e829dda9583c8dd2ac8d958ba1dc304bffeaf
|
/data/multilingual/Latn.CJK/Sans_8/pdf_to_json_test_Latn.CJK_Sans_8.py
|
b43ef12d4a3dacb970cc1f264903b07fa9b8562d
|
[
"BSD-3-Clause"
] |
permissive
|
antoinecarme/pdf_to_json_tests
|
58bab9f6ba263531e69f793233ddc4d33b783b7e
|
d57a024fde862e698d916a1178f285883d7a3b2f
|
refs/heads/master
| 2021-01-26T08:41:47.327804
| 2020-02-27T15:54:48
| 2020-02-27T15:54:48
| 243,359,934
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 301
|
py
|
import pdf_to_json as p2j
import json
url = "file:data/multilingual/Latn.CJK/Sans_8/udhr_Latn.CJK_Sans_8.pdf"
lConverter = p2j.pdf_to_json.pdf_to_json_converter()
lConverter.mImageHashOnly = True
lDict = lConverter.convert(url)
print(json.dumps(lDict, indent=4, ensure_ascii=False, sort_keys=True))
|
[
"antoine.carme@laposte.net"
] |
antoine.carme@laposte.net
|
392606df599ab62daa2196de81f893ba6d951e9a
|
3d705ec48c94373817e5f61d3f839988910431e3
|
/lib/framework/executor_factory.py
|
eb8e1df9bd2b302e3f38b0f6e9fbf3d701c5b010
|
[] |
no_license
|
namesuqi/zeus
|
937d3a6849523ae931162cd02c5a09b7e37ebdd8
|
3445b59b29854b70f25da2950016f135aa2a5204
|
refs/heads/master
| 2022-07-24T14:42:28.600288
| 2018-03-29T08:03:09
| 2018-03-29T08:03:09
| 127,256,973
| 0
| 0
| null | 2022-07-07T22:57:57
| 2018-03-29T07:53:16
|
Python
|
UTF-8
|
Python
| false
| false
| 1,231
|
py
|
# coding=utf-8
"""
The factory of Test Executor
__author__ = 'zengyuetian'
"""
from lib.framework.executor_server import *
from lib.framework.executor_sdk import *
from lib.framework.executor_system import *
from lib.framework.executor_idc import *
from lib.framework.executor_leifeng import *
from lib.framework.executor_live import *
from lib.framework.executor_vod import *
from lib.framework.executor_deploy import *
class ExecutorFactory(object):
"""
create object according to param
"""
@staticmethod
def make_executor(name):
"""
create executor
:param name:
:return:
"""
if name == "server":
return ExecutorServer()
elif name == "sdk":
return ExecutorSdk()
elif name == "idc":
return ExecutorIdc()
elif name == "live":
return ExecutorLive()
elif name == "leifeng":
return ExecutorLeifeng()
elif name == "vod":
return ExecutorVod()
elif name == "deploy":
return ExecutorDeploy()
elif name == "system":
return ExecutorSystem()
elif name == "dummy":
return ExecutorSystem()
|
[
"suqi_name@163.com"
] |
suqi_name@163.com
|
3c2efb6cba7827d71aa97b9a1dc2926375aafbe1
|
d6e8601fa673876cb079b4eeaae6b40427371772
|
/neurolib/encoder/normal.py
|
7c183e035822c7ceeba4a1b916ce275b9bc21196
|
[] |
no_license
|
cunningham-lab/_neurolib_deprecated
|
8e70703d32701983a8fed9df489360acba856831
|
bf44a6b4c40347caeacd4fd38dd9d1c1680c9a65
|
refs/heads/master
| 2020-03-30T19:08:58.065181
| 2018-10-26T19:49:12
| 2018-10-26T19:49:12
| 151,530,405
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,911
|
py
|
# Copyright 2018 Daniel Hernandez Diaz, Columbia University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ==============================================================================
import numpy as np
import tensorflow as tf
from tensorflow.contrib.layers.python.layers import fully_connected #pylint: disable=no-name-in-module
from neurolib.encoder.basic import InnerNode
from neurolib.encoder import MultivariateNormalTriL # @UnresolvedImport
act_fn_dict = {'relu' : tf.nn.relu,
'leaky_relu' : tf.nn.leaky_relu}
# pylint: disable=bad-indentation, no-member, protected-access
class NormalTriLNode(InnerNode):
"""
"""
num_expected_inputs = 1
num_expected_outputs = 3
def __init__(self,
label,
num_features,
builder,
name=None,
batch_size=1,
**dirs):
"""
Initialize a NormalInputNode
Args:
label (int): A unique identifier for the node
num_features (int): The size of the last dimension.
builder (Builder): An instance of Builder necessary to declare the
secondary output nodes
name (str): A unique string identifier for this node
batch_size (int): Self-explanatory.
dirs (dict): A set of user specified directives for constructing this
node
"""
self.name = "NormalTril_" + str(label) if name is None else name
self.builder = builder
self.num_declared_inputs = 0
self.batch_size = batch_size
super(NormalTriLNode, self).__init__(label)
self.num_features = num_features
self.main_oshape = self._oslot_to_shape[0] = [batch_size] + [num_features]
self._update_directives(**dirs)
self.free_oslots = list(range(self.num_expected_outputs))
self._declare_secondary_outputs()
def _declare_secondary_outputs(self):
"""
Declare outputs for the statistics of the distribution (mean and standard
deviation)
"""
main_oshape = self._oslot_to_shape[0]
# Mean oslot
self._oslot_to_shape[1] = main_oshape
o1 = self.builder.addOutput(name=self.directives['output_mean_name'])
self.builder.addDirectedLink(self, o1, oslot=1)
# Stddev oslot
self._oslot_to_shape[2] = main_oshape + [main_oshape[-1]]
o2 = self.builder.addOutput(name=self.directives['output_cholesky_name'])
print('_oslot_to_shape', self._oslot_to_shape)
self.builder.addDirectedLink(self, o2, oslot=2)
def _update_directives(self, **dirs):
"""
Update the node directives
"""
self.directives = {'num_layers' : 2,
'num_nodes' : 128,
'activation' : 'leaky_relu',
'net_grow_rate' : 1.0,
'share_params' : False,
'output_mean_name' : self.name + '_mean',
'output_cholesky_name' : self.name + '_cholesky'}
self.directives.update(dirs)
# Deal with directives that map to tensorflow objects hidden from the client
self.directives['activation'] = act_fn_dict[self.directives['activation']]
def _build(self, inputs=None):
"""
Builds the graph corresponding to a NormalTriL encoder.
TODO: Expand this a lot, many more specs necessary.
"""
dirs = self.directives
if inputs is not None:
raise NotImplementedError("") # TODO: Should I provide this option? meh
num_layers = dirs['num_layers']
num_nodes = dirs['num_nodes']
activation = dirs['activation']
net_grow_rate = dirs['net_grow_rate']
with tf.variable_scope(self.name, reuse=tf.AUTO_REUSE):
# Define the Means
x_in = self._islot_to_itensor[0]
output_dim = self._oslot_to_shape[0][-1] # Last dim
hid_layer = fully_connected(x_in, num_nodes, activation_fn=activation,
biases_initializer=tf.random_normal_initializer(stddev=1/np.sqrt(num_nodes)))
for _ in range(num_layers-1):
num_nodes = int(num_nodes*net_grow_rate)
hid_layer = fully_connected(hid_layer, num_nodes, activation_fn=activation,
biases_initializer=tf.random_normal_initializer(stddev=1/np.sqrt(num_nodes)))
mean = fully_connected(hid_layer, output_dim, activation_fn=None)
# Define the Cholesky Lower Decomposition
if dirs['share_params']:
output_chol = fully_connected(hid_layer, output_dim**2, activation_fn=None)
else:
hid_layer = fully_connected(x_in, num_nodes, activation_fn=activation,
biases_initializer=tf.random_normal_initializer(stddev=1/np.sqrt(num_nodes)))
for _ in range(num_layers-1):
num_nodes = int(num_nodes*net_grow_rate)
hid_layer = fully_connected(hid_layer, num_nodes, activation_fn=activation,
biases_initializer=tf.random_normal_initializer(stddev=1/np.sqrt(num_nodes)))
output_chol = fully_connected(hid_layer, output_dim**2,
activation_fn=None,
weights_initializer = tf.random_normal_initializer(stddev=1e-4),
# normalizer_fn=lambda x : x/tf.sqrt(x**2),
biases_initializer=tf.random_normal_initializer(stddev=1/np.sqrt(output_dim**2)))
output_chol = tf.reshape(output_chol,
# shape=[self.batch_size, output_dim, output_dim])
shape=[-1, output_dim, output_dim])
if 'output_mean_name' in self.directives:
mean_name = self.directives['output_mean_name']
else:
mean_name = "Mean_" + str(self.label) + '_0'
if 'output_cholesky_name' in self.directives:
cholesky_name = self.directives['output_cholesky_name']
else:
cholesky_name = 'CholTril_' + str(self.label) + '_0'
cholesky_tril = tf.identity(output_chol, name=cholesky_name)
# Get the tensorflow distribution for this node
self.dist = MultivariateNormalTriL(loc=mean, scale_tril=cholesky_tril)
# Fill the oslots
self._oslot_to_otensor[0] = self.dist.sample(name='Out' +
str(self.label) + '_0')
self._oslot_to_otensor[1] = tf.identity(mean, name=mean_name)
self._oslot_to_otensor[2] = cholesky_tril
self._is_built = True
def _log_prob(self, ipt):
"""
Define the loglikelihood of the distribution
"""
return self.dist.log_prob(ipt)
|
[
"dh2832@columbia.edu"
] |
dh2832@columbia.edu
|
f03ff950e29f53407416b268cd4acfd7d155443a
|
bbf7787d94e97d4e0c9bceb46203c08939e6e67d
|
/django-python/static-folder/login/views.py
|
0e51c75484eddd9ea3a346b0875ef9d67ba47164
|
[] |
no_license
|
llanoxdewa/python
|
076e6fa3ed2128c21cdd26c1be6bc82ee6917f9c
|
6586170c5f48827a5e1bcb35656870b5e4eed732
|
refs/heads/main
| 2023-06-16T05:31:52.494796
| 2021-07-09T09:04:30
| 2021-07-09T09:04:30
| 362,782,196
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 295
|
py
|
from django.shortcuts import render
def loginPage(req):
data = {
'title':'login-page',
'header':'selamat datang di login page',
'fileG':"login/img/pemandangan-laut-es.png",
'fileCss':"login/css/style.css"
}
return render(req,'login/index.html',data)
|
[
"llanoxdew4@gmail.com"
] |
llanoxdew4@gmail.com
|
9c7f7b6ddabf4942b20c4d5a3a928eb8dcdb991a
|
1424812c4f211d3d5e356e8b3889a689162062f3
|
/arcade/python/07_simple_sort.py
|
0effd8a406a26d4473e379c6084693df95b8d82e
|
[] |
no_license
|
nazomeku/codefights
|
cb7d3c40be0809695ec524a87c88dbebcf5b47bc
|
b23f6816f9b5b0720feac1c49c31163923e0a554
|
refs/heads/master
| 2021-01-22T12:49:35.905165
| 2017-11-21T19:03:37
| 2017-11-21T19:03:37
| 102,357,617
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 396
|
py
|
"""Write a function that, given an array ofintegers arr, sorts its elements
in ascending order."""
def simple_sort(arr):
    """Sort *arr* in place into ascending order (bubble sort) and return it.

    Each outer sweep bubbles the largest remaining element to the end, so
    every pass inspects one fewer pair than the previous one.
    """
    length = len(arr)
    for done in range(length):
        # Elements beyond length - done - 1 are already in their final spot.
        for pos in range(length - done - 1):
            if arr[pos] > arr[pos + 1]:
                arr[pos], arr[pos + 1] = arr[pos + 1], arr[pos]
    return arr
|
[
"cols.nazo@gmail.com"
] |
cols.nazo@gmail.com
|
9861697b7c1a4508bd7837414f8d091fc945c6be
|
57b4ee27801c23cdd6a6d974dbc278f49740f770
|
/re100-l.py
|
55bc9b3a71d50c10ac3093535afa7ffe0b7a4c4b
|
[] |
no_license
|
zwhubuntu/CTF-chal-code
|
4de9fc0fe9ee85eab3906b36b8798ec959db628c
|
8c912e165f9cc294b3b85fab3d776cd63acc203e
|
refs/heads/master
| 2021-01-20T18:39:26.961563
| 2017-09-25T14:07:56
| 2017-09-25T14:07:56
| 62,563,092
| 7
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 454
|
py
|
'''
@author: wenhuizone

Python re-implementation of the sorting loop lifted from the target binary
(the decompiled C is quoted below); it rearranges the hard-coded byte
values and prints them as an ASCII string.  Python 2 syntax (`print`
statement).
'''
# Values extracted from the binary.
a=[23,5,1,2,4,6,734,3,12,98]
tmp=''
output=''
'''
for ( i = 1; i != 10; ++i )
{
v5 = *(&v6 + i);
for ( j = i - 1; j >= 0 && *(&v6 + j) < v5; --j )
*(&v6 + j + 1) = *(&v6 + j);
*(&v6 + j + 1) = v5;
}
'''
for i in range(1,len(a)):
    tmp=a[i]
    # NOTE(review): range(0, i-1) starts the descent at j == i-2, whereas
    # the C loop starts at j = i - 1; also a[j+1] = tmp runs on every
    # iteration instead of once after the inner loop terminates.  Confirm
    # this deliberately mirrors the binary before "correcting" it.
    for j in range(0,i-1)[::-1]:
        if a[j]<tmp:
            a[j+1]=a[j]
        a[j+1]=tmp
# NOTE(review): chr() on values > 255 (e.g. 734) raises ValueError in
# Python 2 -- verify the intended value list.
for i in range(0,len(a)):
    output+=chr(a[i])
print output
|
[
"zwhubuntu@hotmail.com"
] |
zwhubuntu@hotmail.com
|
4ead3a4d6b2dd62eb5e2c44cd2fdb8c23e30b661
|
b424c3262c9eacf8dd4230019eba7e05a9b95461
|
/.history/ndn_server_20200530135406.py
|
840339130aa5f97be97960d50d1841aebddd005c
|
[] |
no_license
|
leonerii/aer_tp
|
30e47f29bcda69512718a6279a7cad32e9a01b14
|
d8f46b188b5be9f315dd155ed147880ce7dce169
|
refs/heads/master
| 2022-09-30T03:27:24.375971
| 2020-06-04T14:23:16
| 2020-06-04T14:23:16
| 245,219,806
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,849
|
py
|
from threading import Thread
from ndn_receive_handler import Receive_Handler
from ndn_hello_sender import NDN_HelloSender
import socket
class NDN_Server(Thread):
    """TCP server thread for NDN messages.

    NOTE(review): this file is an editor ``.history`` snapshot and is work
    in progress: ``run()`` contains an incomplete expression
    (``self.msg[]``) and references attributes (``self.msg``, ``self.sock``,
    ``self.lock``, ``self.pit``, ...) that are never assigned, so the class
    cannot run as-is.  The indentation below is a best-effort
    reconstruction -- confirm against the final version of the file.
    """
    def __init__(self, localhost, port=9999, data_ids={}):
        # NOTE(review): mutable default argument -- the same dict object is
        # shared by every instance constructed without an explicit data_ids.
        Thread.__init__(self)
        self.localhost = localhost  # address the TCP socket binds to
        self.port = port            # TCP port (default 9999)
        self.data_ids = data_ids    # data-name -> content mapping
    def run(self):
        # Initialize the PIT, FIB and CS tables.
        self.data_ids = {
            '104.continente': '',
            '101.A3' : ''
        }
        fib = {
            '104.continente': self.msg['source'],
            # FIXME(review): incomplete subscript below -- syntax error in
            # this snapshot; the intended key is unknown.
            '101.A3' : self.msg[]
        }
        cs = {
        }
        pit = {
        }
        if self.data_ids:
            for key,value in data_ids.items():
                self.fib[key] = value
                # NOTE(review): bare lookup below has no effect.
                self.cs[key]
            # Create the TCP server socket (IPv6).
            tcp_socket = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
            tcp_socket.bind((self.localhost, self.port))
            # Receiving NDN Messages
            while True:
                rcv_msg = self.sock.recvfrom(10240)
                ndn_handler = Receive_Handler(
                    self.lock, self.pit, self.fib,
                    self.cs, self.conn, self.queue, self.localhost,
                    self.udp_port
                )
                ndn_handler.start()
                # Send NDN HELLO messages
                # NOTE(review): spawning a new hello sender on every received
                # message looks unintended -- presumably this belongs outside
                # the while loop; verify.
                ndn_hello_sender = NDN_HelloSender(
                    self.fib, self.lock,self.localhost,
                    self.hello_interval, self.cs,
                    self.mcast_group, self.mcast_port
                )
                ndn_hello_sender.start()
        else:
            print('data_ids is empty')
|
[
"aseie@Adrianos-MBP.lan"
] |
aseie@Adrianos-MBP.lan
|
a048bde9c8c91cb49b73978b44bfbf744c108af1
|
a2af438d5180922fb55b0805f9702d4b93103202
|
/setup.py
|
0001d547f35d284ce13cca0cfcc1883359573c59
|
[
"WTFPL"
] |
permissive
|
JJediny/django-leaflet-storage
|
cff60100a8d721d202bb913051dc2b1abd89a53c
|
eb4dd4632f09241255bc13e30970ec55fafed816
|
refs/heads/master
| 2021-01-18T10:57:16.198138
| 2015-05-08T04:22:06
| 2015-05-08T04:22:06
| 32,661,595
| 0
| 2
| null | 2015-03-22T04:21:35
| 2015-03-22T04:21:35
| null |
UTF-8
|
Python
| false
| false
| 1,342
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Packaging configuration for django-leaflet-storage."""
import codecs
from setuptools import setup, find_packages
import leaflet_storage

# Read the long description with an explicit encoding; the context manager
# guarantees the handle is closed (the original leaked it).
with codecs.open('README.rst', "r", "utf-8") as readme:
    long_description = readme.read()

# Keep every non-empty requirement line that is not a pip option ('--...')
# or a VCS reference ('git...').
with open('requirements.pip') as reqs:
    install_requires = [
        line for line in reqs.read().split('\n')
        if (line and not line.startswith(('--', 'git')))
    ]

setup(
    name="django-leaflet-storage",
    version=leaflet_storage.__version__,
    author=leaflet_storage.__author__,
    author_email=leaflet_storage.__contact__,
    description=leaflet_storage.__doc__,
    keywords="django leaflet geodjango",
    url=leaflet_storage.__homepage__,
    download_url="https://github.com/yohanboniface/django-leaflet-storage/downloads",
    packages=find_packages(),
    include_package_data=True,
    platforms=["any"],
    zip_safe=True,
    install_requires=install_requires,
    long_description=long_description,
    classifiers=[
        "Development Status :: 3 - Alpha",
        "Environment :: Web Environment",
        "Intended Audience :: Developers",
        #"License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
        "Topic :: Software Development :: Libraries :: Python Modules",
        "Programming Language :: Python",
    ],
)
|
[
"yb@enix.org"
] |
yb@enix.org
|
14bdf727a834b4e51ab11f617e8bd79033ee437b
|
8e24e8bba2dd476f9fe612226d24891ef81429b7
|
/geeksforgeeks/python/medium/6_3.py
|
c29fdaedebb04ad1825458082585935d28145298
|
[] |
no_license
|
qmnguyenw/python_py4e
|
fb56c6dc91c49149031a11ca52c9037dc80d5dcf
|
84f37412bd43a3b357a17df9ff8811eba16bba6e
|
refs/heads/master
| 2023-06-01T07:58:13.996965
| 2021-06-15T08:39:26
| 2021-06-15T08:39:26
| 349,059,725
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,845
|
py
|
Integrating TinyMCE with Django
TinyMCE is an online rich text editor which is fully flexible and provides
customisation. It is mostly used to capture dynamic data such as articles on GFG and
much more, since there is no static database for posts.
**Installation –**
To integrate it with Django web app or website you need to first install its
pip library
pip install django-tinymce
**Integrate with Django Project –**
Add TinyMCE as an individual app in settings.py
INSTALLED_APPS = [
...
'tinymce',
...
]
Also add default configuration for tinyMCE editor in settings.py
TINYMCE_DEFAULT_CONFIG = {
'cleanup_on_startup': True,
'custom_undo_redo_levels': 20,
'selector': 'textarea',
'theme': 'silver',
'plugins': '''
textcolor save link image media preview codesample contextmenu
table code lists fullscreen insertdatetime nonbreaking
contextmenu directionality searchreplace wordcount visualblocks
visualchars code fullscreen autolink lists charmap print hr
anchor pagebreak
''',
'toolbar1': '''
fullscreen preview bold italic underline | fontselect,
fontsizeselect | forecolor backcolor | alignleft alignright |
aligncenter alignjustify | indent outdent | bullist numlist table |
| link image media | codesample |
''',
'toolbar2': '''
visualblocks visualchars |
charmap hr pagebreak nonbreaking anchor | code |
''',
'contextmenu': 'formats | link image',
'menubar': True,
'statusbar': True,
}
here in configuration dictionary you can customise editor by changing values
like theme and many more.
setting TinyMCE is done now to bring it into actions we need forms.py file
with some required values like needed size of input field it is used by
displaying content on html page
__
__
__
__
__
__
__
from django import forms
from tinymce import TinyMCE
from .models import _your_model_
class TinyMCEWidget(TinyMCE):
def use_required_attribute(self, *args):
return False
class PostForm(forms.ModelForm):
content = forms.CharField(
widget=TinyMCEWidget(
attrs={'required': False, 'cols': 30, 'rows':
10}
)
)
class Meta:
model = _your_model_
fields = '__all__'
---
__
__
Last step is to add htmlfield to your model you can also use different field
check out them on their official website
__
__
__
__
__
__
__
...
from tinymce.models import HTMLField
class article(models.Model):
...
content = HTMLField()
---
__
__
And its all set just make migrations for see changes in admin page by running
following commands
python manage.py makemigrations
python manage.py migrate
Now check it in admin area by running server
python manage.py runserver
**Output –**
Here is how it will look; it may have a slightly different appearance in your setup.

Editor in admin area
Attention geek! Strengthen your foundations with the **Python Programming
Foundation** Course and learn the basics.
To begin with, your interview preparations Enhance your Data Structures
concepts with the **Python DS** Course.
My Personal Notes _arrow_drop_up_
Save
|
[
"qmnguyenw@gmail.com"
] |
qmnguyenw@gmail.com
|
c98d726a4abb22a8daeee2ba7c22d6dde58d525e
|
7858da232b9dbfb9c32d6900de51e14e5d48e241
|
/lesson_7_3_2.py
|
1c43af55a0b611e9985a9c1383853dc4ac62717a
|
[] |
no_license
|
Mameluke8888/QA_Automation_Lesson_7_3
|
4069e202ca3f5a0de1f1a0734654f7fd19e12ed5
|
9d2b5735da2fe4850c15236e675cc48b24d16a1d
|
refs/heads/main
| 2023-04-27T19:32:48.868635
| 2021-05-06T09:06:10
| 2021-05-06T09:06:10
| 364,848,904
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 714
|
py
|
# Exercise #2
# Find the mistake in the following code snippet and correct it:
# corrected snippet
def compute_patterns(inputs=None, pattern="new pattern"):
    """Append *pattern* to *inputs* and return a labelled list.

    A fresh list is used when *inputs* is omitted, so repeated calls do
    not share state (avoids the mutable-default-argument pitfall the
    exercise asks about).
    """
    collected = inputs if inputs is not None else []
    collected.append(pattern)
    return ["a list based on "] + collected
# just some tests - you can remove them if you want
print("".join(compute_patterns()))
print("".join(compute_patterns()))
print("".join(compute_patterns()))
sample_inputs = []
print(" ".join(compute_patterns(sample_inputs, "very new pattern")))
print(" ".join(compute_patterns(sample_inputs, "super new pattern")))
print(" ".join(compute_patterns(sample_inputs, "super duper new pattern")))
print("".join(compute_patterns()))
|
[
"evgenyabdulin@Evgenys-Mac-mini.local"
] |
evgenyabdulin@Evgenys-Mac-mini.local
|
6bf7f969c43b526df2273d8c69f24b6846b19657
|
96b6f183cda10aac03f9fb4ffa11cba6445c35aa
|
/algoriz/settings.py
|
57f2ac9bf3f2798173af3decde8ebb0931baa17b
|
[] |
no_license
|
akshar-raaj/algoriz
|
3a6ea60b15dc35e848a5534cdd7f6b047fd9a7d3
|
21e0797a7d19248043c5810fed89ba7c50e551f9
|
refs/heads/master
| 2020-04-03T03:38:53.705683
| 2018-10-27T19:51:31
| 2018-10-27T19:51:31
| 154,991,553
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,429
|
py
|
"""
Django settings for algoriz project.
Generated by 'django-admin startproject' using Django 1.11.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'a#l@_bofm=q)&$i=t#u1$1x*sqa$nx6ms260p7d793+bz861vh'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'trades',
'graphos',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'algoriz.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(BASE_DIR, 'templates')
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'algoriz.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
# DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.sqlite3',
# 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
# }
# }
DATABASES = {
'default': {
'ENGINE':'django.db.backends.postgresql_psycopg2',
'NAME': 'algoriz',
'USER': 'akshar',
'PASSWORD': '',
'HOST': 'localhost',
'PORT': '5432',
},
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
|
[
"akshar@agiliq.com"
] |
akshar@agiliq.com
|
6188b38f2b472324d725074b31351f8de45d833f
|
94b4177306b898b86601cae5ff1e580eb95e502f
|
/mysite/settings.py
|
54d6592cf1cb0a445fbc3a37c96516f7e8d54fef
|
[] |
no_license
|
inho2736/my-first-blog
|
c797b0d4b613fa17ac3b23962d39835df514926b
|
591750ee222425fc96910040b6b84f4bc4236a7e
|
refs/heads/master
| 2020-03-22T07:39:31.308553
| 2018-07-04T11:53:32
| 2018-07-04T11:53:32
| 139,715,658
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,194
|
py
|
"""
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 1.11.14.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'ha9kod=amx36+&xxrcbg!bk69vzzq)j=xsl=cb+k(u$b-g#)l3'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['127.0.0.1', '.pythonanywhere.com']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'blog',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'ko'
TIME_ZONE = 'Asia/Seoul'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
|
[
"you@example.com"
] |
you@example.com
|
2d89ffe2040450be51655549fc2da71608ecba62
|
b6c931fea41658914844ceae8906a2cb03294614
|
/math/0x06-multivariate_prob/0-mean_cov.py
|
8ae4bbede7e0867b9a69d2b891f075cfb6ace21b
|
[] |
no_license
|
sidneyriffic/holbertonschool-machine_learning
|
05ccbe13e1b4b9cb773e0c531a1981a7970daa1b
|
56356c56297d8391bad8a1607eb226489766bc63
|
refs/heads/master
| 2021-07-04T07:45:22.919076
| 2020-12-19T01:09:01
| 2020-12-19T01:09:01
| 207,622,396
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 770
|
py
|
#!/usr/bin/env python3
"""Return means and covariance matrix of a multivariate data set"""
import numpy as np
def mean_cov(X):
    """Return the mean vector and covariance matrix of a data set.

    Args:
        X (numpy.ndarray): array of shape (n, d) whose rows are data points.

    Returns:
        tuple: (mean, cov) where mean has shape (1, d) and cov has shape
        (d, d), using the unbiased (n - 1) normalization.

    Raises:
        TypeError: if X is not a 2D numpy.ndarray.
        ValueError: if X contains fewer than 2 data points.
    """
    # Exact type check kept from the original (intentionally rejects
    # ndarray subclasses, matching the project checker's error cases).
    if type(X) != np.ndarray or len(X.shape) != 2:
        raise TypeError("X must be a 2D numpy.ndarray")
    if X.shape[0] < 2:
        raise ValueError("X must contain multiple data points")
    means = np.mean(X, axis=0, keepdims=True)
    # Vectorized replacement for the original O(d^2) Python double loop:
    # cov = dev^T dev / (n - 1) with dev the mean-centered data.
    deviations = X - means
    covmat = deviations.T @ deviations / (X.shape[0] - 1)
    return means, covmat
|
[
"sidneyriffic@gmail.com"
] |
sidneyriffic@gmail.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.