| column | type | length / values |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 3 to 288 |
| content_id | string | length 40 |
| detected_licenses | list | length 0 to 112 |
| license_type | string | 2 classes |
| repo_name | string | length 5 to 115 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 684 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 to 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 to 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 to 2023-09-06 01:08:06 |
| github_id | int64 | 4.92k to 681M, nullable |
| star_events_count | int64 | 0 to 209k |
| fork_events_count | int64 | 0 to 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us] | 2012-06-04 01:52:49 to 2023-09-14 21:59:50, nullable |
| gha_created_at | timestamp[us] | 2008-05-22 07:58:19 to 2023-08-21 12:35:19, nullable |
| gha_language | string | 147 classes |
| src_encoding | string | 25 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 128 to 12.7k |
| extension | string | 142 classes |
| content | string | length 128 to 8.19k |
| authors | list | length 1 |
| author_id | string | length 1 to 132 |
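For orientation, a minimal sketch of how a table with this schema could be loaded and filtered with the `datasets` library; the dataset path is a hypothetical placeholder, and only the columns listed above are assumed.

from datasets import load_dataset

# hypothetical dataset path; substitute the real one
ds = load_dataset("user/python-files-sample", split="train")
# keep permissively licensed, non-generated files under 8 KiB
small = ds.filter(lambda r: r["license_type"] == "permissive"
                  and not r["is_generated"]
                  and r["length_bytes"] < 8192)
print(small[0]["repo_name"], small[0]["path"])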
4570a4f1135d69481df51ef52485f7fe43b9827d
|
430bd23decf16dc572a587b7af9f5c8e7dea5e6b
|
/clients/python/swagger_client/models/funding.py
|
ff0870b285368906885d8379191500364f1d06c3
|
[
"Apache-2.0"
] |
permissive
|
jltrade/api-connectors
|
332d4df5e7e60bd27b6c5a43182df7d99a665972
|
fa2cf561b414e18e9d2e1b5d68e94cc710d315e5
|
refs/heads/master
| 2020-06-19T10:20:46.022967
| 2016-09-24T13:12:17
| 2016-09-24T13:12:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,097
|
py
|
# coding: utf-8
"""
BitMEX API
REST API for the BitMEX.com trading platform.<br><br><a href=\"/app/restAPI\">REST Documentation</a><br><a href=\"/app/wsAPI\">Websocket Documentation</a>
OpenAPI spec version: 1.2.0
Contact: support@bitmex.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pprint import pformat
from six import iteritems
import re
class Funding(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, timestamp=None, symbol=None, funding_interval=None, funding_rate=None, funding_rate_daily=None):
"""
Funding - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'timestamp': 'date',
'symbol': 'str',
'funding_interval': 'date',
'funding_rate': 'float',
'funding_rate_daily': 'float'
}
self.attribute_map = {
'timestamp': 'timestamp',
'symbol': 'symbol',
'funding_interval': 'fundingInterval',
'funding_rate': 'fundingRate',
'funding_rate_daily': 'fundingRateDaily'
}
self._timestamp = timestamp
self._symbol = symbol
self._funding_interval = funding_interval
self._funding_rate = funding_rate
self._funding_rate_daily = funding_rate_daily
@property
def timestamp(self):
"""
Gets the timestamp of this Funding.
:return: The timestamp of this Funding.
:rtype: date
"""
return self._timestamp
@timestamp.setter
def timestamp(self, timestamp):
"""
Sets the timestamp of this Funding.
:param timestamp: The timestamp of this Funding.
:type: date
"""
self._timestamp = timestamp
@property
def symbol(self):
"""
Gets the symbol of this Funding.
:return: The symbol of this Funding.
:rtype: str
"""
return self._symbol
@symbol.setter
def symbol(self, symbol):
"""
Sets the symbol of this Funding.
:param symbol: The symbol of this Funding.
:type: str
"""
self._symbol = symbol
@property
def funding_interval(self):
"""
Gets the funding_interval of this Funding.
:return: The funding_interval of this Funding.
:rtype: date
"""
return self._funding_interval
@funding_interval.setter
def funding_interval(self, funding_interval):
"""
Sets the funding_interval of this Funding.
:param funding_interval: The funding_interval of this Funding.
:type: date
"""
self._funding_interval = funding_interval
@property
def funding_rate(self):
"""
Gets the funding_rate of this Funding.
:return: The funding_rate of this Funding.
:rtype: float
"""
return self._funding_rate
@funding_rate.setter
def funding_rate(self, funding_rate):
"""
Sets the funding_rate of this Funding.
:param funding_rate: The funding_rate of this Funding.
:type: float
"""
self._funding_rate = funding_rate
@property
def funding_rate_daily(self):
"""
Gets the funding_rate_daily of this Funding.
:return: The funding_rate_daily of this Funding.
:rtype: float
"""
return self._funding_rate_daily
@funding_rate_daily.setter
def funding_rate_daily(self, funding_rate_daily):
"""
Sets the funding_rate_daily of this Funding.
:param funding_rate_daily: The funding_rate_daily of this Funding.
:type: float
"""
self._funding_rate_daily = funding_rate_daily
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
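# Usage sketch (editor's illustration, not part of the generated client;
# the values are illustrative, not real BitMEX data):
# funding = Funding(symbol='XBTUSD', funding_rate=0.0001, funding_rate_daily=0.0003)
# print(funding.to_dict())
# # -> {'timestamp': None, 'symbol': 'XBTUSD', 'funding_interval': None,
# #     'funding_rate': 0.0001, 'funding_rate_daily': 0.0003}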
|
[
"samuel.trace.reed@gmail.com"
] |
samuel.trace.reed@gmail.com
|
55bd1e9c03901d04fa75221fbb2e004339501afe
|
c3379fb707daf434fc731006e173da817b68ca75
|
/pydatview/fast/runner.py
|
f321a81d662cb9f3714995b306df20a08da376e1
|
[
"MIT"
] |
permissive
|
dviewtest/pyDatView
|
43cb6d2bb76a78670ecd1083495024f935bc9e9b
|
3516ffaff601c122d62ffc94abd842958354ece8
|
refs/heads/master
| 2023-06-27T11:08:52.056689
| 2021-06-23T17:57:35
| 2021-06-23T17:57:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,304
|
py
|
# --- For cmd.py
from __future__ import division, print_function
import os
import subprocess
import multiprocessing
import collections
import glob
import pandas as pd
import numpy as np
import shutil
import stat
import re
# --- Fast libraries
from weio.weio.fast_input_file import FASTInputFile
from weio.weio.fast_output_file import FASTOutputFile
# from pyFAST.input_output.fast_input_file import FASTInputFile
# from pyFAST.input_output.fast_output_file import FASTOutputFile
FAST_EXE='openfast'
# --------------------------------------------------------------------------------}
# --- Tools for executing FAST
# --------------------------------------------------------------------------------{
# --- START cmd.py
def run_cmds(inputfiles, exe, parallel=True, showOutputs=True, nCores=None, showCommand=True):
""" Run a set of simple commands of the form `exe input_file`
By default, the commands are run in "parallel" (though the method needs to be improved)
The stdout and stderr may be displayed on screen (`showOutputs`) or hidden.
Better handling is still needed.
"""
Failed=[]
def _report(p):
if p.returncode==0:
print('[ OK ] Input : ',p.input_file)
else:
Failed.append(p)
print('[FAIL] Input : ',p.input_file)
print(' Directory: '+os.getcwd())
print(' Command : '+p.cmd)
print(' Use `showOutputs=True` to debug, or run the command above.')
#out, err = p.communicate()
#print('StdOut:\n'+out)
#print('StdErr:\n'+err)
ps=[]
iProcess=0
if nCores is None:
nCores=multiprocessing.cpu_count()
if nCores<0:
nCores=len(inputfiles)+1
for i,f in enumerate(inputfiles):
#print('Process {}/{}: {}'.format(i+1,len(inputfiles),f))
ps.append(run_cmd(f, exe, wait=(not parallel), showOutputs=showOutputs, showCommand=showCommand))
iProcess += 1
# waiting once we've filled the number of cores
# TODO: smarter method with proper queue, here processes are run by chunks
if parallel:
if iProcess==nCores:
for p in ps:
p.wait()
for p in ps:
_report(p)
ps=[]
iProcess=0
# Extra processes if the count is not a multiple of nCores (TODO: smarter method)
for p in ps:
p.wait()
for p in ps:
_report(p)
# --- Giving a summary
if len(Failed)==0:
print('[ OK ] All simulations run successfully.')
return True
else:
print('[FAIL] {}/{} simulations failed:'.format(len(Failed),len(inputfiles)))
for p in Failed:
print(' ',p.input_file)
return False
def run_cmd(input_file_or_arglist, exe, wait=True, showOutputs=False, showCommand=True):
""" Run a simple command of the form `exe input_file` or `exe arg1 arg2` """
# TODO Better capture STDOUT
if isinstance(input_file_or_arglist, list):
args= [exe] + input_file_or_arglist
input_file = ' '.join(input_file_or_arglist)
input_file_abs = input_file
else:
input_file=input_file_or_arglist
if not os.path.isabs(input_file):
input_file_abs=os.path.abspath(input_file)
else:
input_file_abs=input_file
if not os.path.exists(exe):
raise Exception('Executable not found: {}'.format(exe))
args= [exe,input_file]
#args = 'cd '+workDir+' && '+ exe +' '+basename
shell=False
if showOutputs:
STDOut= None
else:
STDOut= open(os.devnull, 'w')
if showCommand:
print('Running: '+' '.join(args))
if wait:
class Dummy():
pass
p=Dummy()
p.returncode=subprocess.call(args , stdout=STDOut, stderr=subprocess.STDOUT, shell=shell)
else:
p=subprocess.Popen(args, stdout=STDOut, stderr=subprocess.STDOUT, shell=shell)
# Storing some info into the process
p.cmd = ' '.join(args)
p.args = args
p.input_file = input_file
p.input_file_abs = input_file_abs
p.exe = exe
return p
# --- END cmd.py
def run_fastfiles(fastfiles, fastExe=None, parallel=True, showOutputs=True, nCores=None, showCommand=True, reRun=True):
if fastExe is None:
fastExe=FAST_EXE
if not reRun:
# Figure out which files exist
newfiles=[]
for f in fastfiles:
base=os.path.splitext(f)[0]
if os.path.exists(base+'.outb') or os.path.exists(base+'.out'):
print('>>> Skipping existing simulation for: ',f)
pass
else:
newfiles.append(f)
fastfiles=newfiles
return run_cmds(fastfiles, fastExe, parallel=parallel, showOutputs=showOutputs, nCores=nCores, showCommand=showCommand)
def run_fast(input_file, fastExe=None, wait=True, showOutputs=False, showCommand=True):
if fastExe is None:
fastExe=FAST_EXE
return run_cmd(input_file, fastExe, wait=wait, showOutputs=showOutputs, showCommand=showCommand)
def writeBatch(batchfile, fastfiles, fastExe=None):
""" Write batch file, everything is written relative to the batch file"""
if fastExe is None:
fastExe=FAST_EXE
fastExe_abs = os.path.abspath(fastExe)
batchfile_abs = os.path.abspath(batchfile)
batchdir = os.path.dirname(batchfile_abs)
fastExe_rel = os.path.relpath(fastExe_abs, batchdir)
with open(batchfile,'w') as f:
for ff in fastfiles:
ff_abs = os.path.abspath(ff)
ff_rel = os.path.relpath(ff_abs, batchdir)
l = fastExe_rel + ' '+ ff_rel
f.write("%s\n" % l)
def removeFASTOuputs(workDir):
# Cleaning folder
for f in glob.glob(os.path.join(workDir,'*.out')):
os.remove(f)
for f in glob.glob(os.path.join(workDir,'*.outb')):
os.remove(f)
for f in glob.glob(os.path.join(workDir,'*.ech')):
os.remove(f)
for f in glob.glob(os.path.join(workDir,'*.sum')):
os.remove(f)
if __name__=='__main__':
run_cmds(['main1.fst','main2.fst'], './Openfast.exe', parallel=True, showOutputs=False, nCores=4, showCommand=True)
pass
# --- Test of templateReplace
|
[
"elmanuelito.github@gmail.com"
] |
elmanuelito.github@gmail.com
|
27d32813b7fee47a8f3898e5b10327bb6f1e91ce
|
25404f4cfb9be3e6f1b3fe31a1554459eb200813
|
/1_todo/string_io_and_json_example.py
|
5cb62ee749b5815bcf6dba5c20c390f1ac5608f1
|
[] |
no_license
|
nightimero/annal_report_test
|
1c6eb4b71482f870c753f5084212afd071929f57
|
7bbc76ba703527ba8f4b84fbdb94fd57b37b9887
|
refs/heads/master
| 2021-09-06T21:18:59.534963
| 2018-02-11T15:31:21
| 2018-02-11T15:31:21
| 103,259,391
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,171
|
py
|
# -*- coding: utf-8 -*-
from StringIO import StringIO
import json
io = StringIO()
json.dump(['streaming API'], io)
io.getvalue()
# '["streaming API"]'
# 2. Use separators for compact encoding
import json
json.dumps([1, 2, 3, {'4': 5, '6': 7}], separators=(',', ':'))
'[1,2,3,{"4":5,"6":7}]'
# 3. Pretty printing: the indent parameter sets the indentation width
import json
print json.dumps({'4': 5, '6': 7}, sort_keys=True,
indent=4, separators=(',', ': '))
# {
# "4": 5,
# "6": 7
# }
# 4.Decoding JSON:
import json
json.loads('["foo", {"bar":["baz", null, 1.0, 2]}]')
[u'foo', {u'bar': [u'baz', None, 1.0, 2]}]
json.loads('"\\"foo\\bar"')
u'"foo\x08ar'
from StringIO import StringIO
io = StringIO('["streaming API"]')
json.load(io)
[u'streaming API']
# 5. Skipping bad keys
# Another useful dumps parameter is skipkeys, which defaults to False. When dumps serializes a dict,
# every key must be a str; any other type raises a TypeError. With skipkeys=True, such keys are skipped gracefully.
data = {'b': 789, 'c': 456, (1, 2): 123}
print json.dumps(data, skipkeys=True)
#
# {"c": 456, "b": 789}
|
[
"chenxiang@aiknown.com"
] |
chenxiang@aiknown.com
|
3b2b4b72c827466af785eb8a9670fc7e4d2bff0d
|
06ee5a5d83466896bbfd1653206da0151d6aa81a
|
/apps/business/serializers/file_serializer.py
|
ae6dac0452ba845b69a632709ac10c18ac7e31f3
|
[] |
no_license
|
fengjy96/rest_task
|
201421a40ce42031223f61135d1d5e85809188e6
|
db1d7c4eb2d5d229ab54c6d5775f96fc1843716e
|
refs/heads/master
| 2020-07-22T19:48:19.940094
| 2019-09-02T13:40:11
| 2019-09-02T13:40:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 426
|
py
|
from rest_framework import serializers
from business.models.files import Files
class FilesSerializer(serializers.ModelSerializer):
"""
Files: create, read, update, delete
"""
class Meta:
model = Files
fields = '__all__'
class FilesListSerializer(serializers.ModelSerializer):
"""
Messages: create, read, update, delete
"""
class Meta:
model = Files
fields = '__all__'
depth = 1
|
[
"onerf@sina.com"
] |
onerf@sina.com
|
e01b140eb36a9c67eba75192ebe27eb8b1a977f6
|
6f2a8a9d2f11d194fe41762e71ebd7270a22325b
|
/source/abstract/entities/electronic/controller/controller.py
|
889ac5c8eca1c378a0464c9d0484d2aa82609ba9
|
[] |
no_license
|
rschum/game
|
053da314a276445e03d682c6481a35aa888c5125
|
59ef0461c1ac60e690d39f6c180256f387999e44
|
refs/heads/master
| 2020-05-23T20:10:57.698939
| 2017-04-20T03:04:31
| 2017-04-20T03:04:31
| 84,785,024
| 0
| 0
| null | 2017-03-13T04:45:46
| 2017-03-13T04:45:46
| null |
UTF-8
|
Python
| false
| false
| 193
|
py
|
from source.abstract.entities.inanimate.controller import controller
class Controller(controller.Controller):
def __init__(self):
controller.Controller.__init__(self)
pass
|
[
"Master.Foo.v.1.0.0@gmail.com"
] |
Master.Foo.v.1.0.0@gmail.com
|
11247c56107695e84821a8412a5d43b66542c9fc
|
a5d0a0499dd069c555080c8cefc2434304afead4
|
/Programmers/pipe.py
|
bfa9ff3f16b5e878de473bd4fbe430f11b47ebcb
|
[] |
no_license
|
devjinius/algorithm
|
9bdf9afc021249b188d6930cf9d71f9147325d9f
|
007fa6346a19868fbbc05eefd50848babb5f1cca
|
refs/heads/master
| 2020-05-04T06:08:32.827207
| 2019-07-31T02:39:39
| 2019-07-31T02:39:39
| 178,999,456
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 478
|
py
|
# Programmers: Iron Bars (laser cutting) problem
# https://programmers.co.kr/learn/courses/30/lessons/42585
def solution(arrangement):
stack = []
prevStr = ''
count = 0
for word in arrangement:
if(word == ")"):
if(prevStr == "("):
stack.pop()
count += len(stack)
else:
stack.pop()
count += 1
else:
stack.append(word)
prevStr = word
return count
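# Sample check (editor's illustration), using the classic iron-bars example input:
# print(solution('()(((()())(())()))(())'))  # expected: 17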
|
[
"eugenekang94@gmail.com"
] |
eugenekang94@gmail.com
|
b1ac9099c36ddeeab3548464dd1b5d5e9b1ee687
|
84d2040faf1acaabedce67e884b55767b6b98e57
|
/source/watches/migrations/0003_auto_20210305_1130.py
|
e955040939fd33e381c347577ff1f00f4c1035ee
|
[] |
no_license
|
UuljanAitnazarova/watches_shop
|
3adae63141107c91ae6a489dddeb8f8fa9433666
|
6f54b11d468957cf05275c37b17f4c2e669e9fc2
|
refs/heads/master
| 2023-05-08T11:51:25.597190
| 2021-05-27T12:48:46
| 2021-05-27T12:48:46
| 344,481,584
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 508
|
py
|
# Generated by Django 3.1.7 on 2021-03-05 11:30
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('watches', '0002_auto_20210304_1426'),
]
operations = [
migrations.AlterField(
model_name='product',
name='product_availability',
field=models.IntegerField(validators=[django.core.validators.MinValueValidator(0)], verbose_name='Остаток'),
),
]
|
[
"u.aitnazarova@gmail.com"
] |
u.aitnazarova@gmail.com
|
b73d1826be68e566cc4418a478ee654d378cc0a6
|
073d40d3ea58e37d8a130794910068005f3f259d
|
/processing/surface_based_analysis.py
|
56afba929f609a17760fcae36ccf26cd024a0541
|
[
"BSD-2-Clause"
] |
permissive
|
KamalakerDadi/public_analysis_code
|
bd925f442d32fbedc56e145ad0bc981d5ac3924c
|
b8770d485fd2697838b911120c41d91250671636
|
refs/heads/master
| 2020-03-20T21:10:33.759118
| 2018-07-30T18:27:10
| 2018-07-30T18:27:10
| 137,727,239
| 0
| 0
| null | 2018-06-18T08:27:58
| 2018-06-18T08:27:58
| null |
UTF-8
|
Python
| false
| false
| 5,182
|
py
|
"""
This script does 2 things:
1. Freesurfer segmentation
2. project the coregistered fMRI images to the surface:
the surface is the grey-white matter interface of the subject
The purpose is to perform proper group analysis on the surface on fsaverage,
and use existing atlases on the surface.
Author: Bertrand Thirion, Isabelle Courcol, 2013 -- 2016
Note
----
First run: export SUBJECTS_DIR=''
"""
import os
import glob
import commands
from nipype.caching import Memory
from joblib import Parallel, delayed
from nipype.interfaces.freesurfer import ReconAll, BBRegister
work_dir = '/neurospin/ibc/derivatives'
subjects = ['sub-%02d' % i for i in [1, 4, 5, 6, 7, 8, 9, 11, 12, 13, 14, 15]]
subjects = ['sub-%02d' % i for i in [8, 9, 11, 12, 13, 14]]  # NOTE: overrides the fuller list above
mem = Memory(base_dir='/neurospin/tmp/ibc')
# Step 1: Perform recon-all
os.environ['SUBJECTS_DIR'] = ''
def recon_all(work_dir, subject, high_res=True):
# create directories in output_dir
if high_res:
# high-resolution T1
anat_img = glob.glob(os.path.join(
work_dir, subject, 'ses-*/anat/sub-*_ses-*_acq-highres_T1w.nii*'))[0]
print(anat_img)
t1_dir = os.path.dirname(anat_img)
os.system('recon-all -all -subjid %s -sd %s -hires -i %s -expert expert.opts' % (subject, t1_dir, anat_img))
else:
# low-resolution T1
subject_dir = os.path.join(work_dir, subject, 'ses-00')
t1_dir = os.path.join(subject_dir, 'anat')
anat_img = glob.glob(os.path.join(t1_dir, '%s_ses-00_T1w.nii*' % subject))[0]
# reconall = mem.cache(ReconAll)
#reconall(subject_id=subject,
# directive='all',
# subjects_dir=t1_dir,
# T1_files=anat_img)
os.system('recon-all -all -subjid %s -sd %s' % (subject, t1_dir))
#Parallel(n_jobs=1)(delayed(recon_all)(work_dir, subject, True)
# for subject in subjects)
# Step 2: Perform the projection
def project_volume(work_dir, subject, sessions, do_bbr=True):
t1_dir = os.path.join(work_dir, subject, 'ses-00', 'anat')
for session in sessions:
subject_dir = os.path.join(work_dir, subject, session)
if not os.path.exists(subject_dir):
continue
fmri_dir = os.path.join(subject_dir, 'func')
fs_dir = os.path.join(subject_dir, 'freesurfer')
fmri_images = glob.glob(os.path.join(fmri_dir, 'rdc*.nii.gz'))
# --------------------------------------------------------------------
# run the projection using freesurfer
os.environ['SUBJECTS_DIR'] = t1_dir
if not os.path.exists(fs_dir):
os.mkdir(fs_dir)
# take the fMRI series
print("fmri_images", fmri_images)
for fmri_session in fmri_images:
basename = os.path.basename(fmri_session).split('.')[0]
print (basename)
# output names
# the .gii files will be put in the same directory as the input fMRI
left_fmri_tex = os.path.join(fs_dir, basename + '_lh.gii')
right_fmri_tex = os.path.join(fs_dir, basename + '_rh.gii')
if do_bbr:
# use BBR registration to finesse the coregistration
bbreg = BBRegister(subject_id=subject, source_file=fmri_session,
init='header', contrast_type='t2')
bbreg.run()
# run the freesurfer command for projection
regheader = os.path.join(fmri_dir, basename + '_bbreg_%s.dat' % subject)
print(commands.getoutput(
'$FREESURFER_HOME/bin/mri_vol2surf --src %s --o %s '\
'--out_type gii --srcreg %s --hemi lh --projfrac-avg 0 2 0.1'
% (fmri_session, left_fmri_tex, regheader)))
print(commands.getoutput(
'$FREESURFER_HOME/bin/mri_vol2surf --src %s --o %s '\
'--out_type gii --srcreg %s --hemi rh --projfrac-avg 0 2 0.1'
% (fmri_session, right_fmri_tex, regheader)))
# resample to fsaverage
left_fsaverage_fmri_tex = os.path.join(
fs_dir, basename + '_fsaverage_lh.gii')
right_fsaverage_fmri_tex = os.path.join(
fs_dir, basename + '_fsaverage_rh.gii')
print(commands.getoutput(
'$FREESURFER_HOME/bin/mri_surf2surf --srcsubject %s --srcsurfval '\
'%s --trgsurfval %s --trgsubject ico --trgicoorder 7 '\
'--hemi lh --nsmooth-out 5' %
(subject, left_fmri_tex, left_fsaverage_fmri_tex)))
print(commands.getoutput(
'$FREESURFER_HOME/bin/mri_surf2surf --srcsubject %s --srcsurfval '\
'%s --trgsubject ico --trgicoorder 7 --trgsurfval %s '\
'--hemi rh --nsmooth-out 5' %
(subject, right_fmri_tex, right_fsaverage_fmri_tex)))
from pipeline import get_subject_session
subject_sessions = sorted(get_subject_session('enumeration'))
Parallel(n_jobs=4)(
delayed(project_volume)(work_dir, subject_session[0], [subject_session[1]], do_bbr=True)
for subject_session in subject_sessions)
|
[
"bertrand.thirion@inria.fr"
] |
bertrand.thirion@inria.fr
|
42e44d36df2d8995690e0ac00535e4955d8b3472
|
94d5467b1315791fa75165eb862fdd8fef300958
|
/yunyan_baotou/src/business_ultra/init_data.py
|
61600784148a3a71b91b0ae55c58a09ba84d4b62
|
[] |
no_license
|
scmsqhn/code
|
e31926174c247d49c1db8f121e3ec1b82f8a2d9d
|
b389d7dc5fafad8a4185a03cd6d5519ccf8f99df
|
refs/heads/master
| 2022-12-09T05:37:07.065840
| 2019-05-14T01:55:07
| 2019-05-14T01:55:07
| 185,903,771
| 1
| 0
| null | 2022-12-08T05:05:51
| 2019-05-10T02:22:28
|
Python
|
UTF-8
|
Python
| false
| false
| 5,621
|
py
|
#!/usr/bin/env python3
import datetime
import pandas as pd
from datetime import datetime
import json
import os
import codecs
import numpy as np
import traceback
import sys
sys.path.append(os.environ['YUNYAN'])
sys.path.append(os.environ['ROOT'])
sys.path.append(os.environ['WORKBENCH'])
#import gensim
#from gensimplus.source.gensim_plus_config import FLAGS
#from gensimplus.source.model_save_load_helper import ModelSaveLoadHelper
#from gensim.models import LsiModel
#from gensim.models import LdaModel
#from gensim.models import TfidfModel
import myconfig
import src
from src import myjieba_posseg
from myjieba_posseg import posseg as posseg
import user_prob
from user_prob.test import new_cut
import re
import numpy as np
import pdb
import codecs
import function_ultra.trie_tree as trie_tree
import function_ultra.utils as utils
#DEBUG = False
DICT = False  # or True
DEBUG = True
JIEBACUT= True
global r_cnt
global w_cnt
r_cnt = 1
w_cnt = 0
standard_addr = {}
load_json = lambda x:json.load(open(x,'r',encoding='utf-8'))
standard_addr = load_json(myconfig.STDADD)
standard_dct = {}
ks = []
vs = []
for item in standard_addr['RECORDS']:
v = item['name']
k = item['type']
ks.append(k)
vs.append(v)
keys = list(set(ks))
values = list(set(vs))
level_keys = ["省","市","区","社区","村居委会","街路巷名","自然村组",\
"门牌号","小区名","建筑物名称","组团名称","栋号",\
"单元号","楼层","户室号","sent","rw"]
out_keys = ["省","市","区","社区","村居委会","街路巷名","自然村组","门牌号","小区名","组团名称","栋号","单元号","楼层","户室号"]
global global_cnt
def read_standard_data(self,docpath='standard_address.json'):
'''
read word from standard dict, return key words dict
'''
standard_kvs = {}
standard_num = {}
fl = open(docpath,'r',encoding='utf-8')
info = json.load(fl)
return info  # return the standard address library (NOTE: this early return leaves the code below unreachable)
kvs_lst = info.get('RECORDS','')
for item in kvs_lst:
k = item.get('name','')
v = len(standard_kvs)
standard_kvs[k] = v
for k in standard_kvs:
_k = standard_kvs[k]
_v = k
standard_num[_k] = _v
return standard_kvs, standard_num
def gen_word_tree(filename=myconfig.STDTXTPATH,sav_file=myconfig.MY_WORD):
print('\n>gen_address_tree start')
my_tree = trie_tree.Trie()
df = open(filename,'r')
lines = df.readlines() #pd.read_csv(filename)
print(len(lines))
for sent in lines:
words = sent.split('/')
for word in words:
my_tree.insert(word)
utils.save_var(my_tree,sav_file)
print('\n>my address tree save ok')
return my_tree
def gen_std_tree(filename=myconfig.STDTXTPATH,sav_file=myconfig.MY_TREE,delimeter='/'):
print('\n>gen_std_tree start')
my_tree = trie_tree.Trie()
df = open(filename,'r')
lines = df.readlines() #pd.read_csv(filename)
for sent in lines:
words = sent.split(delimeter)
my_tree.insert(words)
utils.save_var(my_tree,sav_file)
print('\n>my std tree save ok')
return my_tree
def remove_nan(item):
clritem = []
for node in item:
if 'nan' in node:
continue
clritem.append(node)
return clritem
def gen_std_tree_from_dataframe(data_src, sav_file=myconfig.MY_TREE):
# build the standard address tree from a DataFrame
print('\n>gen_std_tree_from_dataframe start')
my_tree = trie_tree.Trie()
for item in data_src:
clritem = remove_nan(item)
print(clritem)
pdb.set_trace()
my_tree.part_insert(my_tree.root,clritem)
utils.save_var(my_tree,sav_file)
print('\n>gen_std_tree_from_dataframe ready and save finish')
return myconfig.SUCCESS
def gen_address_tree(filename=myconfig.STDTXTPATH,sav_file=myconfig.MY_TREE):
print('\n>gen_address_tree start')
my_tree = trie_tree.Trie()
df = open(filename,'r')
lines = df.readlines() #pd.read_csv(filename)
for sent in lines:
my_tree.insert(sent)
utils.save_var(my_tree,sav_file)
print('\n>my address tree save ok')
return my_tree
def gen_zhengzhou_tree(dirname=myconfig.ZZ_STD_ADD,sav_file=myconfig.zhengzhou_std_word,sav_file_2=myconfig.zhengzhou_std_tree):
addr_kv_rec = open("./addr_match.txt",'w+')
print('\n>gen_zhengzhou_tree start')
#pdb.set_trace()
my_tree = trie_tree.Trie()
my_word = trie_tree.Trie()
paths = os.walk(dirname)
sum_lines = []
cnt = 0
for _,_,fs in paths:
for f in fs:
pth = os.path.join(dirname,str(f))
lines = open(pth,'r').readlines()
np.random.shuffle(lines)
#lines = open(pth,'r').readlines()[:myconfig.TRAIN_DATA]
for line in lines:
if not ',' in line:
continue
_line = line.split(',')[1]
line = utils.pre_trans(_line)
addr_kv_rec.write('%s\t%s\n'%(str(line),str(_line)))
cnt+=1
if cnt%10000==1:
print(cnt)
my_tree.insert(line)
my_word.insert(_line)
utils.save_var(my_word,sav_file)
utils.save_var(my_tree,sav_file_2)
print('\n>my address tree save ok')
addr_kv_rec.close()
def load_address_tree(sav_file='./my_tree.pkl'):
my_tree = utils.read_var(sav_file)
return my_tree
#gen_address_tree()
if __name__ == "__time__":
pass
print('')
gen_address_tree(filename='/home/distdev/src/iba/dmp/gongan/gy_addr_normal/pre_data/yyap_address_tree.csv',sav_file='./my_tree.pkl')
|
[
"2364839934@qq.com"
] |
2364839934@qq.com
|
c6d6095b6aecf8907d6dbe353e20a0cf0c58d042
|
cc36d7ba409dfc2c9b7252b3c4efa55ca829adb7
|
/tests/test_split_and_export.py
|
354a7a0d37f2c8667857f6c75d9617afb5048cbd
|
[] |
no_license
|
shinglyu/MusicPupil-tf
|
e09b2615047e9b87caa797fd7108b8ae35b34cf5
|
5ae05dc23fef1f9daf9deecd378adee9353a9e66
|
refs/heads/master
| 2021-05-15T18:10:37.284122
| 2017-12-30T15:34:46
| 2017-12-30T15:34:46
| 107,603,291
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,370
|
py
|
import os
import sys
sys.path.append(
os.path.join(
os.path.dirname(os.path.realpath(__file__)),
'..',
'feature_extractor'
)
)
from unittest.mock import patch, MagicMock
import music21
import csv
import split_and_export
def test_split_train_test():
samples = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h']
# Split in 2 as a group
splitted = split_and_export.split_train_test(samples, int(len(samples)/2))
assert len(splitted) > 1 # More than one way to split
assert len(splitted[0]['training']) > 0
assert len(splitted[0]['testing']) > 0
assert len(splitted[0]['training']) > len(splitted[0]['testing'])
for elem in splitted[0]['testing']:
assert elem not in splitted[0]['training']
def test_export_to_csv():
samples = [
{
"score_features": {
"foo": [1, 2, 3]
},
"perf_features": {
"bar": [7, 8, 9]
}
},
{
"score_features": {
"foo": [4, 5, 6]
},
"perf_features": {
"bar": [10, 11, 12]
}
}
]
split_and_export.export_to_csv(samples, "tests/test_export_training.csv")
with open('tests/test_export_training.csv', newline='') as csvfile:
reader = csv.reader(csvfile, delimiter=',', quotechar='"')
rows = list(reader)
assert rows[0] == ["foo", "bar"]
assert rows[1] == ["1", "7"]
def test_export_all_to_csv():
splits = [
{"training": "training_0", "testing": "testing_0"},
{"training": "training_1", "testing": "testing_1"},
{"training": "training_2", "testing": "testing_2"},
]
with patch("split_and_export.export_to_csv") as mock_export:
split_and_export.export_all_to_csv(splits, "tests/test_export")
mock_export.assert_any_call("testing_0", "tests/test_export_0_testing.csv")
mock_export.assert_any_call("training_0", "tests/test_export_0_training.csv")
mock_export.assert_any_call("testing_1", "tests/test_export_1_testing.csv")
mock_export.assert_any_call("training_1", "tests/test_export_1_training.csv")
mock_export.assert_any_call("testing_2", "tests/test_export_2_testing.csv")
mock_export.assert_any_call("training_2", "tests/test_export_2_training.csv")
|
[
"shing.lyu@gmail.com"
] |
shing.lyu@gmail.com
|
cbf6bc2fa02f3077d4a2e66ac887debcce4bae36
|
aba1d17ddc7d7ad9f49e2d6d87600e9e0387ba14
|
/mi/dataset/driver/flort_kn/stc_imodem/flort_kn__stc_imodem_recovered_driver.py
|
4fe4de3d18ce68d6534b32380e50fd98fe6bab2f
|
[
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
oceanobservatories/mi-instrument
|
3ad880c1366b1a8461fc9085768df0e9ddeb6ef5
|
bdbf01f5614e7188ce19596704794466e5683b30
|
refs/heads/master
| 2023-07-23T07:28:36.091223
| 2023-07-14T15:54:49
| 2023-07-14T15:54:49
| 24,165,325
| 1
| 32
|
BSD-2-Clause
| 2023-07-13T01:39:22
| 2014-09-17T22:53:22
|
Python
|
UTF-8
|
Python
| false
| false
| 877
|
py
|
from mi.dataset.parser.flort_kn__stc_imodem import Flort_kn_stc_imodemParser,Flort_kn_stc_imodemParserDataParticleRecovered
from mi.dataset.dataset_driver import DataSetDriver
from mi.dataset.dataset_parser import DataSetDriverConfigKeys
from mi.core.versioning import version
@version("0.0.2")
def parse(unused, source_file_path, particle_data_handler):
with open(source_file_path,"r") as fil :
parser = Flort_kn_stc_imodemParser({
DataSetDriverConfigKeys.PARTICLE_MODULE: "mi.dataset.parser.flort_kn__stc_imodem",
DataSetDriverConfigKeys.PARTICLE_CLASS: "Flort_kn_stc_imodemParserDataParticleRecovered"},
None,
fil,
lambda state, f: None,
lambda state: None)
driver = DataSetDriver(parser, particle_data_handler)
driver.processFileStream()
return particle_data_handler
|
[
"petercable@gmail.com"
] |
petercable@gmail.com
|
60f501dd33bc408bb5b0ce9ae012cb0765548801
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_144/ch20_2020_03_09_20_17_14_756367.py
|
0730239956a1947237866393bd5dc6de5401f7cc
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 251
|
py
|
distancia = float(input("Digite a distância a percorrer:"))
if distancia <= 200:
    passagem = 0.5 * distancia
else:
    # R$0.50/km for the first 200 km plus R$0.45/km for the excess (assumed fare rule)
    excesso = (distancia - 200) * 0.45
    passagem = 0.5 * 200 + excesso
print("Preço da passagem: R$ %7.2f" % passagem)
|
[
"you@example.com"
] |
you@example.com
|
aa82b974a22240b99dced283997bfed6a235f20a
|
a9063fd669162d4ce0e1d6cd2e35974274851547
|
/swagger_client/models/inline_response20094_site.py
|
60809a21527af5e7d917c54707fe326dad72bc22
|
[] |
no_license
|
rootalley/py-zoom-api
|
9d29a8c750e110f7bd9b65ff7301af27e8518a3d
|
bfebf3aa7b714dcac78be7c0affb9050bbce8641
|
refs/heads/master
| 2022-11-07T14:09:59.134600
| 2020-06-20T18:13:50
| 2020-06-20T18:13:50
| 273,760,906
| 1
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,519
|
py
|
# coding: utf-8
"""
Zoom API
The Zoom API allows developers to safely and securely access information from Zoom. You can use this API to build private services or public applications on the [Zoom App Marketplace](http://marketplace.zoom.us). To learn how to get your credentials and create private/public applications, read our [Authorization Guide](https://marketplace.zoom.us/docs/guides/authorization/credentials). All endpoints are available via `https` and are located at `api.zoom.us/v2/`. For instance you can list all users on an account via `https://api.zoom.us/v2/users/`. # noqa: E501
OpenAPI spec version: 2.0.0
Contact: developersupport@zoom.us
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class InlineResponse20094Site(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'id': 'str',
'name': 'str'
}
attribute_map = {
'id': 'id',
'name': 'name'
}
def __init__(self, id=None, name=None): # noqa: E501
"""InlineResponse20094Site - a model defined in Swagger""" # noqa: E501
self._id = None
self._name = None
self.discriminator = None
if id is not None:
self.id = id
if name is not None:
self.name = name
@property
def id(self):
"""Gets the id of this InlineResponse20094Site. # noqa: E501
Unique Identifier of the [site](https://support.zoom.us/hc/en-us/articles/360020809672-Managing-Multiple-Sites). # noqa: E501
:return: The id of this InlineResponse20094Site. # noqa: E501
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this InlineResponse20094Site.
Unique Identifier of the [site](https://support.zoom.us/hc/en-us/articles/360020809672-Managing-Multiple-Sites). # noqa: E501
:param id: The id of this InlineResponse20094Site. # noqa: E501
:type: str
"""
self._id = id
@property
def name(self):
"""Gets the name of this InlineResponse20094Site. # noqa: E501
Name of the site. # noqa: E501
:return: The name of this InlineResponse20094Site. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this InlineResponse20094Site.
Name of the site. # noqa: E501
:param name: The name of this InlineResponse20094Site. # noqa: E501
:type: str
"""
self._name = name
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(InlineResponse20094Site, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, InlineResponse20094Site):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
[
"github@rootalley.com"
] |
github@rootalley.com
|
134f1ee4050d20ac333a4b35da4222bf51a32243
|
cce0f3939036f536a182d7541b636874cd8247b6
|
/xicam/core/data/bluesky_utils.py
|
f93d436d1f70f7dd0bcf439a14fb2df9fa7ab9e4
|
[
"BSD-3-Clause-LBNL"
] |
permissive
|
Xi-CAM/Xi-cam.core
|
b942ab7935935b4b514cd8593afcfba83ce7b042
|
f993699391439402624934daafe329024165bb0b
|
refs/heads/master
| 2023-08-25T16:16:19.231948
| 2020-05-01T17:28:29
| 2020-05-01T17:28:29
| 111,475,839
| 0
| 0
|
NOASSERTION
| 2020-04-28T22:51:49
| 2017-11-20T23:55:13
|
Python
|
UTF-8
|
Python
| false
| false
| 907
|
py
|
from databroker.core import BlueskyRun
def ndims_from_descriptor(descriptor: dict, field: str):
return len(descriptor['data_keys'][field]['shape']) # NOTE: this doesn't include event dim
def shape_from_descriptor(descriptor: dict, field: str):
return descriptor['data_keys'][field]['shape']
def fields_from_stream(run: BlueskyRun, stream: str):
return fields_from_descriptor(descriptors_from_stream(run, stream))
def descriptors_from_stream(run: BlueskyRun, stream: str):
return run[stream].metadata['descriptors']
def fields_from_descriptor(descriptor):
return list(descriptor['data_keys'].keys())
def streams_from_run(run: BlueskyRun):
return list(run)
def xarray_from_run(run: BlueskyRun, stream: str = None, field: str = None):
data = run.to_dask()
if stream:
data = data[stream]
if field:
data = data[field]
return data
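# Usage sketch (editor's illustration; the run lookup, stream, and field names are hypothetical):
# run = catalog['scan_id']                    # a databroker BlueskyRun
# print(streams_from_run(run))                # e.g. ['primary', 'baseline']
# print(fields_from_stream(run, 'primary'))
# data = xarray_from_run(run, stream='primary', field='detector_image')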
|
[
"ronpandolfi@gmail.com"
] |
ronpandolfi@gmail.com
|
1553d5d277a72ef2274a5f58479348835444fb15
|
c1e31f49a59beb6089328d09040f6f48d2e12cde
|
/lib/python2.7/tests/test_plotants.py
|
7a7cdc4a579d40018e4ad412b42fcc84faf8eb45
|
[
"Python-2.0"
] |
permissive
|
kernsuite-debian/casalite
|
3d81761e0d8ae497f97ea242e98d4357618a7591
|
b620981f14f4ba5b77f347f649cd2c16d498db04
|
refs/heads/master
| 2021-06-22T16:22:51.765703
| 2021-02-25T13:28:05
| 2021-02-25T13:28:05
| 80,822,139
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,517
|
py
|
import os
import string
import sys
import shutil
import unittest
from __main__ import default
from tasks import *
#from taskinit import *
from __casac__ import tableplot
'''
Unit tests for task plotants. It tests the following parameters:
vis: wrong and correct values
figfile: whether the output file is created
'''
tp = tableplot.tableplot()
class plotants_test(unittest.TestCase):
# Input and output names
msfile = 'ic2233_1.ms'
res = None
fig = 'plotantstest.png'
#tp = tableplot.tableplot()
def setUp(self):
self.res = None
default(plotants)
# Switch off the displaying of the GUI
tp.setgui(gui=False)
# It is not necessary to copy it for all tests
if (not os.path.exists(self.msfile)):
datapath = os.environ.get('CASAPATH').split()[0] + '/data/regression/ic2233/'
shutil.copytree(datapath+self.msfile, self.msfile)
def tearDown(self):
if (os.path.exists(self.msfile)):
os.system('rm -rf ' + self.msfile)
os.system('rm -rf ' + self.fig)
# Switch GUI back on
tp.setgui(gui=True)
def test1(self):
'''Test 1: Default parameters'''
self.res = plotants()
self.assertFalse(self.res)
def test2(self):
'''Test 2: Bad input file'''
msfile = 'badfile'
self.res = plotants(vis=msfile)
self.assertFalse(self.res)
def test3(self):
'''Test 3: Good input file and output exists'''
self.res = plotants(vis=self.msfile, figfile=self.fig)
self.assertEqual(self.res,None)
self.assertTrue(os.path.exists(self.fig))
def test4(self):
'''Test 4: Label antenna IDs'''
self.res = plotants(vis=self.msfile, figfile=self.fig, antindex=True)
self.assertEqual(self.res,None)
self.assertTrue(os.path.exists(self.fig))
def test5(self):
'''Test 5: Logarithmic antenna positions'''
self.res = plotants(vis=self.msfile, figfile=self.fig, logpos=True)
self.assertEqual(self.res,None)
self.assertTrue(os.path.exists(self.fig))
def test6(self):
'''Test 6: Exclude antenna positions'''
self.res = plotants(vis=self.msfile, figfile=self.fig,
exclude='1,5,19,14,10,13')
self.assertEqual(self.res,None)
self.assertTrue(os.path.exists(self.fig))
def test7(self):
'''Test 7: checkbaselines'''
self.res = plotants(vis=self.msfile, figfile=self.fig,
checkbaselines=True)
self.assertEqual(self.res,None)
self.assertTrue(os.path.exists(self.fig))
def test8(self):
'''Test 8: exclude checkbaselines'''
# antenna (name) 11 is already excluded by checkbaselines
# (warning)
self.res = plotants(vis=self.msfile, figfile=self.fig,
exclude='11', checkbaselines=True)
self.assertEqual(self.res,None)
self.assertTrue(os.path.exists(self.fig))
def test9(self):
'''Test 9: Title'''
self.res = plotants(vis=self.msfile, figfile=self.fig,
title='IC2233')
self.assertEqual(self.res,None)
self.assertTrue(os.path.exists(self.fig))
def test10(self):
'''Test 10: All arguments'''
self.res = plotants(self.msfile, self.fig, True, True, '1,3,5,7,9',
True, "IC2233")
self.assertEqual(self.res,None)
self.assertTrue(os.path.exists(self.fig))
def suite():
return [plotants_test]
|
[
"gijs@pythonic.nl"
] |
gijs@pythonic.nl
|
a03b0d31c5006e59062ef309a36e5e16b33b6c54
|
90419da201cd4948a27d3612f0b482c68026c96f
|
/sdk/python/pulumi_azure_nextgen/web/v20190801/web_app_private_endpoint_connection.py
|
61880949796fac9550f145ecc6ea0878b1e34616
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
test-wiz-sec/pulumi-azure-nextgen
|
cd4bee5d70cb0d332c04f16bb54e17d016d2adaf
|
20a695af0d020b34b0f1c336e1b69702755174cc
|
refs/heads/master
| 2023-06-08T02:35:52.639773
| 2020-11-06T22:39:06
| 2020-11-06T22:39:06
| 312,993,761
| 0
| 0
|
Apache-2.0
| 2023-06-02T06:47:28
| 2020-11-15T09:04:00
| null |
UTF-8
|
Python
| false
| false
| 6,253
|
py
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
from ._inputs import *
__all__ = ['WebAppPrivateEndpointConnection']
class WebAppPrivateEndpointConnection(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
kind: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
private_endpoint_connection_name: Optional[pulumi.Input[str]] = None,
private_link_service_connection_state: Optional[pulumi.Input[pulumi.InputType['PrivateLinkConnectionStateArgs']]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
Private Endpoint Connection ARM resource.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] kind: Kind of resource.
:param pulumi.Input[str] name: Name of the site.
:param pulumi.Input[pulumi.InputType['PrivateLinkConnectionStateArgs']] private_link_service_connection_state: The state of a private link connection
:param pulumi.Input[str] resource_group_name: Name of the resource group to which the resource belongs.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['kind'] = kind
if name is None:
raise TypeError("Missing required property 'name'")
__props__['name'] = name
if private_endpoint_connection_name is None:
raise TypeError("Missing required property 'private_endpoint_connection_name'")
__props__['private_endpoint_connection_name'] = private_endpoint_connection_name
__props__['private_link_service_connection_state'] = private_link_service_connection_state
if resource_group_name is None:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
__props__['private_endpoint'] = None
__props__['provisioning_state'] = None
__props__['type'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:web/latest:WebAppPrivateEndpointConnection"), pulumi.Alias(type_="azure-nextgen:web/v20200601:WebAppPrivateEndpointConnection")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(WebAppPrivateEndpointConnection, __self__).__init__(
'azure-nextgen:web/v20190801:WebAppPrivateEndpointConnection',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'WebAppPrivateEndpointConnection':
"""
Get an existing WebAppPrivateEndpointConnection resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
return WebAppPrivateEndpointConnection(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def kind(self) -> pulumi.Output[Optional[str]]:
"""
Kind of resource.
"""
return pulumi.get(self, "kind")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Resource Name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="privateEndpoint")
def private_endpoint(self) -> pulumi.Output[Optional['outputs.ArmIdWrapperResponse']]:
"""
PrivateEndpoint of a remote private endpoint connection
"""
return pulumi.get(self, "private_endpoint")
@property
@pulumi.getter(name="privateLinkServiceConnectionState")
def private_link_service_connection_state(self) -> pulumi.Output[Optional['outputs.PrivateLinkConnectionStateResponse']]:
"""
The state of a private link connection
"""
return pulumi.get(self, "private_link_service_connection_state")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[str]:
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Resource type.
"""
return pulumi.get(self, "type")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
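# Usage sketch (editor's illustration; resource, site, and group names are hypothetical):
# conn = WebAppPrivateEndpointConnection(
#     'example-connection',
#     name='my-web-app',
#     private_endpoint_connection_name='my-pec',
#     resource_group_name='my-rg')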
|
[
"public@paulstack.co.uk"
] |
public@paulstack.co.uk
|
12bfd823bba8659e67c22af6c2bd0062937a4c5f
|
362224f8a23387e8b369b02a6ff8690c200a2bce
|
/django/django_orm/courses/courses/settings.py
|
d11c36f0bb97c266c1f7db84060415fcde1a5412
|
[] |
no_license
|
Helenyixuanwang/python_stack
|
ac94c7c532655bf47592a8453738daac10f220ad
|
97fbc77e3971b5df1fe3e79652b294facf8d6cee
|
refs/heads/main
| 2023-06-11T02:17:27.277551
| 2021-06-21T17:01:09
| 2021-06-21T17:01:09
| 364,336,066
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,353
|
py
|
"""
Django settings for courses project.
Generated by 'django-admin startproject' using Django 2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '_1&w+4p@b%g)to7vg0oi5+wjevbh58q0l1k3ieg9m7!lsjak@e'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'courses_app',
'django_extensions',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'debug_toolbar',  # newly added while following the Django extras material
]
MIDDLEWARE = [
'debug_toolbar.middleware.DebugToolbarMiddleware',  # newly added May 19, 2021
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'courses.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'courses.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
GRAPH_MODELS = {
'all_applications': True,
'group_models': True,
}
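# Editor's note: with django_extensions installed, the GRAPH_MODELS settings above
# drive the graph_models command, e.g.:
#   python manage.py graph_models -a -g -o models.png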
|
[
"wangyixuan@msn.com"
] |
wangyixuan@msn.com
|
6df3222d955efd0abe5781c7c48aced830dbed13
|
5dcaf0c31a8362d64134d0dcd9131fb8e827307a
|
/footmark/vpc/router.py
|
9ca6687ef880578501b031e2b61357f50519bf50
|
[
"Apache-2.0"
] |
permissive
|
lixue323/footmark
|
10a94ef97cefdab2264088cda70c937c63b819ec
|
30cbb2f4b61546d530d955079ccbb38f22fa3edb
|
refs/heads/master
| 2020-08-11T21:57:07.782124
| 2019-10-15T16:15:17
| 2019-10-15T16:16:08
| 211,007,645
| 0
| 0
|
Apache-2.0
| 2019-09-26T05:34:05
| 2019-09-26T05:34:05
| null |
UTF-8
|
Python
| false
| false
| 1,532
|
py
|
"""
Represents VPC route tables and route entries
"""
from footmark.vpc.vpcobject import TaggedVPCObject
class RouteTable(TaggedVPCObject):
def __init__(self, connection=None, ):
super(RouteTable, self).__init__(connection)
self.tags = {}
def __repr__(self):
return 'RouteTable:%s' % self.id
def __getattr__(self, name):
if name == 'id':
return self.route_table_id
raise AttributeError
def __setattr__(self, name, value):
if name == 'id':
self.route_table_id = value
super(TaggedVPCObject, self).__setattr__(name, value)
class RouteEntry(TaggedVPCObject):
def __init__(self, connection=None, ):
super(RouteEntry, self).__init__(connection)
self.tags = {}
def __repr__(self):
return 'RouteEntry:%s' % self.destination_cidrblock
def __getattr__(self, name):
if name == 'destination_cidrblock':
return self.destination_cidr_block
if name == 'next_hop_id':
return self.instance_id
if name.startswith('nexthop_'):
return getattr(self, 'next_hop' + name[7:])
raise AttributeError
def __setattr__(self, name, value):
if name == 'destination_cidrblock':
self.destination_cidr_block = value
if name == 'next_hop_id':
self.instance_id = value
if name.startswith('nexthop_'):
setattr(self, 'next_hop' + name[7:], value)
super(TaggedVPCObject, self).__setattr__(name, value)
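# Usage sketch (editor's illustration): the __getattr__/__setattr__ overrides
# alias attribute names, so the names below resolve to the same value:
# entry = RouteEntry()
# entry.next_hop_id = 'i-12345'          # also stored as entry.instance_id
# assert entry.instance_id == 'i-12345'
# assert entry.nexthop_id == 'i-12345'   # 'nexthop_*' forwards to 'next_hop_*'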
|
[
"guimin.hgm@alibaba-inc.com"
] |
guimin.hgm@alibaba-inc.com
|
6d150af553878700d5df20c1eccef683e5acb322
|
c3ffb020314af5894242073c23c7138a9aa6ea6e
|
/Past/Rest/script.py
|
66cc3e50b6f1c8087cc3a27b882438816d74bbb2
|
[] |
no_license
|
mohammedjasam/CleverNator
|
1fa8a54c8dca281696de1f33c4c62d7ab78725a1
|
dd04b975c4caaa201ccdf92df51635213156c920
|
refs/heads/master
| 2020-05-18T15:13:40.667968
| 2017-11-25T17:44:35
| 2017-11-25T17:44:35
| 84,256,689
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,907
|
py
|
"""# Pull the existing Tensorflow Environment
docker run -it gcr.io/tensorflow/tensorflow:latest-devel
# Download the multilabel data from internet to a single folder
# Ex: Place Darth_vader pics folder + Darth_Maul Pics Folder in Star_Wars folder
# Move the multi-label image folder(star_wars) to docker
mv "c:../.../star_wars/" .
# link that folder in the container
docker run -it -v $HOME/data:/data/ gcr.io/tensorflow/tensorflow:latest-devel
docker run -it -v $HOME/dataa:/data/ ci:new
# Go to root
cd ..
# Pull latest tf image
cd tensorflow
git pull
# Train the model using the images
python35 tensorflow/examples/image_retraining/retrain.py \
--bottleneck_dir=/tf_files/bottlenecks \
--how_many_training_steps 500 \
--model_dir=/tf_files/inception \
--output_graph=/tf_files/retrained_graph.pb \
--output_labels=/tf_files/retrained_labels.txt \
--image_dir /tf_files/trainData"""
# The retraining command as run locally (Windows paths):
# C:\Users\Stark\Desktop\CleverNator\KerasBuild\
# python35 retrain.py --bottleneck_dir=C:\Users\Stark\Desktop\CleverNator\KerasBuild\tf_files\bottlenecks --how_many_training_steps 500 --model_dir=C:\Users\Stark\Desktop\CleverNator\KerasBuild\tf_files\inception --output_graph=C:\Users\Stark\Desktop\CleverNator\KerasBuild\tf_files\retrained_graph.pb --output_labels=C:\Users\Stark\Desktop\CleverNator\KerasBuild\tf_files\retrained_labels.txt --image_dir C:\Users\Stark\Desktop\CleverNator\KerasBuild\tf_files\trainData
#
# Go into tf_files and write the python file below:
# cat > classifier.py
# (write the code, then Ctrl+C)
# $ docker commit f6434fa9498e star_wars_classifier:initial
# sha256:d0484f84fbf56d0271c0e35730c2d6ae1f13fb9a06910966380336864b5f2d30
# $ docker run -it -v $HOME/star_wars:/star_wars/ star_wars_classifier:initial
# $ docker commit 4f27d772af7b violent:initial
import tensorflow as tf
import sys
# change this as you see fit
image_path = sys.argv[1]
# Read in the image_data
image_data = tf.gfile.FastGFile(image_path, 'rb').read()
# Loads label file, strips off carriage return
label_lines = [line.rstrip() for line
in tf.gfile.GFile("/tf_files/retrained_labels.txt")]
# Unpersists graph from file
with tf.gfile.FastGFile("/tf_files/retrained_graph.pb", 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
_ = tf.import_graph_def(graph_def, name='')
with tf.Session() as sess:
# Feed the image_data as input to the graph and get first prediction
softmax_tensor = sess.graph.get_tensor_by_name('final_result:0')
predictions = sess.run(softmax_tensor, \
{'DecodeJpeg/contents:0': image_data})
# Sort to show labels of first prediction in order of confidence
top_k = predictions[0].argsort()[-len(predictions[0]):][::-1]
for node_id in top_k:
human_string = label_lines[node_id]
score = predictions[0][node_id]
print('%s (score = %.5f)' % (human_string, score))
|
[
"mnqnd@mst.edu"
] |
mnqnd@mst.edu
|
2e83051cab98c6e966a89981f641b396e0630240
|
efde9197a0a0ea1e11113e79bce87c3ded80573e
|
/hackerRank/cyclic binary string.py
|
00f79a18ec9e5abbffb5f41d0b5d339823f58e5e
|
[] |
no_license
|
dkarthicks27/ML_Database
|
bb370366e7b4d2ad06d992778c02815304a30f2d
|
9f3856b1ac2aead5df4e3ef05e1800b1152f777e
|
refs/heads/master
| 2021-10-27T10:45:47.258344
| 2021-10-24T10:42:55
| 2021-10-24T10:42:55
| 238,627,713
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,092
|
py
|
"""Algorithm: 1011010
So it is actually left rotation
So what we can actually do is that we can probably shift each time convert to decimal and see if there exist a integer log for this number but I don’t know if this method is feasible
Take the input string save it to a variable original
1. Convert it to decimal and then check if there exist a positive log to the base 2 for this number
2. If it exist, store it as current value and also check if its greater than previous value if its replace it as the new value
3. Now check left shift the string and check if it is different from the original, if its different repeat the process else exist.
"""
from math import log2
from copy import deepcopy
def leftShift(string):
    # moves the last character to the front, i.e. rotates the string by one position
    new_string = string[-1] + string[:-1]
    return new_string

def maximumPower(string):
    originals = deepcopy(string)
    print('string: ', string)
    number = int(originals, 2)
    print('number:', number)
    val = log2(number)
    print('val: ', val)
    maximumVal = 0
    if val.is_integer():
        maximumVal = int(val)
    # rotate until we come back around to the original string, tracking the
    # largest integer exponent seen along the way
    string = leftShift(originals)
    while string != originals:
        print('\n')
        print('binary string:', string)
        number = int(string, 2)
        print('decimal value:', number)
        val = log2(number)
        print('val:', val)
        if val.is_integer():
            maximumVal = max(maximumVal, int(val))
            print('maximum_value: ', maximumVal)
        string = leftShift(string)
    print('\n\n\n')
    return maximumVal
print(maximumPower('0011'))
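# A tighter sketch of the same idea (not from the original solution): a number
# n > 0 is a power of two exactly when n & (n - 1) == 0, which avoids
# floating-point log2 entirely. The helper name maximum_power_fast is ours.
def maximum_power_fast(bits):
    best = 0
    for i in range(len(bits)):
        rotated = bits[i:] + bits[:i]        # rotation by i positions
        n = int(rotated, 2)
        if n > 0 and n & (n - 1) == 0:       # exact power-of-two test
            best = max(best, n.bit_length() - 1)
    return best

print(maximum_power_fast('0100'))  # 3: the rotation '1000' is 8 = 2**3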
|
[
"dkarthicks27@gmail.com"
] |
dkarthicks27@gmail.com
|
eb1e71aacc3892c3756d3e6efab1d5dbebcb4e7a
|
4331279865c4b1262179068ba5ac85d8d75123b6
|
/final/home/views/insurance.py
|
ae8d65e45714f7c174c4d5d0273a66627fdbf017
|
[] |
no_license
|
manankshastri/NYmed-Scripts
|
fb4633c19dadfdf982f127454a5dd643ba0f8a8b
|
993af47223ca7cb38a2e9af88a2fc99baa7f3d88
|
refs/heads/master
| 2020-04-07T11:29:01.839909
| 2019-05-16T22:39:50
| 2019-05-16T22:39:50
| 158,328,115
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,010
|
py
|
from django.contrib import messages
from django.contrib.auth import login
from django.contrib.auth.decorators import login_required
from django.db import transaction
from django.db.models import Count
from django.shortcuts import get_object_or_404, redirect, render
from django.urls import reverse_lazy, reverse
from django.utils.decorators import method_decorator
from django.views.generic import (CreateView, ListView, DeleteView, DetailView, UpdateView)
from django.contrib.messages.views import SuccessMessageMixin
from ..decorators import insurance_required
from ..forms import InsuranceSignUpForm
from ..models import Insurance, User, Prescription, Patient
class InsuranceSignUpView(CreateView):
model = User
form_class = InsuranceSignUpForm
template_name = 'registration/signup_form.html'
def get_context_data(self, **kwargs):
kwargs['user_type'] = 'insurance'
return super().get_context_data(**kwargs)
def form_valid(self, form):
user = form.save()
login(self.request, user)
return redirect('insurance:insurance_list')
@login_required
@insurance_required
def InsuranceDetailView(request, pk):
pat_all = Patient.objects.all()
template_name = 'home/insurance/insurance_detail.html'
return render(request, template_name, context = {'pat_all': pat_all},)
@method_decorator([login_required, insurance_required], name='dispatch')
class InsuranceListView(ListView):
model = Insurance
template_name = 'home/insurance/insurance_list.html'
@login_required
@insurance_required
def InsurancePatientBillsView(request, pk):
pat_all = Prescription.objects.all()
template_name = 'home/insurance/insurance_patient.html'
return render(request, template_name, context = {'pat_all': pat_all},)
@method_decorator([login_required, insurance_required], name='dispatch')
class InsuranceBillDetailView(DetailView):
model = Prescription
template_name = 'home/insurance/insurance_bills.html'
|
[
"manank.shastri@gmail.com"
] |
manank.shastri@gmail.com
|
c18989b9fc9e25bf40b4ac083d12d27f4d5d3a0e
|
96b2009e5a3bcaa4a0a6bb699015e5d2d62e3ccc
|
/卷积神经网络+keras/__init__.py
|
c9f26105c126796a86360adc6512fe8d3d8fda08
|
[] |
no_license
|
zlszhonglongshen/NLP
|
612d9f73ca017d387c48b8b3ebae24510dad6732
|
8f373f737b309c7441b516c0d408e43aebacff61
|
refs/heads/master
| 2022-07-24T20:18:12.465840
| 2019-11-27T09:06:57
| 2019-11-27T09:06:57
| 125,830,945
| 0
| 0
| null | 2022-07-15T20:19:20
| 2018-03-19T09:11:40
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 7,572
|
py
|
#coding:utf-8
import os
import numpy as np
np.random.seed(1337)
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.utils.np_utils import to_categorical
from keras.layers import Dense,Input,Flatten
from keras.layers import Conv1D,MaxPooling1D,Embedding
from keras.models import Model
from keras.optimizers import *
from keras.models import Sequential
from keras.layers import merge
import sys
BASE_DIR = 'E:/NLP/卷积神经网络+keras' # the working directory for this script
GLOVE_DIR = BASE_DIR + '/glove.6B/' # change to match your actual directory name
TEXT_DATA_DIR = BASE_DIR + '/news20/20_newsgroup/' # change to match your actual directory name
MAX_SEQUENCE_LENGTH = 1000 # maximum length kept per text; can be shorter for short texts
MAX_NB_WORDS = 20000 # size of the overall vocabulary; can be tuned up or down
EMBEDDING_DIM = 50 # dimensionality of the word vectors; choose to fit your setup
VALIDATION_SPLIT = 0.4 # used here as the test-set fraction (the name literally says validation set)
#first build index mapping words in the embedding set
#to their embedding vector, i.e. build an index from each word to its vector: 'peking' might map to (0.1, 0.32, ..., 0.35, 0.5), and so on.
print('Indexing word vectors...')
embedding_index = {}
f = open(os.path.join(GLOVE_DIR, 'glove.6B.50d.txt'),encoding="utf-8") # read the 50-dimensional word-vector file; the 100-d file or another can be used instead
for line in f:
values = line.split()
word = values[0]
coefs = np.asarray(values[1:],dtype='float32')
embedding_index[word] = coefs
f.close()
print('Found %s word vectors.'%len(embedding_index))
#second prepare text samples and their labels
print('Processing text dataset') # the code below reads in the training and test sets
texts = [] # list storing the training samples
labels_index = {} # dictionary mapping each class (directory) name to a numeric label id
labels = [] # class id of each training sample, e.g. document A belongs to class 1
for name in sorted(os.listdir(TEXT_DATA_DIR)):
path = os.path.join(TEXT_DATA_DIR,name)
if os.path.isdir(path):
label_id = len(labels_index)
labels_index[name] = label_id
for fname in sorted(os.listdir(path)):
if fname.isdigit():
fpath = os.path.join(path,fname)
if sys.version_info<(3,):
f = open(fpath)
else:
f = open(fpath,encoding='latin-1')
texts.append(f.read())
f.close()
labels.append(label_id)
print('Found %s texts.'%len(texts)) # number of training samples
tokenizer = Tokenizer(num_words=MAX_NB_WORDS)
# finally, vectorize the text samples into a 2D integer tensor. The code below turns each text into a sequence of token ids (e.g. 'my text' becomes ['my', 'text'] and then [101, 231]); those ids are later expanded into word vectors, so each text ends up as a 2-D matrix. See the section on combining convolutional networks with word vectors.
tokenizer.fit_on_texts(texts)
sequences = tokenizer.texts_to_sequences(texts)
word_index = tokenizer.word_index
print('Found %s unique tokens.'%len(word_index))
data = pad_sequences(sequences,maxlen=MAX_SEQUENCE_LENGTH)
labels = to_categorical(np.asarray(labels))
print('Shape of data tensor:', data.shape)
print('Shape of label tensor:', labels.shape)
# split the data into a training set and a validation set; the code below actually splits into a training set and a test set (the English term is validation set, but the code was slightly modified)
indices = np.arange(data.shape[0])
np.random.shuffle(indices)
data = data[indices]
labels = labels[indices]
nb_validation_samples = int(VALIDATION_SPLIT * data.shape[0])
x_train = data[:-nb_validation_samples] # training set
y_train = labels[:-nb_validation_samples]# training-set labels
x_val = data[-nb_validation_samples:] # test set (the original English term is validation set)
y_val = labels[-nb_validation_samples:] # test-set labels
print('Preparing embedding matrix.')
# prepare embedding matrix: build a word-vector matrix so that every word has its corresponding vector
nb_words = min(MAX_NB_WORDS,len(word_index))
embedding_matrix = np.zeros((nb_words+1,EMBEDDING_DIM))
for word,i in word_index.items():
if i>MAX_NB_WORDS:
continue
embedding_vector = embedding_index.get(word)
if embedding_vector is not None:
#words not found in embedding index will be all_zeros
embedding_matrix[i] = embedding_vector
# load pre-trained word embeddings into an Embedding layer
# the first layer of the network, the word-embedding layer; pre-trained GloVe vectors are used here, and trainable can be set to False to freeze them
embedding_layer = Embedding(nb_words+1,EMBEDDING_DIM,input_length=MAX_SEQUENCE_LENGTH,weights=[embedding_matrix],trainable=True)
#train a 1D convnet with global maxpooling
# left model: the first conv tower, with a 5x50 convolution window (50 is the embedding dimension)
model_left = Sequential()
# model.add(Input(shape=(MAX_SEQUENCE_LENGTH,), dtype='int32'))
model_left.add(embedding_layer)
model_left.add(Conv1D(128, 5, activation='tanh'))
model_left.add(MaxPooling1D(5))
model_left.add(Conv1D(128, 5, activation='tanh'))
model_left.add(MaxPooling1D(5))
model_left.add(Conv1D(128, 5, activation='tanh'))
model_left.add(MaxPooling1D(35))
model_left.add(Flatten())
# right model: the second conv tower, with a 4x50 convolution window
model_right = Sequential()
model_right.add(embedding_layer)
model_right.add(Conv1D(128, 4, activation='tanh'))
model_right.add(MaxPooling1D(4))
model_right.add(Conv1D(128, 4, activation='tanh'))
model_right.add(MaxPooling1D(4))
model_right.add(Conv1D(128, 4, activation='tanh'))
model_right.add(MaxPooling1D(28))
model_right.add(Flatten())
# third model: the third conv tower, with a 6x50 convolution window
model_3 = Sequential()
model_3.add(embedding_layer)
model_3.add(Conv1D(128, 6, activation='tanh'))
model_3.add(MaxPooling1D(3))
model_3.add(Conv1D(128, 6, activation='tanh'))
model_3.add(MaxPooling1D(3))
model_3.add(Conv1D(128, 6, activation='tanh'))
model_3.add(MaxPooling1D(30))
model_3.add(Flatten())
merged = merge([model_left, model_right, model_3],mode='concat') # concatenate the three towers with different window sizes; any single tower alone also gives decent results, but the structure from the paper is followed here
model = Sequential()
model.add(merged) # add merge
model.add(Dense(128, activation='tanh')) # fully connected layer
model.add(Dense(len(labels_index), activation='softmax')) # softmax: probability of each of the 20 classes
# Adadelta is used as the optimizer here; other methods also work
model.compile(loss='categorical_crossentropy',
optimizer='Adadelta',
metrics=['accuracy'])
# training starts below; nb_epoch is the number of epochs -- more epochs give better results but slower training
model.fit(x_train, y_train, nb_epoch=3)
score = model.evaluate(x_train, y_train, verbose=0) # evaluate on the training set; accuracy is about 99%
print('train score:', score[0])
print('train accuracy:', score[1])
score = model.evaluate(x_val, y_val, verbose=0) # evaluate on the test set; accuracy is about 97%, improving with more epochs
print('Test score:', score[0])
print('Test accuracy:', score[1])
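# Note: `merge` comes from very old Keras and was removed in Keras 2. Below is
# a minimal functional-API sketch of the same three-tower architecture, left
# commented out since it assumes Keras 2+ (it reuses embedding_layer,
# MAX_SEQUENCE_LENGTH and labels_index from above; pool sizes mirror the towers):
#
# from keras.layers import Input, Conv1D, MaxPooling1D, Flatten, Dense, concatenate
# from keras.models import Model
#
# inp = Input(shape=(MAX_SEQUENCE_LENGTH,), dtype='int32')
# emb = embedding_layer(inp)
#
# def tower(x, window, pool_sizes):
#     # one Conv1D/MaxPooling1D pair per pool size, as in the Sequential towers
#     for p in pool_sizes:
#         x = Conv1D(128, window, activation='tanh')(x)
#         x = MaxPooling1D(p)(x)
#     return Flatten()(x)
#
# merged2 = concatenate([tower(emb, 5, [5, 5, 35]),
#                        tower(emb, 4, [4, 4, 28]),
#                        tower(emb, 6, [3, 3, 30])])
# out = Dense(128, activation='tanh')(merged2)
# out = Dense(len(labels_index), activation='softmax')(out)
# model2 = Model(inp, out)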
|
[
"593956670@qq.com"
] |
593956670@qq.com
|
0e7c3b828694480c0b910383edc2dc5f6294ab81
|
a728a685fa841388da0d27c8d596ce2178a60ad0
|
/app/core/tests/test_models.py
|
55151200fda78a0faad3fa52acf148c40fe6a526
|
[
"MIT"
] |
permissive
|
aikinyi/recipe-app-api
|
bd3c037acf650a09cdae35497c8e62b4988ad454
|
419ab18f715f66d044af125680ce3417f7af61f4
|
refs/heads/main
| 2023-03-19T13:46:50.341555
| 2021-03-20T22:39:36
| 2021-03-20T22:39:36
| 321,140,635
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,138
|
py
|
from django.test import TestCase
from django.contrib.auth import get_user_model
from core import models
# Helper functions
def sample_user(email='test@gmail.com', password='test123456'):
return get_user_model().objects.create_user(email, password)
class ModelTest(TestCase):
"""
Creating Model TDD
"""
def test_create_user(self):
"""
Creating test user TDD function
"""
email = 'aikinyiltd@gmail.com'
password = '123456'
user = get_user_model().objects.create_user(
email=email,
password=password,
)
# Asserting the password and email
self.assertEqual(user.email, email)
self.assertTrue(user.check_password(password))
def test_normalize_email(self):
"""
TDD for normalizing email
"""
email = 'aikinyiltd@GMAIL.COM'
user = get_user_model().objects.create_user(
email, 'aikinyiltd',
)
# Assertion on email normalization
self.assertEqual(user.email, email.lower())
def test_validate_user_email(self):
"""
Validating user email
"""
with self.assertRaises(ValueError):
get_user_model().objects.create_user(None, 'email address here')
def test_create_superuser(self):
"""
Creating superuser
"""
user = get_user_model().objects.create_superuser(
'aikinyiltd@gmail.com',
'123abdcd'
)
self.assertTrue(user.is_superuser)
self.assertTrue(user.is_staff)
def test_tag_str(self):
"""
Creating TDD for testing tag MODEL
"""
tag = models.Tag.objects.create(
user=sample_user(),
name='Abdul'
)
self.assertEqual(str(tag), tag.name)
def test_ingredient_str(self):
"""
TDD for testing creation of new ingredient
"""
ingredient = models.Ingredient.objects.create(
user=sample_user(),
name='Cucumber'
)
self.assertEqual(str(ingredient), ingredient.name)
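# Usage sketch: these tests run through Django's test runner, e.g.
#   python manage.py test core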
|
[
"learntoprogress@yahoo.com"
] |
learntoprogress@yahoo.com
|
cd604accecbe1e3a174eb64d58aa50cb702a0acf
|
26771494974942f4ab18d2cd8247506c344e1d14
|
/895-maximumFrequencyStack.py
|
4a40053b7ed6952b9019de75037801c0192ff639
|
[] |
no_license
|
wangyunpengbio/LeetCode
|
9f4c6076e067c5e847d662679483f737d40e8ca5
|
cec1fd11fe43177abb2d4236782c0f116e6e8bce
|
refs/heads/master
| 2020-04-29T22:28:25.899420
| 2020-04-03T07:37:26
| 2020-04-03T07:37:26
| 176,448,957
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,062
|
py
|
class FreqStack:
# Times out (too slow): pop scans the whole stack
def __init__(self):
from collections import defaultdict
self.stack = []
self.dic = defaultdict(int)
self.maxFrequency = 0
def push(self, x: int) -> None:
self.stack.append(x)
self.dic[x] = self.dic[x] + 1
self.calculateMaxFrequency()
def pop(self) -> int:
# print(self.stack,self.dic,self.maxFrequency)
for i in range(len(self.stack)-1,-1,-1):
# print(self.stack[i])
if self.dic[self.stack[i]] == self.maxFrequency:
self.dic[self.stack[i]] = self.dic[self.stack[i]] - 1
item = self.stack.pop(i)
break
self.calculateMaxFrequency()
return item
def calculateMaxFrequency(self):
self.maxFrequency = 0
for key,value in self.dic.items():
if value > self.maxFrequency:
self.maxFrequency = value
# Your FreqStack object will be instantiated and called as such:
# obj = FreqStack()
# obj.push(x)
# param_2 = obj.pop()
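# For reference, a sketch of the standard O(1) approach -- a value->frequency
# map plus one stack per frequency level -- which avoids the linear scan in
# pop() above. This is the usual group-by-frequency technique, not code from
# the original submission.
from collections import defaultdict

class FreqStackFast:
    def __init__(self):
        self.freq = defaultdict(int)    # value -> current frequency
        self.group = defaultdict(list)  # frequency -> values pushed at that frequency
        self.maxfreq = 0

    def push(self, x: int) -> None:
        f = self.freq[x] + 1
        self.freq[x] = f
        self.maxfreq = max(self.maxfreq, f)
        self.group[f].append(x)

    def pop(self) -> int:
        x = self.group[self.maxfreq].pop()  # most recent among the most frequent
        self.freq[x] -= 1
        if not self.group[self.maxfreq]:
            self.maxfreq -= 1
        return x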
|
[
"wangyunpeng_bio@qq.com"
] |
wangyunpeng_bio@qq.com
|
bed170e3a61e169e68a386884050efbff4067342
|
52b5773617a1b972a905de4d692540d26ff74926
|
/.history/labels_20200908183820.py
|
429c534d5ee867e57cc47bc486a667f5c91d2405
|
[] |
no_license
|
MaryanneNjeri/pythonModules
|
56f54bf098ae58ea069bf33f11ae94fa8eedcabc
|
f4e56b1e4dda2349267af634a46f6b9df6686020
|
refs/heads/master
| 2022-12-16T02:59:19.896129
| 2020-09-11T12:05:22
| 2020-09-11T12:05:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 135
|
py
|
def labels(S):
    # plausible completion: greedy "partition labels" logic (the original file breaks off here)
    if len(S) == 0:
        return 0
    output_arr = []
    last_indices = {c: i for i, c in enumerate(S)}  # last occurrence of each character
    start = end = 0
    for i in range(len(S)):
        end = max(end, last_indices[S[i]])
        if i == end:  # no later character forces this partition to grow
            output_arr.append(end - start + 1)
            start = i + 1
    return output_arr
[
"mary.jereh@gmail.com"
] |
mary.jereh@gmail.com
|
4c374d623b41f4b08ccaf0d7c3dc45adefcbee20
|
233928d206e13e068cf8cb5ff7888c9a2d84ad61
|
/BOJ/BOJ_2920_음계.py
|
d99e9a4bb4060c1a3c802597873370a6c6437450
|
[] |
no_license
|
Jinwoongma/Algorithm
|
7f6daa2d3c2c361059c09fb4fe287b1cce4863e2
|
78803f4572f1416451a9f4f31f53b7d653f74d4a
|
refs/heads/master
| 2022-10-07T22:53:20.333329
| 2020-06-07T13:27:47
| 2020-06-07T13:27:47
| 237,114,107
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 182
|
py
|
data = list(map(int, input().split()))
if data == list(range(1, 9)):
print('ascending')
elif data == list(range(8, 0, -1)):
print('descending')
else:
print('mixed')
|
[
"jinwoongma@gmail.com"
] |
jinwoongma@gmail.com
|
a16fd8e50b9c997067a44669d605721cbf30a699
|
c82b0584f91a7a130718273ecf72039e2d5f9ab1
|
/polyaxon_deploy/schemas/security_context.py
|
a6ce5946b5aed47c96e476bc8c5a116f43003948
|
[
"MIT"
] |
permissive
|
todokku/polyaxon-deploy
|
7af770dac9fb9797b86e3bf6b5f1da477a751ba0
|
77828e028670c43cc74704a4d7b9ec2e661e10a4
|
refs/heads/master
| 2021-02-15T16:02:13.468664
| 2020-03-04T09:37:06
| 2020-03-04T09:37:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,137
|
py
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
from marshmallow import ValidationError, fields, validates_schema
from polyaxon_deploy.schemas.base import BaseConfig, BaseSchema
def validate_security_context(user, group):
if any([user, group]) and not all([user, group]):
raise ValidationError(
"Security context requires both `user` and `group` or none.")
class SecurityContextSchema(BaseSchema):
enabled = fields.Bool(allow_none=True)
user = fields.Int(allow_none=True)
group = fields.Int(allow_none=True)
@staticmethod
def schema_config():
return SecurityContextConfig
@validates_schema
def validate_security_context(self, data):
validate_security_context(data.get('user'), data.get('group'))
class SecurityContextConfig(BaseConfig):
SCHEMA = SecurityContextSchema
REDUCED_ATTRIBUTES = ['enabled', 'user', 'group']
def __init__(self, enabled=None, user=None, group=None):
validate_security_context(user, group)
self.enabled = enabled
self.user = user
self.group = group
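# Quick usage sketch (hypothetical values): supplying only one of user/group
# trips the validator, which requires both or neither.
if __name__ == '__main__':
    SecurityContextConfig(enabled=True, user=1000, group=1000)  # valid
    try:
        SecurityContextConfig(enabled=True, user=1000)  # missing group
    except ValidationError as err:
        print(err)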
|
[
"mouradmourafiq@gmail.com"
] |
mouradmourafiq@gmail.com
|
015f28cff9057185f32b9aa80589b0f4ae92b00a
|
b1a7fce60e8935592d07323222212d132eedb407
|
/Raspi/Confirm.py
|
a4d5142e76c993a17e454a2068f3e4dc046cbad7
|
[] |
no_license
|
Namlitruong/Capstone-ModularRobot
|
d0922030a8ee0af7a06667ea5f333b19e1bbb070
|
e23b07b260a7bfef9a0ef07bb74816cf64cc6a56
|
refs/heads/master
| 2022-12-17T23:07:07.952625
| 2020-08-17T00:41:11
| 2020-08-17T00:41:11
| 273,672,058
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,766
|
py
|
import CANbus
import can
import csv
#############################--INTERRUPT--######################################
import time
import os, signal
import RPi.GPIO as GPIO
GPIO.setmode(GPIO.BCM)
GPIO.setup(13, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
def interrupt_handler(channel):
ID = os.getppid()
print(ID)
pid = os.popen("ps aux | grep 'python3 Confirm.py' | awk '{print $2}'").readlines()
print ("Length: ", len(pid))
for i in range (len(pid)):
print (pid[i])
os.system ('sudo kill -9 '+ pid[i])
print("####################################")
GPIO.add_event_detect(13, GPIO.RISING,
callback=interrupt_handler,
bouncetime=500)
###################################################################################
actuatorID = []
sensorID = []
def wriToFile (aID, sID):
f = open ('config.csv', 'w')
with f:
writer = csv.writer(f, delimiter = ";")
writer.writerow (aID)
writer.writerow (sID)
def classifier (msg):
subID = 0
mType = 0
if (msg.arbitration_id == 0x1A0):
print ("Module detected !!!")
subID = 0x1A0
mType = 'A'
elif (msg.arbitration_id == 0x1F0):
#print ("Sensor module detected !!!")
subID = 0x1F0
mType = 'S'
return subID, mType
def searchValidID (IDlist, tempModule):
for i in range (1, 16):
flag = False
tempModule.ID = tempModule.ID + 1
if (len(IDlist) == 0):
break
for j in range (len(IDlist)):
if (IDlist[j].ID == tempModule.ID):
flag = True
break
if (flag == False and j+1 == len(IDlist)):
break
IDlist.append (tempModule)
print ("Assign new ID: ", hex(tempModule.ID))
return tempModule.ID
def verifyID (IDlist):
activeList = []
for i in range (len(IDlist)):
while (True):
CANbus.send((IDlist[i].ID - 0x100), [0x00])
msg = CANbus.receiveNonBlocking(0.1)
if (IDlist[i].timeout == 5):
break
if (msg == None):
IDlist[i].timeout = IDlist[i].timeout + 1
else:
activeList.append (IDlist[i])
break
return activeList
def printAvailableID (msg, module):
IDlist =[]
print (msg)
for i in range (len(module)):
print (module[i].ID, " ", i)
IDlist.append (module[i].ID)
return IDlist
if __name__ == "__main__":
while (True):
while (True):
print ("Waiting for connecting modules")
msg = CANbus.receive()
tempID, mType = classifier (msg)
if (msg.arbitration_id == tempID):
break
tempModule = CANbus.module(msg.arbitration_id)
if (mType == 'A'):
tempID = searchValidID (actuatorID, tempModule)
CANbus.send (0x0A0, [(tempID - 0x1A0)])
elif (mType == 'S'):
tempID = searchValidID (sensorID, tempModule)
CANbus.send (0x0F0, [(tempID - 0x1F0)])
#CANbus.send (0x0A0, [(tempID - 0x1A0)])
print ("Sending Confirmation", tempID - 0x100)
while (True):
msg = CANbus.receive()
if (msg.arbitration_id == tempID):
break
print ("Confirmation Complete")
#Verify modules
print ("Verifying existing modules")
actuatorID = verifyID (actuatorID)
sensorID = verifyID (sensorID)
aID = printAvailableID ("Available Module: ", actuatorID)
#sID = printAvailableID ("Available Sensor: ", sensorID)
sID = printAvailableID (" ", sensorID)
wriToFile (aID, sID)
|
[
"pi@raspberrypi"
] |
pi@raspberrypi
|
1ae71121fe67533c75e20874fc8ff41f033c1d67
|
a9243f735f6bb113b18aa939898a97725c358a6d
|
/0.16/_downloads/plot_artifacts_detection.py
|
86f915a1f8213e207c582dae54ccbc31f59c58bd
|
[] |
permissive
|
massich/mne-tools.github.io
|
9eaf5edccb4c35831400b03278bb8c2321774ef2
|
95650593ba0eca4ff8257ebcbdf05731038d8d4e
|
refs/heads/master
| 2020-04-07T08:55:46.850530
| 2019-09-24T12:26:02
| 2019-09-24T12:26:02
| 158,233,630
| 0
| 0
|
BSD-3-Clause
| 2018-11-19T14:06:16
| 2018-11-19T14:06:16
| null |
UTF-8
|
Python
| false
| false
| 5,773
|
py
|
"""
Introduction to artifacts and artifact detection
================================================
Since MNE supports the data of many different acquisition systems, the
particular artifacts in your data might behave very differently from the
artifacts you can observe in our tutorials and examples.
Therefore you should be aware of the different approaches to, and the
variability of, the artifact rejection (automatic/manual) procedures described
below. In the end, always visually inspect your data
after artifact rejection or correction.
Background: what is an artifact?
--------------------------------
Artifacts are signal interference that can be
endogenous (biological) or exogenous (environmental).
Typical biological artifacts are head movements, eye blinks
or eye movements, heart beats. The most common environmental
artifact is due to the power line, the so-called *line noise*.
How to handle artifacts?
------------------------
MNE deals with artifacts by first identifying them, and subsequently removing
them. Detection of artifacts can be done visually, or using automatic routines
(or a combination of both). After you know what the artifacts are, you need to
remove them. This can be done by:
- *ignoring* the piece of corrupted data
- *fixing* the corrupted data
For the artifact detection the functions MNE provides depend on whether
your data is continuous (Raw) or epoch-based (Epochs) and depending on
whether your data is stored on disk or already in memory.
Detecting the artifacts without reading the complete data into memory allows
you to work with datasets that are too large to fit in memory all at once.
Detecting the artifacts in continuous data allows you to apply filters
(e.g. a band-pass filter to zoom in on the muscle artifacts on the temporal
channels) without having to worry about edge effects due to the filter
(i.e. filter ringing). Having the data in memory after segmenting/epoching is,
however, a very efficient way of browsing through the data, which helps
with visualization. To conclude, there is no single optimal manner
to detect the artifacts: it just depends on the data properties and your
own preferences.
In this tutorial we show how to detect artifacts visually and automatically.
For how to correct artifacts by rejection see
:ref:`sphx_glr_auto_tutorials_plot_artifacts_correction_rejection.py`.
To discover how to correct certain artifacts by filtering see
:ref:`sphx_glr_auto_tutorials_plot_artifacts_correction_filtering.py`
and to learn how to correct artifacts
with subspace methods like SSP and ICA see
:ref:`sphx_glr_auto_tutorials_plot_artifacts_correction_ssp.py`
and :ref:`sphx_glr_auto_tutorials_plot_artifacts_correction_ica.py`.
Artifacts Detection
-------------------
This tutorial discusses a couple of major artifacts that most analyses
have to deal with and demonstrates how to detect them.
"""
import numpy as np
import mne
from mne.datasets import sample
from mne.preprocessing import create_ecg_epochs, create_eog_epochs
# getting some data ready
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
raw = mne.io.read_raw_fif(raw_fname, preload=True)
###############################################################################
# Low frequency drifts and line noise
(raw.copy().pick_types(meg='mag')
.del_proj(0)
.plot(duration=60, n_channels=100, remove_dc=False))
###############################################################################
# we see high-amplitude undulations at low frequencies, spanning tens of
# seconds
raw.plot_psd(tmax=np.inf, fmax=250)
###############################################################################
# On MEG sensors we see narrow frequency peaks at 60, 120, 180, 240 Hz,
# related to line noise.
# But also some high amplitude signals between 25 and 32 Hz, hinting at other
# biological artifacts such as ECG. These can be most easily detected in the
# time domain using MNE helper functions
#
# See :ref:`sphx_glr_auto_tutorials_plot_artifacts_correction_filtering.py`.
###############################################################################
# ECG
# ---
#
# finds ECG events, creates epochs, averages and plots
average_ecg = create_ecg_epochs(raw).average()
print('We found %i ECG events' % average_ecg.nave)
joint_kwargs = dict(ts_args=dict(time_unit='s'),
topomap_args=dict(time_unit='s'))
average_ecg.plot_joint(**joint_kwargs)
###############################################################################
# we can see typical time courses and non-dipolar topographies.
# Note the order of magnitude of the average artifact-related signal and
# compare this to what you observe for brain signals
###############################################################################
# EOG
# ---
average_eog = create_eog_epochs(raw).average()
print('We found %i EOG events' % average_eog.nave)
average_eog.plot_joint(**joint_kwargs)
###############################################################################
# Knowing these artifact patterns is of paramount importance when
# judging the quality of artifact removal techniques such as SSP or ICA.
# As a rule of thumb you need artifact amplitudes orders of magnitude higher
# than your signal of interest and you need a few such events in order
# to find decompositions that allow you to estimate and remove patterns related
# to artifacts.
#
# Consider the following tutorials for correcting this class of artifacts:
# - :ref:`sphx_glr_auto_tutorials_plot_artifacts_correction_filtering.py`
# - :ref:`sphx_glr_auto_tutorials_plot_artifacts_correction_ica.py`
# - :ref:`sphx_glr_auto_tutorials_plot_artifacts_correction_ssp.py`
|
[
"larson.eric.d@gmail.com"
] |
larson.eric.d@gmail.com
|
64cbbf12cccecdd79098ee784933598a826d5869
|
b9f7c7a87292c1a9c231ce89933ae9d4bc51f487
|
/src/sst/elements/simpleElementExample/tests/basicStatistics0.py
|
3ea5c138cfc1b3558768044804877e0a4e49d5e9
|
[
"BSD-3-Clause"
] |
permissive
|
sstsimulator/sst-elements
|
3a8db475a7a6cbd4c2a5d737c32718752da9797a
|
68cdb3ac843750705805653b3fdcd4b015e84089
|
refs/heads/master
| 2023-08-17T03:30:24.145168
| 2023-08-16T13:58:07
| 2023-08-16T13:58:07
| 43,475,440
| 85
| 145
|
NOASSERTION
| 2023-09-12T13:59:11
| 2015-10-01T02:57:18
|
C++
|
UTF-8
|
Python
| false
| false
| 3,054
|
py
|
# Import the SST module
import sst
# The basicStatisticsX.py scripts demonstrate user-side configuration of statistics.
# Each one focuses on a different aspect of user-side configuration
#
# This example demonstrates:
# 1. Default output behavior (reporting statistics at the end of simulation)
# 2. Various output formats for statistics
#
# This component has no links and SST will produce a warning because that is an unusual configuration
# that often points to a mis-configuration. For this simulation, the warning can be ignored.
#
# Relevant code:
# simpleElementExample/basicStatistics.h
# simpleElementExample/basicStatistics.cc
# simpleElementExample/basicEvent.h
#
# Output:
# simpleElementExample/tests/refFiles/basicStatistics0.out
# simpleElementExample/tests/refFiles/basicStatistics0.csv
#
### Create two components (to compare different components' output in the CSV file)
component0 = sst.Component("StatisticComponent0", "simpleElementExample.basicStatistics")
component1 = sst.Component("StatisticComponent1", "simpleElementExample.basicStatistics")
### Parameterize the components.
# Run 'sst-info simpleElementExample.basicStatistics' at the command line
# to see parameter documentation
params0 = {
"marsagliaZ" : 438, # Seed for Marsaglia RNG
"marsagliaW" : 9375794, # Seed for Marsaglia RNG
"mersenne" : 102485, # Seed for Mersenne RNG
"run_cycles" : 1000, # Number of cycles to run for
"subids" : 3 # Number of SUBID_statistic instances
}
component0.addParams(params0)
params1 = {
"marsagliaZ" : 957537, # Seed for Marsaglia RNG
"marsagliaW" : 5857, # Seed for Marsaglia RNG
"mersenne" : 860, # Seed for Mersenne RNG
"run_cycles" : 1200, # Number of cycles to run for
"subids" : 6 # Number of SUBID_statistic instances
}
component1.addParams(params1)
### Enable statistics
## Limit the verbosity of statistics to any with a load level from 0-4
# This component's statistics range from 1-4 (see sst-info)
sst.setStatisticLoadLevel(4)
## Determine where statistics should be sent. By default this script uses CSV, other options are
# commented out below. Output locations are case-insensitive (e.g., statOutputCSV = statoutputcsv).
# Default: Output to CSV. Filename and separator can be specified
sst.setStatisticOutput("sst.statOutputCSV", { "filepath" : "./basicStatistics0.csv", "separator" : "," } )
# Option: Output to the terminal
#sst.setStatisticOutput("sst.statoutputconsole")
# Option: Output to a text file
#sst.setStatisticOutput("sst.statOutputTXT", { "filepath" : "./basicStatistics0.txt" } )
# Option: Output to HDF5. Requires sst-core to be configured with the HDF5 library.
#sst.setStatisticOutput("sst.statoutputhdf5")
# Option: Output to JSON
#sst.setStatisticOutput("sst.statOutputJSON", { "filepath" : "./basicStatistics0.json" } )
## Enable statistics on the components
sst.enableAllStatisticsForComponentType("simpleElementExample.basicStatistics")
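# Usage sketch (assumption: invoked through the sst launcher from this directory):
#   sst basicStatistics0.py
# which should write basicStatistics0.csv next to the script.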
|
[
"grvosku@sandia.gov"
] |
grvosku@sandia.gov
|
2fd1b907e6eff215b937433a3f361834b3dd96ec
|
a355b16b9b4cebdd39beb69a6c5aa4e175ae52f6
|
/phytosanitary/urls/links.py
|
8d16c92f08f546895ad6e4779cd0a8695434b8ee
|
[] |
no_license
|
hypertexthero/Phytosanitary
|
e2ba31116b432a8623b332e53a390ff31c24fc10
|
4f001436c90de7a64649e82089e577af6981b793
|
refs/heads/master
| 2016-09-05T09:47:01.448846
| 2012-11-28T16:34:03
| 2012-11-28T16:34:03
| 3,460,559
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 807
|
py
|
from django.conf.urls.defaults import *
from phytosanitary.models import Link
link_info_dict = {
'queryset': Link.objects.all(),
'date_field': 'pub_date',
}
urlpatterns = patterns('django.views.generic.date_based',
(r'^$', 'archive_index', link_info_dict, 'phytosanitary_link_archive_index'),
(r'^(?P<year>\d{4})/$', 'archive_year', link_info_dict, 'phytosanitary_link_archive_year'),
(r'^(?P<year>\d{4})/(?P<month>\w{3})/$', 'archive_month', link_info_dict, 'phytosanitary_link_archive_month'),
(r'^(?P<year>\d{4})/(?P<month>\w{3})/(?P<day>\d{2})/$', 'archive_day', link_info_dict, 'phytosanitary_link_archive_day'),
(r'^(?P<year>\d{4})/(?P<month>\w{3})/(?P<day>\d{2})/(?P<slug>[-\w]+)/$', 'object_detail', link_info_dict, 'phytosanitary_link_detail'),
)
|
[
"simon@hypertexthero.com"
] |
simon@hypertexthero.com
|
3160ede5e603262448964d8dc9e3a89b58592466
|
60d5ea4f007d49768d250ef394003f554003e4d0
|
/python/Depth-first Search/111.Minimum Depth of Binary Tree.py
|
28976c05b41b56e4880a2b5192eea9b5868c08e4
|
[] |
no_license
|
EvanJamesMG/Leetcode
|
dd7771beb119ea1250dbb3b147a09053298cd63b
|
fa638c7fda3802e9f4e0751a2c4c084edf09a441
|
refs/heads/master
| 2021-01-10T17:11:10.896393
| 2017-12-01T16:04:44
| 2017-12-01T16:04:44
| 46,968,756
| 5
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,470
|
py
|
# coding=utf-8
# Definitions for a binary-tree node and a singly-linked list node.
class TreeNode(object):
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class ListNode(object):
def __init__(self, x):
self.val = x
self.next = None
'''
Approach:
Recursion. Consider three cases:
1. If the tree is empty, the depth is 0.
2. If the root has only a left subtree or only a right subtree, return that
   subtree's minimum depth + 1.
3. If the root has both a left and a right subtree, return the smaller of the
   two subtrees' minimum depths + 1.
'''
class Solution(object):
def minDepth(self, root):
"""
:type root: TreeNode
:rtype: int
"""
if root == None:
return 0
if root.left == None and root.right != None:
return self.minDepth( root.right ) + 1
if root.left != None and root.right == None:
return self.minDepth( root.left ) + 1
return min( self.minDepth( root.left ), self.minDepth( root.right ) ) + 1
if __name__ == "__main__":
    # Build a small tree: the shortest root-to-leaf path is 3 -> 5, so the
    # minimum depth is 2.
    root = TreeNode(3)
    root.left = TreeNode(5)
    root.right = TreeNode(6)
    root.right.left = TreeNode(7)
    print(Solution().minDepth(root))  # 2
|
[
"Evan123mg@gmail.com"
] |
Evan123mg@gmail.com
|
9ef94e2e4d69efad94f09beea5a420f9acda3202
|
c1654d09c1eccf17d105d31c62bbf4106feb89d8
|
/resolution-mylar.py
|
4d6222a94a7d894fdaa9fbff4e10052cca671b70
|
[] |
no_license
|
piti118/crystal-length-study-for-mu2e
|
142be2f059299c9902706b50d375fda01e651ead
|
a0287d2676fef33c15298caf432b0d5b38443bd1
|
refs/heads/master
| 2016-09-11T09:12:07.118526
| 2012-05-14T05:26:27
| 2012-05-14T05:26:27
| 3,666,252
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,825
|
py
|
# -*- coding: utf-8 -*-
# <nbformat>3</nbformat>
# <codecell>
from root_numpy import *
from dist_fit import *
from cithep import *
from h5py import *
sample='mylar'
# <codecell>
class Hitmap:
def __init__(self,numrow=21,numcol=21):
self.hmap = np.zeros([numrow,numcol])
self.numrow = numrow
self.numcol = numcol
def acc(self,l,k,E):
i,j = self.lk2ij(l,k)
self.hmap[i,j]+=E
def lk2ij(self, l,k):
return l+self.numcol/2,k+self.numrow/2
def sumE(self,cutoff=None):
if cutoff is not None:
return np.sum(np.sum(self.hmap[self.hmap>cutoff]))
else:
return np.sum(np.sum(self.hmap))
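# <codecell>
# Quick usage sketch of Hitmap (the deposited energies below are made up)
hm_demo = Hitmap()
hm_demo.acc(0, 0, 10.0)   # 10 units into the central crystal
hm_demo.acc(1, -1, 2.5)
print hm_demo.sumE()            # 12.5
print hm_demo.sumE(cutoff=5.0)  # 10.0 -- only cells above the threshold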
# <codecell>
hitmap = root2array('%s.root'%sample,'hitmap')
einfo = root2array('%s.root'%sample,'eventinfo')
# <codecell>
# <codecell>
laster = tuple()
thishit = None
result = np.array([],dtype=[('angle',np.double),('E',np.double)])
for hit in hitmap:
runno = hit['runno']
eventno = hit['eventno']
if (runno,eventno) != laster and laster != tuple():
result.resize(len(result)+1)
result[-1]['angle'] = laster[0]*5.
result[-1]['E'] = thishit.sumE()
thishit=None
laster = (runno,eventno)
if thishit is None:
thishit = Hitmap()
thishit.acc(hit['l'],hit['k'],hit['E'])
if thishit is not None:
result.resize(len(result)+1)
result[-1]['angle'] = laster[0]*5.
result[-1]['E'] = thishit.sumE()
thishit=None #take care of the last one
# <codecell>
f = File('%s.hdf5'%sample,'w')
f.create_dataset('result',data=result)
f.close()
# <codecell>
f = File('%s.hdf5'%sample,'r')
tmp = f['result']
result = np.array(tmp)
f.close()
# <codecell>
def my_gau(x,g_mu,g_sigma):
return gaussian(x,g_mu,g_sigma)
# <codecell>
def smear(E):
w = sqrt(1000.*E)  # 1000 photons per MeV
ret = randn(len(E))
ret*=w/1000.
ret+=E
return ret
def doOneFit(E,range=(95.,110.),mean=104.,sigma=1.,n=20.,alpha=0.5,N=80000,
limit_N=(1000,100000),limit_n=(0.1,100.), limit_mean=(90,106), limit_sigma=(0.3,5.),limit_alpha=(0.,5.)):
#eg = Add2Pdf(my_gau,Normalize(crystalball,range))
#describe(eg)
#eg = Normalize(crystalball,range)
eg = Convolve(Normalize(crystalball,range),my_gau,(-2,2),nbins=40)
#eeg = eg
eeg = Extend(eg)
print describe(eeg)
#fit, m = fit_uml(eg,sm,mean=104.5,sigma=1.,n=20.,alpha=0.5, limit_n=(0.1,50.), limit_mean=(90,106), limit_sigma=(0.3,5.),limit_alpha=(0.,2.))
#try_uml(eg,sm,mean=104.,sigma=1.,n=50.,alpha=0.5)
fit,m = None,None
good = False
itry = 0
first = True
while not good and itry<5:
try:
if not first:
mean = 104.5+randn(1)*2.
alpha=0.5+randn(1)*0.2
first =False
fit,m = fit_binpoisson(eeg,E,maxcalls=2000000,bins=100,
mean=mean,sigma=sigma,n=n,alpha=alpha,N=N,g_mu=0.,g_sigma=0.3,
limit_N=limit_N,limit_n=limit_n, limit_mean=limit_mean, limit_sigma=limit_sigma,limit_alpha=limit_alpha,
limit_g_mu=(-1,1),limit_g_sigma=(0.001,0.5),
quiet=False,throw=False)
good = True
except Exception as e:
print e
#raise e
itry+=1
fit.draw(m)
l,h = fwhm_f(eeg,range,m.args)
print m.values
vertical_highlight(l,h)
return fit,m,h,l,eeg
# <codecell>
angles = np.linspace(0,90,19)[:-1]
myresult = {}
# <codecell>
arg = {
0 :{'range':(96.,105.5)},
1 :{'range':(96.,105.5)},
2 :{'range':(96.,105.5)},
3 :{'range':(96.,105.5)},
4 :{'range':(96.,105.5)},
5 :{'range':(96.,105.5)},
6 :{'range':(96.,105.5)},
7 :{'range':(96.,105.5)},
8 :{'range':(96.,105.5)},
9 :{'range':(96.,105.5)},
10:{'range':(96.,105.5)},
11:{'range':(96.,105.5)},
12:{'range':(90.,105.5)},
13:{'range':(90.,105.5)},
14:{'range':(90.,105.5)},
15:{'range':(90.,105.5)},
16:{'range':(80.,105.5)},
17:{'range':(80.,105.5)},
}
for i,angle in enumerate(angles):
if i < 14: continue
myE = result['E'][(result['angle']>(angle-0.1)) & (result['angle']<(angle+0.1))]
figure()
myE = smear(myE)
emin,emax = 101.,105.5
if i in arg:
emin,emax = arg[i]['range']
myE = myE[(myE>emin) & (myE<emax)]
myresult[i] = doOneFit(myE,range=(emin,emax),N=len(myE))
title(str(angle)+' '+str(i))
# <codecell>
#make and save the plot
def make_nice_plot(r):
fig,axs = subplots(3,3,figsize=(20,12))
for i in r:
ii = i%9
row = ii/3
col = ii%3
fit = myresult[i][0]
m = myresult[i][1]
fh,fl = myresult[i][2],myresult[i][3]
fwhm_res = (fh-fl)/2.35
ax=axs[row,col]
sca(ax)
fit.draw(m)
vertical_highlight(fl,fh)
title('%s %d deg'%(sample,5*i))
text(0.5,0.2,r'fwhm/2.35=%3.2f'%(fwhm_res),transform = ax.transAxes)
make_nice_plot(range(9))
savefig('%s_1.pdf'%sample,bbox_inches='tight')
make_nice_plot(range(9,18))
savefig('%s_2.pdf'%sample,bbox_inches='tight')
# <codecell>
fwhm = np.zeros(18)
for i in range(18): fwhm[i]=(myresult[i][2]-myresult[i][3])/2.35
np.save('fwhm_%s.npy'%sample,fwhm)
x = np.array(range(18))*5.
plot(x,fwhm,'xb')
# <codecell>
hist(result['E'],bins=100,range=(100,110),histtype='step');
# <codecell>
a = numpy.array([],dtype=[('a',np.double)])
a
a.resize(len(a)+1)
a.resize(len(a)+1)
a
# <codecell>
gdf = df.groupby(['runno','eventno'])
# <codecell>
for k,v in gdf:
h = Hitmap(10,10)
for i in xrange(len(v)):
h.acc(v.l[i],v.k[i],v.E[i])
print h.hmap
print h.sumE()
break
# <codecell>
h = Hitmap(10,10)
# <codecell>
|
[
"piti118@gmail.com"
] |
piti118@gmail.com
|
cd6a459ece5a08bd23ac75e022e08a981b4e98c4
|
5d09e3b32b0f7dee1147139e5e57822f33dc0f32
|
/lib/authorship_simulate_citations.py
|
dfe00d94e2db5ca746145205494cf1700d1da662
|
[] |
no_license
|
scone-snu/pyflib2
|
cb797f625100d280f6bd3b757795040ca892b1ed
|
bb2ad7d9974903ac8c3b01ac48b4d6ab72d2ac80
|
refs/heads/master
| 2020-03-31T17:37:54.216805
| 2011-05-06T04:43:31
| 2011-05-06T04:43:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,186
|
py
|
import os
import glob
import re
import networkx as nx
import itertools
import matplotlib.pyplot as plt
import pickle
from collections import defaultdict
from PlotFunctions import *
import random
from scipy.stats import gamma
import math
# Variables that can be modified
START_YEAR = 1997 # Year to start simulation from (i.e. start simulation from START_YEAR+1)
NEW_EDGES_PER_YEAR = 1370 # Number of new edges per year
T = 6 # Years to simulate
P = 0.4 # Probability of choosing a neighbor
Q = 0.4 # Probability of choosing at random or closing a triangle, etc.
PREFIX = "ca"
# # Simulate from the single-edge graph
# G = nx.Graph()
# G.add_edge("1","2", weight=1, years=[START_YEAR])
# Simulate from START_YEAR
G = nx.read_edgelist("../data/parsed/authorship_%d.edgelist" % START_YEAR, create_using=nx.Graph(), comments='#', delimiter='|', data=True, encoding='utf-8')
# Load year of first publication for each author
with open("../data/parsed/authorship.year", "r") as f:
first_paper = pickle.load(f)
# Load # of papers each author produces in his/her lifetime
with open("../data/parsed/authorship.count", "r") as f:
num_papers = pickle.load(f)
max_gam = max(gamma.pdf(range(1,12),3,scale=2))
def num_new_nodes(year, author):
# Constant Activity Level
if random.random() < 0.648:
return 1
else:
return 0
def num_papers_dist():
return 4
def num_citations_dist():
return 71
new_num_citations = {}
for t in range(START_YEAR+1,START_YEAR+1+T):
print "Simulating year %d..." % t
# Load # of citations
with open("../data/parsed/citations_%d.count" % t) as f:
num_citations = pickle.load(f)
num_citations.update(new_num_citations)
# Create new edges for existing nodes
print "\t for existing nodes"
for node in G.nodes_iter():
for i in range(0, num_new_nodes(t,node)):
# See if we want to form an edge and set target if we want to
rand = random.random()
target = None
if rand < P:
# Pick a node proportional to edge weight
bins = []
for nbr in G.neighbors(node):
#print node,nbr,G[node][nbr]
mult = max([num_citations[p] for p in G[node][nbr]['papers']])
#clist = [num_citations[p] for p in G[node][nbr]['papers']]
#mult = int(round(float(sum(clist)) / len(clist)))
bins += [nbr] * mult
if len(bins) == 0:
bins = G.neighbors(node)
target = random.choice(bins)
elif rand < P + Q:
# Degree-random
bins = []
for nbr in G.neighbors(node):
for nbr2 in G.neighbors(nbr):
bins += [nbr2]
target = random.choice(bins)
# Form an edge if target is set, don't form self-loops
if target:
#print "Adding edge from %s to %s" % (node,target)
new_paper = "N"+str(t)+"_"+node+"_"+target
num_citations[new_paper] = num_citations_dist()
if G.has_edge(node,target):
G[node][target]['weight'] += 1
G[node][target]['years'].append(t)
G[node][target]['papers'].append(new_paper)
elif node != target:
G.add_edge(node, target, weight=1, years=[t], papers=[new_paper])
# New node additions
print "\t for new nodes"
if len(G.nodes()) > 0:
# Generate bins for preferential attachment
bins = []
for node,degree in G.degree_iter():
bins += [node] * degree
# Add new nodes and connect them to existing nodes using preferential attachment
for i in range(0,NEW_EDGES_PER_YEAR):
new_node = "N"+str(t)+"_"+str(i)
new_paper = "N"+str(t)+"_"+new_node
new_num_citations[new_paper] = num_citations_dist()
first_paper[new_node] = t
num_papers[new_node] = num_papers_dist()
# Pick & connect to a random node
G.add_edge(random.choice(bins), new_node, weight=1, years=[t], papers=[new_paper])
nx.write_edgelist(G, "../data/simulations/%ssim_%d_%d_%f_%f.edgelist" % (PREFIX, START_YEAR, t, P, Q), comments='#', delimiter='|', data=True, encoding='utf-8')
#print G.edges()
# # Uncomment the below to visualize the graph. Might take extremely long to render!
# nx.draw_graphviz(G)
# plt.show()
|
[
"jccccf@gmail.com"
] |
jccccf@gmail.com
|
12519564ac2077f1120fb5cbb0e9bfaf0c9762c4
|
0bb991864bb1c68eb41c40229b2a78adcbbf69c9
|
/python/model_features/statistics.py
|
5f73b2e6b61173784966955ab4a9f0dc70ecff90
|
[] |
no_license
|
kristianeschenburg/Parcellating-connectivity
|
ab78a62a11e549f027a177f57c15924ef6eafb9e
|
19edaba4d923b1d283b182f21dca4f46a0fbd2f6
|
refs/heads/master
| 2020-03-22T13:37:16.801653
| 2018-07-29T18:33:47
| 2018-07-29T18:33:47
| 140,120,191
| 0
| 0
| null | 2018-07-07T22:16:40
| 2018-07-07T22:16:39
| null |
UTF-8
|
Python
| false
| false
| 1,568
|
py
|
import numpy as np
import time
def UpdateStats(stats, t0, curr_lp, max_lp, K, z, c, steps, gt_z, map_z, verbose):
"""
Update diagnostic statistics.
Parameters:
- - - - -
t0 : initial start time
curr_lp : current log-probability of map
max_lp : max log-probability
K : number of clusters
z : current map
c : current parent links
steps : total number of steps taken
gt_z : ground truth map
map_z : maximum a-posterior map
verbose : flag to print status updates
"""
stats['lp'].append(curr_lp)
stats['max_lp'].append(max_lp)
stats['K'].append(K)
stats['z'] = np.row_stack([stats['z'],z])
stats['c'] = np.row_stack([stats['c'],c])
curr_time = time.clock() - t0
stats['times'].append(curr_time)
if verbose:
print('Step: ' + str(steps) + ' Time: ' + str(curr_time) +
' LP: ' + str(curr_lp) + ' K: ' + str(K) + ' MaxLP: ' + str(max_lp))
if np.any(gt_z):
stats['NMI'].append(NMI(gt_z, map_z))
return stats
def NMI(z1, z2):
"""
Compute normalized mutual information between two maps.
Parameters:
- - - - -
z1, z2 : maps to compare
"""
N = len(z1)
assert N == len(z2)
p1 = np.bincount(z1)/N
p1[p1 == 0] = 1
H1 = (-p1*np.log(p1)).sum()
p2 = np.bincount(z2)/N
p2[p2 == 0] = 1
H2 = (-p2*np.log(p2)).sum()
joint = np.histogram2d(z1,z2,[range(0,z1.max()+2), range(0,z2.max()+2)],
normed=True)
joint_p = joint[0]
pdiv = joint_p/np.outer(p1,p2)
pdiv[joint_p == 0] = 1
MI = (joint_p*np.log(pdiv)).sum()
if MI == 0:
NMI = 0
else:
NMI = MI/np.sqrt(H1*H2)
return NMI
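if __name__ == '__main__':
    # Sanity check (illustrative values): identical parcellations give NMI 1.0,
    # and a consistent relabeling is still a perfect match.
    z_a = np.array([0, 0, 1, 1, 2, 2])
    print(NMI(z_a, z_a))                            # 1.0
    print(NMI(z_a, np.array([1, 1, 2, 2, 0, 0])))   # 1.0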
|
[
"keschenb@uw.edu"
] |
keschenb@uw.edu
|
801a2a01933e03fb0f56781ece4a79654cc8788c
|
b72d0900bec98fcee6c725cef035c02ca29bbf1b
|
/Python/100Excersises/1 to 25/25/25.py
|
38dc3ba7dc12908e54d10b12f5a442b5a1ccd3cd
|
[
"MIT"
] |
permissive
|
sugamkarki/NAMI-Year-II-TERM-I-Group_Project
|
68b8808c8607858a313e8b4d601d8d12c6edda2b
|
f0a9a5f219ccbec024eb5316361db3fca46e171c
|
refs/heads/master
| 2023-06-28T19:07:19.330236
| 2021-07-24T03:05:42
| 2021-07-24T03:05:42
| 312,819,148
| 0
| 0
|
MIT
| 2021-07-24T12:45:06
| 2020-11-14T13:08:08
|
Python
|
UTF-8
|
Python
| false
| false
| 163
|
py
|
alphabet=[]
for letters in range(97,123):
alphabet.append(chr(letters))
d=dict(a=alphabet)
for item in d.values():
for alpha in item:
print(alpha)
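# A more direct sketch using the standard library (same output as above):
import string
for letter in string.ascii_lowercase:
    print(letter)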
|
[
"sugamkarki7058@gmail.com"
] |
sugamkarki7058@gmail.com
|
75ed8c814760c96bc4cb333a81523c02f6fce8d5
|
52a4d282f6ecaf3e68d798798099d2286a9daa4f
|
/test_sa.py
|
81104dd1d3c6c5b477f238e92d7d1b4e9c05347a
|
[
"MIT"
] |
permissive
|
bkovitz/FARGish
|
f0d1c05f5caf9901f520c8665d35780502b67dcc
|
3dbf99d44a6e43ae4d9bba32272e0d618ee4aa21
|
refs/heads/master
| 2023-07-10T15:20:57.479172
| 2023-06-25T19:06:33
| 2023-06-25T19:06:33
| 124,162,924
| 5
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,332
|
py
|
# test_sa.py -- Test of spreading activation
import unittest
from pprint import pprint as pp
import inspect
from time import process_time
from dataclasses import dataclass
import operator
from operator import itemgetter
from heapq import nlargest
from typing import Union, List, Tuple, Dict, Set, FrozenSet, Iterable, Any, \
NewType, Type, ClassVar, Sequence, Callable, Hashable
from itertools import chain
import networkx as nx
from Propagator import Propagator, Delta
NodeId = NewType('NodeId', int)
@dataclass
class MyProp(Propagator):
noise: float = 0.0
def make_deltas(self, g, old_d):
#print() #DEBUG
return chain.from_iterable(
self.deltas_from(g, old_d, nodeid)
for nodeid in old_d
)
def deltas_from(self, g, old_d, nodeid) \
-> List[Delta]:
'''Deltas from nodeid to its neighbors.'''
result: List[Delta] = []
nodeid_a = old_d.get(nodeid, 0.0)
for neighborid, edge_d in g.adj[nodeid].items():
weight = edge_d.get('weight', 1.0)
delta = Delta(
neighborid,
weight * nodeid_a,
nodeid
)
result.append(delta)
return result
def min_value(self, g, nodeid):
return 0.0
class Node:
nodeid: NodeId
@dataclass(frozen=True)
class Operator:
func: Callable
name: str
def call(self, *operands: int) -> int:
return self.func(*operands)
def __str__(self):
return self.name
plus = Operator(operator.add, '+')
times = Operator(operator.mul, 'x')
minus = Operator(operator.sub, '-')
@dataclass(frozen=True)
class Before:
'''A feature meaning that .obj was present before the action represented
by the slipnode occurred.'''
obj: Hashable
def __str__(self):
return f'Before({self.obj})'
@dataclass(frozen=True)
class After:
'''A feature meaning that .obj was present after the action represented
by the slipnode occurred.'''
obj: Hashable
def __str__(self):
return f'After({self.obj})'
@dataclass(frozen=True)
class Equation(Node):
operands: Tuple[int, ...]
operator: Operator
result: int
def features(self) -> Iterable[Hashable]:
for operand in self.operands:
yield operand
yield Before(operand)
yield self.operator
yield self.result
yield After(self.result)
#return set(self.operands + (self.operator, self.result, Before
def __str__(self):
expr = f' {self.operator} '.join(str(n) for n in self.operands)
return f'{expr} = {self.result}'
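# Illustrative: Equation((5, 4), plus, 9).features() yields
# 5, Before(5), 4, Before(4), +, 9, After(9); each Equation node gets linked
# to these feature nodes when the slipnet graph is built below.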
class TestSA(unittest.TestCase):
def test_sa(self):
p = MyProp(positive_feedback_rate=0.0)
self.assertEqual(p.noise, 0.0)
g = nx.Graph() # undirected graph
g.add_edge(1, 2, weight=1.0)
g.add_edge(1, 3, weight=1.3)
g.add_node(4)
#print(g.edges[1, 2]['weight'])
#for neighbor in g.adj[1].items():
#print(neighbor)
# Let's give all nodes activation=1.0.
initial_a_dict = dict((nodeid, 1.0) for nodeid in g.nodes)
# Propagate
got: Dict[NodeId, float] = p.propagate(g, initial_a_dict)
self.assertEqual(got, {1: 1.026, 2: 1.0, 3: 1.006, 4: 0.98})
def test_eqns(self):
p = MyProp(positive_feedback_rate=0.0, sigmoid_p=1.5)
def query(g, features, k=10):
activations_in = dict((f, 1.0) for f in features)
activations_out = p.propagate(g, activations_in, num_iterations=10)
tups = [
(node, a)
for (node, a) in activations_out.items()
if isinstance(node, Equation)
]
return nlargest(k, tups, itemgetter(1))
def see(activations_d):
for node, a in sorted(activations_d.items(), key=itemgetter(1)):
print(f'{node!s:20s} {a:0.3f}')
g = nx.Graph()
# Make slipnet: a bipartite graph of Equations and features
for a in range(1, 11):
for b in range(1, 11):
if b >= a:
continue
for operator in [plus, minus, times]:
e = Equation((a, b), operator, operator.call(a, b))
g.add_node(e)
for f in e.features():
g.add_edge(f, e, weight=1.0)
tups = query(g, [4, 5, Before(4), Before(5)], k=3)
self.assertCountEqual(
['5 + 4 = 9', '5 x 4 = 20', '5 - 4 = 1'],
[str(eqn) for (eqn, a) in tups]
)
if __name__ == '__main__':
import matplotlib.pyplot as plt
plt.ion()
p = MyProp(positive_feedback_rate=0.0, sigmoid_p=1.5)
def query(g, features, k=4):
activations_in = dict((f, 1.0) for f in features)
activations_out = p.propagate(g, activations_in, num_iterations=10)
tups = [
(node, a)
for (node, a) in activations_out.items()
if isinstance(node, Equation)
]
return nlargest(k, tups, itemgetter(1))
def see(activations_d):
for node, a in sorted(activations_d.items(), key=itemgetter(1)):
print(f'{node!s:20s} {a:0.3f}')
g = nx.Graph()
for a in range(1, 11):
for b in range(1, 11):
if b >= a:
continue
for operator in [plus, minus, times]:
e = Equation((a, b), operator, operator.call(a, b))
g.add_node(e)
for f in e.features():
g.add_edge(f, e, weight=1.0)
#e1 = Equation((2, 3), plus, plus.call(2, 3))
#print(e1)
# g.add_node(e1)
# for f in e1.features():
# g.add_edge(f, e1, weight=1.0)
# a0 = dict((f, 1.0) for f in [4, 5, Before(4), Before(5)])
# #a0 = dict((f, 1.0) for f in [7, 6, Before(7), Before(6)])
# see(a0)
# print()
#
# start = process_time()
# a1 = p.propagate(g, a0, num_iterations=10)
# end = process_time()
# print(end - start)
# #see(a1)
# print(sum(a1.values()))
es = query(g, [4, 5, Before(4), Before(5)])
pp(es)
#nx.draw(g, with_labels=True, pos=nx.bipartite_layout(g, [n for n in g.nodes if isinstance(n, Equation)]))
#plt.show()
|
[
"bkovitz@indiana.edu"
] |
bkovitz@indiana.edu
|
49d98b69895f2db5dd9fa22267d1e67e92e73d52
|
669196cb7444c699b9c477bd36d76082d534e08a
|
/tests/unit/test_user_email.py
|
c475eef807feb4dd45015fb7490c85ba2be6c329
|
[
"MIT"
] |
permissive
|
tilgovi/pyramid_fullauth
|
d51ad9fabca0ef380f6981c0f62e5c36d8484cba
|
3de2f784e89c2e82104dbe36acbb85597e4fff31
|
refs/heads/master
| 2021-01-24T15:15:28.691347
| 2014-11-02T18:45:05
| 2014-11-02T18:45:05
| 26,466,736
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 894
|
py
|
"""Test email related User methods."""
from pyramid.compat import text_type
from pyramid_fullauth.models import User
NEW_EMAIL = text_type('new@example.com')
def test_set_new_email():
"""
Test User.set_new_email method.
setting new email should result in setting new_email field,
and key used to activate the change.
"""
user = User()
assert user.email_change_key is None
assert user.new_email is None
user.set_new_email(NEW_EMAIL)
assert user.new_email == NEW_EMAIL
assert user.email_change_key
def test_change_email():
"""
Test User.change_email method.
Calling it should copy new email set by set_new_email method
into regular email field.
"""
user = User()
assert not user.email
user.set_new_email(NEW_EMAIL)
user.change_email()
assert not user.email_change_key
assert user.email == NEW_EMAIL
|
[
"fizyk@fizyk.net.pl"
] |
fizyk@fizyk.net.pl
|
775bc8ad2440dec3fa0750bcca10332e6a975a4f
|
f07a42f652f46106dee4749277d41c302e2b7406
|
/Data Set/bug-fixing-4/16a4c177de3f63055c5f0252c3f8ba202175fb41-<start_merge>-bug.py
|
488cafe673b3ea8201fc11c222ab29d021e87ebf
|
[] |
no_license
|
wsgan001/PyFPattern
|
e0fe06341cc5d51b3ad0fe29b84098d140ed54d1
|
cc347e32745f99c0cd95e79a18ddacc4574d7faa
|
refs/heads/main
| 2023-08-25T23:48:26.112133
| 2021-10-23T14:11:22
| 2021-10-23T14:11:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 421
|
py
|
def start_merge(self, project_id, previous_group_ids, new_group_id):
if (not previous_group_ids):
return
state = {
'transaction_id': uuid4().hex,
'project_id': project_id,
'previous_group_ids': previous_group_ids,
'new_group_id': new_group_id,
'datetime': datetime.now(tz=pytz.utc),
}
self._send(project_id, 'merge', extra_data=(state,), asynchronous=False)
|
[
"dg1732004@smail.nju.edu.cn"
] |
dg1732004@smail.nju.edu.cn
|
c1e9f92e53090868a41830a7785c711adfab01bc
|
d9f63d87a9f7b19d5ee60c5f38e9007687df4078
|
/面向对象-类和对象4.py
|
6b8af3e544ed5021e3843f440b94064de10669be
|
[] |
no_license
|
zhouf1234/untitled3
|
4b156046f0fea2c773785cba0486621625004786
|
238c5aaef121f3d716c96290e7e417a9a4a03b4e
|
refs/heads/master
| 2020-05-05T02:36:07.396459
| 2019-04-05T08:27:31
| 2019-04-05T08:27:31
| 179,643,758
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 419
|
py
|
class Person:
school='阳光幼儿园'
def __init__(self):
self.name='丹丹'
p1=Person()
# When an attribute is looked up on an instance, the instance's own attributes are checked first; if absent there, the class attribute is used
print(p1.school) #阳光幼儿园
# After reassigning the class attribute Person.school
Person.school='夏天小学'
print(p1.school) #夏天小学
print()
p2=Person()
print(p2.school) #夏天小学
|
[
"="
] |
=
|
8440e8250bda5ae92abd0501c1219d37a8251790
|
d713770971a0d9e4a77921fa85fd03daf339dd84
|
/business_hardcode/build_project/build_project.py
|
b34832268d919212f956754af2974f20ed2d4dea
|
[
"Apache-2.0"
] |
permissive
|
laashub/laas-soa
|
cf9c0403cb25eedc74326752aaa776f501fac9d0
|
63a5e84b646bf1d857e97ddbbc7c1c487a9dc9e4
|
refs/heads/master
| 2023-01-07T17:44:24.431030
| 2020-11-12T13:35:31
| 2020-11-12T13:35:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,528
|
py
|
"""
构建项目
需要依赖一些数据
构建服务器
源码仓库信息
项目配置信息
"""
import datetime
import json
import os
import time
import traceback
from rest.operate.executor import context
local_executor_root_path = os.path.join(os.getcwd(), "business_hardcode/build_project")
remote_executor_root_path = "/data/tristan/1"  # root directory of the remote executor
# Prepare the local directories
local_executor_data_data_path = os.path.join(local_executor_root_path, "data_data")
context.prepare_local_dirs([local_executor_data_data_path])
# File recording the local data version
local_update_datetime_record_path = local_executor_root_path + "/" + "local_update_datetime_record"
def build_project(executor_data_id, data_data_data):
    """
    Build the project.
    :param executor_data_id:
    :param data_data_data:
    :return:
    """
    # Record global data
    context.global_data.executor_data_id = executor_data_id
    startup_timestamp = int(time.time())
    context.log("Start time: " + str(datetime.datetime.now()))
try:
"""
{'id': 11, 'git_server': '1', 'project_name': '仓库系统', 'branches': 'master', 'tags': '',
'program_language': 'java', 'docker_registry_id': '1', 'update_datetime': {'$date': 1605035741000},
'create_datetime': {'$date': 1605035741000}, 'repo_path': 'http://git.wjh.com/wms/wms_service'}
"""
        # Look up the executor
        host_build = context.select_data_by_data_id__data_data_id(15, 1)[0]  # look up the server connection info
        # Fetch the latest version of the data, save it locally, and sync it to the executor directory
latest_update_datetime_record = ""
"""
data_data:
git_server.json
docker_registry.json
"""
        # Look up the git server
data_data_git_server = context.select_data_by_data_id__data_data_id('5', data_data_data['git_server'])[0]
latest_update_datetime_record += str(data_data_git_server["update_datetime"]) + ";"
        # Look up the docker image registry
data_data_docker_registry = \
context.select_data_by_data_id__data_data_id('4', data_data_data['docker_registry_id'])[0]
latest_update_datetime_record += str(data_data_docker_registry["update_datetime"]) + ";"
        # Look up the repository address
local_update_datetime_record = None
if os.path.exists(local_update_datetime_record_path):
with open(local_update_datetime_record_path) as f:
local_update_datetime_record = f.read()
if not local_update_datetime_record or local_update_datetime_record != latest_update_datetime_record:
            # ############### Sync the data files to the remote server
            # Prepare the remote directories
context.log(context.declare_remote_dirs(host_build, [remote_executor_root_path]))
context.write_data_data_2_file(data_data_git_server, local_executor_data_data_path + '/git_server.json')
context.write_data_data_2_file(data_data_docker_registry,
local_executor_data_data_path + '/docker_registry.json')
            # Fetch the latest version of the business scripts, save them locally, and sync them to the executor
"""
business_hyper_fusion:
java:
do_build_project.sh
build_project.sh
clean_build_project.sh
startup.sh
Dockerfile
do_build_docker.sh
clean_build_docker.sh
"""
            # Sync the data and business-script directories to the server
context.sync_dirs_2_remote(host_build, local_executor_root_path, remote_executor_root_path,
["data_data", "business_hyper_fusion"])
            # sync the startup file to the server
context.sync_files_2_remote(host_build, local_executor_root_path, remote_executor_root_path, ["startup.py"])
            with open(local_update_datetime_record_path, 'w') as f:
f.write(latest_update_datetime_record)
        # ###### each run needs its own execution directory, with the startup data written to its data_data.json file
remote_executor_run_n_path = remote_executor_root_path + "/run/" + str(executor_data_id)
        # create the run directory for this execution
context.declare_remote_dirs(host_build, [remote_executor_run_n_path])
        # write the startup parameters
context.execute_remote_command(host_build, """
sudo cat >> %s<<EOF
%s
EOF
""" % (remote_executor_run_n_path + "/data_data.json", json.dumps(data_data_data, ensure_ascii=False)))
        # Should the shared files be copied into this run's own area???
        # Upside: everything lives under the run's own directory; downside: the files must be copied.
command = "cd %s && python startup.py -ei %s" % (remote_executor_root_path, executor_data_id)
context.RemoteShell(host_build["ip"], host_build["port"], host_build["username"],
host_build["password"]).execute(command)
# context.ShellHandler(host_build["ip"], host_build["port"], host_build["username"],host_build["password"]).execute(command)
print("=" * 200)
except Exception as e:
traceback.print_exc()
context.log(str(e))
context.log("结束时间: " + str(datetime.datetime.now()))
context.log("总耗时: %s 秒钟" + str(int((int(time.time()) - startup_timestamp) / 1000)))
|
[
"tanshilinmail@gmail.com"
] |
tanshilinmail@gmail.com
|
bf0840495fc063b35d948fe9b69befd937bd7de7
|
d60acaac9e460c5693efe61449667b3c399c53c8
|
/algebra/linear/fishercriterion.py
|
1c1c14ab2e5666bf05a05221df9b5c7bd15195f6
|
[] |
no_license
|
HussainAther/mathematics
|
53ea7fb2470c88d674faa924405786ba3b860705
|
6849cc891bbb9ac69cb20dfb13fe6bb5bd77d8c5
|
refs/heads/master
| 2021-07-22T00:07:53.940786
| 2020-05-07T03:11:17
| 2020-05-07T03:11:17
| 157,749,226
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 900
|
py
|
import numpy as np
"""
We can use dimensionality reduction for linear classification models.
One-dimensional input vector x projected down to one dimension using
y = w^T x
We consider a two-class problem with N1 points of class C1 and N2 points of class C2
so the mean vectors of the two classes aare given by:
m1 = (1/N1) * summation of x_n over class C1 and m2 = (1/N2) times summation of x_n over class C2
Separation of the projected class means lets us choose w (the plane onto which we project)
m2 - m1 = w^T (m2-m1)
such that mk = w^T mk .
Fisher criterion is defined as the ratio of the between-class variance to the
within-class variance given by:
J(w) = (m2-m1)^2 / (s1^2 + s2^2)
in which sk^2 for some k is given by the summation of (yn - mk)^2
for one-dimensional space y
"""
def fisher_criterion(v1, v2):
    # J = (m2' - m1')^2 / (s1^2 + s2^2); the mean difference is squared to match the formula above
    return (np.mean(v1) - np.mean(v2)) ** 2 / (np.var(v1) + np.var(v2))
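# Minimal usage sketch (illustrative data, not part of the original file):
# two sets of 1-D projected samples; a larger J means better class separation.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    class1 = rng.normal(0.0, 1.0, 100)  # hypothetical samples from class C1
    class2 = rng.normal(3.0, 1.0, 100)  # hypothetical samples from class C2
    print(fisher_criterion(class1, class2))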
|
[
"shussainather@gmail.com"
] |
shussainather@gmail.com
|
0d6f563bf487e50143491c9294e56c9e298e24ec
|
a7596165a29e5186bc6c4718e3b6e835939b105d
|
/apps/pig/src/pig/views.py
|
47823c4bb576f890292573687f7d79887416ac0b
|
[
"Apache-2.0"
] |
permissive
|
lockhart39/HueQualityAndIngestionApp
|
f0c778665f0fbe699ec30e0df5e9f3ed8a9c3384
|
c75e55a43a8bdeb7aa0f5bf2101ec72b01dcac1c
|
refs/heads/master
| 2021-08-20T00:31:29.481333
| 2017-11-27T19:22:16
| 2017-11-27T19:22:16
| 112,237,923
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,542
|
py
|
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext as _
from django.views.decorators.csrf import ensure_csrf_cookie
from desktop.lib.django_util import JsonResponse, render
from desktop.lib.exceptions_renderable import PopupException
from desktop.lib.rest.http_client import RestException
from desktop.models import Document
from oozie.views.dashboard import show_oozie_error, check_job_access_permission,\
check_job_edition_permission
from pig import api
from pig.management.commands import pig_setup
from pig.models import get_workflow_output, hdfs_link, PigScript,\
create_or_update_script, get_scripts
LOG = logging.getLogger(__name__)
@ensure_csrf_cookie
def app(request):
autocomplete_base_url = ''
try:
autocomplete_base_url = reverse('beeswax:api_autocomplete_databases', kwargs={}) + '/'
except:
LOG.exception('failed to find autocomplete base url')
return render('app.mako', request, {
'autocomplete_base_url': autocomplete_base_url,
})
def scripts(request):
return JsonResponse(get_scripts(request.user, is_design=True), safe=False)
@show_oozie_error
def dashboard(request):
pig_api = api.get(request.fs, request.jt, request.user)
jobs = pig_api.get_jobs()
hue_jobs = Document.objects.available(PigScript, request.user, with_history=True)
massaged_jobs = pig_api.massaged_jobs_for_json(request, jobs, hue_jobs)
return JsonResponse(massaged_jobs, safe=False)
def save(request):
if request.method != 'POST':
raise PopupException(_('POST request required.'))
attrs = {
'id': request.POST.get('id'),
'name': request.POST.get('name'),
'script': request.POST.get('script'),
'user': request.user,
'parameters': json.loads(request.POST.get('parameters')),
'resources': json.loads(request.POST.get('resources')),
'hadoopProperties': json.loads(request.POST.get('hadoopProperties')),
}
pig_script = create_or_update_script(**attrs)
pig_script.is_design = True
pig_script.save()
response = {
'id': pig_script.id,
'docId': pig_script.doc.get().id
}
return JsonResponse(response, content_type="text/plain")
@show_oozie_error
def stop(request):
if request.method != 'POST':
raise PopupException(_('POST request required.'))
pig_script = PigScript.objects.get(id=request.POST.get('id'))
job_id = pig_script.dict['job_id']
job = check_job_access_permission(request, job_id)
check_job_edition_permission(job, request.user)
try:
api.get(request.fs, request.jt, request.user).stop(job_id)
except RestException, e:
    raise PopupException(_("Error stopping Pig script: %s") % e.message)
return watch(request, job_id)
@show_oozie_error
def run(request):
if request.method != 'POST':
raise PopupException(_('POST request required.'))
attrs = {
'id': request.POST.get('id'),
'name': request.POST.get('name'),
'script': request.POST.get('script'),
'user': request.user,
'parameters': json.loads(request.POST.get('parameters')),
'resources': json.loads(request.POST.get('resources')),
'hadoopProperties': json.loads(request.POST.get('hadoopProperties')),
'is_design': False
}
pig_script = create_or_update_script(**attrs)
params = request.POST.get('submissionVariables')
oozie_id = api.get(request.fs, request.jt, request.user).submit(pig_script, params)
pig_script.update_from_dict({'job_id': oozie_id})
pig_script.save()
response = {
'id': pig_script.id,
'watchUrl': reverse('pig:watch', kwargs={'job_id': oozie_id}) + '?format=python'
}
return JsonResponse(response, content_type="text/plain")
def copy(request):
if request.method != 'POST':
raise PopupException(_('POST request required.'))
pig_script = PigScript.objects.get(id=request.POST.get('id'))
doc = pig_script.doc.get()
try:
doc.can_read_or_exception(request.user)
except Exception, e:
raise PopupException(e)
existing_script_data = pig_script.dict
owner = request.user
name = existing_script_data["name"] + _(' (Copy)')
script = existing_script_data["script"]
parameters = existing_script_data["parameters"]
resources = existing_script_data["resources"]
hadoopProperties = existing_script_data["hadoopProperties"]
script_copy = PigScript.objects.create(owner=owner)
script_copy.update_from_dict({
'name': name,
'script': script,
'parameters': parameters,
'resources': resources,
'hadoopProperties': hadoopProperties
})
script_copy.save()
copy_doc = doc.copy(content_object=script_copy, name=name, owner=owner)
response = {
'id': script_copy.id,
'docId': copy_doc.id,
'name': name,
'script': script,
'parameters': parameters,
'resources': resources,
'hadoopProperties': hadoopProperties
}
return JsonResponse(response, content_type="text/plain")
def delete(request):
if request.method != 'POST':
raise PopupException(_('POST request required.'))
ids = request.POST.get('ids').split(",")
for script_id in ids:
try:
pig_script = PigScript.objects.get(id=script_id)
pig_script.can_edit_or_exception(request.user)
pig_script.doc.all().delete()
pig_script.delete()
except:
LOG.exception('failed to delete pig script')
response = {
'ids': ids,
}
return JsonResponse(response, content_type="text/plain")
@show_oozie_error
def watch(request, job_id):
oozie_workflow = check_job_access_permission(request, job_id)
logs, workflow_actions, is_really_done = api.get(request.fs, request.jt, request.user).get_log(request, oozie_workflow)
output = get_workflow_output(oozie_workflow, request.fs)
workflow = {
'job_id': oozie_workflow.id,
'status': oozie_workflow.status,
'progress': oozie_workflow.get_progress(),
'isRunning': oozie_workflow.is_running(),
'killUrl': reverse('oozie:manage_oozie_jobs', kwargs={'job_id': oozie_workflow.id, 'action': 'kill'}),
'rerunUrl': reverse('oozie:rerun_oozie_job', kwargs={'job_id': oozie_workflow.id, 'app_path': oozie_workflow.appPath}),
'actions': workflow_actions
}
response = {
'workflow': workflow,
'logs': logs,
'isReallyDone': is_really_done,
'output': hdfs_link(output)
}
return JsonResponse(response, content_type="text/plain")
def install_examples(request):
result = {'status': -1, 'message': ''}
if request.method != 'POST':
result['message'] = _('A POST request is required.')
else:
try:
pig_setup.Command().handle_noargs()
result['status'] = 0
except Exception, e:
LOG.exception(e)
result['message'] = str(e)
return JsonResponse(result)
|
[
"cloudera@quickstart.cloudera"
] |
cloudera@quickstart.cloudera
|
5df953e7136216e7adfa597079d091686b4fa538
|
deb97b21457bc360563e09c7bbba235cdd915548
|
/gitkit/commands/del_merged.py
|
de55050ed183a4ab19f91ae4bcc81325227a18e2
|
[
"MIT"
] |
permissive
|
akx/git-kit
|
e381ae5516a6f36f39d72af00e93aa5d4f0e985f
|
8084d99c6a113aad56764b0907d157c6957a3977
|
refs/heads/master
| 2023-07-19T20:16:27.358018
| 2023-07-18T07:49:41
| 2023-07-18T07:49:41
| 22,340,212
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 556
|
py
|
import click
from gitkit.conf import sacred_branches
from gitkit.util.refs import get_main_branch
from gitkit.util.shell import get_lines, run
@click.command()
@click.argument("ref", required=False, default=None)
def del_merged(ref):
"""
Delete merged branches.
"""
if not ref:
ref = get_main_branch()
for branch in set(get_lines(["git", "branch", "-l", "--merged", ref])):
branch = branch.strip("* ")
if branch != ref and branch not in sacred_branches:
run(["git", "branch", "-v", "-d", branch])
|
[
"akx@iki.fi"
] |
akx@iki.fi
|
e19d83d920cbf214a0559c2f0bb610c90b9d69ee
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_116/1914.py
|
20d7b72d1b8a35128812032e9655e83a53e17756
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,493
|
py
|
f = open("A-large.in","r")
t = int (f.readline())
ent = []
def check(ent):
    # returns 0 if X won, 1 if O won, 2 for a draw, 3 if the game has not completed
    # (a 'T' square counts for both players: a full line with no '.' and no 'O' is an X win)
for i in range(0,4):
if ('.' not in ent[i])and ('O' not in ent[i]):
return 0
if ('.' not in ent[i])and ('X' not in ent[i]):
return 1
for i in range(0,4):
a = []
for j in range(0,4):
a.append(ent[j][i])
if ('.' not in a)and ('O' not in a):
return 0
if ('.' not in a)and ('X' not in a):
return 1
a = [ent[0][0],ent[1][1],ent[2][2],ent[3][3]]
if ('.' not in a)and ('O' not in a):
return 0
if ('.' not in a)and ('X' not in a):
return 1
a = [ent[0][3],ent[1][2],ent[2][1],ent[3][0]]
if ('.' not in a)and ('O' not in a):
return 0
if ('.' not in a)and ('X' not in a):
return 1
if ('.' not in ent[0]) and ('.' not in ent[1]) and ('.' not in ent[2]) and ('.' not in ent[3]):
return 2
return 3
s = open("output.out","w")
for i in range(1,t+1):
for j in range(0,4):
ent.append(f.readline())
x = check(ent)
if x == 0:
s.write("Case #%d: X won" % i)
if x == 1:
s.write("Case #%d: O won" % i)
if x == 2:
s.write("Case #%d: Draw" % i)
if x == 3:
s.write("Case #%d: Game has not completed" % i)
if i<t:
ent.append(f.readline())
s.write("\n")
ent = []
f.close()
s.close()
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
360fbd0df75ba142aadd5589508fdb2d95ba7602
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_074/ch117_2020_04_01_19_24_01_200930.py
|
446a96f7337eaf516aa30fe9c7ef40edbc6f0571
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 225
|
py
|
import math
def snell_descartes(n1, n2, o1):
    # Snell-Descartes law: n1*sin(o1) = n2*sin(o2).
    # Minimal sketch assuming o1 is the angle of incidence in degrees,
    # with 0 <= o1 <= 90.
    sin_o2 = math.sin(math.radians(o1)) * n1 / n2
    if sin_o2 > 1:
        return None  # total internal reflection: no refracted ray
    return math.degrees(math.asin(sin_o2))
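# Usage sketch with illustrative values: light passing from water (n ~ 1.33)
# into air (n ~ 1.0) at 30 degrees of incidence.
print(snell_descartes(1.33, 1.0, 30))  # ~ 41.7 degrees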
|
[
"you@example.com"
] |
you@example.com
|
b87d3f64e713ba53fb5b94de3507f74d8a97ea0b
|
5c533e2cf1f2fa87e55253cdbfc6cc63fb2d1982
|
/python/quantumhall/cyclotron.py
|
108c267d7ee00673328a312228abdcb7f535d40f
|
[] |
no_license
|
philzook58/python
|
940c24088968f0d5c655e2344dfa084deaefe7c6
|
6d43db5165c9bcb17e8348a650710c5f603e6a96
|
refs/heads/master
| 2020-05-25T15:42:55.428149
| 2018-05-14T03:33:29
| 2018-05-14T03:33:29
| 69,040,196
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 993
|
py
|
#A suggestion for the classical fractional Hall effect
#is a mean-field organization of the cyclotron phases, such that they synchronize,
#leading to an effective time- and angle-dependent forcing
# self-consistently: d^2z/dt^2 = i w dz/dt + P
# where P is a vortex configuration by conjecture: P = f(|z|)z^n
# and also has angular time dependence z/|z|
import numpy as np
from scipy.integrate import odeint
import matplotlib.pyplot as plt
omega =1.
g = -.5
def pack(z,zdot):
return np.array([np.real(z),np.imag(z),np.real(zdot),np.imag(zdot)])
def unpack(x):
return x[0]+1.j * x[1], x[2]+1.j * x[3],
def accel(z,zdot):
return 1.j * omega * zdot + g * np.conj(z)**3
def diffeq(x,t):
z, zdot = unpack(x)
return pack(zdot, accel(z,zdot))
from scipy import signal
T = 1000.
N = 1000
initcond = pack(1. + 0.j ,0. + 1.j)
t = np.linspace(0,T, N)
sol = odeint(diffeq, initcond, t)
f , P = signal.periodogram(sol[:,1],N/T)
plt.plot(t,sol[:,1])
plt.figure()
plt.plot(f,P)
plt.show()
|
[
"philip@FartMachine7.local"
] |
philip@FartMachine7.local
|
60d4e232d5fa663fa88d5d6da7e0953144542f33
|
9ef0f266173887eafd5c797d13a6538733b39002
|
/trimesh/path/entities.py
|
de2166781a4699322e91ad3e70b13e8fccd4f1c4
|
[
"MIT"
] |
permissive
|
MiaoLi/trimesh
|
a850e3a922e43ce6500085eeaf16df8404ad0f17
|
8f6e537151d914d23180a1c1152d849c41d2c1fa
|
refs/heads/master
| 2021-01-14T12:36:02.831270
| 2015-10-17T01:36:33
| 2015-10-17T01:36:33
| 44,636,986
| 2
| 0
| null | 2015-10-20T21:52:11
| 2015-10-20T21:52:10
| null |
UTF-8
|
Python
| false
| false
| 5,072
|
py
|
'''
entities.py: basic geometric primitives
Design intent: only store references to vertex indices and pass the vertex
array back to functions that require it.
This keeps all vertices in one external list.
'''
import numpy as np
from .arc import discretize_arc, arc_center
from .curve import discretize_bezier, discretize_bspline
from ..points import unitize
from ..util import replace_references
_HASH_LENGTH = 5
class Entity(object):
def __init__(self,
points,
closed = False):
self.points = np.array(points)
self.closed = closed
@property
def _class_id(self):
'''
Return an integer that is unique to the class type.
Note that this implementation will fail if a class is defined
that starts with the same letter as an existing class.
Since this function is called a lot, it is a tradeoff between
speed and robustness where speed won.
'''
return ord(self.__class__.__name__[0])
@property
def hash(self):
'''
Returns a string unique to the entity.
If two identical entities exist, they can be removed
by comparing the string returned by this function.
'''
        hash = np.zeros(_HASH_LENGTH, dtype=int)
hash[-2:] = self._class_id, int(self.closed)
points_count = np.min([3, len(self.points)])
hash[0:points_count] = np.sort(self.points)[-points_count:]
return hash
def to_dict(self):
'''
Returns a dictionary with all of the information about the entity.
'''
return {'type' : self.__class__.__name__,
'points': self.points.tolist(),
'closed': self.closed}
def rereference(self, replacement):
'''
Given a replacement dictionary, change points to reflect the dictionary.
eg, if replacement = {0:107}, self.points = [0,1902] becomes [107, 1902]
'''
self.points = replace_references(self.points, replacement)
@property
def nodes(self):
'''
Returns an (n,2) list of nodes, or vertices on the path.
Note that this generic class function assumes that all of the reference
points are on the path, which is true for lines and three point arcs.
If you were to define another class where that wasn't the case
(for example, the control points of a bezier curve),
        you would need to implement an entity-specific version of this function.
The purpose of having a list of nodes is so that they can then be added
as edges to a graph, so we can use functions to check connectivity,
extract paths, etc.
The slicing on this function is essentially just tiling points
so the first and last vertices aren't repeated. Example:
self.points = [0,1,2]
returns: [[0,1], [1,2]]
'''
return np.column_stack((self.points,
self.points)).reshape(-1)[1:-1].reshape((-1,2))
@property
def end_points(self):
'''
Returns the first and last points. Also note that if you
define a new entity class where the first and last vertices
in self.points aren't the endpoints of the curve you need to
implement this function for your class.
self.points = [0,1,2]
returns: [0,2]
'''
return self.points[[0,-1]]
class Arc(Entity):
def discrete(self, vertices, scale=1.0):
return discretize_arc(vertices[self.points],
close = self.closed,
scale = scale)
def center(self, vertices):
return arc_center(vertices[self.points])
class Line(Entity):
def discrete(self, vertices, scale=1.0):
return vertices[self.points]
class Curve(Entity):
@property
def _class_id(self):
return sum([ord(i) for i in self.__class__.__name__])
@property
def nodes(self):
return [[self.points[0],
self.points[1]],
[self.points[1],
self.points[-1]]]
class Bezier(Curve):
def discrete(self, vertices, scale=1.0):
return discretize_bezier(vertices[self.points], scale=scale)
class BSpline(Curve):
def __init__(self, points, knots, closed=False):
self.points = points
self.knots = knots
self.closed = closed
def discrete(self, vertices, count=None, scale=1.0):
result = discretize_bspline(control = vertices[self.points],
knots = self.knots,
count = count,
scale = scale)
return result
|
[
"mik3dh@gmail.com"
] |
mik3dh@gmail.com
|
76732c90be1e6c89d923ed2aabebc32359ae7817
|
b73b77dbbd6b4b2c216c1c1e08e5d92c734e545c
|
/hotel/migrations/0102_auto_20200414_1402.py
|
4c95c54d31333b48f288d476d6df915d58142931
|
[] |
no_license
|
aadarshachapagain/hotel_booking
|
0cf248b78a03277a5208aecb1a72aa1282319ead
|
58503c57d2fd6d07fdbe6b7eb113954a0282dc3d
|
refs/heads/main
| 2023-08-27T01:53:21.176194
| 2021-10-01T03:13:42
| 2021-10-01T03:13:42
| 412,294,747
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 410
|
py
|
# Generated by Django 2.1.5 on 2020-04-14 08:17
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('hotel', '0101_bedtype_status'),
]
operations = [
migrations.AlterField(
model_name='bedtype',
name='description',
field=models.TextField(blank=True, max_length=500, null=True),
),
]
|
[
"aadarshachapagain@gmail.com"
] |
aadarshachapagain@gmail.com
|
acbeb910b65258b18b71182806b2cc75e84ffa03
|
3b1efdd0aacc98738f3b8b9ee09c6ff59cccc14e
|
/ietf/person/factories.py
|
e076b4ef72e4bec53e2bc6a55c5798054d06ced0
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
unofficial-mirror/ietfdb
|
15beb6bf17b1d4abb257ee656ac6b7488339d331
|
ce54adb30dc7299c6eb4d42b9aa9d2c2929c1a81
|
refs/heads/master
| 2020-08-06T17:24:13.966746
| 2019-10-04T20:54:05
| 2019-10-04T20:54:05
| 213,088,920
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,456
|
py
|
# Copyright The IETF Trust 2015-2019, All Rights Reserved
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
import factory
import faker
import faker.config
import os
import random
import shutil
from unidecode import unidecode
from django.conf import settings
from django.contrib.auth.models import User
from django.utils.text import slugify
from django.utils.encoding import force_text
import debug # pyflakes:ignore
from ietf.person.models import Person, Alias, Email
from ietf.person.name import normalize_name, unidecode_name
fake = faker.Factory.create()
def random_faker():
    # The transliteration of some arabic and devanagari names introduces
    # non-alphabetic characters that don't work with the draft author
    # extraction code, and also don't seem to match the way people with arabic
    # names romanize arabic names. Exclude those locales from name generation
    # in order to avoid test failures.
locales = set( [ l for l in faker.config.AVAILABLE_LOCALES if not (l.startswith('ar_') or l.startswith('sg_')) ] )
return faker.Faker(random.sample(locales, 1)[0])
class UserFactory(factory.DjangoModelFactory):
class Meta:
model = User
django_get_or_create = ('username',)
exclude = ['faker', ]
faker = factory.LazyFunction(random_faker)
first_name = factory.LazyAttribute(lambda o: o.faker.first_name())
last_name = factory.LazyAttribute(lambda o: o.faker.last_name())
email = factory.LazyAttributeSequence(lambda u, n: '%s.%s_%d@%s'%( slugify(unidecode(u.first_name)),
slugify(unidecode(u.last_name)), n, fake.domain_name()))
username = factory.LazyAttribute(lambda u: u.email)
@factory.post_generation
def set_password(obj, create, extracted, **kwargs): # pylint: disable=no-self-argument
obj.set_password( '%s+password' % obj.username ) # pylint: disable=no-value-for-parameter
class PersonFactory(factory.DjangoModelFactory):
class Meta:
model = Person
user = factory.SubFactory(UserFactory)
name = factory.LazyAttribute(lambda p: normalize_name('%s %s'%(p.user.first_name, p.user.last_name)))
ascii = factory.LazyAttribute(lambda p: force_text(unidecode_name(p.name)))
class Params:
with_bio = factory.Trait(biography = "\n\n".join(fake.paragraphs()))
@factory.post_generation
def default_aliases(obj, create, extracted, **kwargs): # pylint: disable=no-self-argument
make_alias = getattr(AliasFactory, 'create' if create else 'build')
make_alias(person=obj,name=obj.name)
make_alias(person=obj,name=obj.ascii)
if obj.name != obj.plain_name():
make_alias(person=obj,name=obj.plain_name())
if obj.ascii != obj.plain_ascii():
make_alias(person=obj,name=obj.plain_ascii())
@factory.post_generation
def default_emails(obj, create, extracted, **kwargs): # pylint: disable=no-self-argument
if extracted is None:
extracted = True
if create and extracted:
make_email = getattr(EmailFactory, 'create' if create else 'build')
make_email(person=obj, address=obj.user.email)
@factory.post_generation
def default_photo(obj, create, extracted, **kwargs): # pylint: disable=no-self-argument
import atexit
if obj.biography:
photo_name = obj.photo_name()
media_name = "%s/%s.jpg" % (settings.PHOTOS_DIRNAME, photo_name)
obj.photo = media_name
obj.photo_thumb = media_name
photosrc = os.path.join(settings.TEST_DATA_DIR, "profile-default.jpg")
photodst = os.path.join(settings.PHOTOS_DIR, photo_name + '.jpg')
if not os.path.exists(photodst):
shutil.copy(photosrc, photodst)
def delete_file(file):
os.unlink(file)
atexit.register(delete_file, photodst)
class AliasFactory(factory.DjangoModelFactory):
class Meta:
model = Alias
@classmethod
def _create(cls, model_class, *args, **kwargs):
person = kwargs['person']
name = kwargs['name']
existing_aliases = set(model_class.objects.filter(person=person).values_list('name', flat=True))
if not name in existing_aliases:
obj = model_class(*args, **kwargs)
obj.save()
return obj
name = factory.Faker('name')
def fake_email_address(n):
address_field = [ f for f in Email._meta.fields if f.name == 'address'][0]
count = 0
while True:
address = '%s.%s_%d@%s' % (
slugify(unidecode(fake.first_name())),
slugify(unidecode(fake.last_name())),
n, fake.domain_name()
)
count += 1
if len(address) <= address_field.max_length:
break
if count >= 10:
            raise RuntimeError("Failed generating a fake email address to fit in Email.address(max_length=%s)" % address_field.max_length)
return address
class EmailFactory(factory.DjangoModelFactory):
class Meta:
model = Email
django_get_or_create = ('address',)
address = factory.Sequence(fake_email_address)
person = factory.SubFactory(PersonFactory)
active = True
primary = False
origin = factory.LazyAttribute(lambda obj: obj.person.user.username if obj.person.user else '')
|
[
"henrik@levkowetz.com"
] |
henrik@levkowetz.com
|
c2329e1d0a37e88a0fcbfb5d6a743b80e8753c28
|
df3853b41ed05d86f5bcd992fcc265f637c67784
|
/big_deal/test2/14.py
|
d79e788612e926b9cf62a3a53eddc0a537b10ca5
|
[] |
no_license
|
KseniaMIPT/Adamasta
|
6ab0121519581dbbbf6ae788d1da85f545f718d1
|
e91c34c80834c3f4bf176bc4bf6bf790f9f72ca3
|
refs/heads/master
| 2021-01-10T16:48:31.141709
| 2016-11-23T21:02:25
| 2016-11-23T21:02:25
| 43,350,507
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,143
|
py
|
def digraph_from_input():
N = int(input())
digraph = {}
for i in range(N-1):
line = input().split()
if line[1] not in digraph:
digraph[line[1]] = {line[0]}
else:
digraph[line[1]].add(line[0])
if line[0] not in digraph:
digraph[line[0]] = set()
return digraph
digraph = digraph_from_input()
start_node = str(input())
def bfs_fire(g, start, fired=None, tree=None):
    """Extract a spanning tree using breadth-first traversal.
    :param g: the underlying graph
    :param start: the starting vertex
    :param fired: set of vertices already visited
    :return tree: the spanning tree
    """
    # default to fresh containers: mutable default arguments are shared between calls
    if fired is None:
        fired = set()
    if tree is None:
        tree = []
    fired.add(start)
queue = [start]
while queue:
current = queue.pop(0)
for neighbour in g[current]:
if neighbour not in fired:
fired.add(neighbour)
queue.append(neighbour)
tree.append([current, neighbour])
return tree
tree = bfs_fire(digraph, start_node)
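# Illustrative check with a hardcoded graph instead of stdin (hypothetical
# data, not part of the original exercise); neighbour order may vary:
# g = {'a': {'b', 'c'}, 'b': {'d'}, 'c': set(), 'd': set()}
# print(bfs_fire(g, 'a'))  # e.g. [['a', 'b'], ['a', 'c'], ['b', 'd']]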
|
[
"ksenia22.11@yandex.ru"
] |
ksenia22.11@yandex.ru
|
abf58fb31e51c78bb90abe08fcf94e44fc5f36c0
|
1985d1a7462d537e1f43055e3c75d91145407ff9
|
/Next_Permutation.py
|
fcc699c978f678ede7468f2b601e8c68627e87c9
|
[] |
no_license
|
yeonnseok/algorithm_practice
|
d95425e59b7b579a70dbbd932e4fb691c57f4534
|
c1468f23b2c077ecadac1fa843180674b6ea3295
|
refs/heads/master
| 2020-04-28T08:51:32.728010
| 2019-04-05T03:20:44
| 2019-04-05T03:20:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 498
|
py
|
def swap(lst, a, b):
    # tuple unpacking avoids the temp variable and shadowing the built-in `list`
    lst[a], lst[b] = lst[b], lst[a]
def next_permutation(c_list, n):
i = n - 1
while c_list[i - 1] >= c_list[i]:
i -= 1
if i <= 0: return False
j = n - 1
while c_list[j] <= c_list[i - 1]:
j -= 1
swap(c_list, j, i - 1)
j = n - 1
while i < j:
swap(c_list, j, i)
i += 1
j -= 1
return c_list
c_list = [7, 2, 3, 6, 5, 4, 1]
n = len(c_list)
print(next_permutation(c_list, n))
|
[
"smr603@snu.ac.kr"
] |
smr603@snu.ac.kr
|
efb691981ff05fe7bcb03faa225d88b4bee1bde0
|
084d1b9cb341a1b943f95e98ee3cf680df502ba9
|
/Products/mediaPage/tests/base.py
|
b0e818b0d28196ee7fc5c4b6020c8236190fd002
|
[] |
no_license
|
intk/Products.mediaPage
|
629aa7c8f98e308b536f997cafbab177ba6ae1a5
|
a3f4b0c900565b438593888a3009f8e7e4867792
|
refs/heads/master
| 2016-09-06T13:57:17.209247
| 2014-09-18T08:56:37
| 2014-09-18T08:56:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,990
|
py
|
"""Test setup for integration and functional tests.
When we import PloneTestCase and then call setupPloneSite(), all of
Plone's products are loaded, and a Plone site will be created. This
happens at module level, which makes it faster to run each test, but
slows down test runner startup.
"""
from Products.Five import zcml
from Products.Five import fiveconfigure
from Testing import ZopeTestCase as ztc
from Products.PloneTestCase import PloneTestCase as ptc
from Products.PloneTestCase.layer import onsetup
# When ZopeTestCase configures Zope, it will *not* auto-load products
# in Products/. Instead, we have to use a statement such as:
# ztc.installProduct('SimpleAttachment')
# This does *not* apply to products in eggs and Python packages (i.e.
# not in the Products.*) namespace. For that, see below.
# All of Plone's products are already set up by PloneTestCase.
@onsetup
def setup_product():
"""Set up the package and its dependencies.
The @onsetup decorator causes the execution of this body to be
deferred until the setup of the Plone site testing layer. We could
have created our own layer, but this is the easiest way for Plone
integration tests.
"""
# Load the ZCML configuration for the example.tests package.
# This can of course use <include /> to include other packages.
fiveconfigure.debug_mode = True
import Products.mediaPage
zcml.load_config('configure.zcml', Products.mediaPage)
fiveconfigure.debug_mode = False
# We need to tell the testing framework that these products
# should be available. This can't happen until after we have loaded
# the ZCML. Thus, we do it here. Note the use of installPackage()
# instead of installProduct().
# This is *only* necessary for packages outside the Products.*
# namespace which are also declared as Zope 2 products, using
# <five:registerPackage /> in ZCML.
# We may also need to load dependencies, e.g.:
# ztc.installPackage('borg.localrole')
ztc.installPackage('Products.mediaPage')
# The order here is important: We first call the (deferred) function
# which installs the products we need for this product. Then, we let
# PloneTestCase set up this product on installation.
setup_product()
ptc.setupPloneSite(products=['Products.mediaPage'])
class TestCase(ptc.PloneTestCase):
"""We use this base class for all the tests in this package. If
necessary, we can put common utility or setup code in here. This
applies to unit test cases.
"""
class FunctionalTestCase(ptc.FunctionalTestCase):
"""We use this class for functional integration tests that use
doctest syntax. Again, we can put basic common utility or setup
code in here.
"""
def afterSetUp(self):
roles = ('Member', 'Contributor')
self.portal.portal_membership.addMember('contributor',
'secret',
roles, [])
|
[
"andreslb1@gmail.com"
] |
andreslb1@gmail.com
|
a47988e12caea650f9b6dc78153c6e2a74602047
|
5aa0e5f32d529c3321c28d37b0a12a8cf69cfea8
|
/client/local_objects/ClientPlayerManager.py
|
8acf4ecba25471df1e138e3be612cc0741d8054f
|
[] |
no_license
|
sheepsy90/survive
|
26495f1ff2d8247fbb9470882f8be9f5272e7f2c
|
0eddf637be0eacd34415761b78fc2c9d50bc1528
|
refs/heads/master
| 2021-01-09T05:55:16.546762
| 2017-02-03T20:15:28
| 2017-02-03T20:15:28
| 80,864,391
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 974
|
py
|
# -*- coding:utf-8 -*-
from client.local_objects.PlayerModel import PlayerModel
class ClientPlayerManager(object):
def __init__(self):
self.players = {}
self.me = None
def add_new_player_position(self, player_id, player_name, position, is_moving, is_me):
if player_id not in self.players:
self.players[player_id] = PlayerModel(player_id, player_name, position, is_moving)
else:
self.players[player_id].update_position(position, is_moving)
if is_me:
self.me = self.players[player_id]
def has_me(self):
return self.me is not None
def get_players(self):
return self.players.values()
def remove_player(self, name):
print "REMOVE PLAYER FROM CLIENT"
del self.players[name]
def get_me(self):
return self.me
def set_my_character_condition(self, blurriness, redness):
self.me.set_character_condition(blurriness, redness)
|
[
"robert.kessler@klarna.com"
] |
robert.kessler@klarna.com
|
b9cd9b43fb64eb1805b8b9e3a30ddee088c9540c
|
76f59c245744e468577a293a0b9b078f064acf07
|
/3.longest-substring-without-repeating-characters.py
|
f5ddb791b9a978f2ed72a471cf53a960cb68a2a9
|
[] |
no_license
|
satoshun-algorithm-example/leetcode
|
c3774f07e653cf58640a6e7239705e58c5abde82
|
16b39e903755dea86f9a4f16df187bb8bbf835c5
|
refs/heads/master
| 2020-07-01T10:24:05.343283
| 2020-01-13T03:27:27
| 2020-01-13T03:27:27
| 201,144,558
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 562
|
py
|
#
# @lc app=leetcode id=3 lang=python3
#
# [3] Longest Substring Without Repeating Characters
#
class Solution:
def lengthOfLongestSubstring(self, s: str) -> int:
if not s:
return 0
c = 0
for i, _ in enumerate(s):
characters = ''
for j in s[i:]:
if j in characters:
break
characters += j
if len(characters) > c:
c = len(characters)
return c
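# An alternative sketch (not part of the original submission): the same
# problem solved in O(n) with a sliding window over last-seen indices.
class SlidingWindowSolution:
    def lengthOfLongestSubstring(self, s: str) -> int:
        last_seen = {}  # char -> index of its most recent occurrence
        best = start = 0
        for i, ch in enumerate(s):
            if ch in last_seen and last_seen[ch] >= start:
                start = last_seen[ch] + 1  # move the window past the repeat
            last_seen[ch] = i
            best = max(best, i - start + 1)
        return best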
|
[
"shun.sato1@gmail.com"
] |
shun.sato1@gmail.com
|
f7876ee7e8a2e78ce0603729c772cba69f9f259d
|
f61db5940e29773aba8fc342a21de00e91a5ab2e
|
/base/day15/note/demo2/testcases.py
|
d496be253d9081853b34930bf67e2d3b34b715c9
|
[] |
no_license
|
liyaozr/project
|
c17a9dcbcda38fe9a15ec4c41a01242a13695991
|
0b0fc10e267ceb19f6792b490fede177035459fe
|
refs/heads/master
| 2020-11-29T18:38:03.297369
| 2020-03-10T01:11:00
| 2020-03-10T01:11:00
| 230,190,916
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,518
|
py
|
"""
============================
Author:柠檬班-木森
Time:2020/2/7 21:29
E-mail:3247119728@qq.com
Company:湖南零檬信息技术有限公司
============================
"""
import unittest
from py26_15day.demo2.register import register
from py26_15day.demo2.login import login_check
from py26_15day.demo2.readexcel import ReadExcel
class RegisterTestCase(unittest.TestCase):
excel = ReadExcel("cases.xlsx", "register")
    def __init__(self, methodName, case_data):
        self.case_data = case_data
        # call the parent class's __init__
        super().__init__(methodName)
    def test_register(self):
        # Step 1: prepare the test case data
        # expected result:
        expected = eval(self.case_data["expected"])
        # parameter: data
        data = eval(self.case_data["data"])
        # row of this case in the worksheet
        row = self.case_data["case_id"] + 1
        # Step 2: call the function under test with the parameters to get the actual result
        res = register(*data)
        # Step 3: assert (compare the expected and actual results)
        try:
            self.assertEqual(expected, res)
        except AssertionError as e:
            # mark the case as failed in Excel
            self.excel.write_data(row=row, column=5, value="未通过")
            raise e
        else:
            # mark the case as passed in Excel
            self.excel.write_data(row=row, column=5, value="通过")
class LoginTestCase(unittest.TestCase):
excel = ReadExcel("cases.xlsx", "login")
    def __init__(self, methodName, case_data):
        self.case_data = case_data
        # call the parent class's __init__
        super().__init__(methodName)
    def test_login(self):
        # Step 1: prepare the test case data
        # expected result:
        expected = eval(self.case_data["expected"])
        # parameter: data
        data = eval(self.case_data["data"])
        # row of this case in the worksheet
        row = self.case_data["case_id"] + 1
        # Step 2: call the function under test with the parameters to get the actual result
        res = login_check(*data)
        # Step 3: assert (compare the expected and actual results)
        try:
            self.assertEqual(expected, res)
        except AssertionError as e:
            # mark the case as failed in Excel
            self.excel.write_data(row=row, column=5, value="未通过")
            raise e
        else:
            # mark the case as passed in Excel
            self.excel.write_data(row=row, column=5, value="通过")
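# A sketch of how such parametrized cases might be collected into a suite
# (assumed usage; the reader method name is hypothetical):
# import unittest
# suite = unittest.TestSuite()
# for case in ReadExcel("cases.xlsx", "register").read_data():
#     suite.addTest(RegisterTestCase("test_register", case))
# unittest.TextTestRunner().run(suite)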
|
[
"lyz_fordream@163.com"
] |
lyz_fordream@163.com
|
d23f0fdc9f79350dc59b7bbff909a0248f0ab93b
|
4e59f5fbd1e777f2488eb2a46deca34acf813979
|
/clients/admin.py
|
b19f3d0c3e98075355f6e14f4524c33f0aa4eac9
|
[] |
no_license
|
BoughezalaMohamedAimen/laser
|
f8c051be5c85be8f09b3ac4272065ce24af26555
|
1ac9c97b8ead4edcfcadeaafa0ee567f3f3d3d0d
|
refs/heads/master
| 2020-08-09T19:26:21.516671
| 2019-10-10T10:30:54
| 2019-10-10T10:30:54
| 214,154,428
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 156
|
py
|
from django.contrib import admin
# Register your models here.
from .models import *
admin.site.register(SeanceHistorique)
admin.site.register(Abonnement)
|
[
"mamoumou121@gmail.com"
] |
mamoumou121@gmail.com
|
4e7b737ef7c0dfbd4334a02c47e6e82ee662b5e9
|
bec623f2fab5bafc95eb5bd95e7527e06f6eeafe
|
/django-shared/treemenus/migrations/0003_menuitem_caption_pt.py
|
7f07c34686f12f29e3581c5062d3499f2d994595
|
[] |
no_license
|
riyanhax/a-demo
|
d714735a8b59eceeb9cd59f788a008bfb4861790
|
302324dccc135f55d92fb705c58314c55fed22aa
|
refs/heads/master
| 2022-01-21T07:24:56.468973
| 2017-10-12T13:48:55
| 2017-10-12T13:48:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 468
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('treemenus', '0002_menuitem_caption_pl'),
]
operations = [
migrations.AddField(
model_name='menuitem',
name='caption_pt',
field=models.CharField(max_length=150, null=True, verbose_name='Caption Portuguese', blank=True),
),
]
|
[
"ibalyko@ubuntu-server-16-04"
] |
ibalyko@ubuntu-server-16-04
|
4b964397df7ef88fabea054402bb1db1ad59d9b4
|
7f43264f32a57599d87fe8be8e0d748d89abecab
|
/api_v0/ElasticsearchURL.py
|
46da6ee9e31f2e2d8f574166965a86e3a980e86c
|
[] |
no_license
|
chair300/rsss_api
|
e13215439be1bfaa536ea7be5bfe4cc657bb0663
|
03866b0f5052dc81b61cab3b1c2a451d8e2ec449
|
refs/heads/master
| 2023-03-19T02:38:09.963553
| 2018-01-17T00:41:18
| 2018-01-17T00:41:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,498
|
py
|
from rest_framework.response import Response
from api_v0.serializers import ScoresRowSerializer
from django.conf import settings
import requests
import random
import json
# Make all the stuff in views.py use this class. This is the way to do it.
# NOTE: if it turns out that making a query to check whether a server is responsive is
# TOO overhead-intensive, use the following algorithm:
#   Fully formulate the URL as it would be used (randomly shuffling the ES boxes),
#   make the request as-is, and try/catch to detect timeout and/or connection errors.
#   If there's a dropped request, pop the next machine off of the shuffled list of
#   available ES nodes and try that URL.
#   Either end up returning the result set, or a 500-status Response with a descriptive
#   message about Elasticsearch being down.
class ElasticsearchURL(object):
#if operation is None, id_to_get had better be there.
#if scroll duration is included, this is a scrolling download.
def __init__(self, data_type, operation="_search",
from_result=None, page_size=None, id_to_get=None,
scroll_info=None):
url_base = self.get_base_es_url()
name_of_index = None
if data_type == 'atsnp_output':
name_of_index = settings.ES_INDEX_NAMES['ATSNP_DATA']
elif data_type == 'gencode_gene_symbols':
name_of_index = settings.ES_INDEX_NAMES['GENE_NAMES']
elif data_type == 'sequence':
name_of_index = settings.ES_INDEX_NAMES['SNP_INFO']
elif data_type == 'motif_bits':
name_of_index = settings.ES_INDEX_NAMES['MOTIF_BITS']
#print "url_base : " + url_base
#print "name_of_index: " + name_of_index
#print "data_type: " + data_type
#print "operation: " + operation
url_parts = [url_base, name_of_index, data_type]
get_args = []
if id_to_get is not None:
#throw a nice exception if this is invalid?
url_parts.append(id_to_get)
else:
#this is a search.
url_parts.append(operation)
get_args.append(self.get_page_size(page_size))
if scroll_info is not None:
if 'duration' in scroll_info:
get_args.append('scroll=' + scroll_info['duration'])
else:
#Use a bare URL to continue a scroll
get_args = []
url_parts = [url_base, operation]
url_parts.append('scroll')
if from_result is not None:
get_args.append("from=" + str(from_result))
bare_url = "/".join(url_parts)
if len(get_args) > 0:
self.url = '?'.join([bare_url,'&'.join(get_args)])
else:
self.url = bare_url
#print "url created: " + self.url
def setup_scroll_args(self, scroll_info):
scroll_args = []
if 'duration' in scroll_info:
scroll_args.append('scroll=' + scroll_info['duration'])
return scroll_args
#for searches
def get_page_size(self, page_size):
if page_size is None:
page_size = settings.ELASTICSEARCH_PAGE_SIZE
return "size=" + str(page_size)
def get_base_es_url(self):
machines_to_try = settings.ELASTICSEARCH_URLS[:]
random.shuffle(machines_to_try)
return machines_to_try.pop()
def get_url(self):
return self.url
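# A sketch of the failover loop described in the NOTE above; the function
# name and the timeout value are assumptions, not part of the original module.
def request_with_failover(path):
    machines = settings.ELASTICSEARCH_URLS[:]
    random.shuffle(machines)
    while machines:
        base = machines.pop()
        try:
            return requests.get(base + path, timeout=5)
        except (requests.ConnectionError, requests.Timeout):
            continue  # dropped request: try the next ES node
    return Response({'error': 'Elasticsearch appears to be down'}, status=500)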
|
[
"rebeccakathrynhudson@gmail.com"
] |
rebeccakathrynhudson@gmail.com
|
7058046baa3c952775c38a273ce86611b6ff8399
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_117/516.py
|
2f37f445e68945c2132f222f58ca3bd97747e8c4
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,241
|
py
|
lines = open('data.txt').read()
output = open('output.txt', 'w')
lines = lines.splitlines()
cases_num = int(lines[0])
lines = lines[1:]
cur_index = 0
for i in range(cases_num):
case_num = i + 1
m, n = lines[cur_index].split()
n = int(n)
m = int(m)
cur_index += 1
matrix = []
for row_ind in range(m):
line = lines[row_ind + cur_index]
matrix.append([int(x) for x in line.split()])
rows = []
columns = []
for row in matrix:
rows.append(sorted(set(row)))
for column in zip(*matrix):
columns.append(sorted(set(column)))
def is_lawnable():
for i in range(m):
for j in range(n):
elem = matrix[i][j]
i_row = rows[i].index(elem)
j_column = columns[j].index(elem)
if len(rows[i]) > i_row + 1 and len(columns[j]) > j_column + 1:
return False
return True
is_good = is_lawnable()
cur_index += m
if is_good:
output.write('Case #{0}:'.format(case_num) + ' YES\n')
print 'Case #{0}:'.format(case_num), 'YES'
else:
output.write('Case #{0}:'.format(case_num) + ' NO\n')
print 'Case #{0}:'.format(case_num), 'NO'
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
9fee9927053a85fe0988554aa2c1cf7fc746326b
|
8a7950440a4a8015523a1e1474a3bfc3aaa95782
|
/email_smtplib/basic/email_send2.py
|
d3b046d99e4e22a27c9ecef541a328042c1dbfab
|
[] |
no_license
|
SatishNitk/Python_Web_Scrapper
|
bddb320b86a8942b6b3c346eb09f09b933be5b37
|
f257ad2e6d2053f0f86443905de87ccf81df0c62
|
refs/heads/master
| 2020-05-07T19:51:51.816353
| 2019-07-07T13:31:27
| 2019-07-07T13:31:27
| 180,826,120
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,037
|
py
|
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from smtplib import SMTP, SMTPException,SMTPAuthenticationError
host = "smtp.gmail.com"
port = 587
email = "hungrygupta@gmail.com"
password = ""
from1 = "hungrygupta@gmail.com"
to_list = ["hungrygupta@gmail.com"]
try:
email_obj = SMTP(host, port)
email_obj.ehlo()
email_obj.starttls()
email_obj.ehlo()
email_obj.login(email,password)
plain_text = "just a simple text message"
html_txt = """
<html>
<body>
<h1>
This paragraph
contains a lot of lines
in the source code,
but the browser
ignores it.
</h1>
</body>
</html>
"""
the_msg = MIMEMultipart("alternative")
the_msg['Subject'] = "Hello there"
the_msg['From'] = from1
part1 = MIMEText(plain_text, "plain")
part2 = MIMEText(html_txt, "html")
the_msg.attach(part1)
the_msg.attach(part2)
print(the_msg.as_string())
email_obj.sendmail(from1,to_list,the_msg.as_string())
except SMTPException:
print("exception occured in sending rmail check once whole code")
|
[
"satishkrgu95@gmail.com"
] |
satishkrgu95@gmail.com
|
50fb214882899ea973df69630262b57e20b57534
|
62e58c051128baef9452e7e0eb0b5a83367add26
|
/edifact/D94B/CONQVAD94BUN.py
|
8785753a062814ca0ea352440adb08d86535cd20
|
[] |
no_license
|
dougvanhorn/bots-grammars
|
2eb6c0a6b5231c14a6faf194b932aa614809076c
|
09db18d9d9bd9d92cefbf00f1c0de1c590fe3d0d
|
refs/heads/master
| 2021-05-16T12:55:58.022904
| 2019-05-17T15:22:23
| 2019-05-17T15:22:23
| 105,274,633
| 0
| 0
| null | 2017-09-29T13:21:21
| 2017-09-29T13:21:21
| null |
UTF-8
|
Python
| false
| false
| 1,437
|
py
|
#Generated by bots open source edi translator from UN-docs.
from bots.botsconfig import *
from edifact import syntax
from recordsD94BUN import recorddefs
structure = [
{ID: 'UNH', MIN: 1, MAX: 1, LEVEL: [
{ID: 'BGM', MIN: 1, MAX: 1},
{ID: 'DTM', MIN: 1, MAX: 9},
{ID: 'AUT', MIN: 0, MAX: 2},
{ID: 'FTX', MIN: 0, MAX: 5},
{ID: 'RFF', MIN: 0, MAX: 9, LEVEL: [
{ID: 'DTM', MIN: 0, MAX: 5},
]},
{ID: 'NAD', MIN: 1, MAX: 9, LEVEL: [
{ID: 'LOC', MIN: 0, MAX: 25},
{ID: 'FII', MIN: 0, MAX: 5},
{ID: 'RFF', MIN: 0, MAX: 10, LEVEL: [
{ID: 'DTM', MIN: 0, MAX: 5},
]},
{ID: 'DOC', MIN: 0, MAX: 5, LEVEL: [
{ID: 'DTM', MIN: 0, MAX: 5},
]},
{ID: 'CTA', MIN: 0, MAX: 5, LEVEL: [
{ID: 'COM', MIN: 0, MAX: 5},
]},
]},
{ID: 'BII', MIN: 0, MAX: 100000, LEVEL: [
{ID: 'RCS', MIN: 0, MAX: 1},
{ID: 'QTY', MIN: 1, MAX: 6},
{ID: 'PRI', MIN: 0, MAX: 1},
{ID: 'LIN', MIN: 1, MAX: 100, LEVEL: [
{ID: 'IMD', MIN: 0, MAX: 1000, LEVEL: [
{ID: 'RFF', MIN: 0, MAX: 5},
{ID: 'GIS', MIN: 0, MAX: 5},
]},
]},
{ID: 'TAX', MIN: 0, MAX: 5, LEVEL: [
{ID: 'MOA', MIN: 0, MAX: 1},
{ID: 'LOC', MIN: 0, MAX: 5},
]},
]},
{ID: 'CNT', MIN: 0, MAX: 5},
{ID: 'UNT', MIN: 1, MAX: 1},
]},
]
|
[
"jason.capriotti@gmail.com"
] |
jason.capriotti@gmail.com
|
d5e5e491086979335728a5ce09637227e79fbd84
|
551b75f52d28c0b5c8944d808a361470e2602654
|
/huaweicloud-sdk-rabbitmq/huaweicloudsdkrabbitmq/v2/model/show_background_task_request.py
|
15f666a3a90534bc5325dbb4cc52bae5849ca114
|
[
"Apache-2.0"
] |
permissive
|
wuchen-huawei/huaweicloud-sdk-python-v3
|
9d6597ce8ab666a9a297b3d936aeb85c55cf5877
|
3683d703f4320edb2b8516f36f16d485cff08fc2
|
refs/heads/master
| 2023-05-08T21:32:31.920300
| 2021-05-26T08:54:18
| 2021-05-26T08:54:18
| 370,898,764
| 0
| 0
|
NOASSERTION
| 2021-05-26T03:50:07
| 2021-05-26T03:50:07
| null |
UTF-8
|
Python
| false
| false
| 3,478
|
py
|
# coding: utf-8
import pprint
import re
import six
class ShowBackgroundTaskRequest:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'instance_id': 'str',
'task_id': 'str'
}
attribute_map = {
'instance_id': 'instance_id',
'task_id': 'task_id'
}
def __init__(self, instance_id=None, task_id=None):
"""ShowBackgroundTaskRequest - a model defined in huaweicloud sdk"""
self._instance_id = None
self._task_id = None
self.discriminator = None
self.instance_id = instance_id
self.task_id = task_id
@property
def instance_id(self):
"""Gets the instance_id of this ShowBackgroundTaskRequest.
        Instance ID.
:return: The instance_id of this ShowBackgroundTaskRequest.
:rtype: str
"""
return self._instance_id
@instance_id.setter
def instance_id(self, instance_id):
"""Sets the instance_id of this ShowBackgroundTaskRequest.
        Instance ID.
:param instance_id: The instance_id of this ShowBackgroundTaskRequest.
:type: str
"""
self._instance_id = instance_id
@property
def task_id(self):
"""Gets the task_id of this ShowBackgroundTaskRequest.
        Task ID.
:return: The task_id of this ShowBackgroundTaskRequest.
:rtype: str
"""
return self._task_id
@task_id.setter
def task_id(self, task_id):
"""Sets the task_id of this ShowBackgroundTaskRequest.
        Task ID.
:param task_id: The task_id of this ShowBackgroundTaskRequest.
:type: str
"""
self._task_id = task_id
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ShowBackgroundTaskRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
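# Minimal usage sketch (illustrative IDs, not from the original file):
# req = ShowBackgroundTaskRequest(instance_id='my-instance-id', task_id='my-task-id')
# print(req.to_dict())  # {'instance_id': 'my-instance-id', 'task_id': 'my-task-id'}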
|
[
"hwcloudsdk@huawei.com"
] |
hwcloudsdk@huawei.com
|
a7e3300d975a841171e8c857c965142b30239106
|
23631af0987b3f1d30b0bf8bfcea1bd63159eeba
|
/gate_api/api/__init__.py
|
56f948e75c0f565a6647dec42431e9458c3446f2
|
[] |
no_license
|
xuvw/gateapi-python
|
08c3c72ff0e2c4713bf3a2ffe0b15d05e57491ca
|
1a3f3551cba4a756f76f17b070c3e0c5ff2e88ea
|
refs/heads/master
| 2020-05-25T14:33:35.592775
| 2019-04-02T08:50:25
| 2019-04-02T08:50:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 223
|
py
|
from __future__ import absolute_import
# flake8: noqa
# import apis into api package
from gate_api.api.futures_api import FuturesApi
from gate_api.api.margin_api import MarginApi
from gate_api.api.spot_api import SpotApi
|
[
"revilwang@gmail.com"
] |
revilwang@gmail.com
|
665b261c26c914af9be8d0cc6ca2991861d06d4a
|
1d164438ac1ba7c88aeabb7c9ea39b58680ba79c
|
/django_postgres_matviews/management/commands/drop_matviews.py
|
da51ebd3f07c9398a3e198e8445d967b5dc87d2b
|
[
"Unlicense"
] |
permissive
|
andrewp-as-is/django-postgres-matviews.py
|
fac3288f199f013a0421ae23f634ea7082020181
|
ff7d76f885318e208b81be7f5dcaa71ff7fc4fb3
|
refs/heads/master
| 2023-01-20T18:38:58.232754
| 2020-12-03T20:46:40
| 2020-12-03T20:46:40
| 285,872,756
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 238
|
py
|
from django.core.management.base import BaseCommand
from django.db import connection
from django_postgres_matviews.utils import drop_matviews
class Command(BaseCommand):
def handle(self, *args, **options):
drop_matviews()
|
[
"russianidiot.github@gmail.com"
] |
russianidiot.github@gmail.com
|
5bfee7606764826ff036404a7b07620623e24a96
|
88745dafec989d39726ca2e4d7f6cfb20bb60f5d
|
/tests/unit_tests/modules/s3/s3gis/BingLayer.py
|
c25ecc6c14edf56ab2f86ebbe914dc43f8fc5a3b
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
sungkomp/SAMBRO
|
f1ced7c9d198ccfe30aaa1bf883c2f8a7478fffb
|
4618d785d03424d122206d88d9ebfb6971486e2c
|
refs/heads/master
| 2020-05-30T08:41:26.855362
| 2019-10-15T02:48:47
| 2019-10-15T02:48:47
| 69,448,194
| 1
| 0
|
NOASSERTION
| 2019-10-15T04:25:13
| 2016-09-28T09:31:35
|
Python
|
UTF-8
|
Python
| false
| false
| 784
|
py
|
s3gis_tests = load_module("tests.unit_tests.modules.s3.s3gis")
def test_BingLayer():
s3gis_tests.layer_test(
db,
db.gis_layer_bing,
dict(
name = "Test Bing Layer",
description = "Test Bing layer",
enabled = True,
created_on = datetime.datetime.now(),
modified_on = datetime.datetime.now(),
aerial_enabled = True,
road_enabled = True,
hybrid_enabled = True,
apikey = "FAKEAPIKEY",
),
"S3.gis.Bing",
{
"Aerial": u"Bing Satellite",
"ApiKey": u"FAKEAPIKEY",
"Hybrid": u"Bing Hybrid",
"Road": u"Bing Roads",
},
session = session,
request = request,
)
|
[
"fran@aidiq.com"
] |
fran@aidiq.com
|
127e521fc174bcb018737f195d7d9d13e672b726
|
9b4fe9c2693abc6ecc614088665cbf855971deaf
|
/78.subsets.py
|
e02e0a2c09554ecf70645818837f819efcf53e44
|
[
"MIT"
] |
permissive
|
windard/leeeeee
|
e795be2b9dcabfc9f32fe25794878e591a6fb2c8
|
0dd67edca4e0b0323cb5a7239f02ea46383cd15a
|
refs/heads/master
| 2022-08-12T19:51:26.748317
| 2022-08-07T16:01:30
| 2022-08-07T16:01:30
| 222,122,359
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,215
|
py
|
# coding=utf-8
#
# @lc app=leetcode id=78 lang=python
#
# [78] Subsets
#
# https://leetcode.com/problems/subsets/description/
#
# algorithms
# Medium (51.03%)
# Likes: 2192
# Dislikes: 54
# Total Accepted: 396.6K
# Total Submissions: 731K
# Testcase Example: '[1,2,3]'
#
# Given a set of distinct integers, nums, return all possible subsets (the
# power set).
#
# Note: The solution set must not contain duplicate subsets.
#
# Example:
#
#
# Input: nums = [1,2,3]
# Output:
# [
# [3],
# [1],
# [2],
# [1,2,3],
# [1,3],
# [2,3],
# [1,2],
# []
# ]
#
#
class Solution(object):
def _subsets(self, nums):
"""
:type nums: List[int]
:rtype: List[List[int]]
"""
# DFS
        # combinations
        # the results are correct, but the ordering differs from the sample output
result = temp = [[]]
last = []
while temp:
temp = []
for num in nums:
if not last:
temp.append(last + [num])
else:
for l in last:
if num > max(l):
temp.append(l + [num])
last = temp
result.extend(last)
return result
def __subsets(self, nums):
"""
:type nums: List[int]
:rtype: List[List[int]]
"""
result = []
length = len(nums)
data = {value:2**key for key,value in enumerate(nums)}
for i in range(2**length):
temp = []
for key,value in data.items():
if value & i != 0:
temp.append(key)
result.append(temp)
return result
def ___subsets(self, nums):
"""
:type nums: List[int]
:rtype: List[List[int]]
"""
result = []
length = len(nums)
for i in range(1<<length):
temp = []
for key,value in enumerate(nums):
if 1<<key & i != 0:
temp.append(value)
result.append(temp)
return result
def ____subsets(self, nums):
"""
:type nums: List[int]
:rtype: List[List[int]]
"""
# Best of All
result = [[]]
for n in nums:
current = result[:]
for t in current:
result.append(t+[n])
return result
def _____subsets(self, nums):
"""
:type nums: List[int]
:rtype: List[List[int]]
"""
return self.helper(nums, 0, [[]])
def helper(self, nums, index, result):
if index >= len(nums):
return result
temp = result[:]
for t in temp:
result.append(t+[nums[index]])
return self.helper(nums, index+1, result)
def subsets(self, nums):
"""
:type nums: List[int]
:rtype: List[List[int]]
"""
start = 0
e = len(nums)
result = []
def backtrack(s, p):
result.append(p)
for i in range(s, e):
backtrack(i+1, p+[nums[i]])
backtrack(start, [])
return result
# if __name__ == "__main__":
# s = Solution()
# print s.subsets([1,2,3])
|
[
"windard@qq.com"
] |
windard@qq.com
|
246e17ff0c48c787a0a932071216fd5a5e87c321
|
770e3f4fcb3d2f96ea8cc36bfa47625778c40c71
|
/unit_tests/test_provides.py
|
154f566f2b1cbe63d5f075866c676d2654f56ed0
|
[] |
no_license
|
openstack-charmers/charm-interface-pacemaker-remote
|
8d12a0594bc580f74c9a591b44429320912c8cbf
|
f1297f72a5c6f8dc4f89461850a7d8ebaa01cf04
|
refs/heads/master
| 2020-04-30T03:33:29.086571
| 2019-03-20T07:31:55
| 2019-03-20T07:31:55
| 176,589,246
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,454
|
py
|
import unittest
import mock
with mock.patch('charmhelpers.core.hookenv.metadata') as _meta:
    _meta.return_value = 'ss'
import provides
_hook_args = {}
TO_PATCH = [
]
def mock_hook(*args, **kwargs):
def inner(f):
# remember what we were passed. Note that we can't actually determine
# the class we're attached to, as the decorator only gets the function.
_hook_args[f.__name__] = dict(args=args, kwargs=kwargs)
return f
return inner
class _unit_mock:
def __init__(self, unit_name, received=None):
self.unit_name = unit_name
self.received = received or {}
class _relation_mock:
def __init__(self, application_name=None, units=None):
self.to_publish_raw = {}
self.to_publish = {}
self.application_name = application_name
self.units = units
class TestPacemakerRemoteProvides(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls._patched_hook = mock.patch('charms.reactive.when', mock_hook)
cls._patched_hook_started = cls._patched_hook.start()
# force provides to rerun the mock_hook decorator:
# try except is Python2/Python3 compatibility as Python3 has moved
# reload to importlib.
try:
reload(provides)
except NameError:
import importlib
importlib.reload(provides)
@classmethod
def tearDownClass(cls):
cls._patched_hook.stop()
cls._patched_hook_started = None
cls._patched_hook = None
# and fix any breakage we did to the module
try:
reload(provides)
except NameError:
import importlib
importlib.reload(provides)
def patch(self, method):
_m = mock.patch.object(self.obj, method)
_mock = _m.start()
self.addCleanup(_m.stop)
return _mock
def setUp(self):
self.relation_obj = provides.PacemakerRemoteProvides(
'some-relation',
[])
self._patches = {}
self._patches_start = {}
self.obj = provides
for method in TO_PATCH:
setattr(self, method, self.patch(method))
def tearDown(self):
self.relation_obj = None
for k, v in self._patches.items():
v.stop()
setattr(self, k, None)
self._patches = None
self._patches_start = None
def patch_relation_obj(self, attr, return_value=None):
mocked = mock.patch.object(self.relation_obj, attr)
self._patches[attr] = mocked
started = mocked.start()
started.return_value = return_value
self._patches_start[attr] = started
setattr(self, attr, started)
def test_publish_info(self):
mock_rel = _relation_mock()
self.relation_obj._relations = [mock_rel]
self.relation_obj.publish_info(
'node1.az1.local',
stonith_hostname='node1.stonith',
enable_resources=True)
expect = {
'remote-hostname': 'node1.az1.local',
'stonith-hostname': 'node1.stonith',
'enable-resources': True}
self.assertEqual(
mock_rel.to_publish,
expect)
def test_get_pacemaker_key(self):
unit1 = _unit_mock(
'unit1',
received={'pacemaker-key': 'cG1ha2Vya2V5MQo='})
mock_rel = _relation_mock(units=[unit1])
self.relation_obj._relations = [mock_rel]
self.assertEqual(
self.relation_obj.get_pacemaker_key(),
b'pmakerkey1\n')
def test_get_pacemaker_key_inconsistent(self):
unit1 = _unit_mock(
'unit1',
received={'pacemaker-key': 'cG1ha2Vya2V5MQo='})
unit2 = _unit_mock(
'unit2',
received={'pacemaker-key': 'cG1ha2Vya2V5Mgo='})
mock_rel = _relation_mock(units=[unit1, unit2])
self.relation_obj._relations = [mock_rel]
with self.assertRaises(Exception):
self.relation_obj.get_pacemaker_key()
def test_get_pacemaker_key_missing(self):
unit1 = _unit_mock(
'unit1',
received={})
unit2 = _unit_mock(
'unit2',
received={})
mock_rel = _relation_mock(units=[unit1, unit2])
self.relation_obj._relations = [mock_rel]
self.assertEqual(
self.relation_obj.get_pacemaker_key(),
None)
|
[
"liam.young@canonical.com"
] |
liam.young@canonical.com
|
894b732050372338c14fa012e7f9b16f6e1eadbf
|
11812a0cc7b818292e601ecdd4aa4c4e03d131c5
|
/02_多任务/3_协程/hm_15_使用协程完成多任务终极.py
|
ed3306cb3d774702e938b02a0d1ebc14291efd90
|
[] |
no_license
|
SunshineFaxixi/Python_Learning
|
f1e55adcfa898489cc9146ccfb220f0b48a31a22
|
ab3ca44d013311b6de02124091acc4c36a83c4d9
|
refs/heads/master
| 2021-08-16T05:47:29.963118
| 2021-01-04T13:48:30
| 2021-01-04T13:48:30
| 238,857,341
| 1
| 0
| null | 2020-03-03T13:53:08
| 2020-02-07T06:21:46
|
HTML
|
UTF-8
|
Python
| false
| false
| 409
|
py
|
import gevent
import time
import random
from gevent import monkey
def coroutine_work(coroutine_name):
for i in range(10):
print(coroutine_name, i)
time.sleep(random.random())
def main():
    monkey.patch_all()  # apply the monkey patch so blocking calls such as time.sleep become cooperative
gevent.joinall([
gevent.spawn(coroutine_work, "work1"),
gevent.spawn(coroutine_work, "work2")
])
if __name__ == "__main__":
main()
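# --- Editor's note (a sketch, assuming plain gevent as imported above) -----
# gevent's docs recommend patching as early as possible, before the modules
# whose blocking calls should become cooperative are imported, e.g.:
#
#   from gevent import monkey
#   monkey.patch_all()          # patch first...
#   import time                 # ...then import blocking modules
#
# Patching inside main() still works here because time.sleep is looked up at
# call time, but the early-patch form avoids surprises with other libraries.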
|
[
"xxhan2018@163.com"
] |
xxhan2018@163.com
|
a9dd8620e61118abf0707b5fb0f71735b60984ba
|
b45d66c2c009d74b4925f07d0d9e779c99ffbf28
|
/gp/business_logic/business_objects/monopoly.py
|
f40bcc4ebbf90d3908c5f5b1da8a279f7018e9f4
|
[] |
no_license
|
erezrubinstein/aa
|
d96c0e39762fe7aaeeadebbd51c80b5e58576565
|
a3f59ba59519183257ed9a731e8a1516a4c54b48
|
refs/heads/master
| 2021-03-12T23:44:56.319721
| 2016-09-18T23:01:17
| 2016-09-18T23:01:17
| 22,665,501
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 826
|
py
|
import datetime
__author__ = 'erezrubinstein'
class Monopoly(object):
def __init__(self, store_id, monopoly_type_id, trade_area_id, start_date, end_date):
self.store_id = store_id
self.monopoly_type_id = monopoly_type_id
self.trade_area_id = trade_area_id
self.start_date = start_date
self.end_date = end_date
def __eq__(self, other):
        # sometimes mongo stores the start date slightly off, so this just makes sure the dates agree to within one second
        return self.store_id == other.store_id and self.monopoly_type_id == other.monopoly_type_id and self.trade_area_id == other.trade_area_id and \
            abs(other.start_date - self.start_date) < datetime.timedelta(seconds = 1) and \
            abs(other.end_date - self.end_date) < datetime.timedelta(seconds = 1)
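# --- Editor's sketch (hypothetical values, not part of the original) -------
# Why abs() matters in the tolerance check above: a signed difference is
# always "< 1 second" when it is negative, which would hide large gaps.
if __name__ == "__main__":
    a = datetime.datetime(2016, 1, 1, 0, 0, 0)
    b = a - datetime.timedelta(seconds=5)
    print((b - a) < datetime.timedelta(seconds=1))       # True, misleading
    print(abs(b - a) < datetime.timedelta(seconds=1))    # False, correct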
|
[
"erezrubinstein@hotmail.com"
] |
erezrubinstein@hotmail.com
|
db6dac8b0b6013de4ea57a1b57fa20f6b8d368f8
|
0ddcfcbfc3faa81c79e320c34c35a972dab86498
|
/puzzles/add_and_search_word.py
|
13c99ec33dbc7abde7199d0dc2552efa2636dc28
|
[] |
no_license
|
IvanWoo/coding-interview-questions
|
3311da45895ac4f3c394b22530079c79a9215a1c
|
1312305b199b65a11804a000432ebe28d1fba87e
|
refs/heads/master
| 2023-08-09T19:46:28.278111
| 2023-06-21T01:47:07
| 2023-06-21T01:47:07
| 135,307,912
| 0
| 0
| null | 2023-07-20T12:14:38
| 2018-05-29T14:24:43
|
Python
|
UTF-8
|
Python
| false
| false
| 2,277
|
py
|
# https://leetcode.com/problems/add-and-search-word-data-structure-design/
"""
Design a data structure that supports the following two operations:
void addWord(word)
bool search(word)
search(word) can search a literal word or a regular expression string containing only letters a-z or '.'. A '.' means it can represent any one letter.
Example:
addWord("bad")
addWord("dad")
addWord("mad")
search("pad") -> false
search("bad") -> true
search(".ad") -> true
search("b..") -> true
Note:
You may assume that all words consist of lowercase letters a-z.
"""
from dataclasses import dataclass, field
from typing import Any, Dict, Optional
@dataclass
class TrieNode:
children: Dict[str, "TrieNode"] = field(default_factory=dict)
    # we don't need to save the val; simply using an isEnd flag is enough
value: Optional[Any] = None
class WordDictionary:
def __init__(self):
"""
Initialize your data structure here.
"""
self.root = TrieNode()
def addWord(self, word: str) -> None:
"""
Adds a word into the data structure.
"""
node = self.root
for char in word:
if char not in node.children:
node.children[char] = TrieNode()
node = node.children[char]
node.value = word
def searchHelper(self, word: str, index: int, node: TrieNode) -> bool:
if index == len(word):
            return node.value is not None
if word[index] == ".":
            return any(
                self.searchHelper(word, index + 1, node.children[child])
                for child in node.children
            )
if word[index] not in node.children:
return False
return self.searchHelper(word, index + 1, node.children[word[index]])
def search(self, word: str) -> bool:
"""
Returns if the word is in the data structure. A word could contain the dot character '.' to represent any one letter.
"""
return self.searchHelper(word, 0, self.root)
if __name__ == "__main__":
obj = WordDictionary()
for word in ["bad", "dad", "mad", "pad"]:
obj.addWord(word)
for word in ["bad", ".ad", "b.."]:
print(f"{obj.search(word)=}")
|
[
"tyivanwu@gmail.com"
] |
tyivanwu@gmail.com
|
9bdd0b54603f4bced8f4c82edb28d3dca4e88841
|
4a191e5aecd53c4cea28482a0179539eeb6cd74b
|
/blogproject/settings.py
|
a7f99cc15f936dff53808f9385c2c2992e57abbc
|
[] |
no_license
|
jiangjingwei/blogproject
|
631a2e8e2f72420cce45ddaf152174852376d831
|
daf14e88092dc030a3ab0c295ee06fb6b2164372
|
refs/heads/master
| 2020-03-14T23:29:08.052253
| 2018-05-10T11:35:59
| 2018-05-10T11:35:59
| 131,846,149
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,596
|
py
|
"""
Django settings for blogproject project.
Generated by 'django-admin startproject' using Django 1.10.6.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'm&=b!x8(eqh&ek!4e_)#h@=g$6sjfd1ulx*exs4$d1!h&tef@@'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['127.0.0.1', 'localhost', '.jjwxy.com', '139.196.81.14']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'gunicorn',
'blog',
'comments',
'haystack',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'blogproject.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'blogproject.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'zh-Hans'
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, "static")
HAYSTACK_CONNECTIONS = {
'default': {
'ENGINE': 'blog.whoosh_cn_backend.WhooshEngine',
'PATH': os.path.join(BASE_DIR, 'whoosh_index'),
},
}
HAYSTACK_SEARCH_RESULTS_PER_PAGE = 10
HAYSTACK_SIGNAL_PROCESSOR = 'haystack.signals.RealtimeSignalProcessor'
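# --- Editor's note (assumes django-haystack as configured above) -----------
# With the Whoosh backend, the search index is typically (re)built and kept
# fresh via haystack's management commands, e.g.:
#   python manage.py rebuild_index
#   python manage.py update_index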
|
[
"270159429@qq.com"
] |
270159429@qq.com
|
795985da57f6d924af7ddb13359a42bc964faca8
|
334d0a4652c44d0c313e11b6dcf8fb89829c6dbe
|
/checkov/terraform/checks/resource/aws/ImagebuilderImageRecipeEBSEncrypted.py
|
754146fc760da36332b301b41159066dcef14f23
|
[
"Apache-2.0"
] |
permissive
|
schosterbarak/checkov
|
4131e03b88ae91d82b2fa211f17e370a6f881157
|
ea6d697de4de2083c8f6a7aa9ceceffd6b621b58
|
refs/heads/master
| 2022-05-22T18:12:40.994315
| 2022-04-28T07:44:05
| 2022-04-28T07:59:17
| 233,451,426
| 0
| 0
|
Apache-2.0
| 2020-03-23T12:12:23
| 2020-01-12T20:07:15
|
Python
|
UTF-8
|
Python
| false
| false
| 1,199
|
py
|
from typing import Dict, List, Any
from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck
from checkov.common.models.enums import CheckCategories, CheckResult
class ImagebuilderImageRecipeEBSEncrypted(BaseResourceCheck):
def __init__(self):
name = "Ensure that Image Recipe EBS Disk are encrypted with CMK"
id = "CKV_AWS_200"
supported_resources = ["aws_imagebuilder_image_recipe"]
categories = [CheckCategories.ENCRYPTION]
super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
def scan_resource_conf(self, conf: Dict[str, List[Any]]) -> CheckResult:
        mappings = conf.get('block_device_mapping')
        if mappings:
            for mapping in mappings:
                if mapping.get("ebs"):
                    ebs = mapping["ebs"][0]
                    if not ebs.get("encrypted"):
                        return CheckResult.FAILED
                    if not ebs.get("kms_key_id"):
                        return CheckResult.FAILED
        # fall through: no unencrypted or CMK-less EBS volume was found
        return CheckResult.PASSED
check = ImagebuilderImageRecipeEBSEncrypted()
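# --- Editor's sketch (hand-built conf dicts; checkov normally parses these
# from Terraform HCL, where each attribute value arrives wrapped in a list) --
if __name__ == "__main__":
    passing = {"block_device_mapping": [
        {"ebs": [{"encrypted": [True], "kms_key_id": ["example-key-id"]}]}]}
    failing = {"block_device_mapping": [{"ebs": [{}]}]}  # no encryption keys
    print(check.scan_resource_conf(passing))  # CheckResult.PASSED
    print(check.scan_resource_conf(failing))  # CheckResult.FAILED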
|
[
"noreply@github.com"
] |
schosterbarak.noreply@github.com
|
0a3714c4393419c790f0b83b5e274f57f3d9effd
|
c140ad38b1463024e289ceb0d5d6d44a45c91724
|
/test/test_sed.py
|
de9b9a31a4d19a6bce8f59f8af9aff375038c1e6
|
[
"Apache-2.0"
] |
permissive
|
NVIDIA/hpc-container-maker
|
3a333526decbd18352ef8d1fb3bec0033be221e8
|
60fd2a51c171258a6b3f93c2523101cb7018ba1b
|
refs/heads/master
| 2023-08-21T13:32:27.132476
| 2023-06-12T21:12:40
| 2023-06-12T21:12:40
| 126,385,168
| 419
| 88
|
Apache-2.0
| 2023-09-11T18:33:26
| 2018-03-22T19:26:41
|
Python
|
UTF-8
|
Python
| false
| false
| 1,626
|
py
|
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=invalid-name, too-few-public-methods
"""Test cases for the sed module"""
from __future__ import unicode_literals
from __future__ import print_function
import logging # pylint: disable=unused-import
import unittest
from hpccm.templates.sed import sed
class Test_sed(unittest.TestCase):
def setUp(self):
"""Disable logging output messages"""
logging.disable(logging.ERROR)
def test_basic(self):
"""Basic sed"""
s = sed()
self.assertEqual(s.sed_step(file='foo',
patterns=[r's/a/A/g',
r's/FOO = BAR/FOO = BAZ/g']),
r'''sed -i -e s/a/A/g \
-e 's/FOO = BAR/FOO = BAZ/g' foo''')
def test_nofile(self):
"""No file specified"""
s = sed()
self.assertEqual(s.sed_step(patterns=[r's/a/A/g']), '')
def test_nopatterns(self):
"""No patterns specified"""
s = sed()
self.assertEqual(s.sed_step(file='foo'), '')
|
[
"noreply@github.com"
] |
NVIDIA.noreply@github.com
|
da0e360ef04be5b4a9aef897331aa98e4b9ce97c
|
4d93c6999f1c938f12b7ff6fb779557e1a77479f
|
/chapter11/names.py
|
37cab2bbcddd9ca3a3f64613ed94eea1aa8473fc
|
[] |
no_license
|
MadhuV99/pywork
|
5efd1aac74f2c88413bb90bbc9e0d0c250057e7c
|
81ea17d8bed89ba57cdd35d2ceb0560f68a21cc8
|
refs/heads/main
| 2023-01-20T06:50:03.004849
| 2020-11-29T16:01:06
| 2020-11-29T16:01:06
| 312,609,445
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 422
|
py
|
# names.py
from name_function import get_formatted_name
print("Enter 'q' at any time to quit.")
while True:
first = input("\nPlease give me a first name: ")
if first.strip().lower() == 'q':
break
last = input("Please give me a last name: ")
if last.strip().lower() == 'q':
break
formatted_name = get_formatted_name(first, last)
print(f"\tNeatly formatted name: {formatted_name}.")
|
[
"madhuvasudevan@yahoo.com"
] |
madhuvasudevan@yahoo.com
|
4869a312afecf5587acf929abf9f9adcd24f3ff4
|
3a50c0712e0a31b88d0a5e80a0c01dbefc6a6e75
|
/thrift/lib/python/any/test/serializer.py
|
e10f1866cd979f95c40dfcde5b071bca2dbe8ba4
|
[
"Apache-2.0"
] |
permissive
|
facebook/fbthrift
|
3b7b94a533666c965ce69cfd6054041218b1ea6f
|
53cf6f138a7648efe5aef9a263aabed3d282df91
|
refs/heads/main
| 2023-08-24T12:51:32.367985
| 2023-08-24T08:28:35
| 2023-08-24T08:28:35
| 11,131,631
| 2,347
| 666
|
Apache-2.0
| 2023-09-01T01:44:39
| 2013-07-02T18:15:51
|
C++
|
UTF-8
|
Python
| false
| false
| 7,122
|
py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import typing
import unittest
from apache.thrift.type.standard.thrift_types import TypeName, Void
from apache.thrift.type.type.thrift_types import Type
from folly.iobuf import IOBuf
from testing.thrift_types import Color
from thrift.python.any.serializer import (
deserialize_list,
deserialize_map,
deserialize_primitive,
deserialize_set,
serialize_list,
serialize_map,
serialize_primitive,
serialize_set,
)
from thrift.python.any.typestub import PrimitiveType, SerializableType, TKey, TValue
# @manual=//thrift/test/testset:testset-python-types
from thrift.test.testset import thrift_types
class SerializerTests(unittest.TestCase):
def _test_round_trip(
self, value: PrimitiveType, thrift_type: typing.Optional[Type] = None
) -> None:
iobuf = serialize_primitive(value, thrift_type=thrift_type)
decoded = deserialize_primitive(type(value), iobuf, thrift_type=thrift_type)
self.assertIs(type(value), type(decoded))
if isinstance(value, float):
assert isinstance(decoded, float)
self.assertAlmostEqual(float(value), float(decoded), places=3)
else:
self.assertEqual(value, decoded)
def test_bool_round_trip(self) -> None:
self._test_round_trip(True)
def test_int_round_trip(self) -> None:
self._test_round_trip(42)
def test_float_round_trip(self) -> None:
self._test_round_trip(123456.789)
def test_str_round_trip(self) -> None:
self._test_round_trip("thrift-python")
def test_bytes_round_trip(self) -> None:
self._test_round_trip(b"raw bytes")
def test_iobuf_round_trip(self) -> None:
self._test_round_trip(IOBuf(b"iobuf"))
def test_enum_round_trip(self) -> None:
self._test_round_trip(Color.green)
def _test_round_trip_with_type_names(
self, value: PrimitiveType, type_names: typing.Sequence[TypeName]
) -> None:
for type_name in type_names:
with self.subTest(type_name=type_name):
self._test_round_trip(value, thrift_type=Type(name=type_name))
def test_int_round_trip_with_type_name(self) -> None:
self._test_round_trip_with_type_names(
42,
[
TypeName(byteType=Void.Unused),
TypeName(i16Type=Void.Unused),
TypeName(i32Type=Void.Unused),
TypeName(i64Type=Void.Unused),
],
)
def test_float_round_trip_with_type_name(self) -> None:
self._test_round_trip_with_type_names(
123456.789,
[
TypeName(floatType=Void.Unused),
TypeName(doubleType=Void.Unused),
],
)
def _test_list_round_trip(
self,
value: typing.Sequence[SerializableType],
) -> None:
iobuf = serialize_list(value)
decoded = deserialize_list(
type(value[0]) if value else str,
iobuf,
)
self.assertEqual(value, decoded)
def test_empty_list_round_trip(self) -> None:
self._test_list_round_trip([])
def test_list_of_ints_round_trip(self) -> None:
self._test_list_round_trip([1, 1, 2, 3, 5, 8])
def test_list_of_structs_round_trip(self) -> None:
self._test_list_round_trip(
[
thrift_types.struct_map_string_i32(field_1={"one": 1}),
thrift_types.struct_map_string_i32(field_1={"two": 2}),
]
)
def test_list_of_unions_round_trip(self) -> None:
self._test_list_round_trip(
[
thrift_types.union_map_string_string(field_2={"foo": "bar"}),
thrift_types.union_map_string_string(field_2={"hello": "world"}),
]
)
def test_list_of_exceptions_round_trip(self) -> None:
self._test_list_round_trip(
[
thrift_types.exception_map_string_i64(field_1={"code": 400}),
thrift_types.exception_map_string_i64(field_1={"code": 404}),
]
)
def test_thrift_list_round_trip(self) -> None:
self._test_list_round_trip(
thrift_types.struct_list_i32(field_1=[1, 2, 3, 4]).field_1
)
def _test_set_round_trip(
self,
value: typing.AbstractSet[SerializableType],
) -> None:
iobuf = serialize_set(value)
decoded = deserialize_set(
type(next(iter(value))) if value else bytes, # doesn't matter for empty set
iobuf,
)
self.assertEqual(value, decoded)
def test_empty_set_round_trip(self) -> None:
self._test_set_round_trip(set())
def test_set_of_ints_round_trip(self) -> None:
self._test_set_round_trip({1, 1, 2, 3, 5, 8})
def test_set_of_structs_round_trip(self) -> None:
self._test_set_round_trip(
{
thrift_types.struct_map_string_i32(field_1={"one": 1}),
thrift_types.struct_map_string_i32(field_1={"two": 2}),
}
)
def test_thrift_set_round_trip(self) -> None:
self._test_set_round_trip(
thrift_types.struct_set_i64(field_1={1, 2, 3, 4}).field_1
)
def _test_map_round_trip(
self,
original: typing.Mapping[TKey, TValue],
) -> None:
iobuf = serialize_map(original)
if original:
k, v = next(iter(original.items()))
key_cls = type(k)
value_cls = type(v)
else:
key_cls = bool # doesn't matter for empty dict
value_cls = bool # doesn't matter for empty dict
decoded = deserialize_map(
key_cls,
value_cls,
iobuf,
)
self.assertEqual(original, decoded)
def test_empty_map_round_trip(self) -> None:
self._test_map_round_trip({})
def test_int_to_str_map_round_trip(self) -> None:
self._test_map_round_trip({1: "one", 2: "two"})
def test_str_to_struct_map_round_trip(self) -> None:
self._test_map_round_trip(
{
"one": thrift_types.struct_map_string_i32(field_1={"one": 1}),
"two": thrift_types.struct_map_string_i32(field_1={"two": 2}),
}
)
def test_thrift_map_round_trip(self) -> None:
self._test_map_round_trip(
thrift_types.struct_map_string_i32(field_1={"one": 1}).field_1
)
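# --- Editor's sketch (assumes the thrift-python runtime imported above) ----
# The helpers round-trip plain Python values through an IOBuf, e.g.:
#   buf = serialize_primitive(42)
#   assert deserialize_primitive(int, buf) == 42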
|
[
"facebook-github-bot@users.noreply.github.com"
] |
facebook-github-bot@users.noreply.github.com
|
f0e30cd721e3980995d0f449df77418b9cfddd8a
|
30e1dc84fe8c54d26ef4a1aff000a83af6f612be
|
/deps/src/libxml2-2.9.1/python/tests/reader5.py
|
220a3e5bbafc048a0ea35a0277fe47bbaca38f99
|
[
"MIT",
"BSD-3-Clause"
] |
permissive
|
Sitispeaks/turicreate
|
0bda7c21ee97f5ae7dc09502f6a72abcb729536d
|
d42280b16cb466a608e7e723d8edfbe5977253b6
|
refs/heads/main
| 2023-05-19T17:55:21.938724
| 2021-06-14T17:53:17
| 2021-06-14T17:53:17
| 385,034,849
| 1
| 0
|
BSD-3-Clause
| 2021-07-11T19:23:21
| 2021-07-11T19:23:20
| null |
UTF-8
|
Python
| false
| false
| 1,246
|
py
|
#!/usr/bin/python -u
#
# this tests the Expand() API of the xmlTextReader interface
# this extract the Dragon bibliography entries from the XML specification
#
import libxml2
import sys
# Memory debug specific
libxml2.debugMemory(1)
expect="""<bibl id="Aho" key="Aho/Ullman">Aho, Alfred V.,
Ravi Sethi, and Jeffrey D. Ullman.
<emph>Compilers: Principles, Techniques, and Tools</emph>.
Reading: Addison-Wesley, 1986, rpt. corr. 1988.</bibl>"""
f = open('../../test/valid/REC-xml-19980210.xml', 'rb')
input = libxml2.inputBuffer(f)
reader = input.newTextReader("REC")
res=""
while reader.Read() > 0:
while reader.Name() == 'bibl':
node = reader.Expand() # expand the subtree
if node.xpathEval("@id = 'Aho'"): # use XPath on it
res = res + node.serialize()
if reader.Next() != 1: # skip the subtree
break;
if res != expect:
print("Error: didn't get the expected output")
print("got '%s'" % (res))
print("expected '%s'" % (expect))
#
# cleanup
#
del input
del reader
# Memory debug specific
libxml2.cleanupParser()
if libxml2.debugMemory(1) == 0:
print("OK")
else:
print("Memory leak %d bytes" % (libxml2.debugMemory(1)))
libxml2.dumpMemory()
|
[
"znation@apple.com"
] |
znation@apple.com
|
8528817f2e818ab95c640dec2fbc42d988e68de4
|
8bd63bc56b39d26458ad54b7f18c4b149c1e3ce2
|
/sphinx-files/rst-files/Data/code/2011/11/000032/binary_liquid_mixture_immiscibility_and_stability.py
|
d86c0be99462bbdd27b0749ff15621910c02ba82
|
[
"CC0-1.0",
"LicenseRef-scancode-public-domain",
"LicenseRef-scancode-public-domain-disclaimer"
] |
permissive
|
isabella232/scipy-central-rescue
|
43270c0e1850b989fbe9a5b1a06c3be11d16464a
|
2b331610d52c189ae96bea4f4ce2ec343146b608
|
refs/heads/master
| 2021-09-06T09:17:30.627497
| 2018-02-04T19:41:11
| 2018-02-04T19:41:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,469
|
py
|
# License: Creative Commons Zero (almost public domain) http://scpyce.org/cc0
# Determines regions of immiscibility and any limits of essential instability
# for a binary liquid mixture of components B and C. The excess Gibbs energy of
# mixing is given explicitly by an empirical equation:
#   deltaGex/RT = xB*xC*[k1 + k2*(xB - xC) + k3*(xB - xC)^2],  where xB + xC = 1
import numpy as np
from matplotlib.pylab import *
# These are the functions called by the bisection method
def f(x, id):
if id == 1:
return (-2 * (k1 + k2 * (6 * x - 3) + k3 * (24 * x**2 - 24 * x + 5))
+ 1 / (x - x**2))
elif id == 2:
return (-2 * k1 * x + k1 + k2 * (-6 * x**2 + 6 * x - 1) + k3 *
( -16 * x**3 + 24 * x**2 - 10 * x + 1) + log(x) - log(1 - x))
elif id == 3:
return (dummys - (-2 * k1 * x + k1 + k2 * (-6 * x**2 + 6 * x - 1) +
k3 * (-16 * x**3 + 24 * x**2 - 10 * x + 1) +
log(x) - log(1 - x)))
#This function is to calculate values for the y-axis on the figure
def g(x):
return (x * (1 - x) * (k1 + k2 * (x - (1 - x)) + k3 * (x - (1 - x))**2) +
x * log(x) + (1 - x) * log(1 - x))
#The incremental search method is used to start off the bisection method
def incremental(x0,xf,id):
dx = (xf - x0) / 998
for i in range(998):
y1 = f(x0,id)
y2 = f(x0 + (i + 1) * dx,id)
if y1 * y2 < 0:
for j in range(10):
y1 = f(x0 + i * dx,id)
y2 = f(x0 + i * dx + (j + 1) * dx/10,id)
if y1 * y2 < 0:
x1 = x0 + i * dx + j * dx / 10
x2 = x0 + i * dx + (j + 1) * dx / 10
y1 = f(x1,id)
y2 = f(x2,id)
return x1, x2, y1, y2
# Bisection method used to solve for non-linear equation
def bisec(x0,xf,id):
x1, x2, y1, y2 = incremental(x0,xf,id)
e = 1
while e > 1e-6:
x3 = (x1 + x2) / 2
y3 = f(x3,id)
if y1 * y3 < 0:
x2 = x3
y2 = y3
else:
x1 = x3
y1 = y3
e = abs(1 - (x1 / x2))
return x2
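# --- Editor's sketch (illustrative only, independent of f/id above) --------
# The same bracketing-plus-bisection idea on a standalone function, to make
# the relative-tolerance convergence test concrete:
def _demo_bisect(func, x1, x2, tol=1e-6):
    # assumes func(x1) and func(x2) bracket a root (opposite signs)
    while abs(1 - x1 / x2) > tol:
        x3 = 0.5 * (x1 + x2)
        if func(x1) * func(x3) < 0:
            x2 = x3
        else:
            x1 = x3
    return x2
# _demo_bisect(lambda x: x**2 - 2.0, 1.0, 2.0) is ~1.414214 (sqrt(2))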
# Constants
k1 = 2.0
k2 = 0.2
k3 = -0.8
#Set up vectors of composition values
xB = np.linspace(0.001,0.999,101)
xC = 1 - xB
#This is deltaG/RT calculated from the excess Gibbs given at top
deltaGoverRT = (xB * xC * (k1 + k2 * (xB - xC) + k3 * (xB - xC)**2) +
xB * log(xB) + xC * log(xC))
#First and second derivative of deltaG/RT
derivative = (-2 * k1 * xB + k1 + k2 * (-6 * xB**2 + 6 * xB - 1) + k3 *
(-16 * xB**3 + 24 * xB**2 - 10 * xB + 1) + log(xB) - log(1 - xB))
derivative2 = (-2 * (k1 + k2 * (6 * xB - 3) + k3 * (24 * xB**2 - 24 * xB + 5))
+ 1 / (xB - xB**2))
#find spinodal points for instability region using bisection method
xspin1 = bisec(0.001, 0.999, 1)
xspin2 = bisec(xspin1, 0.999, 1)
#initial guess at binodal points at minima of function
xB1 = bisec(0.001, 0.999, 2)
xB2 = bisec(xB1, 0.999, 2)
xB3 = bisec(xB2, 0.999, 2)
xBa = xB1
xBb = xB3
#Solve for binodal points using bisection method
converged = False
while not converged:
dummys = (g(xBb) - g(xBa)) / (xBb - xBa) #dummy slope
e = abs(1 - (dummys / f(xBb, 2)))
if e < 1e-4:
converged = True
else:
xBa = bisec(0.001, 0.999, 3)
xBu = bisec(xBa, 0.999, 3)
xBb = bisec(xBu, 0.999, 3)
yint = g(xBa) - dummys * xBa
y = yint + dummys * xB
figure()
plot(xB, deltaGoverRT, '-')
plot(xB, y, '-')
plot(xB1, g(xB1), '.', color='blue', markersize=12)
plot(xB3, g(xB3), '.', color='blue', markersize=12)
plot(xBa, g(xBa), '.', color='red', markersize=12)
plot(xBb, g(xBb), '.', color='red', markersize=12)
plot(xspin1, g(xspin1), '.', color='orange', markersize=12)
plot(xspin2, g(xspin2), '.', color='orange', markersize=12)
grid('on')
xlabel(' xB ')
ylabel(' deltaG/RT ')
title('DeltaG/RT vs xB')
show()
print 'There is one-phase instability between xB = ', "%.2f" % xspin1, \
    'and xB = ', "%.2f" % xspin2
print '(Orange points on figure, "spinodal points")'
print 'The region of immiscibility is between xB = ', "%.2f" % xBa, \
    'and xB = ', "%.2f" % xBb
print '(Red points on figure, "binodal points")'
print 'Blue points on figure show the minima, which do not coincide with the binodal points'
|
[
"jiayue.li@berkeley.edu"
] |
jiayue.li@berkeley.edu
|
7e7d1b9026dfc15931ee9281fa1c3cbbd6ee0303
|
c818eafff8fb9cfb052e9c016aa7de67de246f21
|
/sales/migrations/0027_remove_return_receipt.py
|
b27fbc4fb3e044ba81c8479d9cf55a5e81ca6c45
|
[] |
no_license
|
mugagambi/mgh-server
|
a4275b07243f476db9d63e568c8b9331190b75f0
|
da966882bd695df606622ab816cd93fab1d53773
|
refs/heads/master
| 2021-10-22T05:52:15.354561
| 2019-03-08T11:50:19
| 2019-03-08T11:50:19
| 120,087,420
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 327
|
py
|
# Generated by Django 2.0.3 on 2018-04-17 15:46
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('sales', '0026_return_approved_by'),
]
operations = [
migrations.RemoveField(
model_name='return',
name='receipt',
),
]
|
[
"mugagambi@gmail.com"
] |
mugagambi@gmail.com
|
7c30ca77ff7ab8d16b8eccdf763b818abbd72e45
|
ac810c7e637afd67cf19704a1a724eaac56fed93
|
/Hackerrank_python/4.sets/30.Symmetric Difference.py
|
880ecccb9397cfde54a05b96340d08a4c960acc0
|
[
"MIT"
] |
permissive
|
Kushal997-das/Hackerrank
|
57e8e422d2b47d1f2f144f303a04f32ca9f6f01c
|
1256268bdc818d91931605f12ea2d81a07ac263a
|
refs/heads/master
| 2021-10-28T06:27:58.153073
| 2021-10-18T04:11:18
| 2021-10-18T04:11:18
| 298,875,299
| 41
| 8
|
MIT
| 2021-03-01T04:40:57
| 2020-09-26T18:26:19
|
Python
|
UTF-8
|
Python
| false
| false
| 194
|
py
|
# Enter your code here. Read input from STDIN. Print output to STDOUT
M=input()
x=set(map(int,input().split()))
N=input()
y=set(map(int,input().split()))
f=x^y
for i in sorted(f):
print (i)
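# --- Editor's note: '^' on sets is shorthand for symmetric_difference, e.g.:
assert ({1, 2} ^ {2, 3}) == {1, 3}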
|
[
"noreply@github.com"
] |
Kushal997-das.noreply@github.com
|
546f4b0a7b9e573b93414313b979be1eeb48b1b5
|
b43c6c03eea348d68d6582c3594760bbe0ecaa08
|
/gitlab/tests/conftest.py
|
929f128062588569e26097e5ee90aebcc993b89f
|
[
"MIT"
] |
permissive
|
imsardine/learning
|
1b41a13a4c71c8d9cdd8bd4ba264a3407f8e05f5
|
925841ddd93d60c740a62e12d9f57ef15b6e0a20
|
refs/heads/master
| 2022-12-22T18:23:24.764273
| 2020-02-21T01:35:40
| 2020-02-21T01:35:40
| 24,145,674
| 0
| 0
|
MIT
| 2022-12-14T20:43:28
| 2014-09-17T13:24:37
|
Python
|
UTF-8
|
Python
| false
| false
| 2,278
|
py
|
import os
import json
import urllib
import urllib2
from os import path
from subprocess import Popen, PIPE
import pytest
class DataFileHelper(object):
def __init__(self, base_dir):
self._base_dir = base_dir
def abspath(self, fn):
return path.join(self._base_dir, fn)
def relpath(self, fn):
return path.relpath(self.abspath(fn)) # relative to CWD
def read(self, fn, encoding=None):
with open(self.abspath(fn), 'rb') as f:
data = f.read()
return data.decode(encoding) if encoding else data
def json(self, fn, encoding='utf-8'):
import json
return json.loads(self.read(fn, encoding))
class CommandLine(object):
def __init__(self, base_dir):
self._base_dir = base_dir
def run(self, cmdline, cwd=None):
_cwd = os.getcwd()
assert path.isabs(_cwd), _cwd
os.chdir(self._base_dir)
if cwd:
os.chdir(cwd) # absolute or relative to base dir
try:
p = Popen(cmdline, stdout=PIPE, stderr=PIPE, shell=True)
out, err = p.communicate()
return CommandLineResult(
out.decode('utf-8'), err.decode('utf-8'), p.returncode)
finally:
os.chdir(_cwd)
class CommandLineResult(object):
def __init__(self, out, err, rc):
self.out = out
self.err = err
self.rc = rc
@pytest.fixture
def testdata(request):
base_dir = path.dirname(request.module.__file__)
return DataFileHelper(base_dir)
@pytest.fixture
def cli(request):
base_dir = path.dirname(request.module.__file__)
return CommandLine(base_dir)
class GitLabAPI():
def __init__(self, url, access_token):
self._url = url
self._access_token = access_token
def _request(self, endpoint):
request = urllib2.Request(self._url + endpoint)
request.add_header('Private-Token', self._access_token)
return request
def get(self, endpoint, params={}):
qs = urllib.urlencode(params)
resp = urllib2.urlopen(self._request(endpoint + '?' + qs))
return json.loads(resp.read())
@pytest.fixture
def gitlab():
return GitLabAPI(
os.environ['GITLAB_URL'],
os.environ['GITLAB_ACCESS_TOKEN'])
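# --- Editor's sketch (commented out; requires GITLAB_URL and
# GITLAB_ACCESS_TOKEN in the environment; '/api/v4/projects' is a standard
# GitLab v4 endpoint) --------------------------------------------------------
# def test_projects(gitlab):
#     projects = gitlab.get('/api/v4/projects', {'per_page': 5})
#     assert isinstance(projects, list)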
|
[
"imsardine@gmail.com"
] |
imsardine@gmail.com
|
e4b9f8ab5bb19544e331e60d7ba9441168e86c0f
|
3c9727c4b5a89684b861fa90424e43c5a914ea45
|
/Production/test/get_py.py
|
5234fbfb2b6066772be282bce7ee1e8393f89862
|
[] |
no_license
|
vhegde91/TreeMaker
|
f51b453243081ccef0cfa721468ed7f7f9ca51f2
|
e9dc3e3de793250980b29bebfef9b07c78bc97f7
|
refs/heads/Run2
| 2021-08-11T03:45:45.430562
| 2018-04-11T14:30:28
| 2018-04-11T14:30:28
| 78,883,127
| 0
| 0
| null | 2017-01-13T20:14:01
| 2017-01-13T20:14:01
| null |
UTF-8
|
Python
| false
| false
| 6,669
|
py
|
import re,sys,getopt,urllib2,json
from dbs.apis.dbsClient import DbsApi
from optparse import OptionParser
# Read parameters
parser = OptionParser()
parser.add_option("-d", "--dict", dest="dict", default="", help="check for samples listed in this dict (default = %default)")
parser.add_option("-p", "--py", dest="py", default=False, action="store_true", help="generate python w/ list of files (default = %default)")
parser.add_option("-w", "--wp", dest="wp", default=False, action="store_true", help="generate WeightProducer lines (default = %default)")
parser.add_option("-s", "--se", dest="se", default=False, action="store_true", help="make list of sites with 100% hosting (default = %default)")
(options, args) = parser.parse_args()
dictname = options.dict.replace(".py","")
flist = __import__(dictname).flist
makepy = options.py
makewp = options.wp
makese = options.se
if not makepy and not makewp and not makese:
parser.error("No operations selected!")
#interface with DBS
dbs3api = DbsApi("https://cmsweb.cern.ch/dbs/prod/global/DBSReader")
# format for dict entries:
#   data:                                                  [['sample'] , []]
#   MC:                                                    [['sample'] , [xsec]]
#   MC w/ extended sample:                                 [['sample','sample_ext'] , [xsec]]
#   MC w/ negative weights (amcatnlo):                     [['sample'] , [xsec, neff]]
#   MC w/ negative weights (amcatnlo) + extended sample:   [['sample','sample_ext'] , [xsec, neff, neff_ext]]
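# Editor's sketch: hypothetical entries in the formats above (the sample
# names are placeholders, not real datasets):
#   [['/SampleData/Run-X/FORMAT'], []]                                    # data
#   [['/SampleMC/Era-Y/FORMAT'], [123.4]]                                 # MC
#   [['/SampleMC/Era-Y/FORMAT'], [123.4, 1.0e6]]                          # MC w/ negative weights
#   [['/SampleMC/Era-Y/FORMAT', '/SampleMC_ext1/Era-Y/FORMAT'], [123.4, 1.0e6, 2.0e6]]  # w/ ext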
if makewp:
wname = "weights_"+dictname+".txt"
wfile = open(wname,'w')
if makese:
sname = "sites_"+dictname+".txt"
sfile = open(sname,'w')
for fitem in flist:
ff = fitem[0]
x = fitem[1]
nevents_all = []
for f in ff: # in case of extended samples
if makepy:
#get sample name
oname = f.split('/')[1]
#check for extended sample
extcheck = re.search("ext[0-9]",f.split('/')[2])
if not extcheck==None and len(extcheck.group(0))>0: oname = oname+"_"+extcheck.group(0)
#make python file with preamble
pfile = open(oname+"_cff.py",'w')
pfile.write("import FWCore.ParameterSet.Config as cms\n\n")
pfile.write("maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) )\n")
pfile.write("readFiles = cms.untracked.vstring()\n")
pfile.write("secFiles = cms.untracked.vstring()\n")
pfile.write("source = cms.Source (\"PoolSource\",fileNames = readFiles, secondaryFileNames = secFiles)\n")
#get dataset info - detail only needed in makewp case
filelist = []
nevents = 0
print f
fileArrays = dbs3api.listFileArray(dataset=f,detail=makewp)
for fileArray in fileArrays:
if makepy:
filelist.append(fileArray["logical_file_name"])
if makewp:
nevents += fileArray["event_count"]
nevents_all.append(nevents)
# check for sites with 100% dataset presence (using PhEDEx API)
# refs:
# https://github.com/dmwm/DAS/blob/master/src/python/DAS/services/combined/combined_service.py
# https://github.com/gutsche/scripts/blob/master/PhEDEx/checkLocation.py
if makese:
url='https://cmsweb.cern.ch/phedex/datasvc/json/prod/blockreplicas?dataset=' + f
jstr = urllib2.urlopen(url).read()
jstr = jstr.replace("\n", " ")
result = json.loads(jstr)
site_list = {}
for block in result['phedex']['block']:
for replica in block['replica']:
site = replica['node']
addr = replica['se']
#safety checks
if site is None: continue
if addr is None: addr = ""
                if (site,addr) not in site_list: site_list[(site,addr)] = 0
site_list[(site,addr)] += replica['files']
# get total number of expected files from DBS
nfiles_tot = len(fileArrays)
# calculate dataset fraction (presence) in % and check for completion
highest_percent = 0
for site,addr in site_list:
this_percent = float(site_list[(site,addr)])/float(nfiles_tot)*100
site_list[(site,addr)] = this_percent
if this_percent > highest_percent: highest_percent = this_percent
sfile.write(f+"\n")
if highest_percent < 100:
sfile.write(" !!! No site has complete dataset !!! ( Highest: "+str(highest_percent)+"% )\n")
for site,addr in site_list:
this_percent = site_list[(site,addr)]
if this_percent==highest_percent:
sfile.write(" "+site+" ("+addr+")\n")
if makepy:
#sort list of files for consistency
filelist.sort()
counter = 0
#split into chunks of 255
for lfn in filelist:
if counter==0: pfile.write("readFiles.extend( [\n")
pfile.write(" '"+lfn+"',\n")
if counter==254 or lfn==filelist[-1]:
pfile.write("] )\n")
counter = 0
else:
counter += 1
#only do weightproducer stuff for MC (w/ xsec provided)
if makewp and len(x)>0:
xsec = x[0]
nevents = nevents_all[0]
neff = 0
if len(x)>1: neff = x[1]
#handle combining extended samples
if len(ff)>1:
neff = sum(x[1:])
nevents = sum(nevents_all)
for i,f in enumerate(ff):
#make line for weightproducer
line = " MCSample(\""+f.split('/')[1]+"\", \""+"-".join(f.split('/')[2].split('-')[1:3])+"\", \""+f.split('/')[2].split('-')[0]+"\", \"Constant\", "+str(x[0])+", ";
if neff>0:
line = line+str(neff)+"),"
if len(ff)>1: line = line+" # subtotal = "+str(x[i+1])+", straight subtotal = "+str(nevents_all[i])+"\n"
else: line = line+" # straight total = "+str(nevents)+"\n"
else:
line = line+str(nevents)+"),"
if len(ff)>1: line = line+" # subtotal = "+str(nevents_all[i])+"\n"
else: line = line+"\n"
wfile.write(line)
|
[
"kpedro88@gmail.com"
] |
kpedro88@gmail.com
|
9ba94fdaa0336d97658bb817cac17daeacb40efa
|
11841e8fb1e44c69ae7e50c0b85b324c4d90abda
|
/zipfile1.py
|
57928210c0c0f271bff15ecb5d69c931b5a2dca3
|
[] |
no_license
|
chenlong2019/python
|
1d7bf6fb60229221c79538234ad2f1a91bb03c50
|
fc9e239754c5715a67cb6d743109800b64d74dc8
|
refs/heads/master
| 2020-12-08T11:11:49.951752
| 2020-01-10T04:58:29
| 2020-01-10T04:59:50
| 232,968,232
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 484
|
py
|
import os, zipfile
# Pack a directory into a zip archive (ZIP_STORED by default, i.e. uncompressed)
def make_zip(source_dir, output_filename):
zipf = zipfile.ZipFile(output_filename, 'w')
for parent, dirnames, filenames in os.walk(source_dir):
for filename in filenames:
pathfile = os.path.join(parent, filename)
            zipf.write(pathfile, filename)  # arcname=filename flattens the directory tree
zipf.close()
if __name__ == '__main__':
make_zip("F:\\changshu\\state\\pm25\\PM252019_08_16_16", 'F:\\zip\\PM252019_08_190977_.zip')
|
[
"1174650816@qq.com"
] |
1174650816@qq.com
|
fcb806b070156928c2b03ad6d408e9055efc9a9a
|
3cde5a749af89c9dc4d2aca3fb9bf7c56d9a4a7f
|
/website.py
|
43a44ed6750624703414cd6a969170689fe73bba
|
[] |
no_license
|
akrherz/kimthub
|
b211974c071f6ed5f2caa7349ba8ff8e2ec2f87b
|
028894591841e83ddc35d2157fe4044049d20db8
|
refs/heads/master
| 2020-12-29T00:25:50.689178
| 2019-04-01T15:46:14
| 2019-04-01T15:46:14
| 16,999,566
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 654
|
py
|
# website tool to configure this application
from twisted.web import resource, server
class HomePage(resource.Resource):
def __init__(self, r):
resource.Resource.__init__(self)
self.r = r
def render(self, request):
s = self.r.dumpObs()
        request.setHeader('Content-Length', str(len(s)))
request.setHeader('Content-Type', 'text/plain')
request.setResponseCode(200)
request.write( s )
request.finish()
return server.NOT_DONE_YET
class RootResource(resource.Resource):
def __init__(self, r):
resource.Resource.__init__(self)
self.putChild('', HomePage(r))
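# --- Editor's sketch (assumes a reactor-based entry point elsewhere; 'obs'
# is a hypothetical object exposing dumpObs()) ------------------------------
# from twisted.internet import reactor
# site = server.Site(RootResource(obs))
# reactor.listenTCP(8080, site)
# reactor.run()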
|
[
"akrherz@iastate.edu"
] |
akrherz@iastate.edu
|
148d3e817efcd11b28dcc6c13e49112239d6e335
|
39f95a7b4abe665f1b0e3a0f4b356db002ddce2e
|
/tests/test_exceptions.py
|
3f14f4213f02a864daf23f5b39c6897b7609a298
|
[
"MIT"
] |
permissive
|
gitter-badger/tapioca-wrapper
|
d96a538071d44c36f93f0bbd7318510dfc9f7633
|
4e6dbd85da1a218d00f08fee84dfea29a83d61c3
|
refs/heads/master
| 2021-01-16T18:48:50.848519
| 2015-09-09T15:21:41
| 2015-09-09T15:21:41
| 42,362,017
| 0
| 0
| null | 2015-09-12T15:29:44
| 2015-09-12T15:29:44
|
Python
|
UTF-8
|
Python
| false
| false
| 1,889
|
py
|
# coding: utf-8
from __future__ import unicode_literals
import unittest
import responses
import requests
from tapioca.exceptions import (
ClientError, ServerError, ResponseProcessException)
from tests.client import TesterClient, TesterClientAdapter
class TestExceptions(unittest.TestCase):
def setUp(self):
self.wrapper = TesterClient()
@responses.activate
def test_adapter_raises_response_process_exception_on_400s(self):
responses.add(responses.GET, self.wrapper.test().data(),
body='{"erros": "Server Error"}',
status=400,
content_type='application/json')
response = requests.get(self.wrapper.test().data())
with self.assertRaises(ResponseProcessException):
TesterClientAdapter().process_response(response)
@responses.activate
def test_adapter_raises_response_process_exception_on_500s(self):
responses.add(responses.GET, self.wrapper.test().data(),
body='{"erros": "Server Error"}',
status=500,
content_type='application/json')
response = requests.get(self.wrapper.test().data())
with self.assertRaises(ResponseProcessException):
TesterClientAdapter().process_response(response)
@responses.activate
def test_raises_request_error(self):
responses.add(responses.GET, self.wrapper.test().data(),
body='{"data": {"key": "value"}}',
status=400,
content_type='application/json')
with self.assertRaises(ClientError):
self.wrapper.test().get()
@responses.activate
def test_raises_server_error(self):
responses.add(responses.GET, self.wrapper.test().data(),
status=500,
content_type='application/json')
with self.assertRaises(ServerError):
self.wrapper.test().get()
|
[
"filipeximenes@gmail.com"
] |
filipeximenes@gmail.com
|
1ce8eab6442ed03dd6f60806e1900e36fe0df0d2
|
1670dca534ef4fd7e8d9ca9e6d55b5885e4071f9
|
/AlgoExpert/Day2.py
|
4117f3dcd99612ad1bd1b4b1307b4ac6c8d06480
|
[] |
no_license
|
Tejas1510/Pythonary
|
24512a6c5abfee17457397aa37849f3a5a739002
|
55c11f74d9f540bf696acecaa78febecd14d8422
|
refs/heads/master
| 2022-11-23T23:27:32.219513
| 2020-08-02T17:22:17
| 2020-08-02T17:22:17
| 264,151,076
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 752
|
py
|
# Modification of the previous day's question:
# find a triplet that sums to a given value.
# A brute-force approach requires O(n^3);
# we solve it with a hash map in O(n^2) instead.
# This question has been asked many times in FAANG company interviews.
def Solution(a, TargetSum):
    for i in range(0, len(a) - 1):
        nums = {}
        current_sum = TargetSum - a[i]
        for j in range(i + 1, len(a)):  # only pair with elements after i
            if current_sum - a[j] in nums:
                return [a[j], a[i], current_sum - a[j]]
            else:
                nums[a[j]] = True
    return -1
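# Editor's sketch: with the corrected inner range, e.g.
#   Solution([1, 4, 45, 6, 10, 8], 22)  ->  [8, 4, 10]   (one valid triplet)
# and -1 when no triplet sums to the target.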
t = int(input())
for i in range(t):
    n = int(input())
    a = list(map(int, input().split()))
    TargetSum = int(input())
    res = Solution(a, TargetSum)
    if res == -1:
        print(-1)  # guard: print(*-1) would raise a TypeError
    else:
        print(*res)
|
[
"noreply@github.com"
] |
Tejas1510.noreply@github.com
|
084af231761d48ccdf9950ed5fbab1a7a44f86ab
|
d3efc82dfa61fb82e47c82d52c838b38b076084c
|
/Autocase_Result/SjSzRZMR/YW_RZMR_SZSJ_150.py
|
60204aa1a24e2cc122eacbff5931e061d9482cba
|
[] |
no_license
|
nantongzyg/xtp_test
|
58ce9f328f62a3ea5904e6ed907a169ef2df9258
|
ca9ab5cee03d7a2f457a95fb0f4762013caa5f9f
|
refs/heads/master
| 2022-11-30T08:57:45.345460
| 2020-07-30T01:43:30
| 2020-07-30T01:43:30
| 280,388,441
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,334
|
py
|
#!/usr/bin/python
# -*- encoding: utf-8 -*-
import sys
import json
sys.path.append("/home/yhl2/workspace/xtp_test")
from xtp.api.xtp_test_case import xtp_test_case, Api, unittest
from service.ServiceConfig import *
from financing.service.mainService import ParmIni, serviceTest
from financing.service.QueryStkPriceQty import QueryStkPriceQty
from service.log import *
from financing.service.CaseParmInsertMysql import *
from mysql.QueryOrderErrorMsg import queryOrderErrorMsg
reload(sys)
sys.setdefaultencoding('utf-8')
class YW_RZMR_SZSJ_150(xtp_test_case):
    # YW_RZMR_SZSJ_150
def test_YW_RZMR_SZSJ_150(self):
title = '对方最优转限价买——错误的价格(价格10亿)'
        # Define the expected values for the current test case.
        # Expected status: initial / unfilled / partially filled / fully filled / partial-cancel reported / partially cancelled / reported, pending cancel / cancelled / rejected / cancel rejected / internal cancel
        # xtp_ID and cancel_xtpID default to 0 and need not be changed.
case_goal = {
'期望状态': '全成',
'errorID': 0,
'errorMSG': '',
'是否生成报单': '是',
'是否是撤废': '否',
'xtp_ID': 0,
'cancel_xtpID': 0,
}
logger.warning(title)
        # Define the order parameters ------------------------------------------
        # Parameters: ticker, market, security type, security status, trading status, side (B = buy, S = sell), expected status, Api
stkparm = QueryStkPriceQty('999999', '2', '0', '2', '0', 'B', case_goal['期望状态'], Api)
        # If fetching the order parameters fails, the test case fails.
if stkparm['返回结果'] is False:
rs = {
'用例测试结果': stkparm['返回结果'],
'测试错误原因': '获取下单参数失败,' + stkparm['错误原因'],
}
self.assertEqual(rs['用例测试结果'], True)
else:
wt_reqs = {
'business_type': Api.const.XTP_BUSINESS_TYPE['XTP_BUSINESS_TYPE_MARGIN'],
'order_client_id':2,
'market': Api.const.XTP_MARKET_TYPE['XTP_MKT_SZ_A'],
'ticker': stkparm['证券代码'],
'side': Api.const.XTP_SIDE_TYPE['XTP_SIDE_MARGIN_TRADE'],
'position_effect': Api.const.XTP_POSITION_EFFECT_TYPE['XTP_POSITION_EFFECT_INIT'],
'price_type': Api.const.XTP_PRICE_TYPE['XTP_PRICE_REVERSE_BEST_LIMIT'],
'price': 1000000000,
'quantity': 200
}
ParmIni(Api, case_goal['期望状态'], wt_reqs['price_type'])
CaseParmInsertMysql(case_goal, wt_reqs)
rs = serviceTest(Api, case_goal, wt_reqs)
if rs['用例测试结果']:
logger.warning('执行结果为{0}'.format(str(rs['用例测试结果'])))
else:
logger.warning('执行结果为{0},{1},{2}'.format(
str(rs['用例测试结果']), str(rs['用例错误源']),
json.dumps(rs['用例错误原因'], encoding='UTF-8', ensure_ascii=False)))
self.assertEqual(rs['用例测试结果'], True) # 0
if __name__ == '__main__':
unittest.main()
|
[
"418033945@qq.com"
] |
418033945@qq.com
|
f4a7e68daf70584049de24fcf0e3d0d1aa07c352
|
0ff99fc75b1f42811f72aa86f4b32d1e3f8b6b48
|
/PR_inference/maskrcnn_benchmark/data/datasets/__init__.py
|
a9fdd2e7e4b7c500fbf9a66017cd0a5759e6581a
|
[
"MIT"
] |
permissive
|
alwc/buildings2vec
|
f95a4468a0d5c21f2732c177474350e767d459f7
|
bd5121e715bc28f6e88163f18407a762736d38d5
|
refs/heads/master
| 2022-04-25T05:26:50.053219
| 2020-04-28T20:49:49
| 2020-04-28T20:49:49
| 263,258,064
| 0
| 1
| null | 2020-05-12T06:57:24
| 2020-05-12T06:57:23
| null |
UTF-8
|
Python
| false
| false
| 302
|
py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
from .coco import COCODataset
from .voc import PascalVOCDataset
from .buildings import BuildingsDataset
from .concat_dataset import ConcatDataset
__all__ = ["COCODataset", "ConcatDataset", "PascalVOCDataset", "BuildingsDataset"]
|
[
"ennauata@gmail.com"
] |
ennauata@gmail.com
|
59aeeb5cfbbd52e95cf09691c8180bb4633af9c4
|
321b4ed83b6874eeb512027eaa0b17b0daf3c289
|
/606/606.construct-string-from-binary-tree.234546044.Wrong-Answer.leetcode.py
|
30bee179f09e3deb04c3c0ab49b7e971a008aac3
|
[] |
no_license
|
huangyingw/submissions
|
7a610613bdb03f1223cdec5f6ccc4391149ca618
|
bfac1238ecef8b03e54842b852f6fec111abedfa
|
refs/heads/master
| 2023-07-25T09:56:46.814504
| 2023-07-16T07:38:36
| 2023-07-16T07:38:36
| 143,352,065
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 588
|
py
|
class Solution:
    def tree2str(self, t):
        if not t:
            return ''
        # emit '()' for a missing left child whenever a right child exists,
        # so the tree structure stays unambiguous
        left = '({})'.format(self.tree2str(t.left)) if (t.left or t.right) else ''
        right = '({})'.format(self.tree2str(t.right)) if t.right else ''
        return '{}{}{}'.format(t.val, left, right)
    # NOTE: this second definition shadows the one above (same name), and its
    # blanket replace("()", "") also strips the required empty-left placeholder,
    # which is the likely cause of the Wrong-Answer verdict.
    def tree2str(self, t):
        if not t:
            return ""
        subleft = "(%s)" % (self.tree2str(t.left) if t.left or t.right else "")
        subright = "(%s)" % (self.tree2str(t.right) if t.right else "")
        return ("%s%s%s" % (str(t.val), subleft, subright)).replace("()", "")
|
[
"huangyingw@gmail.com"
] |
huangyingw@gmail.com
|
b21aab70f83a44383ba2584afdf1c8db013d0187
|
48e124e97cc776feb0ad6d17b9ef1dfa24e2e474
|
/sdk/python/pulumi_azure_native/databoxedge/v20210201preview/get_role.py
|
2b3f43979b4fd165e04ffde6de3dfaed95f863d9
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
bpkgoud/pulumi-azure-native
|
0817502630062efbc35134410c4a784b61a4736d
|
a3215fe1b87fba69294f248017b1591767c2b96c
|
refs/heads/master
| 2023-08-29T22:39:49.984212
| 2021-11-15T12:43:41
| 2021-11-15T12:43:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,584
|
py
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetRoleResult',
'AwaitableGetRoleResult',
'get_role',
'get_role_output',
]
warnings.warn("""Please use one of the variants: CloudEdgeManagementRole, IoTRole, KubernetesRole, MECRole.""", DeprecationWarning)
@pulumi.output_type
class GetRoleResult:
"""
Compute role.
"""
def __init__(__self__, id=None, kind=None, name=None, system_data=None, type=None):
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if kind and not isinstance(kind, str):
raise TypeError("Expected argument 'kind' to be a str")
pulumi.set(__self__, "kind", kind)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if system_data and not isinstance(system_data, dict):
raise TypeError("Expected argument 'system_data' to be a dict")
pulumi.set(__self__, "system_data", system_data)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def id(self) -> str:
"""
The path ID that uniquely identifies the object.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def kind(self) -> str:
"""
Role type.
"""
return pulumi.get(self, "kind")
@property
@pulumi.getter
def name(self) -> str:
"""
The object name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="systemData")
def system_data(self) -> 'outputs.SystemDataResponse':
"""
Role configured on ASE resource
"""
return pulumi.get(self, "system_data")
@property
@pulumi.getter
def type(self) -> str:
"""
The hierarchical type of the object.
"""
return pulumi.get(self, "type")
class AwaitableGetRoleResult(GetRoleResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetRoleResult(
id=self.id,
kind=self.kind,
name=self.name,
system_data=self.system_data,
type=self.type)
def get_role(device_name: Optional[str] = None,
name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetRoleResult:
"""
Compute role.
:param str device_name: The device name.
:param str name: The role name.
:param str resource_group_name: The resource group name.
"""
pulumi.log.warn("""get_role is deprecated: Please use one of the variants: CloudEdgeManagementRole, IoTRole, KubernetesRole, MECRole.""")
__args__ = dict()
__args__['deviceName'] = device_name
__args__['name'] = name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:databoxedge/v20210201preview:getRole', __args__, opts=opts, typ=GetRoleResult).value
return AwaitableGetRoleResult(
id=__ret__.id,
kind=__ret__.kind,
name=__ret__.name,
system_data=__ret__.system_data,
type=__ret__.type)
@_utilities.lift_output_func(get_role)
def get_role_output(device_name: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetRoleResult]:
"""
Compute role.
:param str device_name: The device name.
:param str name: The role name.
:param str resource_group_name: The resource group name.
"""
pulumi.log.warn("""get_role is deprecated: Please use one of the variants: CloudEdgeManagementRole, IoTRole, KubernetesRole, MECRole.""")
...
|
[
"noreply@github.com"
] |
bpkgoud.noreply@github.com
|
b2890fee28b3469e99f5ae1c676d8500ba428280
|
48d1bdfe8ef88e9e24e26f05a07b61a220fd5663
|
/tests/settings.py
|
d264de5677cb4acca69cc9729cd414a7b2c6905b
|
[
"MIT"
] |
permissive
|
dejmail/django-data-wizard
|
b2680cf14564e4be3d74c5e63d17060665adfb8d
|
cfb4d00032c73d4b55abceb542b68563f3a79a05
|
refs/heads/master
| 2023-05-10T20:59:46.222978
| 2022-08-18T01:37:40
| 2022-08-18T01:37:40
| 278,087,179
| 0
| 0
|
MIT
| 2020-07-08T12:46:19
| 2020-07-08T12:46:19
| null |
UTF-8
|
Python
| false
| false
| 2,248
|
py
|
import os
TEST_BACKEND = os.environ.get("TEST_BACKEND", "threading")
TEST_VARIANT = os.environ.get("TEST_VARIANT", "default")
WITH_WQDB = TEST_VARIANT == "wq.db"
SECRET_KEY = "1234"
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
]
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
],
},
},
]
if TEST_VARIANT == "wq.db":
WQ_APPS = (
"wq.db.rest",
"wq.db.rest.auth",
)
else:
WQ_APPS = tuple()
if TEST_VARIANT == "reversion":
REVERSION_APPS = ("reversion",)
else:
REVERSION_APPS = tuple()
INSTALLED_APPS = (
(
"django.contrib.contenttypes",
"django.contrib.admin",
"django.contrib.sessions",
"django.contrib.staticfiles",
"django.contrib.auth",
)
+ WQ_APPS
+ REVERSION_APPS
+ (
"data_wizard",
"data_wizard.sources",
"tests.data_app",
"tests.naturalkey_app",
"tests.eav_app",
"tests.source_app",
)
)
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": "data_wizard_test.sqlite3",
}
}
ROOT_URLCONF = "tests.urls"
MEDIA_ROOT = os.path.join(os.path.dirname(__file__), "media")
if TEST_BACKEND == "celery":
CELERY_RESULT_BACKEND = BROKER_URL = "redis://localhost/0"
if TEST_VARIANT == "wq.db":
from wq.db.default_settings import * # noqa
DATA_WIZARD = {
"BACKEND": f"data_wizard.backends.{TEST_BACKEND}",
}
STATIC_URL = "/static/"
DEBUG = True
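# --- Editor's note (hypothetical invocation; the exact runner may differ) ---
# The TEST_BACKEND / TEST_VARIANT switches above are driven via environment
# variables, e.g.:
#   TEST_BACKEND=celery TEST_VARIANT=reversion python -m pytest tests/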
|
[
"andrew@wq.io"
] |
andrew@wq.io
|
c2006c7cd89aca0775e2f8862c0c7c80d2818081
|
6ac683881a26231638ae77261bc1c2e962ed81e6
|
/message/models.py
|
7155156b1bd11413e82722ed09d6d44072e0ac20
|
[] |
no_license
|
tenshiPure/chat
|
a3deea994d106b27bdcf7c8ac6bc21987b853601
|
c10489b87814033ffbd4f50d0eebc3b9e1c364d4
|
refs/heads/master
| 2016-09-06T02:24:40.094709
| 2014-02-06T03:37:06
| 2014-02-06T03:37:06
| 16,363,786
| 0
| 0
| null | 2016-02-20T02:14:08
| 2014-01-30T00:12:25
|
Python
|
UTF-8
|
Python
| false
| false
| 2,828
|
py
|
#-*- coding: utf-8 -*-
from django import forms
from django.contrib.auth.forms import UserCreationForm
from django.forms import ModelForm
from django.db import models
from django.contrib.auth.models import User, Group
class UserForm(UserCreationForm):
first_name = forms.CharField(max_length = 32)
last_name = forms.CharField(max_length = 32)
class Meta:
model = User
fields = ('first_name', 'last_name')
class Tag(models.Model):
body = models.CharField(max_length = 64)
last_used = models.DateTimeField(auto_now = True)
group = models.ForeignKey(Group)
def formatedDatetime(self):
return self.last_used.strftime('%Y-%m-%d %H:%M')
@staticmethod
def tagging(tag, create, group):
if not tag and not create:
return None
if tag:
result = Tag.objects.get(pk = tag)
elif create:
rows = Tag.objects.filter(body = create).filter(group = group)
if rows:
result = rows[0]
else:
result = Tag(body = create, group = group)
result.save()
return result
def __unicode__(self):
return self.body
class TagForm(ModelForm):
class Meta:
model = Tag
class Message(models.Model):
body = models.TextField()
datetime = models.DateTimeField(u'送信日時', auto_now = True)
ref = models.ForeignKey('self', null = True, blank = True)
tag = models.ForeignKey(Tag, null = True, blank = True)
user = models.ForeignKey(User)
group = models.ForeignKey(Group)
def formatedDatetime(self):
return self.datetime.strftime('%Y-%m-%d %H:%M')
def __unicode__(self):
return '%s - %s' % (self.user.username, self.body[0:40])
class MessageForm(ModelForm):
class Meta:
model = Message
exclude = ('user', 'group')
def __init__(self, *args, **kwargs):
group = kwargs.get('group', False)
if group:
kwargs.pop('group')
super(MessageForm, self).__init__(*args, **kwargs)
self.fields['body'] = forms.CharField(
label = '',
widget = forms.Textarea(
attrs = {
'class' : 'class_form_input',
'cols' : 80,
'rows' : 5
}
)
)
self.fields['tag_create'] = forms.CharField(
label = '',
required = False,
widget = forms.TextInput(
attrs = {
'class' : 'class_form_input'
}
)
)
self.fields['ref'] = forms.ModelChoiceField(
# queryset = Message.objects.filter(group = group).order_by('-id'),
queryset = Message.objects.all().order_by('-id'),
label = '',
required = False,
widget = forms.Select(
attrs = {
'class' : 'class_form_input'
}
)
)
self.fields['tag'] = forms.ModelChoiceField(
# queryset = Tag.objects.filter(group = group).order_by('last_used'),
queryset = Tag.objects.all().order_by('last_used'),
label = '',
required = False,
widget = forms.Select(
attrs = {
'class' : 'class_form_input'
}
)
)
|
[
"user.ryo@gmail.com"
] |
user.ryo@gmail.com
|
e25e3fb611bdf6fa99186813f21592c175ee2b99
|
53ee800e1cd6b4cd3e834e049a74c67c5e32eaca
|
/conftest.py
|
d7d40aca37ed6a8b6431be82ec5d473360206d71
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
sasobadovinac/ht
|
482cd7e7c8ef351dd4bcb5bc9993ef3f74d8cab0
|
de707506c00a3aefc2985008e98e9df0e7af9cb6
|
refs/heads/master
| 2023-02-09T04:42:11.961473
| 2023-01-23T02:21:06
| 2023-01-23T02:21:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,984
|
py
|
import sys
import platform
is_pypy = 'PyPy' in sys.version
def pytest_ignore_collect(path):
path = str(path)
if 'manual_runner' in path or 'make_test_stubs' in path or 'plot' in path or 'prerelease' in path:
return True
if 'conf.py' in path:
return True
ver_tup = platform.python_version_tuple()[0:2]
ver_tup = tuple(int(i) for i in ver_tup)
if ver_tup < (3, 7) or ver_tup >= (3, 10) or is_pypy:
# numba does not yet run under pypy
if 'numba' in path:
return True
if '.rst' in path: # skip .rst tests as different rendering from pint and no support for NUMBER flag
return True
    if sys.version_info[0] == 2:
if 'numba' in path or 'typing_utils' in path:
return True
if 'test' not in path:
return True
if 'ipynb' in path and 'bench' in path:
return True
return False
#def pytest_addoption(parser, pluginmanager):
# if sys.version[0] == '323523':
# parser.addoption("--doctest-modules")
# parser.addini(name="doctest_optionflags", help="", default="NORMALIZE_WHITESPACE NUMBER")
#def pytest_configure(config):
# print(config)
#open('/home/caleb/testoutput', 'w').write(str(1))
#if sys.version[0] == '2':
# args = []
# #print(args)
def pytest_load_initial_conftests(args):
    # intentionally a no-op; kept so the hook exists for future use
    pass
def pytest_configure(config):
    if sys.version_info[0] == 3:
        import pytest
        # compare major versions as integers; a string comparison breaks at pytest >= 10
        if int(pytest.__version__.split('.')[0]) >= 6:
            config.addinivalue_line("addopts", '--doctest-modules')
            config.option.doctestmodules = True
            config.addinivalue_line("doctest_optionflags", "NUMBER")
# config.addinivalue_line("addopts", config.inicfg['addopts'].replace('//', '') + ' --doctest-modules')
#config.inicfg['addopts'] = config.inicfg['addopts'] + ' --doctest-modules'
#
config.addinivalue_line("doctest_optionflags", "NORMALIZE_WHITESPACE")
|
[
"Caleb.Andrew.Bell@gmail.com"
] |
Caleb.Andrew.Bell@gmail.com
|
bfbdcb02acc6bbaaf28aed62a3a02c0364e3390f
|
1e5f6ac1590fe64e2d5a2d8b036c0948847f668d
|
/codes/Module_3/lecture_14/lecture_14_1.py
|
8e31229472bbd0149536f6ac5d764794c79ff078
|
[] |
no_license
|
Gedanke/Reptile_study_notes
|
54a4f48820586b1784c139716c719cc9d614c91b
|
a9705ebc3a6f95160ad9571d48675bc59876bd32
|
refs/heads/master
| 2022-07-12T23:43:24.452049
| 2021-08-09T12:54:18
| 2021-08-09T12:54:18
| 247,996,275
| 5
| 1
| null | 2022-06-26T00:21:48
| 2020-03-17T14:50:42
|
HTML
|
UTF-8
|
Python
| false
| false
| 728
|
py
|
# -*- coding: utf-8 -*-
import time
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.wait import WebDriverWait
url = "https://www.baidu.com"
browser = webdriver.Chrome()
try:
    browser.get(url)
    # 'kw' is the id of Baidu's search box; renamed to avoid shadowing the builtin input()
    search_box = browser.find_element_by_id('kw')
    search_box.send_keys('Python')
    search_box.send_keys(Keys.ENTER)
    # block until the results container is present (up to 10 seconds)
    wait = WebDriverWait(browser, 10)
    wait.until(EC.presence_of_element_located((By.ID, 'content_left')))
    time.sleep(5)
    print(browser.current_url)
    print(browser.get_cookies())
    print(browser.page_source)
finally:
    browser.close()
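
# --- hedged note (not part of the original file) ---
# find_element_by_id was deprecated and later removed in Selenium 4; the
# modern equivalent of the lookup above is:
#   search_box = browser.find_element(By.ID, 'kw')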
|
[
"13767927306@163.com"
] |
13767927306@163.com
|
4cac7bbd91f2ee70771624bc6cc8a2c4bfff9f5f
|
3ea45d6acd362a646e906eac31ab6d3ea019d727
|
/qaeval/tests/scoring/scorers/lerc_test.py
|
cadff9977a9b00b52775e5e6b44447cb724e1300
|
[
"Apache-2.0"
] |
permissive
|
rajhans/qaeval
|
9747dea5dd0a234cc3df7837d6cbc0406b5d1b03
|
dd7273183dd1b2c9995115310ef041daa953ca81
|
refs/heads/master
| 2023-07-10T04:15:05.399369
| 2021-08-03T02:22:15
| 2021-08-03T02:22:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,155
|
py
|
import os
import pytest
from qaeval.scoring.scorers import LERCScorer
from qaeval.tests.scoring.scorers.scorer_test import TestScorer
@pytest.mark.skipif('LERC_MODEL' not in os.environ or 'LERC_PRETRAINED' not in os.environ, reason='LERC environment variables not set')
class TestLERCScorer(TestScorer):
@classmethod
def setUpClass(cls) -> None:
cls.scorer = LERCScorer(
model_path=os.environ['LERC_MODEL'],
pretrained_path=os.environ['LERC_PRETRAINED'],
cuda_device=0
)
def test_keys(self):
assert self.scorer.keys() == {'lerc'}
def test_default_scores(self):
assert self.scorer.default_scores() == {'lerc': 0.0}
def test_is_answered(self):
self.assert_expected_output(
# This is a regression test. It does not ensure these numbers are correct
self.scorer,
{'lerc': (2.5152266025543213 + 4.940724849700928) / 2},
[{'lerc': 2.5152266025543213}, {'lerc': 4.940724849700928}],
[[{'lerc': 2.5210483074188232}, {'lerc': 5.024631500244141}, {'lerc': 0.0}], [{'lerc': 4.940724849700928}]]
)
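
# --- hedged usage note (not part of the original file) ---
# These tests are skipped unless both environment variables point at the
# LERC checkpoints, e.g. (paths are hypothetical):
#   LERC_MODEL=/models/lerc.tar.gz LERC_PRETRAINED=/models/pretrained \
#       pytest qaeval/tests/scoring/scorers/lerc_test.py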
|
[
"danfdeutsch@gmail.com"
] |
danfdeutsch@gmail.com
|
2eda323e1df29dba8b357199e32a196401cea08e
|
487ce91881032c1de16e35ed8bc187d6034205f7
|
/codes/CodeJamCrawler/16_2_1_neat/16_2_1_latsyrc11235_1.py
|
da55e94217b12acb619e4ed1d23e38ecc1f4df14
|
[] |
no_license
|
DaHuO/Supergraph
|
9cd26d8c5a081803015d93cf5f2674009e92ef7e
|
c88059dc66297af577ad2b8afa4e0ac0ad622915
|
refs/heads/master
| 2021-06-14T16:07:52.405091
| 2016-08-21T13:39:13
| 2016-08-21T13:39:13
| 49,829,508
| 2
| 0
| null | 2021-03-19T21:55:46
| 2016-01-17T18:23:00
|
Python
|
UTF-8
|
Python
| false
| false
| 2,143
|
py
|
f = [line.rstrip() for line in open('/Users/roshil/Desktop/A-small-attempt0 (2).in')]
out = open('/Users/roshil/Desktop/out.txt','w')
out.truncate()
line = 0
testcases = int(f[line])
line += 1
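# Greedy decoding via letters that uniquely identify a digit word at each
# step: 'z'->zero, 'w'->two, 'u'->four, 'x'->six, 'g'->eight are unique
# outright; with those removed, 'r'->three, 'o'->one, 'f'->five, 'v'->seven
# become unique, and any letters left over spell "nine".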
for i in range(1, testcases+1):
r1 = f[line]
line += 1
r1 = r1.lower()
word = [k for k in r1]
s = []
while len(word) > 0:
#print word
if 'z' in word:
word.remove('z')
word.remove('e')
word.remove('r')
word.remove('o')
s.append(0)
elif 'w' in word:
word.remove('t')
word.remove('w')
word.remove('o')
s.append(2)
elif 'u' in word:
word.remove('f')
word.remove('o')
word.remove('u')
word.remove('r')
s.append(4)
elif 'r' in word:
word.remove('t')
word.remove('h')
word.remove('r')
word.remove('e')
word.remove('e')
s.append(3)
elif 'x' in word:
word.remove('s')
word.remove('i')
word.remove('x')
s.append(6)
elif 'g' in word:
word.remove('e')
word.remove('i')
word.remove('g')
word.remove('h')
word.remove('t')
s.append(8)
elif 'o' in word:
word.remove('o')
word.remove('n')
word.remove('e')
s.append(1)
elif 'f' in word:
word.remove('f')
word.remove('i')
word.remove('v')
word.remove('e')
s.append(5)
elif 'v' in word:
word.remove('s')
word.remove('e')
word.remove('v')
word.remove('e')
word.remove('n')
s.append(7)
else:
word.remove('n')
word.remove('i')
word.remove('n')
word.remove('e')
s.append(9)
s.sort()
ans = "".join([str(l) for l in s])
    print(ans)
out.write("Case #"+str(i)+": "+str(ans) + "\n")
out.close()
|
[
"[dhuo@tcd.ie]"
] |
[dhuo@tcd.ie]
|
023d9f5a2081647f38c2abb19c67c5d07e7f1bac
|
fb3f2c3f83fbfe894f01ea514c760371ef05d54f
|
/Algorithm/chapter5/flatten.py
|
0b99312d0778169c809ff206410031189ac979eb
|
[] |
no_license
|
jonXue92/PythonGit
|
8160220a3d51fb6a317702a2b50e8ca3306a8f0e
|
a9358ac79a47b3d1fd072a4af603bf07a89b1a2c
|
refs/heads/master
| 2020-04-02T05:25:51.032912
| 2019-04-12T04:18:15
| 2019-04-12T04:18:15
| 154,076,228
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,088
|
py
|
# -*- coding: utf-8 -*-
class TreeNode:
def __init__(self, val):
self.val = val
self.left, self.right = None, None
class Flatten:
last_node = None
def flatten(self, root):
if root is None:
return
if self.last_node is not None:
self.last_node.left = None
self.last_node.right = root
self.last_node = root
right = root.right
self.flatten(root.left)
self.flatten(right)
def flatten1(self, root):
self.helper(root)
# restructure and return last node in preorder
def helper(self, root):
if root is None:
return None
left_last = self.helper(root.left)
right_last = self.helper(root.right)
# connect
if left_last is not None:
left_last.right = root.right
root.right = root.left
root.left = None
if right_last is not None:
return right_last
if left_last is not None:
return left_last
return root
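
# --- hedged usage sketch (not part of the original file) ---
# Builds a small tree and flattens it to a right-spine in preorder order.
# Note: last_node is a class attribute, so use a fresh Flatten instance per
# tree to avoid stale state from a previous run.
if __name__ == '__main__':
    #       1               1
    #      / \                \
    #     2   5      ->        2 -> 3 -> 4 -> 5 -> 6
    #    / \   \
    #   3   4   6
    root = TreeNode(1)
    root.left, root.right = TreeNode(2), TreeNode(5)
    root.left.left, root.left.right = TreeNode(3), TreeNode(4)
    root.right.right = TreeNode(6)
    Flatten().flatten(root)
    node, order = root, []
    while node:
        order.append(node.val)
        node = node.right
    print(order)  # expected: [1, 2, 3, 4, 5, 6]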
|
[
"xzywrz@gmail.com"
] |
xzywrz@gmail.com
|
edfb24502e388ee7e252a957ea60815238e99f0f
|
29f8b7f92eb22cc3134a16c439d3180e254df4bb
|
/chp04_database_programming/04_65_sql.py
|
d81a91878a6b51f9b1bfd0ac8be6453d5ed66e59
|
[] |
no_license
|
Hemie143/realpython2
|
7df80dd5f61ce7cd8c31b8bf78111b8507cbdb36
|
b8535ffe97594e1b18233bcd9aa0de664257cb09
|
refs/heads/master
| 2022-12-12T04:51:53.120131
| 2021-01-03T19:52:32
| 2021-01-03T19:52:32
| 208,735,855
| 0
| 0
| null | 2023-08-17T05:45:32
| 2019-09-16T07:22:16
|
Python
|
UTF-8
|
Python
| false
| false
| 925
|
py
|
import sqlite3
with sqlite3.connect("new.db") as connection:
c = connection.cursor()
c.execute("CREATE TABLE regions (city TEXT, region TEXT)")
cities = [
('New York City', 'Northeast'),
('San Francisco', 'West'),
('Chicago', 'Midwest'),
('Houston', 'South'),
('Phoenix', 'West'),
('Boston', 'Northeast'),
('Los Angeles', 'West'),
('Houston', 'South'),
('Philadelphia', 'Northeast'),
('San Antonio', 'South'),
('San Diego', 'West'),
('Dallas', 'South'),
('San Jose', 'West'),
('Jacksonville', 'South'),
('Indianapolis', 'Midwest'),
('Austin', 'South'),
('Detroit', 'Midwest')
]
c.executemany("INSERT INTO regions VALUES(?, ?)", cities)
c.execute("SELECT * FROM regions ORDER BY region ASC")
rows = c.fetchall()
for r in rows:
print(r[0], r[1])
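
# --- hedged notes (not part of the original script) ---
# * ('Houston', 'South') appears twice in the list above, so it is inserted
#   twice; this matches the script as written.
# * Re-running the script raises sqlite3.OperationalError because the table
#   already exists; an idempotent variant would use
#   "CREATE TABLE IF NOT EXISTS regions (city TEXT, region TEXT)".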
|
[
"hemie143@gmail.com"
] |
hemie143@gmail.com
|
537d39ea66e7cc44ae00acb9282f590cf9ffb326
|
ae8074a50ee666e46484e33bed7eb1cc16dfd0b8
|
/notebooks/CaseStudies/executor_1.py
|
04316c053eba23dd3c842d1844e318ff17f821f8
|
[] |
no_license
|
ayyogg0628/AnomalyDetection_MEAD
|
72edb3c5f222c1d8c1f4fc7fc6d2ae17a757e254
|
0df68f91568726c40f5ff7309cf8f74bcc2af74e
|
refs/heads/master
| 2023-03-18T22:22:17.045809
| 2020-07-07T23:44:59
| 2020-07-07T23:44:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,089
|
py
|
import operator
import pickle
import numpy as np
import os
import sys
import time
import pprint
import inspect
from collections import OrderedDict
import matplotlib.pyplot as plt
import yaml
from sklearn.metrics import auc
import logging
import logging.handlers
import tensorflow as tf
import pandas as pd
tf.logging.set_verbosity(tf.logging.ERROR)
# matplotlib.use('Agg')
sys.path.append('./..')
sys.path.append('./../../.')
try:
import src.m2_test_1layer.tf_model_3_withNorm as tf_model
except:
from .src.m2_test_1layer import tf_model_3_withNorm as tf_model
try:
from src.Eval import eval_v1 as eval
except:
from .src.Eval import eval_v1 as eval
# ------------------------------------ #
cur_path = '/'.join(
os.path.abspath(
inspect.stack()[0][1]
).split('/')[:-1]
)
sys.path.append(cur_path)
__author__ = "Debanjan Datta"
__email__ = "ddatta@vt.edu"
__version__ = "5.0"
__processor__ = 'embedding'
_SAVE_DIR = 'save_dir'
MODEL_NAME = None
_DIR = None
DATA_DIR = None
MODEL_OP_FILE_PATH = None
CONFIG_FILE = 'config_caseStudy_1.yaml'
CONFIG = None
# ----------------------------------------- #
def get_domain_dims():
global DATA_DIR
f_path = os.path.join(DATA_DIR, 'domain_dims.pkl')
with open(f_path, 'rb') as fh:
res = pickle.load(fh)
return list(res.values())
# ----------------------------------------- #
# --------- Model Config --------- #
# ----------------------------------------- #
# embedding_dims = None
DOMAIN_DIMS = None
logger = None
def setup_general_config():
global MODEL_NAME
global _DIR
global SAVE_DIR
global OP_DIR
global _SAVE_DIR
global CONFIG
global logger
SAVE_DIR = os.path.join(CONFIG['SAVE_DIR'], _DIR)
OP_DIR = os.path.join(CONFIG['OP_DIR'], _DIR)
if not os.path.exists(CONFIG['SAVE_DIR']):
os.mkdir(os.path.join(CONFIG['SAVE_DIR']))
if not os.path.exists(SAVE_DIR):
os.mkdir(os.path.join(SAVE_DIR))
return
# --------------------------------------------- #
def set_up_model(config, _dir):
global embedding_dims
global SAVE_DIR
global OP_DIR
global MODEL_NAME
MODEL_NAME = config['MODEL_NAME']
if type(config[_dir]['op_dims']) == str:
embedding_dims = config[_dir]['op_dims']
embedding_dims = embedding_dims.split(',')
embedding_dims = [int(e) for e in embedding_dims]
else:
embedding_dims = [config[_dir]['op_dims']]
model_obj = tf_model.model(MODEL_NAME, SAVE_DIR, OP_DIR)
model_obj.set_model_options(
show_loss_figure=config[_dir]['show_loss_figure'],
save_loss_figure=config[_dir]['save_loss_figure']
)
domain_dims = get_domain_dims()
LR = config[_dir]['learning_rate']
model_obj.set_model_hyperparams(
domain_dims=domain_dims,
emb_dims=embedding_dims,
batch_size=config[_dir]['batchsize'],
num_epochs=config[_dir]['num_epochs'],
learning_rate=LR,
num_neg_samples=config[_dir]['num_neg_samples']
)
model_obj.set_l2_loss_flag(True)
model_obj.inference = False
model_obj.build_model()
return model_obj
def get_data():
global CONFIG
global DATA_DIR
global _DIR
DIR = _DIR
with open(os.path.join(
CONFIG['DATA_DIR'],
DIR,
'domain_dims.pkl'
), 'rb') as fh:
domain_dims = pickle.load(fh)
train_x_pos_file = os.path.join(
CONFIG['DATA_DIR'],
DIR,
'matrix_train_positive_v1.pkl'
)
with open(train_x_pos_file, 'rb') as fh:
train_x_pos = pickle.load(fh)
train_x_neg_file = os.path.join(
CONFIG['DATA_DIR'],
DIR,
'negative_samples_v1.pkl'
)
with open(train_x_neg_file, 'rb') as fh:
train_x_neg = pickle.load(fh)
test_x_file = os.path.join(
CONFIG['DATA_DIR'],
DIR,
'matrix_test_positive.pkl'
)
with open(test_x_file, 'rb') as fh:
test_x = pickle.load(fh)
_df = pd.read_csv(os.path.join(CONFIG['DATA_DIR'],DIR,'test_data.csv'),header=0)
test_id_list = list(_df['PanjivaRecordID'])
return train_x_pos, train_x_neg, test_x, test_id_list, domain_dims
def process(
CONFIG,
_DIR,
train_x_pos,
train_x_neg,
test_data_x,
test_id_list
):
global logger
num_neg_samples = train_x_neg.shape[1]
CONFIG[_DIR]['num_neg_samples'] = num_neg_samples
model_obj = set_up_model(CONFIG, _DIR)
_use_pretrained = CONFIG[_DIR]['use_pretrained']
if _use_pretrained is True:
saved_file_path = None
pretrained_file = CONFIG[_DIR]['saved_model_file']
print('Pretrained File :', pretrained_file)
saved_file_path = os.path.join(
SAVE_DIR,
'checkpoints',
pretrained_file
)
if saved_file_path is not None:
model_obj.set_pretrained_model_file(saved_file_path)
else:
model_obj.train_model(
train_x_pos,
train_x_neg
)
elif _use_pretrained is False:
model_obj.train_model(
train_x_pos,
train_x_neg
)
print(' Len of test_ids ', len(test_id_list))
print('Length of test data', test_data_x.shape)
res = model_obj.get_event_score(test_data_x)
print('Length of results ', len(res))
res = list(res)
_id_score_dict = {
id: _res for id, _res in zip(
test_id_list,
res
)
}
'''
sort by ascending
since lower likelihood means anomalous
'''
tmp = sorted(
_id_score_dict.items(),
key=operator.itemgetter(1)
)
sorted_id_score_dict = OrderedDict()
for e in tmp:
sorted_id_score_dict[e[0]] = e[1][0]
_ID = []
_SCORE = []
for k,v in sorted_id_score_dict.items():
_ID.append(k)
_SCORE.append(v)
_df = pd.DataFrame(columns=['PanjivaRecordID','score'])
_df['PanjivaRecordID'] = _ID
_df['score'] = _SCORE
_df.to_csv(os.path.join(OP_DIR,'result_1.csv'))
# get embeddings
emb_res = model_obj.get_record_embeddings(train_x_pos)
with open(os.path.join(OP_DIR,'train_embeddings.pkl'),'wb') as fh:
pickle.dump(emb_res,fh,pickle.HIGHEST_PROTOCOL)
return
def main():
global embedding_dims
global SAVE_DIR
global _DIR
global DATA_DIR
global CONFIG
global CONFIG_FILE
global MODEL_NAME
global DOMAIN_DIMS
global logger
with open(CONFIG_FILE) as f:
CONFIG = yaml.safe_load(f)
DATA_DIR = os.path.join(CONFIG['DATA_DIR'], _DIR)
setup_general_config()
    if not os.path.exists(os.path.join(SAVE_DIR, 'checkpoints')):
        os.mkdir(os.path.join(SAVE_DIR, 'checkpoints'))
    # ------------ #
logger.info('-------------------')
train_x_pos, train_x_neg, test_x, test_id_list, domain_dims = get_data()
process(
CONFIG,
_DIR,
train_x_pos,
train_x_neg,
test_x,
test_id_list
)
logger.info('-------------------')
# ----------------------------------------------------------------- #
# find out which model works best
# ----------------------------------------------------------------- #
with open(CONFIG_FILE) as f:
CONFIG = yaml.safe_load(f)
log_file = 'case_studies_1.log'
_DIR = 'us_import'
logger = logging.getLogger('main')
logger.setLevel(logging.INFO)
OP_DIR = os.path.join(CONFIG['OP_DIR'], _DIR)
if not os.path.exists(CONFIG['OP_DIR']):
os.mkdir(CONFIG['OP_DIR'])
if not os.path.exists(OP_DIR):
os.mkdir(OP_DIR)
handler = logging.FileHandler(os.path.join(OP_DIR, log_file))
handler.setLevel(logging.INFO)
logger.addHandler(handler)
logger.info(' Info start ')
logger.info(' -----> ' + _DIR)
main()
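
# --- hedged usage note (not part of the original file) ---
# The script reads config_caseStudy_1.yaml and expects, under
# CONFIG['DATA_DIR']/us_import: domain_dims.pkl, matrix_train_positive_v1.pkl,
# negative_samples_v1.pkl, matrix_test_positive.pkl and test_data.csv.
# Scores land in CONFIG['OP_DIR']/us_import/result_1.csv and record embeddings
# in train_embeddings.pkl. Run directly:
#   python executor_1.py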
|
[
"ddatta@vt.edu"
] |
ddatta@vt.edu
|
70772f2adcd137ef04c0dd0f83df8264fa9192f8
|
72db8db1a513dfa01ce81bf88b39c10c662bfae2
|
/annoying/tests/models.py
|
099d3338d0b17db45ce4bfc6d5fbc2b27c37d152
|
[
"MIT"
] |
permissive
|
colorstheforce/insta-clonewars
|
ec6053853505db26e9e931c531e531b5e6754740
|
2e8e6fc2e5ef7d2401d7902679e64d8859918d3a
|
refs/heads/master
| 2022-03-30T13:28:39.755391
| 2019-01-17T12:55:11
| 2019-01-17T12:55:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 456
|
py
|
from django.db import models
from annoying.fields import AutoOneToOneField, JSONField
class SuperVillain(models.Model):
name = models.CharField(max_length=20, default="Dr Horrible")
stats = JSONField(default=None, blank=True, null=True)
class SuperHero(models.Model):
name = models.CharField(max_length=20, default="Captain Hammer")
mortal_enemy = AutoOneToOneField(SuperVillain, related_name='mortal_enemy')
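
# --- hedged usage sketch (not part of the original file) ---
# django-annoying's AutoOneToOneField creates the related object on first
# access from the reverse side, so (assuming migrations are applied):
#   villain = SuperVillain.objects.create()
#   villain.mortal_enemy        # a SuperHero row is created automatically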
|
[
"jackogina60@gmail.com"
] |
jackogina60@gmail.com
|