blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
fa9e74de9960aafc87cf37002938ddea36129615 | f7b3c8f6c10a742c742ca21184f797f0ad737408 | /svn2git.py | b172a8a5d083d46fd3735a02aa1a7d7abca7f490 | [] | no_license | mcdonc/svn2git | 88488a415c8e0a514a0cc42d7dd4bf5d9ea9504b | f37b624e3b25746ae61fab07045cdb728f84828c | refs/heads/master | 2020-04-20T15:52:18.217285 | 2011-08-18T18:21:10 | 2011-08-18T18:21:10 | 1,415,728 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,108 | py | # usage:
#
# python2.7 svn2git.py \
# http://svn.repoze.org/repoze.catalog \
# git@github.com:repoze/repoze.catalog.git \
# branch1 \
# branch2
#
# (branch specs are optional, otherwise only trunk and tags are imported)
#
# users.txt should be in /tmp/users.txt
#
# requires python 2.7
import tempfile
import shutil
import os
import subprocess
import re
import sys
tag_re = re.compile(r'tags/(\d.*)')
def do(svn, git, *branches):
    """Mirror an svn repository into a git remote.

    Clones `svn` via git-svn into a throwaway temp directory, turns the
    svn "tag branches" into real git tags, recreates any requested
    `branches`, then pushes master, those branches and all tags to the
    `git` remote. The temp directory is removed in the `finally` clause
    even on failure.
    """
    cmd = "git svn clone --stdlayout --no-metadata -A /tmp/users.txt %s tmp"
    cmd = cmd % svn
    wd = tempfile.mkdtemp()
    try:
        os.chdir(wd)
        # git-svn does the heavy lifting; any non-zero exit aborts the run.
        result = os.system(cmd)
        if result:
            raise ValueError(result)
        os.chdir('tmp')
        # Remote branches created by git-svn (trunk, tags/*, branch names).
        r = subprocess.check_output(['git', 'branch', '-r'])
        tag_branches = [ x.strip() for x in filter(None, r.split('\n'))]
        for tag_branch in tag_branches:
            matched = tag_re.match(tag_branch)
            if matched:
                # '@'-suffixed entries are presumably git-svn's
                # revision-pinned duplicates (tags/x@NNN) — skip them.
                if not '@' in tag_branch:
                    tag = matched.group(1)
                    print 'making tag %s' % tag
                    # Materialise the svn tag branch as a real git tag via
                    # a temporary local branch, then delete the branch.
                    os.system('git checkout -b tag_x remotes/%s' % tag_branch)
                    os.system('git checkout master')
                    os.system('git tag %s tag_x' % tag)
                    os.system('git branch -D tag_x')
        for branch in branches:
            print 'creating branch %s' % branch
            os.system('git checkout -b %s remotes/%s' % (branch, branch))
            os.system('git checkout master')
        # Clone into a fresh repo so only real refs (not git-svn remote
        # tracking refs) are pushed to the destination.
        os.chdir('..')
        os.system('git clone tmp dest')
        os.chdir('dest')
        os.system('git remote add xx %s' % git)
        os.system('git push xx master')
        for branch in branches:
            print 'pushing branch %s' % branch
            os.system('git checkout -b %s remotes/origin/%s' % (branch, branch))
            os.system('git push xx %s' % branch)
        os.system('git push xx --tags')
    finally:
        shutil.rmtree(wd)
if __name__ == '__main__':
    do(sys.argv[1], sys.argv[2], *sys.argv[3:])
| [
"chrism@plope.com"
] | chrism@plope.com |
cdd207946758af304736b74d5fb083e7c096090c | 5c099927aedc6fdbc515f40ff543c65b3bf4ec67 | /algorithms/combination-sum-iii/src/Solution.py | 0a4d6205ea8f236eb6dd457b917b0509fb19bddc | [] | no_license | bingzhong-project/leetcode | 7a99cb6af1adfbd9bb1996a7f66a65679053c478 | ba82e7d94840b3fec272e4c5f82e3a2cfe4b0505 | refs/heads/master | 2020-04-15T09:27:33.979519 | 2020-03-10T03:43:07 | 2020-03-10T03:43:07 | 164,550,933 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 540 | py | class Solution:
def combinationSum3(self, k, n):
"""
:type k: int
:type n: int
:rtype: List[List[int]]
"""
def dfs(target, k, start, paths, res):
if k == 0:
if target == 0:
res.append(paths)
return
for i in range(start, 10):
if target - i >= 0 and k - 1 >= 0:
dfs(target - i, k - 1, i + 1, paths + [i], res)
res = []
dfs(n, k, 1, [], res)
return res
| [
"zhongyongbin@foxmail.com"
] | zhongyongbin@foxmail.com |
8765c2225d5837e8d7fd36b6dc4044e605ab5686 | 039a274d8a8bfbfb90b3c884024edf8c18507150 | /examples/logisticRegression.py | 3fecc9725f6e1545a2e1b879c0744400da271f67 | [
"MIT"
] | permissive | JayceeLee/TheanoProject | 1e33ae2a58a188cfce6c5bcbd8a2f6f9fbd36a0d | be1f5f09aa84d64ad3df7b798cf6ff74a08bf3b7 | refs/heads/master | 2021-05-11T09:12:50.278105 | 2017-04-09T08:55:03 | 2017-04-09T08:55:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,248 | py | #! /usr/bin/python3
# -*- encoding: utf-8 -*-
from __future__ import print_function, unicode_literals
import numpy as np
import theano
import theano.tensor as T
__author__ = 'fyabc'
RNG = np.random
def main():
    """Train a logistic-regression model on random data with Theano.

    Builds a symbolic cross-entropy + L2 cost over shared weight/bias
    variables, compiles train/predict functions, runs plain gradient
    descent for `trainingSteps` iterations and prints the fit.
    """
    N = 400                                   # training sample size
    feats = 784                               # number of input variables

    # generate a dataset: D = (input_values, target_class)
    # [NOTE]: randint is [low, high)
    D = (RNG.randn(N, feats), RNG.randint(size=N, low=0, high=2))
    trainingSteps = 10000

    # Declare Theano symbolic variables
    x = T.dmatrix('x')
    y = T.dvector('y')

    # initialize the weight vector w randomly
    #
    # this and the following bias variable b
    # are shared so they keep their values
    # between training iterations (updates)
    w = theano.shared(RNG.randn(feats), name='w')

    # initialize the bias term
    b = theano.shared(0., name='b')

    print('Initial model:')
    print(w.get_value())
    print(b.get_value())

    # Construct Theano expression graph
    p_1 = 1 / (1 + T.exp(-T.dot(x, w) - b))  # Probability that target = 1
    prediction = p_1 > 0.5  # The prediction thresholded
    xCE = -y * T.log(p_1) - (1 - y) * T.log(1 - p_1)  # Cross-entropy loss function
    cost = xCE.mean() + 0.01 * (w ** 2).sum()  # The cost to minimize (with L2 penalty)
    gw, gb = T.grad(cost, [w, b])  # Compute the gradient of the cost
                                   # w.r.t weight vector w and
                                   # bias term b

    # Compile: `train` also applies one gradient-descent step (lr = 0.1)
    # through the `updates` argument each time it is called.
    train = theano.function(
        inputs=[x, y],
        outputs=[prediction, xCE],
        updates=((w, w - 0.1 * gw), (b, b - 0.1 * gb)))
    predict = theano.function(inputs=[x], outputs=prediction)

    # Train
    for i in range(trainingSteps):
        pred, err = train(D[0], D[1])
        # print('Step %d: pred = %s, err = %s' % (i, str(pred), str(err)))
        print('Step %d' % (i,))

    print("Final model:")
    print(w.get_value())
    print(b.get_value())
    print("target values for D:")
    print(D[1])
    print("prediction on D:")
    print(predict(D[0]))
    print("Error:")
    print(D[1] - predict(D[0]))
if __name__ == '__main__':
    main()
| [
"fyabc@mail.ustc.edu.cn"
] | fyabc@mail.ustc.edu.cn |
f3edbdc52fa052152bcf4d6e714ac29123c16186 | 2eb8e3606a8df45d432fdf56ee9aa24942304526 | /rocketgram/api/inline_query.py | cc10fabfbdd6a9c87cdb80a8c3c80a24aeaecdd4 | [
"MIT"
] | permissive | KulZlaK/rocketgram | 22848293980ba44dd9fb63db28f34be36c437c84 | 09587deecffcd7ccc9529f4d9e51221888870f23 | refs/heads/master | 2022-07-27T23:25:51.254444 | 2020-05-15T21:36:57 | 2020-05-15T21:36:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 871 | py | # Copyright (C) 2015-2020 by Vd.
# This file is part of Rocketgram, the modern Telegram bot framework.
# Rocketgram is released under the MIT License (see LICENSE).
from dataclasses import dataclass
from typing import Optional
from .location import Location
from .user import User
@dataclass(frozen=True)
class InlineQuery:
    """\
    Telegram InlineQuery object:
    https://core.telegram.org/bots/api#inlinequery

    Field names that differ from the wire format:
    id -> query_id
    from -> user
    """

    query_id: str
    user: User
    location: Optional[Location]
    query: str
    offset: str

    @classmethod
    def parse(cls, data: dict) -> Optional['InlineQuery']:
        """Build an InlineQuery from a decoded update dict, or None."""
        if data is None:
            return None

        sender = User.parse(data['from'])
        where = Location.parse(data.get('location'))
        return cls(data['id'], sender, where, data['query'], data['offset'])
| [
"vd@"
] | vd@ |
17abd873c71413e3e69a0166b662cadde12a971d | fbbe424559f64e9a94116a07eaaa555a01b0a7bb | /Tensorflow_OpenCV_Nightly/source/tensorflow/contrib/crf/__init__.py | 80a31cc3341a2c7cf1867eef2ef3e463a325667f | [
"MIT"
] | permissive | ryfeus/lambda-packs | 6544adb4dec19b8e71d75c24d8ed789b785b0369 | cabf6e4f1970dc14302f87414f170de19944bac2 | refs/heads/master | 2022-12-07T16:18:52.475504 | 2022-11-29T13:35:35 | 2022-11-29T13:35:35 | 71,386,735 | 1,283 | 263 | MIT | 2022-11-26T05:02:14 | 2016-10-19T18:22:39 | Python | UTF-8 | Python | false | false | 1,633 | py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Linear-chain CRF layer.
See the @{$python/contrib.crf} guide.
@@crf_sequence_score
@@crf_log_norm
@@crf_log_likelihood
@@crf_unary_score
@@crf_binary_score
@@CrfForwardRnnCell
@@viterbi_decode
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.crf.python.ops.crf import _lengths_to_masks
from tensorflow.contrib.crf.python.ops.crf import crf_binary_score
from tensorflow.contrib.crf.python.ops.crf import crf_log_likelihood
from tensorflow.contrib.crf.python.ops.crf import crf_log_norm
from tensorflow.contrib.crf.python.ops.crf import crf_sequence_score
from tensorflow.contrib.crf.python.ops.crf import crf_unary_score
from tensorflow.contrib.crf.python.ops.crf import CrfForwardRnnCell
from tensorflow.contrib.crf.python.ops.crf import viterbi_decode
from tensorflow.python.util.all_util import remove_undocumented
remove_undocumented(__name__)
| [
"ryfeus@gmail.com"
] | ryfeus@gmail.com |
b16e2983b29295ca79df7a58ce01944eb1bc078d | 3879d1ca43c573c209f962182cd1e7f7fe978fbf | /leetcode/207. Course Schedule/207.py | 38345c4258f98161245c91f343273e5b8b1ee969 | [] | no_license | DoctorLai/ACM | 34a5600a5adf22660c5d81b2d8b7a358be537ecf | aefa170f74c55c1230eb6f352770512b1e3f469e | refs/heads/master | 2023-09-01T02:13:01.604508 | 2023-08-31T15:42:07 | 2023-08-31T15:42:07 | 146,173,024 | 62 | 18 | null | 2020-10-11T13:19:57 | 2018-08-26T11:00:36 | C++ | UTF-8 | Python | false | false | 979 | py | # https://helloacm.com/teaching-kids-programming-topological-sort-algorithm-on-directed-graphs-course-schedule-bfs/
# https://leetcode.com/problems/course-schedule/
# MEDIUM, TOPOLOGICAL SORT, GRAPH
class Node(object):
    """Per-vertex bookkeeping for the topological sort."""

    def __init__(self):
        # inDegrees: prerequisite edges pointing at this vertex.
        # outNodes: vertices unlocked once this one is processed.
        self.inDegrees, self.outNodes = 0, []
class Solution(object):
    def canFinish(self, numCourses, prerequisites):
        """Return True when every course can be completed.

        Kahn's topological sort: repeatedly take courses with no unmet
        prerequisites; if every course gets taken the dependency graph is
        acyclic.  Rewritten to drop the dependency on the mutable `Node`
        helper and to count completed courses (rather than edges), which
        also handles isolated courses uniformly.

        :type numCourses: int
        :type prerequisites: List[List[int]]  # pairs [course, prereq]
        :rtype: bool
        """
        # indegree[c]: number of unmet prerequisites of course c.
        indegree = [0] * numCourses
        # successors[p]: courses that depend on p.
        successors = [[] for _ in range(numCourses)]
        for course, prereq in prerequisites:
            indegree[course] += 1
            successors[prereq].append(course)

        # Seed the queue with every course that has no prerequisites.
        queue = deque(c for c in range(numCourses) if indegree[c] == 0)
        taken = 0
        while queue:
            current = queue.popleft()
            taken += 1
            for nxt in successors[current]:
                indegree[nxt] -= 1
                if indegree[nxt] == 0:
                    queue.append(nxt)

        # A cycle leaves at least one course with a positive indegree.
        return taken == numCourses
| [
"noreply@github.com"
] | DoctorLai.noreply@github.com |
963ad59463139f848c367bf564d5b5d04be6a85a | d802cb112c080f99fc800effe23b2a7ca55a694b | /tests/test_updater.py | d3e372ada57fb325e58edc6929cffb630c5c5bbd | [
"Apache-2.0"
] | permissive | zjj2wry/dvc | 211ce55c00856abc7d08408c79b93b47cff538a8 | c9df567938eefd7b1f5b094c15f04e5ce704aa36 | refs/heads/master | 2020-04-17T20:34:00.368060 | 2019-01-21T22:19:15 | 2019-01-21T22:19:15 | 166,911,251 | 0 | 0 | Apache-2.0 | 2019-01-22T02:13:23 | 2019-01-22T02:13:23 | null | UTF-8 | Python | false | false | 1,148 | py | import os
from tests.basic_env import TestDvc
class TestUpdater(TestDvc):
    def test(self):
        """Exercise the updater's check() path.

        Runs only on Travis CRON builds to avoid generating too much log
        output; the CI variable is temporarily removed so the updater does
        not think it is running under CI.
        """
        if os.getenv('TRAVIS') != 'true':
            return
        if os.getenv('TRAVIS_EVENT_TYPE') != 'cron':
            return

        saved_environ = os.environ.copy()
        if os.getenv('CI'):
            del os.environ['CI']

        for _ in range(3):
            self.dvc.updater.check()

        os.environ = saved_environ.copy()

    def test_check_version_newer(self):
        # Current version ahead of latest -> not outdated.
        updater = self.dvc.updater
        updater.latest = "0.20.8"
        updater.current = "0.21.0"
        self.assertFalse(updater._is_outdated())

    def test_check_version_equal(self):
        # Same version on both sides -> not outdated.
        updater = self.dvc.updater
        updater.latest = "0.20.8"
        updater.current = "0.20.8"
        self.assertFalse(updater._is_outdated())

    def test_check_version_outdated(self):
        # Latest ahead of current -> outdated.
        updater = self.dvc.updater
        updater.latest = "0.21.0"
        updater.current = "0.20.8"
        self.assertTrue(updater._is_outdated())
| [
"kupruser@gmail.com"
] | kupruser@gmail.com |
c6a662e4deb03a754003a41447bcac49368a9cfa | a9243f735f6bb113b18aa939898a97725c358a6d | /0.11/_downloads/rt_feedback_server.py | 292a32b002948df77bf8eebf9e06156a38d1071c | [] | permissive | massich/mne-tools.github.io | 9eaf5edccb4c35831400b03278bb8c2321774ef2 | 95650593ba0eca4ff8257ebcbdf05731038d8d4e | refs/heads/master | 2020-04-07T08:55:46.850530 | 2019-09-24T12:26:02 | 2019-09-24T12:26:02 | 158,233,630 | 0 | 0 | BSD-3-Clause | 2018-11-19T14:06:16 | 2018-11-19T14:06:16 | null | UTF-8 | Python | false | false | 4,945 | py | """
==============================================
Real-time feedback for decoding :: Server Side
==============================================
This example demonstrates how to setup a real-time feedback
mechanism using StimServer and StimClient.
The idea here is to display future stimuli for the class which
is predicted less accurately. This allows on-demand adaptation
of the stimuli depending on the needs of the classifier.
To run this example, open ipython in two separate terminals.
In the first, run rt_feedback_server.py and then wait for the
message
RtServer: Start
Once that appears, run rt_feedback_client.py in the other terminal
and the feedback script should start.
All brain responses are simulated from a fiff file to make it easy
to test. However, it should be possible to adapt this script
for a real experiment.
"""
# Author: Mainak Jas <mainak@neuro.hut.fi>
#
# License: BSD (3-clause)
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn import preprocessing
from sklearn.svm import SVC
from sklearn.pipeline import Pipeline
from sklearn.cross_validation import train_test_split
from sklearn.metrics import confusion_matrix
import mne
from mne.datasets import sample
from mne.realtime import StimServer
from mne.realtime import MockRtClient
from mne.decoding import EpochsVectorizer, FilterEstimator
print(__doc__)
# Load fiff file to simulate data
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
raw = mne.io.Raw(raw_fname, preload=True)
# Instantiating stimulation server
# The with statement is necessary to ensure a clean exit
with StimServer('localhost', port=4218) as stim_server:
# The channels to be used while decoding
picks = mne.pick_types(raw.info, meg='grad', eeg=False, eog=True,
stim=True, exclude=raw.info['bads'])
rt_client = MockRtClient(raw)
# Constructing the pipeline for classification
filt = FilterEstimator(raw.info, 1, 40)
scaler = preprocessing.StandardScaler()
vectorizer = EpochsVectorizer()
clf = SVC(C=1, kernel='linear')
concat_classifier = Pipeline([('filter', filt), ('vector', vectorizer),
('scaler', scaler), ('svm', clf)])
stim_server.start(verbose=True)
# Just some initially decided events to be simulated
# Rest will decided on the fly
ev_list = [4, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4]
score_c1, score_c2, score_x = [], [], []
for ii in range(50):
# Tell the stim_client about the next stimuli
stim_server.add_trigger(ev_list[ii])
# Collecting data
if ii == 0:
X = rt_client.get_event_data(event_id=ev_list[ii], tmin=-0.2,
tmax=0.5, picks=picks,
stim_channel='STI 014')[None, ...]
y = ev_list[ii]
else:
X_temp = rt_client.get_event_data(event_id=ev_list[ii], tmin=-0.2,
tmax=0.5, picks=picks,
stim_channel='STI 014')
X_temp = X_temp[np.newaxis, ...]
X = np.concatenate((X, X_temp), axis=0)
time.sleep(1) # simulating the isi
y = np.append(y, ev_list[ii])
# Start decoding after collecting sufficient data
if ii >= 10:
# Now start doing rtfeedback
X_train, X_test, y_train, y_test = train_test_split(X, y,
test_size=0.2,
random_state=7)
y_pred = concat_classifier.fit(X_train, y_train).predict(X_test)
cm = confusion_matrix(y_test, y_pred)
score_c1.append(float(cm[0, 0]) / sum(cm, 1)[0] * 100)
score_c2.append(float(cm[1, 1]) / sum(cm, 1)[1] * 100)
# do something if one class is decoded better than the other
if score_c1[-1] < score_c2[-1]:
print("We decoded class RV better than class LV")
ev_list.append(3) # adding more LV to future simulated data
else:
print("We decoded class LV better than class RV")
ev_list.append(4) # adding more RV to future simulated data
# Clear the figure
plt.clf()
# The x-axis for the plot
score_x.append(ii)
# Now plot the accuracy
plt.plot(score_x[-5:], score_c1[-5:])
plt.hold(True)
plt.plot(score_x[-5:], score_c2[-5:])
plt.xlabel('Trials')
plt.ylabel('Classification score (% correct)')
plt.title('Real-time feedback')
plt.ylim([0, 100])
plt.xticks(score_x[-5:])
plt.legend(('LV', 'RV'), loc='upper left')
plt.show()
| [
"larson.eric.d@gmail.com"
] | larson.eric.d@gmail.com |
2ce249532f1a3348bba6d0f7f78e80def67cb3a9 | a39adde99c75c2bf9b25b59fb0d6769196e74a63 | /datasets/hscic/load.py | ae0c205010a46e9d2e59aa6ad24fd23ad88fdd39 | [
"MIT"
] | permissive | uk-gov-mirror/nhsengland.publish-o-matic | 51624d52df562089f7acf4ac91aabcb37ac6d63b | dc8f16cb83a2360989afa44d887e63b5cde6af29 | refs/heads/master | 2021-06-09T06:17:50.473307 | 2016-08-18T10:29:50 | 2016-08-18T10:29:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,016 | py | """
Publish HSCIC Indicators to CKAN !
"""
import logging
import sys
import ffs
import slugify
import dc
from publish.lib.metadata import get_resource_path
from publish.lib.helpers import download_file, to_markdown, filename_for_resource
from publish.lib.upload import Uploader
from datasets.hscic.curate import Curator
logging.basicConfig(filename='publish.log',
format='%(asctime)s %(levelname)s: %(message)s',
level=logging.DEBUG)
DATA_DIR = None
# TODO: Change this to just strip punctuation and check length
def clean_tag(t):
    """Normalise a raw keyword into a list of CKAN-safe tags.

    Strips brackets and colons, expands a few known abbreviations,
    splits on ';' (or, failing that, '/') and drops any fragment of two
    characters or fewer.
    """
    def _keep_long_enough(candidates):
        # One- and two-character tags are noise; discard them.
        return [c for c in candidates if len(c) > 2]

    cleaned = t.replace('(', "").replace(")", "").replace(':', "")
    cleaned = cleaned.replace('A+E', "A and E")
    cleaned = cleaned.replace('ST&', "")
    cleaned = cleaned.replace('A&', "A and")

    # ';' takes precedence over '/' as a separator, as before.
    for separator in (';', '/'):
        if separator in cleaned:
            return _keep_long_enough(
                [part.strip() for part in cleaned.split(separator)])
    return _keep_long_enough([cleaned.replace('&', '-and-')])
def publish_indicators(start_from=0):
global DATA_DIR
u = Uploader("hscic-indicators")
indicatorfile = ffs.Path(get_resource_path('indicators.json'))
logging.info('Loading {}'.format(indicatorfile))
indicators = indicatorfile.json_load()
logging.info('Processing {} indicators'.format(len(indicators)))
logging.info('Starting from record {}'.format(start_from))
for indicator in indicators[start_from:]:
try:
resources = []
for s in indicator['sources']:
resource = {
"description": s['description'],
"name": s['url'].split('/')[-1],
"format": s['filetype'].upper(),
"url": s["url"]
}
"""
filename = filename_for_resource(resource)
path = DATA_DIR / filename
download_file(resource['url'], path)
print "Uploading to S3"
url = u.upload(path)
resource['url'] = url
"""
resources.append(resource)
if not 'indicators' in indicator['keyword(s)']:
indicator['keyword(s)'].append('indicators')
title = indicator['title']
c = Curator(indicator)
groups = c.get_groups()
if not groups:
print "Not in a group"
continue
prefix = c.get_title_prefix()
if prefix:
title = u"{} - {}".format(prefix, title)
tags = []
if 'keyword(s)' in dataset:
dataset['keyword(s)'] = sum([clean_tag(k) for k in indicator.get('keyword(s)',[]) if len(k) > 2], [])
tags = dc.tags(*dataset['keywords'])
print '+ Create/Update dataset {}'.format(indicator['title'])
dc.Dataset.create_or_update(
name=slugify.slugify(title).lower()[:99],
title=title,
state='active',
licence_id='ogl',
notes=to_markdown(indicator['definition'].encode('utf8')),
url='https://indicators.ic.nhs.uk/webview/',
tags=dc.tags(tags),
resources=resources,
owner_org='hscic'
)
if groups:
try:
dataset = dc.ckan.action.package_show(id=slugify.slugify(title)[:99].lower())
except:
continue
for group in groups:
group = group.lower()
if [g for g in dataset.get('groups', []) if g['name'] == group]:
print 'Already in group', g['name']
else:
dc.ckan.action.member_create(
id=group,
object=dataset_name,
object_type='package',
capacity='member'
)
except Exception as ex:
import traceback
traceback.print_exc()
import sys; sys.exit(1)
u.close()
return
def publish_datasets(start_from=0):
global DATA_DIR
u = Uploader("hscic-datasets")
datasetfile = ffs.Path(get_resource_path('datasets.json'))
logging.info('Loading {}'.format(datasetfile))
datasets = datasetfile.json_load()
logging.info('Processing {} indicators'.format(len(datasets)))
logging.info('Starting from record {}'.format(start_from))
import random
total = len(datasets) - start_from
current = 1
for dataset in datasets[start_from:]:
print "STATUS: {}/{}".format(current, total)
current += 1
#print u'Processing {}'.format(dataset['title'])
#print ' ID: {}'.format(dataset['id'])
try:
resources = []
for s in dataset['sources']:
resource = {
"description": s['description'],
"name": s['url'].split('/')[-1],
"format": s['filetype'],
"url": s["url"]
}
"""
filename = filename_for_resource(resource)
path = DATA_DIR / filename
download_file(resource['url'], path)
resource['url'] = u.upload(path)
"""
resources.append(resource)
if not resources:
print "Dataset {} does not have any resources".format(dataset['id'])
continue
title = dataset['title']
c = Curator(dataset)
groups = c.get_groups()
if not groups:
print "Not in a group"
continue
prefix = c.get_title_prefix()
if prefix:
title = u"{} - {}".format(prefix, title)
name = slugify.slugify(title).lower()[0:99]
# Call cleantags on each work and expect back a list, which is then flattened
tags = []
if 'keywords' in dataset:
dataset['keywords'] = sum([clean_tag(k) for k in dataset.get('keywords',[]) if len(k) > 2], [])
tags = dc.tags(*dataset['keywords'])
notes = dataset['summary']
if 'key_facts' in dataset:
notes += '\n\n<h2>KEY FACTS:</h2>\n' + ''.join(dataset['key_facts'])
notes = to_markdown(notes)
name = 'hscic_dataset_{}'.format(dataset['id'])
dc.Dataset.create_or_update(
name=name,
title=title,
state='active',
licence_id='ogl',
notes=notes,
url=dataset['source'],
tags=tags,
resources=resources,
owner_org='hscic'
)
if groups:
try:
dataset = dc.ckan.action.package_show(id=name)
except:
continue
for group in groups:
group = group.lower()
if [g for g in dataset.get('groups', []) if g['name'] == group]:
print 'Already in group', g['name']
else:
dc.ensure_group(group)
dc.ckan.action.member_create(
id=group,
object=dataset['id'],
object_type='package',
capacity='member'
)
except Exception as ex:
import traceback
traceback.print_exc()
u.close()
return
def load(workspace):
    """Entry point: prepare the staging directory, then publish everything.

    Downloads are staged under <workspace>/data; returns 0 so callers can
    use the result as a process exit code.
    """
    global DATA_DIR
    DATA_DIR = ffs.Path(workspace) / "data"
    DATA_DIR.mkdir()
    dc.ensure_publisher('hscic')
    publish_indicators(0)
    publish_datasets(0)
    return 0
| [
"ross@servercode.co.uk"
] | ross@servercode.co.uk |
d25bc04e3fb0cd7456802691f219db394d38c4f8 | 8600ea155f279e5a8dfe5a1926038511f6b6a7ea | /l10n_chart_uk_minimal/__terp__.py | b4881867bb86055b6eb6e5cb179dee319e413791 | [] | no_license | MarkNorgate/addons-EAD | c2fff89ab16fce3ba19fbe433ee5863705a6f4e5 | 840f28642b5d328e4b86839c413e5164622295a5 | refs/heads/master | 2020-04-23T22:11:00.164438 | 2015-07-22T12:24:53 | 2015-07-22T12:24:53 | 39,501,011 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,690 | py | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
#
# This module provides a minimal UK chart of accounts for building upon further
# Open ERP's default currency and accounts are remapped to this chart
#
# This module works for Open ERP 4.1.0 (and, assumed, onwards).
# This module does not work for Open ERP 4.0.2 and before.
#
# VAT is structured thus:
# - the user company is assumed to be non-VAT exempt (easy to modify, however)
# - categories OVATS (Standard), OVATR (Reduced), OVATZ (Zero) should be
# assigned to the customer taxes section of products (depending on the product)
# - categories IVATS (Standard), IVATR (Reduced), IVATZ (Zero) should be
# assigned to the supplier taxes section of products (depending on the product)
# - categories OVATX (eXempt), OVATO (Out of scope), or nothing at all should be
# assigned to default tax field of customers (depending on the customer)
# - customer categorization trumps product categorization (unchanged Tiny functionality)
# - on purchases, upon invoicing
# - the base amount (ex-VAT) appears in the appropriate input base category (S, R, Z)
# - the VAT amount appears in the appropriate input VAT category (S, R)
# - invoice lines can be traced in these VAT categories
# - refunds of invoices are deducted from the input category
# - on sales, upon invoicing
# - the base amount (ex-VAT) appears in the appropriate output base category (S, R, Z, X, O)
# - the VAT amount appears in the appropriate output VAT category (S, R)
# - invoice lines can be traced in these VAT categories
# - refunds of invoices are deducted from the output category
#
# This forms a basis for accrual tax accounting
# Cash tax accounting can be accommodated with further processing in Open ERP
#
# Status beta 0.92 - tested on Open ERP 4.1.0
# Status beta 0.93 - tested on Open ERP 4.1.0
# - trivial change to depend only on 'account'
# (seemed to be important for importing with no demo data)
# Status 1.0 - tested on Open ERP 4.1.0, 4.0.3
# - COGS account type fixed
#
# OpenERP module manifest: read at install time for the module's metadata,
# its dependencies, and the XML data files to load.
{
    'name': 'United Kingdom - minimal',
    'version': '1.1',
    'category': 'Localisation/Account Charts',
    'description': """This is the base module to manage the accounting chart for United Kingdom in Open ERP.""",
    'author': 'Seath Solutions Ltd',
    'website': 'http://www.seathsolutions.com',
    'depends': ['base', 'account', 'base_iban', 'base_vat', 'account_chart'],
    'init_xml': [],
    'update_xml': [  # data files (re)loaded on every module update
        'account_types.xml',
        'account_chart.xml',
        'account_tax.xml',
        'l10n_uk_wizard.xml'
    ],
    'demo_xml': [],
    'installable': True,
    'certificate': '0064392144797',
}
| [
"mark.norgate@affinity-digital.com"
] | mark.norgate@affinity-digital.com |
b77b296b642288c01694999600780b625adcc2da | 7ab22a9b23502f6cf3dafa02ac68067f7aacca56 | /test/test1.py | 2b7029a6553abd58dff1dee5efe0ddcef5609e88 | [] | no_license | to2bage/flask_first | 62ba6e40f0df7b46cd3f9bff91a8a0a1d41df9be | 098f1d81f516c7d369375cf9c48ec8bb6efa3a80 | refs/heads/master | 2022-11-22T01:29:53.526222 | 2020-07-24T06:51:50 | 2020-07-24T06:51:50 | 281,608,004 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 588 | py | # webserver的概念 java php nginx apache tomcat iis
# app.run() serves requests with a single process and a single thread.
# Local: thread isolation implemented with a per-thread dictionary.
# LocalStack: wraps Local to provide a thread-isolated stack.
"""
from werkzeug.local import Local
from threading import Thread
import time
my_obj = Local()
my_obj.b = 1
def worker():
    my_obj.b = 2
    print("in new thread b is ", my_obj.b)
new_t = Thread(target=worker)
new_t.start()
time.sleep(1)
print("in main thread b is ", my_obj.b)
"""
# Demonstrate the thread-isolated stack: push one value and read the top.
from werkzeug.local import LocalStack
s = LocalStack()
s.push(1)
print(s.top)
"to2bage@hotmail.com"
] | to2bage@hotmail.com |
130cb2f1bd3d8c3aa0903d0a6039d875dae49c00 | 6f1e1c378997bf76942ce6e203e720035169ce27 | /sort/8-counting-sort.py | 731b1a6a22acd113ba2c815055e3f06458f47a35 | [
"MIT"
] | permissive | yuenliou/leetcode | a489b0986b70b55f29d06c2fd7545294ba6e7ee5 | e8a1c6cae6547cbcb6e8494be6df685f3e7c837c | refs/heads/main | 2021-06-16T07:47:39.103445 | 2021-05-11T09:16:15 | 2021-05-11T09:16:15 | 306,536,421 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,374 | py | #!/usr/local/bin/python3.7
# -*- coding: utf-8 -*-
def countingSort(arr):
    """Sort a list of non-negative integers in place using counting sort.

    Builds a histogram of values (so the input must consist of small
    non-negative integers) and rewrites `arr` in ascending order.

    :param arr: list of non-negative ints; mutated in place.
    :return: the same list object, sorted.
    """
    # Guard: max() raises ValueError on an empty sequence.
    if not arr:
        return arr

    # bucket[v] counts occurrences of value v.  For inputs whose values
    # start far from zero, sizing by (max - min + 1) would waste less
    # memory, but zero-based counting keeps the code simple.
    bucket = [0] * (max(arr) + 1)
    for val in arr:
        bucket[val] += 1

    # Rewrite arr from the histogram: emit each value `count` times.
    sortedIndex = 0
    for value, count in enumerate(bucket):
        while count > 0:
            arr[sortedIndex] = value
            sortedIndex += 1
            count -= 1
    return arr
def countingSort_v2(arr):
    """Stable counting sort returning a new sorted list.

    Counts occurrences of each non-negative integer, turns the counts
    into cumulative "number of elements <= v" positions, then places
    each element directly at its final index.

    Bug fix: the previous implementation indexed `bucket[arr[i] - 1]`,
    which corrupts the output when values repeat (e.g. [2, 2, 1] came
    back as [1, 2, 0]) and wraps around to the last bucket for value 0.
    The standard decrement-then-place scheme below fixes both.

    :param arr: list of non-negative ints (not modified).
    :return: a new list with the same elements in ascending order.
    """
    if not arr:
        return []

    # Histogram: bucket[v] = number of occurrences of value v.
    bucket = [0] * (max(arr) + 1)
    for val in arr:
        bucket[val] += 1

    # Prefix sums: bucket[v] = number of elements <= v, i.e. one past
    # the final slot of the last occurrence of v in the sorted output.
    for i in range(1, len(bucket)):
        bucket[i] += bucket[i - 1]

    # Place elements; iterating the input in reverse keeps equal values
    # in their original relative order (stability).
    output = [0] * len(arr)
    for val in reversed(arr):
        bucket[val] -= 1
        output[bucket[val]] = val
    return output
def main():
    """Demo: sort a sample list with countingSort_v2 and print the result."""
    sample = [3, 44, 38, 5, 47, 15, 36, 26, 27, 2, 46, 4, 19, 50, 48]
    print(countingSort_v2(sample))


if __name__ == '__main__':
    main()
| [
"liuyuan@aplum.com.cn"
] | liuyuan@aplum.com.cn |
390a467424adb2bcd0c7434b36bd29a5310d3333 | a3855396e382ec8e5be2bd54d9864feda584546b | /20 Flask REST API with Threadding Run task in background.py | 95633da11872e90ea1ba2763b883145d836b907f | [] | no_license | raj713335/REST_API_VIA_FLASK | 6930862c7c993222f876de475f08257acb2dbfec | 0b6fca7bbfbb7571e468a3b292d9bbe0d79cc6fa | refs/heads/master | 2022-12-31T08:15:58.769835 | 2020-10-26T00:05:10 | 2020-10-26T00:05:10 | 306,668,254 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 470 | py | from flask import Flask
from flask_restful import Resource,Api
import time
import threading
app=Flask(__name__)
api=Api(app)
def task():
    """Simulated long-running job: logs start and end around a 6 s sleep."""
    print("Started Task...")
    # Show which worker thread is executing this job.
    print(threading.current_thread().name)
    time.sleep(6)  # stand-in for real work
    print("completed ...")
class HelloWorld(Resource):
    """Root endpoint: starts the background task and replies immediately."""

    def get(self):
        # Fire-and-forget: the HTTP response returns while task() runs on.
        worker = threading.Thread(target=task)
        worker.start()
        return {'hello': 'world'}
api.add_resource(HelloWorld,'/')
if __name__=="__main__":
app.run(debug=True)
| [
"raj713335@gmail.com"
] | raj713335@gmail.com |
6ab5f1183af1a2d2c4981ad8344956be938f629a | 58654f5f9f01813da08ecb4f151e7dae32f18cad | /quant_mech/src/test/hierarchy_solver_steady_state_test.py | 45604b0638fe003db261f3defb9eb367f1e40e6b | [] | no_license | rstones/quant_mech | 57c2f106adfe6fcd1880ab3c50d6f68012963beb | 4dc8f59d66e131cca0cc896638f548d9fcae66e4 | refs/heads/master | 2021-01-19T02:58:24.027297 | 2018-11-26T11:15:04 | 2018-11-26T11:15:04 | 11,790,263 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,251 | py | '''
Created on 15 Mar 2017
@author: richard
'''
import numpy as np
import quant_mech.utils as utils
from quant_mech.OBOscillator import OBOscillator
from quant_mech.UBOscillator import UBOscillator
from quant_mech.hierarchy_solver import HierarchySolver
import scipy as sp
print sp.__version__
'''Ishizaki and Fleming params'''
electronic_coupling = 0.1
system_hamiltonian = np.array([[0, 0, 0],
[0, 0.2, electronic_coupling],
[0, electronic_coupling, 0]])
reorg_energy = 100.
cutoff_freq = 5.
temperature = 2.7 # Kelvin
beta = 4.297 # 0.4 #1. / (utils.KELVIN_TO_WAVENUMS * temperature)
mode_params = [] #[(200., 0.25, 10.)]
jump_ops = np.array([np.array([[0, 0, 0],
[1., 0, 0],
[0, 0, 0]]), np.array([[0, 0, 1.],
[0, 0, 0],
[0, 0, 0]])])
jump_rates = np.array([0.1, 0.0025])
K = 4
environment = []
if mode_params: # assuming that there is a single identical mode on each site
environment = [(OBOscillator(reorg_energy, cutoff_freq, beta, K=K), UBOscillator(mode_params[0][0], mode_params[0][1], mode_params[0][2], beta, K=K)), \
(OBOscillator(reorg_energy, cutoff_freq, beta, K=K), UBOscillator(mode_params[0][0], mode_params[0][1], mode_params[0][2], beta, K=K))]
else:
environment = [(),
(OBOscillator(reorg_energy, cutoff_freq, beta, K=K),),
(OBOscillator(reorg_energy, cutoff_freq, beta, K=K),)]
hs = HierarchySolver(system_hamiltonian, environment, beta, jump_ops, jump_rates, num_matsubara_freqs=K, temperature_correction=True)
hs.truncation_level = 7
hm = hs.construct_hierarchy_matrix_super_fast()
print 'hierarchy matrix shape: ' + str(hm.shape)
print hs.dm_per_tier()
np.savez('DQD_heom_matrix_N7_K4.npz', hm=hm)
import scipy.sparse.linalg as spla
np.set_printoptions(precision=6, linewidth=150, suppress=True)
v0 = np.zeros(hm.shape[0])
v0[0] = 1./3
v0[4] = 1./3
v0[8] = 1./3
evals,evec = spla.eigs(hm.tocsc(), k=1, sigma=0, which='LM', v0=v0)#, ncv=100)
print evals
evec = evec[:9]
evec.shape = 3,3
evec /= np.trace(evec)
print evec
| [
"r.stones@ucl.ac.uk"
] | r.stones@ucl.ac.uk |
41f450bc4cbe94baee0ef10c1f5414f0977a3a3f | 13e93cd07fb45f9fd3bc2a1de78d6d7d4a8f8d25 | /backend/theloungeconsole_28185/settings.py | 8a505b7264581a8cc9636fc340d22846b95ad478 | [] | no_license | crowdbotics-apps/theloungeconsole-28185 | fb8a246c44d781101fa924e77a83dc5cf6f541f0 | 57c9122f1865abdcb00fbe91a02598c599575c61 | refs/heads/master | 2023-05-30T04:29:22.662494 | 2021-06-23T11:01:55 | 2021-06-23T11:01:55 | 379,573,015 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,132 | py | """
Django settings for theloungeconsole_28185 project.
Generated by 'django-admin startproject' using Django 2.2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import environ
import logging
env = environ.Env()
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env.bool("DEBUG", default=False)
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env.str("SECRET_KEY")
ALLOWED_HOSTS = env.list("HOST", default=["*"])
SITE_ID = 1
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
SECURE_SSL_REDIRECT = env.bool("SECURE_REDIRECT", default=False)
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites'
]
LOCAL_APPS = [
'home',
'modules',
'users.apps.UsersConfig',
]
THIRD_PARTY_APPS = [
'rest_framework',
'rest_framework.authtoken',
'rest_auth',
'rest_auth.registration',
'bootstrap4',
'allauth',
'allauth.account',
'allauth.socialaccount',
'allauth.socialaccount.providers.google',
'django_extensions',
'drf_yasg',
'storages',
# start fcm_django push notifications
'fcm_django',
# end fcm_django push notifications
]
INSTALLED_APPS += LOCAL_APPS + THIRD_PARTY_APPS
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'theloungeconsole_28185.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'web_build')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'theloungeconsole_28185.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
if env.str("DATABASE_URL", default=None):
DATABASES = {
'default': env.db()
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
MIDDLEWARE += ['whitenoise.middleware.WhiteNoiseMiddleware']
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend'
)
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATICFILES_DIRS = [os.path.join(BASE_DIR, 'static'), os.path.join(BASE_DIR, 'web_build/static')]
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
# allauth / users
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_AUTHENTICATION_METHOD = 'email'
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_EMAIL_VERIFICATION = "optional"
ACCOUNT_CONFIRM_EMAIL_ON_GET = True
ACCOUNT_LOGIN_ON_EMAIL_CONFIRMATION = True
ACCOUNT_UNIQUE_EMAIL = True
LOGIN_REDIRECT_URL = "users:redirect"
ACCOUNT_ADAPTER = "users.adapters.AccountAdapter"
SOCIALACCOUNT_ADAPTER = "users.adapters.SocialAccountAdapter"
ACCOUNT_ALLOW_REGISTRATION = env.bool("ACCOUNT_ALLOW_REGISTRATION", True)
SOCIALACCOUNT_ALLOW_REGISTRATION = env.bool("SOCIALACCOUNT_ALLOW_REGISTRATION", True)
REST_AUTH_SERIALIZERS = {
# Replace password reset serializer to fix 500 error
"PASSWORD_RESET_SERIALIZER": "home.api.v1.serializers.PasswordSerializer",
}
REST_AUTH_REGISTER_SERIALIZERS = {
# Use custom serializer that has no username and matches web signup
"REGISTER_SERIALIZER": "home.api.v1.serializers.SignupSerializer",
}
# Custom user model
AUTH_USER_MODEL = "users.User"
EMAIL_HOST = env.str("EMAIL_HOST", "smtp.sendgrid.net")
EMAIL_HOST_USER = env.str("SENDGRID_USERNAME", "")
EMAIL_HOST_PASSWORD = env.str("SENDGRID_PASSWORD", "")
EMAIL_PORT = 587
EMAIL_USE_TLS = True
# AWS S3 config
AWS_ACCESS_KEY_ID = env.str("AWS_ACCESS_KEY_ID", "")
AWS_SECRET_ACCESS_KEY = env.str("AWS_SECRET_ACCESS_KEY", "")
AWS_STORAGE_BUCKET_NAME = env.str("AWS_STORAGE_BUCKET_NAME", "")
AWS_STORAGE_REGION = env.str("AWS_STORAGE_REGION", "")
USE_S3 = (
AWS_ACCESS_KEY_ID and
AWS_SECRET_ACCESS_KEY and
AWS_STORAGE_BUCKET_NAME and
AWS_STORAGE_REGION
)
if USE_S3:
AWS_S3_CUSTOM_DOMAIN = env.str("AWS_S3_CUSTOM_DOMAIN", "")
AWS_S3_OBJECT_PARAMETERS = {"CacheControl": "max-age=86400"}
AWS_DEFAULT_ACL = env.str("AWS_DEFAULT_ACL", "public-read")
AWS_MEDIA_LOCATION = env.str("AWS_MEDIA_LOCATION", "media")
AWS_AUTO_CREATE_BUCKET = env.bool("AWS_AUTO_CREATE_BUCKET", True)
DEFAULT_FILE_STORAGE = env.str(
"DEFAULT_FILE_STORAGE", "home.storage_backends.MediaStorage"
)
MEDIA_URL = '/mediafiles/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'mediafiles')
# start fcm_django push notifications
FCM_DJANGO_SETTINGS = {
"FCM_SERVER_KEY": env.str("FCM_SERVER_KEY", "")
}
# end fcm_django push notifications
# Swagger settings for api docs
SWAGGER_SETTINGS = {
"DEFAULT_INFO": f"{ROOT_URLCONF}.api_info",
}
if DEBUG or not (EMAIL_HOST_USER and EMAIL_HOST_PASSWORD):
# output email to console instead of sending
if not DEBUG:
logging.warning("You should setup `SENDGRID_USERNAME` and `SENDGRID_PASSWORD` env vars to send emails.")
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
1b52a8c073aa7b1299327b714f40bfab360ae620 | ca850269e513b74fce76847310bed143f95b1d10 | /build/navigation/map_server/catkin_generated/pkg.develspace.context.pc.py | d70657140fd79ea2f8707d898a32909e8b49fded | [] | no_license | dvij542/RISS-2level-pathplanning-control | f98f2c83f70c2894d3c248630159ea86df8b08eb | 18390c5ab967e8649b9dc83681e9090a37f3d018 | refs/heads/main | 2023-06-15T03:58:25.293401 | 2021-06-20T20:20:30 | 2021-06-20T20:20:30 | 368,553,169 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 560 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/dvij5420/catkin_ws/src/navigation/map_server/include".split(';') if "/home/dvij5420/catkin_ws/src/navigation/map_server/include" != "" else []
PROJECT_CATKIN_DEPENDS = "roscpp;nav_msgs;tf2".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "-lmap_server_image_loader".split(';') if "-lmap_server_image_loader" != "" else []
PROJECT_NAME = "map_server"
PROJECT_SPACE_DIR = "/home/dvij5420/catkin_ws/devel"
PROJECT_VERSION = "1.14.9"
| [
"dvij.kalaria@gmail.com"
] | dvij.kalaria@gmail.com |
b97f9e2e6819731ed2e72f322fe82fc21b408239 | 7bededcada9271d92f34da6dae7088f3faf61c02 | /pypureclient/flasharray/FA_2_20/models/support_response.py | 6615baa58a213837986445ba7e3bc77ac6e1937e | [
"BSD-2-Clause"
] | permissive | PureStorage-OpenConnect/py-pure-client | a5348c6a153f8c809d6e3cf734d95d6946c5f659 | 7e3c3ec1d639fb004627e94d3d63a6fdc141ae1e | refs/heads/master | 2023-09-04T10:59:03.009972 | 2023-08-25T07:40:41 | 2023-08-25T07:40:41 | 160,391,444 | 18 | 29 | BSD-2-Clause | 2023-09-08T09:08:30 | 2018-12-04T17:02:51 | Python | UTF-8 | Python | false | false | 3,781 | py | # coding: utf-8
"""
FlashArray REST API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 2.20
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flasharray.FA_2_20 import models
class SupportResponse(object):
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'items': 'list[Support]'
}
attribute_map = {
'items': 'items'
}
required_args = {
}
def __init__(
self,
items=None, # type: List[models.Support]
):
"""
Keyword args:
items (list[Support])
"""
if items is not None:
self.items = items
def __setattr__(self, key, value):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `SupportResponse`".format(key))
self.__dict__[key] = value
def __getattribute__(self, item):
value = object.__getattribute__(self, item)
if isinstance(value, Property):
raise AttributeError
else:
return value
def __getitem__(self, key):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `SupportResponse`".format(key))
return object.__getattribute__(self, key)
def __setitem__(self, key, value):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `SupportResponse`".format(key))
object.__setattr__(self, key, value)
def __delitem__(self, key):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `SupportResponse`".format(key))
object.__delattr__(self, key)
def keys(self):
return self.attribute_map.keys()
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
if hasattr(self, attr):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(SupportResponse, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, SupportResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"noreply@github.com"
] | PureStorage-OpenConnect.noreply@github.com |
d3cdc7a0077856a2b97391ea48f1c5b7b2add971 | af2fca9931bd9d9531d7e3a7fba3df02fea1bfd3 | /bin/dl | 99e68cdd0bae029ebaf686b93fc8873ba55c2de9 | [] | no_license | hukkelas/Dotfiles | 21c5fa5500bf0a0ee3e4dfa1bd069d45c66738df | b0439f612261f1aa0c4f2bb89f06095a08dad990 | refs/heads/master | 2021-06-28T12:26:04.478573 | 2020-09-21T07:18:17 | 2020-09-21T07:18:17 | 136,961,645 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 616 | #!/usr/bin/env python3
import subprocess
import argparse
from termcolor import colored
parser = argparse.ArgumentParser()
parser.add_argument("docker_name")
args = parser.parse_args()
keys_to_print = ["Image", "Status","RunningFor", "Names"]
query = "docker ps -a --format '{{.Names}}'"
wanted_idx = [1, 3, 4, 5, 6]
values = {
"Train": "",
"GPU": ""
}
output = subprocess.check_output(query, shell=True).decode().split("\n")
output = [x for x in output if args.docker_name in x]
assert len(output) == 1, output
out = subprocess.check_output(f"docker logs {output[0]}", shell=True).decode()
print(out)
| [
"hakon.hukkelas@ntnu.no"
] | hakon.hukkelas@ntnu.no | |
eee551a4414a19d3f9c93aa595e307f5d736289e | ac4dbe322dcb666abdbd5450ecf1194dd7fa21aa | /learn_pipe/model/opt_params.py | 7edcc46f16441c7e31765e0bf7154e2e93798abc | [
"MIT"
] | permissive | tpimentelms/meaning2form | 432a1cc1b83e54f6b3e3eed54d8d336ae00079a8 | 624b3947b3ac2a7a521cf35c762fb56508236f74 | refs/heads/master | 2022-12-13T21:50:10.196711 | 2020-03-12T10:29:09 | 2020-03-12T10:29:09 | 188,860,052 | 3 | 1 | MIT | 2022-12-08T05:15:50 | 2019-05-27T14:36:40 | Python | UTF-8 | Python | false | false | 804 | py | import pandas as pd
def _get_opt_params(fname, lang, delimiter='\t'):
results = pd.read_csv(fname, delimiter=delimiter)
instance = results[results['lang'] == lang]
embedding_size = int(instance['embedding_size'].item())
hidden_size = int(instance['hidden_size'].item())
word2vec_size = int(instance['word2vec_size'].item())
nlayers = int(instance['nlayers'].item())
dropout = instance['dropout'].item()
return embedding_size, hidden_size, word2vec_size, nlayers, dropout
def get_opt_params(lang, args):
context = args.context if 'shuffle' not in args.context else args.context[:-8]
fname = '%s/bayes-opt%s/orig/%s__%s__opt-results.csv' \
% (args.rfolder_base, args.fsuffix, args.model, context)
return _get_opt_params(fname, lang, delimiter=',')
| [
"tiagopms@gmail.com"
] | tiagopms@gmail.com |
4aa8df812330ebe7963dd961c6485a55d2669e95 | 433cf60d4a3bb69f126ab6b55c43eb34a79aaa8f | /state.py | 1c7863b94d133fba8d527416442c94ae094eeba5 | [] | no_license | pockerman/odisseus_raspberry_pi | 2d84460db859d4b9d52af10a433899945cd44f4c | b6ca2ebf9178411dcb71246880bfb97adcffbad0 | refs/heads/master | 2021-07-13T09:50:33.113891 | 2020-05-23T15:55:43 | 2020-05-23T15:55:43 | 199,181,066 | 2 | 1 | null | 2020-05-23T14:42:37 | 2019-07-27T15:19:03 | Python | UTF-8 | Python | false | false | 587 | py | """
State describe the state of Odisseus
"""
import numpy as np
class State(object):
@staticmethod
def names():
return ["X", "Y", "Vx", "Vy", "Theta"]
def __init__(self, init_cond):
if init_cond:
self._state = init_cond
else:
self._state = np.array([0., 0., 0., 0., 0.])
def get_value(self):
return self._state
def set_value(self, value):
self._state = value
def __iadd__(self, other):
self._state += other
return self
def __len__(self):
return len(self._state) | [
"a.giavaras@gmail.com"
] | a.giavaras@gmail.com |
e6e31132160cc9877eb1dd9ceee62b6a99180cc9 | 7b36801dd87a1df93b2836db74f68b5e00682638 | /scripts/02_MP2RAGE/04_apply_reg.py | 652c4181b66a141c87daf9611f9e9d2bd2b4e656 | [
"BSD-3-Clause"
] | permissive | ofgulban/meso-MRI | c873bf227ae1048a84ffa7999c7ece72f3a8c3f8 | 2afd70a3bb7576f401dd98eeb07df38368f42baf | refs/heads/main | 2023-04-11T19:49:50.837267 | 2022-10-24T13:31:49 | 2022-10-24T13:31:49 | 327,944,487 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,505 | py | """Apply registration."""
import os
import subprocess
import numpy as np
import nibabel as nb
# =============================================================================
NII_NAMES = [
"/home/faruk/data2/DATA_MRI_NIFTI/derived/sub-05/T1/02_upsample/sub-05_ses-T1_run-02_dir-RL_MP2RAGE_T1_crop_ups2X.nii.gz",
"/home/faruk/data2/DATA_MRI_NIFTI/derived/sub-05/T1/02_upsample/sub-05_ses-T1_run-03_dir-PA_MP2RAGE_T1_crop_ups2X.nii.gz",
"/home/faruk/data2/DATA_MRI_NIFTI/derived/sub-05/T1/02_upsample/sub-05_ses-T1_run-04_dir-LR_MP2RAGE_T1_crop_ups2X.nii.gz",
"/home/faruk/data2/DATA_MRI_NIFTI/derived/sub-05/T1/02_upsample/sub-05_ses-T1_run-05_dir-AP_MP2RAGE_T1_crop_ups2X.nii.gz",
"/home/faruk/data2/DATA_MRI_NIFTI/derived/sub-05/T1/02_upsample/sub-05_ses-T1_run-06_dir-RL_MP2RAGE_T1_crop_ups2X.nii.gz",
"/home/faruk/data2/DATA_MRI_NIFTI/derived/sub-05/T1/02_upsample/sub-05_ses-T1_run-07_dir-PA_MP2RAGE_T1_crop_ups2X.nii.gz",
"/home/faruk/data2/DATA_MRI_NIFTI/derived/sub-05/T1/02_upsample/sub-05_ses-T1_run-08_dir-LR_MP2RAGE_T1_crop_ups2X.nii.gz",
]
AFFINES = [
"/home/faruk/data2/DATA_MRI_NIFTI/derived/sub-05/T1/03_motion_correct/sub-05_ses-T1_run-02_dir-RL_part-mag_MP2RAGE_inv2_crop_ups2X_affine.mat",
"/home/faruk/data2/DATA_MRI_NIFTI/derived/sub-05/T1/03_motion_correct/sub-05_ses-T1_run-03_dir-PA_part-mag_MP2RAGE_inv2_crop_ups2X_affine.mat",
"/home/faruk/data2/DATA_MRI_NIFTI/derived/sub-05/T1/03_motion_correct/sub-05_ses-T1_run-04_dir-LR_part-mag_MP2RAGE_inv2_crop_ups2X_affine.mat",
"/home/faruk/data2/DATA_MRI_NIFTI/derived/sub-05/T1/03_motion_correct/sub-05_ses-T1_run-05_dir-AP_part-mag_MP2RAGE_inv2_crop_ups2X_affine.mat",
"/home/faruk/data2/DATA_MRI_NIFTI/derived/sub-05/T1/03_motion_correct/sub-05_ses-T1_run-06_dir-RL_part-mag_MP2RAGE_inv2_crop_ups2X_affine.mat",
"/home/faruk/data2/DATA_MRI_NIFTI/derived/sub-05/T1/03_motion_correct/sub-05_ses-T1_run-07_dir-PA_part-mag_MP2RAGE_inv2_crop_ups2X_affine.mat",
"/home/faruk/data2/DATA_MRI_NIFTI/derived/sub-05/T1/03_motion_correct/sub-05_ses-T1_run-08_dir-LR_part-mag_MP2RAGE_inv2_crop_ups2X_affine.mat",
]
REFERENCE = "/home/faruk/data2/DATA_MRI_NIFTI/derived/sub-05/T1/02_upsample/sub-05_ses-T1_run-01_dir-AP_MP2RAGE_uni_crop_ups2X.nii.gz"
OUTDIR = "/home/faruk/data2/DATA_MRI_NIFTI/derived/sub-05/T1/04_apply_reg"
# =============================================================================
print("MP2RAGE Step 04: Apply registration to UNI images.")
# Output directory
if not os.path.exists(OUTDIR):
os.makedirs(OUTDIR)
print(" Output directory: {}\n".format(OUTDIR))
for i in range(0, len(NII_NAMES)):
# -------------------------------------------------------------------------
# Apply affine transformation matrix
# -------------------------------------------------------------------------
# Prepare inputs
in_moving = NII_NAMES[i]
affine = AFFINES[i]
# Prepare output
basename, ext = in_moving.split(os.extsep, 1)
basename = os.path.basename(basename)
print(basename)
out_moving = os.path.join(OUTDIR, "{}_reg.nii.gz".format(basename))
command = "greedy "
command += "-d 3 "
command += "-rf {} ".format(REFERENCE) # reference
command += "-ri LINEAR " # No other better options than linear
command += "-rm {} {} ".format(in_moving, out_moving) # moving resliced
command += "-r {} ".format(affine)
# Execute command
subprocess.run(command, shell=True)
print('\n\nFinished.')
| [
"farukgulban@gmail.com"
] | farukgulban@gmail.com |
a7520b92dd939d6051721286e33af5bdb7690a8d | eb82e06402be351e1d41dfc0a2646426d26eace6 | /mkt/files/migrations/0001_initial.py | 6b85d73be2712eebb1cf43058d6865cbabce8250 | [] | permissive | sarvex/zamboni | 34c28697f007b40131444af10fa943b19244fa24 | 5fa5400a447f2e905372d4c8eba6d959d22d4f3e | refs/heads/main | 2023-08-19T04:53:22.857291 | 2023-08-14T10:04:05 | 2023-08-14T10:04:05 | 32,572,674 | 0 | 0 | BSD-3-Clause | 2023-09-10T15:16:10 | 2015-03-20T08:41:50 | Python | UTF-8 | Python | false | false | 3,661 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import mkt.site.models
from django.conf import settings
import uuidfield.fields
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='File',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
('filename', models.CharField(default=b'', max_length=255)),
('size', models.PositiveIntegerField(default=0)),
('hash', models.CharField(default=b'', max_length=255)),
('status', models.PositiveSmallIntegerField(default=2, db_index=True, choices=[(0, 'Incomplete'), (16, 'Unlisted'), (2, 'Pending approval'), (4, 'Published'), (5, 'Banned from Marketplace'), (11, 'Deleted'), (12, 'Rejected'), (13, 'Approved but private'), (15, 'Blocked')])),
('datestatuschanged', models.DateTimeField(auto_now_add=True, null=True)),
('reviewed', models.DateTimeField(null=True)),
('uses_flash', models.BooleanField(default=False, db_index=True)),
],
options={
'abstract': False,
'db_table': 'files',
'get_latest_by': 'created',
},
bases=(mkt.site.models.OnChangeMixin, models.Model),
),
migrations.CreateModel(
name='FileUpload',
fields=[
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
('uuid', uuidfield.fields.UUIDField(primary_key=True, serialize=False, editable=False, max_length=32, blank=True, unique=True)),
('path', models.CharField(default=b'', max_length=255)),
('name', models.CharField(default=b'', help_text=b"The user's original filename", max_length=255)),
('hash', models.CharField(default=b'', max_length=255)),
('valid', models.BooleanField(default=False)),
('validation', models.TextField(null=True)),
('task_error', models.TextField(null=True)),
('user', models.ForeignKey(to=settings.AUTH_USER_MODEL, null=True)),
],
options={
'abstract': False,
'db_table': 'file_uploads',
'get_latest_by': 'created',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='FileValidation',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
('valid', models.BooleanField(default=False)),
('errors', models.IntegerField(default=0)),
('warnings', models.IntegerField(default=0)),
('notices', models.IntegerField(default=0)),
('validation', models.TextField()),
('file', models.OneToOneField(related_name='validation', to='files.File')),
],
options={
'db_table': 'file_validation',
},
bases=(models.Model,),
),
]
| [
"ashort@mozilla.com"
] | ashort@mozilla.com |
95d7af9ddf3ad428c306a3dda706de52e90220f3 | baad8c0884d2a0ff57dfb70f766a2f2c05f3f5cc | /douyu/douyu/spiders/douyuapp.py | 6b13e7cb5737b91e470ca568702e56f087646506 | [] | no_license | ShaoLay/douyu | ee96f9bd09fd7136de8078284baa2b4f2ede274f | 6b2fe2c3f05a58d5e7a323c6e72b7ba447fc74dd | refs/heads/master | 2020-04-14T18:59:58.695098 | 2019-01-04T02:36:24 | 2019-01-04T02:36:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 707 | py | import scrapy
import json
from douyu.items import DouyuspiderItem
class DouyuSpider(scrapy.Spider):
name = "douyu"
allowd_domains = ["http://capi.douyucdn.cn"]
offset = 0
url = "http://capi.douyucdn.cn/api/v1/getVerticalRoom?limit=20&offset="
start_urls = [url + str(offset)]
def parse(self, response):
# 返回从json里获取 data段数据集合
data = json.loads(response.text)["data"]
for each in data:
item = DouyuspiderItem()
item["name"] = each["nickname"]
item["imagesUrls"] = each["vertical_src"]
yield item
self.offset += 20
yield scrapy.Request(self.url + str(self.offset), callback = self.parse) | [
"javs_shao@163.com"
] | javs_shao@163.com |
183946b6075590e313dcdd943ff083cf360204a4 | b0fdcd6038f8f51ac6fb88abd3698656d9df6ef5 | /HCI_LAB/line_detection/main.py | 14ba7d421926ef79a0388b5fc6345187f040f210 | [] | no_license | ShineySun/HCI_Deep_Lane_Detection | a5b40c3b9df5eaf6afc7cb9983574009199e1af9 | 0b7b7a97f03258942d1d2cd9d86e42348bebab15 | refs/heads/master | 2021-01-02T06:55:59.600259 | 2020-05-15T09:59:33 | 2020-05-15T09:59:33 | 239,537,065 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,033 | py | import sys
# print(sys.path)
# sys.path.append('/lib/python3.7/site-packages')
import opts
import math
import importlib
from preprocess import *
import _init_paths
import torch
from PIL import Image
import torchvision.transforms as transforms
from torch.autograd import Variable
import cv2
import numpy as np
# python3 main.py --netType stackedHGB --GPUs 0 --LR 0.001 --batchSize 1 --nStack 7 --optim Adam
def main():
os.environ["CUDA_VISIBLE_DEVICES"] = '0'
#cudnn.benchmark = True
opt = opts.parse()
print(("device id: {}".format(torch.cuda.current_device())))
print("torch.version",torch.__version__)
print("cuda_version",torch.version.cuda)
models = importlib.import_module('models.init')
# print(models)
criterions = importlib.import_module('criterions.init')
checkpoints = importlib.import_module('checkpoints')
Trainer = importlib.import_module('models.' + opt.netType + '-train')
# if opt.genLine:
# if opt.testOnly:
# processData('test')
# else:
# print('Prepare train data')
# processData('train')
try:
DataLoader = importlib.import_module('models.' + opt.netType + '-dataloader')
#print('DataLoader1 : ', DataLoader)
except ImportError:
DataLoader = importlib.import_module('datasets.dataloader')
#print('DataLoader2 : ', DataLoader)
# Data loading
print('=> Setting up data loader')
trainLoader, valLoader = DataLoader.create(opt)
#print('opt',opt)
# Load previous checkpoint, if it exists
print('=> Checking checkpoints')
checkpoint = checkpoints.load(opt)
# Create model
model, optimState = models.setup(opt, checkpoint)
model.cuda()
criterion = criterions.setup(opt, checkpoint, model)
# The trainer handles the training loop and evaluation on validation set
trainer = Trainer.createTrainer(model, criterion, opt, optimState)
if opt.testOnly:
loss = trainer.test(valLoader, 0)
sys.exit()
bestLoss = math.inf
startEpoch = max([1, opt.epochNum])
#print("opt.epochNum : ", opt.epochNum)
if checkpoint != None:
startEpoch = checkpoint['epoch'] + 1
bestLoss = checkpoint['loss']
print('Previous loss: \033[1;36m%1.4f\033[0m' % bestLoss)
# optimizer.step()
trainer.LRDecay(startEpoch)
# opt.nEpochs + 1
for epoch in range(startEpoch, opt.nEpochs + 1):
trainer.scheduler.step()
#trainLoss = trainer.train(trainLoader, epoch)
testLoss = trainer.test(valLoader, epoch)
break
# bestModel = False
# if testLoss < bestLoss:
# bestModel = True
# bestLoss = testLoss
# print(' * Best model: \033[1;36m%1.4f\033[0m * ' % testLoss)
#
# checkpoints.save(epoch, trainer.model, criterion, trainer.optimizer, bestModel, testLoss ,opt)
#
# print(' * Finished Err: \033[1;36m%1.4f\033[0m * ' % bestLoss)
if __name__ == '__main__':
main()
| [
"ksp2246@naver.com"
] | ksp2246@naver.com |
371c31e5fc3bcc1a0141df7f8659376578c4ebf1 | 786de89be635eb21295070a6a3452f3a7fe6712c | /numpy/tags/V00-01-05/SConscript | 6f45a7e075210962a1c9f52bd87171f239eeedf9 | [] | no_license | connectthefuture/psdmrepo | 85267cfe8d54564f99e17035efe931077c8f7a37 | f32870a987a7493e7bf0f0a5c1712a5a030ef199 | refs/heads/master | 2021-01-13T03:26:35.494026 | 2015-09-03T22:22:11 | 2015-09-03T22:22:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 947 | #--------------------------------------------------------------------------
# File and Version Information:
# $Id$
#
# Description:
# SConscript file for package numpy
#------------------------------------------------------------------------
# Do not delete following line, it must be present in
# SConscript file for any SIT project
Import('*')
from os.path import join as pjoin
from SConsTools.standardExternalPackage import standardExternalPackage
#
# For the standard external packages which contain includes, libraries,
# and applications it is usually sufficient to call standardExternalPackage()
# giving some or all parameters.
#
pkg = "numpy"
pkg_ver = "1.3.0"
PREFIX = pjoin('$SIT_EXTERNAL_SW', pkg, pkg_ver)
PYDIR = pjoin('$LIB_ABI', '$PYTHON', "site-packages", pkg)
PYDIRSEP = True
INCDIR = pjoin(PYDIR, "core", "include", pkg)
PKGINFO = (pkg, pkg_ver, '$PYTHON', '$SIT_ARCH.found')
standardExternalPackage(pkg, **locals())
| [
"salnikov@SLAC.STANFORD.EDU@b967ad99-d558-0410-b138-e0f6c56caec7"
] | salnikov@SLAC.STANFORD.EDU@b967ad99-d558-0410-b138-e0f6c56caec7 | |
feb42e71803ec62191f6593b4b79a9c6a75d36a1 | b53e3d57d31a47a98d87141e44a5f8940ee15bca | /test/programytest/parser/pattern/matching/test_set.py | 002785e6fa43cb2e1fb506957d39ed763fe19deb | [
"MIT"
] | permissive | Chrissimple/program-y | 52177fcc17e75fb97ab3993a4652bcbe7906bd58 | 80d80f0783120c2341e6fc57e7716bbbf28a8b3f | refs/heads/master | 2020-03-29T13:20:08.162177 | 2018-09-26T19:09:20 | 2018-09-26T19:09:20 | 149,952,995 | 1 | 0 | null | 2018-09-23T06:11:04 | 2018-09-23T06:11:04 | null | UTF-8 | Python | false | false | 4,013 | py |
from programytest.parser.pattern.matching.base import PatternMatcherBaseClass
from programy.mappings.sets import SetLoader
class PatternMatcherSetTests(PatternMatcherBaseClass):
def test_basic_set_match_as_text(self):
loader = SetLoader()
if self._bot.brain.sets.contains("SEX") is False:
self._bot.brain.sets.add_set("SEX", loader.load_from_text("""
Man
Woman
"""))
self.add_pattern_to_graph(pattern="I AM A <set>sex</set>", topic="X", that="Y", template="1")
context = self.match_sentence("I AM A MAN", topic="X", that="Y")
self.assertIsNotNone(context)
self.assertIsNotNone(context.template_node())
self.assertEqual("1", context.template_node().template.word)
self.assertEqual("Man", context.star(1))
context = self.match_sentence("I AM A WOMAN", topic="X", that="Y")
self.assertIsNotNone(context)
self.assertIsNotNone(context.template_node())
self.assertEqual("1", context.template_node().template.word)
self.assertEqual("Woman", context.star(1))
def test_basic_set_match_as_name(self):
loader = SetLoader()
if self._bot.brain.sets.contains("SEX") is False:
self._bot.brain.sets.add_set("SEX", loader.load_from_text("""
Man
Woman
"""))
self.add_pattern_to_graph(pattern='I AM A <set name="sex" />', topic="X", that="Y", template="1")
context = self.match_sentence("I AM A MAN", topic="X", that="Y")
self.assertIsNotNone(context)
self.assertIsNotNone(context.template_node())
self.assertEqual("1", context.template_node().template.word)
self.assertEqual("Man", context.star(1))
context = self.match_sentence("I AM A WOMAN", topic="X", that="Y")
self.assertIsNotNone(context)
self.assertIsNotNone(context.template_node())
self.assertEqual("1", context.template_node().template.word)
self.assertEqual("Woman", context.star(1))
def test_multi_word_set_match(self):
loader = SetLoader()
self._bot.brain.sets.add_set("COLOR", loader.load_from_text("""
RED
RED AMBER
RED BURNT OAK
RED ORANGE
"""))
self.add_pattern_to_graph(pattern="I LIKE <set>color</set> *", topic="*", that="*", template="1")
context = self.match_sentence("I LIKE RED PAINT", topic="*", that="*")
self.assertIsNotNone(context)
self.assertIsNotNone(context.template_node())
self.assertEqual("1", context.template_node().template.word)
self.assertEqual("RED", context.star(1))
self.assertEqual("PAINT", context.star(2))
context = self.match_sentence("I LIKE RED AMBER CARS", topic="*", that="*")
self.assertIsNotNone(context)
self.assertIsNotNone(context.template_node())
self.assertEqual("1", context.template_node().template.word)
self.assertEqual("RED AMBER", context.star(1))
self.assertEqual("CARS", context.star(2))
context = self.match_sentence("I LIKE RED BURNT OAK MOTOR BIKES", topic="*", that="*")
self.assertIsNotNone(context)
self.assertIsNotNone(context.template_node())
self.assertEqual("1", context.template_node().template.word)
self.assertEqual("RED BURNT OAK", context.star(1))
self.assertEqual("MOTOR BIKES", context.star(2))
def test_basic_set_number_match(self):
self._bot.brain.dynamics.add_dynamic_set('number', "programy.dynamic.sets.numeric.IsNumeric", None)
self.add_pattern_to_graph(pattern="I AM <set>number</set> YEARS OLD", topic="X", that="Y", template="1")
context = self.match_sentence("I AM 49 YEARS OLD", topic="X", that="Y")
self.assertIsNotNone(context)
self.assertIsNotNone(context.template_node())
self.assertEqual("1", context.template_node().template.word)
self.assertEqual("49", context.star(1))
| [
"keith@keithsterling.com"
] | keith@keithsterling.com |
3a72b003e889d606d91992540708d221eb06875b | ca47fe64364188b9317cf27c08c31c4af0ddf65f | /app/auth/forms.py | bb504b77ba7cf9b1984ba3ac07559dd5942d140d | [] | no_license | Kennedy128/kennedy-project3 | 79d1cfe2d90bec1c34cfc9135106dab2e1008e3b | a5523fcbf86acaae29759895002712cc046fafc4 | refs/heads/master | 2022-07-05T21:52:03.865036 | 2020-05-13T20:57:48 | 2020-05-13T20:57:48 | 263,722,168 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,245 | py |
from flask_wtf import FlaskForm
from wtforms import StringField,PasswordField,BooleanField,SubmitField,ValidationError
from wtforms.validators import Required,Email,EqualTo
from ..models import User
class RegistrationForm(FlaskForm):
email = StringField('Your Email Address',validators=[Required(),Email()])
username = StringField('Enter your username',validators = [Required()])
password = PasswordField('Password',validators = [Required(), EqualTo('password_confirm',message = 'Passwords must match')])
password_confirm = PasswordField('Confirm Passwords',validators = [Required()])
submit = SubmitField('Sign Up')
def validate_email(self, data_field):
if User.query.filter_by(email=data_field.data).first():
raise ValidationError('There is an account with that email')
def validate_username(self, data_field):
if User.query.filter_by(username=data_field.data).first():
raise ValidationError('That username is taken')
class LoginForm(FlaskForm):
email = StringField('Your Email Address',validators=[Required(),Email()])
password = PasswordField('Password',validators =[Required()])
remember = BooleanField('Remember me')
submit = SubmitField('Sign In') | [
"santa@northpole.com"
] | santa@northpole.com |
7f21588272c702b2fbfae216a5ae2764f36efb80 | 5891051796778cfb44a255248ce38789bfef9e70 | /P_base/faith_class/loading.py | 22388fec3e0227782de7fce6f4d8ec6eb0d8f417 | [] | no_license | Faithlmy/Python_base | cc546a5d86b123e102a69df1227cde9b6e567493 | 5a43557e6375dc9dbe5f6701d7c10e549873a5ab | refs/heads/master | 2021-01-01T17:07:04.097978 | 2018-03-31T16:44:01 | 2018-03-31T16:44:01 | 98,000,621 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,096 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
python的重载
"""
class human:
__age = 0
__sex = ''
__heigth = 0
__weigth = 0
name = ''
def __init__(self, age, sex, height, weight):
self.age = age
self.sex = sex
self.heigth = height
self.weigth = weight
def setname(self, name):
self.name = name
def show(self):
print(self.name)
print(self.__age)
print(self.__sex)
print(self.__heigth)
print(self.__weigth)
class student(human):
__classes = 0
__grade = 0
__num = 0
def __init__(self, classes, grade, num, age, sex, height, weight):#重载
self.__classes = classes
self.__grade = grade
self.__num = num
human.__init__(self, age, sex, height, weight) #调用human的初始化方法
def show(self):
human.show(self)
print(self.__classes)
print(self.__grade)
print(self.__num)
if __name__ == '__main__':
a = student(12, 3, 20170305, 18, 'male', 175, 65)
a.setname('faith')
a.show() | [
"lmengyy@126.com"
] | lmengyy@126.com |
9873106f27cd3a0141597de96df0b53e68ca1d87 | e65a428ca7ee11d2f62d702842d4afbd493f08a4 | /Data Types and Variables/elevator.py | 8863fad20ce9dfa6fc628d6a3a681caa5f3725d6 | [] | no_license | NikiDimov/SoftUni-Python-Fundamentals | d8ba24a06c4366e76bdc69f1c5225dca29fe955e | 5bb1bf5928e40f2bac867d33566c8b9dac13f566 | refs/heads/main | 2023-07-15T05:57:57.085880 | 2021-08-19T10:27:45 | 2021-08-19T10:27:45 | 323,631,864 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 326 | py | number_of_people = int(input())
capacity_of_elevator = int(input())
total_courses = 0
if number_of_people > capacity_of_elevator:
total_courses += number_of_people // capacity_of_elevator
if not number_of_people % capacity_of_elevator == 0:
total_courses += 1
else:
total_courses += 1
print(total_courses)
| [
"niki.dimov86@gmail.com"
] | niki.dimov86@gmail.com |
ad19d9d129e4da03fb6bf7e6eab0b113ab9f3769 | b84f0d7cf248452d7becfdfb672bc91dba4ea46c | /benchmark.py | f50f9d2cbc7fc03c03e6b82c560e6569d2992d0c | [
"MIT"
] | permissive | saurabhkulkarni77/bert-as-service | 35a467c1140333ef1319c8b40987f70dcd86d492 | d5d5670b7aa79746163ff8061fe76398e7146d5b | refs/heads/master | 2020-04-17T17:04:19.314059 | 2019-01-19T06:06:29 | 2019-01-19T06:06:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,470 | py | import random
import string
import sys
import threading
import time
from collections import namedtuple
from bert_serving.client import BertClient
from bert_serving.server import BertServer, get_args_parser
from numpy import mean
PORT = 7779
PORT_OUT = 7780
MODEL_DIR = '/data/cips/save/chinese_L-12_H-768_A-12'
common = vars(get_args_parser().parse_args(['-model_dir', MODEL_DIR, '-port', str(PORT), '-port_out', str(PORT_OUT)]))
common['num_worker'] = 2 # set num workers
common['num_repeat'] = 5 # set num repeats per experiment
common['num_client'] = 1 # set number of concurrent clients, will be override later
args = namedtuple('args_nt', ','.join(common.keys()))
globals()[args.__name__] = args
def tprint(msg):
"""like print, but won't get newlines confused with multiple threads"""
sys.stdout.write(msg + '\n')
sys.stdout.flush()
class BenchmarkClient(threading.Thread):
def __init__(self):
super().__init__()
self.batch = [''.join(random.choices(string.ascii_uppercase + string.digits,
k=args.max_seq_len)) for _ in range(args.client_batch_size)]
self.num_repeat = args.num_repeat
self.avg_time = 0
def run(self):
time_all = []
bc = BertClient(port=PORT, port_out=PORT_OUT, show_server_config=False, check_version=False, check_length=False)
for _ in range(self.num_repeat):
start_t = time.perf_counter()
bc.encode(self.batch)
time_all.append(time.perf_counter() - start_t)
print(time_all)
self.avg_time = mean(time_all)
if __name__ == '__main__':
experiments = {
'client_batch_size': [1, 4, 8, 16, 64, 256, 512, 1024, 2048, 4096],
'max_batch_size': [32, 64, 128, 256, 512],
'max_seq_len': [20, 40, 80, 160, 320],
'num_client': [2, 4, 8, 16, 32],
'pooling_layer': [[-j] for j in range(1, 13)]
}
fp = open('benchmark-%d.result' % common['num_worker'], 'w')
for var_name, var_lst in experiments.items():
# set common args
for k, v in common.items():
setattr(args, k, v)
avg_speed = []
for var in var_lst:
# override exp args
setattr(args, var_name, var)
server = BertServer(args)
server.start()
# sleep until server is ready
time.sleep(15)
all_clients = [BenchmarkClient() for _ in range(args.num_client)]
tprint('num_client: %d' % len(all_clients))
for bc in all_clients:
bc.start()
all_thread_speed = []
for bc in all_clients:
bc.join()
cur_speed = args.client_batch_size / bc.avg_time
all_thread_speed.append(cur_speed)
max_speed = int(max(all_thread_speed))
min_speed = int(min(all_thread_speed))
t_avg_speed = int(mean(all_thread_speed))
tprint('%s: %s\t%.3f\t%d/s' % (var_name, var, bc.avg_time, t_avg_speed))
tprint('max speed: %d\t min speed: %d' % (max_speed, min_speed))
avg_speed.append(t_avg_speed)
server.close()
fp.write('#### Speed wrt. `%s`\n\n' % var_name)
fp.write('|`%s`|seqs/s|\n' % var_name)
fp.write('|---|---|\n')
for i, j in zip(var_lst, avg_speed):
fp.write('|%s|%d|\n' % (i, j))
fp.flush()
fp.close()
| [
"hanhxiao@tencent.com"
] | hanhxiao@tencent.com |
59fd38d364beae081dd066ea60debcfc76d65e69 | 1fb2da0e6f73652f0b0126c82a84562f6a8d3535 | /935. Knight Dialer.py | 0c6faf5974ebdf4a997d8c9f4c72c190f73e9101 | [] | no_license | JaylenZhang19/Leetcode | be3456fcb45270c8aad797f965f4c7a1781c0e61 | 178546686aa3ae8f5da1ae845417f86fab9a644d | refs/heads/master | 2023-02-27T06:08:58.818435 | 2021-01-31T20:28:10 | 2021-01-31T20:28:10 | 287,661,146 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 700 | py | class Solution:
def knightDialer(self, n: int) -> int:
moves = [
[4, 6],
[6, 8],
[7, 9],
[4, 8],
[3, 9, 0],
[],
[0, 1, 7],
[2, 6],
[1, 3],
[2, 4],
]
dp = [1] * 10
for _ in range(n-1):
current_dp = [0] * 10
for node, count in enumerate(dp):
for nei in moves[node]:
current_dp[nei] = (current_dp[nei] + count) % (10 ** 9 + 7)
dp = current_dp
return sum(dp) % (10 ** 9 + 7)
| [
"noreply@github.com"
] | JaylenZhang19.noreply@github.com |
4727e9204aeccf1fa3855c1e6fdd478abea9f146 | 0734fe314483192e630272bb212aa7817d627628 | /parsl/tests/test_aalst_patterns/test_python_AND_SPLIT_P2.py | e9d383b559002bd1e8e680c61361fecb9d98ea66 | [
"Apache-2.0"
] | permissive | djf604/parsl | 9798f1043a2196d3b538c8683de6d34d57d8f279 | 118af3a52be1811a3355c79a7adadda5ea66afde | refs/heads/master | 2020-12-02T16:27:10.252111 | 2017-06-29T01:47:09 | 2017-06-29T01:47:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,003 | py | ''' Testing bash apps
'''
import parsl
from parsl import *
import os
import time
import shutil
import argparse
#parsl.set_stream_logger()
workers = ThreadPoolExecutor(max_workers=4)
dfk = DataFlowKernel(workers)
@App('python', dfk)
def increment(x):
return x+1
@App('python', dfk)
def slow_increment(x, dur):
import time
time.sleep(dur)
return x+1
def test_and_split(depth=5):
''' Test simple pipeline A->B...->N
'''
futs = {}
for i in range(depth):
futs[i] = increment(i)
print([ futs[i].result() for i in futs])
if __name__ == '__main__' :
parser = argparse.ArgumentParser()
parser.add_argument("-w", "--width", default="10", help="width of the pipeline")
parser.add_argument("-d", "--debug", action='store_true', help="Count of apps to launch")
args = parser.parse_args()
if args.debug:
pass
parsl.set_stream_logger()
#test_increment(depth=int(args.width))
test_and_split(depth=int(args.width))
| [
"yadudoc1729@gmail.com"
] | yadudoc1729@gmail.com |
0269fdda7a60854c67452fee04e6583d65eb2c04 | 6ff8b7b7ed534d36da6456feeda6ded80464a7de | /chains/tasks.py | d982b814bba7ac7e5b34a9a7950e17f285d6187f | [
"Apache-2.0"
] | permissive | denismakogon/aiorchestra-chain-plugin | c14c4fc1e8417edfbd0a60cc5e28542006928040 | 8607f2a547234952eeb4008aba48eb168b20d217 | refs/heads/master | 2021-01-17T08:46:54.599774 | 2016-07-05T11:02:45 | 2016-07-05T11:02:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,040 | py | # Author: Denys Makogon
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import yaml
from aiorchestra.core import context
from aiorchestra.core import utils
DT = 'tosca.artifacts.chain.deployment_template'
DTI = 'tosca.artifacts.chain.deployment_inputs'
PC = 'tosca.artifacts.chain.persisted_context'
@utils.operation
async def create(node, inputs):
node.context.logger.info('[{0}] - Building chain function '
'deployment context.'.format(node.name))
template = node.get_artifact_from_type(DT)
persisted_context = node.get_artifact_from_type(PC)
if persisted_context and not template:
raise Exception('[{0}] - Persisted context requires '
'template.'.format(node.name))
if not template:
raise Exception('[{0}] - Deployment template artifact '
'required.'.format(node.name))
inputs = node.get_artifact_from_type(DTI)
if not inputs:
node.context.logger.warn('[{0}] - Inputs artifact was '
'not specified.'.format(node.name))
deployment_inputs_file = inputs.pop().get('file')
deployment_template_file = template.pop().get('file')
dti = {}
if deployment_inputs_file:
with open(deployment_inputs_file, 'r') as dti:
dti = yaml.load(dti)
deployment_context = context.OrchestraContext(
node.name, path=deployment_template_file,
template_inputs=dti, logger=node.context.logger,
enable_rollback=node.context.rollback_enabled,
event_loop=node.context.event_loop,
)
node.update_runtime_properties('deployment_context',
deployment_context)
node.context.logger.info('[{0}] - Deployment context assembled.'
.format(node.name))
@utils.operation
async def start(node, inputs):
node.context.logger.info('[{0}] - Starting chain function '
'deployment.'.format(node.name))
deployment_context = node.runtime_properties.get(
'deployment_context')
await deployment_context.deploy()
outputs = deployment_context.outputs
node.batch_update_runtime_properties(**{
'deployment_context': deployment_context,
'deployment_context_outputs': outputs,
'persisted_context': deployment_context.serialize(),
})
node.context.logger.info('[{0}] - Deployment finished with '
'status "{1}".'
.format(node.name,
deployment_context.
status.upper()))
@utils.operation
async def stop(node, inputs):
node.context.logger.info('[{0}] - Stopping chain function '
'deployment.'.format(node.name))
deployment_context = node.runtime_properties.get(
'deployment_context')
await deployment_context.undeploy()
node.context.logger.info('[{0}] - Deployment finished with '
'status "{1}".'
.format(node.name,
deployment_context.
status.upper()))
@utils.operation
async def delete(node, inputs):
node.context.logger.info('[{0}] - Deleting chain function '
'deployment context.'.format(node.name))
if 'deployment_context' in node.runtime_properties:
del node.runtime_properties['deployment_context']
| [
"lildee1991@gmail.com"
] | lildee1991@gmail.com |
4e618d4077b4c800d20aaf59ebaad94e9cddd0cb | 9defbebd427f77ac549548ea83280f253e335ea3 | /ltp/transformer_rel_linear.py | 820743317af12009087f3fe14b0d74f356c67ef9 | [] | no_license | okokyou/ltp | d992378ccff0c955852f9f2f948541ce63808a11 | f3d4a25ee2fbb71613f76c99a47e70a5445b8c03 | refs/heads/master | 2023-07-31T15:44:36.220184 | 2021-09-10T01:50:48 | 2021-09-10T01:50:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,774 | py | #! /usr/bin/env python
# -*- coding: utf-8 -*_
# Author: Yunlong Feng <ylfeng@ir.hit.edu.cn>
from argparse import ArgumentParser
from typing import Optional
import torch
from torch import nn
from torch.nn import functional as F
from transformers import AutoModel
from ltp.nn import BaseModule, RelativeTransformer, CRF
from ltp.transformer_linear import TokenClassifierResult
class RelativeTransformerLinearClassifier(nn.Module):
crf: Optional[CRF]
def __init__(self, input_size, hidden_size, num_layers, num_heads, num_labels, max_length, dropout,
disable_relative_transformer=False, use_cls=False, use_sep=False, use_crf=False, crf_reduction='sum'):
super().__init__()
self.use_cls = use_cls
self.use_sep = use_sep
if disable_relative_transformer:
self.relative_transformer = None
else:
self.relative_transformer = RelativeTransformer(
input_size=input_size,
hidden_size=hidden_size,
num_layers=num_layers,
num_heads=num_heads,
dropout=dropout,
max_length=max_length * 2
)
self.classifier = nn.Linear(input_size, num_labels)
if use_crf:
self.crf = CRF(num_labels, batch_first=True)
self.crf_reduction = crf_reduction
else:
self.crf = None
def forward(self, input, attention_mask=None, word_index=None, word_attention_mask=None, labels=None,
is_processed=False):
if not is_processed:
if not self.use_cls:
input = input[:, 1:, :]
if not self.use_cls:
input = input[:, :-1, :]
if word_attention_mask is None:
assert word_index is None
bias = int(not self.use_cls) + int(not self.use_sep)
word_attention_mask = attention_mask[:, bias:] == 1
if word_index is not None:
input = torch.gather(input, dim=1, index=word_index.unsqueeze(-1).expand(-1, -1, input.size(-1)))
if self.relative_transformer is not None:
sequence_output = self.relative_transformer(input, word_attention_mask)
else:
sequence_output = input
logits = self.classifier(sequence_output)
loss = None
if labels is not None:
loss_fct = nn.CrossEntropyLoss()
# Only keep active parts of the loss
if word_attention_mask is not None and self.crf is not None:
logits = F.log_softmax(logits, dim=-1)
loss = - self.crf.forward(logits, labels, word_attention_mask, reduction=self.crf_reduction)
elif word_attention_mask is not None:
active_loss = word_attention_mask.view(-1)
active_logits = logits.view(-1, self.classifier.out_features)[active_loss]
active_labels = labels.view(-1)[active_loss]
loss = loss_fct(active_logits, active_labels)
else:
loss = loss_fct(logits.view(-1, self.classifier.out_features), labels.view(-1))
decoded = None
if not self.training and self.crf is not None:
decoded = self.crf.decode(emissions=logits, mask=word_attention_mask)
if self.use_cls:
decoded = [sent[1:] for sent in decoded]
labels = labels[:, 1:]
if self.use_sep:
decoded = [sent[:-1] for sent in decoded]
labels = labels[:, :-1]
return TokenClassifierResult(loss=loss, logits=logits, decoded=decoded, labels=labels)
class TransformerRelLinear(BaseModule):
def __init__(self, hparams, config=None):
super().__init__()
self.save_hyperparameters(hparams)
if config is None:
self.transformer = AutoModel.from_pretrained(self.hparams.transformer)
else:
self.transformer = AutoModel.from_config(config)
self.dropout = nn.Dropout(self.hparams.dropout)
hidden_size = self.transformer.config.hidden_size
max_length = self.transformer.config.max_position_embeddings
self.classifier = RelativeTransformerLinearClassifier(
input_size=hidden_size,
hidden_size=self.hparams.hidden_size,
num_layers=self.hparams.num_layers,
num_heads=self.hparams.num_heads,
dropout=self.hparams.dropout,
max_length=max_length,
num_labels=self.hparams.num_labels,
use_crf=self.hparams.use_crf,
use_cls=self.hparams.use_cls,
use_sep=self.hparams.use_sep,
crf_reduction=self.hparams.crf_reduction,
disable_relative_transformer=self.hparams.disable_relative_transformer
)
@staticmethod
def add_model_specific_args(parent_parser):
parser = ArgumentParser(parents=[parent_parser], add_help=False, conflict_handler='resolve')
parser.add_argument('--transformer', type=str, default="hfl/chinese-electra-base-discriminator")
parser.add_argument('--num_layers', type=int, default=2)
parser.add_argument('--hidden_size', type=int, default=256)
parser.add_argument('--num_heads', type=int, default=4)
parser.add_argument('--dropout', type=float, default=0.1)
parser.add_argument('--use_crf', action='store_true')
parser.add_argument('--use_cls', action='store_true')
parser.add_argument('--use_sep', action='store_true')
parser.add_argument('--disable_relative_transformer', action='store_true')
parser.add_argument('--crf_reduction', type=str, default='sum')
parser.add_argument('--num_labels', type=int)
return parser
def forward(
self,
input_ids=None,
attention_mask=None,
word_index=None,
word_attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None
) -> TokenClassifierResult:
hidden_states = self.transformer(
input_ids,
attention_mask,
token_type_ids,
position_ids,
head_mask,
inputs_embeds,
output_attentions=False,
output_hidden_states=False,
return_dict=False,
)
sequence_output = hidden_states[0]
sequence_output = self.dropout(sequence_output)
return self.classifier(
sequence_output,
word_index=word_index,
attention_mask=attention_mask,
word_attention_mask=word_attention_mask,
labels=labels
)
| [
"ylfeng@ir.hit.edu.cn"
] | ylfeng@ir.hit.edu.cn |
74377d062ee0d17bd11f3abf5d882d2cd8718a03 | 87e520f16911077e3944f27be142b028110239d9 | /guild/commands/package.py | 05fa5c9ad67a1dc7299619e61860d652e0a2ec07 | [
"Apache-2.0"
] | permissive | cfregly/guild-python-1 | 06c81e5c633be231f18318604f2402e8ac24bce9 | 543889469251e20c1ac55e358100952cdc33e58d | refs/heads/master | 2021-07-12T13:38:31.291333 | 2017-10-16T22:01:17 | 2017-10-16T22:01:17 | 107,252,039 | 0 | 0 | null | 2017-10-17T10:14:47 | 2017-10-17T10:14:47 | null | UTF-8 | Python | false | false | 1,841 | py | # Copyright 2017 TensorHub, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
import click
from guild import click_util
@click.command()
@click.option(
"-p", "--project", "project_location", metavar="LOCATION",
help=("Project location (file system directory) of the "
"project to package. Defaults to current directory."))
@click.option(
"-d", "--dist-dir", metavar="DIR",
help="Directory to create the package distribution in.")
@click.option(
"--upload",
help="Upload the package distribution to PyPI after creating it.",
is_flag=True)
@click.option(
"-s", "--sign",
help="Sign a package distribution upload with gpg.",
is_flag=True)
@click.option("-i", "--identity", help="GPG identity used to sign upload.")
@click.option("-u", "--user", help="PyPI user name for upload.")
@click.option("-p", "--password", help="PyPI password for upload.")
@click.option("-c", "--comment", help="Comment to include with upload.")
@click.pass_context
@click_util.use_args
def package(ctx, args):
"""Create a package for distribution.
Packages are built from projects that contain a PACKAGE file that
describes the package to be built.
"""
from . import package_impl
package_impl.create_package(args, ctx)
| [
"g@rre.tt"
] | g@rre.tt |
96af65b743081e61f0620630af27fb2aa2652125 | 8efb4caeafe2cfb024827ce194b5abae6fdfc9a4 | /test/functional/rpc_named_arguments.py | d3557ce2777061179531d4a6c6782b345379400b | [
"MIT"
] | permissive | Worldcoin-Network/worldcoin | cd8ac9631154666cb11603d5f07e3a9dc2e1653a | 4f14d8baadda3f46363c26dc327a68b33f14e28c | refs/heads/master | 2022-03-04T01:50:14.783972 | 2021-10-26T15:21:47 | 2021-10-26T15:21:47 | 156,328,955 | 15 | 9 | MIT | 2021-05-10T16:58:07 | 2018-11-06T05:08:32 | C++ | UTF-8 | Python | false | false | 1,217 | py | #!/usr/bin/env python3
# Copyright (c) 2016-2018 The Worldcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test using named arguments for RPCs."""
from test_framework.test_framework import WorldcoinTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
)
class NamedArgumentTest(WorldcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
def run_test(self):
node = self.nodes[0]
h = node.help(command='getblockchaininfo')
assert(h.startswith('getblockchaininfo\n'))
assert_raises_rpc_error(-8, 'Unknown named parameter', node.help, random='getblockchaininfo')
h = node.getblockhash(height=0)
node.getblock(blockhash=h)
assert_equal(node.echo(), [])
assert_equal(node.echo(arg0=0,arg9=9), [0] + [None]*8 + [9])
assert_equal(node.echo(arg1=1), [None, 1])
assert_equal(node.echo(arg9=None), [None]*10)
assert_equal(node.echo(arg0=0,arg3=3,arg9=9), [0] + [None]*2 + [3] + [None]*5 + [9])
if __name__ == '__main__':
NamedArgumentTest().main()
| [
"quentin.neveu@hotmail.ca"
] | quentin.neveu@hotmail.ca |
a235367cf7e0587291907ad9357befc485578b2d | 824f831ce0921b3e364060710c9e531f53e52227 | /Leetcode/Sliding_Window/LC-209. Minimum Size Subarray Sum.py | 09456da62cae8ff3353367758c2559fbab35706a | [] | no_license | adityakverma/Interview_Prepration | e854ff92c10d05bc2c82566ea797d2ce088de00a | d08a7f728c53943e9a27c33f8e4249633a69d1a6 | refs/heads/master | 2020-04-19T19:36:06.527353 | 2019-06-15T23:02:30 | 2019-06-15T23:02:30 | 168,392,921 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 3,372 | py |
# Given an array of n positive integers and a positive integer s, find the minimal length of a contiguous
# subarray of which the sum ≥ s. If there is not one, return 0 instead.
#
# Example:
#
# Input: s = 7, nums = [2,3,1,2,4,3]
# Output: 2
# Explanation: the subarray [4,3] has the minimal length under the problem constraint.
#
# Follow up:
# If you have figured out the O(n) solution, try coding another solution of which the time complexity is O(n log n).
# ================================================================================================================
class Solution(object):
def minSubArrayLen(self, s, nums):
"""
:type s: int
:type nums: List[int]
:rtype: int
"""
# Using Sliding Window Algorithm: - See below explanation
minLen = len(nums) + 1
total, start = 0, 0
for i in range(len(nums)):
total += nums[i] # Get possible candidate
# If total is not >= target, then quit while loop and add more to total (expand the window).
# else refine candiate by moving left end to left since we need to get minimum number.
while total >= s:
minLen = min(minLen, i - start + 1)
total = total - nums[start]
start = start + 1 # Moving the window's left end now, because we need to get minmum number ele whole sum>=target
return 0 if minLen > len(nums) else minLen
'''
class Solution(object):
def minSubArrayLen(self, s, nums):
"""
:type s: int
:type nums: List[int]
:rtyp
# Using Binary Search
result = len(nums) + 1
for idx, n in enumerate(nums[1:], 1):
nums[idx] = nums[idx - 1] + n
left = 0
for right, n in enumerate(nums):
if n >= s:
left = self.find_left(left, right, nums, s, n)
result = min(result, right - left + 1)
return result if result <= len(nums) else 0
def find_left(self, left, right, nums, s, n):
while left < right:
mid = (left + right) // 2
if n - nums[mid] >= s:
left = mid + 1
else:
right = mid
return left
'''
'''
Sliding Window Algorithm:
========================
Trick here is to keep adding numbers from the start of array until you hit the target.
After that we keep adding numbers from the end and subtracting numbers from the start as long as the total is still above target and keep checking if the new array is the minimum length.
The intuition is that for example, a 10 added on the end could replace two 5's from start of array and thus the reduce the number of elements needed to hit target in that subarray.
IMP NOTE: To find maximum substring, we should update maximum after the inner while loop to guarantee that the substring is valid. On the other hand, when asked to find minimum substring, we should update minimum inside the inner while loop.
# https://leetcode.com/problems/minimum-size-subarray-sum/discuss/211775/Python-O(N)-greater-minimum-window-substring-template
# https://leetcode.com/problems/minimum-size-subarray-sum/discuss/59093/Python-O(n)-and-O(n-log-n)-solution
''' | [
"noreply@github.com"
] | adityakverma.noreply@github.com |
2573678e9815d6c0d02d522c1f682042b66018a9 | 5c0a253bf2fb83db01abc99097871c965f4cf565 | /spark/crm/PROC_A_SUBJECT_D004039.py | 4324513e029099964e80d4e9191f6fdd22410f7b | [] | no_license | airuibel/python-1 | 3b16553ede9d069ec56efbb12a89a4de6917a447 | 94f387e2d406fab2128bcfffce6146da720b2ccc | refs/heads/master | 2020-07-05T15:43:00.957221 | 2017-09-17T14:05:48 | 2017-09-17T14:05:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,160 | py | #coding=UTF-8
from pyspark import SparkContext, SparkConf, SQLContext, Row, HiveContext
from pyspark.sql.types import *
from datetime import date, datetime, timedelta
import sys, re, os
st = datetime.now()
conf = SparkConf().setAppName('PROC_A_SUBJECT_D004039').setMaster(sys.argv[2])
sc = SparkContext(conf = conf)
sc.setLogLevel('WARN')
if len(sys.argv) > 5:
if sys.argv[5] == "hive":
sqlContext = HiveContext(sc)
else:
sqlContext = SQLContext(sc)
hdfs = sys.argv[3]
dbname = sys.argv[4]
#处理需要使用的日期
etl_date = sys.argv[1]
#etl日期
V_DT = etl_date
#上一日日期
V_DT_LD = (date(int(etl_date[0:4]), int(etl_date[4:6]), int(etl_date[6:8])) + timedelta(-1)).strftime("%Y%m%d")
#月初日期
V_DT_FMD = date(int(etl_date[0:4]), int(etl_date[4:6]), 1).strftime("%Y%m%d")
#上月末日期
V_DT_LMD = (date(int(etl_date[0:4]), int(etl_date[4:6]), 1) + timedelta(-1)).strftime("%Y%m%d")
#10位日期
V_DT10 = (date(int(etl_date[0:4]), int(etl_date[4:6]), int(etl_date[6:8]))).strftime("%Y-%m-%d")
V_STEP = 0
#---------------------------------------------------------------------------------------#
V_YEAR_MONTH = etl_date[0:4]+"-" + etl_date[4:6]
v_sub_id = 'D004039';
ACRM_A_TARGET_D004022 = sqlContext.read.parquet(hdfs+'/ACRM_A_TARGET_D004022/*')
ACRM_A_TARGET_D004022.registerTempTable("ACRM_A_TARGET_D004022")
ACRM_A_TARGET_D004023 = sqlContext.read.parquet(hdfs+'/ACRM_A_TARGET_D004023/*')
ACRM_A_TARGET_D004023.registerTempTable("ACRM_A_TARGET_D004023")
ACRM_A_TARGET_D004024 = sqlContext.read.parquet(hdfs+'/ACRM_A_TARGET_D004024/*')
ACRM_A_TARGET_D004024.registerTempTable("ACRM_A_TARGET_D004024")
#任务[21] 001-01::
V_STEP = V_STEP + 1
sql = """
SELECT
A.CUST_ID as CUST_ID
,'' as ORG_ID
,'D004039' as INDEX_CODE
,CASE WHEN A.INDEX_VALUE < B.INDEX_VALUE AND B.INDEX_VALUE < C.INDEX_VALUE THEN 0.0
WHEN A.INDEX_VALUE > B.INDEX_VALUE AND B.INDEX_VALUE > C.INDEX_VALUE THEN 2.0
ELSE 1.0 END as INDEX_VALUE
,V_YEAR_MONTH as YEAR_MONTH
,V_DT as ETL_DATE
,A.CUST_TYPE as CUST_TYPE
,A.FR_ID as FR_ID
FROM ACRM_A_TARGET_D004022 A,
ACRM_A_TARGET_D004023 B,
ACRM_A_TARGET_D004024 C
WHERE A.CUST_ID = B.CUST_ID
AND A.FR_ID = B.FR_ID
AND A.CUST_TYPE = '2'
AND A.CUST_ID = C.CUST_ID
AND A.FR_ID = C.FR_ID
"""
sql = re.sub(r"\bV_DT\b", "'"+V_DT10+"'", sql)
sql = re.sub(r"\bV_YEAR_MONTH\b", "'"+V_YEAR_MONTH+"'", sql)
ACRM_A_TARGET_D004039 = sqlContext.sql(sql)
ACRM_A_TARGET_D004039.registerTempTable("ACRM_A_TARGET_D004039")
dfn="ACRM_A_TARGET_D004039/"+V_DT+".parquet"
ACRM_A_TARGET_D004039.cache()
nrows = ACRM_A_TARGET_D004039.count()
ACRM_A_TARGET_D004039.write.save(path=hdfs + '/' + dfn, mode='overwrite')
ACRM_A_TARGET_D004039.unpersist()
ACRM_A_TARGET_D004022.unpersist()
ACRM_A_TARGET_D004023.unpersist()
ACRM_A_TARGET_D004024.unpersist()
ret = os.system("hdfs dfs -rm -r /"+dbname+"/ACRM_A_TARGET_D004039/"+V_DT_LD+".parquet")
et = datetime.now()
print("Step %d start[%s] end[%s] use %d seconds, insert ACRM_A_TARGET_D004039 lines %d") % (V_STEP, st.strftime("%H:%M:%S"), et.strftime("%H:%M:%S"), (et-st).seconds, nrows)
| [
"cysuncn@126.com"
] | cysuncn@126.com |
90d2e9f95d7f2972285bb2645fbe7b1c72e80b3e | e10a6d844a286db26ef56469e31dc8488a8c6f0e | /ddpm_w_distillation/ddpm_w_distillation/config/i64_w_unet3_distill.py | 087622af189706bd2ae189e6c0d8965bd8a31c4f | [
"Apache-2.0",
"CC-BY-4.0"
] | permissive | Jimmy-INL/google-research | 54ad5551f97977f01297abddbfc8a99a7900b791 | 5573d9c5822f4e866b6692769963ae819cb3f10d | refs/heads/master | 2023-04-07T19:43:54.483068 | 2023-03-24T16:27:28 | 2023-03-24T16:32:17 | 282,682,170 | 1 | 0 | Apache-2.0 | 2020-07-26T15:50:32 | 2020-07-26T15:50:31 | null | UTF-8 | Python | false | false | 6,776 | py | # coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=missing-module-docstring,missing-function-docstring
# pylint: disable=g-no-space-after-comment,g-bad-todo
# pylint: disable=invalid-name,line-too-long
import ml_collections
class hyper:
pass
def D(**kwargs):
return ml_collections.ConfigDict(initial_dictionary=kwargs)
# added
# end_num_steps = 1 # eventual number of steps in the distilled sampler
# start_num_steps = 1024 # number of steps in baseline sampler
distill_steps_per_iter = 1000000
# TODO, change the teacher ckpt patg
another_teacher_ckpt_path = 'projects/diffusion/stage1_2048_42848231/1/retained_checkpoints/checkpoint_520000'
# 'projects/diffusion/i64_retrain_snr_1e-4_42469520/1/retained_checkpoints/checkpoint_380000'
teacher_ckpt_path = 'projects/diffusion/retrain_snr_2048_42804866/1/retained_checkpoints/checkpoint_220000'
# 'projects/diffusion/i64_retrain_snr_1e-4_42469520/1/retained_checkpoints/checkpoint_380000'
# 'projects/diffusion/i64_teacher_v_42202910/1/retained_checkpoints/checkpoint_100000'
# 'projects/diffusion/i64_retrain_42445613/1/retained_checkpoints/checkpoint_200000'
single_model_path = 'projects/diffusion/stage1_2048_42848231/1/retained_checkpoints/checkpoint_520000'
eval_sampling_num_steps = 256 #128 #512 #256 #512 #128
train_batch_size = 256 #2048, # 256
use_sample_single_ckpt = True #False
use_retained_ckpt = True #False
train_clip_x = False
# sampler = 'ddim', # 'noisy'
def get_config():
return D(
launch=D(
sweep=hyper.product([
# hyper.sweep('config.model.args.uncond_prob', [0.01, 0.02, 0.05]),
# hyper.sweep('config.model.args.uncond_prob', [0.1, 0.2, 0.5]),
hyper.sweep('config.seed', [0]), #TODO [1, 2, 3] change to [0]
hyper.sweep(
'config.model.args.uncond_prob', [0.1]
), # NOTE: not used for w_unet model check NOTE change from 0.1 to 0
# hyper.sweep(config.model.acond_uncond_coefs)
]),),
# added
distillation=D(
# teacher checkpoint is used for teacher and initial params of student
teacher_checkpoint_path=teacher_ckpt_path,
steps_per_iter=distill_steps_per_iter, # number of distillation training steps per halving of sampler steps
only_finetune_temb=False, #TODO!! False,
another_teacher_init=True, #False, #NOTE: change to False #False, #True,
another_teacher_path=another_teacher_ckpt_path,
# start_num_steps=start_num_steps,
# end_num_steps=end_num_steps,
),
# added
seed=0,
main_class='Model',
dataset=D(
name='ImageNet',
args=D(
image_size=64,
class_conditional=True,
randflip=True,
),
),
sampler='noisy', #'ddim', # 'noisy', # added
##
#together
use_sample_single_ckpt=use_sample_single_ckpt, #True,
sample_single_ckpt_path=single_model_path,
#together
model=D(
# architecture
name='w_unet3',
args=D(
ch=192,
emb_ch=768, # default is ch * 4
ch_mult=[1, 2, 3, 4],
num_res_blocks=3,
attn_resolutions=[8, 16, 32],
num_heads=None,
head_dim=64,
dropout=0., #NOTE!! changed previously 0.1,
logsnr_input_type='inv_cos',
w_input_type='inv_cos', # w embedding added
resblock_resample=True,
uncond_prob=0.1, #NOTE: default, but as sweep 0.,
),
teacher_extra_class=True, #NOTE added
mean_type='v', #'eps', #'v', #NOTE: change to v 'eps',
teacher_mean_type='v', #"eps", # added
logvar_type='fixed_medium:0.3', # TODO: check
mean_loss_weight_type='snr', #'constant', #'snr', #'snr_trunc', #note not 'constant', #constant='mse', snr, snr_trunc
logvar_loss_type='none',
# logsnr schedule
train_num_steps=0, # train in continuous time
eval_sampling_num_steps=eval_sampling_num_steps,
train_logsnr_schedule=D(
name='cosine', logsnr_min=-20., logsnr_max=20.),
eval_logsnr_schedule=D(
name='cosine', logsnr_min=-20., logsnr_max=20.),
eval_clip_denoised=True,
# added
train_w_schedule=D(
name='uniform',
# logsnr_min=0., logsnr_max=0.5),
# logsnr_min=0., logsnr_max=1.0),
# logsnr_min=0., logsnr_max=2.0),
logsnr_min=0.,
logsnr_max=4.),
# NOTE can set logsnr_max=logsnr_min for a single w value
# sample interpolation
# cond_uncond_coefs=[1.3, -0.3], # [cond_coef, uncond_coef]
# eval_cond_uncond_coefs # NOTE: never have it for distillation!, it does not make sense
),
train=D(
# optimizer
batch_size=train_batch_size, #2048, # 256 #2048, # TODO: change back 2048,
optimizer='adam',
learning_rate=3e-4, #edited 3e-4,
learning_rate_warmup_steps=0, #edited 10000, # used to be 1k, but 10k helps with stability
weight_decay=0.0,
ema_decay=0.9999,
grad_clip=1.0,
substeps=10,
enable_update_skip=False,
# logging
log_loss_every_steps=100,
checkpoint_every_secs=900, # 15 minutes
retain_checkpoint_every_steps=20000, # old checkpoints won't get deleted
eval_every_steps=20000,
train_clip_x=train_clip_x, # NOTE added
w_conditoned_training=True, # added
w_warmup_steps=10000, #1, #10000, # added to worm up w embedding
),
eval=D(
batch_size=128, # TODO change to 128,
num_inception_samples=50000,
sampling_fixed_classes=[249, 284], # visualize malamute, siamese cat
sampling_fixed_w=[0.1, 0.3, 0.5],
w_sample_const=4.0, #0.3, #0.3
noisy_sampler_interpolation=0.5, #0.2, # NOTE: need to change
use_retained_ckpt=use_retained_ckpt, #True,
),
)
| [
"copybara-worker@google.com"
] | copybara-worker@google.com |
f840d8f08c5b3912798557561050b0c4b3506e47 | 70280955a5382d73e58395eba78c119a400f4ce7 | /comp/exawizards2019/test.py | a24b292819c9854baa39c6eac3b842189d260338 | [] | no_license | cohock13/atcoder | a7d0e26a10a4e58690347a2e36839c2f503a79ba | d268aa68fc96203eab94d021bd158cf84bdb00bc | refs/heads/master | 2021-01-03T00:41:31.055553 | 2020-10-27T12:28:06 | 2020-10-27T12:28:06 | 239,839,477 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 360 | py | N = int(input())
S = {}
for i in range(N):
s = input()
if s in S:
S[s] += 1
else:
S[s] = 1
key = list(S.keys())
value = list(S.values())
m = max(value)
Ans = []
for i in range(len(key)):
Ans.append([value[i],key[i]])
Ans.sort()
for i in range(len(key)):
if Ans[i][0] == m:
print(Ans[i][1])
| [
"callout2690@gmail.com"
] | callout2690@gmail.com |
4134622276b22025842bfa81d13980db24e39416 | 041454da30317f0831d8af04a205db9716084ce9 | /docker/f_ffmpeg_1.py | 4646622786c900380acbad86af4f391aa3a63c83 | [] | no_license | elssm/Bug-Recurrence | 3db54b60e3968058566cdaf25589e61b147fb021 | 17365424c54401f83fc00547c7425e2f5901ef14 | refs/heads/master | 2020-05-02T20:18:16.429572 | 2019-03-28T11:16:23 | 2019-03-28T11:16:23 | 178,186,705 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,262 | py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
#python version 3.6.4
#简介:脚本适用于docker下的vulapps环境,使用docker run -d -p 8004:80 medicean/vulapps:f_ffmpeg_1运行环境
#具体参见github地址:https://github.com/Medicean/VulApps/commit/86ee14f3b0c2e7e4fa1aa17655d77bed4184a177
#将poc下载到c盘根目录下
#此处ip为192.168.109.141
import requests
import os
import sys
argc = len(sys.argv) - 1
argv = sys.argv[1:]
if argc == -1 or argc > 2:
print "用法:python",sys.argv[0],"IP地址 端口号"
print "例如:url为http://127.0.0.1:8080/,则IP地址应为127.0.0.1,端口号应为8080"
sys.exit()
ip = "192.168.109.141"
port = 8004
if argc >= 1:
ip = argv[0]
if argc == 2:
port = argv[1]
py_url="https://github.com/neex/ffmpeg-avi-m3u-xbin/blob/master/gen_xbin_avi.py"
req=requests.get(py_url)
with open("gen_xbin_avi.py","wb") as f:
p=os.listdir('C:')
re="gen_xbin_avi.py"
if re in p:
os.system("python3 gen_xbin_avi.py file:///etc/passwd passwd.avi")
url="http://"+ip+":"+port+"/"
data=None
files={"field":("passwd.avi",open("C:","rb"))}
r=request.post(url,data,files=files)
print "post successful"
else:
print "No Such File"
| [
"noreply@github.com"
] | elssm.noreply@github.com |
8e1fec41ecc67bb074bc42051b00369bde2be3ef | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_211/ch140_2020_04_01_20_23_34_419582.py | 3947aeef47acbfc57438321e0d040d657c1d6513 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 312 | py | def faixa_notas(l):
l=[]
i=0
cinco=0
cincset=0
sete=0
quantidade=[cinco,cincset,sete]
while(i<len(l)):
if(l[i]<5):
cinco+=1
elif(l[i]>=5 and l[i]<7):
cincset+=1
else:
sete+=1
return quantidade
| [
"you@example.com"
] | you@example.com |
d918e24948a40341e21650f17e4b4c41965e9398 | ab4046bba677f9378053a68fb74a150c86202a02 | /tools/tools.py | 666d4f7142cd726e30cfd0ae4b86f1375f5ce9e0 | [] | no_license | bvillasen/cooling_tools | f321100025f3b3f8a33b8afae22074f5ff947405 | f4c47776d8489944c398c91ebffb6931d46fcb39 | refs/heads/main | 2021-12-02T18:26:24.315233 | 2021-08-13T04:18:37 | 2021-08-13T04:18:37 | 229,338,837 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,979 | py | import os, sys
from os import listdir
from os.path import isfile, join
import numpy as np
import h5py as h5
import time
def Combine_List_Pair( a, b ):
  """Cross-combine two collections element-wise.

  For every element of *a* paired with every element of *b*: when the
  *b* element is itself a list, the *a* element is prepended to it;
  otherwise the pair becomes a new two-element list.
  """
  combined = []
  for left in a:
    for right in b:
      if type(right) is list:
        entry = [left] + right
      else:
        entry = [left, right]
      combined.append(entry)
  return combined
def Get_Parameters_Combination( param_vals ):
  """Return every combination of parameter values.

  Parameters
  ----------
  param_vals : sequence of sequences
    ``param_vals[i]`` holds the candidate values for parameter *i*.

  Returns
  -------
  list of lists
    All combinations ``[v0, v1, ...]`` with ``vi`` drawn from
    ``param_vals[i]``; the first parameter varies slowest (same
    ordering as the original hand-rolled index-grid version).

  Notes
  -----
  The previous implementation built an index grid by hand and raised
  ``TypeError`` when only one parameter list was supplied (it tried to
  enumerate a bare int); ``itertools.product`` handles that case and
  replaces the manual construction.
  """
  import itertools
  return [ list(combo) for combo in itertools.product(*param_vals) ]
def print_progress( i, n, time_start ):
  """Print an in-place progress line with an estimated time remaining.

  Parameters
  ----------
  i : int
    Number of completed items (must be >= 1; used for extrapolation).
  n : int
    Total number of items.
  time_start : float
    ``time.time()`` value captured when the work started.
  """
  import time
  # The original rebound the name 'time' to a float right after
  # importing the module; renamed to 'elapsed' to remove the shadowing.
  elapsed = time.time() - time_start
  remaining = elapsed * ( n - i ) / i   # linear extrapolation
  hrs = remaining // 3600
  mins = (remaining - hrs*3600) // 60   # renamed: 'min' shadowed the builtin
  secs = remaining - hrs*3600 - mins*60
  etr = f'{hrs:02.0f}:{mins:02.0f}:{secs:02.0f}'
  progres = f'Progress: {i}/{n} {i/n*100:.1f}% ETR: {etr} '
  print_line_flush (progres )
def Get_Free_Memory( print_out=False):
  """Return the system's free memory in gigabytes.

  When *print_out* is true the value is also printed.
  """
  import psutil
  free_gb = psutil.virtual_memory().free / 1e9
  if print_out:
    print( f'Free Memory: {free_gb:.1f} GB' )
  return free_gb
def check_if_file_exists( file_name ):
  """Return True when *file_name* refers to an existing regular file."""
  return isfile( file_name )
def Load_Pickle_Directory( input_name ):
  """Load and return the pickled object stored in *input_name*.

  The file handle is now closed deterministically via a context
  manager (the original left an open handle to the garbage collector).
  """
  import pickle
  print( f'Loading File: {input_name}')
  with open( input_name, 'rb' ) as pickle_file:
    dir = pickle.load( pickle_file )
  return dir
def Write_Pickle_Directory( dir, output_name ):
  """Pickle the object *dir* to the file *output_name*.

  Uses a context manager so the file is flushed and closed even if
  pickling raises (the original never closed the handle).
  """
  import pickle
  with open( output_name, 'wb' ) as f:
    pickle.dump( dir, f)
  print ( f'Saved File: {output_name}' )
def split_indices( indices, rank, n_procs, adjacent=False ):
  """Return the index positions assigned to *rank* out of *n_procs*.

  Parameters
  ----------
  indices : sized sequence
    Only its length is used; positions 0..len-1 are distributed.
  rank : int
    This process' 0-based rank.
  n_procs : int
    Total number of processes.
  adjacent : bool
    False -> round-robin assignment (rank, rank+n_procs, ...);
    True  -> one contiguous chunk of size ceil(len / n_procs).

  Returns
  -------
  numpy.ndarray
    Assigned positions, clipped to the valid range.
  """
  n_index_total = len(indices)
  n_proc_indices = (n_index_total-1) // n_procs + 1  # ceil division
  # A dead duplicate of the round-robin assignment (immediately
  # overwritten by the if/else below) was removed here.
  if adjacent: indices_to_generate = np.array([ i + rank*n_proc_indices for i in range(n_proc_indices) ])
  else: indices_to_generate = np.array([ rank + i*n_procs for i in range(n_proc_indices) ])
  indices_to_generate = indices_to_generate[ indices_to_generate < n_index_total ]
  return indices_to_generate
def extend_path( dir=None ):
  """Append every subdirectory of *dir* (default: the cwd) to sys.path.

  Directories whose path contains '.git' are skipped, so a repository's
  metadata folders are not made importable.
  """
  if not dir: dir = os.getcwd()
  subDirectories = [x[0] for x in os.walk(dir) if x[0].find('.git')<0 ]
  sys.path.extend(subDirectories)
def print_mpi( text, rank, size, mpi_comm):
  """Print *text* from this rank only, staggered so ranks do not interleave.

  Every rank walks the same 0..size-1 schedule, printing on its own
  turn and pausing briefly each step, then all ranks synchronize on a
  barrier.
  """
  for turn in range(size):
    if turn == rank:
      print( text )
    time.sleep( 0.01 )
  mpi_comm.Barrier()
def print_line_flush( terminalString ):
  """Rewrite the current terminal line in place with *terminalString*.

  A carriage return moves the cursor to the start of the line and the
  stream is flushed so the update appears immediately.
  """
  sys.stdout.write('\r' + terminalString)
  sys.stdout.flush()
def create_directory( dir, print_out=True ):
  """Create directory *dir* unless it already exists.

  The original hand-parsed the parent path (splitting on '/') and
  scanned it with os.walk just to test for existence; os.path.isdir
  performs the same check directly, works on any platform separator,
  and copes with a trailing slash.
  """
  if print_out: print(("Creating Directory: {0}".format(dir) ))
  if os.path.isdir( dir ):
    if print_out: print( " Directory exists")
  else:
    os.mkdir( dir )
    if print_out: print( " Directory created")
def get_files_names( inDir, fileKey='', type=None ):
  """Collect sorted entry names from *inDir*.

  type=None     -> all regular files, returned as a sorted array.
  type='nyx'    -> every entry whose name contains *fileKey*
                   (directories included), plus the count.
  type='cholla' -> regular files whose name contains *fileKey*,
                   plus the count.
  """
  entries = listdir(inDir)
  if not type:
    found = [f for f in entries if isfile(join(inDir, f))]
  if type == 'nyx':
    found = [f for f in entries if fileKey in f]
  if type == 'cholla':
    found = [f for f in entries if isfile(join(inDir, f)) and fileKey in f]
  found = np.sort( found )
  if not type:
    return found
  return found, len( found )
| [
"bvillasen@gmail.com"
] | bvillasen@gmail.com |
5374d74a73bd8124eafa008ae144228c9c2bdbc9 | 2652fd6261631794535589427a384693365a585e | /trunk/workspace/Squish/src/API/Android/ActionBar/File/FileConst.py | d18e3b2246eefd013a70b883052c3b54eccb6cd0 | [] | no_license | ptqatester1/ptqa | 88c652380167f64a953bfd7a65041e7d8ac48c90 | 5b5997ea459e9aac17db8da2041e2af331927104 | refs/heads/master | 2021-01-21T19:06:49.275364 | 2017-06-19T03:15:00 | 2017-06-19T03:15:00 | 92,115,462 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 559 | py | class FileMenuConst:
    # Squish object names (HTML ids) for the File action-bar menu entries.
    NEW = ':newNetwork_HTML_Object'
    LOAD = ':loadNetwork_HTML_Object'
    SAVE = ':saveNetwork_HTML_Object'
    SAVE_AS = ':saveAs_HTML_Object'
    ABOUT_PT = ':about_HTML_Object'
    #NETSPACE_LOGOUT = ':onLogout_HTML_Object'
    #DROPBOX_LOGIN = ':dbLogin_HTML_Object'
    SHARE_FACEBOOK = ':onScreenShot_HTML_Object'
    OPTIONS = ':fileOptionsMenu_options_button_HTML_Object'
    #EMAIL = ':email_HTML_Object'
    EXIT = ':exit_HTML_Object'
class AboutPage:
    # Squish object name for the About page's "view license" control.
    VIEW_LICENSE = ':options_viewLicense_HTML_Object'
| [
"ptqatester1@gmail.com"
] | ptqatester1@gmail.com |
5a72cdf4e073f6fc04267e4ffd83999834f77307 | b2c0517a0421c32f6782d76e4df842875d6ffce5 | /Algorithms/Math/171. Excel Sheet Column Number.py | 394b929e34dda09459f87d61b0be287013ab7e34 | [] | no_license | SuYuxi/yuxi | e875b1536dc4b363194d0bef7f9a5aecb5d6199a | 45ad23a47592172101072a80a90de17772491e04 | refs/heads/master | 2022-10-04T21:29:42.017462 | 2022-09-30T04:00:48 | 2022-09-30T04:00:48 | 66,703,247 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 158 | py | class Solution(object):
def titleToNumber(self, s):
num = 0
for i in s:
num = num*26 + (ord(i)-ord('A')+1)
return num
| [
"soration2099@gmail.com"
] | soration2099@gmail.com |
b3e7694ac5ff6f15948df85aa45cfdd6a80d169c | 71fafe9fb2190b6acf09f109105ca362bb9018c2 | /jcsbms/jcsbms/match_import_timer.py | 73cfe1de389815f1e235ace61b4546b089280935 | [] | no_license | zhangyibo007/bms | 1f43ca98057a72f1b62769719cb4aefbb4ffb289 | 1ae88e90415f0495d3a647112de0876da0b18e5e | refs/heads/master | 2021-06-21T05:40:24.468473 | 2017-08-02T12:35:08 | 2017-08-02T12:35:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,973 | py | # coding:utf-8
import sys,os,django
from datetime import timedelta,datetime
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) #manage.py的目录
os.environ['DJANGO_SETTINGS_MODULE'] = 'jcsbms.settings' #setting的目录
django.setup()
from apscheduler.schedulers.blocking import BlockingScheduler
from lottery.views import insert_match_from_football, select_scout_match_sql, insert_match_from_basketball, \
insert_cupleague_from_football, insert_cupleague_from_basketball
from lottery.models import Match, CupLeague
def scout_match_import():
print 'start import match at ', datetime.now().strftime("%Y/%m/%d %H:%M:%S")
from_date = datetime.now().strftime("%Y/%m/%d %H:%M:%S")
to_date = (datetime.now() + timedelta(hours=48)).strftime("%Y/%m/%d %H:%M:%S")
scout_football_match_infos = select_scout_match_sql('scout_football_match_info', from_date, to_date) #使用sql按指定时间查询球探足球表
scout_basketball_match_infos = select_scout_match_sql('scout_basketball_match_info', from_date, to_date)
scout_match_id_list = Match.objects.filter(scout_match_id__isnull=False).values_list('scout_match_id', flat=True) #match表scout_match_id不为空
cup_league_name_foot_man_list = list(CupLeague.objects.filter(project='M', sport_type= 0).values_list('name', flat=True))
cup_league_name_foot_cant_list = list(CupLeague.objects.filter(project='C', sport_type= 0).values_list('name', flat=True))
cup_league_name_foot_en_list = list(CupLeague.objects.filter(project='E', sport_type=0).values_list('name', flat=True))
cup_league_name_basket_man_list = list(CupLeague.objects.filter(project='M', sport_type= 1).values_list('name', flat=True))
cup_league_name_basket_cant_list = list(CupLeague.objects.filter(project='C', sport_type= 1).values_list('name', flat=True))
cup_league_name_basket_en_list = list(CupLeague.objects.filter(project='E', sport_type=1).values_list('name', flat=True))
#足球
for scout_football_match_info in scout_football_match_infos:
# 如果对象不在Match表中,则开始新建数据
if scout_football_match_info[0] not in scout_match_id_list:
insert_match_from_football(scout_football_match_info,project='M') #插入国语赛事,from scout_football_match_info
insert_match_from_football(scout_football_match_info,project='C') #插入粤语赛事,from scout_football_match_info
insert_match_from_football(scout_football_match_info, project='E')#插入英语赛事,from scout_football_match_info
if scout_football_match_info[2] not in cup_league_name_foot_man_list:
insert_cupleague_from_football(scout_football_match_info, project='M') #插入国语杯赛, to lottery_cup_league
cup_league_name_foot_man_list.append(scout_football_match_info[2])
if scout_football_match_info[3] not in cup_league_name_foot_cant_list:
insert_cupleague_from_football(scout_football_match_info, project='C') #插入粤语杯赛, to lottery_cup_league
cup_league_name_foot_cant_list.append(scout_football_match_info[3])
if scout_football_match_info[8] not in cup_league_name_foot_en_list:
insert_cupleague_from_football(scout_football_match_info, project='E') #插入英语杯赛, to lottery_cup_league
cup_league_name_foot_en_list.append(scout_football_match_info[8])
#篮球
for scout_basketball_match_info in scout_basketball_match_infos:
if scout_basketball_match_info[0] not in scout_match_id_list:
insert_match_from_basketball(scout_basketball_match_info,project='M')
insert_match_from_basketball(scout_basketball_match_info,project='C')
insert_match_from_basketball(scout_basketball_match_info, project='E')
if scout_basketball_match_info[2] not in cup_league_name_basket_man_list:
insert_cupleague_from_basketball(scout_basketball_match_info,project='M')
cup_league_name_basket_man_list.append(scout_basketball_match_info[2])
if scout_basketball_match_info[3] not in cup_league_name_basket_cant_list:
insert_cupleague_from_basketball(scout_basketball_match_info, project='C')
cup_league_name_basket_cant_list.append(scout_basketball_match_info[3])
if scout_basketball_match_info[8] not in cup_league_name_basket_en_list:
insert_cupleague_from_basketball(scout_basketball_match_info, project='E')
cup_league_name_basket_en_list.append(scout_basketball_match_info[8])
print 'import match over at', datetime.now().strftime("%Y/%m/%d %H:%M:%S")
print '-----------------------------------------------------------------------'
if __name__ == '__main__':
scheduler = BlockingScheduler()
scheduler.add_job(scout_match_import, 'cron', hour=12)
try:
scheduler.start()
except (KeyboardInterrupt, SystemExit):
scheduler.shutdown() | [
"zhangyibo@caifuzhinan.com"
] | zhangyibo@caifuzhinan.com |
dd907560daeaba856d64a8eee0f86fdab5032374 | 3cd1c0b680d3ed9b251f6afec6fb2d362d9dc8df | /sample15_download_files_via_static_view/sample15_download_files_via_static_view/__init__.py | aa7807e9d48eb800042ca5c7712fb3920bd9ad85 | [] | no_license | williamwu0220/pyramid_sample | ff34e02b6fdb06d906148a7b18c13694701d13f3 | 1b7c7b2a9c97f27912f812c0dce817eb8eeb4457 | refs/heads/master | 2020-03-14T22:14:07.336295 | 2018-05-02T08:15:00 | 2018-05-02T08:15:00 | 131,816,526 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 859 | py | from pyramid.config import Configurator
def main(global_config, **settings):
    """Build and return the Pyramid WSGI application.

    Registers the jinja2 renderer, two static views (bundled assets and
    the configurable ``files_path`` download directory), the app routes,
    and then scans the package for view configuration.
    """
    config = Configurator(settings=settings)
    config.include('pyramid_jinja2')
    config.add_static_view('static', 'static', cache_max_age=3600)
    # for example:
    #
    # 1.
    # If set config.add_static_view('abc', 'def'), then
    # request.static_url('def/xyz.jpg') will produce /abc/xyz.jpg
    #
    # 2.
    # If set config.add_static_view('http://img.frontmobi.com', 'myimages'), then
    # request.static_url('myimages/myfile.jpg') will produce http://img.frontmobi.com/myfile.jpg
    # Serve downloadable files straight from the directory named by the
    # deployment setting 'files_path' (see the app's .ini file).
    config.add_static_view('files_path', settings['files_path'], cache_max_age=3600)
    config.add_route('home', '/')
    config.add_route('list', '/list')
    config.scan()
    return config.make_wsgi_app()
| [
"william@pylabs.org"
] | william@pylabs.org |
9facb01edf8123187c2216673316c6136a0b5655 | 7a31235b60706896351c7e2fe8dbc47217023ddf | /Progress/digital_clock.py | 53e4089d6aa1583c0493b46a886ac67bd3351408 | [] | no_license | sailendrachettri/learning-tkinter | b947e9423654c63bc7b96eb58c03b8f8e0ba99e9 | e978eaa428b71168a16e2ba66c0c54089738a47e | refs/heads/main | 2023-05-27T14:32:06.419250 | 2021-06-11T08:53:27 | 2021-06-11T08:53:27 | 375,646,752 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,012 | py | import time
from tkinter import *
from PIL import Image
from PIL.ImageTk import PhotoImage
root = Tk()
root.title('Digital Clock - Sailendra')
root.geometry("600x650")
root.configure(bg="#0075A2")
#A function for clock
def clock():
hour = time.strftime("%I")
minute = time.strftime("%M")
second = time.strftime("%S")
am_pm = time.strftime("%P")
clock_label.config(text=hour + ":" + minute + ":" + second + " " + am_pm)
clock_label.after(1000, clock) # it call clock function in every 1 seconds
#Background image
image = Image.open("img/digital_clock.png")
resized = image.resize((500, 500), Image.ANTIALIAS)
new_image = PhotoImage(resized)
image_label = Label(root, image=new_image, bg="#0075A2")
image_label.grid(row=0, column=0,pady=40, padx=(40, 0))
#Create label to display watch's text
clock_label = Label(root, text="10:20:20 Am", font="Helvetica 46 bold", bg='white', fg='red')
clock_label.grid(row=0, column=0, padx=(40, 0))
#Calling the function
clock()
root.mainloop()
| [
"sailendra9083@gmail.com"
] | sailendra9083@gmail.com |
19ea764231c1a94b224080d6bd26c7496422741d | 6b78bd7f62f7f407bf11d877cc4d91e7db3b62fe | /python/diamond_mine.py | 4669941ed0ebfebb735a8125bc0f2eb0f15142c5 | [] | no_license | PascalUlor/code-challenges | b85efacd4bc5999a0748d1fa1e84f503be09dc94 | 6488d0a6d2729bd50b106573f16488479fd6e264 | refs/heads/master | 2023-03-03T17:50:18.413127 | 2023-02-21T13:10:02 | 2023-02-21T13:10:02 | 212,979,719 | 1 | 0 | null | 2023-02-15T22:59:13 | 2019-10-05T10:14:29 | Python | UTF-8 | Python | false | false | 430 | py | """
1) Given an n*n matrix. Each cell contains 0, 1, or -1.
0 means the cell has no diamond but is passable.
1 means the cell has a diamond and is passable.
-1 means the cell is blocked.
Starting from (0, 0), reach the last cell and then return to (0, 0), collecting the maximum number of diamonds.
While moving toward the last cell you may only move right or down.
While returning you may only move left or up.
"""
| [
"pascalulor@yahoo.com"
] | pascalulor@yahoo.com |
4dff494aab4fd14ae50a55d20d20b9b04525c132 | d79a614759a818cffad595e1ad376e244b4550ed | /tests/unittest.py | db854e957be2c8ab2a8305e0b975fde48cde6063 | [
"BSD-3-Clause"
] | permissive | absszero/sublime-phpunit | 9577fc8c8b3b836a08847acf112038d0f5c59314 | 37f43378c3dae37cc88fa1910eea07da1ab4af9e | refs/heads/master | 2023-06-29T15:09:30.865548 | 2020-05-05T22:53:33 | 2020-05-05T22:53:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,260 | py | import os
from unittest import TestCase
from unittest import mock # noqa: F401
from unittest import skipIf # noqa: F401
from sublime import find_resources
from sublime import active_window
def fixtures_path(path=None):
    """Return the tests' fixtures directory, or a path inside it.

    With no argument the fixtures directory itself is returned;
    otherwise *path* is joined onto it.
    """
    base = os.path.join(os.path.dirname(__file__), 'fixtures')
    if path is None:
        return base
    return os.path.join(base, path)
class ViewTestCase(TestCase):
    """TestCase providing a scratch Sublime Text view configured for PHP."""

    def setUp(self):
        # A hidden (unlisted) output panel doubles as a disposable buffer.
        self.view = active_window().create_output_panel(
            'phpunit_test_view',
            unlisted=True
        )
        self.view.set_scratch(True)
        # Neutralize editor automation so fixture text is inserted verbatim.
        self.view.settings().set('auto_indent', False)
        self.view.settings().set('indent_to_bracket', False)
        self.view.settings().set('tab_size', 4)  # was set twice; duplicate removed
        self.view.settings().set('trim_automatic_white_space', False)
        self.view.settings().set('smart_indent', True)
        self.view.settings().set('translate_tabs_to_spaces', True)
        self.view.set_syntax_file(find_resources('PHP.sublime-syntax')[0])

    def tearDown(self):
        # Close the scratch view so panels do not leak between tests.
        if self.view:
            self.view.close()

    def fixture(self, text):
        """Load *text* into the view via the plugin's fixture command."""
        self.view.run_command('phpunit_test_setup_fixture', {'text': text})
| [
"gerardroche@users.noreply.github.com"
] | gerardroche@users.noreply.github.com |
aafb5f0e2b2913ca35de9883987f2df7decb0f56 | 278d7f4467a112416d1adfbcd3218033ff0fd9b3 | /configs/cascade_rpn/crpn_faster_rcnn_r50_caffe_fpn_1x_coco.py | 12fdc8c8a7be3f7a59a9424662c683ff88982a17 | [] | no_license | Young-1217/detection | e3d67938b454e955b5b7a82d5ae222e62f9545fb | 6760288dac92e00ddc3e813ed0e1363c1fa1ce2d | refs/heads/main | 2023-06-01T21:41:37.998947 | 2021-06-21T10:03:01 | 2021-06-21T10:03:01 | 371,868,683 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,533 | py | _base_ = '../faster_rcnn/faster_rcnn_r50_caffe_fpn_1x_coco.py'
rpn_weight = 0.7
model = dict(
rpn_head=dict(
_delete_=True,
type='CascadeRPNHead',
num_stages=2,
stages=[
dict(
type='StageCascadeRPNHead',
in_channels=256,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
scales=[8],
ratios=[1.0],
strides=[4, 8, 16, 32, 64]),
adapt_cfg=dict(type='dilation', dilation=3),
bridged_feature=True,
sampling=False,
with_cls=False,
reg_decoded_bbox=True,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=(.0, .0, .0, .0),
target_stds=(0.1, 0.1, 0.5, 0.5)),
loss_bbox=dict(
type='IoULoss', linear=True,
loss_weight=10.0 * rpn_weight)),
dict(
type='StageCascadeRPNHead',
in_channels=256,
feat_channels=256,
adapt_cfg=dict(type='offset'),
bridged_feature=False,
sampling=True,
with_cls=True,
reg_decoded_bbox=True,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=(.0, .0, .0, .0),
target_stds=(0.05, 0.05, 0.1, 0.1)),
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=True,
loss_weight=1.0 * rpn_weight),
loss_bbox=dict(
type='IoULoss', linear=True,
loss_weight=10.0 * rpn_weight))
]),
roi_head=dict(
bbox_head=dict(
bbox_coder=dict(target_stds=[0.04, 0.04, 0.08, 0.08]),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.5),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))),
# model training and testing settings
train_cfg=dict(
rpn=[
dict(
assigner=dict(
type='RegionAssigner', center_ratio=0.2, ignore_ratio=0.5),
allowed_border=-1,
pos_weight=-1,
debug=False),
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.7,
min_pos_iou=0.3,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=-1,
pos_weight=-1,
debug=False)
],
rpn_proposal=dict(max_num=300, nms_thr=0.8),
rcnn=dict(
assigner=dict(
pos_iou_thr=0.65, neg_iou_thr=0.65, min_pos_iou=0.65),
sampler=dict(type='RandomSampler', num=256))),
test_cfg=dict(
rpn=dict(max_num=300, nms_thr=0.8), rcnn=dict(score_thr=1e-3)))
optimizer_config = dict(
_delete_=True, grad_clip=dict(max_norm=35, norm_type=2))
| [
"noreply@github.com"
] | Young-1217.noreply@github.com |
be828be8e6b3ef5b3d0469a50a97f4c57b295f59 | aba00d6272765b71397cd3eba105fc79b3a346e0 | /Old_Python_projects/ITGK/øving10/sudoku.py | 11337b49d12f25436ebcbada686bb6db904a94e1 | [] | no_license | JosteinGj/School | a2c7cc090571b867637003fe6c647898ba9d8d24 | 3b5f29846e443b97f042241237dbda3208b20831 | refs/heads/master | 2023-05-02T11:07:29.517669 | 2021-04-26T09:04:57 | 2021-04-26T09:04:57 | 295,340,194 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 210 | py | def make_board(filename):
file=open(filename)
board=file.readlines()
output=[]
for i in board:
row=i.split(",")
output.append(row)
return output
print(make_board("test.txt")) | [
"jostein.gj@gmail.com"
] | jostein.gj@gmail.com |
6b40833fba82cb85808e5f9f3fbb03fa0177cdfa | 03d07de94fc22d1583c45ca84c711a06df8a40ff | /lc/dynamic_programming/lc_474_ones-and-zeroes.py | 92e6f2fda2baa84c8f8040ccba0e83ef0384d9a7 | [] | no_license | gaopenghigh/algorithm | 94e04293c69a2ad6903495e1cf6e1b75556535bb | f5d78c98c7201c56f9d4c3a9c0c76e9447a17985 | refs/heads/master | 2022-03-11T18:46:38.712923 | 2022-02-20T14:20:54 | 2022-02-20T14:20:54 | 54,484,549 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,416 | py | # 474. 一和零
# 给你一个二进制字符串数组 strs 和两个整数 m 和 n 。
# 请你找出并返回 strs 的最大子集的长度,该子集中 最多 有 m 个 0 和 n 个 1 。
# 如果 x 的所有元素也是 y 的元素,集合 x 是集合 y 的 子集 。
#
# 示例 1:
# 输入:strs = ["10", "0001", "111001", "1", "0"], m = 5, n = 3
# 输出:4
# 解释:最多有 5 个 0 和 3 个 1 的最大子集是 {"10","0001","1","0"} ,因此答案是 4 。
# 其他满足题意但较小的子集包括 {"0001","1"} 和 {"10","1","0"} 。{"111001"} 不满足题意,因为它含 4 个 1 ,大于 n 的值 3 。
#
# 示例 2:
# 输入:strs = ["10", "0", "1"], m = 1, n = 1
# 输出:2
# 解释:最大的子集是 {"0", "1"} ,所以答案是 2 。
#
# 提示:
# 1 <= strs.length <= 600
# 1 <= strs[i].length <= 100
# strs[i] 仅由 '0' 和 '1' 组成
# 1 <= m, n <= 100
# dp[i][j][k] 中 i, j 和 k 描述状态,从前 i 个元素中选择满足要求的最大子集,其中最多有 j 个 0 和 k 个 1
# i 的最大值为 len(strs), j 和 k 的最大值为 m 和 n
# base case: dp[0][x][x] = 0
# 假设 strs[i] 有 x 个 '0' 和 y 个 '1'
# 如果最终的最大子集中包含了 strs[i],则 dp[i][j][k] = 1 + dp[i-1][j-x][k-y]
# 如果最大子集中不包含 strs[i],则 dp[i][j][k] = dp[i-1][j][k]
# 取其中的最大值作为 dp[i][j][k] 的值
# 最终的答案就是 dp[len(strs)][m][n]
class Solution:
def findMaxForm(self, strs: list[str], m: int, n: int) -> int:
dp = [
[
[ 0 for _ in range(n + 1) ] for _ in range(m + 1)
] for _ in range(len(strs) + 1)
]
for i in range(1, len(strs)+1):
s = strs[i-1]
n0 = len([i for i in s if i == '0'])
n1 = len(s) - n0
for j in range(m+1):
for k in range(n+1):
if j >= n0 and k >= n1:
dp[i][j][k] = max(
1 + dp[i-1][j-n0][k-n1], # 选择
dp[i-1][j][k] # 不选择
)
else: # 只能不选择
dp[i][j][k] = dp[i-1][j][k] # 不选择
return dp[len(strs)][m][n]
if __name__ == '__main__':
strs = ["10", "0001", "111001", "1", "0"]
m = 5
n = 3
print(Solution().findMaxForm(strs, m, n)) | [
"jh.gao@ucloud.cn"
] | jh.gao@ucloud.cn |
9b596c719b030f28eac270a22b939f7f9af7eb1b | 5e9de302964b59ccd74aa3d62d4786c87ca60108 | /testmachine/common.py | cc5324bbe50a67ea2ea0b142239a1e4783349923 | [
"BSD-2-Clause"
] | permissive | cid-aaron/testmachine | 6c409d356ae3a3dc45d0ab35c8954d92cb57b4bf | dc207986b0d2d74241842472c80e98dd9f536e7d | refs/heads/master | 2020-05-20T16:52:40.615042 | 2014-01-11T12:26:14 | 2014-01-11T12:26:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,383 | py | import operator
from .operations import (
Drop,
Swap,
Rot,
BinaryOperator,
UnaryOperator,
ReadAndWrite,
Check,
PushRandom,
)
def operation(*args, **kwargs):
"""
Add an operation which pops arguments from each of the varstacks named
in args, passes the result in that order to function and pushes the
result of the invocation onto target. If target is None the result is
ignored.
"""
return ReadAndWrite(
*args, **kwargs
)
def binary_operation(*args, **kwargs):
return BinaryOperator(*args, **kwargs)
def unary_operation(operation, varstack, name):
return UnaryOperator(operation, varstack, name)
def check(*args, **kwargs):
"""
Add an operation which reads from the varstacks in args in order,
without popping their result and passes them in order to test. If test
returns something truthy this operation passes, else it will fail.
"""
return Check(*args, **kwargs)
def generate(*args, **kwargs):
"""
Add a generator for operations which produces values by calling
produce with a Random instance and pushes them onto target.
"""
return PushRandom(*args, **kwargs)
def basic_operations(varstack):
"""
Define basic stack shuffling and manipulation operations on varstack.
Most testmachines will want these on most varstacks. They don't do
anything very interesting, but by moving data around they expand the
range of programs that can be generated.
"""
return (
Drop(varstack),
Swap(varstack),
Rot(varstack),
)
def arithmetic_operations(varstack):
"""
Elements of varstack may be combined with the integer operations +, -,
* and /. They may also be negated.
"""
return (
binary_operation(operator.add, varstack, "+"),
binary_operation(operator.sub, varstack, "-"),
binary_operation(operator.mul, varstack, "*"),
unary_operation(operator.neg, varstack, "-"),
)
def ints(target="ints"):
"""
Convenience function to define operations for filling target with ints.
Defines some generators, and adds basic and arithmetic operations to target
"""
return (
basic_operations(target),
arithmetic_operations(target),
generate(lambda r: r.randint(0, 10 ** 6), target),
generate(lambda r: r.randint(-10, 10), target),
)
def lists(source, target):
"""
Operations which populate target with lists whose elements come from source
"""
return (
basic_operations(target),
generate(lambda r: [], target),
operation(
function=lambda x, y: x.append(y),
argspec=(target, source),
target=None,
name="append",
pattern="{0}.append({1})"
),
operation(
function=lambda x: [x],
argspec=(source,),
target=target,
name="singleton",
pattern="[{0}]",
),
operation(
function=lambda x, y: [x, y],
argspec=(source, source),
target=target,
name="pair",
pattern="[{0}, {1}]",
),
operation(
function=list,
argspec=(target,),
target=target
),
binary_operation(operator.add, target, "+"),
)
| [
"david@drmaciver.com"
] | david@drmaciver.com |
77cbdfeadabbc31c5a1c4debe0f00849f53dbac8 | ae71e532468e861e3a9fcb90f613eddca267ace6 | /routes/class_incoming.py | f2d93f0a50f1963382d3895bbaf47dcf3e2de6e0 | [
"CC-BY-4.0"
] | permissive | soon14/proms-4.0 | 0b4ed398125e529c13dc8f0d9b0c14e0348ae5c6 | 6c3a1fd62c9394761664e100fc1dde50fd79dc11 | refs/heads/master | 2020-09-23T20:33:56.716317 | 2019-06-09T04:01:29 | 2019-06-09T04:01:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,124 | py | from abc import ABCMeta, abstractmethod
import database
from . import w_l
class IncomingClass(metaclass=ABCMeta):
@abstractmethod
def __init__(self, request):
self.request = request
self.graph = None
self.uri = None
self.named_graph_uri = None
self.error_messages = None
@abstractmethod
def valid(self):
pass
@abstractmethod
def determine_uri(self):
pass
def stored(self):
""" Add an item to PROMS"""
if self.graph is None or self.named_graph_uri is None:
msg = 'The graph and the named_grapoh_uri properties of this class instance must not be None when trying ' \
'to store this instance in the provenance DB.'
self.error_messages = msg
return False
try:
w_l(str(self.graph))
w_l(str(self.named_graph_uri))
database.insert(self.graph, self.named_graph_uri)
return True
except Exception as e:
self.error_messages = ['Could not connect to the provenance database']
return False
| [
"m13001282105@163.com"
] | m13001282105@163.com |
1b720e6c3d0b7ff11d6d728118fb3c6214ec45a5 | 75cf6a9fd035883b64ca2309382e0178cf370b43 | /Empirical/python/sklearn/zdivers/benchmarks/bench_multilabel_metrics.py | 02a1fb8b5d83267d249892377574c05608ec6fba | [] | no_license | ygtfrdes/Program | 171b95b9f32a105185a7bf8ec6c8c1ca9d1eda9d | 1c1e30230f0df50733b160ca73510c41d777edb9 | refs/heads/master | 2022-10-08T13:13:17.861152 | 2019-11-06T04:53:27 | 2019-11-06T04:53:27 | 219,560,170 | 1 | 2 | null | 2022-09-30T19:51:17 | 2019-11-04T17:39:52 | HTML | UTF-8 | Python | false | false | 7,086 | py | #!/usr/bin/python
#!/usr/bin/env python
"""
A comparison of multilabel target formats and metrics over them
"""
from timeit import timeit
from functools import partial
import itertools
import argparse
import sys
import matplotlib.pyplot as plt
import scipy.sparse as sp
import numpy as np
from sklearn.datasets import make_multilabel_classification
from sklearn.metrics import (f1_score, accuracy_score, hamming_loss,
jaccard_similarity_score)
from sklearn.utils.testing import ignore_warnings
METRICS = {
'f1': partial(f1_score, average='micro'),
'f1-by-sample': partial(f1_score, average='samples'),
'accuracy': accuracy_score,
'hamming': hamming_loss,
'jaccard': jaccard_similarity_score,
}
FORMATS = {
'sequences': lambda y: [list(np.flatnonzero(s)) for s in y],
'dense': lambda y: y,
'csr': lambda y: sp.csr_matrix(y),
'csc': lambda y: sp.csc_matrix(y),
}
@ignore_warnings
def benchmark(metrics=tuple(v for k, v in sorted(METRICS.items())),
formats=tuple(v for k, v in sorted(FORMATS.items())),
samples=1000, classes=4, density=.2,
n_times=5):
"""Times metric calculations for a number of inputs
Parameters
----------
metrics : array-like of callables (1d or 0d)
The metric functions to time.
formats : array-like of callables (1d or 0d)
These may transform a dense indicator matrix into multilabel
representation.
samples : array-like of ints (1d or 0d)
The number of samples to generate as input.
classes : array-like of ints (1d or 0d)
The number of classes in the input.
density : array-like of ints (1d or 0d)
The density of positive labels in the input.
n_times : int
Time calling the metric n_times times.
Returns
-------
array of floats shaped like (metrics, formats, samples, classes, density)
Time in seconds.
"""
metrics = np.atleast_1d(metrics)
samples = np.atleast_1d(samples)
classes = np.atleast_1d(classes)
density = np.atleast_1d(density)
formats = np.atleast_1d(formats)
out = np.zeros((len(metrics), len(formats), len(samples), len(classes),
len(density)), dtype=float)
it = itertools.product(samples, classes, density)
for i, (s, c, d) in enumerate(it):
_, y_true = make_multilabel_classification(n_samples=s, n_features=1,
n_classes=c, n_labels=d * c,
random_state=42)
_, y_pred = make_multilabel_classification(n_samples=s, n_features=1,
n_classes=c, n_labels=d * c,
random_state=84)
for j, f in enumerate(formats):
f_true = f(y_true)
f_pred = f(y_pred)
for k, metric in enumerate(metrics):
t = timeit(partial(metric, f_true, f_pred), number=n_times)
out[k, j].flat[i] = t
return out
def _tabulate(results, metrics, formats):
"""Prints results by metric and format
Uses the last ([-1]) value of other fields
"""
column_width = max(max(len(k) for k in formats) + 1, 8)
first_width = max(len(k) for k in metrics)
head_fmt = ('{:<{fw}s}' + '{:>{cw}s}' * len(formats))
row_fmt = ('{:<{fw}s}' + '{:>{cw}.3f}' * len(formats))
print(head_fmt.format('Metric', *formats,
cw=column_width, fw=first_width))
for metric, row in zip(metrics, results[:, :, -1, -1, -1]):
print(row_fmt.format(metric, *row,
cw=column_width, fw=first_width))
def _plot(results, metrics, formats, title, x_ticks, x_label,
format_markers=('x', '|', 'o', '+'),
metric_colors=('c', 'm', 'y', 'k', 'g', 'r', 'b')):
"""
Plot the results by metric, format and some other variable given by
x_label
"""
fig = plt.figure('scikit-learn multilabel metrics benchmarks')
plt.title(title)
ax = fig.add_subplot(111)
for i, metric in enumerate(metrics):
for j, format in enumerate(formats):
ax.plot(x_ticks, results[i, j].flat,
label='{}, {}'.format(metric, format),
marker=format_markers[j],
color=metric_colors[i % len(metric_colors)])
ax.set_xlabel(x_label)
ax.set_ylabel('Time (s)')
ax.legend()
plt.show()
if __name__ == "__main__":
ap = argparse.ArgumentParser()
ap.add_argument('metrics', nargs='*', default=sorted(METRICS),
help='Specifies metrics to benchmark, defaults to all. '
'Choices are: {}'.format(sorted(METRICS)))
ap.add_argument('--formats', nargs='+', choices=sorted(FORMATS),
help='Specifies multilabel formats to benchmark '
'(defaults to all).')
ap.add_argument('--samples', type=int, default=1000,
help='The number of samples to generate')
ap.add_argument('--classes', type=int, default=10,
help='The number of classes')
ap.add_argument('--density', type=float, default=.2,
help='The average density of labels per sample')
ap.add_argument('--plot', choices=['classes', 'density', 'samples'],
default=None,
help='Plot time with respect to this parameter varying '
'up to the specified value')
ap.add_argument('--n-steps', default=10, type=int,
help='Plot this many points for each metric')
ap.add_argument('--n-times',
default=5, type=int,
help="Time performance over n_times trials")
args = ap.parse_args()
if args.plot is not None:
max_val = getattr(args, args.plot)
if args.plot in ('classes', 'samples'):
min_val = 2
else:
min_val = 0
steps = np.linspace(min_val, max_val, num=args.n_steps + 1)[1:]
if args.plot in ('classes', 'samples'):
steps = np.unique(np.round(steps).astype(int))
setattr(args, args.plot, steps)
if args.metrics is None:
args.metrics = sorted(METRICS)
if args.formats is None:
args.formats = sorted(FORMATS)
results = benchmark([METRICS[k] for k in args.metrics],
[FORMATS[k] for k in args.formats],
args.samples, args.classes, args.density,
args.n_times)
_tabulate(results, args.metrics, args.formats)
if args.plot is not None:
print('Displaying plot', file=sys.stderr)
title = ('Multilabel metrics with %s' %
', '.join('{0}={1}'.format(field, getattr(args, field))
for field in ['samples', 'classes', 'density']
if args.plot != field))
_plot(results, args.metrics, args.formats, title, steps, args.plot)
| [
"githubfortyuds@gmail.com"
] | githubfortyuds@gmail.com |
9108e76bcc9deddc6f24d53e3de93b4c57e58f2e | 3005ac0fbafc802212f788185138206f13b1103d | /PILtest/tests2018_4_7.py | d5ec97cc486ac983271e930705ee8380a446d523 | [] | no_license | Tony1886/python_workplace | a8903127d33d95c8e02e09dc2b4c4528a26561ad | 6d1b325ee060dda46e81e359b7ed1402d3f02bdf | refs/heads/master | 2020-05-24T15:09:50.366564 | 2019-05-18T06:51:03 | 2019-05-18T06:51:03 | 187,324,243 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,689 | py | # -*- coding: utf-8 -*-
"""
Created on Sat Apr 7 17:38:41 2018
@author: Tan Zhijie
"""
import numpy as np
import myFunction as mf
import tensorflow as tf
import matplotlib.pyplot as plt
# 生成几个随机二值图进行傅里叶变换
M = 1
N = 64
m = 2000
n = M*N
Es = np.zeros((m,M,N))
Ir = np.zeros(np.shape(Es))
for i in range(m):
Es[i] = np.random.randint(0,2,[M,N])
Er = mf.mfft(Es[i])
Ir[i] = abs(Er)**2
Y = np.reshape(Es,[m,n])
X = np.reshape(Ir,[m,n])
X = X/np.max(X)
def compute_accuracy(v_xs,v_ys):
global prediction
y_pre = sess.run(prediction,feed_dict={xs:v_xs})
correct_prediction = tf.equal(tf.argmax(y_pre,1), tf.argmax(v_ys,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
result = sess.run(accuracy,feed_dict={xs:v_xs,ys:v_ys})
return result
# 构造神经网络的一层
def add_layer(inputs,in_size,out_size,activation = None):
W = tf.Variable(tf.random_normal([in_size,out_size])/in_size,name = 'W')
# W = tf.Variable(tf.zeros([in_size,out_size]),name = 'W')
b = tf.Variable(tf.zeros([1,out_size]),name = 'b')
Z = tf.matmul(inputs,W)+b
if activation ==None:
output = Z
else:
output = activation(Z)
return output
# define input
xs = tf.placeholder(tf.float32,[None,n])
ys = tf.placeholder(tf.float32,[None,n])
keep_drop = tf.placeholder(tf.float32)
# add layer
layer = [n,10*n,10*n,n]
for i in range(len(layer)-1):
if i == 0:
l = add_layer(xs,layer[i],layer[i+1],activation=tf.nn.relu)
elif i==len(layer)-2:
prediction = add_layer(l,layer[i],layer[i+1], activation=tf.sigmoid)
else:
l = add_layer(l,layer[i],layer[i+1],activation=tf.nn.relu)
# loss function 交叉熵
#loss = tf.reduce_mean(tf.reduce_sum(-ys*tf.log(prediction),
# reduction_indices = [1]))
# loss function mse
loss = tf.reduce_mean(tf.reduce_sum(tf.square(ys-prediction),
reduction_indices = [1]))
train = tf.train.GradientDescentOptimizer(0.1).minimize(loss)
sess = tf.Session()
sess.run(tf.global_variables_initializer())
for i in range(500):
sess.run(train,feed_dict = {xs:X,ys:Y,keep_drop:0.5})
if i%50 == 0:
print(compute_accuracy(X, Y))
# 比较任意一幅图结果
test = 0
result = sess.run(prediction,feed_dict={xs:X[test].reshape([1,n])})
plt.figure
#plt.subplot(121)
#plt.imshow(result.reshape([8,8]))
plt.scatter(np.linspace(1,64,64),result)
#plt.subplot(122)
#plt.imshow(Es[test])
plt.scatter(np.linspace(1,64,64),Y[test],c = 'r')
plt.show() | [
"="
] | = |
f47ce709574d8f6f0b2c6c34e551d32cd278a480 | 4c3e992678341ccaa1d4d14e97dac2e0682026d1 | /addons/account/wizard/account_report_common.py | da1be81acd79cea6ed6fb59206f46c531281111e | [] | no_license | gahan-corporation/wyatt | 3a6add8f8f815bd26643e1e7c81aea024945130d | 77e56da362bec56f13bf0abc9f8cf13e98461111 | refs/heads/master | 2021-09-03T18:56:15.726392 | 2018-01-08T02:54:47 | 2018-01-08T02:54:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,850 | py | # -*- coding: utf-8 -*-
from gerp import api, fields, models, _
class AccountCommonReport(models.TransientModel):
_name = "account.common.report"
_description = "Account Common Report"
company_id = fields.Many2one('res.company', string='Company', readonly=True, default=lambda self: self.env.user.company_id)
journal_ids = fields.Many2many('account.journal', string='Journals', required=True, default=lambda self: self.env['account.journal'].search([]))
date_from = fields.Date(string='Start Date')
date_to = fields.Date(string='End Date')
target_move = fields.Selection([('posted', 'All Posted Entries'),
('all', 'All Entries'),
], string='Target Moves', required=True, default='posted')
def _build_contexts(self, data):
result = {}
result['journal_ids'] = 'journal_ids' in data['form'] and data['form']['journal_ids'] or False
result['state'] = 'target_move' in data['form'] and data['form']['target_move'] or ''
result['date_from'] = data['form']['date_from'] or False
result['date_to'] = data['form']['date_to'] or False
result['strict_range'] = True if result['date_from'] else False
return result
def _print_report(self, data):
raise NotImplementedError()
@api.multi
def check_report(self):
self.ensure_one()
data = {}
data['ids'] = self.env.context.get('active_ids', [])
data['model'] = self.env.context.get('active_model', 'ir.ui.menu')
data['form'] = self.read(['date_from', 'date_to', 'journal_ids', 'target_move'])[0]
used_context = self._build_contexts(data)
data['form']['used_context'] = dict(used_context, lang=self.env.context.get('lang') or 'en_US')
return self._print_report(data)
| [
"duchess@gahan-corporation.com"
] | duchess@gahan-corporation.com |
081a6467862d8313dc76b52f240463d6543170aa | e1092274408656117bc00252bc761e3609ec437f | /python/paddle/distributed/auto_parallel/operators/dist_transpose.py | 8b40524e47315260d17e38f12bb95b5d93df39fb | [
"Apache-2.0"
] | permissive | xiegegege/Paddle | 92822623e4d7fe0263503f11b63fb22610bf2773 | df1d04ca0031da2d701f314f1c98afdbb107b1b5 | refs/heads/develop | 2022-01-13T08:34:09.835700 | 2021-12-30T07:06:03 | 2021-12-30T07:06:03 | 226,800,733 | 0 | 0 | Apache-2.0 | 2019-12-09T06:28:40 | 2019-12-09T06:28:39 | null | UTF-8 | Python | false | false | 4,337 | py | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License
from .common import DistributedOperatorImplContainer
from .common import DistributedOperatorImpl
from .common import register_distributed_operator_impl_container
from .common import register_distributed_operator_impl
from ..utils import is_dim_shard
from ..utils import is_dim_replicate
from ..utils import is_valid_list_index
from ..utils import compute_compatible_dim_mapping
from ..utils import compute_compatible_dims_mapping
from ..utils import compute_compatible_and_update_dim_mapping
class DistributedTranspose2(DistributedOperatorImplContainer):
def __init__(self, name):
super(DistributedTranspose2, self).__init__()
self._name = name
register_distributed_operator_impl_container(
"transpose2", DistributedTranspose2("transpose2"))
class DistributedTranspose2Impl(DistributedOperatorImpl):
def __init__(self, name):
super(DistributedTranspose2Impl, self).__init__()
self._name = name
self._forward_implemented = False
self._backward_implemented = False
def is_input_compatible(self, dist_op):
return True
def is_output_compatible(self, dist_op):
return True
def is_auto_compatible(self, dist_op):
op_desc = dist_op.serial_op.desc
op_dist_attr = dist_op.dist_attr
perm = op_desc.attr('axis')
x_name = op_desc.input('X')[0]
out_name = op_desc.output('Out')[0]
x_shape_name = op_desc.output('XShape')[0]
x_shape_dims_mapping = op_dist_attr.get_output_dims_mapping(
x_shape_name)
x_dims_mapping = op_dist_attr.get_input_dims_mapping(x_name)
out_dims_mapping = op_dist_attr.get_output_dims_mapping(out_name)
new_dims_mapping = [-1 for i in range(len(x_dims_mapping))]
for i in range(len(x_dims_mapping)):
new_dims_mapping[i] = x_dims_mapping[perm[i]]
if len(x_dims_mapping) != len(out_dims_mapping):
return False
if new_dims_mapping != out_dims_mapping:
return False
if x_shape_dims_mapping[0] != -1:
return False
if x_shape_dims_mapping[1:] != x_dims_mapping[:]:
return False
return True
def update_dims_mapping(self, dist_op):
changed = False
op_desc = dist_op.serial_op.desc
op_dist_attr = dist_op.dist_attr
x_name = op_desc.input('X')[0]
out_name = op_desc.output('Out')[0]
x_shape_name = op_desc.output('XShape')[0]
x_dims_mapping = op_dist_attr.get_input_dims_mapping(x_name)
out_dims_mapping = op_dist_attr.get_output_dims_mapping(out_name)
x_shape_dims_mapping = op_dist_attr.get_output_dims_mapping(
x_shape_name)
perm = op_desc.attr('axis')
assert len(x_dims_mapping) == len(perm)
new_dims_mapping = [-1 for i in range(len(x_dims_mapping))]
for i in range(len(x_dims_mapping)):
new_dims_mapping[i] = x_dims_mapping[perm[i]]
for i in range(len(out_dims_mapping)):
dim_changed = compute_compatible_and_update_dim_mapping(
[new_dims_mapping, out_dims_mapping], [i, i])
if dim_changed:
changed = True
for i in range(len(x_dims_mapping)):
if x_dims_mapping[perm[i]] != new_dims_mapping[i]:
x_dims_mapping[perm[i]] = new_dims_mapping[i]
changed = True
for i in range(len(x_dims_mapping)):
x_shape_dims_mapping[i + 1] = x_dims_mapping[i]
return changed
@staticmethod
def backward(ctx, *args, **kwargs):
pass
register_distributed_operator_impl(
"transpose2", DistributedTranspose2Impl("same_mapping_transpose"))
| [
"noreply@github.com"
] | xiegegege.noreply@github.com |
a64a16ccd106e89067e9f9d78718ab8be8dfd26c | 3e6dffad73b8d5024024b52b044c57a05e7e9655 | /assets/2020-01-18/zoogle/zdocs/migrations/0001_initial.py | 63b30f2d3ceded209f620d30b12c629ee113f1c1 | [
"MIT"
] | permissive | dhilipsiva/talks | 07f33b162d8db6e20e3d5974576d71c273629187 | 05581b6b8fdd0598d4ffed4bf75204d718719ed9 | refs/heads/master | 2022-08-05T12:27:39.932612 | 2022-07-21T07:43:36 | 2022-07-21T07:43:36 | 68,734,565 | 5 | 3 | MIT | 2021-07-30T11:24:12 | 2016-09-20T17:05:44 | Python | UTF-8 | Python | false | false | 888 | py | # Generated by Django 3.0.2 on 2020-01-17 11:51
from django.db import migrations, models
import uuid
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Zdoc',
fields=[
('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('subject', models.CharField(max_length=200)),
('description', models.CharField(max_length=400)),
('size', models.IntegerField()),
('owner', models.CharField(max_length=100)),
],
options={
'abstract': False,
},
),
]
| [
"dhilipsiva@pm.me"
] | dhilipsiva@pm.me |
b5da8a85646bf5e85130cdcf3f31dc9794265c46 | 83b5efa0d25c805971acf309146ca817f37692f2 | /src/visualization/markov_1_RMSE_comparison.py | 14c0f41a0dba1953a3e91bac58d3f03811fc6e2d | [
"MIT"
] | permissive | VictorOnink/Wind-Mixing-Diffusion | 3d104014de9e5d169c26320cca039aaaa1f490e2 | 16ac459f010ea3fd845335737a9a1e3f913b6103 | refs/heads/main | 2023-04-11T09:24:54.988598 | 2022-03-02T15:39:25 | 2022-03-02T15:39:25 | 314,201,646 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,748 | py | import matplotlib.pyplot as plt
import numpy as np
import analysis, settings
from visualization import utils_visualization as utils_v
def markov_1_RMSE_comparison(x_label=r'$u_{10}$ (m s$^{-1}$)', y_label=r'RMSE', fig_size=(16, 8),
ax_label_size=16, legend_size=11, wave_roughness=False):
"""
A figure showing the RMSE of M-0 and M-1 simulations relative to the field data for the various wind conditions
:param x_label: x axis label
:param y_label: y axis label
:param fig_size: figure size
:param ax_label_size: axis label fontsize
:param legend_size: legend fontsize
:param wave_roughness: if True, have surface roughness be wave height dependent
:return:
"""
# Setting the wind speeds, rise velocities and alpha values that we want to plot
w_10 = [0.85, 2.4, 4.35, 6.65, 9.3]
w_r = [-0.03, -0.003]
alpha = [0.0, 0.1, 0.3, 0.5, 0.7, 0.95]
# Setting a small offset so that markers for different rise velocities aren't plotted on top of each other
rise_offset = {-0.03: 0.15, -0.003: -0.15}
# Selecting the marker type according to the rise velocity
marker_type = {-0.03: 'o', -0.003: 'X'}
# Looping through the KPP simulations, and retrieving the RMSE values for them. First, for the M-0 simulations
point_list_KPP = []
for index_w10, wind in enumerate(w_10):
for rise in w_r:
RMSE = analysis.determine_RMSE(wind, rise, 'KPP', 'Ceiling', alpha=0.0, output=True,
wave_roughness=wave_roughness)
plot_tuple = RMSE, index_w10 + 1, marker_type[rise], utils_v.return_color(0)
point_list_KPP.append(plot_tuple)
# Then for the M-1 simulations
for index_w10, wind in enumerate(w_10):
for rise in w_r:
for index_a, a in enumerate(alpha):
RMSE = analysis.determine_RMSE(wind, rise, 'KPP', 'Ceiling_Markov', alpha=a, output=True,
wave_roughness=wave_roughness)
plot_tuple = RMSE, index_w10 + 1 + rise_offset[rise], marker_type[rise], utils_v.return_color(index_a + 1)
point_list_KPP.append(plot_tuple)
# Looping through the SWB simulations, and retrieving the RMSE values for them, first for the M-0 simulations
point_list_Kukulka = []
for index_w10, wind in enumerate(w_10):
for rise in w_r:
RMSE = analysis.determine_RMSE(wind, rise, 'SWB', 'Ceiling', alpha=0.0, output=True,
wave_roughness=wave_roughness)
plot_tuple = RMSE, index_w10 + 1, marker_type[rise], utils_v.return_color(0)
point_list_Kukulka.append(plot_tuple)
# And then for M-1 simulations
for index_w10, wind in enumerate(w_10):
for rise in w_r:
for index_a, a in enumerate(alpha):
RMSE = analysis.determine_RMSE(wind, rise, 'SWB', 'Ceiling_Markov', alpha=a, output=True,
wave_roughness=wave_roughness)
plot_tuple = RMSE, index_w10 + 1 + rise_offset[rise], marker_type[rise], utils_v.return_color(index_a + 1)
point_list_Kukulka.append(plot_tuple)
# Creating the axis
fig = plt.figure(figsize=fig_size)
# Adding the axis for KPP
ax = fig.add_subplot(121)
ax.set_xlabel(x_label, fontsize=ax_label_size)
ax.set_xlim((0, 6))
ax.tick_params(axis='both', labelsize=ax_label_size)
ax.set_ylabel(y_label, fontsize=ax_label_size)
ax.set_ylim((0, 0.15))
# Adding the axis for SWB
ax2 = fig.add_subplot(122)
ax2.set_xlim((0, 6))
ax2.set_xlabel(x_label, fontsize=ax_label_size)
ax2.tick_params(axis='both', labelsize=ax_label_size)
ax2.tick_params(labelleft=False)
# X axis = Concentration axis
ax2.set_ylim((0, 0.15))
ax.set_title(r'(a) KPP', fontsize=ax_label_size)
ax2.set_title(r'(b) SWB', fontsize=ax_label_size)
# Now, plotting the points
for point in point_list_KPP:
RMSE, index_w10, marker, color = point
ax.plot(index_w10, RMSE, color=color, marker=marker, alpha=0.7, markersize=10, mfc=None)
for point in point_list_Kukulka:
RMSE, index_w10, marker, color = point
ax2.plot(index_w10, RMSE, color=color, marker=marker, alpha=0.7, markersize=10, mfc=None)
# Now, altering the Y axis to list the wind speeds instead of the simple labels 1 - 5
ax.set_xticks(range(7))
ax.set_xticklabels(['', 0.85, 2.40, 4.35, 6.65, 9.30, ''])
ax2.set_xticks(range(7))
ax2.set_xticklabels(['', 0.85, 2.40, 4.35, 6.65, 9.30, ''])
# Next, adding a legend to explain the color scheme and the marker type
# Showing the marker type according to the rise velocity
marker = [plt.plot([], [], c='k', markersize=10, marker=marker_type[rise], label=label_marker(rise), linestyle='')[0] for rise in
w_r]
# Showing the color according to M-0/M-1 with alpha values
markov0 = [plt.plot([], [], c=utils_v.return_color(0), markersize=10, marker='o', label='M0', linestyle='')[0]]
markov1 = [plt.plot([], [], c=utils_v.return_color(ind + 1), markersize=10, marker='o',
label=r'M1 - $\alpha = $' + '{}'.format(a), linestyle='')[0] for ind, a in
enumerate(alpha)]
# Adding the legend
ax2.legend(handles=marker + markov0 + markov1, fontsize=legend_size, loc='lower left')
# Saving the figure
plt.savefig(settings.figure_dir + 'model_evaluation_markov_1.png', bbox_inches='tight', dpi=600)
def label_marker(rise):
""" Setting the figure label based on the rise velocity"""
return r'$w_{r}$' + ' = {}'.format(np.abs(rise)) + ' m s$^{-1}$'
| [
"31734765+VictorOnink@users.noreply.github.com"
] | 31734765+VictorOnink@users.noreply.github.com |
0afd4eee10e14e1b6303f505244a18b388f4e105 | 20132d827a96afa4f33da2424dbb52f58f01a844 | /Hash/Lessons42579/gamjapark_solution.py | bf234183778a0c3d46a8bfacfae6c1483158d56c | [
"MIT"
] | permissive | StudyForCoding/Programmers | 270cddd6e9dcfbad2916fbae7ae6f127844b64bd | bb520ba2cc188af932a222c76b9a508e3567f7f8 | refs/heads/master | 2022-07-16T03:00:25.227522 | 2020-05-21T11:32:18 | 2020-05-21T11:32:18 | 265,508,921 | 0 | 0 | MIT | 2020-05-21T11:32:19 | 2020-05-20T09:02:31 | Python | UTF-8 | Python | false | false | 965 | py | def solution(genres, plays):
answer = []
genre = dict()
for g in genres:
genre[g] = 0
PlayLength = len(plays)
play = [0 for _ in range(PlayLength)]
i = 0
for p, g in zip(plays,genres) :
play[i] = {g:p}
i += 1
for p in play:
genre[list(p.keys())[0]] += list(p.values())[0]
genre = sorted(genre.items(), key=lambda t: t[1], reverse=True)
for g in genre:
result_dict = dict()
i = 0
for p in play:
key = list(p.keys())[0]
if g[0] == key:
value = list(p.values())[0]
result_dict[i] = value
i += 1
result_dict = sorted(result_dict.items(), key=lambda kv: (-kv[1], kv[0]))
count = 0
for result in result_dict:
answer.append(result[0])
count += 1
if count == 2:
break
return answer | [
"gojang4@gmail.com"
] | gojang4@gmail.com |
dbd9fccd7ac5204f453bf57235a36d02f7ee7daa | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_342/ch7_2020_09_09_12_13_31_428073.py | 8f23596b9f6828be09ba337736b27df39a7b4582 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 135 | py | def calcula_area_do_triangulo(b,h):
area_triangulo=((b*h)/2)
return area_do_triangulo
base=10
altura=5
print(base,altura)
| [
"you@example.com"
] | you@example.com |
0885fc81f9c934a44e70c6659f760daee032544c | 1a07ef7e046c6cc278cfbd2e3d2e15b03d9e11b5 | /collections-namedtuple.py | 56c2e6ab87774041da0f213182dfd47b551c928d | [] | no_license | tjguk/lightning-collections | 273b7da18ad6cf225186fb074685ad670d59bab1 | 6e78a2afbf880d611af7ebe52c8d764bf400b245 | refs/heads/master | 2020-06-02T10:26:17.518183 | 2012-09-06T12:42:35 | 2012-09-06T12:42:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 264 | py | import os, sys
from collections import namedtuple
import csv
with open("files.csv", "rb") as f:
reader = csv.reader(f)
fields = reader.next()
Row = namedtuple("Row", fields)
files = [Row(*row) for row in reader]
for f in files[:10]:
print f
| [
"mail@timgolden.me.uk"
] | mail@timgolden.me.uk |
ac76c3ad644fa22d248fc653a359b84f9b4c9d09 | 434b9b85bb901a4e50977e14a74aa4592b583ea2 | /old/config/share/databases/defaultdb/lenny/postgresql_server/script-config | f29b1fe4c02ca77a804fc4f94c0dbd57897f5473 | [] | no_license | umeboshi2/paella | 12dd9f4785588cd501d7916171a5179b7c29bb31 | df4cf032a100ea1c109bcf02b1c74bb217bc84c6 | refs/heads/master | 2021-01-25T12:07:20.785787 | 2015-06-02T16:03:30 | 2015-06-02T16:03:30 | 7,497,284 | 5 | 3 | null | 2015-01-23T22:25:19 | 2013-01-08T07:11:58 | Python | UTF-8 | Python | false | false | 4,911 | #!/usr/bin/python
import sys
from useless.base.path import path
from paella.installer.toolkit import InstallerTools
print "config script for postgresql_server"
it = InstallerTools()
env = it.env()
recreate_template1_instructions="""UPDATE pg_database SET datallowconn = TRUE where datname = 'template0';
\c template0
UPDATE pg_database SET datistemplate = FALSE where datname = 'template1';
drop database template1;
create database template1 with template = template0 encoding = '%s';
UPDATE pg_database SET datistemplate = TRUE where datname = 'template1';
\c template1
UPDATE pg_database SET datallowconn = FALSE where datname = 'template0';
"""
# TYPE DATABASE USER CIDR-ADDRESS METHOD
# "local" is for Unix domain socket connections only
# paella local connections
# IPv4 local connections:
# paella ipv4 connections
# IPv6 local connections:
# paella ipv6 connections
def is_marker(line):
if line.startswith('# paella local connections'):
return 'local'
elif line.startswith('# paella ipv4 connections'):
return 'ipv4'
elif line.startswith('# paella ipv6 connections'):
return 'ipv6'
else:
return ''
def get_connections(conntype):
key = 'postgresql_server:%s_connections' % conntype
number_of_connections = int(it.get(key))
connections = []
# we start counting at 1 instead of 0
for index in range(1, number_of_connections + 1):
key = 'postgresql_server:%s_connection_%d' % (conntype, index)
line = it.get(key)
connections.append(line)
return connections
def configure_pghba_conf(toolkit):
it = toolkit
pghba_filename = it.target / 'etc/postgresql/8.3/main/pg_hba.conf'
orig_lines = pghba_filename.lines()
new_lines = []
for line in orig_lines:
new_lines.append(line)
conntype = is_marker(line)
if conntype:
connections = get_connections(conntype)
new_lines += connections
pghba_filename.write_lines(new_lines)
# here the cmd must be a space separated
# shell type command, a list won't work
def su_postgres_cmd(toolkit, cmd):
su_cmd = ['su', 'postgres', '-c', cmd]
toolkit.chroot(su_cmd)
def recreate_template1(toolkit):
it = toolkit
encoding = it.get('postgresql_server:default_encoding')
print "creating new template1 with encoding:", encoding
cmd = ['su', 'postgres', '-c', 'psql template1']
proc = it.chroot_proc(cmd, stdin=it.PIPE)
instructions = recreate_template1_instructions % encoding
proc.stdin.write(instructions)
proc.stdin.flush()
proc.stdin.close()
retval = proc.wait()
if retval:
raise RuntimeError , "Problem with dropping template1"
def create_pg_users(toolkit):
it = toolkit
users = it.getcsv('postgresql_server:postgresql_users')
for user in users:
opt = it.get('postgresql_server:createuser_opts_%s' % user)
cmd = 'createuser %s %s' % (opt, user)
print "Creating postgresql user", user
su_postgres_cmd(it, cmd)
def create_language(toolkit, language, database):
cmd = 'createlang %s %s' % (language, database)
print "Creating language, %s in database %s" % (language, database)
su_postgres_cmd(toolkit, cmd)
# all initial databases are copies of
# the template1 database. If you want
# a language that is specific to a database,
# it will have to be done by other means.
def create_template1_languages(toolkit):
it = toolkit
languages = it.getcsv('postgresql_server:template1_languages')
for language in languages:
create_language(toolkit, language, 'template1')
def create_initial_databases(toolkit):
it = toolkit
databases = it.getcsv('postgresql_server:initial_databases')
for database in databases:
cmd = 'createdb %s' % database
print "Creating database", database
su_postgres_cmd(toolkit, cmd)
configure_pghba_conf(it)
recreate_template1(it)
create_pg_users(it)
create_template1_languages(it)
create_initial_databases(it)
#-- Connect as the postgres superuser, e.g.:
# -- psql -U postgres template1
# -- Then run:
# UPDATE pg_database SET datallowconn = TRUE where datname = 'template0';
# \c template0
# UPDATE pg_database SET datistemplate = FALSE where datname = 'template1';
# drop database template1;
# create database template1 with template = template0 encoding = 'UNICODE';
# UPDATE pg_database SET datistemplate = TRUE where datname = 'template1';
# \c template1
# UPDATE pg_database SET datallowconn = FALSE where datname = 'template0';
| [
"umeboshi@cfc4e7be-4be4-0310-bcfe-fc894edce94f"
] | umeboshi@cfc4e7be-4be4-0310-bcfe-fc894edce94f | |
6fbc0d258e0586e2a8a11eadc79b68c6fd0decf4 | b2ccb163ea78887c32c9ce7e4513ae9db577e3cf | /Machine Learning A-Z/Part 7 - Natural Language Processing/Section 36 - Natural Language Processing/runstep3.py | 6a33dd541bda3329bc92aa552e7c08a366ff6589 | [] | no_license | Ukabix/machine-learning | f5966fec211d140e1297a2364789444f464a7caa | 0f80ff342cf186803320084bcc4a5e0e73d1fe8f | refs/heads/master | 2021-11-08T07:35:43.515249 | 2021-10-26T10:00:03 | 2021-10-26T10:00:03 | 213,165,560 | 0 | 0 | null | 2019-10-08T09:46:40 | 2019-10-06T12:36:05 | Python | UTF-8 | Python | false | false | 847 | py | # NLP - Bag of words
# Natural Language Processing
# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Importing the dataset
dataset = pd.read_csv('Restaurant_Reviews.tsv', delimiter = '\t', quoting = 3) # tsv specific, quiting skips ""
# cleaning the texts
# stemming
import re
import nltk
nltk.download('stopwords')
from nltk.corpus import stopwords
# import stemmer
from nltk.stem.porter import PorterStemmer
review = re.sub('[^a-zA-Z]', ' ', dataset['Review'][0])
review = review.lower()
review = review.split()
# call stemmer
ps = PorterStemmer()
# update the loop
review = [ps.stem(word) for word in review if not word in stopwords.words('english')]
# for larger texts use below to create a set:
# review = [ps.stem(word) for word in review if not word in set(stopwords.words('english'))]
| [
"54454097+Ukabix@users.noreply.github.com"
] | 54454097+Ukabix@users.noreply.github.com |
eeb6e3d020373099a4465df961980ece301619ae | 11771f5dd90a74d5c76765f27f0d9a9cb044f57b | /route/user_setting_top_menu.py | b9016c93c76dfbf472686e9b9939945095559130 | [
"BSD-3-Clause"
] | permissive | openNAMU/openNAMU | cc031ea848ac6d829ad243fcf59da26adf0f0814 | 868107e4ef53e4e78af15c590673b78ee385baa5 | refs/heads/beta | 2023-08-24T10:20:00.245680 | 2023-08-23T14:09:53 | 2023-08-23T14:09:53 | 78,184,261 | 86 | 75 | BSD-3-Clause | 2023-09-13T21:36:03 | 2017-01-06T07:22:10 | Python | UTF-8 | Python | false | false | 2,222 | py | from .tool.func import *
def user_setting_top_menu():
with get_db_connect() as conn:
curs = conn.cursor()
ip = ip_check()
if ban_check(ip) == 1:
return re_error('/ban')
if flask.request.method == 'POST':
curs.execute(db_change("select name from other where name = 'top_menu'"))
if curs.fetchall():
curs.execute(db_change("update user_set set data = ? where name = 'top_menu' and id = ?"), [flask.request.form.get('content', ''), ip])
else:
curs.execute(db_change("insert into user_set (name, data, id) values ('top_menu', ?, ?)"), [flask.request.form.get('content', ''), ip])
conn.commit()
return redirect('/setting/top_menu')
else:
curs.execute(db_change("select data from user_set where name = 'top_menu' and id = ?"), [ip])
db_data = curs.fetchall()
db_data = db_data[0][0] if db_data else ''
return easy_minify(flask.render_template(skin_check(),
imp = [load_lang('user_added_menu'), wiki_set(), wiki_custom(), wiki_css([0, 0])],
data = '''
<span>
EX)
<br>
ONTS
<br>
https://2du.pythonanywhere.com/
<br>
FrontPage
<br>
/w/FrontPage
</span>
<hr class="main_hr">
''' + load_lang('not_support_skin_warning') + '''
<hr class="main_hr">
<form method="post">
<textarea class="opennamu_textarea_500" placeholder="''' + load_lang('enter_top_menu_setting') + '''" name="content" id="content">''' + html.escape(db_data) + '''</textarea>
<hr class="main_hr">
<button id="opennamu_save_button" type="submit">''' + load_lang('save') + '''</button>
</form>
''',
menu = [['setting', load_lang('return')]]
)) | [
"min08101@naver.com"
] | min08101@naver.com |
ff8d99be9ba8504e548a98344c65a23d7a5cdb13 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_172/ch35_2020_04_10_18_41_08_169350.py | ac2467fe93019c432612c8377d076ebdb0ddf1f0 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 192 | py | x = int(input('numero: '))
soma = 0
y = True
while y:
if x != 0:
soma = soma + x
x = int(input('numero: '))
y = True
elif x == 0:
y = False
print (soma) | [
"you@example.com"
] | you@example.com |
f8f02a246f4d5c420abe71734d426e5f77389c3b | bb5b63774924abe86c2cb0d8a09795fcf1a4d822 | /chat/views.py | 0b613a45f48e1e833992141c24ab0fda3260f81c | [] | no_license | IdenTiclla/realtime_chat_app | 769bf432e993ee79cb93bd54489305db3526f4d5 | d2a5187bb9f257c5e8fefe6735d23e5d0eec64e6 | refs/heads/master | 2023-06-23T17:47:41.766605 | 2021-07-21T21:00:25 | 2021-07-21T21:00:25 | 387,920,262 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,343 | py | from typing import List
from django.shortcuts import render, redirect
from chat.models import Room, Message
from django.http import HttpResponse, JsonResponse
# Create your views here.
def home(request):
return render(request, 'home.html')
def room(request, room):
username = request.GET['username']
room_details = Room.objects.get(name=room)
return render(request, 'room.html', {'username': username, 'room_details': room_details, 'room': room})
def checkview(request):
room = request.POST['room_name']
username = request.POST['username']
if Room.objects.filter(name=room).exists():
return redirect('/'+room+'/?username=' + username)
else:
new_room = Room.objects.create(name=room)
new_room.save()
return redirect('/'+room+'/?username=' + username)
def send(request):
message = request.POST['message']
username = request.POST['username']
room_id = request.POST['room_id']
new_message = Message.objects.create(value=message, user=username, room=room_id)
new_message.save()
return HttpResponse('message sent successfully')
def getMessages(request, room):
room_details = Room.objects.get(name=room)
messages = Message.objects.filter(room=room_details.id)
return JsonResponse({
"messages": list(messages.values())
})
| [
"iden.ticlla@gmail.com"
] | iden.ticlla@gmail.com |
2728b2991bd08e88147fbdd4f649c902775aeb96 | 48c65330f577d11cedb29fd970aee35788ab72c6 | /ctrl_api_magento2_tierprice__upload.py | 49630511be6a3891ec8208159908d6b16d1bc9d4 | [] | no_license | yeboyebo/elganso_sync | 309ecbaba3127493abe001cd1704cc7098234baa | 66f033a0e27a05c1fc6704ec6ba2bd474d204b7e | refs/heads/master | 2023-07-22T00:17:48.201252 | 2023-07-19T07:48:40 | 2023-07-19T07:48:40 | 173,096,155 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 924 | py | import json
from django.http import HttpResponse
from sync.tasks import task_manager
# @class_declaration interna_upload #
class interna_upload():
pass
# @class_declaration elganso_sync_upload #
from models.flsyncppal import flsyncppal_def as syncppal
class elganso_sync_upload(interna_upload):
@staticmethod
def start(pk, data):
result = None
status = None
if "passwd" in data and data["passwd"] == syncppal.iface.get_param_sincro('apipass')['auth']:
response = task_manager.task_executer("mg2_tierprice_upload", data)
result = response["data"]
status = response["status"]
else:
result = {"msg": "Autorización denegada"}
status = 401
return HttpResponse(json.dumps(result), status=status, content_type="application/json")
# @class_declaration upload #
class upload(elganso_sync_upload):
pass
| [
"jesus.yeboyebo@gmail.com"
] | jesus.yeboyebo@gmail.com |
d206f151b94adc10fe4c49e4dbcce4e98915b17d | b01f25b447d5ec3d6bc08380ae2601d5badb6af3 | /sortbypower.py | 915263174c405422441a673ddfc3037fb3ddf3eb | [] | no_license | SurajPatil314/Leetcode-problems | 0b05faab17214437a599d846dd1c9a7ea82b9c4c | 9201a87246842855281c90a9705f83fce24d1137 | refs/heads/master | 2021-09-05T02:20:05.274438 | 2021-08-09T21:24:05 | 2021-08-09T21:24:05 | 203,467,630 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,434 | py | '''
The power of an integer x is defined as the number of steps needed to transform x into 1 using the following steps:
if x is even then x = x / 2
if x is odd then x = 3 * x + 1
For example, the power of x = 3 is 7 because 3 needs 7 steps to become 1 (3 --> 10 --> 5 --> 16 --> 8 --> 4 --> 2 --> 1).
Given three integers lo, hi and k. The task is to sort all integers in the interval [lo, hi] by the power value in ascending order, if two or more integers have the same power value sort them by ascending order.
Return the k-th integer in the range [lo, hi] sorted by the power value.
Notice that for any integer x (lo <= x <= hi) it is guaranteed that x will transform into 1 using these steps and that the power of x is will fit in 32 bit signed integer.
'''
class Solution:
def getKth(self, lo: int, hi: int, k: int) -> int:
if lo - hi == 0:
return lo
hmap = {}
for i in range(lo, hi + 1):
temp = i
steps = 0
while (temp != 1):
if temp % 2 == 1:
temp = temp * 3 + 1
steps += 1
else:
temp = temp / 2
steps += 1
hmap[i] = steps
sorted_hmap = sorted(hmap.items(), key=operator.itemgetter(1))
i1 = 0
for i2 in sorted_hmap:
if i1 == k - 1:
return i2[0]
i1 += 1
| [
"spatil2@umbc.edu"
] | spatil2@umbc.edu |
7e6bc2f7d0f3a1bc0d8311b35964e5b1f1bbad93 | cc632d66ccceb5f7bd739553cdb4054a0f1c0035 | /account/migrations/0001_initial.py | eee939a7d781d678f814c0532bd7e242beb8fde4 | [] | no_license | raimbaev223/django-by-example__bookmarks | b3b197c75906d0e49c495f5d8511517ddef62e08 | 59c4b35f5c739b824bd3e8073993a81b3a4e8845 | refs/heads/master | 2023-03-19T10:39:13.003427 | 2021-03-18T06:45:07 | 2021-03-18T06:45:07 | 347,860,635 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 845 | py | # Generated by Django 3.1.7 on 2021-03-15 05:56
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Profile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date_of_birth', models.DateField(blank=True, null=True)),
('photo', models.ImageField(blank=True, upload_to='users/%Y/%m/%d/')),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| [
"raimbaev.223@gmail.com"
] | raimbaev.223@gmail.com |
0765de0d02b8ed5de3c97d6966c417566f8a965b | 11bb0cbe6de2a0a4e94fc0ba610f61894d5593a1 | /VBS_Zgamma/RunII2018/Ntuples_2018/PKUTreeMaker/test/Zcrab/crab3_analysisZA.py | 6048c5a01d4c8023c1caecfb688a9c6e01a15f5d | [] | no_license | AnYpku/PKU-Cluster | 0dc4a88445aeb3ca239b2d7d7f796c6a67f3f69c | f9ffbcb7988053f4618fd015c1bb656d92ff51c6 | refs/heads/master | 2022-11-01T23:46:59.442037 | 2022-10-21T06:37:43 | 2022-10-21T06:37:43 | 188,202,345 | 0 | 4 | null | null | null | null | UTF-8 | Python | false | false | 1,183 | py | from WMCore.Configuration import Configuration
config = Configuration()
config.section_("General")
config.General.requestName = 'Z-ZA_smearing'
config.General.transferLogs = True
config.section_("JobType")
config.JobType.pluginName = 'Analysis'
config.JobType.inputFiles = ['Autumn18_V8_MC_L1FastJet_AK4PFchs.txt','Autumn18_V8_MC_L2Relative_AK4PFchs.txt','Autumn18_V8_MC_L3Absolute_AK4PFchs.txt','Autumn18_V8_MC_L1FastJet_AK4PFPuppi.txt','Autumn18_V8_MC_L2Relative_AK4PFPuppi.txt','Autumn18_V8_MC_L3Absolute_AK4PFPuppi.txt']
# Name of the CMSSW configuration file
config.JobType.psetName = 'Zanalysis.py'
config.JobType.allowUndistributedCMSSW = True
config.section_("Data")
config.Data.inputDataset = '/ZGToLLG_01J_5f_TuneCP5_13TeV-amcatnloFXFX-pythia8/RunIIAutumn18MiniAOD-102X_upgrade2018_realistic_v15_ext1-v2/MINIAODSIM'
config.Data.allowNonValidInputDataset = True
config.Data.inputDBS = 'global'
config.Data.splitting = 'FileBased'
config.Data.unitsPerJob = 1
config.Data.totalUnits = -1
config.Data.publication = False
config.Data.outputDatasetTag = 'Z-ZA'
config.section_("Site")
#config.Site.storageSite = 'T2_CN_Beijing'
config.Site.storageSite = 'T3_US_FNALLPC'
| [
"ying.an@cern.ch"
] | ying.an@cern.ch |
be2c6b067ca851d5e4016d68def182a7dd5a0109 | 83b67a0800ceb5d5828c8e2011ff31b5faa311f8 | /experiments/save_exp.py | 47f9c2c3deb37a6f84566cdb9566403e8279cbbc | [] | no_license | xKHUNx/scatnet_learn | 77d89da4025f9c3cdbe74c957cf2a3e8626e3a01 | 9b2efbc9764118b58146950320215d33b6dc3240 | refs/heads/master | 2022-03-29T18:42:49.644481 | 2020-01-15T13:17:21 | 2020-01-15T13:17:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,443 | py | # Fergal Cotter
#
# Future modules
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import os
import time
import git
import shutil
TEMPLATE = """Invariant Layer Experiment
==========================
This experiment was run on {day} at {time}.
The command used to run the program was:
.. code::
{runcmd}
The repo commit used at running this command was::
{githash}
The numpy/pytorch random seed was::
{seed}
The number of learnable parameters is::
{num_params}
Description
-----------
"""
ACC_TEMPLATE = """
Best Result
-----------
The best acc was {best:.3f} and the last acc was {last:.3f}
"""
FOLD_TEMPLATE = """
Fold {k}: {best:.3f}"""
def get_githash(module):
try:
git_repo = git.Repo(module.__file__, search_parent_directories=True)
hash = str(git_repo.git.rev_parse('HEAD'))
except git.InvalidGitRepositoryError:
hash = "?"
return hash
def break_run_cmd(params):
cmd = 'python {file} {}'.format(' \n '.join(
params[1:]), file=params[0])
return cmd
def get_num_params(net):
n = 0
if net is None:
return '?'
else:
for p in net.parameters():
if p.requires_grad:
n += p.numel()
if n < 1e5:
s = '{:.2f}k'.format(n/1e3)
elif n < 1e6:
s = '{:.3f}M'.format(n/1e6)
elif n < 1e7:
s = '{:.2f}M'.format(n/1e6)
else:
s = '{:.1f}M'.format(n/1e6)
return s
def save_experiment_info(outdir, seed, no_comment=False, net=None):
""" Creates an experiment info file in the output directory
Args:
outdir: the output directory
net: the network object
Returns:
None
"""
file = os.path.join(outdir, 'INFO.rst')
with open(file, 'w') as f:
f.write(TEMPLATE.format(
day=time.strftime('%Y/%m/%d'),
time=time.strftime("%H-%M-%S", time.gmtime(time.time())),
runcmd='python {}'.format(' '.join(sys.argv)),
githash="?",
seed=seed,
num_params=get_num_params(net)
))
if 'debug' not in outdir and not no_comment:
os.system('vim + {file}'.format(file=file))
print('Saved info file. Copying source')
copytree(os.path.join(os.path.dirname(__file__), '..'), outdir)
def copytree(src, dst):
""" Copies all the python files in src to dst recursively"""
for item in os.listdir(src):
s = os.path.join(src, item)
d = os.path.join(dst, item)
if os.path.isdir(s):
if not os.path.isdir(d):
os.mkdir(d)
copytree(s, d)
elif os.path.splitext(s)[1] == '.py':
if not os.path.exists(d):
shutil.copy2(s, d)
def save_acc(outdir, best, last):
""" Append the best accuracy to the info file"""
file = os.path.join(outdir, 'INFO.rst')
if os.path.exists(file):
with open(file, 'a') as f:
f.write(ACC_TEMPLATE.format(best=best, last=last))
def save_kfoldacc(outdir, fold, r2):
""" Append the best accuracy to the info file"""
file = os.path.join(outdir, 'INFO.rst')
if os.path.exists(file):
with open(file, 'a') as f:
if fold == 0:
f.write("\nKFOLD Results\n-------------")
f.write(FOLD_TEMPLATE.format(k=fold, best=r2))
| [
"fbcotter90@gmail.com"
] | fbcotter90@gmail.com |
de06d49e0d61870db7688cf2ef395cb2ffcc3935 | 81026fb32d5fe66e291c824f8bb8e251d6ce56d5 | /04 Functions/using_math.py | 32fbfddf7efc078860cced164909527f9c0badc5 | [] | no_license | rifqirosyidi/coding-python | b1e148d1787d741cdc0ce2c36dd13ff6b8d2c17b | 0d98d55d0aaf2cca4129f1b98365a5866eb28dd2 | refs/heads/master | 2020-08-11T12:51:57.567318 | 2019-10-29T15:38:14 | 2019-10-29T15:38:14 | 214,567,920 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,007 | py | import math
def get_luas_bidang(value):
bidang = value.lower()
if bidang == "segitiga":
bidang_segitiga()
elif bidang == "segiempat":
bidang_segiempat()
elif bidang == "lingkaran":
bidang_lingkaran()
else:
print("Hanya Menangani Segitiga, Lingkaran dan Segiempat")
def bidang_segitiga():
alas = float(input("Masukkan Alas : "))
tinngi = float(input("Masukkan Tinggi : "))
luas = (0.5 * alas) * tinngi
print("Luas Segi Tiga : ", luas)
def bidang_segiempat():
lebar = float(input("Masukkan Lebar : "))
panjang = float(input("Masukkan Panjang : "))
luas = panjang * lebar
print("Luas Segi Empat : ", luas)
def bidang_lingkaran():
jari_jari = float(input("Masukkan Jari Jari Lingkaran : "))
luas = math.pi * math.pow(jari_jari, 2)
print("Luas Lingkaran : ", round(luas, 2))
def main():
bentuk_bidang = input("Masukkan Bidang Apa Yang Ingin di Hitung : ")
get_luas_bidang(bentuk_bidang)
main() | [
"rief.rosyidi@gmail.com"
] | rief.rosyidi@gmail.com |
0792882963b6c69117dab0d94d597d48eff39ae2 | be9e32a9182d16fe92d937f5965a2a3a3ec11dc8 | /bundler.py | b6d11c6deacf7a9dd8756f4b1a0058978e5e60df | [
"ISC"
] | permissive | theexiled1/CageTheUnicorn | 1a649f974298109d68e4af3401976855ddf98c83 | e695a2d9660eed3bdde4909f755c52d66beef7da | refs/heads/master | 2020-12-03T11:59:29.995881 | 2017-08-21T16:43:20 | 2017-08-21T16:43:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 760 | py | import struct, sys
from glob import glob
def main(dumpdir, mainaddr, wkcaddr):
mainaddr = int(mainaddr.replace('0x', ''), 16)
wkcaddr = int(wkcaddr.replace('0x', ''), 16)
with file('membundle.bin', 'wb') as fp:
files = glob('%s/*.bin' % dumpdir)
fp.write(struct.pack('<IQQ', len(files), mainaddr, wkcaddr))
for fn in files:
addr = int(fn[11:].rsplit('/', 1)[-1].split(' ', 1)[0], 16)
end = int(fn[11:].rsplit('/', 1)[-1].split(' - ')[1], 16)
data = file(fn, 'rb').read()
print '%x size %x -- real %x' % (addr, end - addr, len(data))
if end - addr != len(data):
print 'MISMATCHED SIZE! CORRUPT DUMP'
raw_input()
fp.write(struct.pack('<QI', addr, len(data)))
fp.write(data)
if __name__=='__main__':
main(*sys.argv[1:])
| [
"cody.brocious@gmail.com"
] | cody.brocious@gmail.com |
8a555b206e436193b34566826633625c548602f5 | 4cdf99ed3fd91f2406fe908fff77284fbc2cd3c3 | /setup.py | 2c5bf3aad0abc71d2d5317b265460620b0199da7 | [
"MIT"
] | permissive | shinroo/mocr | cb4173d22413b9ba7e140118832e9ce6aac2da09 | 5d33a812172b87d126bf3f7de0238b0919c2ab86 | refs/heads/master | 2020-04-12T00:34:45.462919 | 2018-12-09T20:50:00 | 2018-12-09T20:50:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,030 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function
import os
import re
import codecs
from setuptools import setup, find_packages
cwd = os.path.abspath(os.path.dirname(__file__))
def read(filename):
with codecs.open(os.path.join(cwd, filename), 'rb', 'utf-8') as h:
return h.read()
metadata = read(os.path.join(cwd, 'mocr', '__init__.py'))
def extract_metaitem(meta):
meta_match = re.search(r"""^__{meta}__\s+=\s+['\"]([^'\"]*)['\"]""".format(meta=meta),
metadata, re.MULTILINE)
if meta_match:
return meta_match.group(1)
raise RuntimeError('Unable to find __{meta}__ string.'.format(meta=meta))
setup(
name='mocr',
version=extract_metaitem('version'),
license=extract_metaitem('license'),
description=extract_metaitem('description'),
long_description=(read('README.rst')),
author=extract_metaitem('author'),
author_email=extract_metaitem('email'),
maintainer=extract_metaitem('author'),
maintainer_email=extract_metaitem('email'),
url=extract_metaitem('url'),
download_url=extract_metaitem('download_url'),
packages=find_packages(exclude=('tests', 'docs')),
include_package_data=True,
platforms=['Any'],
install_requires=['opencv-python', 'opencv-contrib-python', 'pillow', 'pytesseract', 'imutils', 'numpy'],
setup_requires=['pytest-runner'],
tests_require=['pytest'],
keywords='ocr, optical character recognition, identity card, deep learning, opencv, meaningful',
classifiers=[
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Topic :: Software Development :: Libraries :: Python Modules',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
)
| [
"abdullahselek@gmail.com"
] | abdullahselek@gmail.com |
d2698bcccc58c5147cae9d2d59f3b99c7942463e | f7ed942c685bd0e77eb207b901ccae78b1844cfc | /three_sum.py | 4b5d927f83a2e27455519b1b6deaceb43c3a115e | [] | no_license | axu4github/leetcode.answers | beeeec30e2958a9fb5727fe1f77e5e919655becc | d2dd4a211a2c380f9816e0454c1a8c817545c1d7 | refs/heads/master | 2020-03-27T07:15:46.179010 | 2020-01-08T13:29:21 | 2020-01-08T13:29:21 | 146,176,415 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,538 | py | # coding=utf-8
class Solution(object):
"""
15. 三数之和
(https://leetcode-cn.com/problems/3sum/description/)
给定一个包含 n 个整数的数组 nums,
判断 nums 中是否存在三个元素 a,b,c ,
使得 a + b + c = 0 ?找出所有满足条件且不重复的三元组。
注意:答案中不可以包含重复的三元组。
例如, 给定数组 nums = [-1, 0, 1, 2, -1, -4],
满足要求的三元组集合为:
[
[-1, 0, 1],
[-1, -1, 2]
]
"""
def two_sum(self, nums, target):
all_res, _dict = [], {}
for i, one_num in enumerate(nums):
two_num = target - one_num
if two_num in _dict:
all_res.append([one_num, two_num])
else:
_dict[one_num] = i
return all_res
def threeSum(self, nums):
"""
:type nums: List[int]
:rtype: List[List[int]]
"""
_dict, processed, nums = {}, {}, sorted(nums)
for i, num in enumerate(nums):
if num > 0 or num in processed:
continue
correct_nums = self.two_sum(nums[i + 1:], num * -1)
if len(correct_nums) > 0:
processed[num] = None
for correct_num in correct_nums:
correct = tuple(sorted(correct_num + [num]))
if correct not in _dict:
_dict[correct] = None
return map(lambda correct: list(correct), _dict.keys())
| [
"axu.home@gmail.com"
] | axu.home@gmail.com |
ee3d1185c32a04865b9ad8088059e235a5492772 | eee5fc5e9e1bd9ababc9cf8ccb8add19c9219ca3 | /ABC/151/d_bfs.py | 21951db67400789ba2fc6f7fa0e087d70d5afb84 | [] | no_license | matatabinoneko/Atcoder | 31aa0114bde28ab1cf528feb86d1e70d54622d84 | 07cc54894b5bcf9bcb43e57a67f2a0cbb2714867 | refs/heads/master | 2021-11-13T04:39:13.824438 | 2021-10-31T01:42:52 | 2021-10-31T01:42:52 | 196,823,857 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,664 | py | import queue
import copy
def main():
h,w = map(int,input().split())
maze = []
maze.append(['#' for i in range(w+2)])
for i in range(h):
tmp = input()
tmp = list('#' + tmp + '#')
maze.append(tmp)
maze.append(['#' for i in range(w+2)])
# print(maze)
dx_ = [0,1,0,-1]
dy_ = [1,0,-1,0]
ans = -1
q = queue.Queue()
for i in range(1,h+1):
for j in range(1,w+1):
new_maze = copy.deepcopy(maze)
dis = [[-1 for a in range(w+2)]for b in range(h+2)]
# for a in range(1,h):
# for b in range(1,w):
# dis[a][b] = -1
if new_maze[i][j]=='.':
dis[i][j]=0
q.put([i,j])
max_dis = -1
while(not q.empty()):
[x,y] = q.get()
new_maze[x][y]='#'
# print(x,y)
if max_dis < dis[x][y]:
max_dis = dis[x][y]
for dx,dy in zip(dx_,dy_):
# print(x+dx,y+dy)
if dis[x+dx][y+dy] == -1 and new_maze[x+dx][y+dy] != '#':
# new_maze[x+dx][y+dy] = '#'
dis[x+dx][y+dy] = dis[x][y]+1
q.put([x+dx,y+dy])
# print("put",x+dx,y+dy)
# for i in range(len(maze)):
# print(maze[i])
# print()
# print("max is",max_dis)
if ans < max_dis:
ans = max_dis
print(ans)
return 0
if __name__ == "__main__":
main() | [
"matatabinoneko0721@gmail.com"
] | matatabinoneko0721@gmail.com |
ae82513226f91ab38fa6ae76d5f8bc57af2d9873 | 6e47be4e22ab76a8ddd7e18c89f5dc4f18539744 | /venv/openshift/lib/python3.6/site-packages/kubernetes/client/models/v1_aws_elastic_block_store_volume_source.py | ef20d19c36e713bc035972b2b71b0b4bad61a1a5 | [] | no_license | georgi-mobi/redhat_ocp4.5_training | 21236bb19d04a469c95a8f135188d3d1ae473764 | 2ccaa90e40dbbf8a18f668a5a7b0d5bfaa1db225 | refs/heads/main | 2023-03-30T10:47:08.687074 | 2021-04-01T05:25:49 | 2021-04-01T05:25:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,378 | py | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.12.11
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1AWSElasticBlockStoreVolumeSource(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'fs_type': 'str',
'partition': 'int',
'read_only': 'bool',
'volume_id': 'str'
}
attribute_map = {
'fs_type': 'fsType',
'partition': 'partition',
'read_only': 'readOnly',
'volume_id': 'volumeID'
}
def __init__(self, fs_type=None, partition=None, read_only=None, volume_id=None):
"""
V1AWSElasticBlockStoreVolumeSource - a model defined in Swagger
"""
self._fs_type = None
self._partition = None
self._read_only = None
self._volume_id = None
self.discriminator = None
if fs_type is not None:
self.fs_type = fs_type
if partition is not None:
self.partition = partition
if read_only is not None:
self.read_only = read_only
self.volume_id = volume_id
@property
def fs_type(self):
"""
Gets the fs_type of this V1AWSElasticBlockStoreVolumeSource.
Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
:return: The fs_type of this V1AWSElasticBlockStoreVolumeSource.
:rtype: str
"""
return self._fs_type
@fs_type.setter
def fs_type(self, fs_type):
"""
Sets the fs_type of this V1AWSElasticBlockStoreVolumeSource.
Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
:param fs_type: The fs_type of this V1AWSElasticBlockStoreVolumeSource.
:type: str
"""
self._fs_type = fs_type
@property
def partition(self):
"""
Gets the partition of this V1AWSElasticBlockStoreVolumeSource.
The partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \"1\". Similarly, the volume partition for /dev/sda is \"0\" (or you can leave the property empty).
:return: The partition of this V1AWSElasticBlockStoreVolumeSource.
:rtype: int
"""
return self._partition
@partition.setter
def partition(self, partition):
"""
Sets the partition of this V1AWSElasticBlockStoreVolumeSource.
The partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \"1\". Similarly, the volume partition for /dev/sda is \"0\" (or you can leave the property empty).
:param partition: The partition of this V1AWSElasticBlockStoreVolumeSource.
:type: int
"""
self._partition = partition
@property
def read_only(self):
"""
Gets the read_only of this V1AWSElasticBlockStoreVolumeSource.
Specify \"true\" to force and set the ReadOnly property in VolumeMounts to \"true\". If omitted, the default is \"false\". More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
:return: The read_only of this V1AWSElasticBlockStoreVolumeSource.
:rtype: bool
"""
return self._read_only
@read_only.setter
def read_only(self, read_only):
"""
Sets the read_only of this V1AWSElasticBlockStoreVolumeSource.
Specify \"true\" to force and set the ReadOnly property in VolumeMounts to \"true\". If omitted, the default is \"false\". More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
:param read_only: The read_only of this V1AWSElasticBlockStoreVolumeSource.
:type: bool
"""
self._read_only = read_only
@property
def volume_id(self):
"""
Gets the volume_id of this V1AWSElasticBlockStoreVolumeSource.
Unique ID of the persistent disk resource in AWS (Amazon EBS volume). More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
:return: The volume_id of this V1AWSElasticBlockStoreVolumeSource.
:rtype: str
"""
return self._volume_id
@volume_id.setter
def volume_id(self, volume_id):
    """
    Sets the volume_id of this V1AWSElasticBlockStoreVolumeSource.
    Unique ID of the persistent disk resource in AWS (Amazon EBS volume). More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
    :param volume_id: The volume_id of this V1AWSElasticBlockStoreVolumeSource.
    :type: str
    :raises ValueError: if ``volume_id`` is None (it is a required field)
    """
    # volume_id is the only required field of this model.
    if volume_id is None:
        raise ValueError("Invalid value for `volume_id`, must not be `None`")
    self._volume_id = volume_id
def to_dict(self):
    """Return the model's properties as a plain dictionary.

    Nested model objects (anything exposing ``to_dict``) are converted
    recursively, including inside list elements and dict values.
    """
    serialized = {}
    for attr in self.swagger_types:
        value = getattr(self, attr)
        if isinstance(value, list):
            serialized[attr] = [
                item.to_dict() if hasattr(item, "to_dict") else item
                for item in value
            ]
        elif hasattr(value, "to_dict"):
            serialized[attr] = value.to_dict()
        elif isinstance(value, dict):
            serialized[attr] = {
                key: val.to_dict() if hasattr(val, "to_dict") else val
                for key, val in value.items()
            }
        else:
            serialized[attr] = value
    return serialized
def to_str(self):
    """
    Returns the string representation of the model
    """
    # pprint.pformat gives a stable, readable rendering of the dict form.
    return pformat(self.to_dict())
def __repr__(self):
    """
    For `print` and `pprint`
    """
    # Delegates to to_str() so repr and str output stay consistent.
    return self.to_str()
def __eq__(self, other):
    """True when *other* is the same model type with equal attributes."""
    return (isinstance(other, V1AWSElasticBlockStoreVolumeSource)
            and self.__dict__ == other.__dict__)
def __ne__(self, other):
    """Inverse of ``__eq__``."""
    return not (self == other)
| [
"student@workstation.lab.example.com"
] | student@workstation.lab.example.com |
e1f003b7a5710057b5f83fb6a06df9cb3bdbece6 | 0129b016055daa1aaa1e9e0911f271fa7b38e27e | /programacao_estruturada/20192_166/for/fabio03_questao07.py | cd4394adf0a9e02d7f4115cc0e5c3e96b425cfb6 | [] | no_license | rogeriosilva-ifpi/teaching-tds-course | 7c43ff17d6677aef7b42071929b3de8361748870 | 771ccdc4dc932d0ef5ce6ba61a02b5ee11920d4c | refs/heads/master | 2022-04-04T01:08:45.157185 | 2020-01-30T19:36:57 | 2020-01-30T19:36:57 | 206,439,119 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 190 | py | def programa():
def programa():
    """Read an integer N and print the sum 1 + 2 + ... + N.

    Improvement: the O(N) accumulation loop is replaced with the
    closed-form arithmetic-series (Gauss) formula, which is O(1) and
    yields the same result for every input, including N <= 0 where the
    original empty loop produced 0.
    """
    numero = int(input('Número: '))
    # Gauss: 1 + 2 + ... + N == N * (N + 1) // 2 (exact integer division).
    soma = numero * (numero + 1) // 2 if numero > 0 else 0
    print('Resultado:', soma)
# Entry point: run the interactive program once when the script executes.
programa()
| [
"rogerio.silva@ifpi.edu.br"
] | rogerio.silva@ifpi.edu.br |
6dcf58f3fdc4af5e1c6f72c92a3d68ba9e34b60c | 34745a8d54fa7e3d9e4237415eb52e507508ad79 | /Python Fundamentals/04 Functions/Exercises/09_Factorial_Division.py | 7a8f98167d64d2071b173c43e8018a4076dbb28f | [] | no_license | DilyanTsenkov/SoftUni-Software-Engineering | 50476af0dc88b267d72c56fa87eeb88d841164b2 | fe446e3a50a00bb2e48d71ab8f783e0a4a406094 | refs/heads/main | 2023-08-12T18:18:42.144210 | 2021-09-25T11:10:38 | 2021-09-25T11:10:38 | 317,235,419 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 428 | py | def factorial(num1, num2):
def factorial(num1, num2):
    """Return num1! / num2! as a float.

    Bug fix: the original body ignored its parameters and multiplied the
    module-level globals ``first_number``/``second_number``, so it only
    worked for the exact values read at the prompt and returned 0 for
    input 0 (0! is 1).

    :param num1: non-negative integer whose factorial is the numerator
    :param num2: non-negative integer whose factorial is the denominator
    """
    numerator = 1
    for i in range(2, num1 + 1):
        numerator *= i
    denominator = 1
    for i in range(2, num2 + 1):
        denominator *= i
    return numerator / denominator
# Script driver: read both operands, then print num1!/num2! with 2 decimals.
first_number = int(input())
second_number = int(input())
print(f"{(factorial(first_number, second_number)):.2f}")
| [
"noreply@github.com"
] | DilyanTsenkov.noreply@github.com |
15c17f24a24cf077d10cb8262277e8ae5cbf8997 | d696454b3e3473a45e0bb486e93f3742493c86a0 | /music/views/playlist.py | 259f2b90dfa81dcf5dc5cea323f74b53d55c7dde | [] | no_license | jmg/music_camping | 430d04e1c8dec28816975daa8fa5375f2eb02435 | 72816647f70bb0afca5899bad5d1cfbaef4ff889 | refs/heads/master | 2021-06-20T01:57:30.540852 | 2017-01-04T05:11:28 | 2017-01-04T05:11:28 | 40,454,505 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,274 | py | from base import BaseView
from music.services.song import SongService
from music.services.playlist import PlayListService
from music.models import PlayList, Song, PlayListSong
from django.conf import settings
import threading
class PlayingView(BaseView):
    # Renders the playlist page together with the current player state.
    url = r"^playlist$"
    def get(self, *args, **kwrags):
        playlist = PlayListService().get_playlist()
        player_data = SongService().get_player_data()
        return self.render_to_response({"playlist": playlist, "player_data": player_data, "settings": settings})
class ChangeSongView(BaseView):
def post(self, *args, **kwrags):
is_next = self.request.POST.get("next") is not None
is_prev = self.request.POST.get("prev") is not None
try:
SongService().play_next_song(is_next=is_next, is_prev=is_prev)
except Exception, e:
return self.json_response({"error": str(e)})
return self.response(playlist_song.song.to_json())
class PlayView(BaseView):
    # Starts playback of the song id posted by the client.
    def post(self, *args, **kwrags):
        SongService().play_song(self.request.POST.get("song_id"))
        return self.response("ok")
class StopView(BaseView):
    # Stops the currently playing song.
    def post(self, *args, **kwrags):
        SongService().stop_song()
        return self.response("ok")
class PauseView(BaseView):
    # Pauses the currently playing song.
    def post(self, *args, **kwrags):
        SongService().pause_song()
        return self.response("ok")
class SelectView(BaseView):
    # Landing page for selecting songs; template needs no context.
    url = r"^$"
    def get(self, *args, **kwrags):
        return self.render_to_response({})
class AddView(BaseView):
    # Appends a song to the current playlist; errors are returned as JSON.
    def post(self, *args, **kwrags):
        song_id = self.request.POST.get("song_id")
        try:
            SongService().add_song(song_id)
        except Exception, e:
            return self.json_response({"error": str(e)})
        return self.response("ok")
class DeleteView(BaseView):
    # Removes a song from the playlist; errors are returned as JSON.
    def post(self, *args, **kwrags):
        song_id = self.request.POST.get("song_id")
        try:
            SongService().delete_song(song_id)
        except Exception, e:
            return self.json_response({"error": str(e)})
        return self.response("ok")
class PlayingListView(BaseView):
    # Serves the server-side (DataTables-style) listing of the playlist.
    def get(self, *args, **kwargs):
        playlist = PlayListService().get_playlist()
        qs = [x.song for x in PlayListSong.objects.filter(playlist=playlist)]
        # Column index -> cell renderer; callables receive the song object.
        columnIndexNameMap = {
            0: lambda song: self.render("playlist/song_stream.html", {"song": song, "playlist": playlist }),
            1: 'album',
            2: 'artist',
            3: lambda song: self.render("playlist/actions.html", {"song": song, "playlist": playlist })
        }
        # Column index -> model field used for sorting (None = unsortable).
        sortIndexNameMap = {
            0: 'name' ,
            1: 'album' ,
            2: 'artist' ,
            3: None,
        }
        return SongService().open_search(self.request, columnIndexNameMap, sortIndexNameMap, qs=qs)
class CurrentSongView(BaseView):
    # Renders the "now playing" fragment with playlist and player state.
    def get(self, *args, **kwrags):
        playlist = PlayListService().get_playlist()
        player_data = SongService().get_player_data()
        return self.render_to_response({"playlist": playlist, "player_data": player_data })
class MoveSongView(BaseView):
    # Moves one song up/down in the playlist; errors are returned as JSON.
    def post(self, *args, **kwrags):
        song_id = self.request.POST.get("song_id")
        direction = self.request.POST.get("direction")
        try:
            SongService().move_song(song_id, direction)
        except Exception, e:
            return self.json_response({"error": str(e)})
        return self.response("ok")
class BulkMoveSongView(BaseView):
    # Reorders several songs at once from a serialized client payload.
    def post(self, *args, **kwrags):
        data = self.request.POST.get("data")
        SongService().bulk_move_songs(data)
        return self.response("ok")
class SetVolumeView(BaseView):
    # Sets the player volume to the posted value.
    def post(self, *args, **kwrags):
        volume = self.request.POST.get("volume")
        SongService().set_volume(volume)
        return self.response("ok")
class SetPositionView(BaseView):
    # Seeks the current song to the posted position (defaults to 0).
    def post(self, *args, **kwrags):
        position = int(self.request.POST.get("position", 0))
        SongService().set_position(position)
        return self.response("ok")
class ClearView(BaseView):
    # Empties the current playlist.
    def post(self, *args, **kwrags):
        PlayListService().clear()
        return self.response("ok")
"jmg.utn@gmail.com"
] | jmg.utn@gmail.com |
f45a472394de6a4dc8569170a4d0b8cb76b5f712 | 8a5f8dfdd038590a579d14a84558cce2bb930b22 | /AICamera/app/src/main/cpp/caffe2/python/scope.py | 7aa881c0ca815c4f51643bb33d182b18b967b95a | [
"MIT"
] | permissive | blackxer/AICamera | ebc94c663e6f2ea6e8c81290a64bce4e7d369ed9 | 4f0a6a09a2288da2ec7140744b5c2862df114c78 | refs/heads/master | 2020-08-11T19:53:42.388828 | 2019-10-16T01:19:59 | 2019-10-16T01:19:59 | 214,616,987 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,465 | py | ## @package scope
# Module caffe2.python.scope
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import contextlib
import threading
from past.builtins import basestring
from caffe2.proto import caffe2_pb2
# The name scope and device scope when creating a new operator.
# Scopes are tracked per-thread (threading.local) so that concurrent net
# construction in different threads does not interfere.
_NAMESCOPE_SEPARATOR = '/'
_threadlocal_scope = threading.local()
def CurrentNameScope():
    """Return this thread's current name scope prefix ('' when unset).

    Lazily initializes the thread-local slot on first use.
    """
    _threadlocal_scope.namescope = getattr(
        _threadlocal_scope, "namescope", '')
    return _threadlocal_scope.namescope
def CurrentDeviceScope():
    """Return this thread's current device scope (None when unset).

    Lazily initializes the thread-local slot on first use.
    """
    _threadlocal_scope.devicescope = getattr(
        _threadlocal_scope, "devicescope", None)
    return _threadlocal_scope.devicescope
@contextlib.contextmanager
def NameScope(prefix, reset=False):
    """Context manager that appends ``prefix`` (joined with '/') to the
    thread-local name scope and restores the previous scope on exit.

    :param prefix: scope segment to append; None or '' leaves the scope as-is
    :param reset: if True, replace the whole scope instead of appending
    """
    global _threadlocal_scope
    assert isinstance(prefix, basestring) or prefix is None, \
        "NameScope takes in a string as its argument."
    old_scope = CurrentNameScope()
    prefix = prefix + _NAMESCOPE_SEPARATOR if prefix else ''
    if reset:
        _threadlocal_scope.namescope = prefix
    else:
        _threadlocal_scope.namescope = _threadlocal_scope.namescope + prefix
    try:
        yield
    finally:
        # Guard against code inside the block mutating the scope directly.
        assert _threadlocal_scope.namescope.endswith(prefix), \
            "The namescope variable is changed from outside NameScope() calls."
        _threadlocal_scope.namescope = old_scope
@contextlib.contextmanager
def DeviceScope(scope, node_name=None):
    """Context manager that installs ``scope`` (a caffe2_pb2.DeviceOption)
    as the thread-local device scope, restoring the old scope on exit.

    A nested scope inherits ``node_name`` and ``extra_info`` from the
    enclosing scope unless explicitly overridden.

    :param scope: DeviceOption to copy into the new scope (may be falsy)
    :param node_name: overrides/sets the node_name of the new scope
    """
    new_scope = caffe2_pb2.DeviceOption()
    if scope:
        assert isinstance(scope, caffe2_pb2.DeviceOption), \
            "DeviceScope takes in a caffe2_pb2.DeviceOption as its argument."
        new_scope.CopyFrom(scope)
    else:
        assert node_name, "At least one argument should be non-null in DeviceScope"
    # rewrite node_name if it is explicitly given
    if node_name:
        new_scope.node_name = node_name
    global _threadlocal_scope
    old_scope = CurrentDeviceScope()
    # nested scope should inherit the node_name if it is not explicitly set
    if old_scope and old_scope.HasField('node_name') and \
            not new_scope.HasField('node_name'):
        new_scope.node_name = old_scope.node_name
    # nested scope should inherit the extra_info and merged it with new extra_info
    if old_scope and hasattr(old_scope, 'extra_info'):
        new_scope.extra_info.extend(old_scope.extra_info)
        new_scope.extra_info.sort()
    _threadlocal_scope.devicescope = new_scope
    try:
        yield
    finally:
        assert _threadlocal_scope.devicescope == new_scope, \
            "The device scope is changed from outside DeviceScope() calls."
        _threadlocal_scope.devicescope = old_scope
@contextlib.contextmanager
def EmptyDeviceScope():
    """
    Allow users to 'disable' the device scope behaviour (so it can be
    controlled at a NetDef::DeviceOption level, not overridden at
    OperatorDef::DeviceOption level).
    This sets the CurrentDeviceScope() to None, so that the field is
    not set in CreateOperator(...), etc.
    """
    old_scope = CurrentDeviceScope()
    try:
        _threadlocal_scope.devicescope = None
        yield
    finally:
        # Restore whatever scope was active before entering this block.
        _threadlocal_scope.devicescope = old_scope
    return
| [
"zhangwei@egova.com.cn"
] | zhangwei@egova.com.cn |
32721484c00e4274922c0d5dce36abc0b6575e1b | d1c67f2031d657902acef4411877d75b992eab91 | /swagger_client/models/team_routing_rule.py | 91124b752ab07d247307aaaa3c98962cbd641c5d | [] | no_license | Certn/opsgenie-python | c6e6a7f42394499e5224d679cc9a449042fcf9c3 | bd5f402f97d591e4082b38c938cbabca4cf29787 | refs/heads/master | 2023-01-01T10:45:13.132455 | 2020-10-27T17:40:01 | 2020-10-27T17:40:01 | 307,769,432 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,731 | py | # coding: utf-8
"""
Opsgenie REST API
Opsgenie OpenAPI Specification # noqa: E501
OpenAPI spec version: 2.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class TeamRoutingRule(object):
    """Opsgenie team routing rule model (originally swagger-generated).

    Improvements over the generated code: the eight identical
    getter/setter pairs are produced by a small property factory, and
    ``six.iteritems`` was replaced with ``items()`` in ``to_dict`` so the
    class no longer needs the ``six`` compatibility shim (plain
    ``items()`` iterates identically on Python 2 and 3 here).  The public
    interface — constructor signature, properties, ``to_dict``/``to_str``
    and the comparison operators — is unchanged.
    """

    # Attribute name -> swagger type, consumed by ``to_dict``.
    swagger_types = {
        'id': 'str',
        'name': 'str',
        'is_default': 'bool',
        'order': 'int',
        'criteria': 'Filter',
        'timezone': 'str',
        'time_restriction': 'TimeRestrictionInterval',
        'notify': 'Recipient'
    }

    # Attribute name -> JSON key in the API payload.
    attribute_map = {
        'id': 'id',
        'name': 'name',
        'is_default': 'isDefault',
        'order': 'order',
        'criteria': 'criteria',
        'timezone': 'timezone',
        'time_restriction': 'timeRestriction',
        'notify': 'notify'
    }

    def __init__(self, id=None, name=None, is_default=None, order=None,
                 criteria=None, timezone=None, time_restriction=None,
                 notify=None):
        """TeamRoutingRule - a model defined in Swagger.

        All parameters are optional; only arguments that are not ``None``
        are assigned, mirroring the generated code's behaviour.
        """
        for attr in self.swagger_types:
            setattr(self, '_' + attr, None)
        self.discriminator = None
        provided = {
            'id': id, 'name': name, 'is_default': is_default,
            'order': order, 'criteria': criteria, 'timezone': timezone,
            'time_restriction': time_restriction, 'notify': notify,
        }
        for attr, value in provided.items():
            if value is not None:
                setattr(self, attr, value)

    def _accessor(attr, type_name):  # property factory, deleted below
        """Build a plain get/set property storing to ``self._<attr>``."""
        def _get(self):
            return getattr(self, '_' + attr)

        def _set(self, value):
            setattr(self, '_' + attr, value)

        doc = "The %s of this TeamRoutingRule (%s)." % (attr, type_name)
        return property(_get, _set, doc=doc)

    id = _accessor('id', 'str')
    name = _accessor('name', 'str')
    is_default = _accessor('is_default', 'bool')
    order = _accessor('order', 'int')
    criteria = _accessor('criteria', 'Filter')
    timezone = _accessor('timezone', 'str')
    time_restriction = _accessor('time_restriction', 'TimeRestrictionInterval')
    notify = _accessor('notify', 'Recipient')
    del _accessor  # keep the factory out of the public class namespace

    def to_dict(self):
        """Return the model's properties as a dict, recursing into nested
        models that expose ``to_dict`` (also inside lists and dicts)."""
        result = {}
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    v.to_dict() if hasattr(v, "to_dict") else v
                    for v in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    k: v.to_dict() if hasattr(v, "to_dict") else v
                    for k, v in value.items()
                }
            else:
                result[attr] = value
        # Kept from the generated template for dict-subclass models.
        if issubclass(TeamRoutingRule, dict):
            for key, value in self.items():
                result[key] = value
        return result

    def to_str(self):
        """Return the pretty-printed string representation of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """True when *other* is a TeamRoutingRule with equal attributes."""
        return (isinstance(other, TeamRoutingRule)
                and self.__dict__ == other.__dict__)

    def __ne__(self, other):
        """Inverse of ``__eq__``."""
        return not self == other
| [
"john@oram.ca"
] | john@oram.ca |
b962777592420ddaec641e394930ccb7f4714f4b | 70b339d0b2638a7914d0d56c5edf8a2637c9f4b0 | /Microsoft-budgetCombination.py | 7c5b8974df0f7b5706643a7f2353cba23ce79435 | [] | no_license | pflun/advancedAlgorithms | 9991da7514024e18ba08de8688966b9220e12571 | 5520dbcd26999b98e1229bf03c2f62dd690a2ddc | refs/heads/master | 2023-02-19T12:05:26.902535 | 2023-02-14T06:08:54 | 2023-02-14T06:08:54 | 189,055,701 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,148 | py | # -*- coding: utf-8 -*-
# 给你一个menu,是一个map,key是菜名,value是价格,比如
# "apple": 3.25,
# "chicken": 4.55,
# "cake":10.85,
#
# 然后给你一个budget,比如7.80.
# 要你给出所有菜名的combination,总价要正好符合budget,次序不重要,但不能有重复。
# 比如,如果budget是7.80,他就要求结果是[["apple", "chicken"]],不能是[["apple", "chicken"],["chicken","apple"]]
# 比如,如果budget是6.50,他就要求结果是[["apple", "apple"]]
class Solution:
    def budgetCombination(self, menu, budget):
        """Return every multiset of dish names whose prices sum exactly
        to ``budget``.

        Each dish may be used any number of times, and each combination
        is reported once (no reordered duplicates), e.g. with the sample
        menu a budget of 7.80 yields [["apple", "chicken"]] and 6.50
        yields [["apple", "apple"]].

        Fixes the original draft, which referenced an undefined
        ``getSum`` helper, called ``dfs`` with the wrong arity, never
        invoked the search, and returned nothing.  Prices are converted
        to integer cents so float rounding cannot make an exact match
        fail.

        :param menu: dict mapping dish name -> price (float)
        :param budget: target total to hit exactly (float)
        :return: list of combinations, each a list of dish names
        """
        target = int(round(budget * 100))
        # Sort for deterministic output; the start index in dfs prevents
        # the same multiset from appearing in more than one order.
        items = sorted((name, int(round(price * 100)))
                       for name, price in menu.items())
        results = []

        def dfs(start, remaining, chosen):
            if remaining == 0:
                if chosen:  # ignore the trivial empty combination
                    results.append(list(chosen))
                return
            for idx in range(start, len(items)):
                name, cents = items[idx]
                if cents <= remaining:
                    chosen.append(name)
                    dfs(idx, remaining - cents, chosen)  # idx: allow repeats
                    chosen.pop()

        dfs(0, target, [])
        return results
"zgao@gwu.edu"
] | zgao@gwu.edu |
f343e24f6f579b9ecca603705a2a764f1b6153c7 | 52c5b78f3afab4573926dd6d0a49e10ee1a77e26 | /myproject/boards/models.py | be05814d747d51aef868e0b17217166717b84237 | [] | no_license | zime-py/eight | d9eefc28a00a8411f3a58b0e931807492bc5bfc2 | 2138b2a8884dea299654ff7c41060c72f183486c | refs/heads/master | 2023-01-11T23:03:53.062441 | 2020-11-14T14:43:04 | 2020-11-14T14:43:04 | 312,831,969 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,017 | py | from django.db import models
class Do(models.Model):
    # NOTE(review): the trailing commented-out ForeignKey suggests ``roll``
    # was meant to reference auth.User — confirm before relying on it.
    name = models.CharField(max_length=100)
    roll = models.CharField(max_length=100) #ForeignKey('auth.user',on_delete=models.CASCADE,)
    def __str__(self):
        return self.name
class Did(models.Model):
    # ``result`` is free text; each Did row belongs to exactly one Do.
    result = models.CharField(max_length=100) #ForeignKey('auth.user',on_delete=models.CASCADE,)
    do = models.ForeignKey('Do',on_delete=models.CASCADE,)
    def __str__(self):
        return self.result
class Done(models.Model):
    # ``year`` is stored as text; each Done row belongs to exactly one Did.
    year = models.CharField(max_length=100) #ForeignKey('auth.user',on_delete=models.CASCADE,)
    did = models.ForeignKey('Did',on_delete=models.CASCADE,)
    def __str__(self):
        return self.year
class Go(models.Model):
    # Join model tying a Do/Did/Done triple to a ``position`` label.
    position = models.CharField(max_length=100)
    do = models.ForeignKey('Do',on_delete=models.CASCADE,)
    did = models.ForeignKey('Did',on_delete=models.CASCADE,)
    done = models.ForeignKey('Done',on_delete=models.CASCADE,)
    def __str__(self):
        return self.position
| [
"mahmudhossain836@gmail.com"
] | mahmudhossain836@gmail.com |
e1b381d9aa05348e59d8373f067baaca3e76ac38 | 951d62ecd155103fa77efaa68fb7611ac4b9af4b | /testtask/views.py | 000f7b29ff9b717ae435c4111fc440ba7ef938da | [] | no_license | darkdkl/stdvor_dev | 352cb46e45d943c3b31d430bd84d684979456d7e | c4b9dc8b691f11cf15dda565214036367141765a | refs/heads/master | 2021-09-26T11:14:57.299457 | 2020-01-15T08:08:31 | 2020-01-15T08:08:31 | 234,028,893 | 0 | 0 | null | 2021-09-22T18:23:23 | 2020-01-15T07:59:51 | Python | UTF-8 | Python | false | false | 2,290 | py | from django.shortcuts import render
from django.http import JsonResponse
from testtask.models import Order, Сontact, Product
from django.views.generic import View
from django.views.decorators.csrf import csrf_exempt
from django.utils.decorators import method_decorator
from django.http import QueryDict
# Create your views here.
# Module-level sample payload; kept for the commented-out class-based views
# below (``index`` shadows it with a local copy).
data = [{'name': 'Test', 'e-mail': 'linux@org.ru'},
        {'name': 'Test2', 'e-mail': 'linux2@org.ru'}]
def index(request):
    """Debug endpoint: dump the query string and return fixture data."""
    print(request.GET)  # NOTE(review): debug leftover — consider logging
    data = [{'name': 'Test', 'e-mail': 'linux@org.ru'},
            {'name': 'Test2', 'e-mail': 'linux2@org.ru'}]
    return JsonResponse(data, safe=False)
def serialize(order):
    """Flatten an Order into the JSON shape the client expects.

    The 'byer' key (sic — presumably 'buyer') is kept as-is: renaming a
    runtime payload key would break API consumers.
    """
    return {
        'order_num': order.number,
        'date_create':order.date,
        'byer': [{ 'name':f'{order.contact.first_name} {order.contact.last_name}',
                   'tel':f'{order.contact.tel_number}',
                   'email':order.contact.email,
                   'address':order.contact.address,
                 }],
        # Total order value: sum of all related product costs.
        'amount':sum([product.cost for product in order.products.all()])
    }
# @method_decorator(csrf_exempt,name='dispatch')
# class ApiView(View):
# http_method_names = ['get', 'post', 'put', 'delete']
# def post(self, *args, **kwargs):
# print(self.request.POST.get('test2'))
# return JsonResponse(data, safe=False)
# def put(self, *args, **kwargs):
# orders = Order.objects.all()
# data = [serialize(order) for order in orders]
# return JsonResponse(data, safe=False)
# def delete(self, *args, **kwargs):
# # print(self.request.POST.get('_method') )
# get = QueryDict(self.request.body)
# print(get.dict())
# return JsonResponse(data, safe=False)
@csrf_exempt
def get_api(request, pk=None):
    """Return all orders serialized as JSON for GET and PUT requests.

    Fixes: the ``@csrf_exempt`` decorator was applied twice (redundant),
    and the PUT branch constructed ``QueryDict(request)`` from the
    request *object* instead of its body — the commented-out
    ``ApiView.delete`` above shows the intended ``request.body`` usage.

    :param request: the incoming HttpRequest
    :param pk: unused; kept for URLconf compatibility
    """
    if request.method == "GET":
        orders = Order.objects.all()
        data = [serialize(order) for order in orders]
        return JsonResponse(data, safe=False)
    if request.method == "PUT":
        params = QueryDict(request.body)  # parse the urlencoded payload
        print(params.values())  # NOTE(review): debug leftover
        orders = Order.objects.all()
        data = [serialize(order) for order in orders]
        return JsonResponse(data, safe=False)
| [
"dark.dmake@gmail.com"
] | dark.dmake@gmail.com |
2c600cd8520114fcf732b05492d70efb49e64f23 | 58af092b07edb8d34d8d03886d6bd56d5c34af42 | /english_munchers_dj/telegram_bot/migrations/0001_initial.py | 366e348ec9e185f732a5237448ed2d8c76a0051d | [] | no_license | Vido/freela-em | ced9f79cecc247bd1a42aae744a32155f07d9124 | 78d9ea732d6d869af5bdf78666e41078d7777e0b | refs/heads/master | 2022-12-08T19:27:21.186061 | 2019-03-04T21:54:29 | 2019-03-04T21:54:29 | 110,721,168 | 0 | 6 | null | 2022-12-07T23:49:35 | 2017-11-14T17:15:46 | JavaScript | UTF-8 | Python | false | false | 620 | py | # Generated by Django 2.0 on 2017-12-11 14:40
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration: create the UpdateResponse table that stores raw
    Telegram update payloads (JSONB) keyed to a class request id."""
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='UpdateResponse',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('class_request_id', models.IntegerField()),
                ('update_dict', django.contrib.postgres.fields.jsonb.JSONField()),
            ],
        ),
    ]
| [
"vido@usp.br"
] | vido@usp.br |
c910db3bd9b777a4af8a0435d71e2fe3a8998987 | be02fd6adb789e8b5f5c8f77b2635b71b1b24a52 | /prob.py | 7e69c9913f33a9eba9130d35476f74ca184f195b | [] | no_license | pavlin-policar/rosalind | 05cd66bec512e7b3ca414effd00e4d1b4ffd563a | d9c8b2ab20e950ef543964fc5e1c47bbf21b8362 | refs/heads/master | 2021-11-25T08:20:28.083661 | 2021-11-07T17:28:50 | 2021-11-07T17:30:50 | 71,159,103 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 362 | py | import operator
from functools import reduce
from math import log10
if __name__ == '__main__':
    # Rosalind PROB: for each GC content, print log10 of the probability
    # of the given DNA string.
    sequence = input()
    probabilities = map(float, input().split())
    # Precompute which positions are G/C once for all GC contents.
    is_gc = [base in ('C', 'G') for base in sequence]

    def log10_prob(gc):
        # Each G/C occurs with probability gc/2, each A/T with (1-gc)/2.
        # Summing per-character logs instead of taking log10(product)
        # avoids float underflow on long sequences (the product of many
        # sub-1 probabilities rounds to 0.0, making log10 blow up).
        log_gc = log10(gc / 2)
        log_at = log10((1 - gc) / 2)
        return sum(log_gc if flag else log_at for flag in is_gc)

    print(*[log10_prob(p) for p in probabilities])
| [
"pavlin.g.p@gmail.com"
] | pavlin.g.p@gmail.com |
d98628b7b3a81ff0fe9d3ae51d115f5d3ded0262 | 6444622ad4a150993955a0c8fe260bae1af7f8ce | /djangoenv/bin/python-config | f60d09bc69d85bc162d02850a5fd49608851224d | [] | no_license | jeremyrich/Lesson_RestAPI_jeremy | ca965ef017c53f919c0bf97a4a23841818e246f9 | a44263e45b1cc1ba812059f6984c0f5be25cd234 | refs/heads/master | 2020-04-25T23:13:47.237188 | 2019-03-22T09:26:58 | 2019-03-22T09:26:58 | 173,138,073 | 0 | 0 | null | 2019-03-22T09:26:59 | 2019-02-28T15:34:19 | Python | UTF-8 | Python | false | false | 2,403 | #!/home/mymy/Desktop/Python_agility/cours/Hugo/Lessons_RestAPI/Lesson_RestAPI/djangoenv/bin/python
import sys
import getopt
import sysconfig
# Command-line options understood by this python-config clone; newer
# interpreter versions add extra introspection flags.
valid_opts = ['prefix', 'exec-prefix', 'includes', 'libs', 'cflags',
              'ldflags', 'help']
if sys.version_info >= (3, 2):
    valid_opts.insert(-1, 'extension-suffix')
    valid_opts.append('abiflags')
if sys.version_info >= (3, 3):
    valid_opts.append('configdir')
def exit_with_usage(code=1):
    """Print the usage line to stderr and exit with ``code``."""
    sys.stderr.write("Usage: {0} [{1}]\n".format(
        sys.argv[0], '|'.join('--'+opt for opt in valid_opts)))
    sys.exit(code)
# Parse the long options; any unknown flag (or no flag at all) is a usage
# error.
try:
    opts, args = getopt.getopt(sys.argv[1:], '', valid_opts)
except getopt.error:
    exit_with_usage()
if not opts:
    exit_with_usage()
pyver = sysconfig.get_config_var('VERSION')
getvar = sysconfig.get_config_var
opt_flags = [flag for (flag, val) in opts]
if '--help' in opt_flags:
    exit_with_usage(code=0)
# Each requested flag prints one line, in the order given on the command
# line (mirrors CPython's Misc/python-config.in).
for opt in opt_flags:
    if opt == '--prefix':
        print(sysconfig.get_config_var('prefix'))
    elif opt == '--exec-prefix':
        print(sysconfig.get_config_var('exec_prefix'))
    elif opt in ('--includes', '--cflags'):
        flags = ['-I' + sysconfig.get_path('include'),
                 '-I' + sysconfig.get_path('platinclude')]
        if opt == '--cflags':
            flags.extend(getvar('CFLAGS').split())
        print(' '.join(flags))
    elif opt in ('--libs', '--ldflags'):
        abiflags = getattr(sys, 'abiflags', '')
        libs = ['-lpython' + pyver + abiflags]
        libs += getvar('LIBS').split()
        libs += getvar('SYSLIBS').split()
        # add the prefix/lib/pythonX.Y/config dir, but only if there is no
        # shared library in prefix/lib/.
        if opt == '--ldflags':
            if not getvar('Py_ENABLE_SHARED'):
                libs.insert(0, '-L' + getvar('LIBPL'))
            if not getvar('PYTHONFRAMEWORK'):
                libs.extend(getvar('LINKFORSHARED').split())
        print(' '.join(libs))
    elif opt == '--extension-suffix':
        # EXT_SUFFIX replaced the legacy SO config variable; fall back for
        # older interpreters.
        ext_suffix = sysconfig.get_config_var('EXT_SUFFIX')
        if ext_suffix is None:
            ext_suffix = sysconfig.get_config_var('SO')
        print(ext_suffix)
    elif opt == '--abiflags':
        if not getattr(sys, 'abiflags', None):
            exit_with_usage()
        print(sys.abiflags)
    elif opt == '--configdir':
        print(sysconfig.get_config_var('LIBPL'))
| [
"jeremyrich@free.fr"
] | jeremyrich@free.fr | |
cae0e6ea55aba796f6f8be2d75ee40d5756d5a32 | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /sLb2Fs6aGRQBYAXqQ_11.py | 87b06555ae8f879dd67c342359ad561108009eb4 | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 687 | py | """
Given a Rubik's Cube with a side length of `n`, return the number of
individual stickers that are needed to cover the whole cube.

* The Rubik's cube of side length `1` has **6 stickers**.
* The Rubik's cube of side length `2` has **24 stickers**.
* The Rubik's cube of side length `3` has **54 stickers**.
### Examples
how_many_stickers(1) ➞ 6
how_many_stickers(2) ➞ 24
how_many_stickers(3) ➞ 54
### Notes
* Keep in mind there are six faces to keep track of.
* Expect only positive whole numbers.
"""
def how_many_stickers(n):
return 6*pow(n,2)
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
cdbc0a4ed1d136f416ba29f80d6ac70a7f07cf3b | 5b71e2952f34dd3bb20148874d952fee06d31857 | /app/mf/crud/migrations/0068_auto_20210130_1317.py | dd034f797a90346ddce19c87f0439ee92d564331 | [] | no_license | isela1998/facebook | a937917cddb9ef043dd6014efc44d59d034102b1 | a0f2f146eb602b45c951995a5cb44409426250c5 | refs/heads/master | 2023-07-18T02:14:50.293774 | 2021-08-28T03:26:06 | 2021-08-28T03:26:06 | 400,613,743 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 756 | py | # Generated by Django 3.1.1 on 2021-01-30 17:47
from django.db import migrations, models
class Migration(migrations.Migration):
    """Replace Sale.type_exchange with two explicit exchange-rate fields
    (exchange1/exchange2), both defaulting to 0.00."""
    dependencies = [
        ('crud', '0067_auto_20210130_1104'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='sale',
            name='type_exchange',
        ),
        migrations.AddField(
            model_name='sale',
            name='exchange1',
            field=models.DecimalField(decimal_places=2, default=0.0, max_digits=30, verbose_name='Cambio'),
        ),
        migrations.AddField(
            model_name='sale',
            name='exchange2',
            field=models.DecimalField(decimal_places=2, default=0.0, max_digits=30, verbose_name='Cambio'),
        ),
    ]
| [
"infantefernandezisela@gmail.com"
] | infantefernandezisela@gmail.com |
a04fd2d3b518817724d9cab376f37c2b71f9a3be | 6c40a17cee8777dbf4e0b6d85e624eacefd67a69 | /ez2pay/models/permission.py | 4d79a47c58bd77bd06ab42d129503eadcdf61164 | [
"MIT"
] | permissive | fangpenlin/ez2pay | ae5125c8c05bad0178d7c8bb0f0c256489e0127a | 13ce4782d3c673a0cb07003a826a10bdfbe6a9ad | refs/heads/master | 2021-05-28T06:01:56.627261 | 2013-12-03T01:46:23 | 2013-12-03T01:46:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,678 | py | from __future__ import unicode_literals
from . import tables
from .base import BaseTableModel
class PermissionModel(BaseTableModel):
    """Permission data model
    """
    TABLE = tables.Permission

    def get_by_name(self, permission_name):
        """Get a permission by name
        """
        permission = (
            self.session
            .query(tables.Permission)
            .filter_by(permission_name=permission_name)
            .first()
        )
        return permission

    def create(
        self,
        permission_name,
        description=None,
    ):
        """Create a new permission and return its id
        """
        # ``unicode`` coercion: this module targets Python 2 (see the
        # ``unicode_literals`` import at the top of the file).
        permission = tables.Permission(
            permission_name=unicode(permission_name),
            description=unicode(description) if description is not None else None,
        )
        self.session.add(permission)
        # flush the change, so we can get real id
        self.session.flush()
        assert permission.permission_id is not None, \
            'Permission id should not be none here'
        permission_id = permission.permission_id
        self.logger.info('Create permission %s', permission_name)
        return permission_id

    def update_permission(self, permission_id, **kwargs):
        """Update attributes of a permission
        """
        # raise_error=True makes ``get`` fail loudly on an unknown id.
        permission = self.get(permission_id, raise_error=True)
        if 'description' in kwargs:
            permission.description = kwargs['description']
        if 'permission_name' in kwargs:
            permission.permission_name = kwargs['permission_name']
        self.session.add(permission)
"bornstub@gmail.com"
] | bornstub@gmail.com |
0b5493b2e903ebb798213959fd1101e12390742e | 9f5557fd6a1d809e7026e23d58f3da57a0c0cbcc | /vega/service/user_service.py | 82ae4a5a0971b198ec36c76d8d8a0e8f7daf8b9d | [] | no_license | biao111/learn_python2 | e5150b7bb7cdd0166330ff159d83a809ca9d81d7 | 60f8fc1f7da8ae22dae2314b55dbe669b404d95a | refs/heads/master | 2023-01-07T16:56:42.123504 | 2020-11-08T05:53:10 | 2020-11-08T05:53:10 | 310,993,379 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,230 | py | from db.user_dao import UserDao
class UserService:
    """Service layer delegating user operations to UserDao.

    NOTE(review): ``__user_dao`` is a class-level attribute, so all
    UserService instances share one DAO — confirm this is intended.
    """
    __user_dao = UserDao()

    # Verify a user's login credentials.
    def login(self,username,password):
        result = self.__user_dao.login(username,password)
        return result

    # Look up the role of a user.
    def search_user_role(self,username):
        role = self.__user_dao.search_user_role(username)
        return role

    # Add a new user.
    def insert_user(self, username, password, email, role_id):
        self.__user_dao.insert_user(username, password, email, role_id)

    # Fetch one page of the user list.
    def search_list(self, page):
        result = self.__user_dao.search_list(page)
        return result

    # Total number of pages of users.
    def search_count_page(self):
        count_page = self.__user_dao.search_count_page()
        return count_page

    # Update a user's details.
    def update_user(self,username,password,email,role_id,id):
        self.__user_dao.update_user(username,password,email,role_id,id)

    # Delete a user by primary key.
    def delete_by_id(self, id):
        self.__user_dao.delete_by_id(id)

    # Look up a user's id by username.
    def search_userid(self,username):
        userid = self.__user_dao.search_userid(username)
        return userid
| [
"18211149974@163.com"
] | 18211149974@163.com |
bce6b7949363087074d3daaff106c143744040a5 | aa81ba4d6ae20dee412acb24b5ee0eccb502767f | /venv/bin/jwk_create.py | 35c37f0a49523cc2709765191324b825386ec5fa | [] | no_license | CarlosGonzalezLuzardo/SECAS | 32c3e0b9c176333d2c20b7b3fed3adc9de8c0216 | 4455de4eb61fb4bddf6cfa8a4ce9e5f9f8e9d812 | refs/heads/master | 2020-03-14T11:11:33.922067 | 2018-06-14T10:54:14 | 2018-06-14T10:54:14 | 131,585,370 | 0 | 2 | null | 2018-05-08T13:51:43 | 2018-04-30T11:00:45 | Python | UTF-8 | Python | false | false | 1,300 | py | #!/home/alejandro/Proyectos/SECAS/Internals/derimanfranco/py-multifactor/venv/bin/python
import json
from Cryptodome.PublicKey import RSA
import argparse
import os
from jwkest.jwk import RSAKey
__author__ = 'rolandh'
def create_and_store_rsa_key_pair(name="pyoidc", path=".", size=1024):
    """Generate an RSA key pair and store it on disk in three files.

    Writes ``<path>/<name>.key`` (PEM private key), ``<path>/<name>.pub``
    (PEM public key) and ``<path>/<name>.jwk`` (public key serialized as
    a JWK).

    :param name: base file name for the three output files
    :param path: directory in which the files are created
    :param size: RSA modulus size in bits
    :return: the generated ``RSA`` key object
    """
    key = RSA.generate(size)
    keyfile = os.path.join(path, name)

    # exportKey() returns bytes, so the PEM files must be written in
    # binary mode (text mode raises TypeError under Python 3).
    with open("%s.key" % keyfile, "wb") as f:
        f.write(key.exportKey("PEM"))
    with open("%s.pub" % keyfile, "wb") as f:
        f.write(key.publickey().exportKey("PEM"))

    rsa_key = RSAKey(key=key)
    rsa_key.serialize()
    # This will create JWK from the public RSA key.  (The old second
    # positional argument "enc" was being passed as json.dumps' skipkeys
    # flag, which was not intended.)
    jwk_spec = json.dumps(rsa_key.to_dict())
    with open(keyfile + ".jwk", "w") as f:
        f.write(jwk_spec)

    return key
if __name__ == "__main__":
    # Command-line front end: parse the output name/path/size and generate.
    parser = argparse.ArgumentParser()
    parser.add_argument("-n", dest="name", default="pyoidc",
                        help="file names")
    parser.add_argument("-p", dest="path", default=".",
                        help="Path to the directory for the files")
    parser.add_argument("-s", dest="size", default=1024, type=int,
                        help="Key size")
    options = parser.parse_args()
    create_and_store_rsa_key_pair(options.name, options.path, options.size)
| [
"carlos.gonzalez@edosoft.es"
] | carlos.gonzalez@edosoft.es |
1f7e38df13d990b7710695bc820c7e9bb278fe64 | a554605ff97c7b688f457a8493d521d2c54101a3 | /scripts/ratatosk_run.py | 83c4c4149ac0757140b8531bb94696f183e417ec | [
"Apache-2.0"
] | permissive | SciLifeLab/ratatosk | 9c0c9b15cc0bf1c515bb5144f38ada3dd02e9610 | 4e9c9d8dc868b19a7c70eb7b326422c87bc3d7c0 | refs/heads/master | 2020-12-25T09:57:52.696398 | 2013-03-25T13:42:36 | 2013-03-25T13:42:36 | 8,794,985 | 0 | 0 | null | 2013-03-25T13:43:17 | 2013-03-15T08:36:17 | Python | UTF-8 | Python | false | false | 850 | py | import luigi
import os
import sys
import ratatosk.lib.align.bwa
import ratatosk.lib.tools.gatk
import ratatosk.lib.tools.samtools
import ratatosk.lib.tools.picard
from ratatosk.pipeline.haloplex import HaloPlex
from ratatosk.pipeline.align import AlignSeqcap
from ratatosk.pipeline import config_dict
if __name__ == "__main__":
    # First CLI argument selects a predefined pipeline; anything else is
    # passed straight through to luigi.
    task = sys.argv[1] if len(sys.argv) > 1 else None
    if task == "HaloPlex":
        args = sys.argv[2:] + ['--config-file', config_dict['haloplex']]
        luigi.run(args, main_task_cls=HaloPlex)
    elif task == "AlignSeqcap":
        args = sys.argv[2:] + ['--config-file', config_dict['seqcap']]
        luigi.run(args, main_task_cls=AlignSeqcap)
    else:
        # Whatever other task/config the user wants to run
        luigi.run()
| [
"per.unneberg@scilifelab.se"
] | per.unneberg@scilifelab.se |
1fbde5a70e2caec3b5424736dc0badecdc100998 | 12ec7d731a465e43ad211235882e2939cc5c031d | /bills/views.py | b359ced2a2b590bdfc286341744e1b28bc9da6a3 | [] | no_license | munikarmanish/merojob-bill | 770101e3c42be46569f26037d5e012065aa94392 | 882dd2aeafd1030f38d2d679268607b48c6a84aa | refs/heads/master | 2021-01-19T05:41:44.926766 | 2017-01-15T05:27:09 | 2017-01-15T05:27:09 | 78,848,703 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,658 | py | from django.contrib.auth.mixins import LoginRequiredMixin
from django.urls import reverse_lazy
from django.views import generic
from clients.models import Client
from .forms import BillForm
from .models import Bill
class CreateView(LoginRequiredMixin, generic.edit.CreateView):
    """Form view for adding a new Bill; requires an authenticated user."""

    template_name = 'bills/bill_form.html'
    model = Bill
    form_class = BillForm
    success_url = reverse_lazy('bills:list')

    def get_context_data(self, **kwargs):
        """Inject the page title consumed by the shared form template."""
        context = super(CreateView, self).get_context_data(**kwargs)
        context.update(page_title='Add bill')
        return context

    def get_initial(self):
        """Pre-select the client when a ``client_id`` query parameter is given."""
        initial = super(CreateView, self).get_initial()
        client_id = self.request.GET.get('client_id')
        if not client_id:
            return initial
        try:
            initial['client'] = Client.objects.get(id=client_id)
        except Client.DoesNotExist:
            # Unknown id: fall back to an unselected client field.
            pass
        return initial
class UpdateView(LoginRequiredMixin, generic.edit.UpdateView):
    """Form view for editing an existing Bill; requires an authenticated user."""

    template_name = 'bills/bill_form.html'
    model = Bill
    form_class = BillForm
    success_url = reverse_lazy('bills:list')

    def get_context_data(self, **kwargs):
        """Inject the page title consumed by the shared form template."""
        context = super(UpdateView, self).get_context_data(**kwargs)
        context.update(page_title='Edit bill')
        return context
class DeleteView(LoginRequiredMixin, generic.edit.DeleteView):
    """Delete a bill (after confirmation) and redirect to the bill list."""
    model = Bill
    success_url = reverse_lazy('bills:list')
class ListView(generic.ListView):
    """Public listing of all bills, newest first."""

    template_name = 'bills/bill_list.html'
    context_object_name = 'bills'

    def get_queryset(self):
        """Return every bill ordered by creation time, most recent first."""
        return Bill.objects.all().order_by('-created')
| [
"munikarmanish@gmail.com"
] | munikarmanish@gmail.com |
8d4f206ec3a0026c9b1da9c8a234299dde23340e | fb1ea456040a36037c3be87ffdc51dc3d8aaa7bb | /setup.py | 3e2e04ec60eb274e3400eb0d1b155cd61204cfe3 | [
"MIT"
] | permissive | rpatterson/python-main-wrapper | badfb894afe7980afb261bda9d3ce84af39e8a10 | eb549cee920bf144c4021632f7784b7d425b6c40 | refs/heads/master | 2023-09-05T04:26:53.052777 | 2021-09-21T18:45:00 | 2021-09-21T18:45:00 | 259,238,110 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,905 | py | """
main wrapper foundation or template, distribution/package metadata.
"""
import setuptools
with open("README.rst", "r") as readme:
LONG_DESCRIPTION = readme.read()
tests_require = ["six", 'contextlib2;python_version<"3"']
setuptools.setup(
name="main-wrapper",
author="Ross Patterson",
author_email="me@rpatterson.net",
description=(
"Set up global environment and run another script within, "
"ala pdb, profile, etc.."
),
long_description=LONG_DESCRIPTION,
long_description_content_type="text/x-rst",
url="https://github.com/rpatterson/python-main-wrapper",
license="MIT",
classifiers=[
"Development Status :: 4 - Beta",
"Environment :: Console",
"Intended Audience :: Developers",
"Intended Audience :: System Administrators",
"License :: OSI Approved :: MIT License",
"Natural Language :: English",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3",
"Topic :: Utilities",
],
python_requires=">=2.7",
packages=setuptools.find_packages("src"),
package_dir={"": "src"},
use_scm_version=dict(
write_to="src/mainwrapper/version.py",
local_scheme="no-local-version",
),
setup_requires=[
'setuptools_scm;python_version>="3"',
# BBB: Python 2.7 compatibility
'setuptools_scm<6;python_version<"3"',
],
install_requires=["six", 'pathlib2;python_version<"3"'],
tests_require=tests_require,
extras_require=dict(
dev=tests_require
+ [
"pytest",
"pre-commit",
"coverage",
"flake8",
"autoflake",
"autopep8",
'flake8-black;python_version>="3"',
"rstcheck",
]
),
entry_points=dict(console_scripts=["python-main-wrapper=mainwrapper:main"]),
)
| [
"me@rpatterson.net"
] | me@rpatterson.net |
f03ceb97b4ae1e32b9355c096ce23972956537e2 | 1a166165ab8287d01cbb377a13efdb5eff5dfef0 | /sdk/communication/azure-communication-sms/samples/send_sms_to_multiple_recipients_sample.py | 6254d388103c13c07088107f6e0a113ee9601b20 | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | manoj0806/azure-sdk-for-python | 7a14b202ff80f528abd068bf50334e91001a9686 | aab999792db1132232b2f297c76800590a901142 | refs/heads/master | 2023-04-19T16:11:31.984930 | 2021-04-29T23:19:49 | 2021-04-29T23:19:49 | 363,025,016 | 1 | 0 | MIT | 2021-04-30T04:23:35 | 2021-04-30T04:23:35 | null | UTF-8 | Python | false | false | 2,093 | py | # coding: utf-8
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""
FILE: send_sms_to_multiple_recipients_sample.py
DESCRIPTION:
This sample demonstrates sending an SMS message to multiple recipients. The SMS client is
authenticated using a connection string.
USAGE:
python send_sms_to_multiple_recipients_sample.py
Set the environment variable with your own value before running the sample:
1) AZURE_COMMUNICATION_SERVICE_CONNECTION_STRING - the connection string in your ACS account
"""
import os
import sys
from azure.communication.sms import SmsClient
sys.path.append("..")
class SmsMultipleRecipientsSample(object):
    """Sample that sends one SMS message to several recipients at once."""

    # Pulled from the environment; None if the variables are unset.
    connection_string = os.getenv("AZURE_COMMUNICATION_SERVICE_CONNECTION_STRING")
    phone_number = os.getenv("AZURE_COMMUNICATION_SERVICE_PHONE_NUMBER")

    def send_sms_to_multiple_recipients(self):
        """Send the message and print the outcome for every recipient."""
        sms_client = SmsClient.from_connection_string(self.connection_string)
        # calling send() with sms values
        responses = sms_client.send(
            from_=self.phone_number,
            to=[self.phone_number, self.phone_number],
            message="Hello World via SMS",
            enable_delivery_report=True,  # optional property
            tag="custom-tag",  # optional property
        )
        for response in responses:
            if not response.successful:
                print("Message failed to send to {} with the status code {} and error: {}"
                      .format(response.to, response.http_status_code, response.error_message))
            else:
                print("Message with message id {} was successful sent to {}"
                      .format(response.message_id, response.to))
if __name__ == '__main__':
    # Run the sample when invoked as a script.
    SmsMultipleRecipientsSample().send_sms_to_multiple_recipients()
| [
"noreply@github.com"
] | manoj0806.noreply@github.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.