blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
78f8b5c22b9ff3f3ef52fe996a14a3184da876c5
|
bd1b1fda138e6687dadc57317c3e312bc8872600
|
/mycode/lintcode/Binary Tree & Divide Conquer/69 Binary Tree Level Order Traversal.py
|
773f44f7eaff2c8f8e55118bc84c3419688ff279
|
[] |
no_license
|
dundunmao/lint_leet
|
fc185038f57e0c5cbb82a74cebd4fe00422416cb
|
5788bd7b154649d2f787bbc4feb717ff2f4b4c59
|
refs/heads/master
| 2020-11-30T04:56:25.553327
| 2017-10-22T07:11:01
| 2017-10-22T07:11:01
| 96,705,212
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,696
|
py
|
# -*- encoding: utf-8 -*-
# 给出一棵二叉树,返回其节点值的层次遍历(逐层从左往右访问)
#
# 您在真实的面试中是否遇到过这个题? Yes
# 样例
# 给一棵二叉树 {3,9,20,#,#,15,7} :
#
# 3
# / \
# 9 20
# / \
# 15 7
# 返回他的分层遍历结果:
#
# [
# [3],
# [9,20],
# [15,7]
# ]
class TreeNode:
    """A binary-tree node holding a value and optional left/right children."""

    def __init__(self, val):
        self.val = val
        self.left = None
        self.right = None
class Solution:
    """
    @param root: The root of binary tree.
    @return: Level order in a list of lists of integers
    """
    def levelOrder(self, root):
        """Return the level-order (BFS) traversal of the tree as a list of
        per-level lists of node values.

        Uses collections.deque so each dequeue is O(1); the original version
        used list.pop(0), which is O(n) per dequeue (O(n^2) overall).
        """
        from collections import deque  # local import keeps the method self-contained
        if root is None:
            return []
        result = []
        frontier = deque([root])
        while frontier:
            level_vals = []
            # Snapshot the current level size; children appended below
            # belong to the next level and are not consumed in this pass.
            for _ in range(len(frontier)):
                node = frontier.popleft()
                level_vals.append(node.val)
                if node.left:
                    frontier.append(node.left)
                if node.right:
                    frontier.append(node.right)
            result.append(level_vals)
        return result
from Queue import Queue
class Solution2:
    """
    @param root: The root of binary tree.
    @return: Level order in a list of lists of integers
    """
    def levelOrder(self, root):
        """BFS level-order traversal backed by a Queue, one level per pass."""
        if root is None:
            return []
        levels = []
        pending = Queue()
        pending.put(root)
        # NOTE: a Queue object is always truthy, so the loop condition must
        # test qsize() rather than the queue itself.
        while pending.qsize() > 0:
            width = pending.qsize()
            current = []
            for _ in range(width):
                node = pending.get()
                current.append(node.val)
                if node.left:
                    pending.put(node.left)
                if node.right:
                    pending.put(node.right)
            levels.append(current)
        return levels
if __name__ == '__main__':
    # TREE 1 -- construct the following tree:
    #                1
    #              /   \
    #            2      3
    #          /   \
    #        4       5
    #              /   \
    #             6      7
    #                     \
    #                      8
    P = TreeNode(1)
    P.left = TreeNode(2)
    P.left.left = TreeNode(4)
    P.left.right = TreeNode(5)
    P.left.right.left = TreeNode(6)
    P.left.right.right = TreeNode(7)
    P.left.right.right.right = TreeNode(8)
    P.right = TreeNode(3)
    s = Solution2()
    # print() works on both Python 2 and 3; the original bare
    # `print s.levelOrder(P)` statement is a SyntaxError on Python 3.
    print(s.levelOrder(P))
|
[
"dundunmao@gmail.com"
] |
dundunmao@gmail.com
|
ce047380b982dcab9cf772b0ee014ca21ac67c17
|
774353c913eb170ec15ca881cd2bae43121b99e1
|
/58135918-give-grayscale-image-color/give_grayscale_image_color.py
|
d81a90bd165f1d11361546301466835bf984d0f2
|
[
"MIT"
] |
permissive
|
nathancy/stackoverflow
|
b83bdca4f44fd523259b551301a7371e03fb8493
|
ed5a00319ad3a2c7631825e17963c392aee5a103
|
refs/heads/master
| 2022-05-19T15:09:37.883623
| 2022-05-14T20:21:19
| 2022-05-14T20:21:19
| 175,527,064
| 4
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 326
|
py
|
# Give a grayscale-looking image a colour tint by scaling each BGR channel
# independently (OpenCV loads images in B, G, R channel order).
import cv2
import numpy as np
before = cv2.imread('2.png')
# Split into three single-channel uint8 arrays so each can be scaled alone.
b, g, r = cv2.split(before)
# casting="unsafe" allows the float products to be written back into the
# uint8 arrays in place; values are truncated on the C-level cast rather
# than saturated -- NOTE(review): confirm wrap-around on overflow is intended.
np.multiply(b, 1.5, out=b, casting="unsafe")
np.multiply(g, .75, out=g, casting="unsafe")
np.multiply(r, 1.25, out=r, casting="unsafe")
# Recombine the scaled channels; `before` is unaffected since split() copies.
after = cv2.merge([b, g, r])
cv2.imshow('before', before)
cv2.imshow('after', after)
cv2.waitKey()
|
[
"nathancy@hawaii.edu"
] |
nathancy@hawaii.edu
|
026fccd199ba8df764511f787fe16bf0d38b5c75
|
9068f861ce5ee8908866b0da94dc375fbec1bfa3
|
/manage.py
|
93e1e652793e954f073805e16f5b57d0a01cd0e8
|
[] |
no_license
|
nanfengpo/flask_flasky7
|
b1e421e4a64284aabf42a1f6c559863068a13e45
|
170a19b3bd0bdb59a7ddaee62f49a74fa19c1657
|
refs/heads/master
| 2021-01-02T08:11:27.315798
| 2017-08-03T09:13:35
| 2017-08-03T09:13:35
| 98,955,977
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 303
|
py
|
#!/usr/bin/env python
# Command-line entry point for the Flask application: exposes Flask-Script
# management commands, including Flask-Migrate's `db` migration commands
# (e.g. `python manage.py db migrate` / `db upgrade`).
from app import create_app, db
from flask_script import Manager
from flask_migrate import Migrate, MigrateCommand
# Build the application from the 'default' configuration profile.
app = create_app('default')
manager = Manager(app)
migrate = Migrate(app, db)
# Register the `db` sub-command group provided by Flask-Migrate.
manager.add_command('db', MigrateCommand)
if __name__ == '__main__':
    manager.run()
|
[
"lsxxxxxx@126.com"
] |
lsxxxxxx@126.com
|
5aa485fdb364c75c6362321c665b13828904d5fc
|
328afd873e3e4fe213c0fb4ce6621cb1a450f33d
|
/GeeksforGeeks/insertionsort.py
|
0bbde62db8c7da07f2e9b7c847cde885a6d3d3e5
|
[] |
no_license
|
TorpidCoder/Python
|
810371d1bf33c137c025344b8d736044bea0e9f5
|
9c46e1de1a2926e872eee570e6d49f07dd533956
|
refs/heads/master
| 2021-07-04T08:21:43.950665
| 2020-08-19T18:14:09
| 2020-08-19T18:14:09
| 148,430,171
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 346
|
py
|
def insertion(arr):
    """Sort `arr` in place with insertion sort and return it.

    The original implementation located each element with `arr.index(vals)`,
    which returns the FIRST occurrence of the value -- so lists containing
    duplicates were sorted incorrectly (and every lookup cost O(n)).
    Iterating by position fixes both problems.

    Args:
        arr: list of mutually comparable items (may be empty).
    Returns:
        The same list object, sorted in ascending order.
    """
    for i in range(1, len(arr)):
        j = i
        # Bubble arr[j] left until the prefix arr[:j+1] is ordered.
        while j > 0 and arr[j - 1] > arr[j]:
            arr[j - 1], arr[j] = arr[j], arr[j - 1]
            j -= 1
    return arr
# Demo: sort a sample list in place and print the result.
arr = [1,12,3,14,6,7]
print(insertion(arr))
|
[
"sahilexemplary@gmail.com"
] |
sahilexemplary@gmail.com
|
9214fcf06416f698b932ed0b9d0c76ea4f1e7d85
|
64643d3f814c2eb30dd2f86850980f48ac1486ba
|
/spektral/layers/convolutional/gin.py
|
79a988972dc60a5c0d95f8e8b989a4d19b6a29c7
|
[
"MIT"
] |
permissive
|
Prashant118/spektral
|
275e550baf08a2bd5354e8fefdf60a6a686d0af0
|
dbf769b0ad47318f354a2de40a87ed8893d9b2fe
|
refs/heads/master
| 2022-04-23T10:37:58.084038
| 2020-04-21T10:05:53
| 2020-04-21T10:05:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,466
|
py
|
import tensorflow as tf
from tensorflow.keras import activations, backend as K
from tensorflow.keras.layers import Dense
from tensorflow.keras.models import Sequential
from spektral.layers import ops
from spektral.layers.convolutional.gcn import GraphConv
class GINConv(GraphConv):
    r"""
    A Graph Isomorphism Network (GIN) as presented by
    [Xu et al. (2018)](https://arxiv.org/abs/1810.00826).
    **Mode**: single.
    **This layer expects sparse inputs.**
    This layer computes for each node \(i\):
    $$
    \Z_i = \textrm{MLP}\big( (1 + \epsilon) \cdot \X_i + \sum\limits_{j \in \mathcal{N}(i)} \X_j \big)
    $$
    where \(\textrm{MLP}\) is a multi-layer perceptron.
    **Input**
    - Node features of shape `([batch], N, F)`;
    - Binary adjacency matrix of shape `([batch], N, N)`.
    **Output**
    - Node features with the same shape of the input, but the last dimension
    changed to `channels`.
    **Arguments**
    - `channels`: integer, number of output channels;
    - `epsilon`: unnamed parameter, see
    [Xu et al. (2018)](https://arxiv.org/abs/1810.00826), and the equation above.
    This parameter can be learned by setting `epsilon=None`, or it can be set
    to a constant value, which is what happens by default (0). In practice, it
    is safe to leave it to 0.
    - `mlp_hidden`: list of integers, number of hidden units for each hidden
    layer in the MLP (if None, the MLP has only the output layer);
    - `mlp_activation`: activation for the MLP layers;
    - `activation`: activation function to use;
    - `use_bias`: whether to add a bias to the linear transformation;
    - `kernel_initializer`: initializer for the kernel matrix;
    - `bias_initializer`: initializer for the bias vector;
    - `kernel_regularizer`: regularization applied to the kernel matrix;
    - `bias_regularizer`: regularization applied to the bias vector;
    - `activity_regularizer`: regularization applied to the output;
    - `kernel_constraint`: constraint applied to the kernel matrix;
    - `bias_constraint`: constraint applied to the bias vector.
    """
    def __init__(self,
                 channels,
                 epsilon=None,
                 mlp_hidden=None,
                 mlp_activation='relu',
                 activation=None,
                 use_bias=True,
                 kernel_initializer='glorot_uniform',
                 bias_initializer='zeros',
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 **kwargs):
        super().__init__(channels,
                         activation=activation,
                         use_bias=use_bias,
                         kernel_initializer=kernel_initializer,
                         bias_initializer=bias_initializer,
                         kernel_regularizer=kernel_regularizer,
                         bias_regularizer=bias_regularizer,
                         activity_regularizer=activity_regularizer,
                         kernel_constraint=kernel_constraint,
                         bias_constraint=bias_constraint,
                         **kwargs)
        # Keep the raw constructor values; `build` decides whether epsilon
        # becomes a trainable weight (None) or a fixed constant.
        self.epsilon = epsilon
        self.mlp_hidden = mlp_hidden if mlp_hidden else []
        self.mlp_activation = activations.get(mlp_activation)
    def build(self, input_shape):
        # input_shape holds [node-features shape, adjacency shape].
        assert len(input_shape) >= 2
        # Shared initializer/regularizer/constraint configuration reused by
        # every Dense layer of the MLP.
        layer_kwargs = dict(
            kernel_initializer=self.kernel_initializer,
            bias_initializer=self.bias_initializer,
            kernel_regularizer=self.kernel_regularizer,
            bias_regularizer=self.bias_regularizer,
            kernel_constraint=self.kernel_constraint,
            bias_constraint=self.bias_constraint
        )
        # Hidden layers (possibly none), then the output projection to
        # `self.channels` with the layer's own activation.
        mlp_layers = []
        for i, channels in enumerate(self.mlp_hidden):
            mlp_layers.append(Dense(channels, self.mlp_activation, **layer_kwargs))
        mlp_layers.append(
            Dense(self.channels, self.activation, **layer_kwargs)
        )
        self.mlp = Sequential(mlp_layers)
        # Parameter for propagating features
        if self.epsilon is None:
            # Learnable scalar epsilon (initialised like a bias).
            self.eps = self.add_weight(shape=(1,),
                                       initializer=self.bias_initializer,
                                       name='eps')
        else:
            # If epsilon is given, keep it constant
            self.eps = K.constant(self.epsilon)
        self.built = True
    def call(self, inputs):
        features = inputs[0]
        fltr = inputs[1]
        # Enforce sparse representation
        if not K.is_sparse(fltr):
            fltr = ops.dense_to_sparse(fltr)
        # Propagation: each sparse index pair (target, source) is an edge;
        # gather source-node features and sum them into their target node.
        targets = fltr.indices[:, -2]
        sources = fltr.indices[:, -1]
        messages = tf.gather(features, sources)
        aggregated = ops.scatter_sum(targets, messages, N=tf.shape(features)[0])
        # GIN update rule: (1 + eps) * x_i + sum over neighbours of x_j.
        hidden = (1.0 + self.eps) * features + aggregated
        # MLP
        output = self.mlp(hidden)
        return output
    def get_config(self):
        # Serialise only GIN-specific arguments; GraphConv handles the rest.
        config = {
            'epsilon': self.epsilon,
            'mlp_hidden': self.mlp_hidden,
            'mlp_activation': self.mlp_activation
        }
        base_config = super().get_config()
        return dict(list(base_config.items()) + list(config.items()))
    @staticmethod
    def preprocess(A):
        # GIN consumes the raw binary adjacency matrix; no normalisation.
        return A
|
[
"daniele.grattarola@gmail.com"
] |
daniele.grattarola@gmail.com
|
16b09f8ce664b418c75168ef19854b8ba981583b
|
6ef3fc3ffa5f33e6403cb7cb0c30a35623a52d0d
|
/samples/snippets/product_search/import_product_sets.py
|
e2937509d632f68848329098d09d96a678ee704e
|
[
"Apache-2.0"
] |
permissive
|
vam-google/python-vision
|
61405506e3992ab89e6a454e4dda9b05fe2571f2
|
09e969fa30514d8a6bb95b576c1a2ae2c1e11d54
|
refs/heads/master
| 2022-08-15T08:40:35.999002
| 2022-07-18T16:04:35
| 2022-07-18T16:04:35
| 254,789,106
| 0
| 0
|
Apache-2.0
| 2020-04-11T03:59:02
| 2020-04-11T03:59:01
| null |
UTF-8
|
Python
| false
| false
| 3,472
|
py
|
#!/usr/bin/env python
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This application demonstrates how to perform import product sets operations
on Product set in Cloud Vision Product Search.
For more information, see the tutorial page at
https://cloud.google.com/vision/product-search/docs/
"""
import argparse
# [START vision_product_search_tutorial_import]
from google.cloud import vision
# [END vision_product_search_tutorial_import]
# [START vision_product_search_import_product_images]
def import_product_sets(project_id, location, gcs_uri):
    """Import images of different products in the product set.
    Args:
        project_id: Id of the project.
        location: A compute region name.
        gcs_uri: Google Cloud Storage URI.
            Target files must be in Product Search CSV format.
    """
    client = vision.ProductSearchClient()
    # A resource that represents Google Cloud Platform location.
    location_path = f"projects/{project_id}/locations/{location}"
    # Set the input configuration along with Google Cloud Storage URI
    gcs_source = vision.ImportProductSetsGcsSource(
        csv_file_uri=gcs_uri)
    input_config = vision.ImportProductSetsInputConfig(
        gcs_source=gcs_source)
    # Import the product sets from the input URI. This starts a
    # long-running operation on the service side.
    response = client.import_product_sets(
        parent=location_path, input_config=input_config)
    print('Processing operation name: {}'.format(response.operation.name))
    # synchronous check of operation status -- blocks until the
    # long-running operation completes.
    result = response.result()
    print('Processing done.')
    # Report the outcome for every line of the input CSV.
    for i, status in enumerate(result.statuses):
        print('Status of processing line {} of the csv: {}'.format(
            i, status))
        # Check the status of reference image
        # `0` is the code for OK in google.rpc.Code.
        if status.code == 0:
            reference_image = result.reference_images[i]
            print(reference_image)
        else:
            print('Status code not OK: {}'.format(status.message))
# [END vision_product_search_import_product_images]
if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    # The chosen sub-command name lands in args.command.
    subparsers = parser.add_subparsers(dest='command')
    parser.add_argument(
        '--project_id',
        help='Project id.  Required',
        required=True)
    parser.add_argument(
        '--location',
        help='Compute region name',
        default='us-west1')
    # Sub-command: import_product_sets <gcs_uri>
    import_product_sets_parser = subparsers.add_parser(
        'import_product_sets', help=import_product_sets.__doc__)
    import_product_sets_parser.add_argument('gcs_uri')
    args = parser.parse_args()
    if args.command == 'import_product_sets':
        import_product_sets(args.project_id, args.location, args.gcs_uri)
|
[
"noreply@github.com"
] |
vam-google.noreply@github.com
|
84f9a4867be76726bae44972eac88cfb0c3d2da4
|
e61e0558b459b9880b3bc103a0c4035c1fc52be5
|
/azure-cognitiveservices-language-textanalytics/azure/cognitiveservices/language/textanalytics/models/error_response.py
|
a522682d06949f555050322c4624ccb2bc321960
|
[
"MIT"
] |
permissive
|
OnlyAGhost/azure-sdk-for-python
|
67f713702fe573d14dde3590ca634a4a36130721
|
6bbab4181bbabf5db1c278dda870598acc9f0021
|
refs/heads/master
| 2021-05-13T13:55:53.118773
| 2018-01-05T02:17:19
| 2018-01-05T02:17:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,703
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
from msrest.exceptions import HttpOperationError
class ErrorResponse(Model):
    """ErrorResponse.

    Error payload returned by the Text Analytics service.
    :param code:
    :type code: str
    :param message:
    :type message: str
    :param target:
    :type target: str
    :param inner_error:
    :type inner_error:
     ~azure.cognitiveservices.language.textanalytics.models.InternalError
    """
    # Maps Python attribute names to wire (JSON) keys and msrest types.
    _attribute_map = {
        'code': {'key': 'code', 'type': 'str'},
        'message': {'key': 'message', 'type': 'str'},
        'target': {'key': 'target', 'type': 'str'},
        'inner_error': {'key': 'innerError', 'type': 'InternalError'},
    }
    def __init__(self, code=None, message=None, target=None, inner_error=None):
        # NOTE(review): generated code does not call Model.__init__ here --
        # presumably acceptable for this msrest version; confirm before editing.
        self.code = code
        self.message = message
        self.target = target
        self.inner_error = inner_error
class ErrorResponseException(HttpOperationError):
    """Server responded with exception of type: 'ErrorResponse'.

    :param deserialize: A deserializer
    :param response: Server response to be deserialized.
    """
    def __init__(self, deserialize, response, *args):
        # 'ErrorResponse' tells HttpOperationError which model class to
        # deserialize the error body into.
        super(ErrorResponseException, self).__init__(deserialize, response, 'ErrorResponse', *args)
|
[
"laurent.mazuel@gmail.com"
] |
laurent.mazuel@gmail.com
|
cc6e729c10f0317cca13583dd97f7ab225bbe522
|
c6760258b3ad3dd912f0842b8ae03cbea188a8c4
|
/fsleyes/gl/gl21/glmip_funcs.py
|
eec134d2181a654a6a1d2bbcdeaf389801c27998
|
[
"BSD-3-Clause",
"CC-BY-3.0",
"Apache-2.0"
] |
permissive
|
sanjayankur31/fsleyes
|
aa822f627cde38ec766180fb591c9af7d18d2126
|
46ccb4fe2b2346eb57576247f49714032b61307a
|
refs/heads/master
| 2020-04-09T08:41:18.380424
| 2018-12-03T11:44:51
| 2018-12-03T11:44:51
| 160,204,259
| 1
| 0
| null | 2018-12-03T14:31:31
| 2018-12-03T14:31:31
| null |
UTF-8
|
Python
| false
| false
| 3,501
|
py
|
#!/usr/bin/env python
#
# glmip_funcs.py - Functions used by GLMIP for rendering in an OpenGL 2.1
# environment.
#
# Author: Paul McCarthy <pauldmccarthy@gmail.com>
#
"""This module contains functions used by the :class:`.GLMIP` class for
rendering in an OpenGL 2.1 environment.
"""
import numpy as np
import fsl.utils.transform as transform
import fsleyes.gl.shaders as shaders
from . import glvolume_funcs
def init(self):
    """Initialise the shader programs: compiles the vertex/fragment pair
    and pushes the initial uniform state to them.
    """
    self.shader = None
    compileShaders( self)
    updateShaderState(self)
def destroy(self):
    """Free the GLSL shader program and drop the reference to it."""
    self.shader.destroy()
    self.shader = None
def compileShaders(self):
    """Compile the vertex/fragment shader pair, releasing any program that
    was previously compiled.
    """
    if self.shader is not None:
        self.shader.destroy()
    # MIP re-uses the glvolume vertex shader with its own fragment shader.
    vertexSource   = shaders.getVertexShader('glvolume')
    fragmentSource = shaders.getFragmentShader('glmip')
    self.shader    = shaders.GLSLShader(vertexSource, fragmentSource)
def updateShaderState(self):
    """Updates the vertex/fragment shader state based on the current
    state of the :class:`.MIPOpts` instance.

    Returns True if any shader uniform actually changed, False otherwise
    (None if the overlay is not ready yet).
    """
    if not self.ready():
        return
    opts   = self.opts
    shader = self.shader
    vmin, vmax = self.overlay.dataRange
    # Convert clipping values from voxel value
    # range to texture value range (0.0 - 1.0).
    imgXform = self.imageTexture.invVoxValXform
    clipLow  = opts.clippingRange[0] * imgXform[0, 0] + imgXform[0, 3]
    clipHigh = opts.clippingRange[1] * imgXform[0, 0] + imgXform[0, 3]
    textureMin = vmin * imgXform[0, 0] + imgXform[0, 3]
    textureMax = vmax * imgXform[0, 0] + imgXform[0, 3]
    imageShape = self.image.shape[:3]
    # Create a single transformation matrix
    # which transforms from image texture values
    # to voxel values, and scales said voxel
    # values to colour map texture coordinates.
    img2CmapXform = transform.concat(
        self.cmapTexture.getCoordinateTransform(),
        self.imageTexture.voxValXform)
    # sqrt(3) so the window is 100%
    # along the diagonal of a cube
    window = np.sqrt(3) * opts.window / 100.0
    shader.load()
    # Each set() returns whether the uniform value changed; OR them
    # together so the caller knows if a redraw is required.
    changed = False
    changed |= shader.set('imageTexture', 0)
    changed |= shader.set('cmapTexture', 1)
    changed |= shader.set('textureMin', textureMin)
    changed |= shader.set('textureMax', textureMax)
    changed |= shader.set('img2CmapXform', img2CmapXform)
    changed |= shader.set('imageShape', imageShape)
    changed |= shader.set('useSpline', opts.interpolation == 'spline')
    changed |= shader.set('clipLow', clipLow)
    changed |= shader.set('clipHigh', clipHigh)
    changed |= shader.set('invertClip', opts.invertClipping)
    changed |= shader.set('window', window)
    changed |= shader.set('useMinimum', opts.minimum)
    changed |= shader.set('useAbsolute', opts.absolute)
    shader.unload()
    return changed
def draw2D(self, zpos, axes, xform=None, bbox=None):
    """Draws a 2D slice at the given ``zpos``. Uses the
    :func:`.glvolume_funcs.draw2D` function.
    """
    self.shader.load()
    # Ray-cast direction/step depend on the current view matrix, so they
    # must be recalculated on every draw.
    viewmat        = self.canvas.viewMatrix
    cdir, rayStep  = self.opts.calculateRayCastSettings(viewmat)
    self.shader.set('cameraDir', cdir)
    self.shader.set('rayStep',   rayStep)
    glvolume_funcs.draw2D(self, zpos, axes, xform, bbox)
    self.shader.unloadAtts()
    self.shader.unload()
|
[
"pauldmccarthy@gmail.com"
] |
pauldmccarthy@gmail.com
|
5b2c3503eb04efacd2e7919ac35ddba5250f7509
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/nouns/_necklace.py
|
1fac6e98b5fa8751e7cb2dad5ed8902cbbc4a46f
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 390
|
py
|
# Class header: auto-generated dictionary entry for the noun "necklace".
class _NECKLACE():
    """Word-knowledge entry object for the noun 'NECKLACE'."""

    def __init__(self):
        # Word identity and its dictionary definition(s).
        self.name = "NECKLACE"
        self.definitions = [u'a piece of jewellery worn around the neck, such as a chain or a string of decorative stones, beads, etc.: ']
        # Relationship / metadata slots, populated elsewhere (if at all).
        self.parents = []
        self.childen = []  # NOTE(review): 'childen' spelling kept -- external code may rely on this attribute name
        self.properties = []
        self.jsondata = {}
        # Part of speech for this entry.
        self.specie = 'nouns'

    def run(self, obj1=None, obj2=None):
        """Return the entry's JSON payload; obj1/obj2 are accepted but unused.

        The original signature used mutable list defaults (obj1=[], obj2=[]),
        a shared-mutable-default pitfall; since the arguments are never read,
        None defaults are a safe, backward-compatible replacement.
        """
        return self.jsondata
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
8f4f19a5ccafc9679fc5e0d74c6526c7dbc95e29
|
b3b68efa404a7034f0d5a1c10b281ef721f8321a
|
/Scripts/simulation/sims/university/university_constraint_helper.py
|
909716771efb3ac9f35d60d052f093a8011886dc
|
[
"Apache-2.0"
] |
permissive
|
velocist/TS4CheatsInfo
|
62195f3333076c148b2a59f926c9fb5202f1c6fb
|
b59ea7e5f4bd01d3b3bd7603843d525a9c179867
|
refs/heads/main
| 2023-03-08T01:57:39.879485
| 2021-02-13T21:27:38
| 2021-02-13T21:27:38
| 337,543,310
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,186
|
py
|
# uncompyle6 version 3.7.4
# Python bytecode 3.7 (3394)
# Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)]
# Embedded file name: T:\InGame\Gameplay\Scripts\Server\sims\university\university_constraint_helper.py
# Compiled at: 2020-04-10 00:58:10
# Size of source mod 2**32: 4467 bytes
from interactions import ParticipantType
from sims4.resources import Types
from sims4.tuning.tunable import TunableEnumEntry, TunableReference, HasTunableSingletonFactory, TunableSet, AutoFactoryInit, TunableEnumWithFilter, TunableMapping
from singletons import EMPTY_SET
from tag import Tag
import services, sims4.log, sims4.resources
logger = sims4.log.Logger('UniversityConstraints', default_owner='nabaker')
class UniversityCourseReferenceSpawnPointTags(HasTunableSingletonFactory, AutoFactoryInit):
    # Tunable reference to a UniversityCourseCareerSlot; that slot supplies
    # the actual spawn point tags.
    FACTORY_TUNABLES = {'_course_slot': TunableReference(description='\n Course slot from which to pull the spawn point tags.\n ',
      manager=(services.get_instance_manager(sims4.resources.Types.CAREER)),
      class_restrictions=('UniversityCourseCareerSlot', ))}

    def get_tags(self, sim_info, interaction):
        """Delegate to the tuned course slot for this sim's spawn tags."""
        return self._course_slot.get_spawn_point_tags(sim_info)
class UniversitySpecificSpawnPointTags(HasTunableSingletonFactory, AutoFactoryInit):
    # Mapping of University instance -> set of Spawn-prefixed tags for its
    # classrooms.
    FACTORY_TUNABLES = {'spawn_point_tags': TunableMapping(description='\n University specific classroom tags.\n ',
      key_type=TunableReference(manager=(services.get_instance_manager(Types.UNIVERSITY))),
      value_type=TunableSet(tunable=TunableEnumWithFilter(tunable_type=Tag,
      default=(Tag.INVALID),
      filter_prefixes=('Spawn', )),
      minlength=1))}

    def get_tags(self, sim_info, interaction):
        """Return the tags tuned for the university the sim is enrolled in,
        or EMPTY_SET when the sim has no degree tracker / no tuned entry.
        """
        degree_tracker = sim_info.degree_tracker
        if degree_tracker is None:
            logger.error('Trying to get University Specific spawn point from sim {} with no degree tracker', sim_info)
            return EMPTY_SET
        university = degree_tracker.get_university()
        # Fall back gracefully when this university has no tags tuned.
        if university not in self.spawn_point_tags:
            return EMPTY_SET
        return self.spawn_point_tags[university]
class UniversityCourseCareerSISpawnPointTags(HasTunableSingletonFactory, AutoFactoryInit):

    def get_tags(self, sim_info, interaction):
        """Resolve the career attached to a career super-interaction and
        return its spawn point tags, or EMPTY_SET when unresolvable.
        """
        if interaction is None:
            return EMPTY_SET
        career_uid = interaction.interaction_parameters.get('career_uid')
        if career_uid is None:
            logger.error('Trying to get University Specific spawn point via career SI from invalid interaction: {}', interaction)
            return EMPTY_SET
        career = services.get_instance_manager(sims4.resources.Types.CAREER).get(career_uid)
        # The decompiled original collapsed this guard into the boolean
        # expression `return career is None or hasattr(...) or EMPTY_SET`,
        # leaving the real return unreachable; restore the intended
        # guard-then-delegate control flow.
        if career is None or not hasattr(career, 'get_spawn_point_tags'):
            return EMPTY_SET
        return career.get_spawn_point_tags(sim_info)
class UniversityCourseParticipantSpawnPointTags(HasTunableSingletonFactory, AutoFactoryInit):
    FACTORY_TUNABLES = {'participant': TunableEnumEntry(description='\n The participant from which the career ID will be obtained. \n Typically should be PickedItemId if this interaction comes via a \n CareerPickerSuperInteraction.\n ',
      tunable_type=ParticipantType,
      default=(ParticipantType.PickedItemId))}

    def get_tags(self, sim_info, interaction):
        """Resolve a career from the tuned interaction participant and
        return its spawn point tags, or EMPTY_SET when unresolvable.
        """
        if interaction is None:
            return EMPTY_SET
        career_uid = interaction.get_participant(self.participant)
        if career_uid is None:
            # Original passed one argument for two '{}' placeholders, which
            # would make the log formatting itself fail; include the
            # interaction as the second argument.
            logger.error('Trying to get University Specific spawn point via invalid participant {}: {}', self.participant, interaction)
            return EMPTY_SET
        career = services.get_instance_manager(sims4.resources.Types.CAREER).get(career_uid)
        # The decompiled original collapsed this guard into an unreachable
        # boolean `return career is None or hasattr(...) or EMPTY_SET`;
        # restore the intended guard-then-delegate control flow.
        if career is None or not hasattr(career, 'get_spawn_point_tags'):
            return EMPTY_SET
        return career.get_spawn_point_tags(sim_info)
|
[
"cristina.caballero2406@gmail.com"
] |
cristina.caballero2406@gmail.com
|
c747fc3ca11e638cc89b1543712cdff9c07f6b21
|
eb9c3dac0dca0ecd184df14b1fda62e61cc8c7d7
|
/google/ads/googleads/v4/googleads-py/google/ads/googleads/v4/enums/types/spending_limit_type.py
|
8966869ba97c885d8bc4cfee59347982afb0ed3a
|
[
"Apache-2.0"
] |
permissive
|
Tryweirder/googleapis-gen
|
2e5daf46574c3af3d448f1177eaebe809100c346
|
45d8e9377379f9d1d4e166e80415a8c1737f284d
|
refs/heads/master
| 2023-04-05T06:30:04.726589
| 2021-04-13T23:35:20
| 2021-04-13T23:35:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,202
|
py
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
# Proto-plus module descriptor: registers this file's messages under the
# Google Ads v4 enums proto package.
__protobuf__ = proto.module(
    package='google.ads.googleads.v4.enums',
    marshal='google.ads.googleads.v4',
    manifest={
        'SpendingLimitTypeEnum',
    },
)
class SpendingLimitTypeEnum(proto.Message):
    r"""Message describing spending limit types."""
    class SpendingLimitType(proto.Enum):
        r"""The possible spending limit types used by certain resources
        as an alternative to absolute money values in micros.
        """
        UNSPECIFIED = 0  # not specified
        UNKNOWN = 1      # value unknown in this API version
        INFINITE = 2     # presumably "no limit" -- see Google Ads API docs
# Export exactly the names declared in the proto manifest.
__all__ = tuple(sorted(__protobuf__.manifest))
|
[
"bazel-bot-development[bot]@users.noreply.github.com"
] |
bazel-bot-development[bot]@users.noreply.github.com
|
a37f3f31d2132f4be09fa414f44faa579301711c
|
cbf70750d6c265e4043fd9d1d3bd835662cd680f
|
/customer/migrations/0010_auto_20200924_1350.py
|
bb3021dc69a3b0803147861040ddda0bae105fac
|
[
"Apache-2.0"
] |
permissive
|
xxcfun/DJANGO_CRM
|
c54e249a9a3da9edaeb5d9b49e852d351c7e359a
|
1f8d2d7a025f9dc54b5bf498e7a577469f74c612
|
refs/heads/master
| 2023-01-14T05:21:54.995601
| 2020-11-27T03:23:40
| 2020-11-27T03:23:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,590
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.29 on 2020-09-24 05:50
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Flatten the separate CustomerInvoiceAddress / CustomerShopAddress
    models into plain CharFields directly on Customer, then drop the old
    models.

    NOTE: auto-generated migration -- the RemoveField / AddField /
    DeleteModel steps are order-sensitive; do not reorder by hand.
    """

    dependencies = [
        ('customer', '0009_auto_20200922_1015'),
    ]

    operations = [
        # First detach the FK links so the old models can be deleted below.
        migrations.RemoveField(
            model_name='customerinvoiceaddress',
            name='customer',
        ),
        migrations.RemoveField(
            model_name='customershopaddress',
            name='customer',
        ),
        # Invoice address fields moved onto Customer.
        migrations.AddField(
            model_name='customer',
            name='invoice_address',
            field=models.CharField(blank=True, max_length=64, null=True, verbose_name='发票详细地址'),
        ),
        migrations.AddField(
            model_name='customer',
            name='invoice_area',
            field=models.CharField(blank=True, max_length=32, null=True, verbose_name='发票区域'),
        ),
        migrations.AddField(
            model_name='customer',
            name='invoice_phone',
            field=models.CharField(blank=True, max_length=32, null=True, verbose_name='发票收货人电话'),
        ),
        migrations.AddField(
            model_name='customer',
            name='invoice_province',
            field=models.CharField(blank=True, max_length=32, null=True, verbose_name='发票省份'),
        ),
        migrations.AddField(
            model_name='customer',
            name='invoice_town',
            field=models.CharField(blank=True, max_length=32, null=True, verbose_name='发票街道'),
        ),
        migrations.AddField(
            model_name='customer',
            name='invoice_username',
            field=models.CharField(blank=True, max_length=32, null=True, verbose_name='发票地收货人'),
        ),
        # Shipping address fields moved onto Customer.
        migrations.AddField(
            model_name='customer',
            name='shop_address',
            field=models.CharField(blank=True, max_length=64, null=True, verbose_name='收货详细地址'),
        ),
        migrations.AddField(
            model_name='customer',
            name='shop_area',
            field=models.CharField(blank=True, max_length=32, null=True, verbose_name='收货区域'),
        ),
        migrations.AddField(
            model_name='customer',
            name='shop_city',
            field=models.CharField(blank=True, max_length=32, null=True, verbose_name='收货市区'),
        ),
        migrations.AddField(
            model_name='customer',
            name='shop_phone',
            field=models.CharField(blank=True, max_length=32, null=True, verbose_name='收货收货人电话'),
        ),
        migrations.AddField(
            model_name='customer',
            name='shop_province',
            field=models.CharField(blank=True, max_length=32, null=True, verbose_name='收货省份'),
        ),
        migrations.AddField(
            model_name='customer',
            name='shop_town',
            field=models.CharField(blank=True, max_length=32, null=True, verbose_name='收货街道'),
        ),
        migrations.AddField(
            model_name='customer',
            name='shop_username',
            field=models.CharField(blank=True, max_length=32, null=True, verbose_name='收货地收货人'),
        ),
        # Old address models are now unreferenced; drop them.
        migrations.DeleteModel(
            name='CustomerInvoiceAddress',
        ),
        migrations.DeleteModel(
            name='CustomerShopAddress',
        ),
    ]
|
[
"55070348+hhdMrLion@users.noreply.github.com"
] |
55070348+hhdMrLion@users.noreply.github.com
|
cb51fe3e4eb76ad651e9fa12d44760ebbee4a239
|
9a9e0398f26cee9864d48c4618c0a482e5475e83
|
/Python/code/insert_into_a_binary_search_tree.py
|
13866ad295bc26375cc3cc15a81946b087db49f6
|
[] |
no_license
|
CNife/leetcode
|
92693c653bb41780ee431293286c3e909009e9b0
|
7cdd61692ecb52dd1613169e80b924dd39d35996
|
refs/heads/main
| 2021-06-22T21:22:12.997253
| 2021-03-18T07:07:15
| 2021-03-18T07:07:15
| 206,955,329
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 697
|
py
|
from leetcode import TreeNode, test, new_tree, inorder_traverse
def insert_into_bst(root: TreeNode, val: int) -> TreeNode:
    """Insert `val` into the BST rooted at `root` and return the root
    (a brand-new node when the tree was empty).
    """
    parent = None
    attach_left = False
    cur = root
    # Walk down to the leaf position where the new value belongs,
    # remembering the parent and which side we descended to.
    while cur:
        parent = cur
        attach_left = val < cur.val
        cur = cur.left if attach_left else cur.right
    leaf = TreeNode(val)
    if parent is None:
        # Empty tree: the new node becomes the root.
        return leaf
    if attach_left:
        parent.left = leaf
    else:
        parent.right = leaf
    return root
# Verify insertion by comparing the in-order traversal (which yields a
# sorted sequence for a valid BST) against the expected value list.
test(
    insert_into_bst,
    [(new_tree(4, 2, 7, 1, 3), 5, [1, 2, 3, 4, 5, 7]), (new_tree(), 1, [1])],
    equals_func=lambda actual, expect: inorder_traverse(actual) == expect,
)
|
[
"CNife@vip.qq.com"
] |
CNife@vip.qq.com
|
ffc282cd0dd6bde3ffe884d9dae29cfcd248d22c
|
cf99f0dfd2ae3a50ac4dfe95dddd74d2308e7fd4
|
/src/scalbo/scalbo/benchmark/dhb_navalpropulsion.py
|
63e0681177c8fb2d58959ea10846ea3e91845071
|
[
"BSD-2-Clause"
] |
permissive
|
deephyper/scalable-bo
|
33923598181799410b790addcaf4ea799b276444
|
44f0afc28a19213252b59868f76a8f6918f8aabc
|
refs/heads/main
| 2023-07-28T10:33:52.291460
| 2023-07-20T09:44:50
| 2023-07-20T09:44:50
| 464,852,027
| 2
| 2
|
BSD-2-Clause
| 2022-10-18T12:15:19
| 2022-03-01T10:43:47
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 233
|
py
|
import os

# Select the HPOBench task *before* importing the benchmark package —
# the variable is presumably read at import/load time, so order matters here.
os.environ["DEEPHYPER_BENCHMARK_TASK"] = "navalpropulsion"

import deephyper_benchmark as dhb

# Pull in the HPOBench tabular benchmark definition.
dhb.load("HPOBench/tabular")

from deephyper_benchmark.lib.hpobench.tabular import hpo

# Re-export the search space and the objective under the module-level
# names the benchmark runner expects.
hp_problem = hpo.problem
run = hpo.run
|
[
"romainegele@gmail.com"
] |
romainegele@gmail.com
|
b7e437e89e358ec335a6332b2e9fd513a60a9b1f
|
59fb17c240b261040026d713a6ac9c97d6a9f265
|
/gym/gym/envs/robotics/hand_env.py
|
2e9d2cf735797ab4dc19d54edb06d0a5c2b45936
|
[
"MIT"
] |
permissive
|
dmeger/TeachingImitation
|
3fb97499e76929959913266f127154f6ae5a8e99
|
5f4dba7e49987924c3d55cd27579cad4c71ef7a4
|
refs/heads/master
| 2023-03-28T13:25:01.307382
| 2021-04-06T15:07:08
| 2021-04-06T15:07:08
| 355,223,500
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,094
|
py
|
import os
import copy
import numpy as np
import gym
from gym import error, spaces
from gym.utils import seeding
from gym.envs.robotics import robot_env
class HandEnv(robot_env.RobotEnv):
    """Base environment for the 20-actuator robotic hand tasks.

    Subclasses provide goals/rewards; this class maps normalized actions
    to mujoco actuator controls and sets up the default camera.
    """

    def __init__(self, model_path, n_substeps, initial_qpos, relative_control):
        # When True, actions are offsets around each joint's current
        # position rather than absolute targets within the ctrl range.
        self.relative_control = relative_control

        super(HandEnv, self).__init__(
            model_path=model_path, n_substeps=n_substeps, n_actions=20,
            initial_qpos=initial_qpos)

    # RobotEnv methods
    # ----------------------------

    def _set_action(self, action):
        """Map a normalized 20-D action into clipped actuator controls."""
        assert action.shape == (20,)

        ctrlrange = self.sim.model.actuator_ctrlrange
        # Half-width of each actuator's control interval.
        actuation_range = (ctrlrange[:, 1] - ctrlrange[:, 0]) / 2.
        if self.relative_control:
            # Center each actuator on its joint's current position
            # (actuator 'robot0:A_XXJn' drives joint 'robot0:XXJn').
            actuation_center = np.zeros_like(action)
            for i in range(self.sim.data.ctrl.shape[0]):
                actuation_center[i] = self.sim.data.get_joint_qpos(
                    self.sim.model.actuator_names[i].replace(':A_', ':'))
            for joint_name in ['FF', 'MF', 'RF', 'LF']:
                # NOTE(review): presumably J0/J1 of each finger are coupled —
                # J0's position is folded into the J1 actuator's center;
                # confirm against the hand model.
                act_idx = self.sim.model.actuator_name2id(
                    'robot0:A_{}J1'.format(joint_name))
                actuation_center[act_idx] += self.sim.data.get_joint_qpos(
                    'robot0:{}J0'.format(joint_name))
        else:
            # Absolute control: center on the midpoint of the ctrl range.
            actuation_center = (ctrlrange[:, 1] + ctrlrange[:, 0]) / 2.
        self.sim.data.ctrl[:] = actuation_center + action * actuation_range
        self.sim.data.ctrl[:] = np.clip(self.sim.data.ctrl, ctrlrange[:, 0], ctrlrange[:, 1])

    def _viewer_setup(self):
        # Aim the camera at the palm of the hand.
        body_id = self.sim.model.body_name2id('robot0:palm')
        lookat = self.sim.data.body_xpos[body_id]
        for idx, value in enumerate(lookat):
            self.viewer.cam.lookat[idx] = value
        self.viewer.cam.distance = 0.5
        self.viewer.cam.azimuth = 55.
        self.viewer.cam.elevation = -25.

    def render(self, mode='human', width=500, height=500):
        """Delegate to RobotEnv.render with explicit size arguments."""
        return super(HandEnv, self).render(mode, width, height)
|
[
"david.meger@gmail.com"
] |
david.meger@gmail.com
|
21829bcc8abaacf0d9312bc98db461f1459f05bf
|
64ec8731553aa08c33373b212bbe431b1a23b97c
|
/docs/source/examples/geochem/lambdas.py
|
9633ff7db7d91920bbbc02759569103e076723dd
|
[
"BSD-3-Clause",
"MIT"
] |
permissive
|
ChetanNathwani/pyrolite
|
98947fde265b25beea839f24495d68bbdb726eed
|
8de9c67855305115517418e127bf26de84ff062d
|
refs/heads/master
| 2023-07-26T18:57:28.024540
| 2021-07-08T09:19:02
| 2021-07-08T09:19:02
| 367,300,779
| 0
| 0
|
NOASSERTION
| 2021-05-14T09:23:47
| 2021-05-14T08:35:50
| null |
UTF-8
|
Python
| false
| false
| 6,232
|
py
|
"""
lambdas: Parameterising REE Profiles
=====================================
Orthogonal polynomial decomposition can be used for dimensional reduction of smooth
function over an independent variable, producing an array of independent values
representing the relative weights for each order of component polynomial. This is an
effective method to parameterise and compare the nature of smooth profiles.
In geochemistry, the most applicable use case is for reduction Rare Earth Element (REE)
profiles. The REE are a collection of elements with broadly similar physicochemical
properties (the lanthanides), which vary with ionic radii. Given their similar behaviour
and typically smooth function of normalised abundance vs. ionic radii, the REE profiles
and their shapes can be effectively parameterised and dimensionally reduced (14 elements
summarised by 3-4 shape parameters).
Here we generate some example data, reduce these to lambda values, and visualise the
results.
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import pyrolite.plot
# sphinx_gallery_thumbnail_number = 2
np.random.seed(82)
########################################################################################
# First we'll generate some example synthetic data based around Depleted Morb Mantle:
#
from pyrolite.util.synthetic import example_spider_data
df = example_spider_data(
noise_level=0.05,
nobs=100,
start="DMM_WH2005",
norm_to="Chondrite_PON",
offsets={"Eu": 0.2},
)
########################################################################################
# Let's have a quick look at what this REE data looks like:
#
df.pyroplot.REE(alpha=0.05, c="k", unity_line=True)
plt.show()
########################################################################################
# From this REE data we can fit a series of orthogonal polynomials, and subsequently use
# the regression coefficients ('lambdas') as a parameterisation of the REE
# pattern/profile. This example data is already normalised to Chondrite, so to avoid
# double-normalising, we pass :code:`norm_to=None`:
#
ls = df.pyrochem.lambda_lnREE(degree=4, norm_to=None)
########################################################################################
# So what's actually happening here? To get some idea of what these λ coefficients
# correspond to, we can pull this process apart and visualise our REE profiles as
# the sum of the series of orthogonal polynomial components of increasing order.
# As lambdas represent the coefficients for the regression of log-transformed normalised
# data, we'll first need to take the logarithm.
#
# With our data, we've then fit a function of ionic radius with the form
# :math:`f(r) = \lambda_0 + \lambda_1 f_1 + \lambda_2 f_2 + \lambda_3 f_3...`
# where the polynomial components of increasing order are :math:`f_1 = (r - \beta_0)`,
# :math:`f_2 = (r - \gamma_0)(r - \gamma_1)`,
# :math:`f_3 = (r - \delta_0)(r - \delta_1)(r - \delta_2)` and so on. The parameters
# :math:`\beta`, :math:`\gamma`, :math:`\delta` are pre-computed such that the
# polynomial components are indeed independent. Here we can visualise how these
# polynomial components are summed to produce the regressed profile, using the last REE
# profile we generated above as an example:
#
from pyrolite.util.lambdas import plot_lambdas_components
ax = df.iloc[-1, :].apply(np.log).pyroplot.REE(color="k", label="Data", logy=False)
plot_lambdas_components(ls.iloc[-1, :], ax=ax)
ax.legend(frameon=False, facecolor=None, bbox_to_anchor=(1, 1))
plt.show()
########################################################################################
# Note that we've not used Eu in this regression - Eu anomalies are a deviation from
# the 'smooth profile' that this method requires. Similarly, if your data might
# exhibit significant Ce anomalies, you may need to exclude Ce as well.
#
# Now that we've gone through a brief introduction to how the lambdas are generated,
# let's quickly check what the coefficient values themselves look like:
#
fig, ax = plt.subplots(1, 3, figsize=(9, 3))
for ix in range(ls.columns.size - 1):
ls[ls.columns[ix : ix + 2]].pyroplot.scatter(ax=ax[ix], alpha=0.1, c="k")
plt.tight_layout()
########################################################################################
# But what do these parameters correspond to? From the deconstructed orthogonal
# polynomial above, we can see that :math:`\lambda_0` parameterises relative enrichment
# (this is the mean value of the logarithm of Chondrite-normalised REE abundances),
# :math:`\lambda_1` parameterises a linear slope (here, LREE enrichment), and higher
# order terms describe curvature of the REE pattern. Through this parameterisation,
# the REE profile can be effectively described and directly linked to geochemical
# processes. While the amount of data we need to describe the patterns is lessened,
# the values themselves are more meaningful and readily used to describe the profiles
# and their physical significance.
#
# The visualisation of :math:`\lambda_1`-:math:`\lambda_2` can be particularly useful
# where you're trying to compare REE profiles.
#
# We've used a synthetic dataset here which is by design approximately normally
# distributed, so the values themselves here are not particularly revealing,
# but they do illustrate the expected magnitudes of values for each of the parameters.
#
# For more on using orthogonal polynomials to describe geochemical pattern data, dig
# into the paper which introduced the method to geochemists:
# O’Neill, H.S.C., 2016. The Smoothness and Shapes of Chondrite-normalized Rare Earth
# Element Patterns in Basalts. J Petrology 57, 1463–1508.
# `doi: 10.1093/petrology/egw047 <https://doi.org/10.1093/petrology/egw047>`__.
#
# .. seealso::
#
# Examples:
# `Ionic Radii <ionic_radii.html>`__,
# `REE Radii Plot <../plotting/REE_radii_plot.html>`__
#
# Functions:
# :func:`~pyrolite.geochem.pyrochem.lambda_lnREE`,
# :func:`~pyrolite.geochem.ind.get_ionic_radii`,
# :func:`pyrolite.plot.pyroplot.REE`
#
|
[
"morgan.j.williams@hotmail.com"
] |
morgan.j.williams@hotmail.com
|
8fb745233b67a3ca9ae885b064cd5d202c60325f
|
1108586cf9e962a5be71536fc58a3837196c31e1
|
/core/urls.py
|
b354185636a66849ba918552b8cd4c20b4f206de
|
[] |
no_license
|
DeepakDarkiee/drf_search_viewset
|
4abad47aad7c26bb729081e23b930b33483f24cf
|
ca014fe39a148e2e7f68cbb9b94cf71e93f971f7
|
refs/heads/master
| 2023-05-05T02:30:41.203749
| 2021-05-21T11:08:03
| 2021-05-21T11:08:03
| 369,508,002
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 280
|
py
|
# NOTE(review): `url` is imported but never used in this module.
from django.conf.urls import url
from rest_framework import routers

from core.views import StudentViewSet, UniversityViewSet

# DefaultRouter generates the standard list/detail routes for every
# registered viewset and serves a browsable API root.
router = routers.DefaultRouter()
router.register('students', StudentViewSet)
router.register('universities', UniversityViewSet)

# The generated routes are this module's URL configuration.
urlpatterns = router.urls
|
[
"mdipakpatidar@gmail.com"
] |
mdipakpatidar@gmail.com
|
0ea9cf7e3399ef2186f2a21d7e5bbad6f4a1d92c
|
f797c5fc3243944855ff9304a678f9d89ff85f93
|
/src/state/ui/ui.py
|
4c7a98ae01d648f5511fbd433450f312f67f8f1d
|
[] |
no_license
|
thydungeonsean/Rainbowmancer
|
551e857a85b76489b1cee3feb0e40f7832919712
|
b8395fd2c25b83c84239a1aa198a0d134d7c60be
|
refs/heads/master
| 2021-01-25T06:57:05.968319
| 2017-07-17T12:00:07
| 2017-07-17T12:00:07
| 93,628,155
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,337
|
py
|
from crystal_panel import CrystalPanel
from character_panel import CharacterPanel
from ability_panel import AbilityPanel
class UI(object):
    """Top-level HUD container that owns the panels and dispatches
    drawing and mouse events to them."""

    def __init__(self, game):
        self.game = game
        self.ui_objects = []
        self.panels = {}
        self.initialize()

    def initialize(self):
        """Create the three standard panels and register each one."""
        for key, panel_cls in (('crystal', CrystalPanel),
                               ('character', CharacterPanel),
                               ('ability', AbilityPanel)):
            panel = panel_cls(self)
            self.add_ui_object(panel)
            self.panels[key] = panel

    def add_ui_object(self, obj):
        """Register *obj* for drawing and event dispatch."""
        self.ui_objects.append(obj)

    def remove_ui_object(self, obj):
        """Unregister a previously added object."""
        self.ui_objects.remove(obj)

    def draw(self, surface, tick):
        """Draw every registered object onto *surface*."""
        for widget in self.ui_objects:
            widget.draw(surface, tick)

    def run(self):
        """Per-frame update hook; the panels currently need none."""
        pass

    def click(self, point):
        """Offer a left-click at *point* to each panel in registration
        order; stop at the first that handles it. Returns True/False."""
        return any(panel.click(point) for panel in self.ui_objects)

    def right_click(self, point):
        """Offer a right-click at *point* to each panel in registration
        order; stop at the first that handles it. Returns True/False."""
        return any(panel.right_click(point) for panel in self.ui_objects)
|
[
"marzecsean@gmail.com"
] |
marzecsean@gmail.com
|
14051aef8bd1c1cf2f5c709908ed00a5830eb788
|
153c7c69c0e249dfd7b8bc9cfe18724b9b45ebf2
|
/PyOptim/core/datainterface.py
|
cda80072be9ed1b91aca352de023e7ff2650c594
|
[
"BSD-3-Clause"
] |
permissive
|
bitfort/py-optim
|
e729ad8c80bffb3a843ccdded4a5051c516dd9f5
|
22e774008cd3c83c9e69de546b8733bd21729554
|
refs/heads/master
| 2021-01-18T10:11:23.853286
| 2013-04-13T01:33:50
| 2013-04-13T01:33:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,612
|
py
|
from random import shuffle
from scipy import reshape, array
from numpy.matlib import repmat
from pybrain.utilities import setAllArgs
class SampleProvider(object):
    """ Unified interface for interacting with a model:
    given a data sample and a parameter vector, it produces
    gradients, loss values, and potentially other terms like
    diagonal Hessians.

    The samples are iteratively generated, either from a dataset, or from a
    function, individually or in minibatches, shuffled or not.
    """

    # Number of samples served per call; overwritten by nextSamples().
    batch_size = 1

    #optional function that generates diagonal Hessian approximations
    diaghess_fun = None

    def __init__(self, paramdim, loss_fun, gradient_fun, **kwargs):
        self.paramdim = paramdim
        self.loss_fun = loss_fun
        self.gradient_fun = gradient_fun
        # Remaining keyword arguments are copied onto attributes
        # (pybrain utility).
        setAllArgs(self, kwargs)

    def nextSamples(self, how_many):
        """ Obtain a certain number of samples. """
        self.batch_size = how_many
        self._provide()

    def _provide(self):
        """ abstract """

    def reset(self):
        """ abstract """

    def currentLosses(self, params):
        """Loss value(s) of the current sample(s) at *params*."""
        return self.loss_fun(params)

    def currentGradients(self, params):
        """Gradient(s) of the current sample(s) at *params*."""
        return self.gradient_fun(params)

    def currentDiagHess(self, params):
        """Diagonal Hessian approximation, or None when unavailable."""
        if self.diaghess_fun is None:
            return
        return self.diaghess_fun(params)
class FunctionWrapper(SampleProvider):
    """ Specialized case for a function that can generate samples on the fly. """

    # When True, every sample drawn from the function is kept in _seen.
    record_samples = False

    def __init__(self, dim, stochfun, **kwargs):
        self.stochfun = stochfun
        self._seen = []
        SampleProvider.__init__(self, dim, loss_fun=stochfun._f,
                                gradient_fun=stochfun._df,
                                diaghess_fun=stochfun._ddf, **kwargs)
        # Make the function remember its last drawn sample so the
        # loss/gradient calls below evaluate on that same sample.
        stochfun._retain_sample = True

    def _provide(self):
        # Draw one flat sample large enough for the whole minibatch.
        self.stochfun._newSample(self.paramdim*self.batch_size, override=True)
        if self.record_samples:
            ls = self.stochfun._lastseen
            if self.batch_size == 1:
                self._seen.append(ls)
            else:
                # Split the flat draw into per-sample rows before storing.
                for l in reshape(ls, (self.batch_size, self.paramdim)):
                    self._seen.append(reshape(l, (1, self.paramdim)))

    def currentLosses(self, params):
        """Loss per sample; params are tiled across the minibatch."""
        if self.batch_size > 1:
            params = repmat(params, 1, self.batch_size)
            res = self.loss_fun(params)
            return reshape(res, (self.batch_size, self.paramdim))
        else:
            return self.loss_fun(params)

    def currentGradients(self, params):
        """Gradient per sample; params are tiled across the minibatch."""
        if self.batch_size > 1:
            params = repmat(params, 1, self.batch_size)
            res = self.gradient_fun(params)
            return reshape(res, (self.batch_size, self.paramdim))
        else:
            return self.gradient_fun(params)

    def currentDiagHess(self, params):
        """Diagonal Hessian per sample, or None when unavailable."""
        if self.diaghess_fun is None:
            return
        if self.batch_size > 1:
            params = repmat(params, 1, self.batch_size)
            res = self.diaghess_fun(params)
            return reshape(res, (self.batch_size, self.paramdim))
        else:
            return self.diaghess_fun(params)

    def __str__(self):
        return self.stochfun.__class__.__name__+" n=%s curv=%s "%(self.stochfun.noiseLevel, self.stochfun.curvature)
class DatasetWrapper(SampleProvider):
    """ Specialized case for datasets.

    Serves sample indices in sequential passes over the dataset,
    optionally reshuffling the order at the start of each pass.
    """

    # Reshuffle the index order whenever a pass over the data restarts.
    shuffling = True

    def reset(self, dataset=None):
        """Rewind iteration; optionally switch to a new *dataset*.

        Calling reset() with no argument keeps the current dataset and
        only rewinds the counter.
        """
        if dataset is not None:
            self.dataset = dataset
        # Check the dataset actually in use: the original asserted on the
        # argument, so reset() without one crashed on len(None).
        assert len(self.dataset) > 0, 'Must be non-empty'
        # Materialize as a list: random.shuffle() mutates in place and
        # cannot operate on a (Python 3) range object.
        self._indices = list(range(len(self.dataset)))
        self._counter = 0

    def getIndex(self):
        """Return the (possibly shuffled) start index of the next batch."""
        tmp = self._counter % len(self.dataset)
        if tmp + self.batch_size > len(self.dataset):
            # dataset is not a multiple of batchsizes
            tmp = 0
        if tmp == 0 and self.shuffling:
            shuffle(self._indices)
        #if len(self.dataset) < self.batch_size:
        #    print 'WARNING: Dataset smaller than batchsize'
        return self._indices[tmp]
class ModuleWrapper(DatasetWrapper):
    """ A wrapper around a PyBrain module that defines a forward-backward,
    and a corresponding dataset.

    Assumption: MSE of targets is the criterion used. """

    def __init__(self, dataset, module, **kwargs):
        setAllArgs(self, kwargs)
        self.module = module
        self.paramdim = module.paramdim
        # Caches forward-backward results; invalidated on new samples.
        self._ready = False
        self.reset(dataset)

    def _provide(self):
        start = self.getIndex()
        # reuse samples multiple times if the dataset is too small
        self._currentSamples = [self.dataset.getSample(si%len(self.dataset)) for si in range(start, start+self.batch_size)]
        self._counter += self.batch_size
        self._ready = False

    def loss_fun(self, params):
        """Per-sample losses (0.5 * sum of squared errors) at *params*."""
        self._forwardBackward(params)
        return self._last_loss

    def gradient_fun(self, params):
        """Per-sample gradients at *params* (always recomputed)."""
        self._ready = False
        self._forwardBackward(params)
        return self._last_grads

    def _forwardBackward(self, params):
        # Run forward and backward passes over the current minibatch,
        # caching losses and gradients until invalidated.
        if self._ready:
            return
        losses = []
        grads = []
        for inp, targ in self._currentSamples:
            self.module._setParameters(params)
            self.module.resetDerivatives()
            self.module.reset()
            outp = self.module.activate(inp)
            losses.append(0.5 * sum((outp - targ)**2))
            self.module.backActivate(outp-targ)
            grads.append(self.module.derivs.copy())
        self._last_loss = array(losses)
        self._last_grads = reshape(array(grads), (self.batch_size, self.paramdim))
        self._ready = True
class DataFunctionWrapper(DatasetWrapper, FunctionWrapper):
    """ Data from a stochastic function.

    Combines dataset-style iteration (index order, shuffling) with
    function-style evaluation: each served sample is installed as the
    function's 'last seen' sample.
    """

    def __init__(self, dataset, stochfun, **kwargs):
        dim = dataset[0].size
        FunctionWrapper.__init__(self, dim, stochfun, **kwargs)
        self.reset(dataset)

    def _provide(self):
        i = self.getIndex()
        if self.batch_size == 1:
            x = self.dataset[i]
        else:
            x = array(self.dataset[i:i+self.batch_size])
        # Flatten the batch into the single row shape the function expects.
        self.stochfun._lastseen = reshape(x, (1, self.batch_size * self.paramdim))
        self._counter += self.batch_size
|
[
"schaul@gmail.com"
] |
schaul@gmail.com
|
2bc24f18339ff57dfd8fe78df5b0674bbcea8621
|
1f82b95d45c6eed81a4361c7ed4cdc04789249d3
|
/studentassign/domain/Student.py
|
07daad24126be7343c576199e8360f26e7d127fd
|
[] |
no_license
|
andidh/Python
|
dc06728ba4b9e54a6e9ff52afbbe75d43b855b36
|
8b629d160be541a6955d3799ac91358cecf5986a
|
refs/heads/master
| 2020-12-24T20:10:49.132192
| 2016-04-13T15:34:36
| 2016-04-13T15:34:36
| 56,164,113
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 803
|
py
|
'''
Created on Nov 16, 2015
@author: AndiD
'''
class Student:
    """Plain data holder for a student record: id, name and group."""

    def __init__(self, student_id, name, group):
        """
        :param student_id: unique identifier of the student
        :param name: display name
        :param group: group the student is assigned to
        """
        self._student_id = student_id
        self._name = name
        self._group = group

    def get_student_id(self):
        return self._student_id

    def get_name(self):
        return self._name

    def get_group(self):
        return self._group

    def set_student_id(self, student_id):
        self._student_id = student_id

    def set_name(self, name):
        self._name = name

    def set_group(self, group):
        self._group = group

    def __repr__(self):
        # Renders as "{id, name, group}".
        parts = [str(self._student_id), self._name, str(self._group)]
        return "{" + ", ".join(parts) + "}"
|
[
"andi.deh30@icloud.com"
] |
andi.deh30@icloud.com
|
b8101f1890fe1cde1b9d02a092f473d3bce4a8ba
|
93f0d70bea431064897698bef580e07159a2a4a5
|
/backend/src/settings.py
|
41035d5d9525c4ad66f364e6adc28df508adda00
|
[] |
no_license
|
azizcruz/react_rdf_app
|
5ba9ffdb18c7d11187d58255ac84d5c37620823e
|
11dc836798ba4becdafdd0f04e7ea0164116fc4c
|
refs/heads/master
| 2020-05-04T18:43:26.853445
| 2019-04-07T11:52:41
| 2019-04-07T11:52:41
| 179,364,198
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,446
|
py
|
"""
Django settings for src project.
Generated by 'django-admin startproject' using Django 2.1.7.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): os.environ.get returns None when SECRET_KEY is unset, in
# which case Django refuses to start; consider os.environ["SECRET_KEY"]
# to fail fast with a clearer error.
SECRET_KEY = os.environ.get('SECRET_KEY')

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.humanize',
'bootstrap_datepicker_plus',
'todo.apps.TodoConfig',
'corsheaders',
'rest_framework',
'todo_api.apps.TodoApiConfig'
]
MIDDLEWARE = [
'corsheaders.middleware.CorsMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'src.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'src.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
BOOTSTRAP4 = {
'include_jquery': True,
}
# We whitelist localhost:3000 because that's where the frontend is served.
# NOTE: the original value was a bare parenthesized string (missing the
# trailing comma), not a tuple, and carried an invalid trailing slash —
# origins must be listed without a path component.
CORS_ORIGIN_WHITELIST = (
    'localhost:3000',
)
|
[
"edu@localhost.localdomain"
] |
edu@localhost.localdomain
|
11e0ab6427e70a176bc90a0504c0b2891bee1de9
|
1244d693ae8d7d68721f972de82970c321b2f06f
|
/examples/network/create.py
|
e948ccc8a51f10108ac3879164c846742f42f31e
|
[
"Apache-2.0"
] |
permissive
|
jasonzhuyx/python-openstacksdk
|
ae3b07b0729a55a2ab2faceee23ee6a8eb20b43a
|
087140278d8c2e3f457093375bc480bd0045f86f
|
refs/heads/master
| 2020-04-08T19:39:13.634700
| 2015-09-03T19:20:07
| 2015-09-03T19:30:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,353
|
py
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Network examples
Create all the pieces parts to have a working network.
To run:
python examples/network/create.py
"""
import sys
from examples import common
from examples import connection
def create(conn, name, opts, ports_to_open=(80, 22)):
    """Idempotently create a network, subnet, router and security group.

    Every resource shares *name*; each is looked up first and only
    created when missing. Returns the network object.

    :param conn: an established openstack connection
    :param name: common name for all created resources
    :param opts: parsed CLI options; ``opts.data`` may override
        ``dns_nameservers`` and ``cidr`` (both entries are popped)
    :param ports_to_open: TCP ports opened on the security group. An
        immutable tuple replaces the original mutable-list default, the
        classic shared-between-calls Python pitfall.
    """
    dns_nameservers = opts.data.pop('dns_nameservers', '206.164.176.34')
    cidr = opts.data.pop('cidr', '10.3.3.0/24')
    network = conn.network.find_network(name)
    if network is None:
        network = conn.network.create_network(name=name)
    print(str(network))
    subnet = conn.network.find_subnet(name)
    if subnet is None:
        args = {
            "name": name,
            "network_id": network.id,
            "ip_version": "4",
            "dns_nameservers": [dns_nameservers],
            "cidr": cidr,
        }
        subnet = conn.network.create_subnet(**args)
    print(str(subnet))
    extnet = conn.network.find_network("Ext-Net")
    router = conn.network.find_router(name)
    if router is None:
        args = {
            "name": name,
            "external_gateway_info": {"network_id": extnet.id}
        }
        router = conn.network.create_router(**args)
        # Attach the new subnet so traffic can reach the external network.
        conn.network.router_add_interface(router, subnet.id)
    print(str(router))
    sg = conn.network.find_security_group(name)
    if sg is None:
        sg = conn.network.create_security_group(name=name)
        for port in ports_to_open:
            conn.network.security_group_open_port(sg.id, port)
        conn.network.security_group_allow_ping(sg.id)
    print(str(sg))
    return network
def run_network(opts):
    """Entry point: open a connection and create the demo network."""
    network_name = opts.data.pop('name', 'netty')
    return create(connection.make_connection(opts), network_name, opts)
if __name__ == "__main__":
opts = common.setup()
sys.exit(common.main(opts, run_network))
|
[
"terrylhowe@gmail.com"
] |
terrylhowe@gmail.com
|
4722d5e476f88f65763e3bd8d8ad036dc9ae67e2
|
87ad372898e793faf1ad89f4bb3b6e84a8002131
|
/tests/unit/Strategy/test_set_next_time_lock.py
|
5db244ebd53e53b7059dbf9c6899718d72115239
|
[] |
no_license
|
atsignhandle/unagii-vault-v2
|
6a9a96c11d34257bc3fdae57455ec3b2f9c0029a
|
548f715f34329eb5abebffe40acbeb56a31cb6f3
|
refs/heads/main
| 2023-08-27T00:59:48.080152
| 2021-09-28T02:47:36
| 2021-09-28T02:47:36
| 413,448,825
| 0
| 0
| null | 2021-10-04T14:07:37
| 2021-10-04T14:07:36
| null |
UTF-8
|
Python
| false
| false
| 410
|
py
|
import brownie
import pytest
def test_set_next_time_lock(strategyTest, user):
    """setNextTimeLock is restricted to the current time lock address."""
    timeLock = strategyTest.timeLock()
    # not time lock
    with brownie.reverts("!time lock"):
        strategyTest.setNextTimeLock(user, {"from": user})
    # Called by the time lock itself: nomination succeeds and is logged.
    tx = strategyTest.setNextTimeLock(user, {"from": timeLock})
    assert strategyTest.nextTimeLock() == user
    assert tx.events["SetNextTimeLock"].values() == [user]
|
[
"tsk.nakamura@gmail.com"
] |
tsk.nakamura@gmail.com
|
578b5939179d1b07ba88a691c7e64e34ab5f3c0c
|
61fb12fd550291bd59c15b244a99fd9394cbcbc2
|
/wajju.py
|
a86073946bb0a59e7493e9d803fbd0c3d2364154
|
[] |
no_license
|
Mujju-palaan/Python
|
9ab304a826964ef8d6643e326293e85631f03880
|
a5b14736954e87596974690e9706bcb227b38b82
|
refs/heads/master
| 2020-06-12T19:55:45.327317
| 2019-08-08T05:23:11
| 2019-08-08T05:23:11
| 194,407,897
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 224
|
py
|
import math
def sum(numbers):
    """Return the arithmetic sum of *numbers* (an iterable of numbers).

    The original body ignored the parameter and added the module-level
    globals number1/number2 instead. Note this function still shadows
    the builtin ``sum``, so the total is accumulated manually here.
    """
    total = 0
    for value in numbers:
        total += value
    return total
# Prompt for two integers and report their sum.
# NOTE(review): raw_input is Python 2; on Python 3 use input() instead.
print("Enter a number :")
number1 = int(raw_input())
print("Enter another number")
number2 = int(raw_input())
# The original did str(number1) + str(number2), which concatenates the
# digits ("2" + "3" -> "23"); add first, then convert to string.
print("The sum is :" + str(number1 + number2))
|
[
"mdmujahid97@gmail.com"
] |
mdmujahid97@gmail.com
|
dbf428402a937cb09a98af44a2e148b105a3368f
|
77c641fd0708b279dddbe01f6af32a8531b93185
|
/marketsim/gen/_out/math/Moving/_Min.py
|
7cde0c4e008cb74439117ad736967208c24d45c3
|
[] |
no_license
|
abensrhir/marketsimulator
|
aea286afd2bb2e0c8a547bfa879601aef21c0cd5
|
f9f55c72fb34cdbec42b96737ca20839f26c6299
|
refs/heads/master
| 2020-12-13T20:55:55.795344
| 2014-02-24T22:52:24
| 2014-02-24T22:52:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,660
|
py
|
from marketsim import registry
from marketsim.gen._out._observable import Observablefloat
from marketsim.gen._intrinsic.observable.minmax import Min_Impl
from marketsim.gen._out._iobservable import IObservablefloat
@registry.expose(["Statistics", "Min"])
class Min_IObservableFloatFloat(Observablefloat,Min_Impl):
    """Observable tracking the minimum of *source*, presumably over the
    trailing *timeframe* window (behaviour lives in Min_Impl from
    marketsim.gen._intrinsic.observable.minmax).

    Auto-generated overload; see the Min() factory below for dispatch.
    """

    def __init__(self, source = None, timeframe = None):
        # Local imports — presumably to avoid import cycles in this
        # generated module.
        from marketsim.gen._out._observable import Observablefloat
        from marketsim.gen._out._const import const_Float as _const_Float
        from marketsim import event
        from marketsim import rtti
        Observablefloat.__init__(self)
        # Default source: the constant-1.0 observable.
        self.source = source if source is not None else _const_Float(1.0)
        # Re-fire this observable whenever the underlying source fires.
        event.subscribe(self.source, self.fire, self)
        self.timeframe = timeframe if timeframe is not None else 100.0
        rtti.check_fields(self)
        Min_Impl.__init__(self)

    @property
    def label(self):
        return repr(self)

    # Field types used by the rtti machinery and the registry.
    _properties = {
        'source' : IObservablefloat,
        'timeframe' : float
    }

    def __repr__(self):
        return "Min_{n=%(timeframe)s}(%(source)s)" % self.__dict__
def Min(source = None,timeframe = None):
    """Factory dispatching to the matching auto-generated Min overload.

    Accepts an observable-float *source* (or None for the default) and a
    float *timeframe* (or None); raises on any other combination.
    """
    from marketsim.gen._out._iobservable import IObservablefloat
    from marketsim import rtti
    if source is None or rtti.can_be_casted(source, IObservablefloat):
        if timeframe is None or rtti.can_be_casted(timeframe, float):
            return Min_IObservableFloatFloat(source,timeframe)
    raise Exception('Cannot find suitable overload for Min('+str(source) +':'+ str(type(source))+','+str(timeframe) +':'+ str(type(timeframe))+')')
|
[
"anton.kolotaev@gmail.com"
] |
anton.kolotaev@gmail.com
|
6e06a75c02bd30795563b83eb66fd76197ba0f57
|
ac5cba0f382ff833e215b3aec164cd70ce86572a
|
/tests/controllers/test_validation.py
|
f8c5d107be8ceca233d8c863cbf0a8a6a75545a4
|
[] |
no_license
|
numberoverzero/moldyboot
|
c724141d4db6ec1dc7be550cfb95de135ea19ac0
|
10bec1e76ddb6c9f8d826936056eb7730b64bdd7
|
refs/heads/master
| 2021-09-10T15:58:26.137509
| 2017-11-27T08:58:19
| 2017-11-27T08:58:19
| 60,145,689
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,063
|
py
|
import base64
import uuid
import bcrypt
import pytest
from cryptography.hazmat.primitives import serialization
from tests.helpers import as_der
from moldyboot.controllers import InvalidParameter, validate
from moldyboot.security.jwk import i2b64
valid_uuids = [
uuid.uuid1(),
uuid.uuid4()
]
invalid_uuids = [
None,
"",
"not a uuid",
]
valid_usernames = ["abc", "aaa", "a00"]
invalid_usernames = ["", "aa", "ab!", "0ab"]
invalid_emails = ["", "a@", "@a", "aaa"]
valid_emails = ["a@c", "!@!", "@@@"]
invalid_signatures = [
"",
# missing sections
'''Signature headers="" id="@"''',
'''Signature headers="" signature=""''',
'''Signature id="@" signature=""''',
# out of order
'''Signature id="@" headers="" signature=""''',
# capitalization
'''Signature HEADERS="" ID="@" SIGNATURE=""''',
# quote style
"""Signature headers='' id='@' signature=''""",
# bad id
'''Signature headers="" id="" signature=""''',
# extra whitespace
''' Signature headers="" id="@" signature=""''',
'''Signature headers="" id="@" signature=""''',
'''Signature headers="" id="@" signature="" '''
]
def test_validate_unknown_parameter():
    # Unknown parameter names are a programming error (KeyError),
    # not an InvalidParameter.
    with pytest.raises(KeyError):
        validate("not a real parameter name", "unused value")


@pytest.mark.parametrize("parameter_name", ["user_id", "key_id", "verification_code"])
@pytest.mark.parametrize("valid_uuid", valid_uuids)
def test_valid_uuid(parameter_name, valid_uuid):
    # Both UUID objects and their string form validate to the same UUID.
    same = validate(parameter_name, valid_uuid)
    also_same = validate(parameter_name, str(valid_uuid))
    assert valid_uuid == same == also_same


@pytest.mark.parametrize("parameter_name", ["user_id", "key_id"])
@pytest.mark.parametrize("invalid_uuid", invalid_uuids)
def test_invalid_uuid(parameter_name, invalid_uuid):
    with pytest.raises(InvalidParameter) as excinfo:
        validate(parameter_name, invalid_uuid)
    exception = excinfo.value
    # The exception carries the offending parameter name and raw value.
    assert parameter_name == exception.parameter_name
    assert invalid_uuid == exception.value
    assert "must be a UUID" == exception.message
@pytest.mark.parametrize("invalid_signature", invalid_signatures)
def test_invalid_authorization_header(invalid_signature):
    with pytest.raises(InvalidParameter) as excinfo:
        validate("authorization_header", invalid_signature)
    assert "authorization_header" == excinfo.value.parameter_name
    assert invalid_signature == excinfo.value.value


def test_valid_authorization_header():
    # A well-formed header's id is "<user>@<key>" and is split into
    # the two separate fields of the returned dict.
    valid = '''Signature headers="a" id="b@c" signature="d"'''
    expected = {
        "headers": "a",
        "user_id": "b",
        "key_id": "c",
        "signature": "d"}
    actual = validate("authorization_header", valid)
    assert actual == expected
@pytest.mark.parametrize("valid_email", valid_emails)
def test_valid_email(valid_email):
    # Validation is intentionally loose: any "x@y" shape passes.
    assert validate("email", valid_email) == valid_email


@pytest.mark.parametrize("invalid_email", invalid_emails)
def test_invalid_email(invalid_email):
    with pytest.raises(InvalidParameter) as excinfo:
        validate("email", invalid_email)
    assert "email" == excinfo.value.parameter_name


@pytest.mark.parametrize("valid_username", valid_usernames)
def test_valid_username(valid_username):
    # Usernames: at least 3 chars, alphanumeric, starting with a letter
    # (per the valid/invalid fixture lists above).
    assert validate("username", valid_username) == valid_username


@pytest.mark.parametrize("invalid_username", invalid_usernames)
def test_invalid_username(invalid_username):
    with pytest.raises(InvalidParameter) as excinfo:
        validate("username", invalid_username)
    assert "username" == excinfo.value.parameter_name
def test_valid_public_key(rsa_pub):
valid_keys = [
# RSAPublicKey
rsa_pub,
# DER
rsa_pub.public_bytes(
encoding=serialization.Encoding.DER,
format=serialization.PublicFormat.SubjectPublicKeyInfo
),
rsa_pub.public_bytes(
encoding=serialization.Encoding.DER,
format=serialization.PublicFormat.PKCS1
),
# PEM
rsa_pub.public_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PublicFormat.SubjectPublicKeyInfo
),
rsa_pub.public_bytes(
encoding=serialization.Encoding.DER,
format=serialization.PublicFormat.PKCS1
),
rsa_pub.public_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PublicFormat.SubjectPublicKeyInfo
).decode("utf-8"),
# JWK
{
"n": i2b64(rsa_pub.public_numbers().n),
"e": i2b64(rsa_pub.public_numbers().e)
}
]
for valid_key in valid_keys:
validated = validate("public_key", valid_key)
assert as_der(validated) == as_der(rsa_pub)
def test_invalid_public_key(rsa_pub):
# base64 of DER encoding fails (just use PEM)
encoded_bytes = base64.b64encode(as_der(rsa_pub))
invalid_keys = [
encoded_bytes,
encoded_bytes.decode("utf-8"), # as string
"",
b""
]
for invalid_key in invalid_keys:
with pytest.raises(InvalidParameter) as excinfo:
validate("public_key", invalid_key)
assert "public_key" == excinfo.value.parameter_name
def test_valid_password_hash():
hash = bcrypt.hashpw(b"hunter2", bcrypt.gensalt(4))
assert hash == validate("password_hash", hash)
assert hash == validate("password_hash", hash.decode("utf-8"))
def test_invalid_password_hash():
invalid_hashes = [
"$2a$06$" + "a"*53, # Wrong type (2a, not 2b)
"$2b$aa$" + "a"*53, # rounds must be decimals
"$2b$06$" + "a"*52, # Wrong salt+hash length
"$2b$o6$" + "a"*54, # Wrong salt+hash length
"$2b$o6$" + "?"*53, # Invalid base64 character
"$2b$o6$" + "+"*53, # Nonstandard b64 doesn't include +
]
for invalid_hash in invalid_hashes:
with pytest.raises(InvalidParameter) as excinfo:
validate("password_hash", invalid_hash)
assert "password_hash" == excinfo.value.parameter_name
|
[
"joe.mcross@gmail.com"
] |
joe.mcross@gmail.com
|
7dd73e917d89ec04f027cae79d65301d6b7c2939
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2390/60737/268647.py
|
2acafd5ca353caea1d158f69e30a7ae788966398
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,349
|
py
|
n=int(input())
src=[int(x) for x in input().split()]
ans = 0
def swap_in_array(x,y,k):
for i in range(2**k):
src[x+i],src[y+i]=src[y+i],src[x+i]
def fac(x):
if x<=1:
return 1
return x*fac(x-1)
def judge(start,kind):
for i in range(1,2**kind):
if src[start+i]!=src[start+i-1]+1:
return True
return False
def dfs(kind,before):
global ans
if kind == n + 1:
ans += fac(before)
return
opt1, opt2 = -1, -1
l=2**(kind-1)
for i in range(0,2**n,2*l):
if judge(i,kind):
if opt1==-1:
opt1=i
elif opt2==-1:
opt2=i
else:
break
if opt1==-1 and opt2==-1:
dfs(kind+1,before)
return
elif opt1!=-1 and opt2==-1:
swap_in_array(opt1,opt1+l,kind-1)
dfs(kind+1,before+1)
swap_in_array(opt1, opt1 + l, kind - 1)
elif opt1!=-1 and opt2!=-1:
for i in range(0,l+1,l):
for j in range(0,l+1,l):
swap_in_array(opt1+i,opt2+j,kind-1)
if not judge(opt1,kind) and not judge(opt2,kind):
dfs(kind+1,before+1)
swap_in_array(opt1 + i, opt2 + j, kind-1)
break
swap_in_array(opt1 + i, opt2 + j, kind-1)
dfs(1,0)
print(ans)
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
b98315d1a5ab6145cfe5f1ac344a84defb8be0b5
|
8f1c3c76bf8514818b733ba29fe575d8a5243add
|
/eduerp_facility/models/facility_line.py
|
88d78e63223b5ccf9c0bb05ec4633ea3fbb65e53
|
[
"Apache-2.0"
] |
permissive
|
westlyou/eduerp
|
27f1c7dcd0d2badf50cb6c69f5e761d7f0c6a898
|
968d79b5adc729bc81192604f1fc223517d38ccf
|
refs/heads/master
| 2021-06-04T05:11:13.858246
| 2016-09-12T07:21:17
| 2016-09-12T07:21:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 743
|
py
|
# -*- coding: utf-8 -*-
###############################################################################
#
###############################################################################
from openerp import models, fields, api
from openerp.exceptions import ValidationError
class OpFacilityLine(models.Model):
_name = 'op.facility.line'
_rec_name = 'facility_id'
facility_id = fields.Many2one('op.facility', 'Facility', required=True)
quantity = fields.Float('Quantity', required=True)
@api.constrains('quantity')
def check_quantity(self):
if self.quantity <= 0.0:
raise ValidationError("Enter proper Quantity in Facilities!")
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
[
"huysamdua@yahoo.com"
] |
huysamdua@yahoo.com
|
9943e93a01401ca1cc7c610830d49e72c444c77f
|
59880d47a533cf1f45f927adafff22d5ffb4796a
|
/Python/python_fundamentals/make_read_dictionary.py
|
725d1d1fa73aa6b64157ebfd126bea054281fcdc
|
[] |
no_license
|
mightymcalpine/DojoAssignments
|
2bc7bb791630040dbb62da917a26b74bbdd574e4
|
9c0d80953f6ddbe840314f3d333b5f4590e0c9f4
|
refs/heads/master
| 2021-01-18T00:07:07.128554
| 2017-06-05T16:38:35
| 2017-06-05T16:38:35
| 84,257,743
| 0
| 0
| null | 2017-06-02T05:34:36
| 2017-03-07T23:47:27
|
Python
|
UTF-8
|
Python
| false
| false
| 268
|
py
|
mclp = {
'name': 'Lars',
'age': 35,
'origin': 'USA',
'lang': 'Python'
}
def aboutMe(obj):
print 'My name is', obj['name']
print 'My age is', obj['age']
print 'My country of origin is', obj['origin']
print 'My favorite language is', obj['lang']
aboutMe(mclp)
|
[
"larscodus@gmail.com"
] |
larscodus@gmail.com
|
2d067b2ac1695d0e8ac6f1a1a6161669c6fdea99
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/CsvdwQvNe8hYomcwB_11.py
|
6927a3aaa072012090d89ad2c5a0818099ffc0f4
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264
| 2021-03-23T16:08:01
| 2021-03-23T16:08:01
| 350,773,815
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 697
|
py
|
"""
Create a function similar to Processings "map" function (check the
**Resources** tab), in which a value and its range is taken and remapped to a
new range.
The function takes 5 numbers:
* Value: `value`
* Range: `low1` and `high1`
* Range: `low2` and `high2`
### Examples
remap(7, 2, 12, 0, 100) ➞ 50
remap(17, 5, 55, 100, 30) ➞ 83.2
remap(50, 1, 51, 0, 100) ➞ 98
### Notes
* Test input will always be numbers.
* If the input range is `0`, return `0`.
"""
def remap(value, low1, high1, low2, high2):
h,i,j=value-low1,high1-low1,high2-low2
return 0 if high1-low1==0 else low2+(j/(i/h)) if low1>high1 or low2>high2 or low2<0 else (j/(i/h))
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
20a7a8a8bd2ed8e48b7e48d46e2173c988a07490
|
d094ba0c8a9b1217fbf014aa79a283a49aabe88c
|
/env/lib/python3.6/site-packages/nipype/interfaces/slicer/legacy/tests/test_auto_ExpertAutomatedRegistration.py
|
7fa8b77d63648c814d1199051fd25f8268c9e5b3
|
[
"Apache-2.0"
] |
permissive
|
Raniac/NEURO-LEARN
|
d9274e0baadd97bb02da54bdfcf6ca091fc1c703
|
3c3acc55de8ba741e673063378e6cbaf10b64c7a
|
refs/heads/master
| 2022-12-25T23:46:54.922237
| 2020-09-06T03:15:14
| 2020-09-06T03:15:14
| 182,013,100
| 9
| 2
|
Apache-2.0
| 2022-12-09T21:01:00
| 2019-04-18T03:57:00
|
CSS
|
UTF-8
|
Python
| false
| false
| 2,860
|
py
|
# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from __future__ import unicode_literals
from ..registration import ExpertAutomatedRegistration
def test_ExpertAutomatedRegistration_inputs():
input_map = dict(
affineMaxIterations=dict(argstr='--affineMaxIterations %d', ),
affineSamplingRatio=dict(argstr='--affineSamplingRatio %f', ),
args=dict(argstr='%s', ),
bsplineMaxIterations=dict(argstr='--bsplineMaxIterations %d', ),
bsplineSamplingRatio=dict(argstr='--bsplineSamplingRatio %f', ),
controlPointSpacing=dict(argstr='--controlPointSpacing %d', ),
environ=dict(
nohash=True,
usedefault=True,
),
expectedOffset=dict(argstr='--expectedOffset %f', ),
expectedRotation=dict(argstr='--expectedRotation %f', ),
expectedScale=dict(argstr='--expectedScale %f', ),
expectedSkew=dict(argstr='--expectedSkew %f', ),
fixedImage=dict(
argstr='%s',
position=-2,
),
fixedImageMask=dict(argstr='--fixedImageMask %s', ),
fixedLandmarks=dict(argstr='--fixedLandmarks %s...', ),
initialization=dict(argstr='--initialization %s', ),
interpolation=dict(argstr='--interpolation %s', ),
loadTransform=dict(argstr='--loadTransform %s', ),
metric=dict(argstr='--metric %s', ),
minimizeMemory=dict(argstr='--minimizeMemory ', ),
movingImage=dict(
argstr='%s',
position=-1,
),
movingLandmarks=dict(argstr='--movingLandmarks %s...', ),
numberOfThreads=dict(argstr='--numberOfThreads %d', ),
randomNumberSeed=dict(argstr='--randomNumberSeed %d', ),
registration=dict(argstr='--registration %s', ),
resampledImage=dict(
argstr='--resampledImage %s',
hash_files=False,
),
rigidMaxIterations=dict(argstr='--rigidMaxIterations %d', ),
rigidSamplingRatio=dict(argstr='--rigidSamplingRatio %f', ),
sampleFromOverlap=dict(argstr='--sampleFromOverlap ', ),
saveTransform=dict(
argstr='--saveTransform %s',
hash_files=False,
),
verbosityLevel=dict(argstr='--verbosityLevel %s', ),
)
inputs = ExpertAutomatedRegistration.input_spec()
for key, metadata in list(input_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(inputs.traits()[key], metakey) == value
def test_ExpertAutomatedRegistration_outputs():
output_map = dict(
resampledImage=dict(),
saveTransform=dict(),
)
outputs = ExpertAutomatedRegistration.output_spec()
for key, metadata in list(output_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(outputs.traits()[key], metakey) == value
|
[
"leibingye@outlook.com"
] |
leibingye@outlook.com
|
e2ea868419ba343d752775edae4710654301638e
|
cbd6d41e836348e4aabd122e5677e47e270fd417
|
/logwatch.py
|
d76ed7718777cef3481c7e576afa9e6ae190dd2d
|
[] |
no_license
|
gkasieczka/cms-bot
|
750879de82a5f51f781e3d3dacc7e59dd582c681
|
06a07a9c389a13a9694578883a171f597af60998
|
refs/heads/master
| 2021-01-23T01:46:25.212167
| 2016-08-29T09:47:54
| 2016-08-29T09:47:54
| 66,856,373
| 0
| 0
| null | 2016-08-29T15:34:44
| 2016-08-29T15:34:43
| null |
UTF-8
|
Python
| false
| false
| 2,116
|
py
|
#!/usr/bin/env python
from os.path import exists, join, basename
from sys import exit
from commands import getstatusoutput
from hashlib import sha256
def run_cmd (cmd, exit_on_error=True):
err, out = getstatusoutput (cmd)
if err and exit_on_error:
print out
exit (1)
return out
class logwatch (object):
def __init__ (self, service, log_dir="/var/log"):
self.log_dir = join(log_dir,"logwatch_" + service)
def process(self, logs, callback, **kwrds):
if not logs: return True, 0
info_file = join(self.log_dir, "info")
if not exists ("%s/logs" % self.log_dir): run_cmd ("mkdir -p %s/logs" % self.log_dir)
prev_lnum, prev_hash, count, data = -1, "", 0, []
if exists(info_file):
prev_hash,ln = run_cmd("head -1 %s" % info_file).strip().split(" ",1)
prev_lnum = int(ln)
if prev_lnum<1: prev_lnum=1
for log in reversed(logs):
service_log = join (self.log_dir, "logs", basename(log))
run_cmd ("rsync -a %s %s" % (log, service_log))
cur_hash = sha256(run_cmd("head -1 %s" % service_log)).hexdigest()
data.insert(0,[log , service_log, 1, cur_hash, False])
if (prev_lnum>0) and (cur_hash == prev_hash):
data[0][2] = prev_lnum
break
data[-1][4] = True
for item in data:
lnum, service_log = item[2], item[1]
get_lines_cmd = "tail -n +%s %s" % (str(lnum), service_log)
if lnum<=1: get_lines_cmd = "cat %s" % service_log
print "Processing %s:%s" % (item[0], str(lnum))
lnum -= 1
for line in run_cmd (get_lines_cmd).split ("\n"):
count += 1
lnum += 1
try: ok = callback(line, count, **kwrds)
except: ok = False
if not ok:
if (prev_lnum!=lnum) or (prev_hash!=item[3]):
run_cmd("echo '%s %s' > %s" % (item[3], str(lnum),info_file))
return status, count
if (prev_lnum!=lnum) or (prev_hash!=item[3]):
prev_lnum=-1
cmd = "echo '%s %s' > %s" % (item[3], str(lnum),info_file)
if not item[4]: cmd = cmd + " && rm -f %s" % service_log
run_cmd(cmd)
return True, count
|
[
"Shahzad.Malik.Muzaffar@cern.ch"
] |
Shahzad.Malik.Muzaffar@cern.ch
|
aa03e5c285a6c880213dfae57d40bc28041b2a2c
|
b0d2033578705c14d9a65a604519be06450d85ae
|
/Leetcode/Two_Sum.py
|
90888ad7e70dfd52198219b8ae1a5603b3ee4cf0
|
[] |
no_license
|
Pavithra-Rajan/DSA-Practice
|
cc515c4dd4f5a37b026640b9a5edfdabf8f9be9d
|
5fee51ee91c2125c084b768a5d5b35a48031506c
|
refs/heads/main
| 2023-09-03T11:47:25.791511
| 2021-11-16T19:39:00
| 2021-11-16T19:39:00
| 374,404,029
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 569
|
py
|
class Solution:
def twoSum(self, nums: List[int], target: int) -> List[int]:
dic={} # potential value dictionary
for i in range(len(nums)):
if nums[i] in dic:
return [dic[nums[i]],i]
else:
dic[target-nums[i]]=i
"""ret=[]
for i in range(0,len(nums)-1):
for j in range(i+1,len(nums)):
if nums[i]+nums[j]==target:
ret.append(i)
ret.append(j)
break
return ret"""
|
[
"pavithra.rajan01@gmail.com"
] |
pavithra.rajan01@gmail.com
|
0ac75cb30e5edbac6f78fe149404bea2aa7cbaf4
|
add74ecbd87c711f1e10898f87ffd31bb39cc5d6
|
/xcp2k/classes/_each197.py
|
4136b1ca69a3780d7e42a9415431bdb3b05ed28e
|
[] |
no_license
|
superstar54/xcp2k
|
82071e29613ccf58fc14e684154bb9392d00458b
|
e8afae2ccb4b777ddd3731fe99f451b56d416a83
|
refs/heads/master
| 2021-11-11T21:17:30.292500
| 2021-11-06T06:31:20
| 2021-11-06T06:31:20
| 62,589,715
| 8
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,113
|
py
|
from xcp2k.inputsection import InputSection
class _each197(InputSection):
def __init__(self):
InputSection.__init__(self)
self.Just_energy = None
self.Powell_opt = None
self.Qs_scf = None
self.Xas_scf = None
self.Md = None
self.Pint = None
self.Metadynamics = None
self.Geo_opt = None
self.Rot_opt = None
self.Cell_opt = None
self.Band = None
self.Ep_lin_solver = None
self.Spline_find_coeffs = None
self.Replica_eval = None
self.Bsse = None
self.Shell_opt = None
self.Tddft_scf = None
self._name = "EACH"
self._keywords = {'Just_energy': 'JUST_ENERGY', 'Powell_opt': 'POWELL_OPT', 'Qs_scf': 'QS_SCF', 'Xas_scf': 'XAS_SCF', 'Md': 'MD', 'Pint': 'PINT', 'Metadynamics': 'METADYNAMICS', 'Geo_opt': 'GEO_OPT', 'Rot_opt': 'ROT_OPT', 'Cell_opt': 'CELL_OPT', 'Band': 'BAND', 'Ep_lin_solver': 'EP_LIN_SOLVER', 'Spline_find_coeffs': 'SPLINE_FIND_COEFFS', 'Replica_eval': 'REPLICA_EVAL', 'Bsse': 'BSSE', 'Shell_opt': 'SHELL_OPT', 'Tddft_scf': 'TDDFT_SCF'}
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
36c5c8842a88345e586c78a92b069ef8e139664d
|
06f7ffdae684ac3cc258c45c3daabce98243f64f
|
/vsts/vsts/extension_management/v4_0/models/extension_state.py
|
9118bdb8dc6b0f345d585860138c0bce5bf482df
|
[
"MIT",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
kenkuo/azure-devops-python-api
|
7dbfb35f1c9637c9db10207824dd535c4d6861e8
|
9ac38a97a06ee9e0ee56530de170154f6ed39c98
|
refs/heads/master
| 2020-04-03T17:47:29.526104
| 2018-10-25T17:46:09
| 2018-10-25T17:46:09
| 155,459,045
| 0
| 0
|
MIT
| 2018-10-30T21:32:43
| 2018-10-30T21:32:42
| null |
UTF-8
|
Python
| false
| false
| 2,344
|
py
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from .installed_extension_state import InstalledExtensionState
class ExtensionState(InstalledExtensionState):
"""ExtensionState.
:param flags: States of an installed extension
:type flags: object
:param installation_issues: List of installation issues
:type installation_issues: list of :class:`InstalledExtensionStateIssue <extension-management.v4_0.models.InstalledExtensionStateIssue>`
:param last_updated: The time at which this installation was last updated
:type last_updated: datetime
:param extension_name:
:type extension_name: str
:param last_version_check: The time at which the version was last checked
:type last_version_check: datetime
:param publisher_name:
:type publisher_name: str
:param version:
:type version: str
"""
_attribute_map = {
'flags': {'key': 'flags', 'type': 'object'},
'installation_issues': {'key': 'installationIssues', 'type': '[InstalledExtensionStateIssue]'},
'last_updated': {'key': 'lastUpdated', 'type': 'iso-8601'},
'extension_name': {'key': 'extensionName', 'type': 'str'},
'last_version_check': {'key': 'lastVersionCheck', 'type': 'iso-8601'},
'publisher_name': {'key': 'publisherName', 'type': 'str'},
'version': {'key': 'version', 'type': 'str'}
}
def __init__(self, flags=None, installation_issues=None, last_updated=None, extension_name=None, last_version_check=None, publisher_name=None, version=None):
super(ExtensionState, self).__init__(flags=flags, installation_issues=installation_issues, last_updated=last_updated)
self.extension_name = extension_name
self.last_version_check = last_version_check
self.publisher_name = publisher_name
self.version = version
|
[
"tedchamb@microsoft.com"
] |
tedchamb@microsoft.com
|
7435d09d750a32d9ad61e573a52cf8f14a6cd851
|
88e200b437f6867b525b680615982b86a1950052
|
/pyramid_pony/route_factory.py
|
21cfb9a4757fe43bc78aaed0c133e0ddb9fa47c3
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
knzm/pyramid_pony
|
3abe089ecbc12824a2b4e06e6c2799c8c04de7ed
|
1c9d5503c6e3ce73c7416e215621c481f817be1b
|
refs/heads/master
| 2021-01-20T21:59:24.236296
| 2012-12-21T13:14:51
| 2012-12-21T13:14:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,076
|
py
|
import base64
import zlib
from pyramid.response import Response
from pyramid.decorator import reify
from .pony import PONY, UNICORN, TEMPLATE
class PonyContext(object):
def __init__(self, request):
self.request = request
if request.params.get("horn"):
self.data = UNICORN
self.link = "remove horn!"
self.url = request.path
else:
self.data = PONY
self.link = "add horn!"
self.url = request.path + "?horn=1"
@reify
def home(self):
self.request.script_name or "/"
def decode(self, data):
data = base64.b64decode(data)
return zlib.decompress(data).decode('ascii')
def view(request):
context = request.context
data = context.data
html = TEMPLATE.format(
animal=context.decode(data),
url=context.url,
link=context.link,
home=context.home)
return Response(html)
def includeme(config):
config.add_route("pony", "/pony", factory=PonyContext)
config.add_view(view, route_name='pony')
|
[
"nozom.kaneko@gmail.com"
] |
nozom.kaneko@gmail.com
|
cc1b44e28dc088fc406e02d8e4a51fc84ad24327
|
033d29637f5839a5b18c0a93296efabaf1f532ce
|
/misc/largest_k_-k.py
|
abc2ea7d70c38b1a3537c58b2333b837ee81d87d
|
[] |
no_license
|
dhumindesai/Problem-Solving
|
80ea996010b7d802b6479d91117e981e88139d17
|
97dab280378bf8d950b75caec9f7f62c71db812c
|
refs/heads/master
| 2021-12-04T07:29:40.851218
| 2021-11-27T17:33:18
| 2021-11-27T17:33:18
| 240,163,496
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 988
|
py
|
'''
nlogn
1
'''
def solution(nums):
if len(nums) < 2:
return 0
nums.sort()
first = 0
last = len(nums) - 1
while first < last and nums[first] < 0 and nums[last] > 0:
if abs(nums[first]) == nums[last]:
return nums[last]
if abs(nums[first]) > nums[last]:
first += 1
else:
last -= 1
return 0
'''
O(n)
O(n)
'''
def solution_2(nums):
if len(nums) < 2:
return 0
seen = set()
for num in nums:
seen.add(num)
result = 0
for num in nums:
if num < 0 and abs(num) in seen:
result = max(result, abs(num))
if num > 0 and -num in seen:
result = max(result, num)
return result
print(solution([3, 2, -2, 5, -3]))
print(solution([1, 2, 3, -4]))
print(solution([100, 100, -100, -2, -2, 2, 1, -1]))
print()
print(solution_2([3, 2, -2, 5, -3]))
print(solution_2([1, 2, 3, -4]))
print(solution_2([100, 100, -100, -2, -2, 2, 1, -1]))
|
[
"dhrumin.desai28@gmail.com"
] |
dhrumin.desai28@gmail.com
|
840a73d19aa4538be797d0689823d100f210cb1c
|
e35c72a64a0c279bfb223fef74cc4274626b75f8
|
/MovieProject/MovieProject/wsgi.py
|
effee42efb370320fc56988c74390c6e42855f6b
|
[
"MIT"
] |
permissive
|
zhumakova/MovieProject
|
0b28a8c02c22e4888796567d922d0620ded2e65e
|
751326367ddf25a8762617c2cc6bdcb8e0f38eff
|
refs/heads/main
| 2023-06-03T23:51:00.952682
| 2021-06-29T09:39:03
| 2021-06-29T09:39:03
| 376,356,669
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 401
|
py
|
"""
WSGI config for MovieProject project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'MovieProject.settings')
application = get_wsgi_application()
|
[
"bzhumakova@gmail.com"
] |
bzhumakova@gmail.com
|
455f694797086a12a6d3cf38d631a3b1619ce791
|
071416b05026dfc5e21e32c7f1846ab8475acf97
|
/regularexpresion/vhcle.py
|
002ceb0f770b467d834b8e03d0e3f5a4a92594b1
|
[] |
no_license
|
Sreerag07/bankproject
|
a7acf65b45c9e5c3ccace7ff3d755c33cf8a4fb0
|
bb28e7c92cbfa1c1810d20eb4767a479eee5f015
|
refs/heads/master
| 2023-04-20T01:15:47.496059
| 2021-05-19T08:03:54
| 2021-05-19T08:03:54
| 368,788,580
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 361
|
py
|
# import re
#
# n=input("Enter the number")
# x='[A-Z]{2}[\d]{2}[A-Z]{1}[\d]{4}$'
# match=re.fullmatch(x,n)
# if match is not None:
# print("valid")
# else:
# print("invalid")
import re
n=input("Enter the number")
x='[A-Za-z0-9][@][g][m][a][i][l][.][c][o][m]$'
match=re.fullmatch(x,n)
if match is not None:
print("valid")
else:
print("invalid")
|
[
"sgpakkam@gmail.com"
] |
sgpakkam@gmail.com
|
4c56aadf4698771dcbc22a690ca36a0bc8fb8628
|
6ac0aeea8229c4e2c7a041e85c3afeeb106c6b01
|
/use_kapl_util.py
|
38117a5ea53acf4d7f2d27eef75d14b2812f312f
|
[] |
no_license
|
waiteb15/py3intro
|
325dafaaa642052280d6c050eacf8b406b40e01d
|
68b30f147e7408220490a46d3e595acd60513e9e
|
refs/heads/master
| 2020-03-27T10:50:25.928836
| 2019-02-28T21:47:11
| 2019-02-28T21:47:11
| 146,448,412
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 153
|
py
|
#!/usr/bin/env python
import KAPL_UTIL
KAPL_UTIL.spam()
KAPL_UTIL.ham()
# DON't do this, its supposed to be private...ssshhhh!
#KAPL_UTIL._eggs()
|
[
"waiteb15@gmail.com"
] |
waiteb15@gmail.com
|
125feb3e297701f0b402bc58baa66f53b7b43a05
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03427/s426447268.py
|
447a51a40727345bcb9f33c63d6d42f8cb865b83
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 275
|
py
|
N = list(input())
num = len(N)
if N.count('9') == num:
print(9*num)
elif N.count('9') == num-1 and N[0] != '9':
print(int(N[0])+9*(num-1))
else:
if num <= 1:
print(''.join(N))
else:
ans = int(N[0])-1
ans += 9*(num-1)
print(ans)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
e7d145c6fcadca01a666a56be951e42f0637c3d7
|
aea7b59d10a72ccac35a06e6d31a508771efc5a0
|
/Day 26/AppendAndDelete.py
|
e2135a134841178d356dbe8bc4a59ede08a84ed8
|
[] |
no_license
|
divyatejakotteti/100DaysOfCode
|
d52871b27146cd1b1ef2b997e93b9a96ca8ac5a9
|
3c8555b021482565f56d7fb86fa5dacb304dfd3c
|
refs/heads/master
| 2023-02-01T10:48:19.082268
| 2020-12-21T06:34:02
| 2020-12-21T06:34:02
| 294,888,731
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,827
|
py
|
'''
You have a string of lowercase English alphabetic letters. You can perform two types of operations on the string:
Append a lowercase English alphabetic letter to the end of the string.
Delete the last character in the string. Performing this operation on an empty string results in an empty string.
Given an integer,
, and two strings, and , determine whether or not you can convert to by performing exactly of the above operations on . If it's possible, print Yes. Otherwise, print No.
Function Description
Complete the appendAndDelete function in the editor below. It should return a string, either Yes or No.
appendAndDelete has the following parameter(s):
s: the initial string
t: the desired string
k: an integer that represents the number of operations
Input Format
The first line contains a string
, the initial string.
The second line contains a string , the desired final string.
The third line contains an integer
, the number of operations.
Constraints
s and t consist of lowercase English alphabetic letters,.
Output Format
Print Yes if you can obtain string
by performing exactly operations on
. Otherwise, print No.
Sample Input 0
hackerhappy
hackerrank
9
Sample Output 0
Yes
'''
#!/bin/python3
import math
import os
import random
import re
import sys
# Complete the appendAndDelete function below.
def appendAndDelete(s, t, k):
count=0
for i,j in zip(s,t):
if(i==j):
count+=1
else:
break
t_len=len(s)-len(t)
if(t_len<=2*count+k and t_len%2==k%2 or t_len<k):
return "Yes"
else:
return "No"
if __name__ == '__main__':
s = input()
t = input()
k = int(input())
result = appendAndDelete(s, t, k)
|
[
"noreply@github.com"
] |
divyatejakotteti.noreply@github.com
|
de0396149e70d39521730b0fbb62dbbccd8ee1ee
|
0193e4024c8236db023558d70233f988e55b5a21
|
/sdk/python/tekton_pipeline/models/v1beta1_pipeline.py
|
353baffa0a352f493e12c828334ae2d4be61bd28
|
[
"Apache-2.0"
] |
permissive
|
tektoncd/experimental
|
3ae1202cab489b3ba631dfc15223b13ac9215ed1
|
ee13de632e126a5595944d6303bae36cad4555b7
|
refs/heads/main
| 2023-08-31T16:57:09.444985
| 2023-06-22T15:59:01
| 2023-07-03T11:22:17
| 180,445,039
| 101
| 138
|
Apache-2.0
| 2023-09-14T11:41:30
| 2019-04-09T20:32:54
|
Python
|
UTF-8
|
Python
| false
| false
| 7,084
|
py
|
# Copyright 2021 The Tekton Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
"""
Tekton
Tekton Pipeline # noqa: E501
The version of the OpenAPI document: v0.17.2
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from tekton_pipeline.configuration import Configuration
class V1beta1Pipeline(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'api_version': 'str',
'kind': 'str',
'metadata': 'V1ObjectMeta',
'spec': 'V1beta1PipelineSpec'
}
attribute_map = {
'api_version': 'apiVersion',
'kind': 'kind',
'metadata': 'metadata',
'spec': 'spec'
}
def __init__(self, api_version=None, kind=None, metadata=None, spec=None, local_vars_configuration=None): # noqa: E501
"""V1beta1Pipeline - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._api_version = None
self._kind = None
self._metadata = None
self._spec = None
self.discriminator = None
if api_version is not None:
self.api_version = api_version
if kind is not None:
self.kind = kind
if metadata is not None:
self.metadata = metadata
if spec is not None:
self.spec = spec
@property
def api_version(self):
"""Gets the api_version of this V1beta1Pipeline. # noqa: E501
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:return: The api_version of this V1beta1Pipeline. # noqa: E501
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""Sets the api_version of this V1beta1Pipeline.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:param api_version: The api_version of this V1beta1Pipeline. # noqa: E501
:type: str
"""
self._api_version = api_version
@property
def kind(self):
"""Gets the kind of this V1beta1Pipeline. # noqa: E501
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:return: The kind of this V1beta1Pipeline. # noqa: E501
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""Sets the kind of this V1beta1Pipeline.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:param kind: The kind of this V1beta1Pipeline. # noqa: E501
:type: str
"""
self._kind = kind
@property
def metadata(self):
"""Gets the metadata of this V1beta1Pipeline. # noqa: E501
:return: The metadata of this V1beta1Pipeline. # noqa: E501
:rtype: V1ObjectMeta
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""Sets the metadata of this V1beta1Pipeline.
:param metadata: The metadata of this V1beta1Pipeline. # noqa: E501
:type: V1ObjectMeta
"""
self._metadata = metadata
@property
def spec(self):
"""Gets the spec of this V1beta1Pipeline. # noqa: E501
:return: The spec of this V1beta1Pipeline. # noqa: E501
:rtype: V1beta1PipelineSpec
"""
return self._spec
@spec.setter
def spec(self, spec):
"""Sets the spec of this V1beta1Pipeline.
:param spec: The spec of this V1beta1Pipeline. # noqa: E501
:type: V1beta1PipelineSpec
"""
self._spec = spec
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1beta1Pipeline):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1beta1Pipeline):
return True
return self.to_dict() != other.to_dict()
|
[
"48335577+tekton-robot@users.noreply.github.com"
] |
48335577+tekton-robot@users.noreply.github.com
|
673c796757af9133dba1e31c837b91a67964f6db
|
60e34c75afec810f4b1c2c82495d8d3017f32d33
|
/02栈和队列/01Stack_list.py
|
0150ee592e0a398ad418dd1bc8eed5a13afc24a8
|
[] |
no_license
|
ares5221/Data-Structures-and-Algorithms
|
af97c6b34b810c37f152af595846870a7b9b304b
|
7c51eee0c375136f995cc063ffc60d33a520d748
|
refs/heads/master
| 2021-07-17T21:18:46.556958
| 2018-12-03T07:30:13
| 2018-12-03T07:30:13
| 144,227,642
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,709
|
py
|
# coding = utf-8
class Stack(object):
'''用顺序表实现栈'''
def __init__(self):
'''初始化栈为空列表'''
self.items = []
def isEmpty(self):
'''判断栈是否为空'''
return self.items == []
def size(self):
'''返回栈的大小'''
return len(self.items)
def pop(self):
'''出栈'''
if len(self.items) == 0:
print("Stack is empty, you can not pop anything")
return
del self.items[len(self.items) - 1]
return
def push(self, num):
'''入栈'''
self.items.append(num)
return
def peek(self):
'''返回栈顶元素'''
if self.items == []:
print("Stack is empty, no peek")
return
return self.items[len(self.items) - 1]
def printStack(self):
'''打印栈'''
for k in self.items:
print(k, end=" ")
print("")
def delete(self):
'''销毁栈'''
k = len(self.items)
i = 0
while k > 0:
del self.items[0]
k -= 1
del k
del i
print("delete Stack successfully!")
if '__main__' == __name__:
List = [1, 2, 3, 4, 5, 6]
l = Stack()
print("将List压入栈中:", end=" ")
for i in List:
l.push(i)
l.printStack()
print("栈是否为空:", end=" ")
print("空" if l.isEmpty() == True else "非空")
print("栈的大小为:%d" % l.size())
print("出栈:", end=" ")
l.pop()
l.printStack()
print("入栈(num=10):", end=" ")
l.push(10)
l.printStack()
print("栈顶元素为:%d" % l.peek())
l.delete()
|
[
"674361437@qq.com"
] |
674361437@qq.com
|
79247f284ea58a8aab0d7a2f936b3d0b229c43a0
|
141c5ef07df60b1c9f726e4605b78a2a7c1243e9
|
/meross_iot/model/plugin/power.py
|
d9e4a85dc275ae2f69b7c4409a9f4aac0062d785
|
[
"MIT"
] |
permissive
|
albertogeniola/MerossIot
|
cd8abaac236a7fb442bdf9613c7e6760123c8bd3
|
de1c22696511eee106961da3f22d3030ed9c254c
|
refs/heads/0.4.X.X
| 2023-09-01T11:11:09.793153
| 2023-04-01T15:15:50
| 2023-04-01T15:15:50
| 146,365,723
| 467
| 102
|
MIT
| 2023-09-11T06:42:13
| 2018-08-27T23:30:56
|
Python
|
UTF-8
|
Python
| false
| false
| 780
|
py
|
from datetime import datetime
class PowerInfo(object):
def __init__(self, current_ampere: float, voltage_volts: float, power_watts: float, sample_timestamp: datetime):
self._current = current_ampere
self._voltage = voltage_volts
self._power = power_watts
self._sample_timestamp = sample_timestamp
@property
def power(self) -> float:
return self._power
@property
def voltage(self) -> float:
return self._voltage
@property
def current(self) -> float:
return self._current
@property
def sample_timestamp(self) -> datetime:
return self._sample_timestamp
def __str__(self):
return f"POWER = {self._power} W, VOLTAGE = {self._voltage} V, CURRENT = {self._current} A"
|
[
"albertogeniola@gmail.com"
] |
albertogeniola@gmail.com
|
9d40a317734b30fd9371baa7c53d54f329c4474e
|
52b5773617a1b972a905de4d692540d26ff74926
|
/.history/binaryTree_20200623143621.py
|
f8964b2ad95245e959bc54fa0f8e60157601b933
|
[] |
no_license
|
MaryanneNjeri/pythonModules
|
56f54bf098ae58ea069bf33f11ae94fa8eedcabc
|
f4e56b1e4dda2349267af634a46f6b9df6686020
|
refs/heads/master
| 2022-12-16T02:59:19.896129
| 2020-09-11T12:05:22
| 2020-09-11T12:05:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,215
|
py
|
# define node class
class Node(object):
# constructor
def __init__(self,value):
self.value = value
self.left = None
self.right = None
# define binary tree class
class BinaryTree(object):
def __init__(self,root):
# converting data
self.root = Node(root)
def print_tree(self,traversal_type):
if traversal_type == "preorder":
return self.preorder_print(tree.root," ")
elif traversal_type == "inorder":
return self.inorder_print (tree.root,"",0)
elif traversal_type == "postorder":
return self.postorder_print(tree.root," ")
else:
print("Traversal type" + str(traversal_type) + "is not supported.")
return False
# root -->left--->right(preorder)
def preorder_print(self,start,traversal):
if start:
traversal += (str(start.value) + "-")
# calling the function recursively
traversal = self.preorder_print(start.left,traversal)
traversal = self.preorder_print(start.right,traversal)
return traversal
# left - root -right
def inorder_print(self,start,traversal,count):
if start:
traversal = self.inorder_print(start.left,traversal,count =count+1)
traversal += (str(start.value) + "-")
traversal = self.inorder_print(start.right,traversal,count =count+1)
return (traversal
# left ->right -> root
def postorder_print(self,start,traversal):
if start:
traversal = self.postorder_print(start.left,traversal)
traversal = self.postorder_print(start.right,traversal)
traversal +=(str(start.value) + "-" )
return traversal
# 1 is root
# creating left child
'''
1
# / \
2 3
/ \
4 5
'''
tree = BinaryTree(1)
tree.root.left = Node(2)
tree.root.right = Node(3)
tree.root.left.left = Node(4)
tree.root.left.right = Node(5)
tree.root.right.right = Node(7)
tree.root.right.left = Node(6)
# print(tree.print_tree("preorder"))
print(tree.print_tree("inorder"))
# print(tree.print_tree("postorder"))
|
[
"mary.jereh@gmail.com"
] |
mary.jereh@gmail.com
|
e30693a3a31fad2a6436482b4dcaae11d3c8a9ef
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/303/usersdata/295/92773/submittedfiles/testes.py
|
edca1d262cf945e495df921179da0f07739e2f99
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,041
|
py
|
a = []
n = 4
for i in range(0,n,1):
a.append(input("Digite umvalor:"))
for i in range(0,n,1):
if a[n] %2 ==0:
print(a[n])
a = [2,4,6,8,10]
n = 2
for i in range (0,len(a),1):
print (a[i]//2)
print("----------------------------------------------------")
for i in range(1,11,1):
a.append(float(input("Digite o elemento :")))
print(a)
print(sum(a))
print(len(a))
del a[1]
print(a)
print(len(a))
for i in range(9 , -1, -1):
print(a[i])
print("----------------------------------------------------")
a = []
for i in range(1,11,1):
a.append(input("Digite o elemento :"))
print(a)
for i in range(9 , -1, -1):
print(a[i])
n = int(input("Digite o numero de notas :"))
while n<=1:
n = int(input("Digite o numero de notas :"))
notas = []
for i in range(0,n,1):
notas.append(float(input("Digite a nota%d[%d]: "% (i+1, i))))
media = 0
for i in range(0,n,1):
media += notas[i]/float(n)
print(notas)
print(media)
notas = []
for i in range(0,50,1):
notas.append(float(input("Digite a nota%d[%d]: "% (i+1, i))))
media = 0
for i in range(0,50,1):
media += notas[i]/50.0
print(notas)
print(media)
n = int(input("Digite um número inteiro nao negativo :"))
i = 1
cont = 1
while i<=n:
if n>0:
cont = cont*i
i = i + 1
print("%d! = %d" % (n,cont))
def primo(n):
contador = 0
for i in range(2,n,1):
if n%i == 0:
contador += 1
break
if contador == 0:
return True
else:
return False
print(primo(11))
def raiz(x,n):
resultado = x**(1/float(n))
return resultado
print(raiz(8,3))
n = int(input("Digite o valor de n :"))
i = 1
cont = 0
while i<n:
if i%2==1:
cont = cont + 1
i = i + 1
print(cont)
a = float(input("Digite um numero :"))
b = a/15
print("%.4f" % b)
a = int(input("Digite um numero :"))
if a%2==0:
print("par")
else:
print("impar")
a = int(input("Digite um numero a:"))
b = int(input("Digite um numero b:"))
if (a + b) > 10:
print(a)
if (a + b) > 10:
print(b)
a = float(input("Digite um numero :"))
if a > 20:
print(a)
p1 = float(input("Digite o peso 1 :"))
c1 = float(input("Digite o comprimento 1 :"))
p2 = float(input("Digite o peso 2 :"))
c2 = float(input("Digite o comprimento 2 :"))
if (p1*c1) == (p2*c2):
print("0")
elif (p1*c1) > (p2*c2):
print("-1")
else:
print("1")
p = float(input("Digite o peso: "))
h = float(input("Digite a altura: "))
imc = p/(h**2)
if imc<20:
print("ABAIXO")
#ENTRADA
a = int(input("que horas são? (0-23) "))
#PROCESSAMENTO E SAÍDA
if a >= 3 and a < 12:
print("BOM DIA")
elif a >= 12 and a < 18:
print("Boa tarde")
elif a < 3:
print("Boa noite")
elif a >= 18:
print("Boa noite")
else:
print("hora invalida")
"""if a < 0 or a > 23:
print("Hora invalida")
else:
if a > 3 and a < 12:
print("Bom dia")
elif a >= 12 and a < 18:
print("Boa tarde")
else:
print("Boa noite")"""
a= (5%2)!=0
print(a)
a = float(input("digite o ano:"))
b = float(input("é o mundial do palmeiras ou a cachaça? digite 1 ou 2 respectivamente:"))
c = float(input("se responder mundial ta falso"))
n1 = float(input("Digite n1:"))
n2 = float(input("Digite n2:"))
n3 = float(input("Digite n3:"))
total = (n1+n2+n3)
print(total)
a = (10//5)%3
print(a)
a = float(input("Digite a:"))
b = float(input("Digite b:"))
c = a+b/2
print(c)
a = 5.2
print("a=%.5f" % a)
unidade=float(input("digite uma medida em metros: "))
converte=(unidade*100)
print("o valor em cetimetros da unidade é %2.f" %converte)
nota1=float(input("digite nota 1: "))
print(nota1)
nota2=float(input("digite nota 2: "))
print(nota2)
nota3=float(input("digite nota 3: "))
print(nota3)
nota4=float(input("digite nota 4: "))
print(nota4)
media=((nota1+nota2+nota3+nota4)/4)
print("----------------------------")
print("a media do aluno eh %2.f" % media)
print("----------------------------")
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
b2e92562247cf0977bc31277c4a90d20cf359b35
|
cf39421d0948655f587445a484bf04fd1986a06f
|
/microsvc/subgraph.py
|
11607d214b7cebee5e957d00f18d476d4148f5c3
|
[
"CC0-1.0"
] |
permissive
|
aidaakuyeva/RichContextMetadata
|
1b475a94bd3d7ad0118bc5faeb585cd9e7209f59
|
2b038e69a6cc234dd5354e6e056b5b46fec2f3ba
|
refs/heads/master
| 2023-01-11T04:04:15.630422
| 2020-11-16T16:12:29
| 2020-11-16T16:12:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,535
|
py
|
#!/usr/bin/env python
# encoding: utf-8
import json
import networkx as nx
import os
import rdflib
import sys
import tempfile
TTL_PREAMBLE = """
@prefix cito: <http://purl.org/spar/cito/> .
@prefix dct: <http://purl.org/dc/terms/> .
@prefix foaf: <http://xmlns.com/foaf/0.1/> .
@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
@prefix xsd: <http://www.w3.org/2001/XMLSchema#> .
"""
def wrap_token (t):
if t.startswith("http"):
return "<{}>".format(t)
else:
return "\"{}\"".format(t)
def write_triple (f, s, p, o):
line = "{} {} {} .\n".format(wrap_token(s), wrap_token(p), wrap_token(o))
f.write(line.encode("utf-8"))
if __name__ == "__main__":
filename = sys.argv[1]
term = sys.argv[2]
## load the JSON-LD context
with open("vocab.json", "r") as f:
context = json.load(f)
## write TTL results to a temporary file, for JSON-LD conversion later
f = tempfile.NamedTemporaryFile(delete=False)
f.write(TTL_PREAMBLE.encode("utf-8"))
# load the graph, collected triples related to the search term
graph = rdflib.Graph().parse(filename, format="n3")
for s, p, o in graph:
if s.endswith(term):
write_triple(f, s, p, o)
elif o.endswith(term):
write_triple(f, s, p, o)
f.close()
# serialize the graph as JSON-LD
graph = rdflib.Graph().parse(f.name, format="n3")
os.unlink(f.name)
response = graph.serialize(format="json-ld", context=context, indent=None)
print(response)
|
[
"ceteri@gmail.com"
] |
ceteri@gmail.com
|
39f792696bd72e0fde485771c5c094cc4889aeca
|
5095047656d0c2e64f65d1236dbdd3e30ee091eb
|
/lintcode/easy/39_recover_rotated_sorted_array.py
|
5edeabdc9b69503f8830e8a7a40bbc2c86ed449d
|
[] |
no_license
|
simonfqy/SimonfqyGitHub
|
3799fa9e868010864973700fdb8be5d37f6c2560
|
fa3704af37d9e04ab6fd13b7b17cc83c239946f7
|
refs/heads/master
| 2023-04-05T00:33:00.989677
| 2023-03-29T06:58:21
| 2023-03-29T06:58:21
| 33,021,240
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 995
|
py
|
'''
Link: https://www.lintcode.com/problem/recover-rotated-sorted-array/description
'''
# This is my own solution based on the teachings in Jiuzhang.com. It is sorting in-place.
class Solution:
"""
@param nums: An integer array
@return: nothing
"""
def recoverRotatedSortedArray(self, nums):
# write your code here
if not len(nums):
return
ind_smallest_ele = 0
smallest_ele = nums[0]
for i, num in enumerate(nums):
if num < smallest_ele:
ind_smallest_ele = i
smallest_ele = num
self.reverse(nums, 0, ind_smallest_ele - 1)
self.reverse(nums, ind_smallest_ele, len(nums) - 1)
self.reverse(nums, 0, len(nums) - 1)
def reverse(self, nums, start, end):
while start < end:
left_ele = nums[start]
nums[start] = nums[end]
nums[end] = left_ele
start += 1
end -= 1
|
[
"noreply@github.com"
] |
simonfqy.noreply@github.com
|
721df6fff854cdc440ec2a31030ed78152f343bc
|
352454c055f91b6997e742bbda3e3a17580a499f
|
/src/scrub/__init__.py
|
9ce55cb33f5646907c4bd090bfd3dea99fd1792c
|
[] |
no_license
|
dushyantkhosla/ds-docker-walkthru-titanic
|
533fcca39e54de7e904b41978437788c9206490e
|
7534b29776f829633e4efcb999b37694ad9e27b3
|
refs/heads/master
| 2021-09-12T09:31:59.098462
| 2018-04-05T13:55:00
| 2018-04-05T13:55:00
| 117,235,904
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 146
|
py
|
from .scrub import get_clean_data, scrub_raw_data
from .compress import compress_numeric, compress_categorical
from .clip import clip_categorical
|
[
"dushyant.khosla@pmi.com"
] |
dushyant.khosla@pmi.com
|
1a33515f51413fc03f8802cb84b6db01c0016b74
|
2b3ea7bb0df4be7f55d2ac188e23d801e497df8d
|
/fcsm_eos_api_client/models/aws_encrypted_password.py
|
49a5f202a24b352dabcd4e1f2f72fa8919fc1b65
|
[] |
no_license
|
mikespub/fcsm-eos-api-client
|
12b663b4e79ac5d86c2162dec168bfa240a85f0c
|
107a3a7733c55ae6a750e32497268300c6be590e
|
refs/heads/master
| 2020-08-01T18:13:17.229375
| 2019-10-29T14:30:56
| 2019-10-29T14:30:56
| 211,071,995
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,358
|
py
|
# coding: utf-8
"""
Combined FCSM EOS API
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: 1.2.1
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class AwsEncryptedPassword(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'encrypted_password': 'str'
}
attribute_map = {
'encrypted_password': 'encryptedPassword'
}
def __init__(self, encrypted_password=None): # noqa: E501
"""AwsEncryptedPassword - a model defined in OpenAPI""" # noqa: E501
self._encrypted_password = None
self.discriminator = None
self.encrypted_password = encrypted_password
@property
def encrypted_password(self):
"""Gets the encrypted_password of this AwsEncryptedPassword. # noqa: E501
:return: The encrypted_password of this AwsEncryptedPassword. # noqa: E501
:rtype: str
"""
return self._encrypted_password
@encrypted_password.setter
def encrypted_password(self, encrypted_password):
"""Sets the encrypted_password of this AwsEncryptedPassword.
:param encrypted_password: The encrypted_password of this AwsEncryptedPassword. # noqa: E501
:type: str
"""
if encrypted_password is None:
raise ValueError("Invalid value for `encrypted_password`, must not be `None`") # noqa: E501
self._encrypted_password = encrypted_password
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, AwsEncryptedPassword):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
[
"github@mikespub.net"
] |
github@mikespub.net
|
88f358459d0853b847609698ac42ef8312da6936
|
4ec6ed4ebcb9346042669e6aa03be0e502ed48b3
|
/leetcode/minimum-path-sum.py
|
55df64015414dc0f842c2c148e4cf087b7cef48e
|
[] |
no_license
|
shonihei/road-to-mastery
|
79ed41cb1ad0dc2d0b454db2ccc7dd9567b03801
|
312bdf5101c3c1fc9a4d0b6762b5749ca57efe08
|
refs/heads/master
| 2021-01-22T19:59:17.038641
| 2017-11-16T15:21:55
| 2017-11-16T15:21:55
| 85,266,186
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 784
|
py
|
"""
Given a m x n grid filled with non-negative numbers, find a path from top
left to bottom right which minimizes the sum of all numbers along its path.
Note: You can only move either down or right at any point in time.
"""
def minPathSum(grid):
for i in range(1, len(grid[0])):
grid[0][i] = grid[0][i] + grid[0][i - 1]
for i in range(1, len(grid)):
grid[i][0] = grid[i][0] + grid[i - 1][0]
for i in range(1, len(grid)):
for j in range(1, len(grid[0])):
grid[i][j] = min(grid[i - 1][j], grid[i][j - 1]) + grid[i][j]
return grid[-1][-1]
if __name__ == "__main__":
import unittest
class Test(unittest.TestCase):
def test1(self):
self.assertEqual(minPathSum([[1, 2], [1, 1]]), 3)
unittest.main()
|
[
"shonihei@gmail.com"
] |
shonihei@gmail.com
|
f497c7f24447f373e69f0647e9382b1f70699ff9
|
ead94ae26fa54b0a81ca7bf6bc9a32e2d6ec946c
|
/0x0C-python-almost_a_circle/models/base.py
|
8bd97ec17fe9ebf1e2e3450eb7b49a39da972364
|
[] |
no_license
|
Ritapeace/holbertonschool-higher_level_programming
|
4d02049843869695b67a148c0b58ec0063ab0bfc
|
c95a215dbaa07bc73b1e7c3e5a051a4a0afed1c8
|
refs/heads/master
| 2023-03-16T20:58:08.538187
| 2020-04-16T18:28:24
| 2020-04-16T18:28:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,561
|
py
|
#!/usr/bin/python3
"""
main class of all the proyect
"""
# import turtle
import csv
import os
import json
class Base:
""" class base """
__nb_objects = 0
def __init__(self, id=None):
"""
class constructor
if id is None increment the private class attribute __nb_objects
else asign to self.id the value of id
"""
if id is None:
Base.__nb_objects += 1
self.id = Base.__nb_objects
else:
self.id = id
@staticmethod
def to_json_string(list_dictionaries):
""" to_json_string: return an object in json format"""
if list_dictionaries is None or len(list_dictionaries) == 0:
return ("[]")
else:
return(json.dumps(list_dictionaries))
@classmethod
def save_to_file(cls, list_objs):
""" save_to_fie: save in a fil the dicts of each instance passed"""
filename = cls.__name__ + ".json"
new = []
result = ""
with open(filename, 'w') as fd:
if list_objs is None:
result = cls.to_json_string(new)
else:
for elem in list_objs:
new.append(elem.to_dictionary())
result = cls.to_json_string(new)
fd.write(result)
@staticmethod
def from_json_string(json_string):
""" from_json_string: return an object from a json string"""
if json_string is None or len(json_string) == 0:
return ([])
else:
return (json.loads(json_string))
@classmethod
def create(cls, **dictionary):
"""
create: create a new instance depending of the cls.__name__
it is necesary to initialize the variables width, height
if it is Rectangle or size if it is Square
"""
if cls.__name__ == "Rectangle":
dummy = cls(2, 2)
if cls.__name__ == "Square":
dummy = cls(5)
dummy.update(**dictionary)
return (dummy)
@classmethod
def load_from_file(cls):
"""
load_from_file: reads fro file.json and returns the objects
"""
filename = cls.__name__ + ".json"
variable = ""
result = []
inst = []
if os.path.exists(filename) is True:
with open(filename, 'r') as fd:
variable = fd.read()
result = cls.from_json_string(variable)
for elem in result:
inst.append(cls.create(**elem))
return(inst)
else:
return (result)
@classmethod
def save_to_file_csv(cls, list_objs):
"""
save_to_file_csv: save a dir in a csv file
"""
filename = cls.__name__ + ".csv"
result = ""
new = []
big = []
with open(filename, 'w') as fd:
if list_objs is None:
result = csv.writer(fd, delimiter=',')
result.writerow([])
else:
result = csv.writer(fd, delimiter=',')
if cls.__name__ == "Rectangle":
for elem in list_objs:
new = ['id', 'width', 'height', 'x', 'y']
var = []
for i in new:
var.append(getattr(elem, i))
result.writerow(var)
if cls.__name__ == "Square":
for elem in list_objs:
new = ['id', 'size', 'x', 'y']
var = []
for i in new:
var.append(getattr(elem, i))
result.writerow(var)
@classmethod
def load_from_file_csv(cls):
"""
load_from_file_csv: loads froom csv file and create objects
"""
filename = cls.__name__ + ".csv"
inst = []
d = {}
if os.path.exists(filename) is True:
with open(filename) as fd:
result = csv.reader(fd, delimiter=',')
for row in result:
a = []
for elem in row:
a.append(int(elem))
if cls.__name__ == "Rectangle":
new = ['id', 'width', 'height', 'x', 'y']
for i in range(len(a)):
d[new[i]] = a[i]
inst.append(cls.create(**d))
if cls.__name__ == "Square":
new = ['id', 'size', 'x', 'y']
for i in range(len(a)):
d[new[i]] = a[i]
inst.append(cls.create(**d))
return(inst)
else:
return(result)
"""
@staticmethod
def draw(list_rectangles, list_squares):
turtle = turtle.Turtle()
for elem in list_rectangles:
turtle.goto(elem.x, elem.y)
for i in range(2):
turtle.up()
turtle.forward(elem.width)
turtle.left(90)
turtle.forward(elem.height)
turtle.left(90)
turtle.hidde()
for elem in list_squares:
turtle.goto(elem.x, elem.y)
for i in range(2):
turtle.up()
turtle.forward(elem.width)
turtle.left(90)
turtle.forward(elem.width)
turtle.left(90)
turtle.hidde()
turtle.done()
"""
|
[
"947@holbertonschool.com"
] |
947@holbertonschool.com
|
8e6d03f8372c860983813081ad8a53ea5ba0f293
|
4529dd6b9c257f00bf08301ea744be6f1b4c70ce
|
/blog/migrations/0001_initial.py
|
c8c588e7f5622ea448b2dab6521bb82bfde9590e
|
[] |
no_license
|
ljingen/studycode
|
bb3bc8b031a8ab5d9a86dbeca7ad8a8c9a6a0bfa
|
c4a925cc3f60f2729eb5ee415ed513dd41569d88
|
refs/heads/master
| 2021-06-23T13:51:03.756646
| 2017-07-30T04:57:08
| 2017-07-30T04:57:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,689
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-07-26 10:05
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Entry',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=100, verbose_name='标题')),
('body', models.TextField()),
('created_at', models.DateTimeField(default=datetime.datetime(2017, 7, 26, 10, 5, 56, 450747), verbose_name='添加时间')),
('updated_at', models.DateTimeField(default=datetime.datetime(2017, 7, 26, 10, 5, 56, 450774), verbose_name='修改时间')),
('status', models.CharField(choices=[('draft', '草稿'), ('public', '公开')], default='draft', max_length=8)),
],
),
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=30, verbose_name='姓名')),
('mail', models.EmailField(max_length=100, verbose_name='邮箱')),
],
),
migrations.AddField(
model_name='entry',
name='author',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='entries', to='blog.User'),
),
]
|
[
"luojingen@aliyun.com"
] |
luojingen@aliyun.com
|
9526a8ec66dccafb2ee7d3b42af3c02cf18f4915
|
28a124b6a2f22a53af3b6bb754e77af88b4138e1
|
/DJANGO/companytodo/reports/migrations/0002_auto_20191202_2322.py
|
4d1cefcb2466476c55f4a795a2a681a753fc7215
|
[] |
no_license
|
mebaysan/LearningKitforBeginners-Python
|
f7c6668a9978b52cad6cc2b969990d7bbfedc376
|
9e1a47fb14b3d81c5b009b74432902090e213085
|
refs/heads/master
| 2022-12-21T03:12:19.892857
| 2021-06-22T11:58:27
| 2021-06-22T11:58:27
| 173,840,726
| 18
| 4
| null | 2022-12-10T03:00:22
| 2019-03-04T23:56:27
|
Python
|
UTF-8
|
Python
| false
| false
| 862
|
py
|
# Generated by Django 2.2.7 on 2019-12-02 20:22
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('products', '0002_product_description'),
('areas', '0001_initial'),
('reports', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='report',
name='product',
field=models.ForeignKey(default='1', on_delete=django.db.models.deletion.CASCADE, to='products.Product'),
preserve_default=False,
),
migrations.AddField(
model_name='report',
name='production_line',
field=models.ForeignKey(default='1', on_delete=django.db.models.deletion.CASCADE, to='areas.ProductionLine'),
preserve_default=False,
),
]
|
[
"menesbaysan@gmail.com"
] |
menesbaysan@gmail.com
|
4b89b693039712326e41f12efd84ada959490eb8
|
c8335705ff06641622668c9b0a3020df9213bc77
|
/core/migrations/0005_homepage_hero_image.py
|
b81f7fd0a13311af70fe4bc543ec37280b9382e4
|
[] |
no_license
|
Richardh36/ANS
|
0adedcc760a6acbf539c8cbedde8edc28186218a
|
2c46d36cf349f3ab8556bf713d2a0125c415029a
|
refs/heads/master
| 2016-09-11T02:42:21.952145
| 2015-05-03T14:03:10
| 2015-05-03T14:03:10
| 34,852,005
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 599
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('wagtailimages', '0005_make_filter_spec_unique'),
('core', '0004_auto_20150501_1528'),
]
operations = [
migrations.AddField(
model_name='homepage',
name='hero_image',
field=models.ForeignKey(to='wagtailimages.Image', null=True, on_delete=django.db.models.deletion.SET_NULL),
preserve_default=True,
),
]
|
[
"karlhobley10@gmail.com"
] |
karlhobley10@gmail.com
|
4354b781f08321717f27ae24f9ca7b2823049b1e
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/117/usersdata/163/26262/submittedfiles/al2.py
|
755eb8db7363ddb972d635033445678a25e04869
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 153
|
py
|
from __future__ import division
#INICIE SEU CODIGO AQUI
n= float('(input('Digite um numero real:'))
Real= n-inteiro
print('(inteiro)
print('%.2f' %real)
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
d53fba9d4a171874a43be778f4b75c108a7f4482
|
c54f5a7cf6de3ed02d2e02cf867470ea48bd9258
|
/pyobjc/pyobjc-framework-Quartz/PyObjCTest/test_PDFAnnotationPopup.py
|
d3f43c1657a8c051103ca7d463852aef2104814e
|
[
"MIT"
] |
permissive
|
orestis/pyobjc
|
01ad0e731fbbe0413c2f5ac2f3e91016749146c6
|
c30bf50ba29cb562d530e71a9d6c3d8ad75aa230
|
refs/heads/master
| 2021-01-22T06:54:35.401551
| 2009-09-01T09:24:47
| 2009-09-01T09:24:47
| 16,895
| 8
| 5
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 305
|
py
|
from PyObjCTools.TestSupport import *
from Quartz.PDFKit import *
class TestPDFAnnotationPopup (TestCase):
def testMethods(self):
self.failUnlessResultIsBOOL(PDFAnnotationPopup.isOpen)
self.failUnlessArgIsBOOL(PDFAnnotationPopup.setIsOpen_, 0)
if __name__ == "__main__":
main()
|
[
"ronaldoussoren@f55f28a5-9edb-0310-a011-a803cfcd5d25"
] |
ronaldoussoren@f55f28a5-9edb-0310-a011-a803cfcd5d25
|
12b143198ee564e1b2311b19892e0b052b92a34b
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02677/s129355694.py
|
8a3a6c982603a763e0eda9f0b0cd3a4f65454318
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 183
|
py
|
import math
a, b, h, m = map(int , input().split())
theta = abs( (h/12) + (m/60) * (1/12) - m/60)*2*math.pi
print(math.sqrt(b**2 * math.sin(theta)**2 + (b*math.cos(theta) -a) **2))
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
63d117d1b2d5478d51e1f8fc837b4aeb7c54cc24
|
cc619d6e81c39fe54d4875e3c6936e25bb8a7ebd
|
/demos/multiple_problems.py
|
195c16a0fc049f57e60b621cecf8204fe7713ed0
|
[] |
no_license
|
joshua4289/python3-examples
|
cb01060f649c7dc97185566b00fa0d59a1ffdca3
|
70c1fd0b1e5bf25e82697257fb9f92cd06e922b7
|
refs/heads/master
| 2020-12-19T08:19:13.310071
| 2017-04-28T13:48:01
| 2017-04-28T13:48:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 792
|
py
|
class UnrecoverableError(Exception): pass
class MyException(Exception): pass
def main():
try:
part1()
part2()
part3()
part4()
except UnrecoverableError as e:
print("UnrecoverableError")
except MyException as e:
print("that's all folks")
def part1():
try:
raise MyException("oops")
print("part 1")
except MyException as e:
# log it
print(e)
raise
def part2():
try:
print("part 2")
except MyException as e:
print(e)
def part3():
try:
raise UnrecoverableError("oops")
print("part 3")
except MyException as e:
print(e)
def part4():
try:
print("part 4")
except MyException as e:
print(e)
main()
|
[
"seddon-software@keme.co.uk"
] |
seddon-software@keme.co.uk
|
404ecb75c55291f907d7c3903ecc926fadd45862
|
343eb5d9ea4bfb29191c8f967585278dc2892b3f
|
/RedBimEngine/constants.py
|
c434f8a5f202af2622c1eb7bd0cd389c9bb85b18
|
[] |
no_license
|
NauGaika/PikBim
|
11bd1929f7ca63d740692c9a7bcc31715f11b38b
|
28f3ce2d8d27eb062ee5173fb4e21c262c30ba4b
|
refs/heads/master
| 2020-07-28T20:43:11.928011
| 2019-09-19T11:10:37
| 2019-09-19T11:10:37
| 209,531,589
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,496
|
py
|
# -*- coding: utf-8 -*-
"""Хранятся константы.
USERNAME
HOME_DIR
DIR_SCRIPTS
STATIC_IMAGE
USER_SCRIPTS
LOGO
"""
import os
def get_username():
"""Получить текущее имя пользователя."""
uname = __revit__.Application.Username
uname = uname.split('@')[0]
uname = uname.replace('.', '')
return uname
def parent_dir(dir, count=1):
"""Получает родительску дирректорию."""
dir_name = dir
while count:
count -= 1
dir_name = os.path.dirname(dir_name)
dir_name = os.path.abspath(dir_name)
return dir_name
USERNAME = get_username()
HOME_DIR = '\\\\picompany.ru\\pikp\\Dep\\LKP4\\WORK\\scripts'
LOADER = os.path.join(HOME_DIR, 'loader')
GOOGLE = os.path.join(LOADER, 'Google')
DIR_SCRIPTS = os.path.join(HOME_DIR, 'scripts')
DIR_SYSTEM_SCRIPTS = os.path.join(HOME_DIR, 'systemscripts')
STATIC_IMAGE = os.path.join(HOME_DIR, 'static\\img')
USER_SCRIPTS = os.path.join(HOME_DIR, 'scripts\\Пользовательские.tab\\Скрипты.panel')
USER_SCRIPT_TEMP = os.path.join(HOME_DIR, 'scripts\\Пользовательские.tab\\Временный.panel\\Временный.pushbutton\\__init__.py')
LOGO = 'RB'
START_SCRIPT = os.path.join(HOME_DIR, 'common_scripts\\start_of_script.py')
__all__ = ['USERNAME', 'HOME_DIR', 'DIR_SCRIPTS', 'STATIC_IMAGE',
'USER_SCRIPTS', 'LOGO', 'START_SCRIPT', 'USER_SCRIPT_TEMP',
'LOADER', 'GOOGLE']
|
[
"skliarenkovm@pik.ru"
] |
skliarenkovm@pik.ru
|
6c47db3bb3320ccd1a523f04599fd9af3d2faa4d
|
b46dd521ffab869a32bf4580d2cbf35c94e550c6
|
/domain/BallotDomain.py
|
5aded2408c3e155ea708285a9fa57c2199518353
|
[] |
no_license
|
pawan-manishka/results-tabulation-tallysheets
|
b698b080c6393cf728ae3f79891e20dd2d309add
|
7d2f065dfd03304e3aa8f079145bbc96d03c481c
|
refs/heads/master
| 2020-06-14T19:02:21.523754
| 2019-07-03T08:34:05
| 2019-07-03T08:34:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 413
|
py
|
from config import db
from models import BallotModel as Model
from domain import InvoiceItemDomain
def get_all():
result = Model.query.all()
return result
def create(body):
invoice_item = InvoiceItemDomain.create()
result = Model(
ballotId=body["ballotId"],
invoiceItemId=invoice_item.invoiceItemId
)
db.session.add(result)
db.session.commit()
return result
|
[
"l.dinukadesilva@gmail.com"
] |
l.dinukadesilva@gmail.com
|
d21d32ebb9613caff29cdf4a1c7c456ce7f5f81c
|
3f6c16ea158a8fb4318b8f069156f1c8d5cff576
|
/.PyCharm2019.1/system/python_stubs/-1850396913/_imp.py
|
0b80f25b1fa57f9f166bff38ae22e695685dc730
|
[] |
no_license
|
sarthak-patidar/dotfiles
|
08494170d2c0fedc0bbe719cc7c60263ce6fd095
|
b62cd46f3491fd3f50c704f0255730af682d1f80
|
refs/heads/master
| 2020-06-28T23:42:17.236273
| 2019-10-01T13:56:27
| 2019-10-01T13:56:27
| 200,369,900
| 0
| 0
| null | 2019-08-03T12:56:33
| 2019-08-03T11:53:29
|
Shell
|
UTF-8
|
Python
| false
| false
| 5,612
|
py
|
# encoding: utf-8
# module _imp
# from (built-in)
# by generator 1.147
""" (Extremely) low-level import machinery bits as used by importlib and imp. """
# no imports
# functions
def acquire_lock(*args, **kwargs): # real signature unknown
"""
Acquires the interpreter's import lock for the current thread.
This lock should be used by import hooks to ensure thread-safety when importing
modules. On platforms without threads, this function does nothing.
"""
pass
def create_builtin(*args, **kwargs): # real signature unknown
""" Create an extension module. """
pass
def create_dynamic(*args, **kwargs): # real signature unknown
""" Create an extension module. """
pass
def exec_builtin(*args, **kwargs): # real signature unknown
""" Initialize a built-in module. """
pass
def exec_dynamic(*args, **kwargs): # real signature unknown
""" Initialize an extension module. """
pass
def extension_suffixes(*args, **kwargs): # real signature unknown
""" Returns the list of file suffixes used to identify extension modules. """
pass
def get_frozen_object(*args, **kwargs): # real signature unknown
""" Create a code object for a frozen module. """
pass
def init_frozen(*args, **kwargs): # real signature unknown
""" Initializes a frozen module. """
pass
def is_builtin(*args, **kwargs): # real signature unknown
""" Returns True if the module name corresponds to a built-in module. """
pass
def is_frozen(*args, **kwargs): # real signature unknown
""" Returns True if the module name corresponds to a frozen module. """
pass
def is_frozen_package(*args, **kwargs): # real signature unknown
""" Returns True if the module name is of a frozen package. """
pass
def lock_held(*args, **kwargs): # real signature unknown
"""
Return True if the import lock is currently held, else False.
On platforms without threads, return False.
"""
pass
def release_lock(*args, **kwargs): # real signature unknown
"""
Release the interpreter's import lock.
On platforms without threads, this function does nothing.
"""
pass
def _fix_co_filename(*args, **kwargs): # real signature unknown
"""
Changes code.co_filename to specify the passed-in file path.
code
Code object to change.
path
File path to use.
"""
pass
# classes
class __loader__(object):
"""
Meta path import for built-in modules.
All methods are either class or static methods to avoid the need to
instantiate the class.
"""
@classmethod
def create_module(cls, *args, **kwargs): # real signature unknown
""" Create a built-in module """
pass
@classmethod
def exec_module(cls, *args, **kwargs): # real signature unknown
""" Exec a built-in module """
pass
@classmethod
def find_module(cls, *args, **kwargs): # real signature unknown
"""
Find the built-in module.
If 'path' is ever specified then the search is considered a failure.
This method is deprecated. Use find_spec() instead.
"""
pass
@classmethod
def find_spec(cls, *args, **kwargs): # real signature unknown
pass
@classmethod
def get_code(cls, *args, **kwargs): # real signature unknown
""" Return None as built-in modules do not have code objects. """
pass
@classmethod
def get_source(cls, *args, **kwargs): # real signature unknown
""" Return None as built-in modules do not have source code. """
pass
@classmethod
def is_package(cls, *args, **kwargs): # real signature unknown
""" Return False as built-in modules are never packages. """
pass
@classmethod
def load_module(cls, *args, **kwargs): # real signature unknown
"""
Load the specified module into sys.modules and return it.
This method is deprecated. Use loader.exec_module instead.
"""
pass
def module_repr(module): # reliably restored by inspect
"""
Return repr for the module.
The method is deprecated. The import machinery does the job itself.
"""
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
__weakref__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""list of weak references to the object (if defined)"""
__dict__ = None # (!) real value is "mappingproxy({'__module__': '_frozen_importlib', '__doc__': 'Meta path import for built-in modules.\\n\\n All methods are either class or static methods to avoid the need to\\n instantiate the class.\\n\\n ', 'module_repr': <staticmethod object at 0x7ffb5f337048>, 'find_spec': <classmethod object at 0x7ffb5f337080>, 'find_module': <classmethod object at 0x7ffb5f3370b8>, 'create_module': <classmethod object at 0x7ffb5f3370f0>, 'exec_module': <classmethod object at 0x7ffb5f337128>, 'get_code': <classmethod object at 0x7ffb5f337198>, 'get_source': <classmethod object at 0x7ffb5f337208>, 'is_package': <classmethod object at 0x7ffb5f337278>, 'load_module': <classmethod object at 0x7ffb5f3372b0>, '__dict__': <attribute '__dict__' of 'BuiltinImporter' objects>, '__weakref__': <attribute '__weakref__' of 'BuiltinImporter' objects>})"
# variables with complex values
__spec__ = None # (!) real value is "ModuleSpec(name='_imp', loader=<class '_frozen_importlib.BuiltinImporter'>)"
|
[
"sarthakpatidar15@gmail.com"
] |
sarthakpatidar15@gmail.com
|
c87a9b71d55168e5522480af101d1447f3f937bb
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_146/ch39_2020_04_13_14_51_58_180379.py
|
17ddbc5af6d01d87824c7162766fd8f3643c4f1e
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 180
|
py
|
def collatz(numero):
print(numero)
if numero == 1:
return
if numero % 2 == 0:
collatz(numero / 2)
else:
collatz(3 * numero + 1)
collatz(50)
|
[
"you@example.com"
] |
you@example.com
|
3ac41dad4d1438d2d9107b3e37b52d502857cc07
|
116acf603f5db8d626247355bf786c339ba95ea9
|
/sendmsg/aliyun/aliyunsdkcore/auth/rpc_signature_composer.py
|
d5491de52ef7e3c4dd6ffb21d77def0155c0c21c
|
[] |
no_license
|
dahunuaa/ZhihuiSMB_python3
|
0857afeec2337b44571986a9c70c26e716142ccb
|
8db2708efccd5eefa393738500e326bd7fb65c21
|
refs/heads/master
| 2021-01-25T14:32:32.201879
| 2018-03-11T05:59:10
| 2018-03-11T05:59:10
| 123,703,184
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,900
|
py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#coding=utf-8
__author__ = 'alex jiang'
import os
import sys
parentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, parentdir)
from . import sha_hmac1 as mac1
import urllib
import urllib.request
from ..utils import parameter_helper as helper
def __init__():
pass
# this function will append the necessary parameters for signer process.
# parameters: the orignal parameters
# signer: sha_hmac1 or sha_hmac256
# accessKeyId: this is aliyun_access_key_id
# format: XML or JSON
def __refresh_sign_parameters(parameters, access_key_id, accept_format="JSON", signer=mac1):
if parameters is None or not isinstance(parameters, dict):
parameters = dict()
parameters["Timestamp"] = helper.get_iso_8061_date()
parameters["SignatureMethod"] = signer.get_signer_name()
parameters["SignatureVersion"] = signer.get_singer_version()
parameters["SignatureNonce"] = helper.get_uuid()
parameters["AccessKeyId"] = access_key_id
if accept_format is not None:
parameters["Format"] = accept_format
return parameters
def __pop_standard_urlencode(query):
ret = urllib.parse.urlencode(query)
ret = ret.replace('+', '%20')
ret = ret.replace('*', '%2A')
ret = ret.replace('%7E', '~')
return ret
def __compose_string_to_sign(method, queries):
canonicalized_query_string = ""
sorted_parameters = sorted(queries.items(), key=lambda queries: queries[0])
string_to_sign = method + "&%2F&" + urllib.request.pathname2url(__pop_standard_urlencode(sorted_parameters))
return string_to_sign
def __get_signature(string_to_sign, secret, signer=mac1):
return signer.get_sign_string(string_to_sign, secret + '&')
def get_signed_url(params, ak, secret, accept_format, method, signer=mac1):
sign_params = __refresh_sign_parameters(params, ak, accept_format, signer)
string_to_sign = __compose_string_to_sign(method, sign_params)
signature = __get_signature(string_to_sign, secret, signer)
sign_params['Signature'] = signature
url = '/?' + __pop_standard_urlencode(sign_params)
return url
|
[
"dahu yao"
] |
dahu yao
|
83fab9a5b14ab84b25fc69ef404b2b327393cf8e
|
6fcfb638fa725b6d21083ec54e3609fc1b287d9e
|
/python/wroscoe_donkey/donkey-master/scripts/upload.py
|
de1854714fd61e71babfcf07623b5a3b0f002c07
|
[] |
no_license
|
LiuFang816/SALSTM_py_data
|
6db258e51858aeff14af38898fef715b46980ac1
|
d494b3041069d377d6a7a9c296a14334f2fa5acc
|
refs/heads/master
| 2022-12-25T06:39:52.222097
| 2019-12-12T08:49:07
| 2019-12-12T08:49:07
| 227,546,525
| 10
| 7
| null | 2022-12-19T02:53:01
| 2019-12-12T07:29:39
|
Python
|
UTF-8
|
Python
| false
| false
| 221
|
py
|
import donkey as dk
sess
dk.sessions.pickle_sessions(sessions_folder='/home/wroscoe/donkey_data/sessions/',
session_names=['f8'],
file_path='/home/wroscoe/f8.pkl')
|
[
"659338505@qq.com"
] |
659338505@qq.com
|
f6dd5238086e238bdf7f038a4485afc984f0e1c1
|
ee8fc61e653410c377dcc8e2e2652f82ab03fd0a
|
/scripts/mode_background.py
|
2c05b7997c193b8b78548761a28f7fc111f3cbeb
|
[
"Apache-2.0"
] |
permissive
|
alcinos/dps
|
f8b2360b55676db95aa6f717eca935a77e46eb3e
|
5467db1216e9f9089376d2c71f524ced2382e4f6
|
refs/heads/master
| 2020-05-18T19:07:37.809901
| 2019-04-30T13:43:17
| 2019-04-30T13:43:17
| 184,602,771
| 0
| 0
|
Apache-2.0
| 2019-05-02T15:09:36
| 2019-05-02T15:09:35
| null |
UTF-8
|
Python
| false
| false
| 1,553
|
py
|
import matplotlib.pyplot as plt
import numpy as np
import collections
from dps.utils import sha_cache, NumpySeed
from dps.datasets.atari import StaticAtariDataset
def compute_background(data, mode_threshold):
assert data.dtype == np.uint8
mask = np.zeros(data.shape[1:3])
background = np.zeros(data.shape[1:4])
for i in range(data.shape[1]):
for j in range(data.shape[2]):
print("Doing {}".format((i, j)))
channel = [tuple(cell) for cell in data[:, i, j, ...]]
counts = collections.Counter(channel)
mode, mode_count = counts.most_common(1)[0]
if mode_count / data.shape[0] > mode_threshold:
mask[i, j] = 1
background[i, j, ...] = mode_count
else:
mask[i, j] = 0
return mask, background
@sha_cache("compute_background")
def f(game, N, in_colour, threshold, seed):
print("Computing background...")
with NumpySeed(seed):
dset = StaticAtariDataset(game=game, after_warp=not in_colour)
X = dset.x
if N:
X = X[:N]
mask, background = compute_background(X, threshold)
return mask, background
game = "IceHockeyNoFrameskip-v4"
in_colour = False
N = 1000
threshold = 0.8
seed = 0
mask, background = f(game, N, in_colour, threshold, seed)
if not in_colour:
background = background[..., 0]
fig, axes = plt.subplots(1, 2)
axes[0].imshow(mask)
axes[0].set_title("Mask")
axes[1].imshow(background)
axes[1].set_title("Background")
plt.show()
|
[
"eric.crawford@mail.mcgill.ca"
] |
eric.crawford@mail.mcgill.ca
|
6cfac94954dfc1ed9b3f323587ba8366b3f4bc6c
|
186158704058dcbeef84caf6d0fa220d127719dc
|
/bin/unzip-bpo.py
|
ff2a23ced8fa03791d5bbc10a1bf624b58af33ed
|
[] |
no_license
|
davidmcclure/quotes
|
a18afc88315b3157ddb69f14ca0e8e69bdd6ff68
|
1460a732091afb5f39d484a4644e8c05dd1af201
|
refs/heads/master
| 2021-01-11T07:59:35.648905
| 2017-02-18T04:04:56
| 2017-02-18T04:04:56
| 72,132,830
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 197
|
py
|
#!/usr/bin/env python
from quotes.services import config
from quotes.jobs.unzip_bpo import UnzipBPO
if __name__ == '__main__':
job = UnzipBPO(corpus_dir=config['bpo_corpus_dir'])
job()
|
[
"davidwilliammcclure@gmail.com"
] |
davidwilliammcclure@gmail.com
|
a0939899f8fceb40a4dd8a2deed214d8879e78cc
|
0d8486c1d55c40bebea7c5428930f18165d2d0e9
|
/tests/wasp1/AllAnswerSets/aggregates_count_propagation_3.test.py
|
27e773802ad09d5c6d3abad9eda4941834b6af27
|
[
"Apache-2.0"
] |
permissive
|
bernardocuteri/wasp
|
6f81bf6aa8fb273c91bbf68ecce4ecb195a55953
|
05c8f961776dbdbf7afbf905ee00fc262eba51ad
|
refs/heads/master
| 2021-06-08T11:58:25.080818
| 2020-10-05T16:57:37
| 2020-10-05T16:57:37
| 124,245,808
| 0
| 0
|
Apache-2.0
| 2018-03-07T14:13:16
| 2018-03-07T14:13:16
| null |
UTF-8
|
Python
| false
| false
| 183
|
py
|
input = """
a(1) v a(2).
a(3) v a(4).
ok :- not #count{T: a(T)} > 2, #count{V : a(V)} > 1.
"""
output = """
{a(1), a(3), ok}
{a(1), a(4), ok}
{a(2), a(3), ok}
{a(2), a(4), ok}
"""
|
[
"mario@alviano.net"
] |
mario@alviano.net
|
cc445242ba9b1c7f923e254cdb7bd7111f34354d
|
5aec124d5c006fab649d562603e00cff5fc8eafb
|
/HCU-311-V1.0.1/sailing_robot_control/build/ball_detected/catkin_generated/pkg.develspace.context.pc.py
|
64c847321c4ed3ddf0ed6b3f1d89f11041bd7378
|
[] |
no_license
|
supcon-nzic/HCU311_Sailing_Robot
|
d935e0e2383b0f17eeeec42a94adc027ba22a9f1
|
e42d8c10455e6bf88c7a9e9a3be65b7afd12466e
|
refs/heads/master
| 2021-05-21T08:36:59.693130
| 2020-05-27T02:29:56
| 2020-05-27T02:29:56
| 252,621,616
| 3
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 486
|
py
|
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/root/HCU-311/sailing_robot_control/devel/include".split(';') if "/root/HCU-311/sailing_robot_control/devel/include" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "ball_detected"
PROJECT_SPACE_DIR = "/root/HCU-311/sailing_robot_control/devel"
PROJECT_VERSION = "0.0.0"
|
[
"qiuyunpeng@nz-ic.com"
] |
qiuyunpeng@nz-ic.com
|
8688ca373fba0c6353719eb5259f982eb9d99d70
|
00cd46c5722fbb4623d8cefc33bbce6e4c6bf970
|
/BFS/120.Word Ladder/Solution_BFS.py
|
5e0d498187e3ce628b96de5545b6fd6f4933da91
|
[
"MIT"
] |
permissive
|
jxhangithub/lintcode
|
9126d0d951cdc69cd5f061799313f1a96ffe5ab8
|
afd79d790d0a7495d75e6650f80adaa99bd0ff07
|
refs/heads/master
| 2022-04-02T22:02:57.515169
| 2020-02-26T21:32:02
| 2020-02-26T21:32:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,350
|
py
|
from collections import deque
class Solution:
"""
@param: start: a string
@param: end: a string
@param: dict: a set of string
@return: An integer
"""
def ladderLength(self, start, end, dictionary):
# write your code here
if start == end:
return 1
dictionary.add(end)
steps = {start:1}
queue = deque([start])
while queue:
word = queue.popleft()
if word == end:
return steps[word]
for candidate in self._next_words(word):
if candidate not in dictionary or candidate in steps:
continue
queue.append(candidate)
steps[candidate] = steps[word] + 1
return 0
# O(26 * L^2)
# L is the length of word
def _next_words(self, word):
words = []
for i in range(len(word)):
left, right = word[:i], word[i + 1:]
for char in 'abcdefghijklmnopqrstuvwxyz':
if word[i] == char:
continue
words.append(left + char + right)
return words
# 分层遍历
class Solution:
"""
@param: start: a string
@param: end: a string
@param: dict: a set of string
@return: An integer
"""
def ladderLength(self, start, end, dict):
dict.add(end)
queue = collections.deque([start])
visited = set([start])
distance = 0
while queue:
distance += 1
for i in range(len(queue)):
word = queue.popleft()
if word == end:
return distance
for next_word in self.get_next_words(word):
if next_word not in dict or next_word in visited:
continue
queue.append(next_word)
visited.add(next_word)
return 0
# O(26 * L^2)
# L is the length of word
def get_next_words(self, word):
words = []
for i in range(len(word)):
left, right = word[:i], word[i + 1:]
for char in 'abcdefghijklmnopqrstuvwxyz':
if word[i] == char:
continue
words.append(left + char + right)
return words
|
[
"32248549+Zhenye-Na@users.noreply.github.com"
] |
32248549+Zhenye-Na@users.noreply.github.com
|
98e0cfbb25ce168aee899791913abb63cb37db2f
|
94e9bdf9a79b63d29f6d4cd0d299feaaaf2f346f
|
/tp/gremlin.py
|
c62a5cb9bba5e6e63ebcb0f3eff7f4f2f1872638
|
[
"Apache-2.0"
] |
permissive
|
BITPlan/pyjanusgraph
|
797fe7940672bef939b6119727811fd99b6271ca
|
b7166250a96c5d1cc919a821269ca6740e50c510
|
refs/heads/master
| 2022-04-14T19:34:26.342483
| 2020-04-13T09:41:47
| 2020-04-13T09:41:47
| 255,103,518
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,841
|
py
|
'''
Created on 2020-03-30
@author: wf
'''
from gremlin_python.driver.driver_remote_connection import DriverRemoteConnection
from gremlin_python.structure.graph import Graph
from shutil import copyfile
import os
import csv
class RemoteGremlin(object):
'''
helper for remote gremlin connections
:ivar server: the server to connect to
:ivar port: the port to connect to
:ivar sharepoint: the directory that is the shared with the janusgraph instance e.g. via a docker bind/mount or volume
:ivar sharepath: the path o the sharepoint as seens by the janusgraph server
'''
debug=False
def __init__(self, server='localhost', port=8182):
'''
construct me with the given server and port
Args:
server(str): the server to use
port(int): the port to use
'''
self.server=server
self.port=port
def setSharepoint(self,sharepoint,sharepath):
'''
set up a sharepoint
Args:
sharepoint(str): the directory that is the shared with the janusgraph instance e.g. via a docker bind/mount or volume
sharepath(str): the path o the sharepoint as seens by the janusgraph server
'''
self.sharepoint=sharepoint
self.sharepath=sharepath
def share(self,file):
'''
share the given file and return the path as seen by the server
Args:
file(str): path to the file to share
'''
fbase=os.path.basename(file)
target=self.sharepoint+fbase
if RemoteGremlin.debug:
print("copying %s to %s" % (file,target))
copyfile(file,target)
return self.sharepath+fbase
def open(self):
'''
open the remote connection
Returns:
GraphTraversalSource: the remote graph traversal source
'''
self.graph = Graph()
self.url='ws://%s:%s/gremlin' % (self.server,self.port)
self.connection = DriverRemoteConnection(self.url, 'g')
# The connection should be closed on shut down to close open connections with connection.close()
self.g = self.graph.traversal().withRemote(self.connection)
return self.g
def close(self):
'''
close the remote connection
'''
self.connection.close()
def clean(self):
'''
clean the graph database by removing all vertices
'''
# drop the existing content of the graph
self.g.V().drop().iterate()
class TinkerPopAble(object):
'''
mixin for classes to store and retrieve from tinkerpop graph database
'''
debug=False
def storeFields(self,fieldList):
'''
define the fields to be stored as tinkerpop vertice properties
Args:
fieldList(list): list of fields to be stored
'''
if not hasattr(self,'tpfields'):
self.tpfields={}
fields=vars(self)
for field in fieldList:
self.tpfields[field]=fields[field]
def toVertex(self,g):
'''
create a vertex from me
Args:
g(GraphTraversalSource): where to add me as a vertex
'''
label=type(self).__name__;
t=g.addV(label)
if TinkerPopAble.debug:
print(label)
tpfields=TinkerPopAble.fields(self)
for name,value in tpfields.items():
if TinkerPopAble.debug:
print("\t%s=%s" % (name,value))
if value is not None:
t=t.property(name,value)
t.iterate()
def fromMap(self,pMap):
'''
fill my attributes from the given pMap dict
Args:
pmap(dict): the dict to fill my attributes from
'''
for name,value in pMap.items():
self.__setattr__(name, value[0]) #
@staticmethod
def fields(instance):
'''
Returns:
dict: either the vars of the instance or the fields specified by the tpfields attribute
'''
# if there is a pre selection of fields store only these
if hasattr(instance,'tpfields'):
tpfields=instance.tpfields
else:
# else use all fields
tpfields=vars(instance)
return tpfields
@staticmethod
def writeCSV(csvfileName,objectList,fieldnames=None):
'''
write the given objectList to a CSV file
Args:
csvfileName(str): the path for the CSV File to write to
objectList(list): a list of instances for which CSV lines should be created
fieldnames(list): an optional list of fieldnames - if set to None the fields will be derived from the first instance in the objectList
'''
if fieldnames is None:
if len(objectList)<1:
raise("writCSV needs at least one object in ObjectList when fieldnames are not specified")
headerInstance=objectList[0]
fieldnames=TinkerPopAble.fields(headerInstance).keys()
with open(csvfileName, 'w', newline='') as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writeheader()
for instance in objectList:
rowdict={}
for fieldname in fieldnames:
fields=TinkerPopAble.fields(instance)
rowdict[fieldname]=fields[fieldname]
writer.writerow(rowdict)
@staticmethod
def cache(rg,gfile,clazz,objectList,initFunction):
'''
generic caching
Args:
gfile(str): the graph storage file
clazz(class): the class of the objects in the objectList
objectList(list): a list of instances to fill or read
initFunction(function): a function to call to fill the cache
'''
g=rg.g
cachefile=rg.sharepoint+gfile
clazzname=clazz.__name__
if os.path.isfile(cachefile):
g.io(rg.sharepath+gfile).read().iterate()
for pMap in g.V().hasLabel(clazzname).valueMap().toList():
if TinkerPopAble.debug:
print (pMap)
instance=clazz.ofMap(pMap)
objectList.append(instance)
if TinkerPopAble.debug:
print (instance)
else:
initFunction()
for instance in objectList:
if TinkerPopAble.debug:
print(instance)
instance.toVertex(g)
g.io(rg.sharepath+gfile).write().iterate()
return cachefile
|
[
"wf@bitplan.com"
] |
wf@bitplan.com
|
5fd64adbe388809df800e8d25b4768762ae01b4f
|
63b0fed007d152fe5e96640b844081c07ca20a11
|
/ABC/ABC200~ABC299/ABC258/b.py
|
9df401169330584353a01a0d5b334e77713e1f27
|
[] |
no_license
|
Nikkuniku/AtcoderProgramming
|
8ff54541c8e65d0c93ce42f3a98aec061adf2f05
|
fbaf7b40084c52e35c803b6b03346f2a06fb5367
|
refs/heads/master
| 2023-08-21T10:20:43.520468
| 2023-08-12T09:53:07
| 2023-08-12T09:53:07
| 254,373,698
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 485
|
py
|
n = int(input())
a = []
for _ in range(n):
a.append(list(input()))
ans = 0
dx = [-1, -1, -1, 0, 0, 1, 1, 1]
dy = [-1, 0, 1, -1, 1, -1, 0, 1]
for i in range(n):
for j in range(n):
for k in range(8):
tmp = ''
nx = dx[k]
ny = dy[k]
for m in range(n):
tmp += a[i][j]
i += nx
j += ny
i %= n
j %= n
ans = max(ans, int(tmp))
print(ans)
|
[
"ymdysk911@gmail.com"
] |
ymdysk911@gmail.com
|
5d0e57eddce91b4d61e123d45d851339227494c9
|
51f2492a5c207e3664de8f6b2d54bb93e313ca63
|
/atcoder/arc095/d.py
|
c2157f467e42ce2ef4a64ede2861cce4c65cbefc
|
[
"WTFPL",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
abeaumont/competitive-programming
|
23c5aabd587d7bb15a61efd3428838cb934233dd
|
a24c9b89941a59d344b51dc1010de66522b1a0dd
|
refs/heads/master
| 2023-09-01T09:50:58.267361
| 2023-07-31T18:00:10
| 2023-07-31T18:00:10
| 117,589,708
| 618
| 262
|
WTFPL
| 2023-07-12T17:36:20
| 2018-01-15T20:00:56
|
C++
|
UTF-8
|
Python
| false
| false
| 291
|
py
|
#!/usr/bin/env python3
# https://arc095.contest.atcoder.jp/tasks/arc095_b
n = int(input())
a = [int(x) for x in input().split()]
a.sort()
y = a[-1]
m = None
x = None
for i in range(n - 1):
k = a[i]
l = max(k, y - k)
if m is None or m > l:
m = l
x = k
print(y, x)
|
[
"alfredo.beaumont@gmail.com"
] |
alfredo.beaumont@gmail.com
|
10f9b023d025a4040f73e9e8d651b69aa9b0dd26
|
f5c9e20987413a46c7f2e856404813d27cde26e7
|
/mysite/pages/views.py
|
69e176db25833b22ebab966802d685dbeca9178a
|
[] |
no_license
|
PriyankaBuchkul/practice
|
dc1573209edf7cc5d8abecfce485d15de7f029fd
|
16d864b1a6082814978f68ee0daf8fa404a89f2a
|
refs/heads/master
| 2020-03-13T05:17:41.406838
| 2018-04-25T09:55:11
| 2018-04-25T09:55:11
| 130,980,621
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 495
|
py
|
from django.shortcuts import render
from django.http import HttpResponse
from .models import Posts
# Create your views here.
def index(request):
#return HttpResponse('Hello From Post')
posts=Posts.objects.all()[:10]
contexts = {
'title':'latest Posts',
'posts':posts
}
return render(request,'pages/index.html',contexts)
def details(request,id):
post=Posts.objects.get(id=id)
context = {
'post' : post
}
return render(request,'pages/details.html',context)
|
[
"priyankabuchkul@gmail.com"
] |
priyankabuchkul@gmail.com
|
eebe441b71964e68194495cd7d2c119654278901
|
5955ea34fd72c719f3cb78fbb3c7e802a2d9109a
|
/_STRUCTURES/LIST/Create/create_list_1.py
|
8d96f1119bfb4c6ad8eb7b8b7f41d1c5a7fc986f
|
[] |
no_license
|
AndreySperansky/TUITION
|
3c90ac45f11c70dce04008adc1e9f9faad840b90
|
583d3a760d1f622689f6f4f482c905b065d6c732
|
refs/heads/master
| 2022-12-21T21:48:21.936988
| 2020-09-28T23:18:40
| 2020-09-28T23:18:40
| 299,452,924
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 196
|
py
|
A = ['red', 'green', 'blue']
print(' '.join(A))
# red green blue
c = ['alfa', 'betta', 'gamma']
print(' '.join(map(str, c)))
# alfa betta gamma
d = [1, 2, 3]
print(' '.join(map(str, d)))
# 1 2 3
|
[
"andrey.speransky@gmail.com"
] |
andrey.speransky@gmail.com
|
afde19ef47f9828ec2b07a66971ff95d77feb5d4
|
9e45801526b372ea364e1aaf8df4f8ce6be4d754
|
/tnd_server/handlers/group.py
|
f4cf698a3a46187b8153799f34b7ccab92169693
|
[] |
no_license
|
niyoufa/ods
|
c09c5bd4e429cd6d4043d76ce1d89f413946a9d1
|
468ffe3fa34e17fecd0c872bdbaa9701b81b50d5
|
refs/heads/master
| 2021-01-19T21:23:43.812758
| 2016-07-08T09:49:04
| 2016-07-08T09:49:04
| 59,827,684
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,919
|
py
|
#coding=utf-8
"""
author : niyoufa
date : 2016-06-30
"""
import sys, pdb, json, datetime, pymongo, urllib
import tornado.web
import ods.tnd_server.status as status
import ods.utils as utils
import ods.tnd_server.settings as settings
import ods.tnd_server.handler as handler
import ods.clients.curl as curl
import ods.clients.rong as rong
import ods.dhui.dhui_task as dt
class DhuiGroupJoinHandler(handler.APIHandler):
#加入群组
def post(self):
result = utils.init_response_data()
try:
user_id=self.get_argument('user_id')
group_id=self.get_argument('group_id')
group_name=self.get_argument('group_name')
except Exception, e:
result = utils.reset_response_data(status.Status.PARMAS_ERROR,str(e))
self.finish(result)
return
#
self.finish(result)
class DhuiGroupQuitHandler(handler.APIHandler):
#退出群组
def post(self):
result = utils.init_response_data()
try:
user_id=self.get_argument('user_id')
group_id=self.get_argument('group_id')
except Exception, e:
result = utils.reset_response_data(status.Status.PARMAS_ERROR,str(e))
self.finish(result)
return
self.finish(result)
class DhuiGroupUserQueryHandler(handler.APIHandler):
#查询群成员
def get(self):
result = utils.init_response_data()
try:
group_id=self.get_argument('group_id')
except Exception, e:
result = utils.reset_response_data(status.Status.PARMAS_ERROR,str(e))
self.finish(result)
return
self.finish(result)
class DhuiGroupUserDetailQueryHandler(handler.APIHandler):
#查询群组内所有用户的user表中的信息
def get(self):
result = utils.init_response_data()
try:
group_id=self.get_argument('group_id','')
except Exception, e:
result = utils.reset_response_data(status.Status.PARMAS_ERROR,str(e))
self.finish(result)
return
self.finish(result)
class DhuiUserGroupQueryHandler(handler.APIHandler):
#获取某用户所在所有群组的group_id和group_name
def get(self):
result = utils.init_response_data()
try:
user_id = self.get_argument('user_id','')
except Exception, e:
result = utils.reset_response_data(status.Status.PARMAS_ERROR,str(e))
self.finish(result)
return
self.finish(result)
handlers = [
(r"/odoo/api/group/join",DhuiGroupJoinHandler),
(r"/odoo/api/group/quit",DhuiGroupQuitHandler),
(r"/odoo/api/group/user/query",DhuiGroupUserQueryHandler),
(r"/odoo/api/group/user/detail/query",DhuiGroupUserDetailQueryHandler),
(r"/odoo/api/user/group",DhuiUserGroupQueryHandler),
]
|
[
"niyoufa@tmlsystem.com"
] |
niyoufa@tmlsystem.com
|
4df9bc50ac2c592bccd0426d6011c97ff2d0b362
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/rihSbQq6x8R2D4aoa_9.py
|
3e356f033f949374071534aae585fb0fbbe6eded
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264
| 2021-03-23T16:08:01
| 2021-03-23T16:08:01
| 350,773,815
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,286
|
py
|
"""
As you know, the function `range()` returns a range of numbers, but it doesn't
work on alphabets. In this challenge, we try to fill this gap.
Write a function `alpha-range()` which takes three arguments `start`, `stop`,
and `step` (which its default value is one). The function must return a list
of alphabetical characters, ranging from start character to stop character
based on `step` value.
The function must follow these conditions:
* If `step` is zero or more than 26 or less than -26, return `"step must be a non-zero value between -26 and 26, exclusive"`.
* Both `start` and `stop` must share the same case, otherwise, return `"both start and stop must share the same case"`.
Like `range()` function:
* `step` must not be zero.
Unlike `range()` function:
* returned list must be inclusive.
* the order of characters doesn't affect the output (i.e. the output of `alpha_range("a", "f")` is the same as `alpha_range("f", "a")`, see examples).
### Examples
alpha_range("a", "f") ➞ ["a", "b", "c", "d", "e", "f"]
alpha_range("f", "a") ➞ ["a", "b", "c", "d", "e", "f"]
alpha_range("a", "f", -1) ➞ ["f", "e", "d", "c", "b", "a"]
alpha_range("f", "a", -1) ➞ ["f", "e", "d", "c", "b", "a"]
alpha_range("A", "F", -1) ➞ ["F", "E", "D", "C", "B", "A"]
alpha_range("A", "F", 0) ➞ "step must be a non-zero value between -26 and 26, exclusive"
alpha_range("A", "F", -26) ➞ "step must be a non-zero value between -26 and 26, exclusive"
alpha_range("a", "F", -1) ➞ "both start and stop must share the same case"
### Notes
All the `start` and `stop` values in the tests are valid alphabetical
characters.
"""
def alpha_range(start, stop, step=1):
    """Return an inclusive list of alphabetical characters from start to stop.

    Mirrors range() for letters, except the result is inclusive and the
    order of start/stop does not matter: only the sign of `step` decides
    whether the list ascends or descends.

    Args:
        start: a single alphabetical character (one end of the range).
        stop: a single alphabetical character (the other end).
        step: non-zero int strictly between -26 and 26 (default 1).

    Returns:
        A list of characters, or an error-message string when the
        arguments violate the documented constraints.
    """
    # "between -26 and 26, exclusive" means +/-26 themselves are invalid,
    # so they must be rejected along with 0 (<= / >= rather than < / >).
    if step == 0 or step <= -26 or step >= 26:
        return "step must be a non-zero value between -26 and 26, exclusive"
    # Endpoints must share case; inputs are guaranteed alphabetical.
    if start.islower() and stop.isupper() or start.isupper() and stop.islower():
        return "both start and stop must share the same case"
    if step > 0:
        # Ascending: walk from the smaller code point up, inclusive.
        lo, hi = min(ord(start), ord(stop)), max(ord(start), ord(stop))
        return [chr(i) for i in range(lo, hi + 1, step)]
    else:
        # Descending: walk from the larger code point down, inclusive.
        hi, lo = max(ord(start), ord(stop)), min(ord(start), ord(stop))
        return [chr(i) for i in range(hi, lo - 1, step)]
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
810b973f3daffeae7e9ec96715c0a41d5c404fb7
|
c85e68eda2058433d9b43256a121dcc3190af38f
|
/npx/command/plot.py
|
a4a0fb901c734b18bc3f10792496253b8cc489ff
|
[] |
no_license
|
nickstenning/npx
|
f87686d11b6612af373ada850878856b0bab36a3
|
385816c7725b6ce196fc2ddff369d199985683cb
|
refs/heads/master
| 2023-06-22T11:13:53.405289
| 2011-11-30T01:07:22
| 2011-11-30T01:07:22
| 2,723,145
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 293
|
py
|
import os
import sys
from matplotlib import pyplot as plt
def main():
    """Plot one float per stdin line; show interactively or emit a PNG.

    Reads the whole of stdin (one numeric value per line), plots the
    series, and either opens an interactive window (when stdout is a
    terminal) or writes PNG data to stdout (when piped/redirected).
    """
    data = [float(line) for line in sys.stdin]
    plt.plot(data)
    # A tty on stdout means a human is watching: show the GUI window.
    if os.isatty(sys.stdout.fileno()):
        plt.show()
    else:
        # NOTE(review): on Python 3, PNG bytes should go to
        # sys.stdout.buffer, not the text stream — confirm target version.
        plt.savefig(sys.stdout, format='png')
if __name__ == '__main__':
    main()
|
[
"nick@whiteink.com"
] |
nick@whiteink.com
|
3893842b288a7438c1453710f3e9b12b003c6a7a
|
5c5fdbda26a8a066e2023c73d5a4188a37223c2d
|
/validator/wsgi.py
|
17da9409496b4f2266e66efb74e831077241167e
|
[] |
no_license
|
Vivekyadv/SE-validator
|
abb806d6ac8217a3846476106d4ba27de98e9d2d
|
c11671cc9ecc3538a8fba522f9b6b350dce966bb
|
refs/heads/master
| 2023-04-25T15:17:10.587731
| 2021-05-19T10:27:12
| 2021-05-19T10:27:12
| 358,607,618
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 395
|
py
|
"""
WSGI config for validator project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'validator.settings')
application = get_wsgi_application()
|
[
"vivek.yadav2611@gmail.com"
] |
vivek.yadav2611@gmail.com
|
68c5f217b3f515a70496a04ae87399d87f2d2d50
|
32c56293475f49c6dd1b0f1334756b5ad8763da9
|
/google-cloud-sdk/lib/googlecloudsdk/api_lib/container/binauthz/kms.py
|
dd5eb041a8db3904921f0db96705cb7e2d0eee0c
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
bopopescu/socialliteapp
|
b9041f17f8724ee86f2ecc6e2e45b8ff6a44b494
|
85bb264e273568b5a0408f733b403c56373e2508
|
refs/heads/master
| 2022-11-20T03:01:47.654498
| 2020-02-01T20:29:43
| 2020-02-01T20:29:43
| 282,403,750
| 0
| 0
|
MIT
| 2020-07-25T08:31:59
| 2020-07-25T08:31:59
| null |
UTF-8
|
Python
| false
| false
| 3,213
|
py
|
# -*- coding: utf-8 -*- #
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper functions for interacting with the cloudkms API."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.util import apis
from googlecloudsdk.command_lib.kms import get_digest
from googlecloudsdk.command_lib.kms import maps
import six
API_NAME = 'cloudkms'
V1 = 'v1'
DEFAULT_VERSION = V1
class Client(object):
  """A client to access cloudkms for binauthz purposes.

  Thin wrapper over the generated Cloud KMS API client: exposes only the
  two operations Binary Authorization needs (fetching a public key and
  producing an asymmetric signature).
  """
  def __init__(self, api_version=None):
    """Creates a Cloud KMS client.

    Args:
      api_version: If provided, the cloudkms API version to use.
    """
    if api_version is None:
      api_version = DEFAULT_VERSION
    # Generated service client plus its request/response message classes.
    self.client = apis.GetClientInstance(API_NAME, api_version)
    self.messages = apis.GetMessagesModule(API_NAME, api_version)
  def GetPublicKey(self, key_ref):
    """Retrieves the public key for given CryptoKeyVersion.

    Args:
      key_ref: The CryptoKeyVersion relative resource name (string).

    Returns:
      The service's PublicKey response message.
    """
    req = self.messages.CloudkmsProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsGetPublicKeyRequest(
        name=key_ref)
    return (
        self.client.projects_locations_keyRings_cryptoKeys_cryptoKeyVersions.
        GetPublicKey(req))
  def AsymmetricSign(self, key_ref, digest_algorithm, plaintext):
    """Sign a string payload with an asymmetric KMS CryptoKeyVersion.

    Args:
      key_ref: The CryptoKeyVersion relative resource name to sign with.
      digest_algorithm: The name of the digest algorithm to use in the signing
        operation. May be one of 'sha256', 'sha384', 'sha512'.
      plaintext: The plaintext bytes to sign.

    Returns:
      An AsymmetricSignResponse.
    """
    # KMS signs a digest, not the raw payload: hash the plaintext locally
    # with the requested algorithm before building the request.
    digest = get_digest.GetDigestOfFile(
        digest_algorithm, six.BytesIO(plaintext))
    req = self.messages.CloudkmsProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsAsymmetricSignRequest(
        name=key_ref,
        asymmetricSignRequest=self.messages.AsymmetricSignRequest(
            digest=digest))
    return (
        self.client.projects_locations_keyRings_cryptoKeys_cryptoKeyVersions.
        AsymmetricSign(req))
def GetKeyUri(key_ref):
  """Build the default URI form for a KMS key resource.

  The result looks like '//cloudkms.googleapis.com/v1/...': everything up
  to and including the first ':' (the URL scheme) is dropped from the
  resource's self link.

  Args:
    key_ref: A CryptoKeyVersion Resource.

  Returns:
    The string URI.
  """
  # Split on the first colon only; keep the remainder after the scheme.
  parts = key_ref.SelfLink().split(':', 1)
  return parts[1]
def GetAlgorithmDigestType(key_algorithm):
  """Return the digest name embedded in the given CryptoKey Algorithm.

  Args:
    key_algorithm: A CryptoKey Algorithm enum value.

  Returns:
    The first name from maps.DIGESTS found in the lowercased algorithm
    name, or None when the algorithm mentions none of them.
  """
  algorithm_name = key_algorithm.name.lower()
  return next(
      (digest for digest in maps.DIGESTS if digest in algorithm_name), None)
|
[
"jonathang132298@gmail.com"
] |
jonathang132298@gmail.com
|
9173692ea365b340a64f0d8af8f685daf1708995
|
b0d763b2eace81e82eb3405fba13f2da04495f34
|
/alshamelah_api/apps/users/migrations/0022_auto_20200713_0050.py
|
ca98b7fbb84ab28cb6e208b7cca1f6bcbf7b8500
|
[
"MIT"
] |
permissive
|
devna-dev/durar-backend
|
7b57fe93e2687a64168ac75758b436109394bd9c
|
36ea29bafd4cb95098e4057eb71df211dc923008
|
refs/heads/master
| 2022-12-15T08:25:30.079110
| 2020-07-29T11:50:19
| 2020-07-29T11:50:19
| 295,212,229
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 739
|
py
|
# Generated by Django 3.0.8 on 2020-07-13 00:50
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: introduces the PasswordOTP proxy model and
    # widens the OTP `type` choices with a password-reset variant.
    dependencies = [
        ('users', '0021_auto_20200703_0223'),
    ]
    operations = [
        # PasswordOTP proxies users.OTP: no new table or columns, only a
        # new Python model class (note 'proxy': True and the empty fields).
        migrations.CreateModel(
            name='PasswordOTP',
            fields=[
            ],
            options={
                'proxy': True,
                'indexes': [],
                'constraints': [],
            },
            bases=('users.otp',),
        ),
        # Adds the 'PW' choice; changing `choices` is metadata-only and
        # does not alter the underlying column.
        # NOTE(review): 'Passwod' looks like a typo for 'Password' in the
        # display label — fixing it requires a follow-up migration.
        migrations.AlterField(
            model_name='otp',
            name='type',
            field=models.CharField(choices=[('E', 'Email'), ('P', 'Phone'), ('PW', 'Passwod')], max_length=2, verbose_name='Type'),
        ),
    ]
|
[
"adhm_n4@yahoo.com"
] |
adhm_n4@yahoo.com
|
97258bed67c0ef562954fa2a7300ced997d0377e
|
2dd4f43f5d519f5533b65ff9f844eb3fe2d57fb6
|
/FusionIIIT/applications/office_module/views.py
|
8350173e8671a501a040d3af489a78d7de0067e7
|
[] |
no_license
|
sumitkumar15061977/FusionIIIT
|
840c5b736eb7d4f221ee16cb5cdb246fa930a49c
|
67cac264c34f5fe1a10a766eef573cf840def84c
|
refs/heads/master
| 2021-09-09T08:21:25.167091
| 2018-03-14T10:58:52
| 2018-03-14T10:58:52
| 125,206,505
| 1
| 0
| null | 2018-03-14T12:06:44
| 2018-03-14T12:06:43
| null |
UTF-8
|
Python
| false
| false
| 749
|
py
|
from django.shortcuts import render
def officeOfDeanStudents(request):
    """Render the Dean (Students) office landing page."""
    context = {}
    return render(request, "officeModule/officeOfDeanStudents/officeOfDeanStudents.html", context)


def officeOfPurchaseOfficr(request):
    """Render the Purchase Officer office landing page.

    NOTE(review): the function name is missing an 'e' ('Officr'); kept
    as-is because URL configs may reference it by name.
    """
    # Consistency fix: siblings pass an explicit (currently empty) context
    # dict; this view previously inlined a bare {} — behavior is identical.
    context = {}
    return render(request, "officeModule/officeOfPurchaseOfficer/officeOfPurchaseOfficer.html", context)


def officeOfRegistrar(request):
    """Render the Registrar office landing page."""
    context = {}
    return render(request, "officeModule/officeOfRegistrar/officeOfRegistrar.html", context)


def officeOfDeanRSPC(request):
    """Render the Dean (RSPC) office landing page."""
    context = {}
    return render(request, "officeModule/officeOfDeanRSPC/officeOfDeanRSPC.html", context)


def genericModule(request):
    """Render the generic office-module landing page."""
    context = {}
    return render(request, "officeModule/genericModule/genericModule.html", context)
|
[
"root@localhost.localdomain"
] |
root@localhost.localdomain
|
3fd73e4197c65c49fbfae88fa986693566e107bf
|
9e2f24027e4044252639563461116a895acce039
|
/biosteam/units/facilities/_cleaning_in_place.py
|
c878effb78118dc3773aec8f7b0f67780f21eac1
|
[
"LicenseRef-scancode-unknown-license-reference",
"MIT",
"NCSA"
] |
permissive
|
yalinli2/biosteam
|
5010b5d430cc746f6fa00a23805a1c1f5cac7a81
|
e7385ca1feac642881a357ffbc4461382549c3a4
|
refs/heads/master
| 2022-03-20T23:57:06.824292
| 2022-02-22T15:55:11
| 2022-02-22T15:55:11
| 190,422,353
| 0
| 0
|
MIT
| 2019-06-05T15:39:04
| 2019-06-05T15:39:03
| null |
UTF-8
|
Python
| false
| false
| 635
|
py
|
# -*- coding: utf-8 -*-
# BioSTEAM: The Biorefinery Simulation and Techno-Economic Analysis Modules
# Copyright (C) 2020-2021, Yoel Cortes-Pena <yoelcortes@gmail.com>
#
# This module is under the UIUC open-source license. See
# github.com/BioSTEAMDevelopmentGroup/biosteam/blob/master/LICENSE.txt
# for license details.
"""
"""
from . import Facility
from ..decorators import cost
__all__ = ('CIPpackage',)
# %%
@cost('Flow rate', units='kg/hr',
      S=63, cost=421e3, BM=1.8, n=0.6)
class CIPpackage(Facility):
    # Clean-in-place utility facility. Capital cost scales with inlet flow
    # rate via @cost: base size 63 kg/hr, base cost $421k (CE index 522),
    # bare-module factor 1.8, six-tenths-rule exponent 0.6.
    ticket_name = 'CIP'
    line = 'CIP Package'
    # NOTE(review): presumably priority-0 facilities run first in the
    # facility network — confirm against biosteam's Facility docs.
    network_priority = 0
    # One inlet and one outlet stream.
    _N_ins = 1
    _N_outs = 1
|
[
"yoelcortes@gmail.com"
] |
yoelcortes@gmail.com
|
e6fab0f4066de7f9c522035dbe66caa6aaa0bb4d
|
489a45659476fafb66934427e42bfce3d60a0116
|
/Assets/Python/Smeagolheart/StarSigns.py
|
888b3d9cc9c20597e8dbe168ab8c0669687215cf
|
[] |
no_license
|
billw2012/Caveman2Cosmos
|
3a8c6ea347e75dbe2de9519fe70e6b38e0cf6dbe
|
2382877536e1669972dd024ce2d0f3d0d5ffd988
|
refs/heads/master
| 2020-07-19T00:14:48.856106
| 2019-09-03T23:20:42
| 2019-09-03T23:21:02
| 197,989,388
| 1
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 986
|
py
|
# Star Signs
from CvPythonExtensions import *
import CvUtil
def give(GC, TRNSLTR, GAME, CyUnit, CyPlayer, CyCity = None, iPlayer = None):
aStarSignList = (
"PROMOTION_AQUARIUS",
"PROMOTION_ARIES",
"PROMOTION_CANCER",
"PROMOTION_CAPRICORN",
"PROMOTION_GEMINI",
"PROMOTION_LEO",
"PROMOTION_LIBRA",
"PROMOTION_PISCES",
"PROMOTION_SAGITTARIUS",
"PROMOTION_SCORPIO",
"PROMOTION_TAURUS",
"PROMOTION_VIRGO"
)
iChance = GAME.getSorenRandNum(12, "Star Signs") # integer 0-11
iPromotion = GC.getInfoTypeForString(aStarSignList[iChance])
CyUnit.setHasPromotion(iPromotion, True)
if CyPlayer.isHuman():
if CyCity:
szTxt = TRNSLTR.getText("TXT_KEY_MESSAGE_STARSIGN_BUILD", (CyCity.getName(),))
else:
szTxt = TRNSLTR.getText("TXT_KEY_MESSAGE_STARSIGN_CREATE", ())
iPlayer = CyUnit.getOwner()
szIcon = GC.getPromotionInfo(iPromotion).getButton()
CvUtil.sendMessage(szTxt, iPlayer, 16, szIcon, ColorTypes(44), CyUnit.getX(), CyUnit.getY(), True, True)
|
[
"alberts2@live.de"
] |
alberts2@live.de
|
897cf237b24d935398d13bf5539afb7950f6e027
|
8bd6b0784de9a1e6a39d0f5f23f2d8fb50c73d49
|
/MethodRefine-Abs/blocks-world/MethodRefine/blockworld_benchmark-mid/validating/validating_17.py
|
0a88642e5064667fcf0c11e786072baad5f28a04
|
[] |
no_license
|
sysulic/MethodRefine
|
a483d74e65337dff4bc2539ce3caa3bf83748b48
|
adbb22d4663041d853d3132f75032b7561bf605c
|
refs/heads/master
| 2020-09-14T10:45:55.948174
| 2020-05-01T09:13:59
| 2020-05-01T09:13:59
| 223,104,986
| 3
| 2
| null | 2020-04-27T11:01:36
| 2019-11-21T06:33:16
|
Python
|
UTF-8
|
Python
| false
| false
| 1,276
|
py
|
#!/usr/bin/env python
# coding=utf-8
import sys
sys.path.insert(0, './')
from blockworld import *
import new_tihtn_planner
state0 = new_tihtn_planner.State('state0')
allow = False
state0.on = {'block-2':'block-5','block-3':'block-2','block-5':False,'block-1':False,'block-4':False,}
state0.down = {'block-2':'block-3','block-3':False,'block-5':'block-2','block-1':False,'block-4':False,}
state0.clear = {'block-2':False,'block-3':False,'block-5':True,'block-1':True,'block-4':True,}
state0.on_table = {'block-2':False,'block-3':True,'block-5':False,'block-1':True,'block-4':True,}
state0.holding = False
new_tihtn_planner.declare_types({'block':['block-1','block-2','block-3','block-4','block-5',],'nothing':[()]})
new_tihtn_planner.declare_funs({pick_up:['block'],put_down:['block'],stack:['block', 'block'],checkpile1:['nothing'],checkpile2:['nothing'],checkpile3:['nothing'],checkpile4:['nothing']})
new_tihtn_planner.instance()
def execute(completable):
return new_tihtn_planner.pyhop(completable, allow, state0,[('tower5','block-1','block-2', 'block-3', 'block-4', 'block-5')], [],9)
def add_methods(fun_obj_list):
for fun in fun_obj_list:
new_tihtn_planner.add_method(fun.func_name.split('__')[0], fun)
def reverse_methods():
new_tihtn_planner.reverse_methods()
|
[
"526552330@qq.com"
] |
526552330@qq.com
|
55548e1bdf4fe09c27ce7c0c0ad3ff691558058c
|
9b0bdebe81e558d3851609687e4ccd70ad026c7f
|
/算法思想/数学/03.判断质数.py
|
77655309944993d93033d7fbcc954a4636e287ec
|
[] |
no_license
|
lizenghui1121/DS_algorithms
|
645cdad007ccbbfa82cc5ca9e3fc7f543644ab21
|
9690efcfe70663670691de02962fb534161bfc8d
|
refs/heads/master
| 2022-12-13T22:45:23.108838
| 2020-09-07T13:40:17
| 2020-09-07T13:40:17
| 275,062,257
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 268
|
py
|
"""
@Author: Li Zenghui
@Date: 2020-08-08 16:39
"""
def is_prime(a):
    """Return True if integer ``a`` is prime, else False.

    Uses trial division up to floor(sqrt(a)): any composite has a divisor
    no larger than its square root, so scanning to ``a // 2`` (as before)
    did redundant work without changing the result.
    """
    from math import isqrt  # local import keeps the module dependency-free
    if a < 2:
        # 0, 1 and negative numbers are not prime by definition.
        return False
    for i in range(2, isqrt(a) + 1):
        if a % i == 0:
            return False
    return True
print(is_prime(17))
print(is_prime(19))
print(is_prime(21))
|
[
"954267393@qq.com"
] |
954267393@qq.com
|
68096293210ed5bcaffcdbc2a75c9b67bd4cea6c
|
57f733a3b470505e582528042cd37cb87eb5f03f
|
/probabilistic_programming/chapter1_4_text_msg.py
|
4d612268f91df622674e0b1baf626dd797c126a6
|
[] |
no_license
|
auroua/test
|
23246df57fc8644f0c2fd480d0f8c69e9b01a295
|
8bf601c886db42e0abe4f86fbcf33adef21a6470
|
refs/heads/master
| 2016-08-12T02:55:49.118185
| 2015-12-11T05:19:27
| 2015-12-11T05:19:27
| 44,439,899
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 399
|
py
|
__author__ = 'auroua'
import numpy as np
import matplotlib.pyplot as plt
import scipy.stats as stats
count_data = np.loadtxt("txtdata.csv")
n_count_data = len(count_data)
plt.bar(np.arange(n_count_data), count_data, color="#348ABD")
plt.xlabel("Time (days)")
plt.ylabel("count of text-msgs received")
plt.title("Did the user's texting habits change over time?")
plt.xlim(0, n_count_data)
plt.show()
|
[
"auroua@yeah.net"
] |
auroua@yeah.net
|
61304fc69048fd47ea1dede9b57c3795c6fe7a29
|
7d90d2ce27c6ee0af74391b09909edbd45fdc2f0
|
/renix_py_api/api_gen/StartStreamArpCommand_Autogen.py
|
ff74cef8ed9094594b147ae8c531f391544f731c
|
[] |
no_license
|
gaoxingyu-hub/54testframework-master-e284
|
d7ea0d4a715b65c8652430e963a86b9522a7237a
|
57dd2197e7d91b8ad8fb2bd0e3503f10afa08544
|
refs/heads/master
| 2023-04-30T05:50:41.542402
| 2021-05-28T09:19:37
| 2021-05-28T09:19:37
| 309,922,838
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,149
|
py
|
"""
Auto-generated File
Create Time: 2019-12-27 02:33:25
"""
from .ROMEnum_Autogen import *
from renix_py_api.renix_common_api import *
from renix_py_api import rom_manager
from .ROMCommand_Autogen import ROMCommand
@rom_manager.rom
class StartStreamArpCommand(ROMCommand):
def __init__(self, StreamHandles=None, **kwargs):
self._StreamHandles = StreamHandles # Stream Handles
properties = kwargs.copy()
if StreamHandles is not None:
properties['StreamHandles'] = StreamHandles
# call base class function, and it will send message to renix server to create a class.
super(StartStreamArpCommand, self).__init__(**properties)
@property
def StreamHandles(self):
"""
get the value of property _StreamHandles
"""
return self._StreamHandles
@StreamHandles.setter
def StreamHandles(self, value):
self._StreamHandles = value
def _set_streamhandles_with_str(self, value):
tmp_value = value.strip()
if tmp_value.startswith('{'):
tmp_value = tmp_value[1:-1]
self._StreamHandles = tmp_value.split()
|
[
"gaoxingyu@example.com"
] |
gaoxingyu@example.com
|
92d9ebe5980d4ffdc0a6b8bd7097185bf06741ad
|
ebf7427c8605d8654c67e3386b8adb2bd7503b44
|
/LeetCode Pattern/8. LinkedList/237_easy_delete_node_in_a_linked_list.py
|
180eb686a740d132ad848e352f5d77c15b8f1bac
|
[] |
no_license
|
ryoman81/Leetcode-challenge
|
78e5bc4800a440052f8515c75829e669484fed40
|
fac3a49c49d2f62eafffb201a9d9cfac988ad30a
|
refs/heads/master
| 2023-09-04T05:21:54.569459
| 2021-10-26T14:14:08
| 2021-10-26T14:14:08
| 291,615,959
| 7
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,229
|
py
|
'''
Write a function to delete a node in a singly-linked list.
You will not be given access to the head of the list, instead you will be given access to the node to be deleted directly.
It is guaranteed that the node to be deleted is not a tail node in the list.
Example 1:
Input: head = [4,5,1,9], node = 5
Output: [4,1,9]
Explanation: You are given the second node with value 5, the linked list should become 4 -> 1 -> 9 after calling your function.
Constraints:
The number of the nodes in the given list is in the range [2, 1000].
-1000 <= Node.val <= 1000
The value of each node in the list is unique.
The node to be deleted is in the list and is not a tail node
'''
class Solution:
    """Delete a given (non-tail) node from a singly linked list.

    The head is not provided, so the predecessor of ``node`` is
    unreachable. Instead of unlinking ``node`` itself, copy the
    successor's value into it and splice the successor out — the list
    then reads exactly as if ``node`` had been removed.

    Complexity: O(1) time, O(1) space.
    """

    def deleteNode(self, node):
        successor = node.next
        # Overwrite this node with its successor, then bypass the successor.
        node.val = successor.val
        node.next = successor.next
## No need for testing......
|
[
"sqygg1002@gmail.com"
] |
sqygg1002@gmail.com
|
b08b1c91edffb24ecff9cb2287e3d35255dd7fcb
|
4656c9b22bee48b4156eb3524bab3215a1993d83
|
/packages/gui/__init__.py
|
1999100759b2efb9cb603a4fdc0524162e60863b
|
[] |
no_license
|
mikebourbeauart/tempaam
|
0bc9215de0d967788b3c65b481a5fd3c7153dddc
|
c2582b5cc1fc45042c5b435f703786d7c04a51a2
|
refs/heads/master
| 2021-03-27T10:35:43.378899
| 2018-09-06T04:46:18
| 2018-09-06T04:46:18
| 120,359,405
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 224
|
py
|
from publish import PubTab
from main_window import MainAAM
from .folders_widget import FoldersWidget
from .assets_widget import AssetsWidget
from .selection_tab import SelTabWidget
from .options_tab import OptionsTabWidget
|
[
"borbs727@gmail.com"
] |
borbs727@gmail.com
|
3e54d34efb3d978167a45f797aa25c0d34098665
|
238e46a903cf7fac4f83fa8681094bf3c417d22d
|
/VTK/vtk_7.1.1_x64_Debug/lib/python2.7/site-packages/vtk/tk/__init__.py
|
19d7f3c0b6596ecf6b8b6844c8c2c89d0d7ea161
|
[
"BSD-3-Clause"
] |
permissive
|
baojunli/FastCAE
|
da1277f90e584084d461590a3699b941d8c4030b
|
a3f99f6402da564df87fcef30674ce5f44379962
|
refs/heads/master
| 2023-02-25T20:25:31.815729
| 2021-02-01T03:17:33
| 2021-02-01T03:17:33
| 268,390,180
| 1
| 0
|
BSD-3-Clause
| 2020-06-01T00:39:31
| 2020-06-01T00:39:31
| null |
UTF-8
|
Python
| false
| false
| 155
|
py
|
"""Tkinter widgets for VTK."""
__all__ = ['vtkTkRenderWidget', 'vtkTkImageViewerWidget',
'vtkTkRenderWindowInteractor', 'vtkTkPhotoImage']
|
[
"l”ibaojunqd@foxmail.com“"
] |
l”ibaojunqd@foxmail.com“
|
9226ed8ba885ea45ad7f7bb2eb7bc69649bcb9de
|
8a92f5860a44c8ca6816af18295f26d3b364a25f
|
/tests/cli/test_nuttercli.py
|
e9367a5e8c9afd2f412e418b32ff14d76ee3941f
|
[
"MIT"
] |
permissive
|
microsoft/nutter
|
b86dd58acbac02a25f2fc6e590e28073202862c2
|
368248bb3c2ed88a60ba6f5953b89fcc2cd0364e
|
refs/heads/master
| 2023-06-29T23:59:12.723142
| 2022-12-16T16:30:53
| 2022-12-16T16:30:53
| 219,394,533
| 225
| 36
|
MIT
| 2023-09-14T10:57:09
| 2019-11-04T01:43:34
|
Python
|
UTF-8
|
Python
| false
| false
| 8,129
|
py
|
"""
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
"""
import pytest
import os
import json
import cli.nuttercli as nuttercli
from cli.nuttercli import NutterCLI
from common.apiclientresults import ExecuteNotebookResult
import mock
from common.testresult import TestResults, TestResult
from cli.reportsman import ReportWriterManager, ReportWritersTypes, ReportWriters
def test__get_cli_version__without_build__env_var__returns_value():
version = nuttercli.get_cli_version()
assert version is not None
def test__get_cli_header_value():
version = nuttercli.get_cli_version()
header = 'Nutter Version {}\n'.format(version)
header += '+' * 50
header += '\n'
assert nuttercli.get_cli_header() == header
def test__get_cli_version__with_build__env_var__returns_value(mocker):
version = nuttercli.get_cli_version()
build_number = '1.2.3'
mocker.patch.dict(
os.environ, {nuttercli.BUILD_NUMBER_ENV_VAR: build_number})
version_with_build_number = nuttercli.get_cli_version()
assert version_with_build_number == '{}.{}'.format(version, build_number)
def test__get_version_label__valid_string(mocker):
mocker.patch.dict(os.environ, {'DATABRICKS_HOST': 'myhost'})
mocker.patch.dict(os.environ, {'DATABRICKS_TOKEN': 'mytoken'})
version = nuttercli.get_cli_version()
expected = 'Nutter Version {}'.format(version)
cli = NutterCLI()
version_from_cli = cli._get_version_label()
assert expected == version_from_cli
def test__nutter_cli_ctor__handles__version_and_exits_0(mocker):
mocker.patch.dict(os.environ, {'DATABRICKS_HOST': 'myhost'})
mocker.patch.dict(os.environ, {'DATABRICKS_TOKEN': 'mytoken'})
with pytest.raises(SystemExit) as mock_ex:
cli = NutterCLI(version=True)
assert mock_ex.type == SystemExit
assert mock_ex.value.code == 0
def test__run__pattern__display_results(mocker):
test_results = TestResults().serialize()
cli = _get_cli_for_tests(
mocker, 'SUCCESS', 'TERMINATED', test_results)
mocker.patch.object(cli, '_display_test_results')
cli.run('my*', 'cluster')
assert cli._display_test_results.call_count == 1
def test__nutter_cli_ctor__handles__configurationexception_and_exits_1(mocker):
mocker.patch.dict(os.environ, {'DATABRICKS_HOST': ''})
mocker.patch.dict(os.environ, {'DATABRICKS_TOKEN': ''})
with pytest.raises(SystemExit) as mock_ex:
cli = NutterCLI()
assert mock_ex.type == SystemExit
assert mock_ex.value.code == 1
def test__run__one_test_fullpath__display_results(mocker):
test_results = TestResults().serialize()
cli = _get_cli_for_tests(
mocker, 'SUCCESS', 'TERMINATED', test_results)
mocker.patch.object(cli, '_display_test_results')
cli.run('test_mynotebook2', 'cluster')
assert cli._display_test_results.call_count == 1
def test__run_one_test_junit_writter__writer_writes(mocker):
test_results = TestResults().serialize()
cli = _get_cli_for_tests(
mocker, 'SUCCESS', 'TERMINATED', test_results)
mocker.patch.object(cli, '_get_report_writer_manager')
mock_report_manager = ReportWriterManager(ReportWriters.JUNIT)
mocker.patch.object(mock_report_manager, 'write')
mocker.patch.object(mock_report_manager, 'add_result')
cli._get_report_writer_manager.return_value = mock_report_manager
cli.run('test_mynotebook2', 'cluster')
assert mock_report_manager.add_result.call_count == 1
assert mock_report_manager.write.call_count == 1
assert not mock_report_manager._providers[ReportWritersTypes.JUNIT].has_data(
)
def test__list__none__display_result(mocker):
cli = _get_cli_for_tests(
mocker, 'SUCCESS', 'TERMINATED', 'IHAVERETURNED')
mocker.patch.object(cli, '_display_list_results')
cli.list('/')
assert cli._display_list_results.call_count == 1
def _get_cli_for_tests(mocker, result_state, life_cycle_state, notebook_result):
    """Build a NutterCLI whose Databricks-facing calls are fully mocked.

    Credentials come from patched env vars; run_test, run_tests and
    list_tests on the underlying nutter object return canned responses
    parameterized by the given state/result values.
    """
    mocker.patch.dict(os.environ, {'DATABRICKS_HOST': 'myhost'})
    mocker.patch.dict(os.environ, {'DATABRICKS_TOKEN': 'mytoken'})
    cli = NutterCLI()
    # Single-notebook execution path.
    mocker.patch.object(cli._nutter, 'run_test')
    cli._nutter.run_test.return_value = _get_run_test_response(
        result_state, life_cycle_state, notebook_result)
    # Pattern (multi-notebook) execution path.
    mocker.patch.object(cli._nutter, 'run_tests')
    cli._nutter.run_tests.return_value = _get_run_tests_response(
        result_state, life_cycle_state, notebook_result)
    # Workspace listing path.
    mocker.patch.object(cli._nutter, 'list_tests')
    cli._nutter.list_tests.return_value = _get_list_tests_response()
    return cli
def _get_run_test_response(result_state, life_cycle_state, notebook_result):
data_json = """
{"notebook_output":
{"result": "IHaveReturned", "truncated": false},
"metadata":
{"execution_duration": 15000,
"run_type": "SUBMIT_RUN",
"cleanup_duration": 0,
"number_in_job": 1,
"cluster_instance":
{"cluster_id": "0925-141d1222-narcs242",
"spark_context_id": "803963628344534476"},
"creator_user_name": "abc@microsoft.com",
"task": {"notebook_task": {"notebook_path": "/test_mynotebook"}},
"run_id": 7, "start_time": 1569887259173,
"job_id": 4,
"state": {"result_state": "SUCCESS", "state_message": "",
"life_cycle_state": "TERMINATED"}, "setup_duration": 2000,
"run_page_url": "https://westus2.azuredatabricks.net/?o=14702dasda6094293890#job/4/run/1",
"cluster_spec": {"existing_cluster_id": "0925-141122-narcs242"}, "run_name": "myrun"}}
"""
data_dict = json.loads(data_json)
data_dict['notebook_output']['result'] = notebook_result
data_dict['metadata']['state']['result_state'] = result_state
data_dict['metadata']['state']['life_cycle_state'] = life_cycle_state
return ExecuteNotebookResult.from_job_output(data_dict)
def _get_list_tests_response():
result = {}
result['test_mynotebook'] = '/test_mynotebook'
result['test_mynotebook2'] = '/test_mynotebook2'
return result
def _get_run_tests_response(result_state, life_cycle_state, notebook_result):
data_json = """
{"notebook_output":
{"result": "IHaveReturned", "truncated": false},
"metadata":
{"execution_duration": 15000,
"run_type": "SUBMIT_RUN",
"cleanup_duration": 0,
"number_in_job": 1,
"cluster_instance":
{"cluster_id": "0925-141d1222-narcs242",
"spark_context_id": "803963628344534476"},
"creator_user_name": "abc@microsoft.com",
"task": {"notebook_task": {"notebook_path": "/test_mynotebook"}},
"run_id": 7, "start_time": 1569887259173,
"job_id": 4,
"state": {"result_state": "SUCCESS", "state_message": "",
"life_cycle_state": "TERMINATED"}, "setup_duration": 2000,
"run_page_url": "https://westus2.azuredatabricks.net/?o=14702dasda6094293890#job/4/run/1",
"cluster_spec": {"existing_cluster_id": "0925-141122-narcs242"}, "run_name": "myrun"}}
"""
data_dict = json.loads(data_json)
data_dict['notebook_output']['result'] = notebook_result
data_dict['metadata']['state']['result_state'] = result_state
data_dict['metadata']['state']['life_cycle_state'] = life_cycle_state
data_dict2 = json.loads(data_json)
data_dict2['notebook_output']['result'] = notebook_result
data_dict2['metadata']['state']['result_state'] = result_state
data_dict2['metadata']['task']['notebook_task']['notebook_path'] = '/test_mynotebook2'
data_dict2['metadata']['state']['life_cycle_state'] = life_cycle_state
results = []
results.append(ExecuteNotebookResult.from_job_output(data_dict))
results.append(ExecuteNotebookResult.from_job_output(data_dict2))
return results
|
[
"noreply@github.com"
] |
microsoft.noreply@github.com
|
217b1de8d4c07839bfa55e381c6b8ea32bf45360
|
d8cbc94a4207337d709a64447acb9c8fe501c75a
|
/correspondence_retrieval/code/feature.py
|
1131c54b3b9d2cf9174b74c8a8c0d3673e09ef25
|
[
"MIT"
] |
permissive
|
sripathisridhar/acav100m
|
6f672384fa723a637d94accbbe11a9a962f5f87f
|
13b438b6ce46d09ba6f79aebb84ad31dfa3a8e6f
|
refs/heads/master
| 2023-09-06T01:05:21.188822
| 2021-11-18T08:08:08
| 2021-11-18T08:08:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,140
|
py
|
from functools import partial
from collections import defaultdict
from tqdm import tqdm
import torch
import torchvision
import torchvision.transforms as transforms
from model import get_model
def get_loaders(num_workers):
    """Build CIFAR-10 train/test DataLoaders yielding (original, rotated) view pairs.

    Each sample becomes a pair: the original image and a 90°-rotated copy,
    both converted to tensors and color-normalized. Loaders are unshuffled
    so feature extraction is deterministic.

    Args:
        num_workers: worker-process count for each DataLoader.

    Returns:
        dict with 'train' and 'test' DataLoaders (batch_size=128).
    """
    # Datasets and Dataloaders
    mean, std = [0.4914, 0.4822, 0.4465], [0.2023, 0.1994, 0.2010] # color normalization
    # Fixed 90-degree rotation used to produce the second view.
    Rot90 = partial(
        transforms.functional.rotate,
        angle=90,
    )
    class SingleToPairTransform:
        """Pair of (original, modified)"""
        def __init__(self, funcs):
            self.funcs = funcs
        def run(self, x):
            # Apply the modifier pipeline to produce the second view.
            for func in self.funcs:
                x = func(x)
            return x
        def __call__(self, x):
            return x, self.run(x)
    class PairTransform:
        # Applies the same transform pipeline to every element of a pair.
        def __init__(self, funcs):
            self.funcs = funcs
        def __call__(self, xs):
            res = []
            for x in xs:
                for func in self.funcs:
                    x = func(x)
                res.append(x)
            return res
    # Pipeline: split into (original, rotated), then tensorize and
    # normalize both members of the pair.
    view_transform = transforms.Compose(
        [
            SingleToPairTransform([Rot90]),
            PairTransform([transforms.ToTensor()]),
            PairTransform([transforms.Normalize(mean, std)]),
        ]
    )
    datasets = {
        'train': torchvision.datasets.CIFAR10(
            root='./data', train=True, download=True, transform=view_transform),
        'test': torchvision.datasets.CIFAR10(
            root='./data', train=False, download=True, transform=view_transform),
    }
    # shuffle=False: extraction order must be stable across runs.
    loaders = {key: torch.utils.data.DataLoader(
        dataset, batch_size=128, shuffle=False, num_workers=num_workers)
        for key, dataset in datasets.items()}
    return loaders
def _get_feature(model, loaders, device):
    """Extract per-view, per-class feature tensors from all loaders.

    Args:
        model: feature extractor; maps a batch of images to a batch of
            feature vectors.
        loaders: dict of DataLoaders yielding (views, labels), where
            ``views`` is a sequence of batched view tensors.
        device: torch device the model runs on.

    Returns:
        Tuple (views_features, dataset_size, nclasses):
        views_features[view][class] is a stacked (N, D) tensor,
        dataset_size is the total sample count (measured on view 0),
        and nclasses the number of distinct labels seen.
    """
    # Nested mapping: view index -> class label -> list of feature rows.
    views_features = defaultdict(lambda: defaultdict(list))
    print("extracting features")
    # view / class / index
    with torch.no_grad():
        for loader in loaders.values():
            for views, labels in tqdm(loader, ncols=80):
                for view_index, view in enumerate(views):
                    view = view.to(device)
                    # Detach and move to CPU once per batch; the rows
                    # sliced below are then already CPU tensors.
                    # (Previously a dead `outputs = []` was immediately
                    # clobbered here and each row was re-detached.)
                    outputs = model(view).detach().cpu()
                    for i in range(len(labels)):
                        views_features[view_index][labels[i].item()].append(outputs[i])
    dataset_size = sum(len(class_features) for class_features in views_features[0].values())
    nclasses = len(views_features[0])
    # Convert each per-class list into one stacked (N, D) tensor.
    for view_index, view_features in views_features.items():
        for class_index, class_feature in view_features.items():
            views_features[view_index][class_index] = torch.stack(class_feature, dim=0)
    return views_features, dataset_size, nclasses
def get_feature(num_workers, device, finetune=False, sample=False):
    """Build the pair-view CIFAR-10 loaders and model, then extract features.

    Args:
        num_workers: DataLoader worker count.
        device: torch device for the model.
        finetune: forwarded to get_model.
        sample: forwarded to get_model.

    Returns:
        The (features, dataset_size, nclasses) triple from _get_feature.
    """
    data_loaders = get_loaders(num_workers)
    extractor = get_model(
        device, num_workers, finetune=finetune, sample=sample)['model']
    return _get_feature(extractor, data_loaders, device)
|
[
"sangho.lee@vision.snu.ac.kr"
] |
sangho.lee@vision.snu.ac.kr
|
d088f9806c9ae54531c32bef8fa1818a989c00e1
|
f6a9b1a1b66f369c00e8bfeb3907f927b999e77f
|
/test/onnx/test_onnx_opset.py
|
5c701e3d48a1a095609335eeebcffdea39b2ef71
|
[
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"Apache-2.0",
"BSD-2-Clause"
] |
permissive
|
cjlovering/pytorch
|
62f1606be4190c910a44186a3046cd36df1be9db
|
78a376592d39859b06a10ce47b77db7be5118ebb
|
refs/heads/master
| 2020-06-01T06:56:35.753689
| 2019-06-07T03:47:49
| 2019-06-07T03:57:12
| 190,688,055
| 0
| 0
|
NOASSERTION
| 2019-06-07T04:31:17
| 2019-06-07T04:31:16
| null |
UTF-8
|
Python
| false
| false
| 5,395
|
py
|
from test_pytorch_common import TestCase, run_tests
import torch
import torch.onnx
from torch.nn import Module
import onnx
import io
from torch.onnx.symbolic_helper import _export_onnx_opset_version
from torch.onnx import ir_version, producer_name, producer_version
def check_onnx_opset_operator(model, ops, opset_version=_export_onnx_opset_version):
    """Verify an exported ONNX model's metadata, schema, and node list.

    Args:
        model: the loaded onnx ModelProto.
        ops: one spec dict per expected graph node, in graph order; each
            carries 'op_name' and optionally 'attributes', a list of dicts
            keyed by AttributeProto field names ('name', 'i', 'ints', ...).
        opset_version: opset the model is expected to declare.
    """
    # check_onnx_components
    assert model.ir_version == ir_version and \
        model.producer_name == producer_name and \
        model.producer_version == producer_version and \
        model.opset_import[0].version == opset_version

    # check the schema with the onnx checker
    onnx.checker.check_model(model)

    # ops must describe every node of the graph, in order: each node is
    # matched on op_type and, when given, on its attribute field values.
    # (zip replaces the previous index-based range(len(...)) loops.)
    graph = model.graph
    assert len(ops) == len(graph.node)
    for spec, node in zip(ops, graph.node):
        assert node.op_type == spec['op_name']
        if "attributes" in spec:
            attributes = spec['attributes']
            assert len(attributes) == len(node.attribute)
            for expected, actual in zip(attributes, node.attribute):
                for attribute_field in expected.keys():
                    assert expected[attribute_field] == getattr(actual, attribute_field)
def check_onnx_opsets_operator(module, x, ops, opset_versions, training=False):
    """Export `module` at each requested opset version and verify the graph."""
    for version in opset_versions:
        buffer = io.BytesIO()
        torch.onnx.export(module, x, buffer, opset_version=version, training=training)
        # Round-trip through onnx.load so the checker sees a parsed ModelProto.
        exported = onnx.load(io.BytesIO(buffer.getvalue()))
        check_onnx_opset_operator(exported, ops[version], version)
class TestONNXOpset(TestCase):
    """Regression tests pinning the exact ops/attributes emitted per ONNX opset.

    Each test builds a small module, exports it at one or more opset
    versions, and checks the produced graph against a literal expectation
    table (op names and attribute values must match exactly).
    """

    def test_opset_fallback(self):
        # isnan has no opset-10-specific symbolic, so both opsets emit the
        # same IsNaN + Cast pair.
        class MyModule(Module):
            def forward(self, x):
                return torch.isnan(x)

        ops = [{"op_name" : "IsNaN"},
               {"op_name" : "Cast", "attributes" : [{"name" : "to", "i" : 2, "type" : 2}]}]
        ops = {9 : ops, 10 : ops}
        x = torch.tensor([1.0, float('nan'), 2.0])
        check_onnx_opsets_operator(MyModule(), x, ops, opset_versions=[9, 10])

    def test_topk(self):
        # Opset 9 takes k as an attribute; opset 10 passes k as a tensor
        # input (hence the extra Constant + Unsqueeze nodes).
        class MyModule(Module):
            def forward(self, x):
                return torch.topk(x, 3)

        ops_9 = [{"op_name" : "TopK", "attributes" : [{"name" : "axis", "i" : -1, "type" : 2},
                 {"name" : "k", "i" : 3, "type" : 2}]}]
        ops_10 = [{"op_name" : "Constant", "attributes" : [{"name" : "value", "type" : 4}]},
                  {"op_name" : "Unsqueeze", "attributes" : [{"name" : "axes", "ints" : [0], "type" : 7}]},
                  {"op_name" : "TopK", "attributes" : [{"name" : "axis", "i" : -1, "type" : 2}]}]
        ops = {9 : ops_9, 10 : ops_10}
        x = torch.arange(1., 6., requires_grad=True)
        check_onnx_opsets_operator(MyModule(), x, ops, opset_versions=[9, 10])

    def test_maxpool(self):
        # Opset 10 adds the ceil_mode attribute (and supports dilations).
        module = torch.nn.MaxPool1d(2, stride=1)
        ops_9 = [{"op_name" : "MaxPool",
                  "attributes" :
                  [{"name": "kernel_shape", "ints": [2], "type": 7},
                   {"name": "pads", "ints": [0, 0], "type": 7},
                   {"name": "strides", "ints": [1], "type": 7}]}]
        ops_10 = [{"op_name" : "MaxPool",
                   "attributes" :
                   [{"name": "ceil_mode", "i": 0, "type": 2},
                    {"name": "kernel_shape", "ints": [2], "type": 7},
                    {"name": "pads", "ints": [0, 0], "type": 7},
                    {"name": "strides", "ints": [1], "type": 7}]}]
        ops = {9 : ops_9, 10 : ops_10}
        x = torch.randn(20, 16, 50)
        check_onnx_opsets_operator(module, x, ops, opset_versions=[10])

        # add test with dilations
        module = torch.nn.MaxPool1d(2, stride=1, dilation=2)
        ops_10 = [{"op_name" : "MaxPool",
                   "attributes" :
                   [{"name": "ceil_mode", "i": 0, "type": 2},
                    {"name": "dilations", "ints": [2], "type": 7},
                    {"name": "kernel_shape", "ints": [2], "type": 7},
                    {"name": "pads", "ints": [0, 0], "type": 7},
                    {"name": "strides", "ints": [1], "type": 7}]}]
        ops = {9 : ops_9, 10 : ops_10}
        x = torch.randn(20, 16, 50)
        check_onnx_opsets_operator(module, x, ops, opset_versions=[10])

    def test_dropout(self):
        class MyModule(Module):
            def __init__(self):
                super(MyModule, self).__init__()
                self.dropout = torch.nn.Dropout(0.5)

            def forward(self, x):
                return self.dropout(x)

        x = torch.randn(1, 2, 3)
        # we should only export the onnx Dropout op in training mode; test both modes

        # test training mode
        ops = [{"op_name" : "Dropout", "attributes" : [{"name" : "ratio", "f" : 0.5, "type" : 1}]}]
        ops = {9 : ops, 10 : ops}
        check_onnx_opsets_operator(MyModule(), x, ops, opset_versions=[9, 10], training=True)

        # test eval mode: Dropout is a no-op, so the graph contains no nodes.
        ops = []
        ops = {9 : ops, 10 : ops}
        check_onnx_opsets_operator(MyModule(), x, ops, opset_versions=[9, 10], training=False)
# Run the opset regression tests when executed directly as a script.
if __name__ == '__main__':
    run_tests()
|
[
"facebook-github-bot@users.noreply.github.com"
] |
facebook-github-bot@users.noreply.github.com
|
f88444e475a9343e565e7690680f1c21bae132aa
|
5a4436884af5341ce855c0e84866b972a0f61c05
|
/day5/recursion/6.py
|
04c04f75d05c5974bac22c2c6d636a59e4b74582
|
[] |
no_license
|
sreejithev/pythoncodes
|
74a420c4f025b893e27f17ba85632a4a096f17fd
|
70df14871a9687916d1c4ada76c055607f13e8ce
|
refs/heads/master
| 2021-01-21T20:59:47.056167
| 2017-06-19T09:43:17
| 2017-06-19T09:43:17
| 92,292,259
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 136
|
py
|
# handling tree-like structure
my_expression = 12


def eval(expr):
    """Evaluate a (currently leaf-only) expression tree.

    Integers evaluate to themselves; any other node kind is not handled
    yet and falls through to an implicit ``None``.

    NOTE: intentionally shadows the builtin ``eval`` (teaching example).
    """
    if isinstance(expr, int):
        return expr


# Parenthesized call works on both Python 2 and 3; the original bare
# `print eval(...)` statement is a SyntaxError under Python 3.
print(eval(my_expression))
|
[
"sreejithevwyd@gmail.com"
] |
sreejithevwyd@gmail.com
|
799893b61cb92573ebd28b6e1e155e97ada16636
|
2e6f37e664d2cc85d0c704f20de05b2cae86771d
|
/options/options.py
|
102d63f4a1736cad3d0d940bb8cd7134dcaf4736
|
[
"MIT"
] |
permissive
|
LEOGML/cv_template
|
5bee5e43efb490649f63a7c4e1b77e62a3e1d948
|
c1a87465f0aeb79dab63b0cae88861a6282c045c
|
refs/heads/master
| 2023-01-30T21:32:38.240103
| 2020-12-15T09:39:14
| 2020-12-15T09:39:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,594
|
py
|
import argparse
import sys
import torch
import misc_utils as utils
"""
Arg parse
opt = parse_args()
"""
def parse_args():
    """Build and parse the command-line options for training/evaluation.

    Returns an ``argparse.Namespace``; evaluated once at import time and
    exposed module-wide as ``opt``.
    """
    # experiment specifics
    parser = argparse.ArgumentParser()
    parser.add_argument('--tag', type=str, default='cache',
                        help='folder name to save the outputs')
    parser.add_argument('--gpu_ids', '--gpu', type=str, default='0', help='gpu ids: e.g. 0 0,1,2, 0,2. use -1 for CPU')

    # dirs (NOT often Changed)
    parser.add_argument('--data_root', type=str, default='./datasets/')
    parser.add_argument('--checkpoint_dir', type=str, default='./checkpoints', help='models are saved here')
    parser.add_argument('--log_dir', type=str, default='./logs', help='logs are saved here')
    parser.add_argument('--result_dir', type=str, default='./results', help='results are saved here')

    #######################
    parser.add_argument('--model', type=str, default=None, help='which model to use')
    parser.add_argument('--norm', type=str, choices=['batch', 'instance', None], default=None,
                        help='[instance] normalization or [batch] normalization')

    # batch size
    parser.add_argument('--batch_size', '-b', type=int, default=1, help='input batch size')
    parser.add_argument('--workers', '-w', type=int, default=4, help='dataloader workers')

    # optimizer and scheduler
    parser.add_argument('--optimizer', choices=['adam', 'sgd', 'radam', 'lookahead', 'ranger'], default='adam')
    parser.add_argument('--scheduler', choices=['cos', 'step', 'exp', 'cyclic', 'lambda', 'None'], default='cos')

    # data augmentation
    # NOTE: dashed flags are exposed as opt.norm_input / opt.random_erase.
    parser.add_argument('--aug', action='store_true', help='Randomly scale, jitter, change hue, saturation and brightness')
    parser.add_argument('--norm-input', action='store_true')
    parser.add_argument('--random-erase', action='store_true', help='debug mode')

    # scale
    parser.add_argument('--scale', type=int, default=None, help='scale images to this size')
    parser.add_argument('--crop', type=int, default=256, help='then crop to this size')

    # for datasets
    parser.add_argument('--dataset', default='', help='training dataset')
    parser.add_argument('--transform', default='crop256', help='transform')
    parser.add_argument('--val_set', type=str, default=None)
    parser.add_argument('--test_set', type=str, default=None)

    # init weights
    parser.add_argument('--init', type=str, default=None, help='{normal, xavier, kaiming, orthogonal}')

    # loss weight
    parser.add_argument('--weight_ssim', type=float, default=1.1)  # SSIM loss
    parser.add_argument('--weight_l1', type=float, default=0.75)  # l1 loss
    parser.add_argument('--weight_vgg', type=float, default=0.)  # content loss(vgg loss)
    parser.add_argument('--weight_grad', type=float, default=0.)  # gradient loss

    # training options
    parser.add_argument('--debug', action='store_true', help='debug mode')
    parser.add_argument('--load', type=str, default=None, help='load checkpoint')
    parser.add_argument('--resume', action='store_true', help='resume training, only used when --load')
    parser.add_argument('--reset', action='store_true', help='reset training, only used when --load')
    parser.add_argument('--epochs', '--max_epoch', type=int, default=50, help='epochs to train')
    parser.add_argument('--lr', type=float, default=0.0002, help='initial learning rate for adam')
    parser.add_argument('--save_freq', type=int, default=10, help='freq to save models')
    parser.add_argument('--eval_freq', '--val_freq', type=int, default=10, help='freq to eval models')
    parser.add_argument('--log_freq', type=int, default=1, help='freq to vis in tensorboard')

    # test options
    parser.add_argument('--tta', action='store_true', help='test with augmentation')
    parser.add_argument('--tta-x8', action='store_true', help='test with augmentation x8')

    return parser.parse_args()
# Parsed options are module-level state (imported elsewhere as `from options import opt`).
opt = parse_args()

# NOTE(review): with multiple ids (e.g. '0,1') this yields 'cuda:0,1', which is
# not a valid torch device string — confirm multi-GPU handling happens elsewhere.
opt.device = 'cuda:' + opt.gpu_ids if torch.cuda.is_available() and opt.gpu_ids != '-1' else 'cpu'

# Debug mode: save/eval/log every epoch for fast iteration.
if opt.debug:
    opt.save_freq = 1
    opt.eval_freq = 1
    opt.log_freq = 1
def get_command_run():
    """Reconstruct the command line used to launch this script.

    Returns e.g. ``'python3 train.py --tag exp1'``; the script path is
    reduced to its basename.
    """
    args = sys.argv.copy()
    # Keep only the script's basename, not its full path.
    args[0] = args[0].split('/')[-1]
    # sys.version_info is robust; the original sys.version[0] == '3' check
    # breaks once the major version has two digits (Python 10+).
    command = 'python3' if sys.version_info[0] >= 3 else 'python'
    # Single join instead of quadratic string += in a loop.
    return ' '.join([command] + args)
# Append the launch command (with a timestamp) to run_log.txt for every named
# experiment, so runs can be reproduced later; 'cache' is the throwaway tag.
if opt.tag != 'cache':
    with open('run_log.txt', 'a') as f:
        f.writelines(utils.get_time_str(fmt="%Y-%m-%d %H:%M:%S") + ' ' + get_command_run() + '\n')

# utils.print_args(opt)
|
[
"523131316@qq.com"
] |
523131316@qq.com
|
933f4b58c7854b784701dd2df483ef4272ae3cfc
|
77683abaded7f9f4f538c6b02635fcf342d26886
|
/settings/base.py
|
a6c5df5d9c28a0a57e9c45082891ffd76cf32125
|
[] |
no_license
|
tchappui/flask-model
|
6a3a79ef04f0e6e437b7e97be789690b69d14bd4
|
5238534b4ba6450e92cf9ff413eb0edc282305e5
|
refs/heads/main
| 2023-06-03T20:02:20.419972
| 2021-06-11T09:32:51
| 2021-06-11T09:32:51
| 375,974,162
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 286
|
py
|
import os
import pathlib
# Directory containing this settings file; used as the project base path.
BASE_DIR = pathlib.Path(__file__).resolve().parent

# Blueprints registered by the app factory; "prefix" is the URL mount point.
INSTALLED_BLUEPRINTS = [
    {"module": "flaskt.home", "name": "home", "prefix": ""}
]

# Read from the environment in production; the fallback is for development only.
SECRET_KEY = os.environ.get("SECRET_KEY", "very secret and unguessable value")

# Disable SQLAlchemy's modification-tracking event system (unneeded overhead).
SQLALCHEMY_TRACK_MODIFICATIONS = False
|
[
"tchappui@gmail.com"
] |
tchappui@gmail.com
|
1f0bedae459bf6d8f4e19b7d5042a0d02edeffc7
|
25040bd4e02ff9e4fbafffee0c6df158a62f0d31
|
/www/htdocs/wt/lapnw/data/item_20_6.tmpl.py
|
6315b38d936cbe899af9e04cbed51763e6399ddc
|
[] |
no_license
|
erochest/atlas
|
107a14e715a058d7add1b45922b0f8d03bd2afef
|
ea66b80c449e5b1141e5eddc4a5995d27c2a94ee
|
refs/heads/master
| 2021-05-16T00:45:47.585627
| 2017-10-09T10:12:03
| 2017-10-09T10:12:03
| 104,338,364
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 192
|
py
|
from lap.web.templates import GlobalTemplate, SubtemplateCode
class main(GlobalTemplate):
    """Template page for atlas item 20.6 in the 'lapnw' project."""
    title = 'Page.Item: 20.6'
    project = 'lapnw'

    class page(SubtemplateCode):
        # No page-specific logic; everything is inherited from SubtemplateCode.
        pass
|
[
"eric@eric-desktop"
] |
eric@eric-desktop
|
66edb24833ca5e1a04c44358218a3c5742502084
|
3637fe729395dac153f7abc3024dcc69e17f4e81
|
/reference/ucmdb/discovery/vcloud_director_by_vcloud_api.py
|
8e70ff594f25df698f6a246a425441fca52ea0a9
|
[] |
no_license
|
madmonkyang/cda-record
|
daced6846c2456f20dddce7f9720602d1583a02a
|
c431e809e8d0f82e1bca7e3429dd0245560b5680
|
refs/heads/master
| 2023-06-15T08:16:46.230569
| 2021-07-15T16:27:36
| 2021-07-15T16:27:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,935
|
py
|
#coding=utf-8
import logger
import vcloud_discover
from appilog.common.system.types.vectors import ObjectStateHolderVector
import vcloud_report
# Trigger CI data key: candidate IP addresses of the vCloud Director host.
TRIGGER_IPS = "ip_addresses"
# Destination attribute: CMDB id of the vCloud Director CI that triggered the job.
TRIGGER_VCD_ID = "vCloudDirectorId"
# Job parameter name: whether powered-off VMs should be reported ('true'/'false').
PARAM_REPORT_POWERED_OFF_VMS = "reportPoweredOffVms"
def DiscoveryMain(Framework):
    """Discovery entry point: connect to vCloud Director over its API and
    report the discovered topology.

    Returns an (empty) ObjectStateHolderVector; results are reported through
    the configured reporter, errors/warnings through the Framework.
    """
    resultVector = ObjectStateHolderVector()

    ips = Framework.getTriggerCIDataAsList(TRIGGER_IPS)
    vcloudDirectorId = Framework.getDestinationAttribute(TRIGGER_VCD_ID)

    # Powered-off VMs are only reported when the job parameter is 'true'.
    paramValue = Framework.getParameter(PARAM_REPORT_POWERED_OFF_VMS)
    reportPoweredOffVms = 1 if paramValue and paramValue.lower() == 'true' else 0

    if not ips:
        logger.warn("Job triggered on destination without any IP")
        return resultVector

    # configure how connections should be discovered/established
    connectionDiscoverer = vcloud_discover.ConnectionDiscoverer(Framework)
    connectionDiscoverer.setUrlGenerator(vcloud_discover.UrlByIpGenerator())
    connectionDiscoverer.setIps(ips)

    # configure how established/failed connection should be used
    connectionHandler = vcloud_discover.BaseDiscoveryConnectionHandler(Framework)
    connectionHandler.setDiscoverer(vcloud_discover.createVcloudDiscoverer(Framework))
    connectionHandler.setReporter(
        vcloud_report.createVcloudReporter(Framework, vcloudDirectorId, reportPoweredOffVms))

    connectionDiscoverer.setConnectionHandler(connectionHandler)
    connectionDiscoverer.initConnectionConfigurations()
    connectionDiscoverer.discover(firstSuccessful=0)

    # Surface connection diagnostics only when nothing connected at all.
    if not connectionHandler.connected:
        for errorMsg in connectionHandler.connectionErrors:
            Framework.reportError(errorMsg)
        for warningMsg in connectionHandler.connectionWarnings:
            Framework.reportWarning(warningMsg)

    return resultVector
|
[
"silentbalanceyh@126.com"
] |
silentbalanceyh@126.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.