blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
9fea3d155012b6af9e4d7879b888a09a41598709 | 1b496449867e60fcec110d1f0d6b7bc0bc8cddf6 | /mydeploy/myapp/setup.py | 35cc2459c6856afa68137edf4f9b170e088b2e91 | [] | no_license | sebbekarlsson/tarship | f4cca6dc27174fc0d31ee3ceb8ba2a8864070e42 | c7b57472a3e0146d38260c3607473914750e1ffd | refs/heads/master | 2020-03-28T21:37:40.004937 | 2018-12-06T12:02:33 | 2018-12-06T12:02:33 | 149,170,620 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 167 | py | from setuptools import setup, find_packages
setup(
name='myapp',
version='1.0',
install_requires=[
'flask'
],
packages=find_packages()
)
| [
"sebbekarlsson97@gmail.com"
] | sebbekarlsson97@gmail.com |
fe79ba37dfe75f6ff0503aad5ac0e6a96ee458f1 | 68c29e7a17d87e34b1d6613c3e2e70a36fd2adcc | /easy/485_max_consecutive_ones.py | 67224cb9e68d8e2bbf47be570831d8cff6dfae9b | [
"MIT"
] | permissive | Sukhrobjon/leetcode | 284242fbfded3e47a57ce9230f9bc1175685cd7a | 547c200b627c774535bc22880b16d5390183aeba | refs/heads/master | 2022-02-26T20:56:57.347119 | 2022-02-05T01:58:49 | 2022-02-05T01:58:49 | 192,158,651 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 908 | py | """
Given a binary array, find the maximum number of consecutive 1s in this array.
Example 1:
Input: [1,1,0,1,1,1]
Output: 3
Explanation: The first two digits or the last three digits are consecutive 1s.
The maximum number of consecutive 1s is 3.
NOTE:
The input array will only contain 0 and 1.
The length of input array is a positive integer and will not exceed 10,000
"""
class Solution(object):
    """LeetCode 485: find the maximum number of consecutive 1s."""

    def findMaxConsecutiveOnes(self, nums):
        """Return the length of the longest run of 1s in *nums*.

        :type nums: List[int]
        :rtype: int
        """
        count = 0    # length of the current run of 1s
        max_len = 0  # longest run seen so far
        for digit in nums:
            # (removed a stray debug print that ran for every element)
            if digit == 1:
                count += 1
            else:
                count = 0
            max_len = max(max_len, count)
        return max_len
nums = [1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1]
obj = Solution()
result = obj.findMaxConsecutiveOnes(nums)
print(result)
| [
"sgolibbo@mail.ccsf.edu"
] | sgolibbo@mail.ccsf.edu |
ae3069ea5b56ee41d89ee8c8e24a57ba2f5ca18e | 675c5e97a84cfda399ca74c1804e0218c43b7c70 | /xTool/contextmanagers/temp.py | 78739d83c8720635e0978184092eabe4db5c3ca6 | [
"MIT",
"BSD-3-Clause",
"Apache-2.0",
"BSD-2-Clause",
"Python-2.0"
] | permissive | fengzhongzhu1621/xTool | 37a232e5ea220c8e5cb48aaf6868bc9cf24181e8 | 57d745ce6be531c000a3b477c38bfdd4c2ac74e3 | refs/heads/master | 2023-07-20T22:31:50.278926 | 2023-07-18T02:29:21 | 2023-07-18T02:29:21 | 88,688,127 | 3 | 4 | Apache-2.0 | 2023-05-09T21:53:19 | 2017-04-19T01:49:20 | Python | UTF-8 | Python | false | false | 304 | py | # -*- coding: utf-8 -*-
from contextlib import contextmanager
from pathlib import Path
from tempfile import TemporaryDirectory
@contextmanager
def temp_path(name):
    """Yield a Path to *name* inside a throwaway temporary directory.

    A simple cross-platform replacement for NamedTemporaryFile: the
    directory (and anything created beneath it) is removed on exit.
    """
    with TemporaryDirectory() as tmp_dir:
        candidate = Path(tmp_dir, name)
        yield candidate
| [
"jinyinqiao@gmail.com"
] | jinyinqiao@gmail.com |
f73b54bc15fb86b8360de52a82dabc4c873ff957 | c4c159a21d2f1ea0d7dfaa965aeff01c8ef70dce | /flask/flaskenv/Lib/site-packages/tensorflow/contrib/tensor_forest/proto/fertile_stats_pb2.py | 584ce5ba078ba2067643a2757cab80357484b93a | [] | no_license | AhsonAslam/webapi | 54cf7466aac4685da1105f9fb84c686e38f92121 | 1b2bfa4614e7afdc57c9210b0674506ea70b20b5 | refs/heads/master | 2020-07-27T06:05:36.057953 | 2019-09-17T06:35:33 | 2019-09-17T06:35:33 | 208,895,450 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 130 | py | version https://git-lfs.github.com/spec/v1
oid sha256:4ad07e1cfb048ba607c58cfdff9fbfc290deaa4051df70b3ef39683a1fe3896b
size 20881
| [
"github@cuba12345"
] | github@cuba12345 |
5fa61eb6f1f27f9cd58eebdfa987ab4e30cc3809 | 50948d4cb10dcb1cc9bc0355918478fb2841322a | /azure-mgmt-network/azure/mgmt/network/v2018_10_01/models/application_gateway_available_ssl_options.py | 9e1d2f87d101b4f926d8a7a1660b409e667bd8eb | [
"MIT"
] | permissive | xiafu-msft/azure-sdk-for-python | de9cd680b39962702b629a8e94726bb4ab261594 | 4d9560cfd519ee60667f3cc2f5295a58c18625db | refs/heads/master | 2023-08-12T20:36:24.284497 | 2019-05-22T00:55:16 | 2019-05-22T00:55:16 | 187,986,993 | 1 | 0 | MIT | 2020-10-02T01:17:02 | 2019-05-22T07:33:46 | Python | UTF-8 | Python | false | false | 3,052 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .resource import Resource
class ApplicationGatewayAvailableSslOptions(Resource):
    """Response for ApplicationGatewayAvailableSslOptions API service call.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :param id: Resource ID.
    :type id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :param location: Resource location.
    :type location: str
    :param tags: Resource tags.
    :type tags: dict[str, str]
    :param predefined_policies: List of available Ssl predefined policy.
    :type predefined_policies:
     list[~azure.mgmt.network.v2018_10_01.models.SubResource]
    :param default_policy: Name of the Ssl predefined policy applied by
     default to application gateway. Possible values include:
     'AppGwSslPolicy20150501', 'AppGwSslPolicy20170401',
     'AppGwSslPolicy20170401S'
    :type default_policy: str or
     ~azure.mgmt.network.v2018_10_01.models.ApplicationGatewaySslPolicyName
    :param available_cipher_suites: List of available Ssl cipher suites.
    :type available_cipher_suites: list[str or
     ~azure.mgmt.network.v2018_10_01.models.ApplicationGatewaySslCipherSuite]
    :param available_protocols: List of available Ssl protocols.
    :type available_protocols: list[str or
     ~azure.mgmt.network.v2018_10_01.models.ApplicationGatewaySslProtocol]
    """
    # Server-populated fields; the serializer refuses to send them back.
    _validation = {
        'name': {'readonly': True},
        'type': {'readonly': True},
    }
    # Maps Python attribute names to REST wire keys/types for (de)serialization.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'predefined_policies': {'key': 'properties.predefinedPolicies', 'type': '[SubResource]'},
        'default_policy': {'key': 'properties.defaultPolicy', 'type': 'str'},
        'available_cipher_suites': {'key': 'properties.availableCipherSuites', 'type': '[str]'},
        'available_protocols': {'key': 'properties.availableProtocols', 'type': '[str]'},
    }
    def __init__(self, **kwargs):
        # Resource consumes id/location/tags; the rest are model-specific.
        super(ApplicationGatewayAvailableSslOptions, self).__init__(**kwargs)
        self.predefined_policies = kwargs.get('predefined_policies', None)
        self.default_policy = kwargs.get('default_policy', None)
        self.available_cipher_suites = kwargs.get('available_cipher_suites', None)
        self.available_protocols = kwargs.get('available_protocols', None)
| [
"lmazuel@microsoft.com"
] | lmazuel@microsoft.com |
74f2b12334b2c53165e62947970d47f48a275f4c | a7f459bcc3da31e4cce7c838e716e089a31cb662 | /tables.py | fc44e3abdad6c6227d08dbff3ea4f18197e6433a | [] | no_license | ravenusmc/myDB | 3d5246a2ad2ffc367d5a540eaa3e71322ed55ace | 09aa1d88e4cdc7fb19a01807d3d678bf9e3d777a | refs/heads/master | 2020-03-23T13:49:29.450115 | 2018-10-08T23:49:18 | 2018-10-08T23:49:18 | 141,639,913 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 514 | py | #This file only create the database name for each user.
#importing needed files
import mysql.connector
class Tables():
    """Small helper that owns a MySQL connection and creates databases."""

    def __init__(self):
        # NOTE(review): credentials are hard-coded; they should come from
        # configuration or the environment in a real deployment.
        self.conn = mysql.connector.connect(user='ted',
                                            password='pass',
                                            host='localhost',
                                            port=3306)
        self.cursor = self.conn.cursor()

    def create_database(self, database_name):
        """Execute CREATE DATABASE for *database_name*.

        Identifiers cannot be bound as SQL parameters, so the name is
        validated first: anything other than a plain identifier
        (letters, digits, '_' or '$') raises ValueError, preventing SQL
        injection through string concatenation.
        """
        if not database_name or not all(
                ch.isalnum() or ch in '_$' for ch in database_name):
            raise ValueError('invalid database name: %r' % (database_name,))
        sql = 'CREATE DATABASE ' + database_name
        self.cursor.execute(sql)
| [
"mcuddy77@gmail.com"
] | mcuddy77@gmail.com |
9505f1bdb5def62407913d377071ecfcf1f5306b | 226e8d309e978240fbd6d4b31238daa357f51042 | /core.py | 77f3c9d7a2c130bdeda4fcc7d9b7f9f9a0167e06 | [] | no_license | zokis/Zy | e183f9ffb2fdfbe2b253666d5a17093b4929658d | 9a4959661d8a221cb359e119945febd6573b5165 | refs/heads/master | 2021-01-01T19:30:18.393976 | 2015-04-30T21:46:21 | 2015-04-30T21:46:21 | 33,011,032 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,501 | py | # coding: utf-8
from __future__ import division
from __future__ import unicode_literals
from __future__ import print_function
import re
ESC_STR = r'#[!]#'
class Symbol(str):
    # An identifier in the Zy AST; distinguishes names from data strings.
    pass
class Lambda(object):
    """A user-defined Zy procedure: parameters, a body, and a closure env."""

    def __init__(self, parms, body, env):
        self.parms = parms
        self.body = body
        self.env = env

    def __call__(self, *args):
        # Evaluate the body in a new scope binding parms to args,
        # chained to the definition environment.
        call_env = Env(self.parms, args, self.env)
        return zy_eval(self.body, call_env)
class Env(dict):
    """A lexical scope: a dict of bindings plus a link to the outer scope."""

    def __init__(self, parms=(), args=(), outer=None):
        self.outer = outer
        for name, value in zip(parms, args):
            self[name] = value

    def find(self, var):
        """Return the innermost Env in which *var* is bound."""
        if var in self:
            return self
        return self.outer.find(var)
class ZyString(str):
    """Zy's string type with arithmetic-style operators.

    '/' splits on a separator, '-' removes a substring, and '+'/'*'
    behave like str but keep the ZyString type.
    """

    def __div__(self, separator):
        return map(ZyString, self.split(separator))
    __truediv__ = __div__

    def __sub__(self, fragment):
        return ZyString(self.replace(fragment, ''))

    def __add__(self, suffix):
        joined = super(ZyString, self).__add__(suffix)
        return ZyString(joined)

    def __mul__(self, times):
        repeated = super(ZyString, self).__mul__(times)
        return ZyString(repeated)
class ZyBool(object):
    """Interned boolean for Zy: one shared true and one shared false."""
    true = True
    false = False

    def __new__(cls, val):
        # Replace the class-level placeholders with singleton instances on
        # first use, then always return the shared instance.
        if val:
            if cls.true is True:
                # Fix: object.__new__ must not receive extra arguments when
                # __new__ is overridden (TypeError on Python 3).
                cls.true = super(ZyBool, cls).__new__(cls)
            return cls.true
        else:
            if cls.false is False:
                cls.false = super(ZyBool, cls).__new__(cls)
            return cls.false

    def __init__(self, val):
        self.val = val

    def __nonzero__(self):
        # Python 2 truth protocol.
        return self.val

    # Fix: Python 3 consults __bool__, so without this alias ZyFalse was
    # truthy and every Zy conditional took the true branch under Python 3.
    __bool__ = __nonzero__

    def __repr__(self):
        return '#t' if self.val else '#f'
    __str__ = __repr__


ZyTrue = ZyBool(True)
ZyFalse = ZyBool(False)
def atom(token):
    """Turn one token into a ZyString, a float, or a Symbol."""
    if token[0] == '"':
        # Quoted literal: strip the quotes and decode the bytes.
        return ZyString(token[1:-1].decode('utf-8'))
    try:
        number = float(token)
    except ValueError:
        return Symbol(token)
    return number
def tokenize(program):
    """Split a Zy source string into tokens, preserving quoted strings.

    String literals are first collected character by character, then the
    program text has each quoted region replaced by the ESC_STR marker
    before parens are padded and the text is split on whitespace; finally
    the collected strings are substituted back in order.
    """
    program_iter = iter(program)
    strings = []
    while True:
        try:
            # Fix: use the builtin next() instead of the Python 2-only
            # iterator .next() method (works on 2.6+ and 3.x alike).
            c = next(program_iter)
        except StopIteration:
            break
        if c == '"':
            r = []
            while True:
                try:
                    c = next(program_iter)
                except StopIteration:
                    break
                if c == '"':
                    strings.append(''.join(r).replace('"', ''))
                    break
                else:
                    r.append(c)
    tokens = re.sub('\"(.+?)\"', ESC_STR, program).replace(')', ' ) ').replace('(', ' ( ').split()
    str_index = 0
    for k, t in enumerate(tokens):
        if t == ESC_STR:
            tokens[k] = '"%s"' % strings[str_index]
            str_index += 1
    return tokens
def atomize(tokens):
    """Consume tokens from the front and build one expression tree."""
    if not tokens:
        raise SyntaxError('unexpected EOF')
    token = tokens.pop(0)
    if token == ')':
        raise SyntaxError('unexpected )')
    if token != '(':
        return atom(token)
    expression = []
    while tokens[0] != ')':
        expression.append(atomize(tokens))
    tokens.pop(0)  # discard the closing paren
    return expression
def parse(program):
    """Read a Zy source string and return its AST."""
    tokens = tokenize(program)
    return atomize(tokens)
def standard_env():
    """Build the global environment of built-in Zy operators."""
    env = Env()
    env.update({
        # no-op / boolean coercion
        '.': lambda *args, **kwargs: None,
        '!': lambda x: ZyBool(x),
        '!!': lambda x: ZyBool(not x),
        # constants
        '#pi': 3.141592653589793,
        '#nil': None,
        '#f': ZyFalse,
        '#t': ZyTrue,
        # arithmetic and comparison
        '*': lambda x, y: x * y,
        '+': lambda x, y: x + y,
        '-': lambda x, y: x - y,
        '/': lambda x, y: x / y,
        '<': lambda x, y: x < y,
        '>': lambda x, y: x > y,
        '=': lambda x, y: x == y,
        '**': lambda x, y: x ** y,
        '++': lambda x: x + 1.,
        '--': lambda x: x - 1.,
        # ranges, rounding, lists, indexing
        '..': lambda x, y, s=1: range(int(x), int(y), int(s)),
        '/]': lambda x: float(int(x)),
        '/[': round,
        '[]': lambda *x: list(x),
        '[:]': lambda x, y: y[int(x)],
        # type constructors
        ',': float,
        "'": ZyString,
        '<=': lambda x, y: x <= y,
        '>=': lambda x, y: x >= y,
        '<->': lambda x, y: [y, x],
        # console I/O (raw_input: this module targets Python 2)
        '>>': print,
        '<<': raw_input,
    })
    return env


GLOBAL_ENV = standard_env()
def zy_eval(x, env=GLOBAL_ENV):
    """Evaluate a Zy expression *x* in environment *env*.

    NOTE: the shared GLOBAL_ENV default means top-level definitions
    persist between calls, which is intentional for a REPL-style global.
    """
    # Symbol: look up the innermost binding.
    if isinstance(x, Symbol):
        return env.find(x)[x]
    # Non-list atoms (numbers, strings, booleans) evaluate to themselves.
    elif not isinstance(x, list):
        return x
    # (? test if else): conditional; only the chosen branch is evaluated.
    elif x[0] == '?':
        _, test, _if, _else = x
        exp = (_if if zy_eval(test, env) else _else)
        return zy_eval(exp, env)
    # (-> var exp): bind var in the current environment.
    elif x[0] == '->':
        _, var, exp = x
        env[var] = zy_eval(exp, env)
    # (,-> v1 v2 ... e1 e2 ...): parallel binding; first half names,
    # second half expressions.
    elif x[0] == ',->':
        x = x[1:]
        ln = int(len(x) / 2)
        params, args = x[:ln], x[ln:]
        if len(params) != len(args):
            raise ValueError('It has not been possible to do the unpack')
        for i in range(ln):
            env[params[i]] = zy_eval(args[i], env)
    # (@ parms body): build a closure over the current environment.
    elif x[0] == '@':
        _, parms, body = x
        return Lambda(parms, body, env)
    # (*> var list body result): loop binding var to each element in a
    # new nested scope, then evaluate result in the final scope.
    elif x[0] == '*>':
        _, var, _list, body, r = x
        _env = env
        for w in zy_eval(_list, _env):
            _env = Env([var], [w], _env)
            zy_eval(body, _env)
        return zy_eval(r, _env)
    # Procedure call: evaluate the operator, then all operands.
    else:
        return zy_eval(x[0], env)(*[zy_eval(exp, env) for exp in x[1:]])
def to_zy_str(exp):
    """Render an evaluated Zy expression back into source notation."""
    if isinstance(exp, Symbol):
        return exp
    if isinstance(exp, ZyString):
        escaped = exp.encode('utf-8').replace('"', r'\"')
        return '"%s"' % escaped
    if isinstance(exp, list):
        inner = ' '.join(to_zy_str(item) for item in exp)
        return "(%s)" % inner
    return str(exp)
| [
"marcelo.zokis@gmail.com"
] | marcelo.zokis@gmail.com |
50c7c5264c127c5133745ffb614f121b6f470cc6 | 78316ffc5c14d6c0c6a144c25be0ba695ae6f4db | /svgout/manipulator.py | 82dab696782ef31e1663d4413bd0b01725d1065e | [] | no_license | meyt/svgout | 235fad9ee59b05f6caddde8ad5b67823c3f8916b | 21770144dade3c22314143291030a3bc24a5b248 | refs/heads/master | 2023-02-02T08:55:14.760035 | 2020-12-07T09:28:15 | 2020-12-07T09:28:15 | 319,265,728 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,928 | py | import re
import yaml
import logging
import cssutils
from os.path import join
from bs4 import BeautifulSoup
cssutils.log.setLevel(logging.CRITICAL)
class ElementStyle:
    """Dict-like access to an SVG element's inline 'style' attribute.

    Reads parse the attribute with cssutils; writes serialize the parsed
    declaration back onto the element, keeping attribute and view in sync.
    """
    def __init__(self, bs_element):
        self.el = bs_element
        self.style = cssutils.parseStyle(self.el["style"])
    def __getitem__(self, key):
        return self.style[key]
    def __setitem__(self, key, val):
        self.style[key] = val
        # Write the updated declaration back to the element attribute.
        self.el["style"] = self.style.cssText
    def __delitem__(self, key):
        self.style.removeProperty(key)
        self.el["style"] = self.style.cssText
class Element:
    """Thin wrapper over a BeautifulSoup element for show/hide toggling."""

    def __init__(self, bs_element):
        self.el = bs_element

    @property
    def style(self):
        """A fresh ElementStyle view onto this element's style attribute."""
        return ElementStyle(self.el)

    def hide(self):
        """Hide the element via an inline 'display: none'."""
        view = self.style
        view["display"] = "none"

    def show(self):
        """Reveal the element by dropping its inline 'display' property."""
        view = self.style
        del view["display"]
class Manipulator:
    """Load a YAML config and an SVG, then emit per-output SVG variants."""
    def __init__(self, config_filename: str, svg_filename: str):
        with open(config_filename, "r") as f:
            self.config = yaml.load(f, Loader=yaml.Loader)
        with open(svg_filename, "r") as f:
            self.bs = BeautifulSoup(f.read(), "xml")
    def save(self, filename):
        """Serialize the current SVG tree to *filename*."""
        with open(filename, "w", encoding="utf-8") as f:
            f.write(str(self.bs))
    def process(self, output_dir: str, stdout: bool = True):
        """Apply the configured show/hide commands and write each output.

        NOTE(review): edits accumulate on the single shared tree across
        outputs (there is no reset between outkeys) — confirm intended.
        """
        config = self.config
        bs = self.bs
        for outkey, outval in config.items():
            output_filename = join(output_dir, outkey + ".svg")
            if stdout:
                print(output_filename)
            # outval maps an Element method name (e.g. 'hide'/'show') to a
            # list of id regex patterns to apply it to.
            for command, elementpatterns in outval.items():
                for elementpattern in elementpatterns:
                    elements = bs.findAll(id=re.compile(elementpattern))
                    for bs_element in elements:
                        el = Element(bs_element)
                        getattr(el, command)()
            self.save(output_filename)
| [
"pasd3000@gmail.com"
] | pasd3000@gmail.com |
6fa3c5bb33d1e17219f01a2c7a0ac9688776ac2a | 35e892b01d2dfeea6f66a29fa336b2478e06bcea | /src/mcedit2/widgets/mcedockwidget.py | 8bb08d93090fa2ac5f3d094f99e0e8efe57bc3a2 | [
"BSD-3-Clause"
] | permissive | theomission/mcedit2 | bf1d0b7e00eaf3523b386b5909b3e3796e73dc2f | 39a717b3cab5dd8366ed8542a070e4120386eb92 | refs/heads/master | 2020-12-31T02:00:52.356814 | 2015-11-09T21:43:44 | 2015-11-09T21:44:52 | 46,336,522 | 1 | 0 | null | 2015-11-17T09:26:11 | 2015-11-17T09:26:11 | null | UTF-8 | Python | false | false | 1,294 | py | """
mcedockwidget
"""
from __future__ import absolute_import, division, print_function, unicode_literals
from PySide import QtGui, QtCore
import logging
log = logging.getLogger(__name__)
class MCEDockWidget(QtGui.QDockWidget):
    """QDockWidget that fades between full and 'unfocused' opacity on hover."""

    def __init__(self, *a, **kw):
        super(MCEDockWidget, self).__init__(*a, **kw)
        # 1.0 disables the fade effect entirely.
        self._unfocusedOpacity = 1.0

    def setUnfocusedOpacity(self, value):
        """Set the opacity used while the mouse is outside the widget."""
        self._unfocusedOpacity = value

    def animate(self, value):
        # Slot driven by QPropertyAnimation.valueChanged.
        self.setWindowOpacity(value)

    def _fadeTo(self, endValue, duration):
        """Animate windowOpacity from its current value to *endValue*.

        The animation is kept on self so it is not garbage-collected
        while running. (Extracted from the previously duplicated
        enterEvent/leaveEvent bodies.)
        """
        self.animation = animation = QtCore.QPropertyAnimation(self, 'windowOpacity')
        animation.setDuration(duration)
        animation.setStartValue(self.windowOpacity())
        animation.setEndValue(endValue)
        animation.valueChanged.connect(self.animate)
        animation.start()

    def enterEvent(self, event):
        if self._unfocusedOpacity == 1.0:
            return
        self._fadeTo(1.0, 100)

    def leaveEvent(self, event):
        if self._unfocusedOpacity == 1.0:
            return
        self._fadeTo(self._unfocusedOpacity, 250)
| [
"codewarrior@hawaii.rr.com"
] | codewarrior@hawaii.rr.com |
c0e5d3a39d855c37ca27c9e9df1c27e7e57350ab | 7b667511748ded171b66bf313d1dffe6f875289e | /tests/matrix_add_global_addr_offset/matrix_add_global_addr_offset.py | 82fa1a80ab86e72ea6ffc595134fafa0757fd3f2 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | herosugi/nngen | 3b94301ba43ba0684be31c42c4977e1f72a081de | ce09cd1dba55d815163adfe901c7cca65dc0709f | refs/heads/master | 2020-09-09T02:14:04.746559 | 2019-11-12T09:11:33 | 2019-11-12T09:11:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,536 | py | from __future__ import absolute_import
from __future__ import print_function
import os
import sys
import functools
import math
import numpy as np
if sys.version_info.major < 3:
from itertools import izip_longest as zip_longest
else:
from itertools import zip_longest
# the next line can be removed after installation
sys.path.insert(0, os.path.dirname(os.path.dirname(
os.path.dirname(os.path.abspath(__file__)))))
import nngen as ng
from veriloggen import *
import veriloggen.thread as vthread
import veriloggen.types.axi as axi
def run(a_shape=(15, 15), b_shape=(15, 15),
        a_dtype=ng.int32, b_dtype=ng.int32, c_dtype=ng.int32,
        par=1, axi_datawidth=32, silent=False, global_addr_offset=0,
        filename=None, simtype='iverilog', outputfile=None):
    """Build a matrix-add accelerator, simulate it, and self-verify.

    Returns the simulator's stdout (with verilator's trailing '-' line
    stripped). When *filename* is given the generated Verilog is also
    written out.
    """
    # create target hardware
    a = ng.placeholder(a_dtype, shape=a_shape, name='a')
    b = ng.placeholder(b_dtype, shape=b_shape, name='b')
    c = ng.add(a, b, dtype=c_dtype, par=par)
    targ = ng.to_veriloggen([c], 'matrix_add_global_addr_offset', silent=silent,
                            config={'maxi_datawidth': axi_datawidth,
                                    'default_global_addr_offset': global_addr_offset})
    # verification data (small moduli keep values within narrow dtypes)
    va = np.arange(a.length, dtype=np.int64).reshape(a.shape) % [5]
    vb = (np.arange(b.length, dtype=np.int64).reshape(b.shape) + [100]) % [6]
    vc = ng.verify.add(va, vb, par=par,
                       dtype=c_dtype,
                       x_dtype=a_dtype, y_dtype=b_dtype)
    # to memory image: lay out operands, expected result, and temp space
    size_max = int(math.ceil(max(a.memory_size, b.memory_size, c.memory_size) / 4096)) * 4096
    check_addr = max(a.addr, b.addr, c.addr) + size_max
    size_check = size_max
    tmp_addr = check_addr + size_check
    memimg_datawidth = 32
    mem = np.zeros([1024 * 1024 * 8 // memimg_datawidth], dtype=np.int64)
    mem = mem + [100]
    axi.set_memory(mem, va, memimg_datawidth,
                   a_dtype.width, a.addr + global_addr_offset,
                   max(int(math.ceil(axi_datawidth / a_dtype.width)), par))
    axi.set_memory(mem, vb, memimg_datawidth,
                   b_dtype.width, b.addr + global_addr_offset,
                   max(int(math.ceil(axi_datawidth / b_dtype.width)), par))
    axi.set_memory(mem, vc, memimg_datawidth,
                   c_dtype.width, check_addr + global_addr_offset,
                   max(int(math.ceil(axi_datawidth / c_dtype.width)), par))
    # test controller
    m = Module('test')
    params = m.copy_params(targ)
    ports = m.copy_sim_ports(targ)
    clk = ports['CLK']
    resetn = ports['RESETN']
    rst = m.Wire('RST')
    rst.assign(Not(resetn))
    # AXI memory model
    if outputfile is None:
        outputfile = os.path.splitext(os.path.basename(__file__))[0] + '.out'
    memimg_name = 'memimg_' + outputfile
    memory = axi.AxiMemoryModel(m, 'memory', clk, rst,
                                datawidth=axi_datawidth,
                                memimg=mem, memimg_name=memimg_name,
                                memimg_datawidth=memimg_datawidth)
    memory.connect(ports, 'maxi')
    # AXI-Slave controller
    _saxi = vthread.AXIMLite(m, '_saxi', clk, rst, noio=True)
    _saxi.connect(ports, 'saxi')
    # timer: free-running cycle counter for the execution-time report
    time_counter = m.Reg('time_counter', 32, initval=0)
    seq = Seq(m, 'seq', clk, rst)
    seq(
        time_counter.inc()
    )
    num_rep = functools.reduce(lambda x, y: x * y, c.shape[:-1], 1)
    def ctrl():
        # Software-thread control: configure, start, wait, then compare
        # the accelerator's output against the precomputed vc.
        for i in range(100):
            pass
        ng.sim.set_global_offset(_saxi, global_addr_offset)
        ng.sim.set_global_addrs(_saxi, tmp_addr)
        start_time = time_counter.value
        ng.sim.start(_saxi)
        print('# start')
        ng.sim.wait(_saxi)
        end_time = time_counter.value
        print('# end')
        print('# execution cycles: %d' % (end_time - start_time))
        # verify
        ok = True
        for i in range(num_rep):
            for j in range(c.shape[-1]):
                orig = memory.read_word(i * c.aligned_shape[-1] + j,
                                        c.addr + global_addr_offset, c_dtype.width)
                check = memory.read_word(i * c.aligned_shape[-1] + j,
                                         check_addr + global_addr_offset, c_dtype.width)
                if vthread.verilog.NotEql(orig, check):
                    print('NG', i, j, orig, check)
                    ok = False
                # else:
                #     print('OK', i, j, orig, check)
        if ok:
            print('# verify: PASSED')
        else:
            print('# verify: FAILED')
        vthread.finish()
    th = vthread.Thread(m, 'th_ctrl', clk, rst, ctrl)
    fsm = th.start()
    uut = m.Instance(targ, 'uut',
                     params=m.connect_params(targ),
                     ports=m.connect_ports(targ))
    # simulation.setup_waveform(m, uut)
    simulation.setup_clock(m, clk, hperiod=5)
    init = simulation.setup_reset(m, resetn, m.make_reset(), period=100, polarity='low')
    init.add(
        Delay(1000000),
        Systask('finish'),
    )
    # output source code
    if filename is not None:
        m.to_verilog(filename)
    # run simulation
    sim = simulation.Simulator(m, sim=simtype)
    rslt = sim.run(outputfile=outputfile)
    lines = rslt.splitlines()
    if simtype == 'verilator' and lines[-1].startswith('-'):
        rslt = '\n'.join(lines[:-1])
    return rslt
if __name__ == '__main__':
    # Standalone entry point: simulate and dump the generated Verilog.
    rslt = run(silent=False, filename='tmp.v')
    print(rslt)
| [
"shta.ky1018@gmail.com"
] | shta.ky1018@gmail.com |
25aaf0c09cf04ff7e328393bfd3aac3c91ea28c4 | bc2327d2bce695bb4881be63b1912f550857fd14 | /comps_and_gens/avoid_injecting_data.py | c40913be675650a5917f6c9caa4c0590c0adef57 | [] | no_license | mentalclear/fluent-in-python | 1a1d9ad30e949e72d8633156091b84b6d52b85bc | 243cff274861abc853b4ba5d03090191df5cd7db | refs/heads/master | 2023-08-05T19:26:48.787996 | 2021-10-06T13:04:14 | 2021-10-06T13:04:14 | 402,944,060 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,528 | py | import math
def wave(amplitude, steps):
    """Yield *steps* samples of one sine period scaled by *amplitude*."""
    delta = 2 * math.pi / steps
    for index in range(steps):
        yield amplitude * math.sin(index * delta)
def transmit(output):
    """Print one sample, or a placeholder when the sample is None."""
    if output is None:
        print(f'Output is None')
        return
    print(f'Output: {output:>5.1f}')
def run(it):
    """Drain iterator *it*, transmitting every sample."""
    for sample in it:
        transmit(sample)
run(wave(3.0, 8))
def my_generator():
    """Yield 1, then print whatever value the caller sends back."""
    received = yield 1
    print(f'received = {received}')
# it = iter(my_generator())
# output = next(it) # Get first generator output
# print(f'output = {output}')
# try:
# next(it) # Run generator until it exits
# except StopIteration:
# pass
it = iter(my_generator())
output = it.send(None) # Get first generator output
print(f'output = {output}')
try:
it.send('hello!') # Send value into the generator
except StopIteration:
pass
def wave_modulating(steps):
    """Sine generator whose amplitude is re-sent before every sample."""
    delta = 2 * math.pi / steps
    amplitude = yield  # receive the initial amplitude
    for index in range(steps):
        sample = amplitude * math.sin(index * delta)
        amplitude = yield sample  # receive the next amplitude
def run_modulating(it):
    """Drive a modulating generator with a fixed amplitude schedule."""
    schedule = [None, 7, 7, 7, 2, 2, 2, 2, 10, 10, 10, 10, 10]
    for amplitude in schedule:
        transmit(it.send(amplitude))
run_modulating(wave_modulating(12))
def complex_wave():
    """Concatenate three waves of differing amplitude and resolution."""
    for amplitude, steps in ((7.0, 3), (2.0, 4), (10.0, 5)):
        yield from wave(amplitude, steps)
run(complex_wave())
print("\n")
def complex_wave_modulating():
    """Chain three modulating waves; each reads amplitudes via send()."""
    for steps in (3, 4, 5):
        yield from wave_modulating(steps)
run_modulating(complex_wave_modulating())
def wave_cascading(amplitude_it, steps):
    """Sine generator that pulls each sample's amplitude from an iterator."""
    delta = 2 * math.pi / steps
    for index in range(steps):
        amplitude = next(amplitude_it)  # one amplitude per sample
        yield amplitude * math.sin(index * delta)
print("\n")
def complex_wave_cascading(amplitude_it):
    """Chain three cascading waves sharing one amplitude iterator."""
    for steps in (3, 4, 5):
        yield from wave_cascading(amplitude_it, steps)
def run_cascading():
    """Feed twelve amplitudes through the cascaded wave and transmit them."""
    amplitudes = [7, 7, 7, 2, 2, 2, 2, 10, 10, 10, 10, 10]
    it = complex_wave_cascading(iter(amplitudes))
    for _ in amplitudes:
        transmit(next(it))
run_cascading() | [
"mentalclear@gmail.com"
] | mentalclear@gmail.com |
4e9afc0ee7bcec1dc84aa50b3eca0655bcedea07 | 66c6df450753acc7c41db5afe66abd35d5018c8c | /cliente Rujel/bin33.py | b8af2ff866744ea341d41bab5428a230e2eef354 | [] | no_license | hanmiton/CodigoCompletoEncriptacion | a33807d9470b538842751071031c9ce60951260f | efb7898af5d39025e98c82f1f71c8e9633cce186 | refs/heads/master | 2020-03-24T02:03:08.242655 | 2018-07-25T22:41:05 | 2018-07-25T22:41:05 | 142,360,817 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,288 | py | import sys
import math
import random
import openpyxl
LAMBDA = 16 #security parameter
N = LAMBDA
P = LAMBDA ** 2
Q = LAMBDA ** 5
def principal(m1,m2):
doc = openpyxl.load_workbook('cifrado.xlsx')
doc.get_sheet_names()
hoja = doc.get_sheet_by_name('Hoja1')
m1 = int(hoja['A7'].value)
boln1 = bin(m1)
boln2 = bin(m2)
boln1Encrypt = []
boln2Encrypt = []
sumEncrypt = []
mulEnctypt = []
res = []
aux = []
if(len(boln1) > len(boln2)):
print len(boln1) - len(boln2)
for i in range(0, len(boln1) - len(boln2)):
aux.append(0)
boln2 = aux + boln2
else:
print len(boln2) - len(boln1)
for i in range(0, len(boln2) - len(boln1)):
aux.append(0)
boln1 = aux + boln1
key = map(keygen,boln1)
boln1Encrypt = map(encrypt,key,boln1)
boln2Encrypt = map(encrypt,key,boln2)
sumEncrypt = map(add,boln1Encrypt,boln2Encrypt)
mulEnctypt = map(mult,boln1Encrypt, boln2Encrypt)
resSuma = map (decrypt, key, sumEncrypt)
strSuma = ''.join(str(e) for e in resSuma)
decSuma = int(strSuma, 2)
resMult = map (decrypt, key, mulEnctypt)
strMult = ''.join(str(e) for e in resMult)
decMult = int(strMult, 2)
return sumEncrypt
def quot(z, p):
    """Nearest-integer quotient of z/p (halves round up)."""
    # Integer rounding trick: bias by p/2 before flooring.
    return (z + p // 2) // p


def mod(z, p):
    """Centered remainder of z modulo p (lies in roughly [-p/2, p/2))."""
    return z - quot(z, p) * p
def keygen(n):
    """Return a random odd P-bit secret key (*n* is unused)."""
    while True:
        candidate = random.getrandbits(P)
        if candidate % 2 == 1:
            return candidate
def encrypt(key, aBit):
    """Encrypt one bit as key*q + 2*noise + bit (DGHV-style)."""
    q = random.getrandbits(Q)
    noise = 2 * random.getrandbits(N - 1)
    return key * q + noise + aBit
def decrypt(key, cipherText):
    """Recover one plaintext bit: centered remainder mod key, then parity."""
    centered = mod(cipherText, key)
    return centered % 2
def add(cipherText1, cipherText2):
    """Homomorphic XOR: ciphertext addition."""
    return cipherText1 + cipherText2


def mult(cipherText1, cipherText2):
    """Homomorphic AND: ciphertext multiplication."""
    return cipherText1 * cipherText2
def bin(numero):
    """Return *numero*'s bits, most significant first, as a list.

    NOTE: shadows the builtin bin(); the name is kept because callers in
    this module depend on it. As in the original, a negative input
    returns an error-message string rather than raising.
    (Also removes the dead 'binario' string accumulator and the
    build-then-reverse copy loop.)
    """
    if numero > 0:
        bits = []
        while numero > 0:
            bits.append(numero % 2)  # least-significant bit first
            numero //= 2
        bits.reverse()
        return bits
    if numero == 0:
        return [0]
    return " no se pudo convertir el numero. ingrese solo numeros positivos"
if __name__ == '__main__':
    # BUG(review): m1 and m2 are never defined at module scope, so this call
    # raises NameError as written. principal() reads its first operand from
    # cifrado.xlsx anyway — confirm the intended inputs.
    principal(m1,m2)
"hanmilton_12@outlook.com"
] | hanmilton_12@outlook.com |
1ec4d6b7f1ee5824542f78212d28e4851ad938e3 | eac22714038e840028cc5abb72bc750004626ebb | /mct_camera_calibrator/src/mct_camera_calibrator/calibrator_service.py | 825e733d2ae04f8e770576b619f5196ebe8297e9 | [
"Apache-2.0"
] | permissive | iorodeo/mct | 79b19f6dab9f6567452df7274d67245bf64b1801 | fa8b85f36533c9b1486ca4f6b0c40c3daa6f4e11 | refs/heads/master | 2022-11-11T18:03:18.178182 | 2014-08-20T19:21:27 | 2014-08-20T19:21:27 | 273,790,182 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,345 | py | from __future__ import print_function
import roslib
roslib.load_manifest('mct_camera_calibrator')
import rospy
from mct_msg_and_srv.srv import GetBool
from mct_msg_and_srv.srv import GetString
def good_enough(calibrator):
"""
Wraper for the good_enough service provided by the cameracalibrator nodes.
Given a the camera calibrator, e.g. '/mct_master/camera_1/camera/camera_calibrator',
returns boolean value (True or False) indicating whether or not the camera data
collected good enough to calculate the camera calibration.
"""
srv_name = '{0}/good_enough'.format(str(calibrator))
rospy.wait_for_service(srv_name)
proxy = rospy.ServiceProxy(srv_name,GetBool)
try:
response = proxy()
value = response.value
except rospy.ServiceException, e:
rospy.logerr('service request failed: {0}'.format(str(e)))
value = None
return value
def calibrate(calibrator):
"""
Wrapper for the calibrate service provided by the cameracalibrator nodes.
Given a the camera calibrator, e.g. '/mct_master/camera_1/camera/camera_calibrator',
this function requests that the node calculate the camera calibration given the data
collected so far. Returns True if a calibration can be calculated and False otherwise.
"""
srv_name = '{0}/calibrate'.format(str(calibrator))
rospy.wait_for_service(srv_name)
proxy = rospy.ServiceProxy(srv_name,GetBool)
try:
response = proxy()
value = response.value
except rospy.ServiceException, e:
rospy.logerr('service request failed: {0}'.format(str(e)))
value = None
return value
def get_calibration(calibrator):
"""
Wrapper for the get_calibration service proviced by the
cameracalibrator nodes. Given a camera calibrator, e.g.,
'/mct_master/camera_1/camera/camera_calibrator', returns the camera
calibration or an empty string if a calibration has not yet been calculated.
"""
srv_name = '{0}/get_calibration'.format(str(calibrator))
rospy.wait_for_service(srv_name)
proxy = rospy.ServiceProxy(srv_name,GetString)
try:
response = proxy()
data = response.data
except rospy.ServiceException, e:
rospy.logerr('service request failed: {0}'.format(str(e)))
data = None
return data
| [
"will@iorodeo.com"
] | will@iorodeo.com |
6ed74c82e53f1e6b15c7c46353e105569c667bb2 | e23a4f57ce5474d468258e5e63b9e23fb6011188 | /110_concurrency_parallelism/_exercises/templates/Learning Concurrency in Python/Chapter 08/mapPool.py | ba7352deaa660b0e307545fa085edce324923c64 | [] | no_license | syurskyi/Python_Topics | 52851ecce000cb751a3b986408efe32f0b4c0835 | be331826b490b73f0a176e6abed86ef68ff2dd2b | refs/heads/master | 2023-06-08T19:29:16.214395 | 2023-05-29T17:09:11 | 2023-05-29T17:09:11 | 220,583,118 | 3 | 2 | null | 2023-02-16T03:08:10 | 2019-11-09T02:58:47 | Python | UTF-8 | Python | false | false | 202 | py | # ____ m.. ______ P..
# ______ ti..
#
# ___ myTask n
# t__.s.. ?+2
# r_ ?+2
#
# ___ main
# w__ P.. 4 __ p
# ___ iter __ ?.i_u.. ? |1,3,2,1
# print ?
#
# __ _________ __ ________
# ?
| [
"sergejyurskyj@yahoo.com"
] | sergejyurskyj@yahoo.com |
f90ab8530e28474c7c8a23196d0f20aabcc1e379 | bdc45798b67c0d12b78845c3c31e690564b40ed5 | /projects/bbs/bb/views.py | 4ac49069fe6e4eca8557f9ce4126a9d9156b3771 | [
"MIT"
] | permissive | mozillazg/django-simple-projects | c16d8105985707ef572fcb1fb53f76d7e3ed0362 | 6ccd1232cb76595f6dbafa282cef2c20edbb1148 | refs/heads/master | 2023-08-23T20:23:59.139601 | 2015-09-25T23:28:56 | 2015-09-25T23:28:56 | 7,768,010 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,055 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import math
from django.shortcuts import render_to_response
from django.http import HttpResponseRedirect
from django.contrib.auth.models import User
from django.contrib.auth import authenticate
from django.contrib.auth import login
from django.contrib.auth import logout
from django import forms
from django.template import RequestContext
from django.core.paginator import Paginator
from django.core.paginator import EmptyPage
from django.core.paginator import PageNotAnInteger
from bb.models import Node
from bb.models import Topic
from bb.models import Reply
from bb.models import UserProfile
class SignupForm(forms.Form):
    # Registration form; all three fields are required by default.
    name = forms.CharField()
    password = forms.CharField(widget=forms.PasswordInput())
    email = forms.EmailField()
class SigninForm(SignupForm):
    """Signup form minus the email field: login needs only name/password."""
    def __init__(self, *args, **kwargs):
        SignupForm.__init__(self, *args, **kwargs)
        # Drop the inherited email field if present.
        self.fields.pop('email', None)
class ChangePasswordForm(forms.Form):
    # Single masked input for the replacement password.
    password = forms.CharField(widget=forms.PasswordInput)
class ReplyForm(forms.Form):
    # Free-text body of a reply to a topic.
    reply = forms.CharField(widget=forms.Textarea())
class CreateForm(forms.ModelForm):
    # Topic-creation form; user, hits and reply_count are set server-side.
    class Meta:
        model = Topic
        exclude = ('user', 'hits', 'reply_count')
def signup(request):
    """Register a new user; redirect home on success, re-render on errors."""
    form = SignupForm(request.POST) if request.method == 'POST' else SignupForm()
    if request.method == 'POST' and form.is_valid():
        cleaned = form.cleaned_data
        user = User.objects.create_user(
            cleaned['name'], cleaned['email'], cleaned['password'])
        user.save()
        return HttpResponseRedirect('/')
    # GET, or POST with validation errors: render the (possibly bound) form.
    return render_to_response('signup.html', {'form': form},
                              context_instance=RequestContext(request))
def change_password(request):
    """Let a signed-in user set a new password, then force a fresh login."""
    if not request.user.is_authenticated():
        return HttpResponseRedirect('/')
    if request.method == 'POST':
        form = ChangePasswordForm(request.POST)
        if form.is_valid():
            account = User.objects.get(username=request.user.username)
            account.set_password(form.cleaned_data['password'])
            account.save()
            # The session's password hash is now stale; log out and
            # send the user to the sign-in page.
            logout(request)
            return HttpResponseRedirect('/account/signin')
    else:
        form = ChangePasswordForm()
    return render_to_response('account.html', {'form': form},
                              context_instance=RequestContext(request))
def signin(request):
    """Authenticate a user, bump their login counter, and start a session."""
    if request.method == 'POST':
        form = SigninForm(request.POST)
        if form.is_valid():
            user = authenticate(username=form.cleaned_data['name'],
                                password=form.cleaned_data['password'])
            if user is not None and user.is_active:
                profile = UserProfile.objects.get(user=user)
                profile.login_count += 1
                profile.save()
                login(request, user)
                return HttpResponseRedirect('/')
    else:
        form = SigninForm()
    # Bad credentials / inactive user / GET: show the form again.
    return render_to_response('signin.html', {'form': form},
                              context_instance=RequestContext(request))
def log_out(request):
    """Terminate the current session and go back to the front page."""
    logout(request)
    return HttpResponseRedirect('/')
# def index(request):
# topics = Topic.objects.all()
# page_count = [i for i in range(len(topics)/5)] else [0]
# context =
# return render_to_response('index.html', {'topics': topics})
def page(request, page_id=1, node_id=0, popular=False):
    """Render a paginated topic listing.

    By default topics are ordered by last reply time; `popular` reorders
    by reply count, and a non-zero `node_id` restricts to one node.
    """
    nav_name = ''
    topics = Topic.objects.order_by('-last_reply_time')
    if popular:
        topics = topics.order_by('-reply_count', '-last_reply_time')
        nav_name = 'Popular'
    elif node_id:
        node = Node.objects.get(id=node_id)
        topics = topics.filter(node=node)
        # (An unused `topics.count()` call was removed here; it issued an
        # extra COUNT query per request and its result was never read.)
        nav_name = node.title
    # Pagination: 10 topics per page.
    limit = 10
    paginator = Paginator(topics, limit)
    try:
        topics = paginator.page(page_id)
    except PageNotAnInteger:
        # Non-numeric page requests fall back to the first page.
        # (PageNotAnInteger was imported at module level but never caught.)
        topics = paginator.page(1)
    except EmptyPage:
        topics = paginator.page(paginator.num_pages)
    context = {
        'topics': topics,
        'user': request.user,
        'node_id': node_id,
        'nav_name': nav_name,
    }
    return render_to_response('index.html', context,
                              context_instance=RequestContext(request))
def nodes(request):
    """Render the list of all discussion nodes."""
    context = {'nodes': Node.objects.all(), 'nav_name': 'Nodes'}
    return render_to_response('nodes.html', context,
                              context_instance=RequestContext(request))
def topic(request, topic_id, page_id=1):
    """Show a single topic and increment its hit counter.

    `page_id` is currently unused: the reply list was commented out of
    the template context, so the dead reply pagination (which cost extra
    DB queries on every view without contributing anything to the page)
    has been removed. Restore replies + ReplyForm in the context if the
    template re-enables them.
    """
    topic_ = Topic.objects.get(id=topic_id)
    topic_.hits += 1
    topic_.save()
    context = {
        'user': request.user,
        'topic': topic_,
    }
    return render_to_response('topic.html', context,
                              context_instance=RequestContext(request))
def reply(request, topic_id):
    """Attach a reply to a topic for an authenticated poster."""
    if request.method == 'POST':
        form = ReplyForm(request.POST)
        if form.is_valid() and request.user.is_authenticated():
            poster = User.objects.get(username=request.user.username)
            target = Topic.objects.get(id=topic_id)
            Reply(topic=target, user=poster,
                  content=form.cleaned_data['reply']).save()
            target.reply_count += 1
            target.save()
            return HttpResponseRedirect('/topic/' + str(topic_id))
    # GET, invalid form, or anonymous poster: bounce to the front page.
    return HttpResponseRedirect('/')
def create(request):
    """Create a new topic in the node selected on the form."""
    if not request.user.is_authenticated():
        return HttpResponseRedirect('/')
    if request.method == 'POST':
        form = CreateForm(request.POST)
        if form.is_valid():
            author = User.objects.get(username=request.user.username)
            node = Node.objects.get(title=form.cleaned_data['node'])
            new_topic = Topic(title=form.cleaned_data['title'],
                              content=form.cleaned_data['content'],
                              node=node,
                              user=author)
            new_topic.save()
            node.topic_count += 1
            node.save()
            return HttpResponseRedirect('/topic/' + str(new_topic.id))
    else:
        form = CreateForm()
    # GET, or POST with validation errors.
    context = {'form': form}
    return render_to_response('create.html', context,
                              context_instance=RequestContext(request))
| [
"opensource.mozillazg@gmail.com"
] | opensource.mozillazg@gmail.com |
90d9af82c6f7b23981f29ff3435d608517689b8f | 5b93930ce8280b3cbc7d6b955df0bfc5504ee99c | /nodes/Geron17Hands/B_PartI/G_Chapter7/D_RandomForests/index.py | 8ff8d66c8fd6f9f178763d49d57ebff430e40833 | [] | no_license | nimra/module_gen | 8749c8d29beb700cac57132232861eba4eb82331 | 2e0a4452548af4fefd4cb30ab9d08d7662122cf4 | refs/heads/master | 2022-03-04T09:35:12.443651 | 2019-10-26T04:40:49 | 2019-10-26T04:40:49 | 213,980,247 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,865 | py | # Lawrence McAfee
# ~~~~~~~~ import ~~~~~~~~
from modules.node.HierNode import HierNode
from modules.node.LeafNode import LeafNode
from modules.node.Stage import Stage
from modules.node.block.CodeBlock import CodeBlock as cbk
from modules.node.block.HierBlock import HierBlock as hbk
from modules.node.block.ImageBlock import ImageBlock as ibk
from modules.node.block.ListBlock import ListBlock as lbk
from modules.node.block.MarkdownBlock import MarkdownBlock as mbk
from .A_ExtraTrees.index import ExtraTrees as A_ExtraTrees
from .B_FeatureImportance.index import FeatureImportance as B_FeatureImportance
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
blocks = [
# Download from finelybook www.finelybook.com
# Sampling features results in even more predictor diversity, trading a bit more bias for
# a lower variance.
#
# Random Forests
# As we have discussed, a Random Forest9 is an ensemble of Decision Trees, generally
# trained via the bagging method (or sometimes pasting), typically with max_samples
# set to the size of the training set. Instead of building a BaggingClassifier and pass‐
# ing it a DecisionTreeClassifier, you can instead use the RandomForestClassifier
# class, which is more convenient and optimized for Decision Trees10 (similarly, there is
# a RandomForestRegressor class for regression tasks). The following code trains a
# Random Forest classifier with 500 trees (each limited to maximum 16 nodes), using
# all available CPU cores:
# from sklearn.ensemble import RandomForestClassifier
#
# rnd_clf = RandomForestClassifier(n_estimators=500, max_leaf_nodes=16, n_jobs=-1)
# rnd_clf.fit(X_train, y_train)
#
# y_pred_rf = rnd_clf.predict(X_test)
#
# With a few exceptions, a RandomForestClassifier has all the hyperparameters of a
# DecisionTreeClassifier (to control how trees are grown), plus all the hyperpara‐
# meters of a BaggingClassifier to control the ensemble itself.11
# The Random Forest algorithm introduces extra randomness when growing trees;
# instead of searching for the very best feature when splitting a node (see Chapter 6), it
# searches for the best feature among a random subset of features. This results in a
# greater tree diversity, which (once again) trades a higher bias for a lower variance,
# generally yielding an overall better model. The following BaggingClassifier is
# roughly equivalent to the previous RandomForestClassifier:
# bag_clf = BaggingClassifier(
# DecisionTreeClassifier(splitter="random", max_leaf_nodes=16),
# n_estimators=500, max_samples=1.0, bootstrap=True, n_jobs=-1
# )
#
#
#
#
# 9 “Random Decision Forests,” T. Ho (1995).
# 10 The BaggingClassifier class remains useful if you want a bag of something other than Decision Trees.
# 11 There are a few notable exceptions: splitter is absent (forced to "random"), presort is absent (forced to
# False), max_samples is absent (forced to 1.0), and base_estimator is absent (forced to DecisionTreeClassi
# fier with the provided hyperparameters).
#
#
#
# Random Forests | 189
#
# Download from finelybook www.finelybook.com
# Extra-Trees
# When you are growing a tree in a Random Forest, at each node only a random subset
# of the features is considered for splitting (as discussed earlier). It is possible to make
# trees even more random by also using random thresholds for each feature rather than
# searching for the best possible thresholds (like regular Decision Trees do).
# A forest of such extremely random trees is simply called an Extremely Randomized
# Trees ensemble12 (or Extra-Trees for short). Once again, this trades more bias for a
# lower variance. It also makes Extra-Trees much faster to train than regular Random
# Forests since finding the best possible threshold for each feature at every node is one
# of the most time-consuming tasks of growing a tree.
# You can create an Extra-Trees classifier using Scikit-Learn’s ExtraTreesClassifier
# class. Its API is identical to the RandomForestClassifier class. Similarly, the Extra
# TreesRegressor class has the same API as the RandomForestRegressor class.
#
# It is hard to tell in advance whether a RandomForestClassifier
# will perform better or worse than an ExtraTreesClassifier. Gen‐
# erally, the only way to know is to try both and compare them using
# cross-validation (and tuning the hyperparameters using grid
# search).
#
#
# Feature Importance
# Lastly, if you look at a single Decision Tree, important features are likely to appear
# closer to the root of the tree, while unimportant features will often appear closer to
# the leaves (or not at all). It is therefore possible to get an estimate of a feature’s impor‐
# tance by computing the average depth at which it appears across all trees in the forest.
# Scikit-Learn computes this automatically for every feature after training. You can
# access the result using the feature_importances_ variable. For example, the follow‐
# ing code trains a RandomForestClassifier on the iris dataset (introduced in Chap‐
# ter 4) and outputs each feature’s importance. It seems that the most important
# features are the petal length (44%) and width (42%), while sepal length and width are
# rather unimportant in comparison (11% and 2%, respectively):
# >>> from sklearn.datasets import load_iris
# >>> iris = load_iris()
# >>> rnd_clf = RandomForestClassifier(n_estimators=500, n_jobs=-1)
# >>> rnd_clf.fit(iris["data"], iris["target"])
# >>> for name, score in zip(iris["feature_names"], rnd_clf.feature_importances_):
# >>> print(name, score)
# sepal length (cm) 0.112492250999
#
#
#
# 12 “Extremely randomized trees,” P. Geurts, D. Ernst, L. Wehenkel (2005).
#
#
#
# 190 | Chapter 7: Ensemble Learning and Random Forests
#
]
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class Content(LeafNode):
    """Leaf node holding the 'Random Forests' section's text blocks."""

    def __init__(self):
        super().__init__(
            "Random Forests",
            # Stage.REMOVE_EXTRANEOUS,
            # Stage.ORIG_BLOCKS,
            # Stage.CUSTOM_BLOCKS,
            # Stage.ORIG_FIGURES,
            # Stage.CUSTOM_FIGURES,
            # Stage.CUSTOM_EXERCISES,
        )
        # Plain loop instead of a list comprehension used only for its
        # side effects (the original built and discarded a list of adds).
        for block in blocks:
            self.add(block)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class RandomForests(HierNode):
    """Hierarchy node for 'Random Forests': its content plus two subsections."""

    def __init__(self):
        super().__init__("Random Forests")
        self.add(Content(), "content")
        # Subsections, in chapter order.
        for child in (A_ExtraTrees(), B_FeatureImportance()):
            self.add(child)
# eof
| [
"lawrence.mcafee@gmail.com"
] | lawrence.mcafee@gmail.com |
d87ca8f97309bfa2251a3beb33d65a0fc9ba27bc | 21bbc3fbeb7a1616dbd6993b66dc44d9b30df3e7 | /PycharmProjects/samp_proj1/assignment1.py | 9f586fef9c106afcc8bd0ec1f73adb8da96936b9 | [] | no_license | PoornimaDevii/python_training | 6124640608d8bf14289ae61b2b28e0db3b473b6f | 42b535590a6a244a91bd48b4451b74a29c1aaa80 | refs/heads/master | 2020-04-05T19:55:49.723114 | 2018-12-04T11:49:59 | 2018-12-04T11:49:59 | 157,157,063 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,514 | py | # Adapter Pattern (client:projector vga, dell lap hdmi mac usb)
# format() similar to __repr__
class Projector:
    """Target of the adapter demo: a device that only speaks VGA."""

    def __init__(self, n):
        self.name = n

    def __str__(self):
        return 'the %s projector' % self.name

    def vga(self):
        return 'has VGA'
# c1 = Computer('mycomp')
# print(c1)
# print(c1.execute())
# Synthesizer class
class Dell:
    """Adaptee with an HDMI port instead of the expected VGA interface."""

    def __init__(self, n):
        self.name = n

    def __str__(self):
        return 'the %s Laptop' % self.name

    def hdmi(self):
        return 'has HDMI'
# s1 = Synthesizer('googlemusic')
# print(s1)
# print(s1.play())
class Mac:
    """Adaptee with a USB port instead of the expected VGA interface."""

    def __init__(self, n):
        self.name = n

    def __str__(self):
        return 'the %s Laptop' % self.name

    def usb(self):
        return 'has USB'
# sp1 = Human('poornima')
# print(sp1)
# print(sp1.speak())
class Adapter:
    """Wraps an object and grafts the expected method names onto it.

    `adapter_methods` maps the name callers expect (e.g. 'vga') to the
    adaptee's actual bound method (e.g. dell.hdmi).
    """

    def __init__(self, o, adapter_methods):
        self.obj = o
        # Expose each adapted callable directly on this instance.
        for expected_name, method in adapter_methods.items():
            setattr(self, expected_name, method)

    def __str__(self):
        # Delegate the string form to the wrapped object.
        return str(self.obj)
# objects = Computer('Asus') # Client interface
# synth = Synthesizer('moog')
# human = Human('Bob')
# asy = Adapter(synth, dict(execute=synth.play))
# ahu = Adapter(human,dict(execute=human.speak))
# print(asy.execute())
# print(ahu.execute())
# Demo: adapt Dell (HDMI) and Mac (USB) laptops to the projector's vga() call.
pro1 = Projector('myprojector')  # NOTE(review): created but never used below
dell1 = Dell('mydell')
mac1 = Mac('mymac')
adell = Adapter(dell1, dict(vga=dell1.hdmi))
amac = Adapter(mac1, dict(vga=mac1.usb))
print("The Dell laptop", adell.vga())
print("The Mac laptop",amac.vga())
| [
"poornimadevi.rama@gmail.com"
] | poornimadevi.rama@gmail.com |
e54bffad4d3b08796d2abad7fabdf8706e5308f7 | b1bc2e54f8cd35c9abb6fc4adb35b386c12fe6b4 | /toontown/src/minigame/PatternGameGlobals.py | 23901749a7104eb5c1e9373c735e47592d0219e8 | [] | no_license | satire6/Anesidora | da3a44e2a49b85252b87b612b435fb4970469583 | 0e7bfc1fe29fd595df0b982e40f94c30befb1ec7 | refs/heads/master | 2022-12-16T20:05:13.167119 | 2020-09-11T16:58:04 | 2020-09-11T17:02:06 | 294,751,966 | 89 | 32 | null | null | null | null | UTF-8 | Python | false | false | 507 | py | # PatternGameGlobals.py: contains pattern game stuff
# used by AI and client
import MinigameGlobals
# pattern constants
INITIAL_ROUND_LENGTH = 2
ROUND_LENGTH_INCREMENT = 2
NUM_ROUNDS = 4
TOONTOWN_WORK = 1
# how long the players have to input the pattern
InputTime = 10
# this is how long the AI server will wait for msgs from the clients
# before assuming that the msg is not coming
ClientsReadyTimeout = 5 + MinigameGlobals.latencyTolerance
InputTimeout = InputTime + MinigameGlobals.latencyTolerance
| [
"66761962+satire6@users.noreply.github.com"
] | 66761962+satire6@users.noreply.github.com |
11413c5a57372ae3eadb0c9d39eba7ac4f30600f | b79e567b9709c50588b77174a20bb3bf2a5ae210 | /fan_tools/django/models.py | 118c5c1ebe689fd845df0889f88de5497a1f22b4 | [
"MIT"
] | permissive | micro-fan/fan_tools | 325d05f46fece9fe6e49a12b7a7c8d2259d42e1f | 2a6455b206158f471295b1e4d17e35ab5f98f754 | refs/heads/master | 2023-07-25T03:40:14.963178 | 2023-07-24T18:56:21 | 2023-07-24T18:56:45 | 224,145,427 | 2 | 0 | MIT | 2022-01-16T18:33:24 | 2019-11-26T08:53:55 | Python | UTF-8 | Python | false | false | 750 | py | import os
import uuid
class UploadNameGenerator(object):
    """Deconstructible upload-path callable for Django file fields.

    Produces paths of the form
    ``static/<model>/<model>-<field>-<uuid><ext>`` so repeated uploads of
    the same filename never collide.
    """

    def __init__(self, model_name, field_name):
        self.model_name = model_name
        self.field_name = field_name

    def deconstruct(self):
        # Allows Django migrations to serialize this callable.
        return (
            'fan_tools.django.UploadNameGenerator',
            (),
            {'model_name': self.model_name, 'field_name': self.field_name},
        )

    def __call__(self, instance, filename):
        extension = os.path.splitext(filename)[1]
        unique_name = '%s-%s-%s%s' % (
            self.model_name, self.field_name, uuid.uuid1(), extension)
        return os.path.join('static', self.model_name, unique_name)
| [
"cybergrind@gmail.com"
] | cybergrind@gmail.com |
10c23c7085652bbb944fd917a4f62fe73419cc4f | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_bulkheads.py | b62773592aff3e1d3625faee7a8c8744150fbac0 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 252 | py |
from xai.brain.wordbase.nouns._bulkhead import _BULKHEAD
# class header
class _BULKHEADS(_BULKHEAD):
    """Plural noun entry, derived from the singular _BULKHEAD entry."""

    def __init__(self):
        _BULKHEAD.__init__(self)
        self.name = "BULKHEADS"
        self.specie = 'nouns'
        self.basic = "bulkhead"
        self.jsondata = {}
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
7f0bca0bd4ab5c6fd530aa522500b9670194e8ad | 54f352a242a8ad6ff5516703e91da61e08d9a9e6 | /Source Codes/AtCoder/agc024/B/4328471.py | 422d6eb2f88b6b69cd1a961089ef14ecba94644a | [] | no_license | Kawser-nerd/CLCDSA | 5cbd8a4c3f65173e4e8e0d7ed845574c4770c3eb | aee32551795763b54acb26856ab239370cac4e75 | refs/heads/master | 2022-02-09T11:08:56.588303 | 2022-01-26T18:53:40 | 2022-01-26T18:53:40 | 211,783,197 | 23 | 9 | null | null | null | null | UTF-8 | Python | false | false | 272 | py | n=int(input())
P=[int(input()) for _ in range(n)]
Q=[0]*n
for i,j in enumerate(P):
Q[j-1]=i
cresc=1
cnt=1
for i in range(1,n):
if Q[i-1]<Q[i]:
cnt+=1
else:
cresc=max(cresc,cnt)
cnt=1
cresc=max(cresc,cnt)
print(n-cresc) | [
"kwnafi@yahoo.com"
] | kwnafi@yahoo.com |
a07ca542fe9a301a620158b1438fc385225f567c | 255e19ddc1bcde0d3d4fe70e01cec9bb724979c9 | /all-gists/21ffbcb3d32e0ee52eb1/snippet.py | 5cff930ed25c3e97206a7417a8478b8a596eec8c | [
"MIT"
] | permissive | gistable/gistable | 26c1e909928ec463026811f69b61619b62f14721 | 665d39a2bd82543d5196555f0801ef8fd4a3ee48 | refs/heads/master | 2023-02-17T21:33:55.558398 | 2023-02-11T18:20:10 | 2023-02-11T18:20:10 | 119,861,038 | 76 | 19 | null | 2020-07-26T03:14:55 | 2018-02-01T16:19:24 | Python | UTF-8 | Python | false | false | 796 | py | import hashlib
import itertools
import string
import time
import console
alphabet = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'
def encrypt(data):
return hashlib.md5(data).hexdigest()
password = encrypt('pass')
def crack(hash, charset, maxlength):
attempts = 0
for attempt in (''.join(candidate) for candidate in itertools.chain.from_iterable(itertools.product(charset, repeat=i) for i in range(1, maxlength + 1))):
attempts += 1
print 'attempts:', attempts
console.clear()
if encrypt(attempt) == hash:
print 'Found:', attempt
break
s = time.time()
print len(string.ascii_letters+string.digits)
crack(encrypt('pass'), string.ascii_letters+string.digits, 3)
print 'finished in', round(s-time.time(), 3)/-1, 'seconds'
| [
"gistshub@gmail.com"
] | gistshub@gmail.com |
35fc48a9512d4e2a4b7468acb42ffaee49821ba9 | f0b5917fe0cb6c263e892d2dda6a541094123a16 | /grammar-generator/Elements/STG/Visible/VisibleSpecifiedColumnElementForStg.py | d89df6da48734e55f3d76ab47b53d77f40784dad | [
"MIT"
] | permissive | afronski/grammar-generator | 61a7de686ecc65dfa73f29a000bfed8b699de9ae | 231bf88e28dd02b2cd2a79e0d42cb0613a90501a | refs/heads/master | 2016-09-05T10:16:33.228488 | 2014-04-27T20:09:29 | 2014-04-27T20:09:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 433 | py | from Elements.STG.Base.IElementForStg import IElementForStg
class VisibleSpecifiedColumnElementForStg(IElementForStg):
    """STG element mapping the 'Specified' type to its visible-column template."""

    def __init__(self, templates, settingsObject):
        # Identify this element before delegating to the base constructor.
        self.typeName = "Specified"
        self.templateName = "SpecifiedVisibledColumn"
        super(VisibleSpecifiedColumnElementForStg, self).__init__(
            templates, settingsObject)

    def getType(self):
        return self.typeName

    def getTemplateName(self):
        return self.templateName
"afronski@gmail.com"
] | afronski@gmail.com |
d92db4bf193ecaffa5187a3dbaf23ac6086d60f2 | 1070490055b5c981d936038959731134b01ce272 | /apps/utils/mixin_utils.py | 96866fa06ec2322ff13ee3b096fd4b88d85d9128 | [] | no_license | ljingen/MxOnline | 401d5be37e11cb866dc8eb78acc9b6de053c5708 | 1b471dd6b4968f79dd6866bb5e3e6413b760c8a1 | refs/heads/master | 2021-10-11T08:57:05.304124 | 2018-02-11T06:59:32 | 2018-02-11T06:59:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 360 | py | # -*- coding: utf-8 -*-
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
class LoginRequiredMixin(object):
    """View mixin that redirects anonymous users to /login/ before dispatch."""

    @method_decorator(login_required(login_url='/login/'))
    def dispatch(self, request, *args, **kwargs):
        # (Local name fixed from the original's misspelled 'kwars'.)
        return super(LoginRequiredMixin, self).dispatch(request, *args, **kwargs)
| [
"luojingen@aliyun.com"
] | luojingen@aliyun.com |
3318351747613ba0bee934e6538ceda331bda98c | b53c6ec03b24ad21f3ee395d085c07cd302c3402 | /tests/chainer_tests/datasets_tests/test_image_dataset.py | 1f36e27c0a68509843c447a77daf65914c659b48 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | pyotr777/chainer | c0799791f85f499e32ea68636df5ecbe2c0f5675 | 8532edbd921ab0ea98c9447957565777e4601662 | refs/heads/master | 2021-04-28T05:00:16.181948 | 2018-02-20T06:41:21 | 2018-02-20T06:41:21 | 122,169,206 | 0 | 0 | MIT | 2018-02-20T07:50:22 | 2018-02-20T07:50:21 | null | UTF-8 | Python | false | false | 2,565 | py | import os
import unittest
import numpy
from chainer import datasets
from chainer.datasets import image_dataset
from chainer import testing
@testing.parameterize(*testing.product({
    'dtype': [numpy.float32, numpy.int32],
}))
@unittest.skipUnless(image_dataset.available, 'image_dataset is not available')
class TestImageDataset(unittest.TestCase):
    """ImageDataset over the two fixture images in image_dataset/img.lst,
    run once per parameterized dtype."""
    def setUp(self):
        root = os.path.join(os.path.dirname(__file__), 'image_dataset')
        path = os.path.join(root, 'img.lst')
        self.dataset = datasets.ImageDataset(path, root=root, dtype=self.dtype)
    def test_len(self):
        self.assertEqual(len(self.dataset), 2)
    def test_get(self):
        # First fixture: 4-channel 300x300 image (presumably RGBA).
        img = self.dataset.get_example(0)
        self.assertEqual(img.dtype, self.dtype)
        self.assertEqual(img.shape, (4, 300, 300))
    def test_get_grey(self):
        # Second fixture: single-channel (greyscale) 300x300 image.
        img = self.dataset.get_example(1)
        self.assertEqual(img.dtype, self.dtype)
        self.assertEqual(img.shape, (1, 300, 300))
@testing.parameterize(*testing.product({
    'dtype': [numpy.float32, numpy.int32],
    'label_dtype': [numpy.float32, numpy.int32],
}))
@unittest.skipUnless(image_dataset.available, 'image_dataset is not available')
class TestLabeledImageDataset(unittest.TestCase):
    """LabeledImageDataset over labeled_img.lst, parameterized over both
    the image dtype and the label dtype."""
    def setUp(self):
        root = os.path.join(os.path.dirname(__file__), 'image_dataset')
        path = os.path.join(root, 'labeled_img.lst')
        self.dataset = datasets.LabeledImageDataset(
            path, root=root, dtype=self.dtype, label_dtype=self.label_dtype)
    def test_len(self):
        self.assertEqual(len(self.dataset), 2)
    def test_get(self):
        # First fixture: 4-channel image labeled 0.
        img, label = self.dataset.get_example(0)
        self.assertEqual(img.dtype, self.dtype)
        self.assertEqual(img.shape, (4, 300, 300))
        self.assertEqual(label.dtype, self.label_dtype)
        self.assertEqual(label, 0)
    def test_get_grey(self):
        # Second fixture: greyscale image labeled 1.
        img, label = self.dataset.get_example(1)
        self.assertEqual(img.dtype, self.dtype)
        self.assertEqual(img.shape, (1, 300, 300))
        self.assertEqual(label.dtype, self.label_dtype)
        self.assertEqual(label, 1)
@unittest.skipUnless(image_dataset.available, 'image_dataset is not available')
class TestLabeledImageDatasetInvalidFormat(unittest.TestCase):
    """img.lst has no label column, so loading it as labeled data must fail."""
    def test_invalid_column(self):
        root = os.path.join(os.path.dirname(__file__), 'image_dataset')
        path = os.path.join(root, 'img.lst')
        with self.assertRaises(ValueError):
            datasets.LabeledImageDataset(path)
testing.run_module(__name__, __file__)
| [
"unnonouno@gmail.com"
] | unnonouno@gmail.com |
63fb29a944d3dcc789050d4e71e9af3eb41d5f1c | fa9bae32c203323dfb345d9a415d4eaecb27a931 | /859. Buddy Strings.py | 2195a1fcd904a6ecbd11d94049d250458faa4dc6 | [] | no_license | IUIUN/The-Best-Time-Is-Now | 48a0c2e9d449aa2f4b6e565868a227b6d555bf29 | fab660f98bd36715d1ee613c4de5c7fd2b69369e | refs/heads/master | 2020-09-14T12:06:24.074973 | 2020-02-15T06:55:08 | 2020-02-15T06:55:08 | 223,123,743 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 276 | py | class Solution:
def buddyStrings(self, A: str, B: str) -> bool:
if len(A) != len(B): return False
if A == B and len(set(A)) < len(A): return True
dif = [(a, b) for a, b in zip(A, B) if a != b]
return len(dif) == 2 and dif[0] == dif[1][::-1] | [
"liuyijun0621@hotmail.com"
] | liuyijun0621@hotmail.com |
ff1207d7894df22fdaa96e181578ed8ce57c263f | 3edb81366059a3dcb767b7b15476f264fad788e8 | /submit.py | c394ced46f734942c3ba9b60ee354f0c8650cf22 | [] | no_license | enucatl-phd/sinogram_batch_jobs | 91179c1a1ec3cc7869b0e0010977ce8b95d14517 | a5c06b8992b1ad2ed651277c1f54229847b7cc44 | refs/heads/master | 2023-08-07T20:32:58.236996 | 2017-11-23T13:17:48 | 2017-11-23T13:17:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,443 | py | import subprocess
import os.path
# Sample directories (one per scan) relative to the parent directory;
# naming pattern: <animal-id>_LL_<condition>_<scan-number>.
folders = [
    "KO202_LL_control_1",
    "KO202_LL_control_2",
    "KO202_LL_control_3",
    "KO203_LL_control_1",
    "KO203_LL_control_2",
    "KO203_LL_control_3",
    "ko373_LL_smoke_1",
    "ko373_LL_smoke_2",
    "ko373_LL_smoke_3",
    "WT223_LL_control_1",
    "WT223_LL_control_2",
    "WT223_LL_control_3",
    "WT224_LL_control_1",
    "WT224_LL_control_2",
    "WT224_LL_control_3",
    "WT256_LL_smoke_1",
    "WT256_LL_smoke_2",
    "WT256_LL_smoke_3",
    "WT256_LL_smoke_4",
    "WT353_LL_smoke_1",
    "WT353_LL_smoke_2",
    "WT353_LL_smoke_3",
    "WT355_LL_smoke_1",
    "WT355_LL_smoke_2",
    "WT355_LL_smoke_3"
]
# Two chained SGE jobs per sample: phase retrieval (fltp) followed by
# sinogram generation (sin, held until fltp finishes). {0} is the
# sample folder name.
command_template = "prj2sinSGE -d -C -f 1801,30,100,0,0 -I 1 -p {0}####.tif --jobname={0}_fltp --queue=tomcat_offline.q -Y 11.999,0.65E-6,3.7e-8,2.3e-10,0.008 -g 3 -o /sls/X02DA/data/e13657/Data10/matteo_high_resolution/{0}/fltp/ /sls/X02DA/data/e13657/Data10/matteo_high_resolution/{0}/tif/;prj2sinSGE -d -g 0 -I 0 -f 1801,0,0,0,0 -k 1 --hold={0}_fltp --jobname={0}_sin --queue=tomcat_offline.q -j 50 -p {0}####.fltp.DMP -o /sls/X02DA/data/e13657/Data10/matteo_high_resolution/{0}/sin/ /sls/X02DA/data/e13657/Data10/matteo_high_resolution/{0}/fltp/;"
for folder in folders:
    # Skip samples that already have a sinogram directory.
    if os.path.isdir(os.path.join("..", folder, "sin")):
        continue
    command = command_template.format(folder)
    # Print the actual command being submitted (the original printed the
    # unformatted template, i.e. with literal {0} placeholders).
    print(command)
    # shell=True is required here: the command is two ;-chained invocations.
    subprocess.call(command, shell=True)
| [
"gmatteo..abis@gmail.com"
] | gmatteo..abis@gmail.com |
652d7a8d6ec191c18ef763835a9eb827497e9673 | add74ecbd87c711f1e10898f87ffd31bb39cc5d6 | /xcp2k/classes/_r_ldos1.py | 5f1ad0f6631db49e3f0ee25cbced343e39d89476 | [] | no_license | superstar54/xcp2k | 82071e29613ccf58fc14e684154bb9392d00458b | e8afae2ccb4b777ddd3731fe99f451b56d416a83 | refs/heads/master | 2021-11-11T21:17:30.292500 | 2021-11-06T06:31:20 | 2021-11-06T06:31:20 | 62,589,715 | 8 | 2 | null | null | null | null | UTF-8 | Python | false | false | 455 | py | from xcp2k.inputsection import InputSection
class _r_ldos1(InputSection):
    """CP2K R_LDOS input section (local DOS restricted to a region)."""
    def __init__(self):
        InputSection.__init__(self)
        self.List = []
        # Spatial window and energy range for the LDOS evaluation;
        # presumably keywords left as None are omitted from the generated
        # input — depends on InputSection serialization, TODO confirm.
        self.Xrange = None
        self.Yrange = None
        self.Zrange = None
        self.Erange = None
        self._name = "R_LDOS"
        self._keywords = {'Xrange': 'XRANGE', 'Yrange': 'YRANGE', 'Zrange': 'ZRANGE', 'Erange': 'ERANGE'}
        self._repeated_keywords = {'List': 'LIST'}
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
16faddad943bf0bc84ce7a1e8f6aabb41a63554c | 45fdc51cf264bbd50e59655440eefc91451c50ea | /urlib.parse/urllib_parse_unquote.py | 51e2760854007cbf12352e7d1838aee4ad0eea3b | [] | no_license | blindij/python3_stl | 2163043f3a9113eac21a48a35685a4a01987e926 | ea138e25f8b5bbf7d8f78e4b1b7e2ae413de4735 | refs/heads/master | 2021-12-24T20:37:54.055116 | 2021-09-29T13:37:38 | 2021-09-29T13:37:38 | 191,508,648 | 0 | 0 | null | 2019-08-27T15:45:53 | 2019-06-12T06:10:30 | Python | UTF-8 | Python | false | false | 176 | py | from urllib.parse import unquote, unquote_plus
print(unquote('http%3A//localhost%3A8080/%7Ehellmann/'))
print(unquote_plus('http%3A%2F%2Flocalhost%3A8080%2F%7Ehellmann%2F'
))
| [
"blindij@users.noreply.github.com"
] | blindij@users.noreply.github.com |
a219f316ee477f4840ce21fdc1d506deb1f5a87a | a12c090eb57da4c8e1f543a1a9d497abad763ccd | /django-stubs/contrib/staticfiles/management/commands/runserver.pyi | 9542443c2c7afc7adf315bec00bc6da4dd914096 | [
"BSD-3-Clause"
] | permissive | debuggerpk/django-stubs | be12eb6b43354a18675de3f70c491e534d065b78 | bbdaebb244bd82544553f4547157e4f694f7ae99 | refs/heads/master | 2020-04-04T08:33:52.358704 | 2018-09-26T19:32:19 | 2018-09-26T19:32:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 597 | pyi | from typing import Any, Optional
from django.contrib.staticfiles.handlers import StaticFilesHandler
from django.core.management.base import CommandParser
from django.core.management.commands.runserver import \
Command as RunserverCommand
class Command(RunserverCommand):
    # Stub for staticfiles' runserver command: wraps core runserver and
    # serves static files through StaticFilesHandler.
    # NOTE(review): the attribute annotations below reference
    # django.core.management.* by dotted path but this stub never imports
    # those modules — confirm the type checker resolves them.
    stderr: django.core.management.base.OutputWrapper
    stdout: django.core.management.base.OutputWrapper
    style: django.core.management.color.Style
    help: str = ...
    def add_arguments(self, parser: CommandParser) -> None: ...
    def get_handler(self, *args: Any, **options: Any) -> StaticFilesHandler: ...
"maxim.kurnikov@gmail.com"
] | maxim.kurnikov@gmail.com |
697a3885eff485ce7088da0fb99a37de47a132fb | 3e5b0278bb8f7c221c5d3478c0c54cae81123799 | /database/ingestFiesResults.py | 987ae5355dae68d3b9457d1671382a5f75d3ee99 | [] | no_license | jmccormac01/NOT | 717e8ecc7c157eedf320d87b796010f2cad97dd9 | 3463accce62848142dede0026fa27aba4366f45b | refs/heads/master | 2021-01-18T23:52:22.899766 | 2017-05-03T09:08:42 | 2017-05-03T09:08:42 | 54,653,166 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,211 | py | """
Script to ingest the results files from fiespipe.py
This is a copy of the ingestCafeResults.py script
Results file has the following structure:
0- Object name
1- MBJD
2- RV
3- error in RV
4- Bisector span
5- error in bisector span
6- instrument
7- pipeline
8- resolving power
9- Effective Temperature
10- log(g)
11- [Fe/H]
12- v*sin(i)
13- value of the continuum normalized CCF at it lowest point
14- standard deviation of the gaussian fitted to the CCF
15- Exposure time
16- Signal to noise ratio at ~5150A
17- path to the CCF plot file
"""
import os
import sys
import argparse as ap
import pymysql
# pylint: disable = invalid-name
def argParse():
    """Parse the command line arguments.

    Returns an argparse.Namespace with a boolean ``ingest`` attribute;
    results are only written to the database when --ingest is supplied.
    """
    arg_parser = ap.ArgumentParser()
    arg_parser.add_argument(
        '--ingest',
        action='store_true',
        help='Ingest the results to the database',
    )
    return arg_parser.parse_args()
RESULTS_FILE = 'results.txt'
if __name__ == '__main__':
    args = argParse()
    # NOTE(review): database password is hardcoded in source — move to a
    # config file or environment variable.
    db = pymysql.connect(host='localhost',
                         db='eblm',
                         password='mysqlpassword')
    if os.path.exists(RESULTS_FILE):
        # Night is parsed from the parent directory name, expected to look
        # like <prefix>_YYYYMMDD; reformat to YYYY-MM-DD.
        night = os.getcwd().split('/')[-2].split('_')[1]
        night = "{}-{}-{}".format(night[:4], night[4:6], night[6:])
        print(night)
        f = open(RESULTS_FILE).readlines()
        for line in f:
            # One whitespace-separated record per line; 18 columns as
            # documented in the module docstring.
            ls = line.rstrip().split()
            if len(ls) != 18:
                print('ERROR: Wrong number of columns in results.txt')
                sys.exit(1)
            obj = ls[0]
            # Only 1SWASP* names get a swasp_id; otherwise None.
            # NOTE(review): a None swasp_id is interpolated below as the
            # quoted string 'None', not SQL NULL — confirm this is intended.
            if obj.startswith('1SWASP'):
                swasp_id = obj
            else:
                swasp_id = None
            bjd_mid = ls[1]
            mask_velocity = ls[2]
            mask_velocity_err = ls[3]
            bisector = ls[4]
            bisector_err = ls[5]
            mask_ccf_height = ls[13]
            mask_ccf_fwhm = ls[14]
            snr_5150 = ls[16]
            # image_id and the CCF mask name are recovered from the CCF
            # plot filename (last path component of column 17).
            pdf_name = ls[17].split('/')[-1]
            image_id = '{}.fits'.format(pdf_name.split('.')[0])
            mask = pdf_name.split('.')[-2].split('_')[-1]
            # Values are interpolated directly into the SQL string; fine
            # for trusted pipeline output, but parameterized queries
            # would be safer.
            qry = """
                REPLACE INTO eblm_fies (
                    image_id, swasp_id, object_name,
                    bjd_mid, mask, mask_velocity,
                    mask_velocity_err, mask_ccf_height,
                    mask_ccf_fwhm, bisector,
                    bisector_err, snr_5150, night, analyse
                    )
                VALUES (
                    '{}', '{}', '{}', {}, '{}', {},
                    {}, {}, {}, {}, {}, {}, '{}', 1
                    )
                """.format(image_id, swasp_id, obj,
                           bjd_mid, mask, mask_velocity,
                           mask_velocity_err, mask_ccf_height,
                           mask_ccf_fwhm, bisector,
                           bisector_err, snr_5150, night)
            print(qry)
            # Dry run by default; only write when --ingest was passed.
            if args.ingest:
                with db.cursor() as cur:
                    cur.execute(qry)
                    db.commit()
    else:
        print('{} not found...'.format(RESULTS_FILE))
| [
"jmccormac001@gmail.com"
] | jmccormac001@gmail.com |
9c1c35fa401ea152589015a7a13ebf1c10fc1825 | 628ab6e412e7c4c755bc42d8137acd3da2d4be0e | /tests/type/test_type_util.py | 75c136b72074efd78daecd55cfe6045a1eecb8c4 | [
"MIT",
"CC-BY-4.0"
] | permissive | TrendingTechnology/apysc | ffd7d9b558707b934c5df127eca817d4f12d619b | 5c6a4674e2e9684cb2cb1325dc9b070879d4d355 | refs/heads/main | 2023-06-01T20:19:20.835539 | 2021-06-20T03:53:33 | 2021-06-20T03:53:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,307 | py | from apysc import Boolean
from apysc import Int
from apysc import Number
from apysc.type import type_util
def test_is_same_class_instance() -> None:
result: bool = type_util.is_same_class_instance(class_=bool, instance=1)
assert not result
result = type_util.is_same_class_instance(class_=int, instance=1)
assert result
def test_is_float_or_number() -> None:
result: bool = type_util.is_float_or_number(value=100.5)
assert result
result = type_util.is_float_or_number(value=Number(value=10.5))
assert result
result = type_util.is_float_or_number(value=100)
assert not result
result = type_util.is_float_or_number(value=Int(value=10))
assert not result
def test_is_number() -> None:
result: bool = type_util.is_number(value=Number(value=10.5))
assert result
result = type_util.is_number(value=10.5)
assert not result
result = type_util.is_number(value=Int(value=10))
assert not result
def test_is_bool() -> None:
result: bool = type_util.is_bool(value=True)
assert result
result = type_util.is_bool(value=False)
assert result
result = type_util.is_bool(value=Boolean(True))
assert result
result = type_util.is_bool(value=1)
assert not result
| [
"antisocial.sid2@gmail.com"
] | antisocial.sid2@gmail.com |
da350f298931965ee5690a173c730b6e1f634548 | 5407d32363d4806176c768ef7db65c8f7c9e7f72 | /main.py | 307959cadf45e33557f44f8dc1bf3447330b65d3 | [] | no_license | krishpranav/pyide | 173efa96d8c7b50b2505c65a0562a4af64ab303f | 587628367b0ab6535ad3ebd00850c56c33b5fcbf | refs/heads/master | 2023-04-16T09:11:13.381777 | 2021-04-20T12:29:33 | 2021-04-20T12:29:33 | 359,804,202 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,792 | py | #!/usr/bin/env/python3
#imports
from tkinter import *
from tkinter.filedialog import asksaveasfilename, askopenfilename
import subprocess
compiler = Tk()
compiler.title("IDE")
file_path = ''
def set_file_path(path):
global file_path
file_path = path
def open_file():
path = askopenfilename(filetypes=[('Python Files', '*.py')])
with open(path, 'r') as file:
code = file.read()
editor.delete('1.0', END)
editor.insert('1.0', code)
set_file_path(path)
def save_as():
if file_path == '':
path = asksaveasfilename(filetypes=[('Python Files', '*.py')])
else:
path = file_path
with open(path, 'w') as file:
code = editor.get('1.0', END)
file.write(code)
set_file_path(path)
def run():
if file_path == '':
save_prompt = Toplevel()
text = Label(save_prompt, text='Please save your code')
text.pack()
return
command = f'python {file_path}'
process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
output, error = process.communicate()
code_output.insert('1.0', output)
code_output.insert('1.0', error)
menu_bar = Menu(compiler)
file_menu = Menu(menu_bar, tearoff=0)
file_menu.add_command(label='Open', command=open_file)
file_menu.add_command(label='Save', command=save_as)
file_menu.add_command(label='Save As', command=save_as)
file_menu.add_command(label='Exit', command=exit)
menu_bar.add_cascade(label='File', menu=file_menu)
run_bar = Menu(menu_bar, tearoff=0)
run_bar.add_command(label='Run', command=run)
menu_bar.add_cascade(label='Run', menu=run_bar)
compiler.config(menu=menu_bar)
editor = Text()
editor.pack()
code_output = Text(height=10)
code_output.pack()
compiler.mainloop()
| [
"krisna.pranav@gmail.com"
] | krisna.pranav@gmail.com |
7b8818be9be235aca64718b52aeb09dd41aa1a45 | 8b060d38c63993a3259a80b072768206b558772b | /BlogApp/migrations/0016_user.py | cd8e9e162cda9e83045bd02588187a077f03409b | [] | no_license | mortadagzar/Simple-Python-feedingTable | d8b0a2a06c1b3d78167241a6f60a2bb00fa9c4ce | 716c68e6b9c55bd2dc8299ca14ccf39431cf0efb | refs/heads/master | 2020-03-30T19:07:16.027807 | 2018-10-14T15:05:28 | 2018-10-14T15:05:28 | 151,529,016 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 642 | py | # Generated by Django 2.1.1 on 2018-09-24 22:54
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('BlogApp', '0015_auto_20180922_1833'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=40)),
('password', models.CharField(max_length=40)),
('email', models.TextField(blank=True, null=True)),
],
),
]
| [
"mortadagzar@gmail.com"
] | mortadagzar@gmail.com |
94406b4c0a5f5a0b725f7359720d14ae01e6dc47 | 7bb34b9837b6304ceac6ab45ce482b570526ed3c | /external/webkit/Tools/Scripts/webkitpy/tool/commands/rebaseline_unittest.py | 79e4cf4a97b601f70dcf60ea9438ce8a379e69d8 | [
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer",
"LGPL-2.0-or-later",
"GPL-1.0-or-later",
"GPL-2.0-only",
"LGPL-2.1-only",
"LGPL-2.0-only",
"BSD-2-Clause",
"LicenseRef-scancode-other-copyleft"
] | permissive | ghsecuritylab/android_platform_sony_nicki | 7533bca5c13d32a8d2a42696344cc10249bd2fd8 | 526381be7808e5202d7865aa10303cb5d249388a | refs/heads/master | 2021-02-28T20:27:31.390188 | 2013-10-15T07:57:51 | 2013-10-15T07:57:51 | 245,730,217 | 0 | 0 | Apache-2.0 | 2020-03-08T00:59:27 | 2020-03-08T00:59:26 | null | UTF-8 | Python | false | false | 2,325 | py | # Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.thirdparty.mock import Mock
from webkitpy.tool.commands.rebaseline import BuilderToPort, Rebaseline
from webkitpy.tool.mocktool import MockTool
class RebaselineTest(unittest.TestCase):
# This just makes sure the code runs without exceptions.
def test_tests_to_update(self):
command = Rebaseline()
command.bind_to_tool(MockTool())
build = Mock()
OutputCapture().assert_outputs(self, command._tests_to_update, [build])
class BuilderToPortTest(unittest.TestCase):
def test_port_for_builder(self):
converter = BuilderToPort()
port = converter.port_for_builder("Leopard Intel Debug (Tests)")
self.assertEqual(port.name(), "mac-leopard")
| [
"gahlotpercy@gmail.com"
] | gahlotpercy@gmail.com |
53d010f5a09590ee0504499dddb723f69908eed7 | bca9c2fa3c4c3d06dd612280ce39090a9dfab9bd | /neekanee/job_scrapers/plugins/com/link/successfactors.py | d9a5a27fd52a841ade57881fa9b838d7cd797500 | [] | no_license | thayton/neekanee | 0890dd5e5cf5bf855d4867ae02de6554291dc349 | f2b2a13e584469d982f7cc20b49a9b19fed8942d | refs/heads/master | 2021-03-27T11:10:07.633264 | 2018-07-13T14:19:30 | 2018-07-13T14:19:30 | 11,584,212 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,150 | py | import re, urlparse, mechanize
from neekanee.jobscrapers.jobscraper import JobScraper
from neekanee.htmlparse.soupify import soupify, get_all_text
from neekanee_solr.models import *
COMPANY = {
'name': 'SuccessFactors',
'hq': 'San Francisco, CA',
'home_page_url': 'http://www.successfactors.com',
'jobs_page_url': 'http://jobs.successfactors.com/search',
'empcnt': [1001,5000]
}
class SuccessFactorsJobScraper(JobScraper):
def __init__(self):
super(SuccessFactorsJobScraper, self).__init__(COMPANY)
def scrape_job_links(self, url):
jobs = []
self.br.open(url)
pageno = 2
while True:
s = soupify(self.br.response().read())
r = re.compile(r'^/job/[^/]+/\d+/$$')
t = s.find('table', id='searchresults')
x = {'class': 'jobLocation'}
for a in t.findAll('a', href=r):
tr = a.findParent('tr')
sp = tr.find('span', attrs=x)
l = self.parse_location(sp.text)
if not l:
continue
job = Job(company=self.company)
job.title = a.text
job.url = urlparse.urljoin(self.br.geturl(), a['href'])
job.location = l
jobs.append(job)
try:
self.br.follow_link(self.br.find_link(text='Page %d' % pageno))
pageno += 1
break
except mechanize.LinkNotFoundError:
break
return jobs
def scrape_jobs(self):
job_list = self.scrape_job_links(self.company.jobs_page_url)
self.prune_unlisted_jobs(job_list)
new_jobs = self.new_job_listings(job_list)
for job in new_jobs:
self.br.open(job.url)
s = soupify(self.br.response().read())
x = {'class': 'jobDisplay'}
d = s.find('div', attrs=x)
job.desc = get_all_text(d)
job.save()
def get_scraper():
return SuccessFactorsJobScraper()
if __name__ == '__main__':
job_scraper = get_scraper()
job_scraper.scrape_jobs()
| [
"thayton@neekanee.com"
] | thayton@neekanee.com |
480d955400267885fb5c52823d2d84eaa53fffa3 | 25427cf7ac5ae9f8e5d421e953750a46fb2d1ebc | /OldBoy/Day68/django_model_form/django_model_form/settings.py | 6fb83e2edb64e357b4d4491ed8d44ad40dd0a810 | [] | no_license | povillechan/Python | d48e2e25c9961acef45162ca882b547e5b9d0b77 | 67e88d6d7bdbe49b0c5165d9b35f37dccf638877 | refs/heads/master | 2020-03-22T08:43:44.606336 | 2019-09-01T15:25:57 | 2019-09-01T15:25:57 | 139,786,618 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,482 | py | """
Django settings for django_model_form project.
Generated by 'django-admin startproject' using Django 1.10.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '3k++_2zr*%tgs7#n*yrd(#s_44k$ak$!@m70(g)0vj2jb4h_h3'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'debug_toolbar',
'app01',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
#'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'debug_toolbar.middleware.DebugToolbarMiddleware',
]
INTERNAL_IPS = ['127.0.0.1',]
ROOT_URLCONF = 'django_model_form.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')]
,
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'django_model_form.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
},
'default1': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db1.sqlite3'),
},
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR,'static'),
)
| [
"poville@yeah.net"
] | poville@yeah.net |
3b34c7ebcbedf568257311ee2f077aeaf90dd3a0 | cccfb7be281ca89f8682c144eac0d5d5559b2deb | /tools/perf/page_sets/desktop_ui/desktop_ui_shared_state.py | 0e9637b9690d4687b2cadd3870ee616d2e07c556 | [
"LGPL-2.0-or-later",
"MPL-1.1",
"BSD-3-Clause",
"APSL-2.0",
"MIT",
"Zlib",
"GPL-2.0-only",
"Apache-2.0",
"LGPL-2.0-only",
"LicenseRef-scancode-unknown",
"LicenseRef-scancode-unknown-license-reference",
"LGPL-2.1-only"
] | permissive | SREERAGI18/chromium | 172b23d07568a4e3873983bf49b37adc92453dd0 | fd8a8914ca0183f0add65ae55f04e287543c7d4a | refs/heads/master | 2023-08-27T17:45:48.928019 | 2021-11-11T22:24:28 | 2021-11-11T22:24:28 | 428,659,250 | 1 | 0 | BSD-3-Clause | 2021-11-16T13:08:14 | 2021-11-16T13:08:14 | null | UTF-8 | Python | false | false | 415 | py | # Copyright 2021 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.page import shared_page_state
class DesktopUISharedState(shared_page_state.SharedPageState):
""" Ensures the browser is restarted for each test, for all platforms. """
def ShouldReuseBrowserForAllStoryRuns(self):
return False
| [
"chromium-scoped@luci-project-accounts.iam.gserviceaccount.com"
] | chromium-scoped@luci-project-accounts.iam.gserviceaccount.com |
0c49b5725258db042a42850d57bee23969d2e342 | 7bdb0e12359162c5dd2bddc58d2ca1d234fb29d2 | /trunk/playground/intern/2009/Pakito/pakito/gui/pspecWidget/dialogs/comarDialog.py | caece273e400a41ea31a7a0ae7e6d7bb1d1ad9c1 | [] | no_license | hitaf/Pardus-2011-Svn- | f40776b0bba87d473aac45001c4b946211cbc7bc | 16df30ab9c6ce6c4896826814e34cfeadad1be09 | refs/heads/master | 2021-01-10T19:48:33.836038 | 2012-08-13T22:57:37 | 2012-08-13T22:57:37 | 5,401,998 | 0 | 3 | null | null | null | null | UTF-8 | Python | false | false | 1,347 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
from qt import *
from kdecore import KGlobal, KIcon, i18n
from kfile import KFileDialog
import kdedesigner
import os
from pakito.gui.pspecWidget.dialogs.comarDialogUI import COMARDialogUI
class COMARDialog(COMARDialogUI):
def __init__(self, parent = None, comar = None, name= None):
COMARDialogUI.__init__(self, parent, name)
self.realLoc = ""
il = KGlobal.iconLoader()
self.pbFile.setIconSet(il.loadIconSet("fileopen", KIcon.Toolbar))
self.connect(self.btnOk, SIGNAL("clicked()"), self, SLOT("accept()"))
self.connect(self.btnCancel, SIGNAL("clicked()"), self, SLOT("reject()"))
self.connect(self.pbFile, SIGNAL("clicked()"), self.slotFile)
if comar:
self.cbProvides.setCurrentText(comar[0])
self.leFile.setText(comar[1])
def slotFile(self):
self.realLoc = KFileDialog.getOpenFileName(QString.null, QString.null, self, i18n("Select COMAR Script"))
if not self.realLoc or str(self.realLoc).strip() == "":
return
self.leFile.setText(os.path.split(str(self.realLoc))[1])
def getResult(self):
res = []
res.append(str(self.cbProvides.currentText()))
res.append(str(self.leFile.text()))
res.append(str(self.realLoc))
return res
| [
"fatih@dhcppc1.(none)"
] | fatih@dhcppc1.(none) |
717d84fd828878b75823d79be5f00d7fa9321862 | 1bdaf97709a1d885e473c15d5b1ef26f8d086c44 | /pipeline_02_geocode_addresses.py | 4992b58fa3233241ce374dffddb79489e0a6c677 | [] | no_license | austinlwheat/lab-04-pipelines-and-web-services | 773924564552203d3efa59a9c808cf1646f1ccec | faa6d6a611e2da8c6a33ef0ab6d98cdcbebf12f9 | refs/heads/main | 2023-09-01T07:35:40.870284 | 2021-10-13T14:01:00 | 2021-10-13T14:01:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 998 | py | """
Extract Process #2
Use the Census Geocoding API to geocode the addresses in the file that was
extracted in step one. The documentation for the API is available at:
https://geocoding.geo.census.gov/geocoder/Geocoding_Services_API.pdf
I encourage you to read it for details, but the gist is:
- You can geocode a batch of addresses by sending a POST request to
https://geocoding.geo.census.gov/geocoder/geographies/addressbatch
- The request should contain the following context:
1. A parameter named "benchmark" (set the value to "Public_AR_Current")
2. A parameter named "vintage" (set the value to "Current_Current")
3. A file labeled "addressFile" with the format described at
https://www.census.gov/programs-surveys/locations/technical-documentation/complete-technical-documentation/census-geocoder.html#ti103804043
(the file you downloaded in the previous step should conform to that
format).
Save the geocoded data to a new file.
"""
import requests
| [
"mjumbewu@gmail.com"
] | mjumbewu@gmail.com |
fba8b0cb1fe9b1aef56fa39569578536094bec5b | d85043257d93d35ac5d20fdb784656a83e141350 | /old/stm312_test/dummy_import.py | 5a533c4fc4f6b8b716042fce16cce40ea9be03c4 | [] | no_license | CINF/cinfdata_test | ca7ae74c93afb2860c2fa24d6589e25ed5c7d38a | 159c5e7f4727318a6b7b78dcce8f0ea57353abdb | refs/heads/master | 2021-01-01T04:26:36.569960 | 2016-05-13T11:10:43 | 2016-05-13T11:10:43 | 58,448,151 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 153 | py | #!/usr/bin/python
import sys
sys.path.insert(0, '/var/www/cinfdata/')
print "I'm the import testing script"
print 'Start'
import numpy
print 'Succes'
| [
"k.nielsen81@gmail.com"
] | k.nielsen81@gmail.com |
c728f18221747be09cb4d7bc0f4d5c4588ee119b | bc899480ea50049929e6ba7a2836e39a51d0faa3 | /leetcode/misc/linked_list/remove_nth_node_from_end_ll.py | 4e70dab6e0f371566bbe615b5bd76d412292f0f1 | [] | no_license | grewy/practice_py | 605a88f40eb54f7ac0fd54a1ab2d6bfdfae57b49 | b00f649598a6e57af30b517baa304f3094345f6d | refs/heads/master | 2021-07-09T01:33:07.158778 | 2020-10-15T12:33:06 | 2020-10-15T12:33:06 | 199,878,779 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,113 | py | # Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
def removeNthFromEnd(self, head, n):
"""
:type head: ListNode
:type n: int
:rtype: ListNode
"""
if n==0 or not head:
return head
stack = []
t = head
while t:
stack.append(t)
t= t.next
while n != 0:
curr = stack.pop(-1)
n -= 1
if stack:
prev = stack.pop(-1)
prev.next = curr.next
else:
head = curr.next
return head
"""
https://leetcode.com/problems/remove-nth-node-from-end-of-list/discuss/9032/Python-concise-one-pass-solution-with-dummy-head.
"""
def removeNthFromEnd(self, head, n):
dummy = ListNode(0)
dummy.next = head
fast = slow = dummy
for _ in xrange(n):
fast = fast.next
while fast and fast.next:
fast = fast.next
slow = slow.next
slow.next = slow.next.next
return dummy.next
| [
"mgatuiet@gmail.com"
] | mgatuiet@gmail.com |
5b3cf730f6e87e9912fba8136bfe75c1322b09af | 205407e7259fe8ffc42ca653cebdece2f63fe1dc | /config.py.tmp | b37cc1ea816751caf2cc643a7dbd5c2afa47f800 | [] | no_license | namuyan/nem-tip-bot-peg-system | 720f805ff93e45d0e2ee3bb5ca48c6cdabff4288 | aad038f6ee68523c5e8e5cdfbfb63ff0854b2ba3 | refs/heads/master | 2021-09-03T01:03:13.350492 | 2018-01-04T11:57:23 | 2018-01-04T11:57:23 | 109,551,023 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,112 | tmp | #!/user/env python3
# -*- coding: utf-8 -*-
class EmptyObject:
pass
class Config:
def __init__(self, test=True):
self.test = test
self.stop_signal = False
self.stop_ok = []
self.stop_need_obj = ("incoming",)
if test:
self.node = [("127.0.0.1", 8293), ("nukowallet.com", 8293)]
self.screen = ""
self.account_pubkey = ""
self.account_seckey = ""
self.genesis = "0x1a505395bfe4b2a8eef2f80033d68228db70e82bb695dd4ffb20e6d0cf71cb73"
self.db = {
"host": "127.0.0.1", "user": "peg",
"pass": "Q3h5GP", "db": "pegger_test",
"charset": 'utf8mb4'
}
self.twitter = {
"consumer_key": "",
"consumer_secret": "",
"access_token": "",
"access_token_secret": "",
"callback": None
}
self.login_pubkey = None
self.login_seckey = None
self.ws_host = "ws://153.122.86.46:8080"
self.rest_host = "127.0.0.1"
else:
self.node = [("127.0.0.1", 8293), ("nukowallet.com", 8293)]
self.screen = ""
self.account_pubkey = ""
self.account_seckey = ""
self.genesis = "0x1a505395bfe4b2a8eef2f80033d68228db70e82bb695dd4ffb20e6d0cf71cb73"
self.db = {
"host": "127.0.0.1", "user": "peg",
"pass": "Q3h5GP", "db": "pegger",
"charset": 'utf8mb4'
}
self.twitter = {
"consumer_key": "",
"consumer_secret": "",
"access_token": "",
"access_token_secret": "",
"callback": None
}
self.login_pubkey = None
self.login_seckey = None
self.ws_host = "ws://153.122.86.46:8088"
self.rest_host = "0.0.0.0"
MICRO_TO_WEI = 1000000000000 # 小数点以下6桁
NUKO_TO_WEI = 1000000000000000000
LOCAL_IP_ADDRESS = ("127.0.0.1", "localhost") | [
"thhjuu@yahoo.co.jp"
] | thhjuu@yahoo.co.jp |
dbdf5de27d352c1c45e3e5eee6df6fcab3a46460 | 98efe1aee73bd9fbec640132e6fb2e54ff444904 | /loldib/getratings/models/NA/na_udyr/na_udyr_bot.py | 09e8372d3859fd0e92e3fdb2bcd92f97ec80262b | [
"Apache-2.0"
] | permissive | koliupy/loldib | be4a1702c26546d6ae1b4a14943a416f73171718 | c9ab94deb07213cdc42b5a7c26467cdafaf81b7f | refs/heads/master | 2021-07-04T03:34:43.615423 | 2017-09-21T15:44:10 | 2017-09-21T15:44:10 | 104,359,388 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,269 | py | from getratings.models.ratings import Ratings
class NA_Udyr_Bot_Aatrox(Ratings):
pass
class NA_Udyr_Bot_Ahri(Ratings):
pass
class NA_Udyr_Bot_Akali(Ratings):
pass
class NA_Udyr_Bot_Alistar(Ratings):
pass
class NA_Udyr_Bot_Amumu(Ratings):
pass
class NA_Udyr_Bot_Anivia(Ratings):
pass
class NA_Udyr_Bot_Annie(Ratings):
pass
class NA_Udyr_Bot_Ashe(Ratings):
pass
class NA_Udyr_Bot_AurelionSol(Ratings):
pass
class NA_Udyr_Bot_Azir(Ratings):
pass
class NA_Udyr_Bot_Bard(Ratings):
pass
class NA_Udyr_Bot_Blitzcrank(Ratings):
pass
class NA_Udyr_Bot_Brand(Ratings):
pass
class NA_Udyr_Bot_Braum(Ratings):
pass
class NA_Udyr_Bot_Caitlyn(Ratings):
pass
class NA_Udyr_Bot_Camille(Ratings):
pass
class NA_Udyr_Bot_Cassiopeia(Ratings):
pass
class NA_Udyr_Bot_Chogath(Ratings):
pass
class NA_Udyr_Bot_Corki(Ratings):
pass
class NA_Udyr_Bot_Darius(Ratings):
pass
class NA_Udyr_Bot_Diana(Ratings):
pass
class NA_Udyr_Bot_Draven(Ratings):
pass
class NA_Udyr_Bot_DrMundo(Ratings):
pass
class NA_Udyr_Bot_Ekko(Ratings):
pass
class NA_Udyr_Bot_Elise(Ratings):
pass
class NA_Udyr_Bot_Evelynn(Ratings):
pass
class NA_Udyr_Bot_Ezreal(Ratings):
pass
class NA_Udyr_Bot_Fiddlesticks(Ratings):
pass
class NA_Udyr_Bot_Fiora(Ratings):
pass
class NA_Udyr_Bot_Fizz(Ratings):
pass
class NA_Udyr_Bot_Galio(Ratings):
pass
class NA_Udyr_Bot_Gangplank(Ratings):
pass
class NA_Udyr_Bot_Garen(Ratings):
pass
class NA_Udyr_Bot_Gnar(Ratings):
pass
class NA_Udyr_Bot_Gragas(Ratings):
pass
class NA_Udyr_Bot_Graves(Ratings):
pass
class NA_Udyr_Bot_Hecarim(Ratings):
pass
class NA_Udyr_Bot_Heimerdinger(Ratings):
pass
class NA_Udyr_Bot_Illaoi(Ratings):
pass
class NA_Udyr_Bot_Irelia(Ratings):
pass
class NA_Udyr_Bot_Ivern(Ratings):
pass
class NA_Udyr_Bot_Janna(Ratings):
pass
class NA_Udyr_Bot_JarvanIV(Ratings):
pass
class NA_Udyr_Bot_Jax(Ratings):
pass
class NA_Udyr_Bot_Jayce(Ratings):
pass
class NA_Udyr_Bot_Jhin(Ratings):
pass
class NA_Udyr_Bot_Jinx(Ratings):
pass
class NA_Udyr_Bot_Kalista(Ratings):
pass
class NA_Udyr_Bot_Karma(Ratings):
pass
class NA_Udyr_Bot_Karthus(Ratings):
pass
class NA_Udyr_Bot_Kassadin(Ratings):
pass
class NA_Udyr_Bot_Katarina(Ratings):
pass
class NA_Udyr_Bot_Kayle(Ratings):
pass
class NA_Udyr_Bot_Kayn(Ratings):
pass
class NA_Udyr_Bot_Kennen(Ratings):
pass
class NA_Udyr_Bot_Khazix(Ratings):
pass
class NA_Udyr_Bot_Kindred(Ratings):
pass
class NA_Udyr_Bot_Kled(Ratings):
pass
class NA_Udyr_Bot_KogMaw(Ratings):
pass
class NA_Udyr_Bot_Leblanc(Ratings):
pass
class NA_Udyr_Bot_LeeSin(Ratings):
pass
class NA_Udyr_Bot_Leona(Ratings):
pass
class NA_Udyr_Bot_Lissandra(Ratings):
pass
class NA_Udyr_Bot_Lucian(Ratings):
pass
class NA_Udyr_Bot_Lulu(Ratings):
pass
class NA_Udyr_Bot_Lux(Ratings):
pass
class NA_Udyr_Bot_Malphite(Ratings):
pass
class NA_Udyr_Bot_Malzahar(Ratings):
pass
class NA_Udyr_Bot_Maokai(Ratings):
pass
class NA_Udyr_Bot_MasterYi(Ratings):
pass
class NA_Udyr_Bot_MissFortune(Ratings):
pass
class NA_Udyr_Bot_MonkeyKing(Ratings):
pass
class NA_Udyr_Bot_Mordekaiser(Ratings):
pass
class NA_Udyr_Bot_Morgana(Ratings):
pass
class NA_Udyr_Bot_Nami(Ratings):
pass
class NA_Udyr_Bot_Nasus(Ratings):
pass
class NA_Udyr_Bot_Nautilus(Ratings):
pass
class NA_Udyr_Bot_Nidalee(Ratings):
pass
class NA_Udyr_Bot_Nocturne(Ratings):
pass
class NA_Udyr_Bot_Nunu(Ratings):
pass
class NA_Udyr_Bot_Olaf(Ratings):
pass
class NA_Udyr_Bot_Orianna(Ratings):
pass
class NA_Udyr_Bot_Ornn(Ratings):
pass
class NA_Udyr_Bot_Pantheon(Ratings):
pass
class NA_Udyr_Bot_Poppy(Ratings):
pass
class NA_Udyr_Bot_Quinn(Ratings):
pass
class NA_Udyr_Bot_Rakan(Ratings):
pass
class NA_Udyr_Bot_Rammus(Ratings):
pass
class NA_Udyr_Bot_RekSai(Ratings):
pass
class NA_Udyr_Bot_Renekton(Ratings):
pass
class NA_Udyr_Bot_Rengar(Ratings):
pass
class NA_Udyr_Bot_Riven(Ratings):
pass
class NA_Udyr_Bot_Rumble(Ratings):
pass
class NA_Udyr_Bot_Ryze(Ratings):
pass
class NA_Udyr_Bot_Sejuani(Ratings):
pass
class NA_Udyr_Bot_Shaco(Ratings):
pass
class NA_Udyr_Bot_Shen(Ratings):
pass
class NA_Udyr_Bot_Shyvana(Ratings):
pass
class NA_Udyr_Bot_Singed(Ratings):
pass
class NA_Udyr_Bot_Sion(Ratings):
pass
class NA_Udyr_Bot_Sivir(Ratings):
pass
class NA_Udyr_Bot_Skarner(Ratings):
pass
class NA_Udyr_Bot_Sona(Ratings):
pass
class NA_Udyr_Bot_Soraka(Ratings):
pass
class NA_Udyr_Bot_Swain(Ratings):
pass
class NA_Udyr_Bot_Syndra(Ratings):
pass
class NA_Udyr_Bot_TahmKench(Ratings):
pass
class NA_Udyr_Bot_Taliyah(Ratings):
pass
class NA_Udyr_Bot_Talon(Ratings):
pass
class NA_Udyr_Bot_Taric(Ratings):
pass
class NA_Udyr_Bot_Teemo(Ratings):
pass
class NA_Udyr_Bot_Thresh(Ratings):
pass
class NA_Udyr_Bot_Tristana(Ratings):
pass
class NA_Udyr_Bot_Trundle(Ratings):
pass
class NA_Udyr_Bot_Tryndamere(Ratings):
pass
class NA_Udyr_Bot_TwistedFate(Ratings):
pass
class NA_Udyr_Bot_Twitch(Ratings):
pass
class NA_Udyr_Bot_Udyr(Ratings):
pass
class NA_Udyr_Bot_Urgot(Ratings):
pass
class NA_Udyr_Bot_Varus(Ratings):
pass
class NA_Udyr_Bot_Vayne(Ratings):
pass
class NA_Udyr_Bot_Veigar(Ratings):
pass
class NA_Udyr_Bot_Velkoz(Ratings):
pass
class NA_Udyr_Bot_Vi(Ratings):
pass
class NA_Udyr_Bot_Viktor(Ratings):
pass
class NA_Udyr_Bot_Vladimir(Ratings):
pass
class NA_Udyr_Bot_Volibear(Ratings):
pass
class NA_Udyr_Bot_Warwick(Ratings):
pass
class NA_Udyr_Bot_Xayah(Ratings):
pass
class NA_Udyr_Bot_Xerath(Ratings):
pass
class NA_Udyr_Bot_XinZhao(Ratings):
pass
class NA_Udyr_Bot_Yasuo(Ratings):
pass
class NA_Udyr_Bot_Yorick(Ratings):
pass
class NA_Udyr_Bot_Zac(Ratings):
pass
class NA_Udyr_Bot_Zed(Ratings):
pass
class NA_Udyr_Bot_Ziggs(Ratings):
pass
class NA_Udyr_Bot_Zilean(Ratings):
pass
class NA_Udyr_Bot_Zyra(Ratings):
pass
| [
"noreply@github.com"
] | koliupy.noreply@github.com |
0bc290dafa4233a31706966fbaf2ce4ea89bf3fd | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03148/s093100517.py | ebad2fec8979471f0c0c14cb03557cc80fe50113 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 759 | py | n, m = [int(i) for i in input().split()]
td = [[int(i) for i in input().split()] for n in range(n)]
td.sort(key=lambda x:-x[1])
memo = set()
a = []
for t, d in td:
if t in memo:
a.append((d, 0))
else:
a.append((d, 1))
memo.add(t)
a = [(-x, x, d) for x, d in a]
import heapq
heapq.heapify(a)
val = 0
kind = 0
b = []
for _ in range(m):
ele = heapq.heappop(a)
val += ele[1]
kind += ele[2]
if ele[2] == 0:
b.append(ele[1])
ans = val + kind ** 2
while (len(a) > 0 and len(b)>0):
val -= b.pop()
flag = False
while(len(a) > 0):
elem = heapq.heappop(a)
if elem[2] == 1:
flag = True
break
if not flag:
break
val += elem[1]
kind += 1
tmpans = val + kind ** 2
if tmpans > ans:
ans = tmpans
print(ans) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
bdd58b6d01c70e104c0b4911a2c42ed7714b4725 | f71175700ba405e606eeab58d2b3ad97474bf9f5 | /link/models.py | b5e6c8526fdbdb9674a67616f4a1bf6871d27a73 | [] | no_license | Shirhussain/link_scraper | 76018965718887b71247e917babfb009d534d126 | f5e9a70160edc408fc005bc4d5c16a56834d93c7 | refs/heads/main | 2023-01-10T05:53:17.296704 | 2020-11-05T20:22:29 | 2020-11-05T20:22:29 | 310,549,867 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 232 | py | from django.db import models
class Link(models.Model):
name = models.CharField(max_length=2000, blank=True, null=True)
address = models.CharField(max_length=2000)
def __str__(self):
return self.name
| [
"sh.danishyar@gmail.com"
] | sh.danishyar@gmail.com |
0f22f542e80fab1255b87f8e1fc553feb6bd3b7d | caf71b6a4374a30a51e6dff4deefd9122ae7980d | /contest_python/tallestInClass_DWITE.py | 2e5540d8b419a15e72b7001183dcff93db427fe9 | [] | no_license | PMiskew/DP_CS_Code_PMiskew | fba73779e78bc4eb0bfafac05168e28ec11561b1 | 93d366a1dae3cc8f55acc7dd9cfdb2b224dbf539 | refs/heads/master | 2023-03-28T08:46:15.118189 | 2021-03-24T15:49:57 | 2021-03-24T15:49:57 | 294,153,175 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,459 | py | '''
Recommendation:
This problem can be daunting, however, if you break it apart to some components and tackle those
it falls apart nicely.
1. Start by managing the input. Assuming you store the data as a list of strings, create three
new lists and copy in the data. The first two elements in this parallel list structure look
as follows
name = ["Jim","Sally",. . .]
height = [1.45, 187, . . . ]
units = ["m", "cm", . . .]
HL - You could copy it into a 2D list
data2D = [ ["Jim","Sally",. . .],
[1.45, 187, . . . ],
["m", "cm", . . .],
]
]
2. Start off by simplifying the problem by assuming
a) All measurements are in the same unit
b) You only want to find the single tallest.
2. Create a function that coverts any unit to meters. What it converts it to doesn't matter, but
this allows you to send any meansurment through it and get a standard measurement that can be
compared.
'''
data = ["Jim 1.45 m",
"Sally 187 cm",
"Joey 1064 mm",
"Roel 15.23 dm",
"Karl 134 cm",
"Melanie 18.9 dm",
"Jill 1.54 m",
"Sam 133 cm",
"Joel 1877 mm",
"Roger 17.83 dm",
"Karen 178 cm",
"Marnie 17.9 dm"]
name = []
height = []
units = []
for i in range(0,len(data),1):
loc = data[i].index(' ')
n = data[i][0:loc]
name.append(n)
loc1 = data[i].index(' ',loc+1)
h = data[i][loc + 1:loc1]
height.append(float(h))
u = data[i][loc1+1:]
units.append(u)
print(name)
print(height)
print(units)
| [
"pmiskew@C1MWP4S2J1WL.local"
] | pmiskew@C1MWP4S2J1WL.local |
7eca7055328d8ca5c90cf66a600c1cb07862346e | 1498148e5d0af365cd7fd16197174174a7fa9800 | /leetcode/t000840.py | f81a88fb38895df4b807234bcc9a0768a92e933b | [] | no_license | feiyanshiren/myAcm | 59a2b80fe7e02787defcb152eee3eae26135322a | 00c7082d5143ddf87aeeafbdb6ce29da46dc8a12 | refs/heads/master | 2023-09-01T12:12:19.866447 | 2023-09-01T09:09:56 | 2023-09-01T09:09:56 | 148,560,672 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,284 | py | # 解:
# 暴力法,注意严格看提议条件
#
# ```
from typing import List
class Solution:
    def numMagicSquaresInside(self, grid: List[List[int]]) -> int:
        """Count the 3x3 sub-matrices of grid that are magic squares
        (distinct digits 1..9 with all rows, columns and both diagonals
        summing to the same value)."""
        rows = len(grid)
        if rows < 3:
            return 0
        cols = len(grid[0])
        if cols < 3:
            return 0
        total = 0
        # Slide a 3x3 window over every anchored top-left position.
        for r in range(rows - 2):
            for c in range(cols - 2):
                window = [row[c:c + 3] for row in grid[r:r + 3]]
                if self.ifh(window):
                    total += 1
        return total

    def ifh(self, grid):
        """Return True iff the given 3x3 matrix is a magic square."""
        values = [grid[i][j] for i in range(3) for j in range(3)]
        # Every cell must hold a digit 1..9 and all nine must be distinct.
        if any(v < 1 or v > 9 for v in values):
            return False
        if len(set(values)) != 9:
            return False
        # Collect the eight line sums: 3 rows, 3 columns, 2 diagonals.
        sums = [sum(grid[i]) for i in range(3)]
        sums += [sum(grid[i][j] for i in range(3)) for j in range(3)]
        sums.append(grid[0][0] + grid[1][1] + grid[2][2])
        sums.append(grid[0][2] + grid[1][1] + grid[2][0])
        return len(set(sums)) == 1
# 840.
# 矩阵中的幻方 - -2
#
# 3
# x
# 3
# 的幻方是一个填充有从
# 1
# 到
# 9
# 的不同数字的
# 3
# x
# 3
# 矩阵,其中每行,每列以及两条对角线上的各数之和都相等。
#
# 给定一个由整数组成的
# grid,其中有多少个
# 3 × 3
# 的 “幻方” 子矩阵?(每个子矩阵都是连续的)。
#
#
#
# 示例:
#
# 输入: [[4, 3, 8, 4],
# [9, 5, 1, 9],
# [2, 7, 6, 2]]
# 输出: 1
# 解释:
# 下面的子矩阵是一个
# 3
# x
# 3
# 的幻方:
# 438
# 951
# 276
#
# 而这一个不是:
# 384
# 519
# 762
#
# 总的来说,在本示例所给定的矩阵中只有一个
# 3
# x
# 3
# 的幻方子矩阵。
#
# 提示:
#
# 1 <= grid.length <= 10
# 1 <= grid[0].length <= 10
# 0 <= grid[i][j] <= 15
# 解:
# 打表发,其实中心是5,才可能是幻
#
# ```
class Solution(object):
    def numMagicSquaresInside(self, grid):
        """
        :type grid: List[List[int]]
        :rtype: int

        Table lookup: a 3x3 window is magic iff its row-major flattening
        equals one of the eight magic squares (the unique magic square's
        rotations and reflections, all centered on 5).
        """
        magics = [[8, 1, 6, 3, 5, 7, 4, 9, 2], [6, 1, 8, 7, 5, 3, 2, 9, 4],
                  [4, 9, 2, 3, 5, 7, 8, 1, 6], [2, 9, 4, 7, 5, 3, 6, 1, 8],
                  [6, 7, 2, 1, 5, 9, 8, 3, 4], [8, 3, 4, 1, 5, 9, 6, 7, 2],
                  [2, 7, 6, 9, 5, 1, 4, 3, 8], [4, 3, 8, 9, 5, 1, 2, 7, 6]]
        total = 0
        for top in range(len(grid) - 2):
            for left in range(len(grid[0]) - 2):
                window = (grid[top][left:left + 3]
                          + grid[top + 1][left:left + 3]
                          + grid[top + 2][left:left + 3])
                if window in magics:
                    total += 1
        return total
| [
"feiyanshiren@163.com"
] | feiyanshiren@163.com |
526293e66ff54ad466476cfb1a777b846734c8af | 082474f6f6301c561ee9598843adaf1a37bcdf96 | /page_object/common/sendmail.py | 5f2e87c0ef3ac33e463697786390ed2ff90df098 | [] | no_license | guocheng45/Projects | 69e2b79e93f6a4697d1adb3a025f2c04943f37cf | df16943fcbc34341b9a0934b830da0860e2bb5ff | refs/heads/master | 2023-01-23T20:27:06.327650 | 2020-07-12T07:19:14 | 2020-07-12T07:19:14 | 197,792,797 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,158 | py | # coding=utf-8
import smtplib
from email.mime.text import MIMEText
from email.header import Header
from email.mime.image import MIMEImage
from email.mime.multipart import MIMEMultipart
class SendMail():
    """Assemble and send a fixed HTML test-feedback e-mail with an inline
    screenshot through QQ's SMTP service.

    NOTE(review): the ``msg1``, ``pic`` and ``receiver`` parameters are
    accepted but never used -- the body, image path and recipient list
    are all hard-coded below.  Confirm intent before relying on them.
    """

    def sendMail(self,msg1,pic,receiver):
        # Account and credentials used to log in to the sending mailbox.
        # NOTE(review): an SMTP authorization code is committed in source
        # control here; it should be loaded from config or the environment.
        password = 'sdjxhqksmlfsbghd'  # sender authorization code
        smtp_server = 'smtp.qq.com'  # outgoing (sending) mail server
        sender = '467563369@qq.com'
        receivers = ['467563369@qq.com','guozhicheng@ehaoyao.com']  # recipient mailboxes
        # 'related' lets the HTML body reference inline attachments by cid.
        msg = MIMEMultipart('related')
        # Message header fields.
        msg['From'] = sender  # sender
        msg['To'] = ";".join(receivers)  # recipients
        msg['Subject'] = Header('Test Feedback Email', 'utf-8')  # subject
        # Body: MIMEText(content, subtype, charset).
        # message = MIMEText('Python sendmail test', 'plain', 'utf-8')
        mail_msg = MIMEText("""
            <p>Python 邮件发送图文</p>
            <p>测试截图:</p>
            <p><img height="600" width="300" src="cid:image1"></p>
            <p><a href="http://www.baidu.com">这是一个链接</a></p>
            """, 'html', 'utf-8')  # "cid" is the Content-ID; the HTML body uses this unique id to reference the image attached below.
        msg.attach(mail_msg)
        # Read the screenshot that will be embedded in the body.
        # NOTE(review): path is hard-coded to 'test.png' in the working dir.
        file = open('test.png', 'rb')
        img_data = file.read()
        file.close()
        # Attach the image and give it the Content-ID referenced by the HTML.
        img=MIMEImage(img_data)
        img.add_header('Content-ID','image1')
        msg.attach(img)
        try:
            # Open the sending service over an encrypted SSL connection.
            smtpObj = smtplib.SMTP_SSL()
            smtpObj.connect(smtp_server, 465)
            smtpObj.login(sender, password)
            smtpObj.sendmail(sender, receivers, msg.as_string())
            print("send mail success")
        except smtplib.SMTPException:
            print("Error: can not send the mail")
        finally:
            # Close the connection to the server.
            smtpObj.quit()
"467563369@qq.com"
] | 467563369@qq.com |
644a5a3de788bd0b147de0a44fea79aee008cf69 | da5ef82554c6c0413193b7c99192edd70fed58dd | /core/lib/tests/free_ip.py | 7e2f686c2bc963208f481a2adf46fbdf94a1207b | [] | no_license | rtucker-mozilla/mozilla_inventory | d643c7713c65aa870e732e18aaf19ce677e277b7 | bf9154b0d77705d8c0fe1a9a35ce9c1bd60fcbea | refs/heads/master | 2020-12-24T17:17:37.621418 | 2013-04-11T10:39:41 | 2013-04-11T10:39:41 | 2,709,399 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 4,292 | py | from django.test import TestCase
from core.vlan.models import Vlan
from core.site.models import Site
from core.range.models import Range
from core.network.models import Network
from core.interface.static_intr.models import StaticInterface
from core.lib.utils import calc_free_ips_str, create_ipv4_intr_from_range
from mozdns.domain.models import Domain
from mozdns.tests.utils import create_fake_zone
from systems.models import System
class LibTestsFreeIP(TestCase):
    """Exercise calc_free_ips_str as interfaces consume addresses in a range."""

    def setUp(self):
        # Forward DNS fixture: a zone with a chain of subdomains sharing
        # the same SOA, hanging off a phx1/corp site tree and a vlan.
        self.system = System()
        d1 = create_fake_zone("mozilla.com.com", suffix="")
        soa = d1.soa
        v, _ = Vlan.objects.get_or_create(name="private", number=3)
        s, _ = Site.objects.get_or_create(name="phx1")
        s1, _ = Site.objects.get_or_create(name="corp", parent=s)
        d, _ = Domain.objects.get_or_create(name="phx1.mozilla.com.com")
        d.soa = soa
        d.save()
        d1, _ = Domain.objects.get_or_create(name="corp.phx1.mozilla.com.com")
        d1.soa = soa
        d1.save()
        d2, _ = Domain.objects.get_or_create(
            name="private.corp.phx1.mozilla.com.com")
        d2.soa = soa
        d2.save()
        # Reverse zones so PTR records for the created interfaces resolve.
        d, _ = Domain.objects.get_or_create(name="arpa")
        d, _ = Domain.objects.get_or_create(name="in-addr.arpa")
        d, _ = Domain.objects.get_or_create(name="ip6.arpa")
        d, _ = Domain.objects.get_or_create(name="15.in-addr.arpa")
        d, _ = Domain.objects.get_or_create(name="2.in-addr.arpa")
        # A /8 network with a small allocatable range inside it.
        n = Network(network_str="15.0.0.0/8", ip_type="4")
        n.clean()
        n.site = s1
        n.vlan = v
        n.save()
        r = Range(start_str="15.0.0.0", end_str="15.0.0.10",
                  network=n)
        r.clean()
        r.save()

    def _create_intr(self):
        """Allocate one static interface from the 15.0.0.200-204 window,
        save it, and assert the allocation succeeded.  Returns the interface."""
        intr, errors = create_ipv4_intr_from_range(
            "foo", "private.corp.phx1.mozilla.com.com", self.system,
            "11:22:33:44:55:66", "15.0.0.200", "15.0.0.204")
        intr.save()
        self.assertEqual(errors, None)
        self.assertTrue(isinstance(intr, StaticInterface))
        return intr

    def test1_free_ip_count(self):
        # Add a bunch of interfaces and make sure calc_free_ips_str shrinks
        # by one for every allocation, down to zero.
        self.assertEqual(calc_free_ips_str("15.0.0.200", "15.0.0.204"), 4)
        for expected_remaining in (3, 2, 1, 0):
            self._create_intr()
            self.assertEqual(
                calc_free_ips_str("15.0.0.200", "15.0.0.204"),
                expected_remaining)

    def test2_free_ip_count(self):
        # Placeholder (original author: "Time is tight, not going to do this
        # test yet") -- would add an IPv6 address and make sure the range
        # count sees it.  The early return leaves the call below unreached.
        return
        calc_free_ips_str("2620:101:8001::", "2620:101:8001::",
                          ip_type='6')
| [
"uberj@onid.orst.edu"
] | uberj@onid.orst.edu |
ed1af0217a26c41db25af4fb2cbaf6824e514d91 | ebfcae1c5ba2997b2ac4471d5bedc3f5daffcb31 | /repos/flask-restful-swagger-master/setup.py | b93a88d336feff6f68a96d4730a9222aae80d2f5 | [
"MIT"
] | permissive | babiato/flaskapp1 | 84de2d0b26a54f5820d3bbe97926782ad41e005c | 530beb9e3b8516e0e93960b99521c23a523ef546 | refs/heads/master | 2023-02-26T16:36:49.760632 | 2021-02-04T09:08:40 | 2021-02-04T09:08:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 822 | py | try:
from setuptools import setup
except ImportError:
from distutils.core import setup
with open("README") as file:
long_description = file.read()
setup(
name="flask-restful-swagger",
version="0.20.2",
url="https://github.com/rantav/flask-restful-swagger",
zip_safe=False,
packages=["flask_restful_swagger"],
package_data={
"flask_restful_swagger": [
"static/*.*",
"static/css/*.*",
"static/images/*.*",
"static/lib/*.*",
"static/lib/shred/*.*",
]
},
description="Extract swagger specs from your flask-restful project",
author="Ran Tavory",
license="MIT",
long_description=long_description,
install_requires=[
"Jinja2>=2.10.1,<3.0.0",
"Flask-RESTful>=0.3.6",
],
)
| [
"jinxufang@tencent.com"
] | jinxufang@tencent.com |
dce9c69be6f76f03da43a245de0a9184f4969bd0 | 70d39e4ee19154a62e8c82467ef75b601e584738 | /pyth3/birb_scraper.py | fb9b94b174fd01de555cd135edd1d03093b3209a | [] | no_license | babywyrm/sysadmin | 6f2724be13ae7e5b9372278856a8c072073beffb | 2a5f3d29c7529bc917d4ff9be03af30ec23948a5 | refs/heads/master | 2023-08-16T03:50:38.717442 | 2023-08-16T03:05:55 | 2023-08-16T03:05:55 | 210,228,940 | 10 | 5 | null | 2023-05-01T23:15:31 | 2019-09-22T23:42:50 | PowerShell | UTF-8 | Python | false | false | 1,590 | py | #!/usr/bin/python3
##################
##
##
import click
import requests
import re,os,sys
from bs4 import BeautifulSoup
#############################
#############################
def get_html_of(url):
resp = requests.get(url)
if resp.status_code != 200:
print(f'HTTP status code of {resp.status_code} returned, but 200 was expected. Exiting...')
exit(1)
return resp.content.decode()
def count_occurrences_in(word_list, min_length):
word_count = {}
for word in word_list:
if len(word) < min_length:
continue
if word not in word_count:
word_count[word] = 1
else:
current_count = word_count.get(word)
word_count[word] = current_count + 1
return word_count
def get_all_words_from(url):
html = get_html_of(url)
soup = BeautifulSoup(html, 'html.parser')
raw_text = soup.get_text()
return re.findall(r'\w+', raw_text)
def get_top_words_from(all_words, min_length):
occurrences = count_occurrences_in(all_words, min_length)
return sorted(occurrences.items(), key=lambda item: item[1], reverse=True)
@click.command()
@click.option('--url', '-u', prompt='Web URL', help='URL of webpage to extract from.')
@click.option('--length', '-l', default=0, help='Minimum word length (default: 0, no limit).')
def main(url, length):
the_words = get_all_words_from(url)
top_words = get_top_words_from(the_words, length)
for i in range(10):
print(top_words[i][0])
if __name__ == '__main__':
main()
###############################
##
##
| [
"noreply@github.com"
] | babywyrm.noreply@github.com |
83413238c8fee2649daebd589b1fed30ede470b5 | 46ac0965941d06fde419a6f216db2a653a245dbd | /sdks/python/test/test_UserLiteProfileResponse.py | ca0997dce4fae8c872697809ac1d3e612d3b77dc | [
"MIT",
"Unlicense"
] | permissive | b3nab/appcenter-sdks | 11f0bab00d020abb30ee951f7656a3d7ed783eac | bcc19c998b5f648a147f0d6a593dd0324e2ab1ea | refs/heads/master | 2022-01-27T15:06:07.202852 | 2019-05-19T00:12:43 | 2019-05-19T00:12:43 | 187,386,747 | 0 | 3 | MIT | 2022-01-22T07:57:59 | 2019-05-18T17:29:21 | Python | UTF-8 | Python | false | false | 985 | py | # coding: utf-8
"""
App Center Client
Microsoft Visual Studio App Center API # noqa: E501
OpenAPI spec version: preview
Contact: benedetto.abbenanti@gmail.com
Project Repository: https://github.com/b3nab/appcenter-sdks
"""
from __future__ import absolute_import
import unittest
import appcenter_sdk
from UserLiteProfileResponse.clsUserLiteProfileResponse import UserLiteProfileResponse # noqa: E501
from appcenter_sdk.rest import ApiException
class TestUserLiteProfileResponse(unittest.TestCase):
    """UserLiteProfileResponse unit test stubs (generated scaffold)."""

    def setUp(self):
        # No fixtures are required yet for this generated stub.
        pass

    def tearDown(self):
        # Nothing to clean up.
        pass

    def testUserLiteProfileResponse(self):
        """Test UserLiteProfileResponse"""
        # FIXME: construct object with mandatory attributes with example values
        # model = appcenter_sdk.models.clsUserLiteProfileResponse.UserLiteProfileResponse()  # noqa: E501
        pass
if __name__ == '__main__':
unittest.main()
| [
"b3nab@users.noreply.github.com"
] | b3nab@users.noreply.github.com |
da9be17b5af888677c2e663ed919a2370d0183b0 | 764ce53fd708bb3f81d67cc9a2366265c9a685b9 | /accounts_django/views.py | a9377b5f321014743f8f2900bb49fe562dc80934 | [] | no_license | Vaishnavi-Gajinkar/Bridgelabz | 3d17b8399432ac5643059e822ccad9a90f919e9f | e51551ab675dbb5444ba222cc88ac05fbeab49d2 | refs/heads/master | 2020-12-28T02:45:18.517627 | 2020-03-09T13:42:37 | 2020-03-09T13:42:37 | 238,153,294 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 649 | py | from django.shortcuts import render
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.decorators import login_required
# Create your views here.
def indexView(request):
    """Render the public landing page."""
    return render(request, 'accountsIndex.html')
@login_required
def dashboardView(request):
    """Render the dashboard; only reachable by authenticated users."""
    return render(request,'dashboard.html')
def registerView(request):
    """Handle user self-registration.

    GET renders an empty sign-up form; POST validates the submitted data,
    creates the user and redirects to the login page.  An invalid POST
    falls through and re-renders the bound form with its errors.
    """
    # Bug fix: Django upper-cases request.method, so the original check
    # against "Post" never matched and registration silently did nothing.
    if request.method == "POST":
        form = UserCreationForm(request.POST)
        if form.is_valid():
            form.save()
            return redirect('login_url')
    else:
        # Instantiate the form (the original passed the class itself).
        form = UserCreationForm()
    return render(request, 'registration/register.html', {'form': form})
"vaishnavi.gajinkar@gmail.com"
] | vaishnavi.gajinkar@gmail.com |
794bb82367e77038068f0618a2f04881891f6d67 | 752116ef4b69a3049fef0cfe9b3d212548cc81b1 | /sources/scripting/wrappers/session.py | 82d27208c07181752417a4872c0c94524d3e9985 | [] | no_license | VDOMBoxGroup/runtime2.0 | e54af4af7a642f34b0e07b5d4096320494fb9ae8 | cb9932f5f75d5c6d7889f26d58aee079b4127299 | refs/heads/develop | 2023-07-07T11:06:10.817093 | 2023-07-03T06:11:55 | 2023-07-03T06:11:55 | 62,622,255 | 0 | 12 | null | 2023-05-23T02:55:00 | 2016-07-05T09:09:48 | Python | UTF-8 | Python | false | false | 1,056 | py |
import managers
class VDOM_session(object):
def _get_id(self):
return managers.request_manager.current.session().id()
def __getitem__(self, name):
if name == "response": # temporary solution for backward compability of Whole
return managers.request_manager.current.wholeAnswer
return managers.request_manager.current.session()[name]
def __setitem__(self, name, value):
if name == "response": # temporary solution for backward compability of Whole
managers.request_manager.current.wholeAnswer = value
managers.request_manager.current.session()[name] = value
def __delitem__(self, name):
del managers.request_manager.current.session()[name]
def get(self, name, default=None):
return managers.request_manager.current.session().get(name, default)
def keys(self):
return managers.request_manager.current.session().keys()
def __iter__(self):
return iter(managers.request_manager.current.session())
id = property(_get_id)
| [
"nikolay.grishkov@vdombox.ru"
] | nikolay.grishkov@vdombox.ru |
a299aa68059723e3132438cc91d76f150d1c8463 | 3d19e1a316de4d6d96471c64332fff7acfaf1308 | /Users/B/bkj123/yrs_of_educ_by_country.py | f3b9f31c458c9e8d9cd5f725423f5700f75502e0 | [] | no_license | BerilBBJ/scraperwiki-scraper-vault | 4e98837ac3b1cc3a3edb01b8954ed00f341c8fcc | 65ea6a943cc348a9caf3782b900b36446f7e137d | refs/heads/master | 2021-12-02T23:55:58.481210 | 2013-09-30T17:02:59 | 2013-09-30T17:02:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,052 | py | import scraperwiki
html = scraperwiki.scrape("http://web.archive.org/web/20110514112442/http://unstats.un.org/unsd/demographic/products/socind/education.htm")
# print html
import lxml.html
root = lxml.html.fromstring(html)
for tr in root.cssselect("div[align='left'] tr"):
tds = tr.cssselect("td")
if len(tds)==12:
data = {
'country' : tds[0].text_content(),
'years_in_school' : int(tds[4].text_content())
}
scraperwiki.sqlite.save(unique_keys=['country'], data=data)
import scraperwiki
html = scraperwiki.scrape("http://web.archive.org/web/20110514112442/http://unstats.un.org/unsd/demographic/products/socind/education.htm")
# print html
import lxml.html
root = lxml.html.fromstring(html)
for tr in root.cssselect("div[align='left'] tr"):
tds = tr.cssselect("td")
if len(tds)==12:
data = {
'country' : tds[0].text_content(),
'years_in_school' : int(tds[4].text_content())
}
scraperwiki.sqlite.save(unique_keys=['country'], data=data)
| [
"pallih@kaninka.net"
] | pallih@kaninka.net |
97f2bc4b372ec60b32bf11ae749b8cbabc6a3842 | 5e84763c16bd6e6ef06cf7a129bb4bd29dd61ec5 | /blimgui/dist/OpenGL/raw/GL/KHR/context_flush_control.py | 3afcc413f9667fef3a1d01df0028809210531e33 | [
"MIT"
] | permissive | juso40/bl2sdk_Mods | 8422a37ca9c2c2bbf231a2399cbcb84379b7e848 | 29f79c41cfb49ea5b1dd1bec559795727e868558 | refs/heads/master | 2023-08-15T02:28:38.142874 | 2023-07-22T21:48:01 | 2023-07-22T21:48:01 | 188,486,371 | 42 | 110 | MIT | 2022-11-20T09:47:56 | 2019-05-24T20:55:10 | Python | UTF-8 | Python | false | false | 876 | py | '''Autogenerated by xml_generate script, do not edit!'''
from OpenGL import platform as _p, arrays
# Code generation uses this
from OpenGL.raw.GL import _types as _cs
# End users want this...
from OpenGL.raw.GL._types import *
from OpenGL.raw.GL import _errors
from OpenGL.constant import Constant as _C
import ctypes
_EXTENSION_NAME = 'GL_KHR_context_flush_control'
def _f( function ):
return _p.createFunction( function,_p.PLATFORM.GL,'GL_KHR_context_flush_control',error_checker=_errors._error_checker)
GL_CONTEXT_RELEASE_BEHAVIOR=_C('GL_CONTEXT_RELEASE_BEHAVIOR',0x82FB)
GL_CONTEXT_RELEASE_BEHAVIOR_FLUSH=_C('GL_CONTEXT_RELEASE_BEHAVIOR_FLUSH',0x82FC)
GL_CONTEXT_RELEASE_BEHAVIOR_FLUSH_KHR=_C('GL_CONTEXT_RELEASE_BEHAVIOR_FLUSH_KHR',0x82FC)
GL_CONTEXT_RELEASE_BEHAVIOR_KHR=_C('GL_CONTEXT_RELEASE_BEHAVIOR_KHR',0x82FB)
GL_NONE=_C('GL_NONE',0)
GL_NONE=_C('GL_NONE',0)
| [
"justin.sostmann@googlemail.com"
] | justin.sostmann@googlemail.com |
5a8e8d819963505695e442f8feb3dc849404db3d | f03bd5bd7873c5cc33b4ef5199f219539f3a340e | /CAAPR/CAAPR_AstroMagic/PTS/pts/modeling/plotting/maps.py | 34c95944a5b48c35565bf382c7f5185c18db72ec | [
"GPL-1.0-or-later",
"AGPL-3.0-only",
"AGPL-3.0-or-later",
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-philippe-de-muyter",
"MIT"
] | permissive | Stargrazer82301/CAAPR | 5f8a7033b16792f23abd5d07021b53b9228a5db4 | 62b2339beb2eb956565e1605d44d92f934361ad7 | refs/heads/master | 2022-08-29T02:53:33.658022 | 2022-08-05T19:06:46 | 2022-08-05T19:06:46 | 49,977,601 | 8 | 1 | MIT | 2022-08-05T19:06:47 | 2016-01-19T19:32:42 | Python | UTF-8 | Python | false | false | 3,449 | py | #!/usr/bin/env python
# -*- coding: utf8 -*-
# *****************************************************************
# ** PTS -- Python Toolkit for working with SKIRT **
# ** © Astronomical Observatory, Ghent University **
# *****************************************************************
## \package pts.modeling.plotting.maps Contains the MapsPlotter class
# -----------------------------------------------------------------
# Ensure Python 3 compatibility
from __future__ import absolute_import, division, print_function
# Import the relevant PTS classes and modules
from .component import PlottingComponent
from ..maps.component import MapsComponent
from ...core.tools import filesystem as fs
from ...core.tools.logging import log
from ...magic.core.frame import Frame
from ...magic.plot.imagegrid import StandardImageGridPlotter
# -----------------------------------------------------------------
class MapsPlotter(PlottingComponent, MapsComponent):

    """
    Plots the input maps of a modeling run (dust, ionizing stars, old
    stars, young stars) together on a single image grid.
    """

    def __init__(self, config=None):

        """
        The constructor: initializes both parent components and the
        dictionary that will hold the loaded image frames.
        :param config: optional configuration for the plotting component
        :return:
        """

        # Call the constructor of the base class
        #super(MapsPlotter, self).__init__(config) # not sure this works
        PlottingComponent.__init__(self, config)
        MapsComponent.__init__(self)

        # -- Attributes --

        # The dictionary of image frames, keyed on map name
        self.images = dict()

    # -----------------------------------------------------------------

    def run(self, features=None):

        """
        Runs the plotter: setup, load the maps, then plot them.
        :param features: accepted but not used anywhere in this class --
                         presumably meant to select which maps to plot;
                         TODO confirm before relying on it
        :return:
        """

        # 1. Call the setup function
        self.setup()

        # 2. Load the images
        self.load_images()

        # 3. Plot
        self.plot()

    # -----------------------------------------------------------------

    def setup(self):

        """
        Delegates to the base-class setup.
        :return:
        """

        # Call the setup function of the base class
        super(MapsPlotter, self).setup()

    # -----------------------------------------------------------------

    def load_images(self):

        """
        Loads the four input map FITS files from the maps directory into
        self.images, keyed by map name.
        :return:
        """

        # Inform the user
        log.info("Loading the maps ...")

        for name in ["dust", "ionizing_stars", "old_stars", "young_stars"]:

            # Determine the path to the image
            path = fs.join(self.maps_path, name + ".fits")

            # Debugging
            log.debug("Loading the " + name + " image ...")

            # Open the map
            image = Frame.from_file(path)

            # Add the image to the dictionary
            self.images[name] = image

    # -----------------------------------------------------------------

    def plot(self):

        """
        Plots all loaded maps on one grid and writes it to
        <plot_maps_path>/maps.pdf.
        :return:
        """

        # Inform the user
        log.info("Plotting ...")

        # Create the image plotter
        plotter = StandardImageGridPlotter()

        # Add the images
        for label in self.images: plotter.add_image(self.images[label], label)

        # Determine the path to the plot file
        path = fs.join(self.plot_maps_path, "maps.pdf")

        # Fixed plot appearance for the input-map overview.
        plotter.colormap = "hot"
        plotter.vmin = 0.0

        plotter.set_title("Input maps")

        # Make the plot
        plotter.run(path)
| [
"cjrc88@gmail.com"
] | cjrc88@gmail.com |
c2aa9abcfa5c036633a093ef2a54e4933f52f65f | d820c8efb25c9adb77015650a0f7dc6f1e983bfe | /abc/abc050_c.py | 2ee42d95bfc181d741d4eb9244b9dd97dde8f042 | [] | no_license | toshikish/atcoder | 73fdaa2310f23f846279f9f7466bdb969448371f | 33676630d6820dd92ccf0931425b8906b065bedd | refs/heads/master | 2022-05-16T20:00:52.665762 | 2022-04-02T11:55:44 | 2022-04-02T11:55:44 | 173,099,510 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 538 | py | from collections import defaultdict
N = int(input())
A = list(map(int, input().split()))
c = defaultdict(int)
for a in A:
c[a] += 1
if N % 2 == 0:
probable = True
for i in range(1, N, 2):
if c[i] != 2:
probable = False
ans = 2 ** (N // 2) % (10 ** 9 + 7) if probable else 0
else:
probable = True
if c[0] != 1:
probable = False
for i in range(2, N, 2):
if c[i] != 2:
probable = False
ans = 2 ** ((N - 1) // 2) % (10 ** 9 + 7) if probable else 0
print(ans)
| [
"toshiki@nanshika.com"
] | toshiki@nanshika.com |
372c60c9ffa1d43f6ea24aa8501c2db618e5bbce | f3e51466d00510f1dae58f1cb87dd53244ce4e70 | /LeetCodes/272. Closest Binary Search Tree Value II.py | d1d13fab5f033ce4abb1ae9ab45dffa36d528771 | [] | no_license | chutianwen/LeetCodes | 40d18e7aa270f8235342f0485bfda2bd1ed960e1 | 11d6bf2ba7b50c07e048df37c4e05c8f46b92241 | refs/heads/master | 2022-08-27T10:28:16.594258 | 2022-07-24T21:23:56 | 2022-07-24T21:23:56 | 96,836,652 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,395 | py | '''
272. Closest Binary Search Tree Value II
Hard
277
13
Favorite
Share
Given a non-empty binary search tree and a target value, find k values in the BST that are closest to the target.
Note:
Given target value is a floating point.
You may assume k is always valid, that is: k ≤ total nodes.
You are guaranteed to have only one unique set of k values in the BST that are closest to the target.
Example:
Input: root = [4,2,5,1,3], target = 3.714286, and k = 2
4
/ \
2 5
/ \
1 3
Output: [4,3]
Follow up:
Assume that the BST is balanced, could you solve it in less than O(n) runtime (where n = total nodes)?
'''
class Solution:
    def closestKValues(self, root, target, k):
        """Return the k BST values closest to target, closest first.

        Builds the root-to-closest-node search path, then walks two
        "paths" (stacks) outward from the closest value -- one backward
        (predecessors), one forward (successors) -- merging whichever
        end is nearer to target, k times.

        Fix: the original left a debug loop that printed every node on
        the successor path, polluting stdout on every call.
        """
        # Helper: advance `path` to the next node in one in-order
        # direction.  kid2 is the child pointer for that direction;
        # kid1 is the opposite side, descended fully after stepping.
        def nextpath(path, kid1, kid2):
            if path:
                if kid2(path):
                    path += kid2(path),
                    while kid1(path):
                        path += kid1(path),
                else:
                    # No child in that direction: pop until we leave a
                    # node that was not the kid2-child of its parent.
                    kid = path.pop()
                    while path and kid is kid2(path):
                        kid = path.pop()

        # These customize nextpath as forward or backward iterator.
        kidleft = lambda path: path[-1].left
        kidright = lambda path: path[-1].right

        # Build the path to the closest node and truncate it there.
        path = []
        while root:
            path += root,
            root = root.left if target < root.val else root.right
        dist = lambda node: abs(node.val - target)
        path = path[:path.index(min(path, key=dist)) + 1]

        # Second path starts at the in-order successor of the closest node.
        path2 = path[:]
        nextpath(path2, kidleft, kidright)

        # Collect the closest k values by moving the two paths outwards.
        vals = []
        for _ in range(k):
            if not path2 or path and dist(path[-1]) < dist(path2[-1]):
                vals += path[-1].val,
                nextpath(path, kidright, kidleft)
            else:
                vals += path2[-1].val,
                nextpath(path2, kidleft, kidright)
        return vals
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
    def closestKValues(self, root, target, k):
        """
        :type root: TreeNode
        :type target: float
        :type k: int
        :rtype: List[int]

        Two-stack variant: one stack ends at the closest node, the other
        at its in-order successor; repeatedly take whichever stack top is
        nearer to target and advance that stack outward.

        Fixes over the original: removed a leftover debug print loop,
        an unused `min_diff` variable, and a duplicated `dis` lambda.
        """
        dis = lambda x: abs(x.val - target)

        # Root-to-node path of a BST search for target, truncated at the
        # node whose value is closest to target.
        path_smaller = []
        while root:
            path_smaller.append(root)
            root = root.left if target < root.val else root.right
        path_smaller = path_smaller[: path_smaller.index(min(path_smaller, key=dis)) + 1]

        frontier_left = lambda path: path[-1].left
        frontier_right = lambda path: path[-1].right

        def next_path(path, frontier1, frontier2):
            # frontier1 is the advance direction; frontier2 is the opposite
            # side, descended fully after stepping (or popped past when no
            # child exists in the advance direction).
            if frontier1(path):
                path.append(frontier1(path))
                while frontier2(path):
                    path.append(frontier2(path))
            else:
                node = path.pop()
                while path and node is frontier1(path):
                    node = path.pop()

        # Successor stack starts one step forward from the closest node.
        path_larger = path_smaller[:]
        next_path(path_larger, frontier_right, frontier_left)

        res = []
        for _ in range(k):
            if not path_larger or path_smaller and dis(path_smaller[-1]) < dis(path_larger[-1]):
                res.append(path_smaller[-1].val)
                next_path(path_smaller, frontier_left, frontier_right)
            else:
                res.append(path_larger[-1].val)
                next_path(path_larger, frontier_right, frontier_left)
        return res
class Solution:
    # Third variant of the two-stack closest-k walk (same idea as above,
    # with <= tie-breaking toward the smaller side).
    def closestKValues(self, root, target, k):
        abs_dis = lambda x: abs(x.val - target)
        # Root-to-node search path, truncated at the closest node.
        stack_small = []
        while root:
            stack_small.append(root)
            root = root.left if target < root.val else root.right
        closest_cut = min(stack_small, key=abs_dis)
        stack_small = stack_small[:stack_small.index(closest_cut) + 1]
        stack_large = stack_small[:]
        # NOTE: `next` shadows the builtin; fine locally but easy to trip on.
        def next(stack, fun1, fun2):
            # fun1 = advance direction, fun2 = opposite side descended after
            # stepping; otherwise pop until leaving a non-fun1 child.
            if fun1(stack):
                stack.append(fun1(stack))
                while fun2(stack):
                    stack.append(fun2(stack))
            else:
                cur = stack.pop()
                # NOTE(review): compares with == rather than `is` as the
                # other variants do -- with duplicate values this could pop
                # past the wrong node; confirm inputs are distinct.
                while stack and cur == fun1(stack):
                    cur = stack.pop()
        frontier_left = lambda x: x[-1].left
        frontier_right = lambda x: x[-1].right
        # Successor stack starts one step forward from the closest node.
        next(stack_large, frontier_right, frontier_left)
        res = []
        for _ in range(k):
            # <= prefers the predecessor side on distance ties.
            if not stack_large or stack_small and abs_dis(stack_small[-1]) <= abs_dis(stack_large[-1]):
                res.append(stack_small[-1].val)
                next(stack_small, frontier_left, frontier_right)
            else:
                res.append(stack_large[-1].val)
                next(stack_large, frontier_right, frontier_left)
        return res
| [
"tianwen.chu@fedcentric.com"
] | tianwen.chu@fedcentric.com |
269c20a755b2e945ab30e91074fb5b1c3c6610fc | c380976b7c59dadaccabacf6b541124c967d2b5a | /.history/src/data/data_20191019130008.py | 488e9d3809c5c14aace59c9e423def076851d17e | [
"MIT"
] | permissive | bkraft4257/kaggle_titanic | b83603563b4a3c995b631e8142fe72e1730a0e2e | f29ea1773773109a867278c001dbd21a9f7b21dd | refs/heads/master | 2020-08-17T12:45:28.653402 | 2019-11-15T16:20:04 | 2019-11-15T16:20:04 | 215,667,760 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,144 | py | import pandas as pd
from typing import Union
from pathlib import Path
from nameparser import HumanName
class ExtractData:
    """Load the Kaggle Titanic CSV and normalize it into ``self.Xy_raw``."""

    def __init__(self, filename: Union[str, Path], drop_columns=None):
        """Extract training data from a CSV file or Path.

        :param filename: CSV data file containing the data
        :param drop_columns: columns that should be dropped downstream
            (NOTE(review): the default includes 'age', but extract_raw
            renames that column to 'age_known' -- confirm which name the
            consumer expects to drop)
        """
        if drop_columns is None:
            drop_columns = ["age", "cabin", "name", "ticket"]
        self.filename = filename
        self.drop_columns = drop_columns
        self.all_label_columns = ["survived"]
        self.all_feature_columns = [
            "pclass",
            "name",
            "sex",
            "age",
            "sibsp",
            "parch",
            "ticket",
            "fare",
            "cabin",
            "embarked",
        ]
        # Populated by extract_raw() below.
        self.Xy_raw = None
        self.extract_raw()

    def extract_raw(self):
        """
        Read the CSV, snake_case the column names, rename age ->
        age_known, cast pclass to categorical and index by passengerid.
        Stores the result in self.Xy_raw.
        """
        Xy_raw = pd.read_csv(self.filename)
        Xy_raw.columns = Xy_raw.columns.str.lower().str.replace(" ", "_")
        Xy_raw = Xy_raw.rename(columns={'age':'age_known'})
        Xy_raw["pclass"] = Xy_raw["pclass"].astype("category")
        self.Xy_raw = Xy_raw.set_index("passengerid")
class TransformData:
    """Feature engineering on a copy of ExtractData's raw frame: derives
    title, last name, cabin fields, an estimated age and an is_child flag."""

    # Map rare honorifics onto the common "Mr."/"Mrs." titles so they form
    # usable groups for the age estimate.
    title_translator = {
        "Mlle.": "Mrs.",
        "Mme.": "Mrs.",
        "Sir.": "Mr.",
        "Ms.": "Mrs.",
        "Rev.": "Mr.",
        "": "Mr.",
        "Col.": "Mr.",
        "Capt.": "Mr.",
        "Lady.": "Mrs.",
        "the Countess. of": "Mrs.",
    }

    def __init__(self, raw_data, adult_age_threshold_min = 13, drop_columns=None):
        # raw_data: an ExtractData instance; its Xy_raw frame is copied.
        # adult_age_threshold_min: ages strictly below this count as child.
        # NOTE(review): drop_columns is normalized here but never stored on
        # self, so clean() below cannot see it -- confirm intended behavior.
        if drop_columns is None:
            drop_columns = ["age", "cabin", "name", "ticket"]
        self.raw = raw_data
        self.adult_age_threshold_min = adult_age_threshold_min
        self.Xy = self.raw.Xy_raw.copy()
        # Derive all engineered features up front.
        self.extract_title()
        self.extract_last_name()
        self.extract_cabin_number()
        self.extract_cabin_prefix()
        self.estimate_age()
        self.calc_is_child()

    def calc_is_child(self):
        # Boolean flag: age (known or estimated) below the adult threshold.
        self.Xy['is_child'] = self.Xy.age < self.adult_age_threshold_min

    def extract_cabin_number(self):
        # Trailing digits of the ticket string (stored as text).
        self.Xy['cabin_number'] = self.Xy.ticket.str.extract('(\d+)$')

    def extract_cabin_prefix(self):
        # Everything before the last space in the ticket string, if any.
        self.Xy['cabin_prefix'] = self.Xy.ticket.str.extract('^(.+) ')

    def extract_title(self):
        # Parse the honorific out of the name, normalize rare titles, and
        # strip the trailing period.
        self.Xy["title"] = (
            self.Xy.name.apply(lambda x: HumanName(x).title)
            .replace(self.title_translator)
            .replace({"\.": ""}, regex=True)
        )

    def extract_last_name(self):
        # Family name parsed from the full passenger name.
        self.Xy["last_name"] = self.Xy.name.apply(lambda x: HumanName(x).last)

    def clean(self,):
        """Drop unused columns from the working frame.

        NOTE(review): references self.Xy_raw and self.drop_columns, neither
        of which is set on this class (they live on ExtractData / the local
        __init__ variable) -- calling this as written would raise
        AttributeError.
        """
        self.Xy = self.Xy_raw.drop(self.drop_columns, axis=1)

    def estimate_age(self, Xy_age_estimate=None, groupby_columns=['sex','title']):
        """Fill missing age_known values with per-(sex, title) mean ages.

        NOTE(review): groupby_columns is a mutable default argument; also
        the fillna below pairs self.Xy (indexed by passengerid) with out_df
        (RangeIndex after reset_index/merge), and fillna aligns on index
        labels -- verify rows actually match up as intended.
        """
        if Xy_age_estimate is None:
            Xy_age_estimate = self.Xy.groupby(groupby_columns).age_known.mean().to_frame().round(1)
        Xy_age_estimate = Xy_age_estimate.rename(columns ={'age_known':'age_estimate'})
        out_df = self.Xy.reset_index().merge(Xy_age_estimate, on=groupby_columns)
        self.Xy['age'] = self.Xy['age_known'].fillna(out_df['age_estimate'])
        self.Xy_age_estimate = Xy_age_estimate
"bob.kraft@infiniteleap.net"
] | bob.kraft@infiniteleap.net |
30e15a1b111b3db9ba288c9d3dc8e0e6c1a8ff63 | 077c91b9d5cb1a6a724da47067483c622ce64be6 | /load_balancer_fuzzer_mcs/intermcs_19_/interactive_replay_config.py | c579a012fb2bdbbe402547c7e4eedc9f0915ee81 | [] | no_license | Spencerx/experiments | 0edd16398725f6fd9365ddbb1b773942e4878369 | aaa98b0f67b0d0c0c826b8a1565916bf97ae3179 | refs/heads/master | 2020-04-03T10:11:40.671606 | 2014-06-11T23:55:11 | 2014-06-11T23:55:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,099 | py |
from config.experiment_config_lib import ControllerConfig
from sts.topology import *
from sts.control_flow import InteractiveReplayer
from sts.simulation_state import SimulationConfig
from sts.input_traces.input_logger import InputLogger
# STS replay setup: a POX ip_loadbalancer controller on a 3-switch mesh,
# with multiplexed sockets so the recorded trace can be re-driven precisely.
simulation_config = SimulationConfig(controller_configs=[ControllerConfig(start_cmd='./pox.py --verbose --unthreaded-sh misc.ip_loadbalancer --ip=123.123.1.3 --servers=123.123.2.3,123.123.1.3 sts.util.socket_mux.pox_monkeypatcher openflow.discovery openflow.of_01 --address=__address__ --port=__port__', label='c1', address='127.0.0.1', cwd='dart_pox')],
                     topology_class=MeshTopology,
                     topology_params="num_switches=3",
                     patch_panel_class=BufferedPatchPanel,
                     multiplex_sockets=True,
                     kill_controllers_on_exit=True)
# Interactively replay the recorded minimal causal sequence (MCS) trace.
control_flow = InteractiveReplayer(simulation_config, "experiments/load_balancer_fuzzer_mcs/intermcs_19_/mcs.trace.notimeouts")
# wait_on_deterministic_values=False
# delay_flow_mods=False
# Invariant check: 'check_for_ofp_error'
# Bug signature: "ERROR_SENT"
| [
"jefflai2@gmail.com"
] | jefflai2@gmail.com |
85696669f20f3e9e72ae887dfea4980d60f2d30c | cf7c928d6066da1ce15d2793dcf04315dda9b9ed | /Jungol/Lv1_LCoder_Python/pyc0_리스트3/Main_JO_924_리스트3_자가진단4.py | 5d2a061396cfe3ecaa9addc21c493d77e9562c33 | [] | no_license | refresh6724/APS | a261b3da8f53de7ff5ed687f21bb1392046c98e5 | 945e0af114033d05d571011e9dbf18f2e9375166 | refs/heads/master | 2022-02-01T23:31:42.679631 | 2021-12-31T14:16:04 | 2021-12-31T14:16:04 | 251,617,280 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 467 | py | # 100 이하의 자연수를 입력받아
# 첫 번째 항은 100으로 두 번째 항은 입력받은 수로 초기화하고
# 다음 항부터는 전전항에서 전항을 뺀 수로 채워나가는 수열을 작성하여
# 그 수가 음수가 나올 때까지 출력하는 프로그램을 작성하시오.
# Sequence: first term 100, second term the user-supplied number; each
# subsequent term is (term before previous) - (previous). Terms are printed
# up to and including the first negative one.
a = 100
b = int(input())
sequence = [a, b]  # renamed from `list` to avoid shadowing the built-in
c = a - b
while c >= 0:
    sequence.append(c)
    a = b
    b = c
    c = a - b
sequence.append(c)  # include the first negative term
print(*sequence, sep=" ")
| [
"refresh6724@gmail.com"
] | refresh6724@gmail.com |
c4a06e2b61bcf307acb87e92eaa2c9ff5afa4e5a | ed12e1905d71e2ff8ff01f39e8d2ebd2e8ccda1f | /Chapter 18/spiralDraw.py | 7f3bdce70ef5205cb0a9f9c14537bcdd93946f70 | [] | no_license | afettouhi/AutomatetheBoringStuffwithPython-py38 | ac18c28a78c0fe9b4b3afd900f668a50a92203db | f8cfc1761257983280039246d3fa3ebe65ec84cb | refs/heads/master | 2022-12-01T00:44:09.828472 | 2020-08-01T04:47:46 | 2020-08-01T04:47:46 | 281,289,442 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 572 | py | import pyautogui
import time
print('5 second til it starts')
pyautogui.LOG_SCREENSHOTS = True
pyautogui.LOG_SCREENSHOTS_LIMIT = 100
time.sleep(5)
pyautogui.click() # click to put drawing program in focus
distance = 300
shrink = 20
while distance > 0:
pyautogui.dragRel(distance, 0, duration=0.1) # move right
distance = distance - shrink
pyautogui.dragRel(0, distance, duration=0.1) # move down
pyautogui.dragRel(-distance, 0, duration=0.1) # move left
distance = distance - shrink
pyautogui.dragRel(0, -distance, duration=0.1) # move up
| [
"A.Fettouhi@gmail.com"
] | A.Fettouhi@gmail.com |
36106fd4f837ef5a89f665a1d2bd3c2438f6df1f | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_nighties.py | f7282804ffcdbc15e6ef297e404c1b07eea351a2 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 226 | py |
#calss header
class _NIGHTIES():
def __init__(self,):
self.name = "NIGHTIES"
self.definitions = nightie
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['nightie']
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
57e2a50254badcb57543d4facb1a0485fdcb2a11 | bc9f66258575dd5c8f36f5ad3d9dfdcb3670897d | /lib/googlecloudsdk/generated_clients/apis/tpu/v1beta1/resources.py | a1b11eb5bd917f4daa240b150019a41f2041e4d1 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | google-cloud-sdk-unofficial/google-cloud-sdk | 05fbb473d629195f25887fc5bfaa712f2cbc0a24 | 392abf004b16203030e6efd2f0af24db7c8d669e | refs/heads/master | 2023-08-31T05:40:41.317697 | 2023-08-23T18:23:16 | 2023-08-23T18:23:16 | 335,182,594 | 9 | 2 | NOASSERTION | 2022-10-29T20:49:13 | 2021-02-02T05:47:30 | Python | UTF-8 | Python | false | false | 1,722 | py | # -*- coding: utf-8 -*- #
# Copyright 2023 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Resource definitions for Cloud Platform Apis generated from apitools."""
import enum
BASE_URL = 'https://tpu.googleapis.com/v1beta1/'
DOCS_URL = 'https://cloud.google.com/tpu/'
class Collections(enum.Enum):
    """Collections for all supported apis.

    Generated from apitools. Each member's value is the tuple
    (collection_name, path, flat_paths, params, enable_uri_parsing).
    """

    PROJECTS = (
        'projects',
        'projects/{projectsId}',
        {},
        [u'projectsId'],
        True
    )
    PROJECTS_LOCATIONS = (
        'projects.locations',
        '{+name}',
        {
            '':
                'projects/{projectsId}/locations/{locationsId}',
        },
        [u'name'],
        True
    )
    PROJECTS_LOCATIONS_OPERATIONS = (
        'projects.locations.operations',
        '{+name}',
        {
            '':
                'projects/{projectsId}/locations/{locationsId}/operations/'
                '{operationsId}',
        },
        [u'name'],
        True
    )

    def __init__(self, collection_name, path, flat_paths, params,
                 enable_uri_parsing):
        # Enum members declared as tuples are unpacked into __init__; store
        # each piece as a named attribute on the member.
        self.collection_name = collection_name
        self.path = path
        self.flat_paths = flat_paths
        self.params = params
        self.enable_uri_parsing = enable_uri_parsing
| [
"cloudsdk.mirror@gmail.com"
] | cloudsdk.mirror@gmail.com |
ca76d2d1c807f66ca7556dc77a4ddb73eab2cd23 | f8360e6eef89f9b78365a73e7abacf87db1d880a | /models/hospitality/ambulance.py | bdf8c525a7757a874f200ac727dccadb8dc04407 | [] | no_license | Trilokan/sivappu | 022c6b5997213db8c8994429bf5e482f42b8464d | 110c95851a970f051a50bed6ee72be542ca91efe | refs/heads/master | 2020-05-04T20:44:42.555829 | 2019-04-29T12:34:08 | 2019-04-29T12:34:08 | 179,449,651 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,961 | py | # -*- coding: utf-8 -*-
from odoo import models, fields, api
from datetime import datetime
PROGRESS_INFO = [("draft", "Draft"),
("confirmed", "Confirmed"),
("driver_intimated", "Driver to Intimated"),
("done", "Done"),
("cancel", "Cancel")]
CANCEL_INFO = [("cancel", "Cancel")]
CURRENT_DATE = datetime.now().strftime("%Y-%m-%d")
CURRENT_TIME = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
INDIA_TIME = datetime.now().strftime("%d-%m-%Y %H:%M:%S")
# Ambulance
class Ambulance(models.Model):
    """Ambulance transfer request.

    Tracks the assigned driver and patient, pickup/drop-off details,
    billing figures and the workflow state declared in PROGRESS_INFO
    (draft -> confirmed -> driver_intimated -> done, or cancel).
    """
    _name = "arc.ambulance"
    _inherit = "mail.thread"

    date = fields.Date(string="Date", default=CURRENT_DATE, required=True)
    name = fields.Char(string="Name", readonly=True)  # sequence-generated in create()
    driver_id = fields.Many2one(comodel_name="arc.person", string="Driver", required=True)
    patient_id = fields.Many2one(comodel_name="arc.person", string="Patient", required=True)

    # Pickup (source) location.
    source_address = fields.Text(string="Address")
    source_landmark = fields.Char(string="Landmark")
    source_contact = fields.Char(string="Contact 1")
    source_contact_2 = fields.Char(string="Contact 2")

    # Drop-off (destination) location.
    destination_address = fields.Text(string="Address")
    destination_landmark = fields.Char(string="Landmark")
    destination_contact = fields.Char(string="Contact 1")
    destination_contact_2 = fields.Char(string="Contact 2")

    progress = fields.Selection(selection=PROGRESS_INFO, string="Progress", default="draft")
    is_cancel = fields.Selection(selection=CANCEL_INFO, string="Is Cancel")

    # Billing.
    distance = fields.Float(string="Distance(KM)", default=0.0, required=True)
    charges_km = fields.Float(string="Charges (per KM)", default=0.0, required=True)
    others = fields.Float(string="Others", default=0.0, required=True)
    total_amount = fields.Float(string="Total", default=0.0, required=True)

    writter = fields.Text(string="Writter", track_visibility="always")

    @api.multi
    def trigger_confirm(self):
        """Move the request from draft to confirmed."""
        writter = "Ambulance confirmed by {0} on {1}".format(self.env.user.name, INDIA_TIME)
        self.write({"progress": "confirmed", "writter": writter})

    @api.multi
    def trigger_inform_driver(self):
        """Record that the driver has been informed of the transfer."""
        writter = "Ambulance Informed to {0} by {1} on {2}".format(self.driver_id.name, self.env.user.name, INDIA_TIME)
        # Fix: previously wrote "confirmed" again; advance to the
        # "driver_intimated" stage declared in PROGRESS_INFO.
        self.write({"progress": "driver_intimated", "writter": writter})

    @api.multi
    def trigger_done(self):
        """Mark the transfer as completed."""
        writter = "{0} Shifted {1} by on {2}".format(self.patient_id.name, self.driver_id.name, INDIA_TIME)
        self.write({"progress": "done", "writter": writter})

    @api.multi
    def trigger_cancel(self):
        """Cancel the request."""
        writter = "Ambulance cancelled by {0} on {1}".format(self.env.user.name, INDIA_TIME)
        # Fix: cancellation previously wrote progress "confirmed"; use the
        # "cancel" state from PROGRESS_INFO so the log matches the record.
        self.write({"progress": "cancel", "writter": writter})

    @api.model
    def create(self, vals):
        """Assign the next sequence number as the record name on creation."""
        vals["name"] = self.env["ir.sequence"].next_by_code(self._name)
        return super(Ambulance, self).create(vals)
| [
"ram@hk.com"
] | ram@hk.com |
e76e790450bcd46e5b5dcd70d7a0b61000286552 | 4b7e282fe480415f5d52c0fc0429f144156190fe | /examples/campaign_management/validate_text_ad.py | c417044271af2550cbaf807dc27cd05064965c41 | [
"Apache-2.0"
] | permissive | Z2Xsoft/google-ads-python | c4750357bb19da91bb3b6bf2fa84bef9d2df36d3 | 1779d52a0446c8afb2437b0a9e103dcb849f5590 | refs/heads/main | 2023-08-18T15:22:17.840364 | 2021-09-26T04:08:53 | 2021-09-26T04:08:53 | 410,444,398 | 0 | 0 | Apache-2.0 | 2021-09-26T04:08:53 | 2021-09-26T03:55:38 | null | UTF-8 | Python | false | false | 4,625 | py | #!/usr/bin/env python
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example shows use of the validateOnly header for an expanded text ad.
No objects will be created, but exceptions will still be thrown.
"""
import argparse
import sys
from google.ads.googleads.client import GoogleAdsClient
from google.ads.googleads.errors import GoogleAdsException
def main(client, customer_id, ad_group_id):
    """Validates an expanded text ad without creating it.

    Sends a mutate with validate_only=True: nothing is persisted, but
    policy violations are still raised as GoogleAdsException.

    Args:
        client: an initialized GoogleAdsClient instance.
        customer_id: the Google Ads customer ID.
        ad_group_id: the ad group in which to validate the ad.
    """
    ad_group_ad_operation = client.get_type("AdGroupAdOperation")
    ad_group_ad = ad_group_ad_operation.create
    ad_group_service = client.get_service("AdGroupService")
    ad_group_ad.ad_group = ad_group_service.ad_group_path(
        customer_id, ad_group_id
    )
    ad_group_ad.status = client.enums.AdGroupAdStatusEnum.PAUSED

    # Create an expanded text ad.
    ad_group_ad.ad.expanded_text_ad.description = "Luxury Cruise to Mars"
    ad_group_ad.ad.expanded_text_ad.headline_part1 = (
        "Visit the Red Planet in style."
    )
    # Adds a headline that will trigger a policy violation to demonstrate error
    # handling.
    ad_group_ad.ad.expanded_text_ad.headline_part2 = (
        "Low-gravity fun for everyone!!"
    )
    ad_group_ad.ad.final_urls.append("http://www.example.com/")

    ad_group_ad_service = client.get_service("AdGroupAdService")

    # Attempt the mutate with validate_only=True.
    try:
        request = client.get_type("MutateAdGroupAdsRequest")
        request.customer_id = customer_id
        request.operations.append(ad_group_ad_operation)
        request.partial_failure = False
        request.validate_only = True
        response = ad_group_ad_service.mutate_ad_group_ads(request=request)
        # Fix: removed a stray double quote that was embedded in the message.
        print("Expanded text ad validated successfully.")
    except GoogleAdsException as ex:
        # This will be hit if there is a validation error from the server.
        print(
            f'Request with ID "{ex.request_id}" failed with status '
            f'"{ex.error.code().name}".'
        )
        print(
            "There may have been validation error(s) while adding expanded "
            "text ad."
        )
        policy_error_enum = client.get_type(
            "PolicyFindingErrorEnum"
        ).PolicyFindingError.POLICY_FINDING

        count = 1
        for error in ex.failure.errors:
            # Note: Policy violation errors are returned as PolicyFindingErrors.
            # For additional details, see
            # https://developers.google.com/google-ads/api/docs/policy-exemption/overview
            if error.error_code.policy_finding_error == policy_error_enum:
                if error.details.policy_finding_details:
                    details = (
                        error.details.policy_finding_details.policy_topic_entries
                    )
                    for entry in details:
                        print(f"{count}) Policy topic entry: \n{entry}\n")
                    count += 1
            else:
                print(
                    f"\tNon-policy finding error with message "
                    f'"{error.message}".'
                )
            if error.location:
                for (
                    field_path_element
                ) in error.location.field_path_elements:
                    print(f"\t\tOn field: {field_path_element.field_name}")
        sys.exit(1)
if __name__ == "__main__":
# GoogleAdsClient will read the google-ads.yaml configuration file in the
# home directory if none is specified.
googleads_client = GoogleAdsClient.load_from_storage(version="v8")
parser = argparse.ArgumentParser(
description="Shows how to use the ValidateOnly header."
)
# The following argument(s) should be provided to run the example.
parser.add_argument(
"-c",
"--customer_id",
type=str,
required=True,
help="The Google Ads customer ID.",
)
parser.add_argument(
"-a", "--ad_group_id", type=str, required=True, help="The Ad Group ID."
)
args = parser.parse_args()
main(googleads_client, args.customer_id, args.ad_group_id)
| [
"noreply@github.com"
] | Z2Xsoft.noreply@github.com |
0fba4f05eb71350e2e99e4e969652cb301f906c5 | ae002b7c03eacb4081c9234fa07c5201ec703941 | /tasklock/tests.py | c58ea9334ed9a1e9d4effe8b6fd9aa83638aeaf1 | [] | no_license | zbyte64/django-tasklock | 53ea4ab4454bcece54ec1835166fb2bb5998c7ae | 75f848fde78f488457be54304027f82d12f48c25 | refs/heads/master | 2021-01-18T17:25:24.749657 | 2011-10-15T05:09:15 | 2011-10-15T05:09:15 | 2,580,617 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 412 | py | from django.test import TestCase
from models import TaskLock
import tasks
class TaskLockTest(TestCase):
    """Smoke tests for TaskLock scheduling and cleanup."""

    def testPing(self):
        # Scheduling a task under a key should create exactly one lock row,
        # and the lock can then be marked ready.
        task_lock = TaskLock.objects.schedule_task('thekey', 'celery.ping')
        self.assertEqual(TaskLock.objects.all().count(), 1)
        task_lock.ready()

    def testCleanup(self):
        # Currently only verifies the cleanup task runs without error.
        #TODO load in data for it to cleanup
        tasks.cleanup_finished_task_locks()
| [
"jasonk@cukerinteractive.com"
] | jasonk@cukerinteractive.com |
1c7dc028dc165fd5bc7b49df4beb82a4a9f66004 | 9ecfa2dfa544dc77c8adc92f414f506846823e23 | /scripts/roficalc/roficalc | 350faf4ad31b9a22935d23c2cee0fb2102ef0e4a | [
"MIT"
] | permissive | grimpy/rofi-scripts | 164eab6de2174acf33eec2d2410fd6d43df22900 | 5cee30e9b0ad5fddcd0c5cea12ce6eb14bd86bdc | refs/heads/master | 2021-04-06T11:11:13.828931 | 2018-03-10T20:22:04 | 2018-03-10T20:22:04 | 124,650,044 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 539 | #!/usr/bin/env python3
import sys
from sys import argv
# allow users to use math function like sqrt sin and so on easily
from math import *
if len(argv) == 1:  # only the script name: no expression was passed
    print("Enter math expression to evaluate")
else:
    expr = ("".join(argv[1:])).strip()
    try:
        # eval of user input is intentional here (local interactive
        # calculator with math.* in scope), but it will run arbitrary Python.
        result = str(eval(expr))
    except Exception:
        # Fix: narrowed from a bare `except:` so KeyboardInterrupt and
        # SystemExit are no longer swallowed.
        print('Invalid expression: {}'.format(expr))
        sys.exit(0)
    # Right-align the expression and its result to a common width.
    width = max(len(expr), len(result)) + 3
    print("{0: >{1}}".format(expr, width))
    print('=')
    print("{0: >{1}}".format(result, width))
| [
"deboeck.jo@gmail.com"
] | deboeck.jo@gmail.com | |
b299b2e5a9e1daa9ba514883cc28f69082507351 | 694559acfaf08a145989ca1a4fa95e6a94b2abaa | /流量分析与处理/Scapy/TCP_Rest.py | 04d38e6aa528bb611c60cd4c57eb8c75fef718d1 | [] | no_license | Founderbn2014/Python_Network | 64d01e19aca84986eca48b85a222c62a338e1dff | e89bbbd54bdee5b13c9ffca8d2ea128ee4ecac6a | refs/heads/master | 2023-05-19T06:46:52.559372 | 2020-11-23T14:01:16 | 2020-11-23T14:01:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,500 | py | #!/usr/bin/env python3
# -*- encoding = utf-8 -*-
# 该代码由本人学习时编写,仅供自娱自乐!
# 本人QQ:1945962391
# 欢迎留言讨论,共同学习进步!
from scapy.all import *
from Tools.Scapy_IFACE import scapy_iface
import logging
logging.getLogger("scapy.runtime").setLevel(logging.ERROR) # 清除报错
def tcp_monitor_callback(pkt):
    """Forge and send TCP RST frames in both directions to tear down the
    connection carried by the sniffed packet `pkt`."""
    # Mirror the sniffed packet's addressing so the RSTs match the session.
    source_mac = pkt[Ether].fields['src']
    destination_mac = pkt[Ether].fields['dst']
    source_ip = pkt[IP].fields['src']
    destination_ip = pkt[IP].fields['dst']
    source_port = pkt[TCP].fields['sport']
    destination_port = pkt[TCP].fields['dport']
    seq_sn = pkt[TCP].fields['seq']
    ack_sn = pkt[TCP].fields['ack']
    # RST in the packet's own direction; flags=4 is the RST bit.
    a = Ether(src=source_mac, dst=destination_mac) / IP(src=source_ip, dst=destination_ip) / TCP(dport=destination_port,
                                                                                                 sport=source_port,
                                                                                                 flags=4, seq=seq_sn)
    # RST in the reverse direction, seeded with the sniffed ACK number.
    b = Ether(src=destination_mac, dst=source_mac) / IP(src=destination_ip, dst=source_ip) / TCP(dport=source_port,
                                                                                                 sport=destination_port,
                                                                                                 flags=4, seq=ack_sn)
    sendp(a,
          iface=global_if,  # the iface parameter cannot be used on Windows
          verbose=False)
    sendp(b,
          iface=global_if,  # the iface parameter cannot be used on Windows
          verbose=False)
def tcp_reset(src_ip, dst_ip, dst_port, ifname, src_port=None):
    """Sniff traffic matching the given endpoints and reset every matching
    TCP session via `tcp_monitor_callback`.

    Ports are passed as strings since they are concatenated into the BPF
    filter. Runs until interrupted.
    """
    global global_if
    global_if = scapy_iface(ifname)
    # Build the BPF filter; include the source port only when provided.
    if src_port is None:
        match = "src host " + src_ip + " and dst host " + dst_ip + " and dst port " + dst_port
    else:
        match = "src host " + src_ip + " and dst host " + dst_ip + " and src port " + src_port + " and dst port " + dst_port
    print("开始匹配异常流量" + match)
    # store=0 keeps sniffed packets out of memory during the long-running capture.
    sniff(prn=tcp_monitor_callback,
          filter=match,
          iface=global_if,
          store=0)
if __name__ == "__main__":
# 使用Linux解释器 & WIN解释器
tcp_reset('192.168.98.29', '192.168.98.66', '22', 'ens33')
| [
"15148365776@163.com"
] | 15148365776@163.com |
aa8e32b68deea78cb2fba849a9d3e19ff132cca3 | e45cf89071f4c625fca52dd8e549d243a79da6a5 | /tests/multithread/ctrl_thread_0.py | 3ceaa17030418dd537e258633a5afa07dca9300a | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | EddieBurning/PyCoRAM | cb78f6cebcca001a5a8ed3e868d87b11cdfb7af4 | 8eaa9a2d417a57611d78058b732ebcd86ee09759 | refs/heads/master | 2020-03-07T18:06:13.877471 | 2016-11-21T09:03:46 | 2016-11-21T09:03:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 312 | py | def ctrl_thread():
print("thread 0")
ram = CoramMemory(0, 32, 128)
channel = CoramChannel(0, 32)
addr = 0
sum = 0
for i in range(4):
ram.write(0, addr, 128)
channel.write(addr)
sum = channel.read()
addr += 512
print('thread0 sum=', sum)
ctrl_thread()
| [
"shta.ky1018@gmail.com"
] | shta.ky1018@gmail.com |
82a1e89adc5a862bba569ee647f117869509b4ea | 82f3c228f1b913ed4f37d6ab651eb2c0a9ce7221 | /Configurations/UserConfigs/2018_MCOnly/EWKZNuNuConfig.py | a9b99a9014f6db73b747f831a3d4fc3d243ac35d | [] | no_license | samhiggie/ReweightScheme | 7f388f639d02753e19eca30c8b0920ca6addb6e0 | b2e4449e8d77d244048047a79e7dd8df6b2e35f7 | refs/heads/master | 2020-09-11T19:35:53.094218 | 2019-11-12T22:42:32 | 2019-11-12T22:42:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,772 | py | import ROOT
from Configurations.Weights.CrossSectionWeightingModule.CrossSectionWeight import crossSectionWeight
from Configurations.Weights.MuIDIsoReweightingModule.MuIDIsoWeight import muIDIsoWeight_2018 as muIDIsoWeight
from Configurations.Weights.MuTrackingWeightModule.MuTrackingWeight import muTrackingWeight_2018 as muTrackingWeight
from Configurations.Weights.PileupWeightingModule.PileupWeight import pileupWeight_2018 as pileupWeight
from Configurations.Weights.TauFakeRateWeightModule.TauFakeRateWeight import tauFakeRateWeight_2018 as tauFakeRateWeight
from Configurations.Weights.TauIDModule.TauIDWeight import tauIDWeight_2018 as tauIDWeight
from Configurations.Weights.TriggerSFModule.TriggerWeight import triggerWeight_2018 as triggerWeight
from Configurations.Weights.ZPTReweightingModule.ZPTWeight import ZPTWeight_2018 as ZPTWeight
from Configurations.Weights.bTaggingWeightModule.bTaggingWeight import bTaggingWeight
from Configurations.ConfigDefinition import ReweightConfiguration
# Reweighting configuration for the 2018 EWK Z->nunu MC sample.
EWKConfiguration = ReweightConfiguration()
EWKConfiguration.name = "EWKZNuNu"

EWKConfiguration.inputFile = "/data/aloeliger/SMHTT_Selected_2018_MCOnly_Deep/EWKZNuNu.root"

# The cross-section weight needs the total number of generated events,
# read from bin 2 of the sample's `eventCount` histogram.
crossSectionWeight.sample = 'EWKZNuNu'
crossSectionWeight.year = '2018'
totalEventsFile = ROOT.TFile.Open("/data/aloeliger/SMHTT_Selected_2018_MCOnly_Deep/EWKZNuNu.root")
crossSectionWeight.totalEvents = totalEventsFile.eventCount.GetBinContent(2)
totalEventsFile.Close()

# Pile-up weighting is sample/year specific and must be initialised up front.
pileupWeight.year = '2018'
pileupWeight.sample = 'EWKZNuNu'
pileupWeight.InitPileupWeightings(pileupWeight)

# Full set of per-event weights applied to this sample.
EWKConfiguration.listOfWeights = [
    crossSectionWeight,
    muIDIsoWeight,
    muTrackingWeight,
    pileupWeight,
    tauFakeRateWeight,
    tauIDWeight,
    triggerWeight,
    ZPTWeight,
    bTaggingWeight,
    ]
"aloelige@cern.ch"
] | aloelige@cern.ch |
af8953741c5d9fb31421e8d4289a5a1c21177f09 | 45758bad12c09ead188ee1cb7f121dab4dd5eeca | /sandbox/urls.py | 8eb5830f3f7270427a8d6f13d16ec3d99fe39019 | [
"MIT"
] | permissive | sveetch/django-icomoon | 35dd55962a4423b8930dbcb884ed2a8aa2d4ef67 | 327b70e5509811db7b46f2baa8d301a49e626167 | refs/heads/master | 2021-12-28T21:55:05.563118 | 2021-12-22T00:17:53 | 2021-12-22T00:17:53 | 42,755,203 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 602 | py | """
Sandbox URL Configuration
"""
from django.conf import settings
from django.conf.urls.static import static
from django.contrib import admin
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.urls import include, path
urlpatterns = [
    # Django admin interface.
    path("admin/", admin.site.urls),
    # Mount the icomoon app's routes at the site root.
    path('', include('icomoon.urls', namespace='icomoon')),
]

# This is only needed when using runserver with demo settings
if settings.DEBUG:
    # Serve static and media files directly in development.
    urlpatterns = (
        urlpatterns
        + staticfiles_urlpatterns()
        + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
    )
| [
"sveetch@gmail.com"
] | sveetch@gmail.com |
9113b4f9227eef4b96bbe40c1c53de88cfa930b6 | 95267d92a6438665cf7848de229bea691d537f81 | /EGE_27/dosroc2.py | 8c5c6833ef5acd30d1a39dd7040ab57e4f8b5fdc | [
"MIT"
] | permissive | webkadiz/olympiad-problems | 2874eb1846c59778e70bcdc9550b3484bc3aa9cc | b3a8a3e83d0930947a89ec42e86e3058f464ea40 | refs/heads/master | 2022-11-15T23:48:39.854546 | 2022-11-04T12:55:48 | 2022-11-04T12:55:48 | 228,297,097 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 586 | py | n = int(input())
m1 = m2 = m171 = m172 = 0
L = R = 0
for i in range(n):
x = int(input())
if x % 17 == 0 and x % 2 == 0 and x + m2 > L + R:
L = x; R = m2
if x % 17 == 0 and x % 2 == 1 and x + m1 > L + R:
L = x; R = m1
if x % 2 == 0 and x + m172 > L + R:
L = x; R = m172
if x % 2 == 1 and x + m171 > L + R:
L = x; R = m171
if x % 17 == 0 and x % 2 == 0 and x > m172:
m172 = x
elif x % 17 == 0 and x > m171:
m171 = x
elif x % 2 == 0 and x > m2:
m2 = x
elif x > m1:
m1 = x
print(L, R)
| [
"webkadiz@gmail.com"
] | webkadiz@gmail.com |
14bf2f67d2e6064de9cf34ea6f8fee72bf41afdf | 9b5cbd04b771b6fc4c3a6a1715622d7a0d579f0f | /src/app/main.py | 6a11dd25c388335e9288137d16e89f922ce7b0a1 | [] | no_license | OneTesseractInMultiverse/fuzzy-guacamole | 7b091661b09de7859a7620dfb74e9cf371b5e223 | 141bef131c25684e48670ede76f4404291f42f9a | refs/heads/main | 2023-03-09T21:52:44.325106 | 2021-02-25T02:03:02 | 2021-02-25T02:03:02 | 342,083,471 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,710 | py | from fastapi import FastAPI
from .routers import sample, application
from starlette.middleware.cors import CORSMiddleware
from .database import SessionLocal, engine
from starlette.requests import Request
from starlette.responses import Response
from . import models
models.Base.metadata.create_all(bind=engine)
# -----------------------------------------------------------------------------
# APPLICATION OBJECT
# -----------------------------------------------------------------------------
app = FastAPI(
title="Example Repo",
description="An identity management microservice written in Python and Cloud Native",
version="1.0.0",
openapi_url="/api/openapi.json",
docs_url="/api/docs",
redoc_url=None
)
@app.middleware("http")
async def db_session_middleware(request: Request, call_next):
response = Response("Cannot establish connection with persistence provider", status_code=500)
try:
request.state.db = SessionLocal()
response = await call_next(request)
finally:
request.state.db.close()
return response
# -----------------------------------------------------------------------------
# CORS RULES
# -----------------------------------------------------------------------------
origins = [
"*"
]
app.add_middleware(
CORSMiddleware,
allow_origins=origins,
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
# -----------------------------------------------------------------------------
# ADD ROUTERS
# -----------------------------------------------------------------------------
app.include_router(sample.router, prefix="/api/v1")
app.include_router(application.router, prefix="/api/v1")
| [
"pedro@subvertic.com"
] | pedro@subvertic.com |
67f7b7355c99f3dcd4073f6246ec09ea455407a3 | 5b93930ce8280b3cbc7d6b955df0bfc5504ee99c | /nodes/VanderPlas17Python/D_Chapter3/A_Installingand/index.py | 5764989f53230bdcbd9f37fd9ad573b50f12c66f | [] | no_license | nimra/module_gen | 8749c8d29beb700cac57132232861eba4eb82331 | 2e0a4452548af4fefd4cb30ab9d08d7662122cf4 | refs/heads/master | 2022-03-04T09:35:12.443651 | 2019-10-26T04:40:49 | 2019-10-26T04:40:49 | 213,980,247 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,710 | py | # Lawrence McAfee
# ~~~~~~~~ import ~~~~~~~~
from modules.node.HierNode import HierNode
from modules.node.LeafNode import LeafNode
from modules.node.Stage import Stage
from modules.node.block.CodeBlock import CodeBlock as cbk
from modules.node.block.HierBlock import HierBlock as hbk
from modules.node.block.ImageBlock import ImageBlock as ibk
from modules.node.block.ListBlock import ListBlock as lbk
from modules.node.block.MarkdownBlock import MarkdownBlock as mbk
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
blocks = [
# CHAPTER 3
# Data Manipulation with Pandas
#
#
#
#
# In the previous chapter, we dove into detail on NumPy and its ndarray object, which
# provides efficient storage and manipulation of dense typed arrays in Python. Here
# we’ll build on this knowledge by looking in detail at the data structures provided by
# the Pandas library. Pandas is a newer package built on top of NumPy, and provides an
# efficient implementation of a DataFrame. DataFrames are essentially multidimen‐
# sional arrays with attached row and column labels, and often with heterogeneous
# types and/or missing data. As well as offering a convenient storage interface for
# labeled data, Pandas implements a number of powerful data operations familiar to
# users of both database frameworks and spreadsheet programs.
# As we saw, NumPy’s ndarray data structure provides essential features for the type of
# clean, well-organized data typically seen in numerical computing tasks. While it
# serves this purpose very well, its limitations become clear when we need more flexi‐
# bility (attaching labels to data, working with missing data, etc.) and when attempting
# operations that do not map well to element-wise broadcasting (groupings, pivots,
# etc.), each of which is an important piece of analyzing the less structured data avail‐
# able in many forms in the world around us. Pandas, and in particular its Series and
# DataFrame objects, builds on the NumPy array structure and provides efficient access
# to these sorts of “data munging” tasks that occupy much of a data scientist’s time.
# In this chapter, we will focus on the mechanics of using Series, DataFrame, and
# related structures effectively. We will use examples drawn from real datasets where
# appropriate, but these examples are not necessarily the focus.
#
# Installing and Using Pandas
# Installing Pandas on your system requires NumPy to be installed, and if you’re build‐
# ing the library from source, requires the appropriate tools to compile the C and
#
#
# 97
#
# Cython sources on which Pandas is built. Details on this installation can be found in
# the Pandas documentation. If you followed the advice outlined in the preface and
# used the Anaconda stack, you already have Pandas installed.
# Once Pandas is installed, you can import it and check the version:
# In[1]: import pandas
# pandas.__version__
# Out[1]: '0.18.1'
#
# Just as we generally import NumPy under the alias np, we will import Pandas under
# the alias pd:
# In[2]: import pandas as pd
# This import convention will be used throughout the remainder of this book.
#
#
# Reminder About Built-In Documentation
# As you read through this chapter, don’t forget that IPython gives you the ability to
# quickly explore the contents of a package (by using the tab-completion feature) as
# well as the documentation of various functions (using the ? character). (Refer back to
# “Help and Documentation in IPython” on page 3 if you need a refresher on this.)
# For example, to display all the contents of the pandas namespace, you can type this:
# In [3]: pd.<TAB>
# And to display the built-in Pandas documentation, you can use this:
# In [4]: pd?
# More detailed documentation, along with tutorials and other resources, can be found
# at http://pandas.pydata.org/.
#
#
#
# Introducing Pandas Objects
# At the very basic level, Pandas objects can be thought of as enhanced versions of
# NumPy structured arrays in which the rows and columns are identified with labels
# rather than simple integer indices. As we will see during the course of this chapter,
# Pandas provides a host of useful tools, methods, and functionality on top of the basic
# data structures, but nearly everything that follows will require an understanding of
# what these structures are. Thus, before we go any further, let’s introduce these three
# fundamental Pandas data structures: the Series, DataFrame, and Index.
# We will start our code sessions with the standard NumPy and Pandas imports:
# In[1]: import numpy as np
# import pandas as pd
#
#
# 98 | Chapter 3: Data Manipulation with Pandas
#
]
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class Content(LeafNode):
    """Leaf node carrying the extracted text blocks of the
    "Installing and Using Pandas" section."""
    def __init__(self):
        super().__init__(
            "Installing and Using Pandas",
            # Stage.REMOVE_EXTRANEOUS,
            # Stage.ORIG_BLOCKS,
            # Stage.CUSTOM_BLOCKS,
            # Stage.ORIG_FIGURES,
            # Stage.CUSTOM_FIGURES,
            # Stage.CUSTOM_EXERCISES,
        )
        # Attach every module-level block to this leaf (list built for side effects).
        [self.add(a) for a in blocks]
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class Installingand(HierNode):
    """Hierarchy node for the section, wrapping its Content leaf."""
    def __init__(self):
        super().__init__("Installing and Using Pandas")
        self.add(Content())
# eof
| [
"lawrence.mcafee@gmail.com"
] | lawrence.mcafee@gmail.com |
c5a37467b8d6e7b6fd1b0eddf40d765701fd7025 | 10e19b5cfd59208c1b754fea38c34cc1fb14fdbe | /desktop/core/ext-py/Babel-0.9.6/babel/messages/tests/data/project/file1.py | 7f592a8354bb01e56e129eb05907aa597c5a3c21 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | sarvex/hue | 780d28d032edd810d04e83f588617d1630ec2bef | 6e75f0c4da2f3231e19c57bdedd57fb5a935670d | refs/heads/master | 2023-08-15T21:39:16.171556 | 2023-05-01T08:37:43 | 2023-05-01T08:37:43 | 32,574,366 | 0 | 0 | Apache-2.0 | 2023-09-14T16:55:28 | 2015-03-20T09:18:18 | Python | UTF-8 | Python | false | false | 207 | py | # -*- coding: utf-8 -*-
# file1.py for tests
from gettext import gettext as _
def foo():
# TRANSLATOR: This will be a translator coment,
# that will include several lines
print _('bar')
| [
"bcwalrus@cloudera.com"
] | bcwalrus@cloudera.com |
436fadf058f83c73cbe9654035f8c721ee01dd1e | ee70ae3bc47a885b5c372f3de0077c7f7b61ad41 | /application/machinelearning/demo/demo_tree.py | bb45527540f31f4e878927a6c029ca86d486b79a | [] | no_license | plutoese/mars | e2518631c36772812c70af4aa52de10dd5f1d6a7 | 28f6ded1275e47c83f2f3bad5d0c7063d51c779f | refs/heads/master | 2021-01-10T04:37:18.245424 | 2016-01-27T02:48:25 | 2016-01-27T02:48:25 | 47,901,707 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 860 | py | # coding=UTF-8
from sklearn import tree
from sklearn.datasets import load_iris
from sklearn.externals.six import StringIO
from graphviz import Source
# Fit a decision tree on the iris dataset and dump the tree as Graphviz
# DOT text to stdout.
iris = load_iris()
clf = tree.DecisionTreeClassifier()
print(iris.data)
print(iris.target)
clf = clf.fit(iris.data, iris.target)
dot_data = StringIO()
tree.export_graphviz(clf, out_file=dot_data)
print((dot_data.getvalue()))
# Disabled variant: render the DOT output to a file via graphviz.Source.
'''
src = Source(dot_data.getvalue())
print(type(src))
src.render('test-output/holy-grenade.gv', view=True)'''
# Disabled variant: train the same classifier on a local lenses dataset.
'''
fr = open('d:/data/lenses.txt')
lenses = [inst.strip().split('\t') for inst in fr.readlines()]
lensesLabels = ['age','prescript','astigmatic','tearRate']
lenses_data = [item[0:4] for item in lenses]
lenses_target = [item[4] for item in lenses]
print(lenses_data)
print(lenses_target)
clf = tree.DecisionTreeClassifier()
clf = clf.fit(lenses_data, lenses_target)'''
| [
"glen.zhang7@gmail.com"
] | glen.zhang7@gmail.com |
c475df4a12601af3051abf2c9415ab99487f6153 | 2d96050f870d26703d7e1ff8f1c472592c70ecf7 | /accounts/models.py | 9c1e1d0ee1b76ddb440d56310587d76850cbe521 | [] | no_license | Pythonian/tweetme | 92b211028fc683f515b98df8a29afe61e25bd9d6 | 5858b5977ff1bfbf8ee4de03d059f90defa1e3d1 | refs/heads/master | 2023-08-05T05:28:28.796794 | 2020-07-03T07:42:02 | 2020-07-03T07:42:02 | 276,833,816 | 0 | 0 | null | 2021-09-22T19:21:14 | 2020-07-03T07:14:34 | Python | UTF-8 | Python | false | false | 2,826 | py | from django.conf import settings
from django.db import models
from django.db.models.signals import post_save
from django.urls import reverse_lazy
class UserProfileManager(models.Manager):
    """Manager for UserProfile: follow toggling, lookups and recommendations."""

    use_for_related_fields = True

    def all(self):
        """Return all profiles; when used as a related manager, exclude the
        related user's own profile."""
        qs = self.get_queryset().all()
        # self.instance only exists when accessed via a relation; the
        # original bare `except:` hid every other error, so catch
        # AttributeError explicitly.
        try:
            if self.instance:
                qs = qs.exclude(user=self.instance)
        except AttributeError:
            pass
        return qs

    def toggle_follow(self, user, to_toggle_user):
        """Follow/unfollow `to_toggle_user` for `user`.

        Returns True if `user` is now following `to_toggle_user`.
        """
        user_profile, created = UserProfile.objects.get_or_create(user=user)  # (user_obj, true)
        if to_toggle_user in user_profile.following.all():
            user_profile.following.remove(to_toggle_user)
            added = False
        else:
            user_profile.following.add(to_toggle_user)
            added = True
        return added

    def is_following(self, user, followed_by_user):
        """Return True if `user` already follows `followed_by_user`."""
        user_profile, created = UserProfile.objects.get_or_create(user=user)
        if created:
            # A freshly created profile cannot be following anyone yet.
            return False
        return followed_by_user in user_profile.following.all()

    def recommended(self, user, limit_to=10):
        """Return up to `limit_to` random profiles that `user` is not yet
        following (excluding `user`'s own profile)."""
        # Leftover debug print and a dead duplicate `following = ...`
        # assignment were removed here.
        profile = user.profile
        following = profile.get_following()
        qs = self.get_queryset().exclude(user__in=following).exclude(id=profile.id).order_by("?")[:limit_to]
        return qs
class UserProfile(models.Model):
    """Per-user profile tracking which users this user follows."""

    user = models.OneToOneField(settings.AUTH_USER_MODEL, related_name='profile', on_delete=models.CASCADE) # user.profile
    following = models.ManyToManyField(settings.AUTH_USER_MODEL, blank=True, related_name='followed_by')
    # user.profile.following -- users i follow
    # user.followed_by -- users that follow me -- reverse relationship

    objects = UserProfileManager() # UserProfile.objects.all()
    # abc = UserProfileManager() # UserProfile.abc.all()

    def __str__(self):
        # NOTE(review): renders the follow count rather than a username --
        # confirm this is intentional.
        return str(self.following.all().count())

    def get_following(self):
        # Exclude the owner so a user never appears in their own list.
        users = self.following.all() # User.objects.all().exclude(username=self.user.username)
        return users.exclude(username=self.user.username)

    def get_follow_url(self):
        return reverse_lazy("profiles:follow", kwargs={"username":self.user.username})

    def get_absolute_url(self):
        return reverse_lazy("profiles:detail", kwargs={"username":self.user.username})
# cfe = User.objects.first()
# User.objects.get_or_create() # (user_obj, true/false)
# cfe.save()
def post_save_user_receiver(sender, instance, created, *args, **kwargs):
    """post_save receiver: ensure every newly created user gets a profile."""
    if created:
        # get_or_create keeps this idempotent if a profile somehow already
        # exists; the returned (profile, created) tuple is intentionally
        # discarded (the original bound it to an unused local).
        UserProfile.objects.get_or_create(user=instance)
# celery + redis
# deferred task
# Create the profile right after each new user row is saved.
post_save.connect(post_save_user_receiver, sender=settings.AUTH_USER_MODEL)
| [
"prontomaster@gmail.com"
] | prontomaster@gmail.com |
fbf85905651a6049958aa7abf3d844158023081a | 2ec97b62d7edf0f2e257622e0027f12bfdb3651a | /custom_components/covid19_nswhealth_tests/sensor.py | 72a601647fc62554cd94058c27c79948cacd0602 | [
"Unlicense"
] | permissive | Mirec511/HomeAssistantConfig | 6837ed67ef73600410b6e07b82ef641930762429 | 200bca51e91ef3c844f6f2ddc004a7e82da3a04e | refs/heads/master | 2023-02-08T00:03:38.825829 | 2021-01-02T00:42:42 | 2021-01-02T00:42:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,094 | py | """Sensor platform for NSW Air Quality"""
import datetime
import logging
from datetime import timedelta
import homeassistant.helpers.config_validation as cv
import pandas as pd
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import ATTR_ATTRIBUTION, CONF_NAME
from homeassistant.helpers.entity import Entity
from homeassistant.util import Throttle
__version__ = "0.0.1"

_LOGGER = logging.getLogger(__name__)

REQUIREMENTS = []

# Unit shown next to the sensor state (a raw test count).
DEFAULT_UOM = "Tests"

# DEFAULT_SCAN_INTERVAL = timedelta(hours=1)
# SCAN_INTERVAL = timedelta(hours=1)
ICON = "mdi:biohazard"
# Minimum interval between CSV downloads (300 minutes = 5 hours).
MIN_TIME_BETWEEN_UPDATES = datetime.timedelta(minutes=300)

PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({vol.Required(CONF_NAME): cv.string})
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the NSW Health test-count sensor from the platform config."""
    add_entities([NSWHSensor(name=config[CONF_NAME])])
class NSWHSensor(Entity):
    """Sensor exposing NSW Health COVID-19 PCR test counts for local areas."""

    def __init__(self, name: str):
        self._state = None
        self._name = name
        self._attributes = {}

    @property
    def name(self):
        # NOTE(review): returns a fixed entity name; the configured name is
        # stored in self._name but never used -- confirm this is intended.
        return "covid_19_nswh_local_tests"

    @property
    def state(self):
        return self._state

    @property
    def icon(self):
        return ICON

    @property
    def state_attributes(self):
        return self._attributes

    @property
    def unit_of_measurement(self):
        return DEFAULT_UOM

    @staticmethod
    def _date_span(df):
        """Return a "1st: <first> - Last: <last>" string for df's test_date column."""
        earliest = df["test_date"].min().strftime("%a %d %b")
        latest = df["test_date"].max().strftime("%a %d %b")
        return "1st: " + str(earliest) + " - Last: " + str(latest)

    @Throttle(MIN_TIME_BETWEEN_UPDATES)
    def update(self):
        """Download the NSW Health PCR testing CSV and refresh counts/attributes."""
        url = "https://data.nsw.gov.au/data/dataset/60616720-3c60-4c52-b499-751f31e3b132/resource/945c6204-272a-4cad-8e33-dde791f5059a/download/pcr_testing_table1_location.csv"
        df = pd.read_csv(url, parse_dates=[0])

        # Three geographic subsets of the state-wide test records: the
        # "X800" health district, LGA code 17310 and postcode 2340.
        df_hneh = df[df.lhd_2010_code == "X800"]
        df_trc = df[df.lga_code19 == 17310.0]
        df_tamw = df[df.postcode == 2340.0]

        # Repeated count/date-range formatting from the original is now
        # factored through _date_span(); a dead `self._state = 0` reset
        # (immediately overwritten below) was removed.
        self._attributes = {}
        self._attributes["hneh"] = len(df_hneh)
        self._attributes["hneh_dates"] = self._date_span(df_hneh)
        self._attributes["trc"] = len(df_trc)
        self._attributes["trc_dates"] = self._date_span(df_trc)
        self._attributes["tamw"] = len(df_tamw)
        self._attributes["tamw_dates"] = self._date_span(df_tamw)
        self._attributes[ATTR_ATTRIBUTION] = "Data provided by NSW Health"
        self._state = len(df_tamw)
| [
"thomas@thomasbaxter.info"
] | thomas@thomasbaxter.info |
b42d07f5043047e9017805411b5a44416a02059c | 115022e4e1e2e78e99a73a87e8172efb16faecd7 | /accounts/admin.py | 85fa978e83658b9d3797c78642601e6e407a2d7a | [] | no_license | navill/toy_project | a043865c3c40c3ceb3e07c7662347df225f62203 | 7d2ea837cfc4543d219c2096ab8f156f77d118b7 | refs/heads/master | 2022-12-11T04:09:39.845515 | 2020-02-14T02:56:40 | 2020-02-14T02:56:40 | 228,669,658 | 1 | 0 | null | 2022-12-08T03:19:23 | 2019-12-17T17:38:48 | Python | UTF-8 | Python | false | false | 437 | py | from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
# from .forms import CustomUserCreationForm, CustomUserChangeForm
from .models import User
#
# class CustomUserAdmin(UserAdmin):
# add_form = CustomUserCreationForm
# form = CustomUserChangeForm
# model = User
# list_display = ['email', 'first_name', 'last_name']
#
#
# admin.site.register(User, CustomUserAdmin)
admin.site.register(User) | [
"blue_jihoon@naver.com"
] | blue_jihoon@naver.com |
f70949e09e7699a52a5ae572af19486fb8b2dc86 | b5ca0a2ce47fdb4306bbdffcb995eb7e6eac1b23 | /Python/Strings/Alphabet Rangoli/Alphabet_Rangoli.py | 26cb874f6a5802822cd69e78986885dcdb2c30df | [] | no_license | rsoemardja/HackerRank | ac257a66c3649534197b223b8ab55011d84fb9e1 | 97d28d648a85a16fbe6a5d6ae72ff6503a063ffc | refs/heads/master | 2022-04-14T22:46:03.412359 | 2020-04-03T07:44:04 | 2020-04-03T07:44:04 | 217,687,370 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 556 | py | import string
size = int(input())
alphabet = string.ascii_lowercase

# Top half (rows above the widest line): each row is 2*size-1 cells of '-'
# with letters mirrored around the centre column; width grows as i shrinks.
for i in range(size - 1,0, -1):
    row = ["-"] * (size * 2 - 1)
    for j in range(0, size - i):
        row[size - 1 - j] = alphabet[j + i]
        row[size - 1 + j] = alphabet[j + i]
    print("-".join(row))

# Bottom half including the widest middle row (printed first at i == 0).
for i in range(0, size):
    row = ["-"] * (size * 2 - 1)
    for j in range(0, size - i):
        row[size - 1 - j] = alphabet[j + i]
        row[size - 1 + j] = alphabet[j + i]
    print("-".join(row))
if __name__ == '__main__':
n = int(input())
print_rangoli(n) | [
"rsoemardja@gmail.com"
] | rsoemardja@gmail.com |
5d796f0381a5ebddca1568879724619b4be56403 | 573a2fa5094d510a9d44a361366a67efda5a3e8a | /blender/arm/logicnode/variable_scene.py | 2aa08fdf4e4328d2cf637bf86b1a6abf32e6ccd9 | [
"GPL-2.0-only",
"Zlib"
] | permissive | kirumon/armory | ca7126a47172abe676da9e36ff13052a008bc812 | 4df19ef970ba76d6f99d0b07b44048e2e148e4ff | refs/heads/master | 2020-04-19T22:51:41.810157 | 2019-01-30T16:45:54 | 2019-01-30T16:45:54 | 168,481,317 | 1 | 0 | Zlib | 2019-01-31T07:21:29 | 2019-01-31T07:21:28 | null | UTF-8 | Python | false | false | 569 | py | import bpy
from bpy.props import *
from bpy.types import Node, NodeSocket
from arm.logicnode.arm_nodes import *
class SceneNode(Node, ArmLogicTreeNode):
    '''Scene node'''
    # Blender registration identifiers -- runtime data, do not rename.
    bl_idname = 'LNSceneNode'
    bl_label = 'Scene'
    bl_icon = 'QUESTION'
    # Name of the referenced scene, filled via the search box in draw_buttons.
    property0: StringProperty(name='', default='')

    def init(self, context):
        # Single output socket carrying the selected scene.
        self.outputs.new('NodeSocketShader', 'Scene')

    def draw_buttons(self, context, layout):
        # Searchable dropdown over bpy.data.scenes, bound to property0.
        layout.prop_search(self, 'property0', bpy.data, 'scenes', icon='NONE', text='')
# Register this node under the 'Variable' category of the logic-node menu.
add_node(SceneNode, category='Variable')
| [
"lubos.lenco@gmail.com"
] | lubos.lenco@gmail.com |
9e94ec20208d9e3ca2e8fc55036b9d896fa09936 | 206c10808b6224f7d8236e27cc555e723af695d9 | /tomodachi/envelope/protobuf_base.py | 8a43ed27bf914590e789c8bd1487d3173828be56 | [
"MIT"
] | permissive | xdmiodz/tomodachi | 3280209ae49100ec902e3b15c323b38e7480cdd3 | 7ca998a421dd724df5967d5baa0cf79f5112b79b | refs/heads/master | 2023-03-15T19:22:16.381212 | 2023-01-20T07:34:48 | 2023-01-20T07:34:48 | 200,020,833 | 0 | 2 | MIT | 2023-03-08T00:00:01 | 2019-08-01T09:30:22 | Python | UTF-8 | Python | false | false | 3,768 | py | import base64
import logging
import time
import uuid
import zlib
from typing import Any, Dict, Tuple, Union
from tomodachi.envelope.proto_build.protobuf.sns_sqs_message_pb2 import SNSSQSMessage
PROTOCOL_VERSION = "tomodachi-protobuf-base--1.0.0"
class ProtobufBase(object):
    """Message envelope: wraps protobuf payloads in an SNSSQSMessage, with
    zlib compression for large payloads and base64 encoding for transport.

    The wire format is consumed by parse_message(); the deprecated
    data_encoding branches there keep older producers readable.
    """

    @classmethod
    def validate(cls, **kwargs: Any) -> None:
        # Ensure the caller supplied a protobuf message class to decode into.
        if "proto_class" not in kwargs:
            raise Exception("No proto_class defined")
        if kwargs.get("proto_class", None).__class__.__name__ != "GeneratedProtocolMessageType":
            raise Exception("proto_class is not a GeneratedProtocolMessageType")

    @classmethod
    async def build_message(cls, service: Any, topic: str, data: Any, **kwargs: Any) -> str:
        # Serialize the payload; compress it once it exceeds 60000 bytes.
        message_data = data.SerializeToString()
        data_encoding = "proto"
        if len(message_data) > 60000:
            message_data = zlib.compress(data.SerializeToString())
            data_encoding = "gzip_proto"

        message = SNSSQSMessage()
        message.service.name = getattr(service, "name", None)
        message.service.uuid = getattr(service, "uuid", None)
        message.metadata.message_uuid = "{}.{}".format(getattr(service, "uuid", ""), str(uuid.uuid4()))
        message.metadata.protocol_version = PROTOCOL_VERSION
        message.metadata.timestamp = time.time()
        message.metadata.topic = topic
        message.metadata.data_encoding = data_encoding
        message.data = message_data

        # The whole envelope is base64-encoded so it can travel as text.
        return base64.b64encode(message.SerializeToString()).decode("ascii")

    @classmethod
    async def parse_message(
        cls, payload: str, proto_class: Any = None, validator: Any = None, **kwargs: Any
    ) -> Union[Dict, Tuple]:
        message = SNSSQSMessage()
        message.ParseFromString(base64.b64decode(payload))

        message_uuid = message.metadata.message_uuid
        timestamp = message.metadata.timestamp

        raw_data = None
        obj = None
        # Without a proto_class the payload is passed through undecoded.
        if not proto_class:
            raw_data = message.data
        else:
            obj = proto_class()
            if message.metadata.data_encoding == "proto":
                obj.ParseFromString(message.data)
            elif message.metadata.data_encoding == "base64": # deprecated
                obj.ParseFromString(base64.b64decode(message.data))
            elif message.metadata.data_encoding == "gzip_proto":
                obj.ParseFromString(zlib.decompress(message.data))
            elif message.metadata.data_encoding == "base64_gzip_proto": # deprecated
                obj.ParseFromString(zlib.decompress(base64.b64decode(message.data)))
            elif message.metadata.data_encoding == "raw":
                raw_data = message.data

        if validator is not None:
            try:
                if hasattr(validator, "__func__"):
                    # for static functions
                    validator.__func__(obj)
                else:
                    # for non-static functions
                    validator(obj)
            except Exception as e:
                # Log the validation failure, then re-raise for the caller.
                logging.getLogger("envelope.protobuf_base").warning(e.__str__())
                raise e

        # Returns (parsed_envelope_dict, message_uuid, timestamp).
        return (
            {
                "service": {"name": message.service.name, "uuid": message.service.uuid},
                "metadata": {
                    "message_uuid": message.metadata.message_uuid,
                    "protocol_version": message.metadata.protocol_version,
                    "timestamp": message.metadata.timestamp,
                    "topic": message.metadata.topic,
                    "data_encoding": message.metadata.data_encoding,
                },
                "data": raw_data if raw_data is not None else obj,
            },
            message_uuid,
            timestamp,
        )
| [
"hello@carloscar.com"
] | hello@carloscar.com |
8153fbc21d01a5e302697a944d2f268e1cb21908 | 41a4eeaf62a36d7c57ad55393996787bb55ba6b7 | /venv/lib/python3.7/site-packages/kubernetes/client/models/v1_env_var.py | b756905e43471ee3b9b8e089d217f9efdd3b5ef4 | [] | no_license | jicowan/group-operator | c7a20ff03584da9ace19489bc3d27b9fb22a066c | bac6e51aef0d9836679621e3ce7e55f4c1ead402 | refs/heads/master | 2021-07-14T11:45:30.062219 | 2019-09-26T15:26:52 | 2019-09-26T15:26:52 | 209,454,861 | 10 | 4 | null | 2021-07-01T17:23:07 | 2019-09-19T03:29:54 | Python | UTF-8 | Python | false | false | 5,399 | py | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.14.5
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1EnvVar(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """

    """
    Attributes:
      swagger_types (dict): The key is attribute name
      and the value is attribute type.
      attribute_map (dict): The key is attribute name
      and the value is json key in definition.
    """
    # Maps each model attribute to its swagger-declared type name.
    swagger_types = {
        'name': 'str',
        'value': 'str',
        'value_from': 'V1EnvVarSource'
    }

    # Maps each model attribute to its JSON key in the API payload.
    attribute_map = {
        'name': 'name',
        'value': 'value',
        'value_from': 'valueFrom'
    }

    def __init__(self, name=None, value=None, value_from=None):
        """
        V1EnvVar - a model defined in Swagger
        """

        self._name = None
        self._value = None
        self._value_from = None
        self.discriminator = None

        # `name` is required (its setter rejects None); the others are
        # optional and only assigned when provided.
        self.name = name
        if value is not None:
            self.value = value
        if value_from is not None:
            self.value_from = value_from

    @property
    def name(self):
        """
        Gets the name of this V1EnvVar.
        Name of the environment variable. Must be a C_IDENTIFIER.

        :return: The name of this V1EnvVar.
        :rtype: str
        """
        return self._name

    @name.setter
    def name(self, name):
        """
        Sets the name of this V1EnvVar.
        Name of the environment variable. Must be a C_IDENTIFIER.

        :param name: The name of this V1EnvVar.
        :type: str
        """
        if name is None:
            raise ValueError("Invalid value for `name`, must not be `None`")

        self._name = name

    @property
    def value(self):
        """
        Gets the value of this V1EnvVar.
        Variable references $(VAR_NAME) are expanded using the previous defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to \"\".

        :return: The value of this V1EnvVar.
        :rtype: str
        """
        return self._value

    @value.setter
    def value(self, value):
        """
        Sets the value of this V1EnvVar.
        Variable references $(VAR_NAME) are expanded using the previous defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to \"\".

        :param value: The value of this V1EnvVar.
        :type: str
        """

        self._value = value

    @property
    def value_from(self):
        """
        Gets the value_from of this V1EnvVar.
        Source for the environment variable's value. Cannot be used if value is not empty.

        :return: The value_from of this V1EnvVar.
        :rtype: V1EnvVarSource
        """
        return self._value_from

    @value_from.setter
    def value_from(self, value_from):
        """
        Sets the value_from of this V1EnvVar.
        Source for the environment variable's value. Cannot be used if value is not empty.

        :param value_from: The value_from of this V1EnvVar.
        :type: V1EnvVarSource
        """

        self._value_from = value_from

    def to_dict(self):
        """
        Returns the model properties as a dict
        """
        result = {}

        # Recursively serialize nested models (anything with a to_dict),
        # including models inside lists and dict values.
        for attr, _ in iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())

    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()

    def __eq__(self, other):
        """
        Returns true if both objects are equal
        """
        if not isinstance(other, V1EnvVar):
            return False

        # Compares all instance attributes, including private backing fields.
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
| [
"jicowan@f0189801a302.ant.amazon.com"
] | jicowan@f0189801a302.ant.amazon.com |
33abb6dd9defeaef05ef48f283b9bbc6035dd8e9 | e71fa62123b2b8f7c1a22acb1babeb6631a4549b | /examples/inheritance1.py | 70ebab52d7eed62adfcc64973549ae62dd19fed5 | [
"BSD-2-Clause"
] | permissive | timgates42/XlsxWriter | 40480b6b834f28c4a7b6fc490657e558b0a466e5 | 7ad2541c5f12b70be471b447ab709c451618ab59 | refs/heads/main | 2023-03-16T14:31:08.915121 | 2022-07-13T23:43:45 | 2022-07-13T23:43:45 | 242,121,381 | 0 | 0 | NOASSERTION | 2020-02-21T11:14:55 | 2020-02-21T11:14:55 | null | UTF-8 | Python | false | false | 1,766 | py | ##############################################################################
#
# Example of how to subclass the Workbook and Worksheet objects. We also
# override the default worksheet.write() method to show how that is done.
#
# SPDX-License-Identifier: BSD-2-Clause
# Copyright 2013-2022, John McNamara, jmcnamara@cpan.org
#
from xlsxwriter.workbook import Workbook
from xlsxwriter.worksheet import Worksheet
from xlsxwriter.worksheet import convert_cell_args
class MyWorksheet(Worksheet):
    """Worksheet subclass whose write() reverses string values before
    storing them; anything else is delegated to the parent class."""

    @convert_cell_args
    def write(self, row, col, *args):
        value = args[0]
        if not isinstance(value, str):
            # Non-string data is handled by the stock write() implementation.
            return super(MyWorksheet, self).write(row, col, *args)
        # Strings are reversed to make the override visible in the output.
        return self.write_string(row, col, value[::-1])
class MyWorkbook(Workbook):
    """Workbook subclass that hands out MyWorksheet objects instead of the
    default Worksheet class."""

    def add_worksheet(self, name=None):
        # Delegate to the parent, forcing our custom worksheet class.
        return super(MyWorkbook, self).add_worksheet(name, MyWorksheet)
# Create a new MyWorkbook object.
workbook = MyWorkbook('inheritance1.xlsx')

# The code from now on will be the same as a normal "Workbook" program.
worksheet = workbook.add_worksheet()

# Write some data to test the subclassing. The two string cells will come
# out reversed because MyWorksheet.write() reverses str values.
worksheet.write('A1', 'Hello')
worksheet.write('A2', 'World')
worksheet.write('A3', 123)
worksheet.write('A4', 345)

workbook.close()
| [
"jmcnamara@cpan.org"
] | jmcnamara@cpan.org |
db6569501a941b99f6394e71bd64953e8ddb415a | 993cf64df4795e7912a7f9157bd8bf02aa985506 | /past_work/SWEA_D2/1946 docdecode.py | 99fd7ef2d7f3d2cd0488565545486aecda7eebfd | [] | no_license | jiwonjulietyoon/Algorithm | b541e630c5b01b47cc05b538970d2b73d452baf5 | a11be16f4700e7e55382d4dcfd88d534a232f024 | refs/heads/master | 2020-04-24T01:54:05.200538 | 2019-11-09T03:56:47 | 2019-11-09T03:56:47 | 171,616,523 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 879 | py | # 1946. 간단한 압축 풀기
# for T in range(int(input())):
# doc = {}
# N = int(input())
# for _ in N:
# k, v = input().split()
# v = int(v)
# doc[k] = v
# print(f"#{T+1}")
# for k in doc:
# for
# Solution 1: decode run-length pairs (char, count) straight to stdout,
# wrapping the output every 10 characters.
for T in range(int(input())):
    N = int(input())
    print(f"#{T+1}")
    cnt = 0
    for _ in range(N):
        ch, n = input().split()
        n = int(n)
        for _ in range(n):
            print(ch, end='')
            cnt += 1
            if not cnt%10:
                print()
    print()

# Solution 2 (alternative take): expand everything into a list first, then
# print with the same 10-per-line wrapping.
# NOTE(review): both loops read the test-case count from stdin, so running
# the file as-is expects the full input twice -- these look like two
# standalone attempts kept in one file.
for T in range(int(input())):
    doc = []
    N = int(input())
    for _ in range(N):
        ch, n = input().split()
        n = int(n)
        doc.extend([ch] * n)
    print(f"#{T+1}")
    cnt = 0
    for x in doc:
        print(x, end='')
        cnt += 1
        if not cnt % 10:
            print()
    print()
| [
"jiwonjulietyoon@gmail.com"
] | jiwonjulietyoon@gmail.com |
d09335d99fce5581c09dbb4944074926cd84937b | 74983098c5de53007bde6052a631845c781b5ba8 | /camelback/camelback15/camelback.py | 6809a547ce3f5ca4a2973a8aad74b2abcb3e8e5e | [] | no_license | numairmansur/Experiments | 94ccdd60f4c2cf538fab41556ac72405656c9d77 | 592f39916461c7a9f7d400fa26f849043d1377ed | refs/heads/master | 2021-04-29T12:39:16.845074 | 2017-02-15T07:36:47 | 2017-02-15T07:36:47 | 78,043,284 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 700 | py | import numpy as np
import sys
import math
import time
import csv
from hpolib.benchmarks.synthetic_functions import Camelback # Change this
from time import gmtime, strftime
def main(job_id, params):
    # Optimizer entry point (Python 2 syntax): `job_id`/`params` are supplied
    # by the runner (the printed banner says stdout lands in the job's
    # output directory). Evaluates the Camelback benchmark at the suggested
    # (x, y) point, appends the objective value to a CSV log and returns it.
    print '!!! Entered Main !!!'
    print 'Anything printed here will end up in the output directory for job #:', str(job_id)
    print params
    f = Camelback() # Change this
    res = f.objective_function([params['x'], params['y']]) # CHANGE THIS
    print res
    # /home/mansurm/Experiments/...../run1.csv
    with open('/home/mansurm/Experiments/camelback/run15.csv','a') as csvfile: # CHANGE THIS
        writer = csv.writer(csvfile, delimiter=',')
        # Only the primary objective value is logged.
        writer.writerow([res['main'][0]])
    return res['main'][0]
| [
"numair.mansur@gmail.com"
] | numair.mansur@gmail.com |
31ff8aaa5ba4c0ea5c9cd51140a9cb65ed640375 | 78ed228ff9262eaca44fe5badab05f512433eea8 | /transcrypt/development/automated_tests/transcrypt/iterators_and_generators/__init__.py | 9da8cb24e88386f5ae95b46235edb89a0fc66840 | [
"Apache-2.0"
] | permissive | khlumzeemee/Transcrypt | 74af14f3175d1ce1d4debdfc5b346214d2597105 | 6a8abee3648daa0f36b509993ba54e14e8e9cf9b | refs/heads/master | 2021-01-12T16:57:15.609336 | 2016-10-14T17:22:29 | 2016-10-14T17:22:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,910 | py | class Iterable:
def __init__ (self, i):
self.aList = range (0, 50, i)
def __iter__ (self):
return Iterator (self)
class Iterator:
    """Hand-written iterator yielding the first six entries of iterable.aList."""

    def __init__(self, iterable):
        self.iterable = iterable
        self.index = -1

    def __iter__(self):
        return self

    # Transcrypt is expected to auto-wrap __next__ in a next(self).
    def __next__(self):
        self.index = self.index + 1
        if self.index <= 5:
            return self.iterable.aList[self.index]
        raise StopIteration()
def exhaustableGenerator (i):
    # NOTE: the parameter is unused -- the generator always yields the five
    # even numbers 0, 2, 4, 6, 8. The original loop variable shadowed the
    # parameter `i`; it is renamed here to make that explicit without
    # changing behaviour (the test fixture relies on the exact sequence).
    for k in range (5):
        yield 2 * k
def run (autoTester):
    # Iterates every iterable twice, both manually (iter/next) and with for
    # loops, recording values and the terminating exception's class name so
    # the transpiled output can be compared: lists restart on the second
    # pass, generators stay exhausted.
    exhaustableGenExp = (a * a * a for a in [10, 20, 30]) # Currently still converted to iterator on list comprehension, must also be iterable
    # So becomes py_iter (aList).
    # List already has an __iter__ which it will return, it's a __PyIterator__
    # To that __PyIterator__, that will already have a __next__, py_iter first adds a next
    # So exhaustableGenExp is an iterator with a next and a __next__
    # If we call iter on that, py_iter is calle again py_iter, on an object with a next and a next __next__
    # For this reason py_iter needs a recursion prevention check
    iterables = [Iterable (7), exhaustableGenerator (5), [i * 3 for i in range (5)], exhaustableGenExp]

    # Manual iteration: drain with next() until an exception escapes, then
    # record the exception class name (StopIteration once exhausted).
    for iterable in iterables:
        autoTester.check ('[1]')
        iterator = iter (iterable)
        try:
            while True:
                autoTester.check (next (iterator))
        except Exception as exception:
            autoTester.check (exception.__class__.__name__)

        autoTester.check ('[2]')
        iterator = iter (iterable)
        try:
            while True:
                autoTester.check (next (iterator))
        except Exception as exception:
            autoTester.check (exception.__class__.__name__)

    # Plain for-loops over the same iterables, again twice in a row.
    for iterable in iterables:
        autoTester.check ('[3]')
        for n in iterable:
            autoTester.check (n)

        autoTester.check ('[4]')
        for n in iterable:
            autoTester.check (n)
| [
"info@qquick.org"
] | info@qquick.org |
3a2bb166b63a640f43091117e69e7b8199f98ea0 | 3be86a9093167acf4cb92a0b70c7087996f8f8e1 | /0013_roman_to_integer.py | 1322ef20494da2c1ea575461524a1a41cf40a8cd | [] | no_license | zimingding/leetcode | 28d25fc1e62612752c511b52af9ff77f7b7a7da7 | c43b22146465a78a58e3cc3945228431ed94388a | refs/heads/master | 2020-05-24T00:10:59.893207 | 2019-11-11T20:21:38 | 2019-11-11T20:21:38 | 187,010,281 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,284 | py | class Solution:
def romanToInt(self, s: str) -> int:
r = 0
i = 0
while i < len(s):
c = s[i]
if c == 'I':
if i+1 < len(s) and s[i+1] == 'V':
r += 4
i += 1
elif i+1 < len(s) and s[i+1] == 'X':
r += 9
i += 1
else:
r += 1
if c == 'V':
r += 5
if c == 'X':
if i+1 < len(s) and s[i + 1] == 'L':
r += 40
i += 1
elif i+1 < len(s) and s[i + 1] == 'C':
r += 90
i += 1
else:
r += 10
if c == 'L':
r += 50
if c == 'C':
if i+1 < len(s) and s[i + 1] == 'D':
r += 400
i += 1
elif i+1 < len(s) and s[i + 1] == 'M':
r += 900
i += 1
else:
r += 100
if c == 'D':
r += 500
if c == 'M':
r += 1000
i += 1
return r
print(Solution().romanToInt('MCMXCIV'))
| [
"uowzd01@hotmail.com"
] | uowzd01@hotmail.com |
a0ca7b7d5578f7ed4c1ad33fdabb84e8158a0362 | 98b9e0a180d65bde6e799a0ef503409ce09ad9bd | /PythonFun/pyFun/Type_List.py | 912a66f52b9aff6b8a3e6b5674fcb68014099e35 | [] | no_license | LizhangX/DojoAssignments | 7fae9ed3098d1a131f2a2b093ded95a4de70a5cb | b56ccdf41fe5d43a4d5f340e4f21aaf632d7b7d6 | refs/heads/master | 2023-02-04T14:27:09.234685 | 2019-08-23T22:18:04 | 2019-08-23T22:18:04 | 94,444,518 | 0 | 1 | null | 2023-01-31T21:55:04 | 2017-06-15T13:55:14 | TSQL | UTF-8 | Python | false | false | 948 | py | # Write a program that takes a list and prints a message for each element in the list, based on that element's data type.
def TypeList(arr):
string = ""
sum = 0
for i in arr:
if type(i) == str:
string = string + " " + i
elif type(i) == float or type(i) == int:
sum += i
if string != "" and sum != 0:
print "\"The array you entered is of mixed type\""
print "\"String:{}\"".format(string)
print "\"Sum: {}\"".format(sum)
elif string != "":
print "\"The array you entered is of string type\""
print "\"String:{}\"".format(string)
elif sum != 0 and type(sum) == int:
print "\"The array you entered is of integer type\""
print "\"Sum: {}\"".format(sum)
l = ['magical unicorns',19,'hello',98.98,'world']
m = [2,3,1,7,4,12]
n = ['magical','unicorns']
TypeList(l)
TypeList(m)
TypeList(n)
| [
"lizhang.xie@gmail.com"
] | lizhang.xie@gmail.com |
c02e9b9aafa6aeabec084409401e9a732d8f1f9a | edfd1db2b48d4d225bc58be32fbe372a43415112 | /team-task/airflow1.9/dags/redtuxedo/rwidjojo2_5.py | fda6c03c2023cabe0f8455728727512ee85cda92 | [] | no_license | rwidjojo/airflow-training | ed83cb9e97ca85ef06de1426f2f41014881a1f22 | ac82040d8ddc3859df5576eee08d397e824016f1 | refs/heads/main | 2023-08-12T21:01:17.672059 | 2021-01-04T09:17:48 | 2021-01-04T09:17:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 928 | py | # Instructions
# Define a function that uses the python logger to log
# parameter from PythonOperator
import datetime
import logging
from airflow import DAG
from airflow.operators.python_operator import PythonOperator
from airflow.utils.dates import days_ago
owner = 'rwidjojo' # Replace with your short name
default_args = {
'owner': owner,
'depends_on_past': False,
'start_date': days_ago(2),
}
def write_to_log(**kwargs):
myText = kwargs["log_to_write"]
logging.info(myText)
dag = DAG(
f'{owner}.lesson2.excercise5',
default_args=default_args,
)
greet_task = PythonOperator(
task_id="say_hello",
python_callable=write_to_log,
op_kwargs={'log_to_write': f'Hi {owner} greeting from airflow'},
dag=dag,
)
bye_task = PythonOperator(
task_id="bye_hello",
python_callable=write_to_log,
op_kwargs={'log_to_write': 'Good bye'},
dag=dag,
)
greet_task >> bye_task | [
"nurcahyopujo@gmail.com"
] | nurcahyopujo@gmail.com |
73df0c0ec8d34d04112fe3dd73871d6063d5dc44 | 172e5fcd35072b576380c4258f0ca3e0d8883b35 | /datasets/helmet/filter.py | 7d5e9c7548d9e929d3180d46f8d3e6a3c965bfe2 | [] | no_license | fanqie03/classifier_pytorch | e1951578fb0eeab8983bf80be710d250be64b8f9 | 21bb2e3c2ca01333080668dce928b48d4e0e6d59 | refs/heads/master | 2023-05-14T13:36:08.211691 | 2020-01-03T08:02:09 | 2020-01-03T08:02:09 | 202,356,398 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,279 | py | import shutil
import os
import argparse
from pathlib import Path
from PIL import Image
from tqdm import tqdm
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument('--source_dir', default='/home/cmf/datasets/extract_data/gongdi/person')
parser.add_argument('--target_dir', default='/home/cmf/datasets/extract_data/gongdi/person_filter')
parser.add_argument('--min_size', default=30)
args = parser.parse_args()
return args
def split(array, split_num):
total_len = len(array)
part_num = len(array) / split_num
arrays = []
for i in range(split_num):
start = int(i*part_num)
end = int((i+1)*part_num) if int((i+1)*part_num) < total_len else total_len
arrays.append(array[start: end])
return arrays
def main():
args = get_args()
source_dir = Path(args.source_dir)
target_dir = Path(args.target_dir)
source_files = list(source_dir.rglob('*'))
folder_name = source_dir.name
for i, file in tqdm(enumerate(source_files)):
image = Image.open(file)
if image.width < args.min_size or image.height < args.min_size:
continue
del image
dst = target_dir / file.name
shutil.copy(file, dst)
if __name__ == '__main__':
main()
| [
"1242733702@qq.com"
] | 1242733702@qq.com |
f1f1b0a578bbed74ac77a1c88f51c2b536e74150 | d6150d04ec161dbdac33e9be23648ad4f258a1a7 | /tensorflow/examples/saved_model/integration_tests/use_mnist_cnn.py | 9e1ca33029a928e8f6a8bad1f068d9e56e71be54 | [
"Apache-2.0"
] | permissive | aweers/tensorflow | bf0f5c6c6a6384a044a5c081dd1e8efe89c0349e | 640726310112e1ad708faef66f751fe5d70ec102 | refs/heads/master | 2020-04-24T23:56:25.910880 | 2019-03-09T15:12:37 | 2019-03-09T15:12:37 | 172,361,635 | 0 | 0 | Apache-2.0 | 2019-03-09T15:12:38 | 2019-02-24T16:31:37 | C++ | UTF-8 | Python | false | false | 4,964 | py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Imports a convolutional feature extractor for MNIST in SavedModel format.
This program picks up the SavedModel written by export_mnist_cnn.py and
uses the feature extractor contained in it to classification on either
classic MNIST (digits) or Fashion MNIST (thumbnails of apparel). Optionally,
it trains the feature extractor further as part of the new classifier.
As expected, that makes training slower but does not help much for the
original training dataset but helps a lot for transfer to the other dataset.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import app
from absl import flags
import tensorflow as tf
from tensorflow.examples.saved_model.integration_tests import mnist_util
from tensorflow.examples.saved_model.integration_tests import util
from tensorflow.python.saved_model import load as svmd_load
tf.saved_model.load = svmd_load.load
FLAGS = flags.FLAGS

# Command-line flags configuring the transfer-learning experiment.
flags.DEFINE_string(
    'export_dir', None,
    'Directory of exported SavedModel.')
flags.DEFINE_integer(
    'epochs', 5,
    'Number of epochs to train.')
flags.DEFINE_bool(
    'retrain', False,
    'If set, the imported SavedModel is trained further.')
flags.DEFINE_float(
    'dropout_rate', None,
    'If set, dropout rate passed to the SavedModel.')
flags.DEFINE_float(
    'regularization_loss_multiplier', None,
    'If set, multiplier for the regularization losses in the SavedModel.')
flags.DEFINE_bool(
    'use_fashion_mnist', False,
    'Use Fashion MNIST (products) instead of the real MNIST (digits). '
    'With this, --retrain gains a lot.')
flags.DEFINE_bool(
    'fast_test_mode', False,
    'Shortcut training for running in unit tests.')
def make_classifier(feature_extractor, l2_strength=0.01, dropout_rate=0.5):
  """Returns a Keras Model that classifies MNIST on top of feature_extractor.

  Args:
    feature_extractor: a Keras layer/callable mapping MNIST images to a
      feature vector.
    l2_strength: L2 regularization strength for the final dense layer.
    dropout_rate: dropout applied to the extracted features.
  """
  inputs = tf.keras.Input(mnist_util.INPUT_SHAPE)
  features = feature_extractor(inputs)
  dropped = tf.keras.layers.Dropout(dropout_rate)(features)
  probabilities = tf.keras.layers.Dense(
      mnist_util.NUM_CLASSES, activation='softmax',
      kernel_regularizer=tf.keras.regularizers.l2(l2_strength))(dropped)
  return tf.keras.Model(inputs=inputs, outputs=probabilities)
def scale_regularization_losses(obj, multiplier):
  """Scales obj.regularization_losses by multiplier if not None."""
  if multiplier is None: return
  def _scale_one_loss(l):  # Separate def avoids lambda capture of loop var.
    # Wrap the scaled loss in a tf.function and trace it immediately so a
    # concrete function exists before obj.regularization_losses is rebound.
    f = tf.function(lambda: tf.multiply(multiplier, l()))
    _ = f.get_concrete_function()
    return f
  # Replace every stored loss callable with its scaled counterpart.
  obj.regularization_losses = [_scale_one_loss(l)
                               for l in obj.regularization_losses]
def main(argv):
  """Loads the exported feature extractor and trains a classifier on it."""
  del argv
  # Load a pre-trained feature extractor and wrap it for use in Keras.
  obj = tf.saved_model.load(FLAGS.export_dir)
  scale_regularization_losses(obj, FLAGS.regularization_loss_multiplier)
  arguments = {}
  if FLAGS.dropout_rate is not None:
    arguments['dropout_rate'] = FLAGS.dropout_rate
  # trainable=FLAGS.retrain decides whether the imported weights are
  # fine-tuned together with the new classification head.
  feature_extractor = util.CustomLayer(obj, output_shape=[128],
                                       trainable=FLAGS.retrain,
                                       arguments=arguments)
  # Build a classifier with it.
  model = make_classifier(feature_extractor)
  # Train the classifier (possibly on a different dataset).
  (x_train, y_train), (x_test, y_test) = mnist_util.load_reshaped_data(
      use_fashion_mnist=FLAGS.use_fashion_mnist,
      fake_tiny_data=FLAGS.fast_test_mode)
  model.compile(loss=tf.keras.losses.categorical_crossentropy,
                optimizer=tf.keras.optimizers.SGD(),
                metrics=['accuracy'],
                # TODO(arnoegw): Remove after investigating huge allocs.
                run_eagerly=True)
  print('Training on %s with %d trainable and %d untrainable variables.' %
        ('Fashion MNIST' if FLAGS.use_fashion_mnist else 'MNIST',
         len(model.trainable_variables), len(model.non_trainable_variables)))
  model.fit(x_train, y_train,
            batch_size=128,
            epochs=FLAGS.epochs,
            steps_per_epoch=3,
            verbose=1,
            validation_data=(x_test, y_test))
if __name__ == '__main__':
# tf.enable_v2_behavior()
app.run(main)
| [
"gardener@tensorflow.org"
] | gardener@tensorflow.org |
2429e6f4e1bf8f3af9f8ad79ed51e9307b1be38e | 487ce91881032c1de16e35ed8bc187d6034205f7 | /codes/CodeJamCrawler/CJ_16_2/16_2_1_kieronb_digits.py | 88aa02a8dfc600e147bcbfb888a04aa403e5cd60 | [] | no_license | DaHuO/Supergraph | 9cd26d8c5a081803015d93cf5f2674009e92ef7e | c88059dc66297af577ad2b8afa4e0ac0ad622915 | refs/heads/master | 2021-06-14T16:07:52.405091 | 2016-08-21T13:39:13 | 2016-08-21T13:39:13 | 49,829,508 | 2 | 0 | null | 2021-03-19T21:55:46 | 2016-01-17T18:23:00 | Python | UTF-8 | Python | false | false | 1,483 | py | #!/usr/bin/python
import sys
#import logging
#logging.basicConfig(stream=sys.stderr, level=logging.DEBUG)
nums = ['ZERO', 'ONE', 'TWO', 'THREE', 'FOUR', 'FIVE', 'SIX', 'SEVEN', 'EIGHT', 'NINE'];
# Extraction order for the digit words. At each stage the listed letter
# occurs in exactly one digit word that can still be present, so greedy
# removal is unambiguous: Z->ZERO, W->TWO, U->FOUR, X->SIX, G->EIGHT,
# then O->ONE, R->THREE, F->FIVE, V->SEVEN, N->NINE.
EXTRACTION_ORDER = [
    ('Z', 0, 'ZERO'), ('W', 2, 'TWO'), ('U', 4, 'FOUR'),
    ('X', 6, 'SIX'), ('G', 8, 'EIGHT'), ('O', 1, 'ONE'),
    ('R', 3, 'THREE'), ('F', 5, 'FIVE'), ('V', 7, 'SEVEN'),
    ('N', 9, 'NINE'),
]


def solve(s):
    """Return the ascending digit string whose English names permute to *s*.

    *s* is an uppercase scramble of concatenated digit names
    (ZERO..NINE). Characters that never occur in a digit word, such as a
    trailing newline, are simply ignored.
    """
    digits = []
    for letter, digit, word in EXTRACTION_ORDER:
        # Each occurrence of the stage's unique letter accounts for one
        # copy of the corresponding word; strip its letters and repeat.
        while letter in s:
            digits.append(digit)
            for ch in word:
                # Remove at most one occurrence (no-op if absent).
                s = s.replace(ch, '', 1)
    return ''.join(sorted(str(d) for d in digits))
def remove_word(s, w):
    """Return *s* with one occurrence of every character of *w* removed."""
    remaining = s
    for letter in w:
        remaining = remove_char(remaining, letter)
    return remaining


def remove_char(s, c):
    """Return *s* with the first occurrence of *c* removed (no-op if absent)."""
    position = s.find(c)
    if position == -1:
        return s
    return s[:position] + s[position + 1:]
# Driver: the first stdin line holds the number of test cases (ignored;
# iteration simply runs to EOF); each following line is one scrambled
# string. Output uses the Code Jam "Case #k: answer" format.
first = True
n = 0
for line in sys.stdin:
    if first:
        # Skip the test-case count line.
        first = False
    else:
        n = n + 1
        ans = solve(line)
        print("Case #" + str(n) + ": " + ans)
| [
"[dhuo@tcd.ie]"
] | [dhuo@tcd.ie] |
420215bd2efecae47efd82097ef50ae4aeb87a1c | b3084604bb27ff87149bfc49c16a8a5e6ea5582c | /flsp-mrp/models/Productionmsg.py | 7cf757957caa793f22d4c429c2a337555ca17ede | [] | no_license | odoo-smg/firstlight | 9fe308fb876e80a11ebfda40a442983c9a85ae3e | 4a82cd5cfd1898c6da860cb68dff3a14e037bbad | refs/heads/master | 2022-10-09T10:10:36.108190 | 2022-09-28T16:06:30 | 2022-09-28T16:06:30 | 235,829,864 | 3 | 2 | null | 2022-03-17T19:26:27 | 2020-01-23T15:56:48 | JavaScript | UTF-8 | Python | false | false | 1,009 | py | # -*- coding: utf-8 -*-
from odoo import models, fields, api
class Produtionflspmsg(models.TransientModel):
    # Transient (wizard) model that shows a message on a manufacturing
    # order, pre-filled with the order's product and bill of material.
    # NOTE(review): class name has a typo ("Prodution"); left unchanged
    # because it may be referenced elsewhere in the module.
    _name = 'flspmrp.productionflspmsg'
    _description = "Wizard: Message on Production"

    @api.model
    def default_get(self, fields):
        """Prefill product_id and bom_id from the contextual production order.

        The manufacturing order id is read from ``default_production_id``
        or, failing that, ``active_id`` in the environment context. Values
        are only filled for fields actually requested in ``fields``.
        """
        res = super(Produtionflspmsg, self).default_get(fields)
        production_order = self.env['mrp.production']
        production_id = self.env.context.get('default_production_id') or self.env.context.get('active_id')
        if production_id:
            production_order = self.env['mrp.production'].browse(production_id)
        if production_order.exists():
            if 'product_id' in fields:
                res['product_id'] = production_order.product_id.id
            if 'bom_id' in fields:
                res['bom_id'] = production_order.bom_id.id
        return res

    # Read-only display fields populated by default_get().
    product_id = fields.Many2one('product.product', string='Product', readonly=True)
    bom_id = fields.Many2one('mrp.bom', string='Bill of Material', readonly=True)
| [
"alexandresousa@smartrend.com"
] | alexandresousa@smartrend.com |
fadfcdfc5a65325f9e65158cb8d2183527c560d5 | 4380a4029bac26f205ed925026914dce9e96fff0 | /slyr/parser/object.py | 7edcb40861627e48a2518d8bf6be2b450d2eba7e | [] | no_license | deepVector/slyr | 6b327f835994c8f20f0614eb6c772b90aa2d8536 | 5d532ac3eec0e00c5883bf873d30c6b18a4edf30 | refs/heads/master | 2020-12-03T10:24:39.660904 | 2019-04-08T00:48:03 | 2019-04-08T00:48:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 779 | py | #!/usr/bin/env python
"""
Base class for persistent objects
"""
from typing import List
class Object:
    """Base class for objects which can be read from a stream."""

    @staticmethod
    def guid() -> str:
        """Returns the object's GUID (empty for the base class)."""
        return ""

    @staticmethod
    def compatible_versions():
        """
        Returns the list of compatible object versions, or None to skip
        ESRI version bytes.
        """
        return [1]

    def read(self, stream, version):
        """
        Reads the object from the given stream.

        Subclasses override this; the base implementation does nothing.
        """
        return None

    def children(self) -> List['slyr.parser.Object']:
        """
        Returns a list of all child objects referenced by this object.
        """
        return []
| [
"nyall.dawson@gmail.com"
] | nyall.dawson@gmail.com |
6c61a8655ed737b3ded276848231cfa8a07a9bb0 | 2bf56904829ab9d5e5aa49a50aeceaef620df643 | /tests/test_collector.py | 41a03d3fd613b6b8f6b685f983d856ad12323a3e | [
"MIT"
] | permissive | OCHA-DAP/hdx-scraper-unosat-flood-portal | 501f53d43ead4fc46312fc46229c43c034787ed0 | 80b0bcd404993e4bd1dae442f794c9f86b6d5328 | refs/heads/master | 2021-12-25T06:00:02.327571 | 2021-12-22T20:07:36 | 2021-12-22T20:07:36 | 37,866,121 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,278 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
# system
import os
import sys
dir = os.path.split(os.path.split(os.path.realpath(__file__))[0])[0]
sys.path.append(os.path.join(dir, 'scripts'))
# testing
import mock
import unittest
from mock import patch
# program
import config.load as Config
import config.database as DB
import utilities.db as Database
import unosat_flood_portal_collect.collect as Collect
#
# Global variables.
#
TEST_DATA = 'test_flood_portal_output.json'
class CheckCollectorFunctions(unittest.TestCase):
    '''Unit tests checking if the collector is working as expected.'''

    def test_wrapper_doesnt_fail(self):
        # Main() signals failure by returning False.
        assert Collect.Main() != False

    def test_fetch_data_function(self):
        # An unreachable URL should make FetchData fail gracefully
        # (return False) rather than raise.
        assert Collect.FetchData(url='http://localhost:8080') == False

    def test_processing_works(self):
        data = Collect.DownloadAndProcessData()
        assert type(data) == list

    def test_clean_table_fails(self):
        # An unknown table name is rejected with False.
        assert Collect.CleanTable('foo') == False
class CheckPatches(unittest.TestCase):
'''Unit tests that check if the patches are doing what they are supposed to do.'''
def test_read_all_records_works(self):
d = Database.ReadAllRecords('unprocessed_data')
assert type(d) == list
assert Database.ReadAllRecords('unprocessed_data') != False
| [
"luiscape@gmail.com"
] | luiscape@gmail.com |
8178cc34b56a6a03048dac573f71a04c78628aa0 | 7cc9cb8bfa749cb011170299ca780f8e8d140b54 | /api/endpoints/login.py | 11e94f14912aefe873931f2ae736874d028c5596 | [
"Apache-2.0"
] | permissive | AssiaHalloul/FRDP | 75ef66168782d854494a79ef220f60d3792784b7 | e799b7e9d858c613ee7d7cce992dddee2eafca5f | refs/heads/main | 2023-07-28T04:09:27.358335 | 2021-09-17T03:11:13 | 2021-09-17T03:11:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,915 | py | from datetime import timedelta
from typing import Any
from fastapi import APIRouter, Body, Depends, HTTPException
from fastapi.security import OAuth2PasswordRequestForm
from sqlalchemy.orm import Session
import crud
import models
from api import deps
from core import security
from core.config import settings
from core.security import get_password_hash
from crud.crud_user import CRUDUser
from schemas.msg import Msg
from schemas.token import Token
from schemas.user import User
from utils import generate_password_reset_token, verify_password_reset_token
router = APIRouter()
user1 = CRUDUser(User)
@router.post("/login/access-token", response_model=Token)
def login_access_token(
db: Session = Depends(deps.get_db), form_data: OAuth2PasswordRequestForm = Depends()
) -> Any:
"""
OAuth2 compatible token login, get an access token for future requests
"""
user = user1.authenticate(db, email=form_data.username, password=form_data.password)
if not user:
raise HTTPException(status_code=400, detail="Incorrect email or password")
elif not user1.is_active(user):
raise HTTPException(status_code=400, detail="Inactive user")
access_token_expires = timedelta(minutes=settings.ACCESS_TOKEN_EXPIRE_MINUTES)
return {
"access_token": security.create_access_token(
user.id, expires_delta=access_token_expires
),
"token_type": "bearer",
}
@router.post("/login/test-token", response_model=User)
def Check_Session(
    current_user: models.user.User = Depends(deps.get_current_user),
) -> Any:
    """
    Test if a user is logged in by checking if a valid access token is in the header
    """
    # The dependency raises if the token is missing/invalid, so reaching
    # this body means the session is valid; echo the user back.
    # NOTE(review): PascalCase handler name breaks PEP 8; left as-is since
    # renaming could affect external references (e.g. OpenAPI operation id).
    return current_user
@router.post(
    "/password-recovery/{email}",
    response_model=Msg,
    status_code=200,
    response_description="Success",
)
def recover_password(email: str, db: Session = Depends(deps.get_db)) -> Any:
    """
    Password Recovery
    """
    user = user1.get_by_email(db, email=email)
    if not user:
        # NOTE(review): answering 422 here reveals whether an email is
        # registered (user enumeration); confirm this is acceptable.
        raise HTTPException(
            status_code=422,
            detail="The user with this username does not exist in the system.",
        )
    # NOTE(review): the reset token is returned directly in the response
    # body instead of being emailed to the user — presumably for testing;
    # confirm before production use.
    return {"msg": generate_password_reset_token(email=email)}
@router.post("/reset-password/", response_model=Msg)
def reset_password(
    token: str = Body(...),
    new_password: str = Body(...),
    db: Session = Depends(deps.get_db),
) -> Any:
    """
    Reset your password

    Verifies the password-reset token, then stores a new password hash
    for the matching user.

    Raises:
        HTTPException 400: the token is invalid/expired, or the user is inactive.
        HTTPException 404: no user exists for the email embedded in the token.
    """
    email = verify_password_reset_token(token)
    if not email:
        raise HTTPException(status_code=400, detail="Invalid token")
    # Use the module-level CRUD helper (user1) for consistency with the
    # other endpoints in this file (was crud.user.get_by_email).
    user = user1.get_by_email(db, email=email)
    if not user:
        raise HTTPException(status_code=404, detail="User not found")
    elif not user1.is_active(user):
        # Match login_access_token: inactive accounts cannot be modified.
        raise HTTPException(status_code=400, detail="Inactive user")
    user.hashed_password = get_password_hash(new_password)
    db.add(user)
    db.commit()
    return {"msg": "Password updated successfully!"}
| [
"yasserth19@gmail.com"
] | yasserth19@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.