blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
0540326a34c8eea969ab2b250c2b3f1ec56b7c6a | 289e17a9b3d0cf187f403894ebfb1007dcb1b3dc | /old-leetcode/older/455AssignCookies.py | 675b3deca4cadfed6019c625c6b235707590c23c | [] | no_license | congyingTech/Basic-Algorithm | 7ddb376e49ef3b1c0d989fb1d4a4949d2d121d63 | 18c06a96bb14688e4a1d5fb6baf235a6b53bd3ae | refs/heads/master | 2021-11-27T07:01:05.474609 | 2021-11-15T07:16:31 | 2021-11-15T07:16:31 | 224,206,231 | 10 | 3 | null | null | null | null | UTF-8 | Python | false | false | 288 | py | class Solution(object):
def findContentChildren(self, g, s):
"""
:type g: List[int]
:type s: List[int]
:rtype: int
"""
if __name__ == '__main__':
a = Solution()
g = [1,2,3,4]
s = [2,4,6,8]
print(a.findContentChildren(g, s)) | [
"congyingTech@163.com"
] | congyingTech@163.com |
ebebf49828ccb7b90e9a49da0fb21464b90fbbe0 | 5c29360b142773c87cae6b245b48cefa7cce708c | /inventory/inventory/report/alokasi_barang_terkirim/alokasi_barang_terkirim.py | 9186e5fdbc59477a87e9214eeccd3b9205565207 | [
"MIT"
] | permissive | bobzz-zone/inventory | 4f496d57189496f0cdc58c5510b51d26f5ad8a68 | 0cf5f3c5cc5e77a577605f99b8210fb0178aea30 | refs/heads/master | 2020-05-22T09:30:20.896237 | 2016-10-28T14:28:31 | 2016-10-28T14:28:31 | 65,599,131 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,859 | py | # Copyright (c) 2013, Myme and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
def execute(filters=None):
columns, data = [], []
columns = []
select_field = ""
group_clause = ""
order_clause = ""
left_join = ""
if filters.get("group_by") == "Customer" :
columns = ["Customer:Link/Customer:100","Item Code:Link/Item:100","Colour:Data:100","Yard/Meter per Roll:Float:150","Qty Pending Order:Float:150","Qty Sisa di Pending Order:Float:200",
"Qty Terkirim:Float:150"]
select_field = " ab.`customer`,abd.`item_code_roll`,abd.`colour`,abd.`yard_atau_meter_per_roll`,abd.`roll_qty`,abd.`qty_sisa`,abd.`qty_terkirim` "
order_clause = " ORDER BY ab.`customer` "
elif filters.get("group_by") == "Item" :
columns = ["Item Code:Link/Item:100","Colour:Data:100","Yard/Meter per Roll:Float:150","Qty Pending Order:Float:150","Qty Sisa di Pending Order:Float:200",
"Qty Terkirim:Float:150"]
select_field = " abd.`item_code_roll`,abd.`colour`,abd.`yard_atau_meter_per_roll`,abd.`roll_qty`,abd.`qty_sisa`,abd.`qty_terkirim` "
iorder_clause = " ORDER BY abd.`item_code_roll` "
elif filters.get("group_by") == "Colour":
columns = ["Colour:Data:100","Item Code:Link/Item:100","Yard/Meter per Roll:Float:150","Qty Pending Order:Float:150","Qty Sisa di Pending Order:Float:200",
"Qty Terkirim:Float:150"]
select_field = " abd.`colour`,abd.`item_code_roll`,abd.`yard_atau_meter_per_roll`,abd.`roll_qty`,abd.`qty_sisa`,abd.`qty_terkirim` "
order_clause = " ORDER BY abd.`colour` "
elif filters.get("group_by") == "Alokasi Barang" :
columns = ["Alokasi Barang No.:Link/Alokasi Barang:100","Item Code:Link/Item:100","Colour:Data:100","Yard/Meter per Roll:Float:150","Qty Pending Order:Float:150","Qty Sisa di Pending Order:Float:200",
"Alokasi No.:Link/Alokasi Barang:100","Qty Delivery:Float:100"]
select_field = " ab.`name`,abd.`item_code_roll`,abd.`colour`,abd.`yard_atau_meter_per_roll`,abd.`roll_qty`,abd.`qty_sisa`,pld.`name`,pldd.`roll_qty` "
left_join = """ LEFT JOIN `tabPacking List Delivery`pld ON pld.`alokasi_barang`=ab.`name` AND pld.`docstatus`=1
LEFT JOIN `tabPacking List Delivery Data`pldd ON pld.`name`=pldd.`parent`
AND pldd.`item_code_roll`=abd.`item_code_roll` AND pldd.`colour`=abd.`colour` AND pldd.`yard_atau_meter_per_roll`=abd.`yard_atau_meter_per_roll`
AND pldd.`group`=abd.`group`
"""
order_clause = " ORDER BY ab.`name` "
else :
return [],[]
ab_clause = ""
if filters.get("alokasi_barang") :
ab_clause = """ AND ab.`name`="{0}" """.format(filters.get("alokasi_barang"))
item_clause = ""
if filters.get("item") :
item_clause = """ AND abd.`item_code_roll`="{0}" """.format(filters.get("item"))
customer_clause = ""
if filters.get("customer") :
customer_clause = """ AND ab.`customer`="{0}" """.format(filters.get("customer"))
colour_clause = ""
if filters.get("colour") :
colour_clause = """ AND abd.`colour`="{0}" """.format(filters.get("colour"))
delivery_clause = ""
if filters.get("delivery_from_date") and filters.get("delivery_to_date"):
delivery_clause = """ AND ab.`expected_delivery_date` BETWEEN "{0}" AND "{1}" """.format(filters.get("delivery_from_date"),filters.get("delivery_to_date"))
date_clause = ""
if filters.get("posting_from_date") and filters.get("posting_to_date"):
delivery_clause = """ AND ab.`posting_date` BETWEEN "{0}" AND "{1}" """.format(filters.get("posting_from_date"),filters.get("posting_to_date"))
data = frappe.db.sql(""" SELECT {0}
FROM `tabAlokasi Barang`ab
JOIN `tabAlokasi Barang Data`abd ON abd.`parent`=ab.`name`
{1}
WHERE ab.`docstatus`=1
{2} {3} {4} {5} {6} {7}
{8} """.format(select_field,left_join,ab_clause,item_clause,customer_clause,colour_clause,delivery_clause,date_clause,order_clause))
return columns, data
| [
"bobzz.zone@gmail.com"
] | bobzz.zone@gmail.com |
5cd5c23e6afd2f10efdc27cbbfc89a7c2f9d17d9 | b5a31a9b0827232fb8efa2a56db90d396abbbcef | /Data Structures/Stack.py | 84e9a0f480d06fa2389166dc1b9da774001d5e87 | [] | no_license | Storiesbyharshit/Competetive-Coding | 3a693b2b3193df6fea1b9bc3bbbbb2dfb79eecfd | 722c216ee8931754eb7de0660d2aeb21544cf48b | refs/heads/master | 2022-12-12T03:50:59.348383 | 2020-09-09T17:26:01 | 2020-09-09T17:26:01 | 281,312,841 | 2 | 2 | null | 2020-08-28T05:06:11 | 2020-07-21T06:20:05 | Python | UTF-8 | Python | false | false | 326 | py | class Stack:
def __init__(self):
self.stack = []
def isEmpty(self):
return self.stack == []
def push(self, data):
self.stack.append(data)
def pop(self):
data = self.stack[-1]
del self.stack[-1]
return data
def peek(self):
return self.stack[-1]
def sizeStack(self):
return len(self.stack)
| [
"noreply@github.com"
] | Storiesbyharshit.noreply@github.com |
da7ce1aef3312b4041c16a690b5921d4eebe669f | c97b9ae1bf06757ba61f90905e4d9b9dd6498700 | /venv/Lib/site-packages/shapely/algorithms/polylabel.py | 2869a0ab011af07a9cb05229a701e2b1ba8fd562 | [] | no_license | Rahulk1p/image-processor | f7ceee2e3f66d10b2889b937cdfd66a118df8b5d | 385f172f7444bdbf361901108552a54979318a2d | refs/heads/main | 2023-03-27T10:09:46.080935 | 2021-03-16T13:04:02 | 2021-03-16T13:04:02 | 348,115,443 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 129 | py | version https://git-lfs.github.com/spec/v1
oid sha256:597fdfa5049872301df5a7b2eaca0b4d5ce5dd97bdfc31f574663654b2341f22
size 4675
| [
"rksc.k1p@gmail.com"
] | rksc.k1p@gmail.com |
68989c58d3143706f0ff6d4c919e22855714c894 | 52389ba81fa5abe009b2dee5deb83fabe56982a8 | /kgtk/cli/tail.py | bf327303d5eb480123f504ed88c4d47fdef28569 | [
"MIT"
] | permissive | Qanu-survey/kgtk | 4111a817bd81e9d48b0b0b421f733f2f25e45d7a | 95024bfc61c12282c75d53d256115cdf41837f04 | refs/heads/master | 2023-09-01T03:17:34.712634 | 2021-10-23T00:30:45 | 2021-10-23T00:30:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,058 | py | """This utility is analogous to the POSIX 'tail' command.
For "-n N", pass just the last N data edges of a KGTK input file to the KGTK output file.
The POSIX 'tail' command's notion of '-n +N' is not supported.
The header record, cotaining the column names, is always passed and is not
included in N.
Multiplier suffixes are not supported.
--mode=NONE is default.
TODO: Need KgtkWriterOptions
"""
from argparse import Namespace, SUPPRESS
from kgtk.cli_argparse import KGTKArgumentParser, KGTKFiles
def parser():
return {
'help': 'Pass the tail (last records) of a KGTK file.',
'description': 'This utility is analogous to the POSIX "head" command. ' +
'\n\nFor "-n N", pass just the last N data edges of a KGTK input file to the KGTK output file. ' +
'\n\n"-n +N" does not have the special meaning it has in the POSIC "tail" command. ' +
'\n\nThe header record, cotaining the column names, is always passed and is not included in N. ' +
'\n\nMultiplier suffixes are not supported. ' +
'\n\nUse this command to filter the output of any KGTK command: ' +
'\n\nkgtk xxx / tail -n 20 ' +
'\n\nUse it to limit the records in a file: ' +
'\n\nkgtk tail -i file.tsv -o file.html' +
'\n\nThis command defaults to --mode=NONE so it will work with TSV files that do not follow KGTK column naming conventions.' +
'\n\nAdditional options are shown in expert help.\nkgtk --expert html --help'
}
def add_arguments_extended(parser: KGTKArgumentParser, parsed_shared_args: Namespace):
"""
Parse arguments
Args:
parser (argparse.ArgumentParser)
"""
from kgtk.io.kgtkreader import KgtkReader, KgtkReaderOptions, KgtkReaderMode
from kgtk.io.kgtkwriter import KgtkWriter
from kgtk.value.kgtkvalueoptions import KgtkValueOptions
_expert: bool = parsed_shared_args._expert
# This helper function makes it easy to suppress options from
# The help message. The options are still there, and initialize
# what they need to initialize.
def h(msg: str)->str:
if _expert:
return msg
else:
return SUPPRESS
parser.add_input_file()
parser.add_output_file()
parser.add_argument("-n", "--edges", dest="edge_limit", type=int, default=10,
help="The number of records to pass (default=%(default)d).")
parser.add_argument( "--output-format", dest="output_format", help=h("The file format (default=kgtk)"), type=str,
choices=KgtkWriter.OUTPUT_FORMAT_CHOICES)
KgtkReader.add_debug_arguments(parser, expert=_expert)
KgtkReaderOptions.add_arguments(parser, mode_options=True, default_mode=KgtkReaderMode.NONE, expert=_expert)
KgtkValueOptions.add_arguments(parser, expert=_expert)
def run(input_file: KGTKFiles,
output_file: KGTKFiles,
edge_limit: int,
output_format: str,
errors_to_stdout: bool = False,
errors_to_stderr: bool = True,
show_options: bool = False,
verbose: bool = False,
very_verbose: bool = False,
**kwargs # Whatever KgtkFileOptions and KgtkValueOptions want.
)->int:
# import modules locally
from collections import deque
from pathlib import Path
import sys
import typing
from kgtk.exceptions import KGTKException
from kgtk.io.kgtkreader import KgtkReader, KgtkReaderOptions, KgtkReaderMode
from kgtk.io.kgtkwriter import KgtkWriter
from kgtk.join.kgtkcat import KgtkCat
from kgtk.value.kgtkvalueoptions import KgtkValueOptions
input_file_path: Path = KGTKArgumentParser.get_input_file(input_file)
output_file_path: Path = KGTKArgumentParser.get_output_file(output_file)
# Select where to send error messages, defaulting to stderr.
error_file: typing.TextIO = sys.stdout if errors_to_stdout else sys.stderr
# TODO: check that at most one input file is stdin?
# Build the option structures.
reader_options: KgtkReaderOptions = KgtkReaderOptions.from_dict(kwargs, mode=KgtkReaderMode.NONE)
value_options: KgtkValueOptions = KgtkValueOptions.from_dict(kwargs)
# Show the final option structures for debugging and documentation.
if show_options:
print("--input-file=%s" % str(input_file_path), file=error_file, flush=True)
print("--output-file=%s" % str(output_file_path), file=error_file, flush=True)
print("--edges=%s" % str(edge_limit), file=error_file, flush=True)
reader_options.show(out=error_file)
value_options.show(out=error_file)
print("=======", file=error_file, flush=True)
try:
kr: KgtkReader = KgtkReader.open(input_file_path,
options=reader_options,
value_options = value_options,
error_file=error_file,
verbose=verbose,
very_verbose=very_verbose,
)
output_mode: KgtkWriter.Mode = KgtkWriter.Mode.NONE
if kr.is_edge_file:
output_mode = KgtkWriter.Mode.EDGE
if verbose:
print("Opening the output edge file: %s" % str(output_file_path), file=error_file, flush=True)
elif kr.is_node_file:
output_mode = KgtkWriter.Mode.NODE
if verbose:
print("Opening the output node file: %s" % str(output_file_path), file=error_file, flush=True)
else:
if verbose:
print("Opening the output file: %s" % str(output_file_path), file=error_file, flush=True)
kw: KgtkWriter = KgtkWriter.open(kr.column_names,
output_file_path,
use_mgzip=reader_options.use_mgzip, # Hack!
mgzip_threads=reader_options.mgzip_threads, # Hack!
gzip_in_parallel=False,
mode=output_mode,
output_format=output_format,
error_file=error_file,
verbose=verbose,
very_verbose=very_verbose)
edge_count: int = 0
row: typing.List[str]
edge_buffer: deque = deque()
for row in kr:
edge_buffer.append(row)
if len(edge_buffer) > edge_limit:
edge_buffer.popleft()
while len(edge_buffer) > 0:
edge_count += 1
kw.write(edge_buffer.popleft())
kw.close()
if verbose:
print("Copied %d edges." % edge_count, file=error_file, flush=True)
except SystemExit as e:
raise KGTKException("Exit requested")
except Exception as e:
raise KGTKException(str(e))
| [
"rogers@isi.edu"
] | rogers@isi.edu |
67fe1507b996d283661d40cca7f5ffaec7b9ad5c | 1f006f0c7871fcde10986c4f5cec916f545afc9f | /apps/ice/plugins/required/plugin_auth_ldap_test.py | 790563538a3ae1cca495208198b7d61596441ab3 | [] | no_license | ptsefton/integrated-content-environment | 248b8cd29b29e8989ec1a154dd373814742a38c1 | c1d6b5a1bea3df4dde10cb582fb0da361dd747bc | refs/heads/master | 2021-01-10T04:46:09.319989 | 2011-05-05T01:42:52 | 2011-05-05T01:42:52 | 36,273,470 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,214 | py | #!/usr/bin/env python
# Copyright (C) 2007 Distance and e-Learning Centre,
# University of Southern Queensland
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
from unittest import TestCase
import sys
from plugin_auth_ldap import AuthLDAP
## ===============================
## TESTS
## ===============================
class AuthLDAPTests(TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def testCheckAuthentication(self):
class Object(object): pass
context = Object()
context.settings = {"ldapUrl":None}
#context.settings["ldapOU"] = "Staff"
#context.settings["ldapDC"] = "dc=usq,dc=edu,dc=au"
auth = AuthLDAP(context)
self.assertFalse(auth.checkAuthentication("userId", None))
self.assertFalse(auth.checkAuthentication("userId", ""))
self.assertFalse(auth.checkAuthentication("userId", "pw"))
# Note: can not automate testing for 'True' without a test LDAP server!
def runUnitTests(locals):
print "\n\n\n\n"
if sys.platform=="cli":
import clr
import System.Console
System.Console.Clear()
print "---- Testing under IronPython ----"
else:
print "---- Testing ----"
# Run only the selected tests
args = list(sys.argv)
sys.argv = sys.argv[:1]
args.pop(0)
runTests = args
runTests = [ i.lower().strip(", ") for i in runTests]
runTests = ["test"+i for i in runTests if not i.startswith("test")] + \
[i for i in runTests if i.startswith("test")]
if runTests!=[]:
testClasses = [i for i in locals.values() \
if hasattr(i, "__bases__") and \
(TestCase in i.__bases__)]
testing = []
for x in testClasses:
l = dir(x)
l = [ i for i in l if i.startswith("test") and callable(getattr(x, i))]
for i in l:
if i.lower() not in runTests:
delattr(x, i)
else:
testing.append(i)
x = None
num = len(testing)
if num<1:
print "No selected tests found! - %s" % str(args)[1:-1]
elif num==1:
print "Running selected test - %s" % (str(testing)[1:-1])
else:
print "Running %s selected tests - %s" % (num, str(testing)[1:-1])
from unittest import main
main()
if __name__=="__main__":
runUnitTests(locals())
sys.exit(0)
| [
"raward@gmail.com@110e3293-9ef9-cb8f-f479-66bdb1942d05"
] | raward@gmail.com@110e3293-9ef9-cb8f-f479-66bdb1942d05 |
7b0151f365485c4c49e2ff58ec33293dc6a48996 | eb99769b7c9e0eb1cf3b88878934a400ba42f0bf | /news/migrations/0002_favoriteitem.py | 054ed15c2d05e485c9d310ce8e7432b2650c6d91 | [] | no_license | Levalife/petsterr2.0 | 3657b200b9e236b81896f4ac104932e85517ceb3 | 43d20e65362596d72942fe624c29fd4f84d90f9a | refs/heads/master | 2023-01-13T04:58:23.496527 | 2018-09-13T09:50:48 | 2018-09-13T09:50:48 | 203,134,329 | 0 | 0 | null | 2023-01-05T21:55:18 | 2019-08-19T08:48:32 | Python | UTF-8 | Python | false | false | 1,358 | py | # Generated by Django 2.0.6 on 2018-07-09 10:06
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('news', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='FavoriteItem',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('is_deleted', models.BooleanField(default=False)),
('source_id', models.IntegerField(blank=True, null=True)),
('type', models.CharField(blank=True, choices=[('kennel', 'kennel'), ('animal', 'animal'), ('litter', 'litter')], max_length=100, null=True)),
('follower', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'Favorite Items',
'verbose_name_plural': 'Favorite Item',
'db_table': 'favorite_item',
},
),
]
| [
"levushka14@gmail.com"
] | levushka14@gmail.com |
94e03b2534aad88510168b1b286c1c69c38c9c59 | 7170e8a71c85bf88c43ae7524ffab25cf853b916 | /awswrangler/__metadata__.py | b3ae6c0df2a77d84e65a606e7447b3fca0127a9e | [
"Apache-2.0"
] | permissive | ibanmarco/aws-data-wrangler | b18aa898e2c0f33f225c44cdebf11b25f6637f63 | e99937296075c671e5f8a0998b430879c808687d | refs/heads/master | 2022-12-29T21:18:18.351632 | 2020-10-19T19:13:58 | 2020-10-19T19:13:58 | 289,549,549 | 0 | 0 | Apache-2.0 | 2020-10-19T19:13:59 | 2020-08-22T19:01:24 | Python | UTF-8 | Python | false | false | 286 | py | """Metadata Module.
Source repository: https://github.com/awslabs/aws-data-wrangler
Documentation: https://aws-data-wrangler.readthedocs.io/
"""
__title__: str = "awswrangler"
__description__: str = "Pandas on AWS."
__version__: str = "1.9.6"
__license__: str = "Apache License 2.0"
| [
"igorborgest@gmail.com"
] | igorborgest@gmail.com |
280b193fda89b737c832f4360d1e6a627aa012db | d48b89048d4fe8f09d1fcc1702f89b195186e025 | /portfolio/settings.py | 1bc72b7660ac6fbed74985a0433e7bf586ff4454 | [] | no_license | Aitodev/portfoliotestwithstudents | 833e32f80afc5361204880031bdcbba3af580421 | fb8e220bb504253ce47856ea46e1a28d3b6e46ef | refs/heads/master | 2022-12-11T23:13:00.170658 | 2020-09-24T04:20:11 | 2020-09-24T04:20:11 | 298,162,892 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,179 | py | import os
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'f-b%v$a9svi8d+xm%a_68)m)9x&mm3_+-v_1+dccxw1lj-p8)j'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'mainapp.apps.MainappConfig',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'portfolio.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'portfolio.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'ru'
TIME_ZONE = 'Asia/Bishkek'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'mainapp/media')
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_HOST_USER = 'aitolivelive@gmail.com'
EMAIL_HOST_PASSWORD = 'aitodevguruit'
EMAIL_PORT = 465
EMAIL_USE_SSL = True
DEFAULT_FROM_EMAIL = 'aitolivelive@gmail.com'
DEFAULT_TO_EMAIL = 'aitofullstackdev@gmail.com'
| [
"guruitcompany@gmail.com"
] | guruitcompany@gmail.com |
4b61cdfe5342b7c9620f14ce0febe8663b51fffd | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /2t6NvMe27HtSmqC4F_9.py | 5a367fa275b81bfe1cef01467c4a3e4dccdd6e0e | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,153 | py | """
Write three functions:
1. boolean_and
2. boolean_or
3. boolean_xor
These functions should evaluate a list of `True` and `False` values, starting
from the leftmost element and evaluating pairwise.
### Examples
boolean_and([True, True, False, True]) ➞ False
# [True, True, False, True] => [True, False, True] => [False, True] => False
boolean_or([True, True, False, False]) ➞ True
# [True, True, False, True] => [True, False, False] => [True, False] => True
boolean_xor([True, True, False, False]) ➞ False
# [True, True, False, False] => [False, False, False] => [False, False] => False
### Notes
* `XOR` is the same as `OR`, except that it excludes `[True, True]`.
* Each time you evaluate an element at 0 and at 1, you collapse it into the single result.
"""
def boolean_and(lst):
return not False in lst
def boolean_or(lst):
return True in lst
def boolean_xor(lst):
while len(lst) > 1:
tmp = []
for i in range(len(lst)-1):
a, b = lst[i:i+2]
if (a or b) and not (a and b): tmp.append(True)
else: tmp.append(False)
lst = tmp[:]
return tmp[0]
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
41f654ef6d53ddd5b0dd9ad92e5c7c59858f5527 | 741c5c70bf4a0adb05db6b0777c8d07e28eb9cf6 | /lib/python3.4/site-packages/IPython/nbformat/v3/validator.py | bc037cce2b4a6337067ab018bbfd06fe6ddc9505 | [] | no_license | andybp85/hyLittleSchemer | e686d2dc0f9067562367ea1173f275e8e2d2cb85 | af5cb6adf6a196cc346aa7d14d7f9509e084c414 | refs/heads/master | 2021-01-19T07:48:31.309949 | 2015-01-04T00:57:30 | 2015-01-04T00:57:30 | 28,496,304 | 6 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,786 | py | from __future__ import print_function
#!/usr/bin/env python
# -*- coding: utf8 -*-
import argparse
import traceback
import json
from IPython.external.jsonschema import Draft3Validator, validate, ValidationError
import IPython.external.jsonpointer as jsonpointer
from IPython.utils.py3compat import iteritems
def nbvalidate(nbjson, schema='v3.withref.json', key=None,verbose=True):
v3schema = resolve_ref(json.load(open(schema,'r')))
if key :
v3schema = jsonpointer.resolve_pointer(v3schema,key)
errors = 0
v = Draft3Validator(v3schema);
for error in v.iter_errors(nbjson):
errors = errors + 1
if verbose:
print(error)
return errors
def resolve_ref(json, base=None):
"""return a json with resolved internal references
only support local reference to the same json
"""
if not base :
base = json
temp = None
if type(json) is list:
temp = [];
for item in json:
temp.append(resolve_ref(item, base=base))
elif type(json) is dict:
temp = {};
for key,value in iteritems(json):
if key == '$ref':
return resolve_ref(jsonpointer.resolve_pointer(base,value), base=base)
else :
temp[key]=resolve_ref(value, base=base)
else :
return json
return temp
def convert(namein, nameout, indent=2):
"""resolve the references of namein, save the result in nameout"""
jsn = None
with open(namein) as file :
jsn = json.load(file)
v = resolve_ref(jsn, base=jsn)
x = jsonpointer.resolve_pointer(v, '/notebook')
with open(nameout,'w') as file:
json.dump(x,file,indent=indent)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-s', '--schema',
type=str, default='v3.withref.json')
parser.add_argument('-k', '--key',
type=str, default='/notebook',
help='subkey to extract json schema from json file')
parser.add_argument("-v", "--verbose", action="store_true",
help="increase output verbosity")
parser.add_argument('filename',
type=str,
help="file to validate",
nargs='*',
metavar='names')
args = parser.parse_args()
for name in args.filename :
nerror = nbvalidate(json.load(open(name,'r')),
schema=args.schema,
key=args.key,
verbose=args.verbose)
if nerror is 0:
print(u"[Pass]",name)
else :
print(u"[ ]",name,'(%d)'%(nerror))
if args.verbose :
print('==================================================')
| [
"andy@youshallthrive.com"
] | andy@youshallthrive.com |
101791f75248e23711a3f1cccf9fa0bdf8c2ba68 | c4b8e1e09dedbccd37ca008ecaaca4438610bbaf | /google_or_tools/circuit.py | 6b66df28031dfa54488a90ae0f6ab74b12c7d1e5 | [
"MIT"
] | permissive | hakank/hakank | 4806598b98cb36dd51b24b0ab688f52dadfe9626 | c337aaf8187f15dcdc4d5b09cd2ed0dbdb2e72c2 | refs/heads/master | 2023-08-15T00:21:52.750270 | 2023-07-27T16:21:40 | 2023-07-27T16:21:40 | 11,933,517 | 336 | 97 | MIT | 2023-07-27T11:19:42 | 2013-08-06T20:12:10 | JavaScript | UTF-8 | Python | false | false | 3,206 | py | # Copyright 2010 Hakan Kjellerstrand hakank@gmail.com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Decomposition of the circuit constraint in Google CP Solver.
Cf Global constraint catalog:
http://www.emn.fr/x-info/sdemasse/gccat/Ccircuit.html
Solution of n=4:
x: [2, 0, 3, 1]
x: [3, 0, 1, 2]
x: [1, 3, 0, 2]
x: [3, 2, 0, 1]
x: [1, 2, 3, 0]
x: [2, 3, 1, 0]
The 'orbit' method that is used here is based on some
observations on permutation orbits.
Compare with the following models:
* MiniZinc: http://www.hakank.org/minizinc/circuit_test.mzn
* Gecode: http://www.hakank.org/gecode/circuit_orbit.mzn
This model was created by Hakan Kjellerstrand (hakank@gmail.com)
Also see my other Google CP Solver models:
http://www.hakank.org/google_or_tools/
"""
from __future__ import print_function
import sys
from ortools.constraint_solver import pywrapcp
#
# circuit(x)
# constraints x to be an circuit
#
# Note: This assumes that x is has the domain 0..len(x)-1,
# i.e. 0-based.
#
def circuit(solver, x):
n = len(x)
z = [solver.IntVar(0, n - 1, "z%i" % i) for i in range(n)]
solver.Add(solver.AllDifferent(x))
solver.Add(solver.AllDifferent(z))
# put the orbit of x[0] in in z[0..n-1]
solver.Add(z[0] == x[0])
for i in range(1, n - 1):
# The following constraint give the error
# "TypeError: list indices must be integers, not IntVar"
# solver.Add(z[i] == x[z[i-1]])
# solution: use Element instead
solver.Add(z[i] == solver.Element(x, z[i - 1]))
#
# Note: At least one of the following two constraint must be set.
#
# may not be 0 for i < n-1
for i in range(1, n - 1):
solver.Add(z[i] != 0)
# when i = n-1 it must be 0
solver.Add(z[n - 1] == 0)
def main(n=5):
# Create the solver.
solver = pywrapcp.Solver("Send most money")
# data
print("n:", n)
# declare variables
# Note: domain should be 0..n-1
x = [solver.IntVar(0, n - 1, "x%i" % i) for i in range(n)]
#
# constraints
#
circuit(solver, x)
#
# solution and search
#
solution = solver.Assignment()
solution.Add(x)
collector = solver.AllSolutionCollector(solution)
solver.Solve(
solver.Phase(x, solver.CHOOSE_FIRST_UNBOUND, solver.ASSIGN_MIN_VALUE),
[collector])
num_solutions = collector.SolutionCount()
for s in range(num_solutions):
print("x:", [collector.Value(s, x[i]) for i in range(len(x))])
print()
print("num_solutions:", num_solutions)
print("failures:", solver.Failures())
print("branches:", solver.Branches())
print("WallTime:", solver.WallTime())
print()
n = 5
if __name__ == "__main__":
if len(sys.argv) > 1:
n = int(sys.argv[1])
main(n)
| [
"hakank@gmail.com"
] | hakank@gmail.com |
aa76d1630dc7831741c9ddd5d4369448e608559c | 750f6d44fc8aa00a2011070af681936c53a6127e | /Question_11_20/q19.py | 4702fdfae98fa1fe8953c0bd6c0cf364aa89eb71 | [] | no_license | ryuseiasumo/Gazoshori100knock | 2aab4bf625183e597a7e8401af4ec1dc67d334ad | 71ae0ce1d712ae5979aa5be3d088290934e26369 | refs/heads/master | 2022-06-19T10:06:16.169369 | 2020-05-10T03:48:17 | 2020-05-10T03:48:17 | 254,810,301 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,616 | py | import cv2
import sys
sys.path.append("..")
import numpy as np
from Question_01_10.q2 import Gray
def zero_padding(_img, K_size = 5):
pad = K_size//2
out = np.pad(_img, ([pad,pad],[pad,pad]), "constant")
return out
def Log_fillter(_img, K_size = 5, sigma = 3):
    """Filter a grayscale image with a normalized Laplacian-of-Gaussian kernel.

    Parameters
    ----------
    _img : 2-D array (or H x W x 1), any numeric dtype
    K_size : int, kernel side length (odd)
    sigma : float, Gaussian standard deviation

    Returns
    -------
    numpy.uint8 array of the same height/width, values clipped to [0, 255].
    """
    img = np.asarray(_img, dtype=np.float64)
    # Accept an explicit single-channel axis, but work on a 2-D view.
    if img.ndim == 3 and img.shape[2] == 1:
        img = img[:, :, 0]
    H, W = img.shape
    pad = K_size // 2
    # Bug fix: padding width must follow K_size. The old code called
    # zero_padding() with its default radius, which broke any K_size != 5.
    padded = np.pad(img, pad, mode="constant")
    # Build the LoG kernel: (x^2 + y^2 - 2*sigma^2) * exp(-(x^2+y^2)/(2*sigma^2)).
    K = np.zeros((K_size, K_size), dtype=np.float64)
    for y in range(-pad, -pad + K_size):
        for x in range(-pad, -pad + K_size):
            K[y + pad, x + pad] = (x ** 2 + y ** 2 - 2 * (sigma ** 2)) * np.exp(-(x ** 2 + y ** 2) / (2 * (sigma ** 2)))
    K /= (2 * np.pi * (sigma ** 6))
    K /= K.sum()  # normalize so a constant region maps to itself
    # Correlate the kernel over the padded image (debug prints removed).
    out = np.zeros_like(padded)
    for y in range(H):
        for x in range(W):
            out[pad + y, pad + x] = np.sum(K * padded[y: y + K_size, x: x + K_size])
    out = np.clip(out, 0, 255)
    return out[pad:pad + H, pad:pad + W].astype(np.uint8)
# Demo: denoise imori_noise.jpg with a LoG filter, save and display the result.
# Bug fix: np.float was removed in NumPy >= 1.24; np.float64 is the same type
# the old alias resolved to.
img = cv2.imread("./image_11_20/imori_noise.jpg").astype(np.float64)
img_gray = Gray(img)
img_ans = Log_fillter(img_gray)
cv2.imwrite("./image_11_20/answer19.jpg", img_ans)
cv2.imshow("result", img_ans)
cv2.waitKey(0)
cv2.destroyAllWindows()
| [
"ryusao5656@icloud.com"
] | ryusao5656@icloud.com |
29c2efec1dab3982a89e48c821ee9c8b852062a3 | f12f299984060186ad8422913ed591cac1fd918d | /miscellaneous/odd_even_jump.py | 44b387b4c86d4f79aee8830bc8d895e058b8bb36 | [] | no_license | pitikdmitry/leetcode | 775f9163850dd5d6cdb971603c6fd4615c7f89d7 | f82a36be6d8dcf842354d759bab98dd915173fd5 | refs/heads/master | 2023-01-23T23:42:01.568390 | 2020-12-07T09:03:45 | 2020-12-07T09:03:45 | 240,458,017 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,812 | py | '''
You are given an integer array A. From some starting index, you can make a series of jumps.
The (1st, 3rd, 5th, ...) jumps in the series are called odd numbered jumps, and the (2nd, 4th, 6th, ...) jumps in the series are called even numbered jumps.
You may from index i jump forward to index j (with i < j) in the following way:
During odd numbered jumps (ie. jumps 1, 3, 5, ...), you jump to the index j such that A[i] <= A[j] and A[j] is the smallest possible value. If there are multiple such indexes j, you can only jump to the smallest such index j.
During even numbered jumps (ie. jumps 2, 4, 6, ...), you jump to the index j such that A[i] >= A[j] and A[j] is the largest possible value. If there are multiple such indexes j, you can only jump to the smallest such index j.
(It may be the case that for some index i, there are no legal jumps.)
A starting index is good if, starting from that index, you can reach the end of the array (index A.length - 1) by jumping some number of times (possibly 0 or more than once.)
Return the number of good starting indexes.
Example 1:
Input: [10,13,12,14,15]
Output: 2
Explanation:
From starting index i = 0, we can jump to i = 2 (since A[2]
is the smallest among A[1], A[2], A[3], A[4] that is greater or equal to A[0]),
then we can't jump any more.
From starting index i = 1 and i = 2, we can jump to i = 3, then we can't jump any more.
From starting index i = 3, we can jump to i = 4, so we've reached the end.
From starting index i = 4, we've reached the end already.
In total, there are 2 different starting indexes (i = 3, i = 4)
where we can reach the end with some number of jumps.
'''
from typing import List
# The problem is how to find next greater element in array, that is righter
# We will use decreasing stack and process elements in sorted order
class Solution:
    """LeetCode 975 "Odd Even Jump": count starting indices from which the
    last index of the array can be reached by alternating odd/even jumps."""

    def build_next_greater_el_array(self, increasing_elements_idxs, next_bigger_el_idxs):
        # Indices arrive ordered by their value (ascending for "next >=",
        # descending for "next <="). A decreasing stack of indices is popped
        # whenever a later (righter) index appears; the popped index's jump
        # target is recorded in next_bigger_el_idxs (filled in place).
        # Ties resolve toward the smallest index because Python's sort is stable.
        decr_stack = []
        for idx in increasing_elements_idxs:
            while len(decr_stack) > 0 and decr_stack[-1] < idx:
                prev_idx = decr_stack.pop()
                next_bigger_el_idxs[prev_idx] = idx
            decr_stack.append(idx)

    def oddEvenJumps(self, arr: List[int]) -> int:
        """Return the number of good starting indices in arr."""
        if len(arr) == 0:
            return 0
        arr_with_idx = [[arr[i], i] for i in range(len(arr))]
        # Odd jumps: nearest index j > i with the smallest A[j] >= A[i].
        arr_with_idx_sorted = sorted(arr_with_idx, key=lambda x: x[0])
        increasing_elements_idxs = list(map(lambda x: x[1], arr_with_idx_sorted))
        next_bigger_el_idxs = [None for i in range(len(arr))]
        self.build_next_greater_el_array(increasing_elements_idxs, next_bigger_el_idxs)
        # Even jumps: nearest index j > i with the largest A[j] <= A[i].
        arr_with_idx_sorted_desc = sorted(arr_with_idx, key=lambda x: x[0], reverse=True)
        increasing_elements_idxs_desc = list(map(lambda x: x[1], arr_with_idx_sorted_desc))
        next_smaller_el_idxs = [None for i in range(len(arr))]
        self.build_next_greater_el_array(increasing_elements_idxs_desc, next_smaller_el_idxs)
        # DP right-to-left: lower[i] == end reachable from i when the next jump
        # is odd; higher[i] == end reachable when the next jump is even.
        higher = [False for i in range(len(arr))]
        lower = [False for i in range(len(arr))]
        higher[-1] = True
        lower[-1] = True
        result = 1  # the last index is always a good start
        for i in range(len(arr) - 2, -1, -1):
            next_bigger_el_idx = next_bigger_el_idxs[i]
            next_smaller_el_idx = next_smaller_el_idxs[i]
            if next_bigger_el_idx is not None:
                lower[i] = higher[next_bigger_el_idx]
            if next_smaller_el_idx is not None:
                higher[i] = lower[next_smaller_el_idx]
            if lower[i] is True:
                result += 1
        return result
# Smoke test from the problem statement; expected output is 2
# (good starting indices are 3 and 4).
solution = Solution()
arr = [10,13,12,14,15]
print(solution.oddEvenJumps(arr))
| [
"pitik.dmitry@mail.ru"
] | pitik.dmitry@mail.ru |
5658fc51036576756e2e818068317ea39e8b4c02 | fa5510464ba1573f41c94b4dd652fc684036beac | /FlaskURLShortener/main.py | f965c3e14b301c251cb9108cd21ff0bc8a3700e2 | [] | no_license | AndrewErmakov/WebDevelopmentTraining | 5371265f1737f929c454134a8ed032bdeda5e8ef | 86aeab91157dab57be41b7a10b60535a39ea0a84 | refs/heads/master | 2023-05-11T06:43:14.422330 | 2021-06-27T18:56:13 | 2021-06-27T18:56:13 | 246,019,245 | 1 | 0 | null | 2023-05-01T22:52:00 | 2020-03-09T11:35:41 | Python | UTF-8 | Python | false | false | 1,626 | py | from flask import (
Flask,
render_template,
request,
redirect,
url_for,
flash,
abort,
session,
jsonify)
import json
import os.path
# Flask application instance for the URL shortener.
app = Flask(__name__)
# NOTE(review): hard-coded secret key; should come from config/env in production.
app.secret_key = 'shtopor'
@app.route('/')
def index():
    """Render the landing page listing the short codes owned by this session."""
    owned_codes = session.keys()
    return render_template('index.html', codes=owned_codes)
@app.route('/your-url', methods=['POST', 'GET'])
def your_url():
    """Create a short-code -> URL mapping from the submitted form.

    On POST: persists the mapping in urls.json, marks ownership in the
    session and renders the confirmation page; rejects codes that are
    already taken. On GET: redirects back to the index page.
    """
    if request.method != 'POST':
        return redirect(url_for('index'))
    # Consistency fix: read each form field exactly once (the original mixed
    # request.form['code'] and request.form.get('code') for the same value).
    code = request.form['code']
    target_url = request.form['url']
    urls = {}
    if os.path.exists('urls.json'):
        with open('urls.json') as file:
            urls = json.load(file)
    if code in urls:
        flash('That short name has already been taken')
        return redirect(url_for('index'))
    urls[code] = {'url': target_url}
    with open('urls.json', 'w') as file:
        json.dump(urls, file)
    # Remember in the session that this browser created the code.
    session[code] = True
    return render_template('your_url.html', code=code)
@app.route('/<string:code>')
def redirect_to_url(code):
    """Resolve a short code to its stored URL and redirect, or 404."""
    if os.path.exists('urls.json'):
        with open('urls.json') as file:
            urls = json.load(file)
        entry = urls.get(code)
        if entry is not None and 'url' in entry:
            return redirect(entry['url'])
    return abort(404)
# Custom handler for 404 errors.
@app.errorhandler(404)
def page_not_found(error):
    """Render the custom not-found page with a 404 status code."""
    body = render_template('page_not_found.html')
    return body, 404
@app.route('/api')
def session_api():
    """Expose the session's short codes as a JSON array."""
    codes = list(session.keys())
    return jsonify(codes)
# Run the development server when executed directly.
if __name__ == "__main__":
    app.run()
| [
"andrew.67@list.ru"
] | andrew.67@list.ru |
f509745a558fc1e998a4da4875a540a353adf7ab | afc677459e46635ceffccf60d1daf50e62694557 | /ACME/transform/RandomScaling.py | e48ad04f0eca3ce1ef9b0a164f92ce91f4ee8876 | [
"MIT"
] | permissive | mauriziokovacic/ACME | 056b06da4bf66d89087fcfcbe0fd0a2e255d09f3 | 2615b66dd4addfd5c03d9d91a24c7da414294308 | refs/heads/master | 2020-05-23T23:40:06.667416 | 2020-01-10T14:42:01 | 2020-01-10T14:42:01 | 186,997,977 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 821 | py | import torch
from ..utility.islist import *
from ..math.normvec import *
from .Transform import *
class RandomScaling(Transform):
    """Transform scaling tensor attributes of a data object by a random
    per-axis factor drawn uniformly from [-1, 1] (negative factors mirror
    the geometry); 'norm' attributes are re-normalized after scaling.
    """

    def __init__(self, attr=['pos', 'norm']):
        """
        Parameters
        ----------
        attr : str or list of str
            name(s) of the attributes to scale (default ['pos', 'norm'])
        """
        super(RandomScaling, self).__init__()
        # Copy the list so instances never alias the shared mutable default.
        self.attr = list(attr) if islist(attr) else [attr]

    def __eval__(self, x, *args, **kwargs):
        # Bug fix: device must be passed as a keyword; positionally it was
        # interpreted as an extra size argument for torch.rand.
        T = torch.diag(torch.rand(3, device=x.pos.device) * 2 - 1)
        for attr in self.attr:
            if hasattr(x, attr):
                d = getattr(x, attr)
                # Bug fix: 'if d:' raises for multi-element tensors (ambiguous
                # truth value); only skip attributes that are missing.
                if d is not None:
                    if attr == 'norm':
                        # Normals must stay unit-length after scaling.
                        setattr(x, attr, normr(torch.matmul(d, T)))
                    else:
                        setattr(x, attr, torch.matmul(d, T))

    def __extra_repr__(self):
        return 'attr={}'.format(self.attr if len(self.attr) > 1 else self.attr[0])
| [
"maurizio.kovacic@gmail.com"
] | maurizio.kovacic@gmail.com |
71665b70d45138e620cfc606a18cb9ff5b7b5ff2 | 11f7207d5e7c1c6c3329f7ae9d4fa1507c806110 | /test_faker_producer.py | c862e7e93684590ba0067889bc781f55559f764b | [] | no_license | gmdmgithub/pandas-playground | 4a7a654a27acb0d14b442afdee6af22d67f64852 | a24267ab6b79c2351e9220bd11e028a190df21c2 | refs/heads/master | 2022-12-09T20:26:55.233005 | 2019-10-07T15:02:38 | 2019-10-07T15:02:38 | 205,238,397 | 0 | 0 | null | 2022-12-08T06:36:42 | 2019-08-29T19:42:31 | Python | UTF-8 | Python | false | false | 233 | py | import unittest
import faker_producer as fp
class TestFakerProducer(unittest.TestCase):
    """Unit tests for the faker_producer helpers."""

    def test_first_name(self):
        # first_name(None) is expected to yield None; a non-None seed value
        # is expected to yield a generated name.
        self.assertIsNone(fp.first_name(None))
        self.assertIsNotNone(fp.first_name('Alex'))
| [
"gmika@interia.pl"
] | gmika@interia.pl |
b370208f59708c4250a1eb9f3c929edf4c85b117 | b4849ca0f38c29407a9a88007a7ecb11035851eb | /setup.py | 011c251fcb84d42621397affd3b8ebe1d7cad831 | [
"MIT"
] | permissive | qianshuqinghan/dicom2nifti | e85619051470d01f0db11deffa821edac2cf4c67 | 79ed535835bd3be003d27a03c119d3a1cb46a0ac | refs/heads/master | 2020-04-01T03:58:12.176680 | 2018-10-11T17:39:24 | 2018-10-11T17:39:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,743 | py | from distutils.core import setup
from setuptools import find_packages
# Single source of truth for the release number (also embedded in download_url).
version = '2.0.4'

long_description = """
With this package you can convert dicom images to nifti files.
There is support for most anatomical CT and MR data.
For MR specifically there is support for most 4D data (like DTI and fMRI)
"""

setup(
    name='dicom2nifti',
    packages=find_packages(exclude=['contrib', 'docs', 'tests*']),
    version=version,
    description='package for converting dicom files to nifti',
    long_description=long_description,
    license='MIT',
    author='icometrix NV',
    author_email='dicom2nifti@icometrix.com',
    maintainer="icometrix NV",
    maintainer_email="dicom2nifti@icometrix.com",
    url='https://github.com/icometrix/dicom2nifti',
    download_url='https://github.com/icometrix/dicom2nifti/tarball/%s' % version,
    keywords=['dicom', 'nifti', 'medical imaging'],
    # Console entry point installed alongside the package.
    scripts=['scripts/dicom2nifti'],
    # https://pypi.python.org/pypi?%3Aaction=list_classifiers
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Healthcare Industry',
        'Intended Audience :: Science/Research',
        'Topic :: Scientific/Engineering :: Medical Science Apps.',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Operating System :: MacOS :: MacOS X',
        'Operating System :: POSIX :: Linux'],
    install_requires=['six', 'future', 'nibabel', 'numpy', 'scipy', 'pydicom>=1.0.1'],
    setup_requires=['nose', 'coverage']
)
| [
"arne.brys@icometrix.com"
] | arne.brys@icometrix.com |
9dabdb65c1ec3b2d7f17d7d6a72ad18da35458b0 | 93db77d922572c51678d68ca6c62b04b6be0d83a | /code/文本摘要/analyser.py | 25beafdb91bb39225fe2bbf26ba06dd7d1bf7e70 | [] | no_license | gmftbyGMFTBY/BITNLP | 9b80900d9da39fa27fc1cbe25d7329b448e3fcf1 | 41ddcd4df1277ef429d70ddab8cde751cbecced0 | refs/heads/master | 2021-05-08T23:04:36.954316 | 2018-01-31T14:27:46 | 2018-01-31T14:27:46 | 114,972,685 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,270 | py | #!/usr/bin/python3
# Author : GMFTBY
# Time : 2017.1.23
'''
This script try to use the Edmundson Algorithm to analyse the result of the summary
'''
import TFIDF
def Edmundson(result, answer):
    """Score a machine summary against a reference summary (Edmundson style).

    result : machine-produced summary text
    answer : human reference summary text
    Returns the fraction of reference sentences that also appear in the
    machine summary.
    """
    machine_sentences = set(TFIDF.cut_by_sentence(result))
    reference_sentences = set(TFIDF.cut_by_sentence(answer))
    overlap = machine_sentences & reference_sentences
    return len(overlap) / len(reference_sentences)
if __name__ == "__main__":
    # Demo: compare a machine summary against a human reference summary and
    # print the sentence-overlap score.
    answer = '一度疯狂的IPO申报脚步于近日呈大幅放缓趋势。上周主板无新增初审企业。山东龙大肉食品、福建安溪铁观音集团、广东台城制药和海洋王照明科技4家拟主板企业于上周进行预披露。 创业板 方面,南京宝色、丹东欣泰电气和深圳市凯立德科技也已公布招股说明书。'
    summary = '306家(包括13家中止审查企业)。上周主板无新增初审企业。IPO申报企业基本信息显示。仅有3家拟上市企业进入候审队伍。山东龙大肉食品、福建安溪铁观音集团、广东台城制药和海洋王照明科技4家拟主板企业于上周进行预披露。南京宝色、丹东欣泰电气和深圳市凯立德科技也已公布招股说明书。'
    print(Edmundson(summary, answer))
| [
"18811371908@163.com"
] | 18811371908@163.com |
f7e3df9d2c601ce3a1dde43c93a654fc9db98c82 | 2a28a94fc8eb08961e76c61ab73889135153502b | /test/tests_file_controller.py | 7790e78dace3fe43f4a65345c6179958af38d99a | [
"MIT"
] | permissive | aspose-cells-cloud/aspose-cells-cloud-python | 45fc7e686b442302a29a8223e7dbddb71950438c | 270d70ce7f8f3f2ecd9370b1dacfc4789293097e | refs/heads/master | 2023-09-04T01:29:44.242037 | 2023-08-23T13:13:30 | 2023-08-23T13:13:30 | 123,092,364 | 6 | 5 | null | null | null | null | UTF-8 | Python | false | false | 3,261 | py | # coding: utf-8
from __future__ import absolute_import
import os
import sys
import unittest
import warnings
ABSPATH = os.path.abspath(os.path.realpath(os.path.dirname(__file__)) + "/..")
sys.path.append(ABSPATH)
from asposecellscloud.rest import ApiException
from asposecellscloud.apis.cells_api import CellsApi
import AuthUtil
from asposecellscloud.models import *
from asposecellscloud.requests import *
global_api = None
class TestFileControllerApi(unittest.TestCase):
    """Integration tests for the Cells Cloud file endpoints
    (download / upload / copy / move / delete).

    All tests share one lazily created CellsApi client and operate on the
    same remote copy of Book1.xlsx. The fixture code that was previously
    duplicated in every test method is factored into small helpers.
    """

    # Shared fixture locations.
    remote_folder = 'TestData/In'
    local_name = 'Book1.xlsx'
    remote_name = 'Book1.xlsx'

    def setUp(self):
        warnings.simplefilter('ignore', ResourceWarning)
        global global_api
        if global_api is None:
            global_api = CellsApi(AuthUtil.GetClientId(), AuthUtil.GetClientSecret(), "v3.0", AuthUtil.GetBaseUrl())
        self.api = global_api

    def tearDown(self):
        pass

    def _remote_path(self):
        # Full path of the test workbook on the remote storage.
        return self.remote_folder + '/' + self.remote_name

    def _ensure_uploaded(self):
        # Upload the local workbook and fail fast if nothing was transferred.
        result = AuthUtil.Ready(self.api, self.local_name, self._remote_path(), '')
        self.assertTrue(len(result.uploaded) > 0)

    def test_download_file(self):
        self._ensure_uploaded()
        request = DownloadFileRequest(self._remote_path(), storage_name='', version_id='')
        self.api.download_file(request)

    def test_upload_file(self):
        map_files = {
            self.local_name: os.path.dirname(os.path.realpath(__file__)) + "/../TestData/" + self.local_name
        }
        self._ensure_uploaded()
        request = UploadFileRequest(map_files, self._remote_path(), storage_name='')
        self.api.upload_file(request)

    def test_copy_file(self):
        self._ensure_uploaded()
        request = CopyFileRequest(self._remote_path(), 'OutResult/' + self.remote_name, src_storage_name='', dest_storage_name='', version_id='')
        self.api.copy_file(request)

    def test_move_file(self):
        self._ensure_uploaded()
        request = MoveFileRequest(self._remote_path(), 'OutResult/' + self.remote_name, src_storage_name='', dest_storage_name='', version_id='')
        self.api.move_file(request)

    def test_delete_file(self):
        self._ensure_uploaded()
        request = DeleteFileRequest(self._remote_path(), storage_name='', version_id='')
        self.api.delete_file(request)
if __name__ == '__main__':
unittest.main() | [
"roy.wang@aspose.com"
] | roy.wang@aspose.com |
3bcf02b6585973dc6d8ead444f231a4e92e2c4c0 | 79030ecbe234e6906ec4925f0f816e626e30734a | /QQSpider2_new/mongo_temp.py | ac447a05a4be2424ae27873bddaef5746699b13b | [] | no_license | TSLNIHAOGIT/QQSpider | b6789e53aedb17e8f98c73ef83c15e7a1a6986d1 | 6f5c22e0ad0c22d193ec777e9dcb6294a2536934 | refs/heads/master | 2020-04-14T19:09:49.638777 | 2019-01-04T02:39:13 | 2019-01-04T02:39:13 | 164,047,349 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 588 | py | from pymongo import MongoClient
import datetime
class MongoManager(object):
    """Thin wrapper around a MongoDB client bound to the QQ database."""

    def __init__(self, server_ip='localhost', client=None):
        print(server_ip)
        if client is None:
            self.client = MongoClient(server_ip, 27017)
        else:
            self.client = client
        # self.redis_client = redis.StrictRedis(host=server_ip, port=6379, db=0)
        self.mongo_db = self.client.QQ
        # The collection only appears in the database after the first insert.
        self.mongo_db.query.insert({
            # 'time': datetime.utcnow(),
            'prediction': '''[prediction]'''})
if __name__=='__main__':
MongoManager() | [
"2509072638@qq.com"
] | 2509072638@qq.com |
2440df8c04a8ff17110262f019f91d6b325a3e50 | 716abd9e5ba4b72b72cc5f724a6cc0a6ad4390d1 | /8-Working with Python Modules/37-getpass-module.py | 9b32db78d59efce0394ef03d7cefa784cd9b1224 | [] | no_license | devopstasks/PythonScripting | ac45edd72dc134ec3539b962f02dfc866f365ecf | 48bc37733ae6b3be4e2d64909ffe0962b6908518 | refs/heads/master | 2023-03-29T11:18:01.329452 | 2021-04-07T03:25:20 | 2021-04-07T03:25:20 | 350,388,744 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 609 | py | '''
============================
getpass() prompts the user for a password without echoing. The getpass module provides a secure
way to handle password prompts where programs interact with the users via the terminal.
getuser() function displays the login name of the user. This function checks the environment variables
LOGNAME, USER, LNAME and USERNAME, in order, and returns the value of the first non-empty string.
============================
'''
'''
import getpass
#db_pass=getpass.getpass()
db_pass=getpass.getpass(prompt="Enter your DB Password: ")
print(f"The entered password is {db_pass}")
'''
| [
"rpadhan2015@gmail.com"
] | rpadhan2015@gmail.com |
af02d32f9a6520f9d256420f9097e7778dad4acd | 39689ee725bc7183d5d59fb34f7d2ffe5fd6ad36 | /ABC_A/ABC142A.py | 826ae07f226d78522817a44bd69684709a6e7e52 | [] | no_license | yu5shi8/AtCoder | b6eb920a9046bdfa98012dd3fc65f75f16214ffe | f9ca69001ece8379e3a70c993c44b540f8be2217 | refs/heads/master | 2021-06-15T17:58:07.027699 | 2021-03-20T14:04:03 | 2021-03-20T14:04:03 | 177,757,053 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 245 | py | # -*- coding: utf-8 -*-
# A - Odds of Oddness
# https://atcoder.jp/contests/abc142/tasks/abc142_a
N = int(input())
if N % 2 != 0:
num = N // 2 + 1
else:
num = N // 2
ans = num / N
print('{:.06f}'.format(ans))
# 21:00 - 21:05(AC)
| [
"royal_unicorn411@hotmail.co.jp"
] | royal_unicorn411@hotmail.co.jp |
ed8a4c0c136e477c942f8c3a86c55171d995ac60 | d5be74d2de6fa0ded61d6c3ee7c91a403c0f90db | /tests/behave/features/steps/qrhei.py | d2b7556a64001e67f250b41cc98083845db657ff | [
"MIT"
] | permissive | tmancal74/quantarhei | 43cf9d4be857b8e6db1274ebb8a384f1545cd9ad | fa3042d809005d47106e53609e6a63aa780c477c | refs/heads/master | 2023-05-11T06:57:36.368595 | 2023-05-02T13:10:18 | 2023-05-02T13:10:18 | 63,804,925 | 20 | 22 | MIT | 2022-12-21T14:10:00 | 2016-07-20T18:30:25 | Python | UTF-8 | Python | false | false | 1,870 | py | """
Autogenerated by ghenerate script, part of Quantarhei
http://github.com/tmancal74/quantarhei
Tomas Mancal, tmancal74@gmai.com
Generated on: 2018-06-06 15:00:05
Edit the functions below to give them desired functionality.
In present version of `ghenerate`, no edits or replacements
are perfomed in the feature file text.
"""
import os
from behave import given
from behave import when
from behave import then
import quantarhei.testing.behave as bhv
#
# Given ...
#
@given('that I have a list of examples from qrhei list')
def step_given_1(context):
"""
Given that I have a list of examples from qrhei list
"""
bhv.secure_temp_dir(context)
with bhv.testdir(context):
bhv.shell_command(context, "qrhei list --examples")
text = context.output.decode("utf-8")
items = text.split()
files_to_fetch = []
for item in items:
if item.startswith("ex_"):
files_to_fetch.append(item)
context.files = files_to_fetch
#
# When ...
#
@when('I fetch all examples one by one')
def step_when_2(context):
"""
When I fetch all examples one by one
"""
failures = []
with bhv.testdir(context):
for file in context.files:
bhv.shell_command(context, "qrhei fetch --examples "+file)
print(context.output.decode("utf-8"))
if not os.path.isfile(file):
failures.append("File: "+file+" was not fetched")
context.failures = failures
#
# Then ...
#
@then('examples are all fetchable')
def step_then_3(context):
"""
Then examples are all fetchable
"""
if len(context.failures) > 0:
raise Exception("some examples are not fetchable: "
+str(context.failures))
| [
"tmancal74@gmail.com"
] | tmancal74@gmail.com |
ba9ff63d687e7e07f7e50782cbf97d733c1fef07 | f1a1e764c4ed1238c63c3f4908bd8bebdfb3197f | /opencv-starter/udemy/noise.py | 731911a56b3499f4dbbc9b86c308053b60761107 | [] | no_license | razmik/computer-vision-python | e4eadac1fae4fd0483189a23958ba7c794cbd68e | 21f5557af9804fa01bcfddc085504678828c94ef | refs/heads/master | 2021-01-25T06:35:55.600991 | 2017-06-13T01:40:59 | 2017-06-13T01:40:59 | 93,591,304 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,281 | py | import numpy as np
import os
from skimage.util import random_noise
import matplotlib.pyplot as plt
import scipy.misc
import cv2
"""
Parameters
----------
image : ndarray
Input image data. Will be converted to float.
mode : str
One of the following strings, selecting the type of noise to add:
'gauss' Gaussian-distributed additive noise.
'poisson' Poisson-distributed noise generated from the data.
's&p' Replaces random pixels with 0 or 1.
'speckle' Multiplicative noise using out = image + n*image,where
n is uniform noise with specified mean & variance.
"""
def noisy(image, noise_typ='gauss', var=0.1):
    """Return a copy of *image* corrupted with the requested noise.

    Parameters
    ----------
    image : ndarray
        Input image, 2-D (grayscale) or 3-D (color).
    noise_typ : str
        'gauss'   additive Gaussian noise with variance *var*
        'poisson' Poisson-distributed noise generated from the data
        's&p'     replaces random pixels with 0 or 1
        'speckle' multiplicative noise: out = image + n * image
    var : float
        Variance of the Gaussian noise (ignored by the other modes).
    """
    if noise_typ == "gauss":
        sigma = var ** 0.5
        # Generalized: one code path covers any number of dimensions
        # (the original duplicated identical 2-D and 3-D branches).
        gauss = np.random.normal(0.0, sigma, image.shape)
        return image + gauss
    elif noise_typ == "s&p":
        s_vs_p = 0.5
        amount = 0.004
        out = np.copy(image)
        # Salt mode: set random pixels to 1.
        num_salt = np.ceil(amount * image.size * s_vs_p)
        # Bug fix: advanced indexing requires a *tuple* of index arrays;
        # the original passed a list, which modern NumPy rejects.
        coords = tuple(np.random.randint(0, i - 1, int(num_salt))
                       for i in image.shape)
        out[coords] = 1
        # Pepper mode: set random pixels to 0.
        num_pepper = np.ceil(amount * image.size * (1. - s_vs_p))
        coords = tuple(np.random.randint(0, i - 1, int(num_pepper))
                       for i in image.shape)
        out[coords] = 0
        return out
    elif noise_typ == "poisson":
        # Scale to a power-of-two range so the Poisson draw keeps contrast.
        vals = len(np.unique(image))
        vals = 2 ** np.ceil(np.log2(vals))
        return np.random.poisson(image * vals) / float(vals)
    elif noise_typ == "speckle":
        # Generalized to any ndim (the original assumed exactly 3 axes).
        gauss = np.random.randn(*image.shape)
        return image + image * gauss
"""
Noising image
"""
# filepath = "..\data\group.jpg".replace('\\', '/')
#
# img = cv2.imread(filepath)
# gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
#
# noised_img = noisy(gray, var=1000)
#
# # plt.imshow(gray)
# # plt.show()
#
# # noised_img = random_noise(img) # skimage - add noise function
#
# scipy.misc.imsave('outfile.jpg', noised_img)
"""
Denoising
"""
# Load the previously noised image and compare smoothing filters
# (only the Gaussian-blur variant is active; the rest are kept commented).
filepath = 'outfile.jpg'
img = cv2.imread(filepath)

plt.figure(1)
plt.title('Original noised image')
plt.imshow(img)

# plt.figure(2)
# plt.title('2D Convolution ( Image Filtering )')
# kernal = np.ones((5, 5), np.float32) / 25
# filtered_image = cv2.filter2D(img, -1, kernal)
# plt.imshow(filtered_image)
#
# plt.figure(3)
# plt.title('Averaging')
# blur = cv2.blur(img, (5,5))
# plt.imshow(blur)

plt.figure(4)
plt.title('Gaussian blur')
blur = cv2.GaussianBlur(img, (5,5), 0)
# kernal = cv2.getGaussianKernel(10, 100)
# blur = cv2.filter2D(img, -1, kernal)
plt.imshow(blur)

# plt.figure(5)
# plt.title('Bilateral filtering')
# blur = cv2.bilateralFilter(img,9,75,75)
# plt.imshow(blur)
plt.show() | [
"razmik89@gmail.com"
] | razmik89@gmail.com |
40c3c590320e9ad5cfb2362493d179d28943aeaa | 510a042cc6ead9ee708a85e431bd5d271102da9c | /backend/admin/macro.py | 759fe7ba7c87e9c15bbdb860994805de91adf74c | [
"MIT"
] | permissive | rushilsrivastava/flask-react-spa | 8ad7683f9fc5a8fc2cbc551df6e135b82a8aef19 | 7cf45c92c0db411156fd6fd53c3febc84f81eba7 | refs/heads/master | 2022-11-17T19:03:16.447828 | 2020-07-18T22:44:52 | 2020-07-18T22:44:52 | 265,141,635 | 1 | 0 | MIT | 2020-05-19T04:12:11 | 2020-05-19T04:12:10 | null | UTF-8 | Python | false | false | 691 | py | def macro(name):
"""Replaces flask_admin.model.template.macro, adding support for using
macros imported from another file
For Example::
class FooAdmin(ModelAdmin):
column_formatters = {
'col_name': macro('<macro_import_name_inside_template>.<macro_name>')
}
"""
def wrapper(view, context, model, column):
if '.' in name:
macro_import_name, macro_name = name.split('.')
m = getattr(context.get(macro_import_name), macro_name, None)
else:
m = context.resolve(name)
if not m:
return m
return m(model=model, column=column)
return wrapper
| [
"briancappello@gmail.com"
] | briancappello@gmail.com |
27cdacfee09e4e1c2c9057af17e1eb5ef6004409 | 6f05f7d5a67b6bb87956a22b988067ec772ba966 | /data/test/python/b63a17321b0865613f47eca68b65ccbb2894fa9erun.py | b63a17321b0865613f47eca68b65ccbb2894fa9e | [
"MIT"
] | permissive | harshp8l/deep-learning-lang-detection | 93b6d24a38081597c610ecf9b1f3b92c7d669be5 | 2a54293181c1c2b1a2b840ddee4d4d80177efb33 | refs/heads/master | 2020-04-07T18:07:00.697994 | 2018-11-29T23:21:23 | 2018-11-29T23:21:23 | 158,597,498 | 0 | 0 | MIT | 2018-11-21T19:36:42 | 2018-11-21T19:36:41 | null | UTF-8 | Python | false | false | 1,027 | py | """myapp.py
Usage:
(window1)$ python myapp.py -l info
(window2)$ python
>>> from myapp import add
>>> add.delay(16, 16).get()
32
You can also specify the app to use with celeryd::
$ celeryd -l info --app=myapp.celery
"""
import sys
sys.path.insert(0,'lib')
sys.path.insert(0, 'lib/celery')
sys.path.insert(0, 'lib/kombu')
from celery import Celery
# Broker / result-backend settings: Redis on localhost.
# NOTE: this file is Python 2 (see the print statement below).
BROKER_BACKEND = 'redis'
BROKER_HOST = '127.0.0.1'
BROKER_PORT = 6379
BROKER_VHOST = '12'
CELERY_RESULT_BACKEND = "redis"
REDIS_HOST = '127.0.0.1'
REDIS_PORT = 6379
REDIS_DB = '100'
REDIS_CONNECT_RETRY = True
# Celery application instance; the Redis broker settings are pushed into
# its configuration one key at a time.
celery = Celery("myapp")
#celery.conf.update(BROKER_URL="amqp://guest:guest@localhost:5672//")
celery.conf.update(BROKER_BACKEND='redis')
celery.conf.update(BROKER_HOST='127.0.0.1')
celery.conf.update(BROKER_PORT=6379)
celery.conf.update(BROKER_VHOST='12')
print celery.conf
@celery.task
def add(x, y):
    """Celery task: return x + y (prints a marker so worker activity is visible)."""
    # Fix: 'print expr' is Python-2-only syntax; the parenthesized call form
    # is valid under both Python 2 and Python 3.
    print('i am doing work')
    return x + y
if __name__ == "__main__":
import celery.bin.celeryd
celery.bin.celeryd.main()
| [
"aliostad+github@gmail.com"
] | aliostad+github@gmail.com |
4789e8cbb3ac096ea2f1fcca098891ef05488484 | 4e5141121d8b4015db233cbc71946ec3cfbe5fe6 | /samples/basic/crud/models/cisco-ios-xr/Cisco-IOS-XR-ipv4-arp-cfg/nc-update-xr-ipv4-arp-cfg-10-ydk.py | 8fa6211b688d0ec4c961bf8810acadbbe1a699e5 | [
"Apache-2.0"
] | permissive | itbj/ydk-py-samples | 898c6c9bad9d6f8072892300d42633d82ec38368 | c5834091da0ebedbb11af7bbf780f268aad7040b | refs/heads/master | 2022-11-20T17:44:58.844428 | 2020-07-25T06:18:02 | 2020-07-25T06:18:02 | 282,382,442 | 1 | 0 | null | 2020-07-25T06:04:51 | 2020-07-25T06:04:50 | null | UTF-8 | Python | false | false | 2,663 | py | #!/usr/bin/env python
#
# Copyright 2016 Cisco Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Update configuration for model Cisco-IOS-XR-ipv4-arp-cfg.
usage: nc-update-xr-ipv4-arp-cfg-10-ydk.py [-h] [-v] device
positional arguments:
device NETCONF device (ssh://user:password@host:port)
optional arguments:
-h, --help show this help message and exit
-v, --verbose print debugging messages
"""
from argparse import ArgumentParser
from urlparse import urlparse
from ydk.services import CRUDService
from ydk.providers import NetconfServiceProvider
from ydk.models.cisco_ios_xr import Cisco_IOS_XR_ipv4_arp_cfg \
as xr_ipv4_arp_cfg
import logging
def config_arp(arp):
    """Add config data to arp object.

    Placeholder: intentionally left empty; populate the Arp object with the
    desired configuration before running this script against a device.
    """
    pass
if __name__ == "__main__":
    """Execute main program."""
    # Parse the target device URI and the optional verbosity flag.
    parser = ArgumentParser()
    parser.add_argument("-v", "--verbose", help="print debugging messages",
                        action="store_true")
    parser.add_argument("device",
                        help="NETCONF device (ssh://user:password@host:port)")
    args = parser.parse_args()
    device = urlparse(args.device)

    # log debug messages if verbose argument specified
    if args.verbose:
        logger = logging.getLogger("ydk")
        logger.setLevel(logging.INFO)
        handler = logging.StreamHandler()
        formatter = logging.Formatter(("%(asctime)s - %(name)s - "
                                       "%(levelname)s - %(message)s"))
        handler.setFormatter(formatter)
        logger.addHandler(handler)

    # create NETCONF provider
    provider = NetconfServiceProvider(address=device.hostname,
                                      port=device.port,
                                      username=device.username,
                                      password=device.password,
                                      protocol=device.scheme)
    # create CRUD service
    crud = CRUDService()

    arp = xr_ipv4_arp_cfg.Arp()  # create object
    config_arp(arp)  # add object configuration

    # update configuration on NETCONF device
    # NOTE: the actual device update is intentionally commented out;
    # uncomment the next line to apply the configuration.
    # crud.update(provider, arp)

    exit()
# End of script
| [
"saalvare@cisco.com"
] | saalvare@cisco.com |
e05e89a8cc45a7fd835f8907cb25a8e9832090b9 | f576f0ea3725d54bd2551883901b25b863fe6688 | /sdk/ml/azure-ai-ml/azure/ai/ml/_utils/_storage_utils.py | abe32c22964c58192cf3d71b3014bfcb4b142c57 | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later",
"LicenseRef-scancode-python-cwi",
"PSF-2.0",
"LGPL-2.0-or-later",
"GPL-3.0-or-later",
"GPL-1.0-or-later",
"LicenseRef-scancode-warranty-disclaimer",
"LGPL-2.1-only",
"Python-2.0",
"MPL-2.0",
"LicenseRef-scancode-other-copyleft",
"HPND",
"ODbL-1.0",
"GPL-3.0-only",
"ZPL-2.1",
"Apache-2.0",
"BSD-2-Clause",
"BSD-3-Clause",
"LicenseRef-scancode-free-unknown"
] | permissive | Azure/azure-sdk-for-python | 02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c | c2ca191e736bb06bfbbbc9493e8325763ba990bb | refs/heads/main | 2023-09-06T09:30:13.135012 | 2023-09-06T01:08:06 | 2023-09-06T01:08:06 | 4,127,088 | 4,046 | 2,755 | MIT | 2023-09-14T21:48:49 | 2012-04-24T16:46:12 | Python | UTF-8 | Python | false | false | 8,103 | py | # ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
import logging
import re
from typing import Optional, Tuple, Union
from azure.ai.ml._artifacts._blob_storage_helper import BlobStorageClient
from azure.ai.ml._artifacts._constants import STORAGE_URI_REGEX
from azure.ai.ml._artifacts._fileshare_storage_helper import FileStorageClient
from azure.ai.ml._artifacts._gen2_storage_helper import Gen2StorageClient
from azure.ai.ml._azure_environments import _get_storage_endpoint_from_metadata
from azure.ai.ml._restclient.v2022_10_01.models import DatastoreType
from azure.ai.ml.constants._common import (
FILE_PREFIX,
FOLDER_PREFIX,
JOB_URI_REGEX_FORMAT,
LONG_URI_FORMAT,
LONG_URI_REGEX_FORMAT,
MLFLOW_URI_REGEX_FORMAT,
OUTPUT_URI_REGEX_FORMAT,
SHORT_URI_FORMAT,
SHORT_URI_REGEX_FORMAT,
STORAGE_ACCOUNT_URLS,
)
from azure.ai.ml.exceptions import ErrorTarget, ValidationErrorType, ValidationException
module_logger = logging.getLogger(__name__)
class AzureMLDatastorePathUri:
"""Parser for an azureml:// datastore path URI, e.g.: azureml://datastores/mydatastore/paths/images/dogs'.
:param uri: The AzureML datastore path URI.
:type uri: str
:raises ~azure.ai.ml.exceptions.ValidationException: Raised if the AzureML datastore
path URI is incorrectly formatted.
'
"""
def __init__(self, uri: str):
if uri.startswith(FILE_PREFIX):
uri = uri[len(FILE_PREFIX) :]
elif uri.startswith(FOLDER_PREFIX):
uri = uri[len(FOLDER_PREFIX) :]
self.uri = uri
short_uri_match = re.match(SHORT_URI_REGEX_FORMAT, uri)
ml_flow_uri_match = re.match(MLFLOW_URI_REGEX_FORMAT, uri)
job_uri_match = re.match(JOB_URI_REGEX_FORMAT, uri)
long_uri_match = re.match(LONG_URI_REGEX_FORMAT, uri)
output_uri_match = re.match(OUTPUT_URI_REGEX_FORMAT, uri)
if short_uri_match:
self.datastore = short_uri_match.group(1)
self.path = short_uri_match.group(2)
self.uri_type = "Datastore"
self.workspace_name = None
self.resource_group = None
self.subscription_id = None
elif ml_flow_uri_match:
self.datastore = ml_flow_uri_match.group(1)
self.path = ml_flow_uri_match.group(2)
self.uri_type = "MlFlow"
self.workspace_name = None
self.resource_group = None
self.subscription_id = None
elif job_uri_match:
self.datastore = job_uri_match.group(1)
self.path = job_uri_match.group(2)
self.uri_type = "Job"
self.workspace_name = None
self.resource_group = None
self.subscription_id = None
elif output_uri_match:
self.datastore = output_uri_match.group(1)
self.path = output_uri_match.group(2)
self.uri_type = None
self.workspace_name = None
self.resource_group = None
self.subscription_id = None
elif long_uri_match:
self.datastore = long_uri_match.group(4)
self.path = long_uri_match.group(5)
self.uri_type = "Datastore"
self.workspace_name = long_uri_match.group(3)
self.resource_group = long_uri_match.group(2)
self.subscription_id = long_uri_match.group(1)
else:
msg = "Invalid AzureML datastore path URI {}"
raise ValidationException(
message=msg.format(uri),
no_personal_data_message=msg.format("[uri]"),
target=ErrorTarget.DATASTORE,
error_type=ValidationErrorType.INVALID_VALUE,
)
def to_short_uri(self) -> str:
return SHORT_URI_FORMAT.format(self.datastore, self.path)
def to_long_uri(self, subscription_id: str, resource_group_name: str, workspace_name: str) -> str:
return LONG_URI_FORMAT.format(
subscription_id,
resource_group_name,
workspace_name,
self.datastore,
self.path,
)
def get_uri_type(self) -> str:
if self.uri[0:20] == "azureml://datastores":
return "Datastore"
if self.uri[0:14] == "azureml://jobs":
return "Jobs"
if self.uri[0 : self.uri.find(":")] == "runs":
return "MLFlow"
msg = "Invalid uri format for {}. URI must start with 'azureml://' or 'runs:/'"
raise ValidationException(
message=msg.format(self.uri),
no_personal_data_message=msg.format("[self.uri]"),
target=ErrorTarget.DATASTORE,
error_type=ValidationErrorType.INVALID_VALUE,
)
def get_storage_client(
credential: str,
storage_account: str,
storage_type: Union[DatastoreType, str] = DatastoreType.AZURE_BLOB,
account_url: Optional[str] = None,
container_name: Optional[str] = None,
) -> Union[BlobStorageClient, FileStorageClient, Gen2StorageClient]:
"""Return a storage client class instance based on the storage account type.
:param credential: The credential
:type credential: str
:param storage_account: The storage_account name
:type storage_account: str
:param storage_type: The storage type
:type storage_type: Union[DatastoreType, str]
:param account_url: The account url
:type account_url: Optional[str]
:param container_name: The container name
:type container_name: Optional[str]
:return: The storage client
:rtype: Union[BlobStorageClient, FileStorageClient, Gen2StorageClient]
"""
client_builders = {
DatastoreType.AZURE_BLOB: lambda credential, container_name, account_url: BlobStorageClient(
credential=credential, account_url=account_url, container_name=container_name
),
DatastoreType.AZURE_DATA_LAKE_GEN2: lambda credential, container_name, account_url: Gen2StorageClient(
credential=credential, file_system=container_name, account_url=account_url
),
DatastoreType.AZURE_FILE: lambda credential, container_name, account_url: FileStorageClient(
credential=credential, file_share_name=container_name, account_url=account_url
),
}
if storage_type not in client_builders:
msg = (
f"Datastore type {storage_type} is not supported. Supported storage"
+ f"types for artifact upload include: {*client_builders,}"
)
raise ValidationException(
message=msg,
no_personal_data_message=msg,
target=ErrorTarget.DATASTORE,
error_type=ValidationErrorType.INVALID_VALUE,
)
storage_endpoint = _get_storage_endpoint_from_metadata()
if not account_url and storage_endpoint:
account_url = STORAGE_ACCOUNT_URLS[storage_type].format(storage_account, storage_endpoint)
return client_builders[storage_type](credential, container_name, account_url)
def get_artifact_path_from_storage_url(blob_url: str, container_name: dict) -> str:
split_blob_url = blob_url.split(container_name)
if len(split_blob_url) > 1:
path = split_blob_url[-1]
if path.startswith("/"):
return path[1:]
return path
return blob_url
def get_ds_name_and_path_prefix(asset_uri: str, registry_name: Optional[str] = None) -> Tuple[str, str]:
if registry_name:
try:
split_paths = re.findall(STORAGE_URI_REGEX, asset_uri)
path_prefix = split_paths[0][3]
except Exception as e:
raise Exception("Registry asset URI could not be parsed.") from e
ds_name = None
else:
try:
ds_name = asset_uri.split("paths")[0].split("/")[-2]
path_prefix = asset_uri.split("paths")[1][1:]
except Exception as e:
raise Exception("Workspace asset URI could not be parsed.") from e
return ds_name, path_prefix
| [
"noreply@github.com"
] | Azure.noreply@github.com |
74df06b01faeb3ebf08331a61bb0ae9a7f7e4705 | 2b0654193b3090b309a7ea6240fc57be01c0aa43 | /xam/linear_model/auc_regressor.py | a4052bde26ea46ba09fbb8a0517cc1eb01753302 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | Python-Repository-Hub/xam | 86043af1cc9edd2c779ecb76e2ad5fe20044d3b0 | 93c066990d976c7d4d74b63fb6fb3254ee8d9b48 | refs/heads/master | 2022-04-07T11:33:10.455356 | 2020-02-04T20:38:45 | 2020-02-04T20:38:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 729 | py | from functools import partial
import scipy as sp
from sklearn import linear_model
from sklearn import metrics
class AUCRegressor():
def _auc_loss(self, coef, X, y):
fpr, tpr, _ = metrics.roc_curve(y, sp.dot(X, coef))
return -metrics.auc(fpr, tpr)
def fit(self, X, y, verbose=False):
lr = linear_model.LinearRegression()
auc_partial = partial(self._auc_loss, X=X, y=y)
initial_coef = lr.fit(X, y).coef_
self.coef_ = sp.optimize.fmin(auc_partial, initial_coef, disp=verbose)
def predict(self, X):
return sp.dot(X, self.coef_)
def score(self, X, y):
fpr, tpr, _ = metrics.roc_curve(y, sp.dot(X, self.coef_))
return metrics.auc(fpr, tpr)
| [
"maxhalford25@gmail.com"
] | maxhalford25@gmail.com |
87288fea8165e3ccfadfccce0ad9128d031a9760 | 0644c03cc3f89b0fc22d9e548a2d06e6a594f1b4 | /pabi_base/models/res_investment_structure.py | c3ed23de10b6fea7458a17f6f6a0e81a64860fa6 | [] | no_license | phongyanon/pb2_addons | 552fbf4cd904c81a1fd0ac5817dc1cf8f3377096 | 4c69002eeda2de8e806c8a168d8ba9f28527c8d2 | refs/heads/master | 2021-01-19T13:20:53.749866 | 2017-12-20T11:12:51 | 2017-12-20T11:12:51 | 97,184,424 | 0 | 0 | null | 2017-07-14T02:29:53 | 2017-07-14T02:29:52 | null | UTF-8 | Python | false | false | 3,469 | py | # -*- coding: utf-8 -*-
from openerp import fields, models, api
from openerp.addons.pabi_base.models.res_common import ResCommon
CONSTRUCTION_PHASE = {
'1-design': '1-Design',
'2-control': '2-Control',
'3-construct': '3-Construct',
'4-procure': '4-Procurement',
'5-other': '5-Others',
}
# Investment - Asset
class ResInvestAsset(ResCommon, models.Model):
_name = 'res.invest.asset'
_description = 'Investment Asset'
invest_asset_categ_id = fields.Many2one(
'res.invest.asset.category',
string='Investment Asset Category'
)
org_id = fields.Many2one(
'res.org',
string='Org',
required=False,
)
costcenter_id = fields.Many2one(
'res.costcenter',
string='Costcenter',
required=True,
)
name_common = fields.Char(
string='Common Name',
)
fund_ids = fields.Many2many(
'res.fund',
'res_fund_invest_asset_rel',
'invest_asset_id', 'fund_id',
string='Funds',
default=lambda self: self.env.ref('base.fund_nstda'),
)
objective = fields.Char(
string='Objective',
)
owner_section_id = fields.Many2one(
'res.section',
string='Owner Section',
help="Not related to budgeting, this field hold the "
"section owner of this asset",
)
class ResInvestAssetCategory(ResCommon, models.Model):
_name = 'res.invest.asset.category'
_description = 'Investment Asset Category'
# Investment - Construction
class ResInvestConstruction(ResCommon, models.Model):
_name = 'res.invest.construction'
_description = 'Investment Construction'
phase_ids = fields.One2many(
'res.invest.construction.phase',
'invest_construction_id',
string='Phases',
)
org_id = fields.Many2one(
'res.org',
string='Org',
required=False,
)
costcenter_id = fields.Many2one(
'res.costcenter',
string='Costcenter',
required=True,
)
fund_ids = fields.Many2many(
'res.fund',
'res_fund_invest_construction_rel',
'invest_construction_id', 'fund_id',
string='Funds',
default=lambda self: self.env.ref('base.fund_nstda'),
)
class ResInvestConstructionPhase(ResCommon, models.Model):
_name = 'res.invest.construction.phase'
_description = 'Investment Construction Phase'
_order = 'sequence, id'
sequence = fields.Integer(
string='Sequence',
default=10,
)
invest_construction_id = fields.Many2one(
'res.invest.construction',
string='Investment Construction',
index=True,
ondelete='cascade',
)
phase = fields.Selection(
sorted(CONSTRUCTION_PHASE.items()),
string='Phase',
required=True,
)
fund_ids = fields.Many2many(
'res.fund',
related='invest_construction_id.fund_ids',
string='Funds',
)
_sql_constraints = [
('phase_uniq', 'unique(invest_construction_id, phase)',
'Phase must be unique for a construction project!'),
]
@api.multi
def name_get(self):
result = []
for rec in self:
result.append((rec.id, "[%s] %s - %s" %
(rec.invest_construction_id.code,
rec.invest_construction_id.name,
CONSTRUCTION_PHASE[rec.phase])))
return result
| [
"kittiu@gmail.com"
] | kittiu@gmail.com |
667b89dec3e842d6015cbe4a72ffe366da95bcf0 | 25ebc03b92df764ff0a6c70c14c2848a49fe1b0b | /daily/20161010/example_yaml/01cc.py | e9af9b204c556569002ac8f1244ad7c6ae27567b | [] | no_license | podhmo/individual-sandbox | 18db414fafd061568d0d5e993b8f8069867dfcfb | cafee43b4cf51a321f4e2c3f9949ac53eece4b15 | refs/heads/master | 2023-07-23T07:06:57.944539 | 2023-07-09T11:45:53 | 2023-07-09T11:45:53 | 61,940,197 | 6 | 0 | null | 2022-10-19T05:01:17 | 2016-06-25T11:27:04 | Python | UTF-8 | Python | false | false | 2,719 | py | SafeConstructor.add_constructor(
'tag:yaml.org,2002:null',
SafeConstructor.construct_yaml_null)
SafeConstructor.add_constructor(
'tag:yaml.org,2002:bool',
SafeConstructor.construct_yaml_bool)
SafeConstructor.add_constructor(
'tag:yaml.org,2002:int',
SafeConstructor.construct_yaml_int)
SafeConstructor.add_constructor(
'tag:yaml.org,2002:float',
SafeConstructor.construct_yaml_float)
SafeConstructor.add_constructor(
'tag:yaml.org,2002:binary',
SafeConstructor.construct_yaml_binary)
SafeConstructor.add_constructor(
'tag:yaml.org,2002:timestamp',
SafeConstructor.construct_yaml_timestamp)
SafeConstructor.add_constructor(
'tag:yaml.org,2002:omap',
SafeConstructor.construct_yaml_omap)
SafeConstructor.add_constructor(
'tag:yaml.org,2002:pairs',
SafeConstructor.construct_yaml_pairs)
SafeConstructor.add_constructor(
'tag:yaml.org,2002:set',
SafeConstructor.construct_yaml_set)
SafeConstructor.add_constructor(
'tag:yaml.org,2002:str',
SafeConstructor.construct_yaml_str)
SafeConstructor.add_constructor(
'tag:yaml.org,2002:seq',
SafeConstructor.construct_yaml_seq)
SafeConstructor.add_constructor(
'tag:yaml.org,2002:map',
SafeConstructor.construct_yaml_map)
SafeConstructor.add_constructor(None,
SafeConstructor.construct_undefined)
Constructor.add_constructor(
'tag:yaml.org,2002:python/none',
Constructor.construct_yaml_null)
Constructor.add_constructor(
'tag:yaml.org,2002:python/bool',
Constructor.construct_yaml_bool)
Constructor.add_constructor(
'tag:yaml.org,2002:python/str',
Constructor.construct_python_str)
Constructor.add_constructor(
'tag:yaml.org,2002:python/unicode',
Constructor.construct_python_unicode)
Constructor.add_constructor(
'tag:yaml.org,2002:python/bytes',
Constructor.construct_python_bytes)
Constructor.add_constructor(
'tag:yaml.org,2002:python/int',
Constructor.construct_yaml_int)
Constructor.add_constructor(
'tag:yaml.org,2002:python/long',
Constructor.construct_python_long)
Constructor.add_constructor(
'tag:yaml.org,2002:python/float',
Constructor.construct_yaml_float)
Constructor.add_constructor(
'tag:yaml.org,2002:python/complex',
Constructor.construct_python_complex)
Constructor.add_constructor(
'tag:yaml.org,2002:python/list',
Constructor.construct_yaml_seq)
Constructor.add_constructor(
'tag:yaml.org,2002:python/tuple',
Constructor.construct_python_tuple)
Constructor.add_constructor(
'tag:yaml.org,2002:python/dict',
Constructor.construct_yaml_map)
| [
"ababjam61+github@gmail.com"
] | ababjam61+github@gmail.com |
4746f82e2d5ad8079a4557a45cd4a09eba253752 | 50948d4cb10dcb1cc9bc0355918478fb2841322a | /azure-mgmt-policyinsights/azure/mgmt/policyinsights/models/policy_definition_summary_py3.py | 6ed5afbba2da97206e215dc580d8c25f7cbcb7ab | [
"MIT"
] | permissive | xiafu-msft/azure-sdk-for-python | de9cd680b39962702b629a8e94726bb4ab261594 | 4d9560cfd519ee60667f3cc2f5295a58c18625db | refs/heads/master | 2023-08-12T20:36:24.284497 | 2019-05-22T00:55:16 | 2019-05-22T00:55:16 | 187,986,993 | 1 | 0 | MIT | 2020-10-02T01:17:02 | 2019-05-22T07:33:46 | Python | UTF-8 | Python | false | false | 1,765 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class PolicyDefinitionSummary(Model):
"""Policy definition summary.
:param policy_definition_id: Policy definition ID.
:type policy_definition_id: str
:param policy_definition_reference_id: Policy definition reference ID.
:type policy_definition_reference_id: str
:param effect: Policy effect, i.e. policy definition action.
:type effect: str
:param results: Non-compliance summary for the policy definition.
:type results: ~azure.mgmt.policyinsights.models.SummaryResults
"""
_attribute_map = {
'policy_definition_id': {'key': 'policyDefinitionId', 'type': 'str'},
'policy_definition_reference_id': {'key': 'policyDefinitionReferenceId', 'type': 'str'},
'effect': {'key': 'effect', 'type': 'str'},
'results': {'key': 'results', 'type': 'SummaryResults'},
}
def __init__(self, *, policy_definition_id: str=None, policy_definition_reference_id: str=None, effect: str=None, results=None, **kwargs) -> None:
super(PolicyDefinitionSummary, self).__init__(**kwargs)
self.policy_definition_id = policy_definition_id
self.policy_definition_reference_id = policy_definition_reference_id
self.effect = effect
self.results = results
| [
"lmazuel@microsoft.com"
] | lmazuel@microsoft.com |
ac521f40077e998cd74f25609bde0b95df5e5258 | 8cdc63b549f5a7f1aca7b476a5a918e5c05e38c5 | /app/account/authentication.py | 8a8033971e3e0561994788b43e15e5dada3c0e08 | [
"MIT"
] | permissive | rogeriopaulos/gep | 984e3bcd8bd4569031577e1d28a8c47c6aace91f | e56fd0450bdb8f572e2e35cc59a74ab0f0b372e2 | refs/heads/main | 2023-08-14T08:41:19.558899 | 2021-09-15T02:51:46 | 2021-09-15T02:51:46 | 402,270,601 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 536 | py | # -*- coding: utf-8 -*-
from django.contrib.auth.models import User
class EmailAuthBackend(object):
def authenticate(self, username=None, password=None):
try:
user = User.objects.get(email=username)
if user.check_password(password):
return user
return None
except User.DoesNotExist:
return None
def get_user(self, user_id):
try:
return User.objects.get(pk=user_id)
except User.DoesNotExist:
return None
| [
"rogeriopaulos@gmail.com"
] | rogeriopaulos@gmail.com |
06ae159ec81acf80cbb7b8a38ccee9c9863f9f61 | c22da67f4b1ac5e7fc28a9f81bf9fccefb33308e | /campus/mi/2019_fall_1.py | 6ed4262c1b44d7c133b63eba4ff47ead975e8d70 | [] | no_license | iamkissg/nowcoder | 3b9d7ffffaba2c1ee43647595ae86619e2efb504 | 9b7e590d8f2e200d1ac98672d10f3ae9216a13e1 | refs/heads/master | 2020-05-04T10:35:55.606425 | 2020-04-28T12:49:52 | 2020-04-28T12:49:52 | 179,091,187 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 372 | py | def is_huiwen(ll):
len_ll = len(ll)
if len_ll == 0:
# 假定空链表是回文的
# raise ValueError('Empty input.')
return True
if len_ll == 1:
return True
for i in range(len_ll//2+1):
if ll[i] != ll[len_ll-1-i]:
return False
else:
return True
ll = input().split()
print(is_huiwen(ll)) | [
"enginechen07@gmail.com"
] | enginechen07@gmail.com |
93facca6d7d15082320c04bb9f392522ee8a120d | cf9103d28a1c09bd9d7aeffdb43e95961ae50f5d | /LSTM/data.py | 00861d2151263c1a890e2879b64ba862b2e9ca6a | [] | no_license | AhmedSSoliman/NLP_models | d308993c07edb62809140952be304f222328b46c | c223d2398e55a6a87d45cf8c3ffed543a649bda4 | refs/heads/master | 2023-04-17T05:41:38.261185 | 2021-04-23T11:06:17 | 2021-04-23T11:06:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,978 | py | import os
import torch
#from .tokenizer import tokenize
from collections import defaultdict
import logging
from tqdm import tqdm
#------------------------------------------------------------------------
from nltk.tokenize import sent_tokenize
from nltk.tokenize import word_tokenize
import os
import re
import inflect
from tqdm import tqdm
special_words = {
'english': {
'grown-ups': 'grownups',
'grown-up': 'grownup',
'hasn\'t': 'hasnt',
'hasn‘t': 'hasnt'
},
'french': {
}
}
def tokenize(path, language, vocab=None, path_like=True, train=False):
print('Tokenizing...')
if path_like:
assert os.path.exists(path)
path = open(path, 'r', encoding='utf8').read()
if not train:
print('Preprocessing...')
text = preprocess(path, special_words, language)
print('Preprocessed.')
else:
text = path
# iterator = [unk_transform(item, vocab).lower() for item in text.split()]
iterator = [unk_transform(item, vocab) for item in tqdm(text.split())] # vocab words not lowered
print('Tokenized.')
return iterator
def unk_transform(word, vocab=None):
if word == 'unk':
return '<unk>'
elif not vocab:
return word
elif word in vocab.idx2word:
return word
else:
return '<unk>'
def preprocess(text, special_words, language):
text = text.replace('\n', '')
text = text.replace('<unk>', 'unk')
for word in special_words[language].keys():
text = text.replace(word, special_words[language][word])
transf = inflect.engine()
numbers = re.findall('\d+', text)
for number in numbers:
text = text.replace(number, transf.number_to_words(number))
punctuation = ['.', '\'', ',', ';', ':', '!', '?', '/', '-', '"', '‘', '’', '(', ')', '{', '}', '[', ']', '`', '“', '”', '—']
for item in punctuation:
text = text.replace(item, ' '+ item + ' ')
text = text.replace('. . .', '...')
### tokenize without punctuation ###
# for item in punctuation:
# text = text.replace(item, ' ')
### tokenize with punctuation ###
# ### tokenize thanks to usual tools for text without strange characters ###
# tokenized = sent_tokenize(text, language=language)
# tokenized = [word_tokenize(sentence, language=language) + ['<eos>'] for sentence in tokenized]
# iterator = [unk_transform(item, vocab).lower() for sublist in tokenized for item in sublist]
return text
#------------------------------------------------------------------------
class Dictionary(object):
def __init__(self, path, language):
self.word2idx = {}
self.idx2word = []
self.language = language
self.word2freq = defaultdict(int)
vocab_path = os.path.join(path, 'vocab.txt')
try:
vocab = open(vocab_path, encoding="utf8").read()
self.word2idx = {w: i for i, w in enumerate(vocab.split())}
self.idx2word = [w for w in vocab.split()]
self.vocab_file_exists = True
except FileNotFoundError:
logging.info("Vocab file not found, creating new vocab file.")
self.create_vocab(os.path.join(path, 'train.txt'))
open(vocab_path,"w").write("\n".join([w for w in self.idx2word]))
def add_word(self, word):
self.word2freq[word] += 1
if word not in self.word2idx:
self.idx2word.append(word)
self.word2idx[word] = len(self.idx2word) - 1
def __len__(self):
return len(self.idx2word)
def create_vocab(self, path):
iterator = tokenize(path, self.language, train=True)
for item in tqdm(iterator):
self.add_word(item)
self.add_word('<unk>')
class Corpus(object):
def __init__(self, path, language):
print('Building dictionary...')
self.dictionary = Dictionary(path, language)
print('Dictionary built.')
train_path = os.path.join(path, 'train.txt')
valid_path = os.path.join(path, 'valid.txt')
test_path = os.path.join(path, 'test.txt')
train_tensor = os.path.join(path, 'train.pkl')
valid_tensor = os.path.join(path, 'valid.pkl')
test_tensor = os.path.join(path, 'test.pkl')
try:
with open(train_tensor, 'rb') as f:
self.train = torch.load(f)
with open(valid_tensor, 'rb') as f:
self.valid = torch.load(f)
with open(test_tensor, 'rb') as f:
self.test = torch.load(f)
except FileNotFoundError:
logging.info("Tensor files not found, creating new tensor files.")
print('Computing train tensor...')
self.train = create_tokenized_tensor(tokenize(train_path, language, self.dictionary, train=True), self.dictionary)
print('Train tensor computed.')
print('Computing valid tensor...')
self.valid = create_tokenized_tensor(tokenize(valid_path, language, self.dictionary, train=True), self.dictionary)
print('Valid tensor computed.')
print('Computing test tensor...')
self.test = create_tokenized_tensor(tokenize(test_path, language, self.dictionary, train=True), self.dictionary)
print('Test tensor computed.')
with open(train_tensor, 'wb') as f:
torch.save(self.train, f)
with open(valid_tensor, 'wb') as f:
torch.save(self.valid, f)
with open(test_tensor, 'wb') as f:
torch.save(self.test, f)
def create_tokenized_tensor(iterator, dictionary):
"""Create tensor of embeddings from word iterator."""
tensor = torch.LongTensor(len(iterator))
token = 0
for item in tqdm(iterator):
tensor[token] = dictionary.word2idx[item] if item in dictionary.word2idx else dictionary.word2idx['<unk>']
token += 1
return tensor
| [
"alexandre-pasquiou@orange.fr"
] | alexandre-pasquiou@orange.fr |
d6bebea6f5eb3b2edc9a167664fd7feeb9a2d97a | 24d8cf871b092b2d60fc85d5320e1bc761a7cbe2 | /Wicd/rev519-568/right-branch-568/wicd/backends/be-ip4network/wired/ui/gtkui.py | cdc56a51e6c38b0cc00182b964bced86b604f3bc | [] | no_license | joliebig/featurehouse_fstmerge_examples | af1b963537839d13e834f829cf51f8ad5e6ffe76 | 1a99c1788f0eb9f1e5d8c2ced3892d00cd9449ad | refs/heads/master | 2016-09-05T10:24:50.974902 | 2013-03-28T16:28:47 | 2013-03-28T16:28:47 | 9,080,611 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 459 | py | import sys, os
import logging
sys.path.insert(0, os.path.abspath(
os.path.join(os.path.dirname(
os.path.realpath(__file__)
),
'../..')))
logging.debug(sys.path[0])
from gtkuibase import ShortInterfaceUiBase
sys.path.pop(0)
class WiredShortInterfaceUi(ShortInterfaceUiBase):
def __init__(self, interface):
ShortInterfaceUiBase.__init__(self, interface)
self.image.set_from_icon_name('network-wired', 6)
| [
"joliebig@fim.uni-passau.de"
] | joliebig@fim.uni-passau.de |
15fd3e2083ed3d05dd57c9731f3d952acc1107d6 | 674e5072af9433f0f41d9520a260acf4ac4616f8 | /mysite/mysite/views.py | ef985e7f90c71c47ff414656b28b0326f0966563 | [] | no_license | dh-linux/eg_django | 0446b62e2a691951ae7de30bfd3e2d1b526dd112 | ed225eedc50255303a8305b667c5f57ec367ae71 | refs/heads/master | 2021-04-15T03:33:38.693205 | 2016-08-03T09:08:53 | 2016-08-03T09:08:53 | 64,833,308 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 459 | py | from django.http import HttpResponse
from django.shortcuts import render_to_response, render
def search_form(request):
return render_to_response('search_form.html')
def search(request):
if 'q' in request.GET:
message = 'You search : %s' % request.GET['q']
else:
message = 'You submitted an empty form.'
return HttpResponse(message)
def ua(request):
ua = request.META.get('HTTP_USER_AGENT', 'unknown')
return HttpResponse("Your brower is %s" % ua) | [
"root@localhost.localdomain"
] | root@localhost.localdomain |
9fe28abefb259f74efd5f4ae8626643f992ca4fd | 60ca69e2a4c6b05e6df44007fd9e4a4ed4425f14 | /beginner_contest/165/B.py | 1933a54f16ef1d57c4ed38dcabb1ebdb098de06d | [
"MIT"
] | permissive | FGtatsuro/myatcoder | 12a9daafc88efbb60fc0cd8840e594500fc3ee55 | 25a3123be6a6311e7d1c25394987de3e35575ff4 | refs/heads/master | 2021-06-13T15:24:07.906742 | 2021-05-16T11:47:09 | 2021-05-16T11:47:09 | 195,441,531 | 0 | 0 | MIT | 2021-05-16T11:47:10 | 2019-07-05T16:47:58 | Python | UTF-8 | Python | false | false | 210 | py | import sys
input = sys.stdin.readline
sys.setrecursionlimit(10 ** 7)
x = int(input())
year = 1
total = 100
while True:
total = int(total * 1.01)
if total >= x:
break
year += 1
print(year)
| [
"204491+FGtatsuro@users.noreply.github.com"
] | 204491+FGtatsuro@users.noreply.github.com |
02662542389abbb0e92a4e8e6133371ba0804813 | 41209325da09107de74e5864821e7e429f16df6b | /h2o-py/tests/testdir_demos/notebooks/pyunit_prep_airlines.py | 2392e0832111bebbb52d2087ddd2c48f25f9a74b | [
"Apache-2.0"
] | permissive | Sam7/h2o-3 | 8719fdced9f738db95f525165806dd7e585c53c6 | c107d383ea4e201eea6e3e30129ed3d2748d6e61 | refs/heads/master | 2021-01-18T06:49:02.662728 | 2015-08-04T03:28:38 | 2015-08-04T03:28:38 | 40,160,767 | 0 | 0 | null | 2015-08-04T03:14:37 | 2015-08-04T03:14:37 | null | UTF-8 | Python | false | false | 342 | py | import sys
sys.path.insert(1, "../../../")
import h2o
def prep_airlines(ip,port):
# Connect to a pre-existing cluster
h2o.init(ip,port)
# execute ipython notebook
h2o.ipy_notebook_exec(h2o.locate("h2o-py/demos/prep_airlines.ipynb"),save_and_norun=False)
if __name__ == "__main__":
h2o.run_test(sys.argv, prep_airlines)
| [
"eric.eckstrand@gmail.com"
] | eric.eckstrand@gmail.com |
66f954f76279951de08e260c1e5652866f7842e0 | b3879bc761ac38dab903da57c4061ad79fd70c6d | /курсы пайтон модуль 4/задание 26.py | 2b39f54e860189efac740823746dfecbc9b1c91b | [] | no_license | Ruslan5252/all-of-my-projects-byPyCharm | 4df70cc3a31c4a5d97560fa858a706edcc856299 | 817d5f711408590ea141590ae52c6d888dfa2015 | refs/heads/master | 2023-05-03T01:06:30.156731 | 2021-05-29T13:51:16 | 2021-05-29T13:51:16 | 371,970,160 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 345 | py | n = int(input("Введите количество элементов массива = "))
b = []
m=""
sum = 0
count = 0
for i in range(n):
a = int(input("введите число = "))
b.append(a)
print(b)
m=int(input("введите число m = "))
for i in b:
if i>m:
count+=1
sum+=i
aver=sum/count
print(aver) | [
"r.u.s_2000@mail.ru"
] | r.u.s_2000@mail.ru |
3e54db497d49da0138de012bb12ff3abbe2a31b7 | 08401cff6a54ca358d3d563c0cbc1acf26e9960d | /Simulation_Tool/New_SimulationEnvironment_Ladybug/Sensitivity/SensitivityEnvelopAnnual.py | 99fc1eb259284e048e510c5edccd8aab1fe972b9 | [] | no_license | architecture-building-systems/ASF_Simulation | 5f55ba474a06d48e4e629db77e794874cc376d44 | 8cabb20da689f61891966dfa5d15cc82771050d3 | refs/heads/main | 2022-10-28T08:27:10.781156 | 2022-10-03T19:19:19 | 2022-10-03T19:19:19 | 46,800,943 | 9 | 3 | null | 2022-10-03T19:19:20 | 2015-11-24T15:36:58 | HTML | UTF-8 | Python | false | false | 6,604 | py | """
Created on March 20 2017
@author: Prageeth Jayathissa
"""
import unittest
import os
import sys
import pandas as pd
import time
import numpy as np
from j_paths import PATHS
paths = PATHS()
sys.path.insert(0, paths['5R1C_ISO_simulator'])
sys.path.insert(0, paths['main'])
from SimulationClass import ASF_Simulation
from supplySystem import *
from emissionSystem import *
class TestMainSimulation(unittest.TestCase):
def test_Standard(self):
"""
Runs the ASF Simulation Analysis for multiple archetypes
TODO: Archetypes can only be chosen in the BuildArchetypeDict function. This should be moved here as an input parameter
:Output: all_results: A dataframe of building energy requirements for each archetype evaluated
"""
paths = PATHS()
# SimulationData = {
# 'optimizationTypes' : ['E_total'], #, 'Cooling', 'Heating', 'SolarEnergy', 'Lighting', 'E_HCL'
# 'DataFolderName' : 'ZH13_49comb', #'ZH13_49comb',
# 'FileName': 'ZH13_49comb',
# 'geoLocation' : 'Zuerich_Kloten_2013',
# 'EPWfile': 'Zuerich_Kloten_2013.epw',
# 'Save' : False,
# 'ShowFig': False}
# # Set Building Parameters in [mm]
# BuildingData = {
# "room_width": 4900,
# "room_height": 3100,
# "room_depth": 7000,
# "glazing_percentage_w": 1.0, #0.92
# "glazing_percentage_h": 1.0} #0.97
# PanelData = {
# "XANGLES": [0, 15, 30, 45, 60, 75, 90],
# "YANGLES" : [-45, -30,-15,0, 15, 30, 45],
# "NoClusters":1,
# "numberHorizontal":6,
# "numberVertical":9,
# "panelOffset":400,
# "panelSize":400,
# "panelSpacing":500,
# "panelGridSize" : 25}
##-----Static Simulatioh----
# SimulationData = {
# 'optimizationTypes' : ['E_total'], #, 'Cooling', 'Heating', 'SolarEnergy', 'Lighting', 'E_HCL'
# 'DataFolderName' : 'ZH13_49comb_static_45_0', #'ZH13_49comb_static_45_0',
# 'FileName': 'ZH13_49comb_static_45_0',
# 'geoLocation' : 'Zuerich_Kloten_2013',
# 'EPWfile': 'Zuerich_Kloten_2013.epw',
# 'Save' : False,
# 'ShowFig': False}
# # Set Building Parameters in [mm]
# BuildingData = {
# "room_width": 4900,
# "room_height": 3100,
# "room_depth": 7000,
# "glazing_percentage_w": 0.92,
# "glazing_percentage_h": 0.97}
# PanelData = {
# "XANGLES": [45],
# "YANGLES" : [0],
# "NoClusters":1,
# "numberHorizontal":6,
# "numberVertical":9,
# "panelOffset":400,
# "panelSize":400,
# "panelSpacing":500,
# "panelGridSize" : 25}
###----No ASF Simulatin -----
SimulationData = {
'optimizationTypes' : ['E_total'], #, 'Cooling', 'Heating', 'SolarEnergy', 'Lighting', 'E_HCL'
'DataFolderName' : 'ZH13_NoASF', #'ZH13_49comb_static_45_0',
'FileName': 'ZH13_NoASF',
'geoLocation' : 'Zuerich_Kloten_2013',
'EPWfile': 'Zuerich_Kloten_2013.epw',
'Save' : False,
'ShowFig': False}
# Set Building Parameters in [mm]
BuildingData = {
"room_width": 4900,
"room_height": 3100,
"room_depth": 7000,
"glazing_percentage_w": 0.92,
"glazing_percentage_h": 0.97}
PanelData = {
"XANGLES": [0],
"YANGLES" : [0],
"NoClusters":1,
"numberHorizontal":0,
"numberVertical":0,
"panelOffset":400,
"panelSize":400,
"panelSpacing":500,
"panelGridSize" : 25}
#Set building properties for RC-Model simulator
#Set simulation Properties
SimulationOptions= {
'setBackTempH' : 4.,
'setBackTempC' : 4.,
'Occupancy' : 'Occupancy_COM.csv',
'ActuationEnergy' : False,
"Temp_start" : 20,
'human_heat_emission' : 0.12,}
#U_Range=np.arange(0.2,4.1,0.2)
U_Range=np.arange(0.2,2.1,0.2)
all_results=pd.DataFrame({'Infiltration': []})
all_results.set_index(['Infiltration'], inplace=True)
# loop through building properties and simulation options dictionaries:
for ii,sens in enumerate(U_Range): #range(0, len(runlist)):
BuildingProperties={
"glass_solar_transmittance" : 0.687 ,
"glass_light_transmittance" : 0.744 ,
"lighting_load" : 11.74 ,
"lighting_control" : 300,
"Lighting_Utilisation_Factor" : 0.45,
"Lighting_Maintenance_Factor" : 0.9,
"U_em" : 0.2,
"U_w" : 1.2,
"ACH_vent" : 1.5,
"ACH_infl" :sens,
"ventilation_efficiency" : 0.6 ,
"c_m_A_f" : 165 * 10**3,
"theta_int_h_set" : 20,
"theta_int_c_set" : 26,
"phi_c_max_A_f": -np.inf,
"phi_h_max_A_f": np.inf,
"heatingSupplySystem" : DirectHeater,
"coolingSupplySystem" : COP3Cooler,
"heatingEmissionSystem" : AirConditioning,
"coolingEmissionSystem" : AirConditioning,
}
# Run ASF simulation
ASF_archetype = ASF_Simulation(SimulationData=SimulationData, BuildingData=BuildingData,
BuildingProperties=BuildingProperties, SimulationOptions=SimulationOptions, PanelData=PanelData)
ASF_archetype.SolveASF()
# Add building U_envelope to dataframe and append subsequent iterations:
current_result = ASF_archetype.yearlyData.T
current_result['Infiltration'] = sens
current_result.set_index(['Infiltration'], inplace=True)
temp_list = [all_results, current_result] #TODO: Change this to one line
all_results = pd.concat(temp_list)
print '--simulations complete--'
# write results to csv:
timestr = time.strftime("%d%m%Y_%H%M")
name = 'Sensitivity_' + SimulationData.get('DataFolderName') + '_' + timestr + '.csv'
all_results.to_csv(name)
print all_results
if __name__ == '__main__':
unittest.main() | [
"p.jayathissa@gmail.com"
] | p.jayathissa@gmail.com |
2317d058180aaf1d421414b1a7f8fef85d9d7ffc | fe9573bad2f6452ad3e2e64539361b8bc92c1030 | /Assignment/data_analysis/proving_fairness.py | f22b4ba82046ae8cb0861fd1f78fe43ddc168c11 | [] | no_license | OceanicSix/Python_program | e74c593e2e360ae22a52371af6514fcad0e8f41f | 2716646ce02db00306b475bad97105b260b6cd75 | refs/heads/master | 2022-01-25T16:59:31.212507 | 2022-01-09T02:01:58 | 2022-01-09T02:01:58 | 149,686,276 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 132 | py | from card_analyze import Card_analysis
#create a deck with 4 suits in it
a=Card_analysis(4)
#show the diagram
a.draw_average_value() | [
"byan0007@student.monash.edu"
] | byan0007@student.monash.edu |
d459d0fd5da7c1b4733bab84227575675b742b81 | 3a8110706a67e111305a943ab7590d94782b0f6a | /temp_file.py | 32a89fc94f6b8f71abd85a5d234030af2d27d891 | [] | no_license | pawwahn/python_practice | 41fac14f7107fd8f7c8a33fa7e09561f24bf9376 | 9e6564582abeb9f65c95de86121199939d0ee388 | refs/heads/master | 2022-10-04T10:32:49.952690 | 2022-09-15T09:43:18 | 2022-09-15T09:43:18 | 223,134,205 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,237 | py | text = 'asd sdhosa afo hasdfoi fasd '
pattern = 'afo'
def KnuthMorrisPratt(text, pattern):
'''Yields all starting positions of copies of the pattern in the text.
Calling conventions are similar to string.find, but its arguments can be
lists or iterators, not just strings, it returns all matches, not just
the first one, and it does not need the whole text in memory at once.
Whenever it yields, it will have read the text exactly up to and including
the match that caused the yield.'''
# allow indexing into pattern and protect against change during yield
pattern = list(pattern)
# build table of shift amounts
shifts = [1] * (len(pattern) + 1)
shift = 1
for pos in range(len(pattern)):
while shift <= pos and pattern[pos] != pattern[pos-shift]:
shift += shifts[pos-shift]
shifts[pos+1] = shift
# do the actual search
startPos = 0
matchLen = 0
for c in text:
while matchLen == len(pattern) or \
matchLen >= 0 and pattern[matchLen] != c:
startPos += shifts[matchLen]
matchLen -= shifts[matchLen]
matchLen += 1
if matchLen == len(pattern):
yield startPos | [
"pavan.skt@gmail.com"
] | pavan.skt@gmail.com |
a5256482ac3f5b173c2f34566009130210b5e73d | ef187d259d33e97c7b9ed07dfbf065cec3e41f59 | /work/atcoder/arc/arc044/D/answers/693063_tnk0812.py | 808ab63297d877c466eb4ff8d1c1b5f6b9ceab7a | [] | no_license | kjnh10/pcw | 847f7295ea3174490485ffe14ce4cdea0931c032 | 8f677701bce15517fb9362cc5b596644da62dca8 | refs/heads/master | 2020-03-18T09:54:23.442772 | 2018-07-19T00:26:09 | 2018-07-19T00:26:09 | 134,586,379 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 517 | py | def main():
R = 10**9+7
N = int(input())
a = list(map(int, input().split(" ")))
if a[0] != 0:
return 0
amax = max(a)
h = [0] * (amax+1)
for i in a:
h[i] += 1
if h[0] != 1:
return 0
ans = 1
b = 1
for i in h[1:]:
if i == 0:
return 0
ans *= pow(2, i * (i - 1) // 2, R)
ans %= R
ans *= pow(pow(2, b, R) - 1, i, R)
ans %= R
b = i
return ans
if __name__ == '__main__':
print(main()) | [
"kojinho10@gmail.com"
] | kojinho10@gmail.com |
339ee1a6ae6f55d7ed98ae1c6906e31fd243914d | 91438802ee114b2fb945aae4105a17993dd6953d | /build/learning_ros_noetic/Part_1/example_ros_service/catkin_generated/pkg.installspace.context.pc.py | f8ed8bfecc5e6967674ef6da63cd664c6b2eeaca | [] | no_license | AlexLam616/Baxter-robot | 3a4cef31fe46da0fdb23c0e3b5808d84b412d037 | d10fdcd35f29427ca14bb75f14fa9c64af3b028c | refs/heads/master | 2023-05-12T01:25:56.454549 | 2021-05-25T02:02:09 | 2021-05-25T02:02:09 | 367,070,028 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 469 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "${prefix}/include".split(';') if "${prefix}/include" != "" else []
PROJECT_CATKIN_DEPENDS = "roscpp;std_msgs;nav_msgs;geometry_msgs;message_runtime".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "example_ros_service"
PROJECT_SPACE_DIR = "/home/alex/workspace/install"
PROJECT_VERSION = "0.0.0"
| [
"1155135145@link.cuhk.edu.hk"
] | 1155135145@link.cuhk.edu.hk |
ac99a5886f3a2a1b61a3b4229237a77c3f47b84d | c7014b5d347d63db9293260173642e41a3c73ccc | /.history/bacalab/settings/production_20190521163656.py | b2c5a0106006969ed89b327922ea11aafcd1ea18 | [] | no_license | helder-a-reis/bacalab | 6f03d218d6234fc656814f443afecf9b28f67e59 | 2b34c9ff6f303561d7b787d766d6f20d849bc3f1 | refs/heads/master | 2023-04-27T09:52:29.493627 | 2019-06-13T14:06:38 | 2019-06-13T14:06:38 | 171,745,411 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 455 | py | from .base import *
import django_heroku
ALLOWED_HOSTS = ['bacalab.herokuapp.com']
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'bacalab',
'USER': os.environ.get('DB_USER'),
'PASSWORD': os.environ.get('DB_PWD'),
'HOST': '127.0.0.1',
'PORT': '5432',
}
}
django_heroku.settings(locals()) | [
"helderreis78@gmail.com"
] | helderreis78@gmail.com |
ec626dc70cae86ce5e4964a304e8be838b6eefa0 | 8ce656578e04369cea75c81b529b977fb1d58d94 | /bank_guarantee/management/commands/export_bank_config.py | c52f95d875669bff83d718689ce7e6d95ae76981 | [] | no_license | JJvzd/django_exp | f9a08c40a6a7535777a8b5005daafe581d8fe1dc | b1df4681e67aad49a1ce6426682df66b81465cb6 | refs/heads/master | 2023-05-31T13:21:24.178394 | 2021-06-22T10:19:43 | 2021-06-22T10:19:43 | 379,227,324 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,898 | py | from django.core.management import BaseCommand
from bank_guarantee.models import RequestPrintForm, OfferPrintForm
from clients.models import Bank
class Command(BaseCommand):
""" команда в процессе разработки """
help = 'Экспортирует конфиг банка по коду банка'
def add_arguments(self, parser):
parser.add_argument('bank_code', nargs='?', type=str, default=0)
def pack_users(self, bank):
return [
{
'first_name': user.first_name,
'last_name': user.last_name,
'middle_name': user.middle_name,
'email': user.email,
'username': user.username,
'is_active': user.is_active,
'roles': [role.name for role in user.roles.all()]
}
for user in bank.user_set.all()
]
def pack_print_forms(self, bank):
result = []
for pf in RequestPrintForm.objects.all():
if bank in pf.banks.all():
result.append({
'name': pf.name,
'download_name': pf.download_name,
'type': pf.type,
'filename': pf.filename,
'active': pf.active,
'readonly': pf.readonly,
'in_conclusions': pf.in_conclusions,
'roles': pf.roles,
})
return result
def pack_offer_categories(self, bank):
return [
{
'first_name': user.first_name,
'last_name': user.last_name,
'middle_name': user.middle_name,
'email': user.email,
'username': user.username,
'is_active': user.is_active,
'roles': [role.name for role in user.roles.all()]
}
for user in bank.user_set.all()
]
def pack_offer_print_forms(self, bank):
return [{
'name': pf.name,
'filename': pf.filename,
'type': pf.type,
'download_name': pf.download_name,
'active': pf.active,
} for pf in OfferPrintForm.objects.all()]
def handle(self, *args, **options):
bank_code = options['bank_code'] or None
print(bank_code)
bank = Bank.objects.get(code=bank_code)
data = {
'full_name': bank.full_name,
'short_name': bank.short_name,
'inn': bank.inn,
'ogrn': bank.ogrn,
'settings': {
'': ''
},
'users': self.pack_users(bank),
'print_forms': self.pack_print_forms(bank),
'offer_categories': self.pack_offer_categories(bank),
'offer_print_forms': self.pack_offer_print_forms(bank)
}
print(data)
| [
"javad@MacBook-Pro-Namig.local"
] | javad@MacBook-Pro-Namig.local |
7fad11fabe4b33ddbf34788cfb4383664f2e3679 | 298266f026dd1762f4469f3b1343d0fbde51076c | /core/environnement/base/order_n_price_managment.py | 1a3c681f97e27bcec4209428ba0883e1b93e1b2c | [
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | elvis121193/TradzQAI | 721f7e57cfb9fc1014fc0f7dc4936f9fe6978b43 | 5cb8775833cb438e7e57a676702d05ab1733edb6 | refs/heads/master | 2020-04-01T20:46:49.304246 | 2018-10-08T10:52:11 | 2018-10-08T10:52:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,937 | py | import time
class onpm(object):
def __init__(self):
self.orders = []
self.order_number = 0
self.decay = 0.01
self.max_pip_diff = 5
def bestPriceFunc(self, bids, asks, order, side, tid):
id = tid
if not tid:
id = self.order_number
self.order_number += 1
ordr = dict(
current_order = order,
id = id,
last_price = 0,
start_price = 0,
start_time = time.time()
)
self.orders.append(ordr)
self.orders[id]['current_order'] = order
if (time.time() - self.orders[id]['start_time']) / 60 >= 10:
cancel = True
else:
cancel = False
best_ask, best_bid = None, None
# Mutation protection
while True:
try:
best_bid = float(bids(0)[0][0]['price'])
best_ask = float(asks(0)[0][0]['price'])
break
except:
print ("rofl")
pass
spread = round(best_ask - best_bid, 2)
price = self.orders[id]['last_price']
if side == "buy":
if self.orders[id]['start_price'] - self.orders[id]['last_price'] <= -self.max_pip_diff:
cancel = True
elif spread == 0.01:
price = best_bid
else:
price = best_bid + self.decay
elif side == "sell":
if self.orders[id]['last_price'] - self.orders[id]['start_price'] <= -self.max_pip_diff:
cancel = True
elif spread == 0.01:
price = best_ask
else:
price = best_ask + self.decay
price = round(price, 2)
self.orders[id]['last_price'] = price
if not tid:
self.orders[id]['start_price'] = price
return price, cancel, id
| [
"awakeproduction@hotmail.fr"
] | awakeproduction@hotmail.fr |
40e5c01b7b17b21b34690cf16d24daa0d37bb3b8 | 023763d9f86116381f5765c51fb8b403e8eef527 | /BootCamp_easy/agc012_a.py | c741541fb1fcb7ee964e0dfb2106f92bccd6d35c | [] | no_license | Hilary02/atcoder | d45589682159c0f838561fc7d0bd25f0828e578b | 879c74f3acc7befce75abd10abf1ab43967fc3c7 | refs/heads/master | 2021-07-18T11:34:22.702502 | 2021-07-11T09:04:12 | 2021-07-11T09:04:12 | 144,648,001 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 131 | py | n = int(input())
la = [int(w) for w in input().split()]
la.sort()
ans = 0
for i in range(1, n+1):
ans += la[-2 * i]
print(ans)
| [
"c011605154@edu.teu.ac.jp"
] | c011605154@edu.teu.ac.jp |
9c5152d4fff88be75490b75d261de4694934beb3 | 4fc016459e4c78680c61488c771eb6b7eb20d5fe | /Python-Algorithms-DataStructure/src/leet/166_FractiontoRecurringDecimal.py | b2a0d7150c268c7ce26934a19ec65bceec14faa7 | [] | no_license | coremedy/Python-Algorithms-DataStructure | 7c318de68fd9694377a0a4369d8dbeb49e1e17aa | 3873502679a5def6af4be03028542f07d059d1a9 | refs/heads/master | 2021-01-25T07:34:17.714241 | 2015-11-05T10:17:40 | 2015-11-05T10:17:40 | 27,949,498 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,209 | py | '''
Created on 2015-08-30
'''
class Solution(object):
def fractionToDecimal(self, numerator, denominator):
"""
:type numerator: int
:type denominator: int
:rtype: str
"""
if denominator == 0:
return None
sign, numerator, denominator = "" if numerator * denominator >= 0 else "-", numerator if numerator >= 0 else -numerator, denominator if denominator >= 0 else -denominator
result, nums, d, index, location = str(numerator // denominator), [], dict(), 0, None
if numerator % denominator == 0:
return sign + result
while True:
numerator, index = 10 * (numerator % denominator), index + 1
nums.append(str(numerator // denominator))
if numerator == 0:
break
location = d.get(numerator)
if location is not None:
break
d[numerator] = index - 1
remainder = ''.join(nums[:index - 1]) if numerator == 0 else ''.join(nums[:location]) + '(' + ''.join(nums[location: index - 1]) + ')'
return sign + result + '.' + remainder
if __name__ == '__main__':
pass | [
"coremedy@hotmail.com"
] | coremedy@hotmail.com |
6f00a953f4fa99d16d0061f0dbc1fd5347fbf9e6 | 8e6005ff82a6b37b8c4e2a2fed5791323837d316 | /RecoBTag/SoftLepton/python/softElectronES_cfi.py | 52068ce86695633b8718d2cac44ad50b5f882ba5 | [] | no_license | CMSRA2Tau/gurrola-sl5-on-sl6 | 633050a5ec5fd1a81a15c2e1dcf6b4952b718a9e | f56a99cd7121bcbdf301c2bea9fe397a6b9ef6a1 | refs/heads/master | 2020-04-15T06:13:09.462508 | 2014-12-17T17:57:01 | 2014-12-17T17:57:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 133 | py | import FWCore.ParameterSet.Config as cms
softElectron = cms.ESProducer("ElectronTaggerESProducer",
ipSign = cms.string("any")
)
| [
"andrew.m.melo@vanderbilt.edu"
] | andrew.m.melo@vanderbilt.edu |
8fac7d928bc84de65246152f01be74062f5b0855 | eb9c3dac0dca0ecd184df14b1fda62e61cc8c7d7 | /google/ads/googleads/v6/googleads-py/google/ads/googleads/v6/errors/types/shared_set_error.py | 43a227580d39ce496d5ef5f7db138a54ebe3bf91 | [
"Apache-2.0"
] | permissive | Tryweirder/googleapis-gen | 2e5daf46574c3af3d448f1177eaebe809100c346 | 45d8e9377379f9d1d4e166e80415a8c1737f284d | refs/heads/master | 2023-04-05T06:30:04.726589 | 2021-04-13T23:35:20 | 2021-04-13T23:35:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,250 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package='google.ads.googleads.v6.errors',
marshal='google.ads.googleads.v6',
manifest={
'SharedSetErrorEnum',
},
)
class SharedSetErrorEnum(proto.Message):
r"""Container for enum describing possible shared set errors."""
class SharedSetError(proto.Enum):
r"""Enum describing possible shared set errors."""
UNSPECIFIED = 0
UNKNOWN = 1
CUSTOMER_CANNOT_CREATE_SHARED_SET_OF_THIS_TYPE = 2
DUPLICATE_NAME = 3
SHARED_SET_REMOVED = 4
SHARED_SET_IN_USE = 5
__all__ = tuple(sorted(__protobuf__.manifest))
| [
"bazel-bot-development[bot]@users.noreply.github.com"
] | bazel-bot-development[bot]@users.noreply.github.com |
2abdd38dea5b5bf0ea675c26ada1586f3d849381 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03439/s570157916.py | 2dd9d9548f62cf052083d4afa720b0e4796ac96b | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 288 | py | n = int(input())
print(0)
L = R = input()
if L == "Vacant":
exit()
l = 0
r = n
while True:
m = (l + r) // 2
print(m)
M = input()
if M == "Vacant":
exit()
if (M == L) ^ ((m - l) % 2 == 1):
l = m
L = M
else:
r = m
R = M
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
d338b9ceafda5116c1882198fe560b051f6d981b | 0061ed492e5c566c8b4c9bfef7218b79518803f2 | /python/generate-syllables.py | 8e2321abfb9f6341eebf9f438f4197141b7447dc | [
"CC-BY-4.0"
] | permissive | oneoffcoder/rpa | 4c04e811d22ec28fda17410be21100f27cc56aeb | cb0401e0c38652f998ca1b3bfe49d2084a279be7 | refs/heads/master | 2021-08-04T14:30:10.344198 | 2021-07-23T19:11:59 | 2021-07-23T19:11:59 | 225,539,119 | 11 | 0 | null | null | null | null | UTF-8 | Python | false | false | 773 | py |
import itertools
C = """
c, d, f, h, k, l, m, n, p, q, r, s, t, v, x, y, z,
ch, dh, hl, hm, hn, kh, ml, nc, nk, np, nq, nr, nt, ny, ph, pl, qh, rh, th, ts, tx, xy,
hml, hny, nch, nkh, nph, npl, nqh, nrh, nth, nts, ntx, plh, tsh, txh,
nplh, ntsh, ntxh
""".strip()
V = """
a, e, i, o, u, w,
ai, au, aw, ee, ia, oo, ua
""".strip()
T = """
j, s, v, m, g, b
""".strip()
C = [c.strip() for c in C.split(',')]
V = [v.strip() for v in V.split(',')]
T = [t.strip() for t in T.split(',')]
print(f'C = {len(C)}, V = {len(V)}, T = {len(T)}')
c_frag = '|'.join([f"'{c}'" for c in C])
v_frag = '|'.join([f"'{c}'" for c in V])
t_frag = '|'.join([f"'{c}'" for c in T])
c_frag = f'({c_frag})'
v_frag = f'({v_frag})'
t_frag = f'({t_frag})'
print(c_frag)
print(v_frag)
print(t_frag)
| [
"vangjee@gmail.com"
] | vangjee@gmail.com |
7d5c1b44ad5a8a64637df9196e677c4996b7dde8 | e23a4f57ce5474d468258e5e63b9e23fb6011188 | /125_algorithms/_exercises/templates/_algorithms_challenges/pybites/advanced/2_v3/regex.py | 274f3408853cc0593272182183e8fe7bd31963bf | [] | no_license | syurskyi/Python_Topics | 52851ecce000cb751a3b986408efe32f0b4c0835 | be331826b490b73f0a176e6abed86ef68ff2dd2b | refs/heads/master | 2023-06-08T19:29:16.214395 | 2023-05-29T17:09:11 | 2023-05-29T17:09:11 | 220,583,118 | 3 | 2 | null | 2023-02-16T03:08:10 | 2019-11-09T02:58:47 | Python | UTF-8 | Python | false | false | 1,621 | py | # _______ __
# ____ pp__ _______ pp__
#
# COURSE ('Introduction 1 Lecture 01:47'
# 'The Basics 4 Lectures 32:03'
# 'Getting Technical! 4 Lectures 41:51'
# 'Challenge 2 Lectures 27:48'
# 'Afterword 1 Lecture 05:02')
# TWEET ('New PyBites article: Module of the Week - Requests-cache '
# 'for Repeated API Calls - http://pybit.es/requests-cache.html '
# '#python #APIs')
# HTML ('<p>pybites != greedy</p>'
# '<p>not the same can be said REgarding ...</p>')
#
#
# ___ extract_course_times course_?
# """Return the course timings from the passed in
# course string. Timings are in mm:ss (minutes:seconds)
# format, so taking COURSE above you would extract:
# ['01:47', '32:03', '41:51', '27:48', '05:02']
# Return this list.
# """
# r.. __.f.. _ (\d\d:\d\d) ?
#
#
# ___ get_all_hashtags_and_links tweet_?
# """Get all hashtags and links from the tweet text
# that is passed into this function. So for TWEET
# above you need to extract the following list:
# ['http://pybit.es/requests-cache.html',
# '#python',
# '#APIs']
# Return this list.
# """
# r.. __.f.. _ (#\w+|https?://[^\s]*)' ?
#
#
# ___ match_first_paragraph html_?
# """Extract the first paragraph of the passed in
# html, so for HTML above this would be:
# 'pybites != greedy' (= content of first paragraph).
# Return this string.
# """
# result __.s.. _ <p>(.+?)</p> ?
# r.. ?.g.. 1 __ ? ____ ''
#
# __ _______ __ _______
# pp__ ?
# pp__ ?
# pp__ ?
| [
"sergejyurskyj@yahoo.com"
] | sergejyurskyj@yahoo.com |
666f8021d9d99254a7a69fb2f55c76a50c261736 | 9d29861e44389e02762e6eb0457c6a415a54e26c | /samples/cloud_monitoring/util.py | 4a83bb03cbb3e7051ea9b5ba34375c07d307ea8c | [
"MIT"
] | permissive | itsmemattchung/pyrax | e787d67f8a79036834575f951f8c9e81d64d8b8f | e8eff127a5c9b6e64a9a42593d5e889c3c03f81d | refs/heads/master | 2021-01-18T10:14:31.752469 | 2015-05-16T16:44:35 | 2015-05-16T16:44:35 | 21,360,598 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,694 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c)2013 Rackspace US, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import sys
def option_chooser(options, attr=None):
"""Given an iterable, enumerate its contents for a user to choose from.
If the optional `attr` is not None, that attribute in each iterated
object will be printed.
This function will exit the program if the user chooses the escape option.
"""
for num, option in enumerate(options):
if attr:
print("%s: %s" % (num, getattr(option, attr)))
else:
print("%s: %s" % (num, option))
# Add an escape option
escape_opt = num + 1
print("%s: I want to exit!" % escape_opt)
choice = raw_input("Selection: ")
try:
ichoice = int(choice)
if ichoice > escape_opt:
raise ValueError
except ValueError:
print("Valid entries are the numbers 0-%s. Received '%s'." % (escape_opt,
choice))
sys.exit()
if ichoice == escape_opt:
print("Bye!")
sys.exit()
return ichoice
| [
"ed@leafe.com"
] | ed@leafe.com |
91ffe969a51f4efe6edb0ebab8465bbed5120892 | bb35185816208aaeb73016b7380e01a2d6b86278 | /lists/migrations/0003_list.py | a52176c79813f19f0b541d426d089bd9720f86e7 | [] | no_license | the-fool/goat_driven_development | fa70dee5dc4dd48180ff6b5302e6ef966f2e207a | 0d55ca75e29b4a90c4e54500df88c4b23854ab81 | refs/heads/master | 2021-01-10T02:32:16.947425 | 2016-02-15T17:25:37 | 2016-02-15T17:25:37 | 51,492,427 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 505 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-02-12 16:08
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('lists', '0002_item_text'),
]
operations = [
migrations.CreateModel(
name='List',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
],
),
]
| [
"sketchbang@gmail.com"
] | sketchbang@gmail.com |
df3ff938a6aae98b752d7f465f52fbbab2f796c3 | 78d949674cd1e6261ee23372e6ff24897ba6bc9a | /user/migrations/0076_auto_20190310_0610.py | b9c330c739a0bf271a6f0b0cb827de029ff57fad | [] | no_license | Aravindhan-M/first_project | 60377a65dec777ce6a55a57358dccb96ef6c396d | 68f5815aa7a7d7fa695bb753b5312f66bb201fe1 | refs/heads/master | 2022-12-14T11:23:56.782700 | 2019-08-09T09:53:17 | 2019-08-09T09:53:17 | 185,782,547 | 2 | 0 | null | 2022-12-08T01:45:24 | 2019-05-09T11:02:59 | Python | UTF-8 | Python | false | false | 415 | py | # Generated by Django 2.1.5 on 2019-03-10 03:10
from django.db import migrations
import user.managers
class Migration(migrations.Migration):
dependencies = [
('user', '0075_merge_20190310_0257'),
]
operations = [
migrations.AlterModelManagers(
name='user',
managers=[
('objects', user.managers.UserManager()),
],
),
]
| [
"aravindhan@sayonetech.com"
] | aravindhan@sayonetech.com |
cd254179e403b8ca9bffcc103ef4ccd10d84811c | 75452de12ec9eea346e3b9c7789ac0abf3eb1d73 | /scripts/fuzzing/test/factory_fake.py | 629dfd2ea6fcc8d2db3e8661a32834b1072f5f8b | [
"BSD-3-Clause"
] | permissive | oshunter/fuchsia | c9285cc8c14be067b80246e701434bbef4d606d1 | 2196fc8c176d01969466b97bba3f31ec55f7767b | refs/heads/master | 2022-12-22T11:30:15.486382 | 2020-08-16T03:41:23 | 2020-08-16T03:41:23 | 287,920,017 | 2 | 2 | BSD-3-Clause | 2022-12-16T03:30:27 | 2020-08-16T10:18:30 | C++ | UTF-8 | Python | false | false | 3,616 | py | #!/usr/bin/env python2.7
# Copyright 2020 The Fuchsia Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import sys
import test_env
from lib.factory import Factory
from host_fake import FakeHost
from lib.buildenv import BuildEnv
from lib.device import Device
class FakeFactory(Factory):
"""Fake factory that creates objects for testing.
Unlike the real factory, this object caches and reuses created BuildEnvs
and Devices. It also allows tests to access created objects for
examination.
Attributes:
buildenv: The associated BuildEnv object.
device: The associated Device object.
fuzzer: The most recently created FakeFuzzer object.
"""
def __init__(self):
super(FakeFactory, self).__init__(host=FakeHost())
self._parser = None
self._buildenv = None
self._device = None
self._fuzzer = None
# Factory created objects, lazily instantiated.
@property
def parser(self):
"""The associated ArgParser object."""
if not self._parser:
self._parser = self.create_parser()
return self._parser
@property
def buildenv(self):
"""The associated BuildEnv object."""
if not self._buildenv:
self._buildenv = self.create_buildenv()
return self._buildenv
@property
def device(self):
"""The associated Device object."""
if not self._device:
self._device = self.create_device()
return self._device
@property
def fuzzer(self):
"""The most recently created Fuzzer object."""
assert self._fuzzer, 'No fuzzer created.'
return self._fuzzer
# Methods to create objects.
def create_buildenv(self):
"""Returns the factory's build environment, creating it if needed."""
fuchsia_dir = self.host.getenv('FUCHSIA_DIR')
self.host.mkdir(fuchsia_dir)
buildenv = BuildEnv(self.host, fuchsia_dir)
build_dir = 'build_dir'
self.host.mkdir(buildenv.path(build_dir))
self.host.touch(buildenv.path(build_dir, 'host_x64', 'symbolize'))
self.host.touch(
buildenv.path(
'prebuilt', 'third_party', 'clang', self.host.platform, 'bin',
'llvm-symbolizer'))
self.host.mkdir(
buildenv.path(
'prebuilt', 'third_party', 'clang', self.host.platform, 'lib',
'debug', '.build-id'))
self.host.mkdir(buildenv.path(build_dir, '.build-id'))
self.host.mkdir(buildenv.path(build_dir + '.zircon', '.build-id'))
self.host.touch(buildenv.path(build_dir, 'ssh-keys', 'ssh_config'))
buildenv.configure(build_dir)
buildenv.add_fuzzer('fake-package1', 'fake-target1')
buildenv.add_fuzzer('fake-package1', 'fake-target2')
buildenv.add_fuzzer('fake-package1', 'fake-target3')
buildenv.add_fuzzer('fake-package2', 'fake-target1')
buildenv.add_fuzzer('fake-package2', 'fake-target11')
buildenv.add_fuzzer('fake-package2', 'an-extremely-verbose-target-name')
return buildenv
def create_device(self):
"""Returns the factory's device, creating it if needed."""
device = Device(self.create_buildenv(), '::1')
device.configure()
return device
def create_fuzzer(self, args, device=None):
self._fuzzer = super(FakeFactory, self).create_fuzzer(
args, device=device)
return self.fuzzer
| [
"commit-bot@chromium.org"
] | commit-bot@chromium.org |
1bbb76b0c9c1c2dacf2770af4281dbfd3e6218a5 | eee0b990f4b09c3418d9fdce6ef733cf6df99fc8 | /ansible_fsm/merge.py | 35069b8bbf7978681723f9d604514aa3a57bb7d4 | [
"Apache-2.0"
] | permissive | benthomasson/ansible-fsm | 6a463339e9ca82fbf3bf36c09b5a978baba1ba5a | fc82655c66040bfee1bb3b9defb1c93fbb0d0f97 | refs/heads/master | 2020-04-08T19:55:59.943230 | 2019-01-30T13:39:36 | 2019-01-30T13:39:36 | 159,676,903 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,819 | py | from collections import OrderedDict
from pprint import pprint
def _merge_list_of_named_items(merged, a, b, sub_merge_fn):
"""
Merge two lists by merging items with the same name.
"""
a_items = {x['name']: x for x in a}
b_items = OrderedDict()
for item_b in b:
b_items[item_b['name']] = item_b
for key in b_items.keys():
merged.append(sub_merge_fn(b_items[key], a_items.get(key, {})))
for key in a_items.keys():
if key in b_items:
continue
merged.append(a_items[key])
def _merge_ordered_dicts(merged, a, b, skip_keys=[]):
"""
Merge two ordered dicts and preserve the order of the keys from b then add keys from a that are not in b.
"""
for key in b.keys():
if key in skip_keys:
pass
else:
merged[key] = b[key]
for key in a.keys():
if key in skip_keys:
pass
elif key in b:
continue
else:
merged[key] = a[key]
def merge_ast(a, b):
"""
Merge two ASTs by merging FSMs with the same name.
"""
merged = []
_merge_list_of_named_items(merged, a, b, merge_fsm)
return merged
def merge_fsm(a, b):
"""
Merge two FSMs and preserve the order of the keys from b then add keys from a that are not in b.
"""
merged = OrderedDict()
_merge_ordered_dicts(merged, a, b, skip_keys=['states'])
merged['states'] = []
_merge_list_of_named_items(merged['states'], a.get('states', []), b.get('states', []), merge_state)
return merged
def merge_state(a, b):
merged = OrderedDict()
_merge_ordered_dicts(merged, a, b, skip_keys=['handlers'])
merged['handlers'] = {}
_merge_ordered_dicts(merged['handlers'], a.get('handlers', {}), b.get('handlers', {}))
return merged
| [
"bthomass@redhat.com"
] | bthomass@redhat.com |
7f66c61d3053bbfe625c83c90d9fc0a051416dfb | 3947a6283fd42413dcf68aa3133a13208d17c50e | /FinderTemplate.py | 82d5d398b427b344db5ae7652429abc8c2a2a516 | [] | no_license | Mystified131/FindingStrings | 2d60ac114303c7ebdf54b43a1fecc86fb768228c | 8d3a91957d66c4760818279e99c7579565c19cf0 | refs/heads/master | 2020-05-20T15:55:08.417673 | 2019-05-13T14:49:28 | 2019-05-13T14:49:28 | 185,654,392 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 388 | py | #This code imports the modules
import re
import requests
#This code retrieves a blob of text from the remote target
page = requests.get("http://www.thomasparksolutions.com").text
#This code makes a list of the substrings sought
#Replace the string Python with your desired regex
results = re.findall('(Thomas)',page)
#This code prints the result(s)
for i in results:
print (i)
| [
"mystifiedthomas@gmail.com"
] | mystifiedthomas@gmail.com |
7bceaa84866c21708fd5eef20d623749a08a1709 | 46e57ccd746256fa4e471e53d8c24ce285635f50 | /专题训练/数组/中等/3sum Closest.py | 709fa3231efcccf5eb486141b9cdf79c39ec31f7 | [] | no_license | sandwu/leetcode_problems | 6aea1f1837f5a8cfebc8f9c4cd8863081dfabb87 | b99ef39268c5713fae3dbbf0a5548ec1f5a7e434 | refs/heads/master | 2021-08-16T15:48:05.412320 | 2020-04-26T07:04:10 | 2020-04-26T07:04:10 | 168,446,663 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,281 | py |
class Solution(object):
"""
和15题的思路差不多,利用先求得三者的和,然后进行与target最小值的判断,保持当前最小值sum1,然后逐步推进数组,直到l>=r,所以是个O(n^2)
Runtime: 72 ms, faster than 75.37% of Python online submissions for 3Sum Closest.
Memory Usage: 11 MB, less than 100.00% of Python online submissions for 3Sum Closest.
"""
def threeSumClosest(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: int
"""
nums.sort()
n = len(nums)
sum1 = nums[0] + nums[1] + nums[2]
for i in range(1,n):
l,r = i+1, n-1
while l < r:
res = nums[i] + nums[l] + nums[r]
if abs(sum1-target) > abs(res-target): #相当于一直维护当前的最靠近target的值
sum1 = res
if res == target:
return res
elif res < target:
while l < r and nums[l]==nums[l-1]:
l += 1
l += 1
else:
while l < r and nums[r] == nums[r-1]:
r -= 1
r -= 1
return sum1 | [
"wuxinlingwxl@163.com"
] | wuxinlingwxl@163.com |
e6507917417689eff40a169e51c981fd54ab8bd0 | 63eb05febaac75f781a266d48d1cfff2debe64ea | /the_tale/game/bills/tests/test_building_destroy.py | 3062c4ba21cf15e1d64eaa62978d2dd74b318cee | [
"BSD-2-Clause-Views"
] | permissive | MadRiw/the-tale | 185ca33e410a59de63a594daf15fc8a5701338d2 | 1801beab2ed149556c0b3380e8adaaa976f74e6c | refs/heads/master | 2021-01-15T23:45:34.873857 | 2015-06-17T13:06:12 | 2015-06-17T13:06:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,269 | py | # coding: utf-8
import mock
import datetime
from the_tale.game import names
from the_tale.game.map.places.models import Building
from the_tale.game.map.places.prototypes import BuildingPrototype
from the_tale.game.map.places.storage import buildings_storage
from the_tale.game.map.places.relations import BUILDING_STATE
from the_tale.game.bills.relations import BILL_STATE
from the_tale.game.bills.prototypes import BillPrototype, VotePrototype
from the_tale.game.bills.bills import BuildingDestroy
from the_tale.game.bills.tests.helpers import BaseTestPrototypes
class BuildingDestroyTests(BaseTestPrototypes):
    """Tests for the BuildingDestroy bill: creation, editing and applying."""

    def setUp(self):
        super(BuildingDestroyTests, self).setUp()
        # One person per place; a building is attached to the first two.
        self.person_1 = self.place1.persons[0]
        self.person_2 = self.place2.persons[0]
        self.person_3 = self.place3.persons[0]
        self.building_1 = BuildingPrototype.create(self.person_1, utg_name=names.generator.get_test_name('building-name-1'))
        self.building_2 = BuildingPrototype.create(self.person_2, utg_name=names.generator.get_test_name('building-name-2'))
        # The bill under test targets person_1 (and therefore building_1).
        self.bill_data = BuildingDestroy(person_id=self.person_1.id, old_place_name_forms=self.place1.utg_name)
        self.bill = BillPrototype.create(self.account1, 'bill-1-caption', 'bill-1-rationale', self.bill_data, chronicle_on_accepted='chronicle-on-accepted')

    def test_create(self):
        # The bill data remembers the targeted person.
        self.assertEqual(self.bill.data.person_id, self.person_1.id)

    def test_actors(self):
        # The only actor of the bill is the place of the targeted person.
        self.assertEqual([id(a) for a in self.bill_data.actors], [id(self.person_1.place)])

    def test_update(self):
        # Updating through the user form retargets the bill to person_2.
        form = self.bill.data.get_user_form_update(post={'caption': 'new-caption',
                                                         'rationale': 'new-rationale',
                                                         'chronicle_on_accepted': 'chronicle-on-accepted-2',
                                                         'person': self.person_2.id })
        self.assertTrue(form.is_valid())
        self.bill.update(form)
        self.bill = BillPrototype.get_by_id(self.bill.id)
        self.assertEqual(self.bill.data.person_id, self.person_2.id)

    def test_user_form_choices(self):
        # Only persons that own a building are offered in the form choices.
        form = self.bill.data.get_user_form_update(initial={'person': self.bill.data.person_id })
        persons_ids = []
        for city_name, person_choices in form.fields['person'].choices:
            persons_ids.extend(choice_id for choice_id, choice_name in person_choices)
        self.assertEqual(set(persons_ids), set([self.person_1.id, self.person_2.id]))

    @mock.patch('the_tale.game.bills.conf.bills_settings.MIN_VOTES_PERCENT', 0.6)
    @mock.patch('the_tale.game.bills.prototypes.BillPrototype.time_before_voting_end', datetime.timedelta(seconds=0))
    def test_apply(self):
        # Applying an approved bill destroys exactly the targeted building.
        self.assertEqual(Building.objects.filter(state=BUILDING_STATE.WORKING).count(), 2)
        VotePrototype.create(self.account2, self.bill, False)
        VotePrototype.create(self.account3, self.bill, True)
        form = BuildingDestroy.ModeratorForm({'approved': True})
        self.assertTrue(form.is_valid())
        self.bill.update_by_moderator(form)
        self.assertTrue(self.bill.apply())
        bill = BillPrototype.get_by_id(self.bill.id)
        self.assertTrue(bill.state.is_ACCEPTED)
        self.assertEqual(Building.objects.filter(state=BUILDING_STATE.WORKING).count(), 1)
        self.assertEqual(len(buildings_storage.all()), 1)
        building = buildings_storage.all()[0]
        self.assertNotEqual(building.id, self.building_1.id)

    @mock.patch('the_tale.game.bills.conf.bills_settings.MIN_VOTES_PERCENT', 0.6)
    @mock.patch('the_tale.game.bills.prototypes.BillPrototype.time_before_voting_end', datetime.timedelta(seconds=0))
    def test_duplicate_apply(self):
        # Re-applying the same bill must not destroy a second building.
        self.assertEqual(Building.objects.filter(state=BUILDING_STATE.WORKING).count(), 2)
        VotePrototype.create(self.account2, self.bill, False)
        VotePrototype.create(self.account3, self.bill, True)
        form = BuildingDestroy.ModeratorForm({'approved': True})
        self.assertTrue(form.is_valid())
        self.bill.update_by_moderator(form)
        self.assertTrue(self.bill.apply())
        bill = BillPrototype.get_by_id(self.bill.id)
        bill.state = BILL_STATE.VOTING
        bill.save()
        self.assertTrue(bill.apply())
        self.assertEqual(Building.objects.filter(state=BUILDING_STATE.WORKING).count(), 1)

    @mock.patch('the_tale.game.bills.conf.bills_settings.MIN_VOTES_PERCENT', 0.6)
    @mock.patch('the_tale.game.bills.prototypes.BillPrototype.time_before_voting_end', datetime.timedelta(seconds=0))
    def test_no_building(self):
        # Applying the bill when the building is already destroyed still succeeds.
        self.assertEqual(Building.objects.filter(state=BUILDING_STATE.WORKING).count(), 2)
        VotePrototype.create(self.account2, self.bill, False)
        VotePrototype.create(self.account3, self.bill, True)
        form = BuildingDestroy.ModeratorForm({'approved': True})
        self.assertTrue(form.is_valid())
        self.bill.update_by_moderator(form)
        self.building_1.destroy()
        self.building_1.save()
        self.assertTrue(self.bill.apply())
        self.assertEqual(Building.objects.filter(state=BUILDING_STATE.WORKING).count(), 1)
| [
"a.eletsky@gmail.com"
] | a.eletsky@gmail.com |
77f81016ce59e4a6cc2945653f32bd7d8102a644 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02862/s826059466.py | e92f0b2b83b7a167580486ae93c9f6ca0120f581 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 689 | py | import sys
import sys

# AtCoder ABC145 D: count paths of knight-like moves (+1,+2)/(+2,+1)
# from (0, 0) to (X, Y), modulo 10^9+7.
X, Y = map(int, input().split())

MOD = 10 ** 9 + 7
LIMIT = 10 ** 6  # upper bound for the factorial tables

# fact[i] = i! mod MOD; inv_fact[i] = (i!)^-1 mod MOD (via Fermat's little theorem).
fact = [1] * (LIMIT + 1)
for i in range(2, LIMIT + 1):
    fact[i] = fact[i - 1] * i % MOD
inv_fact = [1] * (LIMIT + 1)
inv_fact[LIMIT] = pow(fact[LIMIT], MOD - 2, MOD)
for i in range(LIMIT, 2, -1):
    inv_fact[i - 1] = inv_fact[i] * i % MOD


def binom(n, r):
    """Binomial coefficient C(n, r) modulo MOD (0 when r is out of range)."""
    if r < 0 or r > n:
        return 0
    return fact[n] * inv_fact[r] % MOD * inv_fact[n - r] % MOD


# With a moves of (+1,+2) and b moves of (+2,+1): a + 2b = X, 2a + b = Y.
a3 = 2 * Y - X
b3 = 2 * X - Y
if a3 % 3 != 0 or b3 % 3 != 0 or a3 < 0 or b3 < 0:
    print(0)
    sys.exit(0)

# Answer: choose the positions of the a moves among a + b total moves.
print(binom((a3 + b3) // 3, a3 // 3))
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
c50b7650fc3230e08546d4b0bde6a864e55bb463 | e341bc45e2b889ae5f88576e6ef7c3e9c84a9f60 | /dojo/a/Python_OOP_Slack/Python_OOP/Human/server.py | 8c4b6aecd0d364dde3c270cacc0b19c2498b5d87 | [] | no_license | jhflorey/Web_Automation_Framework | 8efb5b91a9b4ef01f2c8bde0856ea8e85467ca1d | 41bc124d3eaf2985667afa399e4789c26de8744a | refs/heads/master | 2021-01-25T07:54:47.907619 | 2017-06-09T00:34:03 | 2017-06-09T00:34:03 | 93,682,430 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 912 | py | import random
class Human(object):
    """A simple RPG-style character with health and combat stats."""

    def __init__(self, clan=None):
        print('New Human!!!')
        self.health = 100
        self.clan = clan
        self.strength = 3
        self.intelligence = 3
        self.stealth = 3

    def taunt(self):
        """Print a taunt line.

        BUG FIX: the original body created a new Human here and called its
        taunt(), which recursed forever; the method now only prints.
        """
        print("You want a piece of me?")

    def attack(self):
        """Attempt an attack; return True on success, False otherwise.

        A luck roll <= 50 costs the attacker `strength` health points.
        """
        self.taunt()
        luck = round(random.random() * 100)
        if luck > 50:
            if luck * self.stealth > 150:
                print('attacking!')
                return True
            print('attack failed')
            return False
        self.health -= self.strength
        print("attack failed")
        return False
class Cat(object):
    """Plain data holder describing a cat.

    Note: the `type` parameter name shadows the builtin; kept for
    backward compatibility with existing callers.
    """

    def __init__(self, color, type, age):
        # Store the constructor arguments directly on the instance.
        self.color, self.type, self.age = color, type, age
# Demo: create one Human and one Cat and print their attributes.
michael = Human('clan')
print(michael.health)

# BUG FIX: Cat.__init__ takes three separate arguments; the original passed
# a single combined string ('orange, fat, 5'), which raised a TypeError.
garfield = Cat('orange', 'fat', 5)
print(garfield.color)
print(garfield.age)
print(garfield.type)
| [
"jhflorey@gmail.com"
] | jhflorey@gmail.com |
24d8aaf428c14575bceaaa1eb0d3fc8997296484 | 902facd06f1f37f2a65a8e5c1b1a208a5429fba3 | /buildout-cache/eggs/collective.carousel-1.7-py2.7.egg/collective/carousel/tests/test_portlet.py | fc410d96dddb0797ded993e4d974bf2cf356cea6 | [] | no_license | Vinsurya/Plone | 8366e57383da90b61aea82ab08a90415d753a15b | 55e273528cd5db4bbd1929a23ef74c3d873ec690 | refs/heads/master | 2021-01-19T18:55:48.515216 | 2015-05-12T16:10:08 | 2015-05-12T16:10:08 | 33,325,981 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,484 | py | # Carousel is rendered through a viewlet in IAboveContent
# using items provided by the carousel provider added to the context
from collective.carousel.portlets import carousel
from collective.carousel.tests.base import TestCase
from plone.app.portlets.storage import PortletAssignmentMapping
from plone.portlets.interfaces import IPortletAssignment
from plone.portlets.interfaces import IPortletDataProvider
from plone.portlets.interfaces import IPortletManager
from plone.portlets.interfaces import IPortletRenderer
from plone.portlets.interfaces import IPortletType
from zope.component import getUtility, getMultiAdapter
# default test query
query = [{
'i': 'portal_type',
'o': 'plone.app.querystring.operation.selection.is',
'v': ['Document', 'Event', 'News Item']
}]
class PortletTest(TestCase):
    """Tests for registration, add/edit views and renderer of the Carousel portlet."""

    def afterSetUp(self):
        """Set up the carousel Collection and some dummy objects"""
        self.setRoles('Manager')
        self.folder.invokeFactory('Collection', 'collection')
        collection = getattr(self.folder, 'collection')
        # `query` is the module-level default query defined above.
        collection.setQuery(query)
        field = self.folder.Schema().getField('carouselprovider')
        field.set(self.folder, collection)
        # add a few objects
        self.folder.invokeFactory('Document', 'carousel-doc')
        self.folder.invokeFactory('News Item', 'carousel-news-item')
        self.folder.invokeFactory('Event', 'carousel-event')

    def testPortletTypeRegistered(self):
        # The portlet type must be registered under its add view name.
        portlet = getUtility(IPortletType, name='portlet.Carousel')
        self.assertEquals(portlet.addview, 'portlet.Carousel')

    def testInterfaces(self):
        # An Assignment provides both portlet interfaces.
        portlet = carousel.Assignment(header=u"title")
        self.failUnless(IPortletAssignment.providedBy(portlet))
        self.failUnless(IPortletDataProvider.providedBy(portlet.data))

    def testInvokeAddview(self):
        # Creating through the add view stores one Assignment in the mapping.
        portlet = getUtility(IPortletType, name='portlet.Carousel')
        mapping = self.portal.restrictedTraverse(
            '++contextportlets++plone.leftcolumn')
        for m in mapping.keys():
            del mapping[m]
        addview = mapping.restrictedTraverse('+/' + portlet.addview)
        addview.createAndAdd(data={'header': u"test title"})
        self.assertEquals(len(mapping), 1)
        self.failUnless(isinstance(mapping.values()[0], carousel.Assignment))

    def testInvokeEditView(self):
        # The edit view for an assignment is the carousel EditForm.
        mapping = PortletAssignmentMapping()
        request = self.folder.REQUEST
        mapping['foo'] = carousel.Assignment(header=u"title")
        editview = getMultiAdapter((mapping['foo'], request), name='edit')
        self.failUnless(isinstance(editview, carousel.EditForm))

    def testRenderer(self):
        # The multi-adapter lookup yields the carousel Renderer.
        context = self.folder
        request = self.folder.REQUEST
        view = self.folder.restrictedTraverse('@@plone')
        manager = getUtility(IPortletManager, name='plone.rightcolumn',
                             context=self.portal)
        assignment = carousel.Assignment(header=u"title")
        renderer = getMultiAdapter((context, request, view,
                                    manager, assignment),
                                   IPortletRenderer)
        self.failUnless(isinstance(renderer, carousel.Renderer))
class TestRenderer(TestCase):
    """Tests for the carousel portlet renderer output and collection lookup."""

    # default test query
    # NOTE(review): this class attribute appears unused -- bare `query`
    # inside the methods below resolves to the module-level `query`,
    # not to TestRenderer.query.
    query = [{
        'i': 'portal_type',
        'o': 'plone.app.querystring.operation.selection.is',
        'v': ['Document', 'Event', 'News Item']
    }]

    def afterSetUp(self):
        self.setRoles(('Manager', ))
        self.folder.invokeFactory('Collection', 'collection')
        collection = getattr(self.folder, 'collection')
        collection.setQuery(query)
        field = self.folder.Schema().getField('carouselprovider')
        field.set(self.folder, collection)
        # add a few objects
        self.folder.invokeFactory('Document', 'carousel-doc')
        self.folder.invokeFactory('News Item', 'carousel-news-item')
        self.folder.invokeFactory('Event', 'carousel-event')

    def renderer(self, context=None, request=None, view=None,
                 manager=None, assignment=None):
        # Helper: build a portlet renderer, defaulting every component.
        context = context or self.folder
        request = request or self.folder.REQUEST
        view = view or self.folder.restrictedTraverse('@@plone')
        manager = manager or getUtility(IPortletManager,
                                        name='plone.rightcolumn',
                                        context=self.portal)
        assignment = assignment or carousel.Assignment(header=u"title")
        return getMultiAdapter((context, request, view, manager, assignment),
                               IPortletRenderer)

    def test_render(self):
        # The rendered output contains the portlet header text.
        r = self.renderer(context=self.portal,
                          assignment=carousel.Assignment(
                              header=u"title",
                              target_collection=
                              '/plone/Members/test_user_1_/collection'))
        r = r.__of__(self.folder)
        r.update()
        output = r.render()
        self.assertTrue('title' in output)

    def test_css_class(self):
        # The CSS class is derived (normalized) from the header.
        r = self.renderer(
            context=self.portal,
            assignment=carousel.Assignment(header=u"Test carousel"))
        self.assertEquals('portlet-carousel-test-carousel', r.css_class())

    def test_hideheader(self):
        # hideheader=True renders the header with the hiddenStructure class.
        r = self.renderer(
            context=self.portal,
            assignment=carousel.Assignment(header=u"Test carousel", hideheader=True))
        output = r.render()
        self.failUnless('class="portletHeader hiddenStructure"' in output)

    def test_portlet_collection(self):
        # The renderer must return exactly the items its collection returns.
        query = [{
            'i': 'portal_type',
            'o': 'plone.app.querystring.operation.selection.is',
            'v': ['Document']
        }]
        # add a few documents
        for i in range(6):
            self.folder.invokeFactory('Document', 'document_%s' % i)
            getattr(self.folder, 'document_%s' % i).reindexObject()
        collection = getattr(self.folder, 'collection')
        collection.setQuery(query)
        # the documents are returned by the collection
        collection_num_items = len(self.folder.collection.queryCatalog())
        # We better have some documents - we should have 8
        self.failUnless(collection_num_items >= 8)
        mapping = PortletAssignmentMapping()
        mapping['foo'] = carousel.Assignment(
            header=u"Test carousel",
            target_collection='/Members/test_user_1_/collection')
        r = self.renderer(context=None, request=None, view=None,
                          manager=None, assignment=mapping['foo'])
        # sanity check
        self.assertEqual(r.collection().id, 'collection')
        # we want the portlet to return us the same results as the collection
        self.assertEquals(collection_num_items, len(r.results()))

    def test_edit_link(self):
        # The edit link of the carousel points at the collection's /edit view.
        collection = getattr(self.folder, 'collection')
        collection.setQuery(query)
        mapping = PortletAssignmentMapping()
        mapping['foo'] = carousel.Assignment(
            header=u"Test carousel",
            target_collection='/Members/test_user_1_/collection')
        r = self.renderer(context=None, request=None, view=None,
                          manager=None, assignment=mapping['foo'])
        self.assertTrue(r.editCarouselLink().endswith('/edit'))
def test_suite():
    """Collect every test defined in this module into a unittest suite."""
    import unittest
    return unittest.defaultTestLoader.loadTestsFromName(__name__)
| [
"sdnlab@sdnlab.incntre.iu.edu"
] | sdnlab@sdnlab.incntre.iu.edu |
a3f6e118f74a61067ad87c807459fdbef492e5fd | 118704d5cc395019a3afb2aa74e8dc87da7fb8ba | /errorhandles.py | f955ef33c1a989108c2f66122c20a5f7382e1771 | [] | no_license | daniemart5/PythangMan | a2a0eb546b9ce824009da796bccc5b63ca550d15 | 7a957f7218b9053306fd99eef91aa8e2c57ae8b8 | refs/heads/master | 2020-09-02T10:58:08.912103 | 2019-11-15T19:09:20 | 2019-11-15T19:09:20 | 219,206,382 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 205 | py | try:
try:
    age = int(input('Age: '))   # raises ValueError on non-numeric input
    income = 20000
    risk = income / age         # raises ZeroDivisionError when age == 0
    print(age)
except ValueError:
    print('Invalid value')
except ZeroDivisionError:
    print("Age must be higher than 0")
"github email address"
] | github email address |
f015df5f2e90e880e6be266068df95216321a9fa | 4a42a8b32e2a79d598981141961396a6c2d625d2 | /dialogbot/views.py | 4d330c7a654e98a887491229c0105f322d53c20d | [] | no_license | sgmagar/DialogSlackBot | e851b2c44a11fd56b80c74e15b3dfb8fb8f74d22 | 89fddbacf58e6c8cf45d95d86411fd0668640c77 | refs/heads/master | 2022-12-15T22:46:53.536518 | 2018-07-08T04:03:41 | 2018-07-08T04:03:41 | 138,411,634 | 0 | 2 | null | 2022-12-08T02:13:27 | 2018-06-23T15:19:34 | Python | UTF-8 | Python | false | false | 4,922 | py | import json
import logging
from urllib.parse import urlencode
import os
import requests
from django.http import HttpResponse, JsonResponse
from django.views import View
from django.views.generic import TemplateView
from slackclient import SlackClient
from dialogbot.models import Team, Category
from .mixins import SlackMixin
from .dialogs import category_form
class IndexView(SlackMixin, TemplateView):
    """Landing page exposing the "Add to Slack" OAuth authorization link."""

    template_name = 'dialogbot/index.html'

    def get_context_data(self, **kwargs):
        ctx = super().get_context_data(**kwargs)
        ctx.update({
            'title': 'Home',
            'authorization_url': self.get_authorization_url()
        })
        return ctx

    def get_authorization_url(self):
        """Build the Slack OAuth URL, storing a random CSRF state in the session."""
        state = os.urandom(8).hex()
        # Saved so the OAuth callback can verify the state round-trip.
        self.request.session["slack_oauth_state"] = state
        query = urlencode({
            "client_id": self.client_id,
            "scope": self.scopes,
            "redirect_uri": self.get_redirect_url(),
            "state": state
        })
        return "https://slack.com/oauth/authorize?" + query
class OauthCallbackView(SlackMixin, TemplateView):
    """Handles Slack's OAuth redirect: exchanges the code and stores the team."""

    template_name = 'dialogbot/auth_result.html'

    def get_context_data(self, **kwargs):
        ctx = super().get_context_data(**kwargs)
        ctx['error'] = kwargs.get('error') or None
        # NOTE(review): `ctx` (with 'error') is built but discarded -- a fresh
        # context is returned instead; confirm whether 'error' should be kept.
        return super().get_context_data(**kwargs)

    def get(self, request, *args, **kwargs):
        response = self.exchange_code_for_token()
        logging.info(response)
        # NOTE(review): an error response from exchange_code_for_token() has
        # no "team_id" key, so this lookup would raise KeyError -- verify.
        team, created = Team.objects.update_or_create(
            team_id=response["team_id"], app_id=self.client_id, defaults={
                "user_access_token": response["access_token"],
                "bot_access_token": response["bot"]["bot_access_token"],
                "team_name": response['team_name']
            }
        )
        return self.render_to_response(response)

    def exchange_code_for_token(self):
        """Trade the OAuth `code` for tokens; returns an error dict on failure."""
        code = self.request.GET.get("code")
        state = self.request.GET.get("state")
        error = self.request.GET.get("error")
        # Reject when Slack reported an error or the CSRF state doesn't match.
        if error or not state or state != self.request.session.get('slack_oauth_state'):
            return {
                'error': "Error while installing rocket app in your workspace."
            }
        sc = SlackClient("")
        # Request the auth tokens from Slack
        response = sc.api_call(
            "oauth.access",
            client_id=self.client_id,
            client_secret=self.client_secret,
            redirect_uri=self.get_redirect_url(),
            code=code
        )
        if not response.get("ok"):
            return {
                'error': "Error while installing rocket app in your workspace."
            }
        return response
class CommandView(SlackMixin, View):
    """Dispatches Slack slash commands to `<command>_command` methods."""

    # for setting team object
    set_team_obj = True

    def post(self, request, *args, **kwargs):
        # '/interrupt' -> interrupt_command(); unknown commands are ACKed with 200.
        command = self.data['command'].strip('/')
        try:
            method = getattr(self, f'{command}_command')
        except AttributeError:
            logging.info(f'Unhandled command {command}')
            return HttpResponse(status=200)
        return method()

    def interrupt_command(self):
        """Open the category dialog in response to the /interrupt command."""
        resolution_times = [1, 4, 8, 12]
        types = ['Interruption', 'Service Outage']
        # Fall back to a placeholder when no categories exist yet.
        categories = Category.objects.values_list('title',flat=True) or ['sample_category']
        payload = {
            'token': self.team.bot_access_token,
            'trigger_id': self.data['trigger_id'],
            'dialog': json.dumps(category_form(resolution_times, types, categories))
        }
        # NOTE(review): the Slack API response is not checked for errors.
        response = requests.post('https://slack.com/api/dialog.open', params=payload)
        return HttpResponse(status=200)
class InteractionView(SlackMixin, View):
    """Handles Slack interactive-component callbacks (dialog submissions)."""

    # for setting team object
    set_team_obj = True

    def post(self, request, *args, **kwargs):
        # Dispatch to handle_<callback_id>; unknown callbacks are ACKed with 200.
        callback_id = self.data['callback_id']
        try:
            method = getattr(self, f'handle_{callback_id}')
        except AttributeError:
            logging.info(f'Unhandled interaction {callback_id}')
            return HttpResponse(status=200)
        return method()

    def handle_category(self):
        """Echo a submitted category dialog back to the channel via response_url."""
        submission = self.data['submission']
        username = self.data['user']['name']
        message = {
            'text': f"Category Submission Success by `{username}`",
            'attachments': get_attachments(submission)
        }
        requests.post(self.data['response_url'], data=json.dumps(message))
        return HttpResponse(status=200)
def get_attachments(submission):
    """Render a dialog submission dict as a single Slack attachment.

    Each key becomes a field title ('incident_type' -> 'Incident Type')
    with the submitted value.
    """
    fields = []
    for key, value in submission.items():
        fields.append({
            "title": key.replace("_", " ").title(),
            "value": value
        })
    return [{
        "color": "#aaefab",
        "mrkdwn_in": ['fields', 'text', 'pretext'],
        "fields": fields,
        'footer': 'Category',
    }]
| [
"sp.gharti@gmail.com"
] | sp.gharti@gmail.com |
f9dcccc861d7d82429d1611cba13efbfed9c30d5 | 309ac0cd757422b77e2bd820205fcafd11216bc9 | /src/feature/BOB.py | 47be42cf04e31d7c9a3a4d911e57ef4b855d3cd2 | [
"Apache-2.0"
] | permissive | jainal09/speaker-recognition | 024722fc16c60833a20529fdd9d651ee607ba9d3 | 53b0b5a3a7d6134e9bf21dfa4c1a7519b8e3389f | refs/heads/master | 2020-09-04T17:28:46.365993 | 2019-11-05T19:16:03 | 2019-11-05T19:16:03 | 219,831,541 | 0 | 0 | Apache-2.0 | 2019-11-05T19:15:33 | 2019-11-05T19:15:32 | null | UTF-8 | Python | false | false | 993 | py | #!/usr/bin/env python2
# -*- coding: UTF-8 -*-
# File: BOB.py
# Date: Wed Oct 29 22:38:35 2014 +0800
# Author: Yuxin Wu <ppwwyyxxc@gmail.com>
from utils import cached_func, diff_feature
import bob
import bob.ap
import numpy
@cached_func
def get_bob_extractor(fs, win_length_ms=32, win_shift_ms=16,
                      n_filters=55, n_ceps=19, f_min=0., f_max=6000,
                      delta_win=2, pre_emphasis_coef=0.95, dct_norm=True,
                      mel_scale=True):
    """Build (and cache via @cached_func) a bob.ap.Ceps extractor for
    sample rate *fs* and the given cepstral parameters."""
    # NOTE(review): the signature lists dct_norm before mel_scale, but the
    # constructor call passes mel_scale before dct_norm -- confirm this
    # matches the installed bob.ap.Ceps argument order.
    ret = bob.ap.Ceps(fs, win_length_ms, win_shift_ms, n_filters, n_ceps, f_min,
                      f_max, delta_win, pre_emphasis_coef, mel_scale, dct_norm)
    return ret
def extract(fs, signal=None, diff=False, **kwargs):
    """accept two argument, or one as a tuple"""
    # A single tuple argument is unpacked into (sample_rate, signal).
    if signal is None:
        assert type(fs) == tuple
        fs, signal = fs[0], fs[1]
    samples = numpy.cast['float'](signal)
    feat = get_bob_extractor(fs, **kwargs)(samples)
    if diff:
        return diff_feature(feat)
    return feat
| [
"ppwwyyxxc@gmail.com"
] | ppwwyyxxc@gmail.com |
e814636b3b0b7fc2d863355d0f65f5daf9722c50 | 482ec3480e8418dda62f85a5359e70c89256f1a8 | /pythonTutorial/drf_tutorial/quickstart/views.py | ca61c5364c666436720a5e36e5a295b7ce045e48 | [] | no_license | rajatgirotra/study | 84d319968f31f78798a56362546f21d22abd7ae7 | acbb6d21a8182feabcb3329e17c76ac3af375255 | refs/heads/master | 2023-09-01T20:48:31.137541 | 2023-08-29T01:41:17 | 2023-08-29T01:41:17 | 85,041,241 | 6 | 1 | null | 2023-05-01T19:25:38 | 2017-03-15T07:17:24 | C++ | UTF-8 | Python | false | false | 602 | py | from django.shortcuts import render
from django.contrib.auth.models import User, Group
from rest_framework import viewsets
from .serializers import UserSerializer, GroupSerializer
# Create your views here.
class UserViewSet(viewsets.ModelViewSet):
    """ API Endpoint that allows users to be viewed and edited. """
    # Newest accounts first; ModelViewSet supplies list/retrieve/create/update/destroy.
    queryset = User.objects.all().order_by('-date_joined')
    serializer_class = UserSerializer
class GroupViewSet(viewsets.ModelViewSet):
    """ API Endpoint that allows groups to be viewed and edited. """
    queryset = Group.objects.all()
    serializer_class = GroupSerializer
| [
"rajatgirotra@yahoo.com"
] | rajatgirotra@yahoo.com |
e0055c8917f50e0204eac439448028eb2c6d46c9 | d05c946e345baa67e7894ee33ca21e24b8d26028 | /web-scraping/pdf-url-extractor/pdf_link_extractor.py | f8ba7423e404f066582b76bc2e587b76fac36a74 | [
"MIT"
] | permissive | x4nth055/pythoncode-tutorials | 327255550812f84149841d56f2d13eaa84efd42e | d6ba5d672f7060ba88384db5910efab1768c7230 | refs/heads/master | 2023-09-01T02:36:58.442748 | 2023-08-19T14:04:34 | 2023-08-19T14:04:34 | 199,449,624 | 1,858 | 2,055 | MIT | 2023-08-25T20:41:56 | 2019-07-29T12:35:40 | Jupyter Notebook | UTF-8 | Python | false | false | 412 | py | import pikepdf # pip3 install pikepdf
import pikepdf  # pip3 install pikepdf

# PDF file to scan for link annotations.
file = "1810.04805.pdf"
# file = "1710.05006.pdf"
pdf_file = pikepdf.Pdf.open(file)
urls = []
# iterate over PDF pages
for page in pdf_file.pages:
    # BUG FIX: pages without annotations return None from .get("/Annots"),
    # which made the original loop raise TypeError; guard with `or []`.
    for annots in page.get("/Annots") or []:
        # Likewise, not every annotation carries an /A action dictionary.
        action = annots.get("/A")
        uri = action.get("/URI") if action is not None else None
        if uri is not None:
            print("[+] URL Found:", uri)
            urls.append(uri)
print("[*] Total URLs extracted:", len(urls))
"fullclip@protonmail.com"
] | fullclip@protonmail.com |
397adeb4b7a56622f5af0d2c042b01dd33be81de | 14d8e090d540a2947ba71038390078d7783ff510 | /update/models.py | dabb8836578ee1835bce4b488ea09814ea5103b0 | [] | no_license | BrutyLi/osupf | 657d61c7e8e8711e50006944bfd675cb39bddedc | 5dc218a05a980755ea218bd5e4b3189096a68c71 | refs/heads/master | 2021-01-25T12:52:59.625225 | 2018-03-02T02:34:13 | 2018-03-02T02:34:13 | 123,520,320 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 630 | py | from django.db import models
# Create your models here.
class hostInfo(models.Model):
    """Inventory record for a monitored host (verbose names are in Chinese)."""
    hname=models.CharField(max_length=32,verbose_name='描述',blank=True)  # description / label
    hip=models.GenericIPAddressField(verbose_name='IPadd')  # host IP address
    hcpu=models.CharField(max_length=8,verbose_name='cpu',)  # CPU info
    hdisk=models.CharField(max_length=16,verbose_name='磁盘')  # disk info
    huser=models.CharField(max_length=128,verbose_name='用户信息')  # user account info
    hlog=models.TextField(max_length=10240,verbose_name='日志')  # collected log text
    def __str__(self):
        return self.hname
    class Meta:
        # Admin display name: "host information".
        verbose_name='主机信息'
        verbose_name_plural='主机信息'
| [
"root@localhost.localdomain"
] | root@localhost.localdomain |
f1eb78fa9b327850db28fbb286357a8dd27b0ee2 | 747febe786dd6b7fd6c63cfe73dbe3023354daa8 | /src/the_tale/the_tale/statistics/models.py | d416b459decf2bdf77f76b382d9135e4eca0ee66 | [
"BSD-3-Clause"
] | permissive | the-tale/the-tale | 4e4b8d91dc873a5fb935fe58e9721a877baa6d3f | e8450bd2332344da805b1851e728da5a3e5bf0ef | refs/heads/develop | 2023-08-01T13:53:46.835667 | 2022-12-25T18:04:56 | 2022-12-25T18:04:56 | 1,949,167 | 98 | 52 | BSD-3-Clause | 2023-02-15T18:57:33 | 2011-06-24T18:49:48 | Python | UTF-8 | Python | false | false | 465 | py |
import smart_imports
smart_imports.all()
class Record(django_models.Model):
    """A single time-stamped statistics measurement of a given type."""
    date = django_models.DateTimeField(null=False)
    # Statistic kind, stored as an integer from the RECORD_TYPE relation.
    type = rels_django.RelationIntegerField(relation=relations.RECORD_TYPE, db_index=True)
    value_int = django_models.BigIntegerField()
    value_float = django_models.FloatField()
class FullStatistics(django_models.Model):
    """A full statistics snapshot stored as a single JSON document."""
    created_at = django_models.DateTimeField(auto_now_add=True)
    data = django_models.JSONField()
| [
"a.eletsky@gmail.com"
] | a.eletsky@gmail.com |
96ab3af6980eacbfc7d7b58bf47d470aa1ddfacf | 06add04cbcffada7085043a75a1565d0e01cafff | /notion_scripts/not_goals.py | 67c2cc703a4578364b23b72938d43c6cd2e0da0a | [] | no_license | josancamon19/productivity-apps-wrapper | 0c405726e9dbdc0de3db4f6e59d55f8e2e8f20ab | 7d99836bda716deca90f4592a95b84786a44dc9f | refs/heads/main | 2023-06-15T12:34:39.698296 | 2021-07-07T17:44:22 | 2021-07-07T17:44:22 | 374,472,485 | 22 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,944 | py | import os
import requests
from . import utils, unofficial_api_utils, not_todoist
from integrations.todoist import get_goals_sections,detail_completed_tasks
database_goals = os.getenv('NOTION_GOALS_DB')
def get_db_added_goals():
    """Return the Todoist task ids already stored in the Notion goals database."""
    url = f'{utils.base_url}databases/{database_goals}/query'
    data = requests.post(url, json={}, headers=utils.headers).json()
    ids = [task['properties']['Id']['number'] for task in data['results']]
    # Follow Notion's cursor-based pagination until next_cursor is empty.
    while cursor := data['next_cursor']:
        data = requests.post(url, json={'start_cursor': cursor, 'page_size': 100},
                             headers=utils.headers).json()
        ids += [task['properties']['Id']['number'] for task in data['results']]
    return ids
def save_goals_tasks(tasks: list):
    """Push completed Todoist goal tasks into the Notion goals database."""
    already_added = get_db_added_goals()
    sections = get_goals_sections()
    print(sections)
    unofficial_api_utils.synchronize_goals(sections)
    for task in detail_completed_tasks(tasks):
        # Skip tasks that already exist in Notion.
        if task['id'] in already_added:
            continue
        data = {'parent': {'type': 'database_id', 'database_id': database_goals},
                'properties': {
                    "Task": {
                        "type": "title",
                        "title": [{"type": "text", "text": {"content": task['content']}}]
                    },
                    "Goal": utils.map_select_value(str(task['section'])),
                    "Date Completion": {"type": "date", "date": {"start": task['date_completed']}},
                    "Id": utils.map_number_value(task['id']),
                }}
        result = requests.post(f'{utils.base_url}pages', json=data, headers=utils.headers)
        if result.status_code >= 300:
            # NOTE(review): a single failed insert aborts the whole batch.
            print(result, result.content)
            return
        page_id = result.json().get('id')
        not_todoist.add_notes_to_page(page_id, task['notes'])
| [
"joan.santiago.cabezas@gmail.com"
] | joan.santiago.cabezas@gmail.com |
17c0f628eec50d0bb168010b4109176082c7f0e8 | 1ea0e2b4f064ba0de45a73c527ee89a36771e8fc | /src/sentry/south_migrations/0005_auto.py | da6dbb394a1b0bfafb965133f97ea71ddf694688 | [
"BSD-2-Clause"
] | permissive | atlassian/sentry | 6775e59c317f20f96982e91c2b3c88c02ecbb56b | b937615079d7b24dc225a83b99b1b65da932fc66 | refs/heads/master | 2023-08-27T15:45:47.699173 | 2017-09-18T22:14:55 | 2017-09-18T22:14:55 | 103,999,066 | 1 | 5 | BSD-3-Clause | 2023-04-01T07:49:37 | 2017-09-18T22:38:18 | Python | UTF-8 | Python | false | false | 6,423 | py | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Apply the migration: add a DB index on sentry_groupedmessage.status."""
        # Adding index on 'GroupedMessage', fields ['status']
        db.create_index('sentry_groupedmessage', ['status'])
    def backwards(self, orm):
        """Revert the migration: drop the index added in forwards()."""
        # Removing index on 'GroupedMessage', fields ['status']
        db.delete_index('sentry_groupedmessage', ['status'])
models = {
'sentry.filtervalue': {
'Meta': {
'unique_together': "(('key', 'value'),)",
'object_name': 'FilterValue'
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'value': ('django.db.models.fields.CharField', [], {
'max_length': '255'
})
},
'sentry.groupedmessage': {
'Meta': {
'unique_together': "(('logger', 'view', 'checksum'),)",
'object_name': 'GroupedMessage'
},
'checksum':
('django.db.models.fields.CharField', [], {
'max_length': '32',
'db_index': 'True'
}),
'class_name': (
'django.db.models.fields.CharField', [], {
'db_index': 'True',
'max_length': '128',
'null': 'True',
'blank': 'True'
}
),
'first_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'last_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'level': (
'django.db.models.fields.PositiveIntegerField', [], {
'default': '40',
'db_index': 'True',
'blank': 'True'
}
),
'logger': (
'django.db.models.fields.CharField', [], {
'default': "'root'",
'max_length': '64',
'db_index': 'True',
'blank': 'True'
}
),
'message': ('django.db.models.fields.TextField', [], {}),
'status': (
'django.db.models.fields.PositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {
'default': '1'
}),
'traceback':
('django.db.models.fields.TextField', [], {
'null': 'True',
'blank': 'True'
}),
'view': (
'django.db.models.fields.CharField', [], {
'db_index': 'True',
'max_length': '255',
'null': 'True',
'blank': 'True'
}
)
},
'sentry.message': {
'Meta': {
'object_name': 'Message'
},
'checksum':
('django.db.models.fields.CharField', [], {
'max_length': '32',
'db_index': 'True'
}),
'class_name': (
'django.db.models.fields.CharField', [], {
'db_index': 'True',
'max_length': '128',
'null': 'True',
'blank': 'True'
}
),
'data': ('django.db.models.fields.TextField', [], {
'null': 'True',
'blank': 'True'
}),
'datetime': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'group': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'blank': 'True',
'related_name': "'message_set'",
'null': 'True',
'to': "orm['sentry.GroupedMessage']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'level': (
'django.db.models.fields.PositiveIntegerField', [], {
'default': '40',
'db_index': 'True',
'blank': 'True'
}
),
'logger': (
'django.db.models.fields.CharField', [], {
'default': "'root'",
'max_length': '64',
'db_index': 'True',
'blank': 'True'
}
),
'message': ('django.db.models.fields.TextField', [], {}),
'server_name':
('django.db.models.fields.CharField', [], {
'max_length': '128',
'db_index': 'True'
}),
'traceback':
('django.db.models.fields.TextField', [], {
'null': 'True',
'blank': 'True'
}),
'url': (
'django.db.models.fields.URLField', [], {
'max_length': '200',
'null': 'True',
'blank': 'True'
}
),
'view': (
'django.db.models.fields.CharField', [], {
'db_index': 'True',
'max_length': '255',
'null': 'True',
'blank': 'True'
}
)
}
}
complete_apps = ['sentry']
| [
"dcramer@gmail.com"
] | dcramer@gmail.com |
441a838a11222598abb6d464a17d3eea057ef15f | aed3fa9b75944995f8441c28e3ef74cfe130a004 | /test/conftest.py | 1481114b302b275160a0f69af8d541512635c1af | [] | no_license | kziolkowska/PyEMMA_IPython | ca95fadc5cfa7c7808592668abd6b2e080498838 | 7105ec66091df3ff9c1a5f0c8babbc0ec1b3f104 | refs/heads/master | 2020-12-24T22:21:03.405851 | 2015-02-26T09:07:18 | 2015-02-26T09:07:18 | 31,358,905 | 0 | 0 | null | 2015-02-26T09:02:18 | 2015-02-26T09:02:18 | null | UTF-8 | Python | false | false | 291 | py | '''
Created on Jun 20, 2014
@author: marscher
'''
def pytest_pycollect_makeitem(collector, name, obj):
    """Pytest collection hook.

    Forces initialization of the dynamically generated 'TestNotebooks'
    test cases by calling setUpClass() on the class during collection.
    Returns None so pytest falls back to its default collection.
    """
    if name != 'TestNotebooks':
        return None
    obj.setUpClass()
| [
"m.scherer@fu-berlin.de"
] | m.scherer@fu-berlin.de |
d1947ffbf45071eabaa38c2962280ba3fd7b0196 | ff9fd1bae4ea538fcae66df96b4acc1abdef1556 | /fabnet/mgmt/mgmt_db.py | 84e7c574f548521ba85fe95e832c70da12c3d242 | [] | no_license | fabregas/fabnet | 640b6cf9d96ef477f8db5038881bd448dfcbc2a1 | 6ad7f0791ca0fd08dcbbfc49f785ef634e5fcffa | refs/heads/master | 2021-01-10T20:22:20.107342 | 2014-02-17T16:51:30 | 2014-02-17T16:51:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,142 | py | #!/usr/bin/python
"""
Copyright (C) 2013 Konstantin Andrusenko
See the documentation for further information on copyrights,
or contact the author. All Rights Reserved.
@package fabnet.mgmt.mgmt_db
@author Konstantin Andrusenko
@date July 24, 2013
This module contains the implementation of MgmtDatabaseManager class
"""
import hashlib
from datetime import datetime
from pymongo import MongoClient
from pymongo.errors import ConnectionFailure
from fabnet.mgmt.exceptions import *
from fabnet.mgmt.constants import *
class MgmtDatabaseManager:
    """MongoDB-backed store for fabnet management data.

    Handles cluster configuration, user accounts (password hashes plus
    role lists), login sessions and node registration.  All errors are
    reported via the ME* exception types from fabnet.mgmt.exceptions.
    (Python 2 code: note the `except X, e` syntax below.)
    """
    # Name of the MongoDB database holding every management collection.
    MGMT_DB_NAME = 'fabnet_mgmt_db'

    def __init__(self, conn_str):
        """Connect to MongoDB at *conn_str* and ensure a bootstrap admin user."""
        try:
            self.__client = MongoClient(conn_str)
        except ConnectionFailure, err:
            raise MEDatabaseException('No database connection! Details: %s'%err)
        self.__mgmt_db = self.__client[self.MGMT_DB_NAME]
        self.__check_users()

    def __check_users(self):
        """Create the default 'admin' user when the users collection is empty."""
        users_cnt = self.__mgmt_db[DBK_USERS].find({}).count()
        if users_cnt:
            return
        # Default credentials admin/admin, user-management role only.
        self.create_user('admin', hashlib.sha1('admin').hexdigest(), [ROLE_UM])

    def close(self):
        """Close the underlying MongoDB client."""
        self.__client.close()

    def get_cluster_config(self):
        """Return the single cluster-config document, or {} when unset."""
        config = self.__mgmt_db[DBK_CLUSTER_CONFIG].find_one({})
        if not config:
            return {}
        return config

    def set_cluster_config(self, config):
        """Merge *config* into the stored cluster config (upsert)."""
        old_config = self.__mgmt_db[DBK_CLUSTER_CONFIG].find_one({})
        if old_config:
            # Keep existing keys that the new config does not override.
            old_config.update(config)
            config = old_config
        self.__mgmt_db[DBK_CLUSTER_CONFIG].update({}, config, upsert=True)

    def get_user_info(self, username):
        """Return the user document for *username*, or None when absent."""
        user = self.__mgmt_db[DBK_USERS].find_one({DBK_USERNAME: username})
        return user

    def __validate(self, value, c_type, minlen=None, val_name=None, possible_vals=None):
        """Check type, minimum length and allowed values of one argument.

        Raises MEInvalidArgException on the first failed check.
        """
        if not val_name:
            val_name = value
        if not isinstance(value, c_type):
            raise MEInvalidArgException('"%s" should be an instance of %s (%s occured)'\
                    %(val_name, c_type, type(value)))
        if minlen and len(value) < minlen:
            raise MEInvalidArgException('len(%s) < %s raised'%(val_name, minlen))
        if possible_vals:
            # Accept a scalar as a one-element list for uniform membership checks.
            if type(value) not in (list, tuple):
                value = [value]
            for item in value:
                if item not in possible_vals:
                    raise MEInvalidArgException('"%s" does not supported! possible values: %s'\
                            %(item, possible_vals))

    def create_user(self, username, pwd_hash, roles):
        """Insert a new user document; fail if the username is taken."""
        user = self.get_user_info(username)
        if user:
            raise MEAlreadyExistsException('User "%s" is already exists'%username)
        self.__validate(username, str, minlen=3, val_name='user_name')
        self.__validate(pwd_hash, str, minlen=1, val_name='password_hash')
        self.__validate(roles, list, minlen=1, val_name='roles', possible_vals=ROLES_DESC.keys())
        user = {DBK_USERNAME: username,
                DBK_USER_PWD_HASH: pwd_hash,
                DBK_ROLES: roles}
        self.__mgmt_db[DBK_USERS].insert(user)

    def remove_user(self, username):
        """Delete the user document for *username* (no-op when absent)."""
        self.__mgmt_db[DBK_USERS].remove({DBK_USERNAME: username})

    def update_user_info(self, username, pwd_hash=None, roles=None):
        """Update password hash and/or roles of an existing user."""
        user = self.__mgmt_db[DBK_USERS].find_one({DBK_USERNAME: username})
        if not user:
            raise MENotFoundException('User "%s" does not found!'%username)
        if pwd_hash:
            self.__validate(pwd_hash, str, minlen=1, val_name='password_hash')
            user[DBK_USER_PWD_HASH] = pwd_hash
        if roles:
            self.__validate(roles, list, minlen=1, val_name='roles', possible_vals=ROLES_DESC.keys())
            user[DBK_ROLES] = roles
        self.__mgmt_db[DBK_USERS].update({DBK_USERNAME: username}, user)

    def add_session(self, session_id, username):
        """Record a new login session with its start timestamp."""
        self.__mgmt_db[DBK_SESSIONS].insert({DBK_ID: session_id, \
                DBK_USERNAME: username, \
                DBK_START_DT: datetime.now()})

    def del_session(self, session_id):
        """Remove the session with the given id (no-op when absent)."""
        self.__mgmt_db[DBK_SESSIONS].remove({DBK_ID: session_id})

    def get_user_by_session(self, session_id):
        """Return the user document owning *session_id*, or None."""
        session = self.__mgmt_db[DBK_SESSIONS].find_one({DBK_ID: session_id})
        if not session:
            return None
        username = session[DBK_USERNAME]
        user = self.get_user_info(username)
        if not user:
            return None
        return user

    def get_user_last_session(self, username):
        """Return the most recently started session of *username*, or None."""
        sessions = self.__mgmt_db[DBK_SESSIONS].find({DBK_USERNAME: username}).sort([(DBK_START_DT, -1)])
        for session in sessions:
            # Cursor is sorted by start time descending: first hit is newest.
            return session
        return None

    def append_node(self, node_name, node_type, node_address):
        """Register a fabnet node with its type, address and install date."""
        self.__mgmt_db[DBK_NODES].insert({DBK_ID: node_name, \
                DBK_NODETYPE: node_type, \
                DBK_NODEADDR: node_address, \
                DBK_INSTALLDATE: datetime.now()})
| [
"kksstt@gmail.com"
] | kksstt@gmail.com |
1408f27ecce4aa24ccc4ea574ef3aeb35d24887f | 4b6046439b3411cbc30276e00ad712a3d4c95768 | /pysaga/commons.py | b09fb685312fab7a85a2babab18b4eda9a5bcd58 | [] | no_license | Aluriak/pysaga | 4c731a5d404d0ca0087997af4315e69ecf14fc37 | 54320e5b65a73e793ad18c306b9c004cf8cb0593 | refs/heads/master | 2020-03-30T00:09:21.380417 | 2018-09-27T13:21:22 | 2018-09-27T13:21:22 | 150,507,760 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 885 | py | """Various general definitions.
"""
import re

from . import utils

# Relative path template to one saga's text transcripts.
TEMPLATE_PATH = 'sagas-mp3/{saga_name}/Textes/'
# Inline annotation keywords found in the transcripts.
REFWORDS = 'REF', 'LOL', 'TRI', 'JDM'
# "Chapitre <number> - <title>" heading line.
REG_CHAPTER = re.compile(r"^Chapitre ([0-9]+) - (.+)$")
# Upper-case speaker name followed by ' : ' (speaker prefix only).
REG_CHARACTER = re.compile(r"^([A-Z0-9ÉÈÊÀÄÂÔÛÏ.,!?' -]+) : ?")
# Full dialogue line: speaker prefix plus the spoken text.
REG_LINE = re.compile(r"^([A-Z0-9ÉÈÊÀÄÂÔÛÏ.,!?' -]+) : (.+)")
# Known files the parser cannot handle; skipped by callers.
UNPARSABLES = {
    TEMPLATE_PATH.format(saga_name='Reflets') + 'Fleau.html',
}
# Import-time sanity check of the chapter regex.
assert REG_CHAPTER.fullmatch('Chapitre 1 - Introduction à la quête')
# Canonical saga display names, keyed by directory name.
SAGA_NAME = {
    'Reflets': "Reflets d'Acide",
    'Xantah': "La Légende de Xantah",
    'Adoprixtoxis': "Adoprixtoxis",
}
# alias -> canonical directory name (mapping below is inverted by utils).
SAGA_ALIASES = utils.reverse_multivalues_dict({
    'Reflets': ('rda', 'reflets', 'reflet', 'reflets d\'acide', 'reflet d\'acide'),
    'Xantah': ('xantah', 'xantha', 'xant'),
    'Adoprixtoxis': ('adoprixtoxis', 'adop'),
}, unique_value=True)
| [
"lucas.bourneuf@laposte.net"
] | lucas.bourneuf@laposte.net |
3a99c35b78a898a0605d35c6f449c950a1532dc9 | 53e8c9feacc101f3bfefe013808a778c81a0aca7 | /my_words/nounce.py | b536f3cbe57a331ec82d779d2f32f15d3e6b3e04 | [] | no_license | chouchouyu/my_words | 0cc98c2b1e66b38c0311f871f390932215faecde | 2227fcb28bd49f2daca8fcf0bfa8645a95d434c9 | refs/heads/master | 2020-11-24T10:42:17.802396 | 2019-12-02T00:35:41 | 2019-12-02T00:35:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,580 | py | 英语词源字典
nounc
announce, 宣布
【发音释义】, [ə'naʊns]vt, 宣布, 述说, 预示, 播报vi, 宣布参加竞选, 当播音员
【结构分析】, announce = an(=ad, 去)nounce(报告)→宣布
词源解析, nounce←拉丁语nuntiare(报告, 叙述)←拉丁语nuntius(信使)
enunciate(宣布), renounce(宣布放弃), denounce(谴责), pronounce(发音, 宣判)
【衍生词】, announcement(宣布), announcer(广播员)
announce, 通知
前缀an同ad, 词根nounce, 通知, 见pronounce, 通知,
denounce, 谴责
de, 向下, 强调, nounce, 通知, 呼喊, 词源同announce, pronounce, 即向下喊, 引申词义谴责,
mispronounce, 发音不当
mis, 坏的, 错的, 不好的, pronounce, 发音,
announce, 宣布, 通告, 发表
词根词缀, an加强意义nounc通知e
announcement, 通[布, 预]告, 告示, 声明
词根词缀, an加强意义nounc通知ement
denounce, 公开指责, 公然抨击, 谴责, 告发
词根词缀, de下降, 向下, 减少nounc通知e
announce, 宣佈
denounce, 聲討
pronounce, 發音
renounce, 放棄
# announce,宣佈,顯示,預告,當報幕員,宣佈參加競選
denounce - -0, 告發, 公然抨擊, 譴責, 向下宣佈譴責
denunciation, 記公開指責, 公開指責, 指控, 類, 苛責是十分譴責駁斥是十分否定贊頌優點, 痛罵指責, 反, 贊詞, 頌詞, 貢品, 頌詞, 贊美, 譴責, 告發
denunciatory, 譴責的, 指責的譴責
enunciate, 發音, 明確地敘述
noun 名詞
# pronounce,發音
# pronounced--0,明顯的,明確的,有人告訴我說,我在說法語時有明顯的英國口音
# pronouncement,聲明,財政部長在最近的公開發言中對經濟復甦持更樂觀的態度
renounce, 正式, 放棄, 如頭銜, 斷絕關系參, 公開指責, 宣言, 發音, 表達, 類背棄誓言廢除命令反, 要求, 聲稱擁有, 擁抱, 信奉
renunciation, 放棄, 廢棄, 棄權, 脫離關系
pronounce, 发音, 宣判, 宣布
词根词缀, pro前, 公开nounc通知e
pronounced, 讲出来的, 显著的, 断然的, 明确的
词根词缀, pro前, 公开nounc通知ed
renounce, 声明
词根词缀, re回, 向后nounc通知e→宣布收回
renouncement, 否认, 拒绝
词根词缀, re回, 向后nounc通知ement
pronounce, 发音, 读音, 宣布, 公布
pro, 向前, nounce, 发音, 词源同announce, nuncio, 引申诸相关词义,
pronouncement, 公布, 公告
来自pronounce, 宣布, 公布,
renounce, 声明放弃, 摒弃
re, 向后, 往回, nounc, 说话, 通知, 词源同announce, denounce, 即收回已经说过的话, 引申词义声明放弃, 摒弃,
| [
"iwtfagp@gmail.com"
] | iwtfagp@gmail.com |
b0ae0ec00f64dacd2002923982d4003e65fbdadf | d98fae9cd74992a2f81f8c267a71542475b27300 | /img_core/img_mvp/woot/apps/img/cpmath/setup.py | e5f56b50cfa997c4ee985bf49748913c5f251ce1 | [] | no_license | NicholasPiano/img-desktop | f516c4da8041aabe3cd4a1af24fdbc42eda105fa | 9a2eed6cc0d912b675d02d8b0b20f60a71a5b481 | refs/heads/master | 2021-01-23T13:17:49.601534 | 2015-10-07T11:28:25 | 2015-10-07T11:28:25 | 39,502,099 | 0 | 0 | null | 2015-10-07T11:28:26 | 2015-07-22T11:19:19 | Python | UTF-8 | Python | false | false | 4,197 | py | """setup.py - setup to build C modules for cpmath
CellProfiler is distributed under the GNU General Public License,
but this file is licensed under the more permissive BSD license.
See the accompanying file LICENSE for details.
Copyright (c) 2003-2009 Massachusetts Institute of Technology
Copyright (c) 2009-2015 Broad Institute
All rights reserved.
Please see the AUTHORS file for credits.
Website: http://www.cellprofiler.org
"""
from distutils.core import setup,Extension
import glob
import os
import sys
# True on any Windows platform; selects MSVC-style build flags below.
is_win = sys.platform.startswith("win")
try:
    from Cython.Distutils import build_ext
    from numpy import get_include
except ImportError:
    # Fall back to a site-packages directory two levels up — presumably
    # where the source checkout ships its dependencies (TODO confirm).
    import site
    site.addsitedir('../../site-packages')
    from Cython.Distutils import build_ext
    from numpy import get_include
def configuration():
    """Assemble the setup() keyword arguments for cpmath.

    Returns a dict containing package metadata, the Cython build_ext
    command class, and the list of C/Cython extension modules to build.
    Compiler/linker flags are chosen per platform: an MSVC manifest link
    flag on Windows, -O3 elsewhere.
    """
    if is_win:
        extra_compile_args = None
        extra_link_args = ['/MANIFEST']
    else:
        extra_compile_args = ['-O3']
        extra_link_args = None
    extensions = [Extension(name="_cpmorphology",
                            sources=["src/cpmorphology.c"],
                            include_dirs=['src']+[get_include()],
                            extra_compile_args=extra_compile_args,
                            extra_link_args=extra_link_args),
                  Extension(name="_cpmorphology2",
                            sources=["_cpmorphology2.pyx"],
                            include_dirs=[get_include()],
                            extra_compile_args=extra_compile_args,
                            extra_link_args=extra_link_args),
                  Extension(name="_watershed",
                            sources=["_watershed.pyx", "heap_watershed.pxi"],
                            include_dirs=['src']+[get_include()],
                            extra_compile_args=extra_compile_args,
                            extra_link_args=extra_link_args),
                  Extension(name="_propagate",
                            sources=["_propagate.pyx", "heap.pxi"],
                            include_dirs=['src']+[get_include()],
                            extra_compile_args=extra_compile_args,
                            extra_link_args=extra_link_args),
                  Extension(name="_filter",
                            sources=["_filter.pyx"],
                            include_dirs=['src']+[get_include()],
                            extra_compile_args=extra_compile_args,
                            extra_link_args=extra_link_args),
                  Extension(name="_lapjv",
                            sources=["_lapjv.pyx"],
                            include_dirs=['src']+[get_include()],
                            extra_compile_args=extra_compile_args,
                            extra_link_args=extra_link_args),
                  Extension(name="_convex_hull",
                            sources=["_convex_hull.pyx"],
                            include_dirs=['src']+[get_include()],
                            extra_compile_args=extra_compile_args,
                            extra_link_args=extra_link_args),
                  Extension(name="_fastemd",
                            sources=["_fastemd.pyx"],
                            include_dirs = [
                                "include", get_include(),
                                "../contrib/include/FastEMD"],
                            depends=["include/fastemd_hat.hpp",
                                     "include/npy_helpers.hpp"] +
                            glob.glob("../contrib/include/FastEMD/*.hpp"),
                            extra_compile_args = extra_compile_args,
                            extra_link_args=extra_link_args,
                            language="c++")
                  ]
    # Renamed from 'dict', which shadowed the builtin of the same name.
    config = { "name":"cpmath",
               "description":"algorithms for CellProfiler",
               "maintainer":"Lee Kamentsky",
               "maintainer_email":"leek@broadinstitute.org",
               "cmdclass": {'build_ext': build_ext},
               "ext_modules": extensions
               }
    return config
if __name__ == '__main__':
    # When invoked with a path (e.g. "python pkg/setup.py"), chdir into the
    # script's own directory so the relative source paths above resolve.
    if '/' in __file__:
        os.chdir(os.path.dirname(__file__))
    setup(**configuration())
| [
"nicholas.d.piano@gmail.com"
] | nicholas.d.piano@gmail.com |
c2773af266019d171d5db388a8485e13acd96789 | d2628b4562ec2f7815ae9a5af546a9fe56492ff3 | /sitch/sitchlib/config_helper.py | c94b725e51c79583c32dc5ae100c90651a40921a | [
"Apache-2.0"
] | permissive | mtigas/feed_builder | 6b5927e48a0d65d4a83830a99eda75decfb30202 | 59c65f50f221e170c229444fd8f3c7b3b1dce6a1 | refs/heads/master | 2021-01-11T14:02:44.662709 | 2017-06-20T23:57:32 | 2017-06-20T23:57:32 | 94,944,694 | 0 | 0 | null | 2017-06-20T23:53:53 | 2017-06-20T23:53:53 | null | UTF-8 | Python | false | false | 806 | py | import os
class ConfigHelper:
    """Central configuration for the feed builder (Python 2 code).

    Values are either hard-coded paths/constants or read from environment
    variables; the Twilio credentials are mandatory and abort startup
    when missing.
    """
    def __init__(self):
        # Optional OpenCelliD API key: may legitimately be None.
        self.ocid_key = os.getenv("OCID_KEY")
        self.base_path = "/var/feed/"
        self.iso_country = "US"
        # Mandatory credentials: get_from_env raises KeyError when unset.
        self.twilio_sid = ConfigHelper.get_from_env("TWILIO_SID")
        self.twilio_token = ConfigHelper.get_from_env("TWILIO_TOKEN")
        self.ocid_base = "/var/"
        self.fcc_tempfile = "/var/fcc.tmp.zip"
        # Name of the CSV expected inside the downloaded FCC zip archive.
        self.fcc_enclosed_file = "fcc_lic_vw.csv"
        self.fcc_destination_file = "/var/fcc.csv.gz"
        self.target_radio = "GSM"
        return

    @classmethod
    def get_from_env(cls, k):
        """Return environment variable *k*; print a notice and raise
        KeyError when it is not set."""
        retval = os.getenv(k)
        if retval is None:
            print "Required config variable not set: %s" % k
            print "Unable to continue. Exiting."
            raise KeyError
        return retval
| [
"ash.d.wilson@gmail.com"
] | ash.d.wilson@gmail.com |
b9ce33769d4545ddcaa1e2b7122e021dec4a6681 | 7c9173875ba6e20a9fc705753a5c553891d01a79 | /Python_Bilibili/同济子豪兄/zihaowordcloud/code/example6.py | 1722efd0b58e1763c9a8e46f96ad47fc63ebc500 | [] | no_license | zhy0313/children-python | 1df120930637b8bd320ab090ea784aab7b7cfed2 | 941e29d5f39092b02f8486a435e61c7ec2bdcdb6 | refs/heads/master | 2021-01-07T00:59:31.332746 | 2020-02-10T11:45:52 | 2020-02-10T11:45:52 | 241,533,568 | 0 | 1 | null | 2020-02-19T04:36:38 | 2020-02-19T04:36:37 | null | UTF-8 | Python | false | false | 1,035 | py | # 6号词云:乡村振兴战略中央文件(五角星形状)
# Bilibili column: Tongji Zihaoxiong, 2019-5-23
# Import the word-cloud library (wordcloud) and the Chinese word-segmentation library (jieba)
import jieba
import wordcloud
# Import imread from imageio and load a local image as the word-cloud mask shape (a five-pointed star)
import imageio
mk = imageio.imread("wujiaoxing.png")
# Build and configure the WordCloud object w; the scale parameter raises the output resolution
w = wordcloud.WordCloud(width=1000,
                        height=700,
                        background_color='white',
                        font_path='msyh.ttc',
                        mask=mk,
                        scale=15)
# Segment the Chinese text read from the external file and join the tokens into one space-separated string
f = open('关于实施乡村振兴战略的意见.txt',encoding='utf-8')
txt = f.read()
txtlist = jieba.lcut(txt)
string = " ".join(txtlist)
# Pass the segmented text to w.generate() to feed the word cloud
w.generate(string)
# Export the rendered word-cloud image to the current folder
w.to_file('output6-village.png')
| [
"baixuelin12hit@outlook.com"
] | baixuelin12hit@outlook.com |
a8639ef72bab2024a9a6515cc547b788964a64da | 788bdd9e443a571bc8262323425317015303cf1d | /p1522.py | 1aed2ee14ece3b5ccb7ddd473dcdf6620cf438ff | [] | no_license | qyx2018/Xiaojiayu | e07dd5480babafb5f1940a2032ef30a92c7331f2 | e7decd439ed95b5a6d3cf195444cabfedf0eff76 | refs/heads/master | 2020-03-11T06:21:27.609708 | 2018-04-17T01:33:40 | 2018-04-17T01:33:40 | 129,827,662 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 272 | py | from tkinter import *
# Demo: two sliders — a vertical one (0-42) and a horizontal one (0-200) —
# plus a button that prints their current positions.
root = Tk()
s1 = Scale(root, from_ = 0, to = 42)
s1.pack()
s2 = Scale(root, from_ = 0, to = 200,orient = HORIZONTAL)
s2.pack()
def show():
    # Print the current value of both sliders to stdout.
    print(s1.get(), s2.get())
Button(root, text = 'Position', command = show).pack()
mainloop()
"noreply@github.com"
] | qyx2018.noreply@github.com |
28c527fbe2a4759a8226f709c73a8b020425f06e | f87c86c4dcb9192717a06ec41caa5c49b1fd0201 | /adv/summer_verica.py | 1dd6707c1ca4334defc697de0b3bec11834d4fce | [
"Apache-2.0"
] | permissive | Caledor/dl | 5377982f31f0c1890aff487d5eefc1ffb6f7115f | fc5e9b6855afb40c4c499a70dfa0e0503e8c8f05 | refs/heads/master | 2023-04-01T19:15:04.372285 | 2021-03-28T23:25:06 | 2021-03-28T23:25:06 | 277,363,765 | 0 | 0 | Apache-2.0 | 2020-07-05T18:49:36 | 2020-07-05T18:49:36 | null | UTF-8 | Python | false | false | 321 | py | from core.advbase import *
class Summer_Verica(Adv):
    """Adventurer simulation config — presumably a Dragalia Lost damage
    sim built on core.advbase (TODO confirm).

    Auto-charges skill 2 with SP whenever skill 1 is already charged.
    """
    def prerun(self):
        # Register s2_autocharge as the auto-charge callback and enable it.
        self.s2.autocharge_init(self.s2_autocharge).on()

    def s2_autocharge(self, t):
        # Only feed SP into s2 while s1 holds more SP than its charge cost.
        if self.s1.sp > self.s1.charged:
            # 1578 is the SP amount granted per tick — source of the
            # constant not visible here; confirm against game data.
            log("sp", "s2_autocharge", 1578)
            self.s2.charge(1578)

# Variant registry consumed by the framework; None = the default build.
variants = {None: Summer_Verica}
| [
"wildshinobu@gmail.com"
] | wildshinobu@gmail.com |
e7d4ce72e6fa92f7206ac7798833e5fd688d432e | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /5bYXQfpyoithnQisa_13.py | a15674c0b040fae09a272140048ddb56764fc393 | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 920 | py |
def encode_morse(message):
    """Encode *message* as International Morse code.

    Letters match case-insensitively; encoded symbols are separated by a
    single space, and a space in the input maps to a space symbol.  A
    character with no Morse mapping raises KeyError (as before).

    Fixed: the lookup table listed the ' ' key twice, and the output was
    built with quadratic string += followed by a trailing-space trim; a
    single str.join produces the same string directly.
    """
    char_to_dots = {
        'A': '.-', 'B': '-...', 'C': '-.-.', 'D': '-..', 'E': '.', 'F': '..-.',
        'G': '--.', 'H': '....', 'I': '..', 'J': '.---', 'K': '-.-', 'L': '.-..',
        'M': '--', 'N': '-.', 'O': '---', 'P': '.--.', 'Q': '--.-', 'R': '.-.',
        'S': '...', 'T': '-', 'U': '..-', 'V': '...-', 'W': '.--', 'X': '-..-',
        'Y': '-.--', 'Z': '--..', '0': '-----',
        '1': '.----', '2': '..---', '3': '...--', '4': '....-', '5': '.....',
        '6': '-....', '7': '--...', '8': '---..', '9': '----.',
        '&': '.-...', "'": '.----.', '@': '.--.-.', ')': '-.--.-', '(': '-.--.',
        ':': '---...', ',': '--..--', '=': '-...-', '!': '-.-.--', '.': '.-.-.-',
        '-': '-....-', '+': '.-.-.', '"': '.-..-.', '?': '..--..', '/': '-..-.',
        ' ': ' '
    }
    return ' '.join(char_to_dots[c.upper() if c.isalpha() else c] for c in message)
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
d4380185dc4b6ddf9ad0d4f353b0a695e968d5d4 | 90419da201cd4948a27d3612f0b482c68026c96f | /sdk/python/pulumi_azure_nextgen/consumption/v20180131/outputs.py | c66a207304ca209166f50facce58d6ca57713144 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | test-wiz-sec/pulumi-azure-nextgen | cd4bee5d70cb0d332c04f16bb54e17d016d2adaf | 20a695af0d020b34b0f1c336e1b69702755174cc | refs/heads/master | 2023-06-08T02:35:52.639773 | 2020-11-06T22:39:06 | 2020-11-06T22:39:06 | 312,993,761 | 0 | 0 | Apache-2.0 | 2023-06-02T06:47:28 | 2020-11-15T09:04:00 | null | UTF-8 | Python | false | false | 7,747 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = [
'BudgetTimePeriodResponse',
'CurrentSpendResponse',
'FiltersResponse',
'NotificationResponse',
]
@pulumi.output_type
# NOTE: auto-generated by the Pulumi SDK Generator — prefer regenerating
# over hand-editing.  Subclasses dict; values live in the mapping itself
# and are accessed through pulumi.get/pulumi.set.
class BudgetTimePeriodResponse(dict):
    """
    The start and end date for a budget.
    """
    def __init__(__self__, *,
                 start_date: str,
                 end_date: Optional[str] = None):
        """
        The start and end date for a budget.
        :param str start_date: The start date for the budget.
        :param str end_date: The end date for the budget. If not provided, we default this to 10 years from the start date.
        """
        pulumi.set(__self__, "start_date", start_date)
        if end_date is not None:
            pulumi.set(__self__, "end_date", end_date)

    @property
    @pulumi.getter(name="startDate")
    def start_date(self) -> str:
        """
        The start date for the budget.
        """
        return pulumi.get(self, "start_date")

    @property
    @pulumi.getter(name="endDate")
    def end_date(self) -> Optional[str]:
        """
        The end date for the budget. If not provided, we default this to 10 years from the start date.
        """
        return pulumi.get(self, "end_date")

    def _translate_property(self, prop):
        # Map camelCase wire names to snake_case Python attribute names.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
# NOTE: auto-generated by the Pulumi SDK Generator — prefer regenerating
# over hand-editing.
class CurrentSpendResponse(dict):
    """
    The current amount of cost which is being tracked for a budget.
    """
    def __init__(__self__, *,
                 amount: float,
                 unit: str):
        """
        The current amount of cost which is being tracked for a budget.
        :param float amount: The total amount of cost which is being tracked by the budget.
        :param str unit: The unit of measure for the budget amount.
        """
        pulumi.set(__self__, "amount", amount)
        pulumi.set(__self__, "unit", unit)

    @property
    @pulumi.getter
    def amount(self) -> float:
        """
        The total amount of cost which is being tracked by the budget.
        """
        return pulumi.get(self, "amount")

    @property
    @pulumi.getter
    def unit(self) -> str:
        """
        The unit of measure for the budget amount.
        """
        return pulumi.get(self, "unit")

    def _translate_property(self, prop):
        # Map camelCase wire names to snake_case Python attribute names.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
# NOTE: auto-generated by the Pulumi SDK Generator — prefer regenerating
# over hand-editing.  All three filter lists are optional; only the ones
# supplied are stored.
class FiltersResponse(dict):
    """
    May be used to filter budgets by resource group, resource, or meter.
    """
    def __init__(__self__, *,
                 meters: Optional[Sequence[str]] = None,
                 resource_groups: Optional[Sequence[str]] = None,
                 resources: Optional[Sequence[str]] = None):
        """
        May be used to filter budgets by resource group, resource, or meter.
        :param Sequence[str] meters: The list of filters on meters, mandatory for budgets of usage category.
        :param Sequence[str] resource_groups: The list of filters on resource groups, allowed at subscription level only.
        :param Sequence[str] resources: The list of filters on resources.
        """
        if meters is not None:
            pulumi.set(__self__, "meters", meters)
        if resource_groups is not None:
            pulumi.set(__self__, "resource_groups", resource_groups)
        if resources is not None:
            pulumi.set(__self__, "resources", resources)

    @property
    @pulumi.getter
    def meters(self) -> Optional[Sequence[str]]:
        """
        The list of filters on meters, mandatory for budgets of usage category.
        """
        return pulumi.get(self, "meters")

    @property
    @pulumi.getter(name="resourceGroups")
    def resource_groups(self) -> Optional[Sequence[str]]:
        """
        The list of filters on resource groups, allowed at subscription level only.
        """
        return pulumi.get(self, "resource_groups")

    @property
    @pulumi.getter
    def resources(self) -> Optional[Sequence[str]]:
        """
        The list of filters on resources.
        """
        return pulumi.get(self, "resources")

    def _translate_property(self, prop):
        # Map camelCase wire names to snake_case Python attribute names.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
# NOTE: auto-generated by the Pulumi SDK Generator — prefer regenerating
# over hand-editing.
class NotificationResponse(dict):
    """
    The notification associated with a budget.
    """
    def __init__(__self__, *,
                 contact_emails: Sequence[str],
                 enabled: bool,
                 operator: str,
                 threshold: float,
                 contact_groups: Optional[Sequence[str]] = None,
                 contact_roles: Optional[Sequence[str]] = None):
        """
        The notification associated with a budget.
        :param Sequence[str] contact_emails: Email addresses to send the budget notification to when the threshold is exceeded.
        :param bool enabled: The notification is enabled or not.
        :param str operator: The comparison operator.
        :param float threshold: Threshold value associated with a notification. Notification is sent when the cost exceeded the threshold. It is always percent and has to be between 0 and 1000.
        :param Sequence[str] contact_groups: Action groups to send the budget notification to when the threshold is exceeded.
        :param Sequence[str] contact_roles: Contact roles to send the budget notification to when the threshold is exceeded.
        """
        pulumi.set(__self__, "contact_emails", contact_emails)
        pulumi.set(__self__, "enabled", enabled)
        pulumi.set(__self__, "operator", operator)
        pulumi.set(__self__, "threshold", threshold)
        if contact_groups is not None:
            pulumi.set(__self__, "contact_groups", contact_groups)
        if contact_roles is not None:
            pulumi.set(__self__, "contact_roles", contact_roles)

    @property
    @pulumi.getter(name="contactEmails")
    def contact_emails(self) -> Sequence[str]:
        """
        Email addresses to send the budget notification to when the threshold is exceeded.
        """
        return pulumi.get(self, "contact_emails")

    @property
    @pulumi.getter
    def enabled(self) -> bool:
        """
        The notification is enabled or not.
        """
        return pulumi.get(self, "enabled")

    @property
    @pulumi.getter
    def operator(self) -> str:
        """
        The comparison operator.
        """
        return pulumi.get(self, "operator")

    @property
    @pulumi.getter
    def threshold(self) -> float:
        """
        Threshold value associated with a notification. Notification is sent when the cost exceeded the threshold. It is always percent and has to be between 0 and 1000.
        """
        return pulumi.get(self, "threshold")

    @property
    @pulumi.getter(name="contactGroups")
    def contact_groups(self) -> Optional[Sequence[str]]:
        """
        Action groups to send the budget notification to when the threshold is exceeded.
        """
        return pulumi.get(self, "contact_groups")

    @property
    @pulumi.getter(name="contactRoles")
    def contact_roles(self) -> Optional[Sequence[str]]:
        """
        Contact roles to send the budget notification to when the threshold is exceeded.
        """
        return pulumi.get(self, "contact_roles")

    def _translate_property(self, prop):
        # Map camelCase wire names to snake_case Python attribute names.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
| [
"public@paulstack.co.uk"
] | public@paulstack.co.uk |
7c0ef510fdaeb63ff3582c9253c1ae34e03d982b | 66fe6eb64afeb7313a4c7685a8748455325b6726 | /918-maximum-sum-circular-subarray.py | 0150e754bb0889e9f423b95cd511f16e0fef3402 | [] | no_license | anantkaushik/leetcode | b54eb27b17ed95b02ab426392208c346f2d87aaa | 06f0a6dbff2e2062fa4568efa5f01ad982d6ac94 | refs/heads/master | 2022-03-07T18:21:35.881943 | 2022-02-23T12:27:24 | 2022-02-23T12:27:24 | 120,501,367 | 40 | 13 | null | 2019-10-11T11:07:22 | 2018-02-06T18:05:51 | Python | UTF-8 | Python | false | false | 1,949 | py | """
Problem Link: https://leetcode.com/problems/maximum-sum-circular-subarray/
Given a circular integer array nums of length n, return the maximum possible
sum of a non-empty subarray of nums.
A circular array means the end of the array connects to the beginning of the array.
Formally, the next element of nums[i] is nums[(i + 1) % n] and the previous
element of nums[i] is nums[(i - 1 + n) % n].
A subarray may only include each element of the fixed buffer nums at most once.
Formally, for a subarray nums[i], nums[i + 1], ..., nums[j], there does not
exist i <= k1, k2 <= j with k1 % n == k2 % n.
Example 1:
Input: nums = [1,-2,3,-2]
Output: 3
Explanation: Subarray [3] has maximum sum 3
Example 2:
Input: nums = [5,-3,5]
Output: 10
Explanation: Subarray [5,5] has maximum sum 5 + 5 = 10
Example 3:
Input: nums = [3,-1,2,-1]
Output: 4
Explanation: Subarray [2,-1,3] has maximum sum 2 + (-1) + 3 = 4
Example 4:
Input: nums = [3,-2,2,-3]
Output: 3
Explanation: Subarray [3] and [3,-2,2] both have maximum sum 3
Example 5:
Input: nums = [-2,-3,-1]
Output: -1
Explanation: Subarray [-1] has maximum sum -1
Constraints:
n == nums.length
1 <= n <= 3 * 104
-3 * 104 <= nums[i] <= 3 * 104
Solution:
There two cases:
1. When max sub array is in the middle.
2. When max sub array = prefix + suffix
= total sum - middle sub array (min sub array)
Edge case: If all items are negative.
"""
class Solution:
    def maxSubarraySumCircular(self, nums: List[int]) -> int:
        """Return the maximum non-empty subarray sum of a circular array.

        Runs Kadane's algorithm twice in one pass: the answer is either
        the best straight subarray, or total - (worst straight subarray)
        for the wrap-around case.  When every element is negative, the
        wrap-around candidate would be the empty subarray, so the straight
        maximum is returned instead.
        """
        best_here = worst_here = 0
        running_total = 0
        best = worst = nums[0]
        for value in nums:
            running_total += value
            best_here = max(best_here + value, value)
            worst_here = min(worst_here + value, value)
            if best_here > best:
                best = best_here
            if worst_here < worst:
                worst = worst_here
        if best > 0:
            return max(best, running_total - worst)
        return best
| [
"anant.kaushik2@gmail.com"
] | anant.kaushik2@gmail.com |
63d843377be4bde15c3f3fabfa13e644d1cd9f48 | 4111ca5a73a22174f189361bef654c3f91c3b7ed | /Lintcode/Ladder_11_15_A/362. Sliding Window Maximum.py | 352cc5b57065b3b2edbc26517191dbbcd1fa9897 | [
"MIT"
] | permissive | ctc316/algorithm-python | 58b541b654509ecf4e9eb8deebfcbdf785699cc4 | ac4580d55e05e93e407c6156c9bb801808027d60 | refs/heads/master | 2020-03-16T06:09:50.130146 | 2019-08-02T02:50:49 | 2019-08-02T02:50:49 | 132,548,222 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,128 | py | from queue import Queue
from collections import deque
class MaxQueue:
    """Bounded FIFO queue that reports its current maximum in O(1).

    A monotonically non-increasing deque mirrors the queue: its front is
    always the maximum of the elements currently held.
    """
    def __init__(self, capacity):
        self.capacity = capacity
        self.que = Queue()
        self.max_deque = deque()

    def put(self, val):
        # Evict the oldest element first when the queue is already full.
        if self.que.qsize() >= self.capacity:
            self.pop()
        self.que.put(val)
        # Smaller tail entries can never become the maximum again.
        while self.max_deque and self.max_deque[-1] < val:
            self.max_deque.pop()
        self.max_deque.append(val)

    def pop(self):
        oldest = self.que.get()
        # Drop the mirror entry only when the evicted value was the max.
        if oldest == self.max_deque[0]:
            self.max_deque.popleft()

    def getMax(self):
        return self.max_deque[0]
class Solution:
    """
    @param nums: A list of integers.
    @param k: An integer
    @return: The maximum number inside the window at each moving.
    """
    def maxSlidingWindow(self, nums, k):
        """Return the maximum of every length-k window of nums.

        Uses a monotonic deque of indices directly instead of the MaxQueue
        wrapper: queue.Queue is thread-synchronized, so the original paid
        lock overhead on every put/get in a single-threaded algorithm.
        O(n) time, O(k) extra space; results are identical.
        """
        if k == 0 or len(nums) < k:
            return []
        window = deque()  # indices into nums; their values are non-increasing
        res = []
        for i, val in enumerate(nums):
            # Indices whose values are below val can never be a window max.
            while window and nums[window[-1]] < val:
                window.pop()
            window.append(i)
            # Retire the front index once it falls out of the window.
            if window[0] <= i - k:
                window.popleft()
            if i >= k - 1:
                res.append(nums[window[0]])
        return res
| [
"mike.tc.chen101@gmail.com"
] | mike.tc.chen101@gmail.com |
146995e0e0ef52d28990b4198ac8a86ec561fe8e | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02883/s186596180.py | a67dc653be0d152e0a20b9113874949cc3e810b6 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 559 | py | import sys
import os,io
# Fast stdin: slurp everything once and serve lines from a BytesIO buffer.
input=io.BytesIO(os.read(0,os.fstat(0).st_size)).readline
# n items with consumption a[i] and factor f[i]; k total trainings.
# Each training lowers one a[i] by 1 (not below 0); the cost of item i is
# a[i]*f[i].  Goal: minimize the maximum cost (presumably AtCoder p02883 —
# "Gluttony"; confirm against the problem statement).
n,k=map(int,input().split())
a=list(map(int,input().split()))
f=list(map(int,input().split()))
# Greedy pairing: largest a with smallest f (a ascending, f descending).
a.sort()
f.sort()
f.reverse()
if sum(a)<=k:
    # Every a[i] can be trained down to 0, so the best maximum cost is 0.
    print(0)
    sys.exit()
pointer=0  # unused
# Binary search on the answer T: l stays infeasible, r stays feasible.
l=0
r=0
for i in range(n):
    r=max(r,a[i]*f[i])
while l+1<r:
    try1=(l+r)//2
    # Trainings needed so that every a[i]*f[i] <= try1.
    required=0
    for i in range(n):
        required+=(max(0,a[i]-try1//f[i]))
    if required>k:
        l=try1
    else:
        r=try1
# l may itself be feasible when the loop never tightened it; re-check.
required=0
for i in range(n):
    required+=(max(0,a[i]-l//f[i]))
if required>k:
    print(r)
else:
    print(l)
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
8cb6515893f4750b2e3a962fcd4192b44c3107ae | 913fb9ec1e709a5140676ba7b2371b1976afca72 | /endoClip/miRandaTest/parseAlignment.py | c08d09bcc7dc0a4a54c27a157d1eb854ad369bb4 | [] | no_license | cgreer/ResearchScripts | 171cfe9555ea06fdeb91084c12d07d1b45a2335c | 1107803bb1459d6b6e1dfb1a89679d2b6fd49062 | refs/heads/master | 2016-09-05T10:43:19.090247 | 2012-04-12T21:38:11 | 2012-04-12T21:38:11 | 1,673,080 | 0 | 3 | null | null | null | null | UTF-8 | Python | false | false | 7,998 | py | import bioLibCG
import cgNexusFlat
import cgDL
from cgAutoCast import autocast
import string
from cgNexus import Nexus
@autocast
def updateAdjustedMismatches(fN, fF, guValue = .5, otherValue = 1.0):
    """Recompute each record's weighted mismatch count from its mask.

    In the significance mask, 'G' marks a G:U wobble (weighted guValue)
    and 'X' marks a mismatch or gap (weighted otherValue).
    """
    NX = Nexus(fN, fF)
    NX.load(['sigMask', 'adjustedNumMismatches'])
    while NX.nextID():
        mask = NX.sigMask
        numGU = mask.count('G')
        numGapAndMM = mask.count('X')
        NX.adjustedNumMismatches = (numGU * guValue) + (numGapAndMM * otherValue)
    # NOTE(review): indentation was lost in this copy; save() is assumed to
    # persist all records once after the loop — confirm against the cgNexus API.
    NX.save()
@autocast
def updateAdjustedMismatchesFlat(fN, fOut, guValue = .5, otherValue = 1.0):
    """Append a weighted-mismatch column to a flat tab-separated file.

    Column 18 of each input line holds the significance mask: 'G' (G:U
    wobble) positions are weighted guValue, 'X' (mismatch/gap) positions
    otherValue, and their sum is appended as a new final column.

    Fixed: both files are now opened via with-blocks, so the handles are
    closed even when a malformed line raises midway (the original only
    closed them on the success path).
    """
    with open(fN, 'r') as inF, open(fOut, 'w') as outF:
        for line in inF:
            ls = line.strip().split('\t')
            mask = ls[18]
            numGU = mask.count('G')
            numGapAndMM = mask.count('X')
            adjustedNumMismatches = (numGU * guValue) + (numGapAndMM * otherValue)
            ls.append(str(adjustedNumMismatches))
            outF.write('\t'.join(ls) + '\n')
#TAG::lowercase,count from end::
def countLowerEnd(theString, fromEnd = False):
    """Count the run of lowercase characters at the start of *theString*
    (at the end instead when fromEnd is True).

    Fixed: the original fell off the loop and returned None for an empty
    or fully-lowercase string; it also relied on the Python-2-only
    string.lowercase.  str.islower() behaves the same for ASCII letters
    and works on both interpreters.
    """
    if fromEnd:
        theString = theString[::-1]
    numLower = 0
    for c in theString:
        if c.islower():
            numLower += 1
        else:
            return numLower
    return numLower
#TAG::collapse,repeat,string::
def collapseRuns(theString):
    """Collapse every run of identical consecutive characters to a single
    occurrence (e.g. 'aabb--cc' -> 'ab-c')."""
    kept = [c for i, c in enumerate(theString) if i == 0 or c != theString[i - 1]]
    return ''.join(kept)
#TAG::read multiple file lines,read file,file::
def lineFileParser(inFile, linesPerEntry = 4):
    '''retrieve X lines at a time'''
    # Fixed: the original opened the file without ever closing it; the
    # with-block guarantees the handle is released on every exit path.
    allLines = []
    with open(inFile, 'r') as f:
        while True:
            newEntry = [f.readline() for i in range(linesPerEntry)]
            for l in newEntry:
                # readline() yields '' only at EOF: stop, discarding any
                # partial trailing entry (same behavior as before).
                if l == '':
                    return allLines
            allLines.append(newEntry)
def parseRandaOutput(inFile, smallFN, degFN, oFN):
    '''miRanda Output will be grepped to make it every alignment is 10lines'''
    # Each 10-line chunk of the pre-grepped miRanda output is one alignment.
    allAlignments = lineFileParser(inFile, 10)
    # id -> sequence length for the small RNA and degradome sequence files.
    smallID_length = getIDLength(smallFN)
    degID_length = getIDLength(degFN)
    outF = open(oFN, 'w')
    for i, alignment in enumerate(allAlignments):
        oldScore, qRange, rRange, query, matchInfo, reference, sID, dID = parseAlignmentBasic(alignment)
        complexResult = parseAlignmentComplex(query, reference)
        numMM, numM, numGU, numQGaps, numRGaps, significantTargetMask, numExtensionsQ, numExtensionsR = complexResult
        #check for alignments with N in them
        # parseAlignmentComplex returns all zeros for N-containing alignments.
        if all([x == 0 for x in complexResult]): continue
        sLen = smallID_length[int(sID)]
        dLen = degID_length[int(dID)]
        # One tab-separated record per retained alignment.
        pString = [i, sID, dID, qRange[0], qRange[1], rRange[0], rRange[1], sLen, dLen, query, reference, numM, numMM, numGU, numQGaps, numRGaps, numExtensionsQ, numExtensionsR, significantTargetMask]
        pString = '\t'.join([str(x) for x in pString])
        outF.write(pString + '\n')
    outF.close()
#TAG::aligning,parse alignment,miRanda::
def parseAlignmentBasic(alignment):
    """Split one 10-line miRanda alignment chunk into its scalar fields.

    Returns [oldScore, qRange, rRange, query, matchInfo, reference, sID,
    dID]; raises NameError when any field parses to ''.  (Python 2 code:
    note the print statement below.)
    """
    #parse raw data
    info, n1, query, matchInfo, reference, n3, n4, n5, idInfo, n6 = alignment
    oldScore = float(info.split()[2])
    # Query/reference coordinate ranges come from the info line; the "[2:]"
    # slices strip a two-character prefix from the range tokens.
    qRange = (int(info.split()[3][2:]), int(info.split()[5]))
    rRange = (int(info.split()[6][2:]), int(info.split()[8]))
    query = query.split()[2]
    matchInfo = matchInfo.strip()
    reference = reference.split()[2]
    # idInfo looks like ">smallID degID ..."; drop the leading '>'.
    sID = idInfo.split()[0][1:]
    dID = idInfo.split()[1]
    #calculate read qRange and rRange
    '''qrange is weird, the lower number is correct and the higher one is to high by one (2 to 14) should be (2 to 13)
    In addition, the qRange has to have the lower case letters added to it whereas the rRange already includes it'''
    qRange = qRange[0] - countLowerEnd(query, fromEnd = True), qRange[1] + countLowerEnd(query) - 1 #should always be (1,N)
    output = [oldScore, qRange, rRange, query, matchInfo, reference, sID, dID]
    if any([x == '' for x in output]):
        print output
        raise NameError("missing some parsing info")
    return output
def parseAlignmentComplex(query, reference):
    '''Get num missmatches and other parsed data from alignment'''
    # Classifies every aligned column as match / mismatch / G:U wobble /
    # gap and builds a per-position mask over the small-RNA (query) string:
    # 'N' = complementary match, 'G' = G:U wobble, 'X' = mismatch or gap.
    # Returns [numMM, numM, numGU, numQGaps, numRGaps, mask,
    # numExtensionsQ, numExtensionsR], or all zeros if an 'N' base occurs.
    allLetters = ['A', 'T', 'C', 'G']
    queryGapPairs = ['-%s' % x for x in allLetters]
    referenceGapPairs = ['%s-' % x for x in allLetters]
    #TAG::genomic letter combinations,combinations,letters::
    guPairs = ['GT', 'TG']
    compPairs = ['AT', 'TA', 'CG', 'GC']
    misPairs = ['TT', 'TC', 'AG', 'AC', 'AA', 'CC', 'CT', 'CA', 'GG', 'GA']
    if len(reference) != len(query):
        raise NameError("ALIGNMENTS ARE DIFFERENT SIZE!")
    #calculate # gaps// ALLOW TARGET GAPS???
    # One '-' run = one gap opening; extra '-' chars in a run are extensions.
    collQ = collapseRuns(query)
    collR = collapseRuns(reference)
    numQGaps = collQ.count('-')
    numRGaps = collR.count('-')
    numExtensionsQ = query.count('-') - numQGaps
    numExtensionsR = reference.count('-') - numRGaps
    #calc match/mismatch (rev to get from small 5-->3)
    query = query.upper()[::-1]
    reference = reference.upper()[::-1]
    matchPairs = ['%s%s' % (x,y) for x,y in zip(query, reference)]
    significantTargetMask = [] #Mask is entire small string masked
    numM = numMM = numGU = 0
    gapShift = 0
    for i,pair in enumerate(matchPairs):
        if pair in queryGapPairs:
            # Gap in the query: counted as a mismatch position.
            gapShift -= 1
            numMM += 1
            significantTargetMask.append('X')
        elif pair in guPairs:
            # G:U wobble pairing.
            numGU += 1
            significantTargetMask.append('G')
        elif pair in compPairs:
            numM += 1
            significantTargetMask.append('N')
        elif pair in misPairs:
            numMM += 1
            significantTargetMask.append('X')
        elif pair in referenceGapPairs:
            numMM += 1
            significantTargetMask.append('X')
        elif 'N' in pair:
            return [0,0,0,0,0,0,0,0] #dont take N alignments
        else:
            print query
            print reference
            print pair
            raise NameError("COMBINATION NOT ACCOUNTED FOR!!!")
    significantTargetMask = ''.join(significantTargetMask)
    significantTargetMask = significantTargetMask[::-1] # did re-aligning reverse from miRanda...for sanity
    return [numMM, numM, numGU, numQGaps, numRGaps, significantTargetMask, numExtensionsQ, numExtensionsR]
def getIDLength(fN):
        """Map each small-RNA integer ID to its sequence length.

        fN: file parsed by lineFileParser in 3-line records
        (id line containing '>ID', sequence line, blank line).
        Returns {int_id: sequence_length}.
        """
        lengths = {}
        for record in lineFileParser(fN, 3):
                header, sequence, _blank = record
                seq_id = int(header.strip().split('>')[-1])
                lengths[seq_id] = len(sequence.strip())
        return lengths
def parseRandaInclusiveCheck(inFile, smallFN):
        '''Sanity-check every alignment in a miRanda output file.

        The file is assumed to be pre-grepped so that each alignment record
        is exactly 10 lines long.
        '''
        id_length = getIDLength(smallFN)
        for alignment in lineFileParser(inFile, 10):
                checkInclusiveSmallLength(alignment, id_length)
def checkInclusiveSmallLength(alignment, id_length):
        '''hacked script to check if miRanda always shows the full length small
        RNA at the QUERY part of the alignment...needed for alignment calculations

        alignment: the 10 raw lines of a single miRanda hit.
        id_length: {int small-RNA id: expected sequence length}.
        Raises NameError("MISMATCH") when the gapless query length differs
        from the expected small-RNA length.
        '''
        #parse raw data
        info, n1, query, matchInfo, reference, n3, n4, n5, idInfo, n6 = alignment
        oldScore = float(info.split()[2])
        # [2:] drops a two-character prefix from the first token of each range
        qRange = (int(info.split()[3][2:]), int(info.split()[5]))
        rRange = (int(info.split()[6][2:]), int(info.split()[8]))
        query = query.split()[2]
        matchInfo = matchInfo.strip()
        reference = reference.split()[2]
        sID = idInfo.split()[0][1:]
        dID = idInfo.split()[1]
        # gapless query length must equal the recorded small-RNA length
        query = list(query)
        dashCount = query.count('-')
        if len(query) - dashCount != id_length[int(sID)]:
                print sID, dID
                raise NameError("MISMATCH")
if __name__ == "__main__":
        import sys
        # "help" prints this module's documentation via bioLibCG; otherwise
        # dispatch to the function named by argv[1] with the remaining args.
        if sys.argv[1] == "help":
                bioLibCG.gd(sys.argv[0])
        else:
                bioLibCG.submitArgs(globals()[sys.argv[1]], sys.argv[1:])
| [
"chrisgreer88@gmail.com"
] | chrisgreer88@gmail.com |
def lcm(a1, a2):
    """Least common multiple of a1 and a2, via Euclid's algorithm for the gcd."""
    x, y = a1, a2
    while x % y:
        x, y = y, x % y
    # gcd is y once the remainder hits zero; lcm = product / gcd
    return a1 * a2 / y
# CodeChef driver: for each test case, print the smallest pairwise LCM
# of the given array.  NOTE: Python 2 script (raw_input/xrange/print).
T = int(raw_input())
for tt in xrange(T):
    N = int(raw_input())
    A = [int(x) for x in raw_input().split(' ')]
    LCM = []
    # collect the LCM of every unordered pair -- O(N^2) pairs
    for i in range(N):
        for j in range(i+1, N):
            LCM.append(lcm(A[i], A[j]))
    LCM.sort()
    # smallest pairwise LCM is the answer
    print LCM[0]
| [
"="
] | = |
f2e706a3ce884256f22aef75a2af9a5311ac1719 | 5004c5592ead17f55acacce36d880bb3b12a1255 | /graphql/client.py | 33eb2cdc977f484ead2333602eac2a2dc6967621 | [] | no_license | xav-b/stacks | f856c9efd5b5c3fb5165d31fb9b928cca14038f4 | 2faa0ca759509174920eca88c6553f1b55a14f09 | refs/heads/master | 2021-10-08T05:07:44.878158 | 2018-12-08T02:29:46 | 2018-12-08T02:29:55 | 64,149,347 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 294 | py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
import requests
# Minimal GraphQL smoke test: POST a `hello` query to a local server and
# print the JSON response on success (non-200 responses are ignored).
HEADERS = {'Content-Type': 'application/json'}
QUERY = {'query': '{ hello }'}
res = requests.post('http://localhost:4000/graphql', headers=HEADERS, json=QUERY)
if res.status_code == 200:
    print(res.json())
| [
"xavier.bruhiere@gmail.com"
] | xavier.bruhiere@gmail.com |
7dd3116a2797092f761d2f59ec02bc3ba7272e5a | 9d862dd68f8b4ea4e7de9397fef8592824c77449 | /app/top/api/rest/WlbWmsSkuCreateRequest.py | 6598145c2d04de7ebb441ec797e5897dc69e60ee | [] | no_license | hi-noikiy/tmall-sku-outer_id | ffaca630dfb288ca33d962b8a050932d1047b9c8 | 1bcf29386a513bcb210bf5d91016e0dcb1ebc1ad | refs/heads/master | 2021-05-09T18:20:27.150316 | 2017-03-08T06:43:57 | 2017-03-08T06:43:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,200 | py | '''
Created by auto_sdk on 2016.05.25
'''
from app.top.api.base import RestApi
class WlbWmsSkuCreateRequest(RestApi):
    """TOP request object for the taobao.wlb.wms.sku.create API.

    Every optional request field is initialised to None; callers assign
    the fields they need before submitting the request.
    """

    def __init__(self,domain='gw.api.taobao.com',port=80):
        RestApi.__init__(self,domain, port)
        # All optional request parameters, each initialised to None.
        optional_fields = (
            'advent_lifecycle', 'approval_number', 'bar_code', 'brand',
            'brand_name', 'category', 'category_name', 'color',
            'cost_price', 'extend_fields', 'gross_weight', 'height',
            'is_area_sale', 'is_batch_mgt', 'is_danger', 'is_hygroscopic',
            'is_shelflife', 'is_sn_mgt', 'item_code', 'item_id',
            'item_price', 'length', 'lifecycle', 'lockup_lifecycle',
            'name', 'net_weight', 'origin_address', 'pcs',
            'reject_lifecycle', 'size', 'specification', 'store_code',
            'tag_price', 'title', 'type', 'use_yn', 'volume', 'width',
        )
        for field in optional_fields:
            setattr(self, field, None)

    def getapiname(self):
        return 'taobao.wlb.wms.sku.create'
| [
"1037096435@qq.com"
] | 1037096435@qq.com |
2a93997c42a613a172bca0bdc009d9f471283b9a | 3449e5511dc8da19fc841af767dbe8d216e26ffb | /manage.py | 580284fcc718e6d888b0f90ffd61253c58d00251 | [] | no_license | erikwestra/mm-server | 8ba2af0ee7acd372949589b6f8d429099a38ea58 | bead1ad439541211e33fdc60264a869f18a99ae9 | refs/heads/master | 2021-01-10T21:14:23.636707 | 2015-05-27T21:22:54 | 2015-05-27T21:22:54 | 28,573,174 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 251 | py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
    # Standard Django management entry point for the mmServer project:
    # point Django at the settings module, then hand argv to the CLI.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "mmServer.settings")
    from django.core.management import execute_from_command_line
    execute_from_command_line(sys.argv)
| [
"ewestra@gmail.com"
] | ewestra@gmail.com |
7a2fda54a67c368b387b99cdca294856aff50611 | 40b9c3020bda615a7c097ea962c039fc08a90a2f | /bonsai/core/admin.py | 108699c3a43ee6c0619aa03ee7c034ed2673ed10 | [] | no_license | mahongquan/bonsailims | 980e9d606a58a81c7637e7f9e640cb16b9cabb61 | 9f5d58a49d24ba86c8d9472ea7e6449bc8c61538 | refs/heads/master | 2020-12-24T14:53:56.886079 | 2013-11-27T02:35:15 | 2013-11-27T02:35:15 | 14,733,537 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 279 | py | from django.contrib import admin
from models import *
# Register every core model with the default admin site (same order as before).
for model in (ProjectCode, CollectionMethod, Material, StorageMethod,
              Project, Subject, Sample):
    admin.site.register(model)
| [
"mahongquan@sina.com"
] | mahongquan@sina.com |
f944801874341c8211053e8eeb0fde3287cd620e | 9ce4292954000fd66bcdbd0797a280c306308d08 | /quizzes/00.organize.me/Cracking the Coding Interview/17-14-2.py | 47dc8b3fc7400cb532246997349c31584ce6d361 | [
"MIT"
] | permissive | JiniousChoi/encyclopedia-in-code | 0c786f2405bfc1d33291715d9574cae625ae45be | 77bc551a03a2a3e3808e50016ece14adb5cfbd96 | refs/heads/master | 2021-06-27T07:50:10.789732 | 2020-05-29T12:50:46 | 2020-05-29T12:50:46 | 137,426,553 | 2 | 0 | MIT | 2020-10-13T08:56:12 | 2018-06-15T01:29:31 | Python | UTF-8 | Python | false | false | 3,161 | py | '''
17.14 - 구둣점이 없어진 문장을 최대한 복원하라.
'''
# Build the dictionary word list used for sentence restoration.
ALPHABET = 'abcdefghijklmnopqrstuvwxyz'
WORD_LIST = []
with open('english_dictionary.txt', 'r', encoding="ISO-8859-1") as fp:
    WORD_LIST = fp.read().splitlines()
WORD_LIST = [word.lower() for word in WORD_LIST]
WORD_LIST = list(set(WORD_LIST))
# BUG FIX: the original filter loop appended a word once per alphabetic
# character (duplicating each entry up to len(word) times and never actually
# excluding words with punctuation/digits).  Keep only non-empty, purely
# alphabetic words -- only those can ever match a lowercase alphabetic query.
WORD_LIST = [word for word in WORD_LIST
             if word and all(ch in ALPHABET for ch in word)]
# Sort by (length, lexicographic) so the shortest/longest prints below hold.
WORD_LIST.sort(key = lambda x: (len(x), x))
print('done sorting word_list')
print('words length ranges from {} to {}'.format(len(WORD_LIST[0]), len(WORD_LIST[-1])))
print('Creating rainbow_table')
# Membership table for O(1) dictionary lookups (dict kept for compatibility).
rainbow_table = {}
for word in WORD_LIST:
    if word not in rainbow_table:
        rainbow_table[word] = True
print('Done!')
assert 'dog' in rainbow_table
def recursive(broken_sentence):
    """Return every possible segmentation of *broken_sentence*.

    Each segmentation is a list of words; the empty string yields [[]]
    (a single segmentation containing no words).
    """
    if not broken_sentence:
        return [[]]
    # prepend each candidate word to every segmentation of its remainder
    return [[word] + tail
            for word, rest in get_candidates(broken_sentence)
            for tail in recursive(rest)]
def get_candidates(sentence):
    """Yield (word, remainder) splits of *sentence*.

    The first candidate treats the leading character as an unknown
    capitalised token; the rest are every dictionary-word prefix found
    in the global rainbow_table.
    """
    yield sentence[:1].upper(), sentence[1:]
    for cut in range(1, len(sentence) + 1):
        prefix = sentence[:cut]
        if prefix in rainbow_table:
            # only build the remainder for actual dictionary words
            yield prefix, sentence[cut:]
def concat_capitals_together(words):
    """Merge consecutive all-uppercase words into single tokens, in place.

    e.g. ['I', 'LOVE', 'jinsung'] -> ['ILOVE', 'jinsung'].
    Mutates *words* and returns the same list object.
    """
    merged = []
    i = 0
    total = len(words)
    while i < total:
        if words[i].isupper():
            # extend over the whole run of uppercase words, then join it
            j = i
            while j < total and words[j].isupper():
                j += 1
            merged.append(''.join(words[i:j]))
            i = j
        else:
            merged.append(words[i])
            i += 1
    words[:] = merged
    return words
# Read a spacing-stripped sentence and enumerate candidate restorations.
broken_sentence = input('input a broken sentence: ')
#broken_sentence = 'ilovejinsungheleftthismorning'
#broken_sentence = 'jesslookedjustliketimherbrother'
#broken_sentence = 'dog'
result = recursive(broken_sentence)
sentences = []
# Join each candidate segmentation back into a spaced sentence, merging
# runs of single capital letters (unknown tokens) first.
for each_list in result:
    #assert isinstance(each, list)
    each_list = concat_capitals_together(each_list)
    sentence = ' '.join(each_list)
    sentences.append(sentence)
print('numbers of sentences : {}'.format(len(sentences)))
def lesser_capitals(sentence):
    """Sort key: (uppercase-letter count, space count) -- fewer first."""
    capitals = sum(1 for ch in sentence if ch.isupper())
    return capitals, sentence.count(' ')
# Rank candidates: fewest unknown (capitalised) letters, then fewest words.
sentence_in_order = sorted(sentences, key = lesser_capitals)
#print(sentence_in_order)
print('restored sentence :',sentence_in_order[:1])
# Ad-hoc check for the 'brother' test sentence; exit early when present.
for stc in sentence_in_order:
    if 'brother' in stc:
        print('found')
        print(stc)
        import sys
        sys.exit(0)
print('not found')
| [
"jinchoiseoul@gmail.com"
] | jinchoiseoul@gmail.com |
def calculate(num1, operation, num2):
    """Apply a basic arithmetic operation to two numbers.

    Returns None for an unknown operator or for division by zero.
    """
    binary_ops = {
        '+': lambda a, b: a + b,
        '-': lambda a, b: a - b,
        '*': lambda a, b: a * b,
    }
    if operation in binary_ops:
        return binary_ops[operation](num1, num2)
    if operation == '/' and num2 != 0:
        return num1 / num2
    return None
"qmstuart@gmail.com"
] | qmstuart@gmail.com |
f8d124ea1738661ae00c133c22ce07c03b60fac3 | 1d60c5a7b8ce6277bff514e376f79848f706344c | /Machine Learning Scientist with Python/13. Feature Engineering for NLP in Python/01. Basic features and readability scores/04. Hashtags and mentions in Russian tweets.py | 993f21900df53d45c8ed5930dbf0aecac0dd49e1 | [] | no_license | DidiMilikina/DataCamp | 338c6e6d3b4f5b6c541c1aba155a36e9ee24949d | 3bf2cf3c1430190a7f8e54efda7d50a5fd66f244 | refs/heads/master | 2020-12-15T13:16:54.178967 | 2020-05-06T17:30:54 | 2020-05-06T17:30:54 | 235,113,616 | 4 | 3 | null | null | null | null | UTF-8 | Python | false | false | 1,693 | py | '''
Hashtags and mentions in Russian tweets
Let's revisit the tweets dataframe containing the Russian tweets. In this exercise, you will compute the number of hashtags and mentions in each tweet by defining two functions count_hashtags() and count_mentions() respectively and applying them to the content feature of tweets.
In case you don't recall, the tweets are contained in the content feature of tweets.
Instructions 1/2
50 XP
1
In the list comprehension, use startswith() to check if a particular word starts with '#'.
2
In the list comprehension, use startswith() to check if a particular word starts with '@'.
'''
SOLUTION
1
# Return the number of hashtags in a string.
def count_hashtags(string):
    # A whitespace-separated token counts as a hashtag when it starts with '#'.
    return sum(1 for token in string.split() if token.startswith('#'))
# Create a feature hashtag_count and display distribution
# NOTE(review): assumes a pandas DataFrame `tweets` with a 'content' column
# and matplotlib.pyplot as `plt` are already in scope (DataCamp exercise).
tweets['hashtag_count'] = tweets['content'].apply(count_hashtags)
tweets['hashtag_count'].hist()
plt.title('Hashtag count distribution')
plt.show()
2
# Return the number of mentions in a string.
def count_mentions(string):
    # A whitespace-separated token counts as a mention when it starts with '@'.
    return sum(1 for token in string.split() if token.startswith('@'))
# Create a feature mention_count and display distribution
# NOTE(review): assumes a pandas DataFrame `tweets` with a 'content' column
# and matplotlib.pyplot as `plt` are already in scope (DataCamp exercise).
tweets['mention_count'] = tweets['content'].apply(count_mentions)
tweets['mention_count'].hist()
plt.title('Mention count distribution')
plt.show() | [
"didimilikina8@gmail.com"
] | didimilikina8@gmail.com |
83ff8724593b0b4cb2fbcade365b2021df9e4454 | 4dd695521343d56ff943e8c1768343d7680714e3 | /experiments/experiments_SBrT_2018/scripts_bob/ivector_T200_fold4.py | ed7c89dd5e015998081776051f44612c8bdf818c | [] | no_license | natharb/environment | ea659ee541f6473e92b5b30c549e52b66f47b280 | 86e6cee6e01d2370abeb7c55a2c8a15001735919 | refs/heads/master | 2021-09-28T02:39:02.222966 | 2018-11-13T12:03:34 | 2018-11-13T12:03:34 | 139,762,646 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,195 | py | import sys
sys.path.insert(0, '.')
import bob.bio.spear
import bob.bio.gmm
import numpy
import scipy.spatial
# bob.bio experiment configuration: cross-validation fold 4 of an i-vector
# run (directory names indicate a 256-Gaussian UBM with subspace T=200).
temp_directory = './closedset_ynogutti/ivector/256/T200/fold_4/temp/'
result_directory = './closedset_ynogutti/ivector/256/T200/fold_4/results/'
sub_directory = 'subdirectory'
database = 'database_ivector_T200_fold4.py'
groups = ['dev']
#groups = ['dev', 'eval']
preprocessor = bob.bio.spear.preprocessor.Energy_2Gauss(max_iterations = 10, convergence_threshold = 0.0005, variance_threshold = 0.0005, win_length_ms = 20., win_shift_ms = 10., smoothing_window = 10)
extractor = bob.bio.spear.extractor.Cepstral(win_length_ms = 25, win_shift_ms = 10, n_filters = 24 , dct_norm = False, f_min = 0, f_max = 4000, delta_win = 2, mel_scale = True,
with_energy = True, with_delta = True, with_delta_delta = True, n_ceps = 19, pre_emphasis_coef = 0.97)
algorithm = bob.bio.gmm.algorithm.IVector(subspace_dimension_of_t = 200, tv_training_iterations = 10, update_sigma = True, use_whitening = True, use_lda = False,
use_wccn = False, use_plda = False, lda_dim = 50, plda_dim_F = 50, plda_dim_G = 50, plda_training_iterations = 50, number_of_gaussians = 256)
parallel = 10
verbose = 2 | [
"nathbapt@decom.fee.unicamp.br"
] | nathbapt@decom.fee.unicamp.br |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.