blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 777 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 149 values | src_encoding stringclasses 26 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 3 10.2M | extension stringclasses 188 values | content stringlengths 3 10.2M | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f123fb96b9fc9be0b130cb0616266daca2a4b1f0 | 4791bde7bb7275fc25480fdf0cd81d1a9450a50c | /articles/migrations/0002_article_thumb.py | f990e16292870d560147e2f25f5793b8e00c4c80 | [] | no_license | VinneyJ/RU-I-tech-app | 9692f0681dd704ce52c621b3d080d1a90fbe501b | dd15335f26a35d8e32477e7dd384f3a80351d25d | refs/heads/master | 2020-05-02T21:04:17.773867 | 2019-03-28T13:25:18 | 2019-03-28T13:25:18 | 178,209,692 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 409 | py | # Generated by Django 2.1.7 on 2019-03-07 18:11
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated Django migration: add the optional ``thumb`` image field
    to the ``Article`` model created in the initial migration."""

    # Must apply after the initial schema for the ``articles`` app exists.
    dependencies = [
        ('articles', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='article',
            name='thumb',
            # Optional thumbnail; falls back to a bundled 'default.png' and is
            # stored directly under MEDIA_ROOT (empty ``upload_to``).
            field=models.ImageField(blank=True, default='default.png', upload_to=''),
        ),
    ]
| [
"vincentjayden49@gmail.com"
] | vincentjayden49@gmail.com |
aea9b526917cfd144e682e610acce9676629ad37 | 3f09e77f169780968eb4bd5dc24b6927ed87dfa2 | /src/Problems/Linked_List_Cycle.py | d954942bb12a57f59ca94bcca0dceb6070ef809a | [] | no_license | zouyuanrenren/Leetcode | ad921836256c31e31cf079cf8e671a8f865c0660 | 188b104b81e6c73792f7c803c0fa025f9413a484 | refs/heads/master | 2020-12-24T16:59:12.464615 | 2015-01-19T21:59:15 | 2015-01-19T21:59:15 | 26,719,111 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 824 | py | '''
Created on 17 Nov 2014
@author: zouyuanrenren
'''
'''
Given a linked list, determine if it has a cycle in it.
Follow up:
Can you solve it without using extra space?
'''
'''
Can be done with two pointers:
1. one fast pointer;
2. one slow pointer;
3. the list has a cycle iff fast == slow at some point
'''
# Definition for singly-linked list.
class ListNode:
    """One node of a singly linked list: a payload plus a ``next`` pointer."""

    def __init__(self, x):
        # A freshly built node starts detached (no successor).
        self.val, self.next = x, None
class Solution:
    """Cycle detection via Floyd's tortoise-and-hare: O(n) time, O(1) space."""

    # @param head, a ListNode
    # @return a boolean
    def hasCycle(self, head):
        """Return True iff the list starting at ``head`` contains a cycle.

        A fast pointer advances two nodes per step while a slow pointer
        advances one; they can only meet again if the fast pointer wraps
        around a cycle onto the slow one.
        """
        slow = fast = head
        while fast is not None and fast.next is not None:
            fast = fast.next.next
            slow = slow.next
            if fast is slow:  # identity check: the two pointers met in a cycle
                return True
        # The fast pointer ran off the end, so the list is acyclic.
        # (This also covers head is None, making a separate guard redundant.)
        return False
"y.ren@abdn.ac.uk"
] | y.ren@abdn.ac.uk |
00d3db21ba2b288188763689d3d2ea6d93620a2e | 33b705ff525bd5efe3f8dc18949aa79be7cd41ec | /dvc/__init__.py | 8c166489ee26f2cdc2adba8e6620df190377a754 | [
"Apache-2.0"
] | permissive | nanokinetics/dvc | 7bbb737642e317fa3543334fac29b4e14e5e3514 | af6ac421fd8b7c514e4358543df0b5ae57746a4b | refs/heads/master | 2021-04-15T17:02:08.455586 | 2018-03-21T10:23:22 | 2018-03-21T10:23:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 117 | py | """
DVC
----
Make your data science projects reproducible and shareable.
"""
# Package version; ``__version__`` is the PEP 396-style alias that external
# tooling (pip, introspection) conventionally reads.
VERSION = '0.9.3'
__version__ = VERSION
| [
"kupruser@gmail.com"
] | kupruser@gmail.com |
4ffcca5c272df580b13e18d5d6c875ec77f8e30b | c8f5d69d21ac4df40d79a811dea2e3ad82fb5e04 | /src/stock_list_upd.py | 8985c45bee57d8f3d21e912834f494b47f4fdfdf | [] | no_license | webclinic017/usstock | e71ab18534fd3afc05ab2452578821584750e2b9 | c724f00bc1c5d2a41ee58e037ba0b1b3f0904f70 | refs/heads/master | 2023-08-15T05:22:14.275202 | 2021-10-14T21:19:53 | 2021-10-14T21:19:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,972 | py | #!/usr/bin/env python
'''
TBD, Wed Jan 30 21:47:38 EST 2019
to automate 1). sp500 ticker selection, 2.) additional ticker price hist update 3. split adj (TBD), 4. dividends adj (TBD), 5. earnings update 6. financials update
Procedure:
1. pull sp500 stock list to [sp500_component]
2. pull available stock list to [iex_company_temp]
3. add additional new sp500 list to [mapping_ticker_cik] via "mapping_ticker_cik.add.upd.sql"
4. check stock status in [mapping_ticker_cik]
update [mapping_ticker_cik] set act_code=0
if act_code=1 is no longer available in [iex_company_temp]
5. pull iex stock "quotes" in daily basis based on [iex_company_temp]
6. pull iex stock "chart" in weekly basis based on [iex_company_temp]
7. check splits, M&A activies daily basis
'''
import sys
import pandas as pd
from pprint import pprint
from _alan_calc import sqlQuery,conn2pgdb,upd_temp2hist
def printerr(s, file=sys.stderr, end='\n'):
    """Write ``s`` followed by ``end`` to ``file`` (stderr by default).

    Used for progress/diagnostic messages so stdout stays free for data.
    Delegating to :func:`print` also tolerates non-string arguments, which
    the previous ``file.write(s + end)`` would reject with a TypeError.
    """
    print(s, end=end, file=file)
# Script body: compare the earliest overlapping trading day between the
# freshly pulled prices (prc_temp_iex) and the stored history (prc_hist_iex).
# Any ticker whose close differs by more than 0.5% on that day is treated as
# inconsistent (e.g. a split/dividend adjustment happened) and its stored
# history is dropped and rebuilt from the temp table.
dbname='ara';hostname='localhost'
pgDB=conn2pgdb(dbname=dbname,hostname=hostname)
# Tickers whose hist/temp closes disagree by >0.5% on each ticker's earliest
# date present in the temp table, worst offenders last.
xqr = """select * from (select a.pbdate,a.name,a.close,b.close,(a.close/b.close-1)*100. as pchg from prc_hist_iex a,prc_temp_iex b, (select name,min(pbdate) mndate from prc_temp_iex group by name) as c where a.pbdate=c.mndate and b.pbdate=c.mndate and a.pbdate=b.pbdate and a.name=b.name and a.name=c.name ) as x where abs(pchg)>0.5 ORDER BY abs(pchg)
"""
scLst = sqlQuery(xqr,engine=pgDB)
if len(scLst)>0:
    # Stash the offending rows in a scratch table for later inspection.
    tb_temp='temp_list'
    scLst.to_sql(tb_temp,pgDB,index=False,schema='public',if_exists='replace')
    # Build a SQL IN-list literal of the affected ticker names.
    xlst = "('{}')".format("','".join(scLst['name']))
    #fp=open('stock_list_upd.tmp','w')
    #fp.write(scLst.to_csv(index=False,sep='|'))
    #fp.close()
    printerr(xlst)
    # delete entire hist if temp in the earliest is not consistent
    xqr = """delete from prc_hist_iex where name in {}""".format(xlst)
    printerr(xqr)
    pgDB.execute(xqr,pgDB)
    # update temp to hist
    upd_temp2hist(pgDB,temp='prc_temp_iex',hist='prc_hist_iex',pcol=['name','pbdate'])
| [
"facebook@beyondbond.com"
] | facebook@beyondbond.com |
31ee31ef742d9ac8ad49c4ca2f2a2c422d62dfd0 | e489b2f77b52d33bbf952066b0975485a30ec8f9 | /BootCRUDICApp/urls.py | 26f1cd46f50089a767ac6d5c45fa439ca1cbcf0c | [
"Apache-2.0"
] | permissive | cs-fullstack-2019-spring/django-bootstrapcrud-ic-Kenn-CodeCrew | 9e837b6a4d8f6b637273bcb5478c2ee844b1301e | dd65eda49daf8527d01f96c7b39f3c874f371138 | refs/heads/master | 2020-04-27T18:34:02.209542 | 2019-03-08T20:45:31 | 2019-03-08T20:45:31 | 174,576,966 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 168 | py | from django.urls import path
from . import views
urlpatterns = [
path('', views.index, name='index'),
path('addMember/', views.addMember, name='addMember'),
] | [
"kenn+git@code-crew.org"
] | kenn+git@code-crew.org |
5a8fa6e8941d07f985b6f4dddff4736e33c5f14c | d2327e4277629ff5c04cfdfb71cdae7209d56fa7 | /sample codes/topic6/py_test6.py | 43b88b93dc59ce1a63f4197fb2a40aeda7050b52 | [] | no_license | tertiarycourses/Full-ROS-Training | 77caeedda216e6e065fec0664a93d66df38ba571 | b9bdb7daf2a08421d163001cb2b22cbfb1221f23 | refs/heads/main | 2023-03-04T09:04:02.432243 | 2021-02-16T15:57:51 | 2021-02-16T15:57:51 | 314,543,129 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 362 | py | #!/usr/bin/env python
import rospy
from std_msgs.msg import Float32
def callback(msg_data):
    # Log every temperature reading (std_msgs/Float32) received on the topic.
    rospy.loginfo(msg_data)
def listener():
    # Register this process as the 'iot_sensor' ROS node, subscribe to the
    # 'temperature' topic, and block until the node is shut down.
    rospy.init_node('iot_sensor')
    rospy.Subscriber('temperature', Float32, callback)
    rospy.spin()
if __name__ == '__main__':
    try:
        listener()
    except rospy.ROSInterruptException:
        # The rospy module exposes no top-level ``shutdown()`` function, so
        # the previous ``rospy.shutdown()`` raised AttributeError instead of
        # exiting cleanly.  A ROSInterruptException already means the node is
        # being shut down, so the conventional handler is to exit quietly.
        pass
"angch@tertiaryinfotech.com"
] | angch@tertiaryinfotech.com |
3ad485aaa42a75f2c082663ae1879b0af554ddba | fbbe424559f64e9a94116a07eaaa555a01b0a7bb | /Keras_tensorflow_nightly/source2.7/markdown/extensions/sane_lists.py | 651b12b257b63a3aa6f281602a59f9a40c8a6c26 | [
"MIT"
] | permissive | ryfeus/lambda-packs | 6544adb4dec19b8e71d75c24d8ed789b785b0369 | cabf6e4f1970dc14302f87414f170de19944bac2 | refs/heads/master | 2022-12-07T16:18:52.475504 | 2022-11-29T13:35:35 | 2022-11-29T13:35:35 | 71,386,735 | 1,283 | 263 | MIT | 2022-11-26T05:02:14 | 2016-10-19T18:22:39 | Python | UTF-8 | Python | false | false | 1,597 | py | """
Sane List Extension for Python-Markdown
=======================================
Modify the behavior of Lists in Python-Markdown to act in a sane manner.
See <https://Python-Markdown.github.io/extensions/sane_lists>
for documentation.
Original code Copyright 2011 [Waylan Limberg](http://achinghead.com)
All changes Copyright 2011-2014 The Python Markdown Project
License: [BSD](http://www.opensource.org/licenses/bsd-license.php)
"""
from __future__ import absolute_import
from __future__ import unicode_literals
from . import Extension
from ..blockprocessors import OListProcessor, UListProcessor
import re
class SaneOListProcessor(OListProcessor):
    """Ordered-list block processor that only treats ``ol`` blocks as siblings."""

    # Restricting sibling tags stops a numbered list from absorbing an
    # adjacent bullet list — the "sane" behavior this extension provides.
    SIBLING_TAGS = ['ol']

    def __init__(self, parser):
        super(SaneOListProcessor, self).__init__(parser)
        # Child items must use digit markers only (e.g. "1."), unlike the
        # default processor whose CHILD_RE also accepts bullet markers.
        self.CHILD_RE = re.compile(r'^[ ]{0,%d}((\d+\.))[ ]+(.*)' %
                                   (self.tab_length - 1))
class SaneUListProcessor(UListProcessor):
    """Unordered-list block processor that only treats ``ul`` blocks as siblings."""

    SIBLING_TAGS = ['ul']

    def __init__(self, parser):
        super(SaneUListProcessor, self).__init__(parser)
        # Child items must use bullet markers only ("*", "+" or "-"), never
        # ordered-list digit markers.
        self.CHILD_RE = re.compile(r'^[ ]{0,%d}(([*+-]))[ ]+(.*)' %
                                   (self.tab_length - 1))
class SaneListExtension(Extension):
    """ Add sane lists to Markdown. """

    def extendMarkdown(self, md, md_globals):
        """ Override existing Processors. """
        # Swap the stock list processors for the stricter "sane" variants.
        md.parser.blockprocessors['olist'] = SaneOListProcessor(md.parser)
        md.parser.blockprocessors['ulist'] = SaneUListProcessor(md.parser)
def makeExtension(*args, **kwargs):
    # Entry point used by markdown.Markdown(extensions=...) to instantiate
    # the extension with user-supplied configuration.
    return SaneListExtension(*args, **kwargs)
| [
"ryfeus@gmail.com"
] | ryfeus@gmail.com |
322cccfc6c3c7ac77fce6b3f7163edea0ab29daa | 5e6d8b9989247801718dd1f10009f0f7f54c1eb4 | /sdk/python/pulumi_azure_native/documentdb/v20210701preview/sql_resource_sql_container.py | d09c4449bc4b846dedb02b4ed47f13814dce032c | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | vivimouret29/pulumi-azure-native | d238a8f91688c9bf09d745a7280b9bf2dd6d44e0 | 1cbd988bcb2aa75a83e220cb5abeb805d6484fce | refs/heads/master | 2023-08-26T05:50:40.560691 | 2021-10-21T09:25:07 | 2021-10-21T09:25:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19,889 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['SqlResourceSqlContainerArgs', 'SqlResourceSqlContainer']
@pulumi.input_type
class SqlResourceSqlContainerArgs:
def __init__(__self__, *,
account_name: pulumi.Input[str],
database_name: pulumi.Input[str],
resource: pulumi.Input['SqlContainerResourceArgs'],
resource_group_name: pulumi.Input[str],
container_name: Optional[pulumi.Input[str]] = None,
identity: Optional[pulumi.Input['ManagedServiceIdentityArgs']] = None,
location: Optional[pulumi.Input[str]] = None,
options: Optional[pulumi.Input['CreateUpdateOptionsArgs']] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
"""
The set of arguments for constructing a SqlResourceSqlContainer resource.
:param pulumi.Input[str] account_name: Cosmos DB database account name.
:param pulumi.Input[str] database_name: Cosmos DB database name.
:param pulumi.Input['SqlContainerResourceArgs'] resource: The standard JSON format of a container
:param pulumi.Input[str] resource_group_name: The name of the resource group. The name is case insensitive.
:param pulumi.Input[str] container_name: Cosmos DB container name.
:param pulumi.Input['ManagedServiceIdentityArgs'] identity: Identity for the resource.
:param pulumi.Input[str] location: The location of the resource group to which the resource belongs.
:param pulumi.Input['CreateUpdateOptionsArgs'] options: A key-value pair of options to be applied for the request. This corresponds to the headers sent with the request.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Tags are a list of key-value pairs that describe the resource. These tags can be used in viewing and grouping this resource (across resource groups). A maximum of 15 tags can be provided for a resource. Each tag must have a key no greater than 128 characters and value no greater than 256 characters. For example, the default experience for a template type is set with "defaultExperience": "Cassandra". Current "defaultExperience" values also include "Table", "Graph", "DocumentDB", and "MongoDB".
"""
pulumi.set(__self__, "account_name", account_name)
pulumi.set(__self__, "database_name", database_name)
pulumi.set(__self__, "resource", resource)
pulumi.set(__self__, "resource_group_name", resource_group_name)
if container_name is not None:
pulumi.set(__self__, "container_name", container_name)
if identity is not None:
pulumi.set(__self__, "identity", identity)
if location is not None:
pulumi.set(__self__, "location", location)
if options is not None:
pulumi.set(__self__, "options", options)
if tags is not None:
pulumi.set(__self__, "tags", tags)
@property
@pulumi.getter(name="accountName")
def account_name(self) -> pulumi.Input[str]:
"""
Cosmos DB database account name.
"""
return pulumi.get(self, "account_name")
@account_name.setter
def account_name(self, value: pulumi.Input[str]):
pulumi.set(self, "account_name", value)
@property
@pulumi.getter(name="databaseName")
def database_name(self) -> pulumi.Input[str]:
"""
Cosmos DB database name.
"""
return pulumi.get(self, "database_name")
@database_name.setter
def database_name(self, value: pulumi.Input[str]):
pulumi.set(self, "database_name", value)
@property
@pulumi.getter
def resource(self) -> pulumi.Input['SqlContainerResourceArgs']:
"""
The standard JSON format of a container
"""
return pulumi.get(self, "resource")
@resource.setter
def resource(self, value: pulumi.Input['SqlContainerResourceArgs']):
pulumi.set(self, "resource", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the resource group. The name is case insensitive.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="containerName")
def container_name(self) -> Optional[pulumi.Input[str]]:
"""
Cosmos DB container name.
"""
return pulumi.get(self, "container_name")
@container_name.setter
def container_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "container_name", value)
@property
@pulumi.getter
def identity(self) -> Optional[pulumi.Input['ManagedServiceIdentityArgs']]:
"""
Identity for the resource.
"""
return pulumi.get(self, "identity")
@identity.setter
def identity(self, value: Optional[pulumi.Input['ManagedServiceIdentityArgs']]):
pulumi.set(self, "identity", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
The location of the resource group to which the resource belongs.
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter
def options(self) -> Optional[pulumi.Input['CreateUpdateOptionsArgs']]:
"""
A key-value pair of options to be applied for the request. This corresponds to the headers sent with the request.
"""
return pulumi.get(self, "options")
@options.setter
def options(self, value: Optional[pulumi.Input['CreateUpdateOptionsArgs']]):
pulumi.set(self, "options", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Tags are a list of key-value pairs that describe the resource. These tags can be used in viewing and grouping this resource (across resource groups). A maximum of 15 tags can be provided for a resource. Each tag must have a key no greater than 128 characters and value no greater than 256 characters. For example, the default experience for a template type is set with "defaultExperience": "Cassandra". Current "defaultExperience" values also include "Table", "Graph", "DocumentDB", and "MongoDB".
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
class SqlResourceSqlContainer(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
account_name: Optional[pulumi.Input[str]] = None,
container_name: Optional[pulumi.Input[str]] = None,
database_name: Optional[pulumi.Input[str]] = None,
identity: Optional[pulumi.Input[pulumi.InputType['ManagedServiceIdentityArgs']]] = None,
location: Optional[pulumi.Input[str]] = None,
options: Optional[pulumi.Input[pulumi.InputType['CreateUpdateOptionsArgs']]] = None,
resource: Optional[pulumi.Input[pulumi.InputType['SqlContainerResourceArgs']]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None):
"""
An Azure Cosmos DB container.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] account_name: Cosmos DB database account name.
:param pulumi.Input[str] container_name: Cosmos DB container name.
:param pulumi.Input[str] database_name: Cosmos DB database name.
:param pulumi.Input[pulumi.InputType['ManagedServiceIdentityArgs']] identity: Identity for the resource.
:param pulumi.Input[str] location: The location of the resource group to which the resource belongs.
:param pulumi.Input[pulumi.InputType['CreateUpdateOptionsArgs']] options: A key-value pair of options to be applied for the request. This corresponds to the headers sent with the request.
:param pulumi.Input[pulumi.InputType['SqlContainerResourceArgs']] resource: The standard JSON format of a container
:param pulumi.Input[str] resource_group_name: The name of the resource group. The name is case insensitive.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Tags are a list of key-value pairs that describe the resource. These tags can be used in viewing and grouping this resource (across resource groups). A maximum of 15 tags can be provided for a resource. Each tag must have a key no greater than 128 characters and value no greater than 256 characters. For example, the default experience for a template type is set with "defaultExperience": "Cassandra". Current "defaultExperience" values also include "Table", "Graph", "DocumentDB", and "MongoDB".
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: SqlResourceSqlContainerArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
An Azure Cosmos DB container.
:param str resource_name: The name of the resource.
:param SqlResourceSqlContainerArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(SqlResourceSqlContainerArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
account_name: Optional[pulumi.Input[str]] = None,
container_name: Optional[pulumi.Input[str]] = None,
database_name: Optional[pulumi.Input[str]] = None,
identity: Optional[pulumi.Input[pulumi.InputType['ManagedServiceIdentityArgs']]] = None,
location: Optional[pulumi.Input[str]] = None,
options: Optional[pulumi.Input[pulumi.InputType['CreateUpdateOptionsArgs']]] = None,
resource: Optional[pulumi.Input[pulumi.InputType['SqlContainerResourceArgs']]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = SqlResourceSqlContainerArgs.__new__(SqlResourceSqlContainerArgs)
if account_name is None and not opts.urn:
raise TypeError("Missing required property 'account_name'")
__props__.__dict__["account_name"] = account_name
__props__.__dict__["container_name"] = container_name
if database_name is None and not opts.urn:
raise TypeError("Missing required property 'database_name'")
__props__.__dict__["database_name"] = database_name
__props__.__dict__["identity"] = identity
__props__.__dict__["location"] = location
__props__.__dict__["options"] = options
if resource is None and not opts.urn:
raise TypeError("Missing required property 'resource'")
__props__.__dict__["resource"] = resource
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["tags"] = tags
__props__.__dict__["name"] = None
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:documentdb/v20210701preview:SqlResourceSqlContainer"), pulumi.Alias(type_="azure-native:documentdb:SqlResourceSqlContainer"), pulumi.Alias(type_="azure-nextgen:documentdb:SqlResourceSqlContainer"), pulumi.Alias(type_="azure-native:documentdb/v20150401:SqlResourceSqlContainer"), pulumi.Alias(type_="azure-nextgen:documentdb/v20150401:SqlResourceSqlContainer"), pulumi.Alias(type_="azure-native:documentdb/v20150408:SqlResourceSqlContainer"), pulumi.Alias(type_="azure-nextgen:documentdb/v20150408:SqlResourceSqlContainer"), pulumi.Alias(type_="azure-native:documentdb/v20151106:SqlResourceSqlContainer"), pulumi.Alias(type_="azure-nextgen:documentdb/v20151106:SqlResourceSqlContainer"), pulumi.Alias(type_="azure-native:documentdb/v20160319:SqlResourceSqlContainer"), pulumi.Alias(type_="azure-nextgen:documentdb/v20160319:SqlResourceSqlContainer"), pulumi.Alias(type_="azure-native:documentdb/v20160331:SqlResourceSqlContainer"), pulumi.Alias(type_="azure-nextgen:documentdb/v20160331:SqlResourceSqlContainer"), pulumi.Alias(type_="azure-native:documentdb/v20190801:SqlResourceSqlContainer"), pulumi.Alias(type_="azure-nextgen:documentdb/v20190801:SqlResourceSqlContainer"), pulumi.Alias(type_="azure-native:documentdb/v20191212:SqlResourceSqlContainer"), pulumi.Alias(type_="azure-nextgen:documentdb/v20191212:SqlResourceSqlContainer"), pulumi.Alias(type_="azure-native:documentdb/v20200301:SqlResourceSqlContainer"), pulumi.Alias(type_="azure-nextgen:documentdb/v20200301:SqlResourceSqlContainer"), pulumi.Alias(type_="azure-native:documentdb/v20200401:SqlResourceSqlContainer"), pulumi.Alias(type_="azure-nextgen:documentdb/v20200401:SqlResourceSqlContainer"), pulumi.Alias(type_="azure-native:documentdb/v20200601preview:SqlResourceSqlContainer"), pulumi.Alias(type_="azure-nextgen:documentdb/v20200601preview:SqlResourceSqlContainer"), 
pulumi.Alias(type_="azure-native:documentdb/v20200901:SqlResourceSqlContainer"), pulumi.Alias(type_="azure-nextgen:documentdb/v20200901:SqlResourceSqlContainer"), pulumi.Alias(type_="azure-native:documentdb/v20210115:SqlResourceSqlContainer"), pulumi.Alias(type_="azure-nextgen:documentdb/v20210115:SqlResourceSqlContainer"), pulumi.Alias(type_="azure-native:documentdb/v20210301preview:SqlResourceSqlContainer"), pulumi.Alias(type_="azure-nextgen:documentdb/v20210301preview:SqlResourceSqlContainer"), pulumi.Alias(type_="azure-native:documentdb/v20210315:SqlResourceSqlContainer"), pulumi.Alias(type_="azure-nextgen:documentdb/v20210315:SqlResourceSqlContainer"), pulumi.Alias(type_="azure-native:documentdb/v20210401preview:SqlResourceSqlContainer"), pulumi.Alias(type_="azure-nextgen:documentdb/v20210401preview:SqlResourceSqlContainer"), pulumi.Alias(type_="azure-native:documentdb/v20210415:SqlResourceSqlContainer"), pulumi.Alias(type_="azure-nextgen:documentdb/v20210415:SqlResourceSqlContainer"), pulumi.Alias(type_="azure-native:documentdb/v20210515:SqlResourceSqlContainer"), pulumi.Alias(type_="azure-nextgen:documentdb/v20210515:SqlResourceSqlContainer"), pulumi.Alias(type_="azure-native:documentdb/v20210615:SqlResourceSqlContainer"), pulumi.Alias(type_="azure-nextgen:documentdb/v20210615:SqlResourceSqlContainer")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(SqlResourceSqlContainer, __self__).__init__(
'azure-native:documentdb/v20210701preview:SqlResourceSqlContainer',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'SqlResourceSqlContainer':
"""
Get an existing SqlResourceSqlContainer resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = SqlResourceSqlContainerArgs.__new__(SqlResourceSqlContainerArgs)
__props__.__dict__["identity"] = None
__props__.__dict__["location"] = None
__props__.__dict__["name"] = None
__props__.__dict__["options"] = None
__props__.__dict__["resource"] = None
__props__.__dict__["tags"] = None
__props__.__dict__["type"] = None
return SqlResourceSqlContainer(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def identity(self) -> pulumi.Output[Optional['outputs.ManagedServiceIdentityResponse']]:
"""
Identity for the resource.
"""
return pulumi.get(self, "identity")
@property
@pulumi.getter
def location(self) -> pulumi.Output[Optional[str]]:
"""
The location of the resource group to which the resource belongs.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name of the ARM resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def options(self) -> pulumi.Output[Optional['outputs.SqlContainerGetPropertiesResponseOptions']]:
return pulumi.get(self, "options")
@property
@pulumi.getter
def resource(self) -> pulumi.Output[Optional['outputs.SqlContainerGetPropertiesResponseResource']]:
return pulumi.get(self, "resource")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Tags are a list of key-value pairs that describe the resource. These tags can be used in viewing and grouping this resource (across resource groups). A maximum of 15 tags can be provided for a resource. Each tag must have a key no greater than 128 characters and value no greater than 256 characters. For example, the default experience for a template type is set with "defaultExperience": "Cassandra". Current "defaultExperience" values also include "Table", "Graph", "DocumentDB", and "MongoDB".
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
The type of Azure resource.
"""
return pulumi.get(self, "type")
| [
"noreply@github.com"
] | vivimouret29.noreply@github.com |
9dada075fc6880ae3051f749c69383a9d71ad79e | f4dedea53630c9cbdc6297ae4a7e2a8195fd7691 | /10 Advanced Techniques/21 Dynamic Connectivity.py | 7cca06fb0f10671253d333d96b04477b6777f2fa | [] | no_license | nikkisora/cses_problemset | d089db048444e07e002f131b4323adc9df95b05b | 03160f33e36cdc6d538403357b36bcb015b4dba7 | refs/heads/master | 2023-07-03T10:34:23.487709 | 2021-08-05T21:13:49 | 2021-08-05T21:13:49 | 379,251,540 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,243 | py | '''
CSES - Dynamic Connectivity
Time limit: 1.00 s
Memory limit: 512 MB
Consider an undirected graph that consists of n nodes and m edges. There are two types of events that can happen:
A new edge is created between nodes a and b.
An existing edge between nodes a and b is removed.
Your task is to report the number of components after every event.
Input
The first input line has three integers n, m and k: the number of nodes, edges and events.
After this there are m lines describing the edges. Each line has two integers a and b: there is an edge between nodes a and b. There is at most one edge between any pair of nodes.
Then there are k lines describing the events. Each line has the form "t a b" where t is 1 (create a new edge) or 2 (remove an edge). A new edge is always created between two nodes that do not already have an edge between them, and only existing edges can get removed.
Output
Print k+1 integers: first the number of components before the first event, and after this the new number of components after each event.
Constraints
2 <= n <= 10^5
1 <= m,k <= 10^5
1 <= a,b <= n
Example
Input:
5 3 3
1 4
2 3
3 5
1 2 5
2 3 5
1 1 2
Output:
2 2 2 1
''' | [
"32413317+nikkisora@users.noreply.github.com"
] | 32413317+nikkisora@users.noreply.github.com |
f5b9b5aad03c625181b97dedf70853d9f1345517 | c27c51f5c33e0431dbe7db6e18c21b249d476cfa | /OpenSource_Python_Code/nova-2013.2/build/lib/nova/virt/libvirt/imagebackend.py | 84c46e86a3635fb2a8957e6bf13c2a4b8c1e1236 | [
"Apache-2.0"
] | permissive | bopopescu/Python_Stuff | 9bef74e0db17bb5e3ba2d908ced01ee744820d80 | 9aa94a0fa5e4e802090c7b29ec88b840e304d9e5 | refs/heads/master | 2022-11-20T06:54:36.581623 | 2017-12-04T18:56:02 | 2017-12-04T18:56:02 | 282,171,169 | 0 | 0 | null | 2020-07-24T08:54:37 | 2020-07-24T08:54:36 | null | UTF-8 | Python | false | false | 22,365 | py | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Grid Dynamics
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import contextlib
import os
from oslo.config import cfg
from nova import exception
from nova.openstack.common import excutils
from nova.openstack.common import fileutils
from nova.openstack.common.gettextutils import _
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova import utils
from nova.virt.disk import api as disk
from nova.virt import images
from nova.virt.libvirt import config as vconfig
from nova.virt.libvirt import utils as libvirt_utils
try:
import rbd
except ImportError:
rbd = None
# Configuration options for the libvirt image backends. Indentation and
# line continuations are reconstructed here; option names, defaults and
# help strings are unchanged.
__imagebackend_opts = [
    cfg.StrOpt('libvirt_images_type',
               default='default',
               help='VM Images format. Acceptable values are: raw, qcow2, lvm,'
                    'rbd, default. If default is specified,'
                    ' then use_cow_images flag is used instead of this one.'),
    cfg.StrOpt('libvirt_images_volume_group',
               help='LVM Volume Group that is used for VM images, when you'
                    ' specify libvirt_images_type=lvm.'),
    cfg.BoolOpt('libvirt_sparse_logical_volumes',
                default=False,
                help='Create sparse logical volumes (with virtualsize)'
                     ' if this flag is set to True.'),
    cfg.IntOpt('libvirt_lvm_snapshot_size',
               default=1000,
               help='The amount of storage (in megabytes) to allocate for LVM'
                    ' snapshot copy-on-write blocks.'),
    cfg.StrOpt('libvirt_images_rbd_pool',
               default='rbd',
               help='the RADOS pool in which rbd volumes are stored'),
    cfg.StrOpt('libvirt_images_rbd_ceph_conf',
               default='',  # default determined by librados
               help='path to the ceph configuration file to use'),
]

CONF = cfg.CONF
CONF.register_opts(__imagebackend_opts)
CONF.import_opt('base_dir_name', 'nova.virt.libvirt.imagecache')
CONF.import_opt('preallocate_images', 'nova.virt.driver')

LOG = logging.getLogger(__name__)
class Image(object):
    """Abstract base class for the libvirt image backends.

    Subclasses (Raw, Qcow2, Lvm, Rbd) set ``self.path`` in their
    constructor and implement :meth:`create_image`.
    """

    __metaclass__ = abc.ABCMeta

    def __init__(self, source_type, driver_format, is_block_dev=False):
        """Image initialization.

        :source_type: block or file
        :driver_format: raw or qcow2
        :is_block_dev: whether the image lives on a block device
        """
        self.source_type = source_type
        self.driver_format = driver_format
        self.is_block_dev = is_block_dev
        self.preallocate = False

        # NOTE(mikal): We need a lock directory which is shared along with
        # instance files, to cover the scenario where multiple compute nodes
        # are trying to create a base file at the same time
        self.lock_path = os.path.join(CONF.instances_path, 'locks')

    @abc.abstractmethod
    def create_image(self, prepare_template, base, size, *args, **kwargs):
        """Create image from template.

        Contains specific behavior for each image type.

        :prepare_template: function, that creates template.
                           Should accept `target` argument.
        :base: Template name
        :size: Size of created image in bytes
        """
        pass

    def libvirt_info(self, disk_bus, disk_dev, device_type, cache_mode,
                     extra_specs, hypervisor_version):
        """Get `LibvirtConfigGuestDisk` filled for this image.

        :disk_dev: Disk bus device name
        :disk_bus: Disk bus type
        :device_type: Device type for this image.
        :cache_mode: Caching mode for this image
        :extra_specs: Instance type extra specs dict.
        """
        info = vconfig.LibvirtConfigGuestDisk()
        info.source_type = self.source_type
        info.source_device = device_type
        info.target_bus = disk_bus
        info.target_dev = disk_dev
        info.driver_cache = cache_mode
        info.driver_format = self.driver_format
        driver_name = libvirt_utils.pick_disk_driver_name(hypervisor_version,
                                                          self.is_block_dev)
        info.driver_name = driver_name
        info.source_path = self.path

        tune_items = ['disk_read_bytes_sec', 'disk_read_iops_sec',
                      'disk_write_bytes_sec', 'disk_write_iops_sec',
                      'disk_total_bytes_sec', 'disk_total_iops_sec']
        # Note(yaguang): Currently, the only tuning available is Block I/O
        # throttling for qemu.
        if self.source_type in ['file', 'block']:
            for key, value in extra_specs.iteritems():
                scope = key.split(':')
                if len(scope) > 1 and scope[0] == 'quota':
                    if scope[1] in tune_items:
                        setattr(info, scope[1], value)
        return info

    def check_image_exists(self):
        # File-backed default; Rbd overrides this with a pool lookup.
        return os.path.exists(self.path)

    def cache(self, fetch_func, filename, size=None, *args, **kwargs):
        """Creates image from template.

        Ensures that template and image not already exists.
        Ensures that base directory exists.
        Synchronizes on template fetching.

        :fetch_func: Function that creates the base image
                     Should accept `target` argument.
        :filename: Name of the file in the image directory
        :size: Size of created image in bytes (optional)
        """
        @utils.synchronized(filename, external=True, lock_path=self.lock_path)
        def call_if_not_exists(target, *args, **kwargs):
            if not os.path.exists(target):
                fetch_func(target=target, *args, **kwargs)
            elif CONF.libvirt_images_type == "lvm" and \
                    'ephemeral_size' in kwargs:
                fetch_func(target=target, *args, **kwargs)

        base_dir = os.path.join(CONF.instances_path, CONF.base_dir_name)
        if not os.path.exists(base_dir):
            fileutils.ensure_tree(base_dir)
        base = os.path.join(base_dir, filename)

        if not self.check_image_exists() or not os.path.exists(base):
            self.create_image(call_if_not_exists, base, size,
                              *args, **kwargs)

        if (size and self.preallocate and self._can_fallocate() and
                os.access(self.path, os.W_OK)):
            utils.execute('fallocate', '-n', '-l', size, self.path)

    def _can_fallocate(self):
        """Check once per class, whether fallocate(1) is available,
        and that the instances directory supports fallocate(2).
        """
        can_fallocate = getattr(self.__class__, 'can_fallocate', None)
        if can_fallocate is None:
            _out, err = utils.trycmd('fallocate', '-n', '-l', '1',
                                     self.path + '.fallocate_test')
            fileutils.delete_if_exists(self.path + '.fallocate_test')
            can_fallocate = not err
            # Cache the probe result on the class so it runs at most once.
            self.__class__.can_fallocate = can_fallocate
        if not can_fallocate:
            LOG.error('Unable to preallocate_images=%s at path: %s' %
                      (CONF.preallocate_images, self.path))
        return can_fallocate

    def snapshot_create(self):
        raise NotImplementedError()

    def snapshot_extract(self, target, out_format):
        raise NotImplementedError()

    def snapshot_delete(self):
        raise NotImplementedError()
class Raw(Image):
    """File-backed image backend (format detected, despite the name)."""

    def __init__(self, instance=None, disk_name=None, path=None,
                 snapshot_name=None):
        super(Raw, self).__init__("file", "raw", is_block_dev=False)

        self.path = (path or
                     os.path.join(libvirt_utils.get_instance_path(instance),
                                  disk_name))
        self.snapshot_name = snapshot_name
        self.preallocate = CONF.preallocate_images != 'none'
        self.correct_format()

    def correct_format(self):
        # Probe the actual on-disk format; existing disks may be qcow2.
        if os.path.exists(self.path):
            data = images.qemu_img_info(self.path)
            self.driver_format = data.file_format or 'raw'

    def create_image(self, prepare_template, base, size, *args, **kwargs):
        @utils.synchronized(base, external=True, lock_path=self.lock_path)
        def copy_raw_image(base, target, size):
            libvirt_utils.copy_image(base, target)
            if size:
                # class Raw is misnamed, format may not be 'raw' in all cases
                use_cow = self.driver_format == 'qcow2'
                disk.extend(target, size, use_cow=use_cow)

        generating = 'image_id' not in kwargs
        if generating:
            # Generating image in place
            prepare_template(target=self.path, *args, **kwargs)
        else:
            prepare_template(target=base, *args, **kwargs)
            if not os.path.exists(self.path):
                with fileutils.remove_path_on_error(self.path):
                    copy_raw_image(base, self.path, size)
        self.correct_format()

    def snapshot_create(self):
        pass

    def snapshot_extract(self, target, out_format):
        images.convert_image(self.path, target, out_format)

    def snapshot_delete(self):
        pass
class Qcow2(Image):
    """Copy-on-write qcow2 image backend layered on a cached base image."""

    def __init__(self, instance=None, disk_name=None, path=None,
                 snapshot_name=None):
        super(Qcow2, self).__init__("file", "qcow2", is_block_dev=False)

        self.path = (path or
                     os.path.join(libvirt_utils.get_instance_path(instance),
                                  disk_name))
        self.snapshot_name = snapshot_name
        self.preallocate = CONF.preallocate_images != 'none'

    def create_image(self, prepare_template, base, size, *args, **kwargs):
        @utils.synchronized(base, external=True, lock_path=self.lock_path)
        def copy_qcow2_image(base, target, size):
            # TODO(pbrady): Consider copying the cow image here
            # with preallocation=metadata set for performance reasons.
            # This would be keyed on a 'preallocate_images' setting.
            libvirt_utils.create_cow_image(base, target)
            if size:
                disk.extend(target, size, use_cow=True)

        # Download the unmodified base image unless we already have a copy.
        if not os.path.exists(base):
            prepare_template(target=base, *args, **kwargs)

        legacy_backing_size = None
        legacy_base = base

        # Determine whether an existing qcow2 disk uses a legacy backing by
        # actually looking at the image itself and parsing the output of the
        # backing file it expects to be using.
        if os.path.exists(self.path):
            backing_path = libvirt_utils.get_disk_backing_file(self.path)
            if backing_path is not None:
                backing_file = os.path.basename(backing_path)
                backing_parts = backing_file.rpartition('_')
                if backing_file != backing_parts[-1] and \
                        backing_parts[-1].isdigit():
                    legacy_backing_size = int(backing_parts[-1])
                    legacy_base += '_%d' % legacy_backing_size
                    legacy_backing_size *= 1024 * 1024 * 1024

        # Create the legacy backing file if necessary.
        if legacy_backing_size:
            if not os.path.exists(legacy_base):
                with fileutils.remove_path_on_error(legacy_base):
                    libvirt_utils.copy_image(base, legacy_base)
                    disk.extend(legacy_base, legacy_backing_size,
                                use_cow=True)

        # NOTE(cfb): Having a flavor that sets the root size to 0 and having
        # nova effectively ignore that size and use the size of the
        # image is considered a feature at this time, not a bug.
        disk_size = disk.get_disk_size(base)
        if size and size < disk_size:
            msg = _('%(base)s virtual size %(disk_size)s'
                    'larger than flavor root disk size %(size)s')
            LOG.error(msg % {'base': base,
                             'disk_size': disk_size,
                             'size': size})
            raise exception.InstanceTypeDiskTooSmall()

        if not os.path.exists(self.path):
            with fileutils.remove_path_on_error(self.path):
                copy_qcow2_image(base, self.path, size)

    def snapshot_create(self):
        libvirt_utils.create_snapshot(self.path, self.snapshot_name)

    def snapshot_extract(self, target, out_format):
        libvirt_utils.extract_snapshot(self.path, 'qcow2',
                                       self.snapshot_name, target,
                                       out_format)

    def snapshot_delete(self):
        libvirt_utils.delete_snapshot(self.path, self.snapshot_name)
class Lvm(Image):
    """Image backend storing disks as LVM logical volumes."""

    @staticmethod
    def escape(filename):
        # '_' separates instance name from disk name in the LV name, so
        # literal underscores in either component are doubled.
        return filename.replace('_', '__')

    def __init__(self, instance=None, disk_name=None, path=None,
                 snapshot_name=None):
        super(Lvm, self).__init__("block", "raw", is_block_dev=True)

        if path:
            info = libvirt_utils.logical_volume_info(path)
            self.vg = info['VG']
            self.lv = info['LV']
            self.path = path
        else:
            if not CONF.libvirt_images_volume_group:
                raise RuntimeError(_('You should specify'
                                     ' libvirt_images_volume_group'
                                     ' flag to use LVM images.'))
            self.vg = CONF.libvirt_images_volume_group
            self.lv = '%s_%s' % (self.escape(instance['name']),
                                 self.escape(disk_name))
            self.path = os.path.join('/dev', self.vg, self.lv)

        # TODO(pbrady): possibly deprecate libvirt_sparse_logical_volumes
        # for the more general preallocate_images
        self.sparse = CONF.libvirt_sparse_logical_volumes
        self.preallocate = not self.sparse

        if snapshot_name:
            self.snapshot_name = snapshot_name
            self.snapshot_path = os.path.join('/dev', self.vg,
                                              self.snapshot_name)

    def _can_fallocate(self):
        # fallocate(1) does not apply to block devices.
        return False

    def create_image(self, prepare_template, base, size, *args, **kwargs):
        @utils.synchronized(base, external=True, lock_path=self.lock_path)
        def create_lvm_image(base, size):
            base_size = disk.get_disk_size(base)
            resize = size > base_size
            size = size if resize else base_size
            libvirt_utils.create_lvm_image(self.vg, self.lv,
                                           size, sparse=self.sparse)
            images.convert_image(base, self.path, 'raw', run_as_root=True)
            if resize:
                disk.resize2fs(self.path, run_as_root=True)

        generated = 'ephemeral_size' in kwargs

        # Generate images with specified size right on volume
        if generated and size:
            libvirt_utils.create_lvm_image(self.vg, self.lv,
                                           size, sparse=self.sparse)
            with self.remove_volume_on_error(self.path):
                prepare_template(target=self.path, *args, **kwargs)
        else:
            prepare_template(target=base, *args, **kwargs)
            with self.remove_volume_on_error(self.path):
                create_lvm_image(base, size)

    @contextlib.contextmanager
    def remove_volume_on_error(self, path):
        """Delete the logical volume(s) at *path* if the body raises."""
        try:
            yield
        except Exception:
            with excutils.save_and_reraise_exception():
                libvirt_utils.remove_logical_volumes(path)

    def snapshot_create(self):
        size = CONF.libvirt_lvm_snapshot_size
        cmd = ('lvcreate', '-L', size, '-s', '--name', self.snapshot_name,
               self.path)
        libvirt_utils.execute(*cmd, run_as_root=True, attempts=3)

    def snapshot_extract(self, target, out_format):
        images.convert_image(self.snapshot_path, target, out_format,
                             run_as_root=True)

    def snapshot_delete(self):
        # NOTE (rmk): Snapshot volumes are automatically zeroed by LVM
        cmd = ('lvremove', '-f', self.snapshot_path)
        libvirt_utils.execute(*cmd, run_as_root=True, attempts=3)
class Rbd(Image):
    """Image backend storing disks as RBD volumes in a Ceph pool."""

    def __init__(self, instance=None, disk_name=None, path=None,
                 snapshot_name=None, **kwargs):
        super(Rbd, self).__init__("block", "rbd", is_block_dev=True)
        if path:
            try:
                self.rbd_name = path.split('/')[1]
            except IndexError:
                raise exception.InvalidDevicePath(path=path)
        else:
            self.rbd_name = '%s_%s' % (instance['name'], disk_name)
        self.snapshot_name = snapshot_name
        if not CONF.libvirt_images_rbd_pool:
            raise RuntimeError(_('You should specify'
                                 ' libvirt_images_rbd_pool'
                                 ' flag to use rbd images.'))
        self.pool = CONF.libvirt_images_rbd_pool
        self.ceph_conf = CONF.libvirt_images_rbd_ceph_conf
        # Allow callers (tests) to inject a fake rbd module.
        self.rbd = kwargs.get('rbd', rbd)

    def _supports_layering(self):
        # Layering (copy-on-write clones) appeared together with this
        # librbd constant.
        return hasattr(self.rbd, 'RBD_FEATURE_LAYERING')

    def _ceph_args(self):
        args = []
        args.extend(['--id', CONF.rbd_user])
        args.extend(['--conf', self.ceph_conf])
        return args

    def _get_mon_addrs(self):
        """Return ([host, ...], [port, ...]) of the Ceph monitors."""
        args = ['ceph', 'mon', 'dump', '--format=json'] + self._ceph_args()
        # NOTE: renamed the throwaway from ``_`` so the gettext alias
        # imported at module level is not shadowed.
        out, _err = utils.execute(*args)
        lines = out.split('\n')
        if lines[0].startswith('dumped monmap epoch'):
            lines = lines[1:]
        monmap = jsonutils.loads('\n'.join(lines))
        addrs = [mon['addr'] for mon in monmap['mons']]
        hosts = []
        ports = []
        for addr in addrs:
            host_port = addr[:addr.rindex('/')]
            host, port = host_port.rsplit(':', 1)
            hosts.append(host.strip('[]'))  # strip IPv6 brackets
            ports.append(port)
        return hosts, ports

    def libvirt_info(self, disk_bus, disk_dev, device_type, cache_mode,
                     extra_specs, hypervisor_version=None):
        """Get `LibvirtConfigGuestDisk` filled for this image.

        :disk_dev: Disk bus device name
        :disk_bus: Disk bus type
        :device_type: Device type for this image.
        :cache_mode: Caching mode for this image
        :extra_specs: Instance type extra specs dict.
        :hypervisor_version: accepted (and ignored) for signature parity
                             with Image.libvirt_info.
        """
        info = vconfig.LibvirtConfigGuestDisk()
        hosts, ports = self._get_mon_addrs()
        info.device_type = device_type
        info.driver_format = 'raw'
        info.driver_cache = cache_mode
        info.target_bus = disk_bus
        info.target_dev = disk_dev
        info.source_type = 'network'
        info.source_protocol = 'rbd'
        info.source_name = '%s/%s' % (self.pool, self.rbd_name)
        info.source_hosts = hosts
        info.source_ports = ports
        auth_enabled = (CONF.rbd_user is not None)
        if CONF.rbd_secret_uuid:
            info.auth_secret_uuid = CONF.rbd_secret_uuid
            auth_enabled = True  # Force authentication locally
            if CONF.rbd_user:
                info.auth_username = CONF.rbd_user
        if auth_enabled:
            info.auth_secret_type = 'ceph'
            info.auth_secret_uuid = CONF.rbd_secret_uuid
        return info

    def _can_fallocate(self):
        # fallocate makes no sense on a network block device.
        return False

    def check_image_exists(self):
        rbd_volumes = libvirt_utils.list_rbd_volumes(self.pool)
        for vol in rbd_volumes:
            if vol.startswith(self.rbd_name):
                return True

        return False

    def create_image(self, prepare_template, base, size, *args, **kwargs):
        if self.rbd is None:
            raise RuntimeError(_('rbd python libraries not found'))

        if not os.path.exists(base):
            prepare_template(target=base, *args, **kwargs)

        # keep using the command line import instead of librbd since it
        # detects zeroes to preserve sparseness in the image
        args = ['--pool', self.pool, base, self.rbd_name]
        if self._supports_layering():
            # The new image format is required for layering support.
            args += ['--new-format']
        args += self._ceph_args()
        libvirt_utils.import_rbd_image(*args)

    def snapshot_create(self):
        pass

    def snapshot_extract(self, target, out_format):
        snap = 'rbd:%s/%s' % (self.pool, self.rbd_name)
        images.convert_image(snap, target, out_format)

    def snapshot_delete(self):
        pass
class Backend(object):
    """Factory mapping image-type names to backend classes."""

    def __init__(self, use_cow):
        self.BACKEND = {
            'raw': Raw,
            'qcow2': Qcow2,
            'lvm': Lvm,
            'rbd': Rbd,
            # 'default' resolves via the use_cow_images flag.
            'default': Qcow2 if use_cow else Raw
        }

    def backend(self, image_type=None):
        """Return the backend class for *image_type*.

        Falls back to CONF.libvirt_images_type when no type is given and
        raises RuntimeError for unknown types.
        """
        if not image_type:
            image_type = CONF.libvirt_images_type
        image = self.BACKEND.get(image_type)
        if not image:
            raise RuntimeError(_('Unknown image_type=%s') % image_type)
        return image

    def image(self, instance, disk_name, image_type=None):
        """Constructs image for selected backend

        :instance: Instance name.
        :name: Image name.
        :image_type: Image type.
                     Optional, is CONF.libvirt_images_type by default.
        """
        backend = self.backend(image_type)
        return backend(instance=instance, disk_name=disk_name)

    def snapshot(self, disk_path, snapshot_name, image_type=None):
        """Returns snapshot for given image

        :path: path to image
        :snapshot_name: snapshot name
        :image_type: type of image
        """
        backend = self.backend(image_type)
        return backend(path=disk_path, snapshot_name=snapshot_name)
| [
"thelma1944@gmail.com"
] | thelma1944@gmail.com |
4f770d7903cc4c618074e9115637038e89b1a77c | 6a819308924a005aa66475515bd14586b97296ae | /venv/lib/python3.6/site-packages/setuptools/py31compat.py | e80573126c1c9604f492fb04516ad2e619c23349 | [] | no_license | AlexandrTyurikov/my_first_Django_project | a2c655dc295d3904c7688b8f36439ae8229d23d1 | 1a8e4d033c0ff6b1339d78c329f8beca058b019a | refs/heads/master | 2020-05-04T13:20:20.100479 | 2019-05-04T23:41:39 | 2019-05-04T23:41:39 | 179,156,468 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,187 | py | __all__ = ['get_config_vars', 'get_path']
try:
    # Python 2.7 or >=3.2 ship sysconfig with both helpers.
    from sysconfig import get_config_vars, get_path
except ImportError:
    from distutils.sysconfig import get_config_vars, get_python_lib

    def get_path(name):
        """Minimal stand-in for sysconfig.get_path on old Pythons.

        Only 'platlib' and 'purelib' are supported; anything else
        raises ValueError, matching the limited fallback contract.
        """
        if name not in ('platlib', 'purelib'):
            raise ValueError("Name must be purelib or platlib")
        return get_python_lib(name == 'platlib')
try:
    # Python >=3.2
    from tempfile import TemporaryDirectory
except ImportError:
    import shutil
    import tempfile

    class TemporaryDirectory(object):
        """
        Very simple temporary directory context manager.
        Will try to delete afterward, but will also ignore OS and similar
        errors on deletion.
        """

        def __init__(self):
            self.name = None  # Handle mkdtemp raising an exception
            self.name = tempfile.mkdtemp()

        def __enter__(self):
            return self.name

        def __exit__(self, exctype, excvalue, exctrace):
            try:
                shutil.rmtree(self.name, True)
            except OSError:  # removal errors are not the only possible
                pass
            self.name = None
"tyur.sh@gmail.com"
] | tyur.sh@gmail.com |
def send_message(self, queue_url, message_body, delay_seconds=0,
                 message_attributes=None):
    """Send a message to an SQS queue through the boto3 connection.

    :param queue_url: URL of the target SQS queue.
    :param message_body: message payload string.
    :param delay_seconds: delivery delay in seconds (default 0).
    :param message_attributes: optional dict of SQS message attributes;
        ``None`` becomes an empty dict (avoids a mutable default).
    :return: the boto3 ``send_message`` response.
    """
    return self.get_conn().send_message(
        QueueUrl=queue_url,
        MessageBody=message_body,
        DelaySeconds=delay_seconds,
        MessageAttributes=(message_attributes or {}))
"dg1732004@smail.nju.edu.cn"
] | dg1732004@smail.nju.edu.cn |
1c3b1236c2af033089a675439e47f130b4baec41 | dd573ed68682fd07da08143dd09f6d2324f51345 | /swea/모의SW역량테스트/5658_보물상자비밀번호.py | 0d31a6192d7541eaeb1e770c774b0ca5c24ab353 | [] | no_license | chelseashin/My-Algorithm | 0f9fb37ea5c6475e8ff6943a5fdaa46f0cd8be61 | db692e158ebed2d607855c8e554fd291c18acb42 | refs/heads/master | 2021-08-06T12:05:23.155679 | 2021-07-04T05:07:43 | 2021-07-04T05:07:43 | 204,362,911 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 896 | py | import sys
import sys
sys.stdin = open('5658_input.txt')
from collections import deque

T = int(input())
for tc in range(T):
    # N: number of hex digits around the box lid,
    # K: report the K-th largest distinct password.
    N, K = map(int, input().split())
    S = deque(input())
    n = N // 4  # each of the four sides holds N/4 digits
    numbers = set()
    # Collect the four side-values for every rotation state of the lid.
    for _ in range(N // 4):
        for i in range(0, N, n):
            temp = ''
            for j in range(n):
                temp += S[i + j]
            numbers.add(int(temp, 16))
        S.rotate()
    print("#{} {}".format(tc + 1, sorted(numbers)[::-1][K - 1]))
# 다른 풀이
# T = int(input())
# for test_case in range(T):
# N, K = map(int, input().split())
# data = input() * 2
# ans = set()
# for i in range(N // 4):
# for j in range(1, 5):
# ans.add(int(data[i + (N // 4) * (j - 1) : i + (N // 4) * j], 16))
# # print(sorted(ans, reverse=True))
# print("#{} {}".format(test_case + 1, sorted(ans)[:: - 1][K - 1])) | [
"chaewonshin95@gmail.com"
] | chaewonshin95@gmail.com |
3d99b1f9a252500cbbfbbfb578d4f1b8692adbd4 | 55c250525bd7198ac905b1f2f86d16a44f73e03a | /Python/Tutorials/Google API's Python Client/expandsymlinks.py | 28b3c662846a89d685f571c65fdc7a1e5d5cebcb | [] | no_license | NateWeiler/Resources | 213d18ba86f7cc9d845741b8571b9e2c2c6be916 | bd4a8a82a3e83a381c97d19e5df42cbababfc66c | refs/heads/master | 2023-09-03T17:50:31.937137 | 2023-08-28T23:50:57 | 2023-08-28T23:50:57 | 267,368,545 | 2 | 1 | null | 2022-09-08T15:20:18 | 2020-05-27T16:18:17 | null | UTF-8 | Python | false | false | 129 | py | version https://git-lfs.github.com/spec/v1
oid sha256:5f1f470f54b639699e0364110137529c5a098bae64631d265ea1ec985e10485a
size 1756
| [
"nateweiler84@gmail.com"
] | nateweiler84@gmail.com |
74fb966098d31d5bd5b87dddb2f7208224027dee | 6a44e772dfdec969f5e2af430f0bf3a35eb73c4e | /src/generator/AutoRest.Python.Tests/Expected/AcceptanceTests/BodyComplex/autorestcomplextestservice/models/int_wrapper.py | fc276dcc05f4904cd2f0ccc34b7f154b9521e849 | [
"MIT"
] | permissive | lurumad/autorest | ecc4b1de223e4b4cdd226a3cf922a6940dbddd34 | fef0c4c9e7fdb5c851bdb095d5a2ff93572d452e | refs/heads/master | 2021-01-12T11:07:39.298341 | 2016-11-04T03:12:08 | 2016-11-04T03:12:08 | 72,835,570 | 1 | 0 | null | 2016-11-04T09:58:50 | 2016-11-04T09:58:50 | null | UTF-8 | Python | false | false | 925 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class IntWrapper(Model):
    """IntWrapper.

    :param field1:
    :type field1: int
    :param field2:
    :type field2: int
    """

    # msrest (de)serialization map: attribute name -> wire key and type.
    _attribute_map = {
        'field1': {'key': 'field1', 'type': 'int'},
        'field2': {'key': 'field2', 'type': 'int'},
    }

    def __init__(self, field1=None, field2=None):
        self.field1 = field1
        self.field2 = field2
| [
"noreply@github.com"
] | lurumad.noreply@github.com |
fe63ce9389ff63768465358dbdd8a16ac4f05c7f | ea85e903db500eee66fe70ed3029b05577494d9d | /排序/349. 两个数组的交集.py | 40954a68011ddec065523c8ae1fe288574149fd7 | [] | no_license | baolibin/leetcode | fcd975eb23e5ca3fc7febbd6c47ec833595b5a51 | bc0540ec42131439be144cca19f6355a01de992a | refs/heads/master | 2021-08-15T20:40:25.580955 | 2021-01-20T09:57:21 | 2021-01-20T09:57:21 | 76,557,864 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 949 | py | # coding:utf-8
'''
349. Intersection of Two Arrays

Given two arrays, write a function to compute their intersection.

Example 1:
    Input:  nums1 = [1,2,2,1], nums2 = [2,2]
    Output: [2]
Example 2:
    Input:  nums1 = [4,9,5], nums2 = [9,4,9,8,4]
    Output: [9,4]

Notes:
    Each element in the result must be unique.
    The result may be returned in any order.
'''


def intersection(nums1, nums2):
    """Return the unique elements present in both *nums1* and *nums2*.

    Membership is tested against a set, so the function runs in
    O(len(nums1) + len(nums2)) instead of the quadratic list scan.
    Output order is unspecified (it follows set iteration order).
    """
    unique_first = list(set(nums1))
    second = set(nums2)  # O(1) membership tests
    res = []
    for each in unique_first:
        if each in second:
            res.append(each)
    return res


# Ad-hoc manual checks kept from the original script.
nums1 = [1, 2, 2, 1]
nums2 = [2, 2]
nums1 = [4, 9, 5]
nums2 = [9, 4, 9, 8, 4]
print(intersection(nums1, nums2))


class Solution:
    """LeetCode-style wrapper delegating to :func:`intersection`."""

    def intersection(self, nums1, nums2):
        # NOTE: the original annotated the parameters with ``List[int]``
        # without importing ``typing.List``, which raised NameError at
        # class-creation time; the annotations are dropped here.
        return intersection(nums1, nums2)
| [
"yangfengling@inttech.cn"
] | yangfengling@inttech.cn |
b0c71791df4278ee8b534d877f78f47e8c7b4ad7 | 7889f7f0532db6a7f81e6f8630e399c90438b2b9 | /3.4.0/_downloads/62c71faf74da9063a3e8cccd6bfdbed9/boxplot_demo.py | d5eb08f9cf1d9be597338a1e963c023ea4f694ef | [] | no_license | matplotlib/matplotlib.github.com | ef5d23a5bf77cb5af675f1a8273d641e410b2560 | 2a60d39490941a524e5385670d488c86083a032c | refs/heads/main | 2023-08-16T18:46:58.934777 | 2023-08-10T05:07:57 | 2023-08-10T05:08:30 | 1,385,150 | 25 | 59 | null | 2023-08-30T15:59:50 | 2011-02-19T03:27:35 | null | UTF-8 | Python | false | false | 7,870 | py | """
========
Boxplots
========
Visualizing boxplots with matplotlib.
The following examples show off how to visualize boxplots with
Matplotlib. There are many options to control their appearance and
the statistics that they use to summarize the data.
"""
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.patches import Polygon
# Fixing random state for reproducibility
np.random.seed(19680801)
# fake up some data
spread = np.random.rand(50) * 100
center = np.ones(25) * 50
flier_high = np.random.rand(10) * 100 + 100
flier_low = np.random.rand(10) * -100
data = np.concatenate((spread, center, flier_high, flier_low))
fig, axs = plt.subplots(2, 3)
# basic plot
axs[0, 0].boxplot(data)
axs[0, 0].set_title('basic plot')
# notched plot
axs[0, 1].boxplot(data, 1)
axs[0, 1].set_title('notched plot')
# change outlier point symbols
axs[0, 2].boxplot(data, 0, 'gD')
axs[0, 2].set_title('change outlier\npoint symbols')
# don't show outlier points
axs[1, 0].boxplot(data, 0, '')
axs[1, 0].set_title("don't show\noutlier points")
# horizontal boxes
axs[1, 1].boxplot(data, 0, 'rs', 0)
axs[1, 1].set_title('horizontal boxes')
# change whisker length
axs[1, 2].boxplot(data, 0, 'rs', 0, 0.75)
axs[1, 2].set_title('change whisker length')
fig.subplots_adjust(left=0.08, right=0.98, bottom=0.05, top=0.9,
hspace=0.4, wspace=0.3)
# fake up some more data
spread = np.random.rand(50) * 100
center = np.ones(25) * 40
flier_high = np.random.rand(10) * 100 + 100
flier_low = np.random.rand(10) * -100
d2 = np.concatenate((spread, center, flier_high, flier_low))
# Making a 2-D array only works if all the columns are the
# same length. If they are not, then use a list instead.
# This is actually more efficient because boxplot converts
# a 2-D array into a list of vectors internally anyway.
data = [data, d2, d2[::2]]
# Multiple box plots on one Axes
fig, ax = plt.subplots()
ax.boxplot(data)
plt.show()
###############################################################################
# Below we'll generate data from five different probability distributions,
# each with different characteristics. We want to play with how an IID
# bootstrap resample of the data preserves the distributional
# properties of the original sample, and a boxplot is one visual tool
# to make this assessment
random_dists = ['Normal(1, 1)', 'Lognormal(1, 1)', 'Exp(1)', 'Gumbel(6, 4)',
'Triangular(2, 9, 11)']
N = 500
norm = np.random.normal(1, 1, N)
logn = np.random.lognormal(1, 1, N)
expo = np.random.exponential(1, N)
gumb = np.random.gumbel(6, 4, N)
tria = np.random.triangular(2, 9, 11, N)
# Generate some random indices that we'll use to resample the original data
# arrays. For code brevity, just use the same random indices for each array
bootstrap_indices = np.random.randint(0, N, N)
data = [
norm, norm[bootstrap_indices],
logn, logn[bootstrap_indices],
expo, expo[bootstrap_indices],
gumb, gumb[bootstrap_indices],
tria, tria[bootstrap_indices],
]
fig, ax1 = plt.subplots(figsize=(10, 6))
fig.canvas.manager.set_window_title('A Boxplot Example')
fig.subplots_adjust(left=0.075, right=0.95, top=0.9, bottom=0.25)
bp = ax1.boxplot(data, notch=0, sym='+', vert=1, whis=1.5)
plt.setp(bp['boxes'], color='black')
plt.setp(bp['whiskers'], color='black')
plt.setp(bp['fliers'], color='red', marker='+')
# Add a horizontal grid to the plot, but make it very light in color
# so we can use it for reading data values but not be distracting
ax1.yaxis.grid(True, linestyle='-', which='major', color='lightgrey',
alpha=0.5)
ax1.set(
axisbelow=True, # Hide the grid behind plot objects
title='Comparison of IID Bootstrap Resampling Across Five Distributions',
xlabel='Distribution',
ylabel='Value',
)
# Now fill the boxes with desired colors
box_colors = ['darkkhaki', 'royalblue']
num_boxes = len(data)
medians = np.empty(num_boxes)
for i in range(num_boxes):
box = bp['boxes'][i]
box_x = []
box_y = []
for j in range(5):
box_x.append(box.get_xdata()[j])
box_y.append(box.get_ydata()[j])
box_coords = np.column_stack([box_x, box_y])
# Alternate between Dark Khaki and Royal Blue
ax1.add_patch(Polygon(box_coords, facecolor=box_colors[i % 2]))
# Now draw the median lines back over what we just filled in
med = bp['medians'][i]
median_x = []
median_y = []
for j in range(2):
median_x.append(med.get_xdata()[j])
median_y.append(med.get_ydata()[j])
ax1.plot(median_x, median_y, 'k')
medians[i] = median_y[0]
# Finally, overplot the sample averages, with horizontal alignment
# in the center of each box
ax1.plot(np.average(med.get_xdata()), np.average(data[i]),
color='w', marker='*', markeredgecolor='k')
# Set the axes ranges and axes labels
ax1.set_xlim(0.5, num_boxes + 0.5)
top = 40
bottom = -5
ax1.set_ylim(bottom, top)
ax1.set_xticklabels(np.repeat(random_dists, 2),
rotation=45, fontsize=8)
# Due to the Y-axis scale being different across samples, it can be
# hard to compare differences in medians across the samples. Add upper
# X-axis tick labels with the sample medians to aid in comparison
# (just use two decimal places of precision)
pos = np.arange(num_boxes) + 1
upper_labels = [str(round(s, 2)) for s in medians]
weights = ['bold', 'semibold']
for tick, label in zip(range(num_boxes), ax1.get_xticklabels()):
k = tick % 2
ax1.text(pos[tick], .95, upper_labels[tick],
transform=ax1.get_xaxis_transform(),
horizontalalignment='center', size='x-small',
weight=weights[k], color=box_colors[k])
# Finally, add a basic legend
fig.text(0.80, 0.08, f'{N} Random Numbers',
backgroundcolor=box_colors[0], color='black', weight='roman',
size='x-small')
fig.text(0.80, 0.045, 'IID Bootstrap Resample',
backgroundcolor=box_colors[1],
color='white', weight='roman', size='x-small')
fig.text(0.80, 0.015, '*', color='white', backgroundcolor='silver',
weight='roman', size='medium')
fig.text(0.815, 0.013, ' Average Value', color='black', weight='roman',
size='x-small')
plt.show()
###############################################################################
# Here we write a custom function to bootstrap confidence intervals.
# We can then use the boxplot along with this function to show these intervals.
def fake_bootstrapper(n):
    """
    This is just a placeholder for the user's method of
    bootstrapping the median and its confidence intervals.

    Returns an arbitrary median and confidence interval packed into a tuple.
    """
    # Hard-coded demo values: sample 1 gets one interval, everything
    # else gets a wider one.
    if n == 1:
        med = 0.1
        ci = (-0.25, 0.25)
    else:
        med = 0.2
        ci = (-0.35, 0.50)
    return med, ci
inc = 0.1
e1 = np.random.normal(0, 1, size=500)
e2 = np.random.normal(0, 1, size=500)
e3 = np.random.normal(0, 1 + inc, size=500)
e4 = np.random.normal(0, 1 + 2*inc, size=500)
treatments = [e1, e2, e3, e4]
med1, ci1 = fake_bootstrapper(1)
med2, ci2 = fake_bootstrapper(2)
medians = [None, None, med1, med2]
conf_intervals = [None, None, ci1, ci2]
fig, ax = plt.subplots()
pos = np.arange(len(treatments)) + 1
bp = ax.boxplot(treatments, sym='k+', positions=pos,
notch=1, bootstrap=5000,
usermedians=medians,
conf_intervals=conf_intervals)
ax.set_xlabel('treatment')
ax.set_ylabel('response')
plt.setp(bp['whiskers'], color='k', linestyle='-')
plt.setp(bp['fliers'], markersize=3.0)
plt.show()
#############################################################################
#
# ------------
#
# References
# """"""""""
#
# The use of the following functions and methods is shown in this example:
import matplotlib
matplotlib.axes.Axes.boxplot
matplotlib.pyplot.boxplot
matplotlib.axes.Axes.set
| [
"quantum.analyst@gmail.com"
] | quantum.analyst@gmail.com |
f65bae1fcb345c5530e79ae207a72b94a2dca716 | 1101585a921ae1abede0d2a5e28970d430871fe8 | /apps/account/consts.py | bb8b86dfc190bfc6435bbb9a0b665e405073b6db | [] | no_license | myanko01/Lesson_apps | 21e9a62a74731df69940039e140697f2d3b6fd64 | dda8571a56b85cee4cf11886dc3720d68fa123eb | refs/heads/master | 2020-07-01T00:00:05.711391 | 2019-08-07T06:36:49 | 2019-08-07T06:36:49 | 200,983,326 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 58 | py | CHOICE_GENDER = (
(1, '男性'),
(2, '女性'),
)
| [
"you@example.com"
] | you@example.com |
9147b60d41af7abb08939ef3562dc2359547bf87 | eb87c8b1ce8591d207643d3924b7939228f1a4fe | /conformance_suite/bool_is_a_subtype_of_int_pos.py | bb706bf124e6ecd51ce427cfe899d55b2d6e5a41 | [] | no_license | brownplt/insta-model | 06543b43dde89913c219d476ced0f51a439add7b | 85e2c794ec4b1befa19ecb85f2c8d2509ec8cf42 | refs/heads/main | 2023-08-30T19:06:58.083150 | 2023-05-03T18:53:58 | 2023-05-10T22:29:18 | 387,500,638 | 5 | 0 | null | 2022-04-23T23:06:52 | 2021-07-19T14:53:09 | Racket | UTF-8 | Python | false | false | 70 | py | # bool_is_a_subtype_of_int_pos.py
# This should pass.
x: int = True
| [
"lukuangchen1024@gmail.com"
] | lukuangchen1024@gmail.com |
0cab1f253f5f673f3a60a7e8f1f2fd07c1bfb275 | d2f91b93ad42aaefa5fc315a9b3a5d45d07fa705 | /slbman/venv/Lib/site-packages/aliyunsdkcdn/request/v20141111/DescribeDomainFileSizeProportionDataRequest.py | 50ce4087dcc0fd25e5b8e817ee998a50fb616012 | [] | no_license | junlongzhou5566/managePlatform | 66cb5bc5b176147ff0038819924f7efa8df1d556 | 3201ba1a11b05c86db5f42aa9ca8eaf1cc20e216 | refs/heads/master | 2021-03-29T00:58:23.337808 | 2020-03-17T09:50:21 | 2020-03-17T09:50:21 | 247,910,365 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,816 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class DescribeDomainFileSizeProportionDataRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Cdn', '2014-11-11', 'DescribeDomainFileSizeProportionData')
def get_SecurityToken(self):
return self.get_query_params().get('SecurityToken')
def set_SecurityToken(self,SecurityToken):
self.add_query_param('SecurityToken',SecurityToken)
def get_DomainName(self):
return self.get_query_params().get('DomainName')
def set_DomainName(self,DomainName):
self.add_query_param('DomainName',DomainName)
def get_EndTime(self):
return self.get_query_params().get('EndTime')
def set_EndTime(self,EndTime):
self.add_query_param('EndTime',EndTime)
def get_StartTime(self):
return self.get_query_params().get('StartTime')
def set_StartTime(self,StartTime):
self.add_query_param('StartTime',StartTime)
def get_OwnerId(self):
return self.get_query_params().get('OwnerId')
def set_OwnerId(self,OwnerId):
self.add_query_param('OwnerId',OwnerId) | [
"645647713@qq.com@qq.com"
] | 645647713@qq.com@qq.com |
1b5d14da025a52cca9cb61629495dbd0444d9afa | d0cb58e1658d4b5b88bdc07e497dc8092707ae02 | /2020/01january/14.py | a3f2553dce31bd97e8db5f46114b464bdc4eb538 | [] | no_license | June-fu/python365 | 27f9b753d38ade549d59aa8f2d8bda0fb8b1e20c | 242033a4b644a7566fbfa4dba9b60f60aa31fe91 | refs/heads/master | 2021-07-02T21:42:28.454091 | 2021-05-04T15:08:44 | 2021-05-04T15:08:44 | 233,629,713 | 0 | 0 | null | 2020-01-13T15:52:58 | 2020-01-13T15:36:53 | null | UTF-8 | Python | false | false | 823 | py | # -*- coding: utf-8 -*-
# Author: june-fu
# Date : 2020/1/27
"""
python-practice-book 12
Write a function group(list, size) that take a list and splits into smaller lists of given size.
input: group([1, 2, 3, 4, 5, 6, 7, 8, 9], 4)
output: [[1, 2, 3, 4], [5, 6, 7, 8], [9]]
"""
def group(list1, size):
# return [list1[i::size] for i in range(size)]
list_one = []
if int(len(list1) % size) == 0:
x = int(len(list1)/size)
else:
x = int(len(list1)/size) + 1
for i in range(x):
list_one.append(list1[i * size:(i+1) * size])
return list_one
# use slice
def group1(list1, size):
return [list1[i:i+size] for i in range(0, len(list1), size)]
if __name__ == '__main__':
print(group([1, 2, 3, 4, 5, 6, 7, 8, 9], 4))
print(group1([1, 2, 3, 4, 5, 6, 7, 8, 9], 4))
| [
"fujun1990@gmail.com"
] | fujun1990@gmail.com |
af7900ac95be793c451bd2cf4e983dfefb9c666a | bb959d621b83ec2c36d0071dd48bc20942c0dd84 | /apps/users/forms.py | 456ba979e88254bf1317fed9a3c4b540b56701e5 | [
"BSD-3-Clause"
] | permissive | clincher/django-base-template | 6dac3b67db80b2e6336698f3ed60c8ef85dd974f | 560263bf0bb9737f89ae06e391b3d730f102046b | refs/heads/master | 2021-01-18T08:30:27.286465 | 2014-07-19T09:51:27 | 2014-07-19T09:51:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,972 | py | # -*- coding: utf-8 -*-
from django.forms import ModelForm
from django import forms
from django.utils.translation import ugettext_lazy as _
from django.utils.text import capfirst
from django.contrib.auth.forms import ReadOnlyPasswordHashField
from django.contrib.auth import authenticate, get_user_model
from django.contrib.auth.models import UserManager
from braces.views import FormValidMessageMixin
class UserForm(FormValidMessageMixin, ModelForm):
form_valid_message = _(u"Account updated!")
success_list_url = "user_update"
class Meta:
model = get_user_model()
exclude = [
'password',
'email',
'is_staff',
'is_active',
'date_joined',
'last_login',
'groups',
'is_superuser',
'user_permissions'
]
class CleanEmailMixin:
def clean_email(self):
return UserManager.normalize_email(self.cleaned_data["email"])
class EmailAuthenticateForm(CleanEmailMixin, forms.Form):
"""
Base class for authenticating users. Extend this to get a form that accepts
username/password logins.
"""
email = forms.EmailField(max_length=255)
password = forms.CharField(label=_("Пароль"), widget=forms.PasswordInput)
error_messages = {
'invalid_login': _("Пожалуйста, проверьте правильность ввода "
"электронной почты и пароля. "
"Обратите внимание на то, что оба поля "
"чувствительны к регистру."),
'no_cookies': _("Your Web browser doesn't appear to have cookies "
"enabled. Cookies are required for logging in."),
'inactive': _("Акаунт не активирован."),
}
def __init__(self, request=None, *args, **kwargs):
"""
If request is passed in, the form will validate that cookies are
enabled. Note that the request (a HttpRequest object) must have set a
cookie with the key TEST_COOKIE_NAME and value TEST_COOKIE_VALUE before
running this validation.
"""
self.request = request
self.user_cache = None
super(EmailAuthenticateForm, self).__init__(*args, **kwargs)
# Set the label for the "username" field.
UserModel = get_user_model()
email_field = UserModel._meta.get_field(UserModel.EMAIL_FIELD)
self.fields[UserModel.EMAIL_FIELD].label = capfirst(
email_field.verbose_name)
def clean(self):
email = self.cleaned_data.get('email')
password = self.cleaned_data.get('password')
if email and password:
self.user_cache = authenticate(email=email, password=password)
if self.user_cache is None:
raise forms.ValidationError(
self.error_messages['invalid_login'])
elif not self.user_cache.is_active:
raise forms.ValidationError(self.error_messages['inactive'])
self.check_for_test_cookie()
return self.cleaned_data
def check_for_test_cookie(self):
if self.request and not self.request.session.test_cookie_worked():
raise forms.ValidationError(self.error_messages['no_cookies'])
def get_user_id(self):
if self.user_cache:
return self.user_cache.id
return None
def get_user(self):
return self.user_cache
class RegistrationForm(CleanEmailMixin, forms.ModelForm):
"""
A form that creates a user, with no privileges, from the given email and
password.
"""
error_messages = {
'duplicate_email': _("Плоьзователь с таким именем уже существует."),
'password_mismatch': _("Пароли не совпадают."),
}
password1 = forms.CharField(label=_("Пароль"), widget=forms.PasswordInput)
password2 = forms.CharField(
label=_("Подтверждение пароля"), widget=forms.PasswordInput,
help_text=_("Введите еще раз тот же пароль для проверки."))
class Meta:
model = get_user_model()
fields = ("email",)
def clean_email(self):
# Since User.username is unique, this check is redundant,
# but it sets a nicer error message than the ORM. See #13147.
email = super(RegistrationForm, self).clean_email()
User = get_user_model()
if User.objects.filter(email=email).exists():
raise forms.ValidationError(self.error_messages['duplicate_email'])
return email
def clean_password2(self):
password1 = self.cleaned_data.get("password1")
password2 = self.cleaned_data.get("password2")
if password1 and password2 and password1 != password2:
raise forms.ValidationError(
self.error_messages['password_mismatch'])
return password2
def save(self, commit=True):
user = super(RegistrationForm, self).save(commit=False)
user.set_password(self.cleaned_data["password1"])
if commit:
user.save()
return user
class AdminUserCreationForm(CleanEmailMixin, forms.ModelForm):
"""A form for creating new users. Includes all the required
fields, plus a repeated password."""
password1 = forms.CharField(label='Password', widget=forms.PasswordInput)
password2 = forms.CharField(
label='Password confirmation', widget=forms.PasswordInput)
class Meta:
model = get_user_model()
excludes = ('residence', 'timezone', 'bio', 'is_staff', 'is_active')
def clean_password2(self):
# Check that the two password entries match
password1 = self.cleaned_data.get("password1")
password2 = self.cleaned_data.get("password2")
if password1 and password2 and password1 != password2:
raise forms.ValidationError("Passwords don't match")
return password2
def save(self, commit=True):
# Save the provided password in hashed format
user = super(AdminUserCreationForm, self).save(commit=False)
user.set_password(self.cleaned_data["password1"])
if commit:
user.save()
return user
class AdminUserChangeForm(CleanEmailMixin, forms.ModelForm):
"""A form for updating users. Includes all the fields on
the user, but replaces the password field with admin's
password hash display field.
"""
password = ReadOnlyPasswordHashField()
class Meta:
model = get_user_model()
def clean_password(self):
# Regardless of what the user provides, return the initial value.
# This is done here, rather than on the field, because the
# field does not have access to the initial value
return self.initial["password"]
| [
"va.bolshakov@gmail.com"
] | va.bolshakov@gmail.com |
7c62691590e5917d92a3dbb511c391577cd87034 | 7c16a9f999f966060c064ae5bd4bddaf8f4e1dd0 | /operator.py | 264f50d7656d72931316ada1cbd34bf10b76bb2c | [] | no_license | sbd2309/Adv.Python | fd5ed698b14c75484903006da7753a155cf11b47 | f7ef906cd78114643ffaaaaca6d4cb0ccfb34f62 | refs/heads/master | 2021-10-25T01:48:29.420102 | 2021-10-17T06:20:11 | 2021-10-17T06:20:11 | 232,631,577 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 98 | py | n=float(input())
x=float(input())
z=float(input())
d=(n*(x/100))
c=(n*(z/100))
print(round(n+d+c)) | [
"noreply@github.com"
] | sbd2309.noreply@github.com |
cd07a0839cf198525a33aed5909864ac6527e502 | 77b3ef4cae52a60181dfdf34ee594afc7a948925 | /mediation/dags/cm_sub_dag_parse_and_import_zte_3g.py | 5781cc0b7f0d683960266ee1c6de6a45da242481 | [
"Apache-2.0"
] | permissive | chandusekhar/bts-ce | 4cb6d1734efbda3503cb5fe75f0680c03e4cda15 | ad546dd06ca3c89d0c96ac8242302f4678ca3ee3 | refs/heads/master | 2021-07-15T02:44:27.646683 | 2020-07-26T08:32:33 | 2020-07-26T08:32:33 | 183,961,877 | 0 | 0 | Apache-2.0 | 2020-07-26T08:32:34 | 2019-04-28T21:42:29 | Python | UTF-8 | Python | false | false | 2,466 | py | import sys
import os
from airflow.models import DAG
from airflow.operators.bash_operator import BashOperator
from airflow.operators.python_operator import PythonOperator
from airflow.operators.python_operator import BranchPythonOperator
from airflow.operators.dummy_operator import DummyOperator
from airflow.operators.subdag_operator import SubDagOperator
from cm_sub_dag_parse_huawei_2g_files import run_huawei_2g_parser
from cm_sub_dag_import_huawei_2g_files import import_huawei_2g_parsed_csv
sys.path.append('/mediation/packages');
from bts import NetworkBaseLine, Utils, ProcessCMData;
bts_utils = Utils();
def parse_and_import_zte_3g(parent_dag_name, child_dag_name, start_date, schedule_interval):
"""Parse and import ZTE 2G"""
dag_id = '%s.%s' % (parent_dag_name, child_dag_name)
dag = DAG(
'%s.%s' % (parent_dag_name, child_dag_name),
schedule_interval=schedule_interval,
start_date=start_date,
)
# @TODO: Investigate other ways to check if there are not files yet
t28 = BashOperator(
task_id='check_if_zte_3g_raw_files_exist',
bash_command='ls -1 /mediation/data/cm/zte/raw/in | wc -l',
dag=dag)
# @TODO: Backup parsed files
t30 = BashOperator(
task_id='backup_zte_3g_csv_files',
bash_command='mv -f /mediation/data/cm/zte/3g/parsed/bulkcm_umts/* /mediation/data/cm/zte/parsed/backup/ 2>/dev/null || true',
dag=dag)
def clear_zte_2g_cm_tables():
pass
t31 = PythonOperator(
task_id='clear_zte_3g_cm_tables',
python_callable=clear_zte_2g_cm_tables,
dag=dag)
parse_zte_2g_cm_files = BashOperator(
task_id='parse_zte_3g_cm_files',
bash_command='java -jar /mediation/bin/boda-bulkcmparser.jar /mediation/data/cm/zte/raw/bulkcm_umts /mediation/data/cm/zte/parsed/bulkcm_umts /mediation/conf/cm/zte_cm_3g_blkcm_parser.cfg',
dag=dag)
import_zte_cm_data = BashOperator(
task_id='import_zte_3g_cm_data',
bash_command='python /mediation/bin/load_cm_data_into_db.py zte_bulkcm_umts /mediation/data/cm/zte/parsed/bulkcm_umts ',
dag=dag)
dag.set_dependency('check_if_zte_3g_raw_files_exist', 'backup_zte_3g_csv_files')
dag.set_dependency('backup_zte_3g_csv_files', 'parse_zte_3g_cm_files')
dag.set_dependency('parse_zte_3g_cm_files', 'clear_zte_3g_cm_tables')
dag.set_dependency('clear_zte_3g_cm_tables', 'import_zte_3g_cm_data')
return dag | [
"emmanuel.ssebaggala@bodastage.com"
] | emmanuel.ssebaggala@bodastage.com |
8e46ea58063b50c3a21f81a13f3a9e256b2011bc | 0e0ce88c886370df9af51855115c99dfc003e5da | /2019/01_Curso_Geek_basico_avancado/Exercicios_Python_Geek/exercicio_daniel/ex_2.py | fd492eecbc0a4528145e6fa1523d2b6de4c3795f | [] | no_license | miguelzeph/Python_Git | ed80db9a4f060836203df8cc2e42e003b0df6afd | 79d3b00236e7f4194d2a23fb016b43e9d09311e6 | refs/heads/master | 2021-07-08T18:43:45.855023 | 2021-04-01T14:12:23 | 2021-04-01T14:12:23 | 232,007,012 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 185 | py | """2-) Teste se o ano é bissexto"""
ano = int(input("Digite o ano: "))
if ano%4 == 0:
print("É ano bissexto, tem 366 dias")
else:
print("Não é ano bissexto, tem 365 dias") | [
"miguel.junior.mat@hotmail.com"
] | miguel.junior.mat@hotmail.com |
e744eb2408e49179a3f2fd7ef1493748da92e8a6 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02879/s383346727.py | 7f19bc56acd3472c1250fb5a9475c3828f9375fa | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 90 | py | a, b = map(int, input().split())
if a < 10 and b < 10:
print(a * b)
else:
print('-1')
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
3ae8c2d0c34a084d8aaad125c9ce618b4e15a444 | dbcd14a6a4e85f3e6a815c3fd05125ccb57d99b3 | /data_science/pandas_datacamp/manipulating_dataframes_with_pandas/03_rearranging_and_reshaping_data/05_stacking_and_unstacking_II.py | 2800c570456538d2908b651c37062cf9f126b606 | [] | no_license | blockchainassets/data-engineering | b1351b1321c612ba651f27230b506ebf73d949b8 | fc3136e89bc7defafb9e2fa6377217066f124fc7 | refs/heads/master | 2021-05-17T14:56:59.911700 | 2020-03-28T14:47:37 | 2020-03-28T14:47:37 | 250,831,962 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 803 | py | '''
Stacking & unstacking II
You are now going to continue working with the users DataFrame. As always, first explore it in the IPython Shell to see the layout and note the index.
Your job in this exercise is to unstack and then stack the 'city' level, as you did previously for 'weekday'. Note that you won't get the same DataFrame.
Instructions
Define a DataFrame bycity with the 'city' level of users unstacked.
Print the bycity DataFrame to see the new data layout. This has been done for you.
Stack bycity by 'city' and print it to check if you get the same layout as the original users DataFrame.
'''
# Unstack users by 'city': bycity
bycity = users.unstack(level='city')
# Print the bycity DataFrame
print(bycity)
# Stack bycity by 'city' and print it
print(bycity.stack(level='city'))
| [
"pierinaacam@gmail.com"
] | pierinaacam@gmail.com |
eb5c93e8330a4712426af4c4de2fcbb3b250f22e | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03409/s717601792.py | 2e4854f89fc655c31e61797553566850201cbd4f | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 653 | py | n=int(input())
a=[0]*n
b=[0]*n
c=[0]*n
d=[0]*n
edges=[set() for _ in range(n)]
matched=[-1]*n
for i in range(n):
ta,tb=list(map(int,input().split()))
a[i]=ta
b[i]=tb
for i in range(n):
tc,td=list(map(int,input().split()))
c[i]=tc
d[i]=td
for i in range(n):
for j in range(n):
if a[i]<c[j] and b[i]<d[j]:
edges[i].add(j)
def dfs(v,visited):
for u in edges[v]:
if u in visited:
continue
visited.add(u)
if matched[u]==-1 or dfs(matched[u],visited):
matched[u]=v
return True
return False
print(sum(dfs(s,set()) for s in range(n))) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
38ff709a61cfb1ae8896dc1e3c08780ad1e9a2ef | 54df8336b50e8f2d7dbe353f0bc51a2b3489095f | /Python/Interview Que/Interview Companies/Helious/helious.py | 689ab9b6cbb7979e1b4729d382fd1fd9463b130b | [] | no_license | SurendraKumarAratikatla/MyLenovolapCodes1 | 42d5bb7a14bfdf8d773ee60719380ee28ff4947a | 12c56200fcfd3e5229bfeec209fd03b5fc35b823 | refs/heads/master | 2023-06-17T15:44:18.312398 | 2021-07-19T10:28:11 | 2021-07-19T10:28:11 | 387,358,266 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 384 | py | import pandas as pd
def amazon(rows,column,grid):
l = grid
li = []
j = 1
for i in range(1,rows+1):
#print(i)
del li[:]
li.append(l[j:i+1])
j = j + 1
#print(li)
#print(len(li))
df = pd.read_excel('helious_excel.xlsx',index = False)
products_list = df.values.tolist()
print(products_list)
#print(df[1:2])
amazon(5,4,df) | [
"suendra.aratikatla1608@gmail.com"
] | suendra.aratikatla1608@gmail.com |
65a307e046bd927498c11951ac68c7b6f1820282 | ad13583673551857615498b9605d9dcab63bb2c3 | /output/instances/sunData/SType/ST_facets/ST_facets00103m/ST_facets00103m1_p.py | 48419483f9e7d9f711bb0a9db68d1efb0f190a40 | [
"MIT"
] | permissive | tefra/xsdata-w3c-tests | 397180205a735b06170aa188f1f39451d2089815 | 081d0908382a0e0b29c8ee9caca6f1c0e36dd6db | refs/heads/main | 2023-08-03T04:25:37.841917 | 2023-07-29T17:10:13 | 2023-07-30T12:11:13 | 239,622,251 | 2 | 0 | MIT | 2023-07-25T14:19:04 | 2020-02-10T21:59:47 | Python | UTF-8 | Python | false | false | 137 | py | from output.models.sun_data.stype.st_facets.st_facets00103m.st_facets00103m_xsd.st_facets00103m import Test
obj = Test(
value=99
)
| [
"tsoulloftas@gmail.com"
] | tsoulloftas@gmail.com |
717a43259dc60384e77d65e86db860b4dc534928 | 786027545626c24486753351d6e19093b261cd7d | /ghidra9.2.1_pyi/ghidra/app/plugin/core/assembler/AssemblyDualTextField.pyi | 2bb5f62b3cb243f9fb56b96919696eb29f21d33a | [
"MIT"
] | permissive | kohnakagawa/ghidra_scripts | 51cede1874ef2b1fed901b802316449b4bf25661 | 5afed1234a7266c0624ec445133280993077c376 | refs/heads/main | 2023-03-25T08:25:16.842142 | 2021-03-18T13:31:40 | 2021-03-18T13:31:40 | 338,577,905 | 14 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,327 | pyi | from typing import List
import docking.widgets.autocomplete
import ghidra
import ghidra.app.plugin.core.assembler
import ghidra.program.model.lang
import ghidra.program.util
import java.awt
import java.awt.event
import java.lang
import javax.swing
class AssemblyDualTextField(object):
class AssemblyDualTextFieldDemo(object, ghidra.GhidraLaunchable):
ADDR_FORMAT: unicode
DEMO_LANG_ID: ghidra.program.model.lang.LanguageID
def __init__(self, __a0: ghidra.app.plugin.core.assembler.AssemblyDualTextField): ...
def equals(self, __a0: object) -> bool: ...
def getClass(self) -> java.lang.Class: ...
def hashCode(self) -> int: ...
def launch(self, __a0: ghidra.GhidraApplicationLayout, __a1: List[unicode]) -> None: ...
def notify(self) -> None: ...
def notifyAll(self) -> None: ...
def toString(self) -> unicode: ...
@overload
def wait(self) -> None: ...
@overload
def wait(self, __a0: long) -> None: ...
@overload
def wait(self, __a0: long, __a1: int) -> None: ...
class AssemblyCompletion(object, java.lang.Comparable):
def __init__(self, __a0: unicode, __a1: unicode, __a2: java.awt.Color, __a3: int): ...
@overload
def compareTo(self, __a0: ghidra.app.plugin.core.assembler.AssemblyDualTextField.AssemblyCompletion) -> int: ...
@overload
def compareTo(self, __a0: object) -> int: ...
def equals(self, __a0: object) -> bool: ...
def getCanDefault(self) -> bool: ...
def getClass(self) -> java.lang.Class: ...
def getColor(self) -> java.awt.Color: ...
def getDisplay(self) -> unicode: ...
def getText(self) -> unicode: ...
def hashCode(self) -> int: ...
def notify(self) -> None: ...
def notifyAll(self) -> None: ...
def toString(self) -> unicode: ...
@overload
def wait(self) -> None: ...
@overload
def wait(self, __a0: long) -> None: ...
@overload
def wait(self, __a0: long, __a1: int) -> None: ...
@property
def canDefault(self) -> bool: ...
@property
def color(self) -> java.awt.Color: ...
@property
def display(self) -> unicode: ...
@property
def text(self) -> unicode: ...
class VisibilityMode(java.lang.Enum):
DUAL_VISIBLE: ghidra.app.plugin.core.assembler.AssemblyDualTextField.VisibilityMode = DUAL_VISIBLE
INVISIBLE: ghidra.app.plugin.core.assembler.AssemblyDualTextField.VisibilityMode = INVISIBLE
SINGLE_VISIBLE: ghidra.app.plugin.core.assembler.AssemblyDualTextField.VisibilityMode = SINGLE_VISIBLE
@overload
def compareTo(self, __a0: java.lang.Enum) -> int: ...
@overload
def compareTo(self, __a0: object) -> int: ...
def equals(self, __a0: object) -> bool: ...
def getClass(self) -> java.lang.Class: ...
def getDeclaringClass(self) -> java.lang.Class: ...
def hashCode(self) -> int: ...
def name(self) -> unicode: ...
def notify(self) -> None: ...
def notifyAll(self) -> None: ...
def ordinal(self) -> int: ...
def toString(self) -> unicode: ...
@overload
@staticmethod
def valueOf(__a0: unicode) -> ghidra.app.plugin.core.assembler.AssemblyDualTextField.VisibilityMode: ...
@overload
@staticmethod
def valueOf(__a0: java.lang.Class, __a1: unicode) -> java.lang.Enum: ...
@staticmethod
def values() -> List[ghidra.app.plugin.core.assembler.AssemblyDualTextField.VisibilityMode]: ...
@overload
def wait(self) -> None: ...
@overload
def wait(self, __a0: long) -> None: ...
@overload
def wait(self, __a0: long, __a1: int) -> None: ...
def __init__(self): ...
def addFocusListener(self, __a0: java.awt.event.FocusListener) -> None: ...
def addKeyListener(self, __a0: java.awt.event.KeyListener) -> None: ...
def clear(self) -> None: ...
def equals(self, __a0: object) -> bool: ...
def getAssemblyField(self) -> javax.swing.JTextField: ...
def getAutocompleter(self) -> docking.widgets.autocomplete.TextFieldAutocompleter: ...
def getClass(self) -> java.lang.Class: ...
def getMnemonicField(self) -> javax.swing.JTextField: ...
def getOperandsField(self) -> javax.swing.JTextField: ...
def getText(self) -> unicode: ...
def hashCode(self) -> int: ...
def notify(self) -> None: ...
def notifyAll(self) -> None: ...
def setCaretPosition(self, __a0: int) -> None: ...
def setLanguageLocation(self, __a0: ghidra.program.model.lang.Language, __a1: long) -> None: ...
def setProgramLocation(self, __a0: ghidra.program.util.ProgramLocation) -> None: ...
def setText(self, __a0: unicode) -> None: ...
def setVisible(self, __a0: ghidra.app.plugin.core.assembler.AssemblyDualTextField.VisibilityMode) -> None: ...
def toString(self) -> unicode: ...
@overload
def wait(self) -> None: ...
@overload
def wait(self, __a0: long) -> None: ...
@overload
def wait(self, __a0: long, __a1: int) -> None: ...
@property
def assemblyField(self) -> javax.swing.JTextField: ...
@property
def autocompleter(self) -> docking.widgets.autocomplete.TextFieldAutocompleter: ...
@property
def caretPosition(self) -> None: ... # No getter available.
@caretPosition.setter
def caretPosition(self, value: int) -> None: ...
@property
def mnemonicField(self) -> javax.swing.JTextField: ...
@property
def operandsField(self) -> javax.swing.JTextField: ...
@property
def programLocation(self) -> None: ... # No getter available.
@programLocation.setter
def programLocation(self, value: ghidra.program.util.ProgramLocation) -> None: ...
@property
def text(self) -> unicode: ...
@text.setter
def text(self, value: unicode) -> None: ...
@property
def visible(self) -> None: ... # No getter available.
@visible.setter
def visible(self, value: ghidra.app.plugin.core.assembler.AssemblyDualTextField.VisibilityMode) -> None: ...
| [
"tsunekou1019@gmail.com"
] | tsunekou1019@gmail.com |
5d9be8cbd2400d26b6973bf47290a761629d2234 | f50f1aa1f8f139d546db3230a1cb1f53043fd9e6 | /programming/language/python/xmpppy/actions.py | c7191c67dea317bccb65cc5fb9ea28569e810d0e | [] | no_license | pars-linux/corporate2 | 7887961d1552d39bc3b0bef4a60fd3413d9b82bb | 14d1eacfc824fb8d0bff8173e7ac06b36b88d10d | refs/heads/master | 2020-05-26T15:02:12.005654 | 2017-02-27T03:07:14 | 2017-02-27T03:07:14 | 82,476,084 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 507 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Licensed under the GNU General Public License, version 2.
# See the file http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt
from pisi.actionsapi import pythonmodules
from pisi.actionsapi import pisitools
from pisi.actionsapi import get
WorkDir = "%s-%s" % (get.srcNAME(), get.srcVERSION().replace("_", ""))
def install():
pythonmodules.install()
pisitools.insinto("%s/%s" % (get.docDIR(), get.srcNAME()), "doc/*")
pisitools.dodoc("ChangeLog")
| [
"ozancaglayan@users.noreply.github.com"
] | ozancaglayan@users.noreply.github.com |
fe45965db65727374e8c7858346a6f1b042d6ccb | aa0270b351402e421631ebc8b51e528448302fab | /sdk/containerregistry/azure-containerregistry/azure/containerregistry/_version.py | e58e03dcf5cb74c0037ab2a31a9b3476cbb2efe7 | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] | permissive | fangchen0601/azure-sdk-for-python | d04a22109d0ff8ff209c82e4154b7169b6cb2e53 | c2e11d6682e368b2f062e714490d2de42e1fed36 | refs/heads/master | 2023-05-11T16:53:26.317418 | 2023-05-04T20:02:16 | 2023-05-04T20:02:16 | 300,440,803 | 0 | 0 | MIT | 2020-10-16T18:45:29 | 2020-10-01T22:27:56 | null | UTF-8 | Python | false | false | 172 | py | # ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
VERSION = "1.1.0b5"
| [
"noreply@github.com"
] | fangchen0601.noreply@github.com |
9c45ea43930611d5d6687bc81905d6bd6dc7511d | 32dbb74f03c7450ee1f3166f82260e60272f57e0 | /Push/special_mixer_component.py | 9867a321f3da17667503d3af980a96407815a7f9 | [] | no_license | cce/buttons10 | 61555bc767f2bd300bfffb373f9feaae96b83ca7 | 6f1137c96eead0b9771ad8ec9327dd72ada2e916 | refs/heads/master | 2021-04-15T09:45:39.684764 | 2018-03-24T04:29:52 | 2018-03-24T04:29:52 | 126,565,725 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,946 | py | # Embedded file name: /Users/versonator/Jenkins/live/output/mac_64_static/Release/python-bundle/MIDI Remote Scripts/Push/special_mixer_component.py
from __future__ import absolute_import, print_function, unicode_literals
from itertools import izip_longest
from ableton.v2.base import listens
from ableton.v2.control_surface import components
from ableton.v2.control_surface.elements import DisplayDataSource
from .special_chan_strip_component import SpecialChanStripComponent
class SpecialMixerComponent(components.MixerComponent):
u"""
Special mixer class that uses return tracks alongside midi and
audio tracks. This provides also a more convenient interface to
set controls for the different modes of Push.
"""
num_label_segments = 4
def __init__(self, *a, **k):
super(SpecialMixerComponent, self).__init__(*a, **k)
self._pan_send_index = 0
self._pan_send_controls = None
self._pan_send_names_display = None
self._pan_send_values_display = None
self._pan_send_graphics_display = None
self._pan_send_toggle_skip = False
self._selected_track_data_sources = map(DisplayDataSource, (u'',) * self.num_label_segments)
self._selected_track_data_sources[0].set_display_string(u'Track Selection:')
self._selected_track_name_data_source = self._selected_track_data_sources[1]
self._on_selected_track_changed.subject = self.song.view
self._on_track_list_changed.subject = self.song
self._update_selected_track_name()
return
def _create_strip(self):
return SpecialChanStripComponent()
def set_pan_send_toggle(self, toggle):
u"""
The pan_send_toggle cycles through the different pan, or send
modes changing the bejhaviour of the pan_send display and
controls.
"""
self._pan_send_toggle = toggle
self._on_pan_send_value.subject = toggle
self._pan_send_toggle_skip = True
def set_selected_track_name_display(self, display):
if display:
display.set_data_sources(self._selected_track_data_sources)
def set_track_select_buttons(self, buttons):
for strip, button in izip_longest(self._channel_strips, buttons or []):
if button:
button.set_on_off_values(u'Option.Selected', u'Option.Unselected')
strip.set_select_button(button)
def set_solo_buttons(self, buttons):
for strip, button in izip_longest(self._channel_strips, buttons or []):
if button:
button.set_on_off_values(u'Mixer.SoloOn', u'Mixer.SoloOff')
strip.set_solo_button(button)
def set_mute_buttons(self, buttons):
for strip, button in izip_longest(self._channel_strips, buttons or []):
if button:
button.set_on_off_values(u'Mixer.MuteOff', u'Mixer.MuteOn')
strip.set_mute_button(button)
def set_track_names_display(self, display):
if display:
sources = [ strip.track_name_data_source() for strip in self._channel_strips ]
display.set_data_sources(sources)
def set_volume_names_display(self, display):
self._set_parameter_names_display(display, 0)
def set_volume_values_display(self, display):
self._set_parameter_values_display(display, 0)
def set_volume_graphics_display(self, display):
self._set_parameter_graphics_display(display, 0)
def set_volume_controls(self, controls):
for strip, control in izip_longest(self._channel_strips, controls or []):
strip.set_volume_control(control)
def set_pan_send_names_display(self, display):
self._normalize_pan_send_index()
self._pan_send_names_display = display
self._set_parameter_names_display(display, self._pan_send_index + 1)
    def set_pan_send_values_display(self, display):
        # Parameter 0 is volume, so pan/send parameters start at index 1.
        self._normalize_pan_send_index()
        self._pan_send_values_display = display
        self._set_parameter_values_display(display, self._pan_send_index + 1)
    def set_pan_send_graphics_display(self, display):
        # Parameter 0 is volume, so pan/send parameters start at index 1.
        self._normalize_pan_send_index()
        self._pan_send_graphics_display = display
        self._set_parameter_graphics_display(display, self._pan_send_index + 1)
def set_pan_send_controls(self, controls):
self.set_send_controls(None)
self.set_pan_controls(None)
self._pan_send_controls = controls
self._normalize_pan_send_index()
if self._pan_send_index == 0:
self.set_pan_controls(controls)
else:
sends = self._pan_send_index - 1
self.set_send_controls(map(lambda ctl: (None,) * sends + (ctl,), controls or []))
return
    @listens(u'visible_tracks')
    def _on_track_list_changed(self):
        # Re-validate the pan/send mode and rebind controls/displays
        # whenever the set of visible tracks changes.
        self._update_pan_sends()
def set_pan_controls(self, controls):
for strip, control in izip_longest(self._channel_strips, controls or []):
strip.set_pan_control(control)
    def set_send_controls(self, controls):
        # Each entry is per-strip; an entry may be a tuple of send controls
        # padded with Nones (see set_pan_send_controls).
        for strip, control in izip_longest(self._channel_strips, controls or []):
            strip.set_send_controls(control)
def _set_parameter_names_display(self, display, parameter):
if display:
sources = [ strip.track_parameter_name_sources(parameter) for strip in self._channel_strips ]
display.set_data_sources(sources)
    def _set_parameter_values_display(self, display, parameter):
        # Show the current value of parameter number `parameter` per strip.
        if display:
            sources = [ strip.track_parameter_data_sources(parameter) for strip in self._channel_strips ]
            display.set_data_sources(sources)
    def _set_parameter_graphics_display(self, display, parameter):
        # Show the graphic (e.g. bar/knob) of parameter number `parameter`
        # per strip.
        if display:
            sources = [ strip.track_parameter_graphic_sources(parameter) for strip in self._channel_strips ]
            display.set_data_sources(sources)
    @listens(u'value')
    def _on_pan_send_value(self, value):
        # Advance to the next pan/send mode on a button event.  For momentary
        # buttons only a press (value != 0) advances; otherwise any value
        # change does.  The first event after the toggle is (re)assigned is
        # swallowed via _pan_send_toggle_skip (set in set_pan_send_toggle).
        if not self._pan_send_toggle_skip and self.is_enabled() and (value or not self._pan_send_toggle.is_momentary()):
            self._pan_send_index += 1
            self._update_pan_sends()
        self._pan_send_toggle_skip = False
    def _update_pan_sends(self):
        # Re-run the setters so controls and displays rebind to the parameter
        # selected by the (possibly changed) _pan_send_index.
        # NOTE(review): _pan_send_values_display is stored by its setter but
        # never refreshed here -- confirm whether that is intentional.
        self.set_pan_send_controls(self._pan_send_controls)
        self.set_pan_send_names_display(self._pan_send_names_display)
        self.set_pan_send_graphics_display(self._pan_send_graphics_display)
    def _normalize_pan_send_index(self):
        # Wrap back to pan mode (index 0) when there are no tracks or the
        # index has cycled past the last send.  Valid range is
        # 0..len(sends): 0 selects pan, N selects send N-1.
        if len(self.song.tracks) == 0 or self._pan_send_index > len(self.song.tracks[0].mixer_device.sends):
            self._pan_send_index = 0
    @listens(u'selected_track.name')
    def _on_selected_track_changed(self):
        # Fires both when the selection changes and when the selected
        # track is renamed.
        self._update_selected_track_name()
def _update_selected_track_name(self):
selected = self.song.view.selected_track
self._selected_track_name_data_source.set_display_string(selected.name) | [
"cce@appneta.com"
] | cce@appneta.com |
d5e78a332dff2134a46453a63b84a1c416791fe0 | 7cfcb2a79226d8fe90276bd32964d94243cc496a | /joints_detectors/hrnet/pose_estimation/video.py | 5271ff11f7860bd444e5f2f422c916880f5c3343 | [
"MIT"
] | permissive | daydreamer2023/videopose | 5185442eb31138f6bd6a86fdbec2c411a8773bc3 | 463f5d2770288a217033e901d1a8251b489d7f76 | refs/heads/master | 2022-01-12T16:14:33.589459 | 2019-05-13T12:45:23 | 2019-05-13T12:45:23 | 186,530,514 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,104 | py | '''
使用yolov3作为pose net模型的前处理
use yolov3 as the 2d human bbox detector
'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import os
path1 = os.path.split(os.path.realpath(__file__))[0]
path2 = os.path.join(path1, '..')
sys.path.insert(0, path1)
sys.path.insert(0, path2)
import argparse
import pprint
import ipdb;pdb=ipdb.set_trace
import numpy as np
from tqdm import tqdm
from utilitys import plot_keypoint, PreProcess
import time
import torch
import _init_paths
from config import cfg
import config
from config import update_config
from utils.transforms import *
from lib.core.inference import get_final_preds
import cv2
import models
from lib.detector.yolo.human_detector import main as yolo_det
from scipy.signal import savgol_filter
from lib.detector.yolo.human_detector import load_model as yolo_model
# Remove the two entries inserted at the head of sys.path above
# (insert(0, path1) then insert(0, path2) left [path2, path1, ...]).
# Bug fix: the original popped indices 0, 1 and 2 -- each pop shifts the
# remaining entries left, so that removed only one inserted path plus two
# unrelated entries.  Popping index 0 twice removes exactly the inserted pair.
sys.path.pop(0)
sys.path.pop(0)
# Sliding window of recent per-frame keypoint arrays consumed by smooth_filter.
kpt_queue = []
def smooth_filter(kpts):
    """Temporally smooth 2D keypoints with a Savitzky-Golay filter.

    `kpts` is the current frame's keypoint array -- presumably shaped
    (human_num, 17, 2); confirm against get_final_preds.  Until 6 frames
    have been buffered the input is returned unchanged.
    """
    if len(kpt_queue) < 6:
        kpt_queue.append(kpts)
        return kpts

    queue_length = len(kpt_queue)
    # Cap the buffer at 50 frames: drop the oldest before appending.
    if queue_length == 50:
        kpt_queue.pop(0)
    kpt_queue.append(kpts)
    # Transpose to shape (17, 2, num, frames): keypoint index, x/y
    # coordinate, humans per frame, frame count -- so the filter runs
    # along the time axis (the last axis by default).
    transKpts = np.array(kpt_queue).transpose(1,2,3,0)
    # savgol_filter needs an odd window_length greater than polyorder (3);
    # a bigger window smooths more.
    window_length = queue_length - 1 if queue_length % 2 == 0 else queue_length - 2
    # array, window_length(bigger is better), polyorder
    result = savgol_filter(transKpts, window_length, 3).transpose(3, 0, 1, 2) #shape(frame_num, human_num, 17, 2)
    # Return the third frame from the end: slightly delayed, but smoother
    # than the newest frame.
    return result[-3]
class get_args():
    # Minimal stand-in for the argparse namespace that update_config(cfg, args)
    # expects: the hrnet YAML config path plus the directory/override fields
    # the config machinery reads.
    # hrnet config
    cfg = path2 + '/experiments/coco/hrnet/w32_256x192_adam_lr1e-3.yaml'
    dataDir=''
    logDir=''
    modelDir=''
    opts=[]
    prevModelDir=''
##### load model
def model_load(config):
    """Build the pose-HRNet network named by the config and load the
    pretrained COCO w32_256x192 checkpoint.  Returns the model in eval
    mode (on CPU; the caller moves it to GPU)."""
    model = eval('models.'+config.MODEL.NAME+'.get_pose_net')(
        config, is_train=False
    )
    model_file_name  = path2 + '/models/pytorch/pose_coco/pose_hrnet_w32_256x192.pth'
    state_dict = torch.load(model_file_name)
    from collections import OrderedDict
    new_state_dict = OrderedDict()
    for k, v in state_dict.items():
        name = k  # NOTE(review): keys are copied unchanged -- the intended
        # "remove module." (DataParallel prefix) stripping was never
        # implemented; confirm the checkpoint was saved without the prefix.
        new_state_dict[name] = v
    model.load_state_dict(new_state_dict)
    model.eval()
    return model
def ckpt_time(t0=None, display=None):
    """Checkpoint timer.

    With no argument, returns the current time (float seconds) to use as
    a starting checkpoint.  Given a previous checkpoint *t0*, returns the
    tuple ``(elapsed_seconds, new_checkpoint)``; when *display* is truthy
    the elapsed time is also printed.

    Bug fix: the original tested ``if not t0``, which misclassified a
    checkpoint of exactly ``0``/``0.0`` as "no argument" and returned a
    bare float; we test ``t0 is None`` instead.
    """
    if t0 is None:
        return time.time()
    t1 = time.time()
    if display:
        print('consume {:2f} second'.format(t1 - t0))
    return t1 - t0, t1
###### Load the YOLOv3 human-detector model once at import time; it is
###### reused for every frame inside generate_kpts().
human_model = yolo_model()
def generate_kpts(video_name):
    """Run YOLOv3 person detection + pose-HRNet on each frame of a video.

    Returns an ndarray of per-frame keypoints for a single person --
    presumably shaped (num_frames, 17, 2); confirm against
    get_final_preds.  Frames where detection/pre-processing fails are
    skipped.
    """
    args = get_args()
    update_config(cfg, args)

    cam = cv2.VideoCapture(video_name)
    video_length = int(cam.get(cv2.CAP_PROP_FRAME_COUNT))
    ret_val, input_image = cam.read()
    # Video writer (fourcc/input_fps are prepared but no writer is created
    # in this function).
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    input_fps = cam.get(cv2.CAP_PROP_FPS)

    #### load pose-hrnet MODEL
    pose_model = model_load(cfg)
    pose_model.cuda()

    # collect keypoints coordinate
    kpts_result = []
    for i in tqdm(range(video_length-1)):

        ret_val, input_image = cam.read()

        try:
            bboxs, scores = yolo_det(input_image, human_model)
            # bbox is coordinate location
            inputs, origin_img, center, scale = PreProcess(input_image, bboxs, scores, cfg)
        except Exception as e:
            # Detection or pre-processing can fail (e.g. no person in the
            # frame); skip the frame rather than aborting the whole run.
            print(e)
            continue

        with torch.no_grad():
            # compute output heatmap
            inputs = inputs[:,[2,1,0]]  # swap colour channels (BGR<->RGB) -- confirm expected ordering
            output = pose_model(inputs.cuda())
            # compute coordinate
            preds, maxvals = get_final_preds(
                cfg, output.clone().cpu().numpy(), np.asarray(center), np.asarray(scale))

            # smooth and fine-tune coordinates
            preds = smooth_filter(preds)

            # 3D video pose (only support single human)
            kpts_result.append(preds[0])

    result = np.array(kpts_result)
    return result
if __name__ == '__main__':
    # Bug fix: the original called main(), which is not defined anywhere in
    # this module (NameError at runtime).  The usable entry point is
    # generate_kpts(<video file>).
    if len(sys.argv) < 2:
        sys.exit('usage: python video.py <video_file>')
    generate_kpts(sys.argv[1])
| [
"lxy5513@gmail.com"
] | lxy5513@gmail.com |
aab2aa6a4021a8c5efe708d79a20a7aa1c24c13d | 5e331ab99f88dd4d68099980a45ab33247ba6536 | /src/__init__.py | c140b8f38f4d6b28190bdddb890b3756dc31fc8b | [] | no_license | zidanewenqsh/mtcnn_02 | 264353976a5be15c8acce30ceda2d908fc4bc413 | 0e7abb7b2dd3148d5f083c1214747215209f0295 | refs/heads/master | 2020-12-11T00:40:54.228301 | 2020-02-11T12:22:21 | 2020-02-11T12:22:21 | 233,755,905 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 60 | py |
from src import train, nets
__all__ = ["train","nets"]
| [
"zidanewenqsh@163.com"
] | zidanewenqsh@163.com |
9045b983285883318be000d8d32475ee8e221303 | e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f | /indices/nnincorrupt.py | c955653958e8de29fca899bc995da1724924bb1c | [] | no_license | psdh/WhatsintheVector | e8aabacc054a88b4cb25303548980af9a10c12a8 | a24168d068d9c69dc7a0fd13f606c080ae82e2a6 | refs/heads/master | 2021-01-25T10:34:22.651619 | 2015-09-23T11:54:06 | 2015-09-23T11:54:06 | 42,749,205 | 2 | 3 | null | 2015-09-23T11:54:07 | 2015-09-18T22:06:38 | Python | UTF-8 | Python | false | false | 528 | py | ii = [('KembFFF.py', 1), ('WilbRLW4.py', 1), ('CookGHP.py', 3), ('MartHSI2.py', 1), ('PettTHE.py', 3), ('WilbRLW2.py', 1), ('ClarGE2.py', 1), ('CarlTFR.py', 34), ('CoopJBT2.py', 1), ('AinsWRR3.py', 1), ('CookGHP2.py', 2), ('ClarGE.py', 2), ('DibdTRL2.py', 1), ('WadeJEB.py', 2), ('NewmJLP.py', 1), ('SoutRD2.py', 2), ('HowiWRL2.py', 1), ('FerrSDO.py', 1), ('RoscTTI.py', 1), ('StorJCC.py', 4), ('MackCNH2.py', 1), ('BellCHM.py', 1), ('AinsWRR2.py', 1), ('DibdTRL.py', 1), ('DwigTHH.py', 1), ('NortSTC.py', 1), ('KirbWPW.py', 2)] | [
"varunwachaspati@gmail.com"
] | varunwachaspati@gmail.com |
e6365024ee3cd8e06acfeef8570a4e5969727fbd | ca17bd80ac1d02c711423ac4093330172002a513 | /remove_invalid_parenthese/RemoveInvalidParenthese.py | 6228196ed2a67837b1c30815b6bd3b8b6f1be798 | [] | no_license | Omega094/lc_practice | 64046dea8bbdaee99d767b70002a2b5b56313112 | e61776bcfd5d93c663b247d71e00f1b298683714 | refs/heads/master | 2020-03-12T13:45:13.988645 | 2018-04-23T06:28:32 | 2018-04-23T06:28:32 | 130,649,699 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 709 | py | class Solution(object):
def isvalid(self,s):
ctr = 0
for c in s:
if c == '(':
ctr += 1
elif c == ')':
ctr -= 1
if ctr < 0:
return False
return ctr == 0
    def removeInvalidParentheses(self, s):
        # BFS over strings obtained by deleting one character per level; the
        # first level that contains any valid string holds all answers with
        # the minimum number of removals.
        level = {s}
        while True:
            # NOTE(review): relies on Python 2 semantics -- filter() returns
            # a list here, so `if valid` tests emptiness.  Under Python 3 a
            # filter object is always truthy and this loop would misbehave.
            valid = filter(self.isvalid, level)
            if valid:
                return valid
            level ={s[:i] + s[i+1:] for s in level for i in range(len(s))}
        # Appears unreachable: the empty string is valid, so the loop always
        # returns before `level` can become empty.
        return []
# Ad-hoc smoke test (Python 2: print statements).
if __name__ == "__main__":
    sol = Solution()
    print sol.removeInvalidParentheses("()())()")
    print sol.removeInvalidParentheses("(a)())()")
| [
"zhao_j1@denison.edu"
] | zhao_j1@denison.edu |
85b68db2627031f616d3e2b99ba34bd77c09acf7 | 85a9ffeccb64f6159adbd164ff98edf4ac315e33 | /pysnmp/CISCO-GSLB-HEALTH-MON-MIB.py | cf61f3660ddb681fecf0998387da4bbecc70b4ed | [
"Apache-2.0"
] | permissive | agustinhenze/mibs.snmplabs.com | 5d7d5d4da84424c5f5a1ed2752f5043ae00019fb | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | refs/heads/master | 2020-12-26T12:41:41.132395 | 2019-08-16T15:51:41 | 2019-08-16T15:53:57 | 237,512,469 | 0 | 0 | Apache-2.0 | 2020-01-31T20:41:36 | 2020-01-31T20:41:35 | null | UTF-8 | Python | false | false | 26,320 | py | #
# PySNMP MIB module CISCO-GSLB-HEALTH-MON-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/CISCO-GSLB-HEALTH-MON-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 17:42:11 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
ObjectIdentifier, Integer, OctetString = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "Integer", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueRangeConstraint, ConstraintsIntersection, ValueSizeConstraint, ConstraintsUnion, SingleValueConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueRangeConstraint", "ConstraintsIntersection", "ValueSizeConstraint", "ConstraintsUnion", "SingleValueConstraint")
cgdAnswerId, = mibBuilder.importSymbols("CISCO-GSLB-DNS-MIB", "cgdAnswerId")
CiscoGslbTerminationMethod, CiscoGslbKeepaliveStatus, CiscoGslbKeepaliveMethod, CiscoGslbKeepaliveRate, CiscoGslbKalapType, CiscoGslbKeepaliveTargetType = mibBuilder.importSymbols("CISCO-GSLB-TC-MIB", "CiscoGslbTerminationMethod", "CiscoGslbKeepaliveStatus", "CiscoGslbKeepaliveMethod", "CiscoGslbKeepaliveRate", "CiscoGslbKalapType", "CiscoGslbKeepaliveTargetType")
ciscoMgmt, = mibBuilder.importSymbols("CISCO-SMI", "ciscoMgmt")
InetAddress, InetAddressType, InetPortNumber = mibBuilder.importSymbols("INET-ADDRESS-MIB", "InetAddress", "InetAddressType", "InetPortNumber")
SnmpAdminString, = mibBuilder.importSymbols("SNMP-FRAMEWORK-MIB", "SnmpAdminString")
ObjectGroup, ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ObjectGroup", "ModuleCompliance", "NotificationGroup")
sysName, = mibBuilder.importSymbols("SNMPv2-MIB", "sysName")
Bits, Gauge32, Counter32, MibScalar, MibTable, MibTableRow, MibTableColumn, ObjectIdentity, Unsigned32, Counter64, MibIdentifier, NotificationType, TimeTicks, ModuleIdentity, IpAddress, Integer32, iso = mibBuilder.importSymbols("SNMPv2-SMI", "Bits", "Gauge32", "Counter32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "ObjectIdentity", "Unsigned32", "Counter64", "MibIdentifier", "NotificationType", "TimeTicks", "ModuleIdentity", "IpAddress", "Integer32", "iso")
StorageType, TruthValue, RowStatus, DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "StorageType", "TruthValue", "RowStatus", "DisplayString", "TextualConvention")
ciscoGslbHealthMonMIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 9, 9, 600))
ciscoGslbHealthMonMIB.setRevisions(('2007-04-09 00:00', '2006-12-04 00:00',))
if mibBuilder.loadTexts: ciscoGslbHealthMonMIB.setLastUpdated('200704090000Z')
if mibBuilder.loadTexts: ciscoGslbHealthMonMIB.setOrganization('Cisco Systems, Inc.')
ciscoGslbHealthMonMIBNotifs = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 600, 0))
ciscoGslbHealthMonMIBObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 600, 1))
ciscoGslbHealthMonMIBConform = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 600, 2))
cghMonNotifControl = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 600, 1, 1))
cghMonNotifObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 600, 1, 2))
cghMonKalGeneralConfig = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 600, 1, 3))
cghMonKal = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 600, 1, 4))
cghMonNsQueryDomainName = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 600, 1, 3, 1), SnmpAdminString().clone('.')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cghMonNsQueryDomainName.setStatus('current')
cghMonCappHash = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 600, 1, 3, 2), SnmpAdminString().clone('hash-not-set')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cghMonCappHash.setStatus('current')
cghMonHttpHeadPath = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 600, 1, 3, 3), SnmpAdminString().clone('/')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cghMonHttpHeadPath.setStatus('current')
cghMonHttpHeadConnTermMethod = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 600, 1, 3, 4), CiscoGslbTerminationMethod().clone('reset')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cghMonHttpHeadConnTermMethod.setStatus('current')
cghMonTcpConnTermMethod = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 600, 1, 3, 5), CiscoGslbTerminationMethod().clone('reset')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cghMonTcpConnTermMethod.setStatus('current')
cghMonCraDecay = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 600, 1, 3, 6), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 65535)).clone(2)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cghMonCraDecay.setStatus('current')
cghMonTotalConfiguredProbes = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 600, 1, 3, 7), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cghMonTotalConfiguredProbes.setStatus('current')
cghMonDroppedKalNotifs = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 600, 1, 3, 8), Unsigned32()).setUnits('traps').setMaxAccess("readonly")
if mibBuilder.loadTexts: cghMonDroppedKalNotifs.setStatus('current')
cghMonKalTrapRateLimit = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 600, 1, 3, 9), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 65535)).clone(25)).setUnits('traps per minute').setMaxAccess("readonly")
if mibBuilder.loadTexts: cghMonKalTrapRateLimit.setStatus('current')
cghMonKalParameterTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 600, 1, 3, 10), )
if mibBuilder.loadTexts: cghMonKalParameterTable.setStatus('current')
cghMonKalParameterEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 600, 1, 3, 10, 1), ).setIndexNames((0, "CISCO-GSLB-HEALTH-MON-MIB", "cghMonKalParameterMethod"))
if mibBuilder.loadTexts: cghMonKalParameterEntry.setStatus('current')
cghMonKalParameterMethod = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 600, 1, 3, 10, 1, 1), CiscoGslbKeepaliveMethod())
if mibBuilder.loadTexts: cghMonKalParameterMethod.setStatus('current')
cghMonKalParameterRate = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 600, 1, 3, 10, 1, 2), CiscoGslbKeepaliveRate()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cghMonKalParameterRate.setStatus('current')
cghMonKalParameterMinimumFrequency = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 600, 1, 3, 10, 1, 3), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 65535))).setUnits('seconds').setMaxAccess("readcreate")
if mibBuilder.loadTexts: cghMonKalParameterMinimumFrequency.setStatus('current')
cghMonKalParameterResponseTimeout = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 600, 1, 3, 10, 1, 4), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 65535))).setUnits('seconds').setMaxAccess("readcreate")
if mibBuilder.loadTexts: cghMonKalParameterResponseTimeout.setStatus('current')
cghMonKalParameterFastRetries = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 600, 1, 3, 10, 1, 5), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 65535))).setUnits('retries').setMaxAccess("readcreate")
if mibBuilder.loadTexts: cghMonKalParameterFastRetries.setStatus('current')
cghMonKalParameterFastSuccessfulProbes = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 600, 1, 3, 10, 1, 6), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 65535))).setUnits('probes').setMaxAccess("readcreate")
if mibBuilder.loadTexts: cghMonKalParameterFastSuccessfulProbes.setStatus('current')
cghMonKalParameterDestPort = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 600, 1, 3, 10, 1, 7), InetPortNumber().subtype(subtypeSpec=ValueRangeConstraint(1, 65535))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cghMonKalParameterDestPort.setStatus('current')
cghMonKalParameterStorageType = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 600, 1, 3, 10, 1, 8), StorageType().clone('nonVolatile')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cghMonKalParameterStorageType.setStatus('current')
cghMonKalParameterRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 600, 1, 3, 10, 1, 9), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cghMonKalParameterRowStatus.setStatus('current')
cghMonKalConfigTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 600, 1, 4, 1), )
if mibBuilder.loadTexts: cghMonKalConfigTable.setStatus('current')
cghMonKalConfigEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 600, 1, 4, 1, 1), ).setIndexNames((0, "CISCO-GSLB-HEALTH-MON-MIB", "cghMonKalId"))
if mibBuilder.loadTexts: cghMonKalConfigEntry.setStatus('current')
cghMonKalId = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 600, 1, 4, 1, 1, 1), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 65535)))
if mibBuilder.loadTexts: cghMonKalId.setStatus('current')
cghMonKalTargetType = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 600, 1, 4, 1, 1, 2), CiscoGslbKeepaliveTargetType()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cghMonKalTargetType.setStatus('current')
cghMonKalMethod = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 600, 1, 4, 1, 1, 3), CiscoGslbKeepaliveMethod()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cghMonKalMethod.setStatus('current')
cghMonKalAnswerId = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 600, 1, 4, 1, 1, 4), Unsigned32()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cghMonKalAnswerId.setStatus('current')
cghMonKalPrimaryTargetType = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 600, 1, 4, 1, 1, 5), InetAddressType().clone('ipv4')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cghMonKalPrimaryTargetType.setStatus('current')
cghMonKalPrimaryTarget = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 600, 1, 4, 1, 1, 6), InetAddress()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cghMonKalPrimaryTarget.setStatus('current')
cghMonKalEnable = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 600, 1, 4, 1, 1, 7), TruthValue().clone('false')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cghMonKalEnable.setStatus('current')
cghMonKalDelay = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 600, 1, 4, 1, 1, 8), Unsigned32()).setUnits('milliseconds').setMaxAccess("readcreate")
if mibBuilder.loadTexts: cghMonKalDelay.setStatus('current')
cghMonKalKalapType = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 600, 1, 4, 1, 1, 9), CiscoGslbKalapType().clone('kalapByVip')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cghMonKalKalapType.setStatus('current')
cghMonKalTagName = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 600, 1, 4, 1, 1, 10), SnmpAdminString()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cghMonKalTagName.setStatus('current')
cghMonKalDestPort = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 600, 1, 4, 1, 1, 11), InetPortNumber().subtype(subtypeSpec=ValueRangeConstraint(1, 65535))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cghMonKalDestPort.setStatus('current')
cghMonKalCappSecure = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 600, 1, 4, 1, 1, 12), TruthValue().clone('false')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cghMonKalCappSecure.setStatus('current')
cghMonKalCappHash = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 600, 1, 4, 1, 1, 13), SnmpAdminString()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cghMonKalCappHash.setStatus('current')
cghMonKalQueryDomainName = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 600, 1, 4, 1, 1, 14), SnmpAdminString()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cghMonKalQueryDomainName.setStatus('current')
cghMonKalPath = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 600, 1, 4, 1, 1, 15), SnmpAdminString()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cghMonKalPath.setStatus('current')
cghMonKalHostTag = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 600, 1, 4, 1, 1, 16), SnmpAdminString()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cghMonKalHostTag.setStatus('current')
cghMonKalSecondaryTargetType = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 600, 1, 4, 1, 1, 17), InetAddressType().clone('ipv4')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cghMonKalSecondaryTargetType.setStatus('current')
cghMonKalSecondaryTarget = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 600, 1, 4, 1, 1, 18), InetAddress()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cghMonKalSecondaryTarget.setStatus('current')
cghMonKalFastRetries = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 600, 1, 4, 1, 1, 19), Unsigned32()).setUnits('retries').setMaxAccess("readcreate")
if mibBuilder.loadTexts: cghMonKalFastRetries.setStatus('current')
cghMonKalFastSuccessfulProbes = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 600, 1, 4, 1, 1, 20), Unsigned32()).setUnits('probes').setMaxAccess("readcreate")
if mibBuilder.loadTexts: cghMonKalFastSuccessfulProbes.setStatus('current')
cghMonKalStorageType = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 600, 1, 4, 1, 1, 21), StorageType().clone('nonVolatile')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cghMonKalStorageType.setStatus('current')
cghMonKalRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 600, 1, 4, 1, 1, 22), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cghMonKalRowStatus.setStatus('current')
cghMonKalSharedAnswerTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 600, 1, 4, 2), )
if mibBuilder.loadTexts: cghMonKalSharedAnswerTable.setStatus('current')
cghMonKalSharedAnswerEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 600, 1, 4, 2, 1), ).setIndexNames((0, "CISCO-GSLB-DNS-MIB", "cgdAnswerId"), (0, "CISCO-GSLB-HEALTH-MON-MIB", "cghMonKalId"))
if mibBuilder.loadTexts: cghMonKalSharedAnswerEntry.setStatus('current')
cghMonKalShAnsStoragetype = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 600, 1, 4, 2, 1, 1), StorageType().clone('nonVolatile')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cghMonKalShAnsStoragetype.setStatus('current')
cghMonKalShAnsRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 600, 1, 4, 2, 1, 2), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cghMonKalShAnsRowStatus.setStatus('current')
cghMonKalStatsTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 600, 1, 4, 3), )
if mibBuilder.loadTexts: cghMonKalStatsTable.setStatus('current')
cghMonKalStatsEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 600, 1, 4, 3, 1), )
cghMonKalConfigEntry.registerAugmentions(("CISCO-GSLB-HEALTH-MON-MIB", "cghMonKalStatsEntry"))
cghMonKalStatsEntry.setIndexNames(*cghMonKalConfigEntry.getIndexNames())
if mibBuilder.loadTexts: cghMonKalStatsEntry.setStatus('current')
cghMonKalStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 600, 1, 4, 3, 1, 1), CiscoGslbKeepaliveStatus()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cghMonKalStatus.setStatus('current')
cghMonKalSentProbes = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 600, 1, 4, 3, 1, 2), Counter32()).setUnits('probes').setMaxAccess("readonly")
if mibBuilder.loadTexts: cghMonKalSentProbes.setStatus('current')
cghMonKalReceivedProbes = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 600, 1, 4, 3, 1, 3), Counter32()).setUnits('probes').setMaxAccess("readonly")
if mibBuilder.loadTexts: cghMonKalReceivedProbes.setStatus('current')
cghMonKalPositiveProbes = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 600, 1, 4, 3, 1, 4), Counter32()).setUnits('probes').setMaxAccess("readonly")
if mibBuilder.loadTexts: cghMonKalPositiveProbes.setStatus('current')
cghMonKalNegativeProbes = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 600, 1, 4, 3, 1, 5), Counter32()).setUnits('probes').setMaxAccess("readonly")
if mibBuilder.loadTexts: cghMonKalNegativeProbes.setStatus('current')
cghMonKalStatusTransitions = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 600, 1, 4, 3, 1, 6), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cghMonKalStatusTransitions.setStatus('current')
cghMonKalDynamicLoad = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 600, 1, 4, 3, 1, 7), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cghMonKalDynamicLoad.setStatus('current')
cghMonKalVIPFailovers = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 600, 1, 4, 3, 1, 8), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cghMonKalVIPFailovers.setStatus('current')
cghMonKalNotifEnable = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 600, 1, 1, 1), TruthValue().clone('false')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cghMonKalNotifEnable.setStatus('current')
cghMonKalPrevStatus = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 600, 1, 2, 1), CiscoGslbKeepaliveStatus()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: cghMonKalPrevStatus.setStatus('current')
ciscoGslbKalEventStatus = NotificationType((1, 3, 6, 1, 4, 1, 9, 9, 600, 0, 1)).setObjects(("SNMPv2-MIB", "sysName"), ("CISCO-GSLB-HEALTH-MON-MIB", "cghMonKalPrimaryTargetType"), ("CISCO-GSLB-HEALTH-MON-MIB", "cghMonKalPrimaryTarget"), ("CISCO-GSLB-HEALTH-MON-MIB", "cghMonKalMethod"), ("CISCO-GSLB-HEALTH-MON-MIB", "cghMonKalPrevStatus"), ("CISCO-GSLB-HEALTH-MON-MIB", "cghMonKalStatus"), ("CISCO-GSLB-HEALTH-MON-MIB", "cghMonDroppedKalNotifs"))
if mibBuilder.loadTexts: ciscoGslbKalEventStatus.setStatus('current')
ciscoGslbHealthMonMIBCompliances = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 600, 2, 1))
ciscoGslbHealthMonMIBGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 600, 2, 2))
ciscoGslbHealthMonMIBCompliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 9, 9, 600, 2, 1, 1)).setObjects(("CISCO-GSLB-HEALTH-MON-MIB", "ciscoGslbGeneralConfigGroup"), ("CISCO-GSLB-HEALTH-MON-MIB", "ciscoGslbKalParameterGroup"), ("CISCO-GSLB-HEALTH-MON-MIB", "ciscoGslbKalConfigGroup"), ("CISCO-GSLB-HEALTH-MON-MIB", "ciscoGslbKalStatsGroup"), ("CISCO-GSLB-HEALTH-MON-MIB", "ciscoGslbKalNotifControlGroup"), ("CISCO-GSLB-HEALTH-MON-MIB", "ciscoGslbKalNotifObjectsGroup"), ("CISCO-GSLB-HEALTH-MON-MIB", "ciscoGslbKalNotificationGroup"), ("CISCO-GSLB-HEALTH-MON-MIB", "ciscoGslbGeneralConfigRateLimitGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ciscoGslbHealthMonMIBCompliance = ciscoGslbHealthMonMIBCompliance.setStatus('deprecated')
ciscoGslbGeneralConfigGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 600, 2, 2, 1)).setObjects(("CISCO-GSLB-HEALTH-MON-MIB", "cghMonNsQueryDomainName"), ("CISCO-GSLB-HEALTH-MON-MIB", "cghMonCappHash"), ("CISCO-GSLB-HEALTH-MON-MIB", "cghMonHttpHeadPath"), ("CISCO-GSLB-HEALTH-MON-MIB", "cghMonHttpHeadConnTermMethod"), ("CISCO-GSLB-HEALTH-MON-MIB", "cghMonCraDecay"), ("CISCO-GSLB-HEALTH-MON-MIB", "cghMonTcpConnTermMethod"), ("CISCO-GSLB-HEALTH-MON-MIB", "cghMonTotalConfiguredProbes"), ("CISCO-GSLB-HEALTH-MON-MIB", "cghMonDroppedKalNotifs"), ("CISCO-GSLB-HEALTH-MON-MIB", "cghMonKalTrapRateLimit"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ciscoGslbGeneralConfigGroup = ciscoGslbGeneralConfigGroup.setStatus('current')
ciscoGslbKalParameterGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 600, 2, 2, 2)).setObjects(("CISCO-GSLB-HEALTH-MON-MIB", "cghMonKalParameterRate"), ("CISCO-GSLB-HEALTH-MON-MIB", "cghMonKalParameterMinimumFrequency"), ("CISCO-GSLB-HEALTH-MON-MIB", "cghMonKalParameterResponseTimeout"), ("CISCO-GSLB-HEALTH-MON-MIB", "cghMonKalParameterFastRetries"), ("CISCO-GSLB-HEALTH-MON-MIB", "cghMonKalParameterFastSuccessfulProbes"), ("CISCO-GSLB-HEALTH-MON-MIB", "cghMonKalParameterDestPort"), ("CISCO-GSLB-HEALTH-MON-MIB", "cghMonKalParameterStorageType"), ("CISCO-GSLB-HEALTH-MON-MIB", "cghMonKalParameterRowStatus"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ciscoGslbKalParameterGroup = ciscoGslbKalParameterGroup.setStatus('current')
ciscoGslbKalConfigGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 600, 2, 2, 3)).setObjects(("CISCO-GSLB-HEALTH-MON-MIB", "cghMonKalTargetType"), ("CISCO-GSLB-HEALTH-MON-MIB", "cghMonKalMethod"), ("CISCO-GSLB-HEALTH-MON-MIB", "cghMonKalAnswerId"), ("CISCO-GSLB-HEALTH-MON-MIB", "cghMonKalPrimaryTargetType"), ("CISCO-GSLB-HEALTH-MON-MIB", "cghMonKalPrimaryTarget"), ("CISCO-GSLB-HEALTH-MON-MIB", "cghMonKalEnable"), ("CISCO-GSLB-HEALTH-MON-MIB", "cghMonKalDelay"), ("CISCO-GSLB-HEALTH-MON-MIB", "cghMonKalKalapType"), ("CISCO-GSLB-HEALTH-MON-MIB", "cghMonKalTagName"), ("CISCO-GSLB-HEALTH-MON-MIB", "cghMonKalDestPort"), ("CISCO-GSLB-HEALTH-MON-MIB", "cghMonKalCappSecure"), ("CISCO-GSLB-HEALTH-MON-MIB", "cghMonKalCappHash"), ("CISCO-GSLB-HEALTH-MON-MIB", "cghMonKalQueryDomainName"), ("CISCO-GSLB-HEALTH-MON-MIB", "cghMonKalPath"), ("CISCO-GSLB-HEALTH-MON-MIB", "cghMonKalHostTag"), ("CISCO-GSLB-HEALTH-MON-MIB", "cghMonKalSecondaryTargetType"), ("CISCO-GSLB-HEALTH-MON-MIB", "cghMonKalSecondaryTarget"), ("CISCO-GSLB-HEALTH-MON-MIB", "cghMonKalFastRetries"), ("CISCO-GSLB-HEALTH-MON-MIB", "cghMonKalFastSuccessfulProbes"), ("CISCO-GSLB-HEALTH-MON-MIB", "cghMonKalStorageType"), ("CISCO-GSLB-HEALTH-MON-MIB", "cghMonKalRowStatus"), ("CISCO-GSLB-HEALTH-MON-MIB", "cghMonKalShAnsStoragetype"), ("CISCO-GSLB-HEALTH-MON-MIB", "cghMonKalShAnsRowStatus"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ciscoGslbKalConfigGroup = ciscoGslbKalConfigGroup.setStatus('current')
ciscoGslbKalStatsGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 600, 2, 2, 4)).setObjects(("CISCO-GSLB-HEALTH-MON-MIB", "cghMonKalStatus"), ("CISCO-GSLB-HEALTH-MON-MIB", "cghMonKalSentProbes"), ("CISCO-GSLB-HEALTH-MON-MIB", "cghMonKalReceivedProbes"), ("CISCO-GSLB-HEALTH-MON-MIB", "cghMonKalPositiveProbes"), ("CISCO-GSLB-HEALTH-MON-MIB", "cghMonKalNegativeProbes"), ("CISCO-GSLB-HEALTH-MON-MIB", "cghMonKalStatusTransitions"), ("CISCO-GSLB-HEALTH-MON-MIB", "cghMonKalDynamicLoad"), ("CISCO-GSLB-HEALTH-MON-MIB", "cghMonKalVIPFailovers"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ciscoGslbKalStatsGroup = ciscoGslbKalStatsGroup.setStatus('current')
ciscoGslbKalNotifControlGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 600, 2, 2, 5)).setObjects(("CISCO-GSLB-HEALTH-MON-MIB", "cghMonKalNotifEnable"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ciscoGslbKalNotifControlGroup = ciscoGslbKalNotifControlGroup.setStatus('current')
ciscoGslbKalNotifObjectsGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 600, 2, 2, 6)).setObjects(("CISCO-GSLB-HEALTH-MON-MIB", "cghMonKalPrevStatus"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ciscoGslbKalNotifObjectsGroup = ciscoGslbKalNotifObjectsGroup.setStatus('current')
ciscoGslbKalNotificationGroup = NotificationGroup((1, 3, 6, 1, 4, 1, 9, 9, 600, 2, 2, 7)).setObjects(("CISCO-GSLB-HEALTH-MON-MIB", "ciscoGslbKalEventStatus"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ciscoGslbKalNotificationGroup = ciscoGslbKalNotificationGroup.setStatus('current')
ciscoGslbGeneralConfigRateLimitGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 600, 2, 2, 8)).setObjects(("CISCO-GSLB-HEALTH-MON-MIB", "cghMonDroppedKalNotifs"), ("CISCO-GSLB-HEALTH-MON-MIB", "cghMonKalTrapRateLimit"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ciscoGslbGeneralConfigRateLimitGroup = ciscoGslbGeneralConfigRateLimitGroup.setStatus('current')
mibBuilder.exportSymbols("CISCO-GSLB-HEALTH-MON-MIB", cghMonKalPrimaryTargetType=cghMonKalPrimaryTargetType, cghMonKal=cghMonKal, cghMonKalStatus=cghMonKalStatus, cghMonKalDelay=cghMonKalDelay, cghMonKalShAnsStoragetype=cghMonKalShAnsStoragetype, cghMonKalPath=cghMonKalPath, cghMonKalHostTag=cghMonKalHostTag, cghMonKalAnswerId=cghMonKalAnswerId, ciscoGslbKalNotifControlGroup=ciscoGslbKalNotifControlGroup, cghMonNsQueryDomainName=cghMonNsQueryDomainName, cghMonKalParameterFastRetries=cghMonKalParameterFastRetries, ciscoGslbKalConfigGroup=ciscoGslbKalConfigGroup, cghMonHttpHeadConnTermMethod=cghMonHttpHeadConnTermMethod, cghMonKalNegativeProbes=cghMonKalNegativeProbes, cghMonKalConfigEntry=cghMonKalConfigEntry, ciscoGslbKalEventStatus=ciscoGslbKalEventStatus, cghMonKalStatusTransitions=cghMonKalStatusTransitions, cghMonNotifControl=cghMonNotifControl, cghMonKalEnable=cghMonKalEnable, cghMonKalParameterResponseTimeout=cghMonKalParameterResponseTimeout, cghMonKalCappHash=cghMonKalCappHash, ciscoGslbGeneralConfigGroup=ciscoGslbGeneralConfigGroup, cghMonKalDynamicLoad=cghMonKalDynamicLoad, cghMonKalDestPort=cghMonKalDestPort, cghMonKalGeneralConfig=cghMonKalGeneralConfig, cghMonKalSharedAnswerEntry=cghMonKalSharedAnswerEntry, cghMonKalCappSecure=cghMonKalCappSecure, cghMonKalParameterTable=cghMonKalParameterTable, cghMonHttpHeadPath=cghMonHttpHeadPath, cghMonCappHash=cghMonCappHash, cghMonKalTagName=cghMonKalTagName, cghMonKalRowStatus=cghMonKalRowStatus, ciscoGslbHealthMonMIBGroups=ciscoGslbHealthMonMIBGroups, cghMonKalParameterMethod=cghMonKalParameterMethod, cghMonKalParameterMinimumFrequency=cghMonKalParameterMinimumFrequency, cghMonKalParameterRate=cghMonKalParameterRate, cghMonKalKalapType=cghMonKalKalapType, ciscoGslbHealthMonMIBConform=ciscoGslbHealthMonMIBConform, cghMonKalParameterStorageType=cghMonKalParameterStorageType, cghMonDroppedKalNotifs=cghMonDroppedKalNotifs, cghMonKalTargetType=cghMonKalTargetType, cghMonKalNotifEnable=cghMonKalNotifEnable, 
cghMonCraDecay=cghMonCraDecay, cghMonKalTrapRateLimit=cghMonKalTrapRateLimit, cghMonKalSecondaryTargetType=cghMonKalSecondaryTargetType, cghMonKalQueryDomainName=cghMonKalQueryDomainName, cghMonKalFastRetries=cghMonKalFastRetries, cghMonTcpConnTermMethod=cghMonTcpConnTermMethod, cghMonKalPositiveProbes=cghMonKalPositiveProbes, cghMonKalParameterFastSuccessfulProbes=cghMonKalParameterFastSuccessfulProbes, ciscoGslbHealthMonMIBCompliances=ciscoGslbHealthMonMIBCompliances, cghMonKalStatsEntry=cghMonKalStatsEntry, ciscoGslbHealthMonMIB=ciscoGslbHealthMonMIB, cghMonKalMethod=cghMonKalMethod, cghMonKalSecondaryTarget=cghMonKalSecondaryTarget, ciscoGslbKalNotifObjectsGroup=ciscoGslbKalNotifObjectsGroup, PYSNMP_MODULE_ID=ciscoGslbHealthMonMIB, ciscoGslbHealthMonMIBObjects=ciscoGslbHealthMonMIBObjects, ciscoGslbHealthMonMIBCompliance=ciscoGslbHealthMonMIBCompliance, cghMonKalConfigTable=cghMonKalConfigTable, cghMonKalParameterEntry=cghMonKalParameterEntry, cghMonKalStorageType=cghMonKalStorageType, cghMonKalId=cghMonKalId, cghMonKalPrevStatus=cghMonKalPrevStatus, ciscoGslbHealthMonMIBNotifs=ciscoGslbHealthMonMIBNotifs, cghMonTotalConfiguredProbes=cghMonTotalConfiguredProbes, cghMonNotifObjects=cghMonNotifObjects, cghMonKalStatsTable=cghMonKalStatsTable, cghMonKalSentProbes=cghMonKalSentProbes, cghMonKalReceivedProbes=cghMonKalReceivedProbes, cghMonKalPrimaryTarget=cghMonKalPrimaryTarget, ciscoGslbKalParameterGroup=ciscoGslbKalParameterGroup, ciscoGslbGeneralConfigRateLimitGroup=ciscoGslbGeneralConfigRateLimitGroup, cghMonKalParameterDestPort=cghMonKalParameterDestPort, ciscoGslbKalNotificationGroup=ciscoGslbKalNotificationGroup, cghMonKalParameterRowStatus=cghMonKalParameterRowStatus, cghMonKalFastSuccessfulProbes=cghMonKalFastSuccessfulProbes, cghMonKalShAnsRowStatus=cghMonKalShAnsRowStatus, cghMonKalSharedAnswerTable=cghMonKalSharedAnswerTable, cghMonKalVIPFailovers=cghMonKalVIPFailovers, ciscoGslbKalStatsGroup=ciscoGslbKalStatsGroup)
| [
"dcwangmit01@gmail.com"
] | dcwangmit01@gmail.com |
d0521ace1b2b1d05327842085cb9e3c88b7ff56e | 6532b2c6dfefa27a7f5c3c790f13dfc1e42cc703 | /mlbstats/wsgi.py | b178aa582e80746b55bd0bee8b2755a5fc27ee86 | [] | no_license | aclark4life/mlbstats | c59ba44b17541364fc4da16dbd3383f7ea05497f | b939f32dadeed6642b23de3e57624c0b0661b87c | refs/heads/master | 2023-04-29T07:15:36.471625 | 2021-05-18T14:35:12 | 2021-05-18T14:35:12 | 348,089,359 | 0 | 0 | null | 2021-05-18T14:35:12 | 2021-03-15T18:57:54 | Python | UTF-8 | Python | false | false | 397 | py | """
WSGI config for mlbstats project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "mlbstats.settings.dev")
application = get_wsgi_application()
| [
"aclark@aclark.net"
] | aclark@aclark.net |
a3367b66199f1c62e07674b577ce9be357b098d4 | 3c63087e4c6371b92e03e20a1770d60ba1ec5f89 | /pytest_services/__init__.py | 547303de6a745f4cf8bd0de02c6caa013bddbf0f | [
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] | permissive | mgorny/pytest-services | d989fec749954ad82d3e430aca9ed24fbe79f33c | 9a18fc16093e4d239b8981998a05bc54058fa6d4 | refs/heads/master | 2022-12-17T14:03:59.055517 | 2020-09-25T04:13:53 | 2020-09-25T04:13:53 | 298,503,817 | 0 | 0 | MIT | 2020-09-25T07:43:28 | 2020-09-25T07:43:27 | null | UTF-8 | Python | false | false | 53 | py | """pytest-services package."""
__version__ = '2.2.0'
| [
"bubenkoff@gmail.com"
] | bubenkoff@gmail.com |
f3c73006fa5e1104d3838c08a0ff4d64b2ed7f7f | fcd00440495737c72f2ec0183e78002a6c9f0afe | /sample_plot_script_2.py | b51aa77d168c9ec09d9eb0a026c13281b5054960 | [] | no_license | Riashat/Plotting_Scripts | caa7ba20fbaa81e6f415c8311ec3d481f45e72c1 | 1b0b6d61aac0f3dbc0c6af207ce351d45e88411f | refs/heads/master | 2021-01-23T18:22:04.741566 | 2018-05-08T16:43:25 | 2018-05-08T16:43:25 | 102,787,346 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 8,040 | py | import matplotlib.pyplot as plt
import time
import numpy as np
import pandas as pd
from numpy import genfromtxt
import pdb
from scipy import stats
eps = np.arange(1000)
#HalfCheetah Policy Activations
hs_leaky_relu = np.load('/Users/Riashat/Documents/PhD_Research/OpenAIBaselines/ReproducibilityML/Results/rllab_results/baselines_ddpg_results/Results/HalfCheetah_Policy_Act_Leaky_Relu_all_exp_rewards.npy')
hs_relu = np.load('/Users/Riashat/Documents/PhD_Research/OpenAIBaselines/ReproducibilityML/Results/rllab_results/baselines_ddpg_results/Results/HalfCheetah_Policy_Act_Relu_all_exp_rewards.npy')
hs_tanh = np.load('/Users/Riashat/Documents/PhD_Research/OpenAIBaselines/ReproducibilityML/Results/rllab_results/baselines_ddpg_results/Results/HalfCheetah_Policy_Act_TanH_all_exp_rewards.npy')
mean_hs_leaky = np.mean(hs_leaky_relu, axis=1)
mean_hs_relu = np.mean(hs_relu, axis=1)
mean_hs_tanh = np.mean(hs_tanh, axis=1)
std_hs_leaky = np.std(hs_leaky_relu, axis=1)
std_hs_relu = np.std(hs_relu, axis=1)
std_hs_tanh = np.std(hs_tanh, axis=1)
last_hs_leaky = mean_hs_leaky[-1]
last_error_hs_leaky = stats.sem(hs_leaky_relu[-1, :], axis=None, ddof=0)
print ("last_hs_leaky", last_hs_leaky)
print ("last_error_hs_leaky", last_error_hs_leaky)
last_hs_relu = mean_hs_relu[-1]
last_error_hs_relu = stats.sem(hs_relu[-1, :], axis=None, ddof=0)
print ("last_hs_relu", last_hs_relu)
print ("last_error_hs_relu", last_error_hs_relu)
last_hs_tanh = mean_hs_tanh[-1]
last_error_hs_tanh = stats.sem(hs_tanh[-1, :], axis=None, ddof=0)
print ("last_hs_tanh", last_hs_tanh)
print ("last_error_hs_tanh", last_error_hs_tanh)
#Hopper Policy Activations
ho_leaky_relu = np.load('/Users/Riashat/Documents/PhD_Research/OpenAIBaselines/ReproducibilityML/Results/rllab_results/baselines_ddpg_results/Results/Hopper_Policy_Activation_Leaky_Relu_all_exp_rewards.npy')
ho_relu = np.load('/Users/Riashat/Documents/PhD_Research/OpenAIBaselines/ReproducibilityML/Results/rllab_results/baselines_ddpg_results/Results/Hopper_Policy_Activation_Relu_all_exp_rewards.npy')
ho_tanh = np.load('/Users/Riashat/Documents/PhD_Research/OpenAIBaselines/ReproducibilityML/Results/rllab_results/baselines_ddpg_results/Results/Hopper_Policy_Activation_TanH_all_exp_rewards.npy')
mean_ho_leaky = np.mean(ho_leaky_relu, axis=1)
mean_ho_relu = np.mean(ho_relu, axis=1)
mean_ho_tanh = np.mean(ho_tanh, axis=1)
std_ho_leaky = np.std(ho_leaky_relu, axis=1)
std_ho_relu = np.std(ho_relu, axis=1)
std_ho_tanh = np.std(ho_tanh, axis=1)
last_ho_leaky = mean_ho_leaky[-1]
last_error_ho_leaky = stats.sem(ho_leaky_relu[-1, :], axis=None, ddof=0)
print ("last_ho_leaky", last_ho_leaky)
print ("last_error_ho_leaky", last_error_ho_leaky)
last_ho_relu = mean_ho_relu[-1]
last_error_ho_relu = stats.sem(ho_relu[-1, :], axis=None, ddof=0)
print ("last_ho_relu", last_ho_relu)
print ("last_error_ho_relu", last_error_ho_relu)
last_ho_tanh = mean_ho_tanh[-1]
last_error_ho_tanh = stats.sem(ho_tanh[-1, :], axis=None, ddof=0)
print ("last_ho_tanh", last_ho_tanh)
print ("last_error_ho_tanh", last_error_ho_tanh)
def multiple_plot(average_vals_list, std_dev_list, traj_list, other_labels, env_name, smoothing_window=5, no_show=False, ignore_std=False, limit=None, extra_lines=None):
# average_vals_list - list of numpy averages
# std_dev list - standard deviation or error
# traj_list - list of timestep (x-axis) quantities
# other_labels - the labels for the lines
# Env-name the header
# smoothing window how much to smooth using a running average.
fig = plt.figure(figsize=(16, 8))
# fig = plt.figure(figsize=(15, 10))
colors = ["#1f77b4", "#ff7f0e", "#d62728", "#9467bd", "#2ca02c", "#8c564b", "#e377c2", "#bcbd22", "#17becf"]
color_index = 0
ax = plt.subplot() # Defines ax variable by creating an empty plot
offset = 1
# Set the tick labels font
for label in (ax.get_xticklabels() + ax.get_yticklabels()):
label.set_fontname('Arial')
label.set_fontsize(22)
if traj_list is None:
traj_list = [None]*len(average_vals_list)
index = 0
for average_vals, std_dev, label, trajs in zip(average_vals_list, std_dev_list, other_labels[:len(average_vals_list)], traj_list):
index += 1
rewards_smoothed_1 = pd.Series(average_vals).rolling(smoothing_window, min_periods=smoothing_window).mean()[:limit]
if limit is None:
limit = len(rewards_smoothed_1)
rewards_smoothed_1 = rewards_smoothed_1[:limit]
std_dev = std_dev[:limit]
if trajs is None:
trajs = list(range(len(rewards_smoothed_1)))
else:
plt.ticklabel_format(style='sci', axis='x', scilimits=(0,0))
ax.xaxis.get_offset_text().set_fontsize(20)
fill_color = colors[color_index]#choice(colors, 1)
color_index += 1
cum_rwd_1, = plt.plot(trajs, rewards_smoothed_1, label=label, color=fill_color)
offset += 3
if not ignore_std:
#plt.errorbar(trajs[::25 + offset], rewards_smoothed_1[::25 + offset], yerr=std_dev[::25 + offset], linestyle='None', color=fill_color, capsize=5)
plt.fill_between(trajs, rewards_smoothed_1 + std_dev, rewards_smoothed_1 - std_dev, alpha=0.3, edgecolor=fill_color, facecolor=fill_color)
if extra_lines:
for lin in extra_lines:
plt.plot(trajs, np.repeat(lin, len(rewards_smoothed_1)), linestyle='-.', color = colors[color_index], linewidth=2.5, label=other_labels[index])
color_index += 1
index += 1
axis_font = {'fontname':'Arial', 'size':'28'}
plt.legend(loc='lower right', prop={'size' : 16})
plt.xlabel("Iterations", **axis_font)
if traj_list:
plt.xlabel("Timesteps", **axis_font)
else:
plt.xlabel("Iterations", **axis_font)
plt.ylabel("Average Return", **axis_font)
plt.title("%s"% env_name, **axis_font)
if no_show:
fig.savefig('%s.png' % env_name, dpi=fig.dpi)
else:
plt.show()
return fig
def get_plot(stats1, stats2, stats3, smoothing_window=5, noshow=False):
## Figure 1
fig = plt.figure(figsize=(70, 40))
rewards_smoothed_1 = pd.Series(stats1).rolling(smoothing_window, min_periods=smoothing_window).mean()
rewards_smoothed_2 = pd.Series(stats2).rolling(smoothing_window, min_periods=smoothing_window).mean()
rewards_smoothed_3 = pd.Series(stats3).rolling(smoothing_window, min_periods=smoothing_window).mean()
cum_rwd_1, = plt.plot(eps, rewards_smoothed_1, color = "red", linewidth=2.5, label="Policy Network Activation = ReLU")
plt.fill_between( eps, rewards_smoothed_1 + std_hs_relu, rewards_smoothed_1 - std_hs_relu, alpha=0.2, edgecolor='red', facecolor='red')
cum_rwd_2, = plt.plot(eps, rewards_smoothed_2, color = "blue", linewidth=2.5, label="Policy Network Activation = TanH" )
plt.fill_between( eps, rewards_smoothed_2 + std_hs_tanh, rewards_smoothed_2 - std_hs_tanh, alpha=0.2, edgecolor='blue', facecolor='blue')
cum_rwd_3, = plt.plot(eps, rewards_smoothed_3, color = "black", linewidth=2.5, label="Policy Network Activation = Leaky ReLU" )
plt.fill_between( eps, rewards_smoothed_3 + std_hs_leaky, rewards_smoothed_3 - std_hs_leaky, alpha=0.2, edgecolor='black', facecolor='black')
plt.legend(handles=[cum_rwd_1, cum_rwd_2, cum_rwd_3], fontsize=22)
plt.xlabel("Number of Iterations",fontsize=26)
plt.ylabel("Average Returns", fontsize=26)
plt.title("DDPG with HalfCheetah Environment - Actor Network Activations", fontsize=30)
plt.show()
fig.savefig('ddpg_halfcheetah_policy_activations.png')
return fig
def main():
timesteps_per_epoch = 2000
max_timesteps = 2e6
plot_multiple(
[mean_ho_relu, mean_ho_tanh, mean_ho_leaky],
[std_ho_relu, std_ho_tanh, std_ho_leaky],
[range(0, max_timesteps, timesteps_per_epoch)]*3,
["relu", "tanh", "leaky_relu"],
"HalfCheetah-v1 (DDPG, Policy Network Activation)")
if __name__ == '__main__':
main() | [
"riashat.islam.93@gmail.com"
] | riashat.islam.93@gmail.com |
7c282c200459bd553ba96067cc096214498e9fc3 | a2080cbcf9694ad03690769cfc64d85a57f1d9d5 | /tests/type/test_directives.py | 83147cd74eda1f3a3c5616fe6181eed9445f576d | [
"MIT"
] | permissive | wuyuanyi135/graphql-core | 84196a47aec0f9508db3f8aadb8951b9fc9b9fe0 | 169ae7bced0f515603e97f1def925f3d062e5009 | refs/heads/main | 2023-04-13T11:38:10.815573 | 2021-05-02T05:17:29 | 2021-05-02T05:21:58 | 363,327,364 | 1 | 0 | MIT | 2021-05-01T05:05:29 | 2021-05-01T05:05:28 | null | UTF-8 | Python | false | false | 8,101 | py | from pytest import raises # type: ignore
from graphql.language import DirectiveLocation, DirectiveDefinitionNode, Node
from graphql.type import GraphQLArgument, GraphQLDirective, GraphQLInt, GraphQLString
def describe_type_system_directive():
def can_create_instance():
arg = GraphQLArgument(GraphQLString, description="arg description")
node = DirectiveDefinitionNode()
locations = [DirectiveLocation.SCHEMA, DirectiveLocation.OBJECT]
directive = GraphQLDirective(
name="test",
locations=[DirectiveLocation.SCHEMA, DirectiveLocation.OBJECT],
args={"arg": arg},
description="test description",
is_repeatable=True,
ast_node=node,
)
assert directive.name == "test"
assert directive.locations == locations
assert directive.args == {"arg": arg}
assert directive.is_repeatable is True
assert directive.description == "test description"
assert directive.extensions is None
assert directive.ast_node is node
def defines_a_directive_with_no_args():
locations = [DirectiveLocation.QUERY]
directive = GraphQLDirective("Foo", locations=locations)
assert directive.name == "Foo"
assert directive.args == {}
assert directive.is_repeatable is False
assert directive.extensions is None
assert directive.locations == locations
def defines_a_directive_with_multiple_args():
args = {
"foo": GraphQLArgument(GraphQLString),
"bar": GraphQLArgument(GraphQLInt),
}
locations = [DirectiveLocation.QUERY]
directive = GraphQLDirective("Foo", locations=locations, args=args)
assert directive.name == "Foo"
assert directive.args == args
assert directive.is_repeatable is False
assert directive.locations == locations
def defines_a_repeatable_directive():
locations = [DirectiveLocation.QUERY]
directive = GraphQLDirective("Foo", is_repeatable=True, locations=locations)
assert directive.name == "Foo"
assert directive.args == {}
assert directive.is_repeatable is True
assert directive.locations == locations
def directive_accepts_input_types_as_arguments():
# noinspection PyTypeChecker
directive = GraphQLDirective(
name="Foo", locations=[], args={"arg": GraphQLString} # type: ignore
)
arg = directive.args["arg"]
assert isinstance(arg, GraphQLArgument)
assert arg.type is GraphQLString
def directive_accepts_strings_as_locations():
# noinspection PyTypeChecker
directive = GraphQLDirective(
name="Foo", locations=["SCHEMA", "OBJECT"] # type: ignore
)
assert directive.locations == [
DirectiveLocation.SCHEMA,
DirectiveLocation.OBJECT,
]
def directive_has_str():
directive = GraphQLDirective("foo", [])
assert str(directive) == "@foo"
def directive_has_repr():
directive = GraphQLDirective("foo", [])
assert repr(directive) == "<GraphQLDirective(@foo)>"
def can_compare_with_other_source_directive():
locations = [DirectiveLocation.QUERY]
directive = GraphQLDirective("Foo", locations)
assert directive == directive
assert not directive != directive
assert not directive == {}
assert directive != {}
same_directive = GraphQLDirective("Foo", locations)
assert directive == same_directive
assert not directive != same_directive
other_directive = GraphQLDirective("Bar", locations)
assert not directive == other_directive
assert directive != other_directive
other_locations = [DirectiveLocation.MUTATION]
other_directive = GraphQLDirective("Foo", other_locations)
assert not directive == other_directive
assert directive != other_directive
other_directive = GraphQLDirective("Foo", locations, is_repeatable=True)
assert not directive == other_directive
assert directive != other_directive
other_directive = GraphQLDirective("Foo", locations, description="other")
assert not directive == other_directive
assert directive != other_directive
def rejects_an_unnamed_directive():
with raises(TypeError) as exc_info:
# noinspection PyTypeChecker
GraphQLDirective(None, locations=[]) # type: ignore
assert str(exc_info.value) == "Directive must be named."
def rejects_a_directive_with_incorrectly_typed_name():
with raises(TypeError) as exc_info:
# noinspection PyTypeChecker
GraphQLDirective({"bad": True}, locations=[]) # type: ignore
assert str(exc_info.value) == "The directive name must be a string."
def rejects_a_directive_with_incorrectly_typed_args():
with raises(TypeError) as exc_info:
# noinspection PyTypeChecker
GraphQLDirective("Foo", locations=[], args=["arg"]) # type: ignore
assert str(exc_info.value) == (
"Foo args must be a dict with argument names as keys."
)
with raises(TypeError) as exc_info:
# noinspection PyTypeChecker
GraphQLDirective(
"Foo",
locations=[],
args={1: GraphQLArgument(GraphQLString)}, # type: ignore
)
assert str(exc_info.value) == (
"Foo args must be a dict with argument names as keys."
)
with raises(TypeError) as exc_info:
# noinspection PyTypeChecker
GraphQLDirective(
"Foo",
locations=[],
args={"arg": GraphQLDirective("Bar", [])}, # type: ignore
)
assert str(exc_info.value) == (
"Foo args must be GraphQLArgument or input type objects."
)
def rejects_a_directive_with_incorrectly_typed_repeatable_flag():
with raises(TypeError) as exc_info:
# noinspection PyTypeChecker
GraphQLDirective("Foo", locations=[], is_repeatable=None) # type: ignore
assert str(exc_info.value) == "Foo is_repeatable flag must be True or False."
def rejects_a_directive_with_undefined_locations():
with raises(TypeError) as exc_info:
# noinspection PyTypeChecker
GraphQLDirective("Foo", locations=None) # type: ignore
assert str(exc_info.value) == (
"Foo locations must be specified"
" as a collection of DirectiveLocation enum values."
)
def rejects_a_directive_with_incorrectly_typed_locations():
with raises(TypeError) as exc_info:
# noinspection PyTypeChecker
GraphQLDirective("Foo", locations="bad") # type: ignore
assert (
str(exc_info.value) == "Foo locations must be specified"
" as a collection of DirectiveLocation enum values."
)
with raises(TypeError) as exc_info:
# noinspection PyTypeChecker
GraphQLDirective("Foo", locations=["bad"]) # type: ignore
assert str(exc_info.value) == (
"Foo locations must be specified"
" as a collection of DirectiveLocation enum values."
)
def rejects_a_directive_with_incorrectly_typed_description():
with raises(TypeError) as exc_info:
# noinspection PyTypeChecker
GraphQLDirective(
"Foo", locations=[], description={"bad": True} # type: ignore
)
assert str(exc_info.value) == "Foo description must be a string."
def rejects_a_directive_with_incorrectly_typed_ast_node():
with raises(TypeError) as exc_info:
# noinspection PyTypeChecker
GraphQLDirective("Foo", locations=[], ast_node=Node()) # type: ignore
assert str(exc_info.value) == (
"Foo AST node must be a DirectiveDefinitionNode."
)
| [
"cito@online.de"
] | cito@online.de |
7a197fe4699e4b70d97f31eb1a9343d397616030 | 53e2aabd85f3154f5c3c79d26fadf094ff694d92 | /Etl.Highlight/test.py | f2b6790f36ab963b6eafc13ac3ecccbdba9a427c | [] | no_license | yuchanmo/Upbit | 69446b08eb86692df5b2c68886d71310d7e226f2 | d7106579d644286b1305c0de370501821d6f499c | refs/heads/master | 2023-08-18T01:56:45.210567 | 2021-09-28T12:15:43 | 2021-09-28T12:15:43 | 411,268,241 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,961 | py | from dbconnector import sqlserver
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
import datetime as dt
import numpy as np
import requests
from sklearn.preprocessing import MinMaxScaler
df = pd.read_sql("SELECT * FROM [CoinStar].[dbo].[MarketPrice] where market = 'KRW-OMG' and regdate between '2021-05-01 10:29' and '2021-05-01 10:42:05'",sqlserver)
df.set_index('regdate',inplace=True)
df['trade_price']
df = df.sort_index()
print(df['market'].unique())
df[['cur_acc_trade_price','cur_trade_volume']] = df[['acc_trade_price','acc_trade_volume']].diff().apply(pd.Series)
trade_number_df = df[['cur_acc_trade_price','cur_trade_volume','trade_price']]
trade_number_df[['coef_cur_acc_trade_price','coef_cur_trade_volume','coef_trade_price']] = ((trade_number_df - trade_number_df.shift(1))/trade_number_df.shift(1)).apply(pd.Series)
cur_trade_number = trade_number_df.iloc[-1]
df['avg_5'] = df['trade_price'].rolling(window=5,min_periods=1).mean()
df['avg_10'] = df['trade_price'].rolling(window=10,min_periods=1).mean()
df['avg_20'] = df['trade_price'].rolling(window=20,min_periods=1).mean()
df[['trade_price','avg_5','avg_10','avg_20']].plot()
plt.show()
tail_df = df.tail(5)
y = tail_df[['avg_5','avg_10','avg_20']].values
y= MinMaxScaler().fit(y).transform(y)
np.linspace(0,1,5)
x = np.linspace(0,1,5).reshape(-1,1)
coefs = LinearRegression().fit(x,y).coef_
last_row = df.iloc[-1]
cur_trade_number['avg_5'] = last_row['avg_5']
cur_trade_number['avg_10'] = last_row['avg_10']
cur_trade_number['avg_20'] = last_row['avg_20']
cur_trade_number['avg_5_coef'] = coefs[0][0]
cur_trade_number['avg_10_coef'] = coefs[1][0]
cur_trade_number['avg_20_coef'] = coefs[2][0]
res_df = cur_trade_number.to_frame().T
res_df['posi_coef'] = np.all(coefs>0)
res_df['correct_order_avg'] = last_row['avg_5'] >= last_row['avg_10'] >= last_row['avg_20']
res_df['market'] = last_row['market']
res_df.reset_index()
res_df.iloc[0] | [
"mojjijji@gmail.com"
] | mojjijji@gmail.com |
fe0eb734470c894706cf2b66195c37013924e275 | 49b33bd602e9e003c4017eb1d9852dec31a34cad | /wowza_ec2_bootstrapper/actions/set_config.py | 0a76884b6618e83ae8709df4c9ef0a0e9c433cec | [] | no_license | nocarryr/wowza-ec2-bootstrapper | 4dba7180c594a708edd6502c5530a1ab5a98ab12 | 0a4f9e31f4ea8422ea3234d5a76144d5917017a1 | refs/heads/master | 2021-01-10T05:46:29.216619 | 2015-10-20T16:20:34 | 2015-10-20T16:20:34 | 43,161,033 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,102 | py | import os
import requests
from wowza_ec2_bootstrapper.actions import BaseAction
class SetConfig(BaseAction):
action_fields = dict(
server_license={
'required':False,
'help':'Wowza License Key',
},
users={
'required':False,
'help':'A list of dicts containing admin user data ("name", "password" and "group")'
},
publish_users={
'required':False,
'help':'A list of dicts containing publisher users ("name", "password")'
},
conf_files={
'required':False,
'help':
'''A list of dicts for conf files to replace containing:
"path" : conf filename relative to Wowza root
"content" : contents for the file (if not given, "url" must be supplied)
"url": url to retrieve the contents for the file
'''
},
)
@property
def conf_path(self):
p = getattr(self, '_conf_path', None)
if p is None:
c = self.config.wowza
p = self._conf_path = os.path.join(c.root_path, 'conf')
return p
def build_filename(self, *args):
return os.path.join(self.conf_path, *args)
def do_action(self, **kwargs):
c = self.config.wowza
for key in ['server_license', 'users', 'publish_users', 'conf_files']:
if key in kwargs:
c.setdefault(key, kwargs[key])
if c.get('server_license'):
self.set_server_license()
if c.get('users'):
self.set_users()
if c.get('publish_users'):
self.set_publish_users()
if c.get('conf_files'):
self.copy_files()
def set_server_license(self):
c = self.config.wowza
fn = self.build_filename('Server.license')
with open(fn, 'w') as f:
f.write(c.server_license)
def set_users(self):
c = self.config.wowza
fn = self.build_filename('admin.password')
lines = []
keys = ['name', 'password', 'group']
for user in c.users:
user.setdefault('group', 'admin')
lines.append(' '.join([user.get(key) for key in keys]))
with open(fn, 'w') as f:
f.write('\n'.join(lines))
def set_publish_users(self):
c = self.config.wowza
fn = self.build_filename('publish.password')
lines = []
keys = ['name', 'password']
for user in c.publish_users:
lines.append(' '.join([user.get(key) for key in keys]))
with open(fn, 'w') as f:
f.write('\n'.join(lines))
def copy_files(self):
c = self.config.wowza
for file_info in c.conf_files:
content = file_info.get('content')
if content is None:
url = file_info['url']
r = requests.get(url)
content = r.content
fn = os.path.join(c.root_path, file_info['path'])
with open(fn, 'wb') as f:
f.write(content)
| [
"matt@nomadic-recording.com"
] | matt@nomadic-recording.com |
766714b71a03a0bfe48dbaae5c793aea50540062 | ddf1267a1a7cb01e70e3b12ad4a7bfaf291edb3e | /src/user/migrations/0026_auto_20200117_1954.py | 0d698365c342a20c5d63847872745908c6e77769 | [
"MIT"
] | permissive | Garinmckayl/researchhub-backend | 46a17513c2c9928e51db4b2ce5a5b62df453f066 | cd135076d9a3b49a08456f7ca3bb18ff35a78b95 | refs/heads/master | 2023-06-17T04:37:23.041787 | 2021-05-18T01:26:46 | 2021-05-18T01:26:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 355 | py | # Generated by Django 2.2.8 on 2020-01-17 19:54
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('user', '0025_auto_20200117_1954'),
]
operations = [
migrations.RenameField(
model_name='action',
old_name='hub',
new_name='hubs',
),
]
| [
"lightning.lu7@gmail.com"
] | lightning.lu7@gmail.com |
6cee066db02d74992825bd071d758556b612e038 | b29589f95734682663ae6cd40ab00eb0a94b6d87 | /longwave/lblnew_20160916/study__g1_threshold/h2o_1_co2_1_o3_0_n2o_0_ch4_0_o2_0/band03c_wn_720_800/nv_1000/dv_0.001/wgt_flux_1/crd_1cb396b/atmpro_mls/param.py | c194611e3e4c28d338cbb8ea4eb4f450381498ad | [] | no_license | qAp/offline_radiation_notebooks | 02c2b2414ef1410f235776001a668f7df0b9f1cf | 44fb62391c27e4e314ad68ae3e91f6111b3172c5 | refs/heads/master | 2020-04-15T14:31:34.675322 | 2019-07-08T04:45:54 | 2019-07-08T04:45:54 | 43,118,324 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 371 | py | DIR_FORTRAN = '/chia_cluster/home/jackyu/radiation/crd/LW/examples/separate_g_groups/study__lblnew_g1_threshold/h2o_1_co2_1_o3_0_n2o_0_ch4_0_o2_0/band03c_wn_720_800/nv_1000/dv_0.001/wgt_flux_1/crd_1cb396b/atmpro_mls'
PARAM = {'molecule': ['h2o', 'co2'], 'band': '3c', 'atmpro': 'mls', 'tsfc': 294, 'nv': 1000, 'dv': 0.001, 'option_wgt_flux': 1, 'commitnumber': '1cb396b'} | [
"llacque@gmail.com"
] | llacque@gmail.com |
efef0ce5c060b5976beca74ab1d52ce316fc24fe | 03a22b3c00dc5188da3ed1a19077874e3ad786c5 | /futoin/cid/rmstool.py | aea53cbbfb428d40c425fdabe9c6f726303a4657 | [
"Apache-2.0"
] | permissive | iforgotband/cid-tool | fdf050169e5aa895ded9d9efb2741860ecd91a34 | f7d0e53057ecff156cf52c8dcae80c6408fb37d8 | refs/heads/master | 2021-08-15T02:51:17.717986 | 2017-11-17T07:59:24 | 2017-11-17T07:59:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,169 | py | #
# Copyright 2015-2017 (c) Andrey Galkin
#
from .subtool import SubTool
__all__ = ['RmsTool']
class RmsTool(SubTool):
__slots__ = ()
ALLOWED_HASH_TYPES = [
'md5',
'sha1',
'sha256',
'sha512',
]
def autoDetect(self, config):
if self._autoDetectRMS(config):
return True
return super(RmsTool, self).autoDetect(config)
def rmsUpload(self, config, rms_pool, package_list):
raise NotImplementedError(self._name)
def rmsPromote(self, config, src_pool, dst_pool, package_list):
raise NotImplementedError(self._name)
def rmsGetList(self, config, rms_pool, package_hint):
raise NotImplementedError(self._name)
def rmsRetrieve(self, config, rms_pool, package_list):
raise NotImplementedError(self._name)
def rmsPoolCreate(self, config, rms_pool):
raise NotImplementedError(self._name)
def rmsPoolList(self, config):
raise NotImplementedError(self._name)
def rmsGetHash(self, config, rms_pool, package, hash_type):
raise NotImplementedError(self._name)
def _autoDetectRMS(self, config):
if config.get('rms', None) == self._name:
return True
return False
def rmsProcessChecksums(self, config, rms_pool, package_list):
ret = []
for package in package_list:
package = package.split('@', 1)
filename = package[0]
if len(package) == 2:
hash_str = package[1]
hash_type, hash = hash_str.split(':', 1)
if hash_type not in self.ALLOWED_HASH_TYPES:
self._errorExit(
'Unsupported hash type "{0}"'.format(hash_type))
self._info('Verifying {2} hash of {0} in {1}'.format(
filename, rms_pool, hash_type))
rms_hash = self.rmsGetHash(
config, rms_pool, filename, hash_type)
if rms_hash != hash:
self._errorExit(
'RMS hash mismatch "{0}" != "{1}"'.format(rms_hash, hash))
ret.append(filename)
return ret
def rmsCalcHash(self, file_name, hash_type):
hashlib = self._ext.hashlib
hf = hashlib.new(hash_type)
with open(file_name, 'rb') as f:
for chunk in iter(lambda: f.read(65536), ''):
if not chunk:
break
hf.update(chunk)
return "{0}:{1}".format(hash_type, hf.hexdigest())
def rmsCalcHashes(self, file_name):
hashlib = self._ext.hashlib
hashes = {}
for hash_type in self.ALLOWED_HASH_TYPES:
hashes[hash_type] = hashlib.new(hash_type)
with open(file_name, 'rb') as f:
for chunk in iter(lambda: f.read(65536), ''):
if not chunk:
break
for hash_type in self.ALLOWED_HASH_TYPES:
hashes[hash_type].update(chunk)
for hash_type in self.ALLOWED_HASH_TYPES:
hashes[hash_type] = hashes[hash_type].hexdigest()
return hashes
| [
"andrey@futoin.org"
] | andrey@futoin.org |
43058dec0c17f852dc746ef45be8dccbff4c8e27 | 8bb4a472344fda15985ac322d14e8f4ad79c7553 | /Python3-Core/src/main/prompto/debug/ResumeReason.py | 4fae21a528ecd8d43a0ef4d870d04b77d3b012dc | [] | no_license | prompto/prompto-python3 | c6b356f5af30c6826730ba7f2ad869f341983a2d | 64bd3d97d4702cc912097d41d961f7ab3fd82bee | refs/heads/master | 2022-12-24T12:33:16.251468 | 2022-11-27T17:37:56 | 2022-11-27T17:37:56 | 32,623,633 | 4 | 0 | null | 2019-05-04T11:06:05 | 2015-03-21T07:17:25 | Python | UTF-8 | Python | false | false | 96 | py | package presto.debug;
public enum ResumeReason {
STEP_OVER,
STEP_INTO,
STEP_OUT,
RESUMED
}
| [
"eric.vergnaud@wanadoo.fr"
] | eric.vergnaud@wanadoo.fr |
6ce7bc39c05aee5ebed9f3370a45057595d99e7f | 9f99485ac5479c1e6169e71d88a33c31ff591f4e | /migrations/versions/0021.py | 4a35b8ca73df34418a507a045bfdbb03a1f849f7 | [
"MIT"
] | permissive | NewAcropolis/api | b8c65554ca78ac0e87fbef46f5f2fbecb6d7700a | 34367f55d3c9ee5bf870956ffc90fd23da559b15 | refs/heads/master | 2023-08-31T09:27:02.125549 | 2023-08-26T22:15:10 | 2023-08-26T22:15:10 | 99,582,634 | 1 | 1 | MIT | 2023-08-26T22:15:11 | 2017-08-07T13:46:23 | Python | UTF-8 | Python | false | false | 773 | py | """empty message
Revision ID: 0021 allow access_area nullable
Revises: 0020 add users
Create Date: 2019-02-02 13:42:31.511289
"""
# revision identifiers, used by Alembic.
revision = '0021 allow access_area nullable'
down_revision = '0020 add users'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Relax users.access_area so the column accepts NULL values."""
    # ### commands auto generated by Alembic - please adjust! ###
    column_changes = dict(existing_type=sa.VARCHAR(), nullable=True)
    op.alter_column('users', 'access_area', **column_changes)
    # ### end Alembic commands ###
def downgrade():
    """Revert users.access_area to NOT NULL (inverse of upgrade)."""
    # ### commands auto generated by Alembic - please adjust! ###
    column_changes = dict(existing_type=sa.VARCHAR(), nullable=False)
    op.alter_column('users', 'access_area', **column_changes)
    # ### end Alembic commands ###
| [
"kenlt.uk@gmail.com"
] | kenlt.uk@gmail.com |
2242d8d1034270b8cfee1d019fcede2872faaa7d | 42d3d37a3dd22402154da4f4bd020afd7b7bad58 | /examples/adspygoogle/adwords/v201206/campaign_management/add_location_extension_override.py | db059a6c791048bd092aec25eff36d441d6cba7d | [
"Apache-2.0"
] | permissive | nearlyfreeapps/python-googleadwords | 1388316ec4f8d9d6074688ec4742872b34b67636 | b30d90f74248cfd5ca52967e9ee77fc4cd1b9abc | refs/heads/master | 2020-06-03T23:05:08.865535 | 2012-08-02T21:46:16 | 2012-08-02T21:46:16 | 5,278,295 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,536 | py | #!/usr/bin/python
# -*- coding: UTF-8 -*-
#
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example adds an ad extension override to a given campaign. To get
campaigns, run get_campaigns.py.
Tags: GeoLocationService.get, AdExtensionOverrideService.mutate
Api: AdWordsOnly
"""
__author__ = 'api.kwinter@gmail.com (Kevin Winter)'
import os
import sys
sys.path.insert(0, os.path.join('..', '..', '..', '..', '..'))
# Import appropriate classes from the client library.
from adspygoogle import AdWordsClient
# Placeholder ids: replace both values with real ids before running.
ad_id = 'INSERT_AD_GROUP_AD_ID_HERE'
ad_extension_id = 'INSERT_AD_EXTENSION_ID_HERE'
def main(client, ad_id, ad_extension_id):
  """Look up geo data for a fixed address and attach it to the given ad
  as a location ad-extension override (Python 2 example code).

  Args:
    client: initialized AdWordsClient instance.
    ad_id: id of the ad group ad to override.
    ad_extension_id: id of the existing location ad extension.
  """
  # Initialize appropriate service.
  geo_location_service = client.GetGeoLocationService(
      'https://adwords-sandbox.google.com', 'v201206')
  ad_extension_override_service = client.GetAdExtensionOverrideService(
      'https://adwords-sandbox.google.com', 'v201206')

  # Construct selector and get geo location info for a given address.
  selector = {
      'addresses': [
          {
              'streetAddress': '1600 Amphitheatre Parkway',
              'cityName': 'Mountain View',
              'provinceCode': 'US-CA',
              'provinceName': 'California',
              'postalCode': '94043',
              'countryCode': 'US'
          }
      ]
  }
  # Get() returns one result per address in the selector; we sent one.
  geo_location = geo_location_service.Get(selector)[0]

  # Construct operations and add ad extension override.
  operations = [
      {
          'operator': 'ADD',
          'operand': {
              'adId': ad_id,
              'adExtension': {
                  'xsi_type': 'LocationExtension',
                  'id': ad_extension_id,
                  'address': geo_location['address'],
                  'geoPoint': geo_location['geoPoint'],
                  'encodedLocation': geo_location['encodedLocation'],
                  'source': 'ADWORDS_FRONTEND',
                  # Optional fields.
                  'companyName': 'ACME Inc.',
                  'phoneNumber': '(650) 253-0000'
                  # 'iconMediaId': '...',
                  # 'imageMediaId': '...'
              },
              # Optional fields.
              'overrideInfo': {
                  'LocationOverrideInfo': {
                      'radius': '5',
                      'radiusUnits': 'MILES'
                  }
              }
          }
      }
  ]
  ad_extensions = ad_extension_override_service.Mutate(operations)[0]

  # Display results.
  for ad_extension in ad_extensions['value']:
    print ('Ad extension override with id \'%s\' for ad with id \'%s\' was '
           'added.' % (ad_extension['adExtension']['id'], ad_extension['adId']))

  # NOTE: bare 'print' below is a Python 2 statement (emits a blank line).
  print
  print ('Usage: %s units, %s operations' % (client.GetUnits(),
                                             client.GetOperations()))
if __name__ == '__main__':
  # Initialize client object.
  # The path argument points at the client library root five levels up.
  client = AdWordsClient(path=os.path.join('..', '..', '..', '..', '..'))
  main(client, ad_id, ad_extension_id)
| [
"ahalligan@nearlyfreehosting.com"
] | ahalligan@nearlyfreehosting.com |
b62071de2eebb19932cc24e94198d7d9b2505bc4 | 6af51aa6b83175acb256524beaf7972c92b58a74 | /python/QuickDS/implementations/list.py | 005373c66ec1445062c0ff1d12e97a97ac8aaabd | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | rishi772001/QuickDS | 17dc0aa52bfd65dc12f55cd33309dafdf66f52e7 | dfe29840c81a557f9dbc20dbc5088057e2d7b987 | refs/heads/master | 2023-02-21T04:34:52.354396 | 2021-01-25T13:39:58 | 2021-01-25T13:39:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 744 | py | """
@Author: rishi
"""
# Import required modules
import random as rd
# Build List class
class List:
    """Static helpers that generate lists of random integers, intended as
    quick test fixtures.  (Note: the class name shadows nothing here, but
    avoid importing it next to ``typing.List``.)
    """

    @staticmethod
    def create_random_list(length=10):
        """Return a list of ``length`` random ints drawn from [0, length].

        :param length: length of the list (also the inclusive upper bound
            of the generated values)
        :return: list of random numbers
        """
        return [rd.randint(0, length) for _ in range(length)]

    @staticmethod
    def create_random_2d_list(row_length=4, col_length=4, max_value=10):
        """Return a 2d list: ``col_length`` inner lists of ``row_length``
        random ints in [0, max_value].

        :param row_length: number of elements in each inner list
        :param col_length: number of inner lists
        :param max_value: inclusive upper bound for values; defaults to 10,
            preserving the previously hard-coded behaviour
        :return: 2d list of random numbers
        """
        return [[rd.randint(0, max_value) for _ in range(row_length)]
                for _ in range(col_length)]
| [
"noreply@github.com"
] | rishi772001.noreply@github.com |
a032ae93a455de494d3208476a65b775ec1ba715 | 39cd9aa81927c20d85d1b65e55523455626ee902 | /python_work/chapter_6/exercises/6_6_polling.py | e73625d621232cceca5ee36ce3f1237f6ea02530 | [] | no_license | SMS-NED16/crash-course-python | acf363562a813f7deb36614dc935be4ed2d07fee | e6e6cb787d208f51f114f71331c43af1ddc1e4c2 | refs/heads/master | 2020-03-09T02:29:35.241621 | 2018-04-21T16:09:16 | 2018-04-21T16:09:16 | 128,541,065 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 543 | py | #dict of people and their favourite languages
favourite_languages = {
'sarah' : 'c',
'john' : 'python',
'brad' : 'c++',
'mike' : 'ruby',
'jessica' : 'java'
}
#list of people
respondents = ['sarah', 'jessica', 'andrew', 'mike', 'gilfoyle',
'peyton', 'joaquin', 'brad']
#parse list of names, check if they have taken the survey
for name in respondents:
if name not in favourite_languages.keys():
print(name.title() + ", you should take this survey.")
else:
print("Thank you for taking the survey, " + name.title() + ".")
print() | [
"saadmsiddiqui96@gmail.com"
] | saadmsiddiqui96@gmail.com |
0b1e777da2a004715ed496397a5acf96ebf6b323 | 7c5ed3cbbd777d6cf3789f48e82cedbf2cec0539 | /functions/net/icmp/ping-nodes-with-thread-and-save-results.py | fde628356b9022e0bfd0b49ad5bb297d472fa470 | [] | no_license | pench3r/ShellScriptForDevOps | 8735b22b04fd2c5c4de76c5c6ebe63563c215c5c | 2df2911ebe567daeeae493a57a31f356e305089a | refs/heads/master | 2023-03-23T09:23:13.964646 | 2021-03-22T02:46:31 | 2021-03-22T02:46:31 | 350,180,519 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,114 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Created by PyCharm.
File Name: LinuxBashShellScriptForOps:ping-nodes-with-thread-and-save-results.py
Version: 0.0.1
Author: dgden
Author Email: liuhongda@didiglobal.com
Create Date: 2020/1/19
Create Time: 11:07
Description: ping nodes with multi-threading and save results into a file
Long Description:
References:
Prerequisites: pip install ping
Development Status: 3 - Alpha, 5 - Production/Stable
Environment: Console
Intended Audience: System Administrators, Developers, End Users/Desktop
License: Freeware, Freely Distributable
Natural Language: English, Chinese (Simplified)
Operating System: POSIX :: Linux, Microsoft :: Windows
Programming Language: Python :: 2.6
Programming Language: Python :: 2.7
Topic: Utilities
"""
import time
from multiprocessing.pool import ThreadPool
from threading import Lock
import ping
def is_node_alive_with_icmp_ping(ip):
    """Ping *ip* once; return True when the host answered.

    On packet loss, append "<ip> <timestamp>" to the shared result file
    (``dbf``) and return False.  Relies on the module-level ``lock``,
    ``dbf`` and ``now`` globals defined in the ``__main__`` block.
    """
    percent_lost, mrtt, artt = ping.quiet_ping(ip, timeout=1, count=1, psize=64)
    if percent_lost == 0:
        return True
    # 'with lock' guarantees the lock is released even if the file write
    # raises; the original acquire()/release() pair leaked the lock on error.
    with lock:
        with open(dbf, 'a') as fp:
            fp.write(ip + " " + now + "\n" * 2)
    return False
if __name__ == '__main__':
    # Hosts to monitor; unreachable ones are logged to the result file.
    nodes_list = [
        '192.168.88.3',
        '192.168.88.12',
        '192.168.88.4',
        '192.168.88.8',
        '192.168.88.15',
    ]
    # Result file is truncated and stamped with the start time on launch.
    dbf = "ping_nodes_result.txt"
    now = time.strftime('%Y%m%d%H%M%S', time.localtime(time.time()))
    with open(dbf, 'w') as fp_init:
        fp_init.write(now + "\n")
    # Serializes appends to the result file across worker threads.
    lock = Lock()
    try:
        while True:
            # One thread per node, capped at 254 workers per round.
            processes_count = 254 if len(nodes_list) > 254 else len(nodes_list)
            pool = ThreadPool(processes=processes_count)
            pool.map(is_node_alive_with_icmp_ping, nodes_list)
            pool.close()
            pool.join()
            time.sleep(1)
    except KeyboardInterrupt:
        print("canceled")
| [
"nageshangzei@gmail.com"
] | nageshangzei@gmail.com |
2856e10c6c27a1b2d3da8c8fbb72be1733014051 | a2211f0ef8297a77200a0b2eec8ba3476989b7e6 | /itcast/02_python核心编程/02_linux系统编程/day02_线程/demo09_轮询法-消除竞争状态.py | 1112eea51a6e9cce12ac7813f2e0563a914bd6ed | [] | no_license | qq1197977022/learnPython | f720ecffd2a70044f1644f3527f4c29692eb2233 | ba294b8fa930f784304771be451d7b5981b794f3 | refs/heads/master | 2020-03-25T09:23:12.407510 | 2018-09-16T00:41:56 | 2018-09-16T00:42:00 | 143,663,862 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 805 | py | from threading import Thread
g_num = 0
g_flag = True
def fun1():
global g_num
global g_flag
if g_flag:
for i in range(1000000):
g_num += 1
g_flag = False
print(f'线程1{g_num:+>20}\t{id(g_num)}')
def fun2():
global g_num
global g_flag
# 轮询
while True: # 如果不要求实时, 设置休眠延时更好
if not g_flag:
for i in range(1000000):
g_num += 1
print(f'线程2{g_num:*>20}\t{id(g_num)}')
break
if __name__ == '__main__':
print(f'主线程{g_num:->50}\t{id(g_num)}')
p1 = Thread(target=fun1)
p1.start()
print(f'主线程{g_num:->60}\t\t{id(g_num)}')
p2 = Thread(target=fun2)
p2.start()
print(f'主线程{g_num:->70}\t{id(g_num)}')
| [
"1197977022@qq.com"
] | 1197977022@qq.com |
b113c837d2138196777156e7cf6f982f1f752475 | 2f0d56cdcc4db54f9484b3942db88d79a4215408 | /.history/Python_Learning/lesson17_20200503135756.py | a34d2eb01fc8a23dd6720f62c11d594ccb6dc60d | [] | no_license | xiangxing98/xiangxing98.github.io | 8571c8ee8509c0bccbb6c2f3740494eedc53e418 | 23618666363ecc6d4acd1a8662ea366ddf2e6155 | refs/heads/master | 2021-11-17T19:00:16.347567 | 2021-11-14T08:35:01 | 2021-11-14T08:35:01 | 33,877,060 | 7 | 1 | null | 2017-07-01T16:42:49 | 2015-04-13T15:35:01 | HTML | UTF-8 | Python | false | false | 1,178 | py | # -*- encoding: utf-8 -*-
# !/usr/bin/env python
'''
@File : lesson17.py
@Time : 2020/04/19 21:42:24
@Author : Stone_Hou
@Version : 1.0
@Contact : xiangxing985529@163.com
@License : (C)Copyright 2010-2020, Stone_Hou
@Desc : None
'''
# here put the import lib
# Type conversion demonstrations: rebinding one name to different types.
a = 1
print(a, type(a))
# 1 <class 'int'>
a = 'hello'
print(a, type(a))
# hello <class 'str'>
a = True
print(a, type(a))
# True <class 'bool'>

# print('Hello'+1)
# TypeError: can only concatenate str (not "int") to str
# Exception raised: TypeError
# can only concatenate str (not "int") to str
#   File "F:\Github\xiangxing98.github.io\Python_Learning\lesson17.py"
#     line 28, in <module>
#     print('Hello'+1)
print('Hello'+str(1))

# print('hello%d' % '123')
# Exception raised: TypeError
# %d format: a number is required, not str
print("Change String to integer\n")
print('hello%d' % int('123'))
# hello123

# These comparisons are all True.
print(int('123') == 123)
# True
print(float('3.3') == 3.3)
# True
print(str(111) == '111')
# True
# Fixed: the original line 'print(if(bool(0) == False):False)' was a
# SyntaxError; the intended demonstration is the comparison below.
print(bool(0) == False)
# True

# bool() truthiness examples (expected results shown in comments).
print("bool \n")
bool(-123)
# True
bool(0)
# False
bool('abc')
# True
bool('False')
# True (non-empty string, despite its text)
bool('')
# False
| [
"xiangxing985529@163.com"
] | xiangxing985529@163.com |
723cafe05499d104d0a4bcd08b6a751d0732586e | 7d23afa21ca9653ea337fbb01ba54e9488de8a80 | /autosuggest/asgi.py | 043460afdf64407137e1bebcf16eb3810e094fef | [] | no_license | clarkeustaquio/NLP-WordSuggestion | 65915d5ea11f9684ecd7bea97fdfcdf71d6b9c77 | cb681f24e1e55570b8d943bf631d8a6bd2e24285 | refs/heads/main | 2023-07-08T16:19:41.713217 | 2021-08-06T03:22:04 | 2021-08-06T03:22:04 | 386,520,843 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 399 | py | """
ASGI config for autosuggest project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'autosuggest.settings')
application = get_asgi_application()
| [
"clark.eustaquio@gmail.com"
] | clark.eustaquio@gmail.com |
7003ec4ae5e020487c4ff6ad970d7a07dd84868b | 3d16bcf91c546dfc638bf9e48d7690e8aed37ee2 | /tests/Cpl/Dm/Persistent/_0test/realtime/windows/mingw_w64/mytoolchain.py | 4ca8928d0d14090c111788daa7ab0f54e695bd58 | [] | no_license | johnttaylor/colony.core | 7c3aa43abdd564689e1540795b8044228b97271c | e00902d33c9224a34e9f68edb02c18eb9571b09f | refs/heads/master | 2023-07-24T08:34:04.956247 | 2023-06-20T00:02:55 | 2023-06-20T00:02:55 | 31,176,673 | 2 | 2 | null | 2023-06-17T21:56:08 | 2015-02-22T19:38:07 | C | UTF-8 | Python | false | false | 4,935 | py | #---------------------------------------------------------------------------
# This python module is used to customize a supported toolchain for your
# project specific settings.
#
# Notes:
# - ONLY edit/add statements in the sections marked by BEGIN/END EDITS
# markers.
# - Maintain indentation level and use spaces (it's a python thing)
# - rvalues must be enclosed in quotes (single ' ' or double " ")
# - The structure/class 'BuildValues' contains (at a minimum the
# following data members. Any member not specifically set defaults
# to null/empty string
# .inc
# .asminc
# .cflags
# .cppflags
# .asmflags
# .linkflags
# .linklibs
#
#---------------------------------------------------------------------------
# get definition of the Options structure
from nqbplib.base import BuildValues
from nqbplib.my_globals import NQBP_WORK_ROOT
#===================================================
# BEGIN EDITS/CUSTOMIZATIONS
#---------------------------------------------------
# Set the name for the final output item
FINAL_OUTPUT_NAME = 'a.exe'
# Link unittest directory by object module so that Catch's self-registration mechanism 'works'
unit_test_objects = '_BUILT_DIR_.src/Cpl/Dm/Persistent/_0test'
#
# For build config/variant: "Release"
#
# Set project specific 'base' (i.e always used) options
base_release = BuildValues() # Do NOT comment out this line
base_release.cflags = '-m32 -std=c++11 -Wall -Werror -x c++ -fprofile-arcs -ftest-coverage -DCATCH_CONFIG_FAST_COMPILE'
base_release.linkflags = '-m32 -fprofile-arcs'
base_release.linklibs = '-lgcov'
base_release.firstobjs = unit_test_objects
# Set project specific 'optimized' options
optimzed_release = BuildValues() # Do NOT comment out this line
optimzed_release.cflags = '-O3'
optimzed_release.linklibs = '-lstdc++'
# Set project specific 'debug' options
debug_release = BuildValues() # Do NOT comment out this line
debug_release.linklibs = '-lstdc++'
#
# For build config/variant: "cpp11"
# (note: uses same internal toolchain options as the 'Release' variant,
# only the 'User' options will/are different)
#
# Construct option structs
base_cpp11 = BuildValues()
optimzed_cpp11 = BuildValues()
debug_cpp11 = BuildValues()
# Set 'base' options
base_cpp11.cflags = '-m64 -std=c++11 -Wall -Werror -x c++ -DCATCH_CONFIG_FAST_COMPILE'
base_cpp11.linkflags = '-m64'
base_cpp11.firstobjs = unit_test_objects
# Set 'Optimized' options
optimzed_cpp11.cflags = '-O3'
optimzed_cpp11.linklibs = '-lstdc++'
# Set 'debug' options
debug_cpp11.linklibs = '-lstdc++'
#
# For build config/variant: "win64"
# (note: uses same internal toolchain options as the 'Release' variant,
# only the 'User' options will/are different)
#
# Construct option structs
base_win64 = BuildValues()
optimzed_win64 = BuildValues()
debug_win64 = BuildValues()
# Set 'base' options
base_win64.cflags = '-m64 -std=c++11 -Wall -Werror -x c++ -DCATCH_CONFIG_FAST_COMPILE'
base_win64.linkflags = '-m64'
base_win64.firstobjs = unit_test_objects
# Set 'Optimized' options
optimzed_win64.cflags = '-O3'
optimzed_win64.linklibs = '-lstdc++'
# Set 'debug' options
debug_win64.linklibs = '-lstdc++'
#-------------------------------------------------
# ONLY edit this section if you are ADDING options
# for build configurations/variants OTHER than the
# 'release' build
#-------------------------------------------------
release_opts = { 'user_base':base_release,
'user_optimized':optimzed_release,
'user_debug':debug_release
}
# Add new dictionary of for new build configuration options
cpp11_opts = { 'user_base':base_cpp11,
'user_optimized':optimzed_cpp11,
'user_debug':debug_cpp11
}
# Add new dictionary of for new build configuration options
win64_opts = { 'user_base':base_win64,
'user_optimized':optimzed_win64,
'user_debug':debug_win64
}
# Add new variant option dictionary to # dictionary of
# build variants
build_variants = { 'win32':release_opts,
'win64':win64_opts,
'cpp11':cpp11_opts,
}
#---------------------------------------------------
# END EDITS/CUSTOMIZATIONS
#===================================================
# Capture project/build directory
import os
prjdir = os.path.dirname(os.path.abspath(__file__))
# Select Module that contains the desired toolchain
from nqbplib.toolchains.windows.mingw_w64.console_exe import ToolChain
# Function that instantiates an instance of the toolchain
def create():
tc = ToolChain( FINAL_OUTPUT_NAME, prjdir, build_variants, "win32" )
return tc
| [
"john.t.taylor@gmail.com"
] | john.t.taylor@gmail.com |
1eeb2872ff71e27e27f2fa1eb0bc528b03eae27a | 6d5545faf2af0a6bb565ad698bb824110b40e121 | /WEBAPP/MLmodel/inception_client.py.runfiles/tf_serving/external/org_tensorflow/tensorflow/core/profiler/tfprof_options_pb2.py | 702b65e132b0390574ca5b34aa38f1b9655ea484 | [
"MIT"
] | permissive | sunsuntianyi/mlWebApp_v2 | abb129cd43540b1be51ecc840127d6e40c2151d3 | 5198685bf4c4e8973988722282e863a8eaeb426f | refs/heads/master | 2021-06-23T22:02:38.002145 | 2020-11-20T02:17:43 | 2020-11-20T02:17:43 | 162,194,249 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 184 | py | /private/var/tmp/_bazel_tianyi/f29d1e61689e4e4b318f483932fff4d0/execroot/tf_serving/bazel-out/darwin-opt/genfiles/external/org_tensorflow/tensorflow/core/profiler/tfprof_options_pb2.py | [
"sun.suntianyi@gmail.com"
] | sun.suntianyi@gmail.com |
79c0640dfe81c2911cd35bee527ab1c36f02d83e | 89e6c3548fbdd06178aae712de1ff19004bc2faa | /django_hg/admin.py | 6fc1e1082734fd909d4fff6278782f225444eeac | [
"BSD-2-Clause"
] | permissive | bhgv/ublog_git.hg.repo-django.python-engine | a3f3cdcbacc95ec98f022f9719d3b300dd6541d4 | 74cdae100bff5e8ab8fb9c3e8ba95623333c2d43 | refs/heads/master | 2020-03-23T01:04:07.431749 | 2018-07-25T12:59:21 | 2018-07-25T12:59:21 | 140,899,479 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 283 | py | from my_django.contrib import admin
from django_hg.models import HgRepository, RepositoryUser
class UserInline(admin.TabularInline):
model=RepositoryUser
class HgRepositoryAdmin(admin.ModelAdmin):
inlines=[UserInline,]
admin.site.register(HgRepository, HgRepositoryAdmin)
| [
"bhgv.empire@gmail.com"
] | bhgv.empire@gmail.com |
794c6c0782b7f633bd97356ec66fd21e7256b650 | db24aa665c499aebddfa1f430827c1f71ddd5567 | /tests/test_ast.py | fc221a2de0fd6b9dfce589da64e5dc05028394d0 | [
"MIT"
] | permissive | imclab/jmespath | 4e04c14d9a9b7885a00cc406926177085af3babf | fd8af349160d2ab7fa5b0fa06dfe4fb513f9d7bc | refs/heads/master | 2021-01-14T08:51:20.955670 | 2013-12-19T23:12:51 | 2013-12-19T23:12:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,763 | py | #!/usr/bin/env python
import unittest
from tests import OrderedDict
from jmespath import ast
class TestAST(unittest.TestCase):
    """Tests that build jmespath AST nodes by hand and exercise their
    ``search`` behaviour against plain Python data structures.

    The ``# jmespath: ...`` / ``# ast for ...`` comments in each test name
    the expression the hand-built tree corresponds to.
    """

    def setUp(self):
        pass

    def test_field(self):
        # jmespath: foo
        field = ast.Field('foo')
        match = field.search({'foo': 'bar'})
        self.assertEqual(match, 'bar')

    def test_field_no_match(self):
        # jmespath: foo
        field = ast.Field('foo')
        match = field.search({'bar': 'bar'})
        self.assertEqual(match, None)

    def test_field_when_dict(self):
        # jmespath: foo
        field = ast.Field('foo')
        match = field.search({'foo': {'bar': 'baz'}})
        self.assertEqual(match, {'bar': 'baz'})

    def test_field_when_list(self):
        # jmespath: foo
        field = ast.Field('foo')
        match = field.search({'foo': ['bar', 'baz']})
        self.assertEqual(match, ['bar', 'baz'])

    def test_dot_syntax(self):
        # jmespath: foo.bar
        child = ast.SubExpression(ast.Field('foo'), ast.Field('bar'))
        match = child.search({'foo': {'bar': 'correct', 'baz': 'wrong'}})
        self.assertEqual(match, 'correct')

    def test_multiple_nestings(self):
        # jmespath: foo.bar.baz
        child = ast.SubExpression(
            ast.Field('foo'),
            ast.SubExpression(ast.Field('bar'), ast.Field('baz')))
        match = child.search(
            {'foo': {'bar': {'baz': 'correct'}}})
        self.assertEqual(match, 'correct')
        self.assertEqual(
            child.search({'foo': {'bar': {'wrong': 'wrong'}}}), None)
        self.assertEqual(child.search({}), None)
        self.assertEqual(child.search([]), None)
        self.assertEqual(child.search(''), None)

    def test_index(self):
        # jmespath: foo[1]
        child = ast.SubExpression(ast.Field('foo'), ast.Index(1))
        match = child.search(
            {'foo': ['one', 'two', 'three']})
        self.assertEqual(match, 'two')

    def test_bad_index(self):
        # jmespath: foo[100]  (out of range -> no match)
        child = ast.SubExpression(ast.Field('foo'), ast.Index(100))
        match = child.search(
            {'foo': ['one', 'two', 'three']})
        self.assertEqual(match, None)

    def test_negative_index(self):
        # jmespath: foo[-1]
        child = ast.SubExpression(ast.Field('foo'), ast.Index(-1))
        match = child.search(
            {'foo': ['one', 'two', 'last']})
        self.assertEqual(match, 'last')

    def test_index_with_children(self):
        # jmespath: foo.bar[-1]
        child = ast.SubExpression(
            ast.Field('foo'),
            ast.SubExpression(ast.Field('bar'), ast.Index(-1)))
        match = child.search(
            {'foo': {'bar': ['first', 'middle', 'last']}})
        self.assertEqual(match, 'last')

    def test_multiple_indices(self):
        # jmespath: foo[1].bar[1]
        child = ast.SubExpression(
            ast.SubExpression(
                ast.Field('foo'), ast.Index(1)),
            ast.SubExpression(
                ast.Field('bar'), ast.Index(1)))
        match = child.search(
            {'foo': ['one', {'bar': ['zero', 'one']}]})
        self.assertEqual(match, 'one')

    def test_index_with_star(self):
        # jmespath: foo[*]
        child = ast.SubExpression(ast.Field('foo'), ast.WildcardIndex())
        match = child.search({'foo': ['one', 'two']})
        self.assertEqual(match, ['one', 'two'])

    def test_associative(self):
        # Grouping of subexpressions must not change the result.
        data = {'foo': {'bar': ['one']}}
        # jmespath: foo.bar[0]
        first = ast.SubExpression(
            ast.Field('foo'),
            ast.SubExpression(ast.Field('bar'), ast.Index(0)))
        second = ast.SubExpression(
            ast.SubExpression(ast.Field('foo'), ast.Field('bar')),
            ast.Index(0))
        self.assertEqual(first.search(data), 'one')
        self.assertEqual(second.search(data), 'one')

    def test_wildcard_branches_on_dict_values(self):
        data = {'foo': {'bar': {'get': 'one'}, 'baz': {'get': 'two'}}}
        # ast for "foo.*.get"
        expression = ast.SubExpression(
            ast.SubExpression(ast.Field('foo'), ast.WildcardValues()),
            ast.Field('get'))
        match = expression.search(data)
        self.assertEqual(sorted(match), ['one', 'two'])
        self.assertEqual(expression.search({'foo': [{'bar': 'one'}]}), None)

    def test_wildcard_dot_wildcard(self):
        # OrderedDict keeps key order deterministic for the assertion.
        _ = OrderedDict
        data = _([(
            "top1", _({
                "sub1": _({"foo": "one"})
            })),
            ("top2", _({
                "sub1": _({"foo": "two"})
            })),
            ("top3", _({
                "sub3": _({"notfoo": "notfoo"})
            }))
        ])
        # ast for "*.*"
        expression = ast.SubExpression(
            ast.WildcardValues(), ast.WildcardValues())
        match = expression.search(data)
        self.assertEqual(match, [[{'foo': 'one'}], [{'foo': 'two'}],
                                 [{'notfoo': 'notfoo'}]])

    def test_wildcard_with_field_node(self):
        data = {
            "top1": {
                "sub1": {"foo": "one"}
            },
            "top2": {
                "sub1": {"foo": "two"}
            },
            "top3": {
                "sub3": {"notfoo": "notfoo"}
            }
        }
        # ast for "*.*.foo"
        expression = ast.SubExpression(
            ast.WildcardValues(), ast.SubExpression(ast.WildcardValues(),
                                                    ast.Field('foo')))
        match = expression.search(data)
        self.assertEqual(sorted(match), sorted([[],
                                                ['one'],
                                                ['two']]))

    def test_wildcard_branches_with_index(self):
        # foo[*].bar
        child = ast.SubExpression(
            ast.SubExpression(ast.Field('foo'), ast.WildcardIndex()),
            ast.Field('bar')
        )
        match = child.search(
            {'foo': [{'bar': 'one'}, {'bar': 'two'}]})
        self.assertTrue(isinstance(match, list))
        self.assertEqual(match, ['one', 'two'])

    def test_index_with_multi_match(self):
        # foo[*].bar[0]
        child = ast.SubExpression(
            ast.SubExpression(ast.Field('foo'), ast.WildcardIndex()),
            ast.SubExpression(
                ast.Field('bar'),
                ast.Index(0)))
        data = {'foo': [{'bar': ['one', 'two']}, {'bar': ['three', 'four']}]}
        match = child.search(data)
        self.assertEqual(match, ['one', 'three'])

    def test_or_expression(self):
        # foo or bar  (first operand wins when both match)
        field_foo = ast.Field('foo')
        field_bar = ast.Field('bar')
        or_expression = ast.ORExpression(field_foo, field_bar)
        self.assertEqual(or_expression.search({'foo': 'foo'}), 'foo')
        self.assertEqual(or_expression.search({'bar': 'bar'}), 'bar')
        self.assertEqual(or_expression.search(
            {'foo': 'foo', 'bar': 'bar'}), 'foo')

    def test_multiselect_dict(self):
        # foo.{bar,baz}
        field_foo = ast.KeyValPair(key_name='foo', node=ast.Field('foo'))
        field_bar = ast.KeyValPair(key_name='bar', node=ast.Field('bar'))
        field_baz = ast.KeyValPair(key_name='baz', node=ast.Field('baz'))
        multiselect = ast.MultiFieldDict([field_bar, field_baz])
        subexpr = ast.SubExpression(field_foo, multiselect)
        self.assertEqual(
            subexpr.search({'foo': {'bar': 1, 'baz': 2, 'qux': 3}}),
            {'bar': 1, 'baz': 2})

    def test_multiselect_different_key_names(self):
        field_foo = ast.KeyValPair(key_name='arbitrary', node=ast.Field('foo'))
        field_bar = ast.KeyValPair(key_name='arbitrary2', node=ast.Field('bar'))
        multiselect = ast.MultiFieldDict([field_foo, field_bar])
        self.assertEqual(multiselect.search({'foo': 'value1', 'bar': 'value2'}),
                         {'arbitrary': 'value1', 'arbitrary2': 'value2'})

    def test_multiselect_list(self):
        # foo.[bar,baz]
        field_foo = ast.Field('foo')
        field_bar = ast.Field('bar')
        field_baz = ast.Field('baz')
        multiselect = ast.MultiFieldList([field_bar, field_baz])
        subexpr = ast.SubExpression(field_foo, multiselect)
        self.assertEqual(
            subexpr.search({'foo': {'bar': 1, 'baz': 2, 'qux': 3}}),
            [1, 2])

    def test_multiselect_list_wildcard(self):
        data = {
            'foo': {
                'ignore1': {
                    'one': 1, 'two': 2, 'three': 3,
                },
                'ignore2': {
                    'one': 1, 'two': 2, 'three': 3,
                },
            }
        }
        expr = ast.SubExpression(
            ast.Field("foo"),
            ast.SubExpression(
                ast.WildcardValues(),
                ast.MultiFieldList([ast.Field("one"), ast.Field("two")])))
        self.assertEqual(expr.search(data), [[1, 2], [1, 2]])

    def test_wildcard_values_index_not_a_list(self):
        parsed = ast.SubExpression(
            ast.WildcardValues(),
            ast.SubExpression(ast.Field("foo"), ast.Index(0)))
        data = {"a": {"foo": 1}, "b": {"foo": 1}, "c": {"bar": 1}}
        self.assertEqual(parsed.search(data), [])

    def test_wildcard_values_index_does_exist(self):
        parsed = ast.SubExpression(
            ast.WildcardValues(),
            ast.SubExpression(ast.Field("foo"), ast.Index(0)))
        data = {"a": {"foo": [1]}, "b": {"foo": 1}, "c": {"bar": 1}}
        self.assertEqual(parsed.search(data), [1])

    def test_flattened_wildcard(self):
        parsed = ast.SubExpression(
            # foo[].bar
            ast.SubExpression(ast.Field("foo"), ast.ListElements()),
            ast.Field("bar"))
        data = {'foo': [{'bar': 1}, {'bar': 2}, {'bar': 3}]}
        self.assertEqual(parsed.search(data), [1, 2, 3])

    def test_multiple_nested_wildcards(self):
        # foo[].bar[].baz
        parsed = ast.SubExpression(
            ast.SubExpression(
                ast.Field("foo"),
                ast.ListElements()),
            ast.SubExpression(
                ast.SubExpression(
                    ast.Field("bar"),
                    ast.ListElements()),
                ast.Field("baz")))
        data = {
            "foo": [
                {"bar": [{"baz": 1}, {"baz": 2}]},
                {"bar": [{"baz": 3}, {"baz": 4}]},
            ]
        }
        self.assertEqual(parsed.search(data), [1, 2, 3, 4])

    def test_multiple_nested_wildcards_with_list_values(self):
        parsed = ast.SubExpression(
            ast.SubExpression(
                ast.Field("foo"),
                ast.ListElements()),
            ast.SubExpression(
                ast.SubExpression(
                    ast.Field("bar"),
                    ast.ListElements()),
                ast.Field("baz")))
        data = {
            "foo": [
                {"bar": [{"baz": [1]}, {"baz": [2]}]},
                {"bar": [{"baz": [3]}, {"baz": [4]}]},
            ]
        }
        self.assertEqual(parsed.search(data), [[1], [2], [3], [4]])

    def test_flattened_multiselect_list(self):
        # foo[].[bar,baz]
        field_foo = ast.Field('foo')
        parent = ast.SubExpression(field_foo, ast.ListElements())
        field_bar = ast.Field('bar')
        field_baz = ast.Field('baz')
        multiselect = ast.MultiFieldList([field_bar, field_baz])
        subexpr = ast.SubExpression(parent, multiselect)
        self.assertEqual(
            subexpr.search({'foo': [{'bar': 1, 'baz': 2, 'qux': 3}]}),
            [[1, 2]])
# Allow running this test module directly as a script.
if __name__ == '__main__':
    unittest.main()
| [
"js@jamesls.com"
] | js@jamesls.com |
1d41c54ec8ce0af470639cebf2a279f1299f0c15 | 50cbc789f765610b1074b414a4cb5fbecb65b340 | /djangosige/apps/estoque/models/movimento.py | b23514c840fb941be3044a344591b3a4a1a54fa4 | [
"MIT"
] | permissive | jonatasoli/djangoSIGE | 6f79a92737e281ab6e999ad1353c5f24a0e54d97 | 31bba22cf5ce304bc96068e93d49002f99066218 | refs/heads/master | 2021-10-22T20:43:37.591470 | 2019-03-12T21:20:36 | 2019-03-12T21:20:36 | 109,386,717 | 1 | 0 | null | 2017-11-03T11:12:21 | 2017-11-03T11:12:20 | null | UTF-8 | Python | false | false | 4,915 | py | # -*- coding: utf-8 -*-
from django.db import models
from django.core.validators import MinValueValidator
from decimal import Decimal
from django.core.urlresolvers import reverse_lazy
from django.template.defaultfilters import date
from . import DEFAULT_LOCAL_ID
import locale
locale.setlocale(locale.LC_ALL, '')
# Movement type codes (stored as single-character strings on the models).
# Entry movements: adjustment, purchase order, supplier invoice import,
# initial adjustment.
TIPOS_MOVIMENTO_ENTRADA = (
    (u'0', u'Ajuste'),
    (u'1', u'Entrada por pedido de compra'),
    (u'2', u'Entrada por importação de nota fiscal de fornecedor'),
    (u'3', u'Ajuste inicial'),
)

# Exit movements: adjustment, sales order, invoice import.
TIPOS_MOVIMENTO_SAIDA = (
    (u'0', u'Ajuste'),
    (u'1', u'Saída por pedido de venda'),
    (u'2', u'Saída por importação de nota fiscal'),
)
class ItensMovimento(models.Model):
    """One product line inside a stock movement (quantity, unit price,
    subtotal)."""

    produto = models.ForeignKey('cadastro.Produto', related_name="moviment_estoque_produto",
                                on_delete=models.CASCADE, null=True, blank=True)
    movimento_id = models.ForeignKey(
        'estoque.MovimentoEstoque', related_name="itens_movimento", on_delete=models.CASCADE)
    quantidade = models.DecimalField(max_digits=13, decimal_places=2, validators=[
                                     MinValueValidator(Decimal('0.00'))], default=Decimal('0.00'))
    valor_unit = models.DecimalField(max_digits=13, decimal_places=2, validators=[
                                     MinValueValidator(Decimal('0.00'))], null=True, blank=True)
    subtotal = models.DecimalField(max_digits=13, decimal_places=2, validators=[
                                   MinValueValidator(Decimal('0.00'))], null=True, blank=True)

    def get_estoque_atual_produto(self):
        # Returns the product's current stock (Decimal), the string
        # 'Não controlado' when stock is not tracked, or None (implicitly)
        # when no product is linked — callers must handle all three.
        if self.produto:
            if self.produto.controlar_estoque and self.produto.estoque_atual:
                return self.produto.estoque_atual
            else:
                return 'Não controlado'

    def format_estoque_atual_produto(self):
        # Locale-formatted stock for display; non-Decimal results
        # ('Não controlado' or None) are passed through unchanged.
        estoque_atual = self.get_estoque_atual_produto()
        if isinstance(estoque_atual, Decimal):
            return locale.format(u'%.2f', estoque_atual, 1)
        else:
            return estoque_atual
class MovimentoEstoque(models.Model):
    """Base model for stock movements; EntradaEstoque, SaidaEstoque and
    TransferenciaEstoque extend it (multi-table inheritance)."""

    data_movimento = models.DateField(null=True, blank=True)
    quantidade_itens = models.IntegerField(
        validators=[MinValueValidator(0)], default=0)
    valor_total = models.DecimalField(max_digits=13, decimal_places=2, validators=[
                                      MinValueValidator(Decimal('0.00'))], default=Decimal('0.00'))
    observacoes = models.CharField(max_length=1055, null=True, blank=True)

    class Meta:
        verbose_name = "Movimento de Estoque"
        permissions = (
            ("view_movimentoestoque", "Can view movimento estoque"),
            ("consultar_estoque", "Pode consultar estoque"),
        )

    @property
    def format_data_movimento(self):
        # Date rendered as dd/mm/YYYY via Django's date template filter.
        return '%s' % date(self.data_movimento, "d/m/Y")

    def format_quantidade_itens(self):
        # Locale-aware two-decimal rendering for display.
        return locale.format(u'%.2f', self.quantidade_itens, 1)

    def format_valor_total(self):
        # Locale-aware two-decimal rendering for display.
        return locale.format(u'%.2f', self.valor_total, 1)
class EntradaEstoque(MovimentoEstoque):
    """Incoming stock movement (purchase, invoice import or adjustment)."""

    tipo_movimento = models.CharField(
        max_length=1, choices=TIPOS_MOVIMENTO_ENTRADA, default='0')
    pedido_compra = models.ForeignKey(
        'compras.PedidoCompra', related_name="entrada_estoque_pedido", on_delete=models.SET_NULL, null=True, blank=True)
    fornecedor = models.ForeignKey(
        'cadastro.Fornecedor', related_name="entrada_estoque_fornecedor", on_delete=models.SET_NULL, null=True, blank=True)
    local_dest = models.ForeignKey(
        'estoque.LocalEstoque', related_name="entrada_estoque_local", default=DEFAULT_LOCAL_ID)

    def get_edit_url(self):
        # Detail/edit page URL for this entry.
        return reverse_lazy('estoque:detalharentradaestoqueview', kwargs={'pk': self.id})

    def get_tipo(self):
        # Human-readable movement kind used by list views.
        return 'Entrada'
class SaidaEstoque(MovimentoEstoque):
tipo_movimento = models.CharField(
max_length=1, choices=TIPOS_MOVIMENTO_SAIDA, default='0')
pedido_venda = models.ForeignKey(
'vendas.PedidoVenda', related_name="saida_estoque", on_delete=models.SET_NULL, null=True, blank=True)
local_orig = models.ForeignKey(
'estoque.LocalEstoque', related_name="saida_estoque_local", default=DEFAULT_LOCAL_ID)
def get_edit_url(self):
return reverse_lazy('estoque:detalharsaidaestoqueview', kwargs={'pk': self.id})
def get_tipo(self):
return 'Saída'
class TransferenciaEstoque(MovimentoEstoque):
local_estoque_orig = models.ForeignKey(
'estoque.LocalEstoque', related_name="transf_estoque_orig", on_delete=models.CASCADE)
local_estoque_dest = models.ForeignKey(
'estoque.LocalEstoque', related_name="transf_estoque_dest", on_delete=models.CASCADE)
def get_edit_url(self):
return reverse_lazy('estoque:detalhartransferenciaestoqueview', kwargs={'pk': self.id})
def get_tipo(self):
return 'Transferência'
| [
"thiagovilelap@hotmail.com"
] | thiagovilelap@hotmail.com |
9472d2e6c1f8c59f279a64790c1afe940a3f77db | 7c74ceb9f8addcc0816d012e0b84b174b96e0def | /src/azure-cli-core/azure/cli/core/decorators.py | 55a1d88e11e60f053ba780fa7627e97a9dc7db78 | [
"MIT",
"LGPL-2.1-only",
"LGPL-2.1-or-later",
"GPL-1.0-or-later",
"MPL-2.0",
"LGPL-2.0-or-later",
"BSD-3-Clause",
"Apache-2.0",
"BSD-2-Clause"
] | permissive | microsoft/azure-cli | 4c826290e7a6f6bd27da3829b05e4f02ff6dc8d9 | 9ba64b33f6f78e2c3e42f8a147f59484300e8779 | refs/heads/dev | 2023-08-31T08:51:39.526556 | 2022-11-28T19:08:23 | 2022-11-28T19:08:23 | 370,900,439 | 7 | 7 | MIT | 2023-08-01T23:34:50 | 2021-05-26T03:59:41 | Python | UTF-8 | Python | false | false | 2,777 | py | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
"""
Utility decorators
This module will be executed in separate process after az process is terminated to upload traces, so it is preferable
that it doesn't import modules other than those in the Python Standard Library
"""
import hashlib
from functools import wraps
from knack.log import get_logger
# pylint: disable=too-few-public-methods
class Completer:
def __init__(self, func):
self.func = func
def __call__(self, **kwargs):
namespace = kwargs['parsed_args']
prefix = kwargs['prefix']
cmd = namespace._cmd # pylint: disable=protected-access
return self.func(cmd, prefix, namespace)
def call_once(factory_func):
""""
When a function is annotated by this decorator, it will be only executed once. The result will be cached and
returned for following invocations.
"""
factory_func.executed = False
factory_func.cached_result = None
def _wrapped(*args, **kwargs):
if not factory_func.executed:
factory_func.cached_result = factory_func(*args, **kwargs)
return factory_func.cached_result
return _wrapped
def hash256_result(func):
"""
Secure the return string of the annotated function with SHA256 algorithm. If the annotated function doesn't return
string or return None, raise ValueError.
"""
@wraps(func)
def _decorator(*args, **kwargs):
val = func(*args, **kwargs)
if val is None:
raise ValueError('Return value is None')
if not isinstance(val, str):
raise ValueError('Return value is not string')
if not val:
return val
hash_object = hashlib.sha256(val.encode('utf-8'))
return str(hash_object.hexdigest())
return _decorator
def suppress_all_exceptions(fallback_return=None, **kwargs): # pylint: disable=unused-argument
# The kwargs is a fallback to ensure extensions (eg. alias) are not broken
def _decorator(func):
@wraps(func)
def _wrapped_func(*args, **kwargs):
try:
return func(*args, **kwargs)
except Exception: # nopa pylint: disable=broad-except
import traceback
get_logger(__name__).info('Suppress exception:\n%s', traceback.format_exc())
if fallback_return is not None:
return fallback_return
return _wrapped_func
return _decorator
| [
"noreply@github.com"
] | microsoft.noreply@github.com |
2ec4b9ec43ae5d79f8a74733d95e82c82a27f429 | f37ae889f7ef4d6e57cf7b51a1c25d8c7a259590 | /pur_beurre/urls.py | a0193697b4559f131d9d496f48e2d0f1caab0f81 | [] | no_license | cleliofavoccia/Projet10 | f9ee05ed1586dbcf14475fc36d4aa3f24b58d66c | 802806afe19f7e6fe4766fd0e3ee26ba1bda6fa0 | refs/heads/master | 2023-05-10T07:36:15.637348 | 2021-06-12T07:25:23 | 2021-06-12T07:25:23 | 349,119,643 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,026 | py | """pur_beurre URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from django.urls import include
urlpatterns = [
path('admin/', admin.site.urls),
path('', include('website.urls')),
path('products/', include('products.urls')),
path('users/', include('users.urls')),
path('users/', include('django.contrib.auth.urls')),
path('favorites/', include('favorites.urls')),
]
| [
"favoccia.c@live.fr"
] | favoccia.c@live.fr |
385a49c13b863a8896488f4156d05502aa2f7c80 | f576f0ea3725d54bd2551883901b25b863fe6688 | /sdk/appcontainers/azure-mgmt-appcontainers/generated_samples/connected_environments_dapr_components_list_secrets.py | ea1924799cfe381f8684b2226852edd26e271f44 | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] | permissive | Azure/azure-sdk-for-python | 02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c | c2ca191e736bb06bfbbbc9493e8325763ba990bb | refs/heads/main | 2023-09-06T09:30:13.135012 | 2023-09-06T01:08:06 | 2023-09-06T01:08:06 | 4,127,088 | 4,046 | 2,755 | MIT | 2023-09-14T21:48:49 | 2012-04-24T16:46:12 | Python | UTF-8 | Python | false | false | 1,696 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
from azure.mgmt.appcontainers import ContainerAppsAPIClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-appcontainers
# USAGE
python connected_environments_dapr_components_list_secrets.py
Before run the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
client = ContainerAppsAPIClient(
credential=DefaultAzureCredential(),
subscription_id="8efdecc5-919e-44eb-b179-915dca89ebf9",
)
response = client.connected_environments_dapr_components.list_secrets(
resource_group_name="examplerg",
connected_environment_name="myenvironment",
component_name="reddog",
)
print(response)
# x-ms-original-file: specification/app/resource-manager/Microsoft.App/stable/2023-05-01/examples/ConnectedEnvironmentsDaprComponents_ListSecrets.json
if __name__ == "__main__":
main()
| [
"noreply@github.com"
] | Azure.noreply@github.com |
9026865925b672227cd58a155c84cd23b74dd83d | a06cd01b474e74d7b57144fca3930b7a2fe01ec4 | /ethpm/validation/uri.py | 009ceaf58091465a85e442722fdaa395a10bf1f1 | [
"MIT"
] | permissive | XTAUEMC/web3.py | e95b9b2e8bb18cae9794a0eaecb2bef3cbd87ec6 | 36224cfd19c3cf50746ccdeae8521ce4c08b7e3a | refs/heads/master | 2020-06-20T00:01:35.345192 | 2019-07-14T19:56:28 | 2019-07-14T19:56:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,339 | py | import hashlib
from typing import (
List,
)
from urllib import (
parse,
)
from eth_utils import (
is_checksum_address,
to_bytes,
to_int,
to_text,
)
from ethpm._utils.chains import (
is_supported_chain_id,
)
from ethpm._utils.ipfs import (
is_ipfs_uri,
)
from ethpm._utils.registry import (
is_ens_domain,
)
from ethpm.constants import (
REGISTRY_URI_SCHEME,
)
from ethpm.exceptions import (
ValidationError,
)
from ethpm.validation.package import (
validate_package_name,
)
from web3 import Web3
def validate_ipfs_uri(uri: str) -> None:
"""
Raise an exception if the provided URI is not a valid IPFS URI.
"""
if not is_ipfs_uri(uri):
raise ValidationError(f"URI: {uri} is not a valid IPFS URI.")
def validate_registry_uri(uri: str) -> None:
"""
Raise an exception if the URI does not conform to the registry URI scheme.
"""
parsed = parse.urlparse(uri)
scheme, authority, pkg_name, query = (
parsed.scheme,
parsed.netloc,
parsed.path,
parsed.query,
)
validate_registry_uri_scheme(scheme)
validate_registry_uri_authority(authority)
if query:
validate_registry_uri_version(query)
validate_package_name(pkg_name[1:])
def validate_registry_uri_authority(auth: str) -> None:
"""
Raise an exception if the authority is not a valid ENS domain
or a valid checksummed contract address.
"""
try:
address, chain_id = auth.split(':')
except ValueError:
raise ValidationError(
f"{auth} is not a valid registry URI authority. "
"Please try again with a valid registry URI."
)
if is_ens_domain(address) is False and not is_checksum_address(address):
raise ValidationError(
f"{auth} is not a valid registry address. "
"Please try again with a valid registry URI."
)
if not is_supported_chain_id(to_int(text=chain_id)):
raise ValidationError(
f"Chain ID: {chain_id} is not supported. Supported chain ids include: "
"1 (mainnet), 3 (ropsten), 4 (rinkeby), 5 (goerli) and 42 (kovan). "
"Please try again with a valid registry URI."
)
def validate_registry_uri_scheme(scheme: str) -> None:
"""
Raise an exception if the scheme is not the valid registry URI scheme ('ercXXX').
"""
if scheme != REGISTRY_URI_SCHEME:
raise ValidationError(f"{scheme} is not a valid registry URI scheme.")
def validate_registry_uri_version(query: str) -> None:
"""
Raise an exception if the version param is malformed.
"""
query_dict = parse.parse_qs(query, keep_blank_values=True)
if "version" not in query_dict:
raise ValidationError(f"{query} is not a correctly formatted version param.")
def validate_single_matching_uri(all_blockchain_uris: List[str], w3: Web3) -> str:
"""
Return a single block URI after validating that it is the *only* URI in
all_blockchain_uris that matches the w3 instance.
"""
from ethpm.uri import check_if_chain_matches_chain_uri
matching_uris = [
uri for uri in all_blockchain_uris if check_if_chain_matches_chain_uri(w3, uri)
]
if not matching_uris:
raise ValidationError("Package has no matching URIs on chain.")
elif len(matching_uris) != 1:
raise ValidationError(
f"Package has too many ({len(matching_uris)}) matching URIs: {matching_uris}."
)
return matching_uris[0]
def validate_blob_uri_contents(contents: bytes, blob_uri: str) -> None:
"""
Raises an exception if the sha1 hash of the contents does not match the hash found in te
blob_uri. Formula for how git calculates the hash found here:
http://alblue.bandlem.com/2011/08/git-tip-of-week-objects.html
"""
blob_path = parse.urlparse(blob_uri).path
blob_hash = blob_path.split("/")[-1]
contents_str = to_text(contents)
content_length = len(contents_str)
hashable_contents = "blob " + str(content_length) + "\0" + contents_str
hash_object = hashlib.sha1(to_bytes(text=hashable_contents))
if hash_object.hexdigest() != blob_hash:
raise ValidationError(
f"Hash of contents fetched from {blob_uri} do not match its hash: {blob_hash}."
)
| [
"nickgheorghita@gmail.com"
] | nickgheorghita@gmail.com |
151ef292dd5b4742d46553aee188acfd3a2cd8b2 | 1ff41a2393c969aaf662a198f405b4e76d4ce957 | /myshop/myshop/settings.py | f85c2d92c211522f0c679ef4a12a154d55a90ecd | [] | no_license | asimonia/django-bookstore | 7a15495c8a522a4c8f5151948d1a0f2a67a4f63a | 29fd151eb9269c3e43e1b2736f11f49247ec2994 | refs/heads/master | 2021-01-17T17:27:53.268028 | 2016-07-19T02:15:52 | 2016-07-19T02:15:52 | 63,547,855 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,086 | py | """
Django settings for myshop project.
Generated by 'django-admin startproject' using Django 1.8.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '-kt3)=5e1730c!(18nq@zuo%e*%7yccy66f5*8_yyt+a&%)mod'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'shop',
'cart',
'orders',
'paypal.standard.ipn',
'payment',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'myshop.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'cart.context_processors.cart',
],
},
},
]
WSGI_APPLICATION = 'myshop.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media/')
# Add key to store the cart in the user session
CART_SESSION_ID = 'cart'
# Send emails to the backend. Set up email later
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
PAYPAL_RECEIVER_EMAIL = 'alex@myshop.com'
PAYPAL_TEST = True | [
"alex.simonian@gmail.com"
] | alex.simonian@gmail.com |
c11fdcc792918562865cbdc580eb0b8eb3b9459a | 67df0894b961a8d1729214f07c954c551fd06f3f | /re_search_en/o_O_re_search_en.py | fbcb34853079f23d19e01ac81ea62b4f37047ea5 | [] | no_license | hihumi/enjapy | 9261b209e3dbe3628229acff0e425da08ab46caf | 4d7344cb7291abe4deb4bca42590693d89ba328e | refs/heads/master | 2020-05-23T10:11:01.260143 | 2017-09-30T06:10:22 | 2017-09-30T06:10:22 | 80,397,788 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,838 | py | #!/usr/bin/env python3
"""main.pyで入力されたwordが、
oxxx_reモジュールのoxxx_re_func()で作成した正規表現のo、またはOからはじまる英単語と合致した場合、
oxxx_jaモジュールのoxxx_ja_func()を呼ぶ
ただし、o-listまたはO-list(すべて大文字小文字は問わない)と入力された場合、o_O_listモジュールのo_O_list_func()を呼ぶ
"""
# o: a b c d e f g h i j k l m n o p q r s t u v w x y z
#
# from re_en.oxxx_re import oxxx_re_func
# office:
from re_en.office_re import office_re_func
# order:
from re_en.order_re import order_re_func
# outlook:
from re_en.outlook_re import outlook_re_func
# 0-list:
from re_en.o_O_list_re import o_O_list_re_func
# o: a b c d e f g h i j k l m n o p q r s t u v w x y z
#
# from print_ja.oxxx_ja import oxxx_ja_func
# office:
from print_ja.office_ja import office_ja_func
# order:
from print_ja.order_ja import order_ja_func
# outlook:
from print_ja.outlook_ja import outlook_ja_func
from print_en_lists.o_O_list import o_O_list_func
def o_O_re_search_en_func(o_O_word):
"""main.pyで入力されたwordが、
oxxx_reモジュールのoxxx_re_func()で作成した正規表現のo、またはOからはじまる英単語と合致した場合、
oxxx_jaモジュールのoxxx_ja_func()を呼ぶ関数
ただし、最後のelifは、o_O_listモジュールのo_O_list_func()を呼ぶ
"""
# o: a b c d e f g h i j k l m n o p q r s t u v w x y z
if office_re_func().search(o_O_word): # office
office_ja_func()
elif order_re_func().search(o_O_word): # order
order_ja_func()
elif outlook_re_func().search(o_O_word): # outlook
outlook_ja_func()
elif o_O_list_re_func().search(o_O_word): # o-list
o_O_list_func()
else:
print('not found...')
| [
"yui.maa5800@gmail.com"
] | yui.maa5800@gmail.com |
8cb50e722107fc700d8f17e3ee0ff96f2942120c | 908554f8250780024ffdd6c6f32a65acc36ec5cd | /backend/task_category/migrations/0001_initial.py | 63a0837d759cfbb72a35d63a7f45744f3ea04b43 | [] | no_license | crowdbotics-apps/petsker-23110 | 799032a970fce4da74d9ced06655b073ecee1621 | ca4d40e9b4b72d36fc60c8544c4e27635864a203 | refs/heads/master | 2023-01-21T21:39:29.108783 | 2020-12-01T15:54:38 | 2020-12-01T15:54:38 | 317,582,642 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,744 | py | # Generated by Django 2.2.17 on 2020-12-01 15:25
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = []
operations = [
migrations.CreateModel(
name="Category",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("name", models.CharField(max_length=255)),
("icon", models.URLField()),
("description", models.TextField(blank=True, null=True)),
("is_recurring", models.BooleanField(blank=True, null=True)),
],
),
migrations.CreateModel(
name="Subcategory",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("name", models.CharField(max_length=255)),
("description", models.TextField(blank=True, null=True)),
(
"category",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="subcategory_category",
to="task_category.Category",
),
),
],
),
]
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
d97eaea3fb21768d3835c717678dd583e27be0e8 | df789505c99974c0ba45adc57e52fc7865ff2a28 | /python练习题/常用案例.py | f75ecfae83f1b4f2f8b671fcb764e2d1ca341108 | [] | no_license | zhiwenwei/python | 6fc231e47a9fbb555efa287ac121546e07b70f06 | 76d267e68f762ee9d7706e1800f160929544a0a3 | refs/heads/master | 2021-01-20T04:21:44.825752 | 2018-12-19T06:20:10 | 2018-12-19T06:20:10 | 89,676,097 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 305 | py | #coding:utf-8
#Author:Mr Zhi
#排列组合,将4个数字可能组成的所有互不相同且无重复数字的排列组合列出。
for i in range(1,5):
for j in range(1,5):
for k in range(1,5):
if (i != k) and (i != j) and (j != k ):
print(i,j,k)
import fileinput
| [
"ddzhiwenwei@163.com"
] | ddzhiwenwei@163.com |
01743d7f3fd3ed87763ed1ea40e97069ac07d8c2 | 4be2c72579486ad04a00db0349028de96d2dce89 | /scripts/aTools/animTools/animBar/animBarUI.py | c47fbf7c062fa74f255ab5c363e08a4142f0e8b9 | [] | no_license | italic-r/maya-prefs | 6a617d40beee8937186b4699c5cead44e01c2d40 | aa21e5e2938dc2698ce5f555ee74a594e08aed2b | refs/heads/master | 2021-09-09T16:31:00.411349 | 2018-03-18T01:40:10 | 2018-03-18T01:40:10 | 86,961,959 | 16 | 8 | null | null | null | null | UTF-8 | Python | false | false | 6,651 | py | '''
========================================================================================================================
Author: Alan Camilo
www.alancamilo.com
Requirements: aTools Package
------------------------------------------------------------------------------------------------------------------------
To install aTools, please follow the instructions in the file how_to_install.txt, located in the folder aTools
------------------------------------------------------------------------------------------------------------------------
To unistall aTools, go to menu (the last button on the right), Uninstall
========================================================================================================================
'''
# maya modulesspecialTools
from maya import cmds
from aTools.generalTools.aToolsGlobals import aToolsGlobals as G
from aTools.generalTools import aToolsClasses; reload(aToolsClasses)
from aTools.commonMods import animMod; reload(animMod)
from aTools.generalTools import generalToolsUI; reload(generalToolsUI)
from aTools.commonMods import utilMod; reload(utilMod)
from aTools.commonMods import commandsMod; reload(commandsMod)
from aTools.commonMods import aToolsMod; reload(aToolsMod)
from aTools import setup; reload(setup)
# constants
SUB_UI_MODS = ["tweenMachine", "keyTransform", "tangents", "specialTools", "tUtilities"]
# import subUI modules
for loopMod in SUB_UI_MODS:
exec("import aTools.animTools.animBar.subUIs.%s as %s; reload(%s)"%(loopMod, loopMod, loopMod))
def show(mode="show"):
G.aToolsBar = G.aToolsBar or AnimationBar_Gui()
if mode == False: mode = "show"
if mode == True: mode = "toggle"
if mode == "launch":
lastState = aToolsMod.loadInfoWithUser("userPrefs", "animationBarLastState")
if lastState: show()
return
if mode == "show" or mode == "hide":
if cmds.toolBar("aTools_Animation_Bar", query=True, exists=True):
visible = (mode == "show")
cmds.toolBar("aTools_Animation_Bar", edit=True, visible=visible)
G.aToolsBar.saveLastState(visible)
return
elif mode == "show":
G.aToolsBar.start()
G.aToolsBar.saveLastState()
return
if mode == "toggle":
if cmds.toolBar("aTools_Animation_Bar", query=True, exists=True):
state = cmds.toolBar("aTools_Animation_Bar", query=True, visible=True)
visible = (not state)
G.aToolsBar.toggleToolbars(visible)
cmds.toolBar("aTools_Animation_Bar", edit=True, visible=visible)
G.aToolsBar.saveLastState(visible)
return
else:
show()
return
if mode == "refresh":
G.aToolsBar = AnimationBar_Gui()
G.aToolsBar.start()
G.aToolsBar.saveLastState()
class AnimationBar_Gui(object):
def __init__(self):
self.winName = "aAnimationBarWin"
self.toolbarName = "aTools_Animation_Bar"
self.allWin = [self.winName, self.toolbarName]
self.buttonSize = {"small":[15, 20], "big":[25, 25]}
self.barOffset = 0
self.barHotkeys = {}
G.aToolsUIs = {"toolbars":[
],
"windows":[
]}
# [ SUBUIs ]
self.uiList = None
self.subUIs = None
def __getattr__(self, attr):
return None
def start(self):
from aTools.generalTools import aToolsClasses; reload(aToolsClasses)
self.startUpFunctions()
self.delWindows()
self.createWin()
def startUpFunctions(self):
#wait cursor state
n = 0
while True:
if not cmds.waitCursor(query=True, state=True) or n > 100: break
cmds.waitCursor(state=False)
n += 1
#refresh state
cmds.refresh(suspend=False)
#undo state
if not cmds.undoInfo(query=True, stateWithoutFlush=True): cmds.undoInfo(stateWithoutFlush=True)
#progress bar state
utilMod.setProgressBar(status=None, progress=None, endProgress=True)
def saveLastState(self, state=True):
aToolsMod.saveInfoWithUser("userPrefs", "animationBarLastState", state)
def createWin(self):
# Creates window
self.mainWin = cmds.window(self.winName, sizeable=True)
# Main frame
cmds.frameLayout("mainFrameLayout", labelVisible=False, borderVisible=False, w=10, marginHeight=0, marginWidth=0, labelIndent=0, collapsable=False)
cmds.rowLayout(numberOfColumns=2, adjustableColumn=1, columnAttach=([2, 'right', self.barOffset]), h=37)
cmds.text(label="")
self.subUIsLayout = cmds.rowLayout("mainLayout", numberOfColumns=len(SUB_UI_MODS)+2)
# subUIs
self.uiList = [eval("%s.%s%s_Gui"%(loopUi, loopUi[0].upper(), loopUi[1:])) for loopUi in SUB_UI_MODS]
# append general tools ui
self.uiList.append(generalToolsUI.GeneralTools_Gui)
# define subUis
self.subUIs = [loopUi(self.subUIsLayout, self.buttonSize) for loopUi in self.uiList]
self.addSubUIs()
# shows toolbar
cmds.toolBar(self.toolbarName, area='bottom', content=self.mainWin, allowedArea=['bottom'])
# end method createWin
#---------------------------------------------------------------------
def addSubUIs(self):
# parent subUis to the main layout
for loopIndex, loopSubUI in enumerate(self.subUIs):
loopSubUI.createLayout()
# space
if loopIndex < len(self.subUIs) -1:
cmds.rowLayout(numberOfColumns=2)
cmds.text( label=' ', h=1 )
# end for
def toggleToolbars(self, visible):
pass
def delWindows(self, onOff=True, forceOff=False):
for loopWin in self.allWin:
if cmds.window(loopWin, query=True, exists=True): cmds.deleteUI(loopWin)
if cmds.toolBar(loopWin, query=True, exists=True):
cmds.deleteUI(loopWin)
| [
"italic.rendezvous@gmail.com"
] | italic.rendezvous@gmail.com |
18a1c1067bad2013e8ca5d5d30bccc4e31eb262f | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/Cases/2468/.mooctest/answer.py | 97b5f1afb2362e43de2e28c40adb9a4f8b3ccb58 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 305 | py | t=int(input())
while(t>0):
n=int(input())
a=[]
num=input()
for x in num.split():
a.append(int(x))
P=[]
i=0
p=1
for x in a:
p=p*x
while i<n:
P.insert(i,p//a[i])
i=i+1
for x in P:
print(x,end=" ")
print()
t=t-1
| [
"382335657@qq.com"
] | 382335657@qq.com |
c865d2dcc1d9e00316123c8b37dc16ed422db3ef | 2735c5f1a9b1f1a3d2468f0838fc0f20725cbe31 | /usr/lib/pymodules/python2.7/numpy/numarray/compat.py | d2b76797fbaa44016996f3e2659d515a9a20c9ca | [] | no_license | sanyaade-iot/rpi-sysroot | f202b9188fd99c372d28b59ebe1b8fcabbfb7a67 | 6e13f05b5b2b44b29ead66c96230a17f077d5198 | refs/heads/master | 2020-04-08T16:14:25.745147 | 2014-03-21T06:27:54 | 2014-03-21T09:47:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 44 | py | /usr/share/pyshared/numpy/numarray/compat.py | [
"weitjong@gmail.com"
] | weitjong@gmail.com |
421123dd75c5d9c60f99ea7d66de6b553bae9db3 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/148/usersdata/272/86767/submittedfiles/testes.py | db6500d7b4c0772c218a8e23b5ab043f7012a066 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 171 | py | # -*- coding: utf-8 -*-
#COMECE AQUI ABAIXO
def impar(x):
if ((x%2)!=0):
return (True)
else:
return (False)
a=int(input('Digite a: '))
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
8c1ec39b364c12047ff70031d242a8c292be9e94 | 81b0e6fe7a6e56ed8a91748499b81ddd3f2e45f8 | /neural_network.py | 56ddb8ba46c9ef0d3b02ce7f51677509286dce26 | [] | no_license | shieldforever/DeepLearning | cfef817602b9677df4df4c1b87e60c5e91f2315a | b8080938a4b22395379be9032266df36cb5491e6 | refs/heads/master | 2021-01-05T14:12:26.110888 | 2019-10-29T11:23:06 | 2019-10-29T11:23:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,885 | py | # -*- coding: utf-8 -*-
"""
Created on Sat Apr 14 17:55:42 2019
@author: Administrator
"""
import numpy as np
from scipy import special
class neuralNetwork(object):
#initialize the neural network
def __init__(self,inputnodes,hiddennodes,outputnodes,learningrate):
self.inodes=inputnodes
self.hnodes=hiddennodes
self.onodes=outputnodes
self.lr=learningrate
#self.wih=np.random.rand(self.hnodes,self.inodes)-0.5
#self.who=np.random.rand(self.onodes,self.hnodes)-0.5
#means,Variance,shape of array
self.wih = np.random.normal(0.0, pow(self.inodes, -0.5), (self.hnodes, self.inodes))
self.who = np.random.normal(0.0, pow(self.hnodes, -0.5), (self.onodes, self.hnodes))
#sigmoid function
self.activation_function=lambda x:special.expit(x)
pass
#train the neural network
def train(self,inputs_list,targets_list):
#convert inputs list and targets list to 2d array
inputs=np.array(inputs_list,ndmin=2).T
targets=np.array(targets_list,ndmin=2).T
hidden_inputs = np.dot(self.wih, inputs)
# calculate the signals emerging from hidden layer
hidden_outputs = self.activation_function(hidden_inputs)
# calculate signals into final output layer
final_inputs = np.dot(self.who, hidden_outputs)
# calculate the signals emerging from final output layer
final_outputs = self.activation_function(final_inputs)
output_errors=targets-final_outputs
hidden_errors=np.dot(self.who.T,output_errors)
self.who +=self.lr*np.dot((output_errors*final_outputs*(1.0-final_outputs)),
np.transpose(hidden_outputs))
self.wih +=self.lr*np.dot((hidden_errors*hidden_outputs*(1.0-hidden_outputs)),
np.transpose(inputs))
pass
#query the neural network
def query(self,inputs_list):
# convert inputs list to 2d array
inputs = np.array(inputs_list, ndmin=2).T
# calculate signals into hidden layer
hidden_inputs = np.dot(self.wih, inputs)
# calculate the signals emerging from hidden layer
hidden_outputs = self.activation_function(hidden_inputs)
# calculate signals into final output layer
final_inputs = np.dot(self.who, hidden_outputs)
# calculate the signals emerging from final output layer
final_outputs = self.activation_function(final_inputs)
return final_outputs
input_nodes=3
hidden_nodes=3
output_nodes=3
learning_rate=0.3
n=neuralNetwork(input_nodes,hidden_nodes,output_nodes,learning_rate)
n.train([1,0.5,-0.5],[0.8,0.6,0.6])
print(n.query([1,0.5,-0.5]))
'''
a1=np.random.rand(3,3) #0-1的随机值
a2=a1-0.5 #-0.5-0.5的随机值
'''
| [
"870407139@qq.com"
] | 870407139@qq.com |
c0a0e1a4bdaa1c76b49f676e19384408c030d1c1 | e1f519fc0c4f76d11db9584f74c5b49ca95b0798 | /cs_notes/arrays/RLE_iterator.py | 8a78d35ba581f17f8aca1d70416549bdd8f05e1b | [] | no_license | hwc1824/LeetCodeSolution | 22d41327cde2b9562f58cc73e6205c7c2f9a5e1c | ac53dd9bf2c4c9d17c9dc5f7fdda32e386658fdd | refs/heads/master | 2023-08-16T10:15:39.351933 | 2018-12-19T00:43:07 | 2018-12-19T00:43:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,111 | py | # 900. RLE Iterator
# https://leetcode.com/problems/rle-iterator/description/
class RLEIterator:
# 下面的解法的想法是,只要記錄目前走到第幾步就好了
# 儲存 A 的時候將連續出現的次數變換成最後一次出現的位置
# 當 next 被呼叫的時候,將 n 累計起來
# 然後就只要查閱 n 介於那兩個數字的最後一次出現位置之間
def __init__(self, A):
"""
:type A: List[int]
"""
self.curr = 0
self.idx = 0
self.record = []
c_sum = 0
for i in range(0, len(A), 2):
c_sum += A[i]
if A[i] != 0: self.record.append((c_sum, A[i+1]))
def next(self, n):
"""
:type n: int
:rtype: int
"""
if self.idx >= len(self.record): return -1
self.curr+=n
for count, val in self.record[self.idx:]:
if self.curr <= count: return val
self.idx+=1
return -1
# Your RLEIterator object will be instantiated and called as such:
# obj = RLEIterator(A)
# param_1 = obj.next(n)
| [
"eraxer0165749@gmail.com"
] | eraxer0165749@gmail.com |
c549025fe6b58ffe1ccc0b22996cabc8736f8306 | 255e19ddc1bcde0d3d4fe70e01cec9bb724979c9 | /all-gists/4499367/snippet.py | 5581424b74f80c38427bb49d1be8aff71949ed4c | [
"MIT"
] | permissive | gistable/gistable | 26c1e909928ec463026811f69b61619b62f14721 | 665d39a2bd82543d5196555f0801ef8fd4a3ee48 | refs/heads/master | 2023-02-17T21:33:55.558398 | 2023-02-11T18:20:10 | 2023-02-11T18:20:10 | 119,861,038 | 76 | 19 | null | 2020-07-26T03:14:55 | 2018-02-01T16:19:24 | Python | UTF-8 | Python | false | false | 6,695 | py | class ReloaderEventHandler(FileSystemEventHandler):
"""
Listen for changes to modules within the Django project
On change, reload the module in the Python Shell
Custom logic required to reload django models.py modules
Due to the singleton AppCache, which caches model references.
For those models files, we must clear and repopulate the AppCache
"""
def __init__(self, *args, **kwargs):
self.project_root = kwargs.pop('project_root', None)
self.shell_globals = kwargs.pop('shell_globals', None)
self.model_globals = kwargs.pop('model_globals', None)
super(ReloaderEventHandler, self).__init__(*args, **kwargs)
def dispatch(self, event):
event_path = event.src_path
path, file_extension = os.path.splitext(event_path)
if all([
file_extension == '.py',
'shell_plus' not in path,
self.project_root in path
]):
return super(ReloaderEventHandler, self).dispatch(event)
def on_created(self, event):
super(ReloaderEventHandler, self).on_created(event)
self._force_reload(event)
def on_modified(self, event):
"""
Called by dispatch on modification of file in the Django project
"""
super(ReloaderEventHandler, self).on_modified(event)
self._force_reload(event)
def _force_reload(self, event):
"""
Reload the altered module
models.py files and all other python files are handled differently
This is because models are cached by Django in a singleton
We need to clear this singleton to properly reload
"""
cleaned_path = self._clean_path(event.src_path)
path_components = cleaned_path.split(os.path.sep)
if path_components[-1] == 'models':
self._reload_models_module(path_components[-2])
# This redundant call bizarrely seems necessary
# Issue exists around recompiling models.pyc file on 1st attempt
# Subsequent reloads always work
# time.sleep(1)
self._reload_models_module(path_components[-2])
else:
self._reload_module(path_components)
def _clean_path(self, path):
"""Remove the leading project path"""
project_root = self.project_root if self.project_root.endswith('/') else "{}/".format(self.project_root)
path_from_project_root = path.replace(project_root, '')
# Remove trailing ".py" from module for importing purposes
return os.path.splitext(path_from_project_root)[0]
def _reload_module(self, path_components):
"""
Wrapper for __builtin__ reload() function
In addition to reloading the module,
we reset the associated classes in the global scope of the shell.
Consequently, we don't have to manaully reimport (i.e. 'from app import MyClass')
Instead, MyClass will have changed for us automagically
More interestingly, we also dynamically update the classes
of existing object instances in the global scope with `_update_class_instances`.
## In a Shell session
obj = MyKLS()
obj.getchar() --> 'a'
## While still in the Shell,
### We change the function definition of getchar() in the filesytem to return 'b'
### In our Shell, we will see that
obj.getchar() --> 'b'
This behavior is very experimental and possibly dangerous but powerful
Cuts down time and frustration during pdb debugging
"""
# We attempt to import the module from the project root
# This SHOULD be agnostic of app/project structure
while True:
try:
module = importlib.import_module('.'.join(path_components))
except ImportError:
path_components.pop(0)
if not path_components:
return
else:
break
reload(module)
# Reload objects into the global scope
# This has the potential to cause namespace collisions
# The upside is that we don't have to reimport (i.e. from module import ObjName)
for attr in dir(module):
if (
not(attr.startswith('__') and attr.endswith('__'))
and self.shell_globals.get(attr)
):
self.shell_globals[attr] = getattr(module, attr)
self._update_class_instances(module, attr)
def _reload_models_module(self, app_name):
"""
Reload Django models
Based on http://stackoverflow.com/questions/890924/how-do-you-reload-a-django-model-module-using-the-interactive-interpreter-via-m
"""
curdir = os.getcwd()
cache = AppCache()
for app in cache.get_apps():
f = app.__file__
if f.startswith(curdir) and f.endswith('.pyc'):
try:
os.remove(f)
except Exception:
pass
__import__(app.__name__)
reload(app)
cache.app_store = SortedDict()
cache.app_models = SortedDict()
cache.app_errors = {}
cache.handled = {}
cache.loaded = False
# Reset app's models in global scope
# Using a dictionary here instead of cache.get_models(app_name)
# The latter does not seem to work (look into that)
reimported_app = importlib.import_module("{}.models".format(app_name))
model_names = self.model_globals[app_name]
for model_name in model_names:
self.shell_globals[model_name] = getattr(reimported_app, model_name)
self._update_class_instances(reimported_app, model_name)
def _update_class_instances(self, module, attr):
"""
Reset the __class__ of all instances whoses
class has been reloaded into the shell
This allows us to do CRAZY things such as
effectively manipulate an instance's source code
while inside a debugger
"""
module_obj = getattr(module, attr)
if inspect.isclass(module_obj):
for obj in self.shell_globals.values():
# hasattr condition attempts to handle old style classes
# The class __str__ check may not be ideal but it seems to work
# The one exception being if you changes the __str__ method
# of the reloaded object. That edge case is not handled
if hasattr(obj, '__class__') and str(obj.__class__) == str(module_obj):
obj.__class__ = module_obj | [
"gistshub@gmail.com"
] | gistshub@gmail.com |
b9dc54cabc677d71d8007287459dced2f2617cad | 099b57613250ae0a0c3c75cc2a9b8095a5aac312 | /一些总结/测试文件.py | a6b7753c5aaf1e6131ed711b632a56cb8e88a6b0 | [] | no_license | MitsurugiMeiya/Leetcoding | 36e41c8d649b777e5c057a5241007d04ad8f61cd | 87a6912ab4e21ab9be4dd6e90c2a6f8da9c68663 | refs/heads/master | 2022-06-17T19:48:41.692320 | 2020-05-13T16:45:54 | 2020-05-13T16:45:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 305 | py | class ListNode(object):
def __init__(self, x):
self.val = x
self.next = None
class Solution:
def test(self,head):
while head.next != None:
print(head.val)
head = head.next
if __name__ == "__main__":
solution = Solution()
solution.test()
| [
"yifu3@ualberta.ca"
] | yifu3@ualberta.ca |
96485320f651c95c1a482e4192a688a2b0660b02 | 5fe72bb13baf3649058ebe11aa86ad4fc56c69ed | /hard-gists/2577781/snippet.py | e33c008d1679fcecad43252d5649b7c362333a02 | [
"Apache-2.0"
] | permissive | dockerizeme/dockerizeme | 8825fed45ff0ce8fb1dbe34959237e8048900a29 | 408f3fa3d36542d8fc1236ba1cac804de6f14b0c | refs/heads/master | 2022-12-10T09:30:51.029846 | 2020-09-02T13:34:49 | 2020-09-02T13:34:49 | 144,501,661 | 24 | 20 | Apache-2.0 | 2022-11-21T12:34:29 | 2018-08-12T21:21:04 | Python | UTF-8 | Python | false | false | 2,201 | py | #!/usr/bin/env python
# Thread pool based on: http://code.activestate.com/recipes/577187-python-thread-pool/
from queue import Queue
from threading import Thread
from functools import partial
import tornado.httpserver
import tornado.ioloop
import tornado.options
import tornado.web
from tornado import gen
from tornado.ioloop import IOLoop
import himitsu
def make_hash(text):
b = himitsu.Bcrypt()
return b.encode(text)
class WorkerThread(Thread):
def __init__(self, queue):
Thread.__init__(self)
self.queue = queue
self.daemon = True
self.start()
def run(self):
while True:
func, args, kwargs, callback = self.queue.get()
try:
result = func(*args, **kwargs)
if callback is not None:
IOLoop.instance().add_callback(partial(callback, result))
except Exception as e:
print(e)
self.queue.task_done()
class ThreadPool(object):
def __init__(self, num_threads):
self.queue = Queue()
for _ in range(num_threads):
WorkerThread(self.queue)
def add_task(self, func, args=(), kwargs={}, callback=None):
self.queue.put((func, args, kwargs, callback))
def wait_completion(self):
self.queue.join()
class BaseHandler(tornado.web.RequestHandler):
@property
def pool(self):
if not hasattr(self.application, 'pool'):
self.application.pool = ThreadPool(20)
return self.application.pool
class IndexHandler(BaseHandler):
@tornado.web.asynchronous
@gen.engine
def get(self):
result = yield gen.Task(
self.pool.add_task, make_hash, ('Test',)
)
self.write(result)
self.finish()
def main():
try:
tornado.options.parse_command_line()
application = tornado.web.Application([
(r'/', IndexHandler)
], debug=True)
http_server = tornado.httpserver.HTTPServer(application)
http_server.listen(8888)
tornado.ioloop.IOLoop.instance().start()
except KeyboardInterrupt:
print('Exit')
if __name__ == '__main__':
main()
| [
"42325807+dockerizeme@users.noreply.github.com"
] | 42325807+dockerizeme@users.noreply.github.com |
e7f286d32115897aa45a6bdb8da562cfd9ae6f5d | 1eab574606dffb14a63195de994ee7c2355989b1 | /ixnetwork_restpy/testplatform/sessions/ixnetwork/vport/protocols/bmacmappedip_dfnlz21lbnrzl2jnywnnyxbwzwrjca.py | f27128cbf257a11174bdaf3fe003acc13969aa81 | [
"MIT"
] | permissive | steiler/ixnetwork_restpy | 56b3f08726301e9938aaea26f6dcd20ebf53c806 | dd7ec0d311b74cefb1fe310d57b5c8a65d6d4ff9 | refs/heads/master | 2020-09-04T12:10:18.387184 | 2019-11-05T11:29:43 | 2019-11-05T11:29:43 | 219,728,796 | 0 | 0 | null | 2019-11-05T11:28:29 | 2019-11-05T11:28:26 | null | UTF-8 | Python | false | false | 5,867 | py | # MIT LICENSE
#
# Copyright 1997 - 2019 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
class BMacMappedIp(Base):
"""This objects holds all the IP (V4/V6) addresses associated with a B-MAC of an ethernet segment.
The BMacMappedIp class encapsulates a list of bMacMappedIp resources that is be managed by the user.
A list of resources can be retrieved from the server using the BMacMappedIp.find() method.
The list can be managed by the user by using the BMacMappedIp.add() and BMacMappedIp.remove() methods.
"""
__slots__ = ()
_SDM_NAME = 'bMacMappedIp'
def __init__(self, parent):
super(BMacMappedIp, self).__init__(parent)
@property
def Enabled(self):
"""If true then this IP is associated with the B-MAC of the ethernet segment. Default value is false.
Returns:
bool
"""
return self._get_attribute('enabled')
@Enabled.setter
def Enabled(self, value):
self._set_attribute('enabled', value)
@property
def IpAddress(self):
"""IP address value is given here depending on the IP Type. Default value is all zero.
Returns:
str
"""
return self._get_attribute('ipAddress')
@IpAddress.setter
def IpAddress(self, value):
self._set_attribute('ipAddress', value)
@property
def IpType(self):
"""Drop down of {IPv4, IPv6}. If IPv4 is selected then IPv4 address is used. If IPv6 is selected then IPv6 address is used. Default value is IPv4.
Returns:
str(ipV4|ipV6)
"""
return self._get_attribute('ipType')
@IpType.setter
def IpType(self, value):
self._set_attribute('ipType', value)
def update(self, Enabled=None, IpAddress=None, IpType=None):
"""Updates a child instance of bMacMappedIp on the server.
Args:
Enabled (bool): If true then this IP is associated with the B-MAC of the ethernet segment. Default value is false.
IpAddress (str): IP address value is given here depending on the IP Type. Default value is all zero.
IpType (str(ipV4|ipV6)): Drop down of {IPv4, IPv6}. If IPv4 is selected then IPv4 address is used. If IPv6 is selected then IPv6 address is used. Default value is IPv4.
Raises:
ServerError: The server has encountered an uncategorized error condition
"""
self._update(locals())
def add(self, Enabled=None, IpAddress=None, IpType=None):
"""Adds a new bMacMappedIp node on the server and retrieves it in this instance.
Args:
Enabled (bool): If true then this IP is associated with the B-MAC of the ethernet segment. Default value is false.
IpAddress (str): IP address value is given here depending on the IP Type. Default value is all zero.
IpType (str(ipV4|ipV6)): Drop down of {IPv4, IPv6}. If IPv4 is selected then IPv4 address is used. If IPv6 is selected then IPv6 address is used. Default value is IPv4.
Returns:
self: This instance with all currently retrieved bMacMappedIp data using find and the newly added bMacMappedIp data available through an iterator or index
Raises:
ServerError: The server has encountered an uncategorized error condition
"""
return self._create(locals())
def remove(self):
"""Deletes all the bMacMappedIp data in this instance from server.
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
self._delete()
def find(self, Enabled=None, IpAddress=None, IpType=None):
"""Finds and retrieves bMacMappedIp data from the server.
All named parameters support regex and can be used to selectively retrieve bMacMappedIp data from the server.
By default the find method takes no parameters and will retrieve all bMacMappedIp data from the server.
Args:
Enabled (bool): If true then this IP is associated with the B-MAC of the ethernet segment. Default value is false.
IpAddress (str): IP address value is given here depending on the IP Type. Default value is all zero.
IpType (str(ipV4|ipV6)): Drop down of {IPv4, IPv6}. If IPv4 is selected then IPv4 address is used. If IPv6 is selected then IPv6 address is used. Default value is IPv4.
Returns:
self: This instance with matching bMacMappedIp data retrieved from the server available through an iterator or index
Raises:
ServerError: The server has encountered an uncategorized error condition
"""
return self._select(locals())
def read(self, href):
"""Retrieves a single instance of bMacMappedIp data from the server.
Args:
href (str): An href to the instance to be retrieved
Returns:
self: This instance with the bMacMappedIp data from the server available through an iterator or index
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
return self._read(href)
| [
"srvc_cm_packages@keysight.com"
] | srvc_cm_packages@keysight.com |
280180605538ca9ee2017ad7f97de20f1a449c35 | 74549d7c57b4746ac2a9c275aa12bfc577b0e8af | /funny_string.py | ade6aa44436eeb9690be1bfa5f29809cbb0e93ea | [] | no_license | abidkhan484/hackerrank_solution | af9dbf6ec1ead920dc18df233f40db0c867720b4 | b0a98e4bdfa71a4671999f16ab313cc5c76a1b7a | refs/heads/master | 2022-05-02T11:13:29.447127 | 2022-04-13T03:02:59 | 2022-04-13T03:02:59 | 99,207,401 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 661 | py | #!/bin/python3
n = int(input().strip())
for i in range(n):
s = input().strip()
# tmp list, to get the consequtive subtraction of the s list(string)
s = list(s)
tmp = []
l = len(s)
for j in range(1, l):
temp = abs(ord(s[j]) - ord(s[j-1]))
tmp.append(temp)
# now reverse operation occer and check tmp with the reverse,
# consequtive subtractions
s.reverse()
for j in range(1, l):
temp = abs(ord(s[j]) - ord(s[j-1]))
if temp != tmp[j-1]:
break
# if j is checked till the last, then print funny
if j == l-1:
print("Funny")
else:
print("Not Funny")
| [
"abidkhan484@gmail.com"
] | abidkhan484@gmail.com |
ce7b7d741e6cdf375737b61908957bd3e62f89a9 | 7b2a3ea853dc44aea204f02abedaad6a2029f4ff | /preprocess_4mem2d.py | 4511128676f7bf408c9f199ba12a43ca4469807f | [] | no_license | NoisyLeon/SW4Py | 7d45503282dc988b5f886c039706bd79fdd6b339 | 7029f18eb526bcb46b4aa244da1e088ca57a56aa | refs/heads/master | 2020-12-22T14:57:11.265397 | 2016-12-20T18:27:18 | 2016-12-20T18:27:18 | 56,792,735 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,742 | py | import vmodel, stations
# SLst=stations.StaLst()
# # SLst.HomoStaLst(xmin=20000, Nx=149, dx=20000, ymin=20000, Ny=29, dy=20000)
# SLst.LineStaLst(xmin=300000, Nx=133, dx=20000, y=300000)
# SLst.WriteStaList('/lustre/janus_scratch/life9360/sw4_working_dir_4mem2d/station_4mem2d.lst')
# # SLst.Write2Input(infname='/lustre/janus_scratch/life9360/sw4_working_dir_4mem2d/single_ring_basin.in')
# SLst.Write2Input(infname='/lustre/janus_scratch/life9360/sw4_working_dir_4mem2d/single_homo_basin.in')
# #
rmodel=vmodel.rModel()
rmodel.ak135(zmin=0., zmax=100., ni=3001, nj=601, hh=1000., hv=1000., CPS=True)
rmodel.checkInput('/lustre/janus_scratch/life9360/sw4_working_dir_4mem2d/single_staircase_basin.in')
# rmodel.checkInput('/lustre/janus_scratch/life9360/sw4_working_dir_4mem2d/single_homo_basin.in')
# # rmodel.CylinderCosineAnomaly(x0=2300000, y0=300000, R=100000, dm=0.1, mname='vs', zmin=0, zmax=20000, nb=2)
# # rmodel.CylinderCosineAnomaly(x0=2300000, y0=300000, R=100000, dm=0.1, mname='vp', zmin=0, zmax=20000, nb=2)
# # rmodel.CylinderCosineAnomaly(x0=2300000, y0=300000, R=100000, dm=0.1, mname='rho', zmin=0, zmax=20000, nb=2)
# # rmodel.CylinderCosineAnomaly(x0=700000, y0=300000, R=100000, dm=-0.1, mname='vs', zmin=0, zmax=20000, nb=2)
# # rmodel.CylinderCosineAnomaly(x0=700000, y0=300000, R=100000, dm=-0.1, mname='vp', zmin=0, zmax=20000, nb=2)
# # rmodel.CylinderCosineAnomaly(x0=700000, y0=300000, R=100000, dm=-0.1, mname='rho', zmin=0, zmax=20000, nb=2)
# rmodel.CynlinderRingBasin(x0=1100000, y0=300000, zmax=4000., Rmax=200000., vs=2000., outfname='./cpsin.txt')
# rmodel.CylinderHomoAnomaly(x0=1100000, y0=300000, zmax=4000., R=200000., dm=-0.1)
rmodel.CylinderLinearDepthAnomalyAll(x0=1100000, y0=300000, R=100000, vt=2000., vb=3000., zmax=5000, zmin=0, nb=None, outfname='cpsin_staircase.txt')
# rmodel.CylinderHomoSediment(x0=1100000, y0=300000, R=100000, zmax=3000, vs=2000.)
#
# # rmodel.CylinderHomoAnomaly(x0=2300000, y0=300000, R=100000, dm=0.1, mname='vs', zmin=0, zmax=20000, nb=2)
# # rmodel.CylinderHomoAnomaly(x0=2300000, y0=300000, R=100000, dm=0.1, mname='vp', zmin=0, zmax=20000, nb=2)
# # rmodel.CylinderHomoAnomaly(x0=2300000, y0=300000, R=100000, dm=0.1, mname='rho', zmin=0, zmax=20000, nb=2)
# # rmodel.CylinderHomoAnomaly(x0=700000, y0=300000, R=100000, dm=-0.1, mname='vs', zmin=0, zmax=20000, nb=2)
# # rmodel.CylinderHomoAnomaly(x0=700000, y0=300000, R=100000, dm=-0.1, mname='vp', zmin=0, zmax=20000, nb=2)
# # rmodel.CylinderHomoAnomaly(x0=700000, y0=300000, R=100000, dm=-0.1, mname='rho', zmin=0, zmax=20000, nb=2)
# rmodel.writeVprofile('./cpsinput_4km_0.3.txt')
#
rmodel.write('/lustre/janus_scratch/life9360/sw4_working_dir_4mem2d/single_staircase_basin.rfile')
#
| [
"lili.feng@colorado.edu"
] | lili.feng@colorado.edu |
9fedd6c3390c1fce9081e9c32411b6ec4b73d856 | ba0a2b0d2d1534443ea34320675aadfa378457b6 | /String/Q843_Guess the Word.py | 14717252223355cffe90ae93b5a66190717a6e68 | [] | no_license | Luolingwei/LeetCode | 73abd58af116f3ec59fd6c76f662beb2a413586c | 79d4824879d0faed117eee9d99615cd478432a14 | refs/heads/master | 2021-08-08T17:45:19.215454 | 2021-06-17T17:03:15 | 2021-06-17T17:03:15 | 152,186,910 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,444 | py |
# 思路1: 每次随机选择一个word, 得到score, 留下list中和当前guess match结果为score的word
# 思路2: 因为最有可能的情况是一个字符都不match, 概率为 (25/26)^6 = 80%, 所以最有可能的情况是首先guess到match score=0,
# 可以从candidates中选择和其他word的 match score = 0 数量最小的word, 这样candidates 集合减小最快
# O(N^2)
# 思路3: 根据char frequency的总和 选择family 最大的那个word, 这样一旦guess到match score=0, 所有和这个word有重叠的candidate都会被剔除掉
# 理论上guess所有位置freq最大char组成的word, 可以一次剔除最多的candidate, 但是本题只能在wordlist中guess
# O(N)
# """
# This is Master's API interface.
# You should not implement it, or speculate about its implementation
# """
# class Master:
# def guess(self, word: str) -> int:
import random
from collections import Counter
class Solution:
def findSecretWord1(self, wordlist, master):
def match(x, y):
return sum(x[i] == y[i] for i in range(len(x)))
for _ in range(10):
word = random.choice(wordlist)
score = master.guess(word)
wordlist = [w for w in wordlist if match(w, word) == score]
def findSecretWord2(self, wordlist, master):
def match(x, y):
return sum(x[i] == y[i] for i in range(len(x)))
def select(wordlist):
count = Counter()
for i in range(len(wordlist)):
for j in range(len(wordlist)):
if j != i and match(wordlist[i], wordlist[j]) == 0:
count[i] += 1
return min(range(len(wordlist)), key=lambda x: count[x])
for _ in range(10):
word = wordlist[select(wordlist)]
score = master.guess(word)
wordlist = [w for w in wordlist if match(w, word) == score]
def findSecretWord3(self, wordlist, master) -> None:
def match(x, y):
return sum(x[i] == y[i] for i in range(len(x)))
def select(wordlist):
freqs = [Counter(w[i] for w in wordlist) for i in range(6)]
word = max(wordlist, key=lambda x: sum(freqs[i][c] for i, c in enumerate(x)))
return word
for _ in range(10):
word = select(wordlist)
score = master.guess(word)
wordlist = [w for w in wordlist if match(w, word) == score] | [
"564258080@qq.com"
] | 564258080@qq.com |
40bc42df545f07ac7d1662bc6dfebf1fdf0c95a7 | 107e869bc298c74bf2418b53b630ca57c00bc68b | /src/repro/model/densenet201.py | 977eb43ebc36e871f5c9e9ee61ac995971421ec5 | [
"BSD-3-Clause"
] | permissive | bouthilx/repro | 699f1f635872507bd054b57ec03140f998a9f7d1 | 611734e4eddd6a76dd4c1e7114a28a634a2a75c1 | refs/heads/dev | 2020-04-18T03:26:55.670831 | 2019-01-30T17:56:21 | 2019-01-30T17:56:21 | 167,199,050 | 0 | 0 | BSD-3-Clause | 2019-01-23T19:48:35 | 2019-01-23T14:44:15 | Python | UTF-8 | Python | false | false | 283 | py | from repro.model.densenet import DenseNet, distribute
def build(input_size, num_classes, distributed=0):
model = DenseNet(input_size=input_size, num_init_features=64, growth_rate=32,
block_config=(6, 12, 48, 32))
return distribute(model, distributed)
| [
"xavier.bouthillier@umontreal.ca"
] | xavier.bouthillier@umontreal.ca |
b64d25c8ad3b2647cc130320042a122d807f1f72 | 50518b396163f9ee07e762cc30ec86a49c35782c | /DACON_LG_블럭장난감제조공정/baseline/module/simulator.py | 57b0815bd4d8b53ad80621dd2ec89028bb09e55e | [] | no_license | madfalc0n/Dacon_AI | 0185fe4cfd7ba9716b7420d7f464f6901be7382e | 68f9aaab2a7fcc0e634bce67199d8b22049c4a09 | refs/heads/master | 2023-03-23T08:39:34.910144 | 2021-03-09T10:28:05 | 2021-03-09T10:28:05 | 274,060,029 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,772 | py | import os
import pandas as pd
import numpy as np
from pandas.api.types import is_numeric_dtype
from pathlib import Path
class Simulator:
def __init__(self):
self.sample_submission = pd.read_csv(os.path.join(Path(__file__).resolve().parent, 'sample_submission.csv'))
self.max_count = pd.read_csv(os.path.join(Path(__file__).resolve().parent, 'max_count.csv'))
self.stock = pd.read_csv(os.path.join(Path(__file__).resolve().parent, 'stock.csv'))
order = pd.read_csv(os.path.join(Path(__file__).resolve().parent, 'order.csv'), index_col=0)
order.index = pd.to_datetime(order.index)
self.order = order
def get_state(self, data):
if 'CHECK' in data:
return int(data[-1])
elif 'CHANGE' in data:
return int(data[-1])
else:
return np.nan
def cal_schedule_part_1(self, df):
columns = ['PRT_1', 'PRT_2', 'PRT_3', 'PRT_4']
df_set = df[columns]
df_out = df_set * 0
p = 0.985
dt = pd.Timedelta(days=23)
end_time = df_out.index[-1]
for time in df_out.index:
out_time = time + dt
if end_time < out_time:
break
else:
for column in columns:
set_num = df_set.loc[time, column]
if set_num > 0:
out_num = np.sum(np.random.choice(2, set_num, p=[1-p, p]))
df_out.loc[out_time, column] = out_num
df_out['MOL_1'] = 0.0
df_out['MOL_2'] = 0.0
df_out['MOL_3'] = 0.0
df_out['MOL_4'] = 0.0
df_out['BLK_1'] = 0.0
df_out['BLK_2'] = 0.0
df_out['BLK_3'] = 0.0
df_out['BLK_4'] = 0.0
return df_out
def cal_schedule_part_2(self, df, line='A'):
if line == 'A':
columns = ['Event_A', 'MOL_A']
elif line == 'B':
columns = ['Event_B', 'MOL_B']
else:
columns = ['Event_A', 'MOL_A']
schedule = df[columns].copy()
schedule['state'] = 0
schedule['state'] = schedule[columns[0]].apply(lambda x: self.get_state(x))
schedule['state'] = schedule['state'].fillna(method='ffill')
schedule['state'] = schedule['state'].fillna(0)
schedule_process = schedule.loc[schedule[columns[0]]=='PROCESS']
df_out = schedule.drop(schedule.columns, axis=1)
df_out['PRT_1'] = 0.0
df_out['PRT_2'] = 0.0
df_out['PRT_3'] = 0.0
df_out['PRT_4'] = 0.0
df_out['MOL_1'] = 0.0
df_out['MOL_2'] = 0.0
df_out['MOL_3'] = 0.0
df_out['MOL_4'] = 0.0
p = 0.975
times = schedule_process.index
for i, time in enumerate(times):
value = schedule.loc[time, columns[1]]
state = int(schedule.loc[time, 'state'])
df_out.loc[time, 'PRT_'+str(state)] = -value
if i+48 < len(times):
out_time = times[i+48]
df_out.loc[out_time, 'MOL_'+str(state)] = value*p
df_out['BLK_1'] = 0.0
df_out['BLK_2'] = 0.0
df_out['BLK_3'] = 0.0
df_out['BLK_4'] = 0.0
return df_out
def cal_stock(self, df, df_order):
df_stock = df * 0
blk2mol = {}
blk2mol['BLK_1'] = 'MOL_1'
blk2mol['BLK_2'] = 'MOL_2'
blk2mol['BLK_3'] = 'MOL_3'
blk2mol['BLK_4'] = 'MOL_4'
cut = {}
cut['BLK_1'] = 506
cut['BLK_2'] = 506
cut['BLK_3'] = 400
cut['BLK_4'] = 400
p = {}
p['BLK_1'] = 0.851
p['BLK_2'] = 0.901
blk_diffs = []
for i, time in enumerate(df.index):
month = time.month
if month == 4:
p['BLK_3'] = 0.710
p['BLK_4'] = 0.700
elif month == 5:
p['BLK_3'] = 0.742
p['BLK_4'] = 0.732
elif month == 6:
p['BLK_3'] = 0.759
p['BLK_4'] = 0.749
else:
p['BLK_3'] = 0.0
p['BLK_4'] = 0.0
if i == 0:
df_stock.iloc[i] = df.iloc[i]
else:
df_stock.iloc[i] = df_stock.iloc[i-1] + df.iloc[i]
for column in df_order.columns:
val = df_order.loc[time, column]
if val > 0:
mol_col = blk2mol[column]
mol_num = df_stock.loc[time, mol_col]
df_stock.loc[time, mol_col] = 0
blk_gen = int(mol_num*p[column]*cut[column])
blk_stock = df_stock.loc[time, column] + blk_gen
blk_diff = blk_stock - val
df_stock.loc[time, column] = blk_diff
blk_diffs.append(blk_diff)
return df_stock, blk_diffs
def subprocess(self, df):
out = df.copy()
column = 'time'
out.index = pd.to_datetime(out[column])
out = out.drop([column], axis=1)
out.index.name = column
return out
def add_stock(self, df, df_stock):
df_out = df.copy()
for column in df_out.columns:
df_out.iloc[0][column] = df_out.iloc[0][column] + df_stock.iloc[0][column]
return df_out
def order_rescale(self, df, df_order):
df_rescale = df.drop(df.columns, axis=1)
dt = pd.Timedelta(hours=18)
for column in ['BLK_1', 'BLK_2', 'BLK_3', 'BLK_4']:
for time in df_order.index:
df_rescale.loc[time+dt, column] = df_order.loc[time, column]
df_rescale = df_rescale.fillna(0)
return df_rescale
def cal_score(self, blk_diffs):
# Block Order Difference
blk_diff_m = 0
blk_diff_p = 0
for item in blk_diffs:
if item < 0:
blk_diff_m = blk_diff_m + abs(item)
if item > 0:
blk_diff_p = blk_diff_p + abs(item)
score = blk_diff_m + blk_diff_p
return score
def get_score(self, df):
df = self.subprocess(df)
out_1 = self.cal_schedule_part_1(df)
out_2 = self.cal_schedule_part_2(df, line='A')
out_3 = self.cal_schedule_part_2(df, line='B')
out = out_1 + out_2 + out_3
out = self.add_stock(out, self.stock)
order = self.order_rescale(out, self.order)
out, blk_diffs = self.cal_stock(out, order)
score = self.cal_score(blk_diffs)
return score, out
| [
"chadool116@naver.com"
] | chadool116@naver.com |
0d4e4f9ccb9e3e351670700962ce088e8e6fff3e | d0758e0ca004226cec8ad8b26c9565c98534a8b8 | /02-core/06-lists/solutions/comprehension.py | 37759fc84e7eed5b988af80934a5df00fc85234d | [] | no_license | pythoncanarias/eoi | 334d64a96afc76ac1fa10282378f291b6d8c94b3 | 349367254f85e3e4273cede067ca950913a1332c | refs/heads/master | 2023-07-06T08:00:11.366345 | 2023-06-30T15:19:33 | 2023-06-30T15:19:33 | 222,742,870 | 26 | 19 | null | 2023-06-25T16:03:46 | 2019-11-19T16:41:25 | Jupyter Notebook | UTF-8 | Python | false | false | 48 | py | f = [3 * x + 2 for x in range(20)]
print(f)
| [
"euribates@gmail.com"
] | euribates@gmail.com |
2c6f87182a68ad85ede0cd34bc337b5fcded27ab | b08d42933ac06045905d7c005ca9c114ed3aecc0 | /src/coefSubset/evaluate/ranks/twentyPercent/rank_4eig_B.py | 02617609224e47a8260dfad0e57980bf1975417d | [] | no_license | TanemuraKiyoto/PPI-native-detection-via-LR | d148d53f5eb60a4dda5318b371a3048e3f662725 | 897e7188b0da94e87126a4acc0c9a6ff44a64574 | refs/heads/master | 2022-12-05T11:59:01.014309 | 2020-08-10T00:41:17 | 2020-08-10T00:41:17 | 225,272,083 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,392 | py | # 9 July 2019
# Kiyoto Aramis Tanemura
# Several metrics are used to assess the performance of the trained RF model, notably native ranking. This script returns a ranking of the native protein-protein complex among a decoy set. For convenience, I will define as a function and will call in a general performance assessment script.
# Modified 11 July 2019 by Kiyoto Aramis Tanemura. To parallelize the process, I will replace the for loop for the testFileList to a multiprocessing pool.
# Modified 9 September 2019 by Kiyoto Aramis Tanemura. I will use the function to perform the calculation on one CSV file only. Thus instead of a function to import in other scripts, they will be individual jobs parallelized as individual jobs in the queue.
import os
import pandas as pd
import numpy as np
import pickle
os.chdir('/mnt/scratch/tanemur1/')
# Read the model and trainFile
testFile = '4eig.csv'
identifier = 'B'
coefFrac = 0.2
testFilePath = '/mnt/scratch/tanemur1/CASF-PPI/nonb_descriptors/complete/'
modelPath = '/mnt/home/tanemur1/6May2019/2019-11-11/results/coefSubset/twentyPercent/'
outputPath = '/mnt/home/tanemur1/6May2019/2019-11-11/results/coefSubset/evaluate/twentyPercent/ranks/'
pdbID = testFile[:4]
with open(modelPath + 'model' + identifier + '.pkl', 'rb') as f:
clf = pickle.load(f)
result = pd.DataFrame()
scoreList = []
df1 = pd.read_csv(testFilePath + testFile)
dropList = ['Unnamed: 0', 'Unnamed: 0.1', 'ref']
df1 = df1.drop(dropList, axis = 1)
df1 = df1.set_index('Pair_name')
df1 = pd.DataFrame(df1.values.T, columns = df1.index, index = df1.columns)
df1.fillna(0.0, inplace = True)
#df1 = df1.reindex(sorted(df1.columns), axis = 1)
# Keep coefficients within the given fraction when ordered by decreasing order of coefficient magnitude
coefs = pd.read_csv('/mnt/home/tanemur1/6May2019/2019-11-11/results/medianCoefs.csv', index_col = 0, header = None, names = ['coefficients'])
coefs['absVal'] = np.abs(coefs['coefficients'])
coefs.sort_values(by = 'absVal', ascending = False, inplace = True)
coefs = coefs[:int(14028 * coefFrac + 0.5)]
keepList = list(coefs.index)
del coefs
df1 = df1[keepList]
df1 = df1.reindex(sorted(df1.columns), axis = 1)
with open(modelPath + 'standardScaler' + identifier + '.pkl', 'rb') as g:
scaler = pickle.load(g)
for i in range(len(df1)):
# subtract from one row each row of the dataframe, then remove the trivial row[[i]] - row[[i]]. Also some input files have 'class' column. This is erroneous and is removed.
df2 = pd.DataFrame(df1.iloc[[i]].values - df1.values, index = df1.index, columns = df1.columns)
df2 = df2.drop(df1.iloc[[i]].index[0], axis = 0)
# Standardize inut DF using the standard scaler used for training data.
df2 = scaler.transform(df2)
# Predict class of each comparison descriptor and sum the classes to obtain score. Higher score corresponds to more native-like complex
predictions = clf.predict(df2)
score = sum(predictions)
scoreList.append(score)
# Make a new DataFrame to store the score and corresponding descriptorID. Add rank as column. Note: lower rank corresponds to more native-like complex
result = pd.DataFrame(data = {'score': scoreList}, index = df1.index.tolist()).sort_values(by = 'score', ascending = False)
result['rank'] = range(1, len(result) + 1)
with open(outputPath + pdbID + identifier + '.csv', 'w') as h:
result.to_csv(h)
| [
"tanemur1@msu.edu"
] | tanemur1@msu.edu |
915b37b464c9f5b749ec2d2ea2c391ad6ad174d3 | acb8e84e3b9c987fcab341f799f41d5a5ec4d587 | /langs/3/gLt.py | 5e286227723bdcb075163be345cff8d7a82aa4fa | [] | no_license | G4te-Keep3r/HowdyHackers | 46bfad63eafe5ac515da363e1c75fa6f4b9bca32 | fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2 | refs/heads/master | 2020-08-01T12:08:10.782018 | 2016-11-13T20:45:50 | 2016-11-13T20:45:50 | 73,624,224 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | import sys
def printFunction(lineRemaining):
    # Print the contents of a double-quoted string literal, given as a list of
    # whitespace-split tokens whose first and last tokens are bare '"' marks.
    # An "empty" literal (just the two quote tokens) prints a blank line.
    # NOTE: Python 2 print-statement syntax throughout this file.
    if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
        if len(lineRemaining) > 2:
            #data to print
            lineRemaining = lineRemaining[1:-1]
            print ' '.join(lineRemaining)
        else:
            print
def main(fileName):
    # Interpret the toy language file line by line: lines starting with the
    # keyword 'gLT' are print commands, anything else aborts with ERROR.
    # NOTE(review): a blank line makes data[0] raise IndexError -- the parser
    # assumes every line has at least one token; confirm input format.
    with open(fileName) as f:
        for line in f:
            data = line.split()
            if data[0] == 'gLT':
                printFunction(data[1:])
            else:
                print 'ERROR'
                return
# Script entry point: the source file to interpret is the first CLI argument.
if __name__ == '__main__':
    main(sys.argv[1])
"juliettaylorswift@gmail.com"
] | juliettaylorswift@gmail.com |
c6bf383d8e5a35af168c40a96a2535f5a4986f6c | 306afd5282d9c24d58297478a1728a006c29e57e | /lintcode/lintcode_040_Implement_Queue by_Two_Stacks.py | 9f608da5577673762929157d354418a80e4c236b | [] | no_license | ytatus94/Leetcode | d2c1fe3995c7a065139f772569485dc6184295a9 | 01ee75be4ec9bbb080f170cb747f3fc443eb4d55 | refs/heads/master | 2023-06-08T17:32:34.439601 | 2023-05-29T04:33:19 | 2023-05-29T04:33:19 | 171,921,974 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,047 | py | class MyQueue:
    def __init__(self):
        # do intialization if necessary
        self.stack1 = []  # holds the queued elements; index 0 is the queue front
        self.stack2 = []  # temporary holding area for stack1's contents during pop
    """
    @param: element: An integer
    @return: nothing
    """
    def push(self, element):
        # write your code here
        # Enqueue: new elements go to the back (end of stack1).
        self.stack1.append(element)
    """
    @return: An integer
    """
def pop(self):
# write your code here
# 只看 stack1 就好
# 把 stack1 的內容都拿出來放到 stack2
# 這樣在 stack2 內的順序正好和 stack1 顛倒
while len(self.stack1) > 0:
self.stack2.append(self.stack1.pop())
res = self.stack2.pop() # stack2 的最後一個就是 stack1 的第一個
# 再把全部塞回 stack1
while len(self.stack2) > 0:
self.stack1.append(self.stack2.pop())
return res
"""
@return: An integer
"""
def top(self):
# write your code here
return self.stack1[0] # 題目說 top 傳回的是第一個元素
| [
"noreply@github.com"
] | ytatus94.noreply@github.com |
d4295b1a86339eafa1623e827e5b977d4aa665b6 | 10c26e25f7da2289d50b1138b7da48bf9a02d42f | /Oj/users/views.py | 2058ad4470ff413c387bbdd003f3b5c7b3b3942e | [] | no_license | ParitoshAggarwal/OJ | e1392a02dd95d42b4d72ba69b891db9df5e406ad | 1a4acb5e620b0575d744fd8e4c13148062d1670c | refs/heads/master | 2022-10-19T21:18:02.512008 | 2017-12-27T06:53:46 | 2017-12-27T06:53:46 | 97,516,099 | 0 | 1 | null | 2022-10-13T00:05:44 | 2017-07-17T19:50:06 | JavaScript | UTF-8 | Python | false | false | 3,067 | py | from django.contrib.auth.models import User
from django.shortcuts import render, redirect, get_object_or_404
from django.views.generic import View
from .modelForms import CoderForm, UserForm
from django.contrib.auth import authenticate, login
from django.contrib.auth import logout
from django.views.generic.edit import UpdateView
from .models import Coder
from django.urls import reverse
class UserFormView(View):
    """Registration view: renders and processes the combined User + Coder forms."""
    form_class1 = UserForm   # built-in auth user fields (username/password/email)
    form_class2 = CoderForm  # site-specific profile fields
    template_name = 'users/reg_form.html'
    def get(self, request):
        # Unbound forms for a fresh registration page.
        form1 = self.form_class1(None)
        form2 = self.form_class2(None)
        return render(request, self.template_name,
                      {'form1' : form1,'form2':form2})
    def post(self, request):
        form1=self.form_class1(request.POST)
        form2 = self.form_class2(request.POST)
        if form1.is_valid() and form2.is_valid():
            # commit=False: build the objects without hitting the DB yet, so
            # the password can be hashed and the coder linked to the user.
            user = form1.save(commit=False)
            coder = form2.save(commit=False)
            username = form1.cleaned_data['username']
            password = form1.cleaned_data['password']
            # NOTE(review): return value is unused -- presumably called only
            # for its validation side effect; confirm against UserForm.
            email = form1.clean_email()
            user.set_password(password)
            user.save()
            # Attach the profile to the freshly saved user before saving it.
            coder.user = user
            coder.save()
            # Log the new user straight in after successful registration.
            user = authenticate(username = username,password = password)
            if user is not None:
                if user.is_active:
                    login(request, user)
                    return redirect('/')
        # Re-render with bound forms so validation errors are shown.
        return render(request, self.template_name, {'form1': form1, 'form2': form2})
class MainPageView(View):
    """Render the landing page, passing the current user when authenticated."""
    template_name = 'main_page.html'
    def get(self, request):
        current = request.user if request.user.is_authenticated() else None
        return render(request, self.template_name, {'user': current})
class LoginPageView(View):
    """Display the login form (GET) and authenticate credentials (POST)."""
    template_name = 'users/login_page.html'
    def get(self, request):
        return render(request, self.template_name)
    def post(self, request):
        # Authenticate against the submitted credentials.
        user = authenticate(username=request.POST['username'],
                            password=request.POST['password'])
        if user is not None and user.is_active:
            login(request, user)
            return redirect('/')
        # Invalid credentials or inactive account: show the form again.
        return render(request, self.template_name)
class LogoutPageView(View):
    """Log the current user out; only POST is accepted."""
    template_name = 'main_page.html'
    def post(self, request):
        # End the session, then send the visitor back to the main page.
        logout(request)
        return redirect('/')
class DetailPageView(View):
    """Public profile page: look up a user by username or return 404."""
    template_name = 'users/detail.html'
    def get(self, request, user):
        profile = get_object_or_404(User, username=user)
        return render(request, self.template_name, {'user': profile})
# edit profile fields other than user id and password
class UserUpdate(UpdateView):
    """Let the logged-in user edit their Coder profile (not user id/password)."""
    model = Coder
    fields = ['institution','city','state','resume']
    def get_object(self, *args, **kwargs):
        # Always edit the Coder attached to the requesting user, never an
        # object selected from the URL.
        return self.request.user.coder
    def get_success_url(self, *args, **kwargs):
        return reverse("home")
| [
"paritoshmait@gmail.com"
] | paritoshmait@gmail.com |
da83acb376af17ee4889f2e2866bb87ad92bc1a7 | 1220f32fbf835e7a853ee7ccc8ca13c215bc79cf | /Kivy_tutorial_files/Kivy_App_Tutorial_00/Box&Buttons/ButtonWidget_06.py | 03565d1cbf7eaf290a6f27c19ecf4a39a966067a | [] | no_license | CyborgVillager/Gui_Tutorial | 6775fc570427b424dc38b56cdaad2c17ef3d178d | 9f39efc3d62bcb2f22bbf1fa9d23ad96a04cc412 | refs/heads/master | 2022-11-06T10:14:44.006658 | 2020-01-11T15:35:14 | 2020-01-11T15:35:14 | 232,301,052 | 1 | 1 | null | 2022-10-21T05:28:32 | 2020-01-07T10:31:01 | Python | UTF-8 | Python | false | false | 370 | py | from kivy.lang import Builder
from kivy.base import runTouchApp
# Build the widget tree from the inline kv-language string and start the Kivy
# event loop (blocks until the window is closed).  The kv markup below is a
# runtime string and must be left exactly as-is.
runTouchApp(Builder.load_string("""
Label:
    Button:
        text:'Jonathan'
        font_size:32
        color:222, 233, 7, 0.96
        size:250,200
        pos:50,100
    Button:
        text:'Joshua'
        font_size:26
        color:.8,.1,0,1
        size:200,100
        pos:75,350
"""))
"almawijonathan@gmail.com"
] | almawijonathan@gmail.com |
223827bb659d011399d16c21781ff4f7c4693e37 | 8253a563255bdd5797873c9f80d2a48a690c5bb0 | /configurationengine/source/plugins/symbian/ConeHCRPlugin/hcrplugin/tests/unittest_reader.py | bbb2421193268f7d267aa23848823b90add748c2 | [] | no_license | SymbianSource/oss.FCL.sftools.depl.swconfigmdw | 4e6ab52bf564299f1ed7036755cf16321bd656ee | d2feb88baf0e94da760738fc3b436c3d5d1ff35f | refs/heads/master | 2020-03-28T10:16:11.362176 | 2010-11-06T14:59:14 | 2010-11-06T14:59:14 | 73,009,096 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,883 | py | #
# Copyright (c) 2009 Nokia Corporation and/or its subsidiary(-ies).
# All rights reserved.
# This component and the accompanying materials are made available
# under the terms of "Eclipse Public License v1.0"
# which accompanies this distribution, and is available
# at the URL "http://www.eclipse.org/legal/epl-v10.html".
#
# Initial Contributors:
# Nokia Corporation - initial contribution.
#
# Contributors:
#
# Description:
#
import os, unittest
from testautomation.utils import hex_to_bindata
from hcrplugin.hcr_reader import HcrReader
from hcrplugin import hcr_exceptions
from hcrplugin.hcrrepository import HcrRecord
class TestHcrReader(unittest.TestCase):
    """Error-path tests for HcrReader's binary repository/record parsing.

    Each repository fixture is a list of hex strings (spaces are cosmetic)
    converted to raw bytes with hex_to_bindata().  The repository layout is:
    a 32-byte header, then 20-byte records, then the LSD (large setting data)
    section.  Each test deliberately corrupts one header/record field and
    asserts the corresponding HcrReader exception.

    NOTE(review): the try/self.fail/except pattern could be written with
    assertRaises, but is kept as-is here.
    """
    def setUp(self):
        self.reader = HcrReader()
    def test_read_repo_with_invalid_record_section_size(self):
        # Record section size: 4 * 20 = 80
        # LSD offset: 32 + 80 = 112
        # LSD size: 0
        data = [
            # Header
            # Record count should be 4, but is 6 here
            "48435266 0200 0300 06000000 70000000",
            "00000000 000000000000000000000000",
            # Record section
            "01000000 01000000 08000000 0000 0000 01000000", # bool
            "02000000 01000000 04000000 0000 0000 85FFFFFF", # int8
            "03000000 01000000 40000000 0000 0000 CC000000", # uint8
            "01000000 02000000 02000000 0000 0000 91CBFFFF", # int16
            ]
        data = ''.join(map(lambda x: hex_to_bindata(x), data))
        try:
            self.reader.parse_repository_from_bindata(data)
            self.fail("Expected exception not raised")
        except hcr_exceptions.InvalidHcrDataSizeError:
            pass
    def test_read_repo_with_invalid_lsd_section_size(self):
        # Record section size: 4 * 20 = 80
        # LSD offset: 32 + 80 = 112
        # LSD size: 0
        data = [
            # Header
            # LSD section size should be 0, but is 40 here
            "48435266 0200 0300 04000000 70000000",
            "28000000 000000000000000000000000",
            # Record section
            "01000000 01000000 08000000 0000 0000 01000000", # bool
            "02000000 01000000 04000000 0000 0000 85FFFFFF", # int8
            "03000000 01000000 40000000 0000 0000 CC000000", # uint8
            "01000000 02000000 02000000 0000 0000 91CBFFFF", # int16
            ]
        data = ''.join(map(lambda x: hex_to_bindata(x), data))
        try:
            self.reader.parse_repository_from_bindata(data)
            self.fail("Expected exception not raised")
        except hcr_exceptions.InvalidHcrDataSizeError:
            pass
    def test_read_repo_with_invalid_lsd_section_offset(self):
        # Record section size: 2 * 20 = 40
        # LSD offset: 32 + 40 = 72
        # LSD size: 8 + 8 = 16
        data = [
            # Header, LSD offset here is 60
            "48435266 0200 0300 02000000 3C000000",
            "10000000 000000000000000000000000",
            # Record section
            "01000000 01000000 00000001 0000 0800 00000000", # int64, lsd pos = (0, 8)
            "02000000 01000000 00000002 0000 0800 08000000", # uint64, lsd pos = (8, 8)
            # LSD section
            "FC73 978B B823 D47F", # 8 bytes
            "14FD 32B4 F410 2295", # 8 bytes
            ]
        data = ''.join(map(lambda x: hex_to_bindata(x), data))
        try:
            self.reader.parse_repository_from_bindata(data)
            self.fail("Expected exception not raised")
        except hcr_exceptions.InvalidLsdSectionOffsetError:
            pass
    def test_read_repo_with_invalid_lsd_pos_in_record(self):
        # Record section size: 2 * 20 = 40
        # LSD offset: 32 + 40 = 72
        # LSD size: 8 + 8 = 16
        data = [
            # Header
            "48435266 0200 0300 02000000 48000000",
            "10000000 000000000000000000000000",
            # Record section
            "01000000 01000000 00000001 0000 0800 00000000", # int64, lsd pos = (0, 8)
            "02000000 01000000 00000002 0000 0800 0C000000", # uint64, lsd pos = (12, 8), should be (8, 8)
            # LSD section
            "FC73 978B B823 D47F", # 8 bytes
            "14FD 32B4 F410 2295", # 8 bytes
            ]
        data = ''.join(map(lambda x: hex_to_bindata(x), data))
        try:
            self.reader.parse_repository_from_bindata(data)
            self.fail("Expected exception not raised")
        except hcr_exceptions.InvalidRecordLsdPositionError:
            pass
    def test_read_repo_with_invalid_record_value_type(self):
        # Record section size: 2 * 20 = 40
        # LSD offset: 32 + 40 = 72
        # LSD size: 8 + 8 = 16
        data = [
            # Header
            "48435266 0200 0300 02000000 48000000",
            "10000000 000000000000000000000000",
            # Record section
            "01000000 01000000 00000001 0000 0800 00000000", # int64, lsd pos = (0, 8)
            "02000000 01000000 DEADBEEF 0000 0800 0C000000", # invalid type
            # LSD section
            "FC73 978B B823 D47F", # 8 bytes
            "14FD 32B4 F410 2295", # 8 bytes
            ]
        data = ''.join(map(lambda x: hex_to_bindata(x), data))
        try:
            self.reader.parse_repository_from_bindata(data)
            self.fail("Expected exception not raised")
        except hcr_exceptions.InvalidRecordValueTypeError:
            pass
    # Shared driver: parsing LSD data of the wrong length for the given value
    # type must raise InvalidRecordLsdPositionError.
    def _run_test_read_record_with_invalid_lsd_size(self, value_type, lsd_data):
        try:
            self.reader.parse_record_value_from_lsd_bindata(value_type, lsd_data)
            self.fail("Expected exception not raised")
        except hcr_exceptions.InvalidRecordLsdPositionError:
            pass
    # The following four fixtures are 7 bytes, one short of a valid 8-byte unit.
    def test_read_record_with_invalid_lsd_size_int64(self):
        data = hex_to_bindata("0000 0000 0000 00")
        self._run_test_read_record_with_invalid_lsd_size(HcrRecord.VALTYPE_INT64, data)
    def test_read_record_with_invalid_lsd_size_uint64(self):
        data = hex_to_bindata("0000 0000 0000 00")
        self._run_test_read_record_with_invalid_lsd_size(HcrRecord.VALTYPE_UINT64, data)
    def test_read_record_with_invalid_lsd_size_arrayint32(self):
        data = hex_to_bindata("0000 0000 0000 00")
        self._run_test_read_record_with_invalid_lsd_size(HcrRecord.VALTYPE_ARRAY_INT32, data)
    def test_read_record_with_invalid_lsd_size_arrayuint32(self):
        data = hex_to_bindata("0000 0000 0000 00")
        self._run_test_read_record_with_invalid_lsd_size(HcrRecord.VALTYPE_ARRAY_UINT32, data)
    def test_read_record_with_invalid_data_size(self):
        # A record must be exactly 20 bytes; 4 characters is far too short.
        try:
            self.reader.parse_record_from_bindata('1234')
            self.fail("Parsing invalid record data succeeded!")
        except hcr_exceptions.HcrReaderError:
            pass
    def test_read_signed_integer_in_record(self):
        #Test that padding bytes don't matter when reading the type
        def check(record, data):
            self.assertEquals(self.reader.parse_record_from_bindata(data)[0], record)
        r = HcrRecord(HcrRecord.VALTYPE_INT8, -123, 12, 43, 5)
        d = hex_to_bindata("0C000000 2B000000 04000000 0500 0000 85FFFFFF")
        check(r, d)
        d = hex_to_bindata("0C000000 2B000000 04000000 0500 0000 85000000")
        check(r, d)
        r = HcrRecord(HcrRecord.VALTYPE_INT16, -12345, 12, 43, 5)
        d = hex_to_bindata("0C000000 2B000000 02000000 0500 0000 C7CFFFFF")
        check(r, d)
        d = hex_to_bindata("0C000000 2B000000 02000000 0500 0000 C7CF0000")
        check(r, d)
| [
"none@none"
] | none@none |
1d780768995828113deaed921b00fc415c7672d7 | 2a3743ced45bd79826dcdc55f304da049f627f1b | /venv/lib/python3.7/site-packages/bokeh/document/document.py | 8563f505ee23e2082c852a86dd0190def1b76933 | [
"MIT"
] | permissive | Dimasik007/Deribit_funding_rate_indicator | 12cc8cd7c0be564d6e34d9eae91940c62492ae2a | 3251602ae5249069489834f9afb57b11ff37750e | refs/heads/master | 2023-05-26T10:14:20.395939 | 2019-08-03T11:35:51 | 2019-08-03T11:35:51 | 198,705,946 | 5 | 3 | MIT | 2023-05-22T22:29:24 | 2019-07-24T20:32:19 | Python | UTF-8 | Python | false | false | 39,843 | py | #-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2019, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
''' Provide the ``Document`` class, which is a container for Bokeh Models to
be reflected to the client side BokehJS library.
As a concrete example, consider a column layout with ``Slider`` and ``Select``
widgets, and a plot with some tools, an axis and grid, and a glyph renderer
for circles. A simplified representation of this document might look like the
figure below:
.. figure:: /_images/document.svg
:align: center
:width: 65%
A Bokeh Document is a collection of Bokeh Models (e.g. plots, tools,
glyphs, etc.) that can be serialized as a single collection.
'''
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
from collections import defaultdict
from json import loads
import sys
# External imports
import jinja2
from six import string_types
# Bokeh imports
from ..core.enums import HoldPolicy
from ..core.json_encoder import serialize_json
from ..core.query import find
from ..core.templates import FILE
from ..core.validation import check_integrity
from ..events import Event
from ..themes import default as default_theme, built_in_themes
from ..themes import Theme
from ..util.callback_manager import _check_callback
from ..util.datatypes import MultiValuedDict
from ..util.future import wraps
from ..util.version import __version__
from .events import ModelChangedEvent, RootAddedEvent, RootRemovedEvent, SessionCallbackAdded, SessionCallbackRemoved, TitleChangedEvent
from .locking import UnlockedDocumentProxy
from .util import initialize_references_json, instantiate_references_json, references_json
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
DEFAULT_TITLE = "Bokeh Application"
__all__ = (
'Document',
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
class Document(object):
''' The basic unit of serialization for Bokeh.
Document instances collect Bokeh models (e.g. plots, layouts, widgets,
etc.) so that they may be reflected into the BokehJS client runtime.
Because models may refer to other models (e.g., a plot *has* a list of
renderers), it is not generally useful or meaningful to convert individual
models to JSON. Accordingly, the ``Document`` is thus the smallest unit
of serialization for Bokeh.
'''
    def __init__(self, **kwargs):
        # _roots: top-level models; _all_models: every reachable model by ID.
        self._roots = list()
        self._theme = kwargs.pop('theme', default_theme)
        # use _title directly because we don't need to trigger an event
        self._title = kwargs.pop('title', DEFAULT_TITLE)
        self._template = FILE
        # While the freeze count is nonzero, recomputation of _all_models is
        # deferred (managed by _push/_pop_all_models_freeze).
        self._all_models_freeze_count = 0
        self._all_models = dict()
        self._all_models_by_name = MultiValuedDict()
        # IDs of models that were once in the document (used to soften patch
        # errors for recently-removed models).
        self._all_former_model_ids = set()
        self._callbacks = {}
        self._session_destroyed_callbacks = set()
        self._session_callbacks = set()
        self._session_context = None
        # Modules created on behalf of this document (cleaned up in delete_modules).
        self._modules = []
        self._template_variables = {}
        # Hold state: current hold policy (or None) and events queued while held.
        self._hold = None
        self._held_events = []
        # set of models subscribed to user events
        self._subscribed_models = defaultdict(set)
        # Maps each add_*_callback method to the user callables registered
        # through it, so remove_*_callback can find the callback objects.
        self._callback_objs_by_callable = {self.add_next_tick_callback: defaultdict(set),
                                           self.add_periodic_callback: defaultdict(set),
                                           self.add_timeout_callback: defaultdict(set)}
# Properties --------------------------------------------------------------
    @property
    def roots(self):
        ''' A list of all the root models in this Document.
        '''
        return list(self._roots)
    @property
    def session_callbacks(self):
        ''' A list of all the session callbacks on this document.
        '''
        return list(self._session_callbacks)
    @property
    def session_destroyed_callbacks(self):
        ''' A list of all the on_session_destroyed callbacks on this document.
        '''
        return self._session_destroyed_callbacks
    @session_destroyed_callbacks.setter
    def session_destroyed_callbacks(self, callbacks):
        # Replaces the whole callback set (no validation is performed here).
        self._session_destroyed_callbacks = callbacks
    @property
    def session_context(self):
        ''' The ``SessionContext`` for this document.
        '''
        return self._session_context
    @property
    def template(self):
        ''' A Jinja2 template to use for rendering this document.
        '''
        return self._template
    @template.setter
    def template(self, template):
        # Accept a Jinja2 Template instance or a template source string.
        if not isinstance(template, (jinja2.Template, string_types)):
            raise ValueError("document template must be Jinja2 template or a string")
        self._template = template
    @property
    def template_variables(self):
        ''' A dictionary of template variables to pass when rendering
        ``self.template``.
        '''
        return self._template_variables
    @property
    def theme(self):
        ''' The current ``Theme`` instance affecting models in this Document.
        Setting this to ``None`` sets the default theme. (i.e this property
        never returns ``None``.)
        Changing theme may trigger model change events on the models in the
        document if the theme modifies any model properties.
        '''
        return self._theme
    @theme.setter
    def theme(self, theme):
        # Accepts None (-> default theme), a built-in theme name, or a Theme.
        if theme is None:
            theme = default_theme
        if self._theme is theme:
            return
        if isinstance(theme, string_types):
            try:
                self._theme = built_in_themes[theme]
            except KeyError:
                raise ValueError(
                    "{0} is not a built-in theme; available themes are "
                    "{1}".format(theme, ', '.join(built_in_themes.keys()))
                )
        elif isinstance(theme, Theme):
            self._theme = theme
        else:
            raise ValueError("Theme must be a string or an instance of the Theme class")
        # Re-apply the new theme to every model currently in the document.
        for model in self._all_models.values():
            self._theme.apply_to_model(model)
    @property
    def title(self):
        ''' A title for this document.
        This title will be set on standalone HTML documents, but not e.g. when
        ``autoload_server`` is used.
        '''
        return self._title
    @title.setter
    def title(self, title):
        # Delegates so a TitleChangedEvent is triggered for listeners.
        self._set_title(title)
# Public methods ----------------------------------------------------------
    def add_next_tick_callback(self, callback):
        ''' Add callback to be invoked once on the next tick of the event loop.
        Args:
            callback (callable) :
                A callback function to execute on the next tick.
        Returns:
            NextTickCallback : can be used with ``remove_next_tick_callback``
        .. note::
            Next tick callbacks only work within the context of a Bokeh server
            session. This function will no effect when Bokeh outputs to
            standalone HTML or Jupyter notebook cells.
        '''
        # Imported locally to avoid a circular import with bokeh.server.
        from ..server.callbacks import NextTickCallback
        cb = NextTickCallback(self, None)
        # one_shot=True: the callback is discarded after its first invocation.
        return self._add_session_callback(cb, callback, one_shot=True, originator=self.add_next_tick_callback)
    def add_periodic_callback(self, callback, period_milliseconds):
        ''' Add a callback to be invoked on a session periodically.
        Args:
            callback (callable) :
                A callback function to execute periodically
            period_milliseconds (int) :
                Number of milliseconds between each callback execution.
        Returns:
            PeriodicCallback : can be used with ``remove_periodic_callback``
        .. note::
            Periodic callbacks only work within the context of a Bokeh server
            session. This function will no effect when Bokeh outputs to
            standalone HTML or Jupyter notebook cells.
        '''
        from ..server.callbacks import PeriodicCallback
        cb = PeriodicCallback(self,
                              None,
                              period_milliseconds)
        return self._add_session_callback(cb, callback, one_shot=False, originator=self.add_periodic_callback)
    def add_root(self, model, setter=None):
        ''' Add a model as a root of this Document.
        Any changes to this model (including to other models referred to
        by it) will trigger ``on_change`` callbacks registered on this
        document.
        Args:
            model (Model) :
                The model to add as a root of this document.
            setter (ClientSession or ServerSession or None, optional) :
                This is used to prevent "boomerang" updates to Bokeh apps.
                (default: None)
                In the context of a Bokeh server application, incoming updates
                to properties will be annotated with the session that is
                doing the updating. This value is propagated through any
                subsequent change notifications that the update triggers.
                The session can compare the event setter to itself, and
                suppress any updates that originate from itself.
        '''
        # Adding an existing root is a no-op.
        if model in self._roots:
            return
        # Freeze model bookkeeping so _all_models is recomputed only once.
        self._push_all_models_freeze()
        # TODO (bird) Should we do some kind of reporting of how many
        # LayoutDOM's are in the document roots? In vanilla bokeh cases e.g.
        # output_file more than one LayoutDOM is probably not going to go
        # well. But in embedded cases, you may well want more than one.
        try:
            self._roots.append(model)
        finally:
            self._pop_all_models_freeze()
        self._trigger_on_change(RootAddedEvent(self, model, setter))
    def add_timeout_callback(self, callback, timeout_milliseconds):
        ''' Add callback to be invoked once, after a specified timeout passes.
        Args:
            callback (callable) :
                A callback function to execute after timeout
            timeout_milliseconds (int) :
                Number of milliseconds before callback execution.
        Returns:
            TimeoutCallback : can be used with ``remove_timeout_callback``
        .. note::
            Timeout callbacks only work within the context of a Bokeh server
            session. This function will no effect when Bokeh outputs to
            standalone HTML or Jupyter notebook cells.
        '''
        from ..server.callbacks import TimeoutCallback
        cb = TimeoutCallback(self,
                             None,
                             timeout_milliseconds)
        return self._add_session_callback(cb, callback, one_shot=True, originator=self.add_timeout_callback)
    def apply_json_event(self, json):
        ''' Decode a JSON-encoded user event and dispatch it to every model
        subscribed to that event name.  Undecodable payloads are logged and
        ignored.
        '''
        event = loads(json, object_hook=Event.decode_json)
        if not isinstance(event, Event):
            log.warning('Could not decode event json: %s' % json)
        else:
            # Copy the subscriber set so handlers that (un)subscribe during
            # dispatch do not mutate the set being iterated.
            subscribed = self._subscribed_models[event.event_name].copy()
            for model in subscribed:
                model._trigger_event(event)
    def apply_json_patch(self, patch, setter=None):
        ''' Apply a JSON patch object and process any resulting events.
        Args:
            patch (JSON-data) :
                The JSON-object containing the patch to apply.
            setter (ClientSession or ServerSession or None, optional) :
                This is used to prevent "boomerang" updates to Bokeh apps.
                (default: None)
                In the context of a Bokeh server application, incoming updates
                to properties will be annotated with the session that is
                doing the updating. This value is propagated through any
                subsequent change notifications that the update triggers.
                The session can compare the event setter to itself, and
                suppress any updates that originate from itself.
        Returns:
            None
        '''
        references_json = patch['references']
        events_json = patch['events']
        references = instantiate_references_json(references_json)
        # Use our existing model instances whenever we have them
        for obj in references.values():
            if obj.id in self._all_models:
                references[obj.id] = self._all_models[obj.id]
        # The model being changed isn't always in references so add it in
        for event_json in events_json:
            if 'model' in event_json:
                model_id = event_json['model']['id']
                if model_id in self._all_models:
                    references[model_id] = self._all_models[model_id]
        initialize_references_json(references_json, references, setter)
        # Apply each event in order.  Each branch below handles one patch kind.
        for event_json in events_json:
            if event_json['kind'] == 'ModelChanged':
                patched_id = event_json['model']['id']
                if patched_id not in self._all_models:
                    if patched_id not in self._all_former_model_ids:
                        raise RuntimeError("Cannot apply patch to %s which is not in the document" % (str(patched_id)))
                    else:
                        log.warning("Cannot apply patch to %s which is not in the document anymore" % (str(patched_id)))
                        # NOTE(review): this `break` abandons ALL remaining
                        # events in the patch, not just this one -- a
                        # `continue` would skip only the stale event; confirm
                        # this is the intended behavior.
                        break
                patched_obj = self._all_models[patched_id]
                attr = event_json['attr']
                value = event_json['new']
                patched_obj.set_from_json(attr, value, models=references, setter=setter)
            elif event_json['kind'] == 'ColumnDataChanged':
                source_id = event_json['column_source']['id']
                if source_id not in self._all_models:
                    raise RuntimeError("Cannot apply patch to %s which is not in the document" % (str(source_id)))
                source = self._all_models[source_id]
                value = event_json['new']
                source.set_from_json('data', value, models=references, setter=setter)
            elif event_json['kind'] == 'ColumnsStreamed':
                source_id = event_json['column_source']['id']
                if source_id not in self._all_models:
                    raise RuntimeError("Cannot stream to %s which is not in the document" % (str(source_id)))
                source = self._all_models[source_id]
                data = event_json['data']
                rollover = event_json.get('rollover', None)
                source._stream(data, rollover, setter)
            elif event_json['kind'] == 'ColumnsPatched':
                source_id = event_json['column_source']['id']
                if source_id not in self._all_models:
                    raise RuntimeError("Cannot apply patch to %s which is not in the document" % (str(source_id)))
                source = self._all_models[source_id]
                patches = event_json['patches']
                source.patch(patches, setter)
            elif event_json['kind'] == 'RootAdded':
                root_id = event_json['model']['id']
                root_obj = references[root_id]
                self.add_root(root_obj, setter)
            elif event_json['kind'] == 'RootRemoved':
                root_id = event_json['model']['id']
                root_obj = references[root_id]
                self.remove_root(root_obj, setter)
            elif event_json['kind'] == 'TitleChanged':
                self._set_title(event_json['title'], setter)
            else:
                raise RuntimeError("Unknown patch event " + repr(event_json))
def apply_json_patch_string(self, patch):
''' Apply a JSON patch provided as a string.
Args:
patch (str) :
Returns:
None
'''
json_parsed = loads(patch)
self.apply_json_patch(json_parsed)
    def clear(self):
        ''' Remove all content from the document but do not reset title.
        Returns:
            None
        '''
        # Freeze model bookkeeping while removing every root, so the
        # _all_models index is recomputed only once at the end.
        self._push_all_models_freeze()
        try:
            while len(self._roots) > 0:
                r = next(iter(self._roots))
                self.remove_root(r)
        finally:
            self._pop_all_models_freeze()
    def destroy(self, session):
        ''' Tear down this document when its session ends: unhook the session,
        detach all models, drop internal state, and clean up created modules.
        '''
        self.remove_on_change(session)
        # probably better to implement a destroy protocol on models to
        # untangle everything, then the collect below might not be needed
        for m in self._all_models.values():
            m._document = None
            # NOTE(review): `del m` only unbinds the loop-local name; it does
            # not delete the model object itself.
            del m
        self._roots = []
        self._all_models = None
        self._all_models_by_name = None
        self._theme = None
        self._template = None
        self._session_context = None
        self.delete_modules()
        # Force a collection so reference cycles broken above are reclaimed now.
        import gc
        gc.collect()
    def delete_modules(self):
        ''' Clean up after any modules created by this Document when its session is
        destroyed.
        '''
        from gc import get_referrers
        from types import FrameType
        log.debug("Deleting %s modules for %s" % (len(self._modules), self))
        for module in self._modules:
            # Modules created for a Document should have three referrers at this point:
            #
            # - sys.modules
            # - self._modules
            # - a frame object
            #
            # This function will take care of removing these expected references.
            #
            # If there are any additional referrers, this probably means the module will be
            # leaked. Here we perform a detailed check that the only referrers are expected
            # ones. Otherwise issue an error log message with details.
            referrers = get_referrers(module)
            referrers = [x for x in referrers if x is not sys.modules]
            referrers = [x for x in referrers if x is not self._modules]
            referrers = [x for x in referrers if not isinstance(x, FrameType)]
            if len(referrers) != 0:
                log.error("Module %r has extra unexpected referrers! This could indicate a serious memory leak. Extra referrers: %r" % (module, referrers))
            # remove the reference from sys.modules
            if module.__name__ in sys.modules:
                del sys.modules[module.__name__]
        # remove the reference from self._modules
        self._modules = None
        # the frame reference will take care of itself
    @classmethod
    def from_json(cls, json):
        ''' Load a document from JSON.
        json (JSON-data) :
            A JSON-encoded document to create a new Document from.
        Returns:
            Document :
        '''
        roots_json = json['roots']
        root_ids = roots_json['root_ids']
        references_json = roots_json['references']
        # Instantiate all referenced models first, then wire up their
        # properties (which may point at each other).
        references = instantiate_references_json(references_json)
        initialize_references_json(references_json, references)
        # NOTE(review): constructs Document() rather than cls(), so subclasses
        # calling from_json still get a plain Document -- confirm intended.
        doc = Document()
        for r in root_ids:
            doc.add_root(references[r])
        doc.title = json['title']
        return doc
    @classmethod
    def from_json_string(cls, json):
        ''' Load a document from JSON.
        json (str) :
            A string with a JSON-encoded document to create a new Document
            from.
        Returns:
            Document :
        '''
        json_parsed = loads(json)
        return cls.from_json(json_parsed)
def get_model_by_id(self, model_id):
''' Find the model for the given ID in this document, or ``None`` if it
is not found.
Args:
model_id (str) : The ID of the model to search for
Returns:
Model or None
'''
return self._all_models.get(model_id)
def get_model_by_name(self, name):
''' Find the model for the given name in this document, or ``None`` if
it is not found.
Args:
name (str) : The name of the model to search for
Returns:
Model or None
'''
return self._all_models_by_name.get_one(name, "Found more than one model named '%s'" % name)
def hold(self, policy="combine"):
''' Activate a document hold.
While a hold is active, no model changes will be applied, or trigger
callbacks. Once ``unhold`` is called, the events collected during the
hold will be applied according to the hold policy.
Args:
hold ('combine' or 'collect', optional)
Whether events collected during a hold should attempt to be
combined (default: 'combine')
When set to ``'collect'`` all events will be collected and
replayed in order as-is when ``unhold`` is called.
When set to ``'combine'`` Bokeh will attempt to combine
compatible events together. Typically, different events that
change the same property on the same mode can be combined.
For example, if the following sequence occurs:
.. code-block:: python
doc.hold('combine')
slider.value = 10
slider.value = 11
slider.value = 12
Then only *one* callback, for the last ``slider.value = 12``
will be triggered.
Returns:
None
.. note::
``hold`` only applies to document change events, i.e. setting
properties on models. It does not apply to events such as
``ButtonClick``, etc.
'''
if self._hold is not None and self._hold != policy:
log.warning("hold already active with '%s', ignoring '%s'" % (self._hold, policy))
return
if policy not in HoldPolicy:
raise ValueError("Unknown hold policy %r" % policy)
self._hold = policy
def unhold(self):
''' Turn off any active document hold and apply any collected events.
Returns:
None
'''
# no-op if we are already no holding
if self._hold is None: return
self._hold = None
events = list(self._held_events)
self._held_events = []
for event in events:
self._trigger_on_change(event)
def on_change(self, *callbacks):
    ''' Provide callbacks to invoke if the document or any Model reachable
    from its roots changes.

    '''
    for callback in callbacks:
        # skip duplicates so a callback fires at most once per event
        if callback in self._callbacks: continue
        _check_callback(callback, ('event',))
        self._callbacks[callback] = callback

def on_change_dispatch_to(self, receiver):
    ''' Register ``receiver`` so that document change events are routed to
    it via ``event.dispatch(receiver)`` instead of a plain call.
    '''
    if not receiver in self._callbacks:
        self._callbacks[receiver] = lambda event: event.dispatch(receiver)

def on_session_destroyed(self, *callbacks):
    ''' Provide callbacks to invoke when the session serving the Document
    is destroyed

    '''
    for callback in callbacks:
        # each callback must accept a single ``session_context`` argument
        _check_callback(callback, ('session_context',))
        self._session_destroyed_callbacks.add(callback)
def remove_next_tick_callback(self, callback_obj):
    ''' Remove a callback added earlier with ``add_next_tick_callback``.

    Args:
        callback_obj : a value returned from ``add_next_tick_callback``

    Returns:
        None

    Raises:
        ValueError, if the callback was never added or has already been run or removed

    '''
    self._remove_session_callback(callback_obj, self.add_next_tick_callback)

def remove_on_change(self, *callbacks):
    ''' Remove a callback added earlier with ``on_change``.

    Raises:
        KeyError, if the callback was never added

    '''
    for callback in callbacks:
        del self._callbacks[callback]

def remove_periodic_callback(self, callback_obj):
    ''' Remove a callback added earlier with ``add_periodic_callback``

    Args:
        callback_obj : a value returned from ``add_periodic_callback``

    Returns:
        None

    Raises:
        ValueError, if the callback was never added or has already been removed

    '''
    self._remove_session_callback(callback_obj, self.add_periodic_callback)
def remove_root(self, model, setter=None):
    ''' Remove a model as root model from this Document.

    Changes to this model may still trigger ``on_change`` callbacks
    on this document, if the model is still referred to by other
    root models.

    Args:
        model (Model) :
            The model to remove as a root of this document.

        setter (ClientSession or ServerSession or None, optional) :
            This is used to prevent "boomerang" updates to Bokeh apps.
            (default: None)

            In the context of a Bokeh server application, incoming updates
            to properties will be annotated with the session that is
            doing the updating. This value is propagated through any
            subsequent change notifications that the update triggers.
            The session can compare the event setter to itself, and
            suppress any updates that originate from itself.

    '''
    if model not in self._roots:
        return # TODO (bev) ValueError?
    # freeze so the all-models index is only recomputed once
    self._push_all_models_freeze()
    try:
        self._roots.remove(model)
    finally:
        self._pop_all_models_freeze()
    self._trigger_on_change(RootRemovedEvent(self, model, setter))
def remove_timeout_callback(self, callback_obj):
    ''' Remove a callback added earlier with ``add_timeout_callback``.

    Args:
        callback_obj : a value returned from ``add_timeout_callback``

    Returns:
        None

    Raises:
        ValueError, if the callback was never added or has already been run or removed

    '''
    self._remove_session_callback(callback_obj, self.add_timeout_callback)

def replace_with_json(self, json):
    ''' Overwrite everything in this document with the JSON-encoded
    document.

    Args:
        json (JSON-data) :
            A JSON-encoded document to overwrite this one.

    Returns:
        None

    '''
    replacement = self.from_json(json)
    replacement._destructively_move(self)
def select(self, selector):
    ''' Query this document for objects that match the given selector.

    Args:
        selector (JSON-like query dictionary) : you can query by type or by
            name, e.g. ``{"type": HoverTool}``, ``{"name": "mycircle"}``

    Returns:
        seq[Model]

    '''
    if self._is_single_string_selector(selector, 'name'):
        # special-case optimization for by-name query
        return self._all_models_by_name.get_all(selector['name'])
    else:
        return find(self._all_models.values(), selector)

def select_one(self, selector):
    ''' Query this document for objects that match the given selector.
    Raises an error if more than one object is found. Returns
    single matching object, or None if nothing is found

    Args:
        selector (JSON-like query dictionary) : you can query by type or by
            name, e.g. ``{"type": HoverTool}``, ``{"name": "mycircle"}``

    Returns:
        Model or None

    '''
    result = list(self.select(selector))
    if len(result) > 1:
        raise ValueError("Found more than one model matching %s: %r" % (selector, result))
    if len(result) == 0:
        return None
    return result[0]
def set_select(self, selector, updates):
    ''' Update objects that match a given selector with the specified
    attribute/value updates.

    Args:
        selector (JSON-like query dictionary) : you can query by type or by
            name, e.g. ``{"type": HoverTool}``, ``{"name": "mycircle"}``

        updates (dict) : mapping of attribute name to new value

    Returns:
        None

    '''
    for obj in self.select(selector):
        for key, val in updates.items():
            setattr(obj, key, val)

def to_json(self):
    ''' Convert this document to a JSON object.

    Return:
        JSON-data

    '''
    # this is a total hack to go via a string, needed because
    # our BokehJSONEncoder goes straight to a string.
    doc_json = self.to_json_string()
    return loads(doc_json)
def to_json_string(self, indent=None):
    ''' Convert the document to a JSON string.

    Args:
        indent (int or None, optional) : number of spaces to indent, or
            None to suppress all newlines and indentation (default: None)

    Returns:
        str

    '''
    root_ids = []
    for r in self._roots:
        root_ids.append(r.id)
    # serialize every reachable model, not just the roots themselves
    root_references = self._all_models.values()
    json = {
        'title' : self.title,
        'roots' : {
            'root_ids' : root_ids,
            'references' : references_json(root_references)
        },
        'version' : __version__
    }
    return serialize_json(json, indent=indent)
def validate(self):
    ''' Perform integrity checks on the models in this document.

    Returns:
        None

    '''
    for r in self.roots:
        refs = r.references()
        check_integrity(refs)
# Private methods ---------------------------------------------------------
def _add_session_callback(self, callback_obj, callback, one_shot, originator):
    ''' Internal implementation for adding session callbacks.

    Args:
        callback_obj (SessionCallback) :
            A session callback object that wraps a callable and is
            passed to ``trigger_on_change``.

        callback (callable) :
            A callable to execute when session events happen.

        one_shot (bool) :
            Whether the callback should immediately auto-remove itself
            after one execution.

        originator (callable) :
            The public ``add_*_callback`` method this registration came
            through; used as the bookkeeping key for later removal.

    Returns:
        SessionCallback : passed in as ``callback_obj``.

    Raises:
        ValueError, if the callback has been previously added

    '''
    if one_shot:
        @wraps(callback)
        def remove_then_invoke(*args, **kwargs):
            # remove BEFORE invoking, so the callback cannot re-fire even
            # if its own execution triggers more events
            if callback_obj in self._session_callbacks:
                self._remove_session_callback(callback_obj, originator)
            return callback(*args, **kwargs)
        actual_callback = remove_then_invoke
    else:
        actual_callback = callback

    callback_obj._callback = self._wrap_with_self_as_curdoc(actual_callback)
    self._session_callbacks.add(callback_obj)
    self._callback_objs_by_callable[originator][callback].add(callback_obj)

    # emit event so the session is notified of the new callback
    self._trigger_on_change(SessionCallbackAdded(self, callback_obj))

    return callback_obj
def _destructively_move(self, dest_doc):
    ''' Move all data in this doc to the dest_doc, leaving this doc empty.

    Args:
        dest_doc (Document) :
            The Bokeh document to populate with data from this one

    Returns:
        None

    '''
    if dest_doc is self:
        raise RuntimeError("Attempted to overwrite a document with itself")

    dest_doc.clear()
    # we have to remove ALL roots before adding any
    # to the new doc or else models referenced from multiple
    # roots could be in both docs at once, which isn't allowed.
    roots = []
    self._push_all_models_freeze()
    try:
        while self.roots:
            r = next(iter(self.roots))
            self.remove_root(r)
            roots.append(r)
    finally:
        self._pop_all_models_freeze()
    # sanity checks: every root must be fully detached at this point
    for r in roots:
        if r.document is not None:
            raise RuntimeError("Somehow we didn't detach %r" % (r))
    if len(self._all_models) != 0:
        raise RuntimeError("_all_models still had stuff in it: %r" % (self._all_models))
    for r in roots:
        dest_doc.add_root(r)

    dest_doc.title = self.title
def _invalidate_all_models(self):
    ''' Recompute the id->model index now, unless a freeze is active
    (in which case recomputation is deferred to unfreeze).
    '''
    # if freeze count is > 0, we'll recompute on unfreeze
    if self._all_models_freeze_count == 0:
        self._recompute_all_models()

def _is_single_string_selector(self, selector, field):
    ''' Return True when ``selector`` is exactly ``{field: <string>}``;
    this enables the fast by-name lookup path in ``select``.
    '''
    if len(selector) != 1:
        return False
    if field not in selector:
        return False
    return isinstance(selector[field], string_types)
def _notify_change(self, model, attr, old, new, hint=None, setter=None, callback_invoker=None):
    ''' Called by Model when it changes
    '''
    # if name changes, update by-name index
    if attr == 'name':
        if old is not None:
            self._all_models_by_name.remove_value(old, model)
        if new is not None:
            self._all_models_by_name.add_value(new, model)

    if hint is None:
        # plain property change: compute the serializable form of the new
        # value; hinted events (stream/patch style) carry their own payload
        serializable_new = model.lookup(attr).serializable_value(model)
    else:
        serializable_new = None

    event = ModelChangedEvent(self, model, attr, old, new, serializable_new, hint, setter, callback_invoker)
    self._trigger_on_change(event)
def _push_all_models_freeze(self):
'''
'''
self._all_models_freeze_count += 1
def _pop_all_models_freeze(self):
'''
'''
self._all_models_freeze_count -= 1
if self._all_models_freeze_count == 0:
self._recompute_all_models()
def _recompute_all_models(self):
    ''' Rebuild the id->model and by-name indexes from the current roots,
    attaching models that became reachable and detaching those that are
    no longer referenced by any root.
    '''
    new_all_models_set = set()
    for r in self.roots:
        new_all_models_set = new_all_models_set.union(r.references())
    old_all_models_set = set(self._all_models.values())
    to_detach = old_all_models_set - new_all_models_set
    to_attach = new_all_models_set - old_all_models_set

    recomputed = {}
    recomputed_by_name = MultiValuedDict()
    for m in new_all_models_set:
        recomputed[m.id] = m
        if m.name is not None:
            recomputed_by_name.add_value(m.name, m)
    for d in to_detach:
        # remember former ids so stale references can be recognized later
        self._all_former_model_ids.add(d.id)
        d._detach_document()
    for a in to_attach:
        a._attach_document(self)
    self._all_models = recomputed
    self._all_models_by_name = recomputed_by_name
def _remove_session_callback(self, callback_obj, originator):
    ''' Remove a callback added earlier with ``add_periodic_callback``,
    ``add_timeout_callback``, or ``add_next_tick_callback``.

    Returns:
        None

    Raises:
        ValueError, if the callback was never added or was already removed

    '''
    try:
        callback_objs = [callback_obj]
        self._session_callbacks.remove(callback_obj)
        # also drop the object from the per-originator bookkeeping index
        for cb, cb_objs in list(self._callback_objs_by_callable[originator].items()):
            try:
                cb_objs.remove(callback_obj)
                if not cb_objs:
                    del self._callback_objs_by_callable[originator][cb]
            except KeyError:
                pass
    except KeyError:
        raise ValueError("callback already ran or was already removed, cannot be removed again")
    # emit event so the session is notified and can remove the callback
    for callback_obj in callback_objs:
        self._trigger_on_change(SessionCallbackRemoved(self, callback_obj))
def _set_title(self, title, setter=None):
    ''' Set the document title, emitting a ``TitleChangedEvent`` only when
    the value actually changes. ``None`` is rejected.
    '''
    if title is None:
        raise ValueError("Document title may not be None")
    if self._title != title:
        self._title = title
        self._trigger_on_change(TitleChangedEvent(self, title, setter))
def _trigger_on_change(self, event):
    ''' Dispatch ``event`` to all registered callbacks, honoring any
    active hold policy (see ``hold``/``unhold``).
    '''
    if self._hold == "collect":
        self._held_events.append(event)
        return
    elif self._hold == "combine":
        _combine_document_events(event, self._held_events)
        return

    if event.callback_invoker is not None:
        self._with_self_as_curdoc(event.callback_invoker)

    def invoke_callbacks():
        for cb in self._callbacks.values():
            cb(event)
    # run the callbacks with this document installed as curdoc
    self._with_self_as_curdoc(invoke_callbacks)
def _with_self_as_curdoc(self, f):
    ''' Invoke ``f()`` with this document temporarily installed as the
    current document, restoring the previous curdoc afterwards.
    '''
    # local import avoids a circular dependency with bokeh.io
    from bokeh.io.doc import set_curdoc, curdoc
    old_doc = curdoc()
    try:
        if getattr(f, "nolock", False):
            # "nolock" callables get an unlocked proxy document
            set_curdoc(UnlockedDocumentProxy(self))
        else:
            set_curdoc(self)
        return f()
    finally:
        set_curdoc(old_doc)
def _wrap_with_self_as_curdoc(self, f):
'''
'''
doc = self
@wraps(f)
def wrapper(*args, **kwargs):
@wraps(f)
def invoke():
return f(*args, **kwargs)
return doc._with_self_as_curdoc(invoke)
return wrapper
def _combine_document_events(new_event, old_events):
''' Attempt to combine a new event with a list of previous events.
The ``old_event`` will be scanned in reverse, and ``.combine(new_event)``
will be called on each. If a combination can be made, the function
will return immediately. Otherwise, ``new_event`` will be appended to
``old_events``.
Args:
new_event (DocumentChangedEvent) :
The new event to attempt to combine
old_events (list[DocumentChangedEvent])
A list of previous events to attempt to combine new_event with
**This is an "out" parameter**. The values it contains will be
modified in-place.
Returns:
None
'''
for event in reversed(old_events):
if event.combine(new_event):
return
# no combination was possible
old_events.append(new_event)
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| [
"dmitriy00vn@gmail.com"
] | dmitriy00vn@gmail.com |
8a8c9a3189a6ca290d4e9ac347532ca02cb14649 | 895f79e57861f2e2d552750fe56b5f742bdbd4cb | /tcv_mrp/model/tcv_mrp_basic_task.py | 5f3d7c5be857b1b3f13d2a225232f9bb55d14d36 | [] | no_license | Tecvemar/openerp60 | e9899eebcfa150dd52537db8dcbf7264fafc63cd | 8534c448f63c71e57d91b21656f1bc1aa8f7aea8 | refs/heads/master | 2023-02-15T04:55:05.817013 | 2023-01-26T21:01:35 | 2023-01-26T21:01:35 | 74,976,919 | 1 | 1 | null | 2022-01-21T14:42:29 | 2016-11-28T13:45:07 | Python | UTF-8 | Python | false | false | 43,716 | py | # -*- encoding: utf-8 -*-
##############################################################################
# Company: Tecvemar, c.a.
# Author: Juan V. Márquez L.
# Creation Date: 03/10/2012
# Version: 0.0.0.0
#
# Description:
#
#
##############################################################################
#~ from datetime import datetime
from osv import fields, osv
from tools.translate import _
#~ import pooler
import decimal_precision as dp
import time
#~ import netsvc
import logging
logger = logging.getLogger('server')
##---------------------------------------------------------- tcv_mrp_basic_task
class tcv_mrp_basic_task(osv.osv):
    # Base model for a single production task inside a subprocess; concrete
    # task models inherit from this and override the hook methods.
    _name = 'tcv.mrp.basic.task'
    _description = ''
    # Picking type used when generating stock moves ('out' = consumption).
    _stock_picking_type = 'out'
    # From this date onward, operator & factory-overhead costs are computed
    # per m2 instead of per hour.
    _change_method_date = '2014-04-01'
def _template_params(self):
    ''' Declare the template variables (name, type, help) this task type
    expects in its tcv.mrp.template configuration.
    '''
    res = [
        {'sequence': 10, 'name': 'operator_cost', 'type': 'float',
         'help': 'Costo por hora del personal asignado a la tarea'},
        {'sequence': 15, 'name': 'operator_cost_m2', 'type': 'float',
         'help': 'Costo por m2 del personal asignado a la tarea'},
        {'sequence': 20, 'name': 'factory_overhead', 'type': 'float',
         'help': 'Costo por hora por concepto de carga fabril'},
        {'sequence': 25, 'name': 'factory_overhead_m2', 'type': 'float',
         'help': 'Costo por m2 por concepto de carga fabril'},
        {'sequence': 10, 'name': 'account_operator_cost', 'type':
         'account', 'help':
         _('Account for the cost of operator cost applied')},
        {'sequence': 120, 'name': 'account_factory_overhead', 'type':
         'account', 'help':
         _('Account for the cost of the factory overhead applied')},
        {'sequence': 121, 'name': 'account_applied_cost', 'type':
         'account', 'help': _('Account for the proccess applied cost')},
    ]
    return res

def _account_move_settings(self):
    ''' Map cost keys (read from cost lines via getattr) to their label
    and whether the cost is product-related.
    '''
    res = {
        'operator_cost': {'name': _('Operator cost'), 'isproduct': False},
        'factory_overhead': {'name': _('Factory overhead'),
                             'isproduct': False},
    }
    return res
##-------------------------------------------------------------------------
def _compute_run_time(self, cr, uid, date_start, date_end, context=None):
'''Must return date_end - date_start in hours'''
if date_start and date_end and date_start < date_end:
try:
ts = time.mktime(time.strptime(
date_start, '%Y-%m-%d %H:%M:%S'))
#TODO usar: tools.DEFAULT_SERVER_DATE_FORMAT
te = time.mktime(time.strptime(
date_end, '%Y-%m-%d %H:%M:%S'))
rt = (te - ts) # Result in seconds
h = (rt) // 3600
m = ((rt) % 3600.0) / 60.0 / 60.0
# decimales (0.10 = 6 seg) usa regla de 3: 1 -> 60seg | m -> s
res = h + m
except:
return None
return res
def _compute_operator_cost(self, task, round_to):
if task.run_time < 0:
raise osv.except_osv(_('Error!'), _('Run time must be > 0'))
return round(task.run_time * task.operator_cost, round_to)
def _compute_factory_overhead(self, task, round_to):
if task.run_time < 0:
raise osv.except_osv(_('Error!'), _('Run time must be > 0'))
return round(task.run_time * task.factory_overhead, round_to)
def _compute_cost_by_m2(self, cr, uid, task, area, round_to):
    ''' Compute operator and factory-overhead costs for ``area`` (m2)
    using the per-m2 rates stored in the task's template. Returns a dict
    with both the unit rates and the rounded totals.
    '''
    obj_tmp = self.pool.get('tcv.mrp.template')
    tmpl_id = task.parent_id.template_id.id
    operator_m2 = obj_tmp.get_var_value(
        cr, uid, tmpl_id, 'operator_cost_m2')
    factory_m2 = obj_tmp.get_var_value(
        cr, uid, tmpl_id, 'factory_overhead_m2')
    res = {'operator_m2': operator_m2,
           'operator_cost_m2': round(area * operator_m2, round_to),
           'factory_m2': factory_m2,
           'factory_overhead_m2': round(area * factory_m2, round_to),
           }
    return res
def _get_task_info(self, cr, uid, obj_task, context=None):
return ''
##--------------------------------------------------------- function fields
def _compute_all_fields(self, cr, uid, ids, name, arg, context=None):
    ''' Function-field computer for run_time, supplies_cost and task_info
    (multi='all'). Returns {id: {field: value}}.
    '''
    context = context or {}
    if not len(ids):
        return []
    res = {}
    for item in self.browse(cr, uid, ids, context={}):
        # NOTE(review): _compute_run_time can return None (bad/missing
        # dates), which would make this subtraction raise — confirm the
        # SQL constraint date_start<date_end always protects this path.
        run_time = self._compute_run_time(
            cr, uid, item.date_start, item.date_end, context) - \
            item.downtime
        supplies_cost = 0
        for supp in item.supplies_ids:
            supplies_cost += supp.amount
        res[item.id] = {'run_time': run_time,
                        'supplies_cost': supplies_cost,
                        'task_info': self._get_task_info(
                            cr, uid, item, context)}
    return res
##-------------------------------------------------------------------------
_columns = {
    'parent_id': fields.many2one(
        'tcv.mrp.subprocess', 'Subprocess', required=True,
        readonly=True, ondelete='cascade'),
    'name': fields.char(
        'Name', size=64, required=False, readonly=True,
        states={'draft': [('readonly', False)]}),
    'narration': fields.text(
        'Notes', readonly=False),
    'supplies_ids': fields.one2many(
        'tcv.mrp.basic.task.supplies', 'task_id', 'Supplies',
        readonly=True, states={'draft': [('readonly', False)]}),
    'costs_ids': fields.one2many(
        'tcv.mrp.basic.task.costs', 'task_id', 'Output data',
        readonly=True),
    'stops_ids': fields.one2many(
        'tcv.mrp.basic.task.stops', 'task_id', 'Stop issues',
        readonly=False),
    'date_start': fields.datetime(
        'Date started', required=True, readonly=True,
        states={'draft': [('readonly', False)]}, select=True,
        help="Date on which this process has been started."),
    'date_end': fields.datetime(
        'Date finished', required=True, select=True, readonly=True,
        states={'draft': [('readonly', False)]},
        help="Date on which this process has been finished."),
    # computed fields (see _compute_all_fields)
    'run_time': fields.function(
        _compute_all_fields, method=True, type='float',
        string='Production runtime', multi='all',
        help="The production time in hours (the decimal part represents " +
        "the hour's fraction 0.50 = 30 min) (minus downtime)."),
    'downtime': fields.float(
        'Downtime', required=True, readonly=True,
        states={'draft': [('readonly', False)]},
        help="The downtime (in hours) of actual process"),
    'operator_cost': fields.float(
        'Operator cost', digits_compute=dp.get_precision('Account'),
        readonly=True, states={'draft': [('readonly', False)]},
        help="Estimated operator cost per hour, from template " +
        "(operator_cost)"),
    'factory_overhead': fields.float(
        'Factory Overhead', digits_compute=dp.get_precision('Account'),
        readonly=True, states={'draft': [('readonly', False)]},
        help="Estimated factory Overhead per hour, from template " +
        "(factory_overhead)"),
    'supplies_cost': fields.function(
        _compute_all_fields, method=True, type='float',
        string='Supplies cost', digits_compute=dp.get_precision('Account'),
        multi='all'),
    'valid_cost': fields.boolean(
        'Valid cost', help="set to true when cost is calculated ok",
        readonly=True),
    # This 'state' field is not a standard workflow state: it is only a
    # draft/done flag used to lock or unlock the data fields above.
    'state': fields.selection([(
        'draft', 'Draft'), ('done', 'Done')], string='State',
        required=True, readonly=True),
    'move_id': fields.many2one(
        'account.move', 'Account move', ondelete='restrict',
        help="The accounting move of this entry.", readonly=True),
    'picking_id': fields.many2one(
        'stock.picking', 'Stock picking', ondelete='restrict',
        help="The stock picking of this entry.", select=True,
        readonly=True),
    'task_info': fields.function(
        _compute_all_fields, method=True, type='char', size=128,
        string='Task name', multi='all'),
}

_defaults = {
    'valid_cost': lambda *a: False,
    'downtime': lambda *a: 0,
    'state': lambda *a: 'draft',
}

_sql_constraints = [
    ('run_time_gt_zero', 'CHECK (date_start<date_end)',
     'The run time must be > 0 !'),
    ('downtime_gt_zero', 'CHECK (downtime>=0)',
     'The downtime must be >= 0 !'),
]
##-------------------------------------------------------------------------
def _clear_previous_cost_distribution(self, cr, uid, obj_cost,
context=None):
unlink_ids = []
for line in obj_cost.costs_ids:
unlink_ids.append((2, line.id))
if unlink_ids:
self.write(
cr, uid, obj_cost.id, {'costs_ids': unlink_ids}, context)
def cost_distribution(self, cr, uid, ids, context=None):
    ''' Hook for inherited models: distribute accumulated costs over the
    output lines. The base implementation does nothing and reports
    success.
    '''
    return True
def process_all_input(self, cr, uid, ids, context=None):
    '''
    Send all input data to output; valid for input = io.slab: each input
    line is copied verbatim into an output line via one2many (0, 0, vals)
    create commands.
    '''
    output_ids = []
    for item in self.browse(cr, uid, ids, context):
        for line in item.input_ids:
            # note: 'heigth' is the field's actual (misspelled) name
            output_ids.append((0, 0, {'input_id': line.id,
                                      'product_id': line.product_id.id,
                                      'pieces': line.pieces,
                                      'length': line.length,
                                      'heigth': line.heigth,
                                      'thickness': line.thickness,
                                      }))
        # NOTE(review): output_ids is not reset per item, so with multiple
        # ids later records receive earlier records' lines too — confirm
        # callers always pass a single id.
        if output_ids:
            self.write(cr, uid, item.id,
                       {'output_ids': output_ids}, context)
    return True
def show_products_resulting(self, cr, uid, ids, context=None):
    ''' Return an act_window action showing the output records produced
    by this task (model taken from the template's output_model); only the
    first id is used.
    '''
    if not ids:
        return []
    res = {}
    brw = self.browse(cr, uid, ids[0], context={})
    model = brw.parent_id.template_id.output_model.model
    res.update({'name':
                _('Products resulting (%s)') %
                brw.parent_id.template_id.name,
                'type': 'ir.actions.act_window',
                'res_model': model,
                'view_type': 'tree',
                'view_id': self.pool.get('ir.ui.view').search(
                    cr, uid, [('model', '=', model)]),
                'view_mode': 'tree',
                'nodestroy': True,
                'target': 'current',
                'domain': [('task_ref', '=', brw.id),
                           ('subprocess_ref', '=', brw.parent_id.id)],
                'context': {}
                })
    return res
def load_default_values(self, cr, uid, parent_id, context=None):
    '''
    Return default values for a new task of subprocess ``parent_id``.
    May be overridden in inherited models. Keys must use the "default_"
    prefix, e.g. {'default_product_id': product_id}.

    Raises when the subprocess has an input model and its prior
    subprocess is not yet done.
    '''
    obj_tmp = self.pool.get('tcv.mrp.template')
    obj_spr = self.pool.get('tcv.mrp.subprocess')
    subp = obj_spr.browse(cr, uid, parent_id, context=context)
    if subp.template_id.input_model and subp.prior_id.state != 'done':
        raise osv.except_osv(
            _('Error!'),
            _('You must set the prior task as "Done" before continuing'))
    # per-m2 rates (see _change_method_date on the class)
    operator_cost = obj_tmp.get_var_value(
        cr, uid, subp.template_id.id, 'operator_cost_m2')
    factory_overhead = obj_tmp.get_var_value(
        cr, uid, subp.template_id.id, 'factory_overhead_m2')
    return {'default_operator_cost': operator_cost,
            'default_factory_overhead': factory_overhead}
def get_output_data_line(self, item, line):
    '''
    Hook for inherited models: build the values dict used to create one
    output record (in template.output_model) from cost line ``line`` of
    task ``item``. Returning an empty dict skips creation.
    '''
    return {}
def save_output_products(self, cr, uid, ids, context=None):
    ''' Recreate the output records of each task: delete any previous
    output rows for the task/subprocess pair, then create one record per
    cost line via get_output_data_line (skipping empty dicts).
    '''
    so_brw = self.browse(cr, uid, ids, context={})
    for item in so_brw:
        model = item.parent_id.template_id.output_model.model
        obj_out = self.pool.get(model)
        unlink_ids = obj_out.search(
            cr, uid, [('task_ref', '=', item.id),
                      ('subprocess_ref', '=', item.parent_id.id)])
        if unlink_ids:
            obj_out.unlink(cr, uid, unlink_ids, context)
        for line in item.costs_ids:
            data = self.get_output_data_line(item, line)
            if data:
                obj_out.create(cr, uid, data, context)
            else:
                logger.warn('No output products generated ' +
                            '(%s.save_output_products)' % self._name)
    return True
def _gen_account_move_line(self, company_id, account_id,
name, debit, credit):
return (0, 0, {'auto': True,
'company_id': company_id,
'account_id': account_id,
'name': name[: 64],
'debit': float('%.2f' % (debit)),
'credit': float('%.2f' % (credit)),
'reconcile': False,
})
def _check_rounding_diff(self, lines, company_id, name, context):
    ''' Balance the move: if credits and debits differ by more than
    0.0001, append a balancing line on the configured rounding account.
    Mutates ``lines`` in place.
    '''
    amount_diff = 0.0
    for l in lines:
        amount_diff += l[2]['credit'] - l[2]['debit']
    if abs(amount_diff) > 0.0001:
        # rounding_account_id comes from tcv.mrp.config (task_config)
        account_id = context.get('task_config').rounding_account_id.id
        amount_diff = round(amount_diff, 2)
        # positive diff (credits exceed debits) is fixed with a debit line
        debit, credit = (0.0, abs(amount_diff)) if amount_diff < 0 else (
            amount_diff, 0.0)
        lines.append(self._gen_account_move_line(
            company_id, account_id,
            _('Unit cost rounding diff %s') % (name), debit, credit))
def _get_settings_acc_cost_id(self, cr, uid, cost_name, cost_line, task):
'''
Must be overridden in models inherited
Return an account id for cost applied (blades & abrasive)
'''
return 0
def _create_model_account_move_lines(self, cr, uid, task, lines, context):
'''
Must be overridden in models inherited
Here you create and return a acount.move.lines (list of dict)
task is a task.browse object
return a sum of created lines amounts
'''
return 0.0
def call_create_account_move_lines(self, cr, uid, ids, context=None):
    ''' Public wrapper: load the company's tcv.mrp.config, prime the
    context (task_company_id/task_config/task_date) and delegate to
    create_account_move_lines.
    '''
    context = context or {}
    obj_cfg = self.pool.get('tcv.mrp.config')
    company_id = self.pool.get('res.users').browse(
        cr, uid, uid, context=context).company_id.id
    cfg_id = obj_cfg.search(cr, uid, [('company_id', '=', company_id)])
    if cfg_id:
        mrp_cfg = obj_cfg.browse(cr, uid, cfg_id[0], context=context)
    # NOTE(review): if no config record exists, ``mrp_cfg`` below is
    # unbound (NameError); also ``browse`` with a list of ids returns a
    # browse list, not a single record — confirm callers pass one id.
    task = self.browse(
        cr, uid, ids, context=context)
    context.update({
        'task_company_id': company_id,
        'task_config': mrp_cfg,
        'task_date': task.date_end})
    return self.create_account_move_lines(
        cr, uid, task, lines=None, context=context)
def create_account_move_lines(self, cr, uid, task, lines=None,
                              context=None):
    '''
    Build the account.move.line command tuples for ``task`` (a browse
    record): model-specific lines, supplies consumption, operator /
    factory-overhead costs and the work-in-progress product lines, then
    balance any rounding difference. Returns the ``lines`` list.
    '''
    if lines is None:
        lines = []
    company_id = context.get('task_company_id')
    obj_tmp = self.pool.get('tcv.mrp.template')
    template_params = obj_tmp.get_all_values(
        cr, uid, task.parent_id.template_id.id)
    #~ total_amount = 0.0
    name = '(%s/%s) %s' % (task.parent_id.process_id.ref,
                           task.parent_id.ref[-6:],
                           task.parent_id.template_id.name)
    context.update({'move_name': name, 'move_company_id': company_id})
    # inherited models extra lines
    total_amount = self._create_model_account_move_lines(
        cr, uid, task, lines, context)
    # Supplies: credit the stock output account, debit the expense account
    for cost_supp in task.supplies_ids:
        account_id = cost_supp.product_id.property_stock_account_output.\
            id or cost_supp.product_id.categ_id.\
            property_stock_account_output_categ.id
        acc_cost_id = cost_supp.product_id.property_account_expense.id or \
            cost_supp.product_id.categ_id.property_account_expense_categ.id
        if not account_id:
            raise osv.except_osv(
                _('Error!'),
                _('No supplies account found, please check product and ' +
                  'category account settings (%s)') %
                cost_supp.product_id.name)
        total_amount += cost_supp.amount
        lines.append(self._gen_account_move_line(
            company_id, account_id,
            _('%s: %s') % (cost_supp.product_id.name, name), 0.0,
            cost_supp.amount))
        lines.append(self._gen_account_move_line(
            company_id, acc_cost_id,
            _('%s: %s') % (cost_supp.product_id.name, name),
            cost_supp.amount, 0.0))
    # Operator & factory overhead
    for cost_line in task.costs_ids:
        settings = self._account_move_settings()
        # operator and factory overhead
        fo_oc_cost = 0
        for key in settings:
            amount = getattr(cost_line, key, 0.0)
            account_id = template_params['account_%s' % key]
            if amount != 0.0:
                #~ total_amount += amount
                lines.append(self._gen_account_move_line(
                    company_id, account_id, _('%s: %s') %
                    (settings[key]['name'], name), 0.0, amount))
                if not settings[key].get('isproduct'):
                    fo_oc_cost += amount
                else:
                    acc_cost_id = self._get_settings_acc_cost_id(
                        cr, uid, key, cost_line, task)
                    lines.append(self._gen_account_move_line(
                        company_id, acc_cost_id, _('%s: %s') %
                        (settings[key]['name'], name), amount, 0.0))
        # warning!
        # this code uses .output_id; the field is defined in inherited
        # models and must exist there (also the output_ids field and the
        # _output model)
        total_cost = cost_line.total_cost - cost_line.cumulative_cost
        # Productos en proceso (work-in-progress product accounts)
        product = cost_line.output_id.product_id
        account_id = product.property_stock_account_input.id or \
            product.categ_id.property_stock_account_input_categ.id
        acc_cost_id = template_params.get('account_applied_cost')
        if not account_id:
            raise osv.except_osv(
                _('Error!'),
                _('No output product account found, please check ' +
                  'product and category account settings (%s)') %
                product.name)
        # Added to fix very low cost special case (rounding error)
        if total_cost - fo_oc_cost < 0 and \
                total_cost - fo_oc_cost > -0.02:
            fo_oc_cost += total_cost - fo_oc_cost
        lines.append(self._gen_account_move_line(
            company_id, acc_cost_id, _('%s: %s') %
            (product.name, name), 0.0, total_cost - fo_oc_cost))
        lines.append(self._gen_account_move_line(
            company_id, account_id, _('%s: %s') %
            (product.name, name), total_cost, 0.0))
    self._check_rounding_diff(lines, company_id, name, context)
    return lines
def create_account_move(self, cr, uid, ids, context=None):
    ''' Create and post one account.move per task in ``ids`` (dated at
    the task end), then store move_id and set valid_cost on the task.
    Returns the list of created move ids.
    '''
    if context is None:
        context = {}
    obj_move = self.pool.get('account.move')
    obj_cfg = self.pool.get('tcv.mrp.config')
    obj_per = self.pool.get('account.period')
    company_id = self.pool.get('res.users').browse(
        cr, uid, uid, context=context).company_id.id
    cfg_id = obj_cfg.search(cr, uid, [('company_id', '=', company_id)])
    if cfg_id:
        mrp_cfg = obj_cfg.browse(cr, uid, cfg_id[0], context=context)
    else:
        raise osv.except_osv(_('Error!'),
                             _('Please set a valid configuration '))
    so_brw = self.browse(cr, uid, ids, context={})
    move_ids = []
    for task in so_brw:
        date = task.date_end  # account_move.date = end of task
        context.update({'task_company_id': company_id,
                        'task_config': mrp_cfg, 'task_date': date})
        ref = '[%s - %s] %s' % (
            task.parent_id.process_id.ref, task.parent_id.ref,
            task.name) if task.name else '[%s - %s] %s' % (
            task.parent_id.process_id.ref, task.parent_id.ref,
            task.parent_id.template_id.name)
        period_id = obj_per.find(cr, uid, date)[0]
        move = {'ref': ref[: 64],
                'journal_id': task.parent_id.template_id.journal_id.id,
                'date': date,
                'min_date': date,
                'company_id': company_id,
                'state': 'draft',
                'to_check': False,
                'period_id': period_id,
                'narration': _('Production process: \n\tTemplate: ' +
                               '%s\n\tProcess: %s\n\tSubprocess: ' +
                               '%s\n\tDate: %s - %s\n\tInfo: %s') % (
                    task.parent_id.template_id.name,
                    task.parent_id.process_id.ref,
                    task.parent_id.ref,
                    task.date_start, task.date_end,
                    task.task_info),
                }
        lines = self.create_account_move_lines(
            cr, uid, task, None, context)
        if lines:
            move.update({'line_id': lines})
            move_id = obj_move.create(cr, uid, move, context)
            if move_id:
                # post immediately and link the move back to the task
                obj_move.post(cr, uid, [move_id], context=context)
                move_ids.append(move_id)
                self.write(
                    cr, uid, task.id, {'move_id': move_id,
                                       'valid_cost': True}, context)
    return move_ids
def _create_model_stock_move_lines(self, cr, uid, task, lines,
context=None):
return lines
def create_stock_move_lines(self, cr, uid, task, lines, context=None):
    '''
    Build the stock.move command tuples for ``task`` (a browse record):
    model-specific moves plus one consumption move per supply line, from
    the supply's current lot location to the configured MRP location.
    '''
    if lines is None:
        lines = []
    if context is None:
        context = {}
    name = '(%s/%s) %s' % (
        task.parent_id.process_id.ref, task.parent_id.ref[-6:],
        task.parent_id.template_id.name)
    context.update({'task_name': name})
    obj_lot = self.pool.get('stock.production.lot')
    # inherited models extra lines
    # NOTE(review): the reassignment below discards whatever the caller
    # passed in ``lines`` — confirm this is intentional.
    lines = []
    lines = self._create_model_stock_move_lines(
        cr, uid, task, lines, context)
    # Supplies
    for cost_supp in task.supplies_ids:
        # hard-coded fallback locations: 97 = default, 3510 for the
        # specific product ids listed below
        location_id = [97]
        if cost_supp.prod_lot_id:
            location_id = obj_lot.get_actual_lot_location(
                cr, uid, cost_supp.prod_lot_id.id, context) or [97]
        if cost_supp.product_id.id in (744, 608, 2518, 2519, 2520) and \
                location_id == [97]:
            location_id = [3510]
        if not location_id:
            raise osv.except_osv(_('Error!'),
                                 _('Missign location for product (%s)') %
                                 cost_supp.prod_lot_id.name)
        supp = {'product_id': cost_supp.product_id.id,
                'name': name,
                'date': context.get('task_date'),
                'location_id': location_id[0],
                'location_dest_id': context.get(
                    'task_config').location_id.id,
                'product_qty': cost_supp.quantity,
                'product_uos_qty': cost_supp.quantity,
                'product_uom': cost_supp.product_id.uom_id.id,
                'prodlot_id': cost_supp.prod_lot_id.id if
                cost_supp.prod_lot_id else 0,
                'state': 'draft'}
        lines.append((0, 0, supp))
    return lines
    def create_stock_picking(self, cr, uid, ids, vals, context=None):
        """Create one internal stock picking per task in ``ids``.

        Looks up the company's MRP configuration (raises if none is set),
        builds a picking per task with its supply move lines, records the
        new picking id on the task and marks the cost as valid.

        Returns the list of created picking ids.  ``vals`` is currently
        unused -- kept for API compatibility with callers.
        """
        if context is None:
            context = {}
        obj_pck = self.pool.get('stock.picking')
        obj_cfg = self.pool.get('tcv.mrp.config')
        company_id = self.pool.get('res.users').browse(
            cr, uid, uid, context=context).company_id.id
        company = self.pool.get('res.company').browse(
            cr, uid, company_id, context=context)
        cfg_id = obj_cfg.search(cr, uid, [('company_id', '=', company_id)])
        if cfg_id:
            mrp_cfg = obj_cfg.browse(cr, uid, cfg_id[0], context=context)
        else:
            raise osv.except_osv(_('Error!'),
                                 _('Please set a valid configuration '))
        so_brw = self.browse(cr, uid, ids, context={})
        pick_ids = []
        for task in so_brw:
            date = task.date_start # stock_move.date = start of task
            # Shared via context so create_stock_move_lines can read the
            # company, configuration and date (mutates caller's context).
            context.update({'task_company_id': company_id,
                            'task_config': mrp_cfg,
                            'task_date': date})
            origin = '[%s - %s] %s' % (
                task.parent_id.process_id.ref, task.parent_id.ref,
                task.name) if task.name else '[%s - %s] %s' % (
                task.parent_id.process_id.ref, task.parent_id.ref,
                task.parent_id.template_id.name)
            # origin is truncated to 64 chars to fit the picking field size.
            picking = {'name': '/',
                       'type': self._stock_picking_type,
                       'origin': origin[:64],
                       'date': date,
                       'invoice_state': 'none',
                       'stock_journal_id': mrp_cfg.stock_journal_id.id,
                       'company_id': company_id,
                       'auto_picking': False,
                       'move_type': 'one',
                       'partner_id': company.partner_id.id,
                       'state_rw': 0,
                       'note': _(
                           'Production process: \n\tTemplate: %s\n\t' +
                           'Process: %s\n\tSubprocess: %s\n\t' +
                           'Date: %s - %s\n\tInfo: %s') % (
                           task.parent_id.template_id.name,
                           task.parent_id.process_id.ref,
                           task.parent_id.ref,
                           task.date_start, task.date_end,
                           task.task_info),
                       }
            lines = self.create_stock_move_lines(cr, uid, task, None, context)
            if lines:
                picking.update({'move_lines': lines})
            pick_id = obj_pck.create(cr, uid, picking, context)
            if pick_id:
                pick_ids.append(pick_id)
                # Link the picking back to the task and validate its cost.
                self.write(
                    cr, uid, task.id, {'picking_id': pick_id,
                                       'valid_cost': True}, context)
        return pick_ids
def get_task_ids_by_date_range(self, cr, uid, template_id,
date_from, date_to, context=None):
sql = """
select tk.id as task_id from tcv_mrp_subprocess sp
left join %s tk on sp.id = tk.parent_id
where template_id = %s and
tk.date_end between '%s 00:00:00' and '%s 23:59:59'
""" % (self._name.replace('.', '_'), template_id, date_from, date_to)
cr.execute(sql)
res = cr.fetchall()
if res:
l_ids = map(lambda i: i[0], res)
res = (str(l_ids)[1: - 1]).replace('L', '')
return '(%s)' % res, len(l_ids)
else:
return [], 0
    def get_task_input_sumary(self, cr, uid, ids_str, context=None):
        """Hook: summary of task inputs for the tasks in ``ids_str`` (a
        pre-rendered SQL id list).  Base implementation returns an empty
        dict; subclasses override.  ('sumary' spelling kept -- part of
        the public API.)
        """
        return {}
    def get_task_output_sumary(self, cr, uid, ids_str, context=None):
        """Hook: summary of task outputs for the tasks in ``ids_str`` (a
        pre-rendered SQL id list).  Base implementation returns an empty
        dict; subclasses override.  ('sumary' spelling kept -- part of
        the public API.)
        """
        return {}
def get_task_runtime_sumary(self, cr, uid, ids_str, context=None):
sql = """
select sum(EXTRACT(EPOCH FROM date_end-date_start)/3600) as run_time,
sum(downtime) as down_time
from %s g
where g.id in %s;
""" % (self._name.replace('.', '_'), ids_str)
cr.execute(sql)
return cr.dictfetchone() or {'run_time': 0, 'downtime': 0}
def button_update_downtime(self, cr, uid, ids, context):
ids = isinstance(ids, (int, long)) and [ids] or ids
for item in self.browse(cr, uid, ids, context={}):
downtime = 0
for dt in item.stops_ids:
downtime += dt.stop_time
if downtime:
self.write(
cr, uid, [item.id], {'downtime': downtime},
context=context)
return True
#------------------------------------------------------------- on_change...
def on_change_run_time(self, cr, uid, ids, date_start, date_end):
return {'value': {'run_time': self._compute_run_time(
cr, uid, date_start, date_end)}}
##----------------------------------------------------- create write unlink
def write(self, cr, uid, ids, vals, context=None):
invalidate_calc = False
for key in vals:
invalidate_calc = invalidate_calc or bool(vals[key])
if invalidate_calc and not vals.get('valid_cost'):
vals.update({'valid_cost': False})
res = super(tcv_mrp_basic_task, self).write(
cr, uid, ids, vals, context)
return res
def unlink(self, cr, uid, ids, context=None):
so_brw = self.browse(cr, uid, ids, context={})
unlink_ids = []
for task in so_brw:
if task.state == 'draft':
unlink_ids.append(task['id'])
else:
raise osv.except_osv(_('Invalid action !'),
_('Cannot delete task that are ' +
'already Done!'))
res = super(tcv_mrp_basic_task, self).unlink(cr, uid, ids, context)
return res
##---------------------------------------------------------------- Workflow
def test_draft(self, cr, uid, ids, *args):
if len(ids) != 1:
raise osv.except_osv(_('Error!'),
_('Multiple operations not allowed'))
for task in self.browse(cr, uid, ids, context=None):
if task.parent_id.progress > 0 and \
task._name != 'tcv.mrp.finished.slab':
raise osv.except_osv(_('Error!'),
_('Can\'t reset a process with ' +
'proceced outputs'))
if task.move_id and task.move_id.state != 'draft':
raise osv.except_osv(_('Error!'), _(
'Can\'t reset a process while account move state ' +
'<> "Draft"'))
return True
    def button_draft(self, cr, uid, ids, context=None):
        """Reset a single task to draft.

        After the guard in ``test_draft`` passes: clears the task's state
        and cost fields, then deletes the previously generated account
        move and stock picking.  Returns the result of the write.
        """
        if self.test_draft(cr, uid, ids, context):
            task = self.browse(cr, uid, ids[0], context=context)
            # Capture the linked ids BEFORE the write below clears the
            # move_id / picking_id references on the task.
            task_move_id = task.move_id.id if task and task.move_id else False
            task_picking_id = task.picking_id.id if task and \
                task. picking_id else False
            vals = {'state': 'draft', 'move_id': 0, 'picking_id': 0,
                    'operator_cost': 0, 'factory_overhead': 0}
            res = self.write(cr, uid, ids, vals, context)
            # Remove the generated documents only after the task no longer
            # references them.
            if task_move_id:
                self.pool.get('account.move').unlink(
                    cr, uid, [task_move_id], context)
            if task_picking_id:
                self.pool.get('stock.picking').unlink(
                    cr, uid, [task_picking_id], context)
            #~ self.reverse_stock_picking(cr,uid,task_picking_id,context)
            return res
def test_done(self, cr, uid, ids, *args):
ids = isinstance(ids, (int, long)) and [ids] or ids
for item in self.browse(cr, uid, ids, context={}):
if not item.valid_cost:
raise osv.except_osv(_('Error!'),
_('You must calculate cost ' +
'distribution first!'))
return True
    def do_before_done(self, cr, uid, ids, context=None):
        """Extension hook run just before a task is marked done; the base
        implementation is a no-op returning True."""
        return True
def button_done(self, cr, uid, ids, context=None):
context = context or {}
if self.test_done(cr, uid, ids, context):
self.do_before_done(cr, uid, ids, context)
self.create_account_move(cr, uid, ids, context)
self.create_stock_picking(cr, uid, ids, context)
vals = {'state': 'done', 'valid_cost': True}
return self.write(cr, uid, ids, vals, context)
# Instantiating the class registers the model with the OpenERP ORM
# (pre-v8 convention).
tcv_mrp_basic_task()
##------------------------------------------------- tcv_mrp_basic_task_supplies
class tcv_mrp_basic_task_supplies(osv.osv):
    """Supply line consumed by a basic MRP task.

    Each row links a product (optionally a production lot) and a quantity
    to its parent task; unit price and amount are computed on the fly
    from the cost-management model.
    """
    _name = 'tcv.mrp.basic.task.supplies'
    _description = ''
    ##-------------------------------------------------------------------------
    ##--------------------------------------------------------- function fields
    def _get_unit_price(self, cr, uid, ids, name, arg, context=None):
        """Function-field getter for 'unit_price' and 'amount' (multi).

        Looks the unit cost up in tcv.cost.management per lot/product and
        returns {id: {'unit_price': ..., 'amount': unit_price * qty}}.
        """
        if context is None:
            context = {}
        if not len(ids):
            return []
        res = {}
        obj_cst = self.pool.get('tcv.cost.management')
        for item in self.browse(cr, uid, ids, context={}):
            res[item.id] = {}
            unit_price = obj_cst.get_tcv_cost(
                cr, uid, item.prod_lot_id.id, item.product_id.id, context)
            res[item.id]['unit_price'] = unit_price
            res[item.id]['amount'] = unit_price * item.quantity
        return res
    ##-------------------------------------------------------------------------
    _columns = {
        # Parent task; supply lines are removed together with the task.
        'task_id': fields.many2one(
            'tcv.mrp.basic.task', 'Supplies',
            required=True, ondelete='cascade'),
        'date_start': fields.related(
            'task_id', 'date_start', type='datetime', string='Date started',
            store=False, readonly=True),
        'date_end': fields.related(
            'task_id', 'date_end', type='datetime', string='Date finished',
            store=False, readonly=True),
        'product_id': fields.many2one(
            'product.product', 'Product', ondelete='restrict',
            required=True),
        'prod_lot_id': fields.many2one(
            'stock.production.lot', 'lot #'),
        'quantity': fields.float(
            'Quantity', digits_compute=dp.get_precision('Product UoM'),
            required=True),
        # Computed (non-stored) pricing fields; see _get_unit_price.
        'unit_price': fields.function(
            _get_unit_price, method=True, type='float', string='Unit price',
            digits_compute=dp.get_precision('Account'), multi='all'),
        'amount': fields.function(
            _get_unit_price, method=True, type='float', string='Amount',
            digits_compute=dp.get_precision('Account'), multi='all'),
        }
    _defaults = {
        }
    _sql_constraints = [
        ('quantity_gt_zero', 'CHECK (quantity>0)',
         'The quantity of supplies must be > 0!'),
        ]
    ##-------------------------------------------------------------------------
    def _get_total_supplies_cost(self, cr, uid, ids, context):
        """Sum the computed 'amount' over the supply lines in ``ids``."""
        total = 0.0
        for item in self.browse(cr, uid, ids, context={}):
            total += item.amount
        return total
    ##------------------------------------------------------------ on_change...
    def on_change_prod_lot_id(self, cr, uid, ids, prod_lot_id):
        """UI on_change: selecting a lot auto-fills its product."""
        res = {}
        if prod_lot_id:
            obj_lot = self.pool.get('stock.production.lot')
            lot_brw = obj_lot.browse(cr, uid, prod_lot_id, context=None)
            res.update({'product_id': lot_brw.product_id.id})
        return {'value': res}
    ##----------------------------------------------------- create write unlink
    ##---------------------------------------------------------------- Workflow
tcv_mrp_basic_task_supplies()
##---------------------------------------------------- tcv_mrp_basic_task_costs
class tcv_mrp_basic_task_costs(osv.osv):
    """Per-task cost distribution line.

    Purely declarative model holding the cost components (cumulative,
    supplies, labor, overhead) distributed according to relative area,
    plus the resulting unit and total cost.
    """
    _name = 'tcv.mrp.basic.task.costs'
    _description = 'Calc basic costs'
    #~ _rec_name='prod_lot_id'
    ##-------------------------------------------------------------------------
    ##--------------------------------------------------------- function fields
    _columns = {
        # Parent task; cost lines are removed together with the task.
        'task_id': fields.many2one(
            'tcv.mrp.basic.task', 'costs', required=False, ondelete='cascade'),
        'cumulative_cost': fields.float(
            'Cumulative cost', digits_compute=dp.get_precision('Account'),
            readonly=False, help="Cumulative cost of processed products"),
        'supplies_cost': fields.float(
            'Supplies cost', digits_compute=dp.get_precision('Account'),
            readonly=False, help="Cost of supplies used, distributed " +
            "according to the relative area"),
        'operator_cost': fields.float(
            'Operator cost', digits_compute=dp.get_precision('Account'),
            readonly=False, help="Labor cost determined based on runtime, " +
            "distributed according to the relative area, use the factor of " +
            "the template: operator_cost"),
        'factory_overhead': fields.float(
            'Factory overhead', digits_compute=dp.get_precision('Account'),
            readonly=False, help="Factory overhead determined based on " +
            "runtime, distributed according to the relative area, use the " +
            "factor of the template: factory_overhead"),
        'real_unit_cost': fields.float(
            'Unit cost', digits_compute=dp.get_precision('MRP unit cost'),
            readonly=True),
        'total_cost': fields.float(
            'Total cost', digits_compute=dp.get_precision('Account'),
            readonly=False),
        }
    # All cost components start at zero.
    _defaults = {
        'cumulative_cost': lambda *a: 0.0,
        'supplies_cost': lambda *a: 0.0,
        'operator_cost': lambda *a: 0.0,
        'factory_overhead': lambda *a: 0.0,
        'real_unit_cost': lambda *a: 0.0,
        'total_cost': lambda *a: 0.0,
        }
    _sql_constraints = [
        ]
    ##-------------------------------------------------------------------------
    ##------------------------------------------------------------ on_change...
    ##----------------------------------------------------- create write unlink
    ##---------------------------------------------------------------- Workflow
tcv_mrp_basic_task_costs()
##---------------------------------------------------- tcv_mrp_basic_task_stops
class tcv_mrp_basic_task_stops(osv.osv):
    """Machine-stop (downtime) record attached to a basic MRP task.

    Each row stores a stop interval, the issue that caused it and the
    operator; the stop duration in hours is a computed field.
    """
    _name = 'tcv.mrp.basic.task.stops'
    _description = ''
    _order = 'stop_start'
    ##-------------------------------------------------------------------------
    ##------------------------------------------------------- _internal methods
    ##--------------------------------------------------------- function fields
    def _compute_all_fields(self, cr, uid, ids, name, arg, context=None):
        """Function-field getter for 'stop_time' (multi).

        Delegates the duration calculation to the task model's
        _compute_run_time helper; returns {id: {'stop_time': hours}}.
        """
        context = context or {}
        if not len(ids):
            return []
        res = {}
        obj_tsk = self.pool.get('tcv.mrp.basic.task')
        for item in self.browse(cr, uid, ids, context={}):
            stop_time = obj_tsk._compute_run_time(
                cr, uid, item.stop_start, item.stop_end, context)
            res[item.id] = {'stop_time': stop_time,
                            }
        return res
    ##-------------------------------------------------------------------------
    _columns = {
        # Parent task; stop lines are removed together with the task.
        'task_id': fields.many2one(
            'tcv.mrp.basic.task', 'Stops', required=True, ondelete='cascade'),
        'parent_id': fields.related(
            'task_id', 'parent_id', type='many2one',
            relation='tcv.mrp.subprocess', string='Subprocess',
            store=False, readonly=True),
        'template_id': fields.related(
            'parent_id', 'template_id', type='many2one',
            relation='tcv.mrp.template', string='Task template',
            store=False, readonly=True),
        'stop_issue_id': fields.many2one(
            'tcv.mrp.stops.issues', 'Issue', readonly=False, required=True,
            ondelete='restrict'),
        'name': fields.char(
            'Description', size=256, required=False, readonly=False),
        'stop_start': fields.datetime(
            'Stop start', required=True, readonly=False,
            help="Date on which this stop has been started."),
        'stop_end': fields.datetime(
            'Stop end', required=True, select=True, readonly=False,
            help="Date on which this stop has been finished."),
        'stop_time': fields.function(
            _compute_all_fields, method=True, type='float',
            string='Stop time', multi='all',
            help="The stop time in hours (the decimal part represents " +
            "the hour's fraction 0.50 = 30 min) (minus downtime)."),
        'employee_id': fields.many2one(
            'hr.employee', "Operator", required=False, ondelete='restrict',
            help="Machine operator name"),
        }
    _defaults = {
        }
    _sql_constraints = [
        ('stop_time_gt_zero', 'CHECK (stop_start<stop_end)',
         'The stop time must be > 0 !'),
        ]
    ##-------------------------------------------------------------------------
    ##---------------------------------------------------------- public methods
    ##-------------------------------------------------------- buttons (object)
    ##------------------------------------------------------------ on_change...
    def on_change_stop_time(self, cr, uid, ids, stop_start, stop_end):
        """UI on_change: recompute the stop duration when a date changes.

        NOTE(review): _compute_run_time is called here without the
        context argument used in _compute_all_fields -- presumably it is
        optional; confirm against the task model.
        """
        obj_tsk = self.pool.get('tcv.mrp.basic.task')
        return {'value':
                {'stop_time': obj_tsk._compute_run_time(
                    cr, uid, stop_start, stop_end)}}
    ##----------------------------------------------------- create write unlink
    ##---------------------------------------------------------------- Workflow
tcv_mrp_basic_task_stops()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| [
"juanvmarquezl@gmail.com"
] | juanvmarquezl@gmail.com |
60392a30744a47c26902a16ef07ffd786dbd9d0d | f0e6b08f33ea27ca0382f0bf8d9b5e33c01d1dfc | /djangoproject/myproject/myproject/settings.py | 0fcb5842788261b489b6bce73e7e2cb25479c510 | [] | no_license | nupur3101/batch430 | 8edef26909c2223031d8ef8690ba5cc6a0c83335 | cb9f0644a9f6662e731eb8b9b3f0762a738864f4 | refs/heads/master | 2020-12-28T04:45:52.825960 | 2020-01-30T12:35:33 | 2020-01-30T12:35:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,832 | py | """
Django settings for myproject project.
Generated by 'django-admin startproject' using Django 3.0.1.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): the key is hard-coded here; for production move it into
# an environment variable like EMAIL_HOST_PASSWORD below.
SECRET_KEY = 'ti=wk#krw4t4-2lt$i5weld7yf!%zxqh^nw4^p9a9u0r##6@*f'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'users',
    'blog',
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'myproject.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        # Project-level templates live in <BASE_DIR>/templates; app
        # template dirs are also searched because APP_DIRS is True.
        'DIRS': [os.path.join(BASE_DIR,'templates')],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'myproject.wsgi.application'
# Outgoing mail: Gmail over implicit SSL (port 465); the account
# password is read from the environment, not stored in the repo.
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_USE_SSL = True
EMAIL_PORT = 465
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_HOST_USER = "simrangrover5@gmail.com"
EMAIL_HOST_PASSWORD = os.environ.get('EMAIL_HOST_PASSWORD')
DEFAULT_FROM_EMAIL = EMAIL_HOST_USER
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
#create database signup character set 'utf8' -->to create database at mysql
# NOTE(review): local MySQL with root and an empty password -- fine for
# a dev box, must not be used in production.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.mysql',
        #'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
        'NAME' : 'signup', #database name
        'HOST' : 'localhost',
        'PORT' : 3306,
        'USER' : 'root',
        'PASSWORD' : ""
    }
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
# NOTE(review): STATIC_ROOT is empty -- `collectstatic` will not work
# until it points at a real directory.
STATIC_ROOT = ""
STATICFILES_DIRS = (os.path.join(BASE_DIR,'static'),)
| [
"simrangrover5@gmail.com"
] | simrangrover5@gmail.com |
2cfd1c024c96fe49b55a5504ae8a2442e1d5c830 | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /TkL6GTu9QMhYnv869_14.py | a02ec9a366f6a9388b2c80a13da2f67a67dc42a7 | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 768 | py | """
Create a function that adds a string ending to each member in a list.
### Examples
add_ending(["clever", "meek", "hurried", "nice"], "ly")
➞ ["cleverly", "meekly", "hurriedly", "nicely"]
add_ending(["new", "pander", "scoop"], "er")
➞ ["newer", "panderer", "scooper"]
add_ending(["bend", "sharpen", "mean"], "ing")
➞ ["bending", "sharpening", "meaning"]
### Notes
* Don't forget to `return` the result.
* If you get stuck on a challenge, find help in the **Resources** tab.
* If you're _really_ stuck, unlock solutions in the **Solutions** tab.
"""
def add_ending(lst, ending):
    """Return a new list with ``ending`` appended to every string in ``lst``.

    >>> add_ending(["new", "pander", "scoop"], "er")
    ['newer', 'panderer', 'scooper']
    """
    # A comprehension replaces the original per-item list-build-and-join.
    return [word + ending for word in lst]
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
9babd500af3d3514b3868f6b6c35a7c0b134ea0b | 77ab53380f74c33bb3aacee8effc0e186b63c3d6 | /5389_food_orders.py | 608d2efac050ccce436916c2505a26f57eaf268a | [] | no_license | tabletenniser/leetcode | 8e3aa1b4df1b79364eb5ca3a97db57e0371250b6 | d3ebbfe2e4ab87d5b44bc534984dfa453e34efbd | refs/heads/master | 2023-02-23T18:14:31.577455 | 2023-02-06T07:09:54 | 2023-02-06T07:09:54 | 94,496,986 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,594 | py | '''
Given the array orders, which represents the orders that customers have done in a restaurant. More specifically orders[i]=[customerNamei,tableNumberi,foodItemi] where customerNamei is the name of the customer, tableNumberi is the table customer sit at, and foodItemi is the item customer orders.
Return the restaurant's “display table”. The “display table” is a table whose row entries denote how many of each food item each table ordered. The first column is the table number and the remaining columns correspond to each food item in alphabetical order. The first row should be a header whose first column is “Table”, followed by the names of the food items. Note that the customer names are not part of the table. Additionally, the rows should be sorted in numerically increasing order.
Example 1:
Input: orders = [["David","3","Ceviche"],["Corina","10","Beef Burrito"],["David","3","Fried Chicken"],["Carla","5","Water"],["Carla","5","Ceviche"],["Rous","3","Ceviche"]]
Output: [["Table","Beef Burrito","Ceviche","Fried Chicken","Water"],["3","0","2","1","0"],["5","0","1","0","1"],["10","1","0","0","0"]]
Explanation:
The displaying table looks like:
Table,Beef Burrito,Ceviche,Fried Chicken,Water
3 ,0 ,2 ,1 ,0
5 ,0 ,1 ,0 ,1
10 ,1 ,0 ,0 ,0
For the table 3: David orders "Ceviche" and "Fried Chicken", and Rous orders "Ceviche".
For the table 5: Carla orders "Water" and "Ceviche".
For the table 10: Corina orders "Beef Burrito".
Example 2:
Input: orders = [["James","12","Fried Chicken"],["Ratesh","12","Fried Chicken"],["Amadeus","12","Fried Chicken"],["Adam","1","Canadian Waffles"],["Brianna","1","Canadian Waffles"]]
Output: [["Table","Canadian Waffles","Fried Chicken"],["1","2","0"],["12","0","3"]]
Explanation:
For the table 1: Adam and Brianna order "Canadian Waffles".
For the table 12: James, Ratesh and Amadeus order "Fried Chicken".
Example 3:
Input: orders = [["Laura","2","Bean Burrito"],["Jhon","2","Beef Burrito"],["Melissa","2","Soda"]]
Output: [["Table","Bean Burrito","Beef Burrito","Soda"],["2","1","1","1"]]
Constraints:
1 <= orders.length <= 5 * 10^4
orders[i].length == 3
1 <= customerNamei.length, foodItemi.length <= 20
customerNamei and foodItemi consist of lowercase and uppercase English letters and the space character.
tableNumberi is a valid integer between 1 and 500.
'''
from collections import defaultdict
class Solution:
    def displayTable(self, orders):
        """Build the restaurant "display table".

        orders: list of [customerName, tableNumber, foodItem] triples
        (customer names are ignored for the table).

        Returns a header row ["Table", <food items alphabetically>]
        followed by one row per table in numerically increasing order,
        with every count rendered as a string.
        """
        counts = defaultdict(lambda: defaultdict(int))
        foods = set()
        for _customer, table, food in orders:
            foods.add(food)
            counts[table][food] += 1
        food_cols = sorted(foods)
        header = ["Table"] + food_cols
        # Tables are strings but must sort numerically.
        body = [
            [table] + [str(counts[table][food]) for food in food_cols]
            for table in sorted(counts, key=int)
        ]
        return [header] + body
if __name__ == "__main__":
    # Quick manual check (example 1 from the problem statement); guarded
    # so importing this module no longer runs the demo as a side effect.
    orders = [["David", "3", "Ceviche"], ["Corina", "10", "Beef Burrito"],
              ["David", "3", "Fried Chicken"], ["Carla", "5", "Water"],
              ["Carla", "5", "Ceviche"], ["Rous", "3", "Ceviche"]]
    print(Solution().displayTable(orders))
| [
"tabletenniser@gmail.com"
] | tabletenniser@gmail.com |
7674273a6bb9f50a92b66b039b3705b528f49169 | 549a573c35dd79f77ded35a0c9cc0b6074daba64 | /src/pipelines/epidemiology/ch_openzh.py | 84b9bfc4c3b7e25a898a84d4e355b0320286a7ec | [
"Apache-2.0",
"CC-BY-4.0",
"CC-BY-SA-3.0",
"LicenseRef-scancode-proprietary-license",
"GPL-3.0-only",
"AGPL-3.0-only",
"LicenseRef-scancode-public-domain",
"Unlicense"
] | permissive | harrisonzhu508/data | f91d5fb2847bfcba1c7debaad490266a11423424 | a3b95ced4abad6653d20f67f3f285abeeb0c2b25 | refs/heads/master | 2022-11-30T13:33:20.176773 | 2020-05-26T10:24:47 | 2020-05-26T10:24:47 | 266,201,099 | 0 | 0 | Apache-2.0 | 2020-08-03T20:55:05 | 2020-05-22T20:27:29 | HTML | UTF-8 | Python | false | false | 1,318 | py | from typing import Any, Dict, List
from pandas import DataFrame, concat, merge
from lib.pipeline import DefaultPipeline
from lib.time import datetime_isoformat
from lib.utils import grouped_diff
class OpenZHPipeline(DefaultPipeline):
    """Pipeline for the openZH canton-level COVID-19 dataset (Switzerland)."""

    data_urls: List[str] = [
        "https://raw.github.com/openZH/covid_19/master/COVID19_Fallzahlen_CH_total.csv"
    ]

    def parse_dataframes(
        self, dataframes: List[DataFrame], aux: Dict[str, DataFrame], **parse_opts
    ) -> DataFrame:
        # Map the openZH cumulative column names onto our schema.
        column_map = {
            "ncumul_tested": "tested",
            "ncumul_conf": "confirmed",
            "ncumul_deceased": "deceased",
            "ncumul_hosp": "hospitalized",
            "ncumul_ICU": "intensive_care",
            "ncumul_vent": "ventilator",
            "ncumul_released": "recovered",
            "abbreviation_canton_and_fl": "subregion1_code",
        }
        records = dataframes[0].rename(columns=column_map)
        records = records.drop(columns=["time", "source"])

        # TODO: Match FL subdivision (not a canton?)
        records = records[records.subregion1_code != "FL"]

        # Cumulative counts -> daily differences per canton.
        records = grouped_diff(records, ["subregion1_code", "date"])
        records["country_code"] = "CH"
        return records
| [
"oscar@wahltinez.org"
] | oscar@wahltinez.org |
0dea551c1c168da413200bb795660b6ad3d2ebed | ee76919635ce69e14ddf64ee9483dca073625aaf | /pythonAlgorithm/Practice/2049统计最高分的节点数目.py | f02db61c51e3be23fda138522eb59a428d8074f0 | [] | no_license | bossjoker1/algorithm | 574e13f0dd8fe6b3e810efc03649493e90504288 | c745168a01380edb52155ca3918787d2dd356e5b | refs/heads/master | 2022-07-13T16:26:10.324544 | 2022-07-10T03:28:15 | 2022-07-10T03:28:15 | 407,361,838 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 840 | py | class Solution:
def countHighestScoreNodes(self, parents: List[int]) -> int:
n = len(parents)
g = defaultdict(list)
for i in range(1, n):
g[parents[i]].append(i)
nums = [1] * n
def dfs(root:int) -> int:
if root not in g:
return 1
for item in g[root]:
nums[root] += dfs(item)
return nums[root]
dfs(0)
maxn, cnt = -1, 0
for i in range(n):
res = 1
if parents[i] == -1:
res *= 1
else:
res *= nums[0] - nums[i]
for item in g[i]:
res *= nums[item]
if res == maxn:
cnt += 1
elif res > maxn:
maxn = res
cnt = 1
return cnt
| [
"1397157763@qq.com"
] | 1397157763@qq.com |
d146de0869cc4d752315a9eeeccfb815d0d81a47 | 45a0434de7cb5aaf51f372a9ea39c2e62528e8d7 | /preprocessor_inspec.py | de915f6306dfb7b0760aa56d71dab59e5c778e71 | [] | no_license | hongtaowutj/Seq2Seq-Keyphrase-Generation | 44b5b24f3af7a85c24fc5ef231c53c1dac7e48ff | 6f2d08222b108b543b7628b32e98480f2e3a32b0 | refs/heads/master | 2020-03-27T10:43:09.941194 | 2018-07-23T07:21:35 | 2018-07-23T07:21:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 137,582 | py | import os
import sys
sys.path.append(os.getcwd())
import numpy as np
from datetime import datetime
import time
import random
from math import log
import json
from collections import OrderedDict
from utils.data_preprocessing_v2 import Preprocessing
from utils.reading_files import ReadingFiles
from utils.indexing import Indexing
from utils.data_connector import DataConnector
from utils.sequences_processing import SequenceProcessing
from utils.true_keyphrases import TrueKeyphrases
def reading_(params):
    """Read the raw Inspec train/validation/test folders and pickle them.

    For each split, lists the folder's files, parses and merges the
    Inspec documents, and stores the result as a python-dict pickle in
    the same folder.  The three previously copy-pasted stanzas are now a
    single loop; the first log line (which used to say just "Reading raw
    data...") is made consistent with the other splits.
    """
    splits = [
        (params['train_path'], 'inspec_train_doc_keyphrases.pkl', 'training'),
        (params['valid_path'], 'inspec_val_doc_keyphrases.pkl', 'validation'),
        (params['test_path'], 'inspec_test_doc_keyphrases.pkl', 'test'),
    ]
    print("\n=========\n")
    sys.stdout.flush()
    print(str(datetime.now()))
    sys.stdout.flush()
    for path, pkl_name, label in splits:
        t0 = time.time()
        print("Reading raw %s data..." % label)
        sys.stdout.flush()
        read_data = ReadingFiles(path, pkl_name)
        read_data.listing_files()
        read_data.reading_inspec()
        read_data.merging_inspec()
        # raw text data is stored in python dictionary format
        read_data.save_files()
        t1 = time.time()
        print("Reading raw %s data done in %.3fsec" % (label, t1 - t0))
        sys.stdout.flush()
def preprocessing_train(params):
    """Preprocess the pickled raw Inspec training set into token arrays.

    Reads 'inspec_train_doc_keyphrases.pkl' from params['train_path']
    (doc-id -> (title, abstract, keyphrases)), joins title and abstract
    into one input text per document, tokenizes inputs and keyphrase
    outputs, and saves train_input_tokens.npy, train_output_tokens.npy
    and train_tokens.npy back into the training folder.
    """
    train_path = params['train_path']
    print("\n=========\n")
    sys.stdout.flush()
    print(str(datetime.now()))
    sys.stdout.flush()
    t0 = time.time()
    print("Preprocessing raw training data...")
    sys.stdout.flush()
    data_connector = DataConnector(train_path, 'inspec_train_doc_keyphrases.pkl', data=None)
    data_connector.read_pickle()
    data = data_connector.read_file
    # v = (title, abstract, keyphrases); title and abstract are joined by
    # a sentence separator so the model sees one input sequence.
    in_text = [v[0] + " . " + v[1] for v in data.values()]
    out_keyphrases = [v[2] for v in data.values()]
    print("\nnumber of examples in raw data inputs: %s\n"%(len(in_text)))
    sys.stdout.flush()
    print("\nnumber of examples in raw data outputs: %s\n"%(len(out_keyphrases)))
    sys.stdout.flush()
    print("\n in_text[0]: %s\n"%(in_text[0]))
    sys.stdout.flush()
    print("\n out_keyphrases[0]: %s\n"%(out_keyphrases[0]))
    sys.stdout.flush()
    prep = Preprocessing()
    prep_inputs = prep.preprocess_in(in_text)
    prep_outputs = prep.preprocess_out(out_keyphrases)
    input_tokens = prep.tokenize_in(prep_inputs)
    output_tokens = prep.tokenize_out(prep_outputs)
    all_tokens = prep.get_all_tokens(input_tokens, output_tokens)
    # without splitting data into training and test set
    print("\nnumber of examples in preprocessed data inputs: %s\n"%(len(input_tokens)))
    sys.stdout.flush()
    print("\nnumber of examples in preprocessed data outputs: %s\n"%(len(output_tokens)))
    sys.stdout.flush()
    print("\n input_tokens[0]: %s\n"%(input_tokens[0]))
    sys.stdout.flush()
    print("\n output_tokens[0]: %s\n"%(output_tokens[0]))
    sys.stdout.flush()
    in_connector = DataConnector(train_path, 'train_input_tokens.npy', input_tokens)
    in_connector.save_numpys()
    out_connector = DataConnector(train_path, 'train_output_tokens.npy', output_tokens)
    out_connector.save_numpys()
    tokens_connector = DataConnector(train_path, 'train_tokens.npy', all_tokens)
    tokens_connector.save_numpys()
    t1 = time.time()
    print("Preprocessing raw training data done in %.3fsec" % (t1 - t0))
    sys.stdout.flush()
def preprocessing_sent_train(params):
    """Preprocess the Inspec training set into sentence-level tokens.

    Like preprocessing_train, but inputs are split into sentences
    (prep.split_sent / prep.tokenized_sent) and the resulting arrays are
    saved into params['data_path'] as train_input_sent_tokens.npy and
    train_output_sent_tokens.npy.
    """
    train_path = params['train_path']
    data_path = params['data_path']
    print("\n=========\n")
    sys.stdout.flush()
    print(str(datetime.now()))
    sys.stdout.flush()
    t0 = time.time()
    print("Preprocessing raw training data...")
    sys.stdout.flush()
    data_connector = DataConnector(train_path, 'inspec_train_doc_keyphrases.pkl', data=None)
    data_connector.read_pickle()
    data = data_connector.read_file
    # v = (title, abstract, keyphrases); title and abstract are joined by
    # a sentence separator so the splitter sees one document string.
    in_text = [v[0] + " . " + v[1] for v in data.values()]
    out_keyphrases = [v[2] for v in data.values()]
    print("\nnumber of examples in raw data inputs: %s\n"%(len(in_text)))
    sys.stdout.flush()
    print("\nnumber of examples in raw data outputs: %s\n"%(len(out_keyphrases)))
    sys.stdout.flush()
    print("\n in_text[0]: %s\n"%(in_text[0]))
    sys.stdout.flush()
    print("\n out_keyphrases[0]: %s\n"%(out_keyphrases[0]))
    sys.stdout.flush()
    prep = Preprocessing()
    prep_inputs = prep.split_sent(in_text)
    prep_outputs = prep.preprocess_out(out_keyphrases)
    input_tokens = prep.tokenized_sent(prep_inputs)
    output_tokens = prep.tokenize_out(prep_outputs)
    # without splitting data into training and test set
    print("\nnumber of examples in preprocessed data inputs: %s\n"%(len(input_tokens)))
    sys.stdout.flush()
    print("\nnumber of examples in preprocessed data outputs: %s\n"%(len(output_tokens)))
    sys.stdout.flush()
    print("\n input_tokens[0]: %s\n"%(input_tokens[0]))
    sys.stdout.flush()
    print("\n output_tokens[0]: %s\n"%(output_tokens[0]))
    sys.stdout.flush()
    in_connector = DataConnector(data_path, 'train_input_sent_tokens.npy', input_tokens)
    in_connector.save_numpys()
    out_connector = DataConnector(data_path, 'train_output_sent_tokens.npy', output_tokens)
    out_connector.save_numpys()
    t1 = time.time()
    print("Preprocessing raw training data done in %.3fsec" % (t1 - t0))
    sys.stdout.flush()
def preprocessing_valid(params):
    """Preprocess the raw Inspec validation set at document level.

    Loads 'inspec_val_doc_keyphrases.pkl' from params['valid_path'], joins
    each title and abstract, tokenizes inputs and keyphrases, and stores the
    token arrays plus the combined token list as .npy files back into the
    validation directory.
    """
    def log(msg):
        # print-and-flush so progress shows up immediately in redirected logs
        print(msg)
        sys.stdout.flush()

    valid_path = params['valid_path']
    log("\n=========\n")
    log(str(datetime.now()))
    t0 = time.time()
    log("Preprocessing raw validation data...")
    reader = DataConnector(valid_path, 'inspec_val_doc_keyphrases.pkl', data=None)
    reader.read_pickle()
    raw_docs = reader.read_file
    # each value is (title, abstract, keyphrases); input text is title + abstract
    in_text = [doc[0] + " . " + doc[1] for doc in raw_docs.values()]
    out_keyphrases = [doc[2] for doc in raw_docs.values()]
    log("\nnumber of examples in raw data inputs: %s\n" % (len(in_text)))
    log("\nnumber of examples in raw data outputs: %s\n" % (len(out_keyphrases)))
    log("\n in_text[0]: %s\n" % (in_text[0]))
    log("\n out_keyphrases[0]: %s\n" % (out_keyphrases[0]))
    prep = Preprocessing()
    cleaned_in = prep.preprocess_in(in_text)
    cleaned_out = prep.preprocess_out(out_keyphrases)
    input_tokens = prep.tokenize_in(cleaned_in)
    output_tokens = prep.tokenize_out(cleaned_out)
    all_tokens = prep.get_all_tokens(input_tokens, output_tokens)
    # the whole set is preprocessed as-is (no train/test split here)
    log("\nnumber of examples in preprocessed data inputs: %s\n" % (len(input_tokens)))
    log("\nnumber of examples in preprocessed data outputs: %s\n" % (len(output_tokens)))
    log("\n input_tokens[0]: %s\n" % (input_tokens[0]))
    log("\n output_tokens[0]: %s\n" % (output_tokens[0]))
    DataConnector(valid_path, 'val_input_tokens.npy', input_tokens).save_numpys()
    DataConnector(valid_path, 'val_output_tokens.npy', output_tokens).save_numpys()
    DataConnector(valid_path, 'val_tokens.npy', all_tokens).save_numpys()
    t1 = time.time()
    log("Preprocessing raw validation data done in %.3fsec" % (t1 - t0))
def preprocessing_sent_valid(params):
    """Preprocess the raw Inspec validation set at sentence level.

    Loads 'inspec_val_doc_keyphrases.pkl' from params['valid_path'], joins
    each title and abstract, splits into sentences, tokenizes sentences and
    keyphrases, and saves both token arrays as .npy files into
    params['data_path'].
    """
    def log(msg):
        # print-and-flush so progress shows up immediately in redirected logs
        print(msg)
        sys.stdout.flush()

    valid_path = params['valid_path']
    data_path = params['data_path']
    log("\n=========\n")
    log(str(datetime.now()))
    t0 = time.time()
    log("Preprocessing raw validation data...")
    reader = DataConnector(valid_path, 'inspec_val_doc_keyphrases.pkl', data=None)
    reader.read_pickle()
    raw_docs = reader.read_file
    # each value is (title, abstract, keyphrases); input text is title + abstract
    in_text = [doc[0] + " . " + doc[1] for doc in raw_docs.values()]
    out_keyphrases = [doc[2] for doc in raw_docs.values()]
    log("\nnumber of examples in raw data inputs: %s\n" % (len(in_text)))
    log("\nnumber of examples in raw data outputs: %s\n" % (len(out_keyphrases)))
    log("\n in_text[0]: %s\n" % (in_text[0]))
    log("\n out_keyphrases[0]: %s\n" % (out_keyphrases[0]))
    prep = Preprocessing()
    sentence_split = prep.split_sent(in_text)
    cleaned_out = prep.preprocess_out(out_keyphrases)
    input_tokens = prep.tokenized_sent(sentence_split)
    output_tokens = prep.tokenize_out(cleaned_out)
    # the whole set is preprocessed as-is (no train/test split here)
    log("\nnumber of examples in preprocessed data inputs: %s\n" % (len(input_tokens)))
    log("\nnumber of examples in preprocessed data outputs: %s\n" % (len(output_tokens)))
    log("\n input_tokens[0]: %s\n" % (input_tokens[0]))
    log("\n output_tokens[0]: %s\n" % (output_tokens[0]))
    DataConnector(data_path, 'val_input_sent_tokens.npy', input_tokens).save_numpys()
    DataConnector(data_path, 'val_output_sent_tokens.npy', output_tokens).save_numpys()
    t1 = time.time()
    log("Preprocessing raw validation data done in %.3fsec" % (t1 - t0))
def preprocessing_test(params):
    """Preprocess the raw Inspec test set at document level.

    Loads 'inspec_test_doc_keyphrases.pkl' from params['test_path'], joins
    each title and abstract, tokenizes inputs and keyphrases, and stores the
    token arrays plus the combined token list as .npy files back into the
    test directory.
    """
    def log(msg):
        # print-and-flush so progress shows up immediately in redirected logs
        print(msg)
        sys.stdout.flush()

    test_path = params['test_path']
    log("\n=========\n")
    log(str(datetime.now()))
    t0 = time.time()
    log("Preprocessing raw test data...")
    reader = DataConnector(test_path, 'inspec_test_doc_keyphrases.pkl', data=None)
    reader.read_pickle()
    raw_docs = reader.read_file
    # each value is (title, abstract, keyphrases); input text is title + abstract
    in_text = [doc[0] + " . " + doc[1] for doc in raw_docs.values()]
    out_keyphrases = [doc[2] for doc in raw_docs.values()]
    log("\nnumber of examples in raw data inputs: %s\n" % (len(in_text)))
    log("\nnumber of examples in raw data outputs: %s\n" % (len(out_keyphrases)))
    log("\n in_text[0]: %s\n" % (in_text[0]))
    log("\n out_keyphrases[0]: %s\n" % (out_keyphrases[0]))
    prep = Preprocessing()
    cleaned_in = prep.preprocess_in(in_text)
    cleaned_out = prep.preprocess_out(out_keyphrases)
    input_tokens = prep.tokenize_in(cleaned_in)
    output_tokens = prep.tokenize_out(cleaned_out)
    all_tokens = prep.get_all_tokens(input_tokens, output_tokens)
    # the whole set is preprocessed as-is (no train/test split here)
    log("\nnumber of examples in preprocessed data inputs: %s\n" % (len(input_tokens)))
    log("\nnumber of examples in preprocessed data outputs: %s\n" % (len(output_tokens)))
    log("\n input_tokens[0]: %s\n" % (input_tokens[0]))
    log("\n output_tokens[0]: %s\n" % (output_tokens[0]))
    DataConnector(test_path, 'test_input_tokens.npy', input_tokens).save_numpys()
    DataConnector(test_path, 'test_output_tokens.npy', output_tokens).save_numpys()
    DataConnector(test_path, 'test_tokens.npy', all_tokens).save_numpys()
    t1 = time.time()
    log("Preprocessing raw test data done in %.3fsec" % (t1 - t0))
def preprocessing_sent_test(params):
    """Preprocess the raw Inspec test set at sentence level.

    Loads 'inspec_test_doc_keyphrases.pkl' from params['test_path'], joins
    each title and abstract, splits into sentences, tokenizes sentences and
    keyphrases, and saves both token arrays as .npy files into
    params['data_path'].
    """
    def log(msg):
        # print-and-flush so progress shows up immediately in redirected logs
        print(msg)
        sys.stdout.flush()

    test_path = params['test_path']
    data_path = params['data_path']
    log("\n=========\n")
    log(str(datetime.now()))
    t0 = time.time()
    log("Preprocessing raw test data...")
    reader = DataConnector(test_path, 'inspec_test_doc_keyphrases.pkl', data=None)
    reader.read_pickle()
    raw_docs = reader.read_file
    # each value is (title, abstract, keyphrases); input text is title + abstract
    in_text = [doc[0] + " . " + doc[1] for doc in raw_docs.values()]
    out_keyphrases = [doc[2] for doc in raw_docs.values()]
    log("\nnumber of examples in raw data inputs: %s\n" % (len(in_text)))
    log("\nnumber of examples in raw data outputs: %s\n" % (len(out_keyphrases)))
    log("\n in_text[0]: %s\n" % (in_text[0]))
    log("\n out_keyphrases[0]: %s\n" % (out_keyphrases[0]))
    prep = Preprocessing()
    sentence_split = prep.split_sent(in_text)
    cleaned_out = prep.preprocess_out(out_keyphrases)
    input_tokens = prep.tokenized_sent(sentence_split)
    output_tokens = prep.tokenize_out(cleaned_out)
    # the whole set is preprocessed as-is (no train/test split here)
    log("\nnumber of examples in preprocessed data inputs: %s\n" % (len(input_tokens)))
    log("\nnumber of examples in preprocessed data outputs: %s\n" % (len(output_tokens)))
    log("\n input_tokens[0]: %s\n" % (input_tokens[0]))
    log("\n output_tokens[0]: %s\n" % (output_tokens[0]))
    DataConnector(data_path, 'test_input_sent_tokens.npy', input_tokens).save_numpys()
    DataConnector(data_path, 'test_output_sent_tokens.npy', output_tokens).save_numpys()
    t1 = time.time()
    log("Preprocessing raw test data done in %.3fsec" % (t1 - t0))
def compute_stats(params):
    """Report corpus statistics over the sentence-tokenized inputs.

    Loads the sentence-token arrays of the train/validation/test splits from
    params['data_path'] and prints mean / standard deviation / maximum of
    (a) number of sentences per document and (b) number of words per sentence.

    NOTE: documents with >= 100 sentences are excluded entirely, and within
    the remaining documents sentences with >= 50 words are excluded from the
    per-sentence statistics — so the reported maxima are capped by these
    filters. Fixed: the original version also built `ids`, `idx_num_sent`,
    `idx_len_sent` and `id_doc`, none of which were ever used; that dead
    bookkeeping has been removed.
    """
    data_path = params['data_path']
    print("\n=========\n")
    sys.stdout.flush()
    print(str(datetime.now()))
    sys.stdout.flush()
    t0 = time.time()
    print("Computing statistics...")
    sys.stdout.flush()
    # read the sentence-token arrays of all three splits and pool them
    train_in_connector = DataConnector(data_path, 'train_input_sent_tokens.npy', data=None)
    train_in_connector.read_numpys()
    train_in = train_in_connector.read_file
    val_in_connector = DataConnector(data_path, 'val_input_sent_tokens.npy', data=None)
    val_in_connector.read_numpys()
    val_in = val_in_connector.read_file
    test_in_connector = DataConnector(data_path, 'test_input_sent_tokens.npy', data=None)
    test_in_connector.read_numpys()
    test_in = test_in_connector.read_file
    sent_in = np.concatenate((train_in, val_in, test_in))
    len_sent_x = []  # words per kept sentence
    num_sent_x = []  # sentences per kept document
    for doc in sent_in:
        if len(doc) < 100:  # drop outlier documents with very many sentences
            num_sent_x.append(len(doc))
            for sent in doc:
                if len(sent) < 50:  # drop outlier sentences with very many words
                    len_sent_x.append(len(sent))
    avg_num_sent = np.mean(num_sent_x)
    std_num_sent = np.std(num_sent_x)
    max_num_sent = max(num_sent_x)
    avg_len_sent = np.mean(len_sent_x)
    std_len_sent = np.std(len_sent_x)
    max_len_sent = max(len_sent_x)
    print("len_sent_x[:10]: %s"%(len_sent_x[:10]))
    print("num_sent_x[:10]: %s"%(num_sent_x[:10]))
    print("average number of sentences per document: %s"%avg_num_sent)
    print("standard deviation number of sentences per document: %s"%std_num_sent)
    print("max number of sentences per document: %s"%max_num_sent)
    print("average number of words per sentences: %s"%avg_len_sent)
    print("standard deviation number of words per sentences: %s"%std_len_sent)
    print("max number of words per sentences: %s"%max_len_sent)
    t1 = time.time()
    print("Computing stats done in %.3fsec" % (t1 - t0))
    sys.stdout.flush()
def indexing_(params):
    """Build the word vocabulary index over all three data splits.

    Concatenates the stored token arrays of the training, validation and test
    sets, computes term frequencies and the two-way word<->index mappings via
    Indexing, and pickles all three results into params['data_path'].
    """
    def log(msg):
        # print-and-flush so progress shows up immediately in redirected logs
        print(msg)
        sys.stdout.flush()

    data_path = params['data_path']
    train_path = params['train_path']
    valid_path = params['valid_path']
    test_path = params['test_path']
    log("\n=========\n")
    log(str(datetime.now()))
    t0 = time.time()
    log("Vocabulary indexing...")
    # read all tokens from training, validation, and testing set
    # to create the vocabulary index of word tokens
    train_conn = DataConnector(train_path, 'train_tokens.npy', data=None)
    train_conn.read_numpys()
    train_tokens = train_conn.read_file
    log("\n train_tokens[:10]: %s\n" % (train_tokens[:10]))
    valid_conn = DataConnector(valid_path, 'val_tokens.npy', data=None)
    valid_conn.read_numpys()
    valid_tokens = valid_conn.read_file
    log("\n valid_tokens[:10]: %s\n" % (valid_tokens[:10]))
    test_conn = DataConnector(test_path, 'test_tokens.npy', data=None)
    test_conn.read_numpys()
    test_tokens = test_conn.read_file
    log("\n test_tokens[:10]: %s\n" % (test_tokens[:10]))
    pooled_tokens = np.concatenate((train_tokens, valid_tokens, test_tokens))
    indexer = Indexing()
    term_freq, indices_words, words_indices = indexer.vocabulary_indexing(pooled_tokens)
    DataConnector(data_path, 'term_freq.pkl', term_freq).save_pickle()
    DataConnector(data_path, 'indices_words.pkl', indices_words).save_pickle()
    DataConnector(data_path, 'words_indices.pkl', words_indices).save_pickle()
    log("\nvocabulary size: %s\n" % len(indices_words))
    log("\n indices_words[:10]: %s\n" % list(indices_words.items())[:10])
    t1 = time.time()
    log("Indexing done in %.3fsec" % (t1 - t0))
def transform_train(params):
    """Turn tokenized training data into padded integer sequences.

    Vocabulary mappings are loaded from params['preprocessed_kp20k']; encoder
    inputs (raw and padded) and decoder input/output sequences are saved as
    .npy files into params['preprocessed_data'].
    """
    def log(msg):
        # print-and-flush so progress shows up immediately in redirected logs
        print(msg)
        sys.stdout.flush()

    data_path = params['data_path']
    kp20k_path = params['kp20k_path']  # looked up for interface parity; not used below
    preprocessed_kp20k = params['preprocessed_kp20k']
    preprocessed_data = params['preprocessed_data']
    encoder_length = params['encoder_length']
    decoder_length = params['decoder_length']
    log("\n=========\n")
    log(str(datetime.now()))
    t0 = time.time()
    log("Transforming training set into integer sequences")
    # stored vocabulary index (both directions: id->word and word->id)
    idx2word_conn = DataConnector(preprocessed_kp20k, 'all_idxword_vocabulary.pkl', data=None)
    idx2word_conn.read_pickle()
    indices_words = idx2word_conn.read_file
    word2idx_conn = DataConnector(preprocessed_kp20k, 'all_wordidx_vocabulary.pkl', data=None)
    word2idx_conn.read_pickle()
    words_indices = word2idx_conn.read_file
    # tokenized data set
    in_conn = DataConnector(data_path, 'train_input_tokens.npy', data=None)
    in_conn.read_numpys()
    train_in_tokens = in_conn.read_file
    out_conn = DataConnector(data_path, 'train_output_tokens.npy', data=None)
    out_conn.read_numpys()
    train_out_tokens = out_conn.read_file
    print("\nshape of train_input_tokens in training set: %s\n" % str(np.array(train_in_tokens).shape))
    print("\nshape of train_output_tokens in training set: %s\n" % str(np.array(train_out_tokens).shape))
    # transforming texts into integer sequences
    seq_proc = SequenceProcessing(indices_words, words_indices, encoder_length, decoder_length)
    X_train = seq_proc.intexts_to_integers(train_in_tokens)
    X_train_pad = seq_proc.pad_sequences_in(encoder_length, X_train)
    y_train_in, y_train_out = seq_proc.outtexts_to_integers(train_out_tokens)
    print("\nshape of X_train in training set: %s\n" % str(np.array(X_train).shape))
    print("\nshape of X_train_pad in training set: %s\n" % str(np.array(X_train_pad).shape))
    print("\nshape of y_train_in in training set: %s\n" % str(np.array(y_train_in).shape))
    print("\nshape of y_train_out in training set: %s\n" % str(np.array(y_train_out).shape))
    DataConnector(preprocessed_data, 'X_train.npy', X_train).save_numpys()
    DataConnector(preprocessed_data, 'X_train_pad.npy', X_train_pad).save_numpys()
    DataConnector(preprocessed_data, 'y_train_in.npy', y_train_in).save_numpys()
    DataConnector(preprocessed_data, 'y_train_out.npy', y_train_out).save_numpys()
    t1 = time.time()
    log("Transforming training set into integer sequences of inputs - outputs done in %.3fsec" % (t1 - t0))
def transform_train_v1(params):
    """Turn tokenized training data into padded integer sequences (v1 vocab).

    Uses the 'all_indices_words' / 'all_words_indices' vocabulary pickles from
    params['preprocessed_v2'] and saves the integer sequences as .npy files
    into params['preprocessed_data'].
    """
    def log(msg):
        # print-and-flush so progress shows up immediately in redirected logs
        print(msg)
        sys.stdout.flush()

    data_path = params['data_path']
    preprocessed_data = params['preprocessed_data']
    preprocessed_v2 = params['preprocessed_v2']
    encoder_length = params['encoder_length']
    decoder_length = params['decoder_length']
    log(str(datetime.now()))
    t0 = time.time()
    log("Transforming training set into integer sequences")
    # stored vocabulary index (both directions: id->word and word->id)
    idx2word_conn = DataConnector(preprocessed_v2, 'all_indices_words.pkl', data=None)
    idx2word_conn.read_pickle()
    indices_words = idx2word_conn.read_file
    word2idx_conn = DataConnector(preprocessed_v2, 'all_words_indices.pkl', data=None)
    word2idx_conn.read_pickle()
    words_indices = word2idx_conn.read_file
    # tokenized data set
    in_conn = DataConnector(data_path, 'train_input_tokens.npy', data=None)
    in_conn.read_numpys()
    train_in_tokens = in_conn.read_file
    out_conn = DataConnector(data_path, 'train_output_tokens.npy', data=None)
    out_conn.read_numpys()
    train_out_tokens = out_conn.read_file
    print("\nshape of train_input_tokens in training set: %s\n" % str(np.array(train_in_tokens).shape))
    print("\nshape of train_output_tokens in training set: %s\n" % str(np.array(train_out_tokens).shape))
    # transforming texts into integer sequences
    seq_proc = SequenceProcessing(indices_words, words_indices, encoder_length, decoder_length)
    X_train = seq_proc.intexts_to_integers(train_in_tokens)
    X_train_pad = seq_proc.pad_sequences_in(encoder_length, X_train)
    y_train_in, y_train_out = seq_proc.outtexts_to_integers(train_out_tokens)
    print("\nshape of X_train in training set: %s\n" % str(np.array(X_train).shape))
    print("\nshape of X_train_pad in training set: %s\n" % str(np.array(X_train_pad).shape))
    print("\nshape of y_train_in in training set: %s\n" % str(np.array(y_train_in).shape))
    print("\nshape of y_train_out in training set: %s\n" % str(np.array(y_train_out).shape))
    DataConnector(preprocessed_data, 'X_train.npy', X_train).save_numpys()
    DataConnector(preprocessed_data, 'X_train_pad.npy', X_train_pad).save_numpys()
    DataConnector(preprocessed_data, 'y_train_in.npy', y_train_in).save_numpys()
    DataConnector(preprocessed_data, 'y_train_out.npy', y_train_out).save_numpys()
    t1 = time.time()
    log("Transforming training set into integer sequences of inputs - outputs done in %.3fsec" % (t1 - t0))
def transform_train_v2(params):
    """Turn tokenized training data into padded integer sequences (v2 vocab).

    Uses the 'all_idxword_vocabulary' / 'all_wordidx_vocabulary' pickles from
    params['preprocessed_v2'] and saves the integer sequences as .npy files
    into params['preprocessed_data'].
    """
    def log(msg):
        # print-and-flush so progress shows up immediately in redirected logs
        print(msg)
        sys.stdout.flush()

    data_path = params['data_path']
    preprocessed_data = params['preprocessed_data']
    preprocessed_v2 = params['preprocessed_v2']
    encoder_length = params['encoder_length']
    decoder_length = params['decoder_length']
    log(str(datetime.now()))
    t0 = time.time()
    log("Transforming training set into integer sequences")
    # stored vocabulary index (both directions: id->word and word->id)
    idx2word_conn = DataConnector(preprocessed_v2, 'all_idxword_vocabulary.pkl', data=None)
    idx2word_conn.read_pickle()
    indices_words = idx2word_conn.read_file
    word2idx_conn = DataConnector(preprocessed_v2, 'all_wordidx_vocabulary.pkl', data=None)
    word2idx_conn.read_pickle()
    words_indices = word2idx_conn.read_file
    # tokenized data set
    in_conn = DataConnector(data_path, 'train_input_tokens.npy', data=None)
    in_conn.read_numpys()
    train_in_tokens = in_conn.read_file
    out_conn = DataConnector(data_path, 'train_output_tokens.npy', data=None)
    out_conn.read_numpys()
    train_out_tokens = out_conn.read_file
    print("\nshape of train_input_tokens in training set: %s\n" % str(np.array(train_in_tokens).shape))
    print("\nshape of train_output_tokens in training set: %s\n" % str(np.array(train_out_tokens).shape))
    # transforming texts into integer sequences
    seq_proc = SequenceProcessing(indices_words, words_indices, encoder_length, decoder_length)
    X_train = seq_proc.intexts_to_integers(train_in_tokens)
    X_train_pad = seq_proc.pad_sequences_in(encoder_length, X_train)
    y_train_in, y_train_out = seq_proc.outtexts_to_integers(train_out_tokens)
    print("\nshape of X_train in training set: %s\n" % str(np.array(X_train).shape))
    print("\nshape of X_train_pad in training set: %s\n" % str(np.array(X_train_pad).shape))
    print("\nshape of y_train_in in training set: %s\n" % str(np.array(y_train_in).shape))
    print("\nshape of y_train_out in training set: %s\n" % str(np.array(y_train_out).shape))
    DataConnector(preprocessed_data, 'X_train.npy', X_train).save_numpys()
    DataConnector(preprocessed_data, 'X_train_pad.npy', X_train_pad).save_numpys()
    DataConnector(preprocessed_data, 'y_train_in.npy', y_train_in).save_numpys()
    DataConnector(preprocessed_data, 'y_train_out.npy', y_train_out).save_numpys()
    t1 = time.time()
    log("Transforming training set into integer sequences of inputs - outputs done in %.3fsec" % (t1 - t0))
def transform_train_v1_fsoftmax(params):
    """Turn tokenized training data into padded integer sequences
    (v1 vocabulary, full-softmax variant).

    Uses the '*_fsoftmax' vocabulary pickles from params['preprocessed_v2']
    and saves the integer sequences as '*_fsoftmax.npy' files into
    params['preprocessed_data'].
    """
    def log(msg):
        # print-and-flush so progress shows up immediately in redirected logs
        print(msg)
        sys.stdout.flush()

    data_path = params['data_path']
    preprocessed_data = params['preprocessed_data']
    preprocessed_v2 = params['preprocessed_v2']
    encoder_length = params['encoder_length']
    decoder_length = params['decoder_length']
    log(str(datetime.now()))
    t0 = time.time()
    log("Transforming training set into integer sequences")
    # stored vocabulary index (both directions: id->word and word->id)
    idx2word_conn = DataConnector(preprocessed_v2, 'all_indices_words_fsoftmax.pkl', data=None)
    idx2word_conn.read_pickle()
    indices_words = idx2word_conn.read_file
    word2idx_conn = DataConnector(preprocessed_v2, 'all_words_indices_fsoftmax.pkl', data=None)
    word2idx_conn.read_pickle()
    words_indices = word2idx_conn.read_file
    # tokenized data set
    in_conn = DataConnector(data_path, 'train_input_tokens.npy', data=None)
    in_conn.read_numpys()
    train_in_tokens = in_conn.read_file
    out_conn = DataConnector(data_path, 'train_output_tokens.npy', data=None)
    out_conn.read_numpys()
    train_out_tokens = out_conn.read_file
    print("\nshape of train_input_tokens in training set: %s\n" % str(np.array(train_in_tokens).shape))
    print("\nshape of train_output_tokens in training set: %s\n" % str(np.array(train_out_tokens).shape))
    # transforming texts into integer sequences
    seq_proc = SequenceProcessing(indices_words, words_indices, encoder_length, decoder_length)
    X_train = seq_proc.intexts_to_integers(train_in_tokens)
    X_train_pad = seq_proc.pad_sequences_in(encoder_length, X_train)
    y_train_in, y_train_out = seq_proc.outtexts_to_integers(train_out_tokens)
    print("\nshape of X_train in training set: %s\n" % str(np.array(X_train).shape))
    print("\nshape of X_train_pad in training set: %s\n" % str(np.array(X_train_pad).shape))
    print("\nshape of y_train_in in training set: %s\n" % str(np.array(y_train_in).shape))
    print("\nshape of y_train_out in training set: %s\n" % str(np.array(y_train_out).shape))
    DataConnector(preprocessed_data, 'X_train_fsoftmax.npy', X_train).save_numpys()
    DataConnector(preprocessed_data, 'X_train_pad_fsoftmax.npy', X_train_pad).save_numpys()
    DataConnector(preprocessed_data, 'y_train_in_fsoftmax.npy', y_train_in).save_numpys()
    DataConnector(preprocessed_data, 'y_train_out_fsoftmax.npy', y_train_out).save_numpys()
    t1 = time.time()
    log("Transforming training set into integer sequences of inputs - outputs done in %.3fsec" % (t1 - t0))
def transform_train_v2_fsoftmax(params):
    """Turn tokenized training data into padded integer sequences
    (v2 vocabulary, full-softmax variant).

    Uses the '*_vocabulary_fsoftmax' pickles from params['preprocessed_v2']
    and saves the integer sequences as '*_fsoftmax.npy' files into
    params['preprocessed_data'].
    """
    def log(msg):
        # print-and-flush so progress shows up immediately in redirected logs
        print(msg)
        sys.stdout.flush()

    data_path = params['data_path']
    preprocessed_data = params['preprocessed_data']
    preprocessed_v2 = params['preprocessed_v2']
    encoder_length = params['encoder_length']
    decoder_length = params['decoder_length']
    log(str(datetime.now()))
    t0 = time.time()
    log("Transforming training set into integer sequences")
    # stored vocabulary index (both directions: id->word and word->id)
    idx2word_conn = DataConnector(preprocessed_v2, 'all_idxword_vocabulary_fsoftmax.pkl', data=None)
    idx2word_conn.read_pickle()
    indices_words = idx2word_conn.read_file
    word2idx_conn = DataConnector(preprocessed_v2, 'all_wordidx_vocabulary_fsoftmax.pkl', data=None)
    word2idx_conn.read_pickle()
    words_indices = word2idx_conn.read_file
    # tokenized data set
    in_conn = DataConnector(data_path, 'train_input_tokens.npy', data=None)
    in_conn.read_numpys()
    train_in_tokens = in_conn.read_file
    out_conn = DataConnector(data_path, 'train_output_tokens.npy', data=None)
    out_conn.read_numpys()
    train_out_tokens = out_conn.read_file
    print("\nshape of train_input_tokens in training set: %s\n" % str(np.array(train_in_tokens).shape))
    print("\nshape of train_output_tokens in training set: %s\n" % str(np.array(train_out_tokens).shape))
    # transforming texts into integer sequences
    seq_proc = SequenceProcessing(indices_words, words_indices, encoder_length, decoder_length)
    X_train = seq_proc.intexts_to_integers(train_in_tokens)
    X_train_pad = seq_proc.pad_sequences_in(encoder_length, X_train)
    y_train_in, y_train_out = seq_proc.outtexts_to_integers(train_out_tokens)
    print("\nshape of X_train in training set: %s\n" % str(np.array(X_train).shape))
    print("\nshape of X_train_pad in training set: %s\n" % str(np.array(X_train_pad).shape))
    print("\nshape of y_train_in in training set: %s\n" % str(np.array(y_train_in).shape))
    print("\nshape of y_train_out in training set: %s\n" % str(np.array(y_train_out).shape))
    DataConnector(preprocessed_data, 'X_train_fsoftmax.npy', X_train).save_numpys()
    DataConnector(preprocessed_data, 'X_train_pad_fsoftmax.npy', X_train_pad).save_numpys()
    DataConnector(preprocessed_data, 'y_train_in_fsoftmax.npy', y_train_in).save_numpys()
    DataConnector(preprocessed_data, 'y_train_out_fsoftmax.npy', y_train_out).save_numpys()
    t1 = time.time()
    log("Transforming training set into integer sequences of inputs - outputs done in %.3fsec" % (t1 - t0))
def transform_train_sub(params):
    """Turn tokenized training data into padded integer sequences using the
    'r3' sub-vocabulary.

    Vocabulary pickles ('all_indices_words_r3' / 'all_words_indices_r3') are
    loaded from params['kp20k_path']; the resulting sequences are saved as
    '*_r3.npy' files into params['data_path'].
    """
    def log(msg):
        # print-and-flush so progress shows up immediately in redirected logs
        print(msg)
        sys.stdout.flush()

    data_path = params['data_path']
    kp20k_path = params['kp20k_path']
    encoder_length = params['encoder_length']
    decoder_length = params['decoder_length']
    log(str(datetime.now()))
    t0 = time.time()
    log("Transforming training set into integer sequences")
    # stored vocabulary index (both directions: id->word and word->id)
    idx2word_conn = DataConnector(kp20k_path, 'all_indices_words_r3.pkl', data=None)
    idx2word_conn.read_pickle()
    indices_words = idx2word_conn.read_file
    word2idx_conn = DataConnector(kp20k_path, 'all_words_indices_r3.pkl', data=None)
    word2idx_conn.read_pickle()
    words_indices = word2idx_conn.read_file
    # tokenized data set
    in_conn = DataConnector(data_path, 'train_input_tokens.npy', data=None)
    in_conn.read_numpys()
    train_in_tokens = in_conn.read_file
    out_conn = DataConnector(data_path, 'train_output_tokens.npy', data=None)
    out_conn.read_numpys()
    train_out_tokens = out_conn.read_file
    print("\nshape of train_input_tokens in training set: %s\n" % str(np.array(train_in_tokens).shape))
    print("\nshape of train_output_tokens in training set: %s\n" % str(np.array(train_out_tokens).shape))
    # transforming texts into integer sequences
    seq_proc = SequenceProcessing(indices_words, words_indices, encoder_length, decoder_length)
    X_train = seq_proc.intexts_to_integers(train_in_tokens)
    X_train_pad = seq_proc.pad_sequences_in(encoder_length, X_train)
    y_train_in, y_train_out = seq_proc.outtexts_to_integers(train_out_tokens)
    print("\nshape of X_train in training set: %s\n" % str(np.array(X_train).shape))
    print("\nshape of X_train_pad in training set: %s\n" % str(np.array(X_train_pad).shape))
    print("\nshape of y_train_in in training set: %s\n" % str(np.array(y_train_in).shape))
    print("\nshape of y_train_out in training set: %s\n" % str(np.array(y_train_out).shape))
    DataConnector(data_path, 'X_train_r3.npy', X_train).save_numpys()
    DataConnector(data_path, 'X_train_pad_r3.npy', X_train_pad).save_numpys()
    DataConnector(data_path, 'y_train_in_r3.npy', y_train_in).save_numpys()
    DataConnector(data_path, 'y_train_out_r3.npy', y_train_out).save_numpys()
    t1 = time.time()
    log("Transforming training set into integer sequences of inputs - outputs done in %.3fsec" % (t1 - t0))
def transform_sent_train(params):
    """Turn sentence-tokenized training data into padded integer sequences.

    Loads the vocabulary pickles and the sentence-token arrays from
    params['data_path'], converts documents of up to params['max_sents']
    sentences into integer matrices, pads them to the encoder length, and
    saves encoder/decoder sequences as '*_sent*.npy' files.
    """
    def log(msg):
        # print-and-flush so progress shows up immediately in redirected logs
        print(msg)
        sys.stdout.flush()

    data_path = params['data_path']
    train_path = params['train_path']  # looked up for interface parity; not used below
    max_sents = params['max_sents']
    encoder_length = params['encoder_length']
    decoder_length = params['decoder_length']
    log("\n=========\n")
    log(str(datetime.now()))
    t0 = time.time()
    log("Transforming training set into integer sequences")
    # stored vocabulary index (both directions: id->word and word->id)
    idx2word_conn = DataConnector(data_path, 'indices_words.pkl', data=None)
    idx2word_conn.read_pickle()
    indices_words = idx2word_conn.read_file
    word2idx_conn = DataConnector(data_path, 'words_indices.pkl', data=None)
    word2idx_conn.read_pickle()
    words_indices = word2idx_conn.read_file
    # tokenized data set
    in_conn = DataConnector(data_path, 'train_input_sent_tokens.npy', data=None)
    in_conn.read_numpys()
    train_in_tokens = in_conn.read_file
    out_conn = DataConnector(data_path, 'train_output_sent_tokens.npy', data=None)
    out_conn.read_numpys()
    train_out_tokens = out_conn.read_file
    # transforming texts into integer sequences
    seq_proc = SequenceProcessing(indices_words, words_indices, encoder_length, decoder_length)
    X_train = seq_proc.in_sents_to_integers(in_texts=train_in_tokens, max_sents=max_sents)
    X_train_pad = seq_proc.pad_sequences_sent_in(max_len=encoder_length, max_sents=max_sents, sequences=X_train)
    y_train_in, y_train_out = seq_proc.outtexts_to_integers(out_texts=train_out_tokens)
    DataConnector(data_path, 'X_train_sent.npy', X_train).save_numpys()
    DataConnector(data_path, 'X_train_pad_sent.npy', X_train_pad).save_numpys()
    DataConnector(data_path, 'y_train_sent_in.npy', y_train_in).save_numpys()
    DataConnector(data_path, 'y_train_sent_out.npy', y_train_out).save_numpys()
    t1 = time.time()
    log("Transforming training set into integer sequences of inputs - outputs done in %.3fsec" % (t1 - t0))
def transform_sent_train_fsoftmax_v1(params):
    """Turn sentence-tokenized training data into padded integer sequences
    (v1 vocabulary, full-softmax variant).

    Uses the '*_sent_fsoftmax' vocabulary pickles from
    params['preprocessed_v2'] and saves the resulting sequences as
    '*_sent_fsoftmax.npy' files into params['preprocessed_data'].
    """
    def log(msg):
        # print-and-flush so progress shows up immediately in redirected logs
        print(msg)
        sys.stdout.flush()

    data_path = params['data_path']
    kp20k_path = params['kp20k_path']  # looked up for interface parity; not used below
    max_sents = params['max_sents']
    preprocessed_v2 = params['preprocessed_v2']
    preprocessed_data = params['preprocessed_data']
    encoder_length = params['encoder_length']
    decoder_length = params['decoder_length']
    log("\n=========\n")
    log(str(datetime.now()))
    t0 = time.time()
    log("Transforming training set into integer sequences")
    # stored vocabulary index (both directions: id->word and word->id)
    idx2word_conn = DataConnector(preprocessed_v2, 'all_indices_words_sent_fsoftmax.pkl', data=None)
    idx2word_conn.read_pickle()
    indices_words = idx2word_conn.read_file
    word2idx_conn = DataConnector(preprocessed_v2, 'all_words_indices_sent_fsoftmax.pkl', data=None)
    word2idx_conn.read_pickle()
    words_indices = word2idx_conn.read_file
    # tokenized data set
    in_conn = DataConnector(data_path, 'train_input_sent_tokens.npy', data=None)
    in_conn.read_numpys()
    train_in_tokens = in_conn.read_file
    out_conn = DataConnector(data_path, 'train_output_sent_tokens.npy', data=None)
    out_conn.read_numpys()
    train_out_tokens = out_conn.read_file
    # transforming texts into integer sequences
    seq_proc = SequenceProcessing(indices_words, words_indices, encoder_length, decoder_length)
    X_train = seq_proc.in_sents_to_integers(in_texts=train_in_tokens, max_sents=max_sents)
    X_train_pad = seq_proc.pad_sequences_sent_in(max_len=encoder_length, max_sents=max_sents, sequences=X_train)
    y_train_in, y_train_out = seq_proc.outtexts_to_integers(out_texts=train_out_tokens)
    DataConnector(preprocessed_data, 'X_train_sent_fsoftmax.npy', X_train).save_numpys()
    DataConnector(preprocessed_data, 'X_train_pad_sent_fsoftmax.npy', X_train_pad).save_numpys()
    DataConnector(preprocessed_data, 'y_train_sent_in_fsoftmax.npy', y_train_in).save_numpys()
    DataConnector(preprocessed_data, 'y_train_sent_out_fsoftmax.npy', y_train_out).save_numpys()
    t1 = time.time()
    log("Transforming training set into integer sequences of inputs - outputs done in %.3fsec" % (t1 - t0))
def transform_sent_train_fsoftmax_v2(params):
    """Transform the sentence-tokenized training set into padded integer
    sequences using the v2 full-softmax vocabulary, then persist the
    resulting arrays as numpy files.
    """
    data_path = params['data_path']
    kp20k_path = params['kp20k_path']
    max_sents = params['max_sents']
    preprocessed_v2 = params['preprocessed_v2']
    preprocessed_data = params['preprocessed_data']
    encoder_length = params['encoder_length']
    decoder_length = params['decoder_length']

    print("\n=========\n")
    sys.stdout.flush()
    print(str(datetime.now()))
    sys.stdout.flush()
    started = time.time()
    print("Transforming training set into integer sequences")
    sys.stdout.flush()

    def _load_pickle(folder, fname):
        # Read a pickled vocabulary table through the project's DataConnector.
        conn = DataConnector(folder, fname, data=None)
        conn.read_pickle()
        return conn.read_file

    def _load_numpy(folder, fname):
        # Read a stored numpy array through the project's DataConnector.
        conn = DataConnector(folder, fname, data=None)
        conn.read_numpys()
        return conn.read_file

    # Vocabulary lookup tables (index -> word, word -> index).
    indices_words = _load_pickle(preprocessed_v2, 'all_idxword_vocabulary_sent_fsoftmax.pkl')
    words_indices = _load_pickle(preprocessed_v2, 'all_wordidx_vocabulary_sent_fsoftmax.pkl')

    # Sentence-tokenized input documents and output keyphrases.
    train_in_tokens = _load_numpy(data_path, 'train_input_sent_tokens.npy')
    train_out_tokens = _load_numpy(data_path, 'train_output_sent_tokens.npy')

    # Map tokens to integer ids and pad to fixed encoder/decoder lengths.
    processor = SequenceProcessing(indices_words, words_indices, encoder_length, decoder_length)
    X_train = processor.in_sents_to_integers(in_texts=train_in_tokens, max_sents=max_sents)
    X_train_pad = processor.pad_sequences_sent_in(max_len=encoder_length, max_sents=max_sents, sequences=X_train)
    y_train_in, y_train_out = processor.outtexts_to_integers(out_texts=train_out_tokens)

    # Persist each transformed array.
    for fname, payload in (('X_train_sent_fsoftmax.npy', X_train),
                           ('X_train_pad_sent_fsoftmax.npy', X_train_pad),
                           ('y_train_sent_in_fsoftmax.npy', y_train_in),
                           ('y_train_sent_out_fsoftmax.npy', y_train_out)):
        DataConnector(preprocessed_data, fname, payload).save_numpys()

    print("Transforming training set into integer sequences of inputs - outputs done in %.3fsec" % (time.time() - started))
    sys.stdout.flush()
def transform_sent_train_v1(params):
    """Transform the sentence-tokenized training set into padded integer
    sequences using the v1 sentence vocabulary, then persist the resulting
    arrays as numpy files.
    """
    data_path = params['data_path']
    kp20k_path = params['kp20k_path']
    max_sents = params['max_sents']
    preprocessed_v2 = params['preprocessed_v2']
    preprocessed_data = params['preprocessed_data']
    encoder_length = params['encoder_length']
    decoder_length = params['decoder_length']

    print("\n=========\n")
    sys.stdout.flush()
    print(str(datetime.now()))
    sys.stdout.flush()
    started = time.time()
    print("Transforming training set into integer sequences")
    sys.stdout.flush()

    def _load_pickle(folder, fname):
        # Read a pickled vocabulary table through DataConnector.
        conn = DataConnector(folder, fname, data=None)
        conn.read_pickle()
        return conn.read_file

    def _load_numpy(folder, fname):
        # Read a stored numpy array through DataConnector.
        conn = DataConnector(folder, fname, data=None)
        conn.read_numpys()
        return conn.read_file

    # Vocabulary lookup tables (index -> word, word -> index).
    indices_words = _load_pickle(preprocessed_v2, 'all_indices_words_sent.pkl')
    words_indices = _load_pickle(preprocessed_v2, 'all_words_indices_sent.pkl')

    # Sentence-tokenized input documents and output keyphrases.
    train_in_tokens = _load_numpy(data_path, 'train_input_sent_tokens.npy')
    train_out_tokens = _load_numpy(data_path, 'train_output_sent_tokens.npy')

    # Map tokens to integer ids and pad to fixed encoder/decoder lengths.
    processor = SequenceProcessing(indices_words, words_indices, encoder_length, decoder_length)
    X_train = processor.in_sents_to_integers(in_texts=train_in_tokens, max_sents=max_sents)
    X_train_pad = processor.pad_sequences_sent_in(max_len=encoder_length, max_sents=max_sents, sequences=X_train)
    y_train_in, y_train_out = processor.outtexts_to_integers(out_texts=train_out_tokens)

    # Persist each transformed array.
    for fname, payload in (('X_train_sent.npy', X_train),
                           ('X_train_pad_sent.npy', X_train_pad),
                           ('y_train_sent_in.npy', y_train_in),
                           ('y_train_sent_out.npy', y_train_out)):
        DataConnector(preprocessed_data, fname, payload).save_numpys()

    print("Transforming training set into integer sequences of inputs - outputs done in %.3fsec" % (time.time() - started))
    sys.stdout.flush()
def transform_sent_train_v2(params):
    """Transform the sentence-tokenized training set into padded integer
    sequences using the v2 sentence vocabulary, then persist the resulting
    arrays as numpy files.
    """
    data_path = params['data_path']
    kp20k_path = params['kp20k_path']
    max_sents = params['max_sents']
    preprocessed_v2 = params['preprocessed_v2']
    preprocessed_data = params['preprocessed_data']
    encoder_length = params['encoder_length']
    decoder_length = params['decoder_length']

    print("\n=========\n")
    sys.stdout.flush()
    print(str(datetime.now()))
    sys.stdout.flush()
    started = time.time()
    print("Transforming training set into integer sequences")
    sys.stdout.flush()

    def _load_pickle(folder, fname):
        # Read a pickled vocabulary table through DataConnector.
        conn = DataConnector(folder, fname, data=None)
        conn.read_pickle()
        return conn.read_file

    def _load_numpy(folder, fname):
        # Read a stored numpy array through DataConnector.
        conn = DataConnector(folder, fname, data=None)
        conn.read_numpys()
        return conn.read_file

    # Vocabulary lookup tables (index -> word, word -> index).
    indices_words = _load_pickle(preprocessed_v2, 'all_idxword_vocabulary_sent.pkl')
    words_indices = _load_pickle(preprocessed_v2, 'all_wordidx_vocabulary_sent.pkl')

    # Sentence-tokenized input documents and output keyphrases.
    train_in_tokens = _load_numpy(data_path, 'train_input_sent_tokens.npy')
    train_out_tokens = _load_numpy(data_path, 'train_output_sent_tokens.npy')

    # Map tokens to integer ids and pad to fixed encoder/decoder lengths.
    processor = SequenceProcessing(indices_words, words_indices, encoder_length, decoder_length)
    X_train = processor.in_sents_to_integers(in_texts=train_in_tokens, max_sents=max_sents)
    X_train_pad = processor.pad_sequences_sent_in(max_len=encoder_length, max_sents=max_sents, sequences=X_train)
    y_train_in, y_train_out = processor.outtexts_to_integers(out_texts=train_out_tokens)

    # Persist each transformed array.
    for fname, payload in (('X_train_sent.npy', X_train),
                           ('X_train_pad_sent.npy', X_train_pad),
                           ('y_train_sent_in.npy', y_train_in),
                           ('y_train_sent_out.npy', y_train_out)):
        DataConnector(preprocessed_data, fname, payload).save_numpys()

    print("Transforming training set into integer sequences of inputs - outputs done in %.3fsec" % (time.time() - started))
    sys.stdout.flush()
def transform_sent_train_sub(params):
    """Transform the sentence-tokenized training subset into padded integer
    sequences using the r3 sentence vocabulary from the KP20k folder, then
    persist the resulting arrays as numpy files next to the tokens.
    """
    data_path = params['data_path']
    kp20k_path = params['kp20k_path']
    max_sents = params['max_sents']
    encoder_length = params['encoder_length']
    decoder_length = params['decoder_length']

    print("\n=========\n")
    sys.stdout.flush()
    print(str(datetime.now()))
    sys.stdout.flush()
    started = time.time()
    print("Transforming training set into integer sequences")
    sys.stdout.flush()

    def _load_pickle(folder, fname):
        # Read a pickled vocabulary table through DataConnector.
        conn = DataConnector(folder, fname, data=None)
        conn.read_pickle()
        return conn.read_file

    def _load_numpy(folder, fname):
        # Read a stored numpy array through DataConnector.
        conn = DataConnector(folder, fname, data=None)
        conn.read_numpys()
        return conn.read_file

    # Vocabulary lookup tables (index -> word, word -> index).
    indices_words = _load_pickle(kp20k_path, 'all_indices_words_sent_r3.pkl')
    words_indices = _load_pickle(kp20k_path, 'all_words_indices_sent_r3.pkl')

    # Sentence-tokenized input documents and output keyphrases.
    train_in_tokens = _load_numpy(data_path, 'train_input_sent_tokens.npy')
    train_out_tokens = _load_numpy(data_path, 'train_output_sent_tokens.npy')

    # Map tokens to integer ids and pad to fixed encoder/decoder lengths.
    processor = SequenceProcessing(indices_words, words_indices, encoder_length, decoder_length)
    X_train = processor.in_sents_to_integers(in_texts=train_in_tokens, max_sents=max_sents)
    X_train_pad = processor.pad_sequences_sent_in(max_len=encoder_length, max_sents=max_sents, sequences=X_train)
    y_train_in, y_train_out = processor.outtexts_to_integers(out_texts=train_out_tokens)

    # Persist each transformed array.
    for fname, payload in (('X_train_sent_r3.npy', X_train),
                           ('X_train_pad_sent_r3.npy', X_train_pad),
                           ('y_train_sent_in_r3.npy', y_train_in),
                           ('y_train_sent_out_r3.npy', y_train_out)):
        DataConnector(data_path, fname, payload).save_numpys()

    print("Transforming training set into integer sequences of inputs - outputs done in %.3fsec" % (time.time() - started))
    sys.stdout.flush()
def transform_valid(params):
    """Transform the tokenized validation set into padded integer sequences
    using the KP20k vocabulary and persist the resulting arrays as numpy
    files.
    """
    data_path = params['data_path']
    kp20k_path = params['kp20k_path']
    preprocessed_kp20k = params['preprocessed_kp20k']
    preprocessed_data = params['preprocessed_data']
    encoder_length = params['encoder_length']
    decoder_length = params['decoder_length']

    print("\n=========\n")
    sys.stdout.flush()
    print(str(datetime.now()))
    sys.stdout.flush()
    started = time.time()
    print("Transforming validation set into integer sequences")
    sys.stdout.flush()

    # Vocabulary lookup tables built during preprocessing.
    idx2word = DataConnector(preprocessed_kp20k, 'all_idxword_vocabulary.pkl', data=None)
    idx2word.read_pickle()
    word2idx = DataConnector(preprocessed_kp20k, 'all_wordidx_vocabulary.pkl', data=None)
    word2idx.read_pickle()

    # Tokenized validation inputs and target keyphrases.
    in_conn = DataConnector(data_path, 'val_input_tokens.npy', data=None)
    in_conn.read_numpys()
    out_conn = DataConnector(data_path, 'val_output_tokens.npy', data=None)
    out_conn.read_numpys()

    # Map tokens to integer ids and pad inputs to the encoder length.
    processor = SequenceProcessing(idx2word.read_file, word2idx.read_file,
                                   encoder_length, decoder_length)
    X_valid = processor.intexts_to_integers(in_conn.read_file)
    X_valid_pad = processor.pad_sequences_in(encoder_length, X_valid)
    y_valid_in, y_valid_out = processor.outtexts_to_integers(out_conn.read_file)

    # Persist each transformed array.
    for fname, payload in (('X_valid.npy', X_valid),
                           ('X_valid_pad.npy', X_valid_pad),
                           ('y_valid_in.npy', y_valid_in),
                           ('y_valid_out.npy', y_valid_out)):
        DataConnector(preprocessed_data, fname, payload).save_numpys()

    print("Transforming validation set into integer sequences of inputs - outputs done in %.3fsec" % (time.time() - started))
    sys.stdout.flush()
def transform_valid_v1_fsoftmax(params):
    """Transform the tokenized validation set into padded integer sequences
    using the v1 full-softmax vocabulary and persist the resulting arrays
    as numpy files.
    """
    data_path = params['data_path']
    preprocessed_data = params['preprocessed_data']
    preprocessed_v2 = params['preprocessed_v2']
    encoder_length = params['encoder_length']
    decoder_length = params['decoder_length']

    print("\n=========\n")
    sys.stdout.flush()
    print(str(datetime.now()))
    sys.stdout.flush()
    started = time.time()
    print("Transforming validation set into integer sequences")
    sys.stdout.flush()

    # Vocabulary lookup tables built during preprocessing.
    idx2word = DataConnector(preprocessed_v2, 'all_indices_words_fsoftmax.pkl', data=None)
    idx2word.read_pickle()
    word2idx = DataConnector(preprocessed_v2, 'all_words_indices_fsoftmax.pkl', data=None)
    word2idx.read_pickle()

    # Tokenized validation inputs and target keyphrases.
    in_conn = DataConnector(data_path, 'val_input_tokens.npy', data=None)
    in_conn.read_numpys()
    out_conn = DataConnector(data_path, 'val_output_tokens.npy', data=None)
    out_conn.read_numpys()

    # Map tokens to integer ids and pad inputs to the encoder length.
    processor = SequenceProcessing(idx2word.read_file, word2idx.read_file,
                                   encoder_length, decoder_length)
    X_valid = processor.intexts_to_integers(in_conn.read_file)
    X_valid_pad = processor.pad_sequences_in(encoder_length, X_valid)
    y_valid_in, y_valid_out = processor.outtexts_to_integers(out_conn.read_file)

    # Persist each transformed array.
    for fname, payload in (('X_valid_fsoftmax.npy', X_valid),
                           ('X_valid_pad_fsoftmax.npy', X_valid_pad),
                           ('y_valid_in_fsoftmax.npy', y_valid_in),
                           ('y_valid_out_fsoftmax.npy', y_valid_out)):
        DataConnector(preprocessed_data, fname, payload).save_numpys()

    print("Transforming validation set into integer sequences of inputs - outputs done in %.3fsec" % (time.time() - started))
    sys.stdout.flush()
def transform_valid_v2_fsoftmax(params):
    """Transform the tokenized validation set into padded integer sequences
    using the v2 full-softmax vocabulary and persist the resulting arrays
    as numpy files.
    """
    data_path = params['data_path']
    preprocessed_data = params['preprocessed_data']
    preprocessed_v2 = params['preprocessed_v2']
    encoder_length = params['encoder_length']
    decoder_length = params['decoder_length']

    print("\n=========\n")
    sys.stdout.flush()
    print(str(datetime.now()))
    sys.stdout.flush()
    started = time.time()
    print("Transforming validation set into integer sequences")
    sys.stdout.flush()

    # Vocabulary lookup tables built during preprocessing.
    idx2word = DataConnector(preprocessed_v2, 'all_idxword_vocabulary_fsoftmax.pkl', data=None)
    idx2word.read_pickle()
    word2idx = DataConnector(preprocessed_v2, 'all_wordidx_vocabulary_fsoftmax.pkl', data=None)
    word2idx.read_pickle()

    # Tokenized validation inputs and target keyphrases.
    in_conn = DataConnector(data_path, 'val_input_tokens.npy', data=None)
    in_conn.read_numpys()
    out_conn = DataConnector(data_path, 'val_output_tokens.npy', data=None)
    out_conn.read_numpys()

    # Map tokens to integer ids and pad inputs to the encoder length.
    processor = SequenceProcessing(idx2word.read_file, word2idx.read_file,
                                   encoder_length, decoder_length)
    X_valid = processor.intexts_to_integers(in_conn.read_file)
    X_valid_pad = processor.pad_sequences_in(encoder_length, X_valid)
    y_valid_in, y_valid_out = processor.outtexts_to_integers(out_conn.read_file)

    # Persist each transformed array.
    for fname, payload in (('X_valid_fsoftmax.npy', X_valid),
                           ('X_valid_pad_fsoftmax.npy', X_valid_pad),
                           ('y_valid_in_fsoftmax.npy', y_valid_in),
                           ('y_valid_out_fsoftmax.npy', y_valid_out)):
        DataConnector(preprocessed_data, fname, payload).save_numpys()

    print("Transforming validation set into integer sequences of inputs - outputs done in %.3fsec" % (time.time() - started))
    sys.stdout.flush()
def transform_valid_v1(params):
    """Transform the tokenized validation set into padded integer sequences
    using the v1 vocabulary and persist the resulting arrays as numpy
    files.
    """
    data_path = params['data_path']
    preprocessed_data = params['preprocessed_data']
    preprocessed_v2 = params['preprocessed_v2']
    encoder_length = params['encoder_length']
    decoder_length = params['decoder_length']

    print("\n=========\n")
    sys.stdout.flush()
    print(str(datetime.now()))
    sys.stdout.flush()
    started = time.time()
    print("Transforming validation set into integer sequences")
    sys.stdout.flush()

    # Vocabulary lookup tables built during preprocessing.
    idx2word = DataConnector(preprocessed_v2, 'all_indices_words.pkl', data=None)
    idx2word.read_pickle()
    word2idx = DataConnector(preprocessed_v2, 'all_words_indices.pkl', data=None)
    word2idx.read_pickle()

    # Tokenized validation inputs and target keyphrases.
    in_conn = DataConnector(data_path, 'val_input_tokens.npy', data=None)
    in_conn.read_numpys()
    out_conn = DataConnector(data_path, 'val_output_tokens.npy', data=None)
    out_conn.read_numpys()

    # Map tokens to integer ids and pad inputs to the encoder length.
    processor = SequenceProcessing(idx2word.read_file, word2idx.read_file,
                                   encoder_length, decoder_length)
    X_valid = processor.intexts_to_integers(in_conn.read_file)
    X_valid_pad = processor.pad_sequences_in(encoder_length, X_valid)
    y_valid_in, y_valid_out = processor.outtexts_to_integers(out_conn.read_file)

    # Persist each transformed array.
    for fname, payload in (('X_valid.npy', X_valid),
                           ('X_valid_pad.npy', X_valid_pad),
                           ('y_valid_in.npy', y_valid_in),
                           ('y_valid_out.npy', y_valid_out)):
        DataConnector(preprocessed_data, fname, payload).save_numpys()

    print("Transforming validation set into integer sequences of inputs - outputs done in %.3fsec" % (time.time() - started))
    sys.stdout.flush()
def transform_valid_v2(params):
    """Transform the tokenized validation set into padded integer sequences
    using the v2 vocabulary and persist the resulting arrays as numpy
    files.
    """
    data_path = params['data_path']
    preprocessed_data = params['preprocessed_data']
    preprocessed_v2 = params['preprocessed_v2']
    encoder_length = params['encoder_length']
    decoder_length = params['decoder_length']

    print("\n=========\n")
    sys.stdout.flush()
    print(str(datetime.now()))
    sys.stdout.flush()
    started = time.time()
    print("Transforming validation set into integer sequences")
    sys.stdout.flush()

    # Vocabulary lookup tables built during preprocessing.
    idx2word = DataConnector(preprocessed_v2, 'all_idxword_vocabulary.pkl', data=None)
    idx2word.read_pickle()
    word2idx = DataConnector(preprocessed_v2, 'all_wordidx_vocabulary.pkl', data=None)
    word2idx.read_pickle()

    # Tokenized validation inputs and target keyphrases.
    in_conn = DataConnector(data_path, 'val_input_tokens.npy', data=None)
    in_conn.read_numpys()
    out_conn = DataConnector(data_path, 'val_output_tokens.npy', data=None)
    out_conn.read_numpys()

    # Map tokens to integer ids and pad inputs to the encoder length.
    processor = SequenceProcessing(idx2word.read_file, word2idx.read_file,
                                   encoder_length, decoder_length)
    X_valid = processor.intexts_to_integers(in_conn.read_file)
    X_valid_pad = processor.pad_sequences_in(encoder_length, X_valid)
    y_valid_in, y_valid_out = processor.outtexts_to_integers(out_conn.read_file)

    # Persist each transformed array.
    for fname, payload in (('X_valid.npy', X_valid),
                           ('X_valid_pad.npy', X_valid_pad),
                           ('y_valid_in.npy', y_valid_in),
                           ('y_valid_out.npy', y_valid_out)):
        DataConnector(preprocessed_data, fname, payload).save_numpys()

    print("Transforming validation set into integer sequences of inputs - outputs done in %.3fsec" % (time.time() - started))
    sys.stdout.flush()
def transform_valid_sub(params):
    """Transform the tokenized validation subset into padded integer
    sequences using the r3 vocabulary from the KP20k folder and persist
    the resulting arrays as numpy files next to the tokens.
    """
    data_path = params['data_path']
    kp20k_path = params['kp20k_path']
    encoder_length = params['encoder_length']
    decoder_length = params['decoder_length']

    print("\n=========\n")
    sys.stdout.flush()
    print(str(datetime.now()))
    sys.stdout.flush()
    started = time.time()
    print("Transforming validation set into integer sequences")
    sys.stdout.flush()

    # Vocabulary lookup tables built during preprocessing.
    idx2word = DataConnector(kp20k_path, 'all_indices_words_r3.pkl', data=None)
    idx2word.read_pickle()
    word2idx = DataConnector(kp20k_path, 'all_words_indices_r3.pkl', data=None)
    word2idx.read_pickle()

    # Tokenized validation inputs and target keyphrases.
    in_conn = DataConnector(data_path, 'val_input_tokens.npy', data=None)
    in_conn.read_numpys()
    out_conn = DataConnector(data_path, 'val_output_tokens.npy', data=None)
    out_conn.read_numpys()

    # Map tokens to integer ids and pad inputs to the encoder length.
    processor = SequenceProcessing(idx2word.read_file, word2idx.read_file,
                                   encoder_length, decoder_length)
    X_valid = processor.intexts_to_integers(in_conn.read_file)
    X_valid_pad = processor.pad_sequences_in(encoder_length, X_valid)
    y_valid_in, y_valid_out = processor.outtexts_to_integers(out_conn.read_file)

    # Persist each transformed array.
    for fname, payload in (('X_valid_r3.npy', X_valid),
                           ('X_valid_pad_r3.npy', X_valid_pad),
                           ('y_valid_in_r3.npy', y_valid_in),
                           ('y_valid_out_r3.npy', y_valid_out)):
        DataConnector(data_path, fname, payload).save_numpys()

    print("Transforming validation set into integer sequences of inputs - outputs done in %.3fsec" % (time.time() - started))
    sys.stdout.flush()
def transform_sent_valid(params):
    """Transform the sentence-tokenized validation set into padded integer
    sequences using the vocabulary stored alongside the data, then persist
    the resulting arrays as numpy files.
    """
    data_path = params['data_path']
    valid_path = params['valid_path']
    max_sents = params['max_sents']
    encoder_length = params['encoder_length']
    decoder_length = params['decoder_length']

    print("\n=========\n")
    sys.stdout.flush()
    print(str(datetime.now()))
    sys.stdout.flush()
    started = time.time()
    print("Transforming validation set into integer sequences")
    sys.stdout.flush()

    # Vocabulary lookup tables built during preprocessing.
    idx2word = DataConnector(data_path, 'indices_words.pkl', data=None)
    idx2word.read_pickle()
    word2idx = DataConnector(data_path, 'words_indices.pkl', data=None)
    word2idx.read_pickle()

    # Sentence-tokenized validation inputs and target keyphrases.
    in_conn = DataConnector(data_path, 'val_input_sent_tokens.npy', data=None)
    in_conn.read_numpys()
    out_conn = DataConnector(data_path, 'val_output_sent_tokens.npy', data=None)
    out_conn.read_numpys()

    # Map tokens to integer ids and pad to fixed encoder/decoder lengths.
    processor = SequenceProcessing(idx2word.read_file, word2idx.read_file,
                                   encoder_length, decoder_length)
    X_valid = processor.in_sents_to_integers(in_conn.read_file, max_sents)
    X_valid_pad = processor.pad_sequences_sent_in(max_len=encoder_length, max_sents=max_sents, sequences=X_valid)
    y_valid_in, y_valid_out = processor.outtexts_to_integers(out_conn.read_file)

    # Persist each transformed array.
    for fname, payload in (('X_valid_sent.npy', X_valid),
                           ('X_valid_pad_sent.npy', X_valid_pad),
                           ('y_valid_sent_in.npy', y_valid_in),
                           ('y_valid_sent_out.npy', y_valid_out)):
        DataConnector(data_path, fname, payload).save_numpys()

    print("Transforming validation set into integer sequences of inputs - outputs done in %.3fsec" % (time.time() - started))
    sys.stdout.flush()
def transform_sent_valid_fsoftmax_v2(params):
    """Transform the sentence-tokenized validation set into padded integer
    sequences using the v2 full-softmax sentence vocabulary, then persist
    the resulting arrays as numpy files.
    """
    data_path = params['data_path']
    kp20k_path = params['kp20k_path']
    max_sents = params['max_sents']
    preprocessed_v2 = params['preprocessed_v2']
    preprocessed_data = params['preprocessed_data']
    encoder_length = params['encoder_length']
    decoder_length = params['decoder_length']

    print("\n=========\n")
    sys.stdout.flush()
    print(str(datetime.now()))
    sys.stdout.flush()
    started = time.time()
    print("Transforming validation set into integer sequences")
    sys.stdout.flush()

    # Vocabulary lookup tables built during preprocessing.
    idx2word = DataConnector(preprocessed_v2, 'all_idxword_vocabulary_sent_fsoftmax.pkl', data=None)
    idx2word.read_pickle()
    word2idx = DataConnector(preprocessed_v2, 'all_wordidx_vocabulary_sent_fsoftmax.pkl', data=None)
    word2idx.read_pickle()

    # Sentence-tokenized validation inputs and target keyphrases.
    in_conn = DataConnector(data_path, 'val_input_sent_tokens.npy', data=None)
    in_conn.read_numpys()
    out_conn = DataConnector(data_path, 'val_output_sent_tokens.npy', data=None)
    out_conn.read_numpys()

    # Map tokens to integer ids and pad to fixed encoder/decoder lengths.
    processor = SequenceProcessing(idx2word.read_file, word2idx.read_file,
                                   encoder_length, decoder_length)
    X_valid = processor.in_sents_to_integers(in_conn.read_file, max_sents)
    X_valid_pad = processor.pad_sequences_sent_in(max_len=encoder_length, max_sents=max_sents, sequences=X_valid)
    y_valid_in, y_valid_out = processor.outtexts_to_integers(out_conn.read_file)

    # Persist each transformed array.
    for fname, payload in (('X_valid_sent_fsoftmax.npy', X_valid),
                           ('X_valid_pad_sent_fsoftmax.npy', X_valid_pad),
                           ('y_valid_sent_in_fsoftmax.npy', y_valid_in),
                           ('y_valid_sent_out_fsoftmax.npy', y_valid_out)):
        DataConnector(preprocessed_data, fname, payload).save_numpys()

    print("Transforming validation set into integer sequences of inputs - outputs done in %.3fsec" % (time.time() - started))
    sys.stdout.flush()
def transform_sent_valid_fsoftmax_v1(params):
    """Transform the sentence-tokenized validation set into padded integer
    sequences using the v1 full-softmax sentence vocabulary, then persist
    the resulting arrays as numpy files.
    """
    data_path = params['data_path']
    kp20k_path = params['kp20k_path']
    max_sents = params['max_sents']
    preprocessed_v2 = params['preprocessed_v2']
    preprocessed_data = params['preprocessed_data']
    encoder_length = params['encoder_length']
    decoder_length = params['decoder_length']

    print("\n=========\n")
    sys.stdout.flush()
    print(str(datetime.now()))
    sys.stdout.flush()
    started = time.time()
    print("Transforming validation set into integer sequences")
    sys.stdout.flush()

    # Vocabulary lookup tables built during preprocessing.
    idx2word = DataConnector(preprocessed_v2, 'all_indices_words_sent_fsoftmax.pkl', data=None)
    idx2word.read_pickle()
    word2idx = DataConnector(preprocessed_v2, 'all_words_indices_sent_fsoftmax.pkl', data=None)
    word2idx.read_pickle()

    # Sentence-tokenized validation inputs and target keyphrases.
    in_conn = DataConnector(data_path, 'val_input_sent_tokens.npy', data=None)
    in_conn.read_numpys()
    out_conn = DataConnector(data_path, 'val_output_sent_tokens.npy', data=None)
    out_conn.read_numpys()

    # Map tokens to integer ids and pad to fixed encoder/decoder lengths.
    processor = SequenceProcessing(idx2word.read_file, word2idx.read_file,
                                   encoder_length, decoder_length)
    X_valid = processor.in_sents_to_integers(in_conn.read_file, max_sents)
    X_valid_pad = processor.pad_sequences_sent_in(max_len=encoder_length, max_sents=max_sents, sequences=X_valid)
    y_valid_in, y_valid_out = processor.outtexts_to_integers(out_conn.read_file)

    # Persist each transformed array.
    for fname, payload in (('X_valid_sent_fsoftmax.npy', X_valid),
                           ('X_valid_pad_sent_fsoftmax.npy', X_valid_pad),
                           ('y_valid_sent_in_fsoftmax.npy', y_valid_in),
                           ('y_valid_sent_out_fsoftmax.npy', y_valid_out)):
        DataConnector(preprocessed_data, fname, payload).save_numpys()

    print("Transforming validation set into integer sequences of inputs - outputs done in %.3fsec" % (time.time() - started))
    sys.stdout.flush()
def transform_sent_valid_v1(params):
    """Transform the sentence-tokenized validation set into padded integer
    sequences using the v1 sentence vocabulary, then persist the resulting
    arrays as numpy files.
    """
    data_path = params['data_path']
    kp20k_path = params['kp20k_path']
    max_sents = params['max_sents']
    preprocessed_v2 = params['preprocessed_v2']
    preprocessed_data = params['preprocessed_data']
    encoder_length = params['encoder_length']
    decoder_length = params['decoder_length']

    print("\n=========\n")
    sys.stdout.flush()
    print(str(datetime.now()))
    sys.stdout.flush()
    started = time.time()
    print("Transforming validation set into integer sequences")
    sys.stdout.flush()

    # Vocabulary lookup tables built during preprocessing.
    idx2word = DataConnector(preprocessed_v2, 'all_indices_words_sent.pkl', data=None)
    idx2word.read_pickle()
    word2idx = DataConnector(preprocessed_v2, 'all_words_indices_sent.pkl', data=None)
    word2idx.read_pickle()

    # Sentence-tokenized validation inputs and target keyphrases.
    in_conn = DataConnector(data_path, 'val_input_sent_tokens.npy', data=None)
    in_conn.read_numpys()
    out_conn = DataConnector(data_path, 'val_output_sent_tokens.npy', data=None)
    out_conn.read_numpys()

    # Map tokens to integer ids and pad to fixed encoder/decoder lengths.
    processor = SequenceProcessing(idx2word.read_file, word2idx.read_file,
                                   encoder_length, decoder_length)
    X_valid = processor.in_sents_to_integers(in_conn.read_file, max_sents)
    X_valid_pad = processor.pad_sequences_sent_in(max_len=encoder_length, max_sents=max_sents, sequences=X_valid)
    y_valid_in, y_valid_out = processor.outtexts_to_integers(out_conn.read_file)

    # Persist each transformed array.
    for fname, payload in (('X_valid_sent.npy', X_valid),
                           ('X_valid_pad_sent.npy', X_valid_pad),
                           ('y_valid_sent_in.npy', y_valid_in),
                           ('y_valid_sent_out.npy', y_valid_out)):
        DataConnector(preprocessed_data, fname, payload).save_numpys()

    print("Transforming validation set into integer sequences of inputs - outputs done in %.3fsec" % (time.time() - started))
    sys.stdout.flush()
def transform_sent_valid_v2(params):
    """Transform the sentence-tokenized validation set into padded integer
    sequences using the v2 sentence vocabulary, then persist the resulting
    arrays as numpy files.
    """
    data_path = params['data_path']
    kp20k_path = params['kp20k_path']
    max_sents = params['max_sents']
    preprocessed_v2 = params['preprocessed_v2']
    preprocessed_data = params['preprocessed_data']
    encoder_length = params['encoder_length']
    decoder_length = params['decoder_length']

    print("\n=========\n")
    sys.stdout.flush()
    print(str(datetime.now()))
    sys.stdout.flush()
    started = time.time()
    print("Transforming validation set into integer sequences")
    sys.stdout.flush()

    # Vocabulary lookup tables built during preprocessing.
    idx2word = DataConnector(preprocessed_v2, 'all_idxword_vocabulary_sent.pkl', data=None)
    idx2word.read_pickle()
    word2idx = DataConnector(preprocessed_v2, 'all_wordidx_vocabulary_sent.pkl', data=None)
    word2idx.read_pickle()

    # Sentence-tokenized validation inputs and target keyphrases.
    in_conn = DataConnector(data_path, 'val_input_sent_tokens.npy', data=None)
    in_conn.read_numpys()
    out_conn = DataConnector(data_path, 'val_output_sent_tokens.npy', data=None)
    out_conn.read_numpys()

    # Map tokens to integer ids and pad to fixed encoder/decoder lengths.
    processor = SequenceProcessing(idx2word.read_file, word2idx.read_file,
                                   encoder_length, decoder_length)
    X_valid = processor.in_sents_to_integers(in_conn.read_file, max_sents)
    X_valid_pad = processor.pad_sequences_sent_in(max_len=encoder_length, max_sents=max_sents, sequences=X_valid)
    y_valid_in, y_valid_out = processor.outtexts_to_integers(out_conn.read_file)

    # Persist each transformed array.
    for fname, payload in (('X_valid_sent.npy', X_valid),
                           ('X_valid_pad_sent.npy', X_valid_pad),
                           ('y_valid_sent_in.npy', y_valid_in),
                           ('y_valid_sent_out.npy', y_valid_out)):
        DataConnector(preprocessed_data, fname, payload).save_numpys()

    print("Transforming validation set into integer sequences of inputs - outputs done in %.3fsec" % (time.time() - started))
    sys.stdout.flush()
def transform_sent_valid_sub(params):
    """Transform the tokenized validation set (sentence format) into integer
    sequences with the r3 sub-vocabulary and store the results.

    Vocabulary indices are read from ``kp20k_path``; tokenized inputs/outputs
    and the saved integer sequences live under ``data_path``.
    """
    data_path = params['data_path']
    kp20k_path = params['kp20k_path']
    max_sents = params['max_sents']
    encoder_length = params['encoder_length']
    decoder_length = params['decoder_length']

    def _load_pickle(folder, fname):
        # Read a pickled vocabulary index via the project's DataConnector.
        conn = DataConnector(folder, fname, data=None)
        conn.read_pickle()
        return conn.read_file

    def _load_npy(folder, fname):
        # Read a stored numpy token array.
        conn = DataConnector(folder, fname, data=None)
        conn.read_numpys()
        return conn.read_file

    def _store_npy(folder, fname, payload):
        # Persist an array to disk.
        DataConnector(folder, fname, payload).save_numpys()

    print("\n=========\n")
    sys.stdout.flush()
    print(str(datetime.now()))
    sys.stdout.flush()
    t0 = time.time()
    print("Transforming validation set into integer sequences")
    sys.stdout.flush()

    # stored vocabulary index (integer id <-> word, both directions)
    indices_words = _load_pickle(kp20k_path, 'all_indices_words_sent_r3.pkl')
    words_indices = _load_pickle(kp20k_path, 'all_words_indices_sent_r3.pkl')

    # tokenized validation data
    valid_in_tokens = _load_npy(data_path, 'val_input_sent_tokens.npy')
    valid_out_tokens = _load_npy(data_path, 'val_output_sent_tokens.npy')

    # texts -> integer sequences; encoder inputs are padded per sentence
    sequences_processing = SequenceProcessing(indices_words, words_indices, encoder_length, decoder_length)
    X_valid = sequences_processing.in_sents_to_integers(valid_in_tokens, max_sents)
    X_valid_pad = sequences_processing.pad_sequences_sent_in(max_len=encoder_length, max_sents=max_sents, sequences=X_valid)
    y_valid_in, y_valid_out = sequences_processing.outtexts_to_integers(valid_out_tokens)

    _store_npy(data_path, 'X_valid_sent_r3.npy', X_valid)
    _store_npy(data_path, 'X_valid_pad_sent_r3.npy', X_valid_pad)
    _store_npy(data_path, 'y_valid_sent_in_r3.npy', y_valid_in)
    _store_npy(data_path, 'y_valid_sent_out_r3.npy', y_valid_out)

    t1 = time.time()
    print("Transforming validation set into integer sequences of inputs - outputs done in %.3fsec" % (t1 - t0))
    sys.stdout.flush()
def transform_test(params):
    """Transform the tokenized test set into integer sequences and store them.

    Vocabulary indices are read from ``preprocessed_kp20k``; tokenized test
    inputs/outputs are read from ``data_path`` and the resulting integer
    sequences are saved under ``preprocessed_data``.

    Parameters
    ----------
    params : dict
        Must contain 'data_path', 'preprocessed_kp20k', 'preprocessed_data',
        'encoder_length' and 'decoder_length'.
    """
    data_path = params['data_path']
    # NOTE(review): params['kp20k_path'] was read here but never used
    # (vocabularies come from 'preprocessed_kp20k'); the dead read was removed.
    preprocessed_kp20k = params['preprocessed_kp20k']
    preprocessed_data = params['preprocessed_data']
    encoder_length = params['encoder_length']
    decoder_length = params['decoder_length']
    print("\n=========\n")
    sys.stdout.flush()
    print(str(datetime.now()))
    sys.stdout.flush()
    t0 = time.time()
    print("Transforming test set into integer sequences")
    sys.stdout.flush()
    # stored vocabulary index (integer id -> word and word -> integer id)
    vocab = DataConnector(preprocessed_kp20k, 'all_idxword_vocabulary.pkl', data=None)
    vocab.read_pickle()
    indices_words = vocab.read_file
    reversed_vocab = DataConnector(preprocessed_kp20k, 'all_wordidx_vocabulary.pkl', data=None)
    reversed_vocab.read_pickle()
    words_indices = reversed_vocab.read_file
    # tokenized test data
    test_in_tokens_connector = DataConnector(data_path, 'test_input_tokens.npy', data=None)
    test_in_tokens_connector.read_numpys()
    test_in_tokens = test_in_tokens_connector.read_file
    test_out_tokens_connector = DataConnector(data_path, 'test_output_tokens.npy', data=None)
    test_out_tokens_connector.read_numpys()
    test_out_tokens = test_out_tokens_connector.read_file
    # texts -> integer sequences; encoder inputs are padded to encoder_length
    sequences_processing = SequenceProcessing(indices_words, words_indices, encoder_length, decoder_length)
    X_test = sequences_processing.intexts_to_integers(test_in_tokens)
    X_test_pad = sequences_processing.pad_sequences_in(encoder_length, X_test)
    y_test_in, y_test_out = sequences_processing.outtexts_to_integers(test_out_tokens)
    # persist the transformed arrays
    x_in_connector = DataConnector(preprocessed_data, 'X_test.npy', X_test)
    x_in_connector.save_numpys()
    x_pad_in_connector = DataConnector(preprocessed_data, 'X_test_pad.npy', X_test_pad)
    x_pad_in_connector.save_numpys()
    y_in_connector = DataConnector(preprocessed_data, 'y_test_in.npy', y_test_in)
    y_in_connector.save_numpys()
    y_out_connector = DataConnector(preprocessed_data, 'y_test_out.npy', y_test_out)
    y_out_connector.save_numpys()
    t1 = time.time()
    print("Transforming test set into integer sequences of inputs - outputs done in %.3fsec" % (t1 - t0))
    sys.stdout.flush()
def transform_test_v1_fsoftmax(params):
    """Transform the tokenized test set into integer sequences using the
    full-softmax v1 vocabulary and store the results.

    Vocabulary indices are read from ``preprocessed_v2``; tokenized test
    inputs/outputs come from ``data_path`` and the integer sequences are
    saved under ``preprocessed_data``.
    """
    data_path = params['data_path']
    preprocessed_data = params['preprocessed_data']
    preprocessed_v2 = params['preprocessed_v2']
    encoder_length = params['encoder_length']
    decoder_length = params['decoder_length']

    def _load_pickle(folder, fname):
        # Read a pickled vocabulary index via the project's DataConnector.
        conn = DataConnector(folder, fname, data=None)
        conn.read_pickle()
        return conn.read_file

    def _load_npy(folder, fname):
        # Read a stored numpy token array.
        conn = DataConnector(folder, fname, data=None)
        conn.read_numpys()
        return conn.read_file

    def _store_npy(folder, fname, payload):
        # Persist an array to disk.
        DataConnector(folder, fname, payload).save_numpys()

    print("\n=========\n")
    sys.stdout.flush()
    print(str(datetime.now()))
    sys.stdout.flush()
    t0 = time.time()
    print("Transforming test set into integer sequences")
    sys.stdout.flush()

    # stored vocabulary index (integer id <-> word, both directions)
    indices_words = _load_pickle(preprocessed_v2, 'all_indices_words_fsoftmax.pkl')
    words_indices = _load_pickle(preprocessed_v2, 'all_words_indices_fsoftmax.pkl')

    # tokenized test data
    test_in_tokens = _load_npy(data_path, 'test_input_tokens.npy')
    test_out_tokens = _load_npy(data_path, 'test_output_tokens.npy')

    # texts -> integer sequences; encoder inputs are padded to encoder_length
    sequences_processing = SequenceProcessing(indices_words, words_indices, encoder_length, decoder_length)
    X_test = sequences_processing.intexts_to_integers(test_in_tokens)
    X_test_pad = sequences_processing.pad_sequences_in(encoder_length, X_test)
    y_test_in, y_test_out = sequences_processing.outtexts_to_integers(test_out_tokens)

    _store_npy(preprocessed_data, 'X_test_fsoftmax.npy', X_test)
    _store_npy(preprocessed_data, 'X_test_pad_fsoftmax.npy', X_test_pad)
    _store_npy(preprocessed_data, 'y_test_in_fsoftmax.npy', y_test_in)
    _store_npy(preprocessed_data, 'y_test_out_fsoftmax.npy', y_test_out)

    t1 = time.time()
    print("Transforming test set into integer sequences of inputs - outputs done in %.3fsec" % (t1 - t0))
    sys.stdout.flush()
def transform_test_v2_fsoftmax(params):
    """Transform the tokenized test set into integer sequences using the
    full-softmax v2 vocabulary and store the results.

    Vocabulary indices are read from ``preprocessed_v2``; tokenized test
    inputs/outputs come from ``data_path`` and the integer sequences are
    saved under ``preprocessed_data``.
    """
    data_path = params['data_path']
    preprocessed_data = params['preprocessed_data']
    preprocessed_v2 = params['preprocessed_v2']
    encoder_length = params['encoder_length']
    decoder_length = params['decoder_length']

    def _load_pickle(folder, fname):
        # Read a pickled vocabulary index via the project's DataConnector.
        conn = DataConnector(folder, fname, data=None)
        conn.read_pickle()
        return conn.read_file

    def _load_npy(folder, fname):
        # Read a stored numpy token array.
        conn = DataConnector(folder, fname, data=None)
        conn.read_numpys()
        return conn.read_file

    def _store_npy(folder, fname, payload):
        # Persist an array to disk.
        DataConnector(folder, fname, payload).save_numpys()

    print("\n=========\n")
    sys.stdout.flush()
    print(str(datetime.now()))
    sys.stdout.flush()
    t0 = time.time()
    print("Transforming test set into integer sequences")
    sys.stdout.flush()

    # stored vocabulary index (integer id <-> word, both directions)
    indices_words = _load_pickle(preprocessed_v2, 'all_idxword_vocabulary_fsoftmax.pkl')
    words_indices = _load_pickle(preprocessed_v2, 'all_wordidx_vocabulary_fsoftmax.pkl')

    # tokenized test data
    test_in_tokens = _load_npy(data_path, 'test_input_tokens.npy')
    test_out_tokens = _load_npy(data_path, 'test_output_tokens.npy')

    # texts -> integer sequences; encoder inputs are padded to encoder_length
    sequences_processing = SequenceProcessing(indices_words, words_indices, encoder_length, decoder_length)
    X_test = sequences_processing.intexts_to_integers(test_in_tokens)
    X_test_pad = sequences_processing.pad_sequences_in(encoder_length, X_test)
    y_test_in, y_test_out = sequences_processing.outtexts_to_integers(test_out_tokens)

    _store_npy(preprocessed_data, 'X_test_fsoftmax.npy', X_test)
    _store_npy(preprocessed_data, 'X_test_pad_fsoftmax.npy', X_test_pad)
    _store_npy(preprocessed_data, 'y_test_in_fsoftmax.npy', y_test_in)
    _store_npy(preprocessed_data, 'y_test_out_fsoftmax.npy', y_test_out)

    t1 = time.time()
    print("Transforming test set into integer sequences of inputs - outputs done in %.3fsec" % (t1 - t0))
    sys.stdout.flush()
def transform_test_v1(params):
    """Transform the tokenized test set into integer sequences with the v1
    vocabulary and store the results.

    Vocabulary indices are read from ``preprocessed_v2``; tokenized test
    inputs/outputs come from ``data_path`` and the integer sequences are
    saved under ``preprocessed_data``.
    """
    data_path = params['data_path']
    preprocessed_data = params['preprocessed_data']
    preprocessed_v2 = params['preprocessed_v2']
    encoder_length = params['encoder_length']
    decoder_length = params['decoder_length']

    def _load_pickle(folder, fname):
        # Read a pickled vocabulary index via the project's DataConnector.
        conn = DataConnector(folder, fname, data=None)
        conn.read_pickle()
        return conn.read_file

    def _load_npy(folder, fname):
        # Read a stored numpy token array.
        conn = DataConnector(folder, fname, data=None)
        conn.read_numpys()
        return conn.read_file

    def _store_npy(folder, fname, payload):
        # Persist an array to disk.
        DataConnector(folder, fname, payload).save_numpys()

    print("\n=========\n")
    sys.stdout.flush()
    print(str(datetime.now()))
    sys.stdout.flush()
    t0 = time.time()
    print("Transforming test set into integer sequences")
    sys.stdout.flush()

    # stored vocabulary index (integer id <-> word, both directions)
    indices_words = _load_pickle(preprocessed_v2, 'all_indices_words.pkl')
    words_indices = _load_pickle(preprocessed_v2, 'all_words_indices.pkl')

    # tokenized test data
    test_in_tokens = _load_npy(data_path, 'test_input_tokens.npy')
    test_out_tokens = _load_npy(data_path, 'test_output_tokens.npy')

    # texts -> integer sequences; encoder inputs are padded to encoder_length
    sequences_processing = SequenceProcessing(indices_words, words_indices, encoder_length, decoder_length)
    X_test = sequences_processing.intexts_to_integers(test_in_tokens)
    X_test_pad = sequences_processing.pad_sequences_in(encoder_length, X_test)
    y_test_in, y_test_out = sequences_processing.outtexts_to_integers(test_out_tokens)

    _store_npy(preprocessed_data, 'X_test.npy', X_test)
    _store_npy(preprocessed_data, 'X_test_pad.npy', X_test_pad)
    _store_npy(preprocessed_data, 'y_test_in.npy', y_test_in)
    _store_npy(preprocessed_data, 'y_test_out.npy', y_test_out)

    t1 = time.time()
    print("Transforming test set into integer sequences of inputs - outputs done in %.3fsec" % (t1 - t0))
    sys.stdout.flush()
def transform_test_v2(params):
    """Transform the tokenized test set into integer sequences with the v2
    vocabulary and store the results.

    Vocabulary indices are read from ``preprocessed_v2``; tokenized test
    inputs/outputs come from ``data_path`` and the integer sequences are
    saved under ``preprocessed_data``.
    """
    data_path = params['data_path']
    preprocessed_data = params['preprocessed_data']
    preprocessed_v2 = params['preprocessed_v2']
    encoder_length = params['encoder_length']
    decoder_length = params['decoder_length']

    def _load_pickle(folder, fname):
        # Read a pickled vocabulary index via the project's DataConnector.
        conn = DataConnector(folder, fname, data=None)
        conn.read_pickle()
        return conn.read_file

    def _load_npy(folder, fname):
        # Read a stored numpy token array.
        conn = DataConnector(folder, fname, data=None)
        conn.read_numpys()
        return conn.read_file

    def _store_npy(folder, fname, payload):
        # Persist an array to disk.
        DataConnector(folder, fname, payload).save_numpys()

    print("\n=========\n")
    sys.stdout.flush()
    print(str(datetime.now()))
    sys.stdout.flush()
    t0 = time.time()
    print("Transforming test set into integer sequences")
    sys.stdout.flush()

    # stored vocabulary index (integer id <-> word, both directions)
    indices_words = _load_pickle(preprocessed_v2, 'all_idxword_vocabulary.pkl')
    words_indices = _load_pickle(preprocessed_v2, 'all_wordidx_vocabulary.pkl')

    # tokenized test data
    test_in_tokens = _load_npy(data_path, 'test_input_tokens.npy')
    test_out_tokens = _load_npy(data_path, 'test_output_tokens.npy')

    # texts -> integer sequences; encoder inputs are padded to encoder_length
    sequences_processing = SequenceProcessing(indices_words, words_indices, encoder_length, decoder_length)
    X_test = sequences_processing.intexts_to_integers(test_in_tokens)
    X_test_pad = sequences_processing.pad_sequences_in(encoder_length, X_test)
    y_test_in, y_test_out = sequences_processing.outtexts_to_integers(test_out_tokens)

    _store_npy(preprocessed_data, 'X_test.npy', X_test)
    _store_npy(preprocessed_data, 'X_test_pad.npy', X_test_pad)
    _store_npy(preprocessed_data, 'y_test_in.npy', y_test_in)
    _store_npy(preprocessed_data, 'y_test_out.npy', y_test_out)

    t1 = time.time()
    print("Transforming test set into integer sequences of inputs - outputs done in %.3fsec" % (t1 - t0))
    sys.stdout.flush()
def transform_test_sub(params):
    """Transform the tokenized test set into integer sequences with the r3
    sub-vocabulary and store the results.

    Vocabulary indices are read from ``kp20k_path``; tokenized test
    inputs/outputs and the saved integer sequences live under ``data_path``.
    """
    data_path = params['data_path']
    kp20k_path = params['kp20k_path']
    encoder_length = params['encoder_length']
    decoder_length = params['decoder_length']

    def _load_pickle(folder, fname):
        # Read a pickled vocabulary index via the project's DataConnector.
        conn = DataConnector(folder, fname, data=None)
        conn.read_pickle()
        return conn.read_file

    def _load_npy(folder, fname):
        # Read a stored numpy token array.
        conn = DataConnector(folder, fname, data=None)
        conn.read_numpys()
        return conn.read_file

    def _store_npy(folder, fname, payload):
        # Persist an array to disk.
        DataConnector(folder, fname, payload).save_numpys()

    print("\n=========\n")
    sys.stdout.flush()
    print(str(datetime.now()))
    sys.stdout.flush()
    t0 = time.time()
    print("Transforming test set into integer sequences")
    sys.stdout.flush()

    # stored vocabulary index (integer id <-> word, both directions)
    indices_words = _load_pickle(kp20k_path, 'all_indices_words_r3.pkl')
    words_indices = _load_pickle(kp20k_path, 'all_words_indices_r3.pkl')

    # tokenized test data
    test_in_tokens = _load_npy(data_path, 'test_input_tokens.npy')
    test_out_tokens = _load_npy(data_path, 'test_output_tokens.npy')

    # texts -> integer sequences; encoder inputs are padded to encoder_length
    sequences_processing = SequenceProcessing(indices_words, words_indices, encoder_length, decoder_length)
    X_test = sequences_processing.intexts_to_integers(test_in_tokens)
    X_test_pad = sequences_processing.pad_sequences_in(encoder_length, X_test)
    y_test_in, y_test_out = sequences_processing.outtexts_to_integers(test_out_tokens)

    _store_npy(data_path, 'X_test_r3.npy', X_test)
    _store_npy(data_path, 'X_test_pad_r3.npy', X_test_pad)
    _store_npy(data_path, 'y_test_in_r3.npy', y_test_in)
    _store_npy(data_path, 'y_test_out_r3.npy', y_test_out)

    t1 = time.time()
    print("Transforming test set into integer sequences of inputs - outputs done in %.3fsec" % (t1 - t0))
    sys.stdout.flush()
def transform_sent_test(params):
    """Transform the tokenized test set (sentence format) into integer
    sequences and store them.

    Vocabularies, tokenized inputs/outputs, and the saved integer sequences
    all live under ``data_path``.

    Parameters
    ----------
    params : dict
        Must contain 'data_path', 'max_sents', 'encoder_length' and
        'decoder_length'.
    """
    data_path = params['data_path']
    # NOTE(review): params['test_path'] was read here but never used;
    # the dead read was removed.
    max_sents = params['max_sents']
    encoder_length = params['encoder_length']
    decoder_length = params['decoder_length']
    print("\n=========\n")
    sys.stdout.flush()
    print(str(datetime.now()))
    sys.stdout.flush()
    t0 = time.time()
    print("Transforming test set into integer sequences")
    sys.stdout.flush()
    # stored vocabulary index (integer id -> word and word -> integer id)
    vocab = DataConnector(data_path, 'indices_words.pkl', data=None)
    vocab.read_pickle()
    indices_words = vocab.read_file
    reversed_vocab = DataConnector(data_path, 'words_indices.pkl', data=None)
    reversed_vocab.read_pickle()
    words_indices = reversed_vocab.read_file
    # tokenized test data (sentence format)
    test_in_tokens_connector = DataConnector(data_path, 'test_input_sent_tokens.npy', data=None)
    test_in_tokens_connector.read_numpys()
    test_in_tokens = test_in_tokens_connector.read_file
    test_out_tokens_connector = DataConnector(data_path, 'test_output_sent_tokens.npy', data=None)
    test_out_tokens_connector.read_numpys()
    test_out_tokens = test_out_tokens_connector.read_file
    # texts -> integer sequences; encoder inputs are padded per sentence
    sequences_processing = SequenceProcessing(indices_words, words_indices, encoder_length, decoder_length)
    X_test = sequences_processing.in_sents_to_integers(test_in_tokens, max_sents)
    X_test_pad = sequences_processing.pad_sequences_sent_in(max_len=encoder_length, max_sents= max_sents, sequences=X_test)
    y_test_in, y_test_out = sequences_processing.outtexts_to_integers(test_out_tokens)
    # persist the transformed arrays
    x_in_connector = DataConnector(data_path, 'X_test_sent.npy', X_test)
    x_in_connector.save_numpys()
    x_pad_in_connector = DataConnector(data_path, 'X_test_pad_sent.npy', X_test_pad)
    x_pad_in_connector.save_numpys()
    y_in_connector = DataConnector(data_path, 'y_test_sent_in.npy', y_test_in)
    y_in_connector.save_numpys()
    y_out_connector = DataConnector(data_path, 'y_test_sent_out.npy', y_test_out)
    y_out_connector.save_numpys()
    t1 = time.time()
    print("Transforming test set into integer sequences of inputs - outputs done in %.3fsec" % (t1 - t0))
    sys.stdout.flush()
def transform_sent_test_fsoftmax_v1(params):
    """Transform the tokenized test set (sentence format) into integer
    sequences using the full-softmax v1 vocabulary and store the results.

    Vocabulary indices are read from ``preprocessed_v2``; tokenized test
    inputs/outputs come from ``data_path`` and the integer sequences are
    saved under ``preprocessed_data``.

    Parameters
    ----------
    params : dict
        Must contain 'data_path', 'max_sents', 'preprocessed_v2',
        'preprocessed_data', 'encoder_length' and 'decoder_length'.
    """
    data_path = params['data_path']
    # NOTE(review): params['kp20k_path'] was read here but never used;
    # the dead read was removed.
    max_sents = params['max_sents']
    preprocessed_v2 = params['preprocessed_v2']
    preprocessed_data = params['preprocessed_data']
    encoder_length = params['encoder_length']
    decoder_length = params['decoder_length']
    print(str(datetime.now()))
    sys.stdout.flush()
    t0 = time.time()
    print("Transforming test set into integer sequences")
    sys.stdout.flush()
    # stored vocabulary index (integer id -> word and word -> integer id)
    vocab = DataConnector(preprocessed_v2, 'all_indices_words_sent_fsoftmax.pkl', data=None)
    vocab.read_pickle()
    indices_words = vocab.read_file
    reversed_vocab = DataConnector(preprocessed_v2, 'all_words_indices_sent_fsoftmax.pkl', data=None)
    reversed_vocab.read_pickle()
    words_indices = reversed_vocab.read_file
    # tokenized test data (sentence format)
    test_in_tokens_connector = DataConnector(data_path, 'test_input_sent_tokens.npy', data=None)
    test_in_tokens_connector.read_numpys()
    test_in_tokens = test_in_tokens_connector.read_file
    test_out_tokens_connector = DataConnector(data_path, 'test_output_sent_tokens.npy', data=None)
    test_out_tokens_connector.read_numpys()
    test_out_tokens = test_out_tokens_connector.read_file
    # texts -> integer sequences; encoder inputs are padded per sentence
    sequences_processing = SequenceProcessing(indices_words, words_indices, encoder_length, decoder_length)
    X_test = sequences_processing.in_sents_to_integers(test_in_tokens, max_sents)
    X_test_pad = sequences_processing.pad_sequences_sent_in(max_len=encoder_length, max_sents= max_sents, sequences=X_test)
    y_test_in, y_test_out = sequences_processing.outtexts_to_integers(test_out_tokens)
    # persist the transformed arrays
    x_in_connector = DataConnector(preprocessed_data, 'X_test_sent_fsoftmax.npy', X_test)
    x_in_connector.save_numpys()
    x_pad_in_connector = DataConnector(preprocessed_data, 'X_test_pad_sent_fsoftmax.npy', X_test_pad)
    x_pad_in_connector.save_numpys()
    y_in_connector = DataConnector(preprocessed_data, 'y_test_sent_in_fsoftmax.npy', y_test_in)
    y_in_connector.save_numpys()
    y_out_connector = DataConnector(preprocessed_data, 'y_test_sent_out_fsoftmax.npy', y_test_out)
    y_out_connector.save_numpys()
    t1 = time.time()
    print("Transforming test set into integer sequences of inputs - outputs done in %.3fsec" % (t1 - t0))
    sys.stdout.flush()
def transform_sent_test_fsoftmax_v2(params):
    """Transform the tokenized test set (sentence format) into integer
    sequences using the full-softmax v2 vocabulary and store the results.

    Vocabulary indices are read from ``preprocessed_v2``; tokenized test
    inputs/outputs come from ``data_path`` and the integer sequences are
    saved under ``preprocessed_data``.

    Parameters
    ----------
    params : dict
        Must contain 'data_path', 'max_sents', 'preprocessed_v2',
        'preprocessed_data', 'encoder_length' and 'decoder_length'.
    """
    data_path = params['data_path']
    # NOTE(review): params['kp20k_path'] was read here but never used;
    # the dead read was removed.
    max_sents = params['max_sents']
    preprocessed_v2 = params['preprocessed_v2']
    preprocessed_data = params['preprocessed_data']
    encoder_length = params['encoder_length']
    decoder_length = params['decoder_length']
    print(str(datetime.now()))
    sys.stdout.flush()
    t0 = time.time()
    print("Transforming test set into integer sequences")
    sys.stdout.flush()
    # stored vocabulary index (integer id -> word and word -> integer id)
    vocab = DataConnector(preprocessed_v2, 'all_idxword_vocabulary_sent_fsoftmax.pkl', data=None)
    vocab.read_pickle()
    indices_words = vocab.read_file
    reversed_vocab = DataConnector(preprocessed_v2, 'all_wordidx_vocabulary_sent_fsoftmax.pkl', data=None)
    reversed_vocab.read_pickle()
    words_indices = reversed_vocab.read_file
    # tokenized test data (sentence format)
    test_in_tokens_connector = DataConnector(data_path, 'test_input_sent_tokens.npy', data=None)
    test_in_tokens_connector.read_numpys()
    test_in_tokens = test_in_tokens_connector.read_file
    test_out_tokens_connector = DataConnector(data_path, 'test_output_sent_tokens.npy', data=None)
    test_out_tokens_connector.read_numpys()
    test_out_tokens = test_out_tokens_connector.read_file
    # texts -> integer sequences; encoder inputs are padded per sentence
    sequences_processing = SequenceProcessing(indices_words, words_indices, encoder_length, decoder_length)
    X_test = sequences_processing.in_sents_to_integers(test_in_tokens, max_sents)
    X_test_pad = sequences_processing.pad_sequences_sent_in(max_len=encoder_length, max_sents= max_sents, sequences=X_test)
    y_test_in, y_test_out = sequences_processing.outtexts_to_integers(test_out_tokens)
    # persist the transformed arrays
    x_in_connector = DataConnector(preprocessed_data, 'X_test_sent_fsoftmax.npy', X_test)
    x_in_connector.save_numpys()
    x_pad_in_connector = DataConnector(preprocessed_data, 'X_test_pad_sent_fsoftmax.npy', X_test_pad)
    x_pad_in_connector.save_numpys()
    y_in_connector = DataConnector(preprocessed_data, 'y_test_sent_in_fsoftmax.npy', y_test_in)
    y_in_connector.save_numpys()
    y_out_connector = DataConnector(preprocessed_data, 'y_test_sent_out_fsoftmax.npy', y_test_out)
    y_out_connector.save_numpys()
    t1 = time.time()
    print("Transforming test set into integer sequences of inputs - outputs done in %.3fsec" % (t1 - t0))
    sys.stdout.flush()
def transform_sent_test_v1(params):
    """Transform the tokenized test set (sentence format) into integer
    sequences using the v1 vocabulary and store the results.

    Vocabulary indices are read from ``preprocessed_v2``; tokenized test
    inputs/outputs come from ``data_path`` and the integer sequences are
    saved under ``preprocessed_data``.

    Parameters
    ----------
    params : dict
        Must contain 'data_path', 'max_sents', 'preprocessed_v2',
        'preprocessed_data', 'encoder_length' and 'decoder_length'.
    """
    data_path = params['data_path']
    # NOTE(review): params['kp20k_path'] was read here but never used;
    # the dead read was removed.
    max_sents = params['max_sents']
    preprocessed_v2 = params['preprocessed_v2']
    preprocessed_data = params['preprocessed_data']
    encoder_length = params['encoder_length']
    decoder_length = params['decoder_length']
    print(str(datetime.now()))
    sys.stdout.flush()
    t0 = time.time()
    print("Transforming test set into integer sequences")
    sys.stdout.flush()
    # stored vocabulary index (integer id -> word and word -> integer id)
    vocab = DataConnector(preprocessed_v2, 'all_indices_words_sent.pkl', data=None)
    vocab.read_pickle()
    indices_words = vocab.read_file
    reversed_vocab = DataConnector(preprocessed_v2, 'all_words_indices_sent.pkl', data=None)
    reversed_vocab.read_pickle()
    words_indices = reversed_vocab.read_file
    # tokenized test data (sentence format)
    test_in_tokens_connector = DataConnector(data_path, 'test_input_sent_tokens.npy', data=None)
    test_in_tokens_connector.read_numpys()
    test_in_tokens = test_in_tokens_connector.read_file
    test_out_tokens_connector = DataConnector(data_path, 'test_output_sent_tokens.npy', data=None)
    test_out_tokens_connector.read_numpys()
    test_out_tokens = test_out_tokens_connector.read_file
    # texts -> integer sequences; encoder inputs are padded per sentence
    sequences_processing = SequenceProcessing(indices_words, words_indices, encoder_length, decoder_length)
    X_test = sequences_processing.in_sents_to_integers(test_in_tokens, max_sents)
    X_test_pad = sequences_processing.pad_sequences_sent_in(max_len=encoder_length, max_sents= max_sents, sequences=X_test)
    y_test_in, y_test_out = sequences_processing.outtexts_to_integers(test_out_tokens)
    # persist the transformed arrays
    x_in_connector = DataConnector(preprocessed_data, 'X_test_sent.npy', X_test)
    x_in_connector.save_numpys()
    x_pad_in_connector = DataConnector(preprocessed_data, 'X_test_pad_sent.npy', X_test_pad)
    x_pad_in_connector.save_numpys()
    y_in_connector = DataConnector(preprocessed_data, 'y_test_sent_in.npy', y_test_in)
    y_in_connector.save_numpys()
    y_out_connector = DataConnector(preprocessed_data, 'y_test_sent_out.npy', y_test_out)
    y_out_connector.save_numpys()
    t1 = time.time()
    print("Transforming test set into integer sequences of inputs - outputs done in %.3fsec" % (t1 - t0))
    sys.stdout.flush()
def transform_sent_test_v2(params):
    """Transform the tokenized test set (sentence format) into integer
    sequences using the v2 vocabulary and store the results.

    Vocabulary indices are read from ``preprocessed_v2``; tokenized test
    inputs/outputs come from ``data_path`` and the integer sequences are
    saved under ``preprocessed_data``.

    Parameters
    ----------
    params : dict
        Must contain 'data_path', 'max_sents', 'preprocessed_v2',
        'preprocessed_data', 'encoder_length' and 'decoder_length'.
    """
    data_path = params['data_path']
    # NOTE(review): params['kp20k_path'] was read here but never used;
    # the dead read was removed.
    max_sents = params['max_sents']
    preprocessed_v2 = params['preprocessed_v2']
    preprocessed_data = params['preprocessed_data']
    encoder_length = params['encoder_length']
    decoder_length = params['decoder_length']
    print(str(datetime.now()))
    sys.stdout.flush()
    t0 = time.time()
    print("Transforming test set into integer sequences")
    sys.stdout.flush()
    # stored vocabulary index (integer id -> word and word -> integer id)
    vocab = DataConnector(preprocessed_v2, 'all_idxword_vocabulary_sent.pkl', data=None)
    vocab.read_pickle()
    indices_words = vocab.read_file
    reversed_vocab = DataConnector(preprocessed_v2, 'all_wordidx_vocabulary_sent.pkl', data=None)
    reversed_vocab.read_pickle()
    words_indices = reversed_vocab.read_file
    # tokenized test data (sentence format)
    test_in_tokens_connector = DataConnector(data_path, 'test_input_sent_tokens.npy', data=None)
    test_in_tokens_connector.read_numpys()
    test_in_tokens = test_in_tokens_connector.read_file
    test_out_tokens_connector = DataConnector(data_path, 'test_output_sent_tokens.npy', data=None)
    test_out_tokens_connector.read_numpys()
    test_out_tokens = test_out_tokens_connector.read_file
    # texts -> integer sequences; encoder inputs are padded per sentence
    sequences_processing = SequenceProcessing(indices_words, words_indices, encoder_length, decoder_length)
    X_test = sequences_processing.in_sents_to_integers(test_in_tokens, max_sents)
    X_test_pad = sequences_processing.pad_sequences_sent_in(max_len=encoder_length, max_sents= max_sents, sequences=X_test)
    y_test_in, y_test_out = sequences_processing.outtexts_to_integers(test_out_tokens)
    # persist the transformed arrays
    x_in_connector = DataConnector(preprocessed_data, 'X_test_sent.npy', X_test)
    x_in_connector.save_numpys()
    x_pad_in_connector = DataConnector(preprocessed_data, 'X_test_pad_sent.npy', X_test_pad)
    x_pad_in_connector.save_numpys()
    y_in_connector = DataConnector(preprocessed_data, 'y_test_sent_in.npy', y_test_in)
    y_in_connector.save_numpys()
    y_out_connector = DataConnector(preprocessed_data, 'y_test_sent_out.npy', y_test_out)
    y_out_connector.save_numpys()
    t1 = time.time()
    print("Transforming test set into integer sequences of inputs - outputs done in %.3fsec" % (t1 - t0))
    sys.stdout.flush()
def transform_sent_test_sub(params):
    """Transform the tokenized test set (sentence format) into integer
    sequences with the r3 sub-vocabulary and store the results.

    Vocabulary indices are read from ``kp20k_path``; tokenized test
    inputs/outputs and the saved integer sequences live under ``data_path``.
    """
    data_path = params['data_path']
    kp20k_path = params['kp20k_path']
    max_sents = params['max_sents']
    encoder_length = params['encoder_length']
    decoder_length = params['decoder_length']

    def _load_pickle(folder, fname):
        # Read a pickled vocabulary index via the project's DataConnector.
        conn = DataConnector(folder, fname, data=None)
        conn.read_pickle()
        return conn.read_file

    def _load_npy(folder, fname):
        # Read a stored numpy token array.
        conn = DataConnector(folder, fname, data=None)
        conn.read_numpys()
        return conn.read_file

    def _store_npy(folder, fname, payload):
        # Persist an array to disk.
        DataConnector(folder, fname, payload).save_numpys()

    print(str(datetime.now()))
    sys.stdout.flush()
    t0 = time.time()
    print("Transforming test set into integer sequences")
    sys.stdout.flush()

    # stored vocabulary index (integer id <-> word, both directions)
    indices_words = _load_pickle(kp20k_path, 'all_indices_words_sent_r3.pkl')
    words_indices = _load_pickle(kp20k_path, 'all_words_indices_sent_r3.pkl')

    # tokenized test data (sentence format)
    test_in_tokens = _load_npy(data_path, 'test_input_sent_tokens.npy')
    test_out_tokens = _load_npy(data_path, 'test_output_sent_tokens.npy')

    # texts -> integer sequences; encoder inputs are padded per sentence
    sequences_processing = SequenceProcessing(indices_words, words_indices, encoder_length, decoder_length)
    X_test = sequences_processing.in_sents_to_integers(test_in_tokens, max_sents)
    X_test_pad = sequences_processing.pad_sequences_sent_in(max_len=encoder_length, max_sents=max_sents, sequences=X_test)
    y_test_in, y_test_out = sequences_processing.outtexts_to_integers(test_out_tokens)

    _store_npy(data_path, 'X_test_sent_r3.npy', X_test)
    _store_npy(data_path, 'X_test_pad_sent_r3.npy', X_test_pad)
    _store_npy(data_path, 'y_test_sent_in_r3.npy', y_test_in)
    _store_npy(data_path, 'y_test_sent_out_r3.npy', y_test_out)

    t1 = time.time()
    print("Transforming test set into integer sequences of inputs - outputs done in %.3fsec" % (t1 - t0))
    sys.stdout.flush()
def pair_train(params):
    """Expand the training set into (document, keyphrase) pairs and pad them.

    Reads ``X_train.npy`` / ``y_train_in.npy`` / ``y_train_out.npy`` from
    ``preprocessed_data``, pairs every document with each of its keyphrase
    targets, pads the pairs to ``encoder_length`` / ``decoder_length``, and
    saves the paired arrays back under ``preprocessed_data``.

    Parameters
    ----------
    params : dict
        Must contain 'preprocessed_data', 'encoder_length' and
        'decoder_length'.
    """
    # NOTE(review): 'data_path', 'kp20k_path' and 'preprocessed_kp20k' were
    # read from params here but never used; the dead reads were removed.
    preprocessed_data = params['preprocessed_data']
    encoder_length = params['encoder_length']
    decoder_length = params['decoder_length']
    print("\n=========\n")
    sys.stdout.flush()
    print(str(datetime.now()))
    sys.stdout.flush()
    t0 = time.time()
    print("Pairing training data to train the model...")
    sys.stdout.flush()
    # integer-encoded training inputs and decoder in/out targets
    x_train_connector = DataConnector(preprocessed_data, 'X_train.npy', data=None)
    x_train_connector.read_numpys()
    X_train = x_train_connector.read_file
    y_train_in_connector = DataConnector(preprocessed_data, 'y_train_in.npy', data=None)
    y_train_in_connector.read_numpys()
    y_train_in = y_train_in_connector.read_file
    y_train_out_connector = DataConnector(preprocessed_data, 'y_train_out.npy', data=None)
    y_train_out_connector.read_numpys()
    y_train_out = y_train_out_connector.read_file
    # vocabularies are not needed for pairing, only the length limits
    sequences_processing = SequenceProcessing(indices_words=None, words_indices=None, encoder_length=encoder_length, decoder_length=decoder_length)
    doc_pair, x_pair, y_pair_in, y_pair_out = sequences_processing.pairing_data_(X_train, y_train_in, y_train_out)
    print("\nshape of x_pair in training set: %s\n"%str(np.array(x_pair).shape))
    print("\nshape of y_pair_in in training set: %s\n"%str(np.array(y_pair_in).shape))
    print("\nshape of y_pair_out in training set: %s\n"%str(np.array(y_pair_out).shape))
    x_pair_train, y_pair_train_in, y_pair_train_out = sequences_processing.pad_sequences(encoder_length, decoder_length, x_pair, y_pair_in, y_pair_out)
    print("\nshape of x_pair in training set: %s\n"%str(x_pair_train.shape))
    print("\nshape of y_pair_in in training set: %s\n"%str(y_pair_train_in.shape))
    print("\nshape of y_pair_out in training set: %s\n"%str(y_pair_train_out.shape))
    # persist the paired, padded arrays
    doc_in_connector = DataConnector(preprocessed_data, 'doc_pair_train.npy', doc_pair)
    doc_in_connector.save_numpys()
    x_in_connector = DataConnector(preprocessed_data, 'x_pair_train.npy', x_pair_train)
    x_in_connector.save_numpys()
    y_in_connector = DataConnector(preprocessed_data, 'y_pair_train_in.npy', y_pair_train_in)
    y_in_connector.save_numpys()
    y_out_connector = DataConnector(preprocessed_data, 'y_pair_train_out.npy', y_pair_train_out)
    y_out_connector.save_numpys()
    t1 = time.time()
    print("Pairing training set into sequences of inputs - outputs done in %.3fsec" % (t1 - t0))
    sys.stdout.flush()
def pair_train_sub(params):
    """Expand the r2 sub-vocabulary training set into (document, keyphrase)
    pairs and pad them.

    Reads ``X_train_r2.npy`` / ``y_train_in_r2.npy`` / ``y_train_out_r2.npy``
    from ``data_path``, pairs every document with each of its keyphrase
    targets, pads the pairs to ``encoder_length`` / ``decoder_length``, and
    saves the paired arrays back under ``data_path``.

    Parameters
    ----------
    params : dict
        Must contain 'data_path', 'encoder_length' and 'decoder_length'.
    """
    data_path = params['data_path']
    # NOTE(review): params['kp20k_path'] was read here but never used;
    # the dead read was removed.
    encoder_length = params['encoder_length']
    decoder_length = params['decoder_length']
    print("\n=========\n")
    sys.stdout.flush()
    print(str(datetime.now()))
    sys.stdout.flush()
    t0 = time.time()
    print("Pairing training data to train the model...")
    sys.stdout.flush()
    # integer-encoded training inputs and decoder in/out targets
    x_train_connector = DataConnector(data_path, 'X_train_r2.npy', data=None)
    x_train_connector.read_numpys()
    X_train = x_train_connector.read_file
    y_train_in_connector = DataConnector(data_path, 'y_train_in_r2.npy', data=None)
    y_train_in_connector.read_numpys()
    y_train_in = y_train_in_connector.read_file
    y_train_out_connector = DataConnector(data_path, 'y_train_out_r2.npy', data=None)
    y_train_out_connector.read_numpys()
    y_train_out = y_train_out_connector.read_file
    # vocabularies are not needed for pairing, only the length limits
    sequences_processing = SequenceProcessing(indices_words=None, words_indices=None, encoder_length=encoder_length, decoder_length=decoder_length)
    doc_pair, x_pair, y_pair_in, y_pair_out = sequences_processing.pairing_data_(X_train, y_train_in, y_train_out)
    print("\nshape of x_pair in training set: %s\n"%str(np.array(x_pair).shape))
    print("\nshape of y_pair_in in training set: %s\n"%str(np.array(y_pair_in).shape))
    print("\nshape of y_pair_out in training set: %s\n"%str(np.array(y_pair_out).shape))
    x_pair_train, y_pair_train_in, y_pair_train_out = sequences_processing.pad_sequences(encoder_length, decoder_length, x_pair, y_pair_in, y_pair_out)
    print("\nshape of x_pair in training set: %s\n"%str(x_pair_train.shape))
    print("\nshape of y_pair_in in training set: %s\n"%str(y_pair_train_in.shape))
    print("\nshape of y_pair_out in training set: %s\n"%str(y_pair_train_out.shape))
    # persist the paired, padded arrays
    doc_in_connector = DataConnector(data_path, 'doc_pair_train_r2.npy', doc_pair)
    doc_in_connector.save_numpys()
    x_in_connector = DataConnector(data_path, 'x_pair_train_r2.npy', x_pair_train)
    x_in_connector.save_numpys()
    y_in_connector = DataConnector(data_path, 'y_pair_train_in_r2.npy', y_pair_train_in)
    y_in_connector.save_numpys()
    y_out_connector = DataConnector(data_path, 'y_pair_train_out_r2.npy', y_pair_train_out)
    y_out_connector.save_numpys()
    t1 = time.time()
    print("Pairing training set into sequences of inputs - outputs done in %.3fsec" % (t1 - t0))
    sys.stdout.flush()
def pair_sent_train(params):
    """Pair sentence-level training documents with their keyphrase targets.

    Reads the preprocessed sentence-split training arrays from
    ``params['data_path']``, expands each document into (document, keyphrase)
    pairs, pads encoder inputs to ``(max_sents, encoder_length)`` and decoder
    sequences to ``decoder_length + 1``, then saves the paired arrays back to
    the same directory.
    """
    data_path = params['data_path']
    train_path = params['train_path']  # read for config completeness; not used below
    max_sents = params['max_sents']
    encoder_length = params['encoder_length']
    decoder_length = params['decoder_length']

    def load_array(filename):
        # Load one .npy array from data_path through the project connector.
        connector = DataConnector(data_path, filename, data=None)
        connector.read_numpys()
        return connector.read_file

    def save_array(filename, array):
        # Persist one array back into data_path.
        DataConnector(data_path, filename, array).save_numpys()

    print("\n=========\n")
    sys.stdout.flush()
    print(str(datetime.now()))
    sys.stdout.flush()
    t0 = time.time()
    print("Pairing training data to train the model...")
    sys.stdout.flush()

    X_train = load_array('X_train_sent.npy')
    y_train_in = load_array('y_train_sent_in.npy')
    y_train_out = load_array('y_train_sent_out.npy')

    seq_proc = SequenceProcessing(indices_words=None, words_indices=None, encoder_length=None, decoder_length=None)
    doc_pair, x_raw, y_in_raw, y_out_raw = seq_proc.pairing_data_(X_train, y_train_in, y_train_out)
    x_pair_train = seq_proc.pad_sequences_sent_in(max_len=encoder_length, max_sents=max_sents, sequences=x_raw)
    y_pair_train_in = seq_proc.pad_sequences_in(max_len=decoder_length+1, sequences=y_in_raw)
    y_pair_train_out = seq_proc.pad_sequences_in(max_len=decoder_length+1, sequences=y_out_raw)

    print("\nshape of x_pair in training set: %s\n"%str(x_pair_train.shape))
    print("\nshape of y_pair_in in training set: %s\n"%str(y_pair_train_in.shape))
    print("\nshape of y_pair_out in training set: %s\n"%str(y_pair_train_out.shape))

    save_array('doc_pair_train_sent.npy', doc_pair)
    save_array('x_pair_train_sent.npy', x_pair_train)
    save_array('y_pair_train_sent_in.npy', y_pair_train_in)
    save_array('y_pair_train_sent_out.npy', y_pair_train_out)

    t1 = time.time()
    print("Pairing training set into sequences of inputs - outputs done in %.3fsec" % (t1 - t0))
    sys.stdout.flush()
def pair_sent_train_sub(params):
    """Pair the sentence-level *sub-corpus* ("_r2") training split.

    Same pipeline as ``pair_sent_train`` but operating on the ``*_r2.npy``
    variants in ``params['data_path']``: expand into (document, keyphrase)
    pairs, pad, and save the paired arrays.
    """
    data_path = params['data_path']
    max_sents = params['max_sents']
    encoder_length = params['encoder_length']
    decoder_length = params['decoder_length']

    def load_array(filename):
        # Load one .npy array from data_path through the project connector.
        connector = DataConnector(data_path, filename, data=None)
        connector.read_numpys()
        return connector.read_file

    def save_array(filename, array):
        # Persist one array back into data_path.
        DataConnector(data_path, filename, array).save_numpys()

    print(str(datetime.now()))
    sys.stdout.flush()
    t0 = time.time()
    print("Pairing training data to train the model...")
    sys.stdout.flush()

    X_train = load_array('X_train_sent_r2.npy')
    y_train_in = load_array('y_train_sent_in_r2.npy')
    y_train_out = load_array('y_train_sent_out_r2.npy')

    seq_proc = SequenceProcessing(indices_words=None, words_indices=None, encoder_length=None, decoder_length=None)
    doc_pair, x_raw, y_in_raw, y_out_raw = seq_proc.pairing_data_(X_train, y_train_in, y_train_out)
    x_pair_train = seq_proc.pad_sequences_sent_in(max_len=encoder_length, max_sents=max_sents, sequences=x_raw)
    y_pair_train_in = seq_proc.pad_sequences_in(max_len=decoder_length+1, sequences=y_in_raw)
    y_pair_train_out = seq_proc.pad_sequences_in(max_len=decoder_length+1, sequences=y_out_raw)

    print("\nshape of x_pair in training set: %s\n"%str(x_pair_train.shape))
    print("\nshape of y_pair_in in training set: %s\n"%str(y_pair_train_in.shape))
    print("\nshape of y_pair_out in training set: %s\n"%str(y_pair_train_out.shape))

    save_array('doc_pair_train_sent_r2.npy', doc_pair)
    save_array('x_pair_train_sent_r2.npy', x_pair_train)
    save_array('y_pair_train_sent_in_r2.npy', y_pair_train_in)
    save_array('y_pair_train_sent_out_r2.npy', y_pair_train_out)

    t1 = time.time()
    print("Pairing training set into sequences of inputs - outputs done in %.3fsec" % (t1 - t0))
    sys.stdout.flush()
def pair_valid(params):
    """Pair validation documents with their keyphrase targets and pad them.

    Reads the X/y validation arrays from ``params['preprocessed_data']``,
    expands each document into (document, keyphrase) pairs, pads to the
    configured encoder/decoder lengths, and writes the paired arrays back to
    the same directory.
    """
    data_path = params['data_path']  # read for config completeness; not used below
    kp20k_path = params['kp20k_path']  # not used below
    preprocessed_kp20k = params['preprocessed_kp20k']  # not used below
    preprocessed_data = params['preprocessed_data']
    encoder_length = params['encoder_length']
    decoder_length = params['decoder_length']

    def load_array(filename):
        # Load one .npy array from preprocessed_data through the project connector.
        connector = DataConnector(preprocessed_data, filename, data=None)
        connector.read_numpys()
        return connector.read_file

    def save_array(filename, array):
        # Persist one array back into preprocessed_data.
        DataConnector(preprocessed_data, filename, array).save_numpys()

    print("\n=========\n")
    sys.stdout.flush()
    print(str(datetime.now()))
    sys.stdout.flush()
    t0 = time.time()
    print("Pairing validation set...")
    sys.stdout.flush()

    X_valid = load_array('X_valid.npy')
    y_valid_in = load_array('y_valid_in.npy')
    y_valid_out = load_array('y_valid_out.npy')

    seq_proc = SequenceProcessing(indices_words=None, words_indices=None, encoder_length=None, decoder_length=None)
    doc_pair, x_raw, y_in_raw, y_out_raw = seq_proc.pairing_data_(X_valid, y_valid_in, y_valid_out)
    x_pair_val, y_pair_val_in, y_pair_val_out = seq_proc.pad_sequences(encoder_length, decoder_length, x_raw, y_in_raw, y_out_raw)

    print("\nshape of x_pair in val set: %s\n"%str(x_pair_val.shape))
    print("\nshape of y_pair_in in val set: %s\n"%str(y_pair_val_in.shape))
    print("\nshape of y_pair_out in val set: %s\n"%str(y_pair_val_out.shape))

    # NOTE(review): the validation doc pairs are saved under a 'test' filename,
    # which pair_test also writes to the same directory -- confirm this
    # overwrite/collision is intended.
    save_array('doc_pair_test.npy', doc_pair)
    save_array('x_pair_val.npy', x_pair_val)
    save_array('y_pair_val_in.npy', y_pair_val_in)
    save_array('y_pair_val_out.npy', y_pair_val_out)

    t1 = time.time()
    print("Pairing validation set into sequences of inputs - outputs done in %.3fsec" % (t1 - t0))
    sys.stdout.flush()
def pair_valid_sub(params):
    """Pair the sub-corpus ("_r2") validation split and pad it.

    Same pipeline as ``pair_valid`` but reading/writing the ``*_r2.npy``
    variants under ``params['data_path']``.
    """
    data_path = params['data_path']
    encoder_length = params['encoder_length']
    decoder_length = params['decoder_length']

    def load_array(filename):
        # Load one .npy array from data_path through the project connector.
        connector = DataConnector(data_path, filename, data=None)
        connector.read_numpys()
        return connector.read_file

    def save_array(filename, array):
        # Persist one array back into data_path.
        DataConnector(data_path, filename, array).save_numpys()

    print("\n=========\n")
    sys.stdout.flush()
    print(str(datetime.now()))
    sys.stdout.flush()
    t0 = time.time()
    print("Pairing validation set...")
    sys.stdout.flush()

    X_valid = load_array('X_valid_r2.npy')
    y_valid_in = load_array('y_valid_in_r2.npy')
    y_valid_out = load_array('y_valid_out_r2.npy')

    seq_proc = SequenceProcessing(indices_words=None, words_indices=None, encoder_length=None, decoder_length=None)
    doc_pair, x_raw, y_in_raw, y_out_raw = seq_proc.pairing_data_(X_valid, y_valid_in, y_valid_out)
    x_pair_val, y_pair_val_in, y_pair_val_out = seq_proc.pad_sequences(encoder_length, decoder_length, x_raw, y_in_raw, y_out_raw)

    print("\nshape of x_pair in val set: %s\n"%str(x_pair_val.shape))
    print("\nshape of y_pair_in in val set: %s\n"%str(y_pair_val_in.shape))
    print("\nshape of y_pair_out in val set: %s\n"%str(y_pair_val_out.shape))

    # NOTE(review): validation doc pairs are stored under a 'test' filename --
    # confirm downstream readers expect 'doc_pair_test_r2.npy' for this split.
    save_array('doc_pair_test_r2.npy', doc_pair)
    save_array('x_pair_val_r2.npy', x_pair_val)
    save_array('y_pair_val_in_r2.npy', y_pair_val_in)
    save_array('y_pair_val_out_r2.npy', y_pair_val_out)

    t1 = time.time()
    print("Pairing validation set into sequences of inputs - outputs done in %.3fsec" % (t1 - t0))
    sys.stdout.flush()
def pair_sent_valid(params):
    """Pair sentence-level validation documents with their keyphrase targets.

    Reads the sentence-split validation arrays from ``params['data_path']``,
    expands into (document, keyphrase) pairs, pads encoder inputs to
    ``(max_sents, encoder_length)`` and decoder sequences to
    ``decoder_length + 1``, then saves the paired arrays.
    """
    data_path = params['data_path']
    valid_path = params['valid_path']  # read for config completeness; not used below
    max_sents = params['max_sents']
    encoder_length = params['encoder_length']
    decoder_length = params['decoder_length']

    def load_array(filename):
        # Load one .npy array from data_path through the project connector.
        connector = DataConnector(data_path, filename, data=None)
        connector.read_numpys()
        return connector.read_file

    def save_array(filename, array):
        # Persist one array back into data_path.
        DataConnector(data_path, filename, array).save_numpys()

    print("\n=========\n")
    sys.stdout.flush()
    print(str(datetime.now()))
    sys.stdout.flush()
    t0 = time.time()
    print("Pairing validation set...")
    sys.stdout.flush()

    X_valid = load_array('X_valid_sent.npy')
    y_valid_in = load_array('y_valid_sent_in.npy')
    y_valid_out = load_array('y_valid_sent_out.npy')

    seq_proc = SequenceProcessing(indices_words=None, words_indices=None, encoder_length=None, decoder_length=None)
    doc_pair, x_raw, y_in_raw, y_out_raw = seq_proc.pairing_data_(X_valid, y_valid_in, y_valid_out)
    x_pair_val = seq_proc.pad_sequences_sent_in(max_len=encoder_length, max_sents=max_sents, sequences=x_raw)
    y_pair_val_in = seq_proc.pad_sequences_in(max_len=decoder_length+1, sequences=y_in_raw)
    y_pair_val_out = seq_proc.pad_sequences_in(max_len=decoder_length+1, sequences=y_out_raw)

    print("\nshape of x_pair in val set: %s\n"%str(x_pair_val.shape))
    print("\nshape of y_pair_in in val set: %s\n"%str(y_pair_val_in.shape))
    print("\nshape of y_pair_out in val set: %s\n"%str(y_pair_val_out.shape))

    # NOTE(review): saved under a 'test' filename that pair_sent_test also
    # writes -- confirm this collision is intended.
    save_array('doc_pair_test_sent.npy', doc_pair)
    save_array('x_pair_val_sent.npy', x_pair_val)
    save_array('y_pair_val_sent_in.npy', y_pair_val_in)
    save_array('y_pair_val_sent_out.npy', y_pair_val_out)

    t1 = time.time()
    print("Pairing validation set into sequences of inputs - outputs done in %.3fsec" % (t1 - t0))
    sys.stdout.flush()
def pair_sent_valid_sub(params):
    """Pair the sentence-level sub-corpus ("_r2") validation split.

    Same pipeline as ``pair_sent_valid`` but operating on the ``*_r2.npy``
    variants under ``params['data_path']``.
    """
    data_path = params['data_path']
    max_sents = params['max_sents']
    encoder_length = params['encoder_length']
    decoder_length = params['decoder_length']

    def load_array(filename):
        # Load one .npy array from data_path through the project connector.
        connector = DataConnector(data_path, filename, data=None)
        connector.read_numpys()
        return connector.read_file

    def save_array(filename, array):
        # Persist one array back into data_path.
        DataConnector(data_path, filename, array).save_numpys()

    print(str(datetime.now()))
    sys.stdout.flush()
    t0 = time.time()
    print("Pairing validation set...")
    sys.stdout.flush()

    X_valid = load_array('X_valid_sent_r2.npy')
    y_valid_in = load_array('y_valid_sent_in_r2.npy')
    y_valid_out = load_array('y_valid_sent_out_r2.npy')

    seq_proc = SequenceProcessing(indices_words=None, words_indices=None, encoder_length=None, decoder_length=None)
    doc_pair, x_raw, y_in_raw, y_out_raw = seq_proc.pairing_data_(X_valid, y_valid_in, y_valid_out)
    x_pair_val = seq_proc.pad_sequences_sent_in(max_len=encoder_length, max_sents=max_sents, sequences=x_raw)
    y_pair_val_in = seq_proc.pad_sequences_in(max_len=decoder_length+1, sequences=y_in_raw)
    y_pair_val_out = seq_proc.pad_sequences_in(max_len=decoder_length+1, sequences=y_out_raw)

    print("\nshape of x_pair in val set: %s\n"%str(x_pair_val.shape))
    print("\nshape of y_pair_in in val set: %s\n"%str(y_pair_val_in.shape))
    print("\nshape of y_pair_out in val set: %s\n"%str(y_pair_val_out.shape))

    save_array('doc_pair_test_sent_r2.npy', doc_pair)
    save_array('x_pair_val_sent_r2.npy', x_pair_val)
    save_array('y_pair_val_sent_in_r2.npy', y_pair_val_in)
    save_array('y_pair_val_sent_out_r2.npy', y_pair_val_out)

    t1 = time.time()
    print("Pairing validation set into sequences of inputs - outputs done in %.3fsec" % (t1 - t0))
    sys.stdout.flush()
def pair_test(params):
    """Pair test documents with their keyphrase targets and pad them.

    Reads the X/y test arrays from ``params['preprocessed_data']``, expands
    each document into (document, keyphrase) pairs, pads to the configured
    encoder/decoder lengths, and writes the paired arrays back to the same
    directory.
    """
    data_path = params['data_path']  # read for config completeness; not used below
    kp20k_path = params['kp20k_path']  # not used below
    preprocessed_kp20k = params['preprocessed_kp20k']  # not used below
    preprocessed_data = params['preprocessed_data']
    encoder_length = params['encoder_length']
    decoder_length = params['decoder_length']

    def load_array(filename):
        # Load one .npy array from preprocessed_data through the project connector.
        connector = DataConnector(preprocessed_data, filename, data=None)
        connector.read_numpys()
        return connector.read_file

    def save_array(filename, array):
        # Persist one array back into preprocessed_data.
        DataConnector(preprocessed_data, filename, array).save_numpys()

    print("\n=========\n")
    sys.stdout.flush()
    print(str(datetime.now()))
    sys.stdout.flush()
    t0 = time.time()
    print("Pairing test data to train the model...")
    sys.stdout.flush()

    X_test = load_array('X_test.npy')
    y_test_in = load_array('y_test_in.npy')
    y_test_out = load_array('y_test_out.npy')

    seq_proc = SequenceProcessing(indices_words=None, words_indices=None, encoder_length=None, decoder_length=None)
    doc_pair, x_raw, y_in_raw, y_out_raw = seq_proc.pairing_data_(X_test, y_test_in, y_test_out)
    x_pair_test, y_pair_test_in, y_pair_test_out = seq_proc.pad_sequences(encoder_length, decoder_length, x_raw, y_in_raw, y_out_raw)

    print("\nshape of x_pair in test set: %s\n"%str(x_pair_test.shape))
    print("\nshape of y_pair_in in test set: %s\n"%str(y_pair_test_in.shape))
    print("\nshape of y_pair_out in test set: %s\n"%str(y_pair_test_out.shape))

    save_array('doc_pair_test.npy', doc_pair)
    save_array('x_pair_test.npy', x_pair_test)
    save_array('y_pair_test_in.npy', y_pair_test_in)
    save_array('y_pair_test_out.npy', y_pair_test_out)

    t1 = time.time()
    print("Pairing test set into sequences of inputs - outputs done in %.3fsec" % (t1 - t0))
    sys.stdout.flush()
def pair_test_sub(params):
    """Pair the sub-corpus ("_r2") test split and pad it.

    Same pipeline as ``pair_test`` but reading/writing the ``*_r2.npy``
    variants under ``params['data_path']``.
    """
    data_path = params['data_path']
    encoder_length = params['encoder_length']
    decoder_length = params['decoder_length']

    def load_array(filename):
        # Load one .npy array from data_path through the project connector.
        connector = DataConnector(data_path, filename, data=None)
        connector.read_numpys()
        return connector.read_file

    def save_array(filename, array):
        # Persist one array back into data_path.
        DataConnector(data_path, filename, array).save_numpys()

    print(str(datetime.now()))
    sys.stdout.flush()
    t0 = time.time()
    print("Pairing test data to train the model...")
    sys.stdout.flush()

    X_test = load_array('X_test_r2.npy')
    y_test_in = load_array('y_test_in_r2.npy')
    y_test_out = load_array('y_test_out_r2.npy')

    seq_proc = SequenceProcessing(indices_words=None, words_indices=None, encoder_length=None, decoder_length=None)
    doc_pair, x_raw, y_in_raw, y_out_raw = seq_proc.pairing_data_(X_test, y_test_in, y_test_out)
    x_pair_test, y_pair_test_in, y_pair_test_out = seq_proc.pad_sequences(encoder_length, decoder_length, x_raw, y_in_raw, y_out_raw)

    print("\nshape of x_pair in test set: %s\n"%str(x_pair_test.shape))
    print("\nshape of y_pair_in in test set: %s\n"%str(y_pair_test_in.shape))
    print("\nshape of y_pair_out in test set: %s\n"%str(y_pair_test_out.shape))

    # NOTE(review): the doubled '_r2_r2' suffix looks like a typo for
    # 'doc_pair_test_r2.npy' (that name is written by pair_valid_sub) --
    # kept as-is because downstream readers may depend on it; verify.
    save_array('doc_pair_test_r2_r2.npy', doc_pair)
    save_array('x_pair_test_r2.npy', x_pair_test)
    save_array('y_pair_test_in_r2.npy', y_pair_test_in)
    save_array('y_pair_test_out_r2.npy', y_pair_test_out)

    t1 = time.time()
    print("Pairing test set into sequences of inputs - outputs done in %.3fsec" % (t1 - t0))
    sys.stdout.flush()
def pair_sent_test(params):
    """Pair sentence-level test documents with their keyphrase targets.

    Reads the sentence-split test arrays from ``params['data_path']``, expands
    into (document, keyphrase) pairs, pads encoder inputs to
    ``(max_sents, encoder_length)`` and decoder sequences to
    ``decoder_length + 1``, then saves the paired arrays.
    """
    data_path = params['data_path']
    test_path = params['test_path']  # read for config completeness; not used below
    max_sents = params['max_sents']
    encoder_length = params['encoder_length']
    decoder_length = params['decoder_length']

    def load_array(filename):
        # Load one .npy array from data_path through the project connector.
        connector = DataConnector(data_path, filename, data=None)
        connector.read_numpys()
        return connector.read_file

    def save_array(filename, array):
        # Persist one array back into data_path.
        DataConnector(data_path, filename, array).save_numpys()

    print("\n=========\n")
    sys.stdout.flush()
    print(str(datetime.now()))
    sys.stdout.flush()
    t0 = time.time()
    print("Pairing test data to train the model...")
    sys.stdout.flush()

    X_test = load_array('X_test_sent.npy')
    y_test_in = load_array('y_test_sent_in.npy')
    y_test_out = load_array('y_test_sent_out.npy')

    seq_proc = SequenceProcessing(indices_words=None, words_indices=None, encoder_length=None, decoder_length=None)
    doc_pair, x_raw, y_in_raw, y_out_raw = seq_proc.pairing_data_(X_test, y_test_in, y_test_out)
    x_pair_test = seq_proc.pad_sequences_sent_in(max_len=encoder_length, max_sents=max_sents, sequences=x_raw)
    y_pair_test_in = seq_proc.pad_sequences_in(max_len=decoder_length+1, sequences=y_in_raw)
    y_pair_test_out = seq_proc.pad_sequences_in(max_len=decoder_length+1, sequences=y_out_raw)

    print("\nshape of x_pair in test set: %s\n"%str(x_pair_test.shape))
    print("\nshape of y_pair_in in test set: %s\n"%str(y_pair_test_in.shape))
    print("\nshape of y_pair_out in test set: %s\n"%str(y_pair_test_out.shape))

    save_array('doc_pair_test_sent.npy', doc_pair)
    save_array('x_pair_test_sent.npy', x_pair_test)
    save_array('y_pair_test_sent_in.npy', y_pair_test_in)
    save_array('y_pair_test_sent_out.npy', y_pair_test_out)

    t1 = time.time()
    print("Pairing test set into sequences of inputs - outputs done in %.3fsec" % (t1 - t0))
    sys.stdout.flush()
def pair_sent_test_sub(params):
    """Pair the sentence-level sub-corpus ("_r2") test split.

    Same pipeline as ``pair_sent_test`` but operating on the ``*_r2.npy``
    variants under ``params['data_path']``.
    """
    data_path = params['data_path']
    max_sents = params['max_sents']
    encoder_length = params['encoder_length']
    decoder_length = params['decoder_length']

    def load_array(filename):
        # Load one .npy array from data_path through the project connector.
        connector = DataConnector(data_path, filename, data=None)
        connector.read_numpys()
        return connector.read_file

    def save_array(filename, array):
        # Persist one array back into data_path.
        DataConnector(data_path, filename, array).save_numpys()

    print("\n=========\n")
    sys.stdout.flush()
    print(str(datetime.now()))
    sys.stdout.flush()
    t0 = time.time()
    print("Pairing test data to train the model...")
    sys.stdout.flush()

    X_test = load_array('X_test_sent_r2.npy')
    y_test_in = load_array('y_test_sent_in_r2.npy')
    y_test_out = load_array('y_test_sent_out_r2.npy')

    seq_proc = SequenceProcessing(indices_words=None, words_indices=None, encoder_length=None, decoder_length=None)
    doc_pair, x_raw, y_in_raw, y_out_raw = seq_proc.pairing_data_(X_test, y_test_in, y_test_out)
    x_pair_test = seq_proc.pad_sequences_sent_in(max_len=encoder_length, max_sents=max_sents, sequences=x_raw)
    y_pair_test_in = seq_proc.pad_sequences_in(max_len=decoder_length+1, sequences=y_in_raw)
    y_pair_test_out = seq_proc.pad_sequences_in(max_len=decoder_length+1, sequences=y_out_raw)

    print("\nshape of x_pair in test set: %s\n"%str(x_pair_test.shape))
    print("\nshape of y_pair_in in test set: %s\n"%str(y_pair_test_in.shape))
    print("\nshape of y_pair_out in test set: %s\n"%str(y_pair_test_out.shape))

    save_array('doc_pair_test_sent_r2.npy', doc_pair)
    save_array('x_pair_test_sent_r2.npy', x_pair_test)
    save_array('y_pair_test_sent_in_r2.npy', y_pair_test_in)
    save_array('y_pair_test_sent_out_r2.npy', y_pair_test_out)

    t1 = time.time()
    print("Pairing test set into sequences of inputs - outputs done in %.3fsec" % (t1 - t0))
    sys.stdout.flush()
'''
Get average number of key phrases per document in corpus
'''
def compute_average_keyphrases(params):
    """Print max / mean / std of the number of ground-truth key phrases per document.

    Loads the tokenized key phrases of the train, validation, and test splits,
    concatenates them, and reports per-document count statistics via the
    project's ``TrueKeyphrases`` helper.
    """
    data_path = params['data_path']  # read for config completeness; not used below
    train_path = params['train_path']
    valid_path = params['valid_path']
    test_path = params['test_path']

    def load_tokens(path, filename):
        # Load one tokenized .npy array through the project connector.
        connector = DataConnector(path, filename, data=None)
        connector.read_numpys()
        return connector.read_file

    print(str(datetime.now()))
    sys.stdout.flush()
    t0 = time.time()
    print("Computing average key phrases per document...")
    sys.stdout.flush()

    # Ground-truth key phrases of each split, concatenated into one corpus.
    train_kps = load_tokens(train_path, 'train_output_tokens.npy')
    valid_kps = load_tokens(valid_path, 'val_output_tokens.npy')
    test_kps = load_tokens(test_path, 'test_output_tokens.npy')
    all_keyphrases = np.concatenate((train_kps, valid_kps, test_kps))

    # Transform tokenized ground truth back into full key-phrase strings and
    # compute per-document count statistics.
    keyphrases_transform = TrueKeyphrases(all_keyphrases)
    keyphrases_transform.get_true_keyphrases()
    keyphrases_transform.get_stat_keyphrases()
    y_true = keyphrases_transform.y_true  # not used further here
    max_kp_num = keyphrases_transform.max_kp_num
    mean_kp_num = keyphrases_transform.mean_kp_num
    std_kp_num = keyphrases_transform.std_kp_num

    print("Maximum number of key phrases per document in corpus: %s" %max_kp_num)
    sys.stdout.flush()
    print("Average number of key phrases per document in corpus: %s" %mean_kp_num)
    sys.stdout.flush()
    print("Standard Deviation of number of key phrases per document in corpus: %s" %std_kp_num)
    sys.stdout.flush()
    t1 = time.time()
    print("Computing average key phrases done in %.3fsec" % (t1 - t0))
    sys.stdout.flush()
'''
compute max, average length of key phrases
'''
def compute_keyphrase_length(params):
    """Report max / mean / std of key-phrase length (in tokens) over the corpus.

    Loads the tokenized ground-truth key phrases of the train, validation, and
    test splits, concatenates them, and prints length statistics. Key phrases
    longer than ``params.get('kp_len_report_threshold', 20)`` tokens are
    printed individually for manual inspection (the threshold was previously a
    hard-coded 20; the default preserves the old behavior).
    """
    data_path = params['data_path']  # read for config completeness; not used below
    train_path = params['train_path']
    valid_path = params['valid_path']
    test_path = params['test_path']
    # Length above which an individual (suspiciously long) key phrase is dumped.
    report_threshold = params.get('kp_len_report_threshold', 20)
    print(str(datetime.now()))
    sys.stdout.flush()
    t0 = time.time()
    print("Computing statistics of key phrases per document...")
    sys.stdout.flush()

    def load_tokens(path, filename):
        # Load one tokenized .npy array through the project connector.
        connector = DataConnector(path, filename, data=None)
        connector.read_numpys()
        return connector.read_file

    train_kps = load_tokens(train_path, 'train_output_tokens.npy')
    valid_kps = load_tokens(valid_path, 'val_output_tokens.npy')
    test_kps = load_tokens(test_path, 'test_output_tokens.npy')
    all_keyphrases = np.concatenate((train_kps, valid_kps, test_kps))

    len_kps = []
    for i, kp_list in enumerate(all_keyphrases):
        for j, kp in enumerate(kp_list):
            len_kps.append(len(kp))
            if len(kp) > report_threshold:
                # Dump outliers so they can be inspected by hand.
                print("i,j: (%s, %s)"%(i,j))
                print("kp: %s"%kp)
    max_kps = max(len_kps)
    mean_kps = np.mean(np.array(len_kps))
    std_kps = np.std(np.array(len_kps))
    print("Maximum number of words per key phrase: %s" %max_kps)
    sys.stdout.flush()
    print("Average number of words per key phrase: %s" %mean_kps)
    sys.stdout.flush()
    print("Standard Deviation of number of words per key phrase: %s" %std_kps)
    sys.stdout.flush()
    t1 = time.time()
    print("Computing statistics of key phrases done in %.3fsec" % (t1 - t0))
    sys.stdout.flush()
def compute_presence_absence(params):
    """Print the corpus-level fraction of key phrases that appear verbatim in
    their source document (present) versus those that do not (absent)."""
    data_path = params['data_path']  # read for config completeness; not used below
    train_path = params['train_path']
    valid_path = params['valid_path']
    test_path = params['test_path']
    print(str(datetime.now()))
    sys.stdout.flush()
    t0 = time.time()
    print("Computing presence or absence of key phrases per document...")
    sys.stdout.flush()

    def load_tokens(path, filename):
        # Load one tokenized .npy array through the project connector.
        connector = DataConnector(path, filename, data=None)
        connector.read_numpys()
        return connector.read_file

    def split_counts(path, in_name, out_name):
        # Return (present_total, absent_total) key-phrase counts for one split.
        in_tokens = load_tokens(path, in_name)
        kps = load_tokens(path, out_name)
        proc = SequenceProcessing(indices_words=None, words_indices=None, encoder_length=None, decoder_length=None)
        n_present, n_absent = proc.compute_presence(in_tokens, kps)
        return np.sum(n_present), np.sum(n_absent)

    train_present, train_absent = split_counts(train_path, 'train_input_tokens.npy', 'train_output_tokens.npy')
    val_present, val_absent = split_counts(valid_path, 'val_input_tokens.npy', 'val_output_tokens.npy')
    test_present, test_absent = split_counts(test_path, 'test_input_tokens.npy', 'test_output_tokens.npy')

    n_presence = train_present + val_present + test_present
    n_absence = train_absent + val_absent + test_absent
    total = n_presence + n_absence

    persen_absence = n_absence / total
    persen_presence = n_presence / total
    print(" Absent key phrase: %s" %persen_absence)
    sys.stdout.flush()
    print(" Present key phrase: %s" %persen_presence)
    sys.stdout.flush()
    t1 = time.time()
    print("Computing presence or absence of key phrases per document done in %.3fsec" % (t1 - t0))
    sys.stdout.flush()
| [
"i.nimah@tue.nl"
] | i.nimah@tue.nl |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.