text stringlengths 2 99k | meta dict |
|---|---|
////////////////////////////////////////////////////////////////////////////////
//
// Licensed to the Apache Software Foundation (ASF) under one or more
// contributor license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright ownership.
// The ASF licenses this file to You under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance with
// the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
////////////////////////////////////////////////////////////////////////////////
package org.apache.royale.events.utils
{
/**
 * This class holds constants for special keys.
 * Each constant's value is the string produced in the `key` property of a
 * DOM KeyboardEvent, so they can be compared directly against event data.
 * See: https://w3c.github.io/uievents-key/#keys-ui
 * See: https://developer.mozilla.org/en-US/docs/Web/API/KeyboardEvent/key/Key_Values#UI_keys
 *
 * @langversion 3.0
 * @playerversion Flash 10.2
 * @playerversion AIR 2.6
 * @productversion Royale 0.8
 */
public class UIKeys
{
    /**
     * The Accept, Commit, or OK key or button. Accepts the currently selected option or input method sequence conversion.
     */
    public static const ACCEPT:String = "Accept";
    /**
     * The Again key. Redoes or repeats a previous action.
     */
    public static const AGAIN:String = "Again";
    /**
     * The Attn (Attention) key. Found on some legacy terminal-style keyboards.
     */
    public static const ATTN:String = "Attn";
    /**
     * The Cancel key.
     */
    public static const CANCEL:String = "Cancel";
    /**
     * Shows the context menu. Typically found between the Windows (or OS) key and the Control key on the right side of the keyboard.
     */
    public static const CONTEXT_MENU:String = "ContextMenu";
    /**
     * The Esc (Escape) key. Typically used as an exit, cancel, or "escape this operation" button. Historically, the Escape character was used to signal the start of a special control sequence of characters called an "escape sequence."
     */
    public static const ESCAPE:String = "Escape";
    /**
     * The Execute key.
     */
    public static const EXECUTE:String = "Execute";
    /**
     * The Find key. Opens an interface (typically a dialog box) for performing a find/search operation.
     */
    public static const FIND:String = "Find";
    /**
     * The Finish key. (Not part of the core UI Events key list; reported by some platforms.)
     */
    public static const FINISH:String = "Finish";
    /**
     * The Help key. Opens or toggles the display of help information.
     */
    public static const HELP:String = "Help";
    /**
     * The Pause key. Pauses the current application or state, if applicable.
     */
    public static const PAUSE:String = "Pause";
    /**
     * The Play key. Resumes a previously paused application, if applicable.
     */
    public static const PLAY:String = "Play";
    /**
     * The Props (Properties) key.
     */
    public static const PROPS:String = "Props";
    /**
     * The Select key.
     */
    public static const SELECT:String = "Select";
    /**
     * The ZoomIn key.
     */
    public static const ZOOM_IN:String = "ZoomIn";
    /**
     * The ZoomOut key.
     */
    public static const ZOOM_OUT:String = "ZoomOut";
}
}
| {
"pile_set_name": "Github"
} |
############################################################################
##
## Copyright (C) 2016 The Qt Company Ltd.
## Contact: https://www.qt.io/licensing/
##
## This file is part of the examples of Qt for Python.
##
## $QT_BEGIN_LICENSE:BSD$
## Commercial License Usage
## Licensees holding valid commercial Qt licenses may use this file in
## accordance with the commercial license agreement provided with the
## Software or, alternatively, in accordance with the terms contained in
## a written agreement between you and The Qt Company. For licensing terms
## and conditions see https://www.qt.io/terms-conditions. For further
## information use the contact form at https://www.qt.io/contact-us.
##
## BSD License Usage
## Alternatively, you may use this file under the terms of the BSD license
## as follows:
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are
## met:
## * Redistributions of source code must retain the above copyright
## notice, this list of conditions and the following disclaimer.
## * Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in
## the documentation and/or other materials provided with the
## distribution.
## * Neither the name of The Qt Company Ltd nor the names of its
## contributors may be used to endorse or promote products derived
## from this software without specific prior written permission.
##
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
## "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
## LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
## A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
## OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
## SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
## LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
## DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
## THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
## OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
## $QT_END_LICENSE$
##
############################################################################
from PySide2.QtCore import *
from PySide2.QtGui import *
from PySide2.QtSql import *
from PySide2.QtWidgets import *
def initializeModel(model):
    """Configure a QSqlRelationalTableModel for the 'employee' table.

    Columns 2 (city) and 3 (country) are resolved through QSqlRelation
    foreign keys so views show names instead of the raw integer ids.
    Edits are only written back on an explicit submitAll() because of
    the OnManualSubmit strategy.
    """
    #! [0]
    model.setTable("employee")
    #! [0]
    model.setEditStrategy(QSqlTableModel.OnManualSubmit)
    #! [1]
    model.setRelation(2, QSqlRelation("city", "id", "name"))
    #! [1] #! [2]
    model.setRelation(3, QSqlRelation("country", "id", "name"))
    #! [2]
    #! [3]
    model.setHeaderData(0, Qt.Horizontal, QObject.tr("ID"))
    model.setHeaderData(1, Qt.Horizontal, QObject.tr("Name"))
    model.setHeaderData(2, Qt.Horizontal, QObject.tr("City"))
    model.setHeaderData(3, Qt.Horizontal, QObject.tr("Country"))
    #! [3]
    model.select()
def createView(title, model):
    """Build and return a QTableView bound to *model*.

    A QSqlRelationalDelegate is installed so that relation columns are
    edited with combo boxes populated from the related tables.
    """
    #! [4]
    view = QTableView()
    view.setModel(model)
    view.setItemDelegate(QSqlRelationalDelegate(view))
    #! [4]
    view.setWindowTitle(title)
    return view
def createRelationalTables():
    """Create and populate the demo schema on the default connection.

    employee.city and employee.country hold foreign keys into the
    city and country lookup tables created below.
    """
    query = QSqlQuery()
    query.exec_("create table employee(id int primary key, name varchar(20), city int, country int)")
    query.exec_("insert into employee values(1, 'Espen', 5000, 47)")
    query.exec_("insert into employee values(2, 'Harald', 80000, 49)")
    query.exec_("insert into employee values(3, 'Sam', 100, 1)")
    query.exec_("create table city(id int, name varchar(20))")
    query.exec_("insert into city values(100, 'San Jose')")
    query.exec_("insert into city values(5000, 'Oslo')")
    query.exec_("insert into city values(80000, 'Munich')")
    query.exec_("create table country(id int, name varchar(20))")
    query.exec_("insert into country values(1, 'USA')")
    query.exec_("insert into country values(47, 'Norway')")
    query.exec_("insert into country values(49, 'Germany')")
def main():
    """Entry point: set up the database, model, and view, then run the app.

    Returns 1 if the database connection cannot be established,
    otherwise the Qt event loop's exit code.
    """
    app = QApplication([])
    # createConnection() is expected to be provided by the surrounding
    # example (connection.py); it returns False on failure.
    if not createConnection():
        return 1
    createRelationalTables()
    model = QSqlRelationalTableModel()
    initializeModel(model)
    view = createView(QObject.tr("Relational Table Model"), model)
    view.show()
    return app.exec_()
| {
"pile_set_name": "Github"
} |
#include "random.h"
#include "TFormula.h"
| {
"pile_set_name": "Github"
} |
<?php
/*+**********************************************************************************
* The contents of this file are subject to the vtiger CRM Public License Version 1.0
* ("License"); You may not use this file except in compliance with the License
* The Original Code is: vtiger CRM Open Source
* The Initial Developer of the Original Code is vtiger.
* Portions created by vtiger are Copyright (C) vtiger.
* All Rights Reserved.
************************************************************************************/
// Thin entry point: the actual quick-create handling is implemented
// in include/quickcreate.php.
require_once('include/quickcreate.php');
?>
| {
"pile_set_name": "Github"
} |
#region Copyright and license
// /*
// The MIT License (MIT)
//
// Copyright (c) 2014 BlackJet Software Ltd
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
// */
#endregion
using System.Text;
namespace MarkdownLog
{
/// <summary>
/// Base class for Markdown setext-style headers: each line of text is
/// emitted followed by a row of the underline character of equal length.
/// </summary>
public abstract class HeaderBase : MarkdownElement
{
    private readonly char _underlineChar;
    private readonly string _text = "";

    /// <param name="text">Header text; null is treated as empty.</param>
    /// <param name="underlineChar">Character used for the underline row.</param>
    protected HeaderBase(string text, char underlineChar)
    {
        _text = text ?? "";
        _underlineChar = underlineChar;
    }

    public override string ToMarkdown()
    {
        var output = new StringBuilder();
        var first = true;
        foreach (var line in _text.SplitByLine())
        {
            // Blank separator line between successive header entries.
            if (!first)
                output.AppendLine();
            first = false;

            var escaped = line.EscapeMarkdownCharacters();
            output.AppendLine(escaped);
            // Underline must match the escaped text's length exactly.
            output.Append(new string(_underlineChar, escaped.Length));
            output.AppendLine();
        }
        return output.ToString();
    }
}
} | {
"pile_set_name": "Github"
} |
/*
* This file is part of wlcore
*
* Copyright (C) 2011 Texas Instruments Inc.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
* 02110-1301 USA
*
*/
#ifndef __WLCORE_HW_OPS_H__
#define __WLCORE_HW_OPS_H__
#include "wlcore.h"
#include "rx.h"
/*
 * Thin dispatch wrappers around the chip-specific operations in wl->ops.
 * Three conventions are used below:
 *  - mandatory ops: BUG_ON(!wl->ops->foo) before calling;
 *  - optional ops with a result: fall back to a benign default
 *    (0, buf_offset, or -EINVAL for the opt-in feature ops);
 *  - optional ops with no result: silently skipped when unimplemented.
 */
static inline u32
wlcore_hw_calc_tx_blocks(struct wl1271 *wl, u32 len, u32 spare_blks)
{
        BUG_ON(!wl->ops->calc_tx_blocks);
        return wl->ops->calc_tx_blocks(wl, len, spare_blks);
}
static inline void
wlcore_hw_set_tx_desc_blocks(struct wl1271 *wl, struct wl1271_tx_hw_descr *desc,
                             u32 blks, u32 spare_blks)
{
        BUG_ON(!wl->ops->set_tx_desc_blocks);
        wl->ops->set_tx_desc_blocks(wl, desc, blks, spare_blks);
}
static inline void
wlcore_hw_set_tx_desc_data_len(struct wl1271 *wl,
                               struct wl1271_tx_hw_descr *desc,
                               struct sk_buff *skb)
{
        BUG_ON(!wl->ops->set_tx_desc_data_len);
        wl->ops->set_tx_desc_data_len(wl, desc, skb);
}
static inline enum wl_rx_buf_align
wlcore_hw_get_rx_buf_align(struct wl1271 *wl, u32 rx_desc)
{
        BUG_ON(!wl->ops->get_rx_buf_align);
        return wl->ops->get_rx_buf_align(wl, rx_desc);
}
static inline int
wlcore_hw_prepare_read(struct wl1271 *wl, u32 rx_desc, u32 len)
{
        if (wl->ops->prepare_read)
                return wl->ops->prepare_read(wl, rx_desc, len);
        return 0;
}
static inline u32
wlcore_hw_get_rx_packet_len(struct wl1271 *wl, void *rx_data, u32 data_len)
{
        BUG_ON(!wl->ops->get_rx_packet_len);
        return wl->ops->get_rx_packet_len(wl, rx_data, data_len);
}
static inline int wlcore_hw_tx_delayed_compl(struct wl1271 *wl)
{
        if (wl->ops->tx_delayed_compl)
                return wl->ops->tx_delayed_compl(wl);
        return 0;
}
static inline void wlcore_hw_tx_immediate_compl(struct wl1271 *wl)
{
        if (wl->ops->tx_immediate_compl)
                wl->ops->tx_immediate_compl(wl);
}
static inline int
wlcore_hw_init_vif(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
        if (wl->ops->init_vif)
                return wl->ops->init_vif(wl, wlvif);
        return 0;
}
static inline void
wlcore_hw_convert_fw_status(struct wl1271 *wl, void *raw_fw_status,
                            struct wl_fw_status *fw_status)
{
        BUG_ON(!wl->ops->convert_fw_status);
        wl->ops->convert_fw_status(wl, raw_fw_status, fw_status);
}
static inline u32
wlcore_hw_sta_get_ap_rate_mask(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
        BUG_ON(!wl->ops->sta_get_ap_rate_mask);
        return wl->ops->sta_get_ap_rate_mask(wl, wlvif);
}
static inline int wlcore_identify_fw(struct wl1271 *wl)
{
        if (wl->ops->identify_fw)
                return wl->ops->identify_fw(wl);
        return 0;
}
static inline void
wlcore_hw_set_tx_desc_csum(struct wl1271 *wl,
                           struct wl1271_tx_hw_descr *desc,
                           struct sk_buff *skb)
{
        BUG_ON(!wl->ops->set_tx_desc_csum);
        wl->ops->set_tx_desc_csum(wl, desc, skb);
}
static inline void
wlcore_hw_set_rx_csum(struct wl1271 *wl,
                      struct wl1271_rx_descriptor *desc,
                      struct sk_buff *skb)
{
        if (wl->ops->set_rx_csum)
                wl->ops->set_rx_csum(wl, desc, skb);
}
static inline u32
wlcore_hw_ap_get_mimo_wide_rate_mask(struct wl1271 *wl,
                                     struct wl12xx_vif *wlvif)
{
        if (wl->ops->ap_get_mimo_wide_rate_mask)
                return wl->ops->ap_get_mimo_wide_rate_mask(wl, wlvif);
        return 0;
}
static inline int
wlcore_debugfs_init(struct wl1271 *wl, struct dentry *rootdir)
{
        if (wl->ops->debugfs_init)
                return wl->ops->debugfs_init(wl, rootdir);
        return 0;
}
static inline int
wlcore_handle_static_data(struct wl1271 *wl, void *static_data)
{
        if (wl->ops->handle_static_data)
                return wl->ops->handle_static_data(wl, static_data);
        return 0;
}
static inline int
wlcore_hw_get_spare_blocks(struct wl1271 *wl, bool is_gem)
{
        BUG_ON(!wl->ops->get_spare_blocks);
        return wl->ops->get_spare_blocks(wl, is_gem);
}
static inline int
wlcore_hw_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
                  struct ieee80211_vif *vif,
                  struct ieee80211_sta *sta,
                  struct ieee80211_key_conf *key_conf)
{
        BUG_ON(!wl->ops->set_key);
        return wl->ops->set_key(wl, cmd, vif, sta, key_conf);
}
static inline u32
wlcore_hw_pre_pkt_send(struct wl1271 *wl, u32 buf_offset, u32 last_len)
{
        if (wl->ops->pre_pkt_send)
                return wl->ops->pre_pkt_send(wl, buf_offset, last_len);
        /* no fixup needed: keep the caller's offset unchanged */
        return buf_offset;
}
static inline void
wlcore_hw_sta_rc_update(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
        if (wl->ops->sta_rc_update)
                wl->ops->sta_rc_update(wl, wlvif);
}
static inline int
wlcore_hw_interrupt_notify(struct wl1271 *wl, bool action)
{
        if (wl->ops->interrupt_notify)
                return wl->ops->interrupt_notify(wl, action);
        return 0;
}
static inline int
wlcore_hw_rx_ba_filter(struct wl1271 *wl, bool action)
{
        if (wl->ops->rx_ba_filter)
                return wl->ops->rx_ba_filter(wl, action);
        return 0;
}
static inline int
wlcore_hw_ap_sleep(struct wl1271 *wl)
{
        if (wl->ops->ap_sleep)
                return wl->ops->ap_sleep(wl);
        return 0;
}
static inline int
wlcore_hw_set_peer_cap(struct wl1271 *wl,
                       struct ieee80211_sta_ht_cap *ht_cap,
                       bool allow_ht_operation,
                       u32 rate_set, u8 hlid)
{
        if (wl->ops->set_peer_cap)
                return wl->ops->set_peer_cap(wl, ht_cap, allow_ht_operation,
                                             rate_set, hlid);
        return 0;
}
static inline u32
wlcore_hw_convert_hwaddr(struct wl1271 *wl, u32 hwaddr)
{
        BUG_ON(!wl->ops->convert_hwaddr);
        return wl->ops->convert_hwaddr(wl, hwaddr);
}
static inline bool
wlcore_hw_lnk_high_prio(struct wl1271 *wl, u8 hlid,
                        struct wl1271_link *lnk)
{
        BUG_ON(!wl->ops->lnk_high_prio);
        return wl->ops->lnk_high_prio(wl, hlid, lnk);
}
static inline bool
wlcore_hw_lnk_low_prio(struct wl1271 *wl, u8 hlid,
                       struct wl1271_link *lnk)
{
        BUG_ON(!wl->ops->lnk_low_prio);
        return wl->ops->lnk_low_prio(wl, hlid, lnk);
}
/*
 * The remaining ops are opt-in features; a missing implementation is a
 * caller-visible error (-EINVAL) rather than a bug.
 */
static inline int
wlcore_smart_config_start(struct wl1271 *wl, u32 group_bitmap)
{
        if (!wl->ops->smart_config_start)
                return -EINVAL;
        return wl->ops->smart_config_start(wl, group_bitmap);
}
static inline int
wlcore_smart_config_stop(struct wl1271 *wl)
{
        if (!wl->ops->smart_config_stop)
                return -EINVAL;
        return wl->ops->smart_config_stop(wl);
}
static inline int
wlcore_smart_config_set_group_key(struct wl1271 *wl, u16 group_id,
                                  u8 key_len, u8 *key)
{
        if (!wl->ops->smart_config_set_group_key)
                return -EINVAL;
        return wl->ops->smart_config_set_group_key(wl, group_id, key_len, key);
}
static inline int
wlcore_hw_set_cac(struct wl1271 *wl, struct wl12xx_vif *wlvif, bool start)
{
        if (!wl->ops->set_cac)
                return -EINVAL;
        return wl->ops->set_cac(wl, wlvif, start);
}
static inline int
wlcore_hw_dfs_master_restart(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
        if (!wl->ops->dfs_master_restart)
                return -EINVAL;
        return wl->ops->dfs_master_restart(wl, wlvif);
}
#endif
| {
"pile_set_name": "Github"
} |
using System.Text;
using SunEngine.Core.DataBase;
using SunEngine.Core.Utils;
namespace SunEngine.Core.Services
{
public interface IImagesNamesService
{
    /// <summary>
    /// Builds a new random image file name (with extension <paramref name="ext"/>)
    /// together with the sub-directory it should be stored in.
    /// </summary>
    FileAndDir GetNewImageNameAndDir(string ext);
}
/// <summary>
/// Immutable pair of a file name and its containing directory,
/// with the combined relative path precomputed.
/// </summary>
public class FileAndDir
{
    public string File { get; }
    public string Dir { get; }
    public string Path { get; }

    public FileAndDir(string file, string dir)
    {
        File = file;
        Dir = dir;
        // Combine once up front so Path is cheap to reuse.
        Path = System.IO.Path.Combine(dir, file);
    }

    public override string ToString() => Path;
}
public class ImagesNamesService : IImagesNamesService
{
    /// <summary>
    /// Generates a random image file name plus a bucket directory derived
    /// from the name's first UTF-8 byte (a numeric folder name, 0-255).
    /// </summary>
    /// <param name="ext">File extension appended verbatim; the sizing below assumes
    /// it is at most 4 characters including the dot, e.g. ".jpg" — confirm with callers.</param>
    public FileAndDir GetNewImageNameAndDir(string ext)
    {
        var cid = CryptoRandomizer.GetRandomString(
            DbColumnSizes.FileNameWithDirSize -
            8); // -8 reserves 4 chars for a "123/" directory prefix and 4 for a ".jpg"-style extension
        byte[] bites = Encoding.UTF8.GetBytes(cid);
        // First byte of the random name selects the directory bucket.
        return new FileAndDir(cid + ext, bites[0].ToString());
    }
}
} | {
"pile_set_name": "Github"
} |
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.painless.ir;
import org.elasticsearch.painless.ClassWriter;
import org.elasticsearch.painless.MethodWriter;
import org.elasticsearch.painless.phase.IRTreeVisitor;
import org.elasticsearch.painless.symbol.WriteScope;
/**
 * IR node that duplicates its child's result on the operand stack:
 * the child is written first, then {@code MethodWriter#writeDup(size, depth)}
 * emits the duplication using this node's {@code size} and {@code depth}.
 */
public class DupNode extends UnaryNode {

    /* ---- begin node data ---- */

    // forwarded unchanged to writeDup; presumably the width of the value
    // being duplicated — confirm against MethodWriter.writeDup
    private int size;
    // forwarded unchanged to writeDup; presumably how far down the stack
    // the duplicate is inserted — confirm against MethodWriter.writeDup
    private int depth;

    public void setSize(int size) {
        this.size = size;
    }

    public int getSize() {
        return size;
    }

    public void setDepth(int depth) {
        this.depth = depth;
    }

    public int getDepth() {
        return depth;
    }

    /* ---- end node data, begin visitor ---- */

    /** Dispatches to the visitor's dup handler for this node. */
    @Override
    public <Scope> void visit(IRTreeVisitor<Scope> irTreeVisitor, Scope scope) {
        irTreeVisitor.visitDup(this, scope);
    }

    /** Visits the single child expression of this unary node. */
    @Override
    public <Scope> void visitChildren(IRTreeVisitor<Scope> irTreeVisitor, Scope scope) {
        getChildNode().visit(irTreeVisitor, scope);
    }

    /* ---- end visitor ---- */

    @Override
    protected void write(ClassWriter classWriter, MethodWriter methodWriter, WriteScope writeScope) {
        // Emit the child's bytecode, then duplicate its result.
        getChildNode().write(classWriter, methodWriter, writeScope);
        methodWriter.writeDup(size, depth);
    }
}
| {
"pile_set_name": "Github"
} |
// Copyright (c) 2008-2020, Hazelcast, Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package proto
import (
"github.com/hazelcast/hazelcast-go-client/serialization"
)
// queuePutCalculateSize returns the request payload size for a queue
// put: the encoded queue name plus the encoded value.
func queuePutCalculateSize(name string, value serialization.Data) int {
	return stringCalculateSize(name) + dataCalculateSize(value)
}
// QueuePutEncodeRequest creates and encodes a queue-put client message
// carrying the queue name and the serialized value.
// The message is marked non-retryable (IsRetryable = false).
// It returns the encoded client message with its frame length finalized.
func QueuePutEncodeRequest(name string, value serialization.Data) *ClientMessage {
	// Encode request into clientMessage
	clientMessage := NewClientMessage(nil, queuePutCalculateSize(name, value))
	clientMessage.SetMessageType(queuePut)
	clientMessage.IsRetryable = false
	clientMessage.AppendString(name)
	clientMessage.AppendData(value)
	clientMessage.UpdateFrameLength()
	return clientMessage
}
// QueuePutDecodeResponse(clientMessage *ClientMessage), this message has no parameters to decode
| {
"pile_set_name": "Github"
} |
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
"http://www.w3.org/TR/html4/loose.dtd">
<html>
<head>
<title>Minim : : Line : : setLineTime</title>
<meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1">
<link href="stylesheet.css" rel="stylesheet" type="text/css">
</head>
<body>
<center>
<table class="mainTable">
<tr>
<td class="header">
<span class="indexheader">Minim</span><br/>
<span class="indexnavigation">
<a href="index.html">core</a> |
<a href="index_ugens.html">ugens</a> |
<a href="index_analysis.html">analysis</a>
</span>
</td>
<td class="border-left"> </td>
</tr>
<tr>
<td class="classNavigation">
<p class="mainTextName"><A href="line_class_line.html">Line</A></p>
<p class="methodName">setLineTime</p>
</td>
<td class="mainText border-left">
<p class="memberSectionHeader">Description</p>
Set the length of this Line's transition.
<p class="memberSectionHeader">Signature</p>
<pre>void setLineTime(float newLineTime)
</pre>
<p class="memberSectionHeader">Parameters</p>
<span class="parameterName">newLineTime</span> — <span class="parameterDescription">float: the new transition time (in seconds)</span><br/>
<p class="memberSectionHeader">Returns</p>
<p>None</p>
<p class="memberSectionHeader">Related</p>
<p class="memberSectionHeader">Example</p>
<pre>None available</pre>
<p class="memberSectionHeader">Usage</p>
Web &amp; Application
</td>
</tr>
</table>
</center>
</body>
</html>
| {
"pile_set_name": "Github"
} |
from __future__ import division, absolute_import, print_function
from decimal import Decimal
import numpy as np
from numpy.testing import (
run_module_suite, assert_, assert_almost_equal, assert_allclose,
assert_equal, assert_raises
)
class TestFinancial(object):
def test_rate(self):
    # NOTE(review): np.rate was deprecated in NumPy 1.18 and removed in
    # 1.20 (moved to the numpy_financial package) -- confirm the NumPy
    # version this repo pins.
    # 10 periods, no intermediate payments, pv=-3500, fv=10000
    # => per-period rate of ~11.07%, checked to 4 decimal places.
    assert_almost_equal(
        np.rate(10, 0, -3500, 10000),
        0.1107, 4)
def test_rate_decimal(self):
rate = np.rate(Decimal('10'), Decimal('0'), Decimal('-3500'), Decimal('10000'))
assert_equal(Decimal('0.1106908537142689284704528100'), rate)
def test_irr(self):
    # Each case: a cash-flow series (first element is the initial
    # outlay) and its expected internal rate of return, checked to
    # 2 decimal places.
    v = [-150000, 15000, 25000, 35000, 45000, 60000]
    assert_almost_equal(np.irr(v), 0.0524, 2)
    v = [-100, 0, 0, 74]
    assert_almost_equal(np.irr(v), -0.0955, 2)
    v = [-100, 39, 59, 55, 20]
    assert_almost_equal(np.irr(v), 0.28095, 2)
    v = [-100, 100, 0, -7]
    assert_almost_equal(np.irr(v), -0.0833, 2)
    v = [-100, 100, 0, 7]
    assert_almost_equal(np.irr(v), 0.06206, 2)
    v = [-5, 10.5, 1, -8, 1]
    assert_almost_equal(np.irr(v), 0.0886, 2)
    # Test that if there is no solution then np.irr returns nan
    # Fixes gh-6744
    v = [-1, -2, -3]
    assert_equal(np.irr(v), np.nan)
def test_pv(self):
assert_almost_equal(np.pv(0.07, 20, 12000, 0), -127128.17, 2)
def test_pv_decimal(self):
assert_equal(np.pv(Decimal('0.07'), Decimal('20'), Decimal('12000'), Decimal('0')),
Decimal('-127128.1709461939327295222005'))
def test_fv(self):
assert_equal(np.fv(0.075, 20, -2000, 0, 0), 86609.362673042924)
def test_fv_decimal(self):
assert_equal(np.fv(Decimal('0.075'), Decimal('20'), Decimal('-2000'), 0, 0),
Decimal('86609.36267304300040536731624'))
def test_pmt(self):
res = np.pmt(0.08 / 12, 5 * 12, 15000)
tgt = -304.145914
assert_allclose(res, tgt)
# Test the edge case where rate == 0.0
res = np.pmt(0.0, 5 * 12, 15000)
tgt = -250.0
assert_allclose(res, tgt)
# Test the case where we use broadcast and
# the arguments passed in are arrays.
res = np.pmt([[0.0, 0.8], [0.3, 0.8]], [12, 3], [2000, 20000])
tgt = np.array([[-166.66667, -19311.258], [-626.90814, -19311.258]])
assert_allclose(res, tgt)
def test_pmt_decimal(self):
res = np.pmt(Decimal('0.08') / Decimal('12'), 5 * 12, 15000)
tgt = Decimal('-304.1459143262052370338701494')
assert_equal(res, tgt)
# Test the edge case where rate == 0.0
res = np.pmt(Decimal('0'), Decimal('60'), Decimal('15000'))
tgt = -250
assert_equal(res, tgt)
# Test the case where we use broadcast and
# the arguments passed in are arrays.
res = np.pmt([[Decimal('0'), Decimal('0.8')], [Decimal('0.3'), Decimal('0.8')]],
[Decimal('12'), Decimal('3')], [Decimal('2000'), Decimal('20000')])
tgt = np.array([[Decimal('-166.6666666666666666666666667'), Decimal('-19311.25827814569536423841060')],
[Decimal('-626.9081401700757748402586600'), Decimal('-19311.25827814569536423841060')]])
# Cannot use the `assert_allclose` because it uses isfinite under the covers
# which does not support the Decimal type
# See issue: https://github.com/numpy/numpy/issues/9954
assert_equal(res[0][0], tgt[0][0])
assert_equal(res[0][1], tgt[0][1])
assert_equal(res[1][0], tgt[1][0])
assert_equal(res[1][1], tgt[1][1])
def test_ppmt(self):
assert_equal(np.round(np.ppmt(0.1 / 12, 1, 60, 55000), 2), -710.25)
def test_ppmt_decimal(self):
assert_equal(np.ppmt(Decimal('0.1') / Decimal('12'), Decimal('1'), Decimal('60'), Decimal('55000')),
Decimal('-710.2541257864217612489830917'))
# Two tests showing how Decimal is actually getting at a more exact result
# .23 / 12 does not come out nicely as a float but does as a decimal
def test_ppmt_special_rate(self):
assert_equal(np.round(np.ppmt(0.23 / 12, 1, 60, 10000000000), 8), -90238044.232277036)
def test_ppmt_special_rate_decimal(self):
# When rounded out to 8 decimal places like the float based test, this should not equal the same value
# as the float, substituted for the decimal
def raise_error_because_not_equal():
assert_equal(
round(np.ppmt(Decimal('0.23') / Decimal('12'), 1, 60, Decimal('10000000000')), 8),
Decimal('-90238044.232277036'))
assert_raises(AssertionError, raise_error_because_not_equal)
assert_equal(np.ppmt(Decimal('0.23') / Decimal('12'), 1, 60, Decimal('10000000000')),
Decimal('-90238044.2322778884413969909'))
def test_ipmt(self):
assert_almost_equal(np.round(np.ipmt(0.1 / 12, 1, 24, 2000), 2), -16.67)
def test_ipmt_decimal(self):
result = np.ipmt(Decimal('0.1') / Decimal('12'), 1, 24, 2000)
assert_equal(result.flat[0], Decimal('-16.66666666666666666666666667'))
def test_nper(self):
assert_almost_equal(np.nper(0.075, -2000, 0, 100000.),
21.54, 2)
def test_nper2(self):
assert_almost_equal(np.nper(0.0, -2000, 0, 100000.),
50.0, 1)
def test_npv(self):
assert_almost_equal(
np.npv(0.05, [-15000, 1500, 2500, 3500, 4500, 6000]),
122.89, 2)
def test_npv_decimal(self):
assert_equal(
np.npv(Decimal('0.05'), [-15000, 1500, 2500, 3500, 4500, 6000]),
Decimal('122.894854950942692161628715'))
def test_mirr(self):
val = [-4500, -800, 800, 800, 600, 600, 800, 800, 700, 3000]
assert_almost_equal(np.mirr(val, 0.08, 0.055), 0.0666, 4)
val = [-120000, 39000, 30000, 21000, 37000, 46000]
assert_almost_equal(np.mirr(val, 0.10, 0.12), 0.126094, 6)
val = [100, 200, -50, 300, -200]
assert_almost_equal(np.mirr(val, 0.05, 0.06), 0.3428, 4)
val = [39000, 30000, 21000, 37000, 46000]
assert_(np.isnan(np.mirr(val, 0.10, 0.12)))
def test_mirr_decimal(self):
val = [Decimal('-4500'), Decimal('-800'), Decimal('800'), Decimal('800'),
Decimal('600'), Decimal('600'), Decimal('800'), Decimal('800'),
Decimal('700'), Decimal('3000')]
assert_equal(np.mirr(val, Decimal('0.08'), Decimal('0.055')),
Decimal('0.066597175031553548874239618'))
val = [Decimal('-120000'), Decimal('39000'), Decimal('30000'),
Decimal('21000'), Decimal('37000'), Decimal('46000')]
assert_equal(np.mirr(val, Decimal('0.10'), Decimal('0.12')), Decimal('0.126094130365905145828421880'))
val = [Decimal('100'), Decimal('200'), Decimal('-50'),
Decimal('300'), Decimal('-200')]
assert_equal(np.mirr(val, Decimal('0.05'), Decimal('0.06')), Decimal('0.342823387842176663647819868'))
val = [Decimal('39000'), Decimal('30000'), Decimal('21000'), Decimal('37000'), Decimal('46000')]
assert_(np.isnan(np.mirr(val, Decimal('0.10'), Decimal('0.12'))))
def test_when(self):
# begin
assert_equal(np.rate(10, 20, -3500, 10000, 1),
np.rate(10, 20, -3500, 10000, 'begin'))
# end
assert_equal(np.rate(10, 20, -3500, 10000),
np.rate(10, 20, -3500, 10000, 'end'))
assert_equal(np.rate(10, 20, -3500, 10000, 0),
np.rate(10, 20, -3500, 10000, 'end'))
# begin
assert_equal(np.pv(0.07, 20, 12000, 0, 1),
np.pv(0.07, 20, 12000, 0, 'begin'))
# end
assert_equal(np.pv(0.07, 20, 12000, 0),
np.pv(0.07, 20, 12000, 0, 'end'))
assert_equal(np.pv(0.07, 20, 12000, 0, 0),
np.pv(0.07, 20, 12000, 0, 'end'))
# begin
assert_equal(np.fv(0.075, 20, -2000, 0, 1),
np.fv(0.075, 20, -2000, 0, 'begin'))
# end
assert_equal(np.fv(0.075, 20, -2000, 0),
np.fv(0.075, 20, -2000, 0, 'end'))
assert_equal(np.fv(0.075, 20, -2000, 0, 0),
np.fv(0.075, 20, -2000, 0, 'end'))
# begin
assert_equal(np.pmt(0.08 / 12, 5 * 12, 15000., 0, 1),
np.pmt(0.08 / 12, 5 * 12, 15000., 0, 'begin'))
# end
assert_equal(np.pmt(0.08 / 12, 5 * 12, 15000., 0),
np.pmt(0.08 / 12, 5 * 12, 15000., 0, 'end'))
assert_equal(np.pmt(0.08 / 12, 5 * 12, 15000., 0, 0),
np.pmt(0.08 / 12, 5 * 12, 15000., 0, 'end'))
# begin
assert_equal(np.ppmt(0.1 / 12, 1, 60, 55000, 0, 1),
np.ppmt(0.1 / 12, 1, 60, 55000, 0, 'begin'))
# end
assert_equal(np.ppmt(0.1 / 12, 1, 60, 55000, 0),
np.ppmt(0.1 / 12, 1, 60, 55000, 0, 'end'))
assert_equal(np.ppmt(0.1 / 12, 1, 60, 55000, 0, 0),
np.ppmt(0.1 / 12, 1, 60, 55000, 0, 'end'))
# begin
assert_equal(np.ipmt(0.1 / 12, 1, 24, 2000, 0, 1),
np.ipmt(0.1 / 12, 1, 24, 2000, 0, 'begin'))
# end
assert_equal(np.ipmt(0.1 / 12, 1, 24, 2000, 0),
np.ipmt(0.1 / 12, 1, 24, 2000, 0, 'end'))
assert_equal(np.ipmt(0.1 / 12, 1, 24, 2000, 0, 0),
np.ipmt(0.1 / 12, 1, 24, 2000, 0, 'end'))
# begin
assert_equal(np.nper(0.075, -2000, 0, 100000., 1),
np.nper(0.075, -2000, 0, 100000., 'begin'))
# end
assert_equal(np.nper(0.075, -2000, 0, 100000.),
np.nper(0.075, -2000, 0, 100000., 'end'))
assert_equal(np.nper(0.075, -2000, 0, 100000., 0),
np.nper(0.075, -2000, 0, 100000., 'end'))
def test_decimal_with_when(self):
"""Test that decimals are still supported if the when argument is passed"""
# begin
assert_equal(np.rate(Decimal('10'), Decimal('20'), Decimal('-3500'), Decimal('10000'), Decimal('1')),
np.rate(Decimal('10'), Decimal('20'), Decimal('-3500'), Decimal('10000'), 'begin'))
# end
assert_equal(np.rate(Decimal('10'), Decimal('20'), Decimal('-3500'), Decimal('10000')),
np.rate(Decimal('10'), Decimal('20'), Decimal('-3500'), Decimal('10000'), 'end'))
assert_equal(np.rate(Decimal('10'), Decimal('20'), Decimal('-3500'), Decimal('10000'), Decimal('0')),
np.rate(Decimal('10'), Decimal('20'), Decimal('-3500'), Decimal('10000'), 'end'))
# begin
assert_equal(np.pv(Decimal('0.07'), Decimal('20'), Decimal('12000'), Decimal('0'), Decimal('1')),
np.pv(Decimal('0.07'), Decimal('20'), Decimal('12000'), Decimal('0'), 'begin'))
# end
assert_equal(np.pv(Decimal('0.07'), Decimal('20'), Decimal('12000'), Decimal('0')),
np.pv(Decimal('0.07'), Decimal('20'), Decimal('12000'), Decimal('0'), 'end'))
assert_equal(np.pv(Decimal('0.07'), Decimal('20'), Decimal('12000'), Decimal('0'), Decimal('0')),
np.pv(Decimal('0.07'), Decimal('20'), Decimal('12000'), Decimal('0'), 'end'))
# begin
assert_equal(np.fv(Decimal('0.075'), Decimal('20'), Decimal('-2000'), Decimal('0'), Decimal('1')),
np.fv(Decimal('0.075'), Decimal('20'), Decimal('-2000'), Decimal('0'), 'begin'))
# end
assert_equal(np.fv(Decimal('0.075'), Decimal('20'), Decimal('-2000'), Decimal('0')),
np.fv(Decimal('0.075'), Decimal('20'), Decimal('-2000'), Decimal('0'), 'end'))
assert_equal(np.fv(Decimal('0.075'), Decimal('20'), Decimal('-2000'), Decimal('0'), Decimal('0')),
np.fv(Decimal('0.075'), Decimal('20'), Decimal('-2000'), Decimal('0'), 'end'))
# begin
assert_equal(np.pmt(Decimal('0.08') / Decimal('12'), Decimal('5') * Decimal('12'), Decimal('15000.'),
Decimal('0'), Decimal('1')),
np.pmt(Decimal('0.08') / Decimal('12'), Decimal('5') * Decimal('12'), Decimal('15000.'),
Decimal('0'), 'begin'))
# end
assert_equal(np.pmt(Decimal('0.08') / Decimal('12'), Decimal('5') * Decimal('12'), Decimal('15000.'),
Decimal('0')),
np.pmt(Decimal('0.08') / Decimal('12'), Decimal('5') * Decimal('12'), Decimal('15000.'),
Decimal('0'), 'end'))
assert_equal(np.pmt(Decimal('0.08') / Decimal('12'), Decimal('5') * Decimal('12'), Decimal('15000.'),
Decimal('0'), Decimal('0')),
np.pmt(Decimal('0.08') / Decimal('12'), Decimal('5') * Decimal('12'), Decimal('15000.'),
Decimal('0'), 'end'))
# begin
assert_equal(np.ppmt(Decimal('0.1') / Decimal('12'), Decimal('1'), Decimal('60'), Decimal('55000'),
Decimal('0'), Decimal('1')),
np.ppmt(Decimal('0.1') / Decimal('12'), Decimal('1'), Decimal('60'), Decimal('55000'),
Decimal('0'), 'begin'))
# end
assert_equal(np.ppmt(Decimal('0.1') / Decimal('12'), Decimal('1'), Decimal('60'), Decimal('55000'),
Decimal('0')),
np.ppmt(Decimal('0.1') / Decimal('12'), Decimal('1'), Decimal('60'), Decimal('55000'),
Decimal('0'), 'end'))
assert_equal(np.ppmt(Decimal('0.1') / Decimal('12'), Decimal('1'), Decimal('60'), Decimal('55000'),
Decimal('0'), Decimal('0')),
np.ppmt(Decimal('0.1') / Decimal('12'), Decimal('1'), Decimal('60'), Decimal('55000'),
Decimal('0'), 'end'))
# begin
assert_equal(np.ipmt(Decimal('0.1') / Decimal('12'), Decimal('1'), Decimal('24'), Decimal('2000'),
Decimal('0'), Decimal('1')).flat[0],
np.ipmt(Decimal('0.1') / Decimal('12'), Decimal('1'), Decimal('24'), Decimal('2000'),
Decimal('0'), 'begin').flat[0])
# end
assert_equal(np.ipmt(Decimal('0.1') / Decimal('12'), Decimal('1'), Decimal('24'), Decimal('2000'),
Decimal('0')).flat[0],
np.ipmt(Decimal('0.1') / Decimal('12'), Decimal('1'), Decimal('24'), Decimal('2000'),
Decimal('0'), 'end').flat[0])
assert_equal(np.ipmt(Decimal('0.1') / Decimal('12'), Decimal('1'), Decimal('24'), Decimal('2000'),
Decimal('0'), Decimal('0')).flat[0],
np.ipmt(Decimal('0.1') / Decimal('12'), Decimal('1'), Decimal('24'), Decimal('2000'),
Decimal('0'), 'end').flat[0])
def test_broadcast(self):
assert_almost_equal(np.nper(0.075, -2000, 0, 100000., [0, 1]),
[21.5449442, 20.76156441], 4)
assert_almost_equal(np.ipmt(0.1 / 12, list(range(5)), 24, 2000),
[-17.29165168, -16.66666667, -16.03647345,
-15.40102862, -14.76028842], 4)
assert_almost_equal(np.ppmt(0.1 / 12, list(range(5)), 24, 2000),
[-74.998201, -75.62318601, -76.25337923,
-76.88882405, -77.52956425], 4)
assert_almost_equal(np.ppmt(0.1 / 12, list(range(5)), 24, 2000, 0,
[0, 0, 1, 'end', 'begin']),
[-74.998201, -75.62318601, -75.62318601,
-76.88882405, -76.88882405], 4)
def test_broadcast_decimal(self):
# Use almost equal because precision is tested in the explicit tests, this test is to ensure
# broadcast with Decimal is not broken.
assert_almost_equal(np.ipmt(Decimal('0.1') / Decimal('12'), list(range(5)), Decimal('24'), Decimal('2000')),
[Decimal('-17.29165168'), Decimal('-16.66666667'), Decimal('-16.03647345'),
Decimal('-15.40102862'), Decimal('-14.76028842')], 4)
assert_almost_equal(np.ppmt(Decimal('0.1') / Decimal('12'), list(range(5)), Decimal('24'), Decimal('2000')),
[Decimal('-74.998201'), Decimal('-75.62318601'), Decimal('-76.25337923'),
Decimal('-76.88882405'), Decimal('-77.52956425')], 4)
assert_almost_equal(np.ppmt(Decimal('0.1') / Decimal('12'), list(range(5)), Decimal('24'), Decimal('2000'),
Decimal('0'), [Decimal('0'), Decimal('0'), Decimal('1'), 'end', 'begin']),
[Decimal('-74.998201'), Decimal('-75.62318601'), Decimal('-75.62318601'),
Decimal('-76.88882405'), Decimal('-76.88882405')], 4)
# Allow running this test module directly via the legacy numpy test runner.
if __name__ == "__main__":
    run_module_suite()
| {
"pile_set_name": "Github"
} |
/*
* Copyright (C) International Business Machines Corp., 2000-2004
* Portions Copyright (C) Christoph Hellwig, 2001-2002
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
* the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
/*
* jfs_logmgr.c: log manager
*
* for related information, see transaction manager (jfs_txnmgr.c), and
* recovery manager (jfs_logredo.c).
*
* note: for detail, RTFS.
*
* log buffer manager:
* special purpose buffer manager supporting log i/o requirements.
* per log serial pageout of logpage
* queuing i/o requests and redrive i/o at iodone
* maintain current logpage buffer
* no caching since append only
* appropriate jfs buffer cache buffers as needed
*
* group commit:
* transactions which wrote COMMIT records in the same in-memory
* log page during the pageout of previous/current log page(s) are
* committed together by the pageout of the page.
*
* TBD lazy commit:
* transactions are committed asynchronously when the log page
 *	containing its COMMIT record is paged out when it becomes full;
*
* serialization:
* . a per log lock serialize log write.
* . a per log lock serialize group commit.
* . a per log lock serialize log open/close;
*
* TBD log integrity:
* careful-write (ping-pong) of last logpage to recover from crash
* in overwrite.
* detection of split (out-of-order) write of physical sectors
* of last logpage via timestamp at end of each sector
 *	    with its mirror data array at the trailer.
*
* alternatives:
* lsn - 64-bit monotonically increasing integer vs
* 32-bit lspn and page eor.
*/
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/kthread.h>
#include <linux/buffer_head.h> /* for sync_blockdev() */
#include <linux/bio.h>
#include <linux/freezer.h>
#include <linux/delay.h>
#include <linux/mutex.h>
#include <linux/seq_file.h>
#include "jfs_incore.h"
#include "jfs_filsys.h"
#include "jfs_metapage.h"
#include "jfs_superblock.h"
#include "jfs_txnmgr.h"
#include "jfs_debug.h"
/*
* lbuf's ready to be redriven. Protected by log_redrive_lock (jfsIO thread)
*/
static struct lbuf *log_redrive_list;
static DEFINE_SPINLOCK(log_redrive_lock);
/*
* log read/write serialization (per log)
*/
#define LOG_LOCK_INIT(log) mutex_init(&(log)->loglock)
#define LOG_LOCK(log) mutex_lock(&((log)->loglock))
#define LOG_UNLOCK(log) mutex_unlock(&((log)->loglock))
/*
* log group commit serialization (per log)
*/
#define LOGGC_LOCK_INIT(log) spin_lock_init(&(log)->gclock)
#define LOGGC_LOCK(log) spin_lock_irq(&(log)->gclock)
#define LOGGC_UNLOCK(log) spin_unlock_irq(&(log)->gclock)
#define LOGGC_WAKEUP(tblk) wake_up_all(&(tblk)->gcwait)
/*
* log sync serialization (per log)
*/
#define LOGSYNC_DELTA(logsize) min((logsize)/8, 128*LOGPSIZE)
#define LOGSYNC_BARRIER(logsize) ((logsize)/4)
/*
#define LOGSYNC_DELTA(logsize) min((logsize)/4, 256*LOGPSIZE)
#define LOGSYNC_BARRIER(logsize) ((logsize)/2)
*/
/*
* log buffer cache synchronization
*/
static DEFINE_SPINLOCK(jfsLCacheLock);
#define LCACHE_LOCK(flags) spin_lock_irqsave(&jfsLCacheLock, flags)
#define LCACHE_UNLOCK(flags) spin_unlock_irqrestore(&jfsLCacheLock, flags)
/*
* See __SLEEP_COND in jfs_locks.h
*/
#define LCACHE_SLEEP_COND(wq, cond, flags) \
do { \
if (cond) \
break; \
__SLEEP_COND(wq, cond, LCACHE_LOCK(flags), LCACHE_UNLOCK(flags)); \
} while (0)
#define LCACHE_WAKEUP(event) wake_up(event)
/*
* lbuf buffer cache (lCache) control
*/
/* log buffer manager pageout control (cumulative, inclusive) */
#define lbmREAD 0x0001
#define lbmWRITE 0x0002 /* enqueue at tail of write queue;
* init pageout if at head of queue;
*/
#define lbmRELEASE 0x0004 /* remove from write queue
* at completion of pageout;
* do not free/recycle it yet:
* caller will free it;
*/
#define lbmSYNC 0x0008 /* do not return to freelist
* when removed from write queue;
*/
#define lbmFREE 0x0010 /* return to freelist
* at completion of pageout;
* the buffer may be recycled;
*/
#define lbmDONE 0x0020
#define lbmERROR 0x0040
#define lbmGC 0x0080 /* lbmIODone to perform post-GC processing
* of log page
*/
#define lbmDIRECT 0x0100
/*
* Global list of active external journals
*/
static LIST_HEAD(jfs_external_logs);
static struct jfs_log *dummy_log = NULL;
static DEFINE_MUTEX(jfs_log_mutex);
/*
* forward references
*/
static int lmWriteRecord(struct jfs_log * log, struct tblock * tblk,
struct lrd * lrd, struct tlock * tlck);
static int lmNextPage(struct jfs_log * log);
static int lmLogFileSystem(struct jfs_log * log, struct jfs_sb_info *sbi,
int activate);
static int open_inline_log(struct super_block *sb);
static int open_dummy_log(struct super_block *sb);
static int lbmLogInit(struct jfs_log * log);
static void lbmLogShutdown(struct jfs_log * log);
static struct lbuf *lbmAllocate(struct jfs_log * log, int);
static void lbmFree(struct lbuf * bp);
static void lbmfree(struct lbuf * bp);
static int lbmRead(struct jfs_log * log, int pn, struct lbuf ** bpp);
static void lbmWrite(struct jfs_log * log, struct lbuf * bp, int flag, int cant_block);
static void lbmDirectWrite(struct jfs_log * log, struct lbuf * bp, int flag);
static int lbmIOWait(struct lbuf * bp, int flag);
static bio_end_io_t lbmIODone;
static void lbmStartIO(struct lbuf * bp);
static void lmGCwrite(struct jfs_log * log, int cant_block);
static int lmLogSync(struct jfs_log * log, int hard_sync);
/*
 *	statistics
 */
#ifdef CONFIG_JFS_STATISTICS
/* cumulative log-manager counters, bumped via INCREMENT() */
static struct lmStat {
	uint commit;		/* # of commit */
	uint pagedone;		/* # of page written */
	uint submitted;		/* # of pages submitted */
	uint full_page;		/* # of full pages submitted */
	uint partial_page;	/* # of partial pages submitted */
} lmStat;
#endif
/*
 * Apply `writer` to the page cache of each special inode (block map,
 * inode map, direct inode) of every file system sharing this log.
 * The writer's return value is intentionally ignored (best-effort flush).
 */
static void write_special_inodes(struct jfs_log *log,
				 int (*writer)(struct address_space *))
{
	struct jfs_sb_info *sbi;

	/* log->sb_list holds one jfs_sb_info per mounted fs using this log */
	list_for_each_entry(sbi, &log->sb_list, log_list) {
		writer(sbi->ipbmap->i_mapping);
		writer(sbi->ipimap->i_mapping);
		writer(sbi->direct_inode->i_mapping);
	}
}
/*
 * NAME:	lmLog()
 *
 * FUNCTION:	write a log record;
 *
 * PARAMETER:	log	- log to append the record to
 *		tblk	- transaction block; NULL for a log write made
 *			  outside any transaction
 *		lrd	- log record descriptor to write
 *		tlck	- tlock describing the logged data; NULL when the
 *			  record carries no page data
 *
 * RETURN:	lsn	- offset to the next log record to write (end-of-log);
 *		-1	- error;
 *
 * note: todo: log error handler
 */
int lmLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
	  struct tlock * tlck)
{
	int lsn;
	int diffp, difft;
	struct metapage *mp = NULL;
	unsigned long flags;

	jfs_info("lmLog: log:0x%p tblk:0x%p, lrd:0x%p tlck:0x%p",
		 log, tblk, lrd, tlck);

	/* serialize all log writes on this log */
	LOG_LOCK(log);

	/* log by (out-of-transaction) JFS ? */
	if (tblk == NULL)
		goto writeRecord;

	/* log from page ? */
	if (tlck == NULL ||
	    tlck->type & tlckBTROOT || (mp = tlck->mp) == NULL)
		goto writeRecord;

	/*
	 *	initialize/update page/transaction recovery lsn
	 */
	lsn = log->lsn;

	LOGSYNC_LOCK(log, flags);

	/*
	 * initialize page lsn if first log write of the page
	 */
	if (mp->lsn == 0) {
		mp->log = log;
		mp->lsn = lsn;
		log->count++;

		/* insert page at tail of logsynclist */
		list_add_tail(&mp->synclist, &log->synclist);
	}

	/*
	 *	initialize/update lsn of tblock of the page
	 *
	 * transaction inherits oldest lsn of pages associated
	 * with allocation/deallocation of resources (their
	 * log records are used to reconstruct allocation map
	 * at recovery time: inode for inode allocation map,
	 * B+-tree index of extent descriptors for block
	 * allocation map);
	 * allocation map pages inherit transaction lsn at
	 * commit time to allow forwarding log syncpt past log
	 * records associated with allocation/deallocation of
	 * resources only after persistent map of these map pages
	 * have been updated and propagated to home.
	 */
	/*
	 * initialize transaction lsn:
	 */
	if (tblk->lsn == 0) {
		/* inherit lsn of its first page logged */
		tblk->lsn = mp->lsn;
		log->count++;

		/* insert tblock after the page on logsynclist */
		list_add(&tblk->synclist, &mp->synclist);
	}
	/*
	 * update transaction lsn:
	 */
	else {
		/* inherit oldest/smallest lsn of page */
		logdiff(diffp, mp->lsn, log);
		logdiff(difft, tblk->lsn, log);
		if (diffp < difft) {
			/* update tblock lsn with page lsn */
			tblk->lsn = mp->lsn;

			/* move tblock after page on logsynclist */
			list_move(&tblk->synclist, &mp->synclist);
		}
	}

	LOGSYNC_UNLOCK(log, flags);

	/*
	 *	write the log record
	 */
      writeRecord:
	lsn = lmWriteRecord(log, tblk, lrd, tlck);

	/*
	 *	forward log syncpt if log reached next syncpt trigger
	 */
	logdiff(diffp, lsn, log);
	if (diffp >= log->nextsync)
		lsn = lmLogSync(log, 0);

	/* update end-of-log lsn */
	log->lsn = lsn;

	LOG_UNLOCK(log);

	/* return end-of-log address */
	return lsn;
}
/*
 * NAME:	lmWriteRecord()
 *
 * FUNCTION:	move the log record to current log page
 *
 * PARAMETER:	log	- log to write to
 *		tblk	- transaction block; consulted only for COMMIT records
 *		lrd	- log record descriptor (length filled in here)
 *		tlck	- tlock whose line-locked data is copied into the
 *			  record; NULL if the record has no data portion
 *
 * RETURN:	end-of-log address
 *
 * serialization: LOG_LOCK() held on entry/exit
 */
static int
lmWriteRecord(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
	      struct tlock * tlck)
{
	int lsn = 0;		/* end-of-log address */
	struct lbuf *bp;	/* dst log page buffer */
	struct logpage *lp;	/* dst log page */
	caddr_t dst;		/* destination address in log page */
	int dstoffset;		/* end-of-log offset in log page */
	int freespace;		/* free space in log page */
	caddr_t p;		/* src meta-data page */
	caddr_t src;
	int srclen;
	int nbytes;		/* number of bytes to move */
	int i;
	int len;		/* running total of data bytes logged */
	struct linelock *linelock;
	struct lv *lv;
	struct lvd *lvd;
	int l2linesize;

	len = 0;

	/* retrieve destination log page to write */
	bp = (struct lbuf *) log->bp;
	lp = (struct logpage *) bp->l_ldata;
	dstoffset = log->eor;

	/* any log data to write ? */
	if (tlck == NULL)
		goto moveLrd;

	/*
	 *	move log record data
	 */
	/* retrieve source meta-data page to log */
	if (tlck->flag & tlckPAGELOCK) {
		p = (caddr_t) (tlck->mp->data);
		linelock = (struct linelock *) & tlck->lock;
	}
	/* retrieve source in-memory inode to log */
	else if (tlck->flag & tlckINODELOCK) {
		if (tlck->type & tlckDTREE)
			p = (caddr_t) &JFS_IP(tlck->ip)->i_dtroot;
		else
			p = (caddr_t) &JFS_IP(tlck->ip)->i_xtroot;
		linelock = (struct linelock *) & tlck->lock;
	}
#ifdef	_JFS_WIP
	else if (tlck->flag & tlckINLINELOCK) {
		inlinelock = (struct inlinelock *) & tlck;
		p = (caddr_t) & inlinelock->pxd;
		linelock = (struct linelock *) & tlck;
	}
#endif	/* _JFS_WIP */
	else {
		jfs_err("lmWriteRecord: UFO tlck:0x%p", tlck);
		return 0;	/* Probably should trap */
	}
	l2linesize = linelock->l2linesize;

      moveData:
	ASSERT(linelock->index <= linelock->maxcnt);

	lv = linelock->lv;
	for (i = 0; i < linelock->index; i++, lv++) {
		if (lv->length == 0)
			continue;

		/* is page full ? */
		if (dstoffset >= LOGPSIZE - LOGPTLRSIZE) {
			/* page become full: move on to next page */
			lmNextPage(log);

			bp = log->bp;
			lp = (struct logpage *) bp->l_ldata;
			dstoffset = LOGPHDRSIZE;
		}

		/*
		 * move log vector data
		 */
		src = (u8 *) p + (lv->offset << l2linesize);
		srclen = lv->length << l2linesize;
		len += srclen;
		/* copy, spilling over page boundaries as needed */
		while (srclen > 0) {
			freespace = (LOGPSIZE - LOGPTLRSIZE) - dstoffset;
			nbytes = min(freespace, srclen);
			dst = (caddr_t) lp + dstoffset;
			memcpy(dst, src, nbytes);
			dstoffset += nbytes;

			/* is page not full ? */
			if (dstoffset < LOGPSIZE - LOGPTLRSIZE)
				break;

			/* page become full: move on to next page */
			lmNextPage(log);

			bp = (struct lbuf *) log->bp;
			lp = (struct logpage *) bp->l_ldata;
			dstoffset = LOGPHDRSIZE;

			srclen -= nbytes;
			src += nbytes;
		}

		/*
		 * move log vector descriptor
		 */
		len += 4;
		lvd = (struct lvd *) ((caddr_t) lp + dstoffset);
		lvd->offset = cpu_to_le16(lv->offset);
		lvd->length = cpu_to_le16(lv->length);
		dstoffset += 4;
		jfs_info("lmWriteRecord: lv offset:%d length:%d",
			 lv->offset, lv->length);
	}

	/* chained linelocks: follow the chain and log those lines too */
	if ((i = linelock->next)) {
		linelock = (struct linelock *) lid_to_tlock(i);
		goto moveData;
	}

	/*
	 *	move log record descriptor
	 */
      moveLrd:
	lrd->length = cpu_to_le16(len);

	src = (caddr_t) lrd;
	srclen = LOGRDSIZE;

	while (srclen > 0) {
		freespace = (LOGPSIZE - LOGPTLRSIZE) - dstoffset;
		nbytes = min(freespace, srclen);
		dst = (caddr_t) lp + dstoffset;
		memcpy(dst, src, nbytes);

		dstoffset += nbytes;
		srclen -= nbytes;

		/* are there more to move than freespace of page ? */
		if (srclen)
			goto pageFull;

		/*
		 * end of log record descriptor
		 */

		/* update last log record eor */
		log->eor = dstoffset;
		bp->l_eor = dstoffset;
		lsn = (log->page << L2LOGPSIZE) + dstoffset;

		if (lrd->type & cpu_to_le16(LOG_COMMIT)) {
			tblk->clsn = lsn;
			jfs_info("wr: tclsn:0x%x, beor:0x%x", tblk->clsn,
				 bp->l_eor);

			INCREMENT(lmStat.commit);	/* # of commit */

			/*
			 * enqueue tblock for group commit:
			 *
			 * enqueue tblock of non-trivial/synchronous COMMIT
			 * at tail of group commit queue
			 * (trivial/asynchronous COMMITs are ignored by
			 * group commit.)
			 */
			LOGGC_LOCK(log);

			/* init tblock gc state */
			tblk->flag = tblkGC_QUEUE;
			tblk->bp = log->bp;
			tblk->pn = log->page;
			tblk->eor = log->eor;

			/* enqueue transaction to commit queue */
			list_add_tail(&tblk->cqueue, &log->cqueue);

			LOGGC_UNLOCK(log);
		}

		jfs_info("lmWriteRecord: lrd:0x%04x bp:0x%p pn:%d eor:0x%x",
			 le16_to_cpu(lrd->type), log->bp, log->page, dstoffset);

		/* page not full ? */
		if (dstoffset < LOGPSIZE - LOGPTLRSIZE)
			return lsn;

	      pageFull:
		/* page become full: move on to next page */
		lmNextPage(log);

		bp = (struct lbuf *) log->bp;
		lp = (struct logpage *) bp->l_ldata;
		dstoffset = LOGPHDRSIZE;

		src += nbytes;
	}

	return lsn;
}
/*
 * NAME:	lmNextPage()
 *
 * FUNCTION:	write current page and allocate next page.
 *
 * PARAMETER:	log
 *
 * RETURN:	0
 *
 * serialization: LOG_LOCK() held on entry/exit
 */
static int lmNextPage(struct jfs_log * log)
{
	struct logpage *lp;
	int lspn;		/* log sequence page number */
	int pn;			/* current page number */
	struct lbuf *bp;
	struct lbuf *nextbp;
	struct tblock *tblk;

	/* get current log page number and log sequence page number */
	pn = log->page;
	bp = log->bp;
	lp = (struct logpage *) bp->l_ldata;
	lspn = le32_to_cpu(lp->h.page);

	LOGGC_LOCK(log);

	/*
	 *	write or queue the full page at the tail of write queue
	 */
	/* get the tail tblk on commit queue */
	if (list_empty(&log->cqueue))
		tblk = NULL;
	else
		tblk = list_entry(log->cqueue.prev, struct tblock, cqueue);

	/* every tblk who has COMMIT record on the current page,
	 * and has not been committed, must be on commit queue
	 * since tblk is queued at commit queue at the time
	 * of writing its COMMIT record on the page before
	 * page becomes full (even though the tblk thread
	 * who wrote COMMIT record may have been suspended
	 * currently);
	 */

	/* is page bound with outstanding tail tblk ? */
	if (tblk && tblk->pn == pn) {
		/* mark tblk for end-of-page */
		tblk->flag |= tblkGC_EOP;

		if (log->cflag & logGC_PAGEOUT) {
			/* if page is not already on write queue,
			 * just enqueue (no lbmWRITE to prevent redrive)
			 * buffer to wqueue to ensure correct serial order
			 * of the pages since log pages will be added
			 * continuously
			 */
			if (bp->l_wqnext == NULL)
				lbmWrite(log, bp, 0, 0);
		} else {
			/*
			 * No current GC leader, initiate group commit
			 */
			log->cflag |= logGC_PAGEOUT;
			lmGCwrite(log, 0);
		}
	}
	/* page is not bound with outstanding tblk:
	 * init write or mark it to be redriven (lbmWRITE)
	 */
	else {
		/* finalize the page */
		bp->l_ceor = bp->l_eor;
		lp->h.eor = lp->t.eor = cpu_to_le16(bp->l_ceor);
		lbmWrite(log, bp, lbmWRITE | lbmRELEASE | lbmFREE, 0);
	}
	LOGGC_UNLOCK(log);

	/*
	 *	allocate/initialize next page
	 */
	/* if log wraps, the first data page of log is 2
	 * (0 never used, 1 is superblock).
	 */
	log->page = (pn == log->size - 1) ? 2 : pn + 1;
	log->eor = LOGPHDRSIZE;	/* ? valid page empty/full at logRedo() */

	/* allocate/initialize next log page buffer */
	nextbp = lbmAllocate(log, log->page);
	nextbp->l_eor = log->eor;
	log->bp = nextbp;

	/* initialize next log page */
	lp = (struct logpage *) nextbp->l_ldata;
	lp->h.page = lp->t.page = cpu_to_le32(lspn + 1);
	lp->h.eor = lp->t.eor = cpu_to_le16(LOGPHDRSIZE);

	return 0;
}
/*
 * NAME:	lmGroupCommit()
 *
 * FUNCTION:	group commit
 *	initiate pageout of the pages with COMMIT in the order of
 *	page number - redrive pageout of the page at the head of
 *	pageout queue until full page has been written.
 *
 * RETURN:	0	- success
 *		-EIO	- the group commit for this tblock failed
 *
 * NOTE:
 *	LOGGC_LOCK serializes log group commit queue, and
 *	transaction blocks on the commit queue.
 *	N.B. LOG_LOCK is NOT held during lmGroupCommit().
 */
int lmGroupCommit(struct jfs_log * log, struct tblock * tblk)
{
	int rc = 0;

	LOGGC_LOCK(log);

	/* group committed already ? */
	if (tblk->flag & tblkGC_COMMITTED) {
		if (tblk->flag & tblkGC_ERROR)
			rc = -EIO;

		LOGGC_UNLOCK(log);
		return rc;
	}
	jfs_info("lmGroup Commit: tblk = 0x%p, gcrtc = %d", tblk, log->gcrtc);

	if (tblk->xflag & COMMIT_LAZY)
		tblk->flag |= tblkGC_LAZY;

	/* become group leader only if no pageout is already in progress
	 * and there is work queued that must be driven now
	 */
	if ((!(log->cflag & logGC_PAGEOUT)) && (!list_empty(&log->cqueue)) &&
	    (!(tblk->xflag & COMMIT_LAZY) || test_bit(log_FLUSH, &log->flag)
	     || jfs_tlocks_low)) {
		/*
		 * No pageout in progress
		 *
		 * start group commit as its group leader.
		 */
		log->cflag |= logGC_PAGEOUT;

		lmGCwrite(log, 0);
	}

	if (tblk->xflag & COMMIT_LAZY) {
		/*
		 * Lazy transactions can leave now
		 */
		LOGGC_UNLOCK(log);
		return 0;
	}

	/* lmGCwrite gives up LOGGC_LOCK, check again */

	if (tblk->flag & tblkGC_COMMITTED) {
		if (tblk->flag & tblkGC_ERROR)
			rc = -EIO;

		LOGGC_UNLOCK(log);
		return rc;
	}

	/* upcount transaction waiting for completion
	 */
	log->gcrtc++;
	tblk->flag |= tblkGC_READY;

	/* sleep until lmPostGC() marks this tblock committed */
	__SLEEP_COND(tblk->gcwait, (tblk->flag & tblkGC_COMMITTED),
		     LOGGC_LOCK(log), LOGGC_UNLOCK(log));

	/* removed from commit queue */
	if (tblk->flag & tblkGC_ERROR)
		rc = -EIO;

	LOGGC_UNLOCK(log);
	return rc;
}
/*
 * NAME:	lmGCwrite()
 *
 * FUNCTION:	group commit write
 *	initiate write of log page, building a group of all transactions
 *	with commit records on that page.
 *
 * RETURN:	None
 *
 * NOTE:
 *	LOGGC_LOCK must be held by caller.
 *	N.B. LOG_LOCK is NOT held during lmGroupCommit().
 */
static void lmGCwrite(struct jfs_log * log, int cant_write)
{
	struct lbuf *bp;
	struct logpage *lp;
	int gcpn;		/* group commit page number */
	struct tblock *tblk;
	struct tblock *xtblk = NULL;

	/*
	 *	build the commit group of a log page
	 *
	 * scan commit queue and make a commit group of all
	 * transactions with COMMIT records on the same log page.
	 */
	/* get the head tblk on the commit queue */
	gcpn = list_entry(log->cqueue.next, struct tblock, cqueue)->pn;

	list_for_each_entry(tblk, &log->cqueue, cqueue) {
		if (tblk->pn != gcpn)
			break;

		xtblk = tblk;

		/* state transition: (QUEUE, READY) -> COMMIT */
		tblk->flag |= tblkGC_COMMIT;
	}
	tblk = xtblk;		/* last tblk of the page */

	/*
	 *	pageout to commit transactions on the log page.
	 */
	bp = (struct lbuf *) tblk->bp;
	lp = (struct logpage *) bp->l_ldata;
	/* is page already full ? */
	if (tblk->flag & tblkGC_EOP) {
		/* mark page to free at end of group commit of the page */
		tblk->flag &= ~tblkGC_EOP;
		tblk->flag |= tblkGC_FREE;
		bp->l_ceor = bp->l_eor;
		lp->h.eor = lp->t.eor = cpu_to_le16(bp->l_ceor);
		lbmWrite(log, bp, lbmWRITE | lbmRELEASE | lbmGC,
			 cant_write);
		INCREMENT(lmStat.full_page);
	}
	/* page is not yet full */
	else {
		bp->l_ceor = tblk->eor;	/* ? bp->l_ceor = bp->l_eor; */
		lp->h.eor = lp->t.eor = cpu_to_le16(bp->l_ceor);
		lbmWrite(log, bp, lbmWRITE | lbmGC, cant_write);
		INCREMENT(lmStat.partial_page);
	}
}
/*
 * NAME:	lmPostGC()
 *
 * FUNCTION:	group commit post-processing
 *	Processes transactions after their commit records have been written
 *	to disk, redriving log I/O if necessary.
 *
 * RETURN:	None
 *
 * NOTE:
 *	This routine is called at interrupt time by lbmIODone
 */
static void lmPostGC(struct lbuf * bp)
{
	unsigned long flags;
	struct jfs_log *log = bp->l_log;
	struct logpage *lp;
	struct tblock *tblk, *temp;

	/* equivalent of LOGGC_LOCK(), but keeping flags for irq context */
	//LOGGC_LOCK(log);
	spin_lock_irqsave(&log->gclock, flags);
	/*
	 *	current pageout of group commit completed.
	 *
	 * remove/wakeup transactions from commit queue who were
	 * group committed with the current log page
	 */
	list_for_each_entry_safe(tblk, temp, &log->cqueue, cqueue) {
		if (!(tblk->flag & tblkGC_COMMIT))
			break;
		/* if transaction was marked GC_COMMIT then
		 * it has been shipped in the current pageout
		 * and made it to disk - it is committed.
		 */

		if (bp->l_flag & lbmERROR)
			tblk->flag |= tblkGC_ERROR;

		/* remove it from the commit queue */
		list_del(&tblk->cqueue);
		tblk->flag &= ~tblkGC_QUEUE;

		if (tblk == log->flush_tblk) {
			/* we can stop flushing the log now */
			clear_bit(log_FLUSH, &log->flag);
			log->flush_tblk = NULL;
		}

		jfs_info("lmPostGC: tblk = 0x%p, flag = 0x%x", tblk,
			 tblk->flag);

		if (!(tblk->xflag & COMMIT_FORCE))
			/*
			 * Hand tblk over to lazy commit thread
			 */
			txLazyUnlock(tblk);
		else {
			/* state transition: COMMIT -> COMMITTED */
			tblk->flag |= tblkGC_COMMITTED;

			if (tblk->flag & tblkGC_READY)
				log->gcrtc--;

			LOGGC_WAKEUP(tblk);
		}

		/* was page full before pageout ?
		 * (and this is the last tblk bound with the page)
		 */
		if (tblk->flag & tblkGC_FREE)
			lbmFree(bp);
		/* did page become full after pageout ?
		 * (and this is the last tblk bound with the page)
		 */
		else if (tblk->flag & tblkGC_EOP) {
			/* finalize the page */
			lp = (struct logpage *) bp->l_ldata;
			bp->l_ceor = bp->l_eor;
			lp->h.eor = lp->t.eor = cpu_to_le16(bp->l_eor);
			jfs_info("lmPostGC: calling lbmWrite");
			lbmWrite(log, bp, lbmWRITE | lbmRELEASE | lbmFREE,
				 1);
		}
	}

	/* are there any transactions who have entered lmGroupCommit()
	 * (whose COMMITs are after that of the last log page written.
	 * They are waiting for new group commit (above at (SLEEP 1))
	 * or lazy transactions are on a full (queued) log page,
	 * select the latest ready transaction as new group leader and
	 * wake her up to lead her group.
	 */
	if ((!list_empty(&log->cqueue)) &&
	    ((log->gcrtc > 0) || (tblk->bp->l_wqnext != NULL) ||
	     test_bit(log_FLUSH, &log->flag) || jfs_tlocks_low))
		/*
		 * Call lmGCwrite with new group leader
		 */
		lmGCwrite(log, 1);

	/* no transaction are ready yet (transactions are only just
	 * queued (GC_QUEUE) and not entered for group commit yet).
	 * the first transaction entering group commit
	 * will elect herself as new group leader.
	 */
	else
		log->cflag &= ~logGC_PAGEOUT;

	//LOGGC_UNLOCK(log);
	spin_unlock_irqrestore(&log->gclock, flags);
	return;
}
/*
 * NAME:	lmLogSync()
 *
 * FUNCTION:	write log SYNCPT record for specified log
 *	if new sync address is available
 *	(normally the case if sync() is executed by back-ground
 *	process).
 *	calculate new value of i_nextsync which determines when
 *	this code is called again.
 *
 * PARAMETERS:	log	- log structure
 *		hard_sync - 1 to force all metadata to be written
 *
 * RETURN:	lsn	- end-of-log address after the SYNCPT record (or the
 *			  current end-of-log if no SYNCPT was written)
 *
 * serialization: LOG_LOCK() held on entry/exit
 */
static int lmLogSync(struct jfs_log * log, int hard_sync)
{
	int logsize;
	int written;		/* written since last syncpt */
	int free;		/* free space left available */
	int delta;		/* additional delta to write normally */
	int more;		/* additional write granted */
	struct lrd lrd;
	int lsn;
	struct logsyncblk *lp;
	unsigned long flags;

	/* push dirty metapages out to disk */
	if (hard_sync)
		write_special_inodes(log, filemap_fdatawrite);
	else
		write_special_inodes(log, filemap_flush);

	/*
	 *	forward syncpt
	 */
	/* if last sync is same as last syncpt,
	 * invoke sync point forward processing to update sync.
	 */

	if (log->sync == log->syncpt) {
		LOGSYNC_LOCK(log, flags);
		if (list_empty(&log->synclist))
			log->sync = log->lsn;
		else {
			/* oldest entry on the synclist bounds recovery */
			lp = list_entry(log->synclist.next,
					struct logsyncblk, synclist);
			log->sync = lp->lsn;
		}
		LOGSYNC_UNLOCK(log, flags);

	}

	/* if sync is different from last syncpt,
	 * write a SYNCPT record with syncpt = sync.
	 * reset syncpt = sync
	 */
	if (log->sync != log->syncpt) {
		lrd.logtid = 0;
		lrd.backchain = 0;
		lrd.type = cpu_to_le16(LOG_SYNCPT);
		lrd.length = 0;
		lrd.log.syncpt.sync = cpu_to_le32(log->sync);
		lsn = lmWriteRecord(log, NULL, &lrd, NULL);

		log->syncpt = log->sync;
	} else
		lsn = log->lsn;

	/*
	 *	setup next syncpt trigger (SWAG)
	 */
	logsize = log->logsize;

	logdiff(written, lsn, log);
	free = logsize - written;
	delta = LOGSYNC_DELTA(logsize);
	more = min(free / 2, delta);
	if (more < 2 * LOGPSIZE) {
		jfs_warn("\n ... Log Wrap ... Log Wrap ... Log Wrap ...\n");
		/*
		 *	log wrapping
		 *
		 * option 1 - panic ? No.!
		 * option 2 - shutdown file systems
		 *	      associated with log ?
		 * option 3 - extend log ?
		 */
		/*
		 * option 4 - second chance
		 *
		 * mark log wrapped, and continue.
		 * when all active transactions are completed,
		 * mark log valid for recovery.
		 * if crashed during invalid state, log state
		 * implies invalid log, forcing fsck().
		 */
		/* mark log state log wrap in log superblock */
		/* log->state = LOGWRAP; */

		/* reset sync point computation */
		log->syncpt = log->sync = lsn;
		log->nextsync = delta;
	} else
		/* next syncpt trigger = written + more */
		log->nextsync = written + more;

	/* if number of bytes written from last sync point is more
	 * than 1/4 of the log size, stop new transactions from
	 * starting until all current transactions are completed
	 * by setting syncbarrier flag.
	 */
	if (!test_bit(log_SYNCBARRIER, &log->flag) &&
	    (written > LOGSYNC_BARRIER(logsize)) && log->active) {
		set_bit(log_SYNCBARRIER, &log->flag);
		jfs_info("log barrier on: lsn=0x%x syncpt=0x%x", lsn,
			 log->syncpt);
		/*
		 * We may have to initiate group commit
		 */
		jfs_flush_journal(log, 0);
	}

	return lsn;
}
/*
 * NAME:	jfs_syncpt
 *
 * FUNCTION:	write log SYNCPT record for specified log
 *
 * PARAMETERS:	log	  - log structure
 *		hard_sync - set to 1 to force metadata to be written
 */
void jfs_syncpt(struct jfs_log *log, int hard_sync)
{
	/* take LOG_LOCK here; lmLogSync() requires it held on entry */
	LOG_LOCK(log);
	lmLogSync(log, hard_sync);
	LOG_UNLOCK(log);
}
/*
 * NAME:	lmLogOpen()
 *
 * FUNCTION:	open the log on first open;
 *	insert filesystem in the active list of the log.
 *
 * PARAMETER:	sb	- superblock of the file system being mounted
 *
 * RETURN:	0	- success
 *		negative errno on failure
 *
 * serialization: jfs_log_mutex protects the external log list
 */
int lmLogOpen(struct super_block *sb)
{
	int rc;
	struct block_device *bdev;
	struct jfs_log *log;
	struct jfs_sb_info *sbi = JFS_SBI(sb);

	if (sbi->flag & JFS_NOINTEGRITY)
		return open_dummy_log(sb);

	if (sbi->mntflag & JFS_INLINELOG)
		return open_inline_log(sb);

	mutex_lock(&jfs_log_mutex);
	/* is this external journal already open for another fs ? */
	list_for_each_entry(log, &jfs_external_logs, journal_list) {
		if (log->bdev->bd_dev == sbi->logdev) {
			if (memcmp(log->uuid, sbi->loguuid,
				   sizeof(log->uuid))) {
				jfs_warn("wrong uuid on JFS journal\n");
				mutex_unlock(&jfs_log_mutex);
				return -EINVAL;
			}
			/*
			 * add file system to log active file system list
			 */
			if ((rc = lmLogFileSystem(log, sbi, 1))) {
				mutex_unlock(&jfs_log_mutex);
				return rc;
			}
			goto journal_found;
		}
	}

	if (!(log = kzalloc(sizeof(struct jfs_log), GFP_KERNEL))) {
		mutex_unlock(&jfs_log_mutex);
		return -ENOMEM;
	}
	INIT_LIST_HEAD(&log->sb_list);
	init_waitqueue_head(&log->syncwait);

	/*
	 *	external log as separate logical volume
	 *
	 * file systems to log may have n-to-1 relationship;
	 */

	bdev = open_by_devnum(sbi->logdev, FMODE_READ|FMODE_WRITE);
	if (IS_ERR(bdev)) {
		/* PTR_ERR() is already a negative errno; do not negate it */
		rc = PTR_ERR(bdev);
		goto free;
	}

	if ((rc = bd_claim(bdev, log))) {
		goto close;
	}

	log->bdev = bdev;
	memcpy(log->uuid, sbi->loguuid, sizeof(log->uuid));

	/*
	 * initialize log:
	 */
	if ((rc = lmLogInit(log)))
		goto unclaim;

	list_add(&log->journal_list, &jfs_external_logs);

	/*
	 * add file system to log active file system list
	 */
	if ((rc = lmLogFileSystem(log, sbi, 1)))
		goto shutdown;

      journal_found:
	LOG_LOCK(log);
	list_add(&sbi->log_list, &log->sb_list);
	sbi->log = log;
	LOG_UNLOCK(log);

	mutex_unlock(&jfs_log_mutex);
	return 0;

	/*
	 *	unwind on error
	 */
      shutdown:		/* unwind lbmLogInit() */
	list_del(&log->journal_list);
	lbmLogShutdown(log);

      unclaim:
	bd_release(bdev);

      close:		/* close external log device */
	blkdev_put(bdev, FMODE_READ|FMODE_WRITE);

      free:		/* free log descriptor */
	mutex_unlock(&jfs_log_mutex);
	kfree(log);

	jfs_warn("lmLogOpen: exit(%d)", rc);
	return rc;
}
static int open_inline_log(struct super_block *sb)
{
struct jfs_log *log;
int rc;
if (!(log = kzalloc(sizeof(struct jfs_log), GFP_KERNEL)))
return -ENOMEM;
INIT_LIST_HEAD(&log->sb_list);
init_waitqueue_head(&log->syncwait);
set_bit(log_INLINELOG, &log->flag);
log->bdev = sb->s_bdev;
log->base = addressPXD(&JFS_SBI(sb)->logpxd);
log->size = lengthPXD(&JFS_SBI(sb)->logpxd) >>
(L2LOGPSIZE - sb->s_blocksize_bits);
log->l2bsize = sb->s_blocksize_bits;
ASSERT(L2LOGPSIZE >= sb->s_blocksize_bits);
/*
* initialize log.
*/
if ((rc = lmLogInit(log))) {
kfree(log);
jfs_warn("lmLogOpen: exit(%d)", rc);
return rc;
}
list_add(&JFS_SBI(sb)->log_list, &log->sb_list);
JFS_SBI(sb)->log = log;
return rc;
}
/*
 * All JFS_NOINTEGRITY mounts share one in-memory "dummy" log whose
 * writes are short-circuited in lbmStartIO().  It is created lazily
 * on the first such mount and then reused (see the TODO in
 * lmLogClose() about never tearing it down).
 */
static int open_dummy_log(struct super_block *sb)
{
	int rc;

	mutex_lock(&jfs_log_mutex);
	if (!dummy_log) {
		dummy_log = kzalloc(sizeof(struct jfs_log), GFP_KERNEL);
		if (!dummy_log) {
			mutex_unlock(&jfs_log_mutex);
			return -ENOMEM;
		}
		INIT_LIST_HEAD(&dummy_log->sb_list);
		init_waitqueue_head(&dummy_log->syncwait);
		dummy_log->no_integrity = 1;
		/* Make up some stuff: geometry is never written to disk */
		dummy_log->base = 0;
		dummy_log->size = 1024;
		rc = lmLogInit(dummy_log);
		if (rc) {
			kfree(dummy_log);
			dummy_log = NULL;
			mutex_unlock(&jfs_log_mutex);
			return rc;
		}
	}

	/* attach this superblock to the shared log under LOG_LOCK */
	LOG_LOCK(dummy_log);
	list_add(&JFS_SBI(sb)->log_list, &dummy_log->sb_list);
	JFS_SBI(sb)->log = dummy_log;
	LOG_UNLOCK(dummy_log);

	mutex_unlock(&jfs_log_mutex);
	return 0;
}
/*
* NAME: lmLogInit()
*
* FUNCTION: log initialization at first log open.
*
* logredo() (or logformat()) should have been run previously.
* initialize the log from log superblock.
* set the log state in the superblock to LOGMOUNT and
* write SYNCPT log record.
*
* PARAMETER: log - log structure
*
* RETURN: 0 - if ok
* -EINVAL - bad log magic number or superblock dirty
* error returned from logwait()
*
* serialization: single first open thread
*/
int lmLogInit(struct jfs_log * log)
{
int rc = 0;
struct lrd lrd;
struct logsuper *logsuper;
struct lbuf *bpsuper;
struct lbuf *bp;
struct logpage *lp;
int lsn = 0;
jfs_info("lmLogInit: log:0x%p", log);
/* initialize the group commit serialization lock */
LOGGC_LOCK_INIT(log);
/* allocate/initialize the log write serialization lock */
LOG_LOCK_INIT(log);
LOGSYNC_LOCK_INIT(log);
INIT_LIST_HEAD(&log->synclist);
INIT_LIST_HEAD(&log->cqueue);
log->flush_tblk = NULL;
log->count = 0;
/*
* initialize log i/o
*/
if ((rc = lbmLogInit(log)))
return rc;
if (!test_bit(log_INLINELOG, &log->flag))
log->l2bsize = L2LOGPSIZE;
/* check for disabled journaling to disk */
if (log->no_integrity) {
/*
* Journal pages will still be filled. When the time comes
* to actually do the I/O, the write is not done, and the
* endio routine is called directly.
*/
bp = lbmAllocate(log , 0);
log->bp = bp;
bp->l_pn = bp->l_eor = 0;
} else {
/*
* validate log superblock
*/
if ((rc = lbmRead(log, 1, &bpsuper)))
goto errout10;
logsuper = (struct logsuper *) bpsuper->l_ldata;
if (logsuper->magic != cpu_to_le32(LOGMAGIC)) {
jfs_warn("*** Log Format Error ! ***");
rc = -EINVAL;
goto errout20;
}
/* logredo() should have been run successfully. */
if (logsuper->state != cpu_to_le32(LOGREDONE)) {
jfs_warn("*** Log Is Dirty ! ***");
rc = -EINVAL;
goto errout20;
}
/* initialize log from log superblock */
if (test_bit(log_INLINELOG,&log->flag)) {
if (log->size != le32_to_cpu(logsuper->size)) {
rc = -EINVAL;
goto errout20;
}
jfs_info("lmLogInit: inline log:0x%p base:0x%Lx "
"size:0x%x", log,
(unsigned long long) log->base, log->size);
} else {
if (memcmp(logsuper->uuid, log->uuid, 16)) {
jfs_warn("wrong uuid on JFS log device");
goto errout20;
}
log->size = le32_to_cpu(logsuper->size);
log->l2bsize = le32_to_cpu(logsuper->l2bsize);
jfs_info("lmLogInit: external log:0x%p base:0x%Lx "
"size:0x%x", log,
(unsigned long long) log->base, log->size);
}
log->page = le32_to_cpu(logsuper->end) / LOGPSIZE;
log->eor = le32_to_cpu(logsuper->end) - (LOGPSIZE * log->page);
/*
* initialize for log append write mode
*/
/* establish current/end-of-log page/buffer */
if ((rc = lbmRead(log, log->page, &bp)))
goto errout20;
lp = (struct logpage *) bp->l_ldata;
jfs_info("lmLogInit: lsn:0x%x page:%d eor:%d:%d",
le32_to_cpu(logsuper->end), log->page, log->eor,
le16_to_cpu(lp->h.eor));
log->bp = bp;
bp->l_pn = log->page;
bp->l_eor = log->eor;
/* if current page is full, move on to next page */
if (log->eor >= LOGPSIZE - LOGPTLRSIZE)
lmNextPage(log);
/*
* initialize log syncpoint
*/
/*
* write the first SYNCPT record with syncpoint = 0
* (i.e., log redo up to HERE !);
* remove current page from lbm write queue at end of pageout
* (to write log superblock update), but do not release to
* freelist;
*/
lrd.logtid = 0;
lrd.backchain = 0;
lrd.type = cpu_to_le16(LOG_SYNCPT);
lrd.length = 0;
lrd.log.syncpt.sync = 0;
lsn = lmWriteRecord(log, NULL, &lrd, NULL);
bp = log->bp;
bp->l_ceor = bp->l_eor;
lp = (struct logpage *) bp->l_ldata;
lp->h.eor = lp->t.eor = cpu_to_le16(bp->l_eor);
lbmWrite(log, bp, lbmWRITE | lbmSYNC, 0);
if ((rc = lbmIOWait(bp, 0)))
goto errout30;
/*
* update/write superblock
*/
logsuper->state = cpu_to_le32(LOGMOUNT);
log->serial = le32_to_cpu(logsuper->serial) + 1;
logsuper->serial = cpu_to_le32(log->serial);
lbmDirectWrite(log, bpsuper, lbmWRITE | lbmRELEASE | lbmSYNC);
if ((rc = lbmIOWait(bpsuper, lbmFREE)))
goto errout30;
}
/* initialize logsync parameters */
log->logsize = (log->size - 2) << L2LOGPSIZE;
log->lsn = lsn;
log->syncpt = lsn;
log->sync = log->syncpt;
log->nextsync = LOGSYNC_DELTA(log->logsize);
jfs_info("lmLogInit: lsn:0x%x syncpt:0x%x sync:0x%x",
log->lsn, log->syncpt, log->sync);
/*
* initialize for lazy/group commit
*/
log->clsn = lsn;
return 0;
/*
* unwind on error
*/
errout30: /* release log page */
log->wqueue = NULL;
bp->l_wqnext = NULL;
lbmFree(bp);
errout20: /* release log superblock */
lbmFree(bpsuper);
errout10: /* unwind lbmLogInit() */
lbmLogShutdown(log);
jfs_warn("lmLogInit: exit(%d)", rc);
return rc;
}
/*
* NAME: lmLogClose()
*
* FUNCTION: remove file system <ipmnt> from active list of log <iplog>
* and close it on last close.
*
* PARAMETER: sb - superblock
*
* RETURN: errors from subroutines
*
* serialization:
*/
int lmLogClose(struct super_block *sb)
{
	struct jfs_sb_info *sbi = JFS_SBI(sb);
	struct jfs_log *log = sbi->log;
	struct block_device *bdev;
	int rc = 0;

	jfs_info("lmLogClose: log:0x%p", log);

	mutex_lock(&jfs_log_mutex);

	/* detach this superblock from the log before any teardown */
	LOG_LOCK(log);
	list_del(&sbi->log_list);
	LOG_UNLOCK(log);
	sbi->log = NULL;

	/*
	 * We need to make sure all of the "written" metapages
	 * actually make it to disk
	 */
	sync_blockdev(sb->s_bdev);

	if (test_bit(log_INLINELOG, &log->flag)) {
		/*
		 * in-line log in host file system: never shared,
		 * so it can be shut down and freed unconditionally
		 */
		rc = lmLogShutdown(log);
		kfree(log);
		goto out;
	}

	/* external log: remove this fs from the active list on disk */
	if (!log->no_integrity)
		lmLogFileSystem(log, sbi, 0);

	/* other file systems still use this log: leave it open */
	if (!list_empty(&log->sb_list))
		goto out;

	/*
	 * TODO: ensure that the dummy_log is in a state to allow
	 * lbmLogShutdown to deallocate all the buffers and call
	 * kfree against dummy_log.  For now, leave dummy_log & its
	 * buffers in memory, and resuse if another no-integrity mount
	 * is requested.
	 */
	if (log->no_integrity)
		goto out;

	/*
	 * external log as separate logical volume: last user gone,
	 * shut it down and release the device
	 */
	list_del(&log->journal_list);
	bdev = log->bdev;
	rc = lmLogShutdown(log);

	bd_release(bdev);
	blkdev_put(bdev, FMODE_READ|FMODE_WRITE);

	kfree(log);

      out:
	mutex_unlock(&jfs_log_mutex);
	jfs_info("lmLogClose: exit(%d)", rc);
	return rc;
}
/*
* NAME: jfs_flush_journal()
*
* FUNCTION: initiate write of any outstanding transactions to the journal
* and optionally wait until they are all written to disk
*
* wait == 0 flush until latest txn is committed, don't wait
* wait == 1 flush until latest txn is committed, wait
* wait > 1 flush until all txn's are complete, wait
*/
void jfs_flush_journal(struct jfs_log *log, int wait)
{
	int i;
	struct tblock *target = NULL;

	/* jfs_write_inode may call us during read-only mount */
	if (!log)
		return;

	jfs_info("jfs_flush_journal: log:0x%p wait=%d", log, wait);

	LOGGC_LOCK(log);

	if (!list_empty(&log->cqueue)) {
		/*
		 * This ensures that we will keep writing to the journal as long
		 * as there are unwritten commit records
		 */
		target = list_entry(log->cqueue.prev, struct tblock, cqueue);

		if (test_bit(log_FLUSH, &log->flag)) {
			/*
			 * We're already flushing.
			 * if flush_tblk is NULL, we are flushing everything,
			 * so leave it that way.  Otherwise, update it to the
			 * latest transaction
			 */
			if (log->flush_tblk)
				log->flush_tblk = target;
		} else {
			/* Only flush until latest transaction is committed */
			log->flush_tblk = target;
			set_bit(log_FLUSH, &log->flag);

			/*
			 * Initiate I/O on outstanding transactions
			 */
			if (!(log->cflag & logGC_PAGEOUT)) {
				log->cflag |= logGC_PAGEOUT;
				lmGCwrite(log, 0);
			}
		}
	}
	if ((wait > 1) || test_bit(log_SYNCBARRIER, &log->flag)) {
		/* Flush until all activity complete */
		set_bit(log_FLUSH, &log->flag);
		log->flush_tblk = NULL;
	}

	/* wait for the target transaction's group commit to finish */
	if (wait && target && !(target->flag & tblkGC_COMMITTED)) {
		DECLARE_WAITQUEUE(__wait, current);

		add_wait_queue(&target->gcwait, &__wait);
		set_current_state(TASK_UNINTERRUPTIBLE);
		/* drop the lock so the committer can make progress */
		LOGGC_UNLOCK(log);
		schedule();
		__set_current_state(TASK_RUNNING);
		LOGGC_LOCK(log);
		remove_wait_queue(&target->gcwait, &__wait);
	}
	LOGGC_UNLOCK(log);

	if (wait < 2)
		return;

	write_special_inodes(log, filemap_fdatawrite);

	/*
	 * If there was recent activity, we may need to wait
	 * for the lazycommit thread to catch up
	 * (poll: up to 200 * 250ms = ~50 seconds)
	 */
	if ((!list_empty(&log->cqueue)) || !list_empty(&log->synclist)) {
		for (i = 0; i < 200; i++) {	/* Too much? */
			msleep(250);
			write_special_inodes(log, filemap_fdatawrite);
			if (list_empty(&log->cqueue) &&
			    list_empty(&log->synclist))
				break;
		}
	}
	assert(list_empty(&log->cqueue));

#ifdef CONFIG_JFS_DEBUG
	if (!list_empty(&log->synclist)) {
		struct logsyncblk *lp;

		/* dump whatever is still stuck on the synclist */
		printk(KERN_ERR "jfs_flush_journal: synclist not empty\n");
		list_for_each_entry(lp, &log->synclist, synclist) {
			if (lp->xflag & COMMIT_PAGE) {
				struct metapage *mp = (struct metapage *)lp;
				print_hex_dump(KERN_ERR, "metapage: ",
					       DUMP_PREFIX_ADDRESS, 16, 4,
					       mp, sizeof(struct metapage), 0);
				print_hex_dump(KERN_ERR, "page: ",
					       DUMP_PREFIX_ADDRESS, 16,
					       sizeof(long), mp->page,
					       sizeof(struct page), 0);
			} else
				print_hex_dump(KERN_ERR, "tblock:",
					       DUMP_PREFIX_ADDRESS, 16, 4,
					       lp, sizeof(struct tblock), 0);
		}
	}
#else
	WARN_ON(!list_empty(&log->synclist));
#endif
	clear_bit(log_FLUSH, &log->flag);
}
/*
* NAME: lmLogShutdown()
*
* FUNCTION: log shutdown at last LogClose().
*
* write log syncpt record.
* update super block to set redone flag to 0.
*
* PARAMETER: log - log inode
*
* RETURN: 0 - success
*
* serialization: single last close thread
*/
int lmLogShutdown(struct jfs_log * log)
{
	int rc;
	struct lrd lrd;
	int lsn;
	struct logsuper *logsuper;
	struct lbuf *bpsuper;
	struct lbuf *bp;
	struct logpage *lp;

	jfs_info("lmLogShutdown: log:0x%p", log);

	/* drain all outstanding transactions first (wait == 2) */
	jfs_flush_journal(log, 2);

	/*
	 * write the last SYNCPT record with syncpoint = 0
	 * (i.e., log redo up to HERE !)
	 */
	lrd.logtid = 0;
	lrd.backchain = 0;
	lrd.type = cpu_to_le16(LOG_SYNCPT);
	lrd.length = 0;
	lrd.log.syncpt.sync = 0;

	lsn = lmWriteRecord(log, NULL, &lrd, NULL);
	bp = log->bp;
	lp = (struct logpage *) bp->l_ldata;
	lp->h.eor = lp->t.eor = cpu_to_le16(bp->l_eor);
	/* synchronous write, then release the current log page */
	lbmWrite(log, log->bp, lbmWRITE | lbmRELEASE | lbmSYNC, 0);
	lbmIOWait(log->bp, lbmFREE);
	log->bp = NULL;

	/*
	 * synchronous update log superblock
	 * mark log state as shutdown cleanly
	 * (i.e., Log does not need to be replayed).
	 */
	if ((rc = lbmRead(log, 1, &bpsuper)))
		goto out;

	logsuper = (struct logsuper *) bpsuper->l_ldata;
	logsuper->state = cpu_to_le32(LOGREDONE);
	logsuper->end = cpu_to_le32(lsn);
	lbmDirectWrite(log, bpsuper, lbmWRITE | lbmRELEASE | lbmSYNC);
	rc = lbmIOWait(bpsuper, lbmFREE);

	jfs_info("lmLogShutdown: lsn:0x%x page:%d eor:%d",
		 lsn, log->page, log->eor);

      out:
	/*
	 * shutdown per log i/o (free the buffer pool)
	 */
	lbmLogShutdown(log);

	if (rc) {
		jfs_warn("lmLogShutdown: exit(%d)", rc);
	}
	return rc;
}
/*
* NAME: lmLogFileSystem()
*
* FUNCTION: insert (<activate> = true)/remove (<activate> = false)
* file system into/from log active file system list.
*
* PARAMETE: log - pointer to logs inode.
* fsdev - kdev_t of filesystem.
* serial - pointer to returned log serial number
* activate - insert/remove device from active list.
*
* RETURN: 0 - success
* errors returned by vms_iowait().
*/
/*
 * Add (activate != 0) or remove (activate == 0) a file system's uuid
 * in the active-file-system table of the log superblock, then write
 * the superblock synchronously.  Returns 0 or a negative errno.
 */
static int lmLogFileSystem(struct jfs_log * log, struct jfs_sb_info *sbi,
			   int activate)
{
	int rc = 0;
	int i;
	struct logsuper *logsuper;
	struct lbuf *bpsuper;
	char *uuid = sbi->uuid;

	/* read the log superblock (page 1) */
	rc = lbmRead(log, 1, &bpsuper);
	if (rc)
		return rc;
	logsuper = (struct logsuper *) bpsuper->l_ldata;

	if (activate) {
		/* claim the first free slot in the active table */
		for (i = 0; i < MAX_ACTIVE; i++) {
			if (memcmp(logsuper->active[i].uuid, NULL_UUID, 16))
				continue;
			memcpy(logsuper->active[i].uuid, uuid, 16);
			sbi->aggregate = i;
			break;
		}
		if (i == MAX_ACTIVE) {
			jfs_warn("Too many file systems sharing journal!");
			lbmFree(bpsuper);
			return -EMFILE;	/* Is there a better rc? */
		}
	} else {
		/* find and clear this file system's slot */
		for (i = 0; i < MAX_ACTIVE; i++) {
			if (memcmp(logsuper->active[i].uuid, uuid, 16))
				continue;
			memcpy(logsuper->active[i].uuid, NULL_UUID, 16);
			break;
		}
		if (i == MAX_ACTIVE) {
			jfs_warn("Somebody stomped on the journal!");
			lbmFree(bpsuper);
			return -EIO;
		}
	}

	/*
	 * Synchronously write the log superblock, bypassing the write
	 * queue: activation must reach disk before the file system's
	 * first log record (MOUNT), and deactivation only after all of
	 * its metadata has been flushed.
	 */
	lbmDirectWrite(log, bpsuper, lbmWRITE | lbmRELEASE | lbmSYNC);
	rc = lbmIOWait(bpsuper, lbmFREE);

	return rc;
}
/*
* log buffer manager (lbm)
* ------------------------
*
* special purpose buffer manager supporting log i/o requirements.
*
* per log write queue:
* log pageout occurs in serial order by fifo write queue and
 *	restricting to a single i/o in progress at any one time.
* a circular singly-linked list
* (log->wrqueue points to the tail, and buffers are linked via
* bp->wrqueue field), and
 *	maintains log page in pageout or waiting for pageout in serial pageout.
*/
/*
* lbmLogInit()
*
* initialize per log I/O setup at lmLogInit()
*/
static int lbmLogInit(struct jfs_log * log)
{				/* log inode */
	int i;
	struct lbuf *lbuf;

	jfs_info("lbmLogInit: log:0x%p", log);

	/* initialize current buffer cursor */
	log->bp = NULL;

	/* initialize log device write queue */
	log->wqueue = NULL;

	/*
	 * Each log has its own buffer pages allocated to it.  These are
	 * not managed by the page cache.  This ensures that a transaction
	 * writing to the log does not block trying to allocate a page from
	 * the page cache (for the log).  This would be bad, since page
	 * allocation waits on the kswapd thread that may be committing inodes
	 * which would cause log activity.  Was that clear?  I'm trying to
	 * avoid deadlock here.
	 */
	init_waitqueue_head(&log->free_wait);

	log->lbuf_free = NULL;

	/* carve each page into PAGE_SIZE/LOGPSIZE lbufs; i counts lbufs */
	for (i = 0; i < LOGPAGES;) {
		char *buffer;
		uint offset;
		struct page *page;

		buffer = (char *) get_zeroed_page(GFP_KERNEL);
		if (buffer == NULL)
			goto error;
		page = virt_to_page(buffer);
		for (offset = 0; offset < PAGE_SIZE; offset += LOGPSIZE) {
			lbuf = kmalloc(sizeof(struct lbuf), GFP_KERNEL);
			if (lbuf == NULL) {
				/* no lbuf holds a reference to this page
				 * yet, so drop it ourselves; otherwise the
				 * earlier lbufs on the freelist own it and
				 * lbmLogShutdown() will release it */
				if (offset == 0)
					free_page((unsigned long) buffer);
				goto error;
			}
			if (offset) /* we already have one reference */
				get_page(page);
			lbuf->l_offset = offset;
			lbuf->l_ldata = buffer + offset;
			lbuf->l_page = page;
			lbuf->l_log = log;
			init_waitqueue_head(&lbuf->l_ioevent);

			/* push onto the head of the freelist */
			lbuf->l_freelist = log->lbuf_free;
			log->lbuf_free = lbuf;
			i++;
		}
	}

	return (0);

      error:
	/* free whatever made it onto the freelist */
	lbmLogShutdown(log);
	return -ENOMEM;
}
/*
* lbmLogShutdown()
*
* finalize per log I/O setup at lmLogShutdown()
*/
static void lbmLogShutdown(struct jfs_log * log)
{
struct lbuf *lbuf;
jfs_info("lbmLogShutdown: log:0x%p", log);
lbuf = log->lbuf_free;
while (lbuf) {
struct lbuf *next = lbuf->l_freelist;
__free_page(lbuf->l_page);
kfree(lbuf);
lbuf = next;
}
}
/*
* lbmAllocate()
*
* allocate an empty log buffer
*/
/* Pop a buffer off the per-log freelist (sleeping until one is
 * available) and prime it for log page number @pn. */
static struct lbuf *lbmAllocate(struct jfs_log * log, int pn)
{
	struct lbuf *bp;
	unsigned long flags;

	/* block until the freelist is non-empty, then take its head */
	LCACHE_LOCK(flags);
	LCACHE_SLEEP_COND(log->free_wait, (bp = log->lbuf_free), flags);
	log->lbuf_free = bp->l_freelist;
	LCACHE_UNLOCK(flags);

	/* detach from the freelist and reset per-I/O state */
	bp->l_freelist = NULL;
	bp->l_wqnext = NULL;
	bp->l_flag = 0;
	bp->l_ceor = 0;

	/* translate log page number into a device block address */
	bp->l_pn = pn;
	bp->l_blkno = log->base + (pn << (L2LOGPSIZE - log->l2bsize));

	return bp;
}
/*
* lbmFree()
*
* release a log buffer to freelist
*/
/* Locked wrapper around lbmfree() for callers that do not already
 * hold LCACHE_LOCK. */
static void lbmFree(struct lbuf * bp)
{
	unsigned long flags;

	LCACHE_LOCK(flags);

	lbmfree(bp);

	LCACHE_UNLOCK(flags);
}
/* Return a buffer to the head of its log's freelist.
 * Caller must hold LCACHE_LOCK. */
static void lbmfree(struct lbuf * bp)
{
	struct jfs_log *log = bp->l_log;

	/* a buffer still linked on the write queue must never be freed */
	assert(bp->l_wqnext == NULL);

	/* push onto the freelist head and wake any allocator waiting
	 * in lbmAllocate() */
	bp->l_freelist = log->lbuf_free;
	log->lbuf_free = bp;

	wake_up(&log->free_wait);
}
/*
* NAME: lbmRedrive
*
* FUNCTION: add a log buffer to the log redrive list
*
* PARAMETER:
* bp - log buffer
*
* NOTES:
* Takes log_redrive_lock.
*/
static inline void lbmRedrive(struct lbuf *bp)
{
	unsigned long flags;

	/* push bp onto the global redrive list under the spinlock */
	spin_lock_irqsave(&log_redrive_lock, flags);
	bp->l_redrive_next = log_redrive_list;
	log_redrive_list = bp;
	spin_unlock_irqrestore(&log_redrive_lock, flags);

	/* the jfsIO thread (jfsIOWait) will issue the actual I/O */
	wake_up_process(jfsIOthread);
}
/*
* lbmRead()
*/
/* Synchronously read log page @pn into a freshly allocated buffer,
 * returned via *bpp.  Always returns 0: an I/O error is recorded by
 * lbmIODone() in bp->l_flag (lbmERROR) rather than returned here. */
static int lbmRead(struct jfs_log * log, int pn, struct lbuf ** bpp)
{
	struct bio *bio;
	struct lbuf *bp;

	/*
	 * allocate a log buffer
	 */
	*bpp = bp = lbmAllocate(log, pn);
	jfs_info("lbmRead: bp:0x%p pn:0x%x", bp, pn);

	bp->l_flag |= lbmREAD;

	/* build a single-page bio over the buffer's page slice */
	bio = bio_alloc(GFP_NOFS, 1);

	bio->bi_sector = bp->l_blkno << (log->l2bsize - 9);
	bio->bi_bdev = log->bdev;
	bio->bi_io_vec[0].bv_page = bp->l_page;
	bio->bi_io_vec[0].bv_len = LOGPSIZE;
	bio->bi_io_vec[0].bv_offset = bp->l_offset;

	bio->bi_vcnt = 1;
	bio->bi_idx = 0;
	bio->bi_size = LOGPSIZE;

	bio->bi_end_io = lbmIODone;
	bio->bi_private = bp;
	submit_bio(READ_SYNC, bio);

	/* wait for lbmIODone() to clear lbmREAD */
	wait_event(bp->l_ioevent, (bp->l_flag != lbmREAD));

	return 0;
}
/*
* lbmWrite()
*
* buffer at head of pageout queue stays after completion of
* partial-page pageout and redriven by explicit initiation of
* pageout by caller until full-page pageout is completed and
* released.
*
* device driver i/o done redrives pageout of new buffer at
* head of pageout queue when current buffer at head of pageout
* queue is released at the completion of its full-page pageout.
*
* LOGGC_LOCK() serializes lbmWrite() by lmNextPage() and lmGroupCommit().
* LCACHE_LOCK() serializes xflag between lbmWrite() and lbmIODone()
*/
static void lbmWrite(struct jfs_log * log, struct lbuf * bp, int flag,
		     int cant_block)
{
	struct lbuf *tail;
	unsigned long flags;

	jfs_info("lbmWrite: bp:0x%p flag:0x%x pn:0x%x", bp, flag, bp->l_pn);

	/* map the logical block address to physical block address */
	bp->l_blkno =
	    log->base + (bp->l_pn << (L2LOGPSIZE - log->l2bsize));

	LCACHE_LOCK(flags);		/* disable+lock */

	/*
	 * initialize buffer for device driver
	 */
	bp->l_flag = flag;

	/*
	 * insert bp at tail of write queue associated with log
	 *
	 * (request is either for bp already/currently at head of queue
	 * or new bp to be inserted at tail)
	 */
	tail = log->wqueue;

	/* is buffer not already on write queue ? */
	if (bp->l_wqnext == NULL) {
		/* insert at tail of wqueue (circular singly-linked list;
		 * log->wqueue points at the tail) */
		if (tail == NULL) {
			log->wqueue = bp;
			bp->l_wqnext = bp;
		} else {
			log->wqueue = bp;
			bp->l_wqnext = tail->l_wqnext;
			tail->l_wqnext = bp;
		}

		tail = bp;
	}

	/* is buffer at head of wqueue and for write ? */
	if ((bp != tail->l_wqnext) || !(flag & lbmWRITE)) {
		/* not eligible for submission now: lbmIODone() will
		 * redrive it when it reaches the head of the queue */
		LCACHE_UNLOCK(flags);	/* unlock+enable */
		return;
	}

	LCACHE_UNLOCK(flags);	/* unlock+enable */

	/* submit the head of the queue in a suitable context */
	if (cant_block)
		lbmRedrive(bp);
	else if (flag & lbmSYNC)
		lbmStartIO(bp);
	else {
		/* drop LOGGC_LOCK around the submission (see header
		 * comment: LOGGC_LOCK serializes lbmWrite callers) */
		LOGGC_UNLOCK(log);
		lbmStartIO(bp);
		LOGGC_LOCK(log);
	}
}
/*
* lbmDirectWrite()
*
* initiate pageout bypassing write queue for sidestream
* (e.g., log superblock) write;
*/
static void lbmDirectWrite(struct jfs_log * log, struct lbuf * bp, int flag)
{
	jfs_info("lbmDirectWrite: bp:0x%p flag:0x%x pn:0x%x",
		 bp, flag, bp->l_pn);

	/*
	 * initialize buffer for device driver;
	 * lbmDIRECT makes lbmIODone() skip all write-queue handling
	 */
	bp->l_flag = flag | lbmDIRECT;

	/* map the logical block address to physical block address */
	bp->l_blkno =
	    log->base + (bp->l_pn << (L2LOGPSIZE - log->l2bsize));

	/*
	 * initiate pageout of the page
	 */
	lbmStartIO(bp);
}
/*
* NAME: lbmStartIO()
*
* FUNCTION: Interface to DD strategy routine
*
* RETURN: none
*
* serialization: LCACHE_LOCK() is NOT held during log i/o;
*/
/*
 * Build a single-page bio for the buffer and submit it.  For
 * no-integrity (dummy) logs the write is skipped and the completion
 * handler is invoked directly with an empty bio.
 *
 * serialization: LCACHE_LOCK() is NOT held during log i/o;
 */
static void lbmStartIO(struct lbuf * bp)
{
	struct bio *bio;
	struct jfs_log *log = bp->l_log;

	/* no trailing "\n": every other jfs_info() call in this file
	 * omits it, and the old one produced a stray blank line */
	jfs_info("lbmStartIO");

	bio = bio_alloc(GFP_NOFS, 1);
	bio->bi_sector = bp->l_blkno << (log->l2bsize - 9);
	bio->bi_bdev = log->bdev;
	bio->bi_io_vec[0].bv_page = bp->l_page;
	bio->bi_io_vec[0].bv_len = LOGPSIZE;
	bio->bi_io_vec[0].bv_offset = bp->l_offset;

	bio->bi_vcnt = 1;
	bio->bi_idx = 0;
	bio->bi_size = LOGPSIZE;

	bio->bi_end_io = lbmIODone;
	bio->bi_private = bp;

	/* check if journaling to disk has been disabled */
	if (log->no_integrity) {
		bio->bi_size = 0;
		lbmIODone(bio, 0);
	} else {
		submit_bio(WRITE_SYNC, bio);
		INCREMENT(lmStat.submitted);
	}
}
/*
* lbmIOWait()
*/
/* Wait for I/O on @bp to complete; returns -EIO if lbmIODone()
 * flagged an error, else 0.  If @flag has lbmFREE set, the buffer is
 * recycled onto the freelist while the lock is still held. */
static int lbmIOWait(struct lbuf * bp, int flag)
{
	unsigned long flags;
	int rc = 0;

	jfs_info("lbmIOWait1: bp:0x%p flag:0x%x:0x%x", bp, bp->l_flag, flag);

	LCACHE_LOCK(flags);		/* disable+lock */

	/* sleep until lbmIODone() sets lbmDONE */
	LCACHE_SLEEP_COND(bp->l_ioevent, (bp->l_flag & lbmDONE), flags);

	rc = (bp->l_flag & lbmERROR) ? -EIO : 0;

	if (flag & lbmFREE)
		lbmfree(bp);

	LCACHE_UNLOCK(flags);	/* unlock+enable */

	jfs_info("lbmIOWait2: bp:0x%p flag:0x%x:0x%x", bp, bp->l_flag, flag);
	return rc;
}
/*
* lbmIODone()
*
* executed at INTIODONE level
*/
/* Completion handler for log buffer I/O.  May run at I/O-completion
 * (interrupt) time, so any further submission is deferred to the
 * jfsIO thread via lbmRedrive(). */
static void lbmIODone(struct bio *bio, int error)
{
	struct lbuf *bp = bio->bi_private;
	struct lbuf *nextbp, *tail;
	struct jfs_log *log;
	unsigned long flags;

	/*
	 * get back jfs buffer bound to the i/o buffer
	 */
	jfs_info("lbmIODone: bp:0x%p flag:0x%x", bp, bp->l_flag);

	LCACHE_LOCK(flags);		/* disable+lock */

	bp->l_flag |= lbmDONE;

	if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) {
		bp->l_flag |= lbmERROR;

		jfs_err("lbmIODone: I/O error in JFS log");
	}

	/* the bio is finished with; all state lives in bp from here on */
	bio_put(bio);

	/*
	 * pagein completion
	 */
	if (bp->l_flag & lbmREAD) {
		bp->l_flag &= ~lbmREAD;

		LCACHE_UNLOCK(flags);	/* unlock+enable */

		/* wakeup I/O initiator (sleeping in lbmRead) */
		LCACHE_WAKEUP(&bp->l_ioevent);

		return;
	}

	/*
	 * pageout completion
	 *
	 * the bp at the head of write queue has completed pageout.
	 *
	 * if single-commit/full-page pageout, remove the current buffer
	 * from head of pageout queue, and redrive pageout with
	 * the new buffer at head of pageout queue;
	 * otherwise, the partial-page pageout buffer stays at
	 * the head of pageout queue to be redriven for pageout
	 * by lmGroupCommit() until full-page pageout is completed.
	 */
	bp->l_flag &= ~lbmWRITE;
	INCREMENT(lmStat.pagedone);

	/* update committed lsn */
	log = bp->l_log;
	log->clsn = (bp->l_pn << L2LOGPSIZE) + bp->l_ceor;

	/* sidestream (superblock) writes never touch the write queue */
	if (bp->l_flag & lbmDIRECT) {
		LCACHE_WAKEUP(&bp->l_ioevent);
		LCACHE_UNLOCK(flags);
		return;
	}

	tail = log->wqueue;

	/* single element queue */
	if (bp == tail) {
		/* remove head buffer of full-page pageout
		 * from log device write queue
		 */
		if (bp->l_flag & lbmRELEASE) {
			log->wqueue = NULL;
			bp->l_wqnext = NULL;
		}
	}
	/* multi element queue */
	else {
		/* remove head buffer of full-page pageout
		 * from log device write queue
		 */
		if (bp->l_flag & lbmRELEASE) {
			nextbp = tail->l_wqnext = bp->l_wqnext;
			bp->l_wqnext = NULL;

			/*
			 * redrive pageout of next page at head of write queue:
			 * redrive next page without any bound tblk
			 * (i.e., page w/o any COMMIT records), or
			 * first page of new group commit which has been
			 * queued after current page (subsequent pageout
			 * is performed synchronously, except page without
			 * any COMMITs) by lmGroupCommit() as indicated
			 * by lbmWRITE flag;
			 */
			if (nextbp->l_flag & lbmWRITE) {
				/*
				 * We can't do the I/O at interrupt time.
				 * The jfsIO thread can do it
				 */
				lbmRedrive(nextbp);
			}
		}
	}

	/*
	 * synchronous pageout:
	 *
	 * buffer has not necessarily been removed from write queue
	 * (e.g., synchronous write of partial-page with COMMIT):
	 * leave buffer for i/o initiator to dispose
	 */
	if (bp->l_flag & lbmSYNC) {
		LCACHE_UNLOCK(flags);	/* unlock+enable */

		/* wakeup I/O initiator (sleeping in lbmIOWait) */
		LCACHE_WAKEUP(&bp->l_ioevent);
	}

	/*
	 * Group Commit pageout:
	 */
	else if (bp->l_flag & lbmGC) {
		LCACHE_UNLOCK(flags);
		lmPostGC(bp);
	}

	/*
	 * asynchronous pageout:
	 *
	 * buffer must have been removed from write queue:
	 * insert buffer at head of freelist where it can be recycled
	 */
	else {
		assert(bp->l_flag & lbmRELEASE);
		assert(bp->l_flag & lbmFREE);
		lbmfree(bp);

		LCACHE_UNLOCK(flags);	/* unlock+enable */
	}
}
/* jfsIO kernel thread: issues log buffer writes queued by
 * lbmRedrive(), which may be called from interrupt context where the
 * I/O cannot be submitted directly (see lbmIODone).  Sleeps when the
 * redrive list is empty; cooperates with the freezer. */
int jfsIOWait(void *arg)
{
	struct lbuf *bp;

	do {
		spin_lock_irq(&log_redrive_lock);
		while ((bp = log_redrive_list)) {
			log_redrive_list = bp->l_redrive_next;
			bp->l_redrive_next = NULL;
			/* drop the lock while doing the actual I/O */
			spin_unlock_irq(&log_redrive_lock);
			lbmStartIO(bp);
			spin_lock_irq(&log_redrive_lock);
		}

		if (freezing(current)) {
			spin_unlock_irq(&log_redrive_lock);
			refrigerator();
		} else {
			/* set state before unlocking so a concurrent
			 * wake_up_process is not lost */
			set_current_state(TASK_INTERRUPTIBLE);
			spin_unlock_irq(&log_redrive_lock);
			schedule();
			__set_current_state(TASK_RUNNING);
		}
	} while (!kthread_should_stop());

	jfs_info("jfsIOWait being killed!");
	return 0;
}
/*
* NAME: lmLogFormat()/jfs_logform()
*
* FUNCTION: format file system log
*
* PARAMETERS:
* log - volume log
* logAddress - start address of log space in FS block
* logSize - length of log space in FS block;
*
* RETURN: 0 - success
* -EIO - i/o error
*
* XXX: We're synchronously writing one page at a time. This needs to
* be improved by writing multiple pages at once.
*/
int lmLogFormat(struct jfs_log *log, s64 logAddress, int logSize)
{
	int rc = -EIO;		/* default: any I/O failure below */
	struct jfs_sb_info *sbi;
	struct logsuper *logsuper;
	struct logpage *lp;
	int lspn;		/* log sequence page number */
	struct lrd *lrd_ptr;
	int npages = 0;
	struct lbuf *bp;

	jfs_info("lmLogFormat: logAddress:%Ld logSize:%d",
		 (long long)logAddress, logSize);

	sbi = list_entry(log->sb_list.next, struct jfs_sb_info, log_list);

	/* allocate a log buffer (reused for every page written below) */
	bp = lbmAllocate(log, 1);

	npages = logSize >> sbi->l2nbperpage;

	/*
	 * log space:
	 *
	 * page 0 - reserved;
	 * page 1 - log superblock;
	 * page 2 - log data page: A SYNC log record is written
	 *	    into this page at logform time;
	 * pages 3-N - log data page: set to empty log data pages;
	 */
	/*
	 * init log superblock: log page 1
	 */
	logsuper = (struct logsuper *) bp->l_ldata;

	logsuper->magic = cpu_to_le32(LOGMAGIC);
	logsuper->version = cpu_to_le32(LOGVERSION);
	logsuper->state = cpu_to_le32(LOGREDONE);
	logsuper->flag = cpu_to_le32(sbi->mntflag);	/* ? */
	logsuper->size = cpu_to_le32(npages);
	logsuper->bsize = cpu_to_le32(sbi->bsize);
	logsuper->l2bsize = cpu_to_le32(sbi->l2bsize);
	logsuper->end = cpu_to_le32(2 * LOGPSIZE + LOGPHDRSIZE + LOGRDSIZE);

	/* synchronous sidestream write of page 1 */
	bp->l_flag = lbmWRITE | lbmSYNC | lbmDIRECT;
	bp->l_blkno = logAddress + sbi->nbperpage;
	lbmStartIO(bp);
	if ((rc = lbmIOWait(bp, 0)))
		goto exit;

	/*
	 * init pages 2 to npages-1 as log data pages:
	 *
	 * log page sequence number (lpsn) initialization:
	 *
	 * pn:   0     1      2      3                 n-1
	 *       +-----+-----+=====+=====+===.....===+=====+
	 * lspn:             N-1   0     1           N-2
	 *                   <--- N page circular file ---->
	 *
	 * the N (= npages-2) data pages of the log is maintained as
	 * a circular file for the log records;
	 * lpsn grows by 1 monotonically as each log page is written
	 * to the circular file of the log;
	 * and setLogpage() will not reset the page number even if
	 * the eor is equal to LOGPHDRSIZE. In order for binary search
	 * still work in find log end process, we have to simulate the
	 * log wrap situation at the log format time.
	 * The 1st log page written will have the highest lpsn. Then
	 * the succeeding log pages will have ascending order of
	 * the lspn starting from 0, ... (N-2)
	 */
	lp = (struct logpage *) bp->l_ldata;
	/*
	 * initialize 1st log page to be written: lpsn = N - 1,
	 * write a SYNCPT log record is written to this page
	 */
	lp->h.page = lp->t.page = cpu_to_le32(npages - 3);
	lp->h.eor = lp->t.eor = cpu_to_le16(LOGPHDRSIZE + LOGRDSIZE);

	lrd_ptr = (struct lrd *) &lp->data;
	lrd_ptr->logtid = 0;
	lrd_ptr->backchain = 0;
	lrd_ptr->type = cpu_to_le16(LOG_SYNCPT);
	lrd_ptr->length = 0;
	lrd_ptr->log.syncpt.sync = 0;

	bp->l_blkno += sbi->nbperpage;
	bp->l_flag = lbmWRITE | lbmSYNC | lbmDIRECT;
	lbmStartIO(bp);
	if ((rc = lbmIOWait(bp, 0)))
		goto exit;

	/*
	 * initialize succeeding log pages: lpsn = 0, 1, ..., (N-2)
	 */
	for (lspn = 0; lspn < npages - 3; lspn++) {
		lp->h.page = lp->t.page = cpu_to_le32(lspn);
		lp->h.eor = lp->t.eor = cpu_to_le16(LOGPHDRSIZE);

		bp->l_blkno += sbi->nbperpage;
		bp->l_flag = lbmWRITE | lbmSYNC | lbmDIRECT;
		lbmStartIO(bp);
		if ((rc = lbmIOWait(bp, 0)))
			goto exit;
	}

	rc = 0;
exit:
	/*
	 * finalize log
	 */
	/* release the buffer */
	lbmFree(bp);

	return rc;
}
#ifdef CONFIG_JFS_STATISTICS
/* seq_file show callback: dump the global log manager counters
 * (lmStat) for the lmstats proc entry. */
static int jfs_lmstats_proc_show(struct seq_file *m, void *v)
{
	seq_printf(m,
		       "JFS Logmgr stats\n"
		       "================\n"
		       "commits = %d\n"
		       "writes submitted = %d\n"
		       "writes completed = %d\n"
		       "full pages submitted = %d\n"
		       "partial pages submitted = %d\n",
		       lmStat.commit,
		       lmStat.submitted,
		       lmStat.pagedone,
		       lmStat.full_page,
		       lmStat.partial_page);
	return 0;
}
/* seq_file open callback: single_open wires up the show routine */
static int jfs_lmstats_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, jfs_lmstats_proc_show, NULL);
}

/* file operations for the lmstats proc entry */
const struct file_operations jfs_lmstats_proc_fops = {
	.owner		= THIS_MODULE,
	.open		= jfs_lmstats_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
#endif /* CONFIG_JFS_STATISTICS */
| {
"pile_set_name": "Github"
} |
.. meta::
:description: Bu bölümde threading modülünü inceleyeceğiz.
:keywords: python, modül, import, threading
.. highlight:: python3
****************
threading Modülü
****************
**Kaynak Kodu:** https://hg.python.org/cpython/file/3.5/Lib/threading.py
Bu modül; düşük seviyeli `_thread` modülü üzerine, yüksek seviyeli iş parçacığı yürütüm ara yüzleri inşa eder. Ayrıca `queue` modülüne de bakınız.
`dummy_threading` modülü, `_thread`'in kayıp olmasından ötürü `threading`'in kullanılamadığı durumlar için sağlanmıştır.
**Not:** Aşağıda listelenmemişken, Python 2.x serilerindeki bu modülün bazı metotlarının ve fonksiyonlarının kullandığı camelCase isimler hala bu modül tarafından desteklenmektedir.
Bu modül aşağıdaki fonksiyonları tanımlar:
**threading.active_count()**
Hazır çalışmakta olan iş parçacığı (Thread) nesnelerinin sayısını geri döndürür. Geri dönen değer `enumerate()` tarafından döndürülen listenin uzunluğuna eşittir.
**threading.current_thread()**
Çağıranın kontrol dizesine karşılık gelen iş parçacığı nesnesini geri döndürür. Eğer çağıranın kontrol dizesi `threading` modülü vasıtasıyla oluşturulmamışsa, işlevselliği sınırlandırılmış bir kukla (dummy) iş parçacığı (thread) nesnesi geri döndürülür.
**threading.get_ident()**
Şimdiki iş parçacığının (Thread'in) iş parçacığı tanımlayıcısını (thread identifier’ı) geri döndürür. Bu, sıfır olmayan bir tam sayıdır. Değerinin doğrudan bir anlamı yoktur; sihirli bir çerez olarak kullanılmak üzere tasarlanmıştır, örneğin iş parçacıklarına özgü verilerden oluşan bir sözlüğü dizinlemek için.
Sürüm 3.3.'de gelmiştir.
**threading.enumerate()**
Hazır çalışmakta olan bütün iş parçacığı (Thread) nesnelerinin listesini geri döndürür. Liste daemonic (kullanıcının doğrudan kontrolünde olmayıp arka planda çalışan) iş parçacıklarını, `current_thread()` tarafından oluşturulmuş dummy (kukla) iş parçacıklarını ve ana iş parçacığını içerir. Listeye sonlandırılmış iş parçacıkları ve henüz başlatılmamış iş parçacıkları dâhil edilmez.
**threading.main_thread()**
Ana iş parçacığı (main-thread) nesnesini geri döndürür. Normal durumlarda, ana iş parçacığı Python yorumlayıcısı tarafından başlatılmış olan iş parçacığıdır.
Sürüm 3.4.'de gelmiştir.
**threading.settrace(func)**
Threading modülünden başlatılan bütün iş parçacıkları için bir tane izleyici fonksiyon ayarlar. Func yazan yere, her bir iş parçacığı için, `run()` metodu çağrılmadan önce, `sys.settrace()` gelecektir.
**threading.setprofile(func)**
Threading modülünden başlatılan bütün iş parçacıkları için bir tane kesit fonksiyonu ayarlar. Func yazan yere, her bir iş parçacığı için, `run()` metodu çağrılmadan önce, `sys.setprofile()` gelecektir.
**threading.stack_size([size])**
Yeni iş parçacıkları oluştururken, kullanılan iş parçacığı yığın boyutunu geri döndürür. Seçeneğe bağlı olan *size* argümanı daha sonradan oluşturulacak iş parçacıkları için yığın boyutunu belirtir ve -platform kullanımında veya ön tanımlı ayar olarak- değeri 0 veya 32,768 (32 KiB)'den büyük pozitif bir tamsayı olmalıdır. Eğer *size* argümanı tanımlanmazsa, değeri 0 olur. Eğer iş parçacığının yığın boyutunun değiştirilmesi desteklenmezse, bir `RuntimeError` hatası yükseltilir. Eğer tanımlanmış yığın boyutu geçersiz ise, `ValueError` hatası yükseltilir ve yığın boyutu değiştirilmemiş olur. 32 KiB, yorumlayıcıya yeterli yığın alanı temin etmek için yığın boyutunun desteklenen geçerli minimum değeridir. Bazı platformların, yığın boyutunun değeri üzerinde, kendilerine özgü bir takım sınırlamaları vardır. Örneğin yığın boyutunun 32 KiB'den büyük olmasının gerekliliği veya sistem hafıza sayfası boyutunun katlarının paylaştırılmasının gerekliliği gibi - platform belgesi daha fazla bilgi vermesi için referans gösterilebilir - (4 KiB'lik sayfalar yaygındır; yığın boyutunun 4096'nın katları olarak kullanılması, daha özel bilgilerin olmaması durumunda önerilen bir yaklaşımdır.) Kullanılabilen platformlar: Windows, POSIX iş parçacığı ile çalışan sistemler.
Bu modül ayrıca aşağıdaki sabiti de tanımlar:
**threading.TIMEOUT_MAX**
`Lock.acquire()`, `RLock.acquire()`, `Condition.wait()` vb. gibi engelleyici fonksiyonların zaman aşımı parametreleri için maksimum değere izin verilir.
Sürüm 3.2'de gelmiştir.
Bu modül aşağıdaki kısımda ayrıntıları verilen birkaç sınıfı tanımlar.
Bu modülün tasarımı yaklaşık olarak Java'nın threading modeli üzerine temellenmiştir. Ancak bununla birlikte, Java'daki her nesnenin temel davranışında olan kilit ve durum değişkenleri, Python'da ayrı nesnelerdir. Python'daki Thread sınıfı Java'daki Thread sınıfının davranışını bir alt set olarak destekler; şimdilik ne bir öncelik, ne bir iş parçacığı grubu vardır. İş parçacıkları yok edilemez, durdurulamaz, yasaklanamaz, devam ettirilemez ve sonlandırılamaz. Java'nın Thread sınıfının statik metotları, uygulandığında, modül düzeyindeki fonksiyonlarla eşleştirilir.
Aşağıda açıklanmış metotların hepsi otomatik olarak çalıştırılır.
Yerel İş Parçacığı (Thread-Local) Verisi
=========================================
Yerel iş parçacığı (thread-local) verisi, değeri iş parçacığı olarak belirlenmiş bir değerdir. Yerel iş parçacığı verisini yönetmek için, sadece yerel sınıftan (veya bir alt sınıftan) bir tane örnek oluşturulur ve özellikler bu sınıfta tutulur::
yerel_veri = threading.local()
yerel_veri.x = 1
Ayrı iş parçacıkları için örneğin değeri değişik olacaktır.
**class threading.local**
Yerel iş parçacığı verisini temsil eden sınıftır.
Daha fazla ayrıntı ve geniş örnekler için, *_threading_local* modülünün belge dizisine bakın.
İş Parçacığı (Thread) Nesneleri
================================
İş parçacığı (thread) sınıfı, ayrı iş parçacıklarını kontrol eden bir etkinliği temsil eder. Bu etkinliği belirtmek için iki yol vardır: yapıcıya, çağrılabilir bir nesne atamak veya bir alt sınıfta `run()` metodunu iptal etmek. Yapıcı dışında hiçbir metot bir alt sınıfta iptal edilmemelidir. Başka bir deyişle, bu sınıfın sadece `__init__()` ve `run()` metotları iptal edilir.
Bir iş parçacığı (thread) nesnesi oluşturulduğunda, bu nesnenin etkinliği, iş parçacığının `start()` metodu çağrılarak başlatılmalıdır. Bu ayrılmış bir iş parçacığının kontrolündeki `run()` metodunu çalıştırır.
Bir iş parçacığı (thread) başlatıldığında, iş parçacığı 'canlanmış' olarak kabul edilir. Normalde bu iş parçacığının `run()` metodu sonlandığında, iş parçacığının canlılığı da sonlanır - veya yürütülemeyen bir beklenti yükseltilir-. İş parçacığının canlı olup olmadığını `is_alive()` metodu test eder.
Diğer iş parçacıkları, bir iş parçacığının `join()` metodunu çağırabilir. Bu metot, çağrılan iş parçacığını, `join()` metodu çağrılan iş parçacığı sonlana kadar engeller.
Bir iş parçacığının bir ismi vardır ve ismi yapıcıya atanabilir ve 'name' özelliği vasıtasıyla okunabilir veya değiştirilebilir.
Bir iş parçacığı *daemon iş parçacığı (=daemon thread)* olarak işaretlenir. Bu işaretin önemi, sadece daemon iş parçacığı kaldığında bütün Python programının sonlanmasıdır. İşaretin başlangıç değeri, oluşturulmuş olan iş parçacığından miras alınır. İşaret, daemon özelliği (property) veya daemon'un yapıcı argümanı tarafından ayarlanabilir.
**Not:** Daemon iş parçacıkları bilgisayar kapatıldığında ani bir şekilde sonlanır. Açılmış dosyalar, veritabanı hareketleri gibi birçok kaynak, düzgün bir şekilde serbest bırakılmayabilir. Eğer iş parçacıklarının düzgün bir şekilde durmasını istiyorsanız, onları non-daemonic (daemonic olmayacak şekilde) ayarlayın ve Event gibi uygun bir sinyal mekanizması kullanın.
Python programında bir tane ana iş parçacığı (main-thread) nesnesi vardır ve bu nesne başlangıçtaki iş parçacığının kontrol edilmesine yarar. Bu nesne bir daemon iş parçacığı değildir.
Kukla iş parçacığı nesnelerinin (dummy thread objects) oluşturulma ihtimali vardır. Bunlar yabancı olarak kabul edilebilecek, kontrolleri threading modülünün dışında olan C kodları gibi iş parçacıklarıdır. Kukla iş parçacıklarının sınırlı işlevsellikleri vardır; daima canlı ve daemonic özelliktedirler ve `join()` ve diğerleri ile kullanılamazlar. Yabancı iş parçacıklarının sonlandırılmalarının saptanmasının imkânsız olduğu sürece asla silinemezler.
**class threading.Thread(group=None, target=None, name=None, args=(), kwargs={}, *,**
**daemon=None)**
Bu yapıcı her zaman anahtar kelime argümanlarıyla birlikte çağrılmalıdır. Argümanlar şunlardır:
**group:** Değeri, `None` olmalıdır. `ThreadGroupClass` uygulandığında, gelecekteki genişletme için saklanır.
**target:** Değeri, `run()` metodu tarafından çalıştırılan, çağrılabilir bir nesnedir. Değeri ön tanımlı olarak `None` olur ve değeri `None` olursa hiçbir şeyin çağrılmayacağı anlamına gelir.
**name:** İş parçacığının ismidir. Ön tanımlı değeri özel olarak "Thread-N" biçiminden yapılmıştır. Buradaki N'nin değeri küçük ondalık bir sayıdır.
**args:** Hedefin yürütülmesi için demet veri tipinde bir argümandır. Ön tanımlı olarak boş bir demet verisidir.
**kwargs:** Hedefin yürütülmesi için sözlük veri tipinde bir anahtar kelime argümanıdır. Ön tanımlı olarak boş bir sözlük verisidir.
**daemon:** Eğer değeri `None` değilse, daemon, bir iş parçacığının bariz bir şekilde daemonic olup olmadığını ayarlar. Şayet değeri ön tanımlı olarak bırakılırsa (yani değeri `None` olursa), daemonic özellik o andaki aktif iş parçacığından miras alınır.
Eğer bir alt sınıf yapıcıyı iptal ederse, iş parçacığı ile bir işlem yapmadan önce, temel sınıfın yapıcısının (`Thread.__init__()`'in) çalıştırılmış olduğundan emin olunması gerekir.
Sürüm 3.3.'de değiştirildi. Daemon argümanı eklendi.
**start()**
İş parçacığının etkinliğini başlatır.
Her bir iş parçacığı için bir kez çağrılması gerekir. Ayrılmış iş parçacığı kontrolü içinde, `run()` metodunun çalıştırılmasını ayarlar.
Bir iş parçacığı için, bu metot birden çok çağrıldığında, bir `RuntimeError` hatası yükseltir.
**run()**
İş parçacığının etkinliğini temsil eder.
Bu metodu, bir alt sınıfta iptal edebilirsiniz. Standart `run()` metodu, *target* argümanı olarak bilinen nesnenin yapıcısına atanmış çağrılabilir nesneyi, varsa *args* ve *kwargs* argümanlarından alınan ardışık ve anahtar kelimeli argümanlarla birlikte sırasıyla çalıştırır.
**join(timeout=None)**
İş parçacığı sonlana kadar bekler. Bu; `join()` metodu çağrılan iş parçacığı ya normal olarak, ya yürütülemeyen bir beklenti vasıtasıyla ya da seçeneğe bağlı zaman aşımı gerçekleşip sonlana kadar, çağrılan başka bir iş parçacığını bloke eder.
*timeout* (zaman aşımı) argümanı hazır olduğunda ve değeri `None` olmadığında, işlemin zaman aşımını saniye olarak belirten, kayan noktalı bir sayı olmalıdır. `join()` her zaman `None` değerini geri döndürdüğü için, bir zaman aşımının gerçekleşip gerçekleşmediğine karar vermek için `join()` sonrasında `is_alive()` metodunu çağırın. Şayet iş parçacığı halen canlı ise, `join()`’in çağrılması zaman aşımına uğrar.
*timeout* argümanı verilmediğinde veya değeri `None` olduğunda, işlem, iş parçacığı sonlana kadar bloke olacaktır.
Bir iş parçacığı için birçok kez `join()` metodu çağrılabilir.
Bir girişim, hali hazırdaki iş parçacığını bir çıkmaza sokarsa, `join()` metodu bir `RuntimeError` hatası yükseltir. Aynı hata, bir iş parçacığı başlatılmadan önce `join()` metodu çağrılırsa da yükseltilir.
**name**
Sadece tanımlama amaçları için bir karakter dizisi (string) kullanılır. Bir anlamı yoktur. Çoklu iş parçacıklarına aynı isim verilebilir. Başlangıç ismi yapıcı tarafından ayarlanır.
**getName()**
**setName()**
İsim için eski program uygulama ara yüzü alıcısı/ayarlayıcısı. Bunlar yerine doğrudan `name` özelliğini (property) kullanın.
**ident**
İş parçacığının tanıtlayıcısıdır veya eğer bir iş parçacığı başlatılmamışsa değeri `None`’dır. Değeri sıfır olmayan bir tamsayıdır. Daha fazla bilgi için `_thread.get_ident()` fonksiyonuna bakın. İş parçacığı tanıtlayıcıları, bir iş parçacığı sonlandığında ve başka bir tanesi oluşturulduğunda geri dönüştürülebilir. İş parçacığı sonlandıktan sonra bile tanıtlayıcı kullanılabilir.
**is_alive()**
Bir iş parçacığının aktif olup olmadığının öğrenilmesini sağlar.
Bu metot; `run()` metodunun başlamasından önce ve `run()` metodunun sonlanmasına kadar `True` değerini geri döndürür. `enumerate()` modül fonksiyonu bütün canlı iş parçacıklarının bir listesini geri döndürür.
**daemon**
Bir iş parçacığının, bir daemon iş parçacığı olup olmadığının belirleyen bir boolean (`True` veya `False`) değeridir. Bu özellik `start()` metodu çağrılmadan önce ayarlanmalıdır aksi halde bir `RuntimeError` hatası yükseltilir. Başlangıçtaki değeri, oluşturulan iş parçacığından miras alınır; ana iş parçacığı bir daemon iş parçacığı değildir, böylece ana iş parçacığı içinde oluşturulan bütün iş parçacıklarının daemon değeri ön tanımlı olarak `False` olur.
Geriye, cansız, daemon olmayan iş parçacıkları kaldığında, bütün Python programı sonlandırılır.
**isDaemon()**
**setDaemon()**
Daemon için eski alıcı/ayarlayıcı program uygulama ara yüzü; bunlar yerine `daemon`'u doğrudan bir özellik (property) olarak kullanın.
**CPython Uygulaması Hakkında Ayrıntı:** CPython’da, Global Yorumlayıcı Kilidinden (Global Interpreter Lock) ötürü yalnızca bir adet iş parçacığı bir kere Python kodunu çalıştırabilir (belirli performans odaklı kütüphanelerin bu kısıtlamanın üstesinden gelmesine rağmen). Eğer uygulamanızın çok çekirdekli makinelerin hesaplama kaynaklarından daha fazla yararlanmasını istiyorsanız `multiprocessing`’i veya `concurrent.futures.ProcessPoolExecutor`’u kullanmanız tavsiye edilir. Yine de çoklu girdi/çıktı görevlerini eş zamanlı olarak çalıştırmak istiyorsanız, `threading` bunun için halen uygun bir modeldir.
Lock (Kilit) Nesneleri
=======================
Bir ilkel kilit, kilitlendiğinde belirli bir iş parçacığına ait olmayan, bir eşzamanlama ilkelidir. Bu kilit, Python’da, doğrudan `_thread` uzantı modülünden uyarlanan, hali hazırda kullanılabilir olan en düşük seviyedeki eşzamanlama ilkelidir.
Bir ilkel kilitin “kilitli (=locked)” ve “kilitli değil (=unlocked)” olmak üzere iki tane durumu vardır. Bu kilit oluşturulurken “kilitli değil” durumundadır. Kilidin iki tane temel metodu vardır; `acquire()` ve `release()`. Kilidin durumu “kilitli değil” olduğunda, `acquire()` durumu “kilitli” hale çevirir ve acil olarak geri döndürülür. Kilidin durumu “kilitli” olduğunda, bir başka iş parçacığında `release()` çağrılıp, durumu “kilitli değil” şeklinde değiştirene kadar, `acquire()` iş parçacığını bloke eder, daha sonra `acquire()` çağrısı kilidi “kilitli” şeklinde sıfırlar ve geri döndürür. `release()` metodu, kilit sadece “kilitli” durumda iken çağrılmalıdır; bu metot, kilidin durumunu “kilitli değil” diye değiştirir ve acil olarak geri döndürülür. Şayet bir girişim kilitli olmayan bir kilidi serbest bırakmaya çalışırsa, bir adet `RuntimeError` hatası yükseltilir.
Kilitler ayrıca içerik yönetim protokolünü de desteklerler.
Birden fazla iş parçacığı `acquire()` ile bloke edilip, kilit durumlarının “kilitli değil” şeklinde değişmesi beklendiğinde, sadece bir iş parçacığının kilidi, `release()` çağrısıyla “kilitli değil” durumuna getirilir; bekleyen iş parçacıklarından hangisinin getirileceği tanımlı değildir ve uygulamalara bağlı olarak değişiklik gösterebilir.
Tüm metotlar otomatik olarak yürütülür.
**class threading.Lock**
Sınıf, ilkel kilit nesnelerini uyarlar. Bir kez bir iş parçacığına kilit kazandırıldığında, sonraki girişimler, kilit serbest bırakılana kadar, iş parçacığını bloke eder; herhangi bir iş parçacığı kilidi serbest bırakabilir.
Sürüm 3.3.’de değiştirildi. Kurucu fonksiyondan bir sınıfa değiştirildi.
**acquire(blocking=True, timeout=-1)**
Bloklayan veya bloklamayan bir kilit kazandırır.
*blocking* argümanı `True` olarak (ön tanımlı değerdir) çağrıldığında, kilit serbest bırakalana kadar iş parçacığını bloke eder ve sonra kilidi tekrar “kilitli” konuma getirir ve `True` değerini geri döndürür.
*blocking* argümanı `False` olarak çağrıldığında, iş parçacığını bloke etmez. Şayet bir çağrı *blocking*’i `True` olarak ayarlarsa, iş parçacığını bloke eder ve acil olarak `False` değerini geri döndürür; diğer türlü, kilidi “kilitli” duruma getirir ve `True` değerini döndürür.
Kayan noktalı *timeout* (zaman aşımı) argümanı pozitif bir değer alarak çağrıldığında, en çok *timeout* argümanında belirtilen değere kadar, kilitlenemediği sürece iş parçacığını bloke eder. *timeout* argümanının -1 olması sınırsız bir bekleme süresi olacağını belirtir. Blocking argümanı `False` ayarlandığında, bir *timeout* argümanı belirlemek yasaklanmıştır.
İş parçacığı başarıyla kilitlenmişse, geri dönen değer `True` olur, şayet başarıyla kilitlenmemişse `False` olur (örneğin zaman aşımına uğramışsa).
Sürüm 3.2.’de değiştirildi. *timeout* parametresi yenidir.
Sürüm 3.2.’de değiştirildi. Kilitleme POSIX’te sinyaller tarafından şimdi iptal edilebilir.
**release()**
Bir kilidi serbest bırakır. Bu metot, kilitlenmiş bir iş parçacığı hariç her iş parçacığından çağrılabilir.
Kilit “kilitli” duruma getirildiğinde, onu “kilitli değil” şeklinde değiştirir ve geri döndürür. Eğer başka iş parçacıkları, kilitlerinin “kilitli değil” şeklinde değişmelerini bekleyerek bloke edilmişse, ilerlemek için kesin olarak bir tanesine izin verin.
Kilitli olmayan bir kilit çağrıldığında, bir `RuntimeError` hatası yükseltilir.
Bu metot ile geri dönen bir değer yoktur.
Rlock (Yeniden Girilir Kilit) Nesneleri
========================================
Bir yeniden girilir kilit, aynı iş parçacığı tarafından bir çok kere kullanıma sokulabilen bir eş zamanlama ilkelidir. Dahili olarak, bu kilit, ilkel kilitlerin kullandığı kilitli/kilitli değil durumuna ilaveten “sahip olunan iş parçacığı” ve “recursion (öz yineleme)” kavramlarını kullanır. Kilitli durumda, bazı iş parçacıkları bu kilide sahip olurken; kilitli olmadığı durumda, hiçbir iş parçacığı bu kilide sahip değildir.
Kilidi kilitlemek için, iş parçacığı bu kilidin `acquire()` metodunu çağırır; bu işlem iş parçacığının kilide sahip olduğunu bir kez geri döndürür. Kilidi açmak için, iş parçacığı kilidin `release()` metodunu çağırır. `acquire()` / `release()` çağrı çiftleri iç içe geçebilir; sadece son `release()` çağrısı (en dıştaki çağrı çiftinden olan `release()`) kilidi “kilitli değil” duruma getirir ve `acquire()` ile bloklanmış diğer iş parçacığının ilerlemesi için izin verir.
Yeniden girilir kilitler ayrıca içerik yönetim protokolünü desteklerler.
**class threading.RLock**
Bu sınıf yeniden girilir kilit nesnelerini uygular. Bir yeniden girilir kilit, onu edinmiş bir iş parçacığı tarafından serbest bırakılmalıdır. Bir iş parçacığı bir kez yeniden girilir bir kilidi edindiğinde, aynı iş parçacığı kilidi engellemeden tekrar edinebilir; iş parçacığı, kilidi her edinmesine karşılık bir kez onu serbest bırakmalıdır.
`RLock`’ın, platform tarafından desteklenen, `RLock` sınıfının elle tutulur en etkili versiyonunu geri döndüren bir kurucu fonksiyonu olduğunu not edin.
**acquire(blocking=True, timeout=-1)**
Bloklayan ve bloklamayan bir kilit edinin.
Argümansız çağrıldığında: Eğer bu iş parçacığı zaten kilide sahipse, öz-yineleme seviyesini 1 derece arttırır ve ani bir şekilde geri döndürür. Diğer türlü, eğer başka bir iş parçacığı bu kilide sahipse, kilit çözülene kadar iş parçacığını engeller. Eğer bir kez -hiç bir iş parçacığının sahibi olmadığı- bir kilit açılmışsa, sahibini yakalar, öz-yineleme değerini 1 olarak ayarlar ve geri döndürülür. Eğer birden fazla iş parçacığı kilit açılana kadar engelleniyorsa, her seferinde sadece bir tane iş parçacığı bu kilide sahip olacaktır. Bu durumda geri dönen bir değer olmaz.
*blocking* argümanı `True` olarak ayarlanıp çağrılırsa, argümansız çağrıldığında yaptıklarının aynısını yapar ve `True` değeri geri döndürülür.
*blocking* argümanı `False` olarak ayarlanıp çağrılırsa, iş parçacığını bloke etmez. Eğer argümanı olmayan bir çağrı engellenirse, hızlı bir şekilde `False` değeri geri döndürülür; diğer türlü, argümansız çağrıldığında yaptıklarının aynısını yapar ve `True` değeri geri döndürülür.
*timeout* argümanı pozitif bir kayan noktalı sayı olarak ayarlanıp çağrılırsa, iş parçacığı *timeout* argümanında belirlenen saniye kadar kilidi tekrar edinemediği sürece engellenir. Kilit edinilmişse `True` değerini geri döner, *timeout* zamanı dolmuşsa `False` değeri geri döner.
Sürüm 3.2.’de değiştirildi. *timeout* parametresi yenidir.
**release()**
Bir kilidi serbest bırakır, öz yineleme (recursion) seviyesini azaltır. Öz yineleme değeri, azaltımdan sonra sıfır olursa, (hiç bir iş parçacığı tarafından sahip olunmayan) kilidi "kilitli değil" şeklinde sıfırlar ve diğer iş parçacıkları kilidin açılmasını beklemek için engellenirse, bu iş parçacıklarından kesinlikle bir tanesine işlenmesi için izin verir. Eğer öz yineleme seviyesi azaltımdan sonra halen sıfır olmamışsa, kilit "kilitli" duruma gelir ve çağrılan iş parçacığı tarafından sahiplenilir.
Bu yöntemi sadece çağrılan iş parçacığı bir kilide sahip olduğu zaman çağırın. Eğer kilit, açık durumda ise, bu yöntemi çağırmak bir `RuntimeError` hatası yükseltir.
Geri dönen bir değer yoktur.
Condition (Durum) Nesneleri
============================
Bir durum değişkeni her zaman bir kilitle ilişkilidir; bu değişken içeri aktarılabilir veya varsayılan olarak bir tane oluşturulabilir. Bir tanesini içeri aktarmak, bir kaç durum nesnesi aynı kilidi ortaklaşa kullandığında kullanışlıdır. Kilit, durum nesnesinin bir parçasıdır: onu ayrı olarak izleyemezsiniz.
Bir durum nesnesi, içerik yönetim protokolüne uyar: Ekli engelleme süresi için durum değişkenini `with` deyimi ile birlikte kullanmak ilgili kilidi elde edilmesini sağlar. `acquire()` ve `release()` yöntemleri ayrıca bahsi geçen kilitle ilgili olan yöntemleri çağırır.
Diğer yöntemler tutulan kilitle birlikte çağrılmalıdır. `wait()` yöntemi kilidi serbest bırakır ve sonra iş parçacığı onu `notify()` veya `notify_all()` ile çağırıp uyandırana kadar, iş parçacığını engeller. Bir kez uyandırıldığında, `wait()` onu yeniden edinir ve geri döndürür. Ayrıca bir zaman aşımı süresi belirlemek de mümkündür.
`notify()` yöntemi, eğer iş parçacıklarının herhangi biri bekliyorsa, durum değişkenini bekleyen iş parçacıklarından birisini uyandırır. `notify_all()` yöntemi ise durum değişkenini bekleyen bütün iş parçacıklarını uyandırır.
**Not:** `notify()` ve `notify_all()` yöntemleri kilitleri serbest bırakmaz; bu, `notify()` veya `notify_all()`'u çağırmış ve sonunda kilidin sahipliğinden feragat eden bir iş parçacığı veya iş parçacıkları uyandırıldığında, `wait()` çağrısı ile acil olarak geri döndürülmeyecekleri anlamına gelir.
Durum nesneleri kullanan tipik programlama stillinde kilit, bazı paylaşılan durumlara erişimi senkronize etmek için kullanılır; belirli durum değişimleriyle ilgili olan iş parçacıkları, `notify()` veya `notify_all()`'u çağırırken, bekleyenler için olası istenilen bir duruma göre durumu değiştirdiklerinde, istenen durumu görene kadar tekrar tekrar `wait()` yöntemini çağırır. Örneğin; takip eden kod, sınırsız bir tampon kapasitesine sahip genel bir üretici-tüketici durumudur::
# Bir item'i tüketir
with cv:
while not an_item_is_available():
cv.wait()
get_an_available_item()
# Bir item'i üretir
with cv:
make_an_item_available()
cv.notify()
`while` döngüsü uygulamanın durumunu kontrol etmek için gereklidir, çünkü `wait()` keyfi olarak uzun bir sürede geri dönebilir ve `notify()` çağrısını bildiren koşul, hiç bir zaman doğru olmayabilir. Bu çoklu iş parçacığı programlamaya özgü bir durumdur. `wait_for()` yöntemi durum kontrolünü otomatik hale getirmek ve zaman aşımı hesaplamalarını kolaylaştırmak için kullanılır::
# Bir item'i tüketir
with cv:
cv.wait_for(an_item_is_available)
get_an_available_item()
Sadece bir veya bir kaç bekleyen iş parçacığının, durum değişmesiyle ilgili olup olmamadıklarına göre `notify()` ve `notify_all()` arasında seçim yapın. Örneğin, tipik bir üretici-tüketici durumunda, bir itemi tampona eklemek sadece bir tüketici iş parçacığının uyandırılmasını gerektirir.
**class threading.Condition(lock=None)**
Bu sınıf durum değişkeni nesnelerini sağlar. Bir durum değişkeni bir veya birden çok iş parçacığının, başka bir iş parçacığı tarafından onaylanana kadar, beklemesine izin verir.
Eğer *lock* argümanı veriliyse ve değeri `None` değilse, bir `Lock` veya `RLock` nesnesi olmalıdır ve temel kilit olarak kullanılmalıdır.Diğer türlü, yeni bir `RLock` nesnesi oluşturulur ve temel kilit olarak kullanılır.
Sürüm 3.3'de değiştirildi: Kurucu fonksiyondan bir sınıfa değiştirildi.
**acquire(*args)**
Temel kilidi edinir. Bu yöntem temel kilit üzerinde ilgili yöntemi çağırır; geri dönen değer, yöntem neyi geri döndürüyorsa o olur.
**release()**
Temel kilidi serbest bırakır. Bu yöntem temel kilit üzerinde ilgili yöntemi çağırır; geri dönen bir değeri yoktur.
**wait(timeout=None)**
Onaylanana veya zaman aşımına uğrayana kadar bekler. Eğer çağıran iş parçacığı bu kilidi edinmemişse, bu yöntem çağrıldığında bir `RuntimeError` hatası yükseltilir.
Bu yöntem temel kilidi serbest bırakır ve sonra başka bir iş parçacığının içindeki aynı durum değişkeni için `notify()` veya `notify_all()` çağrısı tarafından uyandırılana kadar veya seçime bağlı zaman aşımı gerçekleşene kadar iş parçacığını engeller. Bir kez uyandırıldığında veya zaman aşımına uğradığında, kilidi yeniden edinir ve geri döndürür.
*timeout* argümanı belirlenmiş ve değeri `None` olmadığında, değeri, işlemin zaman aşımı süresini saniyelerle belirten kayan noktalı bir sayı olmalıdır.
Temel kilit `RLock` olduğunda, `release()` yöntemi kullanılarak serbest bırakılamaz, çünkü bu durum birden çok kez öz yinelemeli olarak elde edildiğinden kilidi açmaz. Bunun yerine, `RLock` sınıfının iç arayüzü kullanılır; bu iç arayüz, kilit öz yinelemeli olarak birçok defa elde edilmiş olsa bile onu gerçekten açar. Sonra diğer bir iç arayüz, kilit yeniden edinildiğinde öz yineleme seviyesini yeniden düzenlemek için kullanılır.
Belirli bir zaman aşımına uğramadığı sürece, geri dönen değer `True` olur, bu durumda ise geri dönen değer `False` olur.
Sürüm 3.2'de değiştirildi: Önceden yöntem hep `None` değerini geri döndürüyordu.
**wait_for(predicate, timeout=None)**
Bir durum doğru değerlendirene kadar bekler, *predicate (=yüklem)* sonucu bir boolean değer olarak yorumlanacak olan, çağrılabilir bir şey olmalıdır. *timeout* argümanı maksimum bekleme zamanı olarak sağlanmıştır.
Bu araç yöntemi `wait()`'i yüklem sağlanana kadar veya zaman aşımı oluşana kadar tekrar tekrar çağırabilir. Geri dönen değer yüklemin son geri dönen değeridir ve yöntem zaman aşımına uğrarsa `False` olarak değerlendirilir.
*timeout* özelliğini yok saymak, bu yöntemi çağırmak kabaca aşağıdakini yazmakla eşdeğerdir::
while not predicate():
cv.wait()
Bu yüzden, aynı kural `wait()` ile aynı şekilde kullanılır: Kilit çağrıldığında tutulur ve geri döndürmede yeniden elde edilir. Yüklem, tutulan kilit ile değerlendirilir.
Sürüm 3.2'de gelen yeni bir özellik.
**notify(n=1)**
Ön-tanımlı olarak, varsa bu durumu bekleyen bir iş parçacığını uyandırır. Eğer çağrılan iş parçacığı bu yöntem çağrıldığında daha önce kilidi edinmemişse, bir `RuntimeError` hatası yükseltilir.
Bu yöntem en fazla *n* tane durum değişkenini bekleyen iş parçacığını uyandırır; hiç bir iş parçacığı beklemiyorsa, işlem yapılmaz.
Hali hazırdaki uygulama, eğer en az *n* tane iş parçacığı bekliyorsa, kesinlikle *n* tane iş parçacığını uyandırır. Ancak, bu davranışa
güvenmek pek güvenilir değildir. İleride, iyileştirilmiş bir uygulama zaman zaman *n* taneden fazla iş parçacığı uyandırabilir.
**Not:** Uyandırılmış bir iş parçacığı, kilidi yeniden elde edinceye kadar `wait()` tarafından geri dönmez. `notify()` kilidi serbest bırakmıyorsa, çağıranı serbest bırakmalıdır.
**notify_all()**
Bu durumu bekleyen bütün iş parçacıklarını uyandırır. Bu yöntem `notify()` gibi davranır, fakat bir tanesi yerine, bekleyen bütün iş parçacıklarını uyandırır. Eğer bu yöntem çağrıldığında, çağıran iş parçacığı kilidi daha önce edinmemişse, bir `RuntimeError` hatası yükseltilir.
Semaphore Nesneleri
====================
Bu, bilgisayar bilimi tarihindeki en eski senkronizasyon ilkellerinden biridir, Hollandalı bilgisayar bilimcisi Edsger W. Dijkstra tarafından icat edilmiştir (`acquire()` ve `release()` yerine `P()` ve `V()` isimlerini kullanıyordu.).
Bir semafor, her `acquire()` çağrısında azaltılan ve her `release()` çağrısında arttırılan içsel bir sayacı yönetir. Sayaç sıfırın altına hiç
bir zaman inemez; `acquire()` bu sayacın sıfır olduğunu bulursa, iş parçacığını başka bir iş parçacığı `release()`'i çağırana kadar engeller.
Semaforlar ayrıca içerik yönetim protokolünü desteklerler.
**class threading.Semaphore(value=1)**
Bu sınıf semafore nesnelerini uygular. Bir semafor `release()`'in çağrılma sayısından, `acquire()`'in çağrılma sayısını çıkartan ve bir başlangıç değerini eklemekle temsil edilen bir sayacı yönetir. `acquire()`, sayacı negatif bir sayı yapmadan geri döndürene kadar, eğer gerekliyse iş parçacığını engelleyebilir. Eğer verili değilse, *value* argümanının değeri ön-tanımlı olarak 1'dir.
Seçeneğe bağlı argüman, iç sayacın başlangıc değerini verir; ön-tanımlı olarak değeri 1'dir. Eğer *value* argümanının değerine 1'den az bir sayı verilirse, `ValueError` hatası yükseltilir.
Sürüm 3.3'de değiştirildi. Kurucu fonksiyondan bir sınıfa değiştirildi.
**acquire(blocking=True, timeout=None)**
Bir semafor elde eder.
Argümanlar olmadan çağrıldığında: eğer iç sayaç girişte sıfırdan büyükse, onu bir birim azaltır ve acilen geri döner. Eğer girişte değeri sıfır ise, başka bir iş parçacığı `release()`'i çağırıp değerini sıfırdan daha büyük bir sayı yapana kadar, engeller. Bu uygun bir kilitleyici ile birlikte yapılır böylece bir çok `acquire()` çağrıları engellenir, `release()` bunlardan kesinlikle bir tanesini uyandıracaktır. Uygulama bir tanesini rastgele seçer, böylece engellenmiş iş parçacıkları uyandırıldığında oluşan düzene güvenmemek gerekir. `True` değeri geri döner (veya süresiz olarak engeller).
*blocking* argümanı `False` olarak ayarlanmış bir şekilde çağrılırsa, iş parçacığını engellemez. Eğer argümansız bir çağrı iş parçacığını engellerse, acil olarak `False` değerini geri döndürür; diğer türlü, argümansız olarak çağrıldığının aynısını yapar ve `True` değerini geri döndürür.
*timeout* argümanı `None`'dan farklı bir şey olacak şekilde çağrılırsa, en fazla *timeout* argümanındaki belirtilen saniye kadar iş parçacığını engeller. Eğer bu arada elde etme başarılı bir şekilde tamamlanmamışsa, `False` değerini geri döndürür. Diğer türlü, `True` değerini geri döndürür.
**release()**
Bir semaforu serbest bırakır, iç sayacı bir birim arttırır. Girişte sıfır olduğunda ve diğer bir iş parçacığı, sayacın tekrar sıfırdan büyük bir sayı olmasını beklediğinde, bu iş parçacığını uyandırır.
**class threading.BoundedSemaphore(value=1)**
Bu sınıf, bağlanmış semafor nesnesini uygular. Bağlanmış semafor, hali hazırdaki değerin, ilk değeri aşmadığından emin olmak için kontrol eder. Eğer aşmışsa, `ValueError` hatası yükseltilir. Bir çok durumda semaforlar sınırlı kapasiteli kaynakları korumak için kullanılır. Eğer semafor birden fazla kez serbest bırakılmışsa, bu bir bug olduğuna işarettir. Eğer verili değilse, *value* argümanının ön-tanımlı değeri 1'dir.
Sürüm 3.3'de değiştirildi. Kurucu fonksiyondan sınıfa değiştirildi.
**Semafor Örneği**
Semaforlar genellikle sınırlı kapasiteli kaynakları korumak için kullanılır, örneğin, bir veritabanı sunucusunda. Kaynağın boyutunun sabit olduğu hangi durumda olursa olsun, bağlanmış bir semafor kullansanız iyi olur. Çalışan iş parçacıklarını oluşturmadan önce, ana iş parçacığınız semaforu başlatacaktır::
maxconnections = 5
# ...
pool_sema = BoundedSemaphore(value=maxconnections)
Bir kez oluşturulduğunda, çalışan iş parçacıkları semafor'un `acquire()` ve `release()` yöntemlerini, sunucuya bağlanmaya ihtiyaç duyduklarında çağırır::
with pool_sema:
conn = connectdb()
try:
# ... bağlantıyı kullan ...
finally:
conn.close()
Bağlanmış semaforun kullanılması, elde edildiğinden daha fazla serbest bırakılması gibi bir programlama hatasını tespit edememe şansını azaltır.
Event (Olay) Nesneleri
======================
Bu, iş parçacıkları arasındaki iletişim için en basit mekanizmadır: Bir iş parçacığı bir olayı sinyal eder ve diğer iş parçacığı da bunu bekler.
Bir olay nesnesi `set()` yöntemi ile değeri `True` olan ve `clear()` yöntemiyle de değeri `False` olan bir iç işareti yönetir. `wait()` yöntemi işaretin değeri `True` olana kadar iş parçacığını engeller.
**class threading.Event**
Bu sınıf olay nesnelerini uygular. Bir olay, `set()` yöntemi ile değeri `True` olan ve `clear()` yöntemiyle de değeri `False` olan bir işareti yönetir. `wait()` yöntemi iş parçacığını, işaretin değeri `True` olana kadar engeller. İşaretin değeri ilk olarak `False`'dur.
Sürüm 3.3'de değiştirildi. Kurucu bir fonksiyondan bir sınıfa değiştirildi.
**is_set()**
Sadece iç işaret `True` olduğunda `True` değerini geri döndürür.
**set()**
İç işareti `True` olarak ayarlar. `True` olmasını bekleyen bütün iş parçacıkları uyandırılır. `wait()`'i çağıran iş parçacığı, bir kez işaret `True` olursa, bir daha engellenmeyecektir.
**clear()**
İç işareti `False` olarak sıfırlar. Sonradan, `wait()`'i çağıran iş parçacıkları, `set()`, iç işareti tekrar `True` yapana kadar engellenecektir.
**wait(timeout=None)**
İç işaret `True` olana kadar iş parçacığını engeller. Eğer girişte iç işaret `True` olursa, acil olarak geri döner. Diğer türlü, başka bir iş parçacığı, işareti `True` yapmak için `set()`'i çağırana kadar veya seçime bağlı *timeout* süresi dolana kadar, iş parçacığını engeller.
*timeout* argümanı kullanılarak çağrıldığında ve değeri `None` olmadığında, değeri, işlemin zaman aşımı süresini saniyelerle belirten kayan noktalı bir sayı olmalıdır.
Bu yöntem, ancak iç işaretin değeri `True` olarak ayarlanmışsa, `True` değerini geri döndürür, `wait()` çağrısından önce veya çağrı başladıktan sonra, *timeout* değeri verilmemişse ve işlem zaman aşımına uğramamışsa her zaman `True` değerini geri döndürür.
Sürüm 3.1'de değiştirildi: Daha önceden, bu yöntem her zaman `None` değerini geri döndürürdü.
Timer (Zamanlayıcı) Nesneleri
==============================
Bu sınıf, sadece belirli bir zaman geçtikten sonra çalıştırılan bir eylemi, -bir zamanlayıcıyı- temsil eder. `Timer`, `Thread`'in bir alt sınıfı olup, ayrıca özel bir iş parçacığı oluşturma işlevi örneğidir.
Zamanlayıcılar, tıpkı iş parçacıkları gibi `start()` yöntemi çağrılarak başlatılır. Zamanlayıcı (eylemi başlamadan önce) `cancel()` yöntemi çağrılarak durdurulabilir. Zamanlayıcının eyleminin gerçekleşmesinden önce bekleyeceği aralık, kullanıcının tanımladığı aralıkla tam olarak aynı olmayabilir.
Örneğin::
def hello():
print("hello, world")
t = Timer(30.0, hello)
t.start() # 30 saniye sonra, "hello, world" yazısı ekrana bastırılacak.
**class threading.Timer(interval, function, args=None, kwargs=None)**
*interval (=aralık)* argümanında belirtilen saniyelerden sonra, *args* argümanları ve *kwargs* anahtar argümanlarıyla birlikte çalışan bir fonksiyonun atandığı bir zamanlayıcı oluşturur. Eğer *args*, `None` (ön-tanımlı değeri bu) ise, boş bir liste kullanılacaktır. Eğer *kwargs*, `None` (ön-tanımlı değeri bu) ise, boş bir sözlük kullanılacaktır.
Sürüm 3.3'de değiştirildi: Kurucu fonksiyondan sınıfa değiştirildi.
**cancel()**
Zamanlayıcıyı durdurur ve zamanlayıcının eyleminin çalıştırılmasını iptal eder. Bu sadece eğer zamanlayıcı halen kendi bekleme evresindeyse çalışır.
Barrier (Engel) Nesneleri
==========================
Sürüm 3.2'de gelen yeni bir özelliktir.
Bu sınıf, birbirini bekleme ihtiyacında olan sabit sayıdaki iş parçacıklarının kullanması için basit senkronizasyon ilkelleri sağlar. Her bir iş parçacığı `wait()` yöntemini çağırarak engeli aşmaya çalışır ve bütün iş parçacıkları aynı çağrıyı yapana kadar da iş parçacıkları engellenir. Bu noktada bütün iş parçacıkları aynı anda serbest bırakılır.
Engel aynı sayıdaki iş parçacıkları için bir çok kez tekrar kullanılabilir.
Aşağıdaki örnek, bir istemci ve sunucu iş parçacığını senkronize etmek için basit bir yoldur::
b = Barrier(2, timeout=5)
def server():
start_server()
b.wait()
while True:
connection = accept_connection()
process_server_connection(connection)
def client():
b.wait()
while True:
connection = make_connection()
process_client_connection(connection)
**class threading.Barrier(parties, action=None, timeout=None)**
Bir partide bulunan değişik sayıdaki iş parçacığı için bir engel nesnesi oluşturur. *action* argümanı yazıldığında, iş parçacıklarından biri tarafından, serbest bırakıldığı zaman çağrılan, çağrılabilir bir şeydir. *timeout* argümanı belirtilmediği zaman değeri `wait()` yöntemi için ön tanımlı değeridir.
**wait(timeout=None)**
Engeli geçer. Partideki bütün iş parçacıkları bu fonksiyonu çağırdığında, aynı anda hepsi birden serbest bırakılır. Eğer bir *timeout* değeri belirtilirse, sınıf kurucusuna verilmiş olan değere tercih edilerek kullanılır.
Geri dönen değer, 0 ile parti sayısının 1 eksiği arasında bir tamsayıdır, her bir iş parçacığı için değişebilir. Bu, bir takım özel idare işleri yapacak olan bir iş parçacığını seçmek için kullanılabilir. Örneğin::
i = barrier.wait()
if i == 0:
# Sadece bir iş parçacığı bunu bastırmaya ihtiyaç duyar.
print("engel geçildi")
Eğer yapıcıya bir tane *action* sağlanmışsa, iş parçacıklarından bir tanesi serbest bırakılmadan önce onu çağırmış olacaktır. Bu çağrım bir hata yükseltirse, engel kırılan durumun içine yerleştirilir.
Eğer çağrı zaman aşımına uğrarsa, engel kırılan durumun içine yerleştirilir.
Bu yöntem, beklenildiği gibi, eğer engel kırılmışsa veya iş parçacığı beklerken sıfırlanmışsa, `BrokenBarrierError` hatası yükseltebilir.
**reset()**
Engeli ön-tanımlı değerine, boş duruma geri döndürür. Onu bekleyen her iş parçacığı `BrokenBarrierError` hatasını alır.
Durumu bilinmeyen bazı iş parçacıkları olduğunda, bu fonksiyonun bazı dış senkronizasyonlara ihtiyaç duyabileceğini not edin. Eğer bir engel kırıldığında, onu terk edip, yeni bir tane oluşturmak daha iyi bir yoldur.
**abort()**
Bir engeli kırılmış bir duruma sokar. Bu, canlı veya ileride yapılacak bütün `wait()` çağrılarının `BrokenBarrierError` hatasıyla başarısızlığa uğramasına sebep olur. Bunu, eğer uygulamayı çıkmazdan kurtarmak için, iptal edilmeye ihtiyaç duyuyorsa kullanın.
Bu, iş parçacıklarından bir tanesinin ters gitmesine karşı hassas bir *timeout* değeri ile oluşturulmuş bir engeli otomatik olarak korumak için tercih edilebilir.
**parties**
Engeli geçmesi gereken iş parçacığı sayısıdır.
**n_waiting**
Hali hazırda engelde bekleyen iş parçacığı sayısıdır.
**broken**
Eğer engel kırılan durumun içindeyse, değeri `True` olan bir boolean verisidir.
**exception threading.BrokenBarrierError**
Bu istisna, `RuntimeError`'un bir alt sınıfıdır, `Barrier` nesnesi sıfırlandığında veya kırıldığında yükseltilir.
Kilitleri, Durumları ve Semaforları `with` deyimi ile birlikte kullanmak
=========================================================================
Bu modül tarafından sağlanan, `acquire()` ve `release()` fonksiyonuna sahip bütün nesneler içerik yönetimi olarak `with` deyimi için kullanılabilir. `acquire()` yöntemi, engellemeye girildiğinde, `release()` yöntemi de engellemeden çıkıldığında çağrılacaktır. Bundan ötürü aşağıdaki kodlar::
with some_lock:
# Bir şeyler yap...
şu işlemin dengidir::
some_lock.acquire()
try:
# Bir şeyler yap...
finally:
some_lock.release()
Hali hazırda, `Lock`, `RLock`, `Condition`, `Semaphore` ve `BoundedSemaphore` nesneleri `with` deyimi içerik yönetimi olarak kullanılabilir.
Örnekler:
==========
**Örnek-1:**
Thread'ı kullanmanın en kolay yolu; onu bir hedef fonksiyonuyla örnekleyip, `start()` fonksiyonunu çağırarak çalıştırmaktır.
**Kodlar**::
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import threading
def f(): # Thread'in iş fonksiyonu.
print("iş")
for i in range(4):
t = threading.Thread(target=f)
t.start()
**Kodların Açıklamaları:**
Yukarıdaki kodlarda, *f* isminde bir tane fonksiyon oluşturulmuş ve içine "iş" string verisini ekrana yazdıran bir *print()* fonksiyonu dahil edilmiştir. Daha sonra `for` döngüsünü kullanarak, dört tane iş parçacığı nesnesi örneği oluşturulmuştur. Bütün iş parçacıklarının hedef fonksiyonu, *f*'tir. Ve bu program çalıştırıldığında dört kere ekrana "iş" yazısı yazdırılır.
**Örnek-2:**
Bir iş parçacığı oluşturmak ve hangi işi yapacağını söylemek için argüman atamak kullanılacak yollardan birisidir. İkinci örnekte `thread`'in sonradan bastıracağı bir sayı argümanı fonksiyonda tanımlanmıştır.
**Kodlar**::
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import threading
def f(sayi):
print("iş {}".format(sayi))
for i in range(4):
t = threading.Thread(target=f, args=(i, ))
t.start()
**Kodların Açıklamaları:**
Bir iş parçacığı oluştururken, iş parçacığının etkin olacağı fonksiyonun eğer bir fonksiyon parametresi varsa, onu *args* parametresine yazarak, iş parçacığının hedefi olmasını sağlayabiliriz.
**Örnek-3:**
İş parçacıklarını adlandırmak veya tanıtmak için Örnek-2'de olduğu gibi argümanları kullanmak oldukça gereksizdir. Ancak bu demek değildir ki argüman kullanmak gereksizdir. Sadece iş parçacığının ismini belirtirken bu yöntemi kullanmak gereksizdir demek istiyorum. Yoksa argümanlara ihtiyaç duyacağımız çok fazla durumla karşılaşmamız mümkün. Şundan bahsetmek istiyorum; her `Thread` örneğinin ismiyle birlikte, iş parçacığı oluşturulduğunda değişen, rastgele bir değeri vardır. `Thread`'leri isimlendirmek, sunucu işlemleriyle, birçok farklı hizmet işlerinin birlikte yürütülmesinde kolaylık sağlar.
**Kodlar**::
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import threading
import time
def f():
print(threading.currentThread().getName(), "Başlıyor")
time.sleep(2)
print(threading.currentThread().getName(), "Bitiyor")
def g():
print(threading.currentThread().getName(), "Başlıyor")
time.sleep(5)
print(threading.currentThread().getName(), "Bitiyor")
t1 = threading.Thread(name="Birinci servis", target=f)
t2 = threading.Thread(name="İkinci servis", target=g)
t3 = threading.Thread(target=f)
t4 = threading.Thread(target=g)
t1.start()
t2.start()
t3.start()
t4.start()
**Kodların Açıklamaları:**
Bu örnekteki şu kısma bir bakalım::
def f():
print(threading.currentThread().getName(), "Başlıyor")
time.sleep(2)
print(threading.currentThread().getName(), "Bitiyor")
*f()* fonksiyonu çağrıldığında, ismi neyse o şekilde "filanca Başlıyor" şeklinde bir yazı ekrana bastırılacak. Sonra 2 saniye bekledikten sonra "filanca Bitiyor" şeklinde bir yazı ekrana bastırılacak.
Ancak bu durumu iş parçacığı nesnesini tanımlarken değiştirebiliyoruz. Yani::
t1 = threading.Thread(name="Birinci servis", target=f)
t2 = threading.Thread(name="İkinci servis", target=g)
yukarıda olduğu gibi iş parçacığını tanımladığımızda, t1 ve t2 iş parçacıklarına kendimiz isim vermiş oluyoruz. Bu isimleri vermediğimizde iş parçacığının ismi *Thread-1* şeklinde bir isme sahip olur. t3 ve t4 isimli iş parçacıklarının *name* argümanının yazılmamış olduğuna dikkat edin. Bu iki iş parçacığının ismimleri dolayısıyla *Thread-1* ve *Thread-2* olacaktır.
**Örnek-4:**
Şimdi gelin threading'i daha rahat anlayabileceğimiz bir örnek oluşturalım. Bildiğiniz gibi herhangi bir `tkinter` uygulamasını çalıştırabilmemiz için `mainloop()` fonksiyonunu çağırmamız gerekiyor. Ve bu fonksiyon, programı sonlandıran herhangi bir işlem tanımlanmamışsa, sürekli çalışır durumda oluyor. Peki biz aynı anda bir tanesi `tkinter`'e ait olan iki tane döngüyü aynı anda çalıştıramaz mıyız? Elbette çalıştırabiliriz, işte cevabı:
**Kodlar**::
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
try:
import Tkinter as tk
except ImportError:
import tkinter as tk
import threading
root = tk.Tk()
entry = tk.Entry(master=root)
entry.grid(row=0, column=0)
def f():
button = tk.Button(master=root, text="Button")
while True:
if entry.get() == "":
button.grid_forget()
else:
button.grid(row=1, column=0)
t1 = threading.Thread(target=f)
t1.daemon = True
t1.start()
t1.join(1)
root.mainloop()
**Kodların Açıklamaları:**
Bu örneği çalıştırdığınızda, göreceksiniz ki, *entry* widgetine yazı yazdığınızda *button* widgeti beliriyor, *entry* widgeti boş olduğunda ise ortadan kayboluyor. Bu işlem basit bir denetleme işlemidir ve tahmin edeceğiniz gibi fonksiyonun içindeki `while` döngüsü bu işe yarıyor. *t1* isimli `threading` örneğini oluşturduktan sonra onun *daemon* özelliğinin değerini `True` olarak değiştirdiğimizi görüyorsunuz. Bu işlemi yapmaktaki amacımız, programı sonlandırdığımızda, geriye sadece *daemonic* iş parçacıklarının kalmasını sağlamak ve böylece programdan çıkmamızı sağlamak. Eğer bu *daemon* özelliğini aktif hale getirmemiş olsaydık, `tkinter` penceresini kapattığımız halde, programın sonlanmadığını görürdük. `t1.join(1)` kodu da, ana iş parçacığının, bu iş parçacığının sonlanması için en fazla 1 saniye bekleyeceğini belirtir.
**Örnek-5:**
Şimdi de `Lock` nesnesiyle alakalı bir örnek yapalım.
**Kodlar**::
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import threading
def f():
print("f fonksiyonu")
def g():
print("g fonksiyonu")
def h():
print("h fonksiyonu")
t1 = threading.Thread(target=f)
t2 = threading.Thread(target=g)
t3 = threading.Thread(target=h)
lock = threading.Lock()
lock.acquire()
t1.start()
lock.acquire(blocking=True, timeout=3)
t2.start()
lock.acquire(blocking=True, timeout=1)
t3.start()
**Kodların Açıklamaları:**
Önce gerekli modülü programın içine aktardık::
import threading
Sonra farklı iş parçacıklarının çağıracağı üç tane fonksiyon tanımladık::
def f():
print("f fonksiyonu")
def g():
print("g fonksiyonu")
def h():
print("h fonksiyonu")
Daha sonra fonksiyonları iş parçacıklarının hedefi haline getirdik::
t1 = threading.Thread(target=f)
t2 = threading.Thread(target=g)
t3 = threading.Thread(target=h)
Sonra kilit nesnemizi oluşturduk ve kilit nesnemizin `acquire()` fonksiyonunu argümansız olarak çağırdık. Eğer argümanlı çağırsaydık da değişen bir şey olmazdı, çünkü kilit bir sonraki `acquire()` fonksiyonunu çağırdığımız zaman engellemeye başlayacak::
lock = threading.Lock()
lock.acquire()
*t1* isimli iş parçacığını başlattık; engellenmeden çalışmaya başladı::
t1.start()
Ve şimdi `lock.acquire()` yöntemini *blocking* ve *timeout* argümanlarıyla birlikte çağıralım. Bu yöntemi `t1.start()`'ı çağırmadan önce ikinci kez çağırsaydık o zaman, *t1* iş parçacığı da engellenecekti. *timeout* parametresine *3* yazalım. Yani 3 saniyeliğine diğer işlemleri engellesin::
lock.acquire(blocking=True, timeout=3)
Üç saniye geçtikten sonra t2 iş parçacığını başlatalım::
t2.start()
`lock.acquire()` fonksiyonunu bir kez daha çağırabiliriz, bu kez 1 saniyeliğine diğer görevleri engellesin::
lock.acquire(blocking=True, timeout=1)
Ve son olarak da *t3* iş parçacığını başlatalım::
t3.start()
Yukarıdaki örnekte, ekrana önce "f fonksiyonu" yazıldı, "f fonksiyonu" yazısı ekrana yazdırıldıktan üç saniye sonra ekrana "g fonksiyonu" yazıldı, ve "g fonksiyonu" ekrana yazdırıldıktan bir saniye sonra da "h fonksiyonu" ekrana yazıldı.
**Örnek-6:**
Şimdi de `acquire()` yöntemini bir kez yazarak, bu yöntemden sonra gelen işlemlerin engellenmediği bir örnek yazalım.
**Kodlar**::
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import threading
class Thread(threading.Thread):
def __init__(self, lock):
threading.Thread.__init__(self)
self.lock = lock
def run(self):
self.lock.acquire()
print("{} kilidi edindi.".format(self.name))
# self.lock.acquire(blocking=True, timeout=3)
self.lock.release()
print("{} kilidi serbest bıraktı.".format(self.name))
__lock__ = threading.Lock()
t1 = Thread(lock=__lock__)
t2 = Thread(lock=__lock__)
t1.start()
t2.start()
**Kodların Açıklamaları:**
Her zamanki gibi önce modülümüzü programın içine aktaralım::
import threading
Şimdi de `threading.Thread`'i miras alan bir sınıf oluşturalım. Ve bu sınıfın *lock* isminde bir tane de özelliği olsun::
class Thread(threading.Thread):
def __init__(self, lock):
threading.Thread.__init__(self)
self.lock = lock
Bildiğiniz gibi `threading.Thread()`'in `run()` isimli bir yöntemi var. Bu yöntemi *override* yapalım, yani modülün `run()` yöntemi yerine bizim yazacağımız `run()` yöntemi kullanılsın. Bu yöntem, ilk olarak `self.lock.acquire()` fonksiyonunu çağırsın. Hemen altında, iş parçacığının kilidi edindiğine dair mesajı ekrana yazdıran `print()` fonksiyonunu çağıralım. Bir altındaki yoruma alınmış `# self.lock.acquire(blocking=True, timeout=3)` kısmı, yorumdan çıkarırsanız, alttaki işlemlerin çalışabilmesi için üç saniye beklemek zorunda kalırsınız. `self.lock.release()` ile de kilidi serbest bırakıyoruz. Ve `run()` fonksiyonunun son satırında da kilidin serbest bırakıldığına dair mesajı ekrana bastıran bir `print()` fonksiyonu çağıralım::
def run(self):
self.lock.acquire()
print("{} kilidi edindi.".format(self.name))
# self.lock.acquire(blocking=True, timeout=3)
self.lock.release()
print("{} kilidi serbest bıraktı.".format(self.name))
Sınıfı oluşturduk, örnekleri oluşturmadan önce kilidimizi oluşturalım::
__lock__ = threading.Lock()
Şimdi de iş parçacıklarımızı oluşturup onları başlatalım::
t1 = Thread(lock=__lock__)
t2 = Thread(lock=__lock__)
t1.start()
t2.start()
**Örnek-7:**
Şimdi de `RLock` ile ilgili bir örnek yapalım. `Lock` ile `RLock` arasındaki en belirgin fark, `Lock`'ın kilidini bir başka iş parçacığı açabilir olması, oysa `RLock`'ın kilidini, kilidi edinmiş olan iş parçacığının açması gerekir.
**Kodlar**::
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import threading
class Thread(threading.Thread):
def __init__(self, lock):
threading.Thread.__init__(self)
self.lock = lock
def run(self):
self.lock.acquire(blocking=True, timeout=3)
print("{} çalışıyor.".format(self.name))
self.lock.acquire(blocking=True, timeout=1)
print("{} çalışması bitti.".format(self.name))
__lock__ = threading.RLock()
t1 = Thread(lock=__lock__)
t2 = Thread(lock=__lock__)
t1.start()
t2.start()
**Kodların Açıklamaları:**
Her zamanki gibi önce `threading` modülünü programın içine aktarıyoruz::
import threading
*lock* parametresi olan ve `threading.Thread()` sınıfını miras alan bir sınıf oluşturuyoruz::
class Thread(threading.Thread):
def __init__(self, lock):
threading.Thread.__init__(self)
self.lock = lock
Yine `run()` yöntemini *override* edelim. Bu `run()` fonksiyonu altında çağırdığımız ilk fonksiyon `self.lock.acquire(blocking=True, timeout=3)` fonksiyonudur. Bu fonksiyon kilidi edinecek olan ilk iş parçacığına uygulanmaz. Bir sonraki satırda, iş parçacığının çalıştığına dair ekrana bir yazı yazdırıyoruz (`print("{} çalışıyor.".format(self.name))`). Onun da altında kilidi `self.lock.acquire(blocking=True, timeout=1)` fonksiyonu ile bir daha ediniyoruz. Bir iş parçacığı RLock kilidini ikinci kez kendi işlemlerini engellemeden elde edebilir. Ve `run()` yönteminin son satırında da çalışmanın bittiğine dair ekrana bir yazı yazdırıyoruz (`print("{} çalışması bitti.".format(self.name))`)::
def run(self):
self.lock.acquire(blocking=True, timeout=3)
print("{} çalışıyor.".format(self.name))
self.lock.acquire(blocking=True, timeout=1)
print("{} çalışması bitti.".format(self.name))
Sınıfı oluşturduk, örnekleri oluşturmadan önce kilidimizi oluşturalım::
__lock__ = threading.RLock()
Şimdi de iş parçacıklarımızı oluşturup onları başlatalım::
t1 = Thread(lock=__lock__)
t2 = Thread(lock=__lock__)
t1.start()
t2.start()
**Not:** Bu örnekte RLock kilidine sahip olan iş parçacığı *t1*'dir. Dolayısıyla kilidi sadece o açabilir. Bu örneği çalıştırdığınızda, *t1* iş parçacığının kilit edindiğini ama serbest bırakmadığını görüyoruz. Eğer *t1* bu kilidi serbest bıraksaydı, iş parçacıkları arasında bekleme süresi olmayacaktı.
**Örnek-8:**
Şimdi de `Condition()` ile ilgili bir örnek yapalım. Bu örnekte bir üretici bir de tüketici iş parçacığı oluşturacağız.
**Kodlar**::
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import time
import threading
class Uretici(threading.Thread):
def __init__(self, condition, liste):
threading.Thread.__init__(self)
self.condition = condition
self.liste = liste
def run(self):
count = 1
while count < 10:
self.condition.acquire()
print("{} condition'u edindi.".format(self.name))
self.liste.append(count)
print("{} listeye {} tarafından eklendi."
.format(count, self.name))
self.condition.notify()
print("condition {} tarafından bildirildi.".format(self.name))
self.condition.release()
print("condition {} tarafından serbest bırakıldı."
.format(self.name))
count += 1
time.sleep(0.5)
class Tuketici(threading.Thread):
def __init__(self, condition, liste):
threading.Thread.__init__(self)
self.condition = condition
self.liste = liste
def run(self):
while True:
self.condition.acquire()
print("{} condition'u edindi.".format(self.name))
while True:
if self.liste:
sayi = self.liste.pop()
print("{}, {} {}".format(
sayi, self.name,
"tarafından listeden düşürüldü."))
break
print("condition {} {}".format(
self.name, "tarafından bekletiliyor."))
self.condition.wait()
self.condition.release()
print("condition {} {}".format(
self.name,
"tarafından serbest bırakıldı."))
__condition__ = threading.Condition()
__liste__ = []
t1 = Uretici(condition=__condition__, liste=__liste__)
t2 = Tuketici(condition=__condition__, liste=__liste__)
t1.start()
t2.start()
**Kodların Açıklamaları:**
Her zamanki gibi önce gerekli modülleri programın içine aktarıyoruz::
import time
import threading
Şimdi, `threading.Thread` sınıfının özelliklerini miras alan bir üretici sınıf tanımlayalım; bu sınıftan bir örnek türetilmek istendiği zaman kullanıcı *condition* argümanını ve *liste* argümanını girmek zorunda kalsın::
class Uretici(threading.Thread):
def __init__(self, condition, liste):
threading.Thread.__init__(self)
self.condition = condition
self.liste = liste
Bu sınıfın bir tane `run()` metodu zaten mevcut ama biz bu `run()` metodunu değiştirelim::
def run(self):
Bu `run()` yönteminde aşağıdakiler yapılsın:
1. *count* isimli daha sonra `self.liste`'ye eklenmek üzere bir değişken tanımlayalım::
count = 1
2. Bir tane döngü oluşturalım, bu döngü *count*, 10'dan küçük olduğu sürece devam etsin::
while count < 10:
3. Döngü içinde iş parçacığı `Condition`'u edinsin ve ekrana da `Condition`'u elde ettiğine dair bir yazı yazdırılsın::
self.condition.acquire()
print("{} condition'u edindi.".format(self.name))
4. İş parçacığı şimdi de *count* değişkenini `self.liste`'ye eklesin ve ekrana bu işlemle ilgili bir yazı yazdırılsın::
self.liste.append(count)
print("{} listeye {} tarafından eklendi."
.format(count, self.name))
5. Sonra, iş parçacığı, durumunu bildirsin ve bildirildiğine dair ekrana bir yazı yazdırılsın::
self.condition.notify()
print("condition {} tarafından bildirildi.".format(self.name))
6. Şimdi de iş parçacığı `Condition`'u serbest bıraksın ve serbest bıraktığına dair ekrana bir yazı yazdıralım::
self.condition.release()
print("condition {} tarafından serbest bırakıldı."
.format(self.name))
7. *count* değişkenini 1 birim arttıralım ve `time.sleep(0.5)` fonksiyonunu çağırarak işlemler arasında biraz zaman geçmesini bekleyelim::
count += 1
time.sleep(0.5)
Şimdi de, `threading.Thread` sınıfının özelliklerini miras alan bir tüketici sınıf tanımlayalım; yine bu sınıftan bir örnek türetilmek istendiği zaman kullanıcı *condition* argümanını ve *liste* argümanını girmek zorunda kalsın::
class Tuketici(threading.Thread):
def __init__(self, condition, liste):
threading.Thread.__init__(self)
self.condition = condition
self.liste = liste
Bu sınıfın da bir tane `run()` metodu zaten mevcut ama biz bu `run()` metodunu değiştirelim::
def run(self):
Bu `run()` yönteminde aşağıdakiler yapılsın:
1. Sonsuz bir döngü oluşturalım, bu döngü içerisindeki tüketici iş parçacığı `Condition`'u elde etsin ve elde ettiğine dair bilgiyi ekrana yazdıralım::
while True:
self.condition.acquire()
print("{} condition'u edindi.".format(self.name))
2. Bir tane daha sonsuz döngü oluşturalım, Bu döngüde de bir koşul oluşturalım, koşulumuz `self.liste` `True` değeri veriyorsa olsun ve bu koşul altında *sayi* isimli bir değişkeni `self.liste`'den düşürelim. Ekrana da iş parçacığının bu sayıyı listeden düşürdüğünün bilgisini yazdıralım, sonra da bu koşul altındaki döngüden çıkılsın::
while True:
if self.liste:
sayi = self.liste.pop()
print("{}, {} {}".format(
sayi, self.name,
"tarafından listeden düşürüldü."))
break
3. Yine ikinci döngünün içindeyken her zaman `Condition`'u bekletelim ve beklediğine dair yazı ekrana yazdırılsın, şayet bunu yapmazsak, döngü başa sardığında iş parçacığı `Condition`'u tekrar edinir ve program orada donup kalır::
print("condition {} {}".format(
self.name, "tarafından bekletiliyor."))
self.condition.wait()
4. İlk döngümüzün içinde `Condition`'u serbest bırakalım. Bu örnekte `Condition()`u serbest bırakmazsak, bir sorunla karşılaşmayız. Ama iki tane tüketici olduğu durumlarda `while` döngüsünü kırabilecek bir durum oluşturabiliriz ve döngü kırıldıktan sonra iş parçacığı kilidi hala tutmaya devam ediyor olabilir, bu yüzden kilidi serbest bırakmak gerekir::
self.condition.release()
print("condition {} {}".format(
self.name,
"tarafından serbest bırakıldı."))
Ve son olarak `Condition()`, `Uretici()`, `Tüketici()` sınıflarından birer örnek ve boş bir liste oluşturalım. `Condition()` sınıfından oluşturduğumuz örnek ve listeyi `Uretici()` ve `Tuketici()` sınıflarından oluşturduğumuz örneklere argüman olarak yazalım. Sonra da iş parçacıklarını çalıştıralım::
__condition__ = threading.Condition()
__liste__ = []
t1 = Uretici(condition=__condition__, liste=__liste__)
t2 = Tuketici(condition=__condition__, liste=__liste__)
t1.start()
t2.start()
**Not:** Bu örneği çalıştırdığımızda `Uretici()` sınıf örneği boş listeye 9 tane eleman ekleyecek ve `Tuketici()` sınıf örneği ise listeye eklenen bu elemanları tek tek silecek. Ve son olarak `Tuketici()` sınıfı kendisini beklemeye alacak.
**Örnek-9:**
Şimdi de `Semaphore()` nesnesiyle alakalı bir örnek yapalım.
**Kodlar**::
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import time
import threading
semaphore = threading.Semaphore()
def f():
print("f fonksiyonu başlıyor.")
semaphore.acquire()
print("f fonksiyonu semaforu edindi.")
for i in range(5):
print("f fonksiyonu '{}' itemini işliyor.".format(i))
time.sleep(1)
semaphore.release()
print("f fonksiyonu semaforu serbest bırakıyor.")
print("f fonksiyonu bitiyor.")
def g():
print("g fonksiyonu başlıyor")
while not semaphore.acquire():
print("Semafor henüz kullanılamıyor.")
time.sleep(1)
else:
print("g fonksiyonu semaforu edindi.")
for i in range(5):
print("g fonksiyonu '{}' itemini işliyor.".format(i))
time.sleep(1)
semaphore.release()
print("g fonksiyonu semaforu serbest bırakıyor.")
t1 = threading.Thread(target=f)
t2 = threading.Thread(target=g)
t1.start()
t2.start()
**Not:** Bu örnekte kullanılan `Semaphore()` nesnesi yerine, `Lock()`, `RLock`, `Condition()` ve `BoundedSemaphore()` nesnelerini de kullanabilirsiniz. Bu örnek `BoundedSemaphore()` ve `Condition()` nesneleri için pek uygun bir örnek olmasa da, `Lock()`, `RLock` nesneleri için bu örneği kullanmakta bir sakınca yok.
**Kodların Açıklamaları:**
Önce modüllerimizi programın içine aktaralım::
import time
import threading
Şimdi `Semaphore()` nesnesinden bir tane örnek oluşturalım::
semaphore = threading.Semaphore()
Bu örnekte `f()` ve `g()` isimli iki tane fonksiyon kullanacağız. Önce `f()` fonksiyonunu oluşturalım, fonksiyon çağrılır çağrılmaz, ekrana bir yazı yazdırılsın::
def f():
print("f fonksiyonu başlıyor.")
Daha sonra iş parçacığı semaforu edinsin ve elde ettiğine dair bir yazı ekrana yazdırılsın::
semaphore.acquire()
print("f fonksiyonu semaforu edindi.")
Şimdi de fonksiyon içinde basit bir işlem tanımlayalım::
for i in range(5):
print("f fonksiyonu '{}' itemini işliyor.".format(i))
time.sleep(1)
İş parçacığı semaforu serbest bıraksın ve serbest bıraktığına dair ekrana bir yazı yazdırılsın, son olarak da fonksiyonun çalışmasının bittiğine dair ekrana bir yazı yazdırılsın::
semaphore.release()
print("f fonksiyonu semaforu serbest bırakıyor.")
print("f fonksiyonu bitiyor.")
Şimdi de `g()` fonksiyonunu oluşturalım. Fonksiyon çağrıldığında, fonksiyonun başladığına dair bir yazı ekrana yazdırılsın::
def g():
print("g fonksiyonu başlıyor")
İş parçacığı bu kilidi edinmediği sürece ekrana bir yazı yazdırılsın. Ancak `acquire()` fonksiyonunun *blocking* argümanını `False` yapmadığımız için bu yazı ekrana yazdırılmayacaktır. İsterseniz bir de `acquire(blocking=False)` yazarak örneği bir daha çalıştırın::
while not semaphore.acquire():
print("Semafor henüz kullanılamıyor.")
time.sleep(1)
Eğer iş parçacığı semaforu edindiyse aşağıdaki işlemler yapılsın::
else:
print("g fonksiyonu semaforu edindi.")
for i in range(5):
print("g fonksiyonu '{}' itemini işliyor.".format(i))
time.sleep(1)
Son olarak bu iş parçacığı da semaforu serbest bıraksın::
semaphore.release()
print("g fonksiyonu semaforu serbest bırakıyor.")
**Örnek-10:**
Şimdi de `BoundedSemaphore()` ile ilgili bir örnek yapalım.
**Kodlar**::
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import threading
import time
def f(item, bs):
bs.acquire()
time.sleep(1)
print(item)
bs.release()
bounded_semaphore = threading.BoundedSemaphore(value=2)
for i in range(10):
t = threading.Thread(target=f, args=(i, bounded_semaphore))
t.start()
**Kodların Açıklamaları:**
Yine her zamanki gibi önce modülleri programın içine aktaralım::
import threading
import time
Şimdi de bir tane `f()` fonksiyonu tanımlayalım. Bu fonksiyonun *item* ve *bs* isminde iki tane argümanı olsun. *item* argümanını for listesindeki her bir eleman için, *bs* argümanını da semaphore için kullanacağız::
def f(item, bs):
Fonksiyonu çağıran iş parçacığı bağlanmış semaforu elde etsin, sonra 1 saniye bekleyelim ve `for` döngüsünün elemanını ekrana yazdıralım, son olarak da bağlanmış semaforu serbest bırakalım::
bs.acquire()
time.sleep(1)
print(item)
bs.release()
Şimdi `global` alanda bir tane bağlanmış semafor oluşturalım ve *value* argümanına 2 yazalım::
bounded_semaphore = threading.BoundedSemaphore(value=2)
Son olarak bir tane `for` döngüsü içinde 10 tane iş parçacığı oluşturalım. Bu iş parçacıklarının *args* argümanında, listenin o sıradaki elemanı ve tanımladığımız bağlanmış semafor olsun::
bounded_semaphore = threading.BoundedSemaphore(value=2)
for i in range(10):
t = threading.Thread(target=f, args=(i, bounded_semaphore))
t.start()
**Not:** Bu örneği çalıştırdığınızda, ekrana sayıların ikişer ikişer yazdırıldığını göreceksiniz. Bunun olmasını sağlayan, bağlanmış semaforun
*value* değerinin 2 olarak yazılmasıdır.
**Örnek-11:**
Şimdi de `Event()` ile alakalı bir örnek yapalım.
**Kodlar**::
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import time
import threading
class Uretici(threading.Thread):
def __init__(self, event, liste):
threading.Thread.__init__(self)
self.event = event
self.liste = liste
def run(self):
count = 1
while count < 10:
self.liste.append(count)
print("{} listeye {} tarafından eklendi."
.format(count, self.name))
self.event.set()
print("event {} tarafından ayarlandı.".format(self.name))
self.event.clear()
print("event {} tarafından temizlendi.".format(self.name))
count += 1
time.sleep(0.5)
class Tuketici(threading.Thread):
def __init__(self, event, liste):
threading.Thread.__init__(self)
self.event = event
self.liste = liste
def run(self):
while True:
if self.liste:
sayi = self.liste.pop()
print("{}, {} tarafından listeden düşürüldü."
.format(sayi, self.name))
self.event.wait()
__event__ = threading.Event()
__liste__ = []
t1 = Uretici(event=__event__, liste=__liste__)
t2 = Tuketici(event=__event__, liste=__liste__)
t1.start()
t2.start()
**Kodların Açıklamaları:**
Modüllerimizi programın içine aktaralım::
import time
import threading
Şimdi `Uretici` isminde, *event* ve *liste* argümanlarına sahip, `threading.Thread()` sınıfından türetilmiş bir sınıf oluşturalım::
class Uretici(threading.Thread):
def __init__(self, event, liste):
threading.Thread.__init__(self)
self.event = event
self.liste = liste
Bu sınıfa `run()` isminde bir tane fonksiyon ekleyelim. Bildiğiniz gibi bu fonksiyon `threading.Thread()` sınıfına ait olan bir fonksiyon, dolayısıyla burada yine yazacağımız fonksiyon, orjinal fonksiyonun üzerine yazılacak::
def run(self):
Fonksiyonda *count* isminde bir tane değişken kullanacağız. Bu değişken 10'dan küçük olduğu sürece *while* döngüsü çalışmaya devam edecek::
count = 1
while count < 10:
Şimdi listemize *count* değişkenini ekleyelim ve ekrana *count*'un listeye eklendiğine dair bir yazı yazdıralım::
self.liste.append(count)
print("{} listeye {} tarafından eklendi."
.format(count, self.name))
Şimdi `Event()` sınıfının önce `set()` fonksiyonunu sonra da `clear()` fonksiyonunu çağıralım, her bir işlem için ekrana bir yazı yazdıralım::
self.event.set()
print("event {} tarafından ayarlandı.".format(self.name))
self.event.clear()
print("event {} tarafından temizlendi.".format(self.name))
*count* değişkeni 1 birim artsın ve `time.sleep(0.5)` fonksiyonu ile 0.5 saniye bekleyelim::
count += 1
time.sleep(0.5)
Şimdi de benzer şekilde `Tuketici` sınıfımızı oluşturalım::
class Tuketici(threading.Thread):
def __init__(self, event, liste):
threading.Thread.__init__(self)
self.event = event
self.liste = liste
Bu sınıfın `run()` metodunu da tanımlayalım::
def run(self):
Yine bir döngü oluşturalım ve `self.liste` mevcut olduğu sürece, listeden *sayi* ismindeki değişken düşürülsün ve ekrana bu sayının düşürüldüğüne dair bir yazı yazdırılsın::
while True:
if self.liste:
sayi = self.liste.pop()
print("{}, {} tarafından listeden düşürüldü."
.format(sayi, self.name))
Ve `Event()` sınıfının `wait()` fonksiyonunu çağıralım. Bu fonksiyon, yapacak hiç bir işlem kalmadığında beklemeye devam edilmesini sağlayacak::
self.event.wait()
`Event()`, `Uretici()` ve `Tuketici()` sınıflarından birer örnek oluşturalım ayrıca boş bir liste tanımlayalım son olarak da iş parçacıklarımızı başlatalım::
__event__ = threading.Event()
__liste__ = []
t1 = Uretici(event=__event__, liste=__liste__)
t2 = Tuketici(event=__event__, liste=__liste__)
t1.start()
t2.start()
**Not:** Bu örneği çalıştırdığınızda, `Uretici()` 9 tane elemanı listeye eklerken, `Tuketici()`'de bu listeye eklenen elemanları listeden silecek. Listeden silinecek bir şey kalmayınca da `Tuketici()` kendisini beklemeye alacak.
**Örnek-12:**
Şimdi de `Barrier()` nesnesiyle alakalı bir örnek yapalım.
**Kodlar**::
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import time
import random
import threading
def f(b):
time.sleep(random.randint(2, 10))
print("{} iş parçacığının uyandırıldığı tarih: {}"
.format(threading.current_thread().getName(), time.ctime()))
b.wait()
print("{} iş parçacığının engeli geçtiği tarih: {}"
.format(threading.current_thread().getName(), time.ctime()))
barrier = threading.Barrier(3)
for i in range(3):
t = threading.Thread(target=f, args=(barrier,))
t.start()
**Kodların Açıklamaları:**
Her zamanki gibi önce gerekli modülleri programın içine aktaralım::
import time
import random
import threading
Şimdi *b* argümanına sahip, *f* isminde bir tane fonksiyon oluşturalım. Bu fonksiyonda önce `time.sleep(random.randint(2, 10))` fonksiyonunu çağırarak 2 ile 10 saniye arasında belirsiz bir süre bekleneceğini belirtelim. Daha sonra iş parçacığının uyandırıldığı tarih ekrana yazdırılsın, sonra da `Barrier()` nesnemizin `wait()` yöntemini çağıralım, son olarak da iş parçacığının engeli geçtiği tarih ekrana yazdırılsın::
def f(b):
time.sleep(random.randint(2, 10))
print("{} iş parçacığının uyandırıldığı tarih: {}"
.format(threading.current_thread().getName(), time.ctime()))
b.wait()
print("{} iş parçacığının engeli geçtiği tarih: {}"
.format(threading.current_thread().getName(), time.ctime()))
Fonksiyonu oluşturduktan sonra *barrier* isminde bir tane `Barrier()` nesnesi örneği oluşturalım. Bu nesnenin argümanına 3 vermemizin sebebi, 3 tane iş parçacığı ile çalışıyor olmamızdır::
barrier = threading.Barrier(3)
Son olarak bir `for` döngüsü oluşturalım, bu `for` döngüsü 3 tane `threading.Thread()` örneği üretsin ve döngü içinde bu örnekleri başlatalım::
barrier = threading.Barrier(3)
for i in range(3):
t = threading.Thread(target=f, args=(barrier,))
t.start()
**Not:** `Barrier()` nesnesinin özelliğine göre, oluşturulan bu iş parçacıklarının uyandırılma zamanları farklı olsa da, iş parçacıkları aynı anda engeli aşarlar.
| {
"pile_set_name": "Github"
} |
/**
* SyntaxHighlighter
* http://alexgorbatchev.com/SyntaxHighlighter
*
* SyntaxHighlighter is donationware. If you are using it, please donate.
* http://alexgorbatchev.com/SyntaxHighlighter/donate.html
*
* @version
* 3.0.83 (July 02 2010)
*
* @copyright
* Copyright (C) 2004-2010 Alex Gorbatchev.
*
* @license
* Dual licensed under the MIT and GPL licenses.
*/
/* Reset: neutralize any host-page styles on every element SyntaxHighlighter
   renders, so the highlighter looks the same regardless of the embedding page. */
.syntaxhighlighter a,
.syntaxhighlighter div,
.syntaxhighlighter code,
.syntaxhighlighter table,
.syntaxhighlighter table td,
.syntaxhighlighter table tr,
.syntaxhighlighter table tbody,
.syntaxhighlighter table thead,
.syntaxhighlighter table caption,
.syntaxhighlighter textarea {
  -moz-border-radius: 0 0 0 0 !important;
  -webkit-border-radius: 0 0 0 0 !important;
  background: none !important;
  border: 0 !important;
  bottom: auto !important;
  float: none !important;
  height: auto !important;
  left: auto !important;
  line-height: 1.1em !important;
  margin: 0 !important;
  outline: 0 !important;
  overflow: visible !important;
  padding: 0 !important;
  position: static !important;
  right: auto !important;
  text-align: left !important;
  top: auto !important;
  vertical-align: baseline !important;
  width: auto !important;
  box-sizing: content-box !important;
  font-family: "Consolas", "Bitstream Vera Sans Mono", "Courier New", Courier, monospace !important;
  font-weight: normal !important;
  font-style: normal !important;
  font-size: 1em !important;
  /* Intentional pair: `min-height: auto` is invalid in some older engines,
     which then keep the preceding `inherit` declaration. Keep both lines
     and their order. */
  min-height: inherit !important;
  min-height: auto !important;
}
/* Overall highlighter container. */
.syntaxhighlighter {
  width: 100% !important;
  margin: 1em 0 1em 0 !important;
  position: relative !important;
  overflow: auto !important;
  font-size: 1em !important;
}
.syntaxhighlighter.source {
  overflow: hidden !important;
}
.syntaxhighlighter .bold {
  font-weight: bold !important;
}
.syntaxhighlighter .italic {
  font-style: italic !important;
}
.syntaxhighlighter .line {
  white-space: pre !important;
}
/* Code table: a gutter column (line numbers) plus a code column. */
.syntaxhighlighter table {
  width: 100% !important;
}
.syntaxhighlighter table caption {
  text-align: left !important;
  padding: .5em 0 0.5em 1em !important;
}
.syntaxhighlighter table td.code {
  width: 100% !important;
}
.syntaxhighlighter table td.code .container {
  position: relative !important;
}
/* Textarea overlaid on the code cell (used by the "view source/copy" feature). */
.syntaxhighlighter table td.code .container textarea {
  box-sizing: border-box !important;
  position: absolute !important;
  left: 0 !important;
  top: 0 !important;
  width: 100% !important;
  height: 100% !important;
  border: none !important;
  background: white !important;
  padding-left: 1em !important;
  overflow: hidden !important;
  white-space: pre !important;
}
.syntaxhighlighter table td.gutter .line {
  text-align: right !important;
  padding: 0 0.5em 0 1em !important;
}
.syntaxhighlighter table td.code .line {
  padding: 0 1em !important;
}
.syntaxhighlighter.nogutter td.code .container textarea, .syntaxhighlighter.nogutter td.code .line {
  padding-left: 0em !important;
}
.syntaxhighlighter.show {
  display: block !important;
}
/* Collapsed state: hide the code table, leave only the expand toolbar. */
.syntaxhighlighter.collapsed table {
  display: none !important;
}
.syntaxhighlighter.collapsed .toolbar {
  padding: 0.1em 0.8em 0em 0.8em !important;
  font-size: 1em !important;
  position: static !important;
  width: auto !important;
  height: auto !important;
}
.syntaxhighlighter.collapsed .toolbar span {
  display: inline !important;
  margin-right: 1em !important;
}
.syntaxhighlighter.collapsed .toolbar span a {
  padding: 0 !important;
  display: none !important;
}
.syntaxhighlighter.collapsed .toolbar span a.expandSource {
  display: inline !important;
}
/* Toolbar button pinned to the top-right corner of the highlighter. */
.syntaxhighlighter .toolbar {
  position: absolute !important;
  right: 1px !important;
  top: 1px !important;
  width: 11px !important;
  height: 11px !important;
  font-size: 10px !important;
  z-index: 10 !important;
}
.syntaxhighlighter .toolbar span.title {
  display: inline !important;
}
.syntaxhighlighter .toolbar a {
  display: block !important;
  text-align: center !important;
  text-decoration: none !important;
  padding-top: 1px !important;
}
.syntaxhighlighter .toolbar a.expandSource {
  display: none !important;
}
/* Internet Explorer quirk fixes. */
.syntaxhighlighter.ie {
  font-size: .9em !important;
  padding: 1px 0 1px 0 !important;
}
.syntaxhighlighter.ie .toolbar {
  line-height: 8px !important;
}
.syntaxhighlighter.ie .toolbar a {
  padding-top: 0px !important;
}
/* Print styles: strip backgrounds and force printer-friendly colors. */
.syntaxhighlighter.printing .line.alt1 .content,
.syntaxhighlighter.printing .line.alt2 .content,
.syntaxhighlighter.printing .line.highlighted .number,
.syntaxhighlighter.printing .line.highlighted.alt1 .content,
.syntaxhighlighter.printing .line.highlighted.alt2 .content {
  background: none !important;
}
.syntaxhighlighter.printing .line .number {
  color: #bbbbbb !important;
}
.syntaxhighlighter.printing .line .content {
  color: black !important;
}
.syntaxhighlighter.printing .toolbar {
  display: none !important;
}
.syntaxhighlighter.printing a {
  text-decoration: none !important;
}
/* Print-mode token colors. */
.syntaxhighlighter.printing .plain, .syntaxhighlighter.printing .plain a {
  color: black !important;
}
.syntaxhighlighter.printing .comments, .syntaxhighlighter.printing .comments a {
  color: #008200 !important;
}
.syntaxhighlighter.printing .string, .syntaxhighlighter.printing .string a {
  color: blue !important;
}
.syntaxhighlighter.printing .keyword {
  color: #006699 !important;
  font-weight: bold !important;
}
.syntaxhighlighter.printing .preprocessor {
  color: gray !important;
}
.syntaxhighlighter.printing .variable {
  color: #aa7700 !important;
}
.syntaxhighlighter.printing .value {
  color: #009900 !important;
}
.syntaxhighlighter.printing .functions {
  color: #ff1493 !important;
}
.syntaxhighlighter.printing .constants {
  color: #0066cc !important;
}
.syntaxhighlighter.printing .script {
  font-weight: bold !important;
}
.syntaxhighlighter.printing .color1, .syntaxhighlighter.printing .color1 a {
  color: gray !important;
}
.syntaxhighlighter.printing .color2, .syntaxhighlighter.printing .color2 a {
  color: #ff1493 !important;
}
.syntaxhighlighter.printing .color3, .syntaxhighlighter.printing .color3 a {
  color: red !important;
}
.syntaxhighlighter.printing .break, .syntaxhighlighter.printing .break a {
  color: black !important;
}
/* Theme: white background with a green (#6ce26c) gutter accent. */
.syntaxhighlighter {
  background-color: white !important;
}
.syntaxhighlighter .line.alt1 {
  background-color: white !important;
}
.syntaxhighlighter .line.alt2 {
  background-color: white !important;
}
.syntaxhighlighter .line.highlighted.alt1, .syntaxhighlighter .line.highlighted.alt2 {
  background-color: #e0e0e0 !important;
}
.syntaxhighlighter .line.highlighted.number {
  color: black !important;
}
.syntaxhighlighter table caption {
  color: black !important;
}
.syntaxhighlighter .gutter {
  color: #afafaf !important;
}
.syntaxhighlighter .gutter .line {
  border-right: 3px solid #6ce26c !important;
}
.syntaxhighlighter .gutter .line.highlighted {
  background-color: #6ce26c !important;
  color: white !important;
}
.syntaxhighlighter.printing .line .content {
  border: none !important;
}
.syntaxhighlighter.collapsed {
  overflow: visible !important;
}
.syntaxhighlighter.collapsed .toolbar {
  color: blue !important;
  background: white !important;
  border: 1px solid #6ce26c !important;
}
.syntaxhighlighter.collapsed .toolbar a {
  color: blue !important;
}
.syntaxhighlighter.collapsed .toolbar a:hover {
  color: red !important;
}
.syntaxhighlighter .toolbar {
  color: white !important;
  background: #6ce26c !important;
  border: none !important;
}
.syntaxhighlighter .toolbar a {
  color: white !important;
}
.syntaxhighlighter .toolbar a:hover {
  color: black !important;
}
/* Token colors for on-screen rendering. */
.syntaxhighlighter .plain, .syntaxhighlighter .plain a {
  color: black !important;
}
.syntaxhighlighter .comments, .syntaxhighlighter .comments a {
  color: #008200 !important;
}
.syntaxhighlighter .string, .syntaxhighlighter .string a {
  color: blue !important;
}
.syntaxhighlighter .keyword {
  color: #006699 !important;
}
.syntaxhighlighter .preprocessor {
  color: gray !important;
}
.syntaxhighlighter .variable {
  color: #aa7700 !important;
}
.syntaxhighlighter .value {
  color: #009900 !important;
}
.syntaxhighlighter .functions {
  color: #ff1493 !important;
}
.syntaxhighlighter .constants {
  color: #0066cc !important;
}
.syntaxhighlighter .script {
  font-weight: bold !important;
  color: #006699 !important;
  /* Fixed: `background-color: none` is invalid CSS (`none` is not a color
     value) and was ignored by browsers. `transparent` is the keyword that
     produces the intended "no background" effect. */
  background-color: transparent !important;
}
.syntaxhighlighter .color1, .syntaxhighlighter .color1 a {
  color: gray !important;
}
.syntaxhighlighter .color2, .syntaxhighlighter .color2 a {
  color: #ff1493 !important;
}
.syntaxhighlighter .color3, .syntaxhighlighter .color3 a {
  color: red !important;
}
/* Adds weight to the color-only .keyword rule declared above. */
.syntaxhighlighter .keyword {
  font-weight: bold !important;
}
| {
"pile_set_name": "Github"
} |
/* Copyright 2019 Axel Huebl, Benjamin Worpitz, Bert Wesarg, Matthias Werner
*
* This file is part of Alpaka.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
#pragma once
#ifdef ALPAKA_ACC_GPU_HIP_ENABLED
#include <alpaka/core/BoostPredef.hpp>
#if !BOOST_LANG_HIP
#error If ALPAKA_ACC_GPU_HIP_ENABLED is set, the compiler has to support HIP!
#endif
#include <alpaka/math/exp/Traits.hpp>
#include <alpaka/core/Unused.hpp>
#if BOOST_COMP_NVCC >= BOOST_VERSION_NUMBER(9, 0, 0)
#include <cuda_runtime_api.h>
#else
#if BOOST_COMP_HCC || BOOST_COMP_HIP
#include <math_functions.h>
#else
#include <math_functions.hpp>
#endif
#endif
#include <type_traits>
namespace alpaka
{
    namespace math
    {
        //#############################################################################
        //! The HIP exp.
        //!
        //! Stateless tag type selecting the HIP built-in exponential; the actual
        //! computation lives in the traits::Exp specializations below.
        class ExpHipBuiltIn : public concepts::Implements<ConceptMathExp, ExpHipBuiltIn>
        {
        };

        namespace traits
        {
            //#############################################################################
            //! The HIP exp trait specialization.
            //!
            //! Enabled via SFINAE for any floating-point TArg; forwards to the
            //! device-side ::exp overload selected for that type.
            template<
                typename TArg>
            struct Exp<
                ExpHipBuiltIn,
                TArg,
                typename std::enable_if<
                    std::is_floating_point<TArg>::value>::type>
            {
                __device__ static auto exp(
                    ExpHipBuiltIn const & exp_ctx,
                    TArg const & arg)
                -> decltype(::exp(arg))
                {
                    // The context tag carries no state; only silence the
                    // unused-parameter warning.
                    alpaka::ignore_unused(exp_ctx);
                    return ::exp(arg);
                }
            };

            //#############################################################################
            //! The HIP exp float specialization.
            //!
            //! Uses the single-precision ::expf built-in, avoiding a promotion
            //! to double that the generic specialization could incur.
            template<>
            struct Exp<
                ExpHipBuiltIn,
                float>
            {
                __device__ static auto exp(
                    ExpHipBuiltIn const & exp_ctx,
                    float const & arg)
                -> float
                {
                    alpaka::ignore_unused(exp_ctx);
                    return ::expf(arg);
                }
            };
        }
    }
}
#endif
| {
"pile_set_name": "Github"
} |
### Thread related
* `COUNT`: the count of active threads
* `DAEMON-COUNT`: the count of active daemon threads
* `PEAK-COUNT`: the maximum count of the live threads since JVM starts
* `STARTED-COUNT`: the total count of the created threads since JVM starts
* `DEADLOCK-COUNT`: the count of deadlocked threads
### File descriptor related
* `MAX-FILE-DESCRIPTOR-COUNT`: the maximum number of file descriptors the JVM process can open
* `OPEN-FILE-DESCRIPTOR-COUNT`: the number of file descriptors the JVM process currently has open
| {
"pile_set_name": "Github"
} |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See LICENSE in the project root for license information.
using Microsoft.MixedReality.Toolkit.Input;
using Microsoft.MixedReality.Toolkit.Utilities;
using UnityEngine;
namespace Microsoft.MixedReality.Toolkit.OpenVR.Input
{
/// <summary>
/// Controller definition for Oculus Touch controllers accessed through the
/// OpenVR/legacy Unity input path, mapping Unity joystick axes and buttons
/// onto Mixed Reality Toolkit interactions for each hand.
/// </summary>
[MixedRealityController(
    SupportedControllerType.OculusTouch,
    new[] { Handedness.Left, Handedness.Right },
    "StandardAssets/Textures/OculusControllersTouch")]
public class OculusTouchController : GenericOpenVRController
{
    /// <summary>
    /// Constructor.
    /// </summary>
    /// <param name="trackingState">Initial tracking state of the controller.</param>
    /// <param name="controllerHandedness">Which hand this controller instance represents.</param>
    /// <param name="inputSource">Input source the controller feeds; may be null.</param>
    /// <param name="interactions">Custom interaction mappings; when null the defaults below are used.</param>
    public OculusTouchController(TrackingState trackingState, Handedness controllerHandedness,
        IMixedRealityInputSource inputSource = null, MixedRealityInteractionMapping[] interactions = null)
        : base(trackingState, controllerHandedness, inputSource, interactions)
    {
    }

    /// <inheritdoc />
    // Left ("Primary") hand: trigger/grip axes, thumbstick, X/Y (Three/Four),
    // Start, and thumb-rest touch sensors.
    // NOTE(review): the *Touch entries (12-16) are typed DeviceInputType.ButtonPress —
    // confirm this is intentional rather than a ButtonTouch-style input type.
    public override MixedRealityInteractionMapping[] DefaultLeftHandedInteractions => new[]
    {
        new MixedRealityInteractionMapping(0, "Spatial Pointer", AxisType.SixDof, DeviceInputType.SpatialPointer, MixedRealityInputAction.None),
        new MixedRealityInteractionMapping(1, "Axis1D.PrimaryIndexTrigger", AxisType.SingleAxis, DeviceInputType.Trigger, ControllerMappingLibrary.AXIS_9),
        new MixedRealityInteractionMapping(2, "Axis1D.PrimaryIndexTrigger Touch", AxisType.Digital, DeviceInputType.TriggerTouch, KeyCode.JoystickButton14),
        new MixedRealityInteractionMapping(3, "Axis1D.PrimaryIndexTrigger Near Touch", AxisType.Digital, DeviceInputType.TriggerNearTouch, ControllerMappingLibrary.AXIS_13),
        new MixedRealityInteractionMapping(4, "Axis1D.PrimaryIndexTrigger Press", AxisType.Digital, DeviceInputType.TriggerPress, ControllerMappingLibrary.AXIS_9),
        new MixedRealityInteractionMapping(5, "Axis1D.PrimaryHandTrigger Press", AxisType.SingleAxis, DeviceInputType.Trigger, ControllerMappingLibrary.AXIS_11),
        new MixedRealityInteractionMapping(6, "Axis2D.PrimaryThumbstick", AxisType.DualAxis, DeviceInputType.ThumbStick, ControllerMappingLibrary.AXIS_1, ControllerMappingLibrary.AXIS_2),
        new MixedRealityInteractionMapping(7, "Button.PrimaryThumbstick Touch", AxisType.Digital, DeviceInputType.ThumbStickTouch, KeyCode.JoystickButton16),
        new MixedRealityInteractionMapping(8, "Button.PrimaryThumbstick Near Touch", AxisType.Digital, DeviceInputType.ThumbNearTouch, ControllerMappingLibrary.AXIS_15),
        new MixedRealityInteractionMapping(9, "Button.PrimaryThumbstick Press", AxisType.Digital, DeviceInputType.ThumbStickPress, KeyCode.JoystickButton8),
        new MixedRealityInteractionMapping(10, "Button.Three Press", AxisType.Digital, DeviceInputType.ButtonPress, KeyCode.JoystickButton2),
        new MixedRealityInteractionMapping(11, "Button.Four Press", AxisType.Digital, DeviceInputType.ButtonPress, KeyCode.JoystickButton3),
        new MixedRealityInteractionMapping(12, "Button.Start Press", AxisType.Digital, DeviceInputType.ButtonPress, KeyCode.JoystickButton7),
        new MixedRealityInteractionMapping(13, "Button.Three Touch", AxisType.Digital, DeviceInputType.ButtonPress, KeyCode.JoystickButton12),
        new MixedRealityInteractionMapping(14, "Button.Four Touch", AxisType.Digital, DeviceInputType.ButtonPress, KeyCode.JoystickButton13),
        new MixedRealityInteractionMapping(15, "Touch.PrimaryThumbRest Touch", AxisType.Digital, DeviceInputType.ThumbTouch, KeyCode.JoystickButton18),
        new MixedRealityInteractionMapping(16, "Touch.PrimaryThumbRest Near Touch", AxisType.Digital, DeviceInputType.ThumbNearTouch, ControllerMappingLibrary.AXIS_17)
    };

    /// <inheritdoc />
    // Right ("Secondary") hand: mirrors the left-hand table with the
    // secondary axis/button indices and A/B (One/Two) buttons.
    public override MixedRealityInteractionMapping[] DefaultRightHandedInteractions => new[]
    {
        new MixedRealityInteractionMapping(0, "Spatial Pointer", AxisType.SixDof, DeviceInputType.SpatialPointer, MixedRealityInputAction.None),
        new MixedRealityInteractionMapping(1, "Axis1D.SecondaryIndexTrigger", AxisType.SingleAxis, DeviceInputType.Trigger, ControllerMappingLibrary.AXIS_10),
        new MixedRealityInteractionMapping(2, "Axis1D.SecondaryIndexTrigger Touch", AxisType.Digital, DeviceInputType.TriggerTouch, KeyCode.JoystickButton15),
        new MixedRealityInteractionMapping(3, "Axis1D.SecondaryIndexTrigger Near Touch", AxisType.Digital, DeviceInputType.TriggerNearTouch, ControllerMappingLibrary.AXIS_14),
        new MixedRealityInteractionMapping(4, "Axis1D.SecondaryIndexTrigger Press", AxisType.Digital, DeviceInputType.TriggerPress, ControllerMappingLibrary.AXIS_10),
        new MixedRealityInteractionMapping(5, "Axis1D.SecondaryHandTrigger Press", AxisType.SingleAxis, DeviceInputType.Trigger, ControllerMappingLibrary.AXIS_12),
        new MixedRealityInteractionMapping(6, "Axis2D.SecondaryThumbstick", AxisType.DualAxis, DeviceInputType.ThumbStick, ControllerMappingLibrary.AXIS_4, ControllerMappingLibrary.AXIS_5),
        new MixedRealityInteractionMapping(7, "Button.SecondaryThumbstick Touch", AxisType.Digital, DeviceInputType.ThumbStickTouch, KeyCode.JoystickButton17),
        new MixedRealityInteractionMapping(8, "Button.SecondaryThumbstick Near Touch", AxisType.Digital, DeviceInputType.ThumbNearTouch, ControllerMappingLibrary.AXIS_16),
        new MixedRealityInteractionMapping(9, "Button.SecondaryThumbstick Press", AxisType.Digital, DeviceInputType.ThumbStickPress, KeyCode.JoystickButton9),
        new MixedRealityInteractionMapping(10, "Button.One Press", AxisType.Digital, DeviceInputType.ButtonPress, KeyCode.JoystickButton0),
        new MixedRealityInteractionMapping(11, "Button.Two Press", AxisType.Digital, DeviceInputType.ButtonPress, KeyCode.JoystickButton1),
        new MixedRealityInteractionMapping(12, "Button.One Touch", AxisType.Digital, DeviceInputType.ButtonPress, KeyCode.JoystickButton10),
        new MixedRealityInteractionMapping(13, "Button.Two Touch", AxisType.Digital, DeviceInputType.ButtonPress, KeyCode.JoystickButton11),
        new MixedRealityInteractionMapping(14, "Touch.SecondaryThumbRest Touch", AxisType.Digital, DeviceInputType.ThumbTouch, KeyCode.JoystickButton19),
        new MixedRealityInteractionMapping(15, "Touch.SecondaryThumbRest Near Touch", AxisType.Digital, DeviceInputType.ThumbNearTouch, ControllerMappingLibrary.AXIS_18)
    };

    /// <inheritdoc />
    // Selects the hand-appropriate default mapping table above.
    public override void SetupDefaultInteractions(Handedness controllerHandedness)
    {
        AssignControllerMappings(controllerHandedness == Handedness.Left ? DefaultLeftHandedInteractions : DefaultRightHandedInteractions);
    }
}
} | {
"pile_set_name": "Github"
} |
# English locale strings for the simple_form gem (labels for boolean inputs,
# the "required" marker, and the error notification banner).
en:
  simple_form:
    "yes": 'Yes'
    "no": 'No'
    required:
      text: 'required'
      mark: '*'
      # You can uncomment the line below if you need to overwrite the whole required html.
      # When using html, text and mark won't be used.
      # html: '<abbr title="required">*</abbr>'
    error_notification:
      default_message: "Please review the problems below:"
    # Labels and hints examples
    # labels:
    #   defaults:
    #     password: 'Password'
    #   user:
    #     new:
    #       email: 'E-mail to sign in.'
    #     edit:
    #       email: 'E-mail.'
    # hints:
    #   defaults:
    #     username: 'User name to sign in.'
    #     password: 'No special characters, please.'
| {
"pile_set_name": "Github"
} |
<?php
/**
* Zend Framework (http://framework.zend.com/)
*
* @link http://github.com/zendframework/zf2 for the canonical source repository
* @copyright Copyright (c) 2005-2015 Zend Technologies USA Inc. (http://www.zend.com)
* @license http://framework.zend.com/license/new-bsd New BSD License
*/
namespace Zend\Feed\Writer\Renderer\Feed\Atom;
use DOMDocument;
use DOMElement;
use Zend\Feed\Writer;
use Zend\Feed\Writer\Renderer;
/**
 * Renders the Atom feed metadata for a <source> element, used when an
 * entry is republished from another feed.
 */
class Source extends AbstractAtom implements Renderer\RendererInterface
{
    /**
     * Constructor
     *
     * @param Writer\Source $container
     */
    public function __construct(Writer\Source $container)
    {
        parent::__construct($container);
    }

    /**
     * Render Atom Feed Metadata (Source element)
     *
     * Builds a DOM with a <source> root and delegates each metadata element
     * (title, dates, links, authors, ...) to the inherited _set* helpers,
     * then lets every registered extension render into the same root.
     *
     * @return $this
     */
    public function render()
    {
        // Default to UTF-8 when the container has no explicit encoding.
        if (!$this->container->getEncoding()) {
            $this->container->setEncoding('UTF-8');
        }
        $this->dom = new DOMDocument('1.0', $this->container->getEncoding());
        $this->dom->formatOutput = true;
        $root = $this->dom->createElement('source');
        $this->setRootElement($root);
        $this->dom->appendChild($root);
        $this->_setLanguage($this->dom, $root);
        $this->_setBaseUrl($this->dom, $root);
        $this->_setTitle($this->dom, $root);
        $this->_setDescription($this->dom, $root);
        $this->_setDateCreated($this->dom, $root);
        $this->_setDateModified($this->dom, $root);
        $this->_setGenerator($this->dom, $root);
        $this->_setLink($this->dom, $root);
        $this->_setFeedLinks($this->dom, $root);
        $this->_setId($this->dom, $root);
        $this->_setAuthors($this->dom, $root);
        $this->_setCopyright($this->dom, $root);
        $this->_setCategories($this->dom, $root);

        foreach ($this->extensions as $ext) {
            $ext->setType($this->getType());
            $ext->setRootElement($this->getRootElement());
            $ext->setDomDocument($this->getDomDocument(), $root);
            $ext->render();
        }
        return $this;
    }

    /**
     * Set feed generator string
     *
     * Appends a <generator> child when the container defines one; the
     * optional 'uri' and 'version' keys become attributes.
     *
     * @param DOMDocument $dom
     * @param DOMElement $root
     * @return void
     */
    protected function _setGenerator(DOMDocument $dom, DOMElement $root)
    {
        if (!$this->getDataContainer()->getGenerator()) {
            return;
        }

        $gdata = $this->getDataContainer()->getGenerator();
        $generator = $dom->createElement('generator');
        $root->appendChild($generator);
        $text = $dom->createTextNode($gdata['name']);
        $generator->appendChild($text);
        if (array_key_exists('uri', $gdata)) {
            $generator->setAttribute('uri', $gdata['uri']);
        }
        if (array_key_exists('version', $gdata)) {
            $generator->setAttribute('version', $gdata['version']);
        }
    }
}
| {
"pile_set_name": "Github"
} |
/*------------------------------------------------------------------------------
Copyright (c) 2000-2007 Tyrell Corporation. All rights reserved.
Tyrell DarkIce
File : Exception.cpp
Version : $Revision$
Author : $Author$
Location : $HeadURL$
Copyright notice:
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
------------------------------------------------------------------------------*/
/* ============================================================ include files */
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#ifdef HAVE_STRING_H
#include <string.h>
#else
#error need string.h
#endif
#include "Exception.h"
/* =================================================== local data structures */
/* ================================================ local constants & macros */
/*------------------------------------------------------------------------------
* File identity
*----------------------------------------------------------------------------*/
static const char fileid[] = "$Id$";
/* =============================================== local function prototypes */
/* ============================================================= module code */
/*------------------------------------------------------------------------------
* Constructor
*----------------------------------------------------------------------------*/
/*------------------------------------------------------------------------------
 *  Construct from up to two description fragments, which are concatenated
 *  into a single description string before delegating to init().
 *----------------------------------------------------------------------------*/
Exception :: Exception ( const char    * file,
                         unsigned int    line,
                         const char    * description1,
                         const char    * description2,
                         int             code ) throw ()
{
    // Collect the fragments so measuring and concatenation share one loop.
    const char * parts[2] = { description1, description2 };
    size_t       total    = 0;

    for ( int i = 0; i < 2; ++i ) {
        if ( parts[i] ) {
            total += strlen( parts[i]);
        }
    }

    if ( !total ) {
        // Both fragments null or empty: pass a null description through.
        init( file, line, 0, code);
        return;
    }

    char * joined = new char[total + 1];
    joined[0] = '\0';
    for ( int i = 0; i < 2; ++i ) {
        if ( parts[i] ) {
            strcat( joined, parts[i]);
        }
    }
    init( file, line, joined, code);
    delete[] joined;
}
/*------------------------------------------------------------------------------
* Constructor
*----------------------------------------------------------------------------*/
/*------------------------------------------------------------------------------
 *  Construct from up to three description fragments, which are concatenated
 *  into a single description string before delegating to init().
 *----------------------------------------------------------------------------*/
Exception :: Exception ( const char    * file,
                         unsigned int    line,
                         const char    * description1,
                         const char    * description2,
                         const char    * description3,
                         int             code ) throw ()
{
    // Collect the fragments so measuring and concatenation share one loop.
    const char * parts[3] = { description1, description2, description3 };
    size_t       total    = 0;

    for ( int i = 0; i < 3; ++i ) {
        if ( parts[i] ) {
            total += strlen( parts[i]);
        }
    }

    if ( !total ) {
        // All fragments null or empty: pass a null description through.
        init( file, line, 0, code);
        return;
    }

    char * joined = new char[total + 1];
    joined[0] = '\0';
    for ( int i = 0; i < 3; ++i ) {
        if ( parts[i] ) {
            strcat( joined, parts[i]);
        }
    }
    init( file, line, joined, code);
    delete[] joined;
}
/*------------------------------------------------------------------------------
* Initialize the class
*----------------------------------------------------------------------------*/
/*------------------------------------------------------------------------------
 *  Initialize the members: deep-copies the file name and description onto the
 *  heap (null input yields a null member) and stores line and code.
 *
 *  NOTE(review): default arguments on an out-of-line member definition are
 *  only well-formed if the in-class declaration omits them — confirm against
 *  Exception.h.
 *  NOTE(review): if new[] returns but the copy pointer test fails (legacy
 *  nothrow-style check), the member is left non-null but uninitialized only
 *  when allocation succeeds — with a standard throwing new[] the inner `if`
 *  is always true.
 *----------------------------------------------------------------------------*/
void
Exception :: init ( const char    * file,
                    unsigned int    line,
                    const char    * description = 0,
                    int             code = 0 ) throw ()
{
    if ( !file ) {
        this->file = 0;
    } else {
        size_t len;

        len = strlen( file ) + 1;
        this->file = new char[len];
        if ( this->file ) {
            memcpy( this->file, file, len);
        }
    }

    if ( !description ) {
        this->description = 0;
    } else {
        size_t len;

        len = strlen( description ) + 1;
        this->description = new char[len];
        if ( this->description ) {
            memcpy( this->description, description, len);
        }
    }

    this->line = line;
    this->code = code;
}
/*------------------------------------------------------------------------------
* De-initialize the class
*----------------------------------------------------------------------------*/
/*------------------------------------------------------------------------------
 *  Release the heap copies made by init(). `delete[]` on a null pointer is
 *  defined as a no-op, so no guards are required.
 *----------------------------------------------------------------------------*/
void
Exception :: strip ( void ) throw ()
{
    delete[] description;
    delete[] file;
}
| {
"pile_set_name": "Github"
} |
// Barrel re-export: forward ./header's default export as this module's default.
export { default } from './header';
| {
"pile_set_name": "Github"
} |
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import paddle.fluid as fluid
from paddle.fluid.param_attr import ParamAttr
from .search_space_base import SearchSpaceBase
from .base_layer import conv_bn_layer
from .search_space_registry import SEARCHSPACE
from .utils import check_points
__all__ = ["MobileNetV1Space"]
@SEARCHSPACE.register
class MobileNetV1Space(SearchSpaceBase):
def __init__(self, input_size, output_size, block_num, block_mask):
super(MobileNetV1Space, self).__init__(input_size, output_size,
block_num, block_mask)
# self.head_num means the channel of first convolution
self.head_num = np.array([3, 4, 8, 12, 16, 24, 32]) # 7
# self.filter_num1 ~ self.filtet_num9 means channel of the following convolution
self.filter_num1 = np.array([3, 4, 8, 12, 16, 24, 32, 48]) # 8
self.filter_num2 = np.array([8, 12, 16, 24, 32, 48, 64, 80]) # 8
self.filter_num3 = np.array(
[16, 24, 32, 48, 64, 80, 96, 128, 144, 160]) #10
self.filter_num4 = np.array(
[24, 32, 48, 64, 80, 96, 128, 144, 160, 192]) #10
self.filter_num5 = np.array(
[32, 48, 64, 80, 96, 128, 144, 160, 192, 224, 256, 320]) #12
self.filter_num6 = np.array(
[64, 80, 96, 128, 144, 160, 192, 224, 256, 320, 384]) #11
self.filter_num7 = np.array([
64, 80, 96, 128, 144, 160, 192, 224, 256, 320, 384, 512, 1024, 1048
]) #14
self.filter_num8 = np.array(
[128, 144, 160, 192, 224, 256, 320, 384, 512, 576, 640, 704,
768]) #13
self.filter_num9 = np.array(
[160, 192, 224, 256, 320, 384, 512, 640, 768, 832, 1024,
1048]) #12
# self.k_size means kernel size
self.k_size = np.array([3, 5]) #2
# self.repeat means repeat_num in forth downsample
self.repeat = np.array([1, 2, 3, 4, 5, 6]) #6
def init_tokens(self):
"""
The initial token.
The first one is the index of the first layers' channel in self.head_num,
each line in the following represent the index of the [filter_num1, filter_num2, kernel_size]
and depth means repeat times for forth downsample
"""
# yapf: disable
base_init_tokens = [6, # 32
6, 6, 0, # 32, 64, 3
6, 7, 0, # 64, 128, 3
7, 6, 0, # 128, 128, 3
6, 10, 0, # 128, 256, 3
10, 8, 0, # 256, 256, 3
8, 11, 0, # 256, 512, 3
4, # depth 5
11, 8, 0, # 512, 512, 3
8, 10, 0, # 512, 1024, 3
10, 10, 0] # 1024, 1024, 3
# yapf: enable
return base_init_tokens
def range_table(self):
"""
Get range table of current search space, constrains the range of tokens.
"""
# yapf: disable
base_range_table = [len(self.head_num),
len(self.filter_num1), len(self.filter_num2), len(self.k_size),
len(self.filter_num2), len(self.filter_num3), len(self.k_size),
len(self.filter_num3), len(self.filter_num4), len(self.k_size),
len(self.filter_num4), len(self.filter_num5), len(self.k_size),
len(self.filter_num5), len(self.filter_num6), len(self.k_size),
len(self.filter_num6), len(self.filter_num7), len(self.k_size),
len(self.repeat),
len(self.filter_num7), len(self.filter_num8), len(self.k_size),
len(self.filter_num8), len(self.filter_num9), len(self.k_size),
len(self.filter_num9), len(self.filter_num9), len(self.k_size)]
# yapf: enable
return base_range_table
def token2arch(self, tokens=None):
    """Decode a token list into a MobileNetV1-style architecture builder.

    Args:
        tokens: list of int indices into the candidate tables
            (``filter_num1``..``filter_num9``, ``k_size``, ``repeat``,
            ``head_num``). Defaults to ``self.tokens()``.

    Returns:
        ``net_arch``: a callable ``net_arch(input, scale, return_block,
        end_points, output_stride)`` that builds the network and returns
        either the final tensor or ``(tensor, decode_ends)`` once an
        end point is reached.
    """
    if tokens is None:
        tokens = self.tokens()

    # Each entry is (in_filters, out_filters, stride, kernel_size).
    self.bottleneck_param_list = []
    # tokens[0] selects the stem (head) filter number, used in net_arch below.
    # 32 -> 64
    self.bottleneck_param_list.append(
        (self.filter_num1[tokens[1]], self.filter_num2[tokens[2]], 1,
         self.k_size[tokens[3]]))
    # 64 -> 128 -> 128
    self.bottleneck_param_list.append(
        (self.filter_num2[tokens[4]], self.filter_num3[tokens[5]], 2,
         self.k_size[tokens[6]]))
    self.bottleneck_param_list.append(
        (self.filter_num3[tokens[7]], self.filter_num4[tokens[8]], 1,
         self.k_size[tokens[9]]))
    # 128 -> 256 -> 256
    self.bottleneck_param_list.append(
        (self.filter_num4[tokens[10]], self.filter_num5[tokens[11]], 2,
         self.k_size[tokens[12]]))
    self.bottleneck_param_list.append(
        (self.filter_num5[tokens[13]], self.filter_num6[tokens[14]], 1,
         self.k_size[tokens[15]]))
    # 256 -> 512, then (512 -> 512) repeated a searched number of times
    self.bottleneck_param_list.append(
        (self.filter_num6[tokens[16]], self.filter_num7[tokens[17]], 2,
         self.k_size[tokens[18]]))
    for i in range(self.repeat[tokens[19]]):
        self.bottleneck_param_list.append(
            (self.filter_num7[tokens[20]], self.filter_num8[tokens[21]], 1,
             self.k_size[tokens[22]]))
    # 512 -> 1024 -> 1024
    self.bottleneck_param_list.append(
        (self.filter_num8[tokens[23]], self.filter_num9[tokens[24]], 2,
         self.k_size[tokens[25]]))
    self.bottleneck_param_list.append(
        (self.filter_num9[tokens[26]], self.filter_num9[tokens[27]], 1,
         self.k_size[tokens[28]]))

    def _modify_bottle_params(output_stride=None):
        # Cap the network's cumulative stride at `output_stride` by forcing
        # stride 1 on every stage past the cap.
        if output_stride is not None and output_stride % 2 != 0:
            raise Exception("output stride must to be even number")
        if output_stride is None:
            return
        stride = 2
        # Fix: this previously read/wrote self.bottleneck_params_list (extra
        # "s"), an attribute that is never assigned; the list built above is
        # self.bottleneck_param_list, so any output_stride request crashed.
        for i, layer_setting in enumerate(self.bottleneck_param_list):
            f1, f2, s, ks = layer_setting
            stride = stride * s
            if stride > output_stride:
                s = 1
            self.bottleneck_param_list[i] = (f1, f2, s, ks)

    def net_arch(input,
                 scale=1.0,
                 return_block=None,
                 end_points=None,
                 output_stride=None):
        # Build the decoded network on top of `input` and return either the
        # final tensor or (tensor, decode_ends) when an end point is hit.
        self.scale = scale

        _modify_bottle_params(output_stride)

        decode_ends = dict()

        # Stem convolution; tokens[0] chooses its width.
        input = conv_bn_layer(
            input=input,
            filter_size=3,
            num_filters=self.head_num[tokens[0]],
            stride=2,
            name='mobilenetv1_conv1')

        layer_count = 1
        for i, layer_setting in enumerate(self.bottleneck_param_list):
            filter_num1, filter_num2, stride, kernel_size = layer_setting
            if stride == 2:
                layer_count += 1
            ### return_block and end_points means block num
            if check_points((layer_count - 1), return_block):
                decode_ends[layer_count - 1] = input

            if check_points((layer_count - 1), end_points):
                return input, decode_ends

            input = self._depthwise_separable(
                input=input,
                num_filters1=filter_num1,
                num_filters2=filter_num2,
                num_groups=filter_num1,
                stride=stride,
                scale=self.scale,
                kernel_size=int(kernel_size),
                name='mobilenetv1_{}'.format(str(i + 1)))

        ### return_block and end_points means block num
        if check_points(layer_count, end_points):
            return input, decode_ends

        input = fluid.layers.pool2d(
            input=input,
            pool_type='avg',
            global_pooling=True,
            name='mobilenetv1_last_pool')

        return input

    return net_arch
def _depthwise_separable(self,
                         input,
                         num_filters1,
                         num_filters2,
                         num_groups,
                         stride,
                         scale,
                         kernel_size,
                         name=None):
    """One depthwise-separable unit: depthwise conv_bn followed by a 1x1
    pointwise conv_bn.

    The incoming ``num_groups`` argument is intentionally overridden with the
    input's channel dimension so the depthwise stage has one group per
    channel.
    """
    num_groups = input.shape[1]

    scaled_channels = int(num_filters1 * scale)
    if scaled_channels > num_groups:
        # Round down to the nearest multiple of the group count so the
        # depthwise convolution's channels divide evenly into groups.
        dw_channels = scaled_channels - (scaled_channels % num_groups)
    else:
        dw_channels = num_groups

    depthwise_conv = conv_bn_layer(
        input=input,
        filter_size=kernel_size,
        num_filters=dw_channels,
        stride=stride,
        num_groups=num_groups,
        use_cudnn=False,
        name=name + '_dw')

    # 1x1 pointwise projection to the (scaled) output width.
    return conv_bn_layer(
        input=depthwise_conv,
        filter_size=1,
        num_filters=int(num_filters2 * scale),
        stride=1,
        name=name + '_sep')
| {
"pile_set_name": "Github"
} |
package com.yiqiniu.easytrans.protocol.tcc;
import java.lang.annotation.Annotation;
import java.lang.reflect.InvocationHandler;
import java.lang.reflect.Method;
import java.lang.reflect.Proxy;
import com.yiqiniu.easytrans.protocol.AnnotationBusinessProviderBuilder;
import com.yiqiniu.easytrans.protocol.BusinessProvider;
import com.yiqiniu.easytrans.protocol.EasyTransRequest;
import com.yiqiniu.easytrans.protocol.RequestClassAware;
public class EtTccAnnotationBusinessProviderBuilder extends AnnotationBusinessProviderBuilder {

	/**
	 * Builds a TCC {@link BusinessProvider} from an {@link EtTcc}-annotated try method.
	 * The annotated method itself is the "try" phase; the confirm/cancel phases are
	 * located by name (from the annotation) on the same bean. The returned provider is
	 * a dynamic proxy that dispatches DO_TRY/DO_CONFIRM/DO_CANCEL to the Spring bean
	 * looked up by {@code beanName} at invocation time.
	 */
	@Override
	public BusinessProvider<?> create(Annotation ann, final Object proxyObj, Method targetMethod, Class<?> requestClass, String beanName) {

		EtTcc tccAnn = (EtTcc) ann;

		// Locate the confirm and cancel methods declared by the annotation.
		// Overloading is rejected because dispatch is by name only.
		Method confirm = null;
		Method cancel = null;
		Method[] methods = proxyObj.getClass().getMethods();
		for(Method m : methods) {
			if(m.getName().equals(tccAnn.confirmMethod())) {
				if(confirm != null) {
					throw new RuntimeException("not allow duplicated method name " + tccAnn.confirmMethod() + " in class " + proxyObj.getClass());
				} else {
					confirm = m;
				}
			}

			if(m.getName().equals(tccAnn.cancelMethod())) {
				if(cancel != null) {
					throw new RuntimeException("not allow duplicated method name " + tccAnn.cancelMethod() + " in class " + proxyObj.getClass());
				} else {
					cancel = m;
				}
			}
		}

		if(confirm == null || cancel == null) {
			throw new IllegalArgumentException("can not find specified confirm/cancel method: " + tccAnn);
		}

		// The annotation may override the request class inferred from the method.
		if(tccAnn.cfgClass() != NullEasyTransRequest.class) {
			requestClass = tccAnn.cfgClass();
		}

		// Effectively-final copies for capture by the anonymous InvocationHandler.
		final Method finalConfirm = confirm;
		final Method finalCancel = cancel;
		final Class<?> finalRequestClass = requestClass;
		Object tcc = Proxy.newProxyInstance(this.getClass().getClassLoader(), new Class[] {TccMethod.class,RequestClassAware.class}, new InvocationHandler() {
			@Override
			public Object invoke(Object proxy, Method method, Object[] args) throws Throwable {
				// Resolve the Spring-proxied bean lazily so AOP advice applies.
				Object springProxiedBean = getApplicationContext().getBean(beanName);
				switch(method.getName()) {
				case BusinessProvider.GET_IDEMPOTENT_TYPE:
					return tccAnn.idempotentType();
				case RequestClassAware.GET_REQUEST_CLASS:
					return finalRequestClass;
				case TccMethod.DO_TRY:
					return targetMethod.invoke(springProxiedBean, args);
				case TccMethod.DO_CONFIRM:
					return finalConfirm.invoke(springProxiedBean, args);
				case TccMethod.DO_CANCEL:
					return finalCancel.invoke(springProxiedBean, args);
				default:
					throw new RuntimeException("not recognized method!" + method);
				}
			}
		});

		return (BusinessProvider<?>) tcc;
	}

	@Override
	public Class<? extends Annotation> getTargetAnnotation() {
		return EtTcc.class;
	}

	/**
	 * Returns the request class: the annotation's cfgClass when set, otherwise the
	 * class inferred from the annotated method.
	 */
	@SuppressWarnings("unchecked")
	@Override
	public Class<? extends EasyTransRequest<?, ?>> getActualConfigClass(Annotation ann, Class<?> requestClass) {
		EtTcc providerAnn = (EtTcc) ann;
		if(providerAnn.cfgClass() != NullEasyTransRequest.class) {
			return (Class<? extends EasyTransRequest<?, ?>>) providerAnn.cfgClass();
		} else {
			return (Class<? extends EasyTransRequest<?, ?>>) requestClass;
		}
	}
}
| {
"pile_set_name": "Github"
} |
%
% The Rogo problem Intro 7
%
% http://www.rogopuzzle.co.nz/paper-rogos/intro-rogo-7/
%
5
9
20
33
3 2 1 W W W 1 3 5
W W 4 W W W 5 W W
2 W W B 2 B W W 2
W W 3 W W W 2 W W
2 4 4 W W W 1 1 4
| {
"pile_set_name": "Github"
} |
package com.alibaba.json.bvt.bug;
import java.util.List;
import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.annotation.JSONField;
import junit.framework.TestCase;
public class Bug_for_issue_555_setter extends TestCase {

    // Regression test for fastjson issue #555: parsing succeeds when a nested
    // field is annotated with deserialize = false. The test passes as long as
    // parseObject does not throw -- there are no assertions.
    public void test_for_issue() throws Exception {
        JSON.parseObject("{\"list\":[{\"spec\":{}}]}", A.class);
    }

    public static class A {

        public List<B> list;
    }

    public static class B {

        // serialize = true / deserialize = false: the incoming "spec" value is
        // expected to be skipped during deserialization rather than mapped.
        @JSONField(serialize = true, deserialize = false)
        private Spec spec;

        public Spec getSpec() {
            return spec;
        }

        public void setSpec(Spec spec) {
            this.spec = spec;
        }
    }

    public static class Spec {

        // Spec deliberately has no default constructor, so the test can only
        // pass if the annotated field is skipped instead of instantiated.
        private int id;

        public Spec(int id){
            this.id = id;
        }
    }
}
| {
"pile_set_name": "Github"
} |
"%1$@ %2$@ has been downloaded and is ready to use! Would you like to install it and relaunch %1$@ now?" = "%1$@ %2$@ 已下載且可供使用!您是否要現在安裝並重新啟動%1$@?";
"%1$@ can't be updated, because it was opened from a read-only or a temporary location. Use Finder to copy %1$@ to the Applications folder, relaunch it from there, and try again." = "當 %1$@ 正從唯讀卷宗(如磁碟映像檔或光碟機)執行時,無法進行更新。請將 %1$@ 移至您的“應用程式”檔案夾,從該處重新啟動,然後再試一次。 ";
"%@ %@ is currently the newest version available." = "%1$@ %2$@ 已是目前最新的版本。";
/* Description text for SUUpdateAlert when the update is downloadable. */
"%@ %@ is now available--you have %@. Would you like to download it now?" = "%1$@ %2$@ 現在已可取得,您的版本則是 %3$@。您要現在下載嗎?";
/* Description text for SUUpdateAlert when the update is informational with no download. */
"%@ %@ is now available--you have %@. Would you like to learn more about this update on the web?" = "%1$@ %2$@ 現在已可取得,您的版本則是 %3$@。您要在網路上進一步瞭解此更新項目的相關資訊嗎?";
"%@ downloaded" = "%@ 已下載";
"%@ of %@" = "%1$@ / %2$@";
"A new version of %@ is available!" = "已有新版本的 %@ 可供下載!";
"A new version of %@ is ready to install!" = "新版本的 %@ 已準備安裝!";
"An error occurred in retrieving update information. Please try again later." = "擷取更新資訊時發生錯誤。請稍後再試一次。";
"An error occurred while downloading the update. Please try again later." = "下載更新項目時發生錯誤。請稍後再試一次。";
"An error occurred while extracting the archive. Please try again later." = "解壓縮封存檔時發生錯誤。請稍後再試一次。";
"An error occurred while installing the update. Please try again later." = "安裝更新項目時發生錯誤。請稍後再試一次。";
"An error occurred while parsing the update feed." = "解析更新 feed 時發生錯誤。";
"An error occurred while relaunching %1$@, but the new version will be available next time you run %1$@." = "重新啟動 %1$@ 時發生錯誤,但下次您執行 %1$@ 時將可使用新版本。";
/* the unit for bytes */
"B" = "B";
"Cancel" = "取消";
"Cancel Update" = "取消更新";
"Checking for updates..." = "正在檢查更新項目⋯";
/* Take care not to overflow the status window. */
"Downloading update..." = "正在下載更新項目⋯";
/* Take care not to overflow the status window. */
"Extracting update..." = "正在解壓縮更新項目⋯";
/* the unit for gigabytes */
"GB" = "GB";
"Install and Relaunch" = "安裝與重新啟動";
/* Take care not to overflow the status window. */
"Installing update..." = "正在安裝更新項目⋯";
/* the unit for kilobytes */
"KB" = "KB";
/* the unit for megabytes */
"MB" = "MB";
"OK" = "好";
"Ready to Install" = "準備安裝";
"Should %1$@ automatically check for updates? You can always check for updates manually from the %1$@ menu." = "%1$@ 是否應自動檢查更新項目?您可隨時從 %1$@ 選單手動檢查更新項目。";
"Update Error!" = "更新發生錯誤!";
"Updating %@" = "正在更新 %@";
"You already have the newest version of %@." = "您已有最新版本的 %@。";
"You're up-to-date!" = "你已有最新版本!";
| {
"pile_set_name": "Github"
} |
/**
* @class UIImage
* @extends UIElement
* Used to display an image. UIImage can set multiple images, but only one will be the current image. Other images will be optional images (currently 15, but this can be increased). A specific optional image can be set to be the current image with setValue.
*
* Sets the first optional image to be the current image:
*
* @example small frame
* this.setValue(0);
*
*/
| {
"pile_set_name": "Github"
} |
import smart_imports
smart_imports.all()
class HistoryTests(utils_testcase.TestCase):
    """View tests for the politic-power history page: argument validation,
    filtering by power type / account / place / person, and history length."""

    def setUp(self):
        super(HistoryTests, self).setUp()

        self.place_1, self.place_2, self.place_3 = game_logic.create_test_map()

        self.person_1 = self.place_1.persons[0]
        self.person_2 = self.place_2.persons[0]

        self.account_1_id = self.accounts_factory.create_account().id
        self.account_2_id = self.accounts_factory.create_account().id

        # start every test from an empty power-impacts storage
        game_tt_services.debug_clear_service()

    def url(self, power_type=views.POWER_TYPE_FILTER.ALL.value, account_id=None, place_id=None, person_id=None):
        # build the history url including only the filters that were passed
        arguments = {}

        if power_type is not None:
            arguments['power_type'] = power_type

        if account_id is not None:
            arguments['account'] = account_id

        if place_id is not None:
            arguments['place'] = place_id

        if person_id is not None:
            arguments['person'] = person_id

        return utils_urls.url('game:politic-power:history', **arguments)

    def test_wrong_account(self):
        self.check_html_ok(self.request_html(self.url(account_id='saasda')), texts=[('pgf-error-account.wrong_format', 1)])

    def test_wrong_power_type(self):
        self.check_html_ok(self.request_html(self.url(power_type='adasd')), texts=[('pgf-error-power_type.wrong_format', 1)])

    def test_wrong_place(self):
        self.check_html_ok(self.request_html(self.url(place_id='saasda')), texts=[('pgf-error-place.wrong_format', 1)])

    def test_wrong_person(self):
        self.check_html_ok(self.request_html(self.url(person_id='saasda')), texts=[('pgf-error-person.wrong_format', 1)])

    def test_no_records_found(self):
        self.check_html_ok(self.request_html(self.url()), texts=[('pgf-no-impacts', 1)])

    def test_places_and_person_error_message(self):
        # filtering by a place and a person at the same time is not supported
        self.check_html_ok(self.request_html(self.url(place_id=self.place_1.id, person_id=self.place_1.persons[0].id)),
                           texts=[('pgf-cannot-filter-by-place-and-master', 1)])

    def prepair_filter_data(self):
        # create inner- and outer-circle impacts for every combination of
        # account x person and account x place, both hero-sourced
        # (hero_2_person/hero_2_place) and raw account-sourced; random amounts
        # in [1000000, 10000000] are large enough to show up verbatim in HTML
        impacts = []

        for impact_type in (game_tt_services.IMPACT_TYPE.INNER_CIRCLE, game_tt_services.IMPACT_TYPE.OUTER_CIRCLE):
            for account_id in (self.account_1_id, self.account_2_id):
                for person_id in (self.person_1.id, self.person_2.id):
                    impacts.append(game_tt_services.PowerImpact.hero_2_person(type=impact_type,
                                                                              hero_id=account_id,
                                                                              person_id=person_id,
                                                                              amount=random.randint(1000000, 10000000)))
                    impacts.append(game_tt_services.PowerImpact(type=impact_type,
                                                                actor_type=tt_api_impacts.OBJECT_TYPE.ACCOUNT,
                                                                actor_id=account_id,
                                                                target_type=tt_api_impacts.OBJECT_TYPE.PERSON,
                                                                target_id=person_id,
                                                                amount=random.randint(1000000, 10000000),
                                                                turn=None,
                                                                transaction=None))

                for place_id in (self.place_1.id, self.place_2.id):
                    impacts.append(game_tt_services.PowerImpact.hero_2_place(type=impact_type,
                                                                             hero_id=account_id,
                                                                             place_id=place_id,
                                                                             amount=random.randint(1000000, 10000000)))
                    impacts.append(game_tt_services.PowerImpact(type=impact_type,
                                                                actor_type=tt_api_impacts.OBJECT_TYPE.ACCOUNT,
                                                                actor_id=account_id,
                                                                target_type=tt_api_impacts.OBJECT_TYPE.PLACE,
                                                                target_id=place_id,
                                                                amount=random.randint(1000000, 10000000),
                                                                turn=None,
                                                                transaction=None))

        for impact in impacts:
            logic.add_power_impacts([impact])

        return impacts

    def test_filter_by_power_type(self):
        # texts entries of the form (value, 0) assert the value does NOT appear
        impacts = self.prepair_filter_data()

        self.check_html_ok(self.request_html(self.url(power_type=views.POWER_TYPE_FILTER.ALL.value)),
                           texts=[impact.amount for impact in impacts])

        self.check_html_ok(self.request_html(self.url(power_type=views.POWER_TYPE_FILTER.PERSONAL.value)),
                           texts=[impact.amount if impact.type.is_INNER_CIRCLE else (impact.amount, 0)
                                  for impact in impacts if impact.type.is_INNER_CIRCLE])

        self.check_html_ok(self.request_html(self.url(power_type=views.POWER_TYPE_FILTER.CROWD.value)),
                           texts=[impact.amount if impact.type.is_OUTER_CIRCLE else (impact.amount, 0)
                                  for impact in impacts])

    def test_filter_by_account_or_hero(self):
        impacts = self.prepair_filter_data()

        self.check_html_ok(self.request_html(self.url(account_id=self.account_1_id)),
                           texts=[impact.amount if impact.actor_id == self.account_1_id else (impact.amount, 0)
                                  for impact in impacts])

    def test_filter_by_place(self):
        impacts = self.prepair_filter_data()

        self.check_html_ok(self.request_html(self.url(place_id=self.place_1.id)),
                           texts=[impact.amount if impact.target_id == self.place_1.id and impact.target_type.is_PLACE else (impact.amount, 0)
                                  for impact in impacts])

    def test_filter_by_person(self):
        impacts = self.prepair_filter_data()

        self.check_html_ok(self.request_html(self.url(person_id=self.person_1.id)),
                           texts=[impact.amount if impact.target_id == self.person_1.id and impact.target_type.is_PERSON else (impact.amount, 0)
                                  for impact in impacts])

    def test_limit(self):
        # only the MAX_HISTORY_LENGTH newest impacts are shown: the first N
        # recorded amounts must be absent from the page
        N = 10

        for i in range(conf.settings.MAX_HISTORY_LENGTH + N):
            logic.add_power_impacts([game_tt_services.PowerImpact.hero_2_place(type=game_tt_services.IMPACT_TYPE.INNER_CIRCLE,
                                                                               hero_id=self.account_1_id,
                                                                               place_id=self.place_1.id,
                                                                               amount=1000000 + i)])

        texts = ['{}'.format(1000000 + i) for i in range(N, conf.settings.MAX_HISTORY_LENGTH + N)]
        texts.extend(('{}'.format(1000000 + i), 0) for i in range(N))

        self.check_html_ok(self.request_html(self.url()), texts=texts)
| {
"pile_set_name": "Github"
} |
// Returns a copy of `array` rearranged by `indexes`:
// result[i] === array[indexes[i]].
d3.permute = function(array, indexes) {
  var n = indexes.length,
      result = new Array(n);
  for (var i = 0; i < n; i++) result[i] = array[indexes[i]];
  return result;
};
| {
"pile_set_name": "Github"
} |
import "Exception.proto";
option java_outer_classname="KeyExceptionPackageResponseProtocol";
package Alachisoft.NCache.Common.Protobuf;
message KeyExceptionPackageResponse
{
repeated string keys = 1;
repeated Exception exceptions = 2;
} | {
"pile_set_name": "Github"
} |
"""Tests formatting as writer-agnostic ExcelCells
ExcelFormatter is tested implicitly in pandas/tests/io/excel
"""
import pytest
import pandas._testing as tm
from pandas.io.formats.css import CSSWarning
from pandas.io.formats.excel import CSSToExcelConverter
@pytest.mark.parametrize(
    "css,expected",
    [
        # FONT
        # - name
        ("font-family: foo,bar", {"font": {"name": "foo"}}),
        ('font-family: "foo bar",baz', {"font": {"name": "foo bar"}}),
        ("font-family: foo,\nbar", {"font": {"name": "foo"}}),
        ("font-family: foo, bar, baz", {"font": {"name": "foo"}}),
        ("font-family: bar, foo", {"font": {"name": "bar"}}),
        ("font-family: 'foo bar', baz", {"font": {"name": "foo bar"}}),
        ("font-family: 'foo \\'bar', baz", {"font": {"name": "foo 'bar"}}),
        ('font-family: "foo \\"bar", baz', {"font": {"name": 'foo "bar'}}),
        ('font-family: "foo ,bar", baz', {"font": {"name": "foo ,bar"}}),
        # - family
        ("font-family: serif", {"font": {"name": "serif", "family": 1}}),
        ("font-family: Serif", {"font": {"name": "serif", "family": 1}}),
        ("font-family: roman, serif", {"font": {"name": "roman", "family": 1}}),
        ("font-family: roman, sans-serif", {"font": {"name": "roman", "family": 2}}),
        ("font-family: roman, sans serif", {"font": {"name": "roman"}}),
        ("font-family: roman, sansserif", {"font": {"name": "roman"}}),
        ("font-family: roman, cursive", {"font": {"name": "roman", "family": 4}}),
        ("font-family: roman, fantasy", {"font": {"name": "roman", "family": 5}}),
        # - size
        ("font-size: 1em", {"font": {"size": 12}}),
        ("font-size: xx-small", {"font": {"size": 6}}),
        ("font-size: x-small", {"font": {"size": 7.5}}),
        ("font-size: small", {"font": {"size": 9.6}}),
        ("font-size: medium", {"font": {"size": 12}}),
        ("font-size: large", {"font": {"size": 13.5}}),
        ("font-size: x-large", {"font": {"size": 18}}),
        ("font-size: xx-large", {"font": {"size": 24}}),
        ("font-size: 50%", {"font": {"size": 6}}),
        # - bold
        ("font-weight: 100", {"font": {"bold": False}}),
        ("font-weight: 200", {"font": {"bold": False}}),
        ("font-weight: 300", {"font": {"bold": False}}),
        ("font-weight: 400", {"font": {"bold": False}}),
        ("font-weight: normal", {"font": {"bold": False}}),
        ("font-weight: lighter", {"font": {"bold": False}}),
        ("font-weight: bold", {"font": {"bold": True}}),
        ("font-weight: bolder", {"font": {"bold": True}}),
        ("font-weight: 700", {"font": {"bold": True}}),
        ("font-weight: 800", {"font": {"bold": True}}),
        ("font-weight: 900", {"font": {"bold": True}}),
        # - italic
        ("font-style: italic", {"font": {"italic": True}}),
        ("font-style: oblique", {"font": {"italic": True}}),
        # - underline
        ("text-decoration: underline", {"font": {"underline": "single"}}),
        ("text-decoration: overline", {}),
        ("text-decoration: none", {}),
        # - strike
        ("text-decoration: line-through", {"font": {"strike": True}}),
        (
            "text-decoration: underline line-through",
            {"font": {"strike": True, "underline": "single"}},
        ),
        (
            "text-decoration: underline; text-decoration: line-through",
            {"font": {"strike": True}},
        ),
        # - color
        ("color: red", {"font": {"color": "FF0000"}}),
        ("color: #ff0000", {"font": {"color": "FF0000"}}),
        ("color: #f0a", {"font": {"color": "FF00AA"}}),
        # - shadow
        ("text-shadow: none", {"font": {"shadow": False}}),
        ("text-shadow: 0px -0em 0px #CCC", {"font": {"shadow": False}}),
        ("text-shadow: 0px -0em 0px #999", {"font": {"shadow": False}}),
        ("text-shadow: 0px -0em 0px", {"font": {"shadow": False}}),
        ("text-shadow: 2px -0em 0px #CCC", {"font": {"shadow": True}}),
        ("text-shadow: 0px -2em 0px #CCC", {"font": {"shadow": True}}),
        ("text-shadow: 0px -0em 2px #CCC", {"font": {"shadow": True}}),
        ("text-shadow: 0px -0em 2px", {"font": {"shadow": True}}),
        ("text-shadow: 0px -2em", {"font": {"shadow": True}}),
        # FILL
        # - color, fillType
        (
            "background-color: red",
            {"fill": {"fgColor": "FF0000", "patternType": "solid"}},
        ),
        (
            "background-color: #ff0000",
            {"fill": {"fgColor": "FF0000", "patternType": "solid"}},
        ),
        (
            "background-color: #f0a",
            {"fill": {"fgColor": "FF00AA", "patternType": "solid"}},
        ),
        # BORDER
        # - style
        (
            "border-style: solid",
            {
                "border": {
                    "top": {"style": "medium"},
                    "bottom": {"style": "medium"},
                    "left": {"style": "medium"},
                    "right": {"style": "medium"},
                }
            },
        ),
        (
            "border-style: solid; border-width: thin",
            {
                "border": {
                    "top": {"style": "thin"},
                    "bottom": {"style": "thin"},
                    "left": {"style": "thin"},
                    "right": {"style": "thin"},
                }
            },
        ),
        (
            "border-top-style: solid; border-top-width: thin",
            {"border": {"top": {"style": "thin"}}},
        ),
        (
            "border-top-style: solid; border-top-width: 1pt",
            {"border": {"top": {"style": "thin"}}},
        ),
        ("border-top-style: solid", {"border": {"top": {"style": "medium"}}}),
        (
            "border-top-style: solid; border-top-width: medium",
            {"border": {"top": {"style": "medium"}}},
        ),
        (
            "border-top-style: solid; border-top-width: 2pt",
            {"border": {"top": {"style": "medium"}}},
        ),
        (
            "border-top-style: solid; border-top-width: thick",
            {"border": {"top": {"style": "thick"}}},
        ),
        (
            "border-top-style: solid; border-top-width: 4pt",
            {"border": {"top": {"style": "thick"}}},
        ),
        (
            "border-top-style: dotted",
            {"border": {"top": {"style": "mediumDashDotDot"}}},
        ),
        (
            "border-top-style: dotted; border-top-width: thin",
            {"border": {"top": {"style": "dotted"}}},
        ),
        ("border-top-style: dashed", {"border": {"top": {"style": "mediumDashed"}}}),
        (
            "border-top-style: dashed; border-top-width: thin",
            {"border": {"top": {"style": "dashed"}}},
        ),
        ("border-top-style: double", {"border": {"top": {"style": "double"}}}),
        # - color
        (
            "border-style: solid; border-color: #0000ff",
            {
                "border": {
                    "top": {"style": "medium", "color": "0000FF"},
                    "right": {"style": "medium", "color": "0000FF"},
                    "bottom": {"style": "medium", "color": "0000FF"},
                    "left": {"style": "medium", "color": "0000FF"},
                }
            },
        ),
        (
            "border-top-style: double; border-top-color: blue",
            {"border": {"top": {"style": "double", "color": "0000FF"}}},
        ),
        (
            "border-top-style: solid; border-top-color: #06c",
            {"border": {"top": {"style": "medium", "color": "0066CC"}}},
        ),
        # ALIGNMENT
        # - horizontal
        ("text-align: center", {"alignment": {"horizontal": "center"}}),
        ("text-align: left", {"alignment": {"horizontal": "left"}}),
        ("text-align: right", {"alignment": {"horizontal": "right"}}),
        ("text-align: justify", {"alignment": {"horizontal": "justify"}}),
        # - vertical
        ("vertical-align: top", {"alignment": {"vertical": "top"}}),
        ("vertical-align: text-top", {"alignment": {"vertical": "top"}}),
        ("vertical-align: middle", {"alignment": {"vertical": "center"}}),
        ("vertical-align: bottom", {"alignment": {"vertical": "bottom"}}),
        ("vertical-align: text-bottom", {"alignment": {"vertical": "bottom"}}),
        # - wrap_text
        ("white-space: nowrap", {"alignment": {"wrap_text": False}}),
        ("white-space: pre", {"alignment": {"wrap_text": False}}),
        ("white-space: pre-line", {"alignment": {"wrap_text": False}}),
        ("white-space: normal", {"alignment": {"wrap_text": True}}),
        # NUMBER FORMAT
        ("number-format: 0%", {"number_format": {"format_code": "0%"}}),
    ],
)
def test_css_to_excel(css, expected):
    """A single CSS declaration converts to the expected Excel style dict."""
    convert = CSSToExcelConverter()
    assert expected == convert(css)
def test_css_to_excel_multiple():
    """Multiple declarations in one CSS string are merged into a single
    Excel style dict; unknown properties ("unused") are dropped."""
    css = """
        font-weight: bold;
        text-decoration: underline;
        color: red;
        border-width: thin;
        text-align: center;
        vertical-align: top;
        unused: something;
    """
    expected = {
        "font": {"bold": True, "underline": "single", "color": "FF0000"},
        "border": {
            "top": {"style": "thin"},
            "right": {"style": "thin"},
            "bottom": {"style": "thin"},
            "left": {"style": "thin"},
        },
        "alignment": {"horizontal": "center", "vertical": "top"},
    }

    converter = CSSToExcelConverter()
    assert converter(css) == expected
@pytest.mark.parametrize(
    "css,inherited,expected",
    [
        ("font-weight: bold", "", {"font": {"bold": True}}),
        ("", "font-weight: bold", {"font": {"bold": True}}),
        (
            "font-weight: bold",
            "font-style: italic",
            {"font": {"bold": True, "italic": True}},
        ),
        ("font-style: normal", "font-style: italic", {"font": {"italic": False}}),
        ("font-style: inherit", "", {}),
        (
            "font-style: normal; font-style: inherit",
            "font-style: italic",
            {"font": {"italic": True}},
        ),
    ],
)
def test_css_to_excel_inherited(css, inherited, expected):
    """Inherited declarations merge with, and are overridden by, the cell's own CSS."""
    convert = CSSToExcelConverter(inherited)
    assert expected == convert(css)
@pytest.mark.parametrize(
    "input_color,output_color",
    (
        list(CSSToExcelConverter.NAMED_COLORS.items())
        + [("#" + rgb, rgb) for rgb in CSSToExcelConverter.NAMED_COLORS.values()]
        + [("#F0F", "FF00FF"), ("#ABC", "AABBCC")]
    ),
)
def test_css_to_excel_good_colors(input_color, output_color):
    """Valid named, 6-digit and 3-digit hex colors map to Excel RGB strings
    for font, fill and all four border sides, without emitting any warning."""
    # see gh-18392
    css = (
        f"border-top-color: {input_color}; "
        f"border-right-color: {input_color}; "
        f"border-bottom-color: {input_color}; "
        f"border-left-color: {input_color}; "
        f"background-color: {input_color}; "
        f"color: {input_color}"
    )

    # Dict literal instead of dict() (idiomatic, avoids a name lookup).
    expected = {
        "fill": {"patternType": "solid", "fgColor": output_color},
        "font": {"color": output_color},
        "border": {
            k: {"color": output_color} for k in ("top", "right", "bottom", "left")
        },
    }

    with tm.assert_produces_warning(None):
        convert = CSSToExcelConverter()
        assert expected == convert(css)
@pytest.mark.parametrize("input_color", [None, "not-a-color"])
def test_css_to_excel_bad_colors(input_color):
    """Invalid colors raise a CSSWarning and are dropped from the output;
    a present-but-bad background color still yields a solid fill pattern."""
    # see gh-18392
    css = (
        f"border-top-color: {input_color}; "
        f"border-right-color: {input_color}; "
        f"border-bottom-color: {input_color}; "
        f"border-left-color: {input_color}; "
        f"background-color: {input_color}; "
        f"color: {input_color}"
    )

    # Dict literal instead of dict() (idiomatic, avoids a name lookup).
    expected = {}
    if input_color is not None:
        expected["fill"] = {"patternType": "solid"}

    with tm.assert_produces_warning(CSSWarning):
        convert = CSSToExcelConverter()
        assert expected == convert(css)
| {
"pile_set_name": "Github"
} |
/*
* Copyright (c) 2018-2020 "Graph Foundation"
* Graph Foundation, Inc. [https://graphfoundation.org]
*
* Copyright (c) 2002-2018 "Neo4j,"
* Neo4j Sweden AB [http://neo4j.com]
*
* This file is part of ONgDB Enterprise Edition. The included source
* code can be redistributed and/or modified under the terms of the
* GNU AFFERO GENERAL PUBLIC LICENSE Version 3
* (http://www.fsf.org/licensing/licenses/agpl-3.0.html) as found
* in the associated LICENSE.txt file.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*/
package org.neo4j.cluster.member.paxos;
import org.junit.Test;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import org.neo4j.cluster.InstanceId;
import org.neo4j.cluster.protocol.atomicbroadcast.ObjectStreamFactory;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotEquals;
import static org.junit.Assert.assertNotSame;
import static org.junit.Assert.assertNull;
public class MemberIsUnavailableTest
{
    @Test
    public void shouldBeSerializedWhenClusterUriIsNull() throws IOException
    {
        // Given
        MemberIsUnavailable message = new MemberIsUnavailable( "master", new InstanceId( 1 ), null );

        // When
        byte[] serialized = serialize( message );

        // Then
        assertNotEquals( 0, serialized.length );
    }

    @Test
    public void shouldBeDeserializedWhenClusterUriIsNull() throws Exception
    {
        // Given
        MemberIsUnavailable message = new MemberIsUnavailable( "slave", new InstanceId( 1 ), null );
        byte[] serialized = serialize( message );

        // When
        MemberIsUnavailable deserialized = deserialize( serialized );

        // Then
        // Bug fix: assert on the deserialized copy, not on the original
        // message -- asserting on the original could never detect a broken
        // round-trip.
        assertNotSame( message, deserialized );
        assertEquals( "slave", deserialized.getRole() );
        assertEquals( new InstanceId( 1 ), deserialized.getInstanceId() );
        assertNull( deserialized.getClusterUri() );
    }

    /**
     * Serializes the message through {@link ObjectStreamFactory} and returns the raw bytes.
     */
    private static byte[] serialize( MemberIsUnavailable message ) throws IOException
    {
        ObjectOutputStream outputStream = null;
        try
        {
            ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream();
            outputStream = new ObjectStreamFactory().create( byteArrayOutputStream );
            outputStream.writeObject( message );
            return byteArrayOutputStream.toByteArray();
        }
        finally
        {
            if ( outputStream != null )
            {
                outputStream.close();
            }
        }
    }

    /**
     * Deserializes a message previously produced by {@link #serialize(MemberIsUnavailable)}.
     */
    private static MemberIsUnavailable deserialize( byte[] serialized ) throws Exception
    {
        ObjectInputStream inputStream = null;
        try
        {
            ByteArrayInputStream byteArrayInputStream = new ByteArrayInputStream( serialized );
            inputStream = new ObjectStreamFactory().create( byteArrayInputStream );
            return (MemberIsUnavailable) inputStream.readObject();
        }
        finally
        {
            if ( inputStream != null )
            {
                inputStream.close();
            }
        }
    }
}
| {
"pile_set_name": "Github"
} |
import React, { FC, useState, useEffect, ChangeEvent } from 'react'
import { Table, Button, Popconfirm, Tag, Input, Empty, message } from 'antd';
import { ColumnProps } from 'antd/es/table'
import { RouteComponentProps, withRouter } from 'react-router-dom'
import { CustomBreadcrumb } from '@/admin/components'
import { ExerciseListProps } from '@/admin/modals/exerciseList'
import { generateDifficulty, generateExerciseType } from '@/admin/utils/common'
import { FetchConfig } from '@/admin/modals/http'
import { useService } from '@/admin/hooks'
import http from '@/admin/utils/http'
import './index.scss'
// Admin page listing the exercise banks with client-side search, row
// selection, single/batch delete, and navigation to the publish/modify pages.
const ExerciseList: FC<RouteComponentProps> = (props: RouteComponentProps) => {
  const [currentPage, setCurrentPage] = useState<number>(0)
  // ids of the rows currently ticked in the table
  const [selectedRowKeys, setSelectedRowKeys] = useState<number[]>([])
  const [searchValue, setSearchValue] = useState<string>('')
  const [fetchConfig, setFetchConfig] = useState<FetchConfig>({
    url: '', method: 'GET', params: {}, config: {}
  })
  // bumped after every delete to retrigger the list fetch below
  const [fetchFlag, setFetchFlag] = useState<number>(0)
  const hasSelected: boolean = selectedRowKeys.length > 0
  const { history } = props

  // (Re)load the exercise list; a fresh object is passed to setFetchConfig so
  // useService sees a new config identity each time fetchFlag changes.
  useEffect(() => {
    const fetchConfig: FetchConfig = {
      url: '/exercises',
      method: 'GET',
      params: {},
      config: {}
    }
    setFetchConfig(Object.assign({}, fetchConfig))
  }, [fetchFlag])

  const handleSelectedChange = (selectedRowKeys: number[]) => {
    setSelectedRowKeys(selectedRowKeys)
  }

  const handleSearchChange = (changeEvent: ChangeEvent<HTMLInputElement>) => {
    const { target: { value } } = changeEvent
    setSearchValue(value)
  }

  const handleEditClick = (id: number) => {
    history.push(`/admin/content/exercise-modify/${id}`)
  }

  // Delete one exercise, then refresh the list and clear the selection.
  const handleDeleteClick = async (id: number) => {
    const { data: { msg } } = await http.delete(`/exercises/${id}`)
    setFetchFlag(fetchFlag + 1)
    setSelectedRowKeys([])
    message.success(msg)
  }

  // Delete all selected exercises in one request (ids go in the body).
  const handleBatchDelete = async () => {
    const { data: { msg } } = await http.delete(`/exercises`, {
      data: selectedRowKeys
    })
    setFetchFlag(fetchFlag + 1)
    setSelectedRowKeys([])
    message.success(msg)
  }

  const rowSelection = {
    selectedRowKeys,
    onChange: handleSelectedChange,
  }

  const columns: ColumnProps<ExerciseListProps>[] = [
    {
      title: '题库名称',
      dataIndex: 'exerciseName',
      key: 'exerciseName',
      width: 180,
      // client-side substring filtering driven by the search input
      filteredValue: [searchValue],
      onFilter: (_, row) => (
        row.exerciseName.toString().indexOf(searchValue) !== -1
      )
    }, {
      title: '题库内容',
      dataIndex: 'exerciseContent',
      key: 'exerciseContent',
      ellipsis: true,
    }, {
      title: '题库类型',
      dataIndex: 'exerciseType',
      key: 'exerciseType',
      width: 120,
      // map the numeric type code to a colored label
      render: exerciseType => {
        const { type, color } = generateExerciseType(exerciseType)
        return (
          <Tag color={color}>
            {type}
          </Tag>
        )
      },
      sorter: (a, b) => a.exerciseType - b.exerciseType
    }, {
      title: '难易度',
      dataIndex: 'exerciseDifficulty',
      key: 'exerciseDifficulty',
      width: 100,
      // map the numeric difficulty code to a colored label
      render: exerciseDifficulty => {
        const { difficulty, color } = generateDifficulty(exerciseDifficulty)
        return (
          <Tag color={color}>
            {difficulty}
          </Tag>
        )
      },
      sorter: (a, b) => a.exerciseDifficulty - b.exerciseDifficulty
    }, {
      title: '是否热门',
      dataIndex: 'isHot',
      key: 'isHot',
      width: 120,
      render: isHot => isHot ? '是' : '否',
      sorter: a => a.isHot ? 1 : -1
    }, {
      title: '操作',
      dataIndex: '',
      key: '',
      width: 180,
      render: (_, row) => (
        <span>
          <Button
            type="primary"
            onClick={() => handleEditClick(row.id)}>编辑</Button>
          <Popconfirm
            title="确定删除此题库吗?"
            onConfirm={() => handleDeleteClick(row.id)}
            okText="确定"
            cancelText="取消"
          >
            <Button type="danger">删除</Button>
          </Popconfirm>
        </span>
      )
    }
  ]

  // useService performs the request described by fetchConfig.
  const { isLoading = false, response } = useService(fetchConfig)
  const { data = {} } = response || {}
  const { exerciseList = [], total: totalPage = 0 } = data

  return (
    <div>
      <CustomBreadcrumb list={['内容管理', '题库管理']} />
      <div className="exercise-list__container">
        <div className="exercise-list__header">
          <Button type="primary" style={{ marginRight: 10 }} onClick={() => { history.push('/admin/content/exercise-publish') }}>新增题库</Button>
          <Popconfirm
            disabled={!hasSelected}
            title="确定删除这些题库吗?"
            onConfirm={handleBatchDelete}
            okText="确定"
            cancelText="取消"
          >
            <Button type="danger" disabled={!hasSelected}>批量删除</Button>
          </Popconfirm>
          <Input.Search
            className="search__container"
            value={searchValue}
            placeholder="请输入要查询的题库名称"
            onChange={handleSearchChange}
            enterButton />
        </div>
        <Table
          rowSelection={rowSelection}
          dataSource={exerciseList}
          columns={columns}
          rowKey="id"
          scroll={{
            y: "calc(100vh - 300px)"
          }}
          loading={{
            spinning: isLoading,
            tip: "加载中...",
            size: "large"
          }}
          pagination={{
            pageSize: 10,
            total: totalPage,
            current: currentPage,
            onChange: (pageNo) => setCurrentPage(pageNo)
          }}
          locale={{
            emptyText: <Empty
              image={Empty.PRESENTED_IMAGE_SIMPLE}
              description="暂无数据" />
          }}
        />
      </div>
    </div>
  )
}
export default withRouter(ExerciseList) | {
"pile_set_name": "Github"
} |
// Round toward zero (truncate), normalising a negative-zero result to +0.
export default function absFloor (number) {
    // For negatives, Math.ceil truncates toward zero; `|| 0` converts a
    // -0 result (e.g. for inputs in (-1, 0)) back to +0.
    return number < 0 ? (Math.ceil(number) || 0) : Math.floor(number);
}
| {
"pile_set_name": "Github"
} |
/*
* poodle.c -- SoC audio for Poodle
*
* Copyright 2005 Wolfson Microelectronics PLC.
* Copyright 2005 Openedhand Ltd.
*
* Authors: Liam Girdwood <lrg@slimlogic.co.uk>
* Richard Purdie <richard@openedhand.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/timer.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/soc.h>
#include <asm/mach-types.h>
#include <asm/hardware/locomo.h>
#include <mach/poodle.h>
#include <mach/audio.h>
#include "../codecs/wm8731.h"
#include "pxa2xx-i2s.h"
#define POODLE_HP 1
#define POODLE_HP_OFF 0
#define POODLE_SPK_ON 1
#define POODLE_SPK_OFF 0
/* audio clock in Hz - rounded from 12.235MHz */
#define POODLE_AUDIO_CLOCK 12288000
static int poodle_jack_func;
static int poodle_spk_func;
/*
 * Apply the cached jack/speaker switch state to the hardware: the LOCOMO
 * GPIOs gate the analogue headphone mute (write 1 = unmute) and the DAPM
 * pins enable/disable the corresponding output widgets, then a DAPM sync
 * propagates the changes.
 */
static void poodle_ext_control(struct snd_soc_dapm_context *dapm)
{
	/* set up jack connection */
	if (poodle_jack_func == POODLE_HP) {
		/* set = unmute headphone */
		locomo_gpio_write(&poodle_locomo_device.dev,
			POODLE_LOCOMO_GPIO_MUTE_L, 1);
		locomo_gpio_write(&poodle_locomo_device.dev,
			POODLE_LOCOMO_GPIO_MUTE_R, 1);
		snd_soc_dapm_enable_pin(dapm, "Headphone Jack");
	} else {
		/* mute both channels before disconnecting the headphone path */
		locomo_gpio_write(&poodle_locomo_device.dev,
			POODLE_LOCOMO_GPIO_MUTE_L, 0);
		locomo_gpio_write(&poodle_locomo_device.dev,
			POODLE_LOCOMO_GPIO_MUTE_R, 0);
		snd_soc_dapm_disable_pin(dapm, "Headphone Jack");
	}

	/* set the endpoints to their new connection states */
	if (poodle_spk_func == POODLE_SPK_ON)
		snd_soc_dapm_enable_pin(dapm, "Ext Spk");
	else
		snd_soc_dapm_disable_pin(dapm, "Ext Spk");

	/* signal a DAPM event */
	snd_soc_dapm_sync(dapm);
}
/*
 * DAI startup callback: re-apply the cached jack/speaker routing before
 * the stream runs.  The codec mutex serializes against control writes.
 */
static int poodle_startup(struct snd_pcm_substream *substream)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct snd_soc_codec *codec = rtd->codec;

	mutex_lock(&codec->mutex);
	/* check the jack status at stream startup */
	poodle_ext_control(&codec->dapm);
	mutex_unlock(&codec->mutex);
	return 0;
}
/* we need to unmute the HP at shutdown as the mute burns power on poodle */
static void poodle_shutdown(struct snd_pcm_substream *substream)
{
	/* set = unmute headphone */
	locomo_gpio_write(&poodle_locomo_device.dev,
		POODLE_LOCOMO_GPIO_MUTE_L, 1);
	locomo_gpio_write(&poodle_locomo_device.dev,
		POODLE_LOCOMO_GPIO_MUTE_R, 1);
}
static int poodle_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_dai *codec_dai = rtd->codec_dai;
struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
unsigned int clk = 0;
int ret = 0;
switch (params_rate(params)) {
case 8000:
case 16000:
case 48000:
case 96000:
clk = 12288000;
break;
case 11025:
case 22050:
case 44100:
clk = 11289600;
break;
}
/* set the codec system clock for DAC and ADC */
ret = snd_soc_dai_set_sysclk(codec_dai, WM8731_SYSCLK_XTAL, clk,
SND_SOC_CLOCK_IN);
if (ret < 0)
return ret;
/* set the I2S system clock as input (unused) */
ret = snd_soc_dai_set_sysclk(cpu_dai, PXA2XX_I2S_SYSCLK, 0,
SND_SOC_CLOCK_IN);
if (ret < 0)
return ret;
return 0;
}
/* PCM stream operations wired into the poodle DAI link */
static struct snd_soc_ops poodle_ops = {
	.startup = poodle_startup,
	.hw_params = poodle_hw_params,
	.shutdown = poodle_shutdown,
};
/* kcontrol get handler: report the cached "Jack Function" selection */
static int poodle_get_jack(struct snd_kcontrol *kcontrol,
	struct snd_ctl_elem_value *ucontrol)
{
	ucontrol->value.integer.value[0] = poodle_jack_func;
	return 0;
}
/*
 * kcontrol put handler for "Jack Function".  Updates the cached state and
 * re-syncs the audio paths.  Returns 1 when the value changed (so ALSA
 * notifies listeners), 0 when unchanged.
 */
static int poodle_set_jack(struct snd_kcontrol *kcontrol,
	struct snd_ctl_elem_value *ucontrol)
{
	struct snd_soc_card *card = snd_kcontrol_chip(kcontrol);

	if (poodle_jack_func == ucontrol->value.integer.value[0])
		return 0;

	poodle_jack_func = ucontrol->value.integer.value[0];
	poodle_ext_control(&card->dapm);
	return 1;
}
/* kcontrol get handler: report the cached "Speaker Function" selection */
static int poodle_get_spk(struct snd_kcontrol *kcontrol,
	struct snd_ctl_elem_value *ucontrol)
{
	ucontrol->value.integer.value[0] = poodle_spk_func;
	return 0;
}
/*
 * kcontrol put handler for "Speaker Function".  Mirrors poodle_set_jack():
 * returns 1 on change, 0 when the value is unchanged.
 */
static int poodle_set_spk(struct snd_kcontrol *kcontrol,
	struct snd_ctl_elem_value *ucontrol)
{
	struct snd_soc_card *card = snd_kcontrol_chip(kcontrol);

	if (poodle_spk_func == ucontrol->value.integer.value[0])
		return 0;

	poodle_spk_func = ucontrol->value.integer.value[0];
	poodle_ext_control(&card->dapm);
	return 1;
}
/*
 * DAPM power event for the external speaker amp.  The code drives the
 * LOCOMO AMP_ON GPIO low when the widget powers up and high when it powers
 * down — presumably the amp enable is active-low; confirm against the
 * LOCOMO/poodle hardware docs.
 */
static int poodle_amp_event(struct snd_soc_dapm_widget *w,
	struct snd_kcontrol *k, int event)
{
	if (SND_SOC_DAPM_EVENT_ON(event))
		locomo_gpio_write(&poodle_locomo_device.dev,
			POODLE_LOCOMO_GPIO_AMP_ON, 0);
	else
		locomo_gpio_write(&poodle_locomo_device.dev,
			POODLE_LOCOMO_GPIO_AMP_ON, 1);

	return 0;
}
/* poodle machine dapm widgets */
static const struct snd_soc_dapm_widget wm8731_dapm_widgets[] = {
	SND_SOC_DAPM_HP("Headphone Jack", NULL),
	/* speaker widget carries a power event so the amp GPIO tracks it */
	SND_SOC_DAPM_SPK("Ext Spk", poodle_amp_event),
};
/* Poodle machine connections to the codec pins (comment previously said
 * "Corgi", a copy-paste from corgi.c) */
static const struct snd_soc_dapm_route poodle_audio_map[] = {
	/* headphone connected to LHPOUT1, RHPOUT1 */
	{"Headphone Jack", NULL, "LHPOUT"},
	{"Headphone Jack", NULL, "RHPOUT"},

	/* speaker connected to LOUT, ROUT */
	{"Ext Spk", NULL, "ROUT"},
	{"Ext Spk", NULL, "LOUT"},
};
/* Option labels; indices 0/1 match the POODLE_* constants above */
static const char *jack_function[] = {"Off", "Headphone"};
static const char *spk_function[] = {"Off", "On"};
static const struct soc_enum poodle_enum[] = {
	SOC_ENUM_SINGLE_EXT(2, jack_function),
	SOC_ENUM_SINGLE_EXT(2, spk_function),
};

/* Card-level ALSA controls exposing the jack and speaker switches */
static const struct snd_kcontrol_new wm8731_poodle_controls[] = {
	SOC_ENUM_EXT("Jack Function", poodle_enum[0], poodle_get_jack,
		poodle_set_jack),
	SOC_ENUM_EXT("Speaker Function", poodle_enum[1], poodle_get_spk,
		poodle_set_spk),
};
/*
 * Logic for a wm8731 as connected on a Sharp Zaurus Poodle device
 * (the original comment said SL-C7x0, which is the Corgi family).
 * Marks the unused line inputs as not connected and enables the mic input.
 */
static int poodle_wm8731_init(struct snd_soc_pcm_runtime *rtd)
{
	struct snd_soc_codec *codec = rtd->codec;
	struct snd_soc_dapm_context *dapm = &codec->dapm;

	snd_soc_dapm_nc_pin(dapm, "LLINEIN");
	snd_soc_dapm_nc_pin(dapm, "RLINEIN");
	snd_soc_dapm_enable_pin(dapm, "MICIN");

	return 0;
}
/* poodle digital audio interface glue - connects codec <--> CPU */
static struct snd_soc_dai_link poodle_dai = {
	.name = "WM8731",
	.stream_name = "WM8731",
	.cpu_dai_name = "pxa2xx-i2s",
	.codec_dai_name = "wm8731-hifi",
	.platform_name = "pxa-pcm-audio",
	.codec_name = "wm8731.0-001b",	/* wm8731 on I2C bus 0, address 0x1b */
	.init = poodle_wm8731_init,
	/* I2S format, normal bit/frame clocks, codec is bit+frame clock slave */
	.dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
		   SND_SOC_DAIFMT_CBS_CFS,
	.ops = &poodle_ops,
};
/* poodle audio machine driver: single DAI link plus the machine-level
 * controls, widgets and routes defined above */
static struct snd_soc_card poodle = {
	.name = "Poodle",
	.dai_link = &poodle_dai,
	.num_links = 1,
	.owner = THIS_MODULE,

	.controls = wm8731_poodle_controls,
	.num_controls = ARRAY_SIZE(wm8731_poodle_controls),
	.dapm_widgets = wm8731_dapm_widgets,
	.num_dapm_widgets = ARRAY_SIZE(wm8731_dapm_widgets),
	.dapm_routes = poodle_audio_map,
	.num_dapm_routes = ARRAY_SIZE(poodle_audio_map),
};
/*
 * Platform probe: set the direction of the LOCOMO GPIOs used by this
 * driver (dir argument 0 — presumably output; confirm against
 * locomo_gpio_set_dir()), then register the sound card.
 */
static int __devinit poodle_probe(struct platform_device *pdev)
{
	struct snd_soc_card *card = &poodle;
	int ret;

	locomo_gpio_set_dir(&poodle_locomo_device.dev,
		POODLE_LOCOMO_GPIO_AMP_ON, 0);
	/* should we mute HP at startup - burning power ?*/
	locomo_gpio_set_dir(&poodle_locomo_device.dev,
		POODLE_LOCOMO_GPIO_MUTE_L, 0);
	locomo_gpio_set_dir(&poodle_locomo_device.dev,
		POODLE_LOCOMO_GPIO_MUTE_R, 0);

	card->dev = &pdev->dev;

	ret = snd_soc_register_card(card);
	if (ret)
		dev_err(&pdev->dev, "snd_soc_register_card() failed: %d\n",
			ret);
	return ret;
}
/* Platform remove: unregister the card stored in the device's drvdata
 * (presumably set by snd_soc_register_card() — confirm for this kernel). */
static int __devexit poodle_remove(struct platform_device *pdev)
{
	struct snd_soc_card *card = platform_get_drvdata(pdev);

	snd_soc_unregister_card(card);
	return 0;
}
/* Platform driver bound to the "poodle-audio" platform device */
static struct platform_driver poodle_driver = {
	.driver = {
		.name = "poodle-audio",
		.owner = THIS_MODULE,
	},
	.probe = poodle_probe,
	.remove = __devexit_p(poodle_remove),
};

module_platform_driver(poodle_driver);

/* Module information */
MODULE_AUTHOR("Richard Purdie");
MODULE_DESCRIPTION("ALSA SoC Poodle");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:poodle-audio");
| {
"pile_set_name": "Github"
} |
/* Copyright 2015 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef THIRD_PARTY_TENSORFLOW_CORE_LIB_IO_COMPRESSION_H_
#define THIRD_PARTY_TENSORFLOW_CORE_LIB_IO_COMPRESSION_H_

namespace tensorflow {
namespace io {
namespace compression {

// Compression-type identifier strings.  From this header we only know the
// names (kNone = no compression, kGzip = gzip); the actual string values are
// defined in the corresponding .cc file — confirm there before comparing.
extern const char kNone[];
extern const char kGzip[];

}  // namespace compression
}  // namespace io
}  // namespace tensorflow

#endif  // THIRD_PARTY_TENSORFLOW_CORE_LIB_IO_COMPRESSION_H_
| {
"pile_set_name": "Github"
} |
fileFormatVersion: 2
guid: bb264e974a9e3914283d4f9a942a88a0
NativeFormatImporter:
externalObjects: {}
mainObjectFileID: 100100000
userData:
assetBundleName:
assetBundleVariant:
| {
"pile_set_name": "Github"
} |
// for a legacy code and future fixes
module.exports = function(){
return Function.call.apply(Array.prototype.splice, arguments);
}; | {
"pile_set_name": "Github"
} |
/*
Copyright 2020 The Knative Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by injection-gen. DO NOT EDIT.
package podautoscaler
import (
context "context"
json "encoding/json"
fmt "fmt"
reflect "reflect"
zap "go.uber.org/zap"
v1 "k8s.io/api/core/v1"
equality "k8s.io/apimachinery/pkg/api/equality"
errors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
labels "k8s.io/apimachinery/pkg/labels"
types "k8s.io/apimachinery/pkg/types"
sets "k8s.io/apimachinery/pkg/util/sets"
record "k8s.io/client-go/tools/record"
controller "knative.dev/pkg/controller"
kmp "knative.dev/pkg/kmp"
logging "knative.dev/pkg/logging"
reconciler "knative.dev/pkg/reconciler"
v1alpha1 "knative.dev/serving/pkg/apis/autoscaling/v1alpha1"
versioned "knative.dev/serving/pkg/client/clientset/versioned"
autoscalingv1alpha1 "knative.dev/serving/pkg/client/listers/autoscaling/v1alpha1"
)
// Interface defines the strongly typed interfaces to be implemented by a
// controller reconciling v1alpha1.PodAutoscaler.
type Interface interface {
	// ReconcileKind implements custom logic to reconcile v1alpha1.PodAutoscaler. Any changes
	// to the objects .Status or .Finalizers will be propagated to the stored
	// object. It is recommended that implementors do not call any update calls
	// for the Kind inside of ReconcileKind, it is the responsibility of the calling
	// controller to propagate those properties. The resource passed to ReconcileKind
	// will always have an empty deletion timestamp.
	ReconcileKind(ctx context.Context, o *v1alpha1.PodAutoscaler) reconciler.Event
}

// Finalizer defines the strongly typed interfaces to be implemented by a
// controller finalizing v1alpha1.PodAutoscaler.
type Finalizer interface {
	// FinalizeKind implements custom logic to finalize v1alpha1.PodAutoscaler. Any changes
	// to the objects .Status or .Finalizers will be ignored. Returning a nil or
	// Normal type reconciler.Event will allow the finalizer to be deleted on
	// the resource. The resource passed to FinalizeKind will always have a set
	// deletion timestamp.
	FinalizeKind(ctx context.Context, o *v1alpha1.PodAutoscaler) reconciler.Event
}

// ReadOnlyInterface defines the strongly typed interfaces to be implemented by a
// controller reconciling v1alpha1.PodAutoscaler if they want to process resources for which
// they are not the leader.
type ReadOnlyInterface interface {
	// ObserveKind implements logic to observe v1alpha1.PodAutoscaler.
	// This method should not write to the API.
	ObserveKind(ctx context.Context, o *v1alpha1.PodAutoscaler) reconciler.Event
}

// ReadOnlyFinalizer defines the strongly typed interfaces to be implemented by a
// controller finalizing v1alpha1.PodAutoscaler if they want to process tombstoned resources
// even when they are not the leader. Due to the nature of how finalizers are handled
// there are no guarantees that this will be called.
type ReadOnlyFinalizer interface {
	// ObserveFinalizeKind implements custom logic to observe the final state of v1alpha1.PodAutoscaler.
	// This method should not write to the API.
	ObserveFinalizeKind(ctx context.Context, o *v1alpha1.PodAutoscaler) reconciler.Event
}

// doReconcile is the common signature of ReconcileKind, FinalizeKind,
// ObserveKind and ObserveFinalizeKind; Reconcile dispatches to one of them
// via the state helper's reconcileMethodFor.
type doReconcile func(ctx context.Context, o *v1alpha1.PodAutoscaler) reconciler.Event
// reconcilerImpl implements controller.Reconciler for v1alpha1.PodAutoscaler resources.
// Only resources whose class annotation matches classValue are processed
// (see Reconcile).
type reconcilerImpl struct {
	// LeaderAwareFuncs is inlined to help us implement reconciler.LeaderAware
	reconciler.LeaderAwareFuncs

	// Client is used to write back status updates.
	Client versioned.Interface

	// Listers index properties about resources
	Lister autoscalingv1alpha1.PodAutoscalerLister

	// Recorder is an event recorder for recording Event resources to the
	// Kubernetes API.
	Recorder record.EventRecorder

	// configStore allows for decorating a context with config maps.
	// +optional
	configStore reconciler.ConfigStore

	// reconciler is the implementation of the business logic of the resource.
	reconciler Interface

	// finalizerName is the name of the finalizer to reconcile.
	finalizerName string

	// skipStatusUpdates configures whether or not this reconciler automatically updates
	// the status of the reconciled resource.
	skipStatusUpdates bool

	// classValue is the resource annotation[autoscaling.knative.dev/class] instance value this reconciler instance filters on.
	classValue string
}

// Compile-time check that our Reconciler implements controller.Reconciler.
var _ controller.Reconciler = (*reconcilerImpl)(nil)

// Compile-time check that our generated Reconciler is always LeaderAware.
var _ reconciler.LeaderAware = (*reconcilerImpl)(nil)
// NewReconciler constructs a controller.Reconciler for v1alpha1.PodAutoscaler
// resources.  It delegates business logic to r, filters on classValue, and
// re-enqueues every lister entry when this replica is promoted to leader.
// At most one options struct may be supplied; violations are fatal.
func NewReconciler(ctx context.Context, logger *zap.SugaredLogger, client versioned.Interface, lister autoscalingv1alpha1.PodAutoscalerLister, recorder record.EventRecorder, r Interface, classValue string, options ...controller.Options) controller.Reconciler {
	// Check the options function input. It should be 0 or 1.
	if len(options) > 1 {
		logger.Fatalf("up to one options struct is supported, found %d", len(options))
	}

	// Fail fast when users inadvertently implement the other LeaderAware interface.
	// For the typed reconcilers, Promote shouldn't take any arguments.
	if _, ok := r.(reconciler.LeaderAware); ok {
		logger.Fatalf("%T implements the incorrect LeaderAware interface. Promote() should not take an argument as genreconciler handles the enqueuing automatically.", r)
	}
	// TODO: Consider validating when folks implement ReadOnlyFinalizer, but not Finalizer.

	rec := &reconcilerImpl{
		LeaderAwareFuncs: reconciler.LeaderAwareFuncs{
			// On promotion, enqueue every known resource so the new leader
			// reconciles the full set.
			PromoteFunc: func(bkt reconciler.Bucket, enq func(reconciler.Bucket, types.NamespacedName)) error {
				all, err := lister.List(labels.Everything())
				if err != nil {
					return err
				}
				for _, elt := range all {
					// TODO: Consider letting users specify a filter in options.
					enq(bkt, types.NamespacedName{
						Namespace: elt.GetNamespace(),
						Name:      elt.GetName(),
					})
				}
				return nil
			},
		},
		Client:        client,
		Lister:        lister,
		Recorder:      recorder,
		reconciler:    r,
		finalizerName: defaultFinalizerName,
		classValue:    classValue,
	}

	// Apply optional overrides: config store, finalizer name, status-update skip.
	for _, opts := range options {
		if opts.ConfigStore != nil {
			rec.configStore = opts.ConfigStore
		}
		if opts.FinalizerName != "" {
			rec.finalizerName = opts.FinalizerName
		}
		if opts.SkipStatusUpdates {
			rec.skipStatusUpdates = true
		}
	}

	return rec
}
// Reconcile implements controller.Reconciler.
//
// Fix: the original appended the "targetMethod" structured-log field twice —
// once with the dispatch name and again with the literal "ReconcileKind"
// inside the DoReconcileKind case — producing a duplicate zap field on every
// reconcile log line.  The redundant inner append is removed; behavior is
// otherwise unchanged.
func (r *reconcilerImpl) Reconcile(ctx context.Context, key string) error {
	logger := logging.FromContext(ctx)

	// Initialize the reconciler state. This will convert the namespace/name
	// string into a distinct namespace and name, determine if this instance of
	// the reconciler is the leader, and any additional interfaces implemented
	// by the reconciler. Returns an error if the resource key is invalid.
	s, err := newState(key, r)
	if err != nil {
		logger.Errorf("invalid resource key: %s", key)
		return nil
	}

	// If we are not the leader, and we don't implement either ReadOnly
	// observer interfaces, then take a fast-path out.
	if s.isNotLeaderNorObserver() {
		return nil
	}

	// If configStore is set, attach the frozen configuration to the context.
	if r.configStore != nil {
		ctx = r.configStore.ToContext(ctx)
	}

	// Add the recorder to context.
	ctx = controller.WithEventRecorder(ctx, r.Recorder)

	// Get the resource with this namespace/name.
	getter := r.Lister.PodAutoscalers(s.namespace)
	original, err := getter.Get(s.name)
	if errors.IsNotFound(err) {
		// The resource may no longer exist, in which case we stop processing.
		logger.Debugf("resource %q no longer exists", key)
		return nil
	} else if err != nil {
		return err
	}

	// This reconciler instance only handles resources carrying its class
	// annotation value; everything else is skipped.
	if classValue, found := original.GetAnnotations()[ClassAnnotationKey]; !found || classValue != r.classValue {
		logger.Debugw("Skip reconciling resource, class annotation value does not match reconciler instance value.",
			zap.String("classKey", ClassAnnotationKey),
			zap.String("issue", classValue+"!="+r.classValue))
		return nil
	}

	// Don't modify the informers copy.
	resource := original.DeepCopy()

	var reconcileEvent reconciler.Event

	name, do := s.reconcileMethodFor(resource)
	// Append the target method to the logger.
	logger = logger.With(zap.String("targetMethod", name))
	switch name {
	case reconciler.DoReconcileKind:
		// Set and update the finalizer on resource if r.reconciler
		// implements Finalizer.
		if resource, err = r.setFinalizerIfFinalizer(ctx, resource); err != nil {
			return fmt.Errorf("failed to set finalizers: %w", err)
		}

		if !r.skipStatusUpdates {
			reconciler.PreProcessReconcile(ctx, resource)
		}

		// Reconcile this copy of the resource and then write back any status
		// updates regardless of whether the reconciliation errored out.
		reconcileEvent = do(ctx, resource)

		if !r.skipStatusUpdates {
			reconciler.PostProcessReconcile(ctx, resource, original)
		}

	case reconciler.DoFinalizeKind:
		// For finalizing reconcilers, if this resource being marked for deletion
		// and reconciled cleanly (nil or normal event), remove the finalizer.
		reconcileEvent = do(ctx, resource)

		if resource, err = r.clearFinalizer(ctx, resource, reconcileEvent); err != nil {
			return fmt.Errorf("failed to clear finalizers: %w", err)
		}

	case reconciler.DoObserveKind, reconciler.DoObserveFinalizeKind:
		// Observe any changes to this resource, since we are not the leader.
		reconcileEvent = do(ctx, resource)
	}

	// Synchronize the status.
	switch {
	case r.skipStatusUpdates:
		// This reconciler implementation is configured to skip resource updates.
		// This may mean this reconciler does not observe spec, but reconciles external changes.
	case equality.Semantic.DeepEqual(original.Status, resource.Status):
		// If we didn't change anything then don't call updateStatus.
		// This is important because the copy we loaded from the injectionInformer's
		// cache may be stale and we don't want to overwrite a prior update
		// to status with this stale state.
	case !s.isLeader:
		// High-availability reconcilers may have many replicas watching the resource, but only
		// the elected leader is expected to write modifications.
		logger.Warn("Saw status changes when we aren't the leader!")
	default:
		if err = r.updateStatus(ctx, original, resource); err != nil {
			logger.Warnw("Failed to update resource status", zap.Error(err))
			r.Recorder.Eventf(resource, v1.EventTypeWarning, "UpdateFailed",
				"Failed to update status for %q: %v", resource.Name, err)
			return err
		}
	}

	// Report the reconciler event, if any.
	if reconcileEvent != nil {
		var event *reconciler.ReconcilerEvent
		if reconciler.EventAs(reconcileEvent, &event) {
			logger.Infow("Returned an event", zap.Any("event", reconcileEvent))
			r.Recorder.Eventf(resource, event.EventType, event.Reason, event.Format, event.Args...)

			// the event was wrapped inside an error, consider the reconciliation as failed
			if _, isEvent := reconcileEvent.(*reconciler.ReconcilerEvent); !isEvent {
				return reconcileEvent
			}
			return nil
		}

		logger.Errorw("Returned an error", zap.Error(reconcileEvent))
		r.Recorder.Event(resource, v1.EventTypeWarning, "InternalError", reconcileEvent.Error())
		return reconcileEvent
	}

	return nil
}
// updateStatus writes desired.Status to the API server, retrying on
// optimistic-concurrency conflicts.  The first attempt diffs against the
// informer-provided copy; later attempts re-fetch the object so the write
// carries the latest resourceVersion.
func (r *reconcilerImpl) updateStatus(ctx context.Context, existing *v1alpha1.PodAutoscaler, desired *v1alpha1.PodAutoscaler) error {
	existing = existing.DeepCopy()
	return reconciler.RetryUpdateConflicts(func(attempts int) (err error) {
		// The first iteration tries to use the injectionInformer's state, subsequent attempts fetch the latest state via API.
		if attempts > 0 {
			getter := r.Client.AutoscalingV1alpha1().PodAutoscalers(desired.Namespace)
			existing, err = getter.Get(ctx, desired.Name, metav1.GetOptions{})
			if err != nil {
				return err
			}
		}

		// If there's nothing to update, just return.
		if reflect.DeepEqual(existing.Status, desired.Status) {
			return nil
		}

		// Log the status diff at debug level for troubleshooting.
		if diff, err := kmp.SafeDiff(existing.Status, desired.Status); err == nil && diff != "" {
			logging.FromContext(ctx).Debugf("Updating status with: %s", diff)
		}

		existing.Status = desired.Status

		updater := r.Client.AutoscalingV1alpha1().PodAutoscalers(existing.Namespace)
		_, err = updater.UpdateStatus(ctx, existing, metav1.UpdateOptions{})
		return err
	})
}
// updateFinalizersFiltered will update the Finalizers of the resource.
// TODO: this method could be generic and sync all finalizers. For now it only
// updates defaultFinalizerName or its override.
func (r *reconcilerImpl) updateFinalizersFiltered(ctx context.Context, resource *v1alpha1.PodAutoscaler) (*v1alpha1.PodAutoscaler, error) {
	// Re-read the stored object so the patch is computed against the
	// informer's view rather than our (possibly modified) copy.
	getter := r.Lister.PodAutoscalers(resource.Namespace)

	actual, err := getter.Get(resource.Name)
	if err != nil {
		return resource, err
	}

	// Don't modify the informers copy.
	existing := actual.DeepCopy()

	var finalizers []string

	// If there's nothing to update, just return.
	existingFinalizers := sets.NewString(existing.Finalizers...)
	desiredFinalizers := sets.NewString(resource.Finalizers...)

	if desiredFinalizers.Has(r.finalizerName) {
		if existingFinalizers.Has(r.finalizerName) {
			// Nothing to do.
			return resource, nil
		}
		// Add the finalizer.
		finalizers = append(existing.Finalizers, r.finalizerName)
	} else {
		if !existingFinalizers.Has(r.finalizerName) {
			// Nothing to do.
			return resource, nil
		}
		// Remove the finalizer.
		existingFinalizers.Delete(r.finalizerName)
		finalizers = existingFinalizers.List()
	}

	// Including resourceVersion makes the merge patch conditional: the API
	// server rejects it if the object changed since we read it.
	mergePatch := map[string]interface{}{
		"metadata": map[string]interface{}{
			"finalizers":      finalizers,
			"resourceVersion": existing.ResourceVersion,
		},
	}

	patch, err := json.Marshal(mergePatch)
	if err != nil {
		return resource, err
	}

	patcher := r.Client.AutoscalingV1alpha1().PodAutoscalers(resource.Namespace)

	resourceName := resource.Name
	resource, err = patcher.Patch(ctx, resourceName, types.MergePatchType, patch, metav1.PatchOptions{})
	if err != nil {
		r.Recorder.Eventf(resource, v1.EventTypeWarning, "FinalizerUpdateFailed",
			"Failed to update finalizers for %q: %v", resourceName, err)
	} else {
		r.Recorder.Eventf(resource, v1.EventTypeNormal, "FinalizerUpdate",
			"Updated %q finalizers", resource.GetName())
	}
	return resource, err
}
// setFinalizerIfFinalizer ensures r.finalizerName is present on a resource
// that is not being deleted, but only when the wrapped reconciler implements
// Finalizer; otherwise it is a no-op.
func (r *reconcilerImpl) setFinalizerIfFinalizer(ctx context.Context, resource *v1alpha1.PodAutoscaler) (*v1alpha1.PodAutoscaler, error) {
	if _, ok := r.reconciler.(Finalizer); !ok {
		return resource, nil
	}

	finalizers := sets.NewString(resource.Finalizers...)

	// If this resource is not being deleted, mark the finalizer.
	if resource.GetDeletionTimestamp().IsZero() {
		finalizers.Insert(r.finalizerName)
	}

	resource.Finalizers = finalizers.List()

	// Synchronize the finalizers filtered by r.finalizerName.
	return r.updateFinalizersFiltered(ctx, resource)
}
// clearFinalizer removes r.finalizerName from a resource that is being
// deleted once finalization completed cleanly (nil event or an event of
// Normal type).  Error events keep the finalizer so finalization retries.
// No-op when the wrapped reconciler does not implement Finalizer.
func (r *reconcilerImpl) clearFinalizer(ctx context.Context, resource *v1alpha1.PodAutoscaler, reconcileEvent reconciler.Event) (*v1alpha1.PodAutoscaler, error) {
	if _, ok := r.reconciler.(Finalizer); !ok {
		return resource, nil
	}
	if resource.GetDeletionTimestamp().IsZero() {
		return resource, nil
	}

	finalizers := sets.NewString(resource.Finalizers...)

	if reconcileEvent != nil {
		var event *reconciler.ReconcilerEvent
		if reconciler.EventAs(reconcileEvent, &event) {
			if event.EventType == v1.EventTypeNormal {
				finalizers.Delete(r.finalizerName)
			}
		}
	} else {
		finalizers.Delete(r.finalizerName)
	}

	resource.Finalizers = finalizers.List()

	// Synchronize the finalizers filtered by r.finalizerName.
	return r.updateFinalizersFiltered(ctx, resource)
}
| {
"pile_set_name": "Github"
} |
// Generated from definition io.k8s.api.node.v1beta1.RuntimeClass

/// RuntimeClass defines a class of container runtime supported in the cluster. The RuntimeClass is used to determine which container runtime is used to run all containers in a pod. RuntimeClasses are (currently) manually defined by a user or cluster provisioner, and referenced in the PodSpec. The Kubelet is responsible for resolving the RuntimeClassName reference before running the pod. For more details, see https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md
// `handler` is required by the API schema; `overhead` and `scheduling` are
// optional and hold `None` when absent from the serialized object.
#[derive(Clone, Debug, Default, PartialEq)]
pub struct RuntimeClass {
    /// Handler specifies the underlying runtime and configuration that the CRI implementation will use to handle pods of this class. The possible values are specific to the node & CRI configuration. It is assumed that all handlers are available on every node, and handlers of the same name are equivalent on every node. For example, a handler called "runc" might specify that the runc OCI runtime (using native Linux containers) will be used to run the containers in a pod. The Handler must conform to the DNS Label (RFC 1123) requirements, and is immutable.
    pub handler: String,

    /// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
    pub metadata: crate::apimachinery::pkg::apis::meta::v1::ObjectMeta,

    /// Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. For more details, see https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md This field is alpha-level as of Kubernetes v1.15, and is only honored by servers that enable the PodOverhead feature.
    pub overhead: Option<crate::api::node::v1beta1::Overhead>,

    /// Scheduling holds the scheduling constraints to ensure that pods running with this RuntimeClass are scheduled to nodes that support it. If scheduling is nil, this RuntimeClass is assumed to be supported by all nodes.
    pub scheduling: Option<crate::api::node::v1beta1::Scheduling>,
}
// Begin node.k8s.io/v1beta1/RuntimeClass

// Generated from operation createNodeV1beta1RuntimeClass

impl RuntimeClass {
    /// create a RuntimeClass
    ///
    /// Use the returned [`crate::ResponseBody`]`<`[`crate::CreateResponse`]`<Self>>` constructor, or [`crate::CreateResponse`]`<Self>` directly, to parse the HTTP response.
    ///
    /// # Arguments
    ///
    /// * `body`
    ///
    /// * `optional`
    ///
    ///     Optional parameters. Use `Default::default()` to not pass any.
    #[cfg(feature = "api")]
    pub fn create_runtime_class(
        body: &crate::api::node::v1beta1::RuntimeClass,
        optional: crate::CreateOptional<'_>,
    ) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<crate::CreateResponse<Self>>), crate::RequestError> {
        // The trailing '?' is completed in place by the query-pair serializer.
        let __url = "/apis/node.k8s.io/v1beta1/runtimeclasses?".to_owned();
        let mut __query_pairs = crate::url::form_urlencoded::Serializer::new(__url);
        optional.__serialize(&mut __query_pairs);
        let __url = __query_pairs.finish();

        // POST the JSON-serialized resource; response parsing is deferred to
        // the returned constructor.
        let __request = http::Request::post(__url);
        let __body = serde_json::to_vec(body).map_err(crate::RequestError::Json)?;
        let __request = __request.header(http::header::CONTENT_TYPE, http::header::HeaderValue::from_static("application/json"));
        match __request.body(__body) {
            Ok(request) => Ok((request, crate::ResponseBody::new)),
            Err(err) => Err(crate::RequestError::Http(err)),
        }
    }
}
// Generated from operation deleteNodeV1beta1CollectionRuntimeClass

impl RuntimeClass {
    /// delete collection of RuntimeClass
    ///
    /// Use the returned [`crate::ResponseBody`]`<`[`crate::DeleteResponse`]`<`[`crate::List`]`<Self>>>` constructor, or [`crate::DeleteResponse`]`<`[`crate::List`]`<Self>>` directly, to parse the HTTP response.
    ///
    /// # Arguments
    ///
    /// * `delete_optional`
    ///
    ///     Delete options. Use `Default::default()` to not pass any.
    ///
    /// * `list_optional`
    ///
    ///     List options. Use `Default::default()` to not pass any.
    #[cfg(feature = "api")]
    pub fn delete_collection_runtime_class(
        delete_optional: crate::DeleteOptional<'_>,
        list_optional: crate::ListOptional<'_>,
    ) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<crate::DeleteResponse<crate::List<Self>>>), crate::RequestError> {
        // List options select which objects are deleted (query string);
        // delete options travel as the JSON request body.
        let __url = "/apis/node.k8s.io/v1beta1/runtimeclasses?".to_owned();
        let mut __query_pairs = crate::url::form_urlencoded::Serializer::new(__url);
        list_optional.__serialize(&mut __query_pairs);
        let __url = __query_pairs.finish();

        let __request = http::Request::delete(__url);
        let __body = serde_json::to_vec(&delete_optional).map_err(crate::RequestError::Json)?;
        let __request = __request.header(http::header::CONTENT_TYPE, http::header::HeaderValue::from_static("application/json"));
        match __request.body(__body) {
            Ok(request) => Ok((request, crate::ResponseBody::new)),
            Err(err) => Err(crate::RequestError::Http(err)),
        }
    }
}
// Generated from operation deleteNodeV1beta1RuntimeClass

impl RuntimeClass {
    /// delete a RuntimeClass
    ///
    /// Use the returned [`crate::ResponseBody`]`<`[`crate::DeleteResponse`]`<Self>>` constructor, or [`crate::DeleteResponse`]`<Self>` directly, to parse the HTTP response.
    ///
    /// # Arguments
    ///
    /// * `name`
    ///
    ///     name of the RuntimeClass
    ///
    /// * `optional`
    ///
    ///     Optional parameters. Use `Default::default()` to not pass any.
    #[cfg(feature = "api")]
    pub fn delete_runtime_class(
        name: &str,
        optional: crate::DeleteOptional<'_>,
    ) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<crate::DeleteResponse<Self>>), crate::RequestError> {
        // `name` is percent-encoded so it is safe inside the path segment.
        let __url = format!("/apis/node.k8s.io/v1beta1/runtimeclasses/{name}",
            name = crate::percent_encoding::percent_encode(name.as_bytes(), crate::percent_encoding2::PATH_SEGMENT_ENCODE_SET),
        );

        let __request = http::Request::delete(__url);
        let __body = serde_json::to_vec(&optional).map_err(crate::RequestError::Json)?;
        let __request = __request.header(http::header::CONTENT_TYPE, http::header::HeaderValue::from_static("application/json"));
        match __request.body(__body) {
            Ok(request) => Ok((request, crate::ResponseBody::new)),
            Err(err) => Err(crate::RequestError::Http(err)),
        }
    }
}
// Generated from operation listNodeV1beta1RuntimeClass

impl RuntimeClass {
    /// list or watch objects of kind RuntimeClass
    ///
    /// This operation only supports listing all items of this type.
    ///
    /// Use the returned [`crate::ResponseBody`]`<`[`crate::ListResponse`]`<Self>>` constructor, or [`crate::ListResponse`]`<Self>` directly, to parse the HTTP response.
    ///
    /// # Arguments
    ///
    /// * `optional`
    ///
    ///     Optional parameters. Use `Default::default()` to not pass any.
    #[cfg(feature = "api")]
    pub fn list_runtime_class(
        optional: crate::ListOptional<'_>,
    ) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<crate::ListResponse<Self>>), crate::RequestError> {
        let __url = "/apis/node.k8s.io/v1beta1/runtimeclasses?".to_owned();
        let mut __query_pairs = crate::url::form_urlencoded::Serializer::new(__url);
        optional.__serialize(&mut __query_pairs);
        let __url = __query_pairs.finish();

        // Plain GET with an empty body; list options ride in the query string.
        let __request = http::Request::get(__url);
        let __body = vec![];
        match __request.body(__body) {
            Ok(request) => Ok((request, crate::ResponseBody::new)),
            Err(err) => Err(crate::RequestError::Http(err)),
        }
    }
}
// Generated from operation patchNodeV1beta1RuntimeClass

impl RuntimeClass {
    /// partially update the specified RuntimeClass
    ///
    /// Use the returned [`crate::ResponseBody`]`<`[`crate::PatchResponse`]`<Self>>` constructor, or [`crate::PatchResponse`]`<Self>` directly, to parse the HTTP response.
    ///
    /// # Arguments
    ///
    /// * `name`
    ///
    ///     name of the RuntimeClass
    ///
    /// * `body`
    ///
    /// * `optional`
    ///
    ///     Optional parameters. Use `Default::default()` to not pass any.
    #[cfg(feature = "api")]
    pub fn patch_runtime_class(
        name: &str,
        body: &crate::apimachinery::pkg::apis::meta::v1::Patch,
        optional: crate::PatchOptional<'_>,
    ) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<crate::PatchResponse<Self>>), crate::RequestError> {
        let __url = format!("/apis/node.k8s.io/v1beta1/runtimeclasses/{name}?",
            name = crate::percent_encoding::percent_encode(name.as_bytes(), crate::percent_encoding2::PATH_SEGMENT_ENCODE_SET),
        );
        let mut __query_pairs = crate::url::form_urlencoded::Serializer::new(__url);
        optional.__serialize(&mut __query_pairs);
        let __url = __query_pairs.finish();

        let __request = http::Request::patch(__url);
        let __body = serde_json::to_vec(body).map_err(crate::RequestError::Json)?;
        // Content-Type must match the patch flavor or the API server rejects it.
        let __request = __request.header(http::header::CONTENT_TYPE, http::header::HeaderValue::from_static(match body {
            crate::apimachinery::pkg::apis::meta::v1::Patch::Json(_) => "application/json-patch+json",
            crate::apimachinery::pkg::apis::meta::v1::Patch::Merge(_) => "application/merge-patch+json",
            crate::apimachinery::pkg::apis::meta::v1::Patch::StrategicMerge(_) => "application/strategic-merge-patch+json",
        }));
        match __request.body(__body) {
            Ok(request) => Ok((request, crate::ResponseBody::new)),
            Err(err) => Err(crate::RequestError::Http(err)),
        }
    }
}
// Generated from operation readNodeV1beta1RuntimeClass

impl RuntimeClass {
    /// read the specified RuntimeClass
    ///
    /// Use the returned [`crate::ResponseBody`]`<`[`ReadRuntimeClassResponse`]`>` constructor, or [`ReadRuntimeClassResponse`] directly, to parse the HTTP response.
    ///
    /// # Arguments
    ///
    /// * `name`
    ///
    ///     name of the RuntimeClass
    ///
    /// * `optional`
    ///
    ///     Optional parameters. Use `Default::default()` to not pass any.
    #[cfg(feature = "api")]
    pub fn read_runtime_class(
        name: &str,
        optional: ReadRuntimeClassOptional<'_>,
    ) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<ReadRuntimeClassResponse>), crate::RequestError> {
        let ReadRuntimeClassOptional { exact, export, pretty } = optional;

        // Path with a percent-encoded name, then the optional query parameters
        // appended in a fixed order so the generated URL is deterministic.
        let encoded_name = crate::percent_encoding::percent_encode(name.as_bytes(), crate::percent_encoding2::PATH_SEGMENT_ENCODE_SET);
        let mut query_pairs = crate::url::form_urlencoded::Serializer::new(
            format!("/apis/node.k8s.io/v1beta1/runtimeclasses/{}?", encoded_name));
        if let Some(exact) = exact {
            query_pairs.append_pair("exact", &exact.to_string());
        }
        if let Some(export) = export {
            query_pairs.append_pair("export", &export.to_string());
        }
        if let Some(pretty) = pretty {
            query_pairs.append_pair("pretty", pretty);
        }
        let url = query_pairs.finish();

        match http::Request::get(url).body(Vec::new()) {
            Ok(request) => Ok((request, crate::ResponseBody::new)),
            Err(err) => Err(crate::RequestError::Http(err)),
        }
    }
}
/// Optional parameters of [`RuntimeClass::read_runtime_class`]
///
/// Every field defaults to `None`, which omits the corresponding query parameter.
#[cfg(feature = "api")]
#[derive(Clone, Copy, Debug, Default)]
pub struct ReadRuntimeClassOptional<'a> {
    /// Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'. Deprecated. Planned for removal in 1.18.
    pub exact: Option<bool>,
    /// Should this value be exported. Export strips fields that a user can not specify. Deprecated. Planned for removal in 1.18.
    pub export: Option<bool>,
    /// If 'true', then the output is pretty printed.
    pub pretty: Option<&'a str>,
}
/// Use `<ReadRuntimeClassResponse as Response>::try_from_parts` to parse the HTTP response body of [`RuntimeClass::read_runtime_class`]
#[cfg(feature = "api")]
#[derive(Debug)]
pub enum ReadRuntimeClassResponse {
    /// HTTP 200: the requested RuntimeClass.
    Ok(crate::api::node::v1beta1::RuntimeClass),
    /// Any other status code: the raw JSON body, if one could be read
    /// (`Ok(None)` when the body was empty, `Err` when it was not valid JSON).
    Other(Result<Option<serde_json::Value>, serde_json::Error>),
}
#[cfg(feature = "api")]
impl crate::Response for ReadRuntimeClassResponse {
    /// Parse a response from its status code and (possibly partial) body bytes.
    ///
    /// Returns the parsed value plus the number of bytes consumed from `buf`.
    /// A JSON "unexpected EOF" error means `buf` holds an incomplete payload,
    /// so `NeedMoreData` is returned and the caller should retry with more bytes.
    fn try_from_parts(status_code: http::StatusCode, buf: &[u8]) -> Result<(Self, usize), crate::ResponseError> {
        match status_code {
            // 200: the body is the RuntimeClass itself.
            http::StatusCode::OK => {
                let result = match serde_json::from_slice(buf) {
                    Ok(value) => value,
                    Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
                    Err(err) => return Err(crate::ResponseError::Json(err)),
                };
                Ok((ReadRuntimeClassResponse::Ok(result), buf.len()))
            },
            // Any other status: surface the raw JSON payload (if any) to the caller.
            _ => {
                let (result, read) =
                    if buf.is_empty() {
                        // No body at all: nothing parsed, nothing consumed.
                        (Ok(None), 0)
                    }
                    else {
                        match serde_json::from_slice(buf) {
                            Ok(value) => (Ok(Some(value)), buf.len()),
                            Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
                            // Non-JSON body: hand the parse error back without consuming bytes.
                            Err(err) => (Err(err), 0),
                        }
                    };
                Ok((ReadRuntimeClassResponse::Other(result), read))
            },
        }
    }
}
// Generated from operation replaceNodeV1beta1RuntimeClass

impl RuntimeClass {
    /// replace the specified RuntimeClass
    ///
    /// Use the returned [`crate::ResponseBody`]`<`[`crate::ReplaceResponse`]`<Self>>` constructor, or [`crate::ReplaceResponse`]`<Self>` directly, to parse the HTTP response.
    ///
    /// # Arguments
    ///
    /// * `name`
    ///
    ///     name of the RuntimeClass
    ///
    /// * `body`
    ///
    /// * `optional`
    ///
    ///     Optional parameters. Use `Default::default()` to not pass any.
    #[cfg(feature = "api")]
    pub fn replace_runtime_class(
        name: &str,
        body: &crate::api::node::v1beta1::RuntimeClass,
        optional: crate::ReplaceOptional<'_>,
    ) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<crate::ReplaceResponse<Self>>), crate::RequestError> {
        // Embed the percent-encoded name as a path segment, then append the
        // optional query parameters.
        let encoded_name = crate::percent_encoding::percent_encode(name.as_bytes(), crate::percent_encoding2::PATH_SEGMENT_ENCODE_SET);
        let mut query_pairs = crate::url::form_urlencoded::Serializer::new(
            format!("/apis/node.k8s.io/v1beta1/runtimeclasses/{}?", encoded_name));
        optional.__serialize(&mut query_pairs);
        let url = query_pairs.finish();

        // A replace is a PUT whose payload is the JSON-serialized resource.
        let payload = serde_json::to_vec(body).map_err(crate::RequestError::Json)?;
        let request = http::Request::put(url)
            .header(http::header::CONTENT_TYPE, http::header::HeaderValue::from_static("application/json"));

        match request.body(payload) {
            Ok(request) => Ok((request, crate::ResponseBody::new)),
            Err(err) => Err(crate::RequestError::Http(err)),
        }
    }
}
// Generated from operation watchNodeV1beta1RuntimeClass

impl RuntimeClass {
    /// list or watch objects of kind RuntimeClass
    ///
    /// This operation only supports watching one item, or a list of items, of this type for changes.
    ///
    /// Use the returned [`crate::ResponseBody`]`<`[`crate::WatchResponse`]`<Self>>` constructor, or [`crate::WatchResponse`]`<Self>` directly, to parse the HTTP response.
    ///
    /// # Arguments
    ///
    /// * `optional`
    ///
    ///     Optional parameters. Use `Default::default()` to not pass any.
    #[cfg(feature = "api")]
    pub fn watch_runtime_class(
        optional: crate::WatchOptional<'_>,
    ) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<crate::WatchResponse<Self>>), crate::RequestError> {
        // Append the serialized watch parameters to the fixed collection path.
        let mut query_pairs = crate::url::form_urlencoded::Serializer::new(
            "/apis/node.k8s.io/v1beta1/runtimeclasses?".to_owned());
        optional.__serialize(&mut query_pairs);
        let url = query_pairs.finish();

        // A watch is a plain GET with an empty body; the streaming happens in
        // the response.
        match http::Request::get(url).body(Vec::new()) {
            Ok(request) => Ok((request, crate::ResponseBody::new)),
            Err(err) => Err(crate::RequestError::Http(err)),
        }
    }
}
// End node.k8s.io/v1beta1/RuntimeClass
// Constants identifying this type within the Kubernetes API surface
// (group/version/kind as served by the API server).
impl crate::Resource for RuntimeClass {
    const API_VERSION: &'static str = "node.k8s.io/v1beta1";
    const GROUP: &'static str = "node.k8s.io";
    const KIND: &'static str = "RuntimeClass";
    const VERSION: &'static str = "v1beta1";
}
impl crate::ListableResource for RuntimeClass {
    // Kind reported by list responses: "RuntimeClassList".
    const LIST_KIND: &'static str = concat!("RuntimeClass", "List");
}
impl crate::Metadata for RuntimeClass {
    type Ty = crate::apimachinery::pkg::apis::meta::v1::ObjectMeta;

    /// Shared borrow of this resource's object metadata.
    fn metadata(&self) -> &<Self as crate::Metadata>::Ty {
        &self.metadata
    }

    /// Mutable borrow of this resource's object metadata.
    fn metadata_mut(&mut self) -> &mut<Self as crate::Metadata>::Ty {
        &mut self.metadata
    }
}
impl<'de> serde::Deserialize<'de> for RuntimeClass {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: serde::Deserializer<'de> {
        // Map keys that may appear in a serialized RuntimeClass; unknown keys
        // fall into `Other` and are skipped.
        #[allow(non_camel_case_types)]
        enum Field {
            Key_api_version,
            Key_kind,
            Key_handler,
            Key_metadata,
            Key_overhead,
            Key_scheduling,
            Other,
        }

        impl<'de> serde::Deserialize<'de> for Field {
            fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: serde::Deserializer<'de> {
                struct Visitor;

                impl<'de> serde::de::Visitor<'de> for Visitor {
                    type Value = Field;

                    fn expecting(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
                        f.write_str("field identifier")
                    }

                    fn visit_str<E>(self, v: &str) -> Result<Self::Value, E> where E: serde::de::Error {
                        Ok(match v {
                            "apiVersion" => Field::Key_api_version,
                            "kind" => Field::Key_kind,
                            "handler" => Field::Key_handler,
                            "metadata" => Field::Key_metadata,
                            "overhead" => Field::Key_overhead,
                            "scheduling" => Field::Key_scheduling,
                            _ => Field::Other,
                        })
                    }
                }

                deserializer.deserialize_identifier(Visitor)
            }
        }

        struct Visitor;

        impl<'de> serde::de::Visitor<'de> for Visitor {
            type Value = RuntimeClass;

            fn expecting(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
                f.write_str(<Self::Value as crate::Resource>::KIND)
            }

            fn visit_map<A>(self, mut map: A) -> Result<Self::Value, A::Error> where A: serde::de::MapAccess<'de> {
                let mut value_handler: Option<String> = None;
                let mut value_metadata: Option<crate::apimachinery::pkg::apis::meta::v1::ObjectMeta> = None;
                let mut value_overhead: Option<crate::api::node::v1beta1::Overhead> = None;
                let mut value_scheduling: Option<crate::api::node::v1beta1::Scheduling> = None;

                while let Some(key) = serde::de::MapAccess::next_key::<Field>(&mut map)? {
                    match key {
                        // apiVersion and kind are not stored: they are validated
                        // against the Resource constants and rejected on mismatch.
                        Field::Key_api_version => {
                            let value_api_version: String = serde::de::MapAccess::next_value(&mut map)?;
                            if value_api_version != <Self::Value as crate::Resource>::API_VERSION {
                                return Err(serde::de::Error::invalid_value(serde::de::Unexpected::Str(&value_api_version), &<Self::Value as crate::Resource>::API_VERSION));
                            }
                        },
                        Field::Key_kind => {
                            let value_kind: String = serde::de::MapAccess::next_value(&mut map)?;
                            if value_kind != <Self::Value as crate::Resource>::KIND {
                                return Err(serde::de::Error::invalid_value(serde::de::Unexpected::Str(&value_kind), &<Self::Value as crate::Resource>::KIND));
                            }
                        },
                        Field::Key_handler => value_handler = Some(serde::de::MapAccess::next_value(&mut map)?),
                        Field::Key_metadata => value_metadata = Some(serde::de::MapAccess::next_value(&mut map)?),
                        Field::Key_overhead => value_overhead = serde::de::MapAccess::next_value(&mut map)?,
                        Field::Key_scheduling => value_scheduling = serde::de::MapAccess::next_value(&mut map)?,
                        Field::Other => { let _: serde::de::IgnoredAny = serde::de::MapAccess::next_value(&mut map)?; },
                    }
                }

                Ok(RuntimeClass {
                    // handler and metadata are required; the other two default to None.
                    handler: value_handler.ok_or_else(|| serde::de::Error::missing_field("handler"))?,
                    metadata: value_metadata.ok_or_else(|| serde::de::Error::missing_field("metadata"))?,
                    overhead: value_overhead,
                    scheduling: value_scheduling,
                })
            }
        }

        deserializer.deserialize_struct(
            <Self as crate::Resource>::KIND,
            &[
                "apiVersion",
                "kind",
                "handler",
                "metadata",
                "overhead",
                "scheduling",
            ],
            Visitor,
        )
    }
}
impl serde::Serialize for RuntimeClass {
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: serde::Serializer {
        // Field count: apiVersion, kind, handler, metadata are always present;
        // overhead and scheduling are emitted only when Some.
        let mut state = serializer.serialize_struct(
            <Self as crate::Resource>::KIND,
            4 +
            self.overhead.as_ref().map_or(0, |_| 1) +
            self.scheduling.as_ref().map_or(0, |_| 1),
        )?;
        serde::ser::SerializeStruct::serialize_field(&mut state, "apiVersion", <Self as crate::Resource>::API_VERSION)?;
        serde::ser::SerializeStruct::serialize_field(&mut state, "kind", <Self as crate::Resource>::KIND)?;
        serde::ser::SerializeStruct::serialize_field(&mut state, "handler", &self.handler)?;
        serde::ser::SerializeStruct::serialize_field(&mut state, "metadata", &self.metadata)?;
        if let Some(value) = &self.overhead {
            serde::ser::SerializeStruct::serialize_field(&mut state, "overhead", value)?;
        }
        if let Some(value) = &self.scheduling {
            serde::ser::SerializeStruct::serialize_field(&mut state, "scheduling", value)?;
        }
        serde::ser::SerializeStruct::end(state)
    }
}
| {
"pile_set_name": "Github"
} |
// Shared design tokens ($border-radius, etc.).
@import '~scss/variables';

.table {
  // Negative margin — presumably offsets a 12px padding on the parent
  // container so the table sits flush; confirm against the component markup.
  margin: -12px;
  border-radius: $border-radius;
  // Clip contents (cell backgrounds) to the rounded corners.
  overflow: hidden;
}
| {
"pile_set_name": "Github"
} |
// Test model: an equation whose right-hand side is a function call returning
// an Integer array.
model EquationCallIntegerArray
  // Maps a Real input to a 3-element Integer array: a constant first element,
  // then scaled-and-truncated copies of the input.
  function arrcalli
    input Real r;
    output Integer[3] rs;
  algorithm
    rs := {1,integer(20*r),integer(30*r)};
  end arrcalli;
  Integer[3] i;
  Boolean b;
equation
  i = arrcalli(time);
  // Toggle b every 0.05 s. NOTE(review): pre(b) has no explicit start value —
  // presumably the tool default (false) is intended; confirm.
  when sample(0, 0.05) then
    b = not pre(b);
  end when;
end EquationCallIntegerArray;
| {
"pile_set_name": "Github"
} |
1 DUMM DU M 0 -1 -2 0.000 0.000 0.000
2 DUMM DU M 1 0 -1 1.522 0.000 0.000
3 DUMM DU M 2 1 0 1.422 109.500 0.000
4 C1 CG M 3 2 1 1.400 113.300 180.000
5 H1 H2 E 4 3 2 1.102 111.600 56.200
6 O5 OS 3 4 3 2 1.412 112.900 -68.700
7 EP+5 EP E 6 4 3 0.700 109.500 -59.700
8 EP-5 EP E 6 4 3 0.700 109.500 60.300
9 C5 CG M 6 4 3 1.435 114.300 -179.700
10 H5 H1 E 9 6 4 1.105 109.100 -60.700
11 C6 CG 3 9 6 4 1.517 106.600 -177.200
12 H61 H1 E 11 9 6 1.092 108.300 179.300
13 H62 H1 E 11 9 6 1.093 108.800 62.900
14 O6 OH 3 11 9 6 1.413 112.700 -65.100
15 H6O HO E 14 11 9 0.955 108.100 -96.000
16 EP+6 EP E 14 11 9 0.700 109.500 24.000
17 EP-6 EP E 14 11 9 0.700 109.500 144.000
18 C4 CG M 9 6 4 1.528 110.800 59.100
19 H4 H1 E 18 9 6 1.100 109.800 66.400
20 O4 OH 3 18 9 6 1.430 107.900 -174.000
21 H4O HO E 20 18 9 0.958 109.200 -124.900
22 EP+4 EP E 20 18 9 0.700 109.500 -4.900
23 EP-4 EP E 20 18 9 0.700 109.500 115.100
24 C3 CG M 18 9 6 1.519 110.100 -54.400
25 H3 H1 E 24 18 9 1.101 108.400 -66.700
26 C2 CG B 24 18 9 1.529 110.600 52.800
27 H2 H1 E 26 24 18 1.105 106.200 -171.000
28 O2 OS B 26 24 18 1.415 113.900 66.100
29 EP+2 EP E 28 26 24 0.700 109.500 166.300
30 EP-2 EP E 28 26 24 0.700 109.500 -73.700
31 O3 OS M 24 18 9 1.421 108.700 173.500
32 EP+3 EP E 31 24 18 0.700 109.500 48.700
33 EP-3 EP E 31 24 18 0.700 109.500 -71.300
| {
"pile_set_name": "Github"
} |
"use strict";
/**
* Turns on Docker Machine
* @example: sjc up
*
*/
var d = require('../../docker-toolbox.js')
var run = function (good, bad) {
console.log("Note: Cold starting VirtualBox VM takes a minute.");
d.machine.start(function(err,data){
if (err) {
bad(err);
} else {
good(data);
}
});
};
module.exports = function(Command,scope) {
return new Command(scope,run);
};
| {
"pile_set_name": "Github"
} |
<%# Emits one Apache "Header" directive per non-empty entry in @headers. -%>
<% if @headers and ! @headers.empty? -%>
## Header rules
## as per http://httpd.apache.org/docs/2.2/mod/mod_headers.html#header
<%- Array(@headers).each do |header_statement| -%>
<%- if header_statement != '' -%>
Header <%= header_statement %>
<%- end -%>
<%- end -%>
<% end -%>
| {
"pile_set_name": "Github"
} |
<?php
/*
* This file is part of SwiftMailer.
* (c) 2011 Fabien Potencier
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
/**
* Stores all sent emails for further usage.
*
* @author Fabien Potencier
*/
/**
 * Stores all sent emails for further usage.
 *
 * @author Fabien Potencier
 */
class Swift_Plugins_MessageLogger implements Swift_Events_SendListener
{
    /**
     * Every message captured so far, in send order.
     *
     * @var array
     */
    private $messages;

    public function __construct()
    {
        $this->messages = array();
    }

    /**
     * Invoked immediately before the Message is sent: snapshots the message.
     *
     * @param Swift_Events_SendEvent $evt
     */
    public function beforeSendPerformed(Swift_Events_SendEvent $evt)
    {
        // Clone so later mutations of the original message do not alter the log.
        $this->messages[] = clone $evt->getMessage();
    }

    /**
     * Invoked immediately after the Message is sent (intentionally a no-op).
     *
     * @param Swift_Events_SendEvent $evt
     */
    public function sendPerformed(Swift_Events_SendEvent $evt)
    {
    }

    /**
     * Get the message list.
     *
     * @return array
     */
    public function getMessages()
    {
        return $this->messages;
    }

    /**
     * Get the message count.
     *
     * @return int count
     */
    public function countMessages()
    {
        return count($this->messages);
    }

    /**
     * Empty the message list.
     */
    public function clear()
    {
        $this->messages = array();
    }
}
| {
"pile_set_name": "Github"
} |
var baseClone = require('./_baseClone'),
baseIteratee = require('./_baseIteratee');
/** Used to compose bitmasks for cloning. */
var CLONE_DEEP_FLAG = 1;
/**
* Creates a function that invokes `func` with the arguments of the created
* function. If `func` is a property name, the created function returns the
* property value for a given element. If `func` is an array or object, the
* created function returns `true` for elements that contain the equivalent
* source properties, otherwise it returns `false`.
*
* @static
* @since 4.0.0
* @memberOf _
* @category Util
* @param {*} [func=_.identity] The value to convert to a callback.
* @returns {Function} Returns the callback.
* @example
*
* var users = [
* { 'user': 'barney', 'age': 36, 'active': true },
* { 'user': 'fred', 'age': 40, 'active': false }
* ];
*
* // The `_.matches` iteratee shorthand.
* _.filter(users, _.iteratee({ 'user': 'barney', 'active': true }));
* // => [{ 'user': 'barney', 'age': 36, 'active': true }]
*
* // The `_.matchesProperty` iteratee shorthand.
* _.filter(users, _.iteratee(['user', 'fred']));
* // => [{ 'user': 'fred', 'age': 40 }]
*
* // The `_.property` iteratee shorthand.
* _.map(users, _.iteratee('user'));
* // => ['barney', 'fred']
*
* // Create custom iteratee shorthands.
* _.iteratee = _.wrap(_.iteratee, function(iteratee, func) {
* return !_.isRegExp(func) ? iteratee(func) : function(string) {
* return func.test(string);
* };
* });
*
* _.filter(['abc', 'def'], /ef/);
* // => ['def']
*/
function iteratee(func) {
  // Functions pass through untouched; anything else (object/array shorthand)
  // is deep-cloned first so later mutations of the source cannot affect the
  // created callback.
  var resolved = typeof func == 'function'
    ? func
    : baseClone(func, CLONE_DEEP_FLAG);
  return baseIteratee(resolved);
}

module.exports = iteratee;
| {
"pile_set_name": "Github"
} |
---
# Apply the bootstrap role to every host in the "ip-services" inventory group.
- hosts: ip-services
  vars:
    # Presumably toggles customer-account creation inside the bootstrap role;
    # confirm against the role's defaults before changing.
    create_cust_account: false
  roles:
    - bootstrap
| {
"pile_set_name": "Github"
} |
---
title: "Modify SIP trunk configuration settings in Skype for Business Server"
ms.reviewer:
ms.author: v-lanac
author: lanachin
manager: serdars
audience: ITPro
ms.topic: article
ms.prod: skype-for-business-itpro
f1.keywords:
- NOCSH
localization_priority: Normal
description: "SIP trunk configuration settings define the relationship and capabilities between a Mediation Server and the public switched telephone network (PSTN) gateway, an IP-public branch exchange (PBX), or a Session Border Controller (SBC) at the service provider. "
---
# Modify SIP trunk configuration settings in Skype for Business Server
SIP trunk configuration settings define the relationship and capabilities between a Mediation Server and the public switched telephone network (PSTN) gateway, an IP-public branch exchange (PBX), or a Session Border Controller (SBC) at the service provider. Among other things, these settings specify:
- Whether media bypass should be enabled on the trunks.
- The conditions under which real-time transport control protocol (RTCP) packets are sent.
- Whether or not secure real-time protocol (SRTP) encryption is required on each trunk.
When you install Skype for Business Server, a global collection of SIP trunk configuration settings is created for you. In addition, administrators can create custom setting collections at the site scope or at the service scope (for the PSTN gateway service, only). Any of these collections can later be modified using either the Skype for Business Server Control Panel or Windows PowerShell.
When modifying SIP trunk configuration settings using the Skype for Business Server Server Control Panel, the following options are available to you:
|UI Setting |PowerShell Parameter |Description |
|--|--|--|
|Name|Identity|Unique identifier for the collection. This property is read-only; you cannot change the Identity of a collection of trunk configuration settings.|
|Description|Description|Provides a way for administrators to store additional information about the settings (for example, the purpose of the trunk configuration).|
|Maximum early dialogs supported|MaxEarlyDialogs|The maximum number of forked responses a PSTN gateway, IP-PBX, or SBC at the service provider can receive to an Invite that it sent to the Mediation Server.|
|Encryption support level|SRTPMode|Indicates the level of support for protecting media traffic between the Mediation Server and the PSTN Gateway, IP-PBX, or SBC at the service provider. For media bypass cases, this value must be compatible with the EncryptionLevel setting in the media configuration. Media configuration is set by using the New-CsMediaConfiguration and Set-CsMediaConfiguration cmdlets.<br/>Allowed values are:<br/><br/>**Required**: SRTP encryption must be used.<br/>**Optional**: SRTP will be used if the gateway supports it.<br/>**Not Supported**: SRTP encryption is not supported and therefore will not be used.<br/><br/>SRTPMode is used only if the gateway is configured to use Transport Layer Security (TLS). If the gateway is configured with Transmission Control Protocol (TCP) as the transport, SRTPMode is internally set to Not Supported.|
|Refer support|Enable3pccRefer<br/>EnableReferSupport|If set to **Enable sending refer to the gateway**, indicates that the trunk supports receiving Refer requests from the Mediation Server.<br/>If set to **Enable refer using third-party call control**, indicates that the 3pcc protocol can be used to allow transferred calls to bypass the hosted site. 3pcc is also known as "third party control," and occurs when a third-party is used to connect a pair of callers (for example, an operator placing a call from person A to person B).|
|Enable media bypass|EnableBypass|Indicates whether media bypass is enabled for this trunk. Media bypass can only be enabled if **Centralized media processing** is also enabled.|
|Centralized media processing|ConcentratedTopology|Indicates whether there is a well-known media termination point. (An example of a well-known media termination point would be a PSTN gateway where the media termination has the same IP as the signaling termination.)|
|Enable RTP latching|EnableRTPLatching|Indicates whether or not the SIP trunks support RTP latching. RTP latching is a technology that enables RTP/RTCP connectivity through a NAT (network address translator) device or firewall.|
|Enable forward call history|ForwardCallHistory|Indicates whether call history information will be forwarded through the trunk.|
|Enable forward P-Asserted-Identity data|ForwardPAI|Indicates whether the P-Asserted-Identity (PAI) header will be forwarded along with the call. The PAI header provides a way to verify the identity of the caller.|
|Enable outbound routing failover timer|EnableFastFailoverTimer|Indicates whether outbound calls that are not answered by the gateway within 10 seconds will be routed to the next available trunk; if there are no additional trunks then the call will automatically be dropped. In an organization with slow networks and gateway responses, that could potentially result in calls being dropped unnecessarily.|
|Associated PSTN usages|PSTNUsages|Collection of PSTN usages assigned to the trunk.|
|Translated number to test|N/A|Phone number that can be used to do an ad hoc test of the trunk configuration settings.|
|Associated translation rules|OutboundTranslationRulesList|Collection of phone number translation rules that apply to calls handled by Outbound Routing (calls routed to PBX or PSTN destinations).|
|Called number translation rules|OutboundCallingNumberTranslationRulesList|Collection of outbound calling number translation rules assigned to the trunk.|
|Phone number to test|N/A|Phone number that can be used to do an ad hoc test of the translation rules.|
|Calling number|N/A|Indicates that the phone number to test is the phone number of the caller.|
|Called number|N/A|Indicates that the phone number to test is the phone number of the person being called.|
|||
> [!Note]
> The Skype for Business Server CsTrunkConfiguration cmdlets support additional properties not shown in the Skype for Business Server Control Panel. For more information, see the help topic for the [Set-CsTrunkConfiguration](https://docs.microsoft.com/powershell/module/skype/Set-CsTrunkConfiguration) cmdlet.
**To modify SIP trunk configuration settings by using the Skype for Business Server Control Panel**
1. In the Skype for Business Server Control Panel, click **Voice Routing**, and then click **Trunk Configuration**.
2. On the **Trunk Configuration** tab, double-click the trunk configuration settings to be modified. Note that you can only edit one collection of settings at a time. If you would like to make the same changes on multiple collections, use Windows PowerShell instead.
3. In the **Edit Trunk Configuration** dialog box, make the appropriate selections and then click **OK**.
4. The State property for the collection will be updated to Uncommitted. To commit the changes, and to delete the collection, click **Commit**, and then click **Commit All**.
5. In the **Uncommitted Voice Configuration Settings** dialog box, click **OK**.
6. In the **Skype for Business Server Control Panel** dialog box, click **OK**.
| {
"pile_set_name": "Github"
} |
//
// main.cpp
// f-阿里编程测试-问答题1
//
// Created by ZYJ on 2018/7/25.
// Copyright © 2018年 ZYJ. All rights reserved.
//
// GitHub : https://github.com/f-zyj
// CSDN : https://blog.csdn.net/f_zyj
//
#include <cstdio>
#include <algorithm>

using namespace std;

const int INF = 0x3f3f3f3f;  // sentinel "infinity" used for bucket minima
const int MAXN = 1e7 + 7;    // capacity for input values and buckets

int a[MAXN];       // raw input values
int maxVal[MAXN];  // per-bucket maximum (-1 when the bucket is empty)
int minVal[MAXN];  // per-bucket minimum (INF when the bucket is empty)
/**
 * Reads whitespace-separated integers from stdin until EOF and prints the
 * maximum gap between adjacent values of the sorted input, computed in O(n)
 * with the pigeonhole bucketing argument: with n values and buckets of width
 * ceil((max-min)/(n-1)), the answer never lies inside one bucket, so only
 * gaps between consecutive non-empty buckets need to be examined.
 *
 * NOTE(review): like the original, this assumes non-negative inputs — the
 * value -1 is used as the "empty bucket" sentinel.
 */
int main()
{
    int t, cnt = 0;
    while (~scanf("%d", &t))
    {
        a[cnt++] = t;
        getchar();
    }
    if (cnt == 0)
    {
        return 0;  // no input at all
    }
    int min_ = INF, max_ = -1;
    for (int i = 0; i < cnt; ++i)
    {
        min_ = min(min_, a[i]);
        max_ = max(max_, a[i]);
        maxVal[i] = -1;
        minVal[i] = INF;
    }
    // FIX: with a single value, or all values equal, the original divided by
    // zero (by cnt-1 and by the bucket width t). The maximum gap is 0.
    if (cnt == 1 || max_ == min_)
    {
        printf("0\n");
        return 0;
    }
    // Bucket width: ceil((max - min) / (cnt - 1)).
    t = (max_ - min_) / (cnt - 1);
    if ((max_ - min_) % (cnt - 1) != 0)
    {
        ++t;
    }
    // Drop each value into its bucket, tracking per-bucket min/max.
    for (int i = 0; i < cnt; ++i)
    {
        int id = (a[i] - min_) / t;
        minVal[id] = min(minVal[id], a[i]);
        maxVal[id] = max(maxVal[id], a[i]);
    }
    // Lower bound for the answer; refined by scanning non-empty buckets.
    int ans = (max_ - min_) / (cnt - 1);
    max_ = -1;
    for (int i = 0; i < cnt; ++i)
    {
        if (maxVal[i] == -1)
        {
            continue;  // empty bucket
        }
        if (max_ != -1)
        {
            // Gap between this bucket's minimum and the previous bucket's maximum.
            ans = max(ans, minVal[i] - max_);
        }
        max_ = maxVal[i];
    }
    printf("%d\n", ans);
    return 0;
}
| {
"pile_set_name": "Github"
} |
/*
* Electronic Arts CMV Video Decoder
* Copyright (c) 2007-2008 Peter Ross
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* Electronic Arts CMV Video Decoder
* by Peter Ross (pross@xvid.org)
*
* Technical details here:
* http://wiki.multimedia.cx/index.php?title=Electronic_Arts_CMV
*/
#include "libavutil/common.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/imgutils.h"
#include "avcodec.h"
#include "internal.h"
/** Decoder private state (allocated by libavcodec as avctx->priv_data). */
typedef struct CmvContext {
    AVCodecContext *avctx;
    AVFrame *last_frame;   ///< last decoded frame, reference for inter blocks
    AVFrame *last2_frame;  ///< second-last decoded frame, alternate reference
    int width, height;     ///< dimensions parsed from the MVIh header
    unsigned int palette[AVPALETTE_COUNT]; ///< current palette, 0xAARRGGBB entries
} CmvContext;
/**
 * Codec init: select PAL8 output and allocate both reference frames.
 * Returns 0 on success or AVERROR(ENOMEM), freeing any partial allocation.
 */
static av_cold int cmv_decode_init(AVCodecContext *avctx){
    CmvContext *s = avctx->priv_data;

    s->avctx = avctx;
    avctx->pix_fmt = AV_PIX_FMT_PAL8;

    s->last_frame  = av_frame_alloc();
    s->last2_frame = av_frame_alloc();
    if (s->last_frame && s->last2_frame)
        return 0;

    /* One (or both) allocations failed: release whatever was obtained. */
    av_frame_free(&s->last_frame);
    av_frame_free(&s->last2_frame);
    return AVERROR(ENOMEM);
}
/**
 * Decode an intra frame: the payload is raw 8-bit palettized rows.
 * Copies up to avctx->height rows of avctx->width bytes each, stopping
 * early (silently) if the buffer does not hold a full row.
 */
static void cmv_decode_intra(CmvContext * s, AVFrame *frame,
                             const uint8_t *buf, const uint8_t *buf_end)
{
    unsigned char *dst = frame->data[0];
    int i;

    for (i=0; i < s->avctx->height && buf_end - buf >= s->avctx->width; i++) {
        memcpy(dst, buf, s->avctx->width);
        dst += frame->linesize[0];
        buf += s->avctx->width;
    }
}
/**
 * Motion-compensate one 4x4 block: copy the block at (x,y) in dst from the
 * block at (x+xoffset, y+yoffset) in src. Reference pixels that fall outside
 * the width x height frame are written as 0.
 */
static void cmv_motcomp(unsigned char *dst, ptrdiff_t dst_stride,
                        const unsigned char *src, ptrdiff_t src_stride,
                        int x, int y,
                        int xoffset, int yoffset,
                        int width, int height){
    int i, j;

    for (j = y; j < y + 4; j++) {
        for (i = x; i < x + 4; i++) {
            const int sx = i + xoffset;
            const int sy = j + yoffset;

            if (sx >= 0 && sx < width && sy >= 0 && sy < height)
                dst[j * dst_stride + i] = src[sy * src_stride + sx];
            else
                dst[j * dst_stride + i] = 0;
        }
    }
}
/**
 * Decode an inter frame.
 *
 * The bitstream begins with one control byte per 4x4 block (width/4 *
 * height/4 of them); the literal/motion payload follows at `raw`. Per-block
 * control byte:
 *   - 0xFF: consult `raw` — a leading 0xFF there introduces 16 literal
 *     pixels (intra block); otherwise one motion byte referencing the
 *     second-last frame;
 *   - any other value: a motion byte (low nibble = x offset + 7, high
 *     nibble = y offset + 7) referencing the last frame.
 */
static void cmv_decode_inter(CmvContext *s, AVFrame *frame, const uint8_t *buf,
                             const uint8_t *buf_end)
{
    /* Payload starts right after the per-block control-byte table. */
    const uint8_t *raw = buf + (s->avctx->width*s->avctx->height/16);
    int x,y,i;

    i = 0;
    for(y=0; y<s->avctx->height/4; y++)
        for(x=0; x<s->avctx->width/4 && buf_end - buf > i; x++) {
            if (buf[i]==0xFF) {
                unsigned char *dst = frame->data[0] + (y*4)*frame->linesize[0] + x*4;
                if (raw+16<buf_end && *raw==0xFF) { /* intra */
                    raw++;
                    memcpy(dst, raw, 4);
                    memcpy(dst + frame->linesize[0], raw+4, 4);
                    memcpy(dst + 2 * frame->linesize[0], raw+8, 4);
                    memcpy(dst + 3 * frame->linesize[0], raw+12, 4);
                    raw+=16;
                }else if(raw<buf_end) { /* inter using second-last frame as reference */
                    int xoffset = (*raw & 0xF) - 7;
                    int yoffset = ((*raw >> 4)) - 7;
                    /* Skip compensation if the reference was never decoded. */
                    if (s->last2_frame->data[0])
                        cmv_motcomp(frame->data[0], frame->linesize[0],
                                    s->last2_frame->data[0], s->last2_frame->linesize[0],
                                    x*4, y*4, xoffset, yoffset, s->avctx->width, s->avctx->height);
                    raw++;
                }
            }else{ /* inter using last frame as reference */
                int xoffset = (buf[i] & 0xF) - 7;
                int yoffset = ((buf[i] >> 4)) - 7;
                if (s->last_frame->data[0])
                    cmv_motcomp(frame->data[0], frame->linesize[0],
                                s->last_frame->data[0], s->last_frame->linesize[0],
                                x*4, y*4, xoffset, yoffset, s->avctx->width, s->avctx->height);
            }
            i++;
        }
}
/**
 * Parse an MVIh header: frame dimensions, frame rate, and a palette update.
 *
 * A size change invalidates both reference frames. Palette entries are
 * stored as 0xAARRGGBB with alpha forced to 0xFF.
 *
 * @return 0 on success, AVERROR_INVALIDDATA for a truncated header, or a
 *         negative error from ff_set_dimensions.
 */
static int cmv_process_header(CmvContext *s, const uint8_t *buf, const uint8_t *buf_end)
{
    int pal_start, pal_count, i, ret, fps;

    if(buf_end - buf < 16) {
        av_log(s->avctx, AV_LOG_WARNING, "truncated header\n");
        return AVERROR_INVALIDDATA;
    }

    s->width  = AV_RL16(&buf[4]);
    s->height = AV_RL16(&buf[6]);

    /* New dimensions make the previously decoded references unusable. */
    if (s->width  != s->avctx->width ||
        s->height != s->avctx->height) {
        av_frame_unref(s->last_frame);
        av_frame_unref(s->last2_frame);
    }

    ret = ff_set_dimensions(s->avctx, s->width, s->height);
    if (ret < 0)
        return ret;

    fps = AV_RL16(&buf[10]);
    if (fps > 0)
        s->avctx->framerate = (AVRational){ fps, 1 };

    /* Partial palette update: pal_count entries starting at index pal_start. */
    pal_start = AV_RL16(&buf[12]);
    pal_count = AV_RL16(&buf[14]);

    buf += 16;
    for (i=pal_start; i<pal_start+pal_count && i<AVPALETTE_COUNT && buf_end - buf >= 3; i++) {
        s->palette[i] = 0xFFU << 24 | AV_RB24(buf);
        buf += 3;
    }

    return 0;
}
#define EA_PREAMBLE_SIZE 8
#define MVIh_TAG MKTAG('M', 'V', 'I', 'h')  /* optional in-stream header chunk */

/**
 * Decode one CMV frame.
 *
 * An optional MVIh chunk (accepted with either tag endianness) may precede
 * the frame payload and updates dimensions/fps/palette. After the 8-byte
 * preamble, bit 0 of the first payload byte selects inter (1) or intra (0)
 * coding. The decoded frame becomes the new "last" reference, and the
 * previous "last" shifts to "second-last".
 */
static int cmv_decode_frame(AVCodecContext *avctx,
                            void *data, int *got_frame,
                            AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    CmvContext *s = avctx->priv_data;
    const uint8_t *buf_end = buf + buf_size;
    AVFrame *frame = data;
    int ret;

    if (buf_end - buf < EA_PREAMBLE_SIZE)
        return AVERROR_INVALIDDATA;

    if (AV_RL32(buf)==MVIh_TAG||AV_RB32(buf)==MVIh_TAG) {
        unsigned size = AV_RL32(buf + 4);
        ret = cmv_process_header(s, buf+EA_PREAMBLE_SIZE, buf_end);
        if (ret < 0)
            return ret;
        if (size > buf_end - buf - EA_PREAMBLE_SIZE)
            return AVERROR_INVALIDDATA;
        buf += size;
    }

    if ((ret = av_image_check_size(s->width, s->height, 0, s->avctx)) < 0)
        return ret;

    if ((ret = ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF)) < 0)
        return ret;

    memcpy(frame->data[1], s->palette, AVPALETTE_SIZE);

    buf += EA_PREAMBLE_SIZE;
    if ((buf[0]&1)) { // subtype
        cmv_decode_inter(s, frame, buf+2, buf_end);
        frame->key_frame = 0;
        frame->pict_type = AV_PICTURE_TYPE_P;
    }else{
        frame->key_frame = 1;
        frame->pict_type = AV_PICTURE_TYPE_I;
        cmv_decode_intra(s, frame, buf+2, buf_end);
    }

    /* Rotate the reference frames: last -> second-last, new -> last. */
    av_frame_unref(s->last2_frame);
    av_frame_move_ref(s->last2_frame, s->last_frame);
    if ((ret = av_frame_ref(s->last_frame, frame)) < 0)
        return ret;

    *got_frame = 1;

    return buf_size;
}
/**
 * Release the decoder's reference frames when the codec is closed.
 * Always succeeds and returns 0.
 */
static av_cold int cmv_decode_end(AVCodecContext *avctx)
{
    CmvContext *const cmv = avctx->priv_data;

    av_frame_free(&cmv->last_frame);
    av_frame_free(&cmv->last2_frame);

    return 0;
}
/* Codec registration entry for the Electronic Arts CMV video decoder. */
AVCodec ff_eacmv_decoder = {
    .name           = "eacmv",
    .long_name      = NULL_IF_CONFIG_SMALL("Electronic Arts CMV video"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_CMV,
    .priv_data_size = sizeof(CmvContext),
    .init           = cmv_decode_init,
    .close          = cmv_decode_end,
    .decode         = cmv_decode_frame,
    .capabilities   = AV_CODEC_CAP_DR1, /* supports direct rendering (get_buffer) */
};
| {
"pile_set_name": "Github"
} |
%YAML 1.1
%TAG !u! tag:unity3d.com,2011:
--- !u!29 &1
OcclusionCullingSettings:
m_ObjectHideFlags: 0
serializedVersion: 2
m_OcclusionBakeSettings:
smallestOccluder: 5
smallestHole: 0.25
backfaceThreshold: 100
m_SceneGUID: 00000000000000000000000000000000
m_OcclusionCullingData: {fileID: 0}
--- !u!104 &2
RenderSettings:
m_ObjectHideFlags: 0
serializedVersion: 9
m_Fog: 0
m_FogColor: {r: 0.5, g: 0.5, b: 0.5, a: 1}
m_FogMode: 3
m_FogDensity: 0.01
m_LinearFogStart: 0
m_LinearFogEnd: 300
m_AmbientSkyColor: {r: 0.212, g: 0.227, b: 0.259, a: 1}
m_AmbientEquatorColor: {r: 0.114, g: 0.125, b: 0.133, a: 1}
m_AmbientGroundColor: {r: 0.047, g: 0.043, b: 0.035, a: 1}
m_AmbientIntensity: 1
m_AmbientMode: 0
m_SubtractiveShadowColor: {r: 0.42, g: 0.478, b: 0.627, a: 1}
m_SkyboxMaterial: {fileID: 10304, guid: 0000000000000000f000000000000000, type: 0}
m_HaloStrength: 0.5
m_FlareStrength: 1
m_FlareFadeSpeed: 3
m_HaloTexture: {fileID: 0}
m_SpotCookie: {fileID: 10001, guid: 0000000000000000e000000000000000, type: 0}
m_DefaultReflectionMode: 0
m_DefaultReflectionResolution: 128
m_ReflectionBounces: 1
m_ReflectionIntensity: 1
m_CustomReflection: {fileID: 0}
m_Sun: {fileID: 0}
m_IndirectSpecularColor: {r: 0, g: 0, b: 0, a: 1}
m_UseRadianceAmbientProbe: 0
--- !u!157 &3
LightmapSettings:
m_ObjectHideFlags: 0
serializedVersion: 11
m_GIWorkflowMode: 1
m_GISettings:
serializedVersion: 2
m_BounceScale: 1
m_IndirectOutputScale: 1
m_AlbedoBoost: 1
m_EnvironmentLightingMode: 0
m_EnableBakedLightmaps: 1
m_EnableRealtimeLightmaps: 0
m_LightmapEditorSettings:
serializedVersion: 12
m_Resolution: 2
m_BakeResolution: 40
m_AtlasSize: 1024
m_AO: 0
m_AOMaxDistance: 1
m_CompAOExponent: 1
m_CompAOExponentDirect: 0
m_ExtractAmbientOcclusion: 0
m_Padding: 2
m_LightmapParameters: {fileID: 0}
m_LightmapsBakeMode: 1
m_TextureCompression: 1
m_FinalGather: 0
m_FinalGatherFiltering: 1
m_FinalGatherRayCount: 256
m_ReflectionCompression: 2
m_MixedBakeMode: 2
m_BakeBackend: 1
m_PVRSampling: 1
m_PVRDirectSampleCount: 32
m_PVRSampleCount: 512
m_PVRBounces: 2
m_PVREnvironmentSampleCount: 256
m_PVREnvironmentReferencePointCount: 2048
m_PVRFilteringMode: 1
m_PVRDenoiserTypeDirect: 1
m_PVRDenoiserTypeIndirect: 1
m_PVRDenoiserTypeAO: 1
m_PVRFilterTypeDirect: 0
m_PVRFilterTypeIndirect: 0
m_PVRFilterTypeAO: 0
m_PVREnvironmentMIS: 1
m_PVRCulling: 1
m_PVRFilteringGaussRadiusDirect: 1
m_PVRFilteringGaussRadiusIndirect: 5
m_PVRFilteringGaussRadiusAO: 2
m_PVRFilteringAtrousPositionSigmaDirect: 0.5
m_PVRFilteringAtrousPositionSigmaIndirect: 2
m_PVRFilteringAtrousPositionSigmaAO: 1
m_ExportTrainingData: 0
m_TrainingDataDestination: TrainingData
m_LightProbeSampleCountMultiplier: 4
m_LightingDataAsset: {fileID: 0}
m_UseShadowmask: 1
--- !u!196 &4
NavMeshSettings:
serializedVersion: 2
m_ObjectHideFlags: 0
m_BuildSettings:
serializedVersion: 2
agentTypeID: 0
agentRadius: 0.5
agentHeight: 2
agentSlope: 45
agentClimb: 0.4
ledgeDropHeight: 0
maxJumpAcrossDistance: 0
minRegionArea: 2
manualCellSize: 0
cellSize: 0.16666667
manualTileSize: 0
tileSize: 256
accuratePlacement: 0
debug:
m_Flags: 0
m_NavMeshData: {fileID: 0}
--- !u!1 &802387370
GameObject:
m_ObjectHideFlags: 0
m_CorrespondingSourceObject: {fileID: 0}
m_PrefabInstance: {fileID: 0}
m_PrefabAsset: {fileID: 0}
serializedVersion: 6
m_Component:
- component: {fileID: 802387373}
- component: {fileID: 802387372}
- component: {fileID: 802387371}
m_Layer: 0
m_Name: Main Camera
m_TagString: MainCamera
m_Icon: {fileID: 0}
m_NavMeshLayer: 0
m_StaticEditorFlags: 0
m_IsActive: 1
--- !u!81 &802387371
AudioListener:
m_ObjectHideFlags: 0
m_CorrespondingSourceObject: {fileID: 0}
m_PrefabInstance: {fileID: 0}
m_PrefabAsset: {fileID: 0}
m_GameObject: {fileID: 802387370}
m_Enabled: 1
--- !u!20 &802387372
Camera:
m_ObjectHideFlags: 0
m_CorrespondingSourceObject: {fileID: 0}
m_PrefabInstance: {fileID: 0}
m_PrefabAsset: {fileID: 0}
m_GameObject: {fileID: 802387370}
m_Enabled: 1
serializedVersion: 2
m_ClearFlags: 1
m_BackGroundColor: {r: 0.19215687, g: 0.3019608, b: 0.4745098, a: 0}
m_projectionMatrixMode: 1
m_GateFitMode: 2
m_FOVAxisMode: 0
m_SensorSize: {x: 36, y: 24}
m_LensShift: {x: 0, y: 0}
m_FocalLength: 50
m_NormalizedViewPortRect:
serializedVersion: 2
x: 0
y: 0
width: 1
height: 1
near clip plane: 0.3
far clip plane: 1000
field of view: 60
orthographic: 0
orthographic size: 5
m_Depth: -1
m_CullingMask:
serializedVersion: 2
m_Bits: 4294967295
m_RenderingPath: -1
m_TargetTexture: {fileID: 0}
m_TargetDisplay: 0
m_TargetEye: 3
m_HDR: 1
m_AllowMSAA: 1
m_AllowDynamicResolution: 0
m_ForceIntoRT: 0
m_OcclusionCulling: 1
m_StereoConvergence: 10
m_StereoSeparation: 0.022
--- !u!4 &802387373
Transform:
m_ObjectHideFlags: 0
m_CorrespondingSourceObject: {fileID: 0}
m_PrefabInstance: {fileID: 0}
m_PrefabAsset: {fileID: 0}
m_GameObject: {fileID: 802387370}
m_LocalRotation: {x: 0, y: 0, z: 0, w: 1}
m_LocalPosition: {x: 0, y: 1, z: -10}
m_LocalScale: {x: 1, y: 1, z: 1}
m_Children: []
m_Father: {fileID: 0}
m_RootOrder: 0
m_LocalEulerAnglesHint: {x: 0, y: 0, z: 0}
--- !u!1 &1160152815
GameObject:
m_ObjectHideFlags: 0
m_CorrespondingSourceObject: {fileID: 0}
m_PrefabInstance: {fileID: 0}
m_PrefabAsset: {fileID: 0}
serializedVersion: 6
m_Component:
- component: {fileID: 1160152817}
- component: {fileID: 1160152816}
m_Layer: 0
m_Name: SubScene
m_TagString: Untagged
m_Icon: {fileID: 0}
m_NavMeshLayer: 0
m_StaticEditorFlags: 0
m_IsActive: 1
--- !u!114 &1160152816
MonoBehaviour:
m_ObjectHideFlags: 0
m_CorrespondingSourceObject: {fileID: 0}
m_PrefabInstance: {fileID: 0}
m_PrefabAsset: {fileID: 0}
m_GameObject: {fileID: 1160152815}
m_Enabled: 1
m_EditorHideFlags: 0
m_Script: {fileID: 11500000, guid: 45a335734b1572644a6a5d09d87adc65, type: 3}
m_Name:
m_EditorClassIdentifier:
_SceneAsset: {fileID: 102900000, guid: 6181f67d924aaab47a61e8e599da8a93, type: 3}
_HierarchyColor: {r: 0.5, g: 0.5, b: 0.5, a: 1}
AutoLoadScene: 1
_SceneGUID:
Value:
x: 3614382102
y: 1269474345
z: 1586370215
w: 967355801
--- !u!4 &1160152817
Transform:
m_ObjectHideFlags: 0
m_CorrespondingSourceObject: {fileID: 0}
m_PrefabInstance: {fileID: 0}
m_PrefabAsset: {fileID: 0}
m_GameObject: {fileID: 1160152815}
m_LocalRotation: {x: 0, y: 0, z: 0, w: 1}
m_LocalPosition: {x: 0, y: 0, z: 0}
m_LocalScale: {x: 1, y: 1, z: 1}
m_Children: []
m_Father: {fileID: 0}
m_RootOrder: 1
m_LocalEulerAnglesHint: {x: 0, y: 0, z: 0}
| {
"pile_set_name": "Github"
} |
//
// Generated by class-dump 3.5 (64 bit) (Debug version compiled Oct 15 2018 10:31:50).
//
// class-dump is Copyright (C) 1997-1998, 2000-2001, 2004-2015 by Steve Nygard.
//
#import <objc/NSObject.h>
@class NSMutableArray, NSMutableDictionary, NSString;
__attribute__((visibility("hidden")))
// Parser for TOC (table-of-contents) files, reverse-engineered by class-dump;
// method semantics below are inferred from names only — no implementations
// are visible here, so treat all descriptions as assumptions to verify.
@interface DRTOCFileParser : NSObject
{
    NSString *tocPath;                   // path of the TOC file to parse (presumably set by -initWithPath:)
    NSMutableArray *trackInfoList;       // accumulated per-track info — TODO confirm element type
    NSMutableDictionary *burnInfo;       // presumably disc/burn-level properties
    NSMutableArray *cdTextBlocks;        // presumably parsed CD-Text blocks
    long long trackNumber;               // NOTE(review): likely the current track counter — verify
}

- (id)fullPathForAuxFile:(id)arg1;
- (void)setCDTextString:(id)arg1 forKey:(id)arg2 ofTrack:(long long)arg3 inLanguage:(long long)arg4;
- (id)extractOptionalString:(id)arg1;
- (id)extractRequiredString:(id)arg1;
- (void)parseCDText:(id)arg1 forDisc:(BOOL)arg2;
- (id)parseTrackModes:(id)arg1 isAudioTrack:(char *)arg2;
- (id)parseTrack:(id)arg1;
- (id)burnProperties;
- (id)cdText;
- (id)trackLayout;
- (BOOL)parse;
- (void)dealloc;
- (id)initWithPath:(id)arg1;
@end
| {
"pile_set_name": "Github"
} |
IVliiMtDi-U POS O
den POS O
24 POS O
APRIL. POS O
A°. POS O
1865. POS O
M POS O
49. POS O
PROVINCIALE POS B-ORG
OVERIJSSELSCHE POS I-ORG
EN POS I-ORG
ZWOLSCHE POS I-ORG
COURANT. POS I-ORG
85 POS O
Bte POS O
Jaargang. POS O
Deze POS O
Courant POS O
wordt POS O
uitgegeven POS O
op POS O
Dingsdag-, POS O
Donderdag POS O
en POS O
Zaturdag-avond. POS O
Prijs POS O
per POS O
kwartaal POS O
/' POS O
2.37}. POS O
Franco POS O
per POS O
post POS O
/'2.02}. POS O
De POS O
prijs POS O
der POS O
advertentie!) POS O
is POS O
van POS O
I?4 POS O
regels POS O
80 POS O
Centen POS O
voor POS O
eiken POS O
regel POS O
daarenboven POS O
20 POS O
Ct POS O
, POS O
behalve POS O
35 POS O
Ct. POS O
ze POS O
gel POS O
regt. POS O
De POS O
GEDEPUTEERDE POS B-ORG
STATEN POS I-ORG
vau POS I-ORG
de POS I-ORG
provincie POS I-ORG
OVERIJSSEL, POS I-ORG
Overwegeude POS O
dat POS O
in POS O
het POS O
reglement POS O
op POS O
de POS O
heffing POS O
der POS O
belasting POS O
op POS O
den POS O
turf POS O
in POS O
het POS O
dijksdistriet POS O
van POS O
Volienhove POS B-LOC
zooal» POS O
dat POS O
bij POS O
besluit POS O
dezer POS O
vergadering POS O
van POS O
den POS O
22 POS O
Maart POS O
jl., POS O
8' POS O
afdeeling, POS O
N°. POS O
J*f, POS O
is POS O
afge POS O
kondigd POS O
eene POS O
misstelling POS O
beeft POS O
plaats POS O
gehad, POS O
die POS O
verbeterd POS O
dient POS O
te POS O
worden. POS O
Hebben POS O
goedgevonden: POS O
I°. POS O
Het POS O
reglement POS O
op POS O
nieuw POS O
af POS O
te POS O
kondigen POS O
door POS O
opneming POS O
iv POS O
het POS O
Provinciaal POS B-ORG
Blad POS I-ORG
en POS O
het POS O
ter POS O
openbare POS O
kennis POS O
te POS O
brengen POS O
door POS O
plaatsing POS O
in POS O
de POS O
Provinciale POS B-ORG
Overijssëlsche POS I-ORG
en POS I-ORG
Zwolsche POS I-ORG
Courant POS I-ORG
op POS O
de POS O
volgende POS O
wyze. POS O
De POS O
GEDEPUTEERDE POS B-ORG
STATEN POS I-ORG
van POS I-ORG
de POS I-ORG
provincie POS I-ORG
OVERIJSSEL POS I-ORG
doen POS O
te POS O
weten POS O
, POS O
dat POS O
door POS O
de POS O
Stateu POS O
dier POS O
provincie POS O
in POS O
hunne POS O
verga POS O
dering POS O
van POS O
den POS O
11 POS O
Julij POS O
jl. POS O
is POS O
vastgesteld POS O
eu POS O
door POS O
den POS O
koning POS O
bij POS O
besluit POS O
van POS O
den POS O
2 POS O
Maart POS O
daaraanvolgende, POS O
n». POS O
02, POS O
is POS O
goedgekeurd POS O
hetgeen POS O
volgt: POS O
REOIiEJII-i-T POS O
op POS O
de POS O
heffing POS O
der POS O
belas POS O
ting POS O
op POS O
den POS O
turf POS O
in POS O
het POS O
dijksdistriet POS O
van POS O
Volienhove. POS B-LOC
Art. POS O
1. POS O
De POS O
heffing POS O
der POS O
belasting POS O
op POS O
den POS O
turf, POS O
bedoeld POS O
bij POS O
art. POS O
29 POS O
lett. POS O
b POS O
van POS O
het POS O
reglement; POS O
op POS O
het. POS O
beheer POS O
der POS O
dijken POS O
enz. POS O
iv POS O
Overijssel, POS B-LOC
geschiedt POS O
iv POS O
het POS O
dijksdistriet POS O
van POS O
Volienhove POS B-LOC
overeenkomstig POS O
de POS O
Ba POS O
volgende POS O
bepalingen. POS O
Art. POS O
2. POS O
De POS O
belasting POS O
wordt POS O
geheven POS O
bij POS O
da POS O
vierkaute POS O
el POS O
gespreid POS O
veen; POS O
Art. POS O
3. POS O
Alvorens POS O
met POS O
de POS O
werkzaamheden POS O
der POS O
vereeniging POS O
aan POS O
te POS O
vanden, POS O
doet POS O
de POS O
vervecner POS O
jaarlijks POS O
eene POS O
schriftelijke POS O
door POS O
hem POS O
ge POS O
teekendfi POS O
aangifte POS O
toekomen POS O
aan POS O
den POS O
beambte, POS O
dooi- POS O
bet POS O
dijksbestuur POS O
voor POS O
een POS O
of POS O
meer POS O
gemeenten, POS O
waar POS O
verveend POS O
wordt, POS O
aan POS O
te POS O
wijzen. POS O
Die POS O
aangifte POS O
houdt POS O
in: POS O
a. POS O
den POS O
naam, POS O
den POS O
voornaam POS O
en POS O
de POS O
woonplaats POS O
van POS O
den POS O
vervecner; POS O
b. POS O
het POS O
getal POS O
vierkante POS O
ellen, POS O
die POS O
hij POS O
voornemens POS O
is POS O
in POS O
het POS O
loopende POS O
jaar POS O
met POS O
veen POS O
te POS O
beepreiden; POS O
c. POS O
de POS O
kadastrale POS O
aanduiding POS O
vau POS O
het POS O
land, POS O
waarop POS O
hij POS O
die POS O
veen POS O
spreiding POS O
denkt POS O
te POS O
doen. POS O
De POS O
beambte POS O
schrijft POS O
deze POS O
aangiften POS O
in POS O
een POS O
register POS O
en POS O
nummert POS O
ze POS O
daarin POS O
naar POS O
de POS O
volgorde, POS O
waariu POS O
hij POS O
ze POS O
heeft POS O
ontvangen. POS O
Aan POS O
den POS O
aangever POS O
geeft POS O
hij POS O
een POS O
bewijs POS O
af POS O
van POS O
de POS O
ontvangst POS O
der POS O
aan POS O
gifte. POS O
Op POS O
dat POS O
bewijs POS O
vermeldt POS O
hij POS O
het POS O
nummer, POS O
wt-afonder POS O
do POS O
aangifte POS O
in POS O
zijn POS O
register POS O
is POS O
ingeschreven. POS O
Gelijke POS O
aangifte POS O
wordt POS O
gedaan, POS O
indien POS O
de POS O
verveener POS O
later POS O
voornemens POS O
wordt POS O
om POS O
meer POS O
of POS O
minder POS O
te POS O
verveenen, POS O
dan POS O
in POS O
zijne POS O
oorspronkelijke POS O
Opgave POS O
is POS O
vermeld. POS O
Art. POS O
4. POS O
Aan POS O
den POS O
aangever, POS O
die POS O
niet POS O
kan POS O
aantoonen POS O
dat POS O
hij POS O
do POS O
turf POS O
belasting POS O
over POS O
het POS O
voorgaande POS O
jaar POS O
heeft POS O
voldaan, POS O
geeft POS O
de POS O
beambte POS O
geen POS O
bewijs POS O
van POS O
ontvangst POS O
af. POS O
Art. POS O
5. POS O
Het POS O
dijksbestuur POS O
kan POS O
van POS O
den POS O
aangever POS O
voor POS O
de POS O
belasting POS O
eene POS O
persoonlijke POS O
borgstelling POS O
vorderen, POS O
tot POS O
zoodanig POS O
bedrag POS O
als POS O
het POS O
uoodig POS O
oordeelt. POS O
Voldoet POS O
de POS O
aangever POS O
hieraan POS O
niet, POS O
dan POS O
wordt POS O
hem POS O
geen POS O
bewijs POS O
van POS O
ontvangst POS O
zijner POS O
aangifte POS O
afgegeven. POS O
Art. POS O
6. POS O
De POS O
verveener POS O
plaatst POS O
bij POS O
den POS O
aanvang POS O
van POS O
iedere POS O
veensprei POS O
ding, POS O
en POS O
op POS O
het POS O
punt POS O
waar POS O
hij POS O
met POS O
deze POS O
is POS O
begonnen, POS O
ook POS O
wanneer POS O
het POS O
een POS O
afgescheiden POS O
partij POS O
gespreid POS O
veen POS O
geldt, POS O
een POS O
paaltje POS O
of POS O
plankje, POS O
duidelijk POS O
zigtbaar POS O
en POS O
behoorlijk POS O
in POS O
den POS O
grond POS O
bevestigd, POS O
waarop POS O
met POS O
goede POS O
leesbare POS O
cijfers POS O
is POS O
gesneden POS O
of POS O
geschilderd POS O
het POS O
nummer, POS O
uitge POS O
drukt POS O
op POS O
zijn POS O
bewijs POS O
van POS O
ontvangst, POS O
in POS O
de POS O
voorgaande POS O
artikelen POS O
bedoeld. POS O
Hij POS O
zorgt POS O
dat POS O
deze POS O
paaltjes POS O
of POS O
plankjes POS O
tot POS O
den POS O
eersten POS O
October POS O
van POS O
het POS O
verveeningsjaar, POS O
in POS O
den POS O
voorschreven POS O
toestand POS O
aanwezig POS O
blijven. POS O
Art. POS O
7. POS O
Het POS O
veen POS O
wordt POS O
zooveel POS O
mogelijk POS O
gespreid POS O
in POS O
eene POS O
vierkanto POS O
of POS O
langwerpig POS O
vierkante POS O
figuur. POS O
Art. POS O
8. POS O
De POS O
opmeting POS O
van POS O
het POS O
gespreide POS O
veen POS O
geschiedt POS O
door POS O
meters, POS O
door POS O
het POS O
dijksbestuur POS O
aan POS O
te POS O
wijzen. POS O
Onder POS O
goedkeuring POS O
van POS O
Gedeputeerde POS O
Staten, POS O
wijst POS O
het POS O
dijksbestuur POS O
de POS O
gemeenten POS O
aan, POS O
waarin POS O
ieder POS O
meter POS O
met POS O
de POS O
opmeting POS O
wordt POS O
belast. POS O
Art. POS O
9. POS O
Na POS O
afloop POS O
van POS O
de POS O
veenspreidiug, POS O
waarvan POS O
de POS O
bij POS O
art. POS O
3 POS O
ver POS O
melde POS O
aangifte POS O
is POS O
gedaan, POS O
geschiedt POS O
opmeting POS O
op POS O
aanvrage POS O
vau POS O
den POS O
verveener. POS O
. POS O
... POS O
Voor POS O
zeer POS O
groote POS O
partijen POS O
, POS O
waarvan POS O
de POS O
spreiding POS O
nog POS O
met POS O
is POS O
afge POS O
werkt, POS O
wordt POS O
eene POS O
aanvrage POS O
tot POS O
gedeeltelijke POS O
opmeting POS O
toegelaten, POS O
mits POS O
de POS O
partij POS O
veen, POS O
waarvoor POS O
zoodanige POS O
aanvrago POS O
geschiedt, POS O
door POS O
cene POS O
voldoende POS O
greppel, POS O
ter POS O
beoordeeling POS O
van POS O
den POS O
meter, POS O
worde POS O
afge POS O
scheiden POS O
vau POS O
het POS O
overige POS O
veen, POS O
en POS O
bij POS O
elke POS O
alzoo POS O
afgescheiden POS O
partij POS O
worde POS O
geplaatst POS O
een POS O
paaltje POS O
of POS O
plankje, POS O
zooals POS O
bij POS O
art. POS O
6is POS O
omschreven. POS O
Art. POS O
10. POS O
Bij POS O
zijne POS O
aanvraag POS O
tot POS O
opmeting POS O
vermeldt POS O
de POS O
verveener POS O
lijn POS O
naam, POS O
voornaam POS O
en POS O
woonplaats; POS O
het POS O
getal POS O
vierkante POS O
ellen, POS O
die POS O
hij POS O
volgens POS O
art. POS O
3 POS O
heeft POS O
verklaard POS O
met POS O
veen POS O
te POS O
willen POS O
bespreiden; POS O
de POS O
ka POS O
dastrale POS O
aanduiding POS O
van POS O
het POS O
land, POS O
waarop POS O
de POS O
genoemde POS O
veenspreidiug POS O
heeft POS O
plaats POS O
gehad; POS O
en POS O
in POS O
het POS O
geval, POS O
bij POS O
het POS O
laatste POS O
lid POS O
van POS O
het POS O
vorige POS O
artikel POS O
bedoeld, POS O
het POS O
getal POS O
vierk. POS O
ellen, POS O
waarvan POS O
de POS O
meting POS O
wordt POS O
verlangd. POS O
Art. POS O
11. POS O
De POS O
verveener POS O
doet POS O
de POS O
aanvrage, POS O
overeenkomstig POS O
het POS O
vorige POS O
artikel POS O
toegelicht POS O
en POS O
van POS O
zijne POS O
onderteekening POS O
voorzien, POS O
toekomen POS O
aan POS O
den POS O
meter, POS O
die POS O
haar POS O
inschrijft POS O
in POS O
een POS O
register. POS O
Art. POS O
12. POS O
De POS O
meter POS O
verrigt POS O
in POS O
deu POS O
regel POS O
de POS O
opmeting POS O
in POS O
de POS O
volgorde, POS O
waarin POS O
volgens POS O
zijn POS O
register POS O
de POS O
aanvragen POS O
bij POS O
hem POS O
zijn POS O
ingekomen. POS O
Hij POS O
zorgt POS O
zooveel POS O
mogelijk POS O
dat POS O
den POS O
verveener, POS O
door POS O
vertraging POS O
met POS O
do POS O
opmeting, POS O
geen POS O
nadeel POS O
worde POS O
veroorzaakt. POS O
De POS O
verveener POS O
geeft POS O
den POS O
meter POS O
bij POS O
de POS O
opmeting POS O
allo POS O
vereischte POS O
in POS O
lichtingen. POS O
Art. POS O
13. POS O
De POS O
aanvragen POS O
tot POS O
opmeting POS O
met POS O
de POS O
vereischte POS O
opgaven POS O
wordcu POS O
den POS O
meter POS O
toegezonden POS O
vóór POS O
den POS O
1 POS O
Augustus POS O
van POS O
elk POS O
jaar. POS O
Het POS O
veen POS O
van POS O
hen, POS O
die POS O
geene POS O
aanvrago POS O
vóór POS O
den POS O
genoemden POS O
dag POS O
hebben POS O
ingezonden, POS O
of POS O
de POS O
aangifte POS O
bij POS O
art. POS O
3 POS O
vermeld, POS O
hebben POS O
.nage POS O
laten POS O
, POS O
wordt POS O
ambtshalve POS O
opgemeten. POS O
Art. POS O
14. POS O
Geen POS O
gespreid POS O
veen POS O
wordt POS O
voor POS O
de POS O
opmeting POS O
opgebroken, POS O
«onder POS O
toestemming POS O
van POS O
het POS O
dijksbestuur, POS O
dat, POS O
alvorens POS O
die POS O
te POS O
vcr POS O
leenen, POS O
den POS O
meter POS O
hoort. POS O
De POS O
verveener POS O
, POS O
die POS O
krachtens POS O
verkregen POS O
toestemming POS O
zijn POS O
gespreid POS O
veen POS O
heeft POS O
opgebroken, POS O
laat POS O
het POS O
spreiveld POS O
onveranderd POS O
tot POS O
de POS O
opmeting POS O
daarvan POS O
heeft POS O
plaats POS O
gehad. POS O
Art. POS O
15. POS O
De POS O
meter POS O
geeft POS O
van POS O
den POS O
uitslag POS O
zijver POS O
opmeting POS O
schrifte POS O
lijk POS O
kennis POS O
aan POS O
den POS O
daarbij POS O
betrokken POS O
verveener. POS O
De POS O
verveener POS O
kan POS O
binnen POS O
14 POS O
dagen, POS O
na POS O
de POS O
kennisgeving POS O
van POS O
den POS O
meter POS O
te POS O
hebben POS O
ontvangen, POS O
zich POS O
schriftelijk POS O
wenden POS O
tot POS O
het POS O
dijksbe POS O
stuur, POS O
met POS O
verzoek POS O
om POS O
hermeting. POS O
De POS O
hermeting POS O
geschiedt POS O
ten POS O
spoedigste POS O
ten POS O
overstaan POS O
van POS O
eene POS O
com POS O
missie POS O
uit POS O
het POS O
dijksbestuur, POS O
nadat POS O
de POS O
reclamant POS O
minstens POS O
drie POS O
dagen POS O
te POS O
voren POS O
schriftelijk POS O
is POS O
opgeroepen POS O
om POS O
daarbij POS O
tegenwoordig POS O
to POS O
zijn. POS O
De POS O
uitspraak POS O
dezer POS O
commissie POS O
is POS O
beslissend. POS O
Stemt POS O
die POS O
hermeting POS O
met POS O
do POS O
opmeting POS O
overeen, POS O
dan POS O
komen POS O
de POS O
kosten, POS O
die POS O
zij POS O
heeft POS O
vereischt, POS O
ten POS O
laste POS O
van POS O
den POS O
verveener. POS O
Die POS O
kosten POS O
wordcu POS O
berekend POS O
naar POS O
een POS O
tarief, POS O
vastgesteld POS O
door POS O
Ge POS O
deputeerde POS O
Staten, POS O
op POS O
voorstel POS O
van POS O
het POS O
dijksbestuur. POS O
Van POS O
cene POS O
opmeting, POS O
die POS O
volgens POS O
het POS O
2« POS O
lid POS O
van POS O
art. POS O
13 POS O
is POS O
geschied, POS O
wordt POS O
geene POS O
hermeting POS O
toegestaan. POS O
Art. POS O
16. POS O
De POS O
meter POS O
doet POS O
wekelijks POS O
aan POS O
het POS O
dijksbestuur POS O
toekomen POS O
staten POS O
van POS O
de POS O
door POS O
hem POS O
gedane POS O
opmetingen, POS O
Die POS O
staten POS O
bevatten POS O
a. POS O
de POS O
namen, POS O
voornamen POS O
en POS O
woonplaatsen POS O
der POS O
verveene?, POS O
teu POS O
wier POS O
verzoeke POS O
de POS O
opmetingen POS O
ziju POS O
geschied, POS O
en POS O
van POS O
hen POS O
wier POS O
gespreid POS O
veen POS O
ambtshalve POS O
is POS O
opgemeten; POS O
b. POS O
de POS O
kadastrale POS O
aanduiding POS O
van POS O
de POS O
perceelen, POS O
waarop POS O
bet POS O
gemeten POS O
veen POS O
vau POS O
eiken POS O
vervecner POS O
lag POS O
verspreid; POS O
c. POS O
het POS O
aantal POS O
vierk. POS O
ellen POS O
, POS O
door POS O
hem POS O
voor POS O
ieder? POS O
vervecner POS O
opgemeten. POS O
Art. POS O
17. POS O
Het POS O
dijksbestuur POS O
maakt POS O
uit POS O
de POS O
staten, POS O
in POS O
het POS O
voorgaande POS O
artikel POS O
bedoeld, POS O
na POS O
die POS O
te POS O
hebben POS O
vergeleken POS O
met POS O
de POS O
registers POS O
in POS O
ds POS O
artt. POS O
8 POS O
en POS O
11 POS O
vermeld, POS O
vóór POS O
den POS O
laatsten POS O
September, POS O
voor POS O
elke POS O
be POS O
trokkene POS O
gemeente POS O
afzonderlijk, POS O
een POS O
kohier POS O
vau POS O
aanslag POS O
op. POS O
Dat POS O
kohier POS O
bevat POS O
-. POS O
a. POS O
de POS O
namen, POS O
de POS O
voornamen POS O
en POS O
do POS O
woonplaatsen POS O
der POS O
aangeslagenen; POS O
b. POS O
aanwijzing POS O
van POS O
het POS O
aantal POS O
vierkante POS O
ellen POS O
, POS O
door POS O
elk POS O
hunner POS O
met POS O
veen POS O
bespreid, POS O
en POS O
van POS O
de POS O
uit POS O
dien POS O
hoofde POS O
verschuldigde POS O
belasting; POS O
c. POS O
aanwijzing POS O
van POS O
het POS O
bedrag POS O
der POS O
belasting, POS O
op POS O
elk POS O
der POS O
in POS O
art. POS O
20 POS O
bedoelde POS O
termijnen POS O
versehnltltgd. POS O
Art. POS O
18. POS O
Een POS O
afschrift POS O
vau POS O
het POS O
kohier POS O
wordt POS O
in POS O
elke POS O
gemeente POS O
van POS O
het POS O
district, POS O
waar POS O
verveend POS O
is, POS O
gedurende POS O
veertien POS O
dagen, POS O
op POS O
ceuc POS O
geschikte POS O
plaats POS O
ter POS O
inzage POS O
van POS O
de POS O
belanghebbenden POS O
gelegd. POS O
Vau POS O
de POS O
nederlegging POS O
geschiedt POS O
door POS O
het POS O
tlijksbcstuur POS O
openbare POS O
ken POS O
nisgeving POS O
door POS O
middel POS O
van POS O
kerkenspraken POS O
op POS O
den POS O
zondag, POS O
vooraf POS O
gaande POS O
aan POS O
den POS O
dag POS O
der POS O
nederlegging, POS O
en POS O
door POS O
aanplakking POS O
ter POS O
plaatse POS O
voor POS O
iedere POS O
gemeente, POS O
tor POS O
openbare POS O
aanplakking POS O
bestemd. POS O
Binnen POS O
de POS O
genoemde POS O
veertien POS O
dagen, POS O
kan POS O
elk POS O
op POS O
het POS O
kohier POS O
aange POS O
slagene POS O
legen POS O
zijneu POS O
aanslag POS O
bij POS O
het POS O
dijksbestuur POS O
schriftelijk POS O
bezwaar POS O
inbrengen. POS O
Na POS O
verloop POS O
vau POS O
dien POS O
tijd, POS O
stelt POS O
het POS O
dijksbestuur POS O
het POS O
kohier POS O
rast POS O
en POS O
zendt POS O
het, POS O
met POS O
de POS O
ingekomen POS O
bezwaarschriften POS O
en POS O
zijne POS O
be POS O
schouwingen POS O
daaromtrent, POS O
aan POS O
Ged. POS O
Staten POS O
ter POS O
invorderbaarverklaring. POS O
Gedeputeerde POS O
Staten POS O
doen POS O
omtrent POS O
de POS O
bezwaarschriften POS O
ten POS O
spoedig POS O
ste POS O
uitspraak, POS O
en POS O
geven POS O
daarvan POS O
kennis POS O
aan POS O
het POS O
dijksbestuur POS O
en POS O
aan POS O
de POS O
belanghebbenden. POS O
Art. POS O
19. POS O
De POS O
ontvanger POS O
van POS O
het POS O
dijksdistriet POS O
doet, POS O
nadat POS O
hem POS O
het POS O
goedgekeurde POS O
kohier POS O
door POS O
het POS O
dijksbestuur POS O
ter POS O
invordering POS O
is POS O
overhan POS O
digd POS O
, POS O
aan POS O
de POS O
aangeslagenen POS O
kosteloos POS O
eene POS O
kennisgeving POS O
van POS O
hun POS O
aan POS O
slag POS O
toekom.n POS O
Die POS O
kennisgeving POS O
bevat POS O
wat POS O
onder POS O
a POS O
en POS O
c POS O
van POS O
art. POS O
16 POS O
en POS O
onder POS O
e POS O
van POS O
art. POS O
17 POS O
is POS O
vermeld, POS O
met POS O
aanwijzing POS O
van POS O
de POS O
dageu POS O
en POS O
uren, POS O
waarop POS O
tot POS O
invordering POS O
van POS O
het POS O
verschuldigde, POS O
in POS O
de POS O
gemeenten POS O
in POS O
het POS O
vol POS O
gende POS O
artikel POS O
genoemd, POS O
zitting POS O
zal POS O
worden POS O
gehouden. POS O
Art. POS O
20. POS O
Onder POS O
goedkeuring POS O
van POS O
Gedeputeerdo POS O
Staten, POS O
bepaalt POS O
het POS O
vereenigd POS O
collcgie POS O
in POS O
hoeveel POS O
termijnen POS O
de POS O
aanslag POS O
zal POS O
moeten POS O
worden POS O
betaald, POS O
en POS O
op POS O
wclko POS O
dagen POS O
en POS O
uren POS O
de POS O
ontvanger POS O
van POS O
het POS O
dijksdistriet POS O
te POS O
Steenwijk, POS B-LOC
to POS O
Oldemarkt POS B-LOC
en POS O
te POS O
Volienhove POS B-LOC
zitting POS O
zal POS O
houden POS O
om POS O
die POS O
te POS O
innen. POS O
Do POS O
laatste POS O
termijn POS O
wordt POS O
niet POS O
later POS O
gesteld POS O
dan POS O
op POS O
1 POS O
Maart, POS O
vol POS O
gende POS O
op POS O
het POS O
jaar, POS O
waarover POS O
de POS O
belasting POS O
verschuldigd POS O
is. POS O
Art. POS O
21. POS O
De POS O
beambten, POS O
bij POS O
de POS O
artt. POS O
3 POS O
en POS O
8 POS O
bedoeld, POS O
worden POS O
door POS O
het POS O
dijksbestuur POS O
benoemd, POS O
geschorst POS O
en POS O
ontslagen. POS O
Zij POS O
genieten POS O
eene POS O
bezoldiging, POS O
waarvan POS O
het POS O
bedrag POS O
op POS O
voorstel POS O
van POS O
het POS O
dijksbestuur, POS O
door POS O
het POS O
vereenigd POS O
collegie, POS O
onder POS O
goedkeuring POS O
van POS O
Gedeputeerdo POS O
Staten, POS O
wordt POS O
vastgesteld. POS O
Zij POS O
leggen, POS O
alvorens POS O
hunne POS O
betrekking POS O
te POS O
aanvaarden, POS O
in POS O
eene POS O
ver POS O
gadering POS O
van POS O
het POS O
dijksbestuur, POS O
in POS O
handen POS O
van POS O
den POS O
dijkgraaf, POS O
den POS O
eed POS O
of POS O
belofte POS O
af, POS O
waarvan POS O
het POS O
formulier POS O
is POS O
vervat POS O
in POS O
de POS O
instructie, POS O
die POS O
het POS O
dijksbestuur POS O
voor POS O
hen POS O
vaststelt. POS O
Art. POS O
22. POS O
Het POS O
dijksbestuur POS O
zeudt POS O
jaarlijks POS O
vóór POS O
1 POS O
April POS O
aan POS O
Gedep. POS O
Staten POS O
een POS O
naauwkeurig POS O
verslag POS O
omtrent POS O
do POS O
werking POS O
van POS O
dit POS O
reglement. POS O
Het POS O
voorziet POS O
de POS O
beambten, POS O
in POS O
de POS O
artt. POS O
3 POS O
en POS O
8 POS O
genoemd, POS O
van POS O
de POS O
noodige POS O
inscructien POS O
en POS O
zendt POS O
daarvan POS O
een POS O
afschrift POS O
aan POS O
Gedep. POS O
Staten. POS O
Het POS O
stelt POS O
do POS O
biljetten POS O
voor POS O
do POS O
aangifte POS O
bij POS O
art. POS O
3, POS O
en POS O
die POS O
voor POS O
de POS O
aanvraag POS O
bij POS O
art. POS O
9 POS O
bedoeld, POS O
van POS O
de POS O
noodige POS O
inlichtingen POS O
voor POS O
de POS O
in POS O
vulling POS O
voorzien, POS O
kosteloos POS O
verkrijgbaar. POS O
STRAFBEPALINGEN. POS O
Art. POS O
23. POS O
Met POS O
eene POS O
geldboete POS O
van POS O
vijf POS O
en POS O
twintig POS O
lot POS O
vijf POS O
en POS O
zeven POS O
tig POS O
gulden, POS O
of POS O
met POS O
eene POS O
gevangenisstraf POS O
van POS O
een POS O
tot POS O
zeven POS O
dagen, POS O
of POS O
met POS O
die POS O
beide POS O
straffen POS O
to POS O
zamen, POS O
wordt POS O
gestraft; POS O
1". POS O
do POS O
vervecner, POS O
die, POS O
in POS O
strijd POS O
met POS O
het POS O
bepaalde POS O
bij POS O
art. POS O
3, POS O
veen POS O
spreidt POS O
of POS O
laat POS O
spreiden, POS O
zonder POS O
dat POS O
op POS O
aanvrage POS O
van POS O
de POS O
beambten, POS O
in POS O
art. POS O
24 POS O
genoemd, POS O
wordt POS O
overgelegd POS O
het POS O
bewijs POS O
van POS O
ontvangst, POS O
bij POS O
eerstgenoemd POS O
artikeltl POS O
vermeld; POS O
2°. POS O
de POS O
verveener, POS O
die POS O
iv POS O
strijd POS O
met POS O
het POS O
bepaalde POS O
bij POS O
art. POS O
14, POS O
gespreid POS O
veen POS O
vóór POS O
de POS O
opmeting POS O
heeft POS O
opgebroken POS O
of POS O
doen POS O
opbreken, POS O
tenzij POS O
hij POS O
daartoe POS O
volgens POS O
dat POS O
artikel POS O
de POS O
toestemming POS O
van POS O
het POS O
dijksbestuur POS O
had POS O
verkregen; POS O
3°. POS O
de POS O
vervecner POS O
die, POS O
deze POS O
toestemming POS O
van POS O
het POS O
dijksbestuur POS O
vcr POS O
i POS O
kregeu POS O
hebbende, POS O
het POS O
spreiveld POS O
daarvan POS O
niet POS O
onveranderlijk POS O
heeft POS O
ge POS O
laten, POS O
tot POS O
dat POS O
de POS O
opmeting POS O
heeft POS O
plaats POS O
gehad; POS O
4°. POS O
de POS O
vervecner, POS O
die POS O
de POS O
hoeveelheid POS O
van POS O
zijn POS O
gespreid POS O
veen, POS O
na POS O
dat POS O
dit POS O
door POS O
den POS O
bij POS O
art. POS O
8 POS O
genoemden POS O
beambte POS O
is POS O
opgemeten, POS O
heeft POS O
vergroot POS O
of POS O
doen POS O
vergrooten. POS O
Met POS O
cene POS O
geldboete POS O
van POS O
tien POS O
tot POS O
vijf POS O
en POS O
twintig POS O
gulden, POS O
of POS O
met POS O
eene POS O
gevangenisstraf POS O
van POS O
een POS O
tot POS O
drie POS O
dagen, POS O
of POS O
met POS O
die POS O
beide POS O
straffen POS O
to POS O
zamen, POS O
wordt POS O
gestraft: POS O
I°. POS O
de POS O
verveener, POS O
die POS O
in POS O
strijd POS O
met POS O
het POS O
bepaalde POS O
bij POS O
artt. POS O
fi POS O
en POS O
9, POS O
de POS O
aldaar POS O
vermelde POS O
paaltjes POS O
of POS O
plankjes POS O
verzuimd POS O
heeft POS O
te POS O
plaatsen, POS O
of POS O
niet POS O
behoorlijk POS O
in POS O
stand POS O
houdt; POS O
2°. POS O
do POS O
verveener, POS O
dio POS O
weigert POS O
aan POS O
den POS O
meter POS O
de POS O
bij POS O
het POS O
slot POS O
van POS O
art. POS O
12 POS O
verlangde POS O
inlichtingen POS O
te POS O
geven, POS O
of POS O
hem POS O
op POS O
het POS O
verveenings POS O
terrein POS O
toe POS O
te POS O
laten. POS O
Deze POS O
straffen POS O
zijn POS O
toepasselijk, POS O
voor POS O
zooverre POS O
do POS O
overtredingen, POS O
waarop POS O
zij POS O
zijn POS O
gesteld, POS O
niet POS O
reeds POS O
bij POS O
eene POS O
wet POS O
of POS O
een POS O
maatregel POS O
van POS O
algemeen POS O
bestuur POS O
zijn POS O
strafbaar POS O
gesteld. POS O
Art. POS O
24. POS O
Tot POS O
het POS O
constateren POS O
der POS O
overtredingen POS O
van POS O
dit POS O
reglement POS O
ziju POS O
bevoegd, POS O
behalve POS O
de POS O
ambtenaren POS O
bij POS O
het POS O
Wetboek POS O
van POS O
Strafvor POS O
dering POS O
daartoe POS O
aangewezen, POS O
de POS O
beambten POS O
bij POS O
do POS O
artt. POS O
3 POS O
en POS O
8 POS O
van POS O
dit POS O
reglement POS O
genoemd, POS O
alsmede POS O
de POS O
opzigtor POS O
der POS O
lage POS O
verveeningen POS O
in POS O
de POS O
provincie POS O
Overijssel. POS B-LOC
Aldus POS O
vastgesteld POS O
door POS O
de POS O
Staten POS B-ORG
van POS I-ORG
Overijssel. POS I-ORG
Zwolle, POS B-LOC
deu POS O
11 POS O
Julij POS O
1864. POS O
Be POS O
Stalen POS O
voornoemd, POS O
J. POS B-PER
A POS I-PER
SANDBERG, POS I-PER
Voorzitter. POS O
J. POS B-PER
C. POS I-PER
BÜSTERBOS, POS I-PER
Griffier. POS O
Behoort POS O
bij POS O
het POS O
Koninklijk POS O
besluit POS O
van POS O
den POS O
2 POS O
Maart POS O
1865, POS O
no. POS O
62. POS O
Mij POS O
bekend, POS O
Be POS O
Minister POS O
van POS O
Binneidandsche POS O
Zaken, POS O
(get.) POS O
THORBECKE. POS B-PER
Gegeven POS O
te POS O
Zwolle, POS B-LOC
den POS O
19 POS O
April POS O
1865. POS O
Bc POS O
Gedeputeerde POS O
Staten POS O
voornosmd, POS O
VAN POS B-PER
BIJLANDT, POS I-PER
Voorzitter. POS O
J. POS B-PER
C. POS I-PER
BÜSTERBOS, POS I-PER
Griffier. POS O
2». POS O
enz. POS O
Be POS O
Gedeputeerde POS O
Staten POS O
voornoemd POS O
, POS O
VAN POS B-PER
BIJ POS I-PER
LANDT, POS I-PER
Voorzitter. POS O
J POS B-PER
C POS I-PER
BÜSTERBOS, POS I-PER
Griffier. POS O
BURGEMEESTER POS O
en POS O
WETHOUDERS POS O
van POS O
ZWOLLE POS B-LOC
brengen POS O
ter POS O
kennis POS O
van POS O
de POS O
belanghebbenden POS O
: POS O
I°. POS O
dat POS O
door POS O
deu POS O
raad POS O
dezer POS O
gemeente, POS O
bij POS O
besluit POS O
van POS O
den POS O
10 POS O
April POS O
1805 POS O
, POS O
no. POS O
127, POS O
is POS O
vastgesteld POS O
hot POS O
navolgende: POS O
PLAN POS O
VAM POS O
GELDLEENING POS O
eener POS O
som POS O
van POS O
?Bo,ooo', POS O
ten POS O
behoeve POS O
der POS O
gemeente POS O
Zwolle, POS B-LOC
ter POS O
bestrijd? POS O
kosten POS O
van POS O
de POS O
uitbreiding POS O
der POS O
gasfabriek. POS O
Art. POS O
1. POS O
Deze POS O
gcldleening POS O
bestaat POS O
uit POS O
dertig POS O
aandeelcn POS O
, POS O
ieder POS O
van POS O
? POS O
duizend POS O
gulden, POS O
in POS O
blanco, POS O
uit POS O
te POS O
geven POS O
in POS O
heele POS O
en POS O
halve POS O
aan POS O
Art. POS O
2. POS O
De POS O
inschrijving POS O
voor POS O
deze POS O
leening POS O
geschiedt POS O
bij POS O
besloten POS O
briefjes POS O
, POS O
op POS O
den POS O
door POS O
Burgemeester POS O
en POS O
Wethouders POS O
te POS O
bepalen POS O
tijd POS O
, POS O
op POS O
het POS O
Stadhuis POS O
iv POS O
te POS O
leveren, POS O
tegen POS O
eene POS O
rente POS O
van POS O
hoogstens POS O
vijf POS O
procent POS O
per POS O
jaar. POS O
Art. POS O
3. POS O
Do POS O
inschrijving POS O
wordt POS O
gegund POS O
aan POS O
hem, POS O
die POS O
hei POS O
volle POS O
kapitaal POS O
aanbiedt POS O
tegen POS O
do POS O
laagste POS O
rente POS O
en POS O
de POS O
minste POS O
previsie. POS O
lïij POS O
aanbod POS O
vau POS O
gelijke POS O
voorwaarden POS O
wijst POS O
het POS O
lot POS O
deugene POS O
aan POS O
, POS O
aan POS O
wien POS O
de POS O
Leening POS O
gegund POS O
wordt. POS O
Art. POS O
4. POS O
De POS O
storting POS O
van POS O
het POS O
bedrag POS O
Tan POS O
het POS O
ingeschreven POS O
kapitaal POS O
moet POS O
plaats POS O
hebben POS O
op POS O
den POS O
1 POS O
Julij POS O
1865, POS O
ten POS O
kantore POS O
van POS O
den POS O
Ge POS O
meente POS O
ontvanger. POS O
Art. POS O
5. POS O
Voor POS O
do POS O
aandeelcn POS O
worden POS O
schuldbekentenissen POS O
afgegeven, POS O
genommerd POS O
1 POS O
tot POS O
30 POS O
, POS O
geteekend POS O
door POS O
dett POS O
Burgemeester POS O
eu POS O
Secretaris POS O
der POS O
gemeente, POS O
terwijl POS O
de POS O
halve POS O
aandeelcn POS O
met POS O
a POS O
en POS O
b POS O
worden POS O
aangeduid. POS O
Bij POS O
do POS O
schuldbekentenissen POS O
worden POS O
rente-coupons POS O
gevoegd, POS O
waarvan POS O
de POS O
eerste POS O
verschijnt POS O
op POS O
1 POS O
Januarij POS O
18G6. POS O
Art. POS O
fi. POS O
Jaarlijks, POS O
aan POS O
te POS O
vangen POS O
met POS O
1867, POS O
worden POS O
op POS O
den POS O
1 POS O
Januarij POS O
, POS O
tegen POS O
teruggave POS O
der POS O
schuldbekentenissen POS O
en POS O
der POS O
nog POS O
on POS O
verschenen POS O
coupons, POS O
twee POS O
of POS O
meer POS O
aaudeclen POS O
integraal POS O
afgelost. POS O
De POS O
uitloting POS O
der POS O
jaarlijks POS O
af POS O
te POS O
lossen POS O
aandeelen POS O
heeft POS O
plaats POS O
op POS O
den POS O
eersten POS O
maandag POS O
van POS O
de POS O
maand POS O
Julij. POS O
Van POS O
den POS O
uitstag POS O
geschiedt POS O
mededeeling POS O
door POS O
de POS O
Provinciale POS B-ORG
Overijssëlsche POS I-ORG
en POS I-ORG
Zwolsche POS I-ORG
Courant. POS I-ORG
Art. POS O
7. POS O
Op POS O
de POS O
jaarlijkscho POS O
begrootiug POS O
der POS O
Gemeente POS O
wordt POS O
de POS O
rente POS O
en POS O
'aflossing POS O
der POS O
geldlecning POS O
uitgetrokken POS O
, POS O
ten POS O
einde POS O
uit POS O
de POS O
gewone POS O
inkomsten POS O
der POS O
gemeente POS O
en POS O
speciaal POS O
uit POS O
het POS O
batig POS O
slot POS O
der POS O
gasfabriek POS O
te POS O
worden POS O
bestreden. POS O
2". POS O
dat POS O
de POS O
inschrijvingsbilletten POS O
voor POS O
die POS O
leening POS O
verzegeld POS O
zullen POS O
moeten POS O
worden POS O
ingezonden POS O
aan POS O
Burgemeester POS O
en POS O
wethouders POS O
vóór POS O
of POS O
op POS O
den POS O
1 POS O
Junij POS O
1805, POS O
des POS O
middags POS O
te POS O
12 POS O
uur. POS O
Zwolle, POS B-LOC
deu POS O
22 POS O
April POS O
1865. POS O
Burgemeester POS O
en POS O
Wethouders POS O
voornoemd POS O
, POS O
DE POS B-PER
VOS POS I-PER
VAN POS I-PER
STEENWIJK. POS I-PER
L. POS B-PER
N. POS I-PER
SCHUURMAN, POS I-PER
Secretaris. POS O
Buitenlandsche POS O
Nieuwstijdingen. POS O
RUSLAND. POS B-LOC
? POS O
De POS O
laatste POS O
beristen POS O
omtrent POS O
den POS O
toestand POS O
van POS O
den POS O
Groot POS O
vorst-Troonopvolger POS O
zijn POS O
van POS O
vrijdag POS O
voormiddag POS O
te POS O
11 POS O
uren. POS O
Op POS O
dat POS O
oogenblik POS O
was POS O
er POS O
eene POS O
tijdelijke POS O
kalmte POS O
na POS O
afwisse POS O
lende POS O
zware POS O
vlagen POS O
van POS O
ijlhoofdigheid POS O
en POS O
was POS O
de POS O
toestand POS O
iets POS O
gunstiger. POS O
Donderdag POS O
avond POS O
was POS O
de POS O
toestand POS O
zeer POS O
zorgbarend POS O
en POS O
nog POS O
is POS O
er POS O
steeds POS O
dadelijk POS O
gevaar. POS O
Donderdag POS O
was POS O
de POS O
lijder POS O
bewusteloos, POS O
was POS O
tle POS O
hersenontsteking POS O
toegenomen POS O
en POS O
de POS O
pols POS O
zwakker. POS O
De POS O
keizer POS O
van POS O
Kusland POS O
, POS O
die POS O
met POS O
grooten POS O
spoed POS O
heeft POS O
gereisd, POS O
kwam POS O
vrijdag POS O
morgen POS O
te POS O
Parijs POS B-LOC
aan POS O
en POS O
trok POS O
onmiddellijk POS O
door, POS O
na POS O
met POS O
keizer POS B-PER
Napoleon POS I-PER
aan POS O
het POS O
station POS O
eene POS O
zatnen POS O
komst POS O
te POS O
hebben POS O
gehad. POS O
Heden POS O
(zaturdag) POS O
middag POS O
12 POS O
ure POS O
werd POS O
hij POS O
te POS O
Nizza POS B-LOC
verwacht. POS O
Grootvorst. POS B-PER
Alexander POS I-PER
en POS O
groot POS B-PER
vorstin POS I-PER
Maria, POS I-PER
broeder POS O
en POS O
zuster POS O
van POS O
den POS O
kranke, POS O
bevinden POS O
zich POS O
reeds POS O
met POS O
den POS O
vorst POS O
van POS O
Leuchtenberg POS B-LOC
en POS O
de POS O
keizerin POS O
aan POS O
het POS O
ziekbed, POS O
dat POS O
door POS O
de POS O
laatstgenoemde POS O
geen POS O
oogenblik POS O
wordt POS O
verlaten. POS O
Beroemde, POS O
geneesheeren POS O
uit POS O
Petersburg POS B-LOC
en POS O
Weenen POS B-LOC
zijn POS O
naar POS O
Nizza POS B-LOC
ontboden. POS O
DENEMARKEN POS B-LOC
? POS O
De POS O
te POS O
Bendsburg POS B-LOC
tien POS O
19 POS O
April POS O
gehouden POS O
vergadering POS O
van POS O
gedelegeerden POS O
der POS O
Slees POS O
wijk-Holsteinsche POS O
vereenigingen POS O
heeft POS O
zich POS O
voor POS O
de POS O
spoedige POS O
vestiging POS O
der POS O
hertogdommen POS O
als POS O
zelfstandigen POS O
staat POS O
onder POS O
den POS O
hertog POS O
van POS O
Aiigustenburg POS B-LOC
ver POS O
klaard. POS O
Nadat POS O
de POS O
Oostenrijksche POS O
commissaris POS O
von POS B-PER
Halbhuber POS I-PER
tegen POS O
de POS O
door POS O
Pruissen POS B-LOC
verordende POS O
opnemingen POS O
in POS O
tle POS O
baai POS O
van POS O
Kiel POS B-LOC
geprotesteerd POS O
had, POS O
heeft POS O
de POS O
Sleeswijk-Holsteinsche POS O
lands POS O
regering POS O
de POS O
aanschrijving POS O
ingetrokken, POS O
waarbij POS O
het POS O
gemeente POS O
bestuur POS O
van POS O
Kiel POS B-LOC
verzocht POS O
wenl, POS O
de POS O
bedoelde POS O
werkzaamheden POS O
zoo POS O
veel POS O
mogelijk POS O
te POS O
helpen POS O
bevorderen. POS O
De POS O
Deensche POS O
rijksraatl POS O
is POS O
den POS O
11 POS O
en POS O
de POS O
rijksdag POS O
den POS O
18 POS O
dezer POS O
gesloten. POS O
DUITSCHLAND. POS B-LOC
? POS O
Pruissen POS O
heeft POS O
geantwoord POS O
op POS O
de. POS O
aanmerkingen POS O
van POS O
Oostenrijk POS B-LOC
omtrent POS O
het POS O
overbrengen POS O
der POS O
Pruissische POS O
maritieme POS O
inrigtingcn POS O
naar POS O
Kiel. POS B-LOC
De POS O
heer POS O
von POS B-PER
Uismarck POS I-PER
zegt POS O
verwon POS O
derd POS O
te POS O
zijn POS O
over POS O
tle POS O
klagten POS O
van POS O
Oostenrijk. POS B-LOC
De POS O
tegenwoor POS O
dige POS O
souvereinen POS O
van POS O
de POS O
hertogdommen POS O
behoorer POS O
partij POS O
te POS O
trekken POS O
van POS O
deze POS O
bezittingen POS O
ten POS O
behoeve POS O
van POS O
hunne POS O
mari POS O
tieme POS O
en POS O
verwante POS O
belangen. POS O
Oostenrijk POS B-LOC
moet POS O
de POS O
billijkheid POS O
der POS O
Pruissische POS O
aanspraken POS O
op POS O
Kiel POS B-LOC
erkennen, POS O
en POS O
is POS O
vrij POS O
om POS O
zijnerzijds POS O
de POS O
Oostenrijksche POS O
garnizoenen POS O
te POS O
versterken. POS O
?De POS O
heer POS O
Deak POS B-PER
heeft POS O
zijn POS O
programma POS O
opeifbaar POS O
gemaakt. POS O
Hij POS O
zegt POS O
daarin: POS O
?Wij POS O
willen POS O
de POS O
constitutionnele POS O
onafhan POS O
kelijkheid POS O
van POS O
Hongarije POS B-LOC
niet POS O
opgeven, POS O
maar POS O
zullen POS O
steeds POS O
bereid POS O
zijn POS O
om POS O
langs POS O
den POS O
wettelijken POS O
weg POS O
onze POS O
eigene POS O
wetten POS O
met POS O
de POS O
waarborgen POS O
van POS O
het POS O
duurzaam POS O
bestaan POS O
der POS O
monarchie POS O
in POS O
overeenstemming POS O
te POS O
brengen." POS O
ITALIË. POS B-LOC
? POS O
Dezer POS O
dagen POS O
hebben POS O
eenige POS O
dagbladen POS O
weder POS O
gewaagd POS O
van POS O
onderhandelingen, POS O
die POS O
tusschen POS O
het POS O
koningrijk POS O
Italië POS B-LOC
en POS O
het POS O
hof POS I-ORG
van POS O
Eome POS O
zouden POS O
aangeknoopt POS O
zijn POS O
over POS O
het POS O
bezetten POS O
van POS O
een POS O
aantal POS O
openstaande POS O
bisschopszetels POS O
in POS O
dat POS O
koningrijk. POS O
| {
"pile_set_name": "Github"
} |
using System;
using System.Collections.Generic;
using System.IO;
using Microsoft.Win32;
using System.Security;
using System.Reflection;
namespace OpenVPNUtils
{
/// <summary>
/// Provides static helper functions for locating the OpenVPN binary,
/// its configuration directory and its configuration (.ovpn) files.
/// </summary>
public static class UtilsHelper
{
    /// <summary>
    /// Describes a configuration directive that may appear in an ovpn file.
    /// </summary>
    public class ServiceConfigProperty
    {
        /// <summary>
        /// name of the directive as it appears in the configuration file
        /// </summary>
        public string name;

        /// <summary>
        /// true if the directive can be used to detect that the ovpn file
        /// is meant to be used by the OpenVPN service
        /// </summary>
        public bool serviceOnly;

        /// <summary>
        /// constructor which immediately initializes the class variables
        /// </summary>
        /// <param name="name">directive name</param>
        /// <param name="serviceOnly">whether the directive implies service usage</param>
        public ServiceConfigProperty(String name, bool serviceOnly)
        {
            this.name = name;
            this.serviceOnly = serviceOnly;
        }
    }

    // Generic delegate types used across the project.
    public delegate void Action();
    public delegate void Action<T1>(T1 a);
    public delegate void Action<T1, T2>(T1 a, T2 b);
    public delegate T0 Function<T0>();
    public delegate T0 Function<T0, T1>(T1 a);
    public delegate T0 Function<T0, T1, T2>(T1 a, T2 b);

    // Directives that mark a configuration as management related; entries
    // with serviceOnly == true are the ones IsConfigForService looks for.
    public static ServiceConfigProperty[] managementConfigItems = new ServiceConfigProperty[]{
        new ServiceConfigProperty("management-query-passwords",true),
        new ServiceConfigProperty("management-hold",true),
        new ServiceConfigProperty("management-signal",true),
        new ServiceConfigProperty("management-forget-disconnect",true),
        new ServiceConfigProperty("management",true),
        new ServiceConfigProperty("auth-retry interact",false)};

    /// <summary>
    /// tries to find the openvpn binary in %PATH%, in %PROGRAMS%\openvpn\bin
    /// and finally via the OpenVPN registry key
    /// </summary>
    /// <returns>path to openvpn.exe or null if it was not found</returns>
    static public string LocateOpenVPN()
    {
        // split %path%
        // FIX: GetEnvironmentVariable returns null when PATH is not set;
        // previously this caused a NullReferenceException on Split.
        string pathVar = System.Environment.GetEnvironmentVariable("PATH");
        if (pathVar != null)
        {
            string[] path = pathVar.Split(new Char[] { Path.PathSeparator });

            // search openvpn in each path
            foreach (string p in path)
            {
                string pa = Path.Combine(p, "openvpn.exe");
                try
                {
                    if ((new FileInfo(pa)).Exists)
                        return pa;
                }
                catch (DirectoryNotFoundException)
                {
                    // ignore invalid PATH entries
                }
            }
        }

        // search openvpn in program files
        pathVar = Path.Combine(System.Environment.GetFolderPath(
            Environment.SpecialFolder.ProgramFiles),
            "openvpn" + Path.DirectorySeparatorChar + "bin" +
            Path.DirectorySeparatorChar + "openvpn.exe");
        try
        {
            if (File.Exists(pathVar))
                return pathVar;
        }
        catch (DirectoryNotFoundException)
        {
        }

        // finally, ask the registry (32-bit view first, then WOW64 node)
        RegistryKey openVPNkey = Registry.LocalMachine.OpenSubKey("SOFTWARE\\OpenVPN");
        if (openVPNkey == null)
            openVPNkey = Registry.LocalMachine.OpenSubKey("SOFTWARE\\Wow6432Node\\OpenVPN");
        if (openVPNkey != null)
        {
            using (openVPNkey)
            {
                String OpenVPNexe = null;
                try
                {
                    if (openVPNkey.GetValueKind("exe_path") == RegistryValueKind.String)
                        OpenVPNexe = (String)openVPNkey.GetValue("exe_path");
                    // File.Exists(null) simply returns false
                    if (File.Exists(OpenVPNexe))
                        return OpenVPNexe;
                }
                catch (IOException)
                {
                    // thrown by GetValueKind when "exe_path" does not exist
                }
            }
        }

        // it was not found, return
        return null;
    }

    /// <summary>
    /// search all config files in a specific directory and, optionally,
    /// all subdirectories
    /// </summary>
    /// <param name="di">start directory</param>
    /// <param name="dest">list to save results</param>
    /// <param name="extension">file extension (without the dot)</param>
    /// <param name="recursive">whether to descend into subdirectories</param>
    public static void GetConfigFiles(DirectoryInfo di, List<string> dest,
        string extension, bool recursive)
    {
        // add all files of the current directory
        FileInfo[] files = di.GetFiles("*." + extension);
        foreach (FileInfo fi in files)
        {
            dest.Add(fi.FullName);
        }

        if (recursive)
        {
            foreach (DirectoryInfo d in di.GetDirectories())
            {
                GetConfigFiles(d, dest, extension, recursive);
            }
        }
    }

    /// <summary>
    /// try to locate the configuration directory of openvpn
    /// (two levels above the binary directory, in "config")
    /// </summary>
    /// <param name="vpnbin">path where openvpn lies</param>
    /// <returns>path to configuration directory or null</returns>
    static public string LocateOpenVPNConfigDir(string vpnbin)
    {
        string p = Path.GetFullPath(Path.Combine(vpnbin,
            string.Join(Path.DirectorySeparatorChar.ToString(),
            new string[] { "..", "..", "config" })));
        try
        {
            if ((new DirectoryInfo(p)).Exists)
                return p;
        }
        catch (DirectoryNotFoundException)
        {
        }
        return null;
    }

    /// <summary>
    /// find all *.ovpn configuration files in a specific directory
    /// </summary>
    /// <param name="configdir">the directory (may be null or empty)</param>
    /// <returns>list of configuration files (possibly empty, never null)</returns>
    static public List<String> LocateOpenVPNConfigs(string configdir)
    {
        List<string> files = new List<string>();
        if (configdir == null || configdir.Length == 0)
            return files;
        try
        {
            GetConfigFiles(new DirectoryInfo(configdir), files,
                "ovpn", true);
        }
        catch (DirectoryNotFoundException)
        { }
        return files;
    }

    /// <summary>
    /// check if the configuration file is to be used with a management console
    /// </summary>
    /// <param name="config">the config filename</param>
    /// <returns>true if the config file is to be used as a service</returns>
    static public bool IsConfigForService(string config)
    {
        // if only a single management configuration item is present,
        // still assume the file is meant for use with the service
        ConfigParser cf = new ConfigParser(config);
        foreach (var directive in managementConfigItems)
        {
            if (directive.serviceOnly)
                if (cf.GetValue(directive.name) != null)
                    return true;
        }
        return false;
    }

    /// <summary>
    /// Returns the config path used by the OpenVPNManager Service
    /// (a "config" directory next to the executing assembly).
    /// </summary>
    public static String FixedConfigDir
    {
        get
        {
            return Path.GetDirectoryName(Assembly.GetExecutingAssembly().Location) + "\\config";
        }
    }

    /// <summary>
    /// Returns the path used for log files by the OpenVPN processes
    /// controlled by the OpenVPNManager Service.
    /// </summary>
    public static String FixedLogDir
    {
        get
        {
            return Path.GetDirectoryName(Assembly.GetExecutingAssembly().Location) + "\\log";
        }
    }

    /// <summary>
    /// Returns the list of config files below FixedConfigDir whose
    /// service/management classification matches the requested kind.
    /// </summary>
    /// <param name="managedServices">true to return service configs,
    /// false to return interactive configs</param>
    /// <returns>list of matching configuration files</returns>
    public static List<String> LocateOpenVPNManagerConfigs(bool managedServices)
    {
        List<string> files = new List<string>();
        try
        {
            GetConfigFiles(
                new DirectoryInfo(FixedConfigDir),
                files, "ovpn", true);
        }
        catch (DirectoryNotFoundException)
        { }
        List<string> filesResult = new List<string>();
        foreach (String file in files)
        {
            if (IsConfigForService(file) == managedServices)
                filesResult.Add(file);
        }
        return filesResult;
    }

    /// <summary>
    /// returns the directory which is used by the OpenVPN server.
    /// </summary>
    /// <returns>the directory or an empty string on errors</returns>
    public static string LocateOpenVPNServiceDir()
    {
        RegistryKey k = Registry.LocalMachine.OpenSubKey(
            @"SOFTWARE\OpenVPN", false);
        if (k == null)
        {
            k = Registry.LocalMachine.OpenSubKey(
                @"SOFTWARE\Wow6432Node\OpenVPN", false);
        }
        // FIX: previously k was dereferenced unconditionally and threw a
        // NullReferenceException when OpenVPN is not installed, although
        // the documented contract is to return an empty string on errors.
        if (k == null)
            return "";
        using (k)
        {
            return (string)k.GetValue("config_dir", "");
        }
    }
}
}
| {
"pile_set_name": "Github"
} |
--- a/drivers/mtd/Kconfig
+++ b/drivers/mtd/Kconfig
@@ -196,6 +196,12 @@ config MTD_MYLOADER_PARTS
You will still need the parsing functions to be called by the driver
for your particular device. It won't happen automatically.
+config MTD_TPLINK_PARTS
+ tristate "TP-Link AR7XXX/AR9XXX partitioning support"
+ depends on ATH79
+ ---help---
+ TBD.
+
comment "User Modules And Translation Layers"
#
--- a/drivers/mtd/Makefile
+++ b/drivers/mtd/Makefile
@@ -16,6 +16,7 @@ obj-$(CONFIG_MTD_AR7_PARTS) += ar7part.o
obj-$(CONFIG_MTD_BCM63XX_PARTS) += bcm63xxpart.o
obj-$(CONFIG_MTD_BCM47XX_PARTS) += bcm47xxpart.o
obj-$(CONFIG_MTD_MYLOADER_PARTS) += myloader.o
+obj-$(CONFIG_MTD_TPLINK_PARTS) += tplinkpart.o
obj-$(CONFIG_MTD_CYBERTAN_PARTS) += cybertan_part.o
# 'Users' - code which presents functionality to userspace.
| {
"pile_set_name": "Github"
} |
# -*- coding: utf-8 -*-
from gluon import *
from s3 import *
from s3layouts import *
try:
from .layouts import *
except ImportError:
pass
import s3menus as default
# =============================================================================
class S3MainMenu(default.S3MainMenu):
    """
        Custom Application Main Menu

        The main menu consists of several sub-menus, each of which can
        be customised separately as a method of this class. The overall
        composition of the menu is defined in the menu() method, which can
        be customised as well:

            Function        Sub-Menu            Access to (standard)
            menu_modules()  the modules menu    the Eden modules
            menu_gis()      the GIS menu        GIS configurations
            menu_admin()    the Admin menu      System/User Administration
            menu_lang()     the Language menu   Selection of the GUI locale
            menu_auth()     the User menu       Login, Logout, User Profile
            menu_help()     the Help menu       Contact page, About page

        The standard uses the MM layout class for main menu items - but you
        can of course use a custom layout class which you define in
        layouts.py.

        Additional sub-menus can simply be defined as additional functions
        in this class, and then be included in the menu() method.

        Each sub-menu function returns a list of menu items, only the menu()
        function must return a layout class instance.
    """

    # -------------------------------------------------------------------------
    @classmethod
    def menu(cls):
        """ Compose Menu """

        # Modules menu plus language, auth and admin sub-menus
        main_menu = MM()(
            cls.menu_modules(),
            cls.menu_lang(right=True),
            cls.menu_auth(),
            cls.menu_admin(),
        )
        return main_menu

    # -------------------------------------------------------------------------
    @classmethod
    def menu_modules(cls):
        """
            Custom Modules Menu

            Returns a list of MM items; the set of entries depends on the
            user's roles (ADMIN gets the full set, publishers a reduced set,
            anonymous/CUG readers a minimal set).
        """

        auth = current.auth
        has_role = auth.s3_has_role
        home_page_menu = homepage()

        if auth.s3_logged_in():
            alerting_menu = MM("Other Feeds", c="cap", f="alert")
            mapping_menu = MM("Map", c="gis", f="index")
            recipient_menu = MM("Recipients", c="pr", f="subscription",
                                vars={"option": "manage_recipient"})
            # NOTE(review): alert_hub_menu is built here but never included
            # in any of the returned lists below - confirm whether this is
            # intentional or a leftover
            alert_hub_menu = MM("CAP Feeds", c="cap", f="alert",
                                vars={"~.external": True})

            if has_role("ADMIN"):
                # Full set
                # @ToDo: Add menu entries for "Create RSS Feed for CAP" & "Create RSS Feed for CMS"
                return [home_page_menu,
                        alerting_menu,
                        MM("Organizations", c="org", f="organisation"),
                        MM("Persons", c="pr", f="person"),
                        recipient_menu,
                        mapping_menu,
                        ]
            else:
                # Publisher sees minimal options
                menus_ = [home_page_menu,
                          alerting_menu,
                          ]
                # Map entry only for MAP_ADMIN; both paths return menus_
                if has_role("MAP_ADMIN"):
                    menus_.append(mapping_menu)
                else:
                    return menus_
                return menus_

        # Public or CUG reader sees minimal options
        return [home_page_menu,
                MM("Other Feeds", c="default", f="index", args=["alert_map"]),
                ]

    # -------------------------------------------------------------------------
    @classmethod
    def menu_auth(cls, **attr):
        """
            Auth Menu: login/registration entries for anonymous users,
            profile/subscription/logout entries for logged-in users
        """

        auth = current.auth

        if not auth.is_logged_in():
            # Anonymous: offer login, password recovery and registration;
            # after login, redirect to the alerts list
            menu_auth = MM("Login", link=False, right=True)(
                MM("Login", c="default", f="user", m="login",
                   vars={"_next": URL(c="cap", f="alert")}),
                MM("Lost Password", c="default", f="user",
                   m="retrieve_password"),
                MM("Request for Account", c="default", f="user",
                   m="register"),
            )
        else:
            # Logged-in: menu is labelled with the user's email address
            user_id = auth.s3_logged_in_person()
            menu_auth = MM(auth.user.email, link=False, right=True)(
                MM("Subscription", c="pr", f="subscription"),
                MM("Edit Profile", c="pr", f="person", args=[user_id]),
                MM("Change Password", c="default", f="user",
                   m="change_password"),
                MM("Logout", c="default", f="user", m="logout"),
            )
        return menu_auth

    # -------------------------------------------------------------------------
    @classmethod
    def menu_admin(cls, **attr):
        """ Administrator Menu (ADMIN role only, None otherwise) """

        if current.auth.s3_has_role("ADMIN"):
            # Use the localized module name from deployment settings
            name_nice = current.deployment_settings.modules["admin"].get("name_nice")
            menu_admin = MM(name_nice, c="admin", right=True, **attr)(
                MM("Settings", f="setting"),
                MM("Manage Users", f="user"),
                MM("Database", c="appadmin", f="index"),
                MM("Error Tickets", f="errors"),
                #MM("Synchronization", c="sync", f="index"),
            )
        else:
            menu_admin = None

        return menu_admin
# =============================================================================
class S3OptionsMenu(default.S3OptionsMenu):
    """
        Custom Controller Menus

        The options menu (left-hand options menu) is individual for each
        controller, so each controller has its own options menu function
        in this class.

        Each of these option menu functions can be customised separately,
        by simply overriding (re-defining) the default function. The
        options menu function must return an instance of the item layout.

        The standard menu uses the M item layout class, but you can of
        course also use any other layout class which you define in
        layouts.py (can also be mixed).

        Make sure additional helper functions in this class don't match
        any current or future controller prefix (e.g. by using an
        underscore prefix).
    """

    # -------------------------------------------------------------------------
    def admin(self):
        """ ADMIN menu """

        ADMIN = current.session.s3.system_roles.ADMIN
        settings_messaging = self.settings_messaging()

        # Settings, user management and database access for administrators
        return M(restrict=[ADMIN])(
                    M("Settings", c="admin", f="setting")(
                        settings_messaging,
                    ),
                    M("User Management", c="admin", f="user")(
                        M("Create User", m="create"),
                        M("List All Users"),
                        M("Import Users", m="import"),
                        M("List All Roles", f="role"),
                        M("List All Organization Approvers & Whitelists", f="organisation"),
                    ),
                    M("Database", c="appadmin", f="index")(
                        M("Raw Database access", c="appadmin", f="index")
                    ),
                    M("Error Tickets", c="admin", f="errors")
                )

    # -------------------------------------------------------------------------
    @staticmethod
    def cap():
        """
            CAP menu

            Two variants: the Alert Hub view (when the request carries
            ~.external=True) and the regular alert-editing view.
        """

        if current.request.get_vars["~.external"] == "True":
            # Alert Hub
            return M(c="cap")(
                        M("CAP Feeds", f="alert", vars={"~.external": True})(
                            M("Map View", c="default",
                              f="index", args=["alert_hub_cop"]),
                        ),
                    )
        else:
            s3_has_role = current.auth.s3_has_role
            # Callable check: editors and approvers may create/import alerts
            cap_editors = lambda i: s3_has_role("ALERT_EDITOR") or \
                                    s3_has_role("ALERT_APPROVER")
            # NOTE(review): "To Review" passes the *result* of
            # s3_has_role("ALERT_APPROVER") as check (evaluated once per
            # request), while other entries pass a callable - confirm this
            # difference is intentional
            return M(c="cap")(
                        M("Other Feeds", f="alert")(
                            M("Create", m="create", check=cap_editors),
                            M("Import from Feed URL", m="import_feed", p="create",
                              check=cap_editors),
                            M("View on Map", c="default", f="index",
                              args=["alert_map"]),
                            M("To Review", c="cap", f="alert", m="review",
                              check=s3_has_role("ALERT_APPROVER")),
                        ),
                        M("View", check=cap_editors)(
                            M("Approved Alerts", c="cap", f="alert",
                              vars={"~.approved_by__ne": None},
                              ),
                            M("Incomplete Alerts", c="cap", f="alert", m="review",
                              vars={"status": "incomplete"}
                              ),
                        ),
                        M("Templates", f="template")(
                            M("Create", m="create",
                              restrict=["ADMIN"]),
                        ),
                        M("Warning Classifications", f="warning_priority",
                          restrict=["ADMIN"])(
                            M("Create", m="create"),
                            M("Import from CSV", m="import", p="create"),
                        ),
                        M("Predefined Alert Area", f="area", vars={"~.is_template": True},
                          restrict=["ADMIN"])(
                            M("Create", m="create"),
                            M("Import from CSV", m="import", p="create"),
                        ),
                        M("Event Types", c="event", f="event_type",
                          restrict=["ADMIN"])(
                            M("Create", m="create"),
                            M("Import from CSV", m="import", p="create"),
                        ),
                    )

    # -------------------------------------------------------------------------
    @staticmethod
    def org():
        """ ORG / Organization Registry """

        return M(c="org")(
                    M("Organizations", f="organisation")(
                        M("Create", m="create"),
                        M("Import", m="import")
                    ),
                )

    # -------------------------------------------------------------------------
    @staticmethod
    def pr():
        """
            PR / Person Registry

            Two variants: recipient management (when the request carries
            option=manage_recipient) and the regular person/group registry.
        """

        ADMIN = current.session.s3.system_roles.ADMIN

        # NOTE(review): restrict is passed the bare role here, whereas
        # admin() wraps it in a list (restrict=[ADMIN]) - confirm both
        # forms are accepted by the M layout
        if current.request.vars.option == "manage_recipient":
            return M(c="pr", restrict=ADMIN)(
                        M("Manage Recipients", f="subscription",
                          vars={"option": "manage_recipient"})(
                            M("Add Recipient to List", c="default", f="index",
                              m="subscriptions", vars={"option": "manage_recipient"})
                        )
                    )
        else:
            return M(c="pr", restrict=ADMIN)(
                        M("Persons", f="person")(
                            M("Create", m="create"),
                        ),
                        M("Groups", f="group")(
                            M("Create", m="create"),
                        ),
                    )

    # -------------------------------------------------------------------------
    @classmethod
    def settings_messaging(cls):
        """ Messaging settings menu items:

            These items are used in multiple menus, but each item instance can
            always only belong to one parent, so we need to re-instantiate
            with the same parameters, and therefore this is defined as a
            function here.

            This separates the RSS containing CAP Feeds or CMS Feeds
        """

        return [
            M("Email Channels (Inbound)", c="msg", f="email_channel"),
            M("Facebook Channels", c="msg", f="facebook_channel"),
            M("RSS Channels", link=False)(
                M("Create RSS Feed for CAP", c="msg", f="rss_channel", vars={"type": "cap"}),
                M("Create RSS Feed for CMS", c="msg", f="rss_channel"),
            ),
            M("SMS Outbound Gateways", c="msg", f="sms_outbound_gateway")(
                M("SMS Modem Channels", c="msg", f="sms_modem_channel"),
                M("SMS SMTP Channels", c="msg", f="sms_smtp_channel"),
                M("SMS WebAPI Channels", c="msg", f="sms_webapi_channel"),
            ),
            M("Mobile Commons Channels", c="msg", f="mcommons_channel"),
            M("Twilio Channels", c="msg", f="twilio_channel"),
            M("Twitter Channels", c="msg", f="twitter_channel"),
            M("Parsers", c="msg", f="parser"),
        ]
# END =========================================================================
| {
"pile_set_name": "Github"
} |
/*
* (C) Copyright 2013 Kurento (http://kurento.org/)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package org.kurento.repository;
import org.kurento.commons.exception.KurentoException;
/**
 * Signals that a caller attempted to create a repository item whose id
 * collides with an item that already exists in the repository.
 *
 * @author Micael Gallego (micael.gallego@gmail.com)
 */
public class DuplicateItemException extends KurentoException {

  private static final long serialVersionUID = 3515920000618086477L;

  /**
   * Builds the exception for the given conflicting item id.
   *
   * @param id the repository item id that is already in use
   */
  public DuplicateItemException(String id) {
    // Same message text as before, assembled via String.format
    super(String.format("An item with id %s already exists", id));
  }
}
| {
"pile_set_name": "Github"
} |
/**
@license
Copyright (c) 2017 The Polymer Project Authors. All rights reserved.
This code may only be used under the BSD style license found at http://polymer.github.io/LICENSE.txt
The complete set of authors may be found at http://polymer.github.io/AUTHORS.txt
The complete set of contributors may be found at http://polymer.github.io/CONTRIBUTORS.txt
Code distributed by Google as part of the polymer project is also
subject to an additional IP rights grant found at http://polymer.github.io/PATENTS.txt
*/
'use strict';
/*
* Polyfills loaded: Custom Elements ES5 Shim
*/
import '../../node_modules/@webcomponents/custom-elements/src/native-shim.js';
| {
"pile_set_name": "Github"
} |
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build arm64,linux
package unix
const _SYS_dup = SYS_DUP3
//sys Fchown(fd int, uid int, gid int) (err error)
//sys Fstat(fd int, stat *Stat_t) (err error)
//sys Fstatat(fd int, path string, stat *Stat_t, flags int) (err error)
//sys Fstatfs(fd int, buf *Statfs_t) (err error)
//sys Ftruncate(fd int, length int64) (err error)
//sysnb Getegid() (egid int)
//sysnb Geteuid() (euid int)
//sysnb Getgid() (gid int)
//sysnb Getrlimit(resource int, rlim *Rlimit) (err error)
//sysnb Getuid() (uid int)
//sys Listen(s int, n int) (err error)
//sys Pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64
//sys Pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64
//sys Seek(fd int, offset int64, whence int) (off int64, err error) = SYS_LSEEK
//sys Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) = SYS_PSELECT6
//sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error)
//sys Setfsgid(gid int) (err error)
//sys Setfsuid(uid int) (err error)
//sysnb Setregid(rgid int, egid int) (err error)
//sysnb Setresgid(rgid int, egid int, sgid int) (err error)
//sysnb Setresuid(ruid int, euid int, suid int) (err error)
//sysnb Setrlimit(resource int, rlim *Rlimit) (err error)
//sysnb Setreuid(ruid int, euid int) (err error)
//sys Shutdown(fd int, how int) (err error)
//sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error)
// Stat fills stat with status information for path, following symlinks.
// linux/arm64 has no plain stat syscall, so this delegates to Fstatat
// relative to the current working directory (AT_FDCWD) with no flags.
func Stat(path string, stat *Stat_t) (err error) {
	return Fstatat(AT_FDCWD, path, stat, 0)
}
// Lchown changes the owner of path without following symlinks, by
// delegating to Fchownat with AT_SYMLINK_NOFOLLOW (linux/arm64 has no
// plain lchown syscall).
func Lchown(path string, uid int, gid int) (err error) {
	return Fchownat(AT_FDCWD, path, uid, gid, AT_SYMLINK_NOFOLLOW)
}
// Lstat fills stat with status information for path without following
// symlinks, by delegating to Fstatat with AT_SYMLINK_NOFOLLOW
// (linux/arm64 has no plain lstat syscall).
func Lstat(path string, stat *Stat_t) (err error) {
	return Fstatat(AT_FDCWD, path, stat, AT_SYMLINK_NOFOLLOW)
}
//sys Statfs(path string, buf *Statfs_t) (err error)
//sys SyncFileRange(fd int, off int64, n int64, flags int) (err error)
//sys Truncate(path string, length int64) (err error)
//sys accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error)
//sys accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error)
//sys bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error)
//sys connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error)
//sysnb getgroups(n int, list *_Gid_t) (nn int, err error)
//sysnb setgroups(n int, list *_Gid_t) (err error)
//sys getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error)
//sys setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error)
//sysnb socket(domain int, typ int, proto int) (fd int, err error)
//sysnb socketpair(domain int, typ int, proto int, fd *[2]int32) (err error)
//sysnb getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error)
//sysnb getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error)
//sys recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error)
//sys sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error)
//sys recvmsg(s int, msg *Msghdr, flags int) (n int, err error)
//sys sendmsg(s int, msg *Msghdr, flags int) (n int, err error)
//sys mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int64) (xaddr uintptr, err error)
// Getpagesize returns the page size assumed for linux/arm64. It is
// hard-coded to 64 KiB — the largest granule arm64 supports — rather
// than queried from the kernel at run time.
func Getpagesize() int { return 65536 }
//sysnb Gettimeofday(tv *Timeval) (err error)
//sysnb Time(t *Time_t) (tt Time_t, err error)
// TimespecToNsec flattens ts (seconds + nanoseconds) into a single
// nanosecond count.
func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) }
// NsecToTimespec splits a nanosecond count into the seconds /
// nanoseconds pair carried by a Timespec.
func NsecToTimespec(nsec int64) (ts Timespec) {
	ts.Sec, ts.Nsec = nsec/1e9, nsec%1e9
	return
}
// TimevalToNsec flattens tv (seconds + microseconds) into nanoseconds.
func TimevalToNsec(tv Timeval) int64 { return int64(tv.Sec)*1e9 + int64(tv.Usec)*1e3 }
// NsecToTimeval converts a nanosecond count into the seconds /
// microseconds pair carried by a Timeval, rounding any sub-microsecond
// remainder up to a whole microsecond.
func NsecToTimeval(nsec int64) (tv Timeval) {
	nsec += 999 // round up to microsecond
	tv.Sec, tv.Usec = nsec/1e9, nsec%1e9/1e3
	return
}
// Pipe creates a unidirectional data channel, storing the read end in
// p[0] and the write end in p[1]. arm64 has no plain pipe(2) syscall,
// so this delegates to pipe2(2) with flags=0.
func Pipe(p []int) (err error) {
	if len(p) != 2 {
		return EINVAL
	}
	var pp [2]_C_int
	err = pipe2(&pp, 0)
	// Only publish the descriptors on success so a failed call does not
	// overwrite the caller's slice with garbage.
	if err == nil {
		p[0] = int(pp[0])
		p[1] = int(pp[1])
	}
	return
}
//sysnb pipe2(p *[2]_C_int, flags int) (err error)
// Pipe2 is like Pipe but passes flags (e.g. O_CLOEXEC, O_NONBLOCK)
// straight through to pipe2(2).
func Pipe2(p []int, flags int) (err error) {
	if len(p) != 2 {
		return EINVAL
	}
	var pp [2]_C_int
	err = pipe2(&pp, flags)
	// Leave p untouched unless the syscall succeeded.
	if err == nil {
		p[0] = int(pp[0])
		p[1] = int(pp[1])
	}
	return
}
// PC returns the program counter saved in the ptrace register snapshot.
func (r *PtraceRegs) PC() uint64 { return r.Pc }
// SetPC stores pc as the program counter in the ptrace register snapshot.
func (r *PtraceRegs) SetPC(pc uint64) { r.Pc = pc }
// SetLen sets the iovec buffer length, converting from the int used by
// portable code to the uint64 field of the arm64 iovec layout.
func (iov *Iovec) SetLen(length int) {
	iov.Len = uint64(length)
}
// SetControllen sets the length of the ancillary (control) data buffer,
// converting to the uint64 field of the arm64 msghdr layout.
func (msghdr *Msghdr) SetControllen(length int) {
	msghdr.Controllen = uint64(length)
}
// SetLen sets the control-message header length, converting to the
// uint64 field of the arm64 cmsghdr layout.
func (cmsg *Cmsghdr) SetLen(length int) {
	cmsg.Len = uint64(length)
}
// InotifyInit creates an inotify instance. arm64 has no plain
// inotify_init(2) syscall, so this delegates to InotifyInit1 with no flags.
func InotifyInit() (fd int, err error) {
	return InotifyInit1(0)
}
// TODO(dfc): constants that should be in zsysnum_linux_arm64.go, remove
// these when the deprecated syscalls that the syscall package relies on
// are removed.
const (
	// NOTE(review): these numbers are all >= 1024 and appear to come from
	// the kernel's deprecated-syscall compatibility table for arm64, which
	// never had the classic variants natively — confirm against the
	// kernel's asm-generic/unistd.h before relying on them.
	SYS_GETPGRP      = 1060
	SYS_UTIMES       = 1037
	SYS_FUTIMESAT    = 1066
	SYS_PAUSE        = 1061
	SYS_USTAT        = 1070
	SYS_UTIME        = 1063
	SYS_LCHOWN       = 1032
	SYS_TIME         = 1062
	SYS_EPOLL_CREATE = 1042
	SYS_EPOLL_WAIT   = 1069
)
| {
"pile_set_name": "Github"
} |
/*
* tst_getsize.c --- this function tests the getsize function
*
* Copyright (C) 1997 by Theodore Ts'o.
*
* %Begin-Header%
* This file may be redistributed under the terms of the GNU Library
* General Public License, version 2.
* %End-Header%
*/
#include "config.h"
#include <stdio.h>
#include <string.h>
#if HAVE_UNISTD_H
#include <unistd.h>
#endif
#include <fcntl.h>
#include <time.h>
#include <sys/stat.h>
#include <sys/types.h>
#if HAVE_ERRNO_H
#include <errno.h>
#endif
#include "ext2_fs.h"
#include "ext2fs.h"
/*
 * Test driver: reports the size, in 1024-byte blocks, of the device
 * named on the command line, exercising ext2fs_get_device_size().
 */
int main(int argc, const char *argv[])
{
	errcode_t retval;
	blk_t blocks;

	if (argc < 2) {
		fprintf(stderr, "%s device\n", argv[0]);
		exit(1);
	}
	add_error_table(&et_ext2_error_table);
	retval = ext2fs_get_device_size(argv[1], 1024, &blocks);
	if (retval) {
		com_err(argv[0], retval, "while getting device size");
		exit(1);
	}
	/* Fixed: message used to read "%s is device has %u blocks." */
	printf("Device %s has %u blocks.\n", argv[1], blocks);
	return 0;
}
| {
"pile_set_name": "Github"
} |
<?xml version="1.0" encoding="UTF-8"?>
<configuration>
<appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
<filter class="ch.qos.logback.classic.filter.ThresholdFilter">
<level>INFO</level>
</filter>
<encoder>
<pattern>[%date{ISO8601}] [%level] [%logger] [%marker] [%thread] - %msg MDC: {%mdc}%n</pattern>
</encoder>
</appender>
<appender name="FILE" class="ch.qos.logback.core.FileAppender">
<file>target/myapp-dev.log</file>
<encoder>
<pattern>[%date{ISO8601}] [%level] [%logger] [%marker] [%thread] - %msg MDC: {%mdc}%n</pattern>
</encoder>
</appender>
<root level="DEBUG">
<appender-ref ref="STDOUT"/>
<appender-ref ref="FILE"/>
</root>
</configuration>
| {
"pile_set_name": "Github"
} |
/*
** Performance test for SQLite.
**
** This program reads ASCII text from a file named on the command-line
** and submits that text to SQLite for evaluation. A new database
** is created at the beginning of the program. All statements are
** timed using the high-resolution timer built into Intel-class processors.
**
** To compile this program, first compile the SQLite library separately
** with full optimizations. For example:
**
** gcc -c -O6 -DSQLITE_THREADSAFE=0 sqlite3.c
**
** Then link against this program. But do not optimize this program
** because that defeats the hi-res timer.
**
** gcc speedtest8.c sqlite3.o -ldl -I../src
**
** Then run this program with a single argument which is the name of
** a file containing SQL script that you want to test:
**
** ./a.out test.db test.sql
*/
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <ctype.h>
#include <time.h>
#if defined(_MSC_VER)
#include <windows.h>
#else
#include <unistd.h>
#include <sys/times.h>
#include <sched.h>
#endif
#include "sqlite3.h"
/*
** hwtime.h contains inline assembler code for implementing
** high-performance timing routines.
*/
#include "hwtime.h"
/*
** Timers
*/
static sqlite_uint64 prepTime = 0;
static sqlite_uint64 runTime = 0;
static sqlite_uint64 finalizeTime = 0;
/*
** Prepare and run a single statement of SQL.
*/
/* db: open database handle; zSql: one complete SQL statement;
** bQuiet: nonzero suppresses the per-statement progress output.
** Cycle counts for each phase accumulate into the global timers
** prepTime / runTime / finalizeTime. */
static void prepareAndRun(sqlite3 *db, const char *zSql, int bQuiet){
  sqlite3_stmt *pStmt;
  const char *stmtTail;          /* tail pointer required by prepare; unused */
  sqlite_uint64 iStart, iElapse;
  int rc;
  if (!bQuiet){
    printf("***************************************************************\n");
  }
  if (!bQuiet) printf("SQL statement: [%s]\n", zSql);
  /* Phase 1: compile the statement. */
  iStart = sqlite3Hwtime();
  rc = sqlite3_prepare_v2(db, zSql, -1, &pStmt, &stmtTail);
  iElapse = sqlite3Hwtime() - iStart;
  prepTime += iElapse;
  if (!bQuiet){
    printf("sqlite3_prepare_v2() returns %d in %llu cycles\n", rc, iElapse);
  }
  if( rc==SQLITE_OK ){
    int nRow = 0;
    /* Phase 2: step to completion; row contents are discarded. */
    iStart = sqlite3Hwtime();
    while( (rc=sqlite3_step(pStmt))==SQLITE_ROW ){ nRow++; }
    iElapse = sqlite3Hwtime() - iStart;
    runTime += iElapse;
    if (!bQuiet){
      printf("sqlite3_step() returns %d after %d rows in %llu cycles\n",
             rc, nRow, iElapse);
    }
    /* Phase 3: finalize (statement is only finalized if prepare succeeded). */
    iStart = sqlite3Hwtime();
    rc = sqlite3_finalize(pStmt);
    iElapse = sqlite3Hwtime() - iStart;
    finalizeTime += iElapse;
    if (!bQuiet){
      printf("sqlite3_finalize() returns %d in %llu cycles\n", rc, iElapse);
    }
  }
}
/* Entry point: parse options, slurp the SQL script, run each complete
** statement through prepareAndRun(), then print cycle and CPU summaries. */
int main(int argc, char **argv){
  sqlite3 *db;
  int rc;
  int nSql;
  char *zSql;
  int i, j;
  FILE *in;
  sqlite_uint64 iStart, iElapse;
  sqlite_uint64 iSetup = 0;
  int nStmt = 0;
  int nByte = 0;
  const char *zArgv0 = argv[0];  /* saved: argv is shifted by option parsing */
  int bQuiet = 0;
#if !defined(_MSC_VER)
  struct tms tmsStart, tmsEnd;
  clock_t clkStart, clkEnd;
  long clkTck;                   /* times() reports in clock ticks */
#endif
#ifdef HAVE_OSINST
  extern sqlite3_vfs *sqlite3_instvfs_binarylog(char *, char *, char *);
  extern void sqlite3_instvfs_destroy(sqlite3_vfs *);
  sqlite3_vfs *pVfs = 0;
  const char *zVfsLog = 0;       /* log path, captured before argv shifts */
#endif

  /* Option parsing: each recognized option consumes its argv slots. */
  while (argc>3)
  {
#ifdef HAVE_OSINST
    if( argc>4 && (strcmp(argv[1], "-log")==0) ){
      zVfsLog = argv[2];
      pVfs = sqlite3_instvfs_binarylog("oslog", 0, argv[2]);
      sqlite3_vfs_register(pVfs, 1);
      argv += 2;
      argc -= 2;
      continue;
    }
#endif
    /*
    ** Increasing the priority slightly above normal can help with
    ** repeatability of testing. Note that with Cygwin, -5 equates
    ** to "High", +5 equates to "Low", and anything in between
    ** equates to "Normal".
    */
    if( argc>4 && (strcmp(argv[1], "-priority")==0) ){
#if defined(_MSC_VER)
      int new_priority = atoi(argv[2]);
      if(!SetPriorityClass(GetCurrentProcess(),
        (new_priority<=-5) ? HIGH_PRIORITY_CLASS :
        (new_priority<=0) ? ABOVE_NORMAL_PRIORITY_CLASS :
        (new_priority==0) ? NORMAL_PRIORITY_CLASS :
        (new_priority<5) ? BELOW_NORMAL_PRIORITY_CLASS :
        IDLE_PRIORITY_CLASS)){
        printf ("error setting priority\n");
        exit(2);
      }
#else
      struct sched_param myParam;
      sched_getparam(0, &myParam);
      printf ("Current process priority is %d.\n", (int)myParam.sched_priority);
      myParam.sched_priority = atoi(argv[2]);
      printf ("Setting process priority to %d.\n", (int)myParam.sched_priority);
      if (sched_setparam (0, &myParam) != 0){
        printf ("error setting priority\n");
        exit(2);
      }
#endif
      argv += 2;
      argc -= 2;
      continue;
    }
    if( argc>3 && strcmp(argv[1], "-quiet")==0 ){
      bQuiet = -1;
      argv++;
      argc--;
      continue;
    }
    break;
  }
  if( argc!=3 ){
    fprintf(stderr, "Usage: %s [options] FILENAME SQL-SCRIPT\n"
                    "Runs SQL-SCRIPT against a UTF8 database\n"
                    "\toptions:\n"
#ifdef HAVE_OSINST
                    "\t-log <log>\n"
#endif
                    "\t-priority <value> : set priority of task\n"
                    "\t-quiet : only display summary results\n",
            zArgv0);
    exit(1);
  }

  /* Read the whole SQL script into memory. Fixed: fopen() and malloc()
  ** results were previously used unchecked, crashing on a missing script
  ** file or allocation failure, and the FILE* was never closed. */
  in = fopen(argv[2], "r");
  if( in==0 ){
    fprintf(stderr, "%s: cannot open %s\n", zArgv0, argv[2]);
    exit(1);
  }
  fseek(in, 0L, SEEK_END);
  nSql = ftell(in);
  zSql = malloc( nSql+1 );
  if( zSql==0 ){
    fprintf(stderr, "%s: out of memory\n", zArgv0);
    exit(1);
  }
  fseek(in, 0L, SEEK_SET);
  nSql = fread(zSql, 1, nSql, in);
  zSql[nSql] = 0;
  fclose(in);

  printf("SQLite version: %d\n", sqlite3_libversion_number());
  unlink(argv[1]);  /* always start from a fresh database file */
#if !defined(_MSC_VER)
  clkStart = times(&tmsStart);
#endif
  iStart = sqlite3Hwtime();
  rc = sqlite3_open(argv[1], &db);
  iElapse = sqlite3Hwtime() - iStart;
  iSetup = iElapse;
  if (!bQuiet) printf("sqlite3_open() returns %d in %llu cycles\n", rc, iElapse);

  /* Walk the script: i marks the current statement start, j scans for a
  ** ';' that sqlite3_complete() confirms ends a full statement. */
  for(i=j=0; j<nSql; j++){
    if( zSql[j]==';' ){
      int isComplete;
      char c = zSql[j+1];
      zSql[j+1] = 0;                       /* temporarily NUL-terminate */
      isComplete = sqlite3_complete(&zSql[i]);
      zSql[j+1] = c;
      if( isComplete ){
        zSql[j] = 0;
        /* Fixed: isspace() has undefined behavior for negative char
        ** values; route through unsigned char. */
        while( i<j && isspace((unsigned char)zSql[i]) ){ i++; }
        if( i<j ){
          int n = j - i;
          if( n>=6 && memcmp(&zSql[i], ".crash",6)==0 ) exit(1);
          nStmt++;
          nByte += n;
          prepareAndRun(db, &zSql[i], bQuiet);
        }
        zSql[j] = ';';
        i = j+1;
      }
    }
  }

  iStart = sqlite3Hwtime();
  sqlite3_close(db);
  iElapse = sqlite3Hwtime() - iStart;
#if !defined(_MSC_VER)
  clkEnd = times(&tmsEnd);
#endif
  iSetup += iElapse;
  if (!bQuiet) printf("sqlite3_close() returns in %llu cycles\n", iElapse);
  free(zSql);

  printf("\n");
  printf("Statements run: %15d stmts\n", nStmt);
  printf("Bytes of SQL text: %15d bytes\n", nByte);
  printf("Total prepare time: %15llu cycles\n", prepTime);
  printf("Total run time: %15llu cycles\n", runTime);
  printf("Total finalize time: %15llu cycles\n", finalizeTime);
  printf("Open/Close time: %15llu cycles\n", iSetup);
  printf("Total time: %15llu cycles\n",
         prepTime + runTime + finalizeTime + iSetup);

#if !defined(_MSC_VER)
  /* Fixed: times() expresses its results in clock ticks obtained from
  ** sysconf(_SC_CLK_TCK), not CLOCKS_PER_SEC; the old divisor skewed the
  ** reported seconds by the ratio of the two values. */
  clkTck = sysconf(_SC_CLK_TCK);
  if( clkTck<=0 ) clkTck = 100;  /* fall back to the historical default */
  printf("\n");
  printf("Total user CPU time: %15.3g secs\n", (tmsEnd.tms_utime - tmsStart.tms_utime)/(double)clkTck );
  printf("Total system CPU time: %15.3g secs\n", (tmsEnd.tms_stime - tmsStart.tms_stime)/(double)clkTck );
  printf("Total real time: %15.3g secs\n", (clkEnd - clkStart)/(double)clkTck );
#endif

#ifdef HAVE_OSINST
  if( pVfs ){
    sqlite3_instvfs_destroy(pVfs);
    /* Fixed: this used to print argv[0], which no longer points at the
    ** log filename after the option loop shifted argv. */
    printf("vfs log written to %s\n", zVfsLog);
  }
#endif
  return 0;
}
| {
"pile_set_name": "Github"
} |
[brv](../../index.md) / [com.drake.brv.listener](../index.md) / [OnMultiStateListener](index.md) / [onHeaderStartAnimator](./on-header-start-animator.md)
# onHeaderStartAnimator
`open fun onHeaderStartAnimator(header: RefreshHeader?, headerHeight: `[`Int`](https://kotlinlang.org/api/latest/jvm/stdlib/kotlin/-int/index.html)`, maxDragHeight: `[`Int`](https://kotlinlang.org/api/latest/jvm/stdlib/kotlin/-int/index.html)`): `[`Unit`](https://kotlinlang.org/api/latest/jvm/stdlib/kotlin/-unit/index.html) | {
"pile_set_name": "Github"
} |
// GENERATE BY ./scripts/generate.ts
// DO NOT EDIT IT MANUALLY
import * as React from 'react'
import FileImageTwoToneSvg from '@ant-design/icons-svg/lib/asn/FileImageTwoTone';
import AntdIcon, { AntdIconProps } from '../components/AntdIcon';
// Thin wrapper that renders the FileImageTwoTone SVG definition through
// AntdIcon, forwarding the outer ref to the rendered <span> element.
const FileImageTwoTone = (
  props: AntdIconProps,
  ref: React.MutableRefObject<HTMLSpanElement>,
) => <AntdIcon {...props} ref={ref} icon={FileImageTwoToneSvg} />;

// Explicit displayName so the generated component reads nicely in DevTools.
FileImageTwoTone.displayName = 'FileImageTwoTone';

export default React.forwardRef<HTMLSpanElement, AntdIconProps>(FileImageTwoTone);
"pile_set_name": "Github"
} |
//
// Copyright (c) 2018 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
import Firebase
import MaterialComponents
/// Demo screen: a Material Components collection view with two sections,
/// each holding a single card — an "add" card with two number fields and
/// a "message" card with a comment field.
@objc(MainViewController)
class MainViewController: MDCCollectionViewController, UITextFieldDelegate {

  // Underline controllers kept in properties so they outlive cell
  // configuration (reassigned on every dequeue of the matching cell).
  // NOTE(review): presumably MDC requires the controller to be retained
  // by the owner — confirm against MDCTextInputControllerUnderline docs.
  var controller1: MDCTextInputControllerUnderline!
  var controller2: MDCTextInputControllerUnderline!
  var controller3: MDCTextInputControllerUnderline!

  override func viewDidLoad() {
    super.viewDidLoad()
    // Card-styled cells laid out as a vertical list.
    self.styler.cellStyle = .card
    self.styler.cellLayoutType = .list
  }

  /// Fixed cell heights: 181pt for the "add" card (section 0), 230pt otherwise.
  override func collectionView(_ collectionView: UICollectionView, cellHeightAt indexPath: IndexPath) -> CGFloat {
    if indexPath.section == 0 {
      return 181
    }
    return 230
  }

  // Two sections, one item each.
  override func numberOfSections(in collectionView: UICollectionView) -> Int {
    return 2
  }

  override func collectionView(_ collectionView: UICollectionView, numberOfItemsInSection section: Int) -> Int {
    return 1
  }

  /// Section 0 dequeues the "add" card, section 1 the "message" card.
  /// NOTE(review): assumes reuse identifiers "add"/"message" are registered
  /// with CloudAddCell/CommentCell (force casts below) — confirm in storyboard.
  override func collectionView(_ collectionView: UICollectionView,
                               cellForItemAt indexPath: IndexPath) -> UICollectionViewCell {
    if indexPath.section == 0 {
      let addCell = collectionView.dequeueReusableCell(withReuseIdentifier: "add", for: indexPath) as! CloudAddCell
      addCell.number1Field.delegate = self
      controller1 = MDCTextInputControllerUnderline(textInput: addCell.number1Field)
      addCell.number2Field.delegate = self
      controller2 = MDCTextInputControllerUnderline(textInput: addCell.number2Field)
      addCell.button.setElevation(ShadowElevation.raisedButtonResting, for: .normal)
      addCell.button.setElevation(ShadowElevation.raisedButtonPressed, for: .highlighted)
      return addCell
    } else {
      let commentCell = collectionView.dequeueReusableCell(withReuseIdentifier: "message", for: indexPath) as! CommentCell
      commentCell.inputField.delegate = self
      controller3 = MDCTextInputControllerUnderline(textInput: commentCell.inputField)
      commentCell.button.setElevation(ShadowElevation.raisedButtonResting, for: .normal)
      commentCell.button.setElevation(ShadowElevation.raisedButtonPressed, for: .highlighted)
      return commentCell
    }
  }
}
| {
"pile_set_name": "Github"
} |
/*---------------------------------------------------------------------------*
* timer routine *
*---------------------------------------------------------------------------*/
#include <sys/times.h>
#include <unistd.h>  /* sysconf(_SC_CLK_TCK) */
/*---------------------------------------------------------------------------*/
/*
 * Fortran-callable timer (trailing underscore, pointer arguments).
 * *cpu receives user CPU seconds; *wall receives user+system CPU
 * seconds — despite the name, this is NOT wall-clock time.
 */
void atim_(double * cpu, double * wall)
{
  struct tms buf;
  /* Fixed: the tick rate was hard-coded to 100 Hz, which is only the
   * traditional Linux value; query the real rate from the system. */
  long ticks = sysconf(_SC_CLK_TCK);
  if (ticks <= 0) ticks = 100;  /* fall back to the historical default */

  times(&buf);
  *cpu = buf.tms_utime / (double) ticks;
  *wall = *cpu + buf.tms_stime / (double) ticks;
}
/* $Id$ */
| {
"pile_set_name": "Github"
} |
'use strict';
var isObject = require('is-extendable');
module.exports = function extend(o/*, objects*/) {
if (!isObject(o)) { o = {}; }
var len = arguments.length;
for (var i = 1; i < len; i++) {
var obj = arguments[i];
if (isObject(obj)) {
assign(o, obj);
}
}
return o;
};
// Copy the own enumerable keys of `b` onto `a` (shallow, in place).
function assign(a, b) {
  var keys = Object.keys(b);
  for (var i = 0; i < keys.length; i++) {
    a[keys[i]] = b[keys[i]];
  }
}
/**
* Returns true if the given `key` is an own property of `obj`.
*/
function hasOwn(obj, key) {
return Object.prototype.hasOwnProperty.call(obj, key);
}
| {
"pile_set_name": "Github"
} |
# Toolchain file for cross-compiling 32-bit Windows binaries on a Linux
# host with MinGW-w64 (pass via -DCMAKE_TOOLCHAIN_FILE=...).
SET(CMAKE_SYSTEM_NAME Windows)

# which compilers to use for C and C++
SET(CMAKE_C_COMPILER i686-w64-mingw32-gcc)
SET(CMAKE_CXX_COMPILER i686-w64-mingw32-g++)
SET(CMAKE_RC_COMPILER i686-w64-mingw32-windres)

# here is the target environment located
SET(CMAKE_FIND_ROOT_PATH /usr/i686-w64-mingw32)

# adjust the default behaviour of the FIND_XXX() commands:
# search headers and libraries in the target environment, search
# programs in the host environment
set(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER)
#set(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY)
#set(CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY)

# Project-local flag; presumably consumed by the project's CMakeLists to
# gate Windows-only logic — confirm where it is read.
set(IS_WINDOWS_BUILD TRUE)
| {
"pile_set_name": "Github"
} |
// Copyright (c) YugaByte, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed under the License
// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
// or implied. See the License for the specific language governing permissions and limitations
// under the License.
//
#include <string>
#include "yb/common/table_properties_constants.h"
#include "yb/docdb/value.h"
#include "yb/gutil/strings/substitute.h"
namespace yb {
namespace docdb {
using std::string;
using strings::Substitute;
const MonoDelta Value::kMaxTtl = yb::common::kMaxTtl;
const MonoDelta Value::kResetTtl = MonoDelta::FromNanoseconds(0);
const int64_t Value::kInvalidUserTimestamp = yb::common::kInvalidUserTimestamp;
// Consumes the single byte for expected_value_type from the front of *slice
// if it is present. Returns true (leaving *val untouched so the caller can
// decode the payload that follows); otherwise leaves *slice unchanged,
// stores default_value in *val, and returns false.
template <typename T>
bool DecodeType(ValueType expected_value_type, const T& default_value, Slice* slice,
                T* val) {
  if (!slice->TryConsumeByte(static_cast<char>(expected_value_type))) {
    *val = default_value;
    return false;
  }
  return true;
}
// Decodes an optional kMergeFlags prefix: if the marker byte is present,
// reads the unsigned-varint flags into *merge_flags and advances *slice
// past them; otherwise *merge_flags is set to 0 and *slice is unchanged.
CHECKED_STATUS Value::DecodeMergeFlags(Slice* slice, uint64_t* merge_flags) {
  if (DecodeType(ValueType::kMergeFlags, (uint64_t) 0, slice, merge_flags)) {
    *merge_flags = VERIFY_RESULT(util::FastDecodeUnsignedVarInt(slice));
  }
  return Status::OK();
}
// Decodes an optional kHybridTime prefix: if the marker byte is present,
// decodes the hybrid time into *doc_ht; otherwise *doc_ht is set to
// DocHybridTime::kInvalid and *slice is left unchanged.
CHECKED_STATUS DecodeIntentDocHT(Slice* slice, DocHybridTime* doc_ht) {
  if (!DecodeType(ValueType::kHybridTime, DocHybridTime::kInvalid, slice, doc_ht)) {
    return Status::OK();
  }
  return doc_ht->DecodeFrom(slice);
}
// Decodes an optional kTtl prefix: if present, reads the signed-varint
// millisecond count into *ttl; otherwise *ttl is set to kMaxTtl (the
// "no TTL" sentinel) and *slice is left unchanged.
Status Value::DecodeTTL(rocksdb::Slice* slice, MonoDelta* ttl) {
  if (DecodeType(ValueType::kTtl, kMaxTtl, slice, ttl)) {
    *ttl = MonoDelta::FromMilliseconds(VERIFY_RESULT(util::FastDecodeSignedVarInt(slice)));
  }
  return Status::OK();
}
// Decodes the user timestamp from an encoded value, first skipping a
// leading TTL field if one is present. The input is copied, so
// rocksdb_value itself is not consumed.
// NOTE(review): merge-flags / hybrid-time prefixes are NOT skipped here,
// so this only works on values without them — confirm callers before reuse.
Status Value::DecodeUserTimestamp(const rocksdb::Slice& rocksdb_value,
                                  UserTimeMicros* user_timestamp) {
  MonoDelta ttl;
  auto slice_copy = rocksdb_value;
  RETURN_NOT_OK(DecodeTTL(&slice_copy, &ttl));
  return DecodeUserTimestamp(&slice_copy, user_timestamp);
}
// Decodes an optional kUserTimestamp prefix: if the marker is present the
// next 8 bytes are read as a big-endian timestamp; otherwise
// *user_timestamp is set to kInvalidUserTimestamp.
Status Value::DecodeUserTimestamp(rocksdb::Slice* slice, UserTimeMicros* user_timestamp) {
  if (DecodeType(ValueType::kUserTimestamp, kInvalidUserTimestamp, slice,
                 user_timestamp)) {
    if (slice->size() < kBytesPerInt64) {
      // Fixed: this message used to claim a TTL decode failure and used
      // $1/$2 even though Substitute placeholders are zero-based, so the
      // sizes were never interpolated correctly.
      return STATUS(Corruption, Substitute(
          "Failed to decode user timestamp from value, size too small: $0, need $1",
          slice->size(), kBytesPerInt64));
    }
    *user_timestamp = BigEndian::Load64(slice->data());
    slice->remove_prefix(kBytesPerInt64);
  }
  return Status::OK();
}
// Strips the optional control-field prefix from *slice in canonical order —
// merge flags, intent hybrid time, TTL, user timestamp — storing each into
// the corresponding member (fields that are absent keep their defaults).
// On success *slice points at the primitive-value payload. Errors are
// annotated with the full original value in hex for debugging.
Status Value::DecodeControlFields(Slice* slice) {
  if (slice->empty()) {
    return STATUS(Corruption, "Cannot decode a value from an empty slice");
  }
  Slice original = *slice;
  RETURN_NOT_OK_PREPEND(
      DecodeMergeFlags(slice, &merge_flags_),
      Format("Failed to decode merge flags in $0", original.ToDebugHexString()));
  RETURN_NOT_OK_PREPEND(
      DecodeIntentDocHT(slice, &intent_doc_ht_),
      Format("Failed to decode intent ht in $0", original.ToDebugHexString()));
  RETURN_NOT_OK_PREPEND(
      DecodeTTL(slice, &ttl_),
      Format("Failed to decode TTL in $0", original.ToDebugHexString()));
  RETURN_NOT_OK_PREPEND(
      DecodeUserTimestamp(slice, &user_timestamp_),
      Format("Failed to decode user timestamp in $0", original.ToDebugHexString()));
  return Status::OK();
}
// Fully decodes an encoded value: control fields first, then the
// remaining bytes as the primitive value payload.
Status Value::Decode(const Slice& rocksdb_value) {
  Slice slice = rocksdb_value;
  RETURN_NOT_OK(DecodeControlFields(&slice));
  RETURN_NOT_OK_PREPEND(
      primitive_value_.DecodeFromValue(slice),
      Format("Failed to decode value in $0", rocksdb_value.ToDebugHexString()));
  return Status::OK();
}
// Human-readable rendering: the primitive value followed by any control
// fields that differ from their "absent" defaults.
std::string Value::ToString() const {
  std::string result = primitive_value_.ToString();
  if (merge_flags_) {
    result += Format("; merge flags: $0", merge_flags_);
  }
  if (intent_doc_ht_.is_valid()) {
    result += Format("; intent doc ht: $0", intent_doc_ht_);
  }
  if (!ttl_.Equals(kMaxTtl)) {
    result += Format("; ttl: $0", ttl_);
  }
  if (user_timestamp_ != kInvalidUserTimestamp) {
    result += Format("; user timestamp: $0", user_timestamp_);
  }
  return result;
}
// Best-effort debug rendering of an encoded value; on decode failure the
// decode error's string form is returned instead of propagating a status.
std::string Value::DebugSliceToString(const Slice& encoded_value) {
  Value value;
  auto status = value.Decode(encoded_value);
  if (!status.ok()) {
    return status.ToString();
  }
  return value.ToString();
}
// Convenience wrapper around EncodeAndAppend returning a fresh string.
// When external_value is non-null its raw bytes replace the primitive
// value as the payload.
std::string Value::Encode(const Slice* external_value) const {
  std::string result;
  EncodeAndAppend(&result, external_value);
  return result;
}
// Appends the encoded form to *value_bytes: optional control fields first
// (merge flags, intent hybrid time, TTL, user timestamp — exactly the
// order DecodeControlFields expects), then the payload, which is either
// the primitive value or, when external_value is non-null, its raw bytes.
// Fields equal to their "absent" defaults are omitted entirely.
void Value::EncodeAndAppend(std::string *value_bytes, const Slice* external_value) const {
  if (merge_flags_) {
    value_bytes->push_back(ValueTypeAsChar::kMergeFlags);
    util::FastAppendUnsignedVarIntToStr(merge_flags_, value_bytes);
  }
  if (intent_doc_ht_.is_valid()) {
    value_bytes->push_back(ValueTypeAsChar::kHybridTime);
    intent_doc_ht_.AppendEncodedInDocDbFormat(value_bytes);
  }
  if (!ttl_.Equals(kMaxTtl)) {
    value_bytes->push_back(ValueTypeAsChar::kTtl);
    util::FastAppendSignedVarIntToBuffer(ttl_.ToMilliseconds(), value_bytes);
  }
  if (user_timestamp_ != kInvalidUserTimestamp) {
    value_bytes->push_back(ValueTypeAsChar::kUserTimestamp);
    util::AppendBigEndianUInt64(user_timestamp_, value_bytes);
  }
  if (!external_value) {
    value_bytes->append(primitive_value_.ToValue());
  } else {
    value_bytes->append(external_value->cdata(), external_value->size());
  }
}
// Peeks at an encoded value without fully decoding it: strips the control
// fields (optionally returning merge flags / TTL / user timestamp through
// the out-parameters; discarded into locals when null) and reports the
// ValueType of the remaining payload in *value_type.
Status Value::DecodePrimitiveValueType(
    const rocksdb::Slice& rocksdb_value,
    ValueType* value_type,
    uint64_t* merge_flags,
    MonoDelta* ttl,
    int64_t* user_ts) {
  auto slice_copy = rocksdb_value;
  uint64_t local_merge_flags;
  DocHybridTime local_doc_ht;
  MonoDelta local_ttl;
  int64_t local_user_ts;
  RETURN_NOT_OK(DecodeMergeFlags(&slice_copy, merge_flags ? merge_flags : &local_merge_flags));
  RETURN_NOT_OK(DecodeIntentDocHT(&slice_copy, &local_doc_ht));
  RETURN_NOT_OK(DecodeTTL(&slice_copy, ttl ? ttl : &local_ttl));
  RETURN_NOT_OK(DecodeUserTimestamp(&slice_copy, user_ts ? user_ts : &local_user_ts));
  *value_type = DecodeValueType(slice_copy);
  return Status::OK();
}
// Shared tombstone singleton (function-local static for lazy init).
const Value& Value::Tombstone() {
  static const auto kTombstone = Value(PrimitiveValue::kTombstone);
  return kTombstone;
}
// Pre-encoded form of the tombstone singleton, computed once on first use.
const string& Value::EncodedTombstone() {
  static const string kEncodedTombstone = Tombstone().Encode();
  return kEncodedTombstone;
}
// Resets the intent hybrid time to invalid so EncodeAndAppend omits it.
void Value::ClearIntentDocHt() {
  intent_doc_ht_ = DocHybridTime::kInvalid;
}
} // namespace docdb
} // namespace yb
| {
"pile_set_name": "Github"
} |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.waveprotocol.wave.crypto;
/**
* Thrown if we can't find the signer of a payload. (Signed payloads have a
* reference to a cert chain. This exception gets thrown when can't find the
* cert chain for that reference).
*/
public class UnknownSignerException extends Exception {

  /**
   * @param message description of the signer reference that could not be
   *     resolved.
   */
  public UnknownSignerException(String message) {
    super(message);
  }

  /** @param cause underlying error that prevented resolving the signer. */
  public UnknownSignerException(Throwable cause) {
    super(cause);
  }

  /**
   * @param message description of the failure.
   * @param cause underlying error that prevented resolving the signer.
   */
  public UnknownSignerException(String message, Throwable cause) {
    super(message, cause);
  }
}
| {
"pile_set_name": "Github"
} |
/*************************************
* Queue.h
**************************************/
#ifndef __QUEUE_H__
#define __QUEUE_H__
/*
 * Append _Packet to the tail of the singly linked list (_Head, _Tail).
 * The packet's `next` pointer is cleared, so it always becomes the new
 * tail. Fixed: every parameter use is now parenthesized so callers may
 * pass arbitrary expressions without precedence surprises. Note that
 * each argument is still evaluated more than once — avoid side effects.
 */
#define ENQUEUEPACKET(_Head, _Tail, _Packet) \
do                                           \
{                                            \
    if (!(_Head)) {                          \
        (_Head) = (_Packet);                 \
    }                                        \
    else {                                   \
        (_Tail)->next = (_Packet);           \
    }                                        \
    (_Packet)->next = NULL;                  \
    (_Tail) = (_Packet);                     \
} while (0)
/*
 * Remove the packet at the head of the list (Head, Tail); a no-op on an
 * empty list. When the last packet is removed, Tail is reset to NULL
 * along with Head. Fixed: parameter uses are now parenthesized. Each
 * argument may be evaluated multiple times — avoid side effects.
 */
#define DEQUEUEPACKET(Head, Tail)  \
do                                 \
{                                  \
    if (Head)                      \
    {                              \
        if (!(Head)->next) {       \
            (Tail) = NULL;         \
        }                          \
        (Head) = (Head)->next;     \
    }                              \
} while (0)
#endif //__QUEUE_H__
| {
"pile_set_name": "Github"
} |
fileFormatVersion: 2
guid: b76a80c7343e79049a6d0f652db38b29
NativeFormatImporter:
externalObjects: {}
mainObjectFileID: 0
userData:
assetBundleName:
assetBundleVariant:
| {
"pile_set_name": "Github"
} |
E1: C1 CALL pred polymorphic_output.main/2-0 (det) polymorphic_output.m:29
mdb> echo on
Command echo enabled.
mdb> register --quiet
mdb> context none
Contexts will not be printed.
mdb> b functor_names
0: + stop interface func polymorphic_output.functor_names/1-0 (det)
mdb> c
E2: C2 CALL func polymorphic_output.functor_names/1-0 (det)
mdb> delete 0
0: E stop interface func polymorphic_output.functor_names/1-0 (det)
mdb> p goal
functor_names(two("three", 3, three("four", 4, "one", 1, empty, empty, empty), two("two", 2, empty, empty))) = _
mdb> format verbose
mdb> format_param lines 100
mdb> p goal
functor_names
1-two
| 1-"three"
| 2-3
| 3-three
| | 1-"four"
| | 2-4
| | 3-"one"
| | 4-1
| | 5-empty
| | 6-empty
| | 7-empty
| 4-two
| 1-"two"
| 2-2
| 3-empty
| 4-empty
2-_
mdb> format flat
mdb> browse goal
browser> ^1
browser> p
two("three", 3, three("four", 4, "one", 1, empty, empty, empty), two("two", 2, empty, empty))
browser> ^..^2
browser> p
'_'
browser> ^..^3
error: there is no subterm 3
browser> p
'_'
browser> ^..^r
browser> p
'_'
browser> quit
mdb> b -A deconstruct.det_arg/4
0: + stop interface pred deconstruct.det_arg/4-0 (det)
1: + stop interface pred deconstruct.det_arg/4-1 (det)
2: + stop interface pred deconstruct.det_arg/4-2 (cc_multi)
3: + stop interface pred deconstruct.det_arg/4-3 (cc_multi)
mdb> c
E3: C3 CALL pred deconstruct.det_arg/4-1 (det)
mdb> P
Term (arg 1) two("three", 3, three("four", 4, "one", 1, empty, empty, empty), two/4)
NonCanon (arg 2) canonicalize
Index (arg 3) 3
mdb> f
E4: C3 EXIT pred deconstruct.det_arg/4-1 (det)
mdb> P
Term (arg 1) two("three", 3, three("four", 4, "one", 1, empty, empty, empty), two/4)
NonCanon (arg 2) canonicalize
Index (arg 3) 3
Argument (arg 4) two("two", 2, empty, empty)
mdb> disable *
0: - stop interface pred deconstruct.det_arg/4-0 (det)
1: - stop interface pred deconstruct.det_arg/4-1 (det)
2: - stop interface pred deconstruct.det_arg/4-2 (cc_multi)
3: - stop interface pred deconstruct.det_arg/4-3 (cc_multi)
mdb> c
two
| {
"pile_set_name": "Github"
} |
/*=============================================================================
Boost.Wave: A Standard compliant C++ preprocessor library
Definition of the abstract lexer interface
http://www.boost.org/
Copyright (c) 2001-2012 Hartmut Kaiser. Distributed under the Boost
Software License, Version 1.0. (See accompanying file
LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
=============================================================================*/
#if !defined(BOOST_WAVE_LEX_INTERFACE_GENERATOR_HPP_INCLUDED)
#define BOOST_WAVE_LEX_INTERFACE_GENERATOR_HPP_INCLUDED
#include <boost/wave/wave_config.hpp>
#include <boost/wave/util/file_position.hpp>
#include <boost/wave/language_support.hpp>
#include <boost/wave/cpplexer/cpp_lex_interface.hpp>
#include <boost/wave/cpplexer/cpp_lex_token.hpp> // lex_token
// this must occur after all of the includes and before any code appears
#ifdef BOOST_HAS_ABI_HEADERS
#include BOOST_ABI_PREFIX
#endif
// suppress warnings about dependent classes not being exported from the dll
#ifdef BOOST_MSVC
#pragma warning(push)
#pragma warning(disable : 4251 4231 4660)
#endif
///////////////////////////////////////////////////////////////////////////////
namespace boost {
namespace wave {
namespace cpplexer {
#if BOOST_WAVE_SEPARATE_LEXER_INSTANTIATION != 0
#define BOOST_WAVE_NEW_LEXER_DECL BOOST_WAVE_DECL
#else
#define BOOST_WAVE_NEW_LEXER_DECL
#endif
///////////////////////////////////////////////////////////////////////////////
//
// new_lexer_gen: generates a new instance of the required C++ lexer
//
///////////////////////////////////////////////////////////////////////////////
template <
    typename IteratorT,
    typename PositionT = boost::wave::util::file_position_type,
    typename TokenT = lex_token<PositionT>
>
struct BOOST_WAVE_NEW_LEXER_DECL new_lexer_gen
{
    //  The NewLexer function allows the opaque generation of a new lexer object.
    //  It is coupled to the token type to allow to decouple the lexer/token
    //  configurations at compile time.
    //
    //  first/last: input character range to tokenize
    //  pos:        initial position used for the tokens' source positions
    //  language:   language mode flags the lexer honors
    //  Returns a pointer to a newly created lexer instance
    //  (NOTE(review): presumably heap-allocated and owned by the caller —
    //  confirm against the definition, which lives elsewhere).
    static lex_input_interface<TokenT> *
    new_lexer(IteratorT const &first, IteratorT const &last,
        PositionT const &pos, boost::wave::language_support language);
};
#undef BOOST_WAVE_NEW_LEXER_DECL
///////////////////////////////////////////////////////////////////////////////
//
// The lex_input_interface_generator helps to instantiate a concrete lexer
// to be used by the Wave preprocessor module.
// This is done to allow compile time reduction.
//
///////////////////////////////////////////////////////////////////////////////
// Adapter that exposes the new_lexer_gen factory through the
// lex_input_interface<TokenT> hierarchy, deferring the iterator type to
// the call site so only the used instantiations get compiled.
template <typename TokenT>
struct lex_input_interface_generator
:   lex_input_interface<TokenT>
{
    typedef typename lex_input_interface<TokenT>::position_type position_type;

    lex_input_interface_generator() {}
    ~lex_input_interface_generator() {}

    //  The new_lexer function allows the opaque generation of a new lexer object.
    //  It is coupled to the token type to allow to distinguish different
    //  lexer/token configurations at compile time.
    //  Simply forwards to new_lexer_gen<>::new_lexer for the deduced iterator.
    template <typename IteratorT>
    static lex_input_interface<TokenT> *
    new_lexer(IteratorT const &first, IteratorT const &last,
        position_type const &pos, boost::wave::language_support language)
    {
        return new_lexer_gen<IteratorT, position_type, TokenT>::new_lexer (
            first, last, pos, language);
    }
};
///////////////////////////////////////////////////////////////////////////////
} // namespace cpplexer
} // namespace wave
} // namespace boost
#ifdef BOOST_MSVC
#pragma warning(pop)
#endif
// the suffix header occurs after all of the code
#ifdef BOOST_HAS_ABI_HEADERS
#include BOOST_ABI_SUFFIX
#endif
#endif // !defined(BOOST_WAVE_LEX_INTERFACE_GENERATOR_HPP_INCLUDED)
| {
"pile_set_name": "Github"
} |
# Bazel build rules for k8s.io/apimachinery/pkg/runtime.
# The "automanaged" tags indicate these rules are kept in sync by a generator;
# prefer regenerating over hand-editing the srcs/deps lists.
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)
# Internal (white-box) tests compiled into the runtime package itself.
go_test(
name = "go_default_test",
srcs = ["swagger_doc_generator_test.go"],
importpath = "k8s.io/apimachinery/pkg/runtime",
library = ":go_default_library",
)
# The runtime library proper.
go_library(
name = "go_default_library",
srcs = [
"codec.go",
"codec_check.go",
"conversion.go",
"doc.go",
"embedded.go",
"error.go",
"extension.go",
"generated.pb.go",
"helper.go",
"interfaces.go",
"register.go",
"scheme.go",
"scheme_builder.go",
"swagger_doc_generator.go",
"types.go",
"types_proto.go",
"zz_generated.deepcopy.go",
],
importpath = "k8s.io/apimachinery/pkg/runtime",
deps = [
"//vendor/github.com/gogo/protobuf/proto:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/conversion/queryparams:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library",
],
)
# External (black-box) tests living in the runtime_test package.
go_test(
name = "go_default_xtest",
srcs = [
"conversion_test.go",
"embedded_test.go",
"extension_test.go",
"scheme_test.go",
],
importpath = "k8s.io/apimachinery/pkg/runtime_test",
deps = [
"//vendor/github.com/google/gofuzz:go_default_library",
"//vendor/github.com/spf13/pflag:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/testing:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/diff:go_default_library",
],
)
# Source filegroups used by the repo-wide build aggregation.
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:all-srcs",
"//staging/src/k8s.io/apimachinery/pkg/runtime/serializer:all-srcs",
"//staging/src/k8s.io/apimachinery/pkg/runtime/testing:all-srcs",
],
tags = ["automanaged"],
)
filegroup(
name = "go_default_library_protos",
srcs = ["generated.proto"],
visibility = ["//visibility:public"],
)
| {
"pile_set_name": "Github"
} |
<!DOCTYPE html>
<!-- Standalone 404 error page. All visible text is localized through the
     Rails I18n `t` helper; the back link falls back to the root path when
     there is no HTTP referer. -->
<html>
<head>
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>404 - <%= t('not_found.title') %> - <%= t('cm-central') %></title>
<%= favicon_link_tag "favicon.ico" %>
<%= stylesheet_link_tag :application %>
</head>
<body class="NotFound">
<div class="NotFound__container">
<h2 class="NotFound__title">404</h2>
<h3 class="NotFound__description"><%= t('not_found.description') %></h3>
<%= link_to t('back'), request.referer || root_path, class: "NotFound__link" %>
</div>
</body>
</html>
| {
"pile_set_name": "Github"
} |
<!doctype html>
<!-- - - - - - - - - - - - - - - -->
<!-- Book Finder -->
<!-- dnajs.org/book-finder.html -->
<!-- - - - - - - - - - - - - - - -->
<html lang=en>
<head>
<meta charset=utf-8>
<title>Book Finder</title>
<link rel=icon href=https://dnajs.org/graphics/bookmark.png>
<style>
body { font-family: system-ui, sans-serif; margin: 30px; }
.book { display: flex; align-items: flex-start;
width: 400px; background-color: skyblue;
padding: 10px; margin: 10px 0px; }
.book img { width: 100px; margin-right: 10px; }
.book h2 { margin: 0px; }
</style>
</head>
<body>
<main>
<h1>Book Finder</h1>
<label>
Search:
<input placeholder="Enter terms" autofocus>
</label>
<button>Find</button>
<!-- Results area. The .book card below is static sample markup; presumably
     it is cloned/filled by a script that is not part of this page — TODO
     confirm against the page that embeds this markup. -->
<section class=books>
<div class=book>
<img src=https://dnajs.org/graphics/sample-book-cover.jpg alt=cover>
<div>
<h2>Title</h2>
<p>Publisher</p>
<i>Price</i>
</div>
</div>
</section>
</main>
</body>
</html>
| {
"pile_set_name": "Github"
} |
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyKerasApplications(PythonPackage):
    """Applications module of the Keras deep-learning library (per the
    keras-applications upstream project); Keras depends on this package
    to run properly."""
    # Project landing page and release-tarball URL template.
    homepage = "http://keras.io"
    url = "https://github.com/keras-team/keras-applications/archive/1.0.4.tar.gz"
    # Known releases, newest first, pinned by sha256 of the GitHub tarball.
    version('1.0.8', sha256='7c37f9e9ef93efac9b4956301cb21ce46c474ce9da41fac9a46753bab6823dfc')
    version('1.0.7', sha256='8580a885c8abe4bf8429cb0e551f23e79b14eda73d99138cfa1d355968dd4b0a')
    version('1.0.6', sha256='2cb412c97153160ec267b238e958d281ac3532b139cab42045c2d7086a157c21')
    version('1.0.4', sha256='37bd2f3ba9c0e0105c193999b1162fd99562cf43e5ef06c73932950ecc46d085')
    version('1.0.3', sha256='35b663a4933ee3c826a9349d19048221c997f0dd5ea24dd598c05cf90c72879d')
    version('1.0.2', sha256='6d8923876a7f7f2d459dd7efe3b10830f316f714b707f0c136e7f00c63035338')
    version('1.0.1', sha256='05ad1a73fddd22ed73ae59065b554e7ea13d05c3d4c6755ac166702b88686db5')
    # setuptools is only needed to build/install the package.
    depends_on('py-setuptools', type='build')
| {
"pile_set_name": "Github"
} |
好奇心原文链接:[为什么当今的流行文化那么热衷于好人与坏人间的斗争?_文化_好奇心日报-Catherine Nichols](https://www.qdaily.com/articles/49839.html)
WebArchive归档链接:[为什么当今的流行文化那么热衷于好人与坏人间的斗争?_文化_好奇心日报-Catherine Nichols](http://web.archive.org/web/20180916061512/http://www.qdaily.com:80/articles/49839.html)
 | {
"pile_set_name": "Github"
} |
// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "content/renderer/android/synchronous_compositor_frame_sink.h"
#include <vector>
#include "base/auto_reset.h"
#include "base/location.h"
#include "base/logging.h"
#include "base/macros.h"
#include "base/memory/ptr_util.h"
#include "base/single_thread_task_runner.h"
#include "base/threading/thread_task_runner_handle.h"
#include "cc/output/compositor_frame.h"
#include "cc/output/compositor_frame_sink_client.h"
#include "cc/output/context_provider.h"
#include "cc/output/output_surface.h"
#include "cc/output/output_surface_frame.h"
#include "cc/output/renderer_settings.h"
#include "cc/output/software_output_device.h"
#include "cc/output/texture_mailbox_deleter.h"
#include "cc/quads/render_pass.h"
#include "cc/quads/surface_draw_quad.h"
#include "cc/surfaces/display.h"
#include "cc/surfaces/surface_factory.h"
#include "cc/surfaces/surface_id_allocator.h"
#include "cc/surfaces/surface_manager.h"
#include "content/common/android/sync_compositor_messages.h"
#include "content/renderer/android/synchronous_compositor_filter.h"
#include "content/renderer/android/synchronous_compositor_registry.h"
#include "content/renderer/gpu/frame_swap_message_queue.h"
#include "content/renderer/render_thread_impl.h"
#include "gpu/command_buffer/client/context_support.h"
#include "gpu/command_buffer/client/gles2_interface.h"
#include "gpu/command_buffer/common/gpu_memory_allocation.h"
#include "ipc/ipc_message.h"
#include "ipc/ipc_message_macros.h"
#include "ipc/ipc_sender.h"
#include "third_party/skia/include/core/SkCanvas.h"
#include "ui/gfx/geometry/rect_conversions.h"
#include "ui/gfx/skia_util.h"
#include "ui/gfx/transform.h"
namespace content {
namespace {
const int64_t kFallbackTickTimeoutInMilliseconds = 100;
const cc::FrameSinkId kFrameSinkId(1, 1);
// Do not limit number of resources, so use an unrealistically high value.
const size_t kNumResourcesLimit = 10 * 1000 * 1000;
// SoftwareOutputDevice that paints into an externally-owned SkCanvas. The
// canvas pointer is double-indirected because the embedder swaps the current
// canvas in and out per draw (see DemandDrawSw).
class SoftwareDevice : public cc::SoftwareOutputDevice {
public:
// |canvas| must outlive this device; *canvas may be null between draws.
SoftwareDevice(SkCanvas** canvas) : canvas_(canvas) {}
void Resize(const gfx::Size& pixel_size, float device_scale_factor) override {
// Intentional no-op: canvas size is controlled by the embedder.
}
SkCanvas* BeginPaint(const gfx::Rect& damage_rect) override {
DCHECK(*canvas_) << "BeginPaint with no canvas set";
return *canvas_;
}
void EndPaint() override {}
private:
// Not owned; points at the embedder's current-canvas slot.
SkCanvas** canvas_;
DISALLOW_COPY_AND_ASSIGN(SoftwareDevice);
};
} // namespace
// Minimal OutputSurface used only for resourceless software draws: every
// operation is a no-op because actual rasterization happens directly into the
// embedder's canvas through SoftwareDevice.
class SynchronousCompositorFrameSink::SoftwareOutputSurface
: public cc::OutputSurface {
public:
SoftwareOutputSurface(std::unique_ptr<SoftwareDevice> software_device)
: cc::OutputSurface(std::move(software_device)) {}
// cc::OutputSurface implementation (all intentionally inert).
void BindToClient(cc::OutputSurfaceClient* client) override {}
void EnsureBackbuffer() override {}
void DiscardBackbuffer() override {}
void BindFramebuffer() override {}
void SwapBuffers(cc::OutputSurfaceFrame frame) override {}
void Reshape(const gfx::Size& size,
float scale_factor,
const gfx::ColorSpace& color_space,
bool has_alpha) override {}
uint32_t GetFramebufferCopyTextureFormat() override { return 0; }
cc::OverlayCandidateValidator* GetOverlayCandidateValidator() const override {
return nullptr;
}
bool IsDisplayedAsOverlayPlane() const override { return false; }
unsigned GetOverlayTextureId() const override { return 0; }
bool SurfaceIsSuspendForRecycle() const override { return false; }
bool HasExternalStencilTest() const override { return false; }
void ApplyExternalStencil() override {}
};
// Constructor runs on the creating thread; the thread checker is detached so
// the sink can subsequently be bound to the compositor thread.
SynchronousCompositorFrameSink::SynchronousCompositorFrameSink(
scoped_refptr<cc::ContextProvider> context_provider,
scoped_refptr<cc::ContextProvider> worker_context_provider,
gpu::GpuMemoryBufferManager* gpu_memory_buffer_manager,
int routing_id,
uint32_t compositor_frame_sink_id,
std::unique_ptr<cc::BeginFrameSource> begin_frame_source,
SynchronousCompositorRegistry* registry,
scoped_refptr<FrameSwapMessageQueue> frame_swap_message_queue)
: cc::CompositorFrameSink(std::move(context_provider),
std::move(worker_context_provider),
gpu_memory_buffer_manager,
nullptr),
routing_id_(routing_id),
compositor_frame_sink_id_(compositor_frame_sink_id),
registry_(registry),
sender_(RenderThreadImpl::current()->sync_compositor_message_filter()),
memory_policy_(0u),
frame_swap_message_queue_(frame_swap_message_queue),
surface_manager_(new cc::SurfaceManager),
surface_id_allocator_(new cc::SurfaceIdAllocator()),
surface_factory_(
new cc::SurfaceFactory(kFrameSinkId, surface_manager_.get(), this)),
begin_frame_source_(std::move(begin_frame_source)) {
DCHECK(registry_);
DCHECK(sender_);
DCHECK(begin_frame_source_);
thread_checker_.DetachFromThread();
memory_policy_.priority_cutoff_when_visible =
gpu::MemoryAllocation::CUTOFF_ALLOW_NICE_TO_HAVE;
}
SynchronousCompositorFrameSink::~SynchronousCompositorFrameSink() = default;
// Registers |compositor| as the synchronous client; notifies the browser side
// (via IPC) that this frame sink now exists.
void SynchronousCompositorFrameSink::SetSyncClient(
SynchronousCompositorFrameSinkClient* compositor) {
DCHECK(CalledOnValidThread());
sync_client_ = compositor;
if (sync_client_)
Send(new SyncCompositorHostMsg_CompositorFrameSinkCreated(routing_id_));
}
// IPC dispatch for browser->renderer sync-compositor messages; returns true
// when the message was consumed here.
bool SynchronousCompositorFrameSink::OnMessageReceived(
const IPC::Message& message) {
bool handled = true;
IPC_BEGIN_MESSAGE_MAP(SynchronousCompositorFrameSink, message)
IPC_MESSAGE_HANDLER(SyncCompositorMsg_SetMemoryPolicy, SetMemoryPolicy)
IPC_MESSAGE_HANDLER(SyncCompositorMsg_ReclaimResources, OnReclaimResources)
IPC_MESSAGE_UNHANDLED(handled = false)
IPC_END_MESSAGE_MAP()
return handled;
}
// Binds this sink to the layer compositor's client and sets up the local
// Display/Surface machinery used for resourceless software draws.
// Returns false if the base-class bind fails, in which case no local state
// is modified.
bool SynchronousCompositorFrameSink::BindToClient(
    cc::CompositorFrameSinkClient* sink_client) {
  DCHECK(CalledOnValidThread());
  if (!cc::CompositorFrameSink::BindToClient(sink_client))
    return false;
  DCHECK(begin_frame_source_);
  client_->SetBeginFrameSource(begin_frame_source_.get());
  client_->SetMemoryPolicy(memory_policy_);
  client_->SetTreeActivationCallback(
      base::Bind(&SynchronousCompositorFrameSink::DidActivatePendingTree,
                 base::Unretained(this)));
  registry_->RegisterCompositorFrameSink(routing_id_, this);
  surface_manager_->RegisterFrameSinkId(kFrameSinkId);
  surface_manager_->RegisterSurfaceFactoryClient(kFrameSinkId, this);
  cc::RendererSettings software_renderer_settings;
  // NOTE: was corrupted to "¤t_sw_canvas_" by an HTML-entity mangling of
  // "&current_sw_canvas_"; the device must point at the member slot that
  // DemandDrawSw() fills per draw.
  auto output_surface = base::MakeUnique<SoftwareOutputSurface>(
      base::MakeUnique<SoftwareDevice>(&current_sw_canvas_));
  software_output_surface_ = output_surface.get();
  // The shared_bitmap_manager and gpu_memory_buffer_manager here are null as
  // this Display is only used for resourcesless software draws, where no
  // resources are included in the frame swapped from the compositor. So there
  // is no need for these.
  display_.reset(new cc::Display(
      nullptr /* shared_bitmap_manager */,
      nullptr /* gpu_memory_buffer_manager */, software_renderer_settings,
      kFrameSinkId, nullptr /* begin_frame_source */, std::move(output_surface),
      nullptr /* scheduler */, nullptr /* texture_mailbox_deleter */));
  display_->Initialize(&display_client_, surface_manager_.get());
  display_->SetVisible(true);
  return true;
}
// Tears down everything BindToClient() set up, in reverse dependency order,
// and cancels any pending fallback tick so it cannot fire after detach.
void SynchronousCompositorFrameSink::DetachFromClient() {
DCHECK(CalledOnValidThread());
client_->SetBeginFrameSource(nullptr);
// Destroy the begin frame source on the same thread it was bound on.
begin_frame_source_ = nullptr;
registry_->UnregisterCompositorFrameSink(routing_id_, this);
client_->SetTreeActivationCallback(base::Closure());
// Surfaces only exist if at least one software frame was submitted.
if (root_local_frame_id_.is_valid()) {
surface_factory_->Destroy(root_local_frame_id_);
surface_factory_->Destroy(child_local_frame_id_);
}
surface_manager_->UnregisterSurfaceFactoryClient(kFrameSinkId);
surface_manager_->InvalidateFrameSinkId(kFrameSinkId);
software_output_surface_ = nullptr;
display_ = nullptr;
surface_factory_ = nullptr;
surface_id_allocator_ = nullptr;
surface_manager_ = nullptr;
cc::CompositorFrameSink::DetachFromClient();
CancelFallbackTick();
}
// SurfaceFactory draw callbacks are mandatory but unused here.
static void NoOpDrawCallback() {}
// Receives a frame from the layer compositor. Three paths:
//  - fallback tick: drop the frame, just ack resources;
//  - software draw: wrap the frame in a root surface and draw via |display_|;
//  - hardware draw: forward the whole frame to the sync client.
void SynchronousCompositorFrameSink::SubmitCompositorFrame(
cc::CompositorFrame frame) {
DCHECK(CalledOnValidThread());
DCHECK(sync_client_);
if (fallback_tick_running_) {
DCHECK(frame.resource_list.empty());
cc::ReturnedResourceArray return_resources;
ReturnResources(return_resources);
did_submit_frame_ = true;
return;
}
cc::CompositorFrame submit_frame;
if (in_software_draw_) {
// The frame we send to the client is actually just the metadata. Preserve
// the |frame| for the software path below.
submit_frame.metadata = frame.metadata.Clone();
// Lazily create the root/child surface pair on the first software draw.
if (!root_local_frame_id_.is_valid()) {
root_local_frame_id_ = surface_id_allocator_->GenerateId();
surface_factory_->Create(root_local_frame_id_);
child_local_frame_id_ = surface_id_allocator_->GenerateId();
surface_factory_->Create(child_local_frame_id_);
}
display_->SetLocalFrameId(root_local_frame_id_,
frame.metadata.device_scale_factor);
// The layer compositor should be giving a frame that covers the
// |sw_viewport_for_current_draw_| but at 0,0.
gfx::Size child_size = sw_viewport_for_current_draw_.size();
DCHECK(gfx::Rect(child_size) == frame.render_pass_list.back()->output_rect);
// Make a size that covers from 0,0 and includes the area coming from the
// layer compositor.
gfx::Size display_size(sw_viewport_for_current_draw_.right(),
sw_viewport_for_current_draw_.bottom());
display_->Resize(display_size);
// The offset for the child frame relative to the origin of the canvas being
// drawn into.
gfx::Transform child_transform;
child_transform.Translate(
gfx::Vector2dF(sw_viewport_for_current_draw_.OffsetFromOrigin()));
// Make a root frame that embeds the frame coming from the layer compositor
// and positions it based on the provided viewport.
// TODO(danakj): We could apply the transform here instead of passing it to
// the CompositorFrameSink client too? (We'd have to do the same for
// hardware frames in SurfacesInstance?)
cc::CompositorFrame embed_frame;
embed_frame.render_pass_list.push_back(cc::RenderPass::Create());
// The embedding RenderPass covers the entire Display's area.
const auto& embed_render_pass = embed_frame.render_pass_list.back();
embed_render_pass->SetAll(cc::RenderPassId(1, 1), gfx::Rect(display_size),
gfx::Rect(display_size), gfx::Transform(), false);
// The RenderPass has a single SurfaceDrawQuad (and SharedQuadState for it).
auto* shared_quad_state =
embed_render_pass->CreateAndAppendSharedQuadState();
auto* surface_quad =
embed_render_pass->CreateAndAppendDrawQuad<cc::SurfaceDrawQuad>();
shared_quad_state->SetAll(
child_transform, child_size, gfx::Rect(child_size),
gfx::Rect() /* clip_rect */, false /* is_clipped */, 1.f /* opacity */,
SkBlendMode::kSrcOver, 0 /* sorting_context_id */);
surface_quad->SetNew(shared_quad_state, gfx::Rect(child_size),
gfx::Rect(child_size),
cc::SurfaceId(kFrameSinkId, child_local_frame_id_));
// Submit the compositor's frame as the child surface and our embedding
// frame as the root, then draw synchronously into the current canvas.
surface_factory_->SubmitCompositorFrame(
child_local_frame_id_, std::move(frame), base::Bind(&NoOpDrawCallback));
surface_factory_->SubmitCompositorFrame(root_local_frame_id_,
std::move(embed_frame),
base::Bind(&NoOpDrawCallback));
display_->DrawAndSwap();
} else {
// For hardware draws we send the whole frame to the client so it can draw
// the content in it.
submit_frame = std::move(frame);
}
sync_client_->SubmitCompositorFrame(compositor_frame_sink_id_,
std::move(submit_frame));
DeliverMessages();
did_submit_frame_ = true;
}
// Cancels a scheduled fallback tick (if any) and clears the pending flag.
void SynchronousCompositorFrameSink::CancelFallbackTick() {
fallback_tick_.Cancel();
fallback_tick_pending_ = false;
}
// Fired when no real draw happened within the fallback timeout: performs a
// throwaway 1x1 software draw so the compositor can make progress.
void SynchronousCompositorFrameSink::FallbackTickFired() {
DCHECK(CalledOnValidThread());
TRACE_EVENT0("renderer", "SynchronousCompositorFrameSink::FallbackTickFired");
base::AutoReset<bool> in_fallback_tick(&fallback_tick_running_, true);
SkBitmap bitmap;
bitmap.allocN32Pixels(1, 1);
bitmap.eraseColor(0);
SkCanvas canvas(bitmap);
fallback_tick_pending_ = false;
DemandDrawSw(&canvas);
}
// Asks the sync client to schedule a draw; also arms the fallback tick so a
// draw happens even if the client never calls back in time.
void SynchronousCompositorFrameSink::Invalidate() {
DCHECK(CalledOnValidThread());
if (sync_client_)
sync_client_->Invalidate();
if (!fallback_tick_pending_) {
fallback_tick_.Reset(
base::Bind(&SynchronousCompositorFrameSink::FallbackTickFired,
base::Unretained(this)));
base::ThreadTaskRunnerHandle::Get()->PostDelayedTask(
FROM_HERE, fallback_tick_.callback(),
base::TimeDelta::FromMilliseconds(kFallbackTickTimeoutInMilliseconds));
fallback_tick_pending_ = true;
}
}
// Synchronously produces a hardware frame: pushes tile-priority constraints to
// the client, then drives a composite for the given viewport.
void SynchronousCompositorFrameSink::DemandDrawHw(
const gfx::Size& viewport_size,
const gfx::Rect& viewport_rect_for_tile_priority,
const gfx::Transform& transform_for_tile_priority) {
DCHECK(CalledOnValidThread());
DCHECK(HasClient());
DCHECK(context_provider_.get());
// A real draw supersedes any pending fallback tick.
CancelFallbackTick();
client_->SetExternalTilePriorityConstraints(viewport_rect_for_tile_priority,
transform_for_tile_priority);
InvokeComposite(gfx::Transform(), gfx::Rect(viewport_size));
}
// Synchronously produces a software frame rendered directly into |canvas|.
// The canvas's clip determines the viewport and its total matrix the
// transform; |current_sw_canvas_| is set only for the duration of the draw.
void SynchronousCompositorFrameSink::DemandDrawSw(SkCanvas* canvas) {
  DCHECK(CalledOnValidThread());
  DCHECK(canvas);
  DCHECK(!current_sw_canvas_);
  CancelFallbackTick();
  // NOTE: was corrupted to "¤t_sw_canvas_" by an HTML-entity mangling of
  // "&current_sw_canvas_"; AutoReset needs the address of the member.
  base::AutoReset<SkCanvas*> canvas_resetter(&current_sw_canvas_, canvas);
  SkIRect canvas_clip;
  canvas->getClipDeviceBounds(&canvas_clip);
  gfx::Rect viewport = gfx::SkIRectToRect(canvas_clip);
  gfx::Transform transform(gfx::Transform::kSkipInitialization);
  transform.matrix() = canvas->getTotalMatrix();  // Converts 3x3 matrix to 4x4.
  // We will resize the Display to ensure it covers the entire |viewport|, so
  // save it for later.
  sw_viewport_for_current_draw_ = viewport;
  base::AutoReset<bool> set_in_software_draw(&in_software_draw_, true);
  InvokeComposite(transform, viewport);
}
// Drives a single synchronous composite through the client's OnDraw, then
// immediately acks if a frame was actually submitted during the call.
void SynchronousCompositorFrameSink::InvokeComposite(
const gfx::Transform& transform,
const gfx::Rect& viewport) {
did_submit_frame_ = false;
// Adjust transform so that the layer compositor draws the |viewport| rect
// at its origin. The offset of the |viewport| we pass to the layer compositor
// is ignored for drawing, so its okay to not match the transform.
// TODO(danakj): Why do we pass a viewport origin and then not really use it
// (only for comparing to the viewport passed in
// SetExternalTilePriorityConstraints), surely this could be more clear?
gfx::Transform adjusted_transform = transform;
adjusted_transform.matrix().postTranslate(-viewport.x(), -viewport.y(), 0);
client_->OnDraw(adjusted_transform, viewport, in_software_draw_);
if (did_submit_frame_) {
// This must happen after unwinding the stack and leaving the compositor.
// Usually it is a separate task but we just defer it until OnDraw completes
// instead.
client_->DidReceiveCompositorFrameAck();
}
}
// Returns resources reclaimed by the browser back to the layer compositor.
void SynchronousCompositorFrameSink::OnReclaimResources(
uint32_t compositor_frame_sink_id,
const cc::ReturnedResourceArray& resources) {
// Ignore message if it's a stale one coming from a different output surface
// (e.g. after a lost context).
if (compositor_frame_sink_id != compositor_frame_sink_id_)
return;
client_->ReclaimResources(resources);
}
// Applies a new GPU memory budget; a transition to/from zero additionally
// toggles aggressive freeing of context resources.
void SynchronousCompositorFrameSink::SetMemoryPolicy(size_t bytes_limit) {
DCHECK(CalledOnValidThread());
bool became_zero = memory_policy_.bytes_limit_when_visible && !bytes_limit;
bool became_non_zero =
!memory_policy_.bytes_limit_when_visible && bytes_limit;
memory_policy_.bytes_limit_when_visible = bytes_limit;
memory_policy_.num_resources_limit = kNumResourcesLimit;
if (client_)
client_->SetMemoryPolicy(memory_policy_);
if (became_zero) {
// This is small hack to drop context resources without destroying it
// when this compositor is put into the background.
context_provider()->ContextSupport()->SetAggressivelyFreeResources(
true /* aggressively_free_resources */);
} else if (became_non_zero) {
context_provider()->ContextSupport()->SetAggressivelyFreeResources(
false /* aggressively_free_resources */);
}
}
// Tree-activation hook installed in BindToClient(); forwards to the sync
// client and flushes any queued frame-swap IPC messages.
void SynchronousCompositorFrameSink::DidActivatePendingTree() {
DCHECK(CalledOnValidThread());
if (sync_client_)
sync_client_->DidActivatePendingTree();
DeliverMessages();
}
// Drains and sends all queued frame-swap messages under the queue's send
// scope (which serializes senders).
void SynchronousCompositorFrameSink::DeliverMessages() {
std::vector<std::unique_ptr<IPC::Message>> messages;
std::unique_ptr<FrameSwapMessageQueue::SendMessageScope> send_message_scope =
frame_swap_message_queue_->AcquireSendMessageScope();
frame_swap_message_queue_->DrainMessages(&messages);
for (auto& msg : messages) {
Send(msg.release());
}
}
// Sends an IPC message via the sync-compositor message filter; takes
// ownership of |message| per IPC::Sender convention.
bool SynchronousCompositorFrameSink::Send(IPC::Message* message) {
DCHECK(CalledOnValidThread());
return sender_->Send(message);
}
bool SynchronousCompositorFrameSink::CalledOnValidThread() const {
return thread_checker_.CalledOnValidThread();
}
// SurfaceFactoryClient: resources are never exported in software mode, so
// the returned list is expected to be empty.
void SynchronousCompositorFrameSink::ReturnResources(
const cc::ReturnedResourceArray& resources) {
DCHECK(resources.empty());
client_->ReclaimResources(resources);
}
void SynchronousCompositorFrameSink::SetBeginFrameSource(
cc::BeginFrameSource* begin_frame_source) {
// Software output is synchronous and doesn't use a BeginFrameSource.
NOTREACHED();
}
} // namespace content
| {
"pile_set_name": "Github"
} |
#!/usr/bin/env bash
#
# Script for various external toolchain tasks, refer to
# the --help output for more information.
#
# Copyright (C) 2012 Jo-Philipp Wich <jo@mein.io>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
# Tool paths and flags discovered lazily by the probe_* helpers below.
CC=""
CXX=""
CPP=""
CFLAGS=""
# Toolchain root directory; overridden by --toolchain.
TOOLCHAIN="."
# "uclibc" or "glibc", detected by probe_libc().
LIBC_TYPE=""
# Library specs: "name: shell-glob patterns" consumed by find_libs().
LIB_SPECS="
c: ld-* lib{anl,c,cidn,crypt,dl,m,nsl,nss_dns,nss_files,resolv,util}
rt: librt-* librt
pthread: libpthread-* libpthread
stdcpp: libstdc++
thread_db: libthread-db
gcc: libgcc_s
ssp: libssp
gfortran: libgfortran
gomp: libgomp
"
# Binary specs: "name: shell-glob patterns" consumed by find_bins().
BIN_SPECS="
ldd: ldd
ldconfig: ldconfig
gdb: gdb
gdbserver: gdbserver
"
# Compile a trivial C program with $CC; exit status reports whether the
# toolchain can build C at all.
test_c() {
cat <<-EOT | "${CC:-false}" $CFLAGS -o /dev/null -x c - 2>/dev/null
#include <stdio.h>
int main(int argc, char **argv)
{
printf("Hello, world!\n");
return 0;
}
EOT
}
# Same smoke test for the C++ compiler.
test_cxx() {
cat <<-EOT | "${CXX:-false}" $CFLAGS -o /dev/null -x c++ - 2>/dev/null
#include <iostream>
using namespace std;
int main()
{
cout << "Hello, world!" << endl;
return 0;
}
EOT
}
# Succeeds when the compiler accepts -msoft-float for FP code, i.e. the
# target supports software floating point.
test_softfloat() {
cat <<-EOT | "$CC" $CFLAGS -msoft-float -o /dev/null -x c - 2>/dev/null
int main(int argc, char **argv)
{
double a = 0.1;
double b = 0.2;
double c = (a + b) / (a * b);
return 1;
}
EOT
}
# Detects a uClibc toolchain by looking for a real (non-symlink)
# ld*-uClibc*.so* loader in the sysroot's library directories.
test_uclibc() {
local sysroot="$("$CC" $CFLAGS -print-sysroot 2>/dev/null)"
if [ -d "${sysroot:-$TOOLCHAIN}" ]; then
local lib
for lib in "${sysroot:-$TOOLCHAIN}"/{lib,usr/lib,usr/local/lib}/ld*-uClibc*.so*; do
if [ -f "$lib" ] && [ ! -h "$lib" ]; then
return 0
fi
done
fi
return 1
}
# Tests whether the toolchain supports $1 ("c", "c++", "soft*float", or a
# libc feature such as lfs/ipv6/rpc/locale/wchar/threads). Exit status is
# the result. glibc/eglibc is assumed to support all libc features; uClibc
# features are read from its generated uClibc_config.h.
test_feature() {
local feature="$1"; shift
# find compilers, libc type
probe_cc
probe_cxx
probe_libc
# common toolchain feature tests
case "$feature" in
c) test_c; return $? ;;
c++) test_cxx; return $? ;;
soft*) test_softfloat; return $? ;;
esac
# assume eglibc/glibc supports all libc features
if [ "$LIBC_TYPE" != "uclibc" ]; then
return 0
fi
# uclibc feature tests
local inc
local sysroot="$("$CC" "$@" -muclibc -print-sysroot 2>/dev/null)"
for inc in "include" "usr/include" "usr/local/include"; do
local conf="${sysroot:-$TOOLCHAIN}/$inc/bits/uClibc_config.h"
if [ -f "$conf" ]; then
case "$feature" in
lfs) grep -q '__UCLIBC_HAS_LFS__ 1' "$conf"; return $?;;
ipv6) grep -q '__UCLIBC_HAS_IPV6__ 1' "$conf"; return $?;;
rpc) grep -q '__UCLIBC_HAS_RPC__ 1' "$conf"; return $?;;
locale) grep -q '__UCLIBC_HAS_LOCALE__ 1' "$conf"; return $?;;
wchar) grep -q '__UCLIBC_HAS_WCHAR__ 1' "$conf"; return $?;;
threads) grep -q '__UCLIBC_HAS_THREADS__ 1' "$conf"; return $?;;
esac
fi
done
return 1
}
# Prints the shared objects belonging to library spec $1 (a key from
# LIB_SPECS), searching every directory in the compiler's LIBRARY_PATH.
# Returns nonzero when the spec is unknown or no preprocessor was found.
find_libs() {
local spec="$(echo "$LIB_SPECS" | sed -ne "s#^[[:space:]]*$1:##ip")"
if [ -n "$spec" ] && probe_cpp; then
local libdir libdirs
# Extract LIBRARY_PATH from the compiler's verbose output.
for libdir in $(
"$CPP" $CFLAGS -v -x c /dev/null 2>&1 | \
sed -ne 's#:# #g; s#^LIBRARY_PATH=##p'
); do
if [ -d "$libdir" ]; then
libdirs="$libdirs $(cd "$libdir"; pwd)/"
fi
done
local pattern
for pattern in $(eval echo $spec); do
find $libdirs -name "$pattern.so*" | sort -u
done
return 0
fi
return 1
}
# Prints the executables belonging to program spec $1 (a key from BIN_SPECS),
# searching the sysroot bin directories plus the compiler's COMPILER_PATH.
find_bins() {
local spec="$(echo "$BIN_SPECS" | sed -ne "s#^[[:space:]]*$1:##ip")"
if [ -n "$spec" ] && probe_cpp; then
local sysroot="$("$CPP" -print-sysroot)"
local bindir bindirs
for bindir in $(
echo "${sysroot:-$TOOLCHAIN}/bin";
echo "${sysroot:-$TOOLCHAIN}/usr/bin";
echo "${sysroot:-$TOOLCHAIN}/usr/local/bin";
"$CPP" $CFLAGS -v -x c /dev/null 2>&1 | \
sed -ne 's#:# #g; s#^COMPILER_PATH=##p'
); do
if [ -d "$bindir" ]; then
bindirs="$bindirs $(cd "$bindir"; pwd)/"
fi
done
local pattern
for pattern in $(eval echo $spec); do
find $bindirs -name "$pattern" | sort -u
done
return 0
fi
return 1
}
# Writes wrapper script $1 around compiler $2: when linking (or -l/-L args
# are present) it injects STAGING_DIR include/lib paths and an rpath-link.
wrap_bin_cc() {
local out="$1"
local bin="$2"
echo '#!/bin/sh' > "$out"
echo 'for arg in "$@"; do' >> "$out"
echo ' case "$arg" in -l*|-L*|-shared|-static)' >> "$out"
echo -n ' exec "'"$bin"'" '"$CFLAGS"' ${STAGING_DIR:+' >> "$out"
echo -n '-idirafter "$STAGING_DIR/usr/include" ' >> "$out"
echo -n '-L "$STAGING_DIR/usr/lib" ' >> "$out"
echo '-Wl,-rpath-link,"$STAGING_DIR/usr/lib"} "$@" ;;' >> "$out"
echo ' esac' >> "$out"
echo 'done' >> "$out"
echo -n 'exec "'"$bin"'" '"$CFLAGS"' ${STAGING_DIR:+' >> "$out"
echo '-idirafter "$STAGING_DIR/usr/include"} "$@"' >> "$out"
chmod +x "$out"
}
# Writes wrapper script $1 around linker $2, injecting STAGING_DIR library
# search and rpath-link paths.
wrap_bin_ld() {
local out="$1"
local bin="$2"
echo '#!/bin/sh' > "$out"
echo -n 'exec "'"$bin"'" ${STAGING_DIR:+' >> "$out"
echo -n '-L "$STAGING_DIR/usr/lib" ' >> "$out"
echo '-rpath-link "$STAGING_DIR/usr/lib"} "$@"' >> "$out"
chmod +x "$out"
}
# Writes a pass-through wrapper script $1 around tool $2.
wrap_bin_other() {
local out="$1"
local bin="$2"
echo '#!/bin/sh' > "$out"
echo 'exec "'"$bin"'" "$@"' >> "$out"
chmod +x "$out"
}
# Creates wrapper scripts in directory $1 for every executable that shares
# the C compiler's prefix, dispatching to the appropriate wrap_bin_* writer.
# Existing non-wrapper files in $1 are preserved as "<name>.bin".
wrap_bins() {
if probe_cc; then
mkdir -p "$1" || return 1
local cmd
for cmd in "${CC%-*}-"*; do
if [ -x "$cmd" ]; then
local out="$1/${cmd##*/}"
local bin="$cmd"
# If the target exists and is not already a wrapper, move it aside
# and point the wrapper at the moved binary.
if [ -x "$out" ] && ! grep -q STAGING_DIR "$out"; then
mv "$out" "$out.bin"
bin='$(dirname "$0")/'"${out##*/}"'.bin'
fi
case "${cmd##*/}" in
*-*cc|*-*cc-*|*-*++|*-*++-*|*-cpp)
wrap_bin_cc "$out" "$bin"
;;
*-ld)
wrap_bin_ld "$out" "$bin"
;;
*)
wrap_bin_other "$out" "$bin"
;;
esac
fi
done
return 0
fi
return 1
}
# Generates an OpenWrt .config for target $1 describing this external
# toolchain (target name, prefix, libc, library/binary file specs, detected
# features), then inflates it with `make defconfig`. With no argument,
# prints the OpenWrt targets matching the toolchain's CPU architecture.
# Refuses to overwrite an existing .config.
print_config() {
local mktarget="$1"
local mksubtarget
local target="$("$CC" $CFLAGS -dumpmachine)"
local cpuarch="${target%%-*}"
local prefix="${CC##*/}"; prefix="${prefix%-*}-"
local config="${0%/scripts/*}/.config"
# if no target specified, print choice list and exit
if [ -z "$mktarget" ]; then
# prepare metadata
if [ ! -f "${0%/scripts/*}/tmp/.targetinfo" ]; then
"${0%/*}/scripts/config/mconf" prepare-tmpinfo
fi
local mktargets=$(
sed -ne "
/^Target: / { h };
/^Target-Arch: $cpuarch\$/ { x; s#^Target: ##p }
" "${0%/scripts/*}/tmp/.targetinfo" | sort -u
)
# Drop parent targets that are shadowed by a subtarget entry.
for mktarget in $mktargets; do
case "$mktarget" in */*)
mktargets=$(echo "$mktargets" | sed -e "/^${mktarget%/*}\$/d")
esac
done
if [ -n "$mktargets" ]; then
echo "Available targets:" >&2
echo $mktargets >&2
else
echo -e "Could not find a suitable OpenWrt target for " >&2
echo -e "CPU architecture '$cpuarch' - you need to " >&2
echo -e "define one first!" >&2
fi
return 1
fi
# bail out if there is a .config already
if [ -f "${0%/scripts/*}/.config" ]; then
echo "There already is a .config file, refusing to overwrite!" >&2
return 1
fi
# Split "target/subtarget" form into its two components.
case "$mktarget" in */*)
mksubtarget="${mktarget#*/}"
mktarget="${mktarget%/*}"
;; esac
echo "CONFIG_TARGET_${mktarget}=y" > "$config"
if [ -n "$mksubtarget" ]; then
echo "CONFIG_TARGET_${mktarget}_${mksubtarget}=y" >> "$config"
fi
# Feature probes -> config symbols.
if test_feature "softfloat"; then
echo "CONFIG_SOFT_FLOAT=y" >> "$config"
else
echo "# CONFIG_SOFT_FLOAT is not set" >> "$config"
fi
if test_feature "ipv6"; then
echo "CONFIG_IPV6=y" >> "$config"
else
echo "# CONFIG_IPV6 is not set" >> "$config"
fi
if test_feature "locale"; then
echo "CONFIG_BUILD_NLS=y" >> "$config"
else
echo "# CONFIG_BUILD_NLS is not set" >> "$config"
fi
echo "CONFIG_DEVEL=y" >> "$config"
echo "CONFIG_EXTERNAL_TOOLCHAIN=y" >> "$config"
echo "CONFIG_TOOLCHAIN_ROOT=\"$TOOLCHAIN\"" >> "$config"
echo "CONFIG_TOOLCHAIN_PREFIX=\"$prefix\"" >> "$config"
echo "CONFIG_TARGET_NAME=\"$target\"" >> "$config"
if [ "$LIBC_TYPE" != glibc ]; then
echo "CONFIG_TOOLCHAIN_LIBC=\"$LIBC_TYPE\"" >> "$config"
fi
# Emit file specs (paths relative to the toolchain root) per library.
local lib
for lib in C RT PTHREAD GCC STDCPP SSP GFORTRAN GOMP; do
local file
local spec=""
local llib="$(echo "$lib" | sed -e 's#.*#\L&#')"
for file in $(find_libs "$lib"); do
spec="${spec:+$spec }$(echo "$file" | sed -e "s#^$TOOLCHAIN#.#")"
done
if [ -n "$spec" ]; then
echo "CONFIG_PACKAGE_lib${llib}=y" >> "$config"
echo "CONFIG_LIB${lib}_FILE_SPEC=\"$spec\"" >> "$config"
else
echo "# CONFIG_PACKAGE_lib${llib} is not set" >> "$config"
fi
done
# Likewise for bundled binaries (ldd, ldconfig).
local bin
for bin in LDD LDCONFIG; do
local file
local spec=""
local lbin="$(echo "$bin" | sed -e 's#.*#\L&#')"
for file in $(find_bins "$bin"); do
spec="${spec:+$spec }$(echo "$file" | sed -e "s#^$TOOLCHAIN#.#")"
done
if [ -n "$spec" ]; then
echo "CONFIG_PACKAGE_${lbin}=y" >> "$config"
echo "CONFIG_${bin}_FILE_SPEC=\"$spec\"" >> "$config"
else
echo "# CONFIG_PACKAGE_${lbin} is not set" >> "$config"
fi
done
# inflate
make -C "${0%/scripts/*}" defconfig
return 0
}
# Locates the cross C compiler under $TOOLCHAIN (first non-symlink *-*cc* in
# bin/usr/bin/usr/local/bin) and caches its absolute path in $CC.
# Returns nonzero when no compiler is found.
probe_cc() {
if [ -z "$CC" ]; then
local bin
for bin in "bin" "usr/bin" "usr/local/bin"; do
local cmd
for cmd in "$TOOLCHAIN/$bin/"*-*cc*; do
if [ -x "$cmd" ] && [ ! -h "$cmd" ]; then
CC="$(cd "${cmd%/*}"; pwd)/${cmd##*/}"
return 0
fi
done
done
return 1
fi
return 0
}
# Same as probe_cc but for the C++ compiler (*-*++*), cached in $CXX.
probe_cxx() {
if [ -z "$CXX" ]; then
local bin
for bin in "bin" "usr/bin" "usr/local/bin"; do
local cmd
for cmd in "$TOOLCHAIN/$bin/"*-*++*; do
if [ -x "$cmd" ] && [ ! -h "$cmd" ]; then
CXX="$(cd "${cmd%/*}"; pwd)/${cmd##*/}"
return 0
fi
done
done
return 1
fi
return 0
}
# Same as probe_cc but for the preprocessor (*-cpp*), cached in $CPP.
probe_cpp() {
if [ -z "$CPP" ]; then
local bin
for bin in "bin" "usr/bin" "usr/local/bin"; do
local cmd
for cmd in "$TOOLCHAIN/$bin/"*-cpp*; do
if [ -x "$cmd" ] && [ ! -h "$cmd" ]; then
CPP="$(cd "${cmd%/*}"; pwd)/${cmd##*/}"
return 0
fi
done
done
return 1
fi
return 0
}
# Detects the libc implementation ("uclibc" or "glibc") once and caches it
# in $LIBC_TYPE. Anything that is not uClibc is treated as glibc.
probe_libc() {
if [ -z "$LIBC_TYPE" ]; then
if test_uclibc; then
LIBC_TYPE="uclibc"
else
LIBC_TYPE="glibc"
fi
fi
return 0
}
# Command-line dispatch: consume arguments left to right; each action option
# performs its task and exits. Falls through to --help on no/unknown args.
while [ -n "$1" ]; do
	arg="$1"; shift
	case "$arg" in
		--toolchain)
			[ -d "$1" ] || {
				echo "Toolchain directory '$1' does not exist." >&2
				exit 1
			}
			TOOLCHAIN="$(cd "$1"; pwd)"; shift
		;;
		--cflags)
			CFLAGS="${CFLAGS:+$CFLAGS }$1"; shift
		;;
		--print-libc)
			if probe_cc; then
				probe_libc
				echo "$LIBC_TYPE"
				exit 0
			fi
			echo "No C compiler found in '$TOOLCHAIN'." >&2
			exit 1
		;;
		--print-target)
			if probe_cc; then
				exec "$CC" $CFLAGS -dumpmachine
			fi
			echo "No C compiler found in '$TOOLCHAIN'." >&2
			exit 1
		;;
		--print-bin)
			if [ -z "$1" ]; then
				echo "Available programs:" >&2
				echo $(echo "$BIN_SPECS" | sed -ne 's#:.*$##p') >&2
				exit 1
			fi
			find_bins "$1" || exec "$0" --toolchain "$TOOLCHAIN" --print-bin
			exit 0
		;;
		--print-libs)
			if [ -z "$1" ]; then
				echo "Available libraries:" >&2
				echo $(echo "$LIB_SPECS" | sed -ne 's#:.*$##p') >&2
				exit 1
			fi
			find_libs "$1" || exec "$0" --toolchain "$TOOLCHAIN" --print-libs
			exit 0
		;;
		--test)
			test_feature "$1"
			exit $?
		;;
		--wrap)
			[ -n "$1" ] || exec "$0" --help
			wrap_bins "$1"
			exit $?
		;;
		--config)
			if probe_cc; then
				print_config "$1"
				exit $?
			fi
			echo "No C compiler found in '$TOOLCHAIN'." >&2
			exit 1
		;;
		-h|--help)
			me="$(basename "$0")"
			echo -e "\nUsage:\n" >&2
			echo -e "  $me --toolchain {directory} --print-libc" >&2
			echo -e "    Print the libc implementation and exit.\n" >&2
			echo -e "  $me --toolchain {directory} --print-target" >&2
			echo -e "    Print the GNU target name and exit.\n" >&2
			echo -e "  $me --toolchain {directory} --print-bin {program}" >&2
			echo -e "    Print executables belonging to given program," >&2
			echo -e "    omit program argument to get a list of names.\n" >&2
			echo -e "  $me --toolchain {directory} --print-libs {library}" >&2
			echo -e "    Print shared objects belonging to given library," >&2
			echo -e "    omit library argument to get a list of names.\n" >&2
			echo -e "  $me --toolchain {directory} --test {feature}" >&2
			echo -e "    Test given feature, exit code indicates success." >&2
			echo -e "    Possible features are 'c', 'c++', 'softfloat'," >&2
			echo -e "    'lfs', 'rpc', 'ipv6', 'wchar', 'locale' and " >&2
			echo -e "    'threads'.\n" >&2
			echo -e "  $me --toolchain {directory} --wrap {directory}" >&2
			echo -e "    Create wrapper scripts for C and C++ compiler, " >&2
			echo -e "    linker, assembler and other key executables in " >&2
			echo -e "    the directory given with --wrap.\n" >&2
			echo -e "  $me --toolchain {directory} --config {target}" >&2
			echo -e "    Analyze the given toolchain and print a suitable" >&2
			echo -e "    .config for the given target. Omit target " >&2
			echo -e "    argument to get a list of names.\n" >&2
			echo -e "  $me --help" >&2
			echo -e "    Display this help text and exit.\n\n" >&2
			echo -e "  Most commands also take a --cflags parameter which " >&2
			echo -e "  is used to specify C flags to be passed to the " >&2
			echo -e "  cross compiler when performing tests." >&2
			# Fixed typo: "paremter" -> "parameter".
			echo -e "  This parameter may be repeated multiple times." >&2
			exit 1
		;;
		*)
			echo "Unknown argument '$arg'" >&2
			# Quote "$0" so re-exec works from paths containing spaces.
			exec "$0" --help
		;;
	esac
done
exec "$0" --help
| {
"pile_set_name": "Github"
} |
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<!--NewPage-->
<HTML>
<HEAD>
<!-- Generated by javadoc (build 1.6.0_35) on Tue Oct 09 17:08:24 PDT 2012 -->
<META http-equiv="Content-Type" content="text/html; charset=UTF-8">
<TITLE>
Uses of Class com.fasterxml.jackson.databind.annotation.JsonPOJOBuilder (jackson-databind 2.1.0 API)
</TITLE>
<META NAME="date" CONTENT="2012-10-09">
<LINK REL ="stylesheet" TYPE="text/css" HREF="../../../../../../stylesheet.css" TITLE="Style">
<SCRIPT type="text/javascript">
function windowTitle()
{
if (location.href.indexOf('is-external=true') == -1) {
parent.document.title="Uses of Class com.fasterxml.jackson.databind.annotation.JsonPOJOBuilder (jackson-databind 2.1.0 API)";
}
}
</SCRIPT>
<NOSCRIPT>
</NOSCRIPT>
</HEAD>
<BODY BGCOLOR="white" onload="windowTitle();">
<HR>
<!-- ========= START OF TOP NAVBAR ======= -->
<A NAME="navbar_top"><!-- --></A>
<A HREF="#skip-navbar_top" title="Skip navigation links"></A>
<TABLE BORDER="0" WIDTH="100%" CELLPADDING="1" CELLSPACING="0" SUMMARY="">
<TR>
<TD COLSPAN=2 BGCOLOR="#EEEEFF" CLASS="NavBarCell1">
<A NAME="navbar_top_firstrow"><!-- --></A>
<TABLE BORDER="0" CELLPADDING="0" CELLSPACING="3" SUMMARY="">
<TR ALIGN="center" VALIGN="top">
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../../overview-summary.html"><FONT CLASS="NavBarFont1"><B>Overview</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../package-summary.html"><FONT CLASS="NavBarFont1"><B>Package</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../../com/fasterxml/jackson/databind/annotation/JsonPOJOBuilder.html" title="annotation in com.fasterxml.jackson.databind.annotation"><FONT CLASS="NavBarFont1"><B>Class</B></FONT></A> </TD>
<TD BGCOLOR="#FFFFFF" CLASS="NavBarCell1Rev"> <FONT CLASS="NavBarFont1Rev"><B>Use</B></FONT> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../package-tree.html"><FONT CLASS="NavBarFont1"><B>Tree</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../../deprecated-list.html"><FONT CLASS="NavBarFont1"><B>Deprecated</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../../index-all.html"><FONT CLASS="NavBarFont1"><B>Index</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../../help-doc.html"><FONT CLASS="NavBarFont1"><B>Help</B></FONT></A> </TD>
</TR>
</TABLE>
</TD>
<TD ALIGN="right" VALIGN="top" ROWSPAN=3><EM>
</EM>
</TD>
</TR>
<TR>
<TD BGCOLOR="white" CLASS="NavBarCell2"><FONT SIZE="-2">
PREV
NEXT</FONT></TD>
<TD BGCOLOR="white" CLASS="NavBarCell2"><FONT SIZE="-2">
<A HREF="../../../../../../index.html?com/fasterxml/jackson/databind/annotation//class-useJsonPOJOBuilder.html" target="_top"><B>FRAMES</B></A>
<A HREF="JsonPOJOBuilder.html" target="_top"><B>NO FRAMES</B></A>
<SCRIPT type="text/javascript">
<!--
if(window==top) {
document.writeln('<A HREF="../../../../../../allclasses-noframe.html"><B>All Classes</B></A>');
}
//-->
</SCRIPT>
<NOSCRIPT>
<A HREF="../../../../../../allclasses-noframe.html"><B>All Classes</B></A>
</NOSCRIPT>
</FONT></TD>
</TR>
</TABLE>
<A NAME="skip-navbar_top"></A>
<!-- ========= END OF TOP NAVBAR ========= -->
<HR>
<CENTER>
<H2>
<B>Uses of Class<br>com.fasterxml.jackson.databind.annotation.JsonPOJOBuilder</B></H2>
</CENTER>
<TABLE BORDER="1" WIDTH="100%" CELLPADDING="3" CELLSPACING="0" SUMMARY="">
<TR BGCOLOR="#CCCCFF" CLASS="TableHeadingColor">
<TH ALIGN="left" COLSPAN="2"><FONT SIZE="+2">
Packages that use <A HREF="../../../../../../com/fasterxml/jackson/databind/annotation/JsonPOJOBuilder.html" title="annotation in com.fasterxml.jackson.databind.annotation">JsonPOJOBuilder</A></FONT></TH>
</TR>
<TR BGCOLOR="white" CLASS="TableRowColor">
<TD><A HREF="#com.fasterxml.jackson.databind.annotation"><B>com.fasterxml.jackson.databind.annotation</B></A></TD>
<TD>Annotations that directly depend on classes in databinding bundle
(not just Jackson core) and can not be included
in Jackson core annotations package (because it can not have any
external dependencies). </TD>
</TR>
</TABLE>
<P>
<A NAME="com.fasterxml.jackson.databind.annotation"><!-- --></A>
<TABLE BORDER="1" WIDTH="100%" CELLPADDING="3" CELLSPACING="0" SUMMARY="">
<TR BGCOLOR="#CCCCFF" CLASS="TableHeadingColor">
<TH ALIGN="left" COLSPAN="2"><FONT SIZE="+2">
Uses of <A HREF="../../../../../../com/fasterxml/jackson/databind/annotation/JsonPOJOBuilder.html" title="annotation in com.fasterxml.jackson.databind.annotation">JsonPOJOBuilder</A> in <A HREF="../../../../../../com/fasterxml/jackson/databind/annotation/package-summary.html">com.fasterxml.jackson.databind.annotation</A></FONT></TH>
</TR>
</TABLE>
<P>
<TABLE BORDER="1" WIDTH="100%" CELLPADDING="3" CELLSPACING="0" SUMMARY="">
<TR BGCOLOR="#CCCCFF" CLASS="TableSubHeadingColor">
<TH ALIGN="left" COLSPAN="2">Constructors in <A HREF="../../../../../../com/fasterxml/jackson/databind/annotation/package-summary.html">com.fasterxml.jackson.databind.annotation</A> with parameters of type <A HREF="../../../../../../com/fasterxml/jackson/databind/annotation/JsonPOJOBuilder.html" title="annotation in com.fasterxml.jackson.databind.annotation">JsonPOJOBuilder</A></FONT></TH>
</TR>
<TR BGCOLOR="white" CLASS="TableRowColor">
<TD><CODE><B><A HREF="../../../../../../com/fasterxml/jackson/databind/annotation/JsonPOJOBuilder.Value.html#JsonPOJOBuilder.Value(com.fasterxml.jackson.databind.annotation.JsonPOJOBuilder)">JsonPOJOBuilder.Value</A></B>(<A HREF="../../../../../../com/fasterxml/jackson/databind/annotation/JsonPOJOBuilder.html" title="annotation in com.fasterxml.jackson.databind.annotation">JsonPOJOBuilder</A> ann)</CODE>
<BR>
</TD>
</TR>
</TABLE>
<P>
<HR>
<!-- ======= START OF BOTTOM NAVBAR ====== -->
<A NAME="navbar_bottom"><!-- --></A>
<A HREF="#skip-navbar_bottom" title="Skip navigation links"></A>
<TABLE BORDER="0" WIDTH="100%" CELLPADDING="1" CELLSPACING="0" SUMMARY="">
<TR>
<TD COLSPAN=2 BGCOLOR="#EEEEFF" CLASS="NavBarCell1">
<A NAME="navbar_bottom_firstrow"><!-- --></A>
<TABLE BORDER="0" CELLPADDING="0" CELLSPACING="3" SUMMARY="">
<TR ALIGN="center" VALIGN="top">
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../../overview-summary.html"><FONT CLASS="NavBarFont1"><B>Overview</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../package-summary.html"><FONT CLASS="NavBarFont1"><B>Package</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../../com/fasterxml/jackson/databind/annotation/JsonPOJOBuilder.html" title="annotation in com.fasterxml.jackson.databind.annotation"><FONT CLASS="NavBarFont1"><B>Class</B></FONT></A> </TD>
<TD BGCOLOR="#FFFFFF" CLASS="NavBarCell1Rev"> <FONT CLASS="NavBarFont1Rev"><B>Use</B></FONT> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../package-tree.html"><FONT CLASS="NavBarFont1"><B>Tree</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../../deprecated-list.html"><FONT CLASS="NavBarFont1"><B>Deprecated</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../../index-all.html"><FONT CLASS="NavBarFont1"><B>Index</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../../help-doc.html"><FONT CLASS="NavBarFont1"><B>Help</B></FONT></A> </TD>
</TR>
</TABLE>
</TD>
<TD ALIGN="right" VALIGN="top" ROWSPAN=3><EM>
</EM>
</TD>
</TR>
<TR>
<TD BGCOLOR="white" CLASS="NavBarCell2"><FONT SIZE="-2">
PREV
NEXT</FONT></TD>
<TD BGCOLOR="white" CLASS="NavBarCell2"><FONT SIZE="-2">
<A HREF="../../../../../../index.html?com/fasterxml/jackson/databind/annotation//class-useJsonPOJOBuilder.html" target="_top"><B>FRAMES</B></A>
<A HREF="JsonPOJOBuilder.html" target="_top"><B>NO FRAMES</B></A>
<SCRIPT type="text/javascript">
<!--
if(window==top) {
document.writeln('<A HREF="../../../../../../allclasses-noframe.html"><B>All Classes</B></A>');
}
//-->
</SCRIPT>
<NOSCRIPT>
<A HREF="../../../../../../allclasses-noframe.html"><B>All Classes</B></A>
</NOSCRIPT>
</FONT></TD>
</TR>
</TABLE>
<A NAME="skip-navbar_bottom"></A>
<!-- ======== END OF BOTTOM NAVBAR ======= -->
<HR>
Copyright © 2012 <a href="http://fasterxml.com/">FasterXML</a>. All Rights Reserved.
</BODY>
</HTML>
| {
"pile_set_name": "Github"
} |
/*
Copyright (c) 2009-2018 ARM Limited. All rights reserved.
SPDX-License-Identifier: Apache-2.0
Licensed under the Apache License, Version 2.0 (the License); you may
not use this file except in compliance with the License.
You may obtain a copy of the License at
www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an AS IS BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
NOTICE: This file has been modified by Nordic Semiconductor ASA.
*/
#ifndef SYSTEM_NRF52_H
#define SYSTEM_NRF52_H

/* CMSIS-style system header for the nRF52: declares the core-clock
 * variable and the standard SystemInit()/SystemCoreClockUpdate() hooks
 * implemented in the matching system_nrf52.c. */

#ifdef __cplusplus
extern "C" {
#endif

#include <stdint.h>

/* Current core clock frequency in Hz; set by SystemInit() and refreshed
 * by SystemCoreClockUpdate(). */
extern uint32_t SystemCoreClock;    /*!< System Clock Frequency (Core Clock) */

/**
 * Initialize the system
 *
 * @param  none
 * @return none
 *
 * @brief  Setup the microcontroller system.
 *         Initialize the System and update the SystemCoreClock variable.
 */
extern void SystemInit (void);

/**
 * Update SystemCoreClock variable
 *
 * @param  none
 * @return none
 *
 * @brief  Updates the SystemCoreClock with current core Clock
 *         retrieved from cpu registers.
 */
extern void SystemCoreClockUpdate (void);

#ifdef __cplusplus
}
#endif

#endif /* SYSTEM_NRF52_H */
| {
"pile_set_name": "Github"
} |
import RTE from "@coralproject/rte";
import sinon from "sinon";
import { ERROR_CODES } from "coral-common/errors";
import { InvalidRequestError } from "coral-framework/lib/errors";
import { GQLResolver } from "coral-framework/schema";
import {
act,
createResolversStub,
findParentWithType,
waitForElement,
within,
} from "coral-framework/testHelpers";
import { commenters, settings, stories } from "../../fixtures";
import create from "./create";
// Test fixture: the base settings with character-count validation turned on
// (minimum 3, maximum 10 characters) so the char-count code paths are
// exercised by every test in this file.
const settingsWithCharCount = {
  ...settings,
  charCount: {
    enabled: true,
    min: 3,
    max: 10,
  },
};
/**
 * Builds the test environment for the reply-form character-count tests:
 * mounts the stream with `settingsWithCharCount`, opens the reply form on
 * the first comment and resolves its RTE instance and enclosing <form>.
 *
 * @param resolver extra GraphQL resolver stubs; entries in `resolver.Query`
 *   override the default stubbed settings/viewer/stream resolvers.
 * @param options  `muteNetworkErrors` silences network errors that a test
 *   deliberately provokes.
 */
async function createTestRenderer(
  resolver: any = {},
  options: { muteNetworkErrors?: boolean } = {}
) {
  const resolvers = {
    ...resolver,
    Query: {
      settings: sinon.stub().returns(settingsWithCharCount),
      viewer: sinon.stub().returns(commenters[0]),
      stream: sinon.stub().returns(stories[0]),
      ...resolver.Query,
    },
  };
  const { testRenderer, context } = create({
    // Set this to true, to see graphql responses.
    logNetwork: false,
    muteNetworkErrors: options.muteNetworkErrors,
    resolvers,
    initLocalState: (localRecord) => {
      // Point the embed at the fixture story.
      localRecord.setValue(stories[0].id, "storyID");
    },
  });
  // Wait until the first comment of the fixture story has rendered.
  const comment = await waitForElement(() =>
    within(testRenderer.root).getByTestID("comment-comment-0")
  );
  // Open reply form.
  act(() =>
    within(comment).getByTestID("comment-reply-button").props.onClick()
  );
  const rte = await waitForElement(
    () =>
      findParentWithType(
        within(comment).getByLabelText("Write a reply"),
        // We'll use the RTE component here as an exception because the
        // jsdom does not support all of what is needed for rendering the
        // Rich Text Editor.
        RTE
      )!
  );
  const form = findParentWithType(rte, "form")!;
  return {
    testRenderer,
    context,
    comment,
    rte,
    form,
  };
}
it("validate min", async () => {
  // Bodies shorter than the 3-character minimum keep Submit disabled;
  // once the body is long enough the button becomes enabled.
  const { rte: editor, form: replyForm } = await createTestRenderer();
  const submitDisabled = () =>
    within(replyForm).queryByText("Submit")?.props.disabled;
  act(() => editor.props.onChange("ab"));
  expect(submitDisabled()).toBeTruthy();
  act(() => editor.props.onChange("abcdefg"));
  expect(submitDisabled()).toBeFalsy();
});
it("validate max", async () => {
  // Submit stays disabled when the body exceeds the 10-character maximum
  // or is empty (below the minimum), and is enabled for a valid length.
  const { rte: editor, form: replyForm } = await createTestRenderer();
  const submitDisabled = () =>
    within(replyForm).queryByText("Submit")?.props.disabled;
  act(() => editor.props.onChange("abcdefghijklmnopqrst"));
  expect(submitDisabled()).toBeTruthy();
  act(() => editor.props.onChange(""));
  expect(submitDisabled()).toBeTruthy();
  act(() => editor.props.onChange("abcdefg"));
  expect(submitDisabled()).toBeFalsy();
});
it("show remaining characters", async () => {
  // The counter shows max (10) minus the current body length and goes
  // negative once the limit is exceeded.
  const { rte: editor, form: replyForm } = await createTestRenderer();
  const expectCounter = (text: string) =>
    waitForElement(() => within(replyForm).getByText(text));
  act(() => editor.props.onChange("abc"));
  await expectCounter("7 characters remaining");
  act(() => editor.props.onChange("abcdefghijkl"));
  await expectCounter("-2 characters remaining");
});
it("update from server upon specific char count error", async () => {
  // Run the same scenario for both server-side body-length error codes.
  for (const errorCode of [
    ERROR_CODES.COMMENT_BODY_EXCEEDS_MAX_LENGTH,
    ERROR_CODES.COMMENT_BODY_TOO_SHORT,
  ]) {
    let createCommentCalled = false;
    const { rte, form } = await createTestRenderer(
      createResolversStub<GQLResolver>({
        Mutation: {
          // Always reject the reply with the char-count error under test.
          createCommentReply: () => {
            createCommentCalled = true;
            throw new InvalidRequestError({
              code: errorCode,
              param: "input.body",
            });
          },
        },
        Query: {
          // After the failed submit, refetched settings carry a tighter
          // max (5 instead of 10), simulating the server having changed
          // the limits since the form was rendered.
          settings: () => {
            if (!createCommentCalled) {
              return settingsWithCharCount;
            }
            return {
              ...settingsWithCharCount,
              charCount: {
                enabled: true,
                min: 3,
                max: 5,
              },
            };
          },
        },
      }),
      { muteNetworkErrors: true }
    );
    act(() => rte.props.onChange("abc"));
    await waitForElement(() =>
      within(form).getByText("7 characters remaining")
    );
    act(() => rte.props.onChange("abcdefgh"));
    await waitForElement(() =>
      within(form).getByText("2 characters remaining")
    );
    await act(async () => form.props.onSubmit());
    // The counter now reflects the updated max of 5 (8 chars -> -3).
    await waitForElement(() =>
      within(form).getByText("-3 characters remaining")
    );
    // Body submit error should be displayed.
    await waitForElement(() => within(form).getByText(errorCode));
    act(() => rte.props.onChange("abcde"));
    // Body submit error should disappear when form gets dirty.
    expect(within(form).queryByText(errorCode)).toBeNull();
  }
});
| {
"pile_set_name": "Github"
} |
/*BEGIN_LEGAL
Intel Open Source License
Copyright (c) 2002-2015 Intel Corporation. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer. Redistributions
in binary form must reproduce the above copyright notice, this list of
conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution. Neither the name of
the Intel Corporation nor the names of its contributors may be used to
endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE INTEL OR
ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
END_LEGAL */
// This little application tests recursive calls in probes mode.
//
#include <stdio.h>
/* Guards the one-shot recursion in Bar(): 0 until Bar() has recursed
 * once, 1 afterwards.  The original declaration relied on "implicit
 * int" ("static done = 0;"), which was removed in C99, so the type is
 * now spelled out explicitly. */
static int done = 0;

/*
 * Prints its four integer arguments.  On the very first call it first
 * recurses once with each argument increased by 20 (so the inner frame
 * prints before the outer one), exercising probe handling of recursive
 * calls.
 */
void Bar( int a, int b, int c, int d )
{
    if ( done == 0 )
    {
        done = 1;
        Bar(a+20, b+20, c+20, d+20);
    }

    printf( "Bar: %d, %d, %d, %d\n", a,b,c,d );
}
| {
"pile_set_name": "Github"
} |
<ol>
<li>Specificeer de tangentiële eenheid.</li>
<li>Geef het eerste punt op de cirkellijn op.</li>
<li>Geef het tweede punt op de cirkellijn op.</li>
</ol>
| {
"pile_set_name": "Github"
} |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package org.apache.sentry.spi;
/**
 * Implementation of SomeTestProvider used by the SPI tests, registered
 * under the id {@code "B"}.  The class implements both the provider and
 * its factory, acting as its own singleton provider instance.
 */
public class SomeTestProviderImplB implements SomeTestProviderFactory, SomeTestProvider {

  /**
   * Factory hook: returns this same object rather than constructing a
   * new provider.
   */
  @Override
  public Provider create() {
    return this;
  }

  /** Identifier under which this provider is looked up. */
  @Override
  public String getId() {
    return "B";
  }

  /** No resources to release. */
  @Override
  public void close() {
  }
}
| {
"pile_set_name": "Github"
} |
<?php
/**
*
* PHP 5.3 or better is required
*
* @package Global functions
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE FREEBSD PROJECT OR CONTRIBUTORS BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*
* @author Dmitri Snytkine <cms@lampcms.com>
* @copyright 2005-2012 (or current year) Dmitri Snytkine
* @license http://www.gnu.org/licenses/gpl-3.0.txt The GNU General Public License (GPL) version 3
* @link http://cms.lampcms.com Lampcms.com project
* @version Release: @package_version@
*
*
*/
/**
 *
 * Generates block with list of
 * links to questions.
 *
 * This block is shown or user details page
 * when viewing user profile
 *
 * @author Dmitri Snytkine
 *
 */
class tplUquestions extends \Lampcms\Template\Fast
{
// Pre-render hook: mutates the row array before it is merged into $tpl.
// Appends a "[closed]" marker to the title of closed questions.
protected static function func(&$a)
{
if (!empty($a['a_closed'])) {
$a['title'] = $a['title'] . ' [closed]';
}
}

// Default values for the placeholders substituted into $tpl below;
// the trailing comment on each entry is its 1-based position (%N$s).
protected static $vars = array(
'_id' => '0', //1
'i_votes' => '0', //2
'i_ans' => '0', //3
'i_views' => '0', //4
'url' => '', //5
'intro' => '', //6
'title' => '', //7
'tags_c' => '', //8
'tags_html' => '', //9
'status' => 'un', //10
'username' => '', //11
'avtr' => '', //12
'hts' => '', //13
'i_ts' => '', //14
'vw_s' => 's', //15
'v_s' => '', //16
'ans_s' => '' //17
);

// HTML template for one question row: vote/answer/view counters followed
// by the linked title, tags and "asked" timestamp.  {_..._} tokens are
// resolved by the template engine; %N$s tokens come from $vars above.
protected static $tpl = '
<div class="qrow" id="q-%1$s">
<div class="qstats2">
<div class="sqr1 vc2">
%2$s
<br>
vote%16$s
</div>
<div class="sqr1 %10$s">
<span class="middle">
%3$s
<br>answer%17$s
</span>
</div>
<div class="sqr1 vws2">
%4$s
<br>
view%15$s
</div>
</div>
<!-- //statsdiv -->
<div class="smmry2">
<a href="{_WEB_ROOT_}/{_viewquestion_}/{_QID_PREFIX_}%1$s/%5$s" class="ql" title="%6$s">%7$s</a><br>
<div class="tgs2 %8$s">%9$s</div>
<div class="asked2"><span rel="in">@@asked@@ </span><span title="%13$s" class="ts" rel="time">%13$s</span></div>
</div>
<!-- //smmry -->
</div>
<!-- //qs -->';
}
| {
"pile_set_name": "Github"
} |
#
# Copyright World Wide Web Consortium, (Massachusetts Institute of
# Technology, Institut National de Recherche en Informatique et en
# Automatique, Keio University).
#
# All Rights Reserved.
#
# Please see the full Copyright clause at
# <http://www.w3.org/Consortium/Legal/copyright-software.html>
#
# $Id: test005.nt,v 1.2 2005-08-04 09:53:18 jeremy_carroll Exp $
#
#####################################################################
<http://example.org/node> <http://example.org/property> "chat" .
| {
"pile_set_name": "Github"
} |
aliases:
- &filter-only-master
branches:
only:
- master
- &restore-cache
restore_cache:
keys:
- dependencies-{{ .Branch }}-{{ checksum "website/yarn.lock" }}
# fallback to using the latest cache if no exact match is found
- dependencies-{{ .Branch }}-
version: 2
jobs:
build-website:
docker:
- image: circleci/node:9.11
working_directory: ~/profilo
steps:
- checkout
- *restore-cache
# Download and cache dependencies
- run:
name: Install Docusaurus
command: |
cd website
yarn --non-interactive --cache-folder ~/.cache/yarn
- save_cache:
paths:
- website/node_modules
- ~/.cache/yarn
key: dependencies-{{ .Branch }}-{{ checksum "website/yarn.lock" }}
- run:
name: Build website
command: |
cd website
yarn --non-interactive --cache-folder ~/.cache/yarn build
- persist_to_workspace:
root: website
paths:
- "*/*"
deploy-website:
docker:
- image: circleci/node:9.11
working_directory: ~/profilo
steps:
- checkout
- *restore-cache
- attach_workspace:
at: website
- run:
name: Configure GitHub Bot
command: |
git config --global user.email "docusaurus-bot@users.noreply.github.com"
git config --global user.name "Website Deployment Script"
echo "machine github.com login docusaurus-bot password $GITHUB_TOKEN" > ~/.netrc
- deploy:
name: Deploy Website
command: |
echo "Deploying website..."
cd website
GIT_USER=docusaurus-bot USE_SSH= yarn run deploy
workflows:
version: 2
website_workflow:
jobs:
- build-website
- deploy-website:
filters: *filter-only-master
requires:
- build-website
| {
"pile_set_name": "Github"
} |
// Copyright (C) 2004-2019 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library. This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License along
// with this library; see the file COPYING3. If not see
// <http://www.gnu.org/licenses/>.
// 27.6.1.2.3 basic_istream::operator>>
#include <istream>
#include <sstream>
#include <testsuite_hooks.h>
// Verifies that integer extraction honours the noskipws manipulator:
// with skipws cleared, operator>>(int&) must not consume the leading
// space of " 43", so the numeric parse sees no digits and failbit is set.
void test03()
{
  using namespace std;

  // template<_CharT, _Traits>
  //  basic_istream& operator>>(ios_base& (*pf) (ios_base&))
  {
    int i = 0;
    std::wistringstream iss(L" 43");
    // noskipws is applied first, so the following int extraction starts
    // at the space character and fails immediately.
    iss >> std::noskipws >> i;
    VERIFY ( !iss ); //should set failbit
  }

  // template<_CharT, _Traits>
  //  basic_istream& operator>>(basic_ios& (*pf) (basic_ios&))

  // template<_CharT, _Traits>
  //  basic_istream& operator>>(basic_istream& (*pf) (basic_istream&))
}
// Test driver: run the noskipws extraction check.
int main()
{
  test03();
  return 0;
}
| {
"pile_set_name": "Github"
} |
I just love coffee and tea, they really help me to think, I am filled with more ideas, the more of them I drink.
| {
"pile_set_name": "Github"
} |
//
// This file was automatically generated by wxrc, do not edit by hand.
//
#include <wx/wxprec.h>
#ifdef __BORLANDC__
#pragma hdrstop
#endif
#include <wx/filesys.h>
#include <wx/fs_mem.h>
#include <wx/xrc/xmlres.h>
#include <wx/xrc/xh_all.h>
#if wxCHECK_VERSION(2,8,5) && wxABI_VERSION >= 20805
#define XRC_ADD_FILE(name, data, size, mime) \
wxMemoryFSHandler::AddFileWithMimeType(name, data, size, mime)
#else
#define XRC_ADD_FILE(name, data, size, mime) \
wxMemoryFSHandler::AddFile(name, data, size)
#endif
static size_t xml_res_size_0 = 137;
static unsigned char xml_res_file_0[] = {
60,63,120,109,108,32,118,101,114,115,105,111,110,61,34,49,46,48,34,32,101,
110,99,111,100,105,110,103,61,34,85,84,70,45,56,34,63,62,10,60,114,101,
115,111,117,114,99,101,32,120,109,108,110,115,61,34,104,116,116,112,58,
47,47,119,119,119,46,119,120,119,105,100,103,101,116,115,46,111,114,103,
47,119,120,120,114,99,34,62,10,32,32,60,33,45,45,32,72,97,110,100,108,101,
114,32,71,101,110,101,114,97,116,105,111,110,32,105,115,32,79,78,32,45,
45,62,10,60,47,114,101,115,111,117,114,99,101,62,10};
void wxC3999InitBitmapResources()
{
// Check for memory FS. If not present, load the handler:
{
wxMemoryFSHandler::AddFile(wxT("XRC_resource/dummy_file"), wxT("dummy one"));
wxFileSystem fsys;
wxFSFile *f = fsys.OpenFile(wxT("memory:XRC_resource/dummy_file"));
wxMemoryFSHandler::RemoveFile(wxT("XRC_resource/dummy_file"));
if (f) delete f;
else wxFileSystem::AddHandler(new wxMemoryFSHandlerBase);
}
XRC_ADD_FILE(wxT("XRC_resource/newclasswizard_gizmos_bitmaps.cpp$C__src_codelite_Gizmos_newclasswizard_gizmos_bitmaps.xrc"), xml_res_file_0, xml_res_size_0, wxT("text/xml"));
wxXmlResource::Get()->Load(wxT("memory:XRC_resource/newclasswizard_gizmos_bitmaps.cpp$C__src_codelite_Gizmos_newclasswizard_gizmos_bitmaps.xrc"));
}
| {
"pile_set_name": "Github"
} |
{
"mulNonConst" : {
"_info" : {
"comment" : "",
"filledwith" : "testeth 1.6.0-alpha.0-11+commit.978e68d2",
"lllcversion" : "Version: 0.5.0-develop.2018.11.9+commit.9709dfe0.Linux.g++",
"source" : "src/GeneralStateTestsFiller/stArgsZeroOneBalance/mulNonConstFiller.yml",
"sourceHash" : "d8f40f5ab3def8fe9e6d1a48db6ade7703d9fa231827194c6c8ad7a5fe4dabf8"
},
"env" : {
"currentCoinbase" : "0x2adc25665018aa1fe0e6bc666dac8fc2697ff9ba",
"currentDifficulty" : "0x20000",
"currentGasLimit" : "0x0f4240",
"currentNumber" : "0x01",
"currentTimestamp" : "0x03e8",
"previousHash" : "0x5e20a0453cecd065ea59c37ac63e079ee08998b6045136a8ce6635c7912ec0b6"
},
"post" : {
"Byzantium" : [
{
"hash" : "0x6bab4b3839365276b48912fa927acbe32bf845e2009cdd0a59df34465844181b",
"indexes" : {
"data" : 0,
"gas" : 0,
"value" : 0
},
"logs" : "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"
},
{
"hash" : "0x18d9fee6772091d646af39fed5d8805a32d6a0650beddb43990cd123e40e15fe",
"indexes" : {
"data" : 0,
"gas" : 0,
"value" : 1
},
"logs" : "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"
}
],
"Constantinople" : [
{
"hash" : "0x37f9b3c1d2f19e04d1ca8d5e24e1a48734370b3ccd554d18791515c2d0fcdd5c",
"indexes" : {
"data" : 0,
"gas" : 0,
"value" : 0
},
"logs" : "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"
},
{
"hash" : "0x18d9fee6772091d646af39fed5d8805a32d6a0650beddb43990cd123e40e15fe",
"indexes" : {
"data" : 0,
"gas" : 0,
"value" : 1
},
"logs" : "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"
}
],
"ConstantinopleFix" : [
{
"hash" : "0x6bab4b3839365276b48912fa927acbe32bf845e2009cdd0a59df34465844181b",
"indexes" : {
"data" : 0,
"gas" : 0,
"value" : 0
},
"logs" : "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"
},
{
"hash" : "0x18d9fee6772091d646af39fed5d8805a32d6a0650beddb43990cd123e40e15fe",
"indexes" : {
"data" : 0,
"gas" : 0,
"value" : 1
},
"logs" : "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"
}
]
},
"pre" : {
"0x095e7baea6a6c7c4c2dfeb977efac326af552d87" : {
"balance" : "0x00",
"code" : "0x73095e7baea6a6c7c4c2dfeb977efac326af552d873173095e7baea6a6c7c4c2dfeb977efac326af552d873102600055",
"nonce" : "0x00",
"storage" : {
}
},
"0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b" : {
"balance" : "0x0de0b6b3a7640000",
"code" : "",
"nonce" : "0x00",
"storage" : {
}
}
},
"transaction" : {
"data" : [
"0x"
],
"gasLimit" : [
"0x061a80"
],
"gasPrice" : "0x01",
"nonce" : "0x00",
"secretKey" : "0x45a915e4d060149eb4365960e6a7a45f334393093061116b197e3240065ff2d8",
"to" : "0x095e7baea6a6c7c4c2dfeb977efac326af552d87",
"value" : [
"0x00",
"0x01"
]
}
}
}
| {
"pile_set_name": "Github"
} |
<?xml version="1.0" encoding="utf-8"?>
<!-- Copyright (c) 2015, The Linux Foundation. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
* Neither the name of The Linux Foundation nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-->
<!-- These resources are around just to allow their values to be customized
for different hardware and product builds. Do not translate. -->
<resources xmlns:xliff="urn:oasis:names:tc:xliff:document:1.2">
<!-- Configuration for displaying 14 digit IMEI Number -->
</resources>
| {
"pile_set_name": "Github"
} |
[environment variables with path]
WEBOTS_LIBRARY_PATH = lib/ros:$(WEBOTS_LIBRARY_PATH)
| {
"pile_set_name": "Github"
} |
<?php
/**
* Magento
*
* NOTICE OF LICENSE
*
* This source file is subject to the Open Software License (OSL 3.0)
* that is bundled with this package in the file LICENSE.txt.
* It is also available through the world-wide-web at this URL:
* http://opensource.org/licenses/osl-3.0.php
* If you did not receive a copy of the license and are unable to
* obtain it through the world-wide-web, please send an email
* to license@magento.com so we can send you a copy immediately.
*
* DISCLAIMER
*
* Do not edit or add to this file if you wish to upgrade Magento to newer
* versions in the future. If you wish to customize Magento for your
* needs please refer to http://www.magento.com for more information.
*
* @category Mage
* @package Mage_Adminhtml
* @copyright Copyright (c) 2006-2020 Magento, Inc. (http://www.magento.com)
* @license http://opensource.org/licenses/osl-3.0.php Open Software License (OSL 3.0)
*/
/**
 * Controller for CMS Page Link Widget plugin
 *
 * @category   Mage
 * @package    Mage_Adminhtml
 * @author     Magento Core Team <core@magentocommerce.com>
 */
class Mage_Adminhtml_Cms_Page_WidgetController extends Mage_Adminhtml_Controller_Action
{
    /**
     * Chooser Source action
     *
     * Renders the CMS page chooser grid and writes its HTML directly to
     * the response body (AJAX endpoint, no layout rendering).
     */
    public function chooserAction()
    {
        // Unique id passed by the widget form so several chooser
        // instances on one page do not collide.
        $uniqId = $this->getRequest()->getParam('uniq_id');
        $pagesGrid = $this->getLayout()->createBlock('adminhtml/cms_page_widget_chooser', '', array(
            'id' => $uniqId,
        ));
        $this->getResponse()->setBody($pagesGrid->toHtml());
    }

    /**
     * Check is allowed access to action
     *
     * @return bool
     */
    protected function _isAllowed()
    {
        return Mage::getSingleton('admin/session')->isAllowed('cms/widget_instance');
    }
}
"pile_set_name": "Github"
} |
//
// Generated by class-dump 3.5 (64 bit).
//
// class-dump is Copyright (C) 1997-1998, 2000-2001, 2004-2013 by Steve Nygard.
//
#import "NSObject.h"
@class UIActionSheet;
@protocol UIActionSheetDelegate <NSObject>
@optional
- (void)actionSheet:(UIActionSheet *)arg1 didDismissWithButtonIndex:(long long)arg2;
- (void)actionSheet:(UIActionSheet *)arg1 willDismissWithButtonIndex:(long long)arg2;
- (void)didPresentActionSheet:(UIActionSheet *)arg1;
- (void)willPresentActionSheet:(UIActionSheet *)arg1;
- (void)actionSheetCancel:(UIActionSheet *)arg1;
- (void)actionSheet:(UIActionSheet *)arg1 clickedButtonAtIndex:(long long)arg2;
@end
| {
"pile_set_name": "Github"
} |
# 概述
在ES6引入模板字符串之前,如果大家需要在代码中创建一个包含变量的字符串,那么代码将非常难读,并且也非常容易出错。下面就是一个简单的例子,在例子中我们将输入的3个参数拼接在一起,然后返回给调用方。
```javascript
//在模板字符串出现前的写法,写法冗长而且难于理解
function returnSomthing(param1, param2, param3){
return "something return based on input("
+ "param1:" + param1.toString() + "---"
+ "param2:" + param2.toString() + "---"
+ "param3:" + param3.toString();
}
//使用模板字符串的写法,
function returnSomthingNew(param1, param2, param3){
return `something return based on input(
param1:${param1}---param2:${param2}---param3:${param3}`;
}
```
通过上面的代码,你可以看出在使用模板字符串之后,代码变得非常简洁,而且也容易阅读。下面是在使用模板字符串时候的一些注意点
* 模板字符串是使用 **`** 引用起来的,如果在最终生成的字符串中包含`字符,那么需要使用\字符进行转义
* 模板字符串中对于变量的引用是通过${}来进行的
* 使用模板字符串的时候，${}中可以放入任意合法的JavaScript表达式。${}中的表达式会在字符串求值时按普通表达式规则计算（并非通过eval执行），其结果被转换为字符串后拼接进最终结果
# 标签模板
模板字符串可以跟在一个函数名之后,该函数将被调用来处理跟在后面的模板字符串,这个功能被称为标签模板。被调用的函数将接收到下面的参数列表(literals,...values)。其中literals是一个数组,内容是模板字符串中不需要进行变量替换的部分,而values就是每个替换变量经过eval之后的值,下面是一个具体的例子。
```javascript
var total = 30;
var msg = transform`The total number is ${total}`;
total = 20;
var msg1 = transform`The total number is ${total}`;
//in our sample
//literals = ["The total number is ", ""]
//values = [30]
function transform(literals,...values){
var output = "";
for (var index = 0; index < values.length; index++){
if (parseInt(values[index]) >= 30){
output += literals[index] + "high value";
}else{
output += literals[index] + "low value";
}
}
output += literals[index];
return output;
}
console.log(msg); //output The total number is high value
console.log(msg1);//output The total number is low value
``` | {
"pile_set_name": "Github"
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.