code
stringlengths 3
1.01M
| repo_name
stringlengths 5
116
| path
stringlengths 3
311
| language
stringclasses 30
values | license
stringclasses 15
values | size
int64 3
1.01M
|
|---|---|---|---|---|---|
'use strict';
// Integration test for GET /api/signins: the endpoint must answer 200
// with a JSON body that deserializes to an array.
var should = require('should');
var app = require('../../app');
var request = require('supertest');

describe('GET /api/signins', function() {
  it('should respond with JSON array', function(done) {
    // Named completion handler instead of an inline .end() callback.
    var assertJsonArray = function(err, res) {
      if (err) {
        return done(err);
      }
      res.body.should.be.instanceof(Array);
      done();
    };

    request(app)
      .get('/api/signins')
      .expect(200)
      .expect('Content-Type', /json/)
      .end(assertJsonArray);
  });
});
|
xBlazeTECH/FHS-AMS
|
server/api/signin/signin.spec.js
|
JavaScript
|
gpl-2.0
| 465
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @author Petr Ivanov
*
*/
/* *********************************************************************** */
#include "events.h"
#include "utils.h"
#include "ncai.h"
#ifdef POSIX
#include <stdlib.h>
#include <time.h>
#else
#include <windows.h>
#include <CRTDBG.H>
#endif
#ifdef POSIX
#define SLEEP_UNIVERSAL(_x_) { timespec delay = {(_x_)/1000, 1000000*((_x_)%1000)}; nanosleep(&delay, NULL); }
#else // #ifdef POSIX
#define SLEEP_UNIVERSAL(_x_) Sleep((_x_))
#endif // #ifdef POSIX
static int g_stop_thread = 0;
static volatile int g_resume_agent_thread = 0;
//static jthread g_thread_jthread;
//static jthread g_agent_thread;
static volatile int no_opt = 1; //used to prevent inlining;
//static int counter = 0;
static bool test = false;
static bool util = false;
static bool flag = false;
static const char test_case_name[] = "GetThreadHandle02n";
static void Test1(JNIEnv *env, jobject obj);
static void Test2(JNIEnv *env, jobject obj);
static void Test3(JNIEnv *env, jobject obj);
//static void JNICALL test_function(jvmtiEnv*, JNIEnv*, void*);
//static bool CheckSuspend(ncaiEnv*, ncaiThread);
extern "C" JNIEXPORT void JNICALL
Java_ncai_funcs_GetThreadHandle02n_resumeagent(JNIEnv *env, jclass cls)
{
    // Both JNI parameters are unused; cast to void to silence warnings.
    (void)env;
    (void)cls;

    // Release the agent thread spinning in callbackException.
    g_resume_agent_thread = 1;
}
extern "C" JNIEXPORT jboolean JNICALL
Java_ncai_funcs_GetThreadHandle02n_stopsignal(JNIEnv *env, jclass cls)
{
    // Both JNI parameters are unused; cast to void to silence warnings.
    (void)env;
    (void)cls;

    // Tell the Java side whether the agent has requested the test
    // thread to stop spinning.
    return g_stop_thread != 0;
}
/*
 * JNI entry point: calls back into the Java method test_java_func2() on
 * the same object, adding one native frame to the thread's stack.
 */
extern "C" JNIEXPORT void JNICALL Java_ncai_funcs_GetThreadHandle02n_TestFunction
(JNIEnv *env, jobject obj)
{
    fprintf(stderr, "thread - native TestFunction\n");
    jclass clazz = env->GetObjectClass(obj);
    if (!clazz)
    {
        fprintf(stderr, "\tnative: native TestFunction: GetObjectClass failed\n");
        return;
    }
    // Note: the method is looked up with GetMethodID (instance method),
    // although the error message below mentions GetStaticMethodID.
    jmethodID mid = env->GetMethodID(clazz, "test_java_func2", "()V");
    if (!mid)
    {
        fprintf(stderr, "\tnative: native TestFunction: GetStaticMethodID for 'test_java_func2' failed\n");
        return;
    }
    env->CallVoidMethod(obj, mid);
    return;
}
/*
 * JNI entry point for the Java test thread: kicks off the pure-native
 * call chain Test1 -> Test2 -> Test3, building a known native stack.
 */
extern "C" JNIEXPORT void JNICALL Java_ncai_funcs_GetThreadHandle02n_TestFunction1
(JNIEnv *env, jobject obj)
{
    fprintf(stderr, "thread - native TestFunction1\n");
    Test1(env, obj);
}
/*
 * JVMTI THREAD_START callback.  Only reports the start of the thread
 * named "java_thread" during the live phase; the remainder of the body
 * (launching a separate agent thread via RunAgentThread) is retained
 * below as commented-out legacy code.
 */
void JNICALL ThreadStart(jvmtiEnv *jvmti_env,
        JNIEnv* jni_env,
        jthread thread)
{
    //warning fix
    int w_fix = 0;
    w_fix += sizeof(jni_env);
    //
    jvmtiPhase phase;
    jvmtiError result;
    jvmtiThreadInfo tinfo;
    // Ignore thread-start events outside the live phase.
    result = jvmti_env->GetPhase(&phase);
    if (result != JVMTI_ERROR_NONE || phase != JVMTI_PHASE_LIVE)
        return;
    result = jvmti_env->GetThreadInfo(thread, &tinfo);
    if (result != JVMTI_ERROR_NONE)
        return;
    // Only the dedicated test thread is of interest.
    if (strcmp(tinfo.name, "java_thread") != 0)
        return;
    printf("ThreadStart: java_thread\n");
    /*  g_thread_jthread = jni_env->NewGlobalRef(thread);
    jclass clazz = jni_env->FindClass("java/lang/Thread");
    if (!clazz)
    {
        fprintf(stderr, "\tnative: JNI: FindClass failed\n");
        return;
    }
    jmethodID mid = jni_env->GetMethodID(clazz, "<init>", "()V");
    if (!mid)
    {
        fprintf(stderr, "\tnative: JNI: GetMethodID failed\n");
        return;
    }
    g_agent_thread = jni_env->NewObject(clazz, mid, "native_agent_thread");
    if (!g_agent_thread)
    {
        fprintf(stderr, "\tnative: JNI: NewObject failed\n");
        return;
    }
    g_agent_thread = jni_env->NewGlobalRef(g_agent_thread);
    result = jvmti_env->GetThreadInfo(g_agent_thread, &tinfo);
    if (result != JVMTI_ERROR_NONE)
    {
        fprintf(stderr, "\tnative: JNI: GetThreadInfo failed\n");
    }
    result = jvmti_env->RunAgentThread(g_agent_thread, test_function, NULL, JVMTI_THREAD_NORM_PRIORITY);
    if (result != JVMTI_ERROR_NONE)
    {
        fprintf(stderr, "\tnative: jvmti: RunAgentThread failed\n");
        return;
    }*/
}
/*
JNIEXPORT jint
JNICALL Agent_OnLoad(JavaVM *jvm, char *options, void *reserved)
{
jint res;
jvmtiEnv* jvmti_env = NULL;
res = jvm->GetEnv((void**)&jvmti_env, JVMTI_VERSION_1_0);
if (res != JNI_OK || jvmti_env == NULL)
return JNI_ERR;
jvmtiEventCallbacks callbacks;
callbacks.ThreadStart=&ThreadStart;
jvmti_env->SetEventCallbacks(&callbacks, sizeof(callbacks));
jvmti_env->SetEventNotificationMode(JVMTI_ENABLE, JVMTI_EVENT_THREAD_START, NULL);
return JNI_OK;
}
*/
/* *********************************************************************** */
/*
 * Agent entry point.  Registers the ThreadStart callback plus the
 * exception and VM-death callbacks (wired in via the cb_exc / cb_death
 * framework macros) and delegates the remaining setup to the shared
 * test harness helper func_for_Agent_OnLoad.
 * prms_AGENT_ONLOAD / check_AGENT_ONLOAD are framework macros that
 * expand to the (vm, options, reserved) parameter list and its checks.
 */
JNIEXPORT jint JNICALL Agent_OnLoad(prms_AGENT_ONLOAD)
{
    Callbacks CB;
    CB.cbThreadStart = &ThreadStart;
    check_AGENT_ONLOAD;
    // Events this test subscribes to.
    jvmtiEvent events[] = { JVMTI_EVENT_EXCEPTION, JVMTI_EVENT_THREAD_START, JVMTI_EVENT_VM_DEATH };
    cb_exc;
    cb_death;
    return func_for_Agent_OnLoad(vm, options, reserved, &CB,
        events, sizeof(events)/sizeof(jvmtiEvent), test_case_name, DEBUG_OUT);
}
/* *********************************************************************** */
/*
 * JVMTI EXCEPTION callback — the heart of the test.
 *
 * On the first exception thrown from "special_method" it:
 *   1. obtains the NCAI environment through the
 *      org.apache.harmony.vm.GetExtensionEnv JVMTI extension,
 *   2. waits until the Java test thread signals readiness
 *      (g_resume_agent_thread, set from Test3 / resumeagent),
 *   3. calls ncai->GetThreadHandle(NULL, ...) and expects
 *      NCAI_ERROR_INVALID_THREAD for the NULL thread argument,
 *   4. sets g_stop_thread so the spinning Java thread can exit.
 *
 * Fixes vs. the previous revision: "nas not been found" typo in the
 * extension-lookup error message, and the missing trailing '\n' on the
 * GetThreadHandle error message (every other diagnostic ends with one).
 */
void JNICALL callbackException(jvmtiEnv *jvmti_env, JNIEnv* jni_env,
        jthread thread, jmethodID method,
        jlocation location, jobject exception,
        jmethodID catch_method, jlocation catch_location)
{
    check_EXCPT;
    if (flag) return;   // run the body only once

    /*
     * Function separate all other exceptions in all other method
     */
    if (!check_phase_and_method_debug(jvmti_env, method, SPP_LIVE_ONLY,
            "special_method", DEBUG_OUT)) return;

    flag = true;
    util = true;
    fprintf(stderr, "agent... \n");
    SLEEP_UNIVERSAL(300);

    ////////////////////ncai env get
    jvmtiError err;
    ncaiError ncai_err;
    jvmtiExtensionFunctionInfo* ext_info = NULL;
    jint ext_count = 0;

    // NOTE(review): ext_info is never released via Deallocate(); the
    // test exits shortly after, but confirm against other harness tests.
    err = jvmti_env->GetExtensionFunctions(&ext_count, &ext_info);
    if (err != JVMTI_ERROR_NONE)
    {
        fprintf(stderr, "test_function: GetExtensionFunctions() returned error: %d, '%s'\n",
            err, get_jvmti_eror_text(err));
        test = false;
        return;
    }
    fprintf(stderr, "agent... \n");

    if (ext_count == 0 || ext_info == NULL)
    {
        fprintf(stderr, "test_function: GetExtensionFunctions() returned no extensions\n");
        test = false;
        return;
    }

    jvmtiExtensionFunction get_ncai_func = NULL;
    fprintf(stderr, "agent... \n");

    // Locate the Harmony-specific extension that returns the NCAI env.
    for (int k = 0; k < ext_count; k++)
    {
        if (strcmp(ext_info[k].id, "org.apache.harmony.vm.GetExtensionEnv") == 0)
        {
            get_ncai_func = ext_info[k].func;
            break;
        }
    }
    fprintf(stderr, "agent... \n");

    if (get_ncai_func == NULL)
    {
        fprintf(stderr, "test_function: GetNCAIEnvironment() has not been found among JVMTI extensions\n");
        test = false;
        return;
    }

    ncaiEnv* ncai_env = NULL;
    fprintf(stderr, "agent... \n");

    err = get_ncai_func(jvmti_env, &ncai_env, NCAI_VERSION_1_0);
    if (err != JVMTI_ERROR_NONE)
    {
        fprintf(stderr, "test_function: get_ncai_func() returned error: %d, '%s'\n",
            err, get_jvmti_eror_text(err));
        test = false;
        return;
    }
    if (ncai_env == NULL)
    {
        fprintf(stderr, "test_function: get_ncai_func() returned NULL environment\n");
        test = false;
        return;
    }
    fprintf(stderr, "agent... \n");
    ///////////////////////////////////

    ncaiThread ncai_thread;

    // Wait until the Java test thread is spinning in native code.
    while(!g_resume_agent_thread)
        SLEEP_UNIVERSAL(200);
    g_resume_agent_thread = 0;

    test = true;
    fprintf(stderr, "calling ncai->GetThreadHandle()...\n");
    // Negative test: a NULL thread must yield NCAI_ERROR_INVALID_THREAD.
    ncai_err = ncai_env->GetThreadHandle(NULL, &ncai_thread);
    if (ncai_err != NCAI_ERROR_INVALID_THREAD)
    {
        fprintf(stderr, "ncai->GetThreadHandle() returned invalid error: %d\n", ncai_err);
        test = false;
        g_stop_thread = 1;
        return;
    }

    g_stop_thread = 1;
    SLEEP_UNIVERSAL(400);
}
/*
 * JVMTI VM_DEATH callback: hands the accumulated `test` and `util`
 * flags to the harness helper, which reports pass/fail for this case.
 */
void JNICALL callbackVMDeath(prms_VMDEATH)
{
    check_VMDEATH;
    func_for_callback_VMDeath(jni_env, jvmti_env, test_case_name, test, util);
}
/* *********************************************************************** */
/*
 * First frame of the native call chain.  The never-taken recursive call
 * (no_opt is always non-zero) only exists to stop the compiler from
 * inlining this frame away.
 */
void Test1(JNIEnv *env, jobject obj)
{
    if (!no_opt)
        Test1(env, obj);

    fprintf(stderr, "thread - pure native Test1\n");
    Test2(env, obj);
}
/*
 * Second frame of the native call chain; same anti-inlining trick as
 * Test1, then falls through to Test3.
 */
void Test2(JNIEnv *env, jobject obj)
{
    if (!no_opt)
        Test2(env, obj);

    fprintf(stderr, "thread - pure native Test2\n");
    Test3(env, obj);
}
/*
 * Deepest frame of the native call chain.  Looks up Thread.sleep (not
 * currently invoked — see the commented-out call below), signals the
 * agent thread to proceed, then spins until the agent sets
 * g_stop_thread.
 */
void Test3(JNIEnv *env, jobject obj)
{
    // Anti-inlining guard; no_opt is always non-zero, so never recurses.
    if(!no_opt)
        Test3(env, obj);
    fprintf(stderr, "thread - pure native Test3\n");
    jclass clazz = env->GetObjectClass(obj);
    if (!clazz)
    {
        fprintf(stderr, "\tnative: native TestFunction1: GetObjectClass failed\n");
        return;
    }
    jmethodID mid = env->GetStaticMethodID(clazz, "sleep", "(J)V");
    if (!mid)
    {
        fprintf(stderr, "\tnative: native TestFunction1: GetStaticMethodID for 'sleep' failed\n");
        return;
    }
    // Let the agent (callbackException) resume: the native stack is now
    // fully built.
    g_resume_agent_thread = 1;
    // Busy-wait until the agent finishes its GetThreadHandle check.
    while(!g_stop_thread)
    {
        fprintf(stderr, "thread... \n");
        SLEEP_UNIVERSAL(100);
        // env->CallStaticVoidMethod(clazz, mid, 500);
    }
    return;
}
|
skyHALud/codenameone
|
Ports/iOSPort/xmlvm/apache-harmony-6.0-src-r991881/drlvm/vm/tests/ncai/funcs/GetThreadHandle/GetThreadHandle02n/GetThreadHandle02n.cpp
|
C++
|
gpl-2.0
| 10,560
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Unit tests for the search engine."""
__revision__ = \
"$Id$"
from invenio.testutils import InvenioTestCase
from invenio import search_engine
from invenio.testutils import make_test_suite, run_test_suite
from invenio.config import CFG_CERN_SITE
class TestMiscUtilityFunctions(InvenioTestCase):
    """Test whatever non-data-specific utility functions are essential."""

    def test_ziplist2x2(self):
        """search engine - ziplist 2 x 2"""
        # ziplist pairs elements position-wise into lists (not tuples).
        self.assertEqual(search_engine.ziplist([1, 2], [3, 4]), [[1, 3], [2, 4]])

    def test_ziplist3x3(self):
        """search engine - ziplist 3 x 3"""
        self.assertEqual(search_engine.ziplist([1, 2, 3], ['a', 'b', 'c'], [9, 8, 7]),
                         [[1, 'a', 9], [2, 'b', 8], [3, 'c', 7]])
class TestWashQueryParameters(InvenioTestCase):
    """Test for washing of search query parameters."""

    def test_wash_pattern(self):
        """search engine - washing of query patterns"""
        self.assertEqual("Ellis, J", search_engine.wash_pattern('Ellis, J'))
        #self.assertEqual("ell", search_engine.wash_pattern('ell*'))

    def test_wash_dates_from_tuples(self):
        """search engine - washing of date arguments from (year,month,day) tuples"""
        self.assertEqual(search_engine.wash_dates(d1y=1980, d1m=1, d1d=28, d2y=2003, d2m=2, d2d=3),
                         ('1980-01-28 00:00:00', '2003-02-03 00:00:00'))
        # Zero month/day components fall back to range boundaries
        # (month 0 -> 01, day 0 -> 31), hence the nominal '2003-02-31'.
        self.assertEqual(search_engine.wash_dates(d1y=1980, d1m=0, d1d=28, d2y=2003, d2m=2, d2d=0),
                         ('1980-01-28 00:00:00', '2003-02-31 00:00:00'))

    def test_wash_dates_from_datetexts(self):
        """search engine - washing of date arguments from datetext strings"""
        self.assertEqual(search_engine.wash_dates(d1="1980-01-28 01:02:03", d2="1980-01-29 12:34:56"),
                         ('1980-01-28 01:02:03', '1980-01-29 12:34:56'))
        # Missing endpoints default to the extreme representable dates.
        self.assertEqual(search_engine.wash_dates(d1="1980-01-28 01:02:03"),
                         ('1980-01-28 01:02:03', '9999-12-31 00:00:00'))
        self.assertEqual(search_engine.wash_dates(d2="1980-01-29 12:34:56"),
                         ('0000-01-01 00:00:00', '1980-01-29 12:34:56'))

    def test_wash_dates_from_both(self):
        """search engine - washing of date arguments from both datetext strings and (year,month,day) tuples"""
        # datetext mode takes precedence, d1* should be ignored
        self.assertEqual(search_engine.wash_dates(d1="1980-01-28 01:02:03", d1y=1980, d1m=1, d1d=28),
                         ('1980-01-28 01:02:03', '9999-12-31 00:00:00'))
        # datetext mode takes precedence, d2 missing, d2* should be ignored
        self.assertEqual(search_engine.wash_dates(d1="1980-01-28 01:02:03", d2y=2003, d2m=2, d2d=3),
                         ('1980-01-28 01:02:03', '2003-02-03 00:00:00'))
class TestQueryParser(InvenioTestCase):
    """Test of search pattern (or query) parser."""

    def _check(self, p, f, m, result_wanted):
        "Internal checking function calling create_basic_search_units."
        # Each basic search unit is [operator, pattern, field, type] where
        # operator is '+', '-' or '|' and type is 'w' (word), 'a' (phrase)
        # or 'r' (regexp).
        result_obtained = search_engine.create_basic_search_units(None, p, f, m)
        assert result_obtained == result_wanted, \
               'obtained %s instead of %s' % (repr(result_obtained),
                                              repr(result_wanted))
        return

    def test_parsing_single_word_query(self):
        "search engine - parsing single word queries"
        self._check('word', '', None, [['+', 'word', '', 'w']])

    def test_parsing_single_word_with_boolean_operators(self):
        "search engine - parsing single word queries"
        self._check('+word', '', None, [['+', 'word', '', 'w']])
        self._check('-word', '', None, [['-', 'word', '', 'w']])
        self._check('|word', '', None, [['|', 'word', '', 'w']])

    def test_parsing_single_word_in_field(self):
        "search engine - parsing single word queries in a logical field"
        self._check('word', 'title', None, [['+', 'word', 'title', 'w']])

    def test_parsing_single_word_in_tag(self):
        "search engine - parsing single word queries in a physical tag"
        self._check('word', '500', None, [['+', 'word', '500', 'a']])

    def test_parsing_query_with_commas(self):
        "search engine - parsing queries with commas"
        self._check('word,word', 'title', None,
                    [['+', 'word,word', 'title', 'a']])

    def test_parsing_exact_phrase_query(self):
        "search engine - parsing exact phrase"
        self._check('"the word"', 'title', None,
                    [['+', 'the word', 'title', 'a']])

    def test_parsing_exact_phrase_query_unbalanced(self):
        "search engine - parsing unbalanced exact phrase"
        # An unterminated quote is treated literally, word by word.
        self._check('"the word', 'title', None,
                    [['+', '"the', 'title', 'w'],
                     ['+', 'word', 'title', 'w']])

    def test_parsing_exact_phrase_query_in_any_field(self):
        "search engine - parsing exact phrase in any field"
        self._check('"the word"', '', None,
                    [['+', 'the word', '', 'a']])

    def test_parsing_partial_phrase_query(self):
        "search engine - parsing partial phrase"
        # Single quotes produce a SQL-LIKE style %...% pattern.
        self._check("'the word'", 'title', None,
                    [['+', '%the word%', 'title', 'a']])

    def test_parsing_partial_phrase_query_unbalanced(self):
        "search engine - parsing unbalanced partial phrase"
        self._check("'the word", 'title', None,
                    [['+', "'the", 'title', 'w'],
                     ['+', "word", 'title', 'w']])

    def test_parsing_partial_phrase_query_in_any_field(self):
        "search engine - parsing partial phrase in any field"
        self._check("'the word'", '', None,
                    [['+', '%the word%', '', 'a']])

    def test_parsing_regexp_query(self):
        "search engine - parsing regex matches"
        self._check("/the word/", 'title', None,
                    [['+', 'the word', 'title', 'r']])

    def test_parsing_regexp_query_unbalanced(self):
        "search engine - parsing unbalanced regexp"
        self._check("/the word", 'title', None,
                    [['+', '/the', 'title', 'w'],
                     ['+', 'word', 'title', 'w']])

    def test_parsing_regexp_query_in_any_field(self):
        "search engine - parsing regexp searches in any field"
        self._check("/the word/", '', None,
                    [['+', 'the word', '', 'r']])

    def test_parsing_boolean_query(self):
        "search engine - parsing boolean query with several words"
        self._check("muon kaon ellis cern", '', None,
                    [['+', 'muon', '', 'w'],
                     ['+', 'kaon', '', 'w'],
                     ['+', 'ellis', '', 'w'],
                     ['+', 'cern', '', 'w']])

    def test_parsing_boolean_query_with_word_operators(self):
        "search engine - parsing boolean query with word operators"
        self._check("muon and kaon or ellis not cern", '', None,
                    [['+', 'muon', '', 'w'],
                     ['+', 'kaon', '', 'w'],
                     ['|', 'ellis', '', 'w'],
                     ['-', 'cern', '', 'w']])

    def test_parsing_boolean_query_with_symbol_operators(self):
        "search engine - parsing boolean query with symbol operators"
        self._check("muon +kaon |ellis -cern", '', None,
                    [['+', 'muon', '', 'w'],
                     ['+', 'kaon', '', 'w'],
                     ['|', 'ellis', '', 'w'],
                     ['-', 'cern', '', 'w']])

    def test_parsing_boolean_query_with_symbol_operators_and_spaces(self):
        "search engine - parsing boolean query with operators and spaces"
        self._check("muon + kaon | ellis - cern", '', None,
                    [['+', 'muon', '', 'w'],
                     ['+', 'kaon', '', 'w'],
                     ['|', 'ellis', '', 'w'],
                     ['-', 'cern', '', 'w']])

    def test_parsing_boolean_query_with_symbol_operators_and_no_spaces(self):
        "search engine - parsing boolean query with operators and no spaces"
        # Without surrounding spaces the symbols are part of the word.
        self._check("muon+kaon|ellis-cern", '', None,
                    [['+', 'muon+kaon|ellis-cern', '', 'w']])

    def test_parsing_structured_query_existing(self):
        "search engine - parsing structured query, existing index"
        self._check("title:muon", '', None,
                    [['+', 'muon', 'title', 'w']])

    # This method is defined only on non-CERN installations, where the
    # 'experiment' field exists without a word index.
    if not CFG_CERN_SITE:
        def test_parsing_structured_query_existing_field(self):
            "search engine - parsing structured query, existing field, but no word index"
            self._check("experiment:LHC", '', None,
                        [['+', 'LHC', 'experiment', 'a']])

    def test_parsing_structured_query_nonexisting(self):
        "search engine - parsing structured query, non-existing index"
        self._check("foo:muon", '', None,
                    [['+', 'foo:muon', '', 'w']])

    def test_parsing_structured_query_marc(self):
        "search engine - parsing structured query, MARC-tag defined index"
        self._check("245:muon", '', None,
                    [['+', 'muon', '245', 'a']])

    def test_parsing_combined_structured_query(self):
        "search engine - parsing combined structured query"
        self._check("title:muon author:ellis", '', None,
                    [['+', 'muon', 'title', 'w'],
                     ['+', 'ellis', 'author', 'w']])

    def test_parsing_structured_regexp_query(self):
        "search engine - parsing structured regexp query"
        self._check("title:/(one|two)/", '', None,
                    [['+', '(one|two)', 'title', 'r']])

    def test_parsing_structured_regexp_marc_query(self):
        "search engine - parsing structured regexp MARC query"
        self._check("245__a:/(one|two)/", '', None,
                    [['+', '(one|two)', '245__a', 'r']])

    def test_parsing_structured_regexp_refersto_query(self):
        "search engine - parsing structured regexp refersto query"
        self._check("refersto:/(one|two)/", '', None,
                    [['+', '(one|two)', 'refersto', 'r']])

    def test_parsing_combined_structured_query_in_a_field(self):
        "search engine - parsing structured query in a field"
        # Explicit field prefixes override the default field argument.
        self._check("title:muon author:ellis", 'abstract', None,
                    [['+', 'muon', 'title', 'w'],
                     ['+', 'ellis', 'author', 'w']])

    def test_parsing_colons_and_spaces_well_structured(self):
        "search engine - parsing query with colons and spaces, well structured"
        self._check("title: muon author:ellis keyword: kaon", 'abstract', None,
                    [['+', 'muon', 'title', 'w'],
                     ['+', 'ellis', 'author', 'w'],
                     ['+', 'kaon', 'keyword', 'w']])

    def test_parsing_colons_and_spaces_badly_structured(self):
        "search engine - parsing query with colons and spaces, badly structured"
        # Unknown prefix 'foo' is dropped; both words search the field.
        self._check("foo: bar", 'title', None,
                    [['+', 'foo', 'title', 'w'],
                     ['+', 'bar', 'title', 'w']])

    def test_parsing_colons_and_spaces_for_phrase_query(self):
        "search engine - parsing query with colons and spaces, phrase query"
        self._check('author: "Ellis, J"', None, None,
                    [['+', 'Ellis, J', 'author', 'a']])

    def test_search_pattern_with_equal_sign(self):
        "search engine - parsing query with equal sign"
        self._check('title:"s = 630"', None, None,
                    [['+', 's = 630', 'title', 'a']])
# Aggregate the test classes into the suite run by the Invenio harness.
TEST_SUITE = make_test_suite(TestWashQueryParameters,
                             TestQueryParser,
                             TestMiscUtilityFunctions)

if __name__ == "__main__":
    run_test_suite(TEST_SUITE)
|
mvesper/invenio
|
modules/websearch/lib/search_engine_unit_tests.py
|
Python
|
gpl-2.0
| 12,575
|
/*
* Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation. Oracle designates this
* particular file as subject to the "Classpath" exception as provided
* by Oracle in the LICENSE file that accompanied this code.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package javafx.scene;
import com.sun.javafx.cursor.CursorFrame;
/**
 * Test shim exposing package-private {@link Cursor} internals
 * (activate/deactivate/getCurrentFrame) to test code outside the
 * javafx.scene package.
 */
public class CursorShim {

    /** Delegates to the package-private {@code Cursor.activate()}. */
    public static void activate(Cursor c) {
        c.activate();
    }

    /** Delegates to the package-private {@code Cursor.deactivate()}. */
    public static void deactivate(Cursor c) {
        c.deactivate();
    }

    /** Returns the cursor's current frame via the package-private accessor. */
    public static CursorFrame getCurrentFrame(Cursor c) {
        return c.getCurrentFrame();
    }

    /**
     * Creates a named Cursor whose getCurrentFrame() always throws;
     * useful where only the name matters to the test.
     */
    public static Cursor getCursor(String name) {
        return new Cursor(name) {
            @Override
            CursorFrame getCurrentFrame() {
                throw new UnsupportedOperationException("Not supported yet.");
            }
        };
    }
}
|
teamfx/openjfx-9-dev-rt
|
modules/javafx.graphics/src/shims/java/javafx/scene/CursorShim.java
|
Java
|
gpl-2.0
| 1,816
|
/*
* Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
package sun.jvm.hotspot.memory;
import java.io.*;
import java.util.*;
import sun.jvm.hotspot.debugger.*;
import sun.jvm.hotspot.gc_interface.*;
import sun.jvm.hotspot.gc_implementation.g1.G1CollectedHeap;
import sun.jvm.hotspot.gc_implementation.parallelScavenge.*;
import sun.jvm.hotspot.oops.*;
import sun.jvm.hotspot.types.*;
import sun.jvm.hotspot.runtime.*;
/**
 * Serviceability-Agent mirror of the VM's {@code Universe} class: reads
 * the target VM's heap pointer, well-known thread groups, primitive
 * array klasses and compressed oop/klass encoding parameters out of the
 * VM's type database.
 */
public class Universe {
  private static AddressField collectedHeapField;
  private static VirtualConstructor heapConstructor;
  private static sun.jvm.hotspot.types.OopField mainThreadGroupField;
  private static sun.jvm.hotspot.types.OopField systemThreadGroupField;

  // single dimensional primitive array klasses
  private static sun.jvm.hotspot.types.AddressField boolArrayKlassField;
  private static sun.jvm.hotspot.types.AddressField byteArrayKlassField;
  private static sun.jvm.hotspot.types.AddressField charArrayKlassField;
  private static sun.jvm.hotspot.types.AddressField intArrayKlassField;
  private static sun.jvm.hotspot.types.AddressField shortArrayKlassField;
  private static sun.jvm.hotspot.types.AddressField longArrayKlassField;
  private static sun.jvm.hotspot.types.AddressField singleArrayKlassField;
  private static sun.jvm.hotspot.types.AddressField doubleArrayKlassField;

  // Compressed oop / compressed klass pointer encoding parameters.
  private static AddressField narrowOopBaseField;
  private static CIntegerField narrowOopShiftField;
  private static AddressField narrowKlassBaseField;
  private static CIntegerField narrowKlassShiftField;

  // Mirrors the VM's narrow-oop encoding modes.
  public enum NARROW_OOP_MODE {
    UnscaledNarrowOop,
    ZeroBasedNarrowOop,
    HeapBasedNarrowOop
  }

  static {
    // Defer field lookup until the VM/type database is available.
    VM.registerVMInitializedObserver(new Observer() {
        public void update(Observable o, Object data) {
          initialize(VM.getVM().getTypeDataBase());
        }
      });
  }

  // Resolves all static fields against the target VM's "Universe" type.
  // Field name strings must match the VM's vmStructs declarations exactly.
  private static synchronized void initialize(TypeDataBase db) {
    Type type = db.lookupType("Universe");

    collectedHeapField = type.getAddressField("_collectedHeap");

    heapConstructor = new VirtualConstructor(db);
    heapConstructor.addMapping("GenCollectedHeap", GenCollectedHeap.class);
    heapConstructor.addMapping("ParallelScavengeHeap", ParallelScavengeHeap.class);
    heapConstructor.addMapping("G1CollectedHeap", G1CollectedHeap.class);

    mainThreadGroupField = type.getOopField("_main_thread_group");
    systemThreadGroupField = type.getOopField("_system_thread_group");

    boolArrayKlassField = type.getAddressField("_boolArrayKlassObj");
    byteArrayKlassField = type.getAddressField("_byteArrayKlassObj");
    charArrayKlassField = type.getAddressField("_charArrayKlassObj");
    intArrayKlassField = type.getAddressField("_intArrayKlassObj");
    shortArrayKlassField = type.getAddressField("_shortArrayKlassObj");
    longArrayKlassField = type.getAddressField("_longArrayKlassObj");
    singleArrayKlassField = type.getAddressField("_singleArrayKlassObj");
    doubleArrayKlassField = type.getAddressField("_doubleArrayKlassObj");

    narrowOopBaseField = type.getAddressField("_narrow_oop._base");
    narrowOopShiftField = type.getCIntegerField("_narrow_oop._shift");
    narrowKlassBaseField = type.getAddressField("_narrow_klass._base");
    narrowKlassShiftField = type.getCIntegerField("_narrow_klass._shift");
  }

  public Universe() {
  }

  /** Human-readable description of a narrow-oop encoding mode. */
  public static String narrowOopModeToString(NARROW_OOP_MODE mode) {
    switch (mode) {
    case UnscaledNarrowOop:
      return "32-bits Oops";
    case ZeroBasedNarrowOop:
      return "zero based Compressed Oops";
    case HeapBasedNarrowOop:
      return "Compressed Oops with base";
    }
    return "";
  }

  /**
   * Returns a typed wrapper for the VM's collected heap; falls back to a
   * plain CollectedHeap if the concrete heap type is not one of the
   * registered mappings.
   */
  public CollectedHeap heap() {
    try {
      return (CollectedHeap) heapConstructor.instantiateWrapperFor(collectedHeapField.getValue());
    } catch (WrongTypeException e) {
      return new CollectedHeap(collectedHeapField.getValue());
    }
  }

  /** Narrow-oop base address as a long; 0 if unset (zero-based mode). */
  public static long getNarrowOopBase() {
    if (narrowOopBaseField.getValue() == null) {
      return 0;
    } else {
      // minus(null) converts the Address to its raw numeric value.
      return narrowOopBaseField.getValue().minus(null);
    }
  }

  public static int getNarrowOopShift() {
    return (int)narrowOopShiftField.getValue();
  }

  /** Narrow-klass base address as a long; 0 if unset. */
  public static long getNarrowKlassBase() {
    if (narrowKlassBaseField.getValue() == null) {
      return 0;
    } else {
      return narrowKlassBaseField.getValue().minus(null);
    }
  }

  public static int getNarrowKlassShift() {
    return (int)narrowKlassShiftField.getValue();
  }

  /** Returns "TRUE" iff "p" points into the allocated area of the heap. */
  public boolean isIn(Address p) {
    return heap().isIn(p);
  }

  /** Returns "TRUE" iff "p" points into the reserved area of the heap. */
  public boolean isInReserved(Address p) {
    return heap().isInReserved(p);
  }

  private Oop newOop(OopHandle handle) {
    return VM.getVM().getObjectHeap().newOop(handle);
  }

  public Oop mainThreadGroup() {
    return newOop(mainThreadGroupField.getValue());
  }

  public Oop systemThreadGroup() {
    return newOop(systemThreadGroupField.getValue());
  }

  // iterate through the single dimensional primitive array klasses
  // refer to basic_type_classes_do(void f(Klass*)) in universe.cpp
  public void basicTypeClassesDo(SystemDictionary.ClassVisitor visitor) {
    visitor.visit(new TypeArrayKlass(boolArrayKlassField.getValue()));
    visitor.visit(new TypeArrayKlass(byteArrayKlassField.getValue()));
    visitor.visit(new TypeArrayKlass(charArrayKlassField.getValue()));
    visitor.visit(new TypeArrayKlass(intArrayKlassField.getValue()));
    visitor.visit(new TypeArrayKlass(shortArrayKlassField.getValue()));
    visitor.visit(new TypeArrayKlass(longArrayKlassField.getValue()));
    visitor.visit(new TypeArrayKlass(singleArrayKlassField.getValue()));
    visitor.visit(new TypeArrayKlass(doubleArrayKlassField.getValue()));
  }

  public void print() { printOn(System.out); }

  public void printOn(PrintStream tty) {
    heap().printOn(tty);
  }

  // Check whether an element of a typeArrayOop with the given type must be
  // aligned 0 mod 8.  The typeArrayOop itself must be aligned at least this
  // strongly.
  public static boolean elementTypeShouldBeAligned(BasicType type) {
    return type == BasicType.T_DOUBLE || type == BasicType.T_LONG;
  }

  // Check whether an object field (static/non-static) of the given type must be
  // aligned 0 mod 8.
  public static boolean fieldTypeShouldBeAligned(BasicType type) {
    return type == BasicType.T_DOUBLE || type == BasicType.T_LONG;
  }
}
|
netroby/hotspot9
|
agent/src/share/classes/sun/jvm/hotspot/memory/Universe.java
|
Java
|
gpl-2.0
| 7,554
|
/* Common capabilities, needed by capability.o.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
*/
#include <linux/capability.h>
#include <linux/audit.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/security.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/skbuff.h>
#include <linux/netlink.h>
#include <linux/ptrace.h>
#include <linux/xattr.h>
#include <linux/hugetlb.h>
#include <linux/mount.h>
#include <linux/sched.h>
#include <linux/prctl.h>
#include <linux/securebits.h>
#include <linux/syslog.h>
#ifdef CONFIG_ANDROID_PARANOID_NETWORK
#include <linux/android_aid.h>
#endif
/*
* If a non-root user executes a setuid-root binary in
* !secure(SECURE_NOROOT) mode, then we raise capabilities.
* However if fE is also set, then the intent is for only
* the file capabilities to be applied, and the setuid-root
* bit is left on either to change the uid (plausible) or
* to get full privilege on a kernel without file capabilities
* support. So in that case we do not raise capabilities.
*
* Warn if that happens, once per boot.
*/
/*
 * Emit a once-per-boot warning when a binary carries both setuid-root
 * and effective file capabilities (see the block comment above for why
 * capabilities are not raised in that case).
 */
static void warn_setuid_and_fcaps_mixed(const char *fname)
{
	static int warned;

	if (warned)
		return;

	printk(KERN_INFO "warning: `%s' has both setuid-root and"
	       " effective capabilities. Therefore not raising all"
	       " capabilities.\n", fname);
	warned = 1;
}
/*
 * Stamp an outgoing netlink skb with the sender's current effective
 * capability set so the receiver can check permissions later.
 * Always succeeds.
 */
int cap_netlink_send(struct sock *sk, struct sk_buff *skb)
{
	NETLINK_CB(skb).eff_cap = current_cap();
	return 0;
}
/*
 * Check that the capability recorded in the skb (by cap_netlink_send)
 * includes @cap; returns 0 if so, -EPERM otherwise.
 */
int cap_netlink_recv(struct sk_buff *skb, int cap)
{
	if (!cap_raised(NETLINK_CB(skb).eff_cap, cap))
		return -EPERM;
	return 0;
}
EXPORT_SYMBOL(cap_netlink_recv);
/**
* cap_capable - Determine whether a task has a particular effective capability
* @tsk: The task to query
* @cred: The credentials to use
* @cap: The capability to check for
* @audit: Whether to write an audit message or not
*
* Determine whether the nominated task has the specified capability amongst
* its effective set, returning 0 if it does, -ve if it does not.
*
* NOTE WELL: cap_has_capability() cannot be used like the kernel's capable()
* and has_capability() functions. That is, it has the reverse semantics:
* cap_has_capability() returns 0 when a task has a capability, but the
* kernel's capable() and has_capability() returns 1 for this case.
*/
int cap_capable(struct task_struct *tsk, const struct cred *cred, int cap,
		int audit)
{
#ifdef CONFIG_ANDROID_PARANOID_NETWORK
	/* Android: membership in the AID_NET_RAW / AID_NET_ADMIN groups
	 * grants the corresponding network capability outright. */
	if (cap == CAP_NET_RAW && in_egroup_p(AID_NET_RAW))
		return 0;
	if (cap == CAP_NET_ADMIN && in_egroup_p(AID_NET_ADMIN))
		return 0;
#endif

	/* 0 if the capability is in the effective set, -EPERM otherwise
	 * (note the reversed sense vs. capable() — see kdoc above). */
	return cap_raised(cred->cap_effective, cap) ? 0 : -EPERM;
}
/**
 * cap_settime - Determine whether the current process may set the system clock
 * @ts: The time to set
 * @tz: The timezone to set
 *
 * Determine whether the current process may set the system clock and timezone
 * information, returning 0 if permission granted, -ve if denied.
 */
int cap_settime(struct timespec *ts, struct timezone *tz)
{
	return capable(CAP_SYS_TIME) ? 0 : -EPERM;
}
/**
* cap_ptrace_access_check - Determine whether the current process may access
* another
* @child: The process to be accessed
* @mode: The mode of attachment.
*
* Determine whether a process may access another, returning 0 if permission
* granted, -ve if denied.
*/
int cap_ptrace_access_check(struct task_struct *child, unsigned int mode)
{
int ret = 0;
rcu_read_lock();
if (!cap_issubset(__task_cred(child)->cap_permitted,
current_cred()->cap_permitted) &&
!capable(CAP_SYS_PTRACE))
ret = -EPERM;
rcu_read_unlock();
return ret;
}
/**
 * cap_ptrace_traceme - Determine whether another process may trace the current
 * @parent: The task proposed to be the tracer
 *
 * Determine whether the nominated task is permitted to trace the current
 * process, returning 0 if permission is granted, -ve if denied.
 */
int cap_ptrace_traceme(struct task_struct *parent)
{
	int err = 0;

	rcu_read_lock();
	/* the tracer must dominate our permitted set, or hold
	 * CAP_SYS_PTRACE itself */
	if (!cap_issubset(current_cred()->cap_permitted,
			  __task_cred(parent)->cap_permitted) &&
	    !has_capability(parent, CAP_SYS_PTRACE))
		err = -EPERM;
	rcu_read_unlock();

	return err;
}
/**
 * cap_capget - Retrieve a task's capability sets
 * @target: The task from which to retrieve the capability sets
 * @effective: The place to record the effective set
 * @inheritable: The place to record the inheritable set
 * @permitted: The place to record the permitted set
 *
 * This function retrieves the capabilities of the nominated task and returns
 * them to the caller.
 */
int cap_capget(struct task_struct *target, kernel_cap_t *effective,
	       kernel_cap_t *inheritable, kernel_cap_t *permitted)
{
	const struct cred *tcred;

	/* Derived from kernel/capability.c:sys_capget. */
	rcu_read_lock();
	tcred = __task_cred(target);
	*effective   = tcred->cap_effective;
	*inheritable = tcred->cap_inheritable;
	*permitted   = tcred->cap_permitted;
	rcu_read_unlock();

	return 0;
}
/*
 * Determine whether the inheritable capabilities are limited to the old
 * permitted set.  Returns 1 if they are limited, 0 if they are not.
 */
static inline int cap_inh_is_capped(void)
{
	/* the inheritable set is capped unless the current task holds
	 * CAP_SETPCAP */
	return cap_capable(current, current_cred(), CAP_SETPCAP,
			   SECURITY_CAP_AUDIT) != 0;
}
/**
 * cap_capset - Validate and apply proposed changes to current's capabilities
 * @new: The proposed new credentials; alterations should be made here
 * @old: The current task's current credentials
 * @effective: A pointer to the proposed new effective capabilities set
 * @inheritable: A pointer to the proposed new inheritable capabilities set
 * @permitted: A pointer to the proposed new permitted capabilities set
 *
 * This function validates and applies a proposed mass change to the current
 * process's capability sets.  The changes are made to the proposed new
 * credentials, and assuming no error, will be committed by the caller of LSM.
 *
 * Returns 0 on success, -EPERM if any proposed set escapes its bound.
 */
int cap_capset(struct cred *new,
	       const struct cred *old,
	       const kernel_cap_t *effective,
	       const kernel_cap_t *inheritable,
	       const kernel_cap_t *permitted)
{
	/* without CAP_SETPCAP, pI' must stay within pI | pP */
	if (cap_inh_is_capped() &&
	    !cap_issubset(*inheritable,
			  cap_combine(old->cap_inheritable,
				      old->cap_permitted)))
		/* incapable of using this inheritable set */
		return -EPERM;

	if (!cap_issubset(*inheritable,
			  cap_combine(old->cap_inheritable,
				      old->cap_bset)))
		/* no new pI capabilities outside bounding set */
		return -EPERM;

	/* verify restrictions on target's new Permitted set */
	if (!cap_issubset(*permitted, old->cap_permitted))
		return -EPERM;

	/* verify the _new_Effective_ is a subset of the _new_Permitted_ */
	if (!cap_issubset(*effective, *permitted))
		return -EPERM;

	/* all checks passed: commit into the proposed credentials */
	new->cap_effective   = *effective;
	new->cap_inheritable = *inheritable;
	new->cap_permitted   = *permitted;
	return 0;
}
/*
 * Clear proposed capability sets for execve(): empty the proposed permitted
 * set and drop the "effective caps on" flag.
 */
static inline void bprm_clear_caps(struct linux_binprm *bprm)
{
	cap_clear(bprm->cred->cap_permitted);
	bprm->cap_effective = false;
}
/**
 * cap_inode_need_killpriv - Determine if inode change affects privileges
 * @dentry: The inode/dentry in being changed with change marked ATTR_KILL_PRIV
 *
 * Determine if an inode having a change applied that's marked ATTR_KILL_PRIV
 * affects the security markings on that inode, and if it is, should
 * inode_killpriv() be invoked or the change rejected?
 *
 * Returns 0 if granted; +ve if granted, but inode_killpriv() is required; and
 * -ve to deny the change.
 */
int cap_inode_need_killpriv(struct dentry *dentry)
{
	struct inode *inode = dentry->d_inode;
	int rc;

	/* no xattr support => no capability xattr to kill */
	if (!inode->i_op->getxattr)
		return 0;

	/* probe for the security.capability xattr (size query only) */
	rc = inode->i_op->getxattr(dentry, XATTR_NAME_CAPS, NULL, 0);
	return rc > 0 ? 1 : 0;
}
/**
* cap_inode_killpriv - Erase the security markings on an inode
* @dentry: The inode/dentry to alter
*
* Erase the privilege-enhancing security markings on an inode.
*
* Returns 0 if successful, -ve on error.
*/
int cap_inode_killpriv(struct dentry *dentry)
{
struct inode *inode = dentry->d_inode;
if (!inode->i_op->removexattr)
return 0;
return inode->i_op->removexattr(dentry, XATTR_NAME_CAPS);
}
/*
 * Calculate the new process capability sets from the capability sets attached
 * to a file.
 *
 * @caps: the decoded on-disk capability data for the executable
 * @bprm: the binprm whose proposed credentials are being built
 * @effective: set to true if the file requests its caps become effective
 *
 * Returns 0 on success; -EPERM if the file demands permitted caps that the
 * bounding/inheritable sets cannot supply AND the file marked its caps
 * effective (legacy binaries without fE are allowed to run degraded).
 */
static inline int bprm_caps_from_vfs_caps(struct cpu_vfs_cap_data *caps,
					  struct linux_binprm *bprm,
					  bool *effective)
{
	struct cred *new = bprm->cred;
	unsigned i;
	int ret = 0;

	if (caps->magic_etc & VFS_CAP_FLAGS_EFFECTIVE)
		*effective = true;

	/* combine per-32-bit-word: each kernel_cap_t is an array of __u32 */
	CAP_FOR_EACH_U32(i) {
		__u32 permitted = caps->permitted.cap[i];
		__u32 inheritable = caps->inheritable.cap[i];

		/*
		 * pP' = (X & fP) | (pI & fI)
		 */
		new->cap_permitted.cap[i] =
			(new->cap_bset.cap[i] & permitted) |
			(new->cap_inheritable.cap[i] & inheritable);

		if (permitted & ~new->cap_permitted.cap[i])
			/* insufficient to execute correctly */
			ret = -EPERM;
	}

	/*
	 * For legacy apps, with no internal support for recognizing they
	 * do not have enough capabilities, we return an error if they are
	 * missing some "forced" (aka file-permitted) capabilities.
	 */
	return *effective ? ret : 0;
}
/*
 * Extract the on-exec-apply capability sets for an executable file.
 *
 * Reads and validates the security.capability xattr of @dentry into
 * @cpu_caps (host byte order).  Returns 0 on success, -ENODATA if the file
 * has no capability xattr (or xattrs are unsupported), -EINVAL on a
 * malformed xattr, or the getxattr error code.
 */
int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps)
{
	struct inode *inode = dentry->d_inode;
	__u32 magic_etc;
	unsigned tocopy, i;
	int size;
	struct vfs_cap_data caps;

	/* start from an empty result so callers see zeroed sets on error */
	memset(cpu_caps, 0, sizeof(struct cpu_vfs_cap_data));

	if (!inode || !inode->i_op->getxattr)
		return -ENODATA;

	size = inode->i_op->getxattr((struct dentry *)dentry, XATTR_NAME_CAPS, &caps,
				     XATTR_CAPS_SZ);
	if (size == -ENODATA || size == -EOPNOTSUPP)
		/* no data, that's ok */
		return -ENODATA;
	if (size < 0)
		return size;

	if (size < sizeof(magic_etc))
		return -EINVAL;

	/* on-disk data is little-endian; convert the header word first */
	cpu_caps->magic_etc = magic_etc = le32_to_cpu(caps.magic_etc);

	/* the revision dictates the exact xattr size and word count */
	switch (magic_etc & VFS_CAP_REVISION_MASK) {
	case VFS_CAP_REVISION_1:
		if (size != XATTR_CAPS_SZ_1)
			return -EINVAL;
		tocopy = VFS_CAP_U32_1;
		break;
	case VFS_CAP_REVISION_2:
		if (size != XATTR_CAPS_SZ_2)
			return -EINVAL;
		tocopy = VFS_CAP_U32_2;
		break;
	default:
		return -EINVAL;
	}

	/* copy only the words present on disk; the rest stay zero */
	CAP_FOR_EACH_U32(i) {
		if (i >= tocopy)
			break;
		cpu_caps->permitted.cap[i] = le32_to_cpu(caps.data[i].permitted);
		cpu_caps->inheritable.cap[i] = le32_to_cpu(caps.data[i].inheritable);
	}

	return 0;
}
/*
 * Attempt to get the on-exec apply capability sets for an executable file from
 * its xattrs and, if present, apply them to the proposed credentials being
 * constructed by execve().
 *
 * @effective is set to true if the file's caps should become effective.
 * Returns 0 on success (including "no file caps"); -ve on error, in which
 * case the proposed caps are cleared again.
 */
static int get_file_caps(struct linux_binprm *bprm, bool *effective)
{
	struct dentry *dentry;
	int rc = 0;
	struct cpu_vfs_cap_data vcaps;

	/* always start from a clean proposed set */
	bprm_clear_caps(bprm);

	if (!file_caps_enabled)
		return 0;

	/* nosuid mounts never confer file capabilities */
	if (bprm->file->f_vfsmnt->mnt_flags & MNT_NOSUID)
		return 0;

	dentry = dget(bprm->file->f_dentry);

	rc = get_vfs_caps_from_disk(dentry, &vcaps);
	if (rc < 0) {
		if (rc == -EINVAL)
			printk(KERN_NOTICE "%s: get_vfs_caps_from_disk returned %d for %s\n",
				__func__, rc, bprm->filename);
		else if (rc == -ENODATA)
			rc = 0; /* absence of file caps is not an error */
		goto out;
	}

	rc = bprm_caps_from_vfs_caps(&vcaps, bprm, effective);
	if (rc == -EINVAL)
		printk(KERN_NOTICE "%s: cap_from_disk returned %d for %s\n",
		       __func__, rc, bprm->filename);

out:
	dput(dentry);
	if (rc)
		bprm_clear_caps(bprm);

	return rc;
}
/**
 * cap_bprm_set_creds - Set up the proposed credentials for execve().
 * @bprm: The execution parameters, including the proposed creds
 *
 * Set up the proposed credentials for a new execution context being
 * constructed by execve().  The proposed creds in @bprm->cred is altered,
 * which won't take effect immediately.  Returns 0 if successful, -ve on error.
 */
int cap_bprm_set_creds(struct linux_binprm *bprm)
{
	const struct cred *old = current_cred();
	struct cred *new = bprm->cred;
	bool effective;
	int ret;

	/* start from the file capabilities (if any) attached to the binary */
	effective = false;
	ret = get_file_caps(bprm, &effective);
	if (ret < 0)
		return ret;

	if (!issecure(SECURE_NOROOT)) {
		/*
		 * If the legacy file capability is set, then don't set privs
		 * for a setuid root binary run by a non-root user.  Do set it
		 * for a root user just to cause least surprise to an admin.
		 */
		if (effective && new->uid != 0 && new->euid == 0) {
			warn_setuid_and_fcaps_mixed(bprm->filename);
			goto skip;
		}
		/*
		 * To support inheritance of root-permissions and suid-root
		 * executables under compatibility mode, we override the
		 * capability sets for the file.
		 *
		 * If only the real uid is 0, we do not set the effective bit.
		 */
		if (new->euid == 0 || new->uid == 0) {
			/* pP' = (cap_bset & ~0) | (pI & ~0) */
			new->cap_permitted = cap_combine(old->cap_bset,
							 old->cap_inheritable);
		}
		if (new->euid == 0)
			effective = true;
	}
skip:

	/* Don't let someone trace a set[ug]id/setpcap binary with the revised
	 * credentials unless they have the appropriate permit
	 */
	if ((new->euid != old->uid ||
	     new->egid != old->gid ||
	     !cap_issubset(new->cap_permitted, old->cap_permitted)) &&
	    bprm->unsafe & ~LSM_UNSAFE_PTRACE_CAP) {
		/* downgrade; they get no more than they had, and maybe less */
		if (!capable(CAP_SETUID)) {
			new->euid = new->uid;
			new->egid = new->gid;
		}
		new->cap_permitted = cap_intersect(new->cap_permitted,
						   old->cap_permitted);
	}

	/* saved and fs ids follow the (possibly downgraded) effective ids */
	new->suid = new->fsuid = new->euid;
	new->sgid = new->fsgid = new->egid;

	/* For init, we want to retain the capabilities set in the initial
	 * task.  Thus we skip the usual capability rules
	 */
	if (!is_global_init(current)) {
		if (effective)
			new->cap_effective = new->cap_permitted;
		else
			cap_clear(new->cap_effective);
	}
	bprm->cap_effective = effective;

	/*
	 * Audit candidate if current->cap_effective is set
	 *
	 * We do not bother to audit if 3 things are true:
	 *   1) cap_effective has all caps
	 *   2) we are root
	 *   3) root is supposed to have all caps (SECURE_NOROOT)
	 * Since this is just a normal root execing a process.
	 *
	 * Number 1 above might fail if you don't have a full bset, but I think
	 * that is interesting information to audit.
	 */
	if (!cap_isclear(new->cap_effective)) {
		if (!cap_issubset(CAP_FULL_SET, new->cap_effective) ||
		    new->euid != 0 || new->uid != 0 ||
		    issecure(SECURE_NOROOT)) {
			ret = audit_log_bprm_fcaps(bprm, new, old);
			if (ret < 0)
				return ret;
		}
	}

	/* KEEP_CAPS only spans a setuid(); it never survives an exec */
	new->securebits &= ~issecure_mask(SECURE_KEEP_CAPS);
	return 0;
}
/**
 * cap_bprm_secureexec - Determine whether a secure execution is required
 * @bprm: The execution parameters
 *
 * Determine whether a secure execution is required, return 1 if it is, and 0
 * if it is not.
 *
 * The credentials have been committed by this point, and so are no longer
 * available through @bprm->cred.
 */
int cap_bprm_secureexec(struct linux_binprm *bprm)
{
	const struct cred *cred = current_cred();

	/* a non-root process gaining caps from the file must exec securely */
	if (cred->uid != 0 &&
	    (bprm->cap_effective || !cap_isclear(cred->cap_permitted)))
		return 1;

	/* otherwise secure-exec only if euid/egid differ from uid/gid */
	return cred->euid != cred->uid || cred->egid != cred->gid;
}
/**
 * cap_inode_setxattr - Determine whether an xattr may be altered
 * @dentry: The inode/dentry being altered
 * @name: The name of the xattr to be changed
 * @value: The value that the xattr will be changed to
 * @size: The size of value
 * @flags: The replacement flag
 *
 * Determine whether an xattr may be altered or set on an inode, returning 0 if
 * permission is granted, -ve if denied.
 *
 * This is used to make sure security xattrs don't get updated or set by those
 * who aren't privileged to do so.
 */
int cap_inode_setxattr(struct dentry *dentry, const char *name,
		       const void *value, size_t size, int flags)
{
	/* writing the file-capability xattr requires CAP_SETFCAP */
	if (strcmp(name, XATTR_NAME_CAPS) == 0)
		return capable(CAP_SETFCAP) ? 0 : -EPERM;

	/* any other security.* xattr requires CAP_SYS_ADMIN */
	if (strncmp(name, XATTR_SECURITY_PREFIX,
		    sizeof(XATTR_SECURITY_PREFIX) - 1) == 0 &&
	    !capable(CAP_SYS_ADMIN))
		return -EPERM;

	return 0;
}
/**
 * cap_inode_removexattr - Determine whether an xattr may be removed
 * @dentry: The inode/dentry being altered
 * @name: The name of the xattr to be changed
 *
 * Determine whether an xattr may be removed from an inode, returning 0 if
 * permission is granted, -ve if denied.
 *
 * This is used to make sure security xattrs don't get removed by those who
 * aren't privileged to remove them.
 */
int cap_inode_removexattr(struct dentry *dentry, const char *name)
{
	/* removing the file-capability xattr requires CAP_SETFCAP */
	if (strcmp(name, XATTR_NAME_CAPS) == 0)
		return capable(CAP_SETFCAP) ? 0 : -EPERM;

	/* any other security.* xattr requires CAP_SYS_ADMIN */
	if (strncmp(name, XATTR_SECURITY_PREFIX,
		    sizeof(XATTR_SECURITY_PREFIX) - 1) == 0 &&
	    !capable(CAP_SYS_ADMIN))
		return -EPERM;

	return 0;
}
/*
 * cap_emulate_setxuid() fixes the effective / permitted capabilities of
 * a process after a call to setuid, setreuid, or setresuid.
 *
 *  1) When set*uiding _from_ one of {r,e,s}uid == 0 _to_ all of
 *  {r,e,s}uid != 0, the permitted and effective capabilities are
 *  cleared.
 *
 *  2) When set*uiding _from_ euid == 0 _to_ euid != 0, the effective
 *  capabilities of the process are cleared.
 *
 *  3) When set*uiding _from_ euid != 0 _to_ euid == 0, the effective
 *  capabilities are set to the permitted capabilities.
 *
 *  fsuid is handled elsewhere.  fsuid == 0 and {r,e,s}uid != 0 should
 *  never happen.
 *
 *  -astor
 *
 * cevans - New behaviour, Oct '99
 * A process may, via prctl(), elect to keep its capabilities when it
 * calls setuid() and switches away from uid==0.  Both permitted and
 * effective sets will be retained.
 * Without this change, it was impossible for a daemon to drop only some
 * of its privilege.  The call to setuid(!=0) would drop all privileges!
 * Keeping uid 0 is not an option because uid 0 owns too many vital
 * files..
 * Thanks to Olaf Kirch and Peter Benie for spotting this.
 */
static inline void cap_emulate_setxuid(struct cred *new, const struct cred *old)
{
	/* rule 1: dropping all root uids clears pP and pE, unless the task
	 * opted in to SECURE_KEEP_CAPS via prctl(PR_SET_KEEPCAPS) */
	if ((old->uid == 0 || old->euid == 0 || old->suid == 0) &&
	    (new->uid != 0 && new->euid != 0 && new->suid != 0) &&
	    !issecure(SECURE_KEEP_CAPS)) {
		cap_clear(new->cap_permitted);
		cap_clear(new->cap_effective);
	}
	/* rule 2: leaving euid 0 clears the effective set */
	if (old->euid == 0 && new->euid != 0)
		cap_clear(new->cap_effective);
	/* rule 3: gaining euid 0 raises pE to pP */
	if (old->euid != 0 && new->euid == 0)
		new->cap_effective = new->cap_permitted;
}
/**
 * cap_task_fix_setuid - Fix up the results of setuid() call
 * @new: The proposed credentials
 * @old: The current task's current credentials
 * @flags: Indications of what has changed (LSM_SETID_*)
 *
 * Fix up the results of setuid() call before the credential changes are
 * actually applied, returning 0 to grant the changes, -ve to deny them.
 */
int cap_task_fix_setuid(struct cred *new, const struct cred *old, int flags)
{
	switch (flags) {
	case LSM_SETID_RE:
	case LSM_SETID_ID:
	case LSM_SETID_RES:
		/* juggle the capabilities to follow [RES]UID changes unless
		 * otherwise suppressed */
		if (!issecure(SECURE_NO_SETUID_FIXUP))
			cap_emulate_setxuid(new, old);
		break;

	case LSM_SETID_FS:
		/* juggle the capabilties to follow FSUID changes, unless
		 * otherwise suppressed
		 *
		 * FIXME - is fsuser used for all CAP_FS_MASK capabilities?
		 *          if not, we might be a bit too harsh here.
		 */
		if (!issecure(SECURE_NO_SETUID_FIXUP)) {
			/* leaving fsuid 0 drops the fs-related caps from pE */
			if (old->fsuid == 0 && new->fsuid != 0)
				new->cap_effective =
					cap_drop_fs_set(new->cap_effective);

			/* gaining fsuid 0 raises the fs caps present in pP */
			if (old->fsuid != 0 && new->fsuid == 0)
				new->cap_effective =
					cap_raise_fs_set(new->cap_effective,
							 new->cap_permitted);
		}
		break;

	default:
		return -EINVAL;
	}

	return 0;
}
/*
 * Rationale: code calling task_setscheduler, task_setioprio, and
 * task_setnice, assumes that
 *   . if capable(cap_sys_nice), then those actions should be allowed
 *   . if not capable(cap_sys_nice), but acting on your own processes,
 *	then those actions should be allowed
 * This is insufficient now since you can call code without suid, but
 * yet with increased caps.
 * So we check for increased caps on the target process.
 */
static int cap_safe_nice(struct task_struct *p)
{
	int subset;

	rcu_read_lock();
	subset = cap_issubset(__task_cred(p)->cap_permitted,
			      current_cred()->cap_permitted);
	rcu_read_unlock();

	/* allowed when the target has no extra caps, or we hold
	 * CAP_SYS_NICE */
	return (subset || capable(CAP_SYS_NICE)) ? 0 : -EPERM;
}
/**
 * cap_task_setscheduler - Detemine if scheduler policy change is permitted
 * @p: The task to affect
 *
 * Detemine if the requested scheduler policy change is permitted for the
 * specified task, returning 0 if permission is granted, -ve if denied.
 */
int cap_task_setscheduler(struct task_struct *p)
{
	return cap_safe_nice(p);
}
/**
 * cap_task_setioprio - Detemine if I/O priority change is permitted
 * @p: The task to affect
 * @ioprio: The I/O priority to set
 *
 * Detemine if the requested I/O priority change is permitted for the specified
 * task, returning 0 if permission is granted, -ve if denied.
 */
int cap_task_setioprio(struct task_struct *p, int ioprio)
{
	return cap_safe_nice(p);
}
/**
 * cap_task_setnice - Detemine if task priority change is permitted
 * @p: The task to affect
 * @nice: The nice value to set
 *
 * Detemine if the requested task priority change is permitted for the
 * specified task, returning 0 if permission is granted, -ve if denied.
 */
int cap_task_setnice(struct task_struct *p, int nice)
{
	return cap_safe_nice(p);
}
/*
 * Implement PR_CAPBSET_DROP.  Attempt to remove the specified capability from
 * the current task's bounding set.  Returns 0 on success, -ve on error.
 */
static long cap_prctl_drop(struct cred *new, unsigned long cap)
{
	/* shrinking the bounding set itself requires CAP_SETPCAP */
	if (!capable(CAP_SETPCAP))
		return -EPERM;
	if (!cap_valid(cap))
		return -EINVAL;

	cap_lower(new->cap_bset, cap);
	return 0;
}
/**
 * cap_task_prctl - Implement process control functions for this security module
 * @option: The process control function requested
 * @arg2, @arg3, @arg4, @arg5: The argument data for this function
 *
 * Allow process control functions (sys_prctl()) to alter capabilities; may
 * also deny access to other functions not otherwise implemented here.
 *
 * Returns 0 or +ve on success, -ENOSYS if this function is not implemented
 * here, other -ve on error.  If -ENOSYS is returned, sys_prctl() and other LSM
 * modules will consider performing the function.
 */
int cap_task_prctl(int option, unsigned long arg2, unsigned long arg3,
		   unsigned long arg4, unsigned long arg5)
{
	struct cred *new;
	long error = 0;

	/* work on a copy of the creds; commit only if we changed them */
	new = prepare_creds();
	if (!new)
		return -ENOMEM;

	switch (option) {
	case PR_CAPBSET_READ:
		error = -EINVAL;
		if (!cap_valid(arg2))
			goto error;
		/* +ve result: 1 if the cap is in the bounding set, else 0 */
		error = !!cap_raised(new->cap_bset, arg2);
		goto no_change;

	case PR_CAPBSET_DROP:
		error = cap_prctl_drop(new, arg2);
		if (error < 0)
			goto error;
		goto changed;

	/*
	 * The next four prctl's remain to assist with transitioning a
	 * system from legacy UID=0 based privilege (when filesystem
	 * capabilities are not in use) to a system using filesystem
	 * capabilities only - as the POSIX.1e draft intended.
	 *
	 * Note:
	 *
	 *  PR_SET_SECUREBITS =
	 *      issecure_mask(SECURE_KEEP_CAPS_LOCKED)
	 *    | issecure_mask(SECURE_NOROOT)
	 *    | issecure_mask(SECURE_NOROOT_LOCKED)
	 *    | issecure_mask(SECURE_NO_SETUID_FIXUP)
	 *    | issecure_mask(SECURE_NO_SETUID_FIXUP_LOCKED)
	 *
	 * will ensure that the current process and all of its
	 * children will be locked into a pure
	 * capability-based-privilege environment.
	 */
	case PR_SET_SECUREBITS:
		error = -EPERM;
		if ((((new->securebits & SECURE_ALL_LOCKS) >> 1)
		     & (new->securebits ^ arg2))			/*[1]*/
		    || ((new->securebits & SECURE_ALL_LOCKS & ~arg2))	/*[2]*/
		    || (arg2 & ~(SECURE_ALL_LOCKS | SECURE_ALL_BITS))	/*[3]*/
		    || (cap_capable(current, current_cred(), CAP_SETPCAP,
				    SECURITY_CAP_AUDIT) != 0)		/*[4]*/
			/*
			 * [1] no changing of bits that are locked
			 * [2] no unlocking of locks
			 * [3] no setting of unsupported bits
			 * [4] doing anything requires privilege (go read about
			 *     the "sendmail capabilities bug")
			 */
		    )
			/* cannot change a locked bit */
			goto error;
		new->securebits = arg2;
		goto changed;

	case PR_GET_SECUREBITS:
		error = new->securebits;
		goto no_change;

	case PR_GET_KEEPCAPS:
		if (issecure(SECURE_KEEP_CAPS))
			error = 1;
		goto no_change;

	case PR_SET_KEEPCAPS:
		error = -EINVAL;
		if (arg2 > 1) /* Note, we rely on arg2 being unsigned here */
			goto error;
		error = -EPERM;
		if (issecure(SECURE_KEEP_CAPS_LOCKED))
			goto error;
		if (arg2)
			new->securebits |= issecure_mask(SECURE_KEEP_CAPS);
		else
			new->securebits &= ~issecure_mask(SECURE_KEEP_CAPS);
		goto changed;

	default:
		/* No functionality available - continue with default */
		error = -ENOSYS;
		goto error;
	}

	/* Functionality provided */
changed:
	return commit_creds(new);

no_change:
error:
	abort_creds(new);
	return error;
}
/**
 * cap_syslog - Determine whether syslog function is permitted
 * @type: Function requested (SYSLOG_ACTION_*)
 * @from_file: Whether this request came from an open file (i.e. /proc)
 *
 * Determine whether the current process is permitted to use a particular
 * syslog function, returning 0 if permission is granted, -ve if not.
 */
int cap_syslog(int type, bool from_file)
{
	/* requests via an already-open /proc file were vetted at open time */
	if (from_file && type != SYSLOG_ACTION_OPEN)
		return 0;

	/* reading/querying the buffer is unprivileged */
	if (type == SYSLOG_ACTION_READ_ALL ||
	    type == SYSLOG_ACTION_SIZE_BUFFER)
		return 0;

	return capable(CAP_SYS_ADMIN) ? 0 : -EPERM;
}
/**
 * cap_vm_enough_memory - Determine whether a new virtual mapping is permitted
 * @mm: The VM space in which the new mapping is to be made
 * @pages: The size of the mapping
 *
 * Determine whether the allocation of a new virtual mapping by the current
 * task is permitted, returning 0 if permission is granted, -ve if not.
 */
int cap_vm_enough_memory(struct mm_struct *mm, long pages)
{
	/* CAP_SYS_ADMIN relaxes the overcommit check; probe without
	 * generating an audit record */
	int cap_sys_admin = (cap_capable(current, current_cred(),
					 CAP_SYS_ADMIN,
					 SECURITY_CAP_NOAUDIT) == 0);

	return __vm_enough_memory(mm, pages, cap_sys_admin);
}
/*
 * cap_file_mmap - check if able to map given addr
 * @file: unused
 * @reqprot: unused
 * @prot: unused
 * @flags: unused
 * @addr: address attempting to be mapped
 * @addr_only: unused
 *
 * If the process is attempting to map memory below dac_mmap_min_addr they need
 * CAP_SYS_RAWIO.  The other parameters to this function are unused by the
 * capability security module.  Returns 0 if this mapping should be allowed
 * -EPERM if not.
 */
int cap_file_mmap(struct file *file, unsigned long reqprot,
		  unsigned long prot, unsigned long flags,
		  unsigned long addr, unsigned long addr_only)
{
	int ret;

	/* mappings at or above the floor are always permitted here */
	if (addr >= dac_mmap_min_addr)
		return 0;

	ret = cap_capable(current, current_cred(), CAP_SYS_RAWIO,
			  SECURITY_CAP_AUDIT);
	/* set PF_SUPERPRIV if it turns out we allow the low mmap */
	if (ret == 0)
		current->flags |= PF_SUPERPRIV;

	return ret;
}
|
steppnasty/platform_kernel_msm7x30
|
security/commoncap.c
|
C
|
gpl-2.0
| 27,333
|
# -*- coding: iso-8859-1 -*-
# (c) 2006 Stephan Reichholf
# This Software is Free, use it where you want, when you want for whatever you want and modify it if you want but don't remove my copyright!
from Screens.Screen import Screen
from Screens.Standby import TryQuitMainloop
from Screens.MessageBox import MessageBox
from Components.ActionMap import NumberActionMap
from Components.Pixmap import Pixmap
from Components.Sources.StaticText import StaticText
from Components.MenuList import MenuList
from Plugins.Plugin import PluginDescriptor
from Components.config import config
from Tools.Directories import resolveFilename, SCOPE_PLUGINS
from enigma import eEnv
import os
SKINXML = "skin.xml"
DEFAULTSKIN = "<Default Skin>"
class SkinSelector(Screen):
# for i18n:
# _("Choose your Skin")
skinlist = []
root = os.path.join(eEnv.resolve("${datadir}"),"enigma2")
def __init__(self, session, args = None):
Screen.__init__(self, session)
self.skinlist = []
self.previewPath = ""
if os.path.exists(os.path.join(self.root, SKINXML)):
self.skinlist.append(DEFAULTSKIN)
for root, dirs, files in os.walk(self.root, followlinks=True):
for subdir in dirs:
dir = os.path.join(root,subdir)
if os.path.exists(os.path.join(dir,SKINXML)):
self.skinlist.append(subdir)
dirs = []
self["key_red"] = StaticText(_("Close"))
self["introduction"] = StaticText(_("Press OK to activate the selected skin."))
self.skinlist.sort()
self["SkinList"] = MenuList(self.skinlist)
self["Preview"] = Pixmap()
self["actions"] = NumberActionMap(["WizardActions", "InputActions", "EPGSelectActions"],
{
"ok": self.ok,
"back": self.close,
"red": self.close,
"up": self.up,
"down": self.down,
"left": self.left,
"right": self.right,
"info": self.info,
}, -1)
self.onLayoutFinish.append(self.layoutFinished)
def layoutFinished(self):
tmp = config.skin.primary_skin.value.find("/"+SKINXML)
if tmp != -1:
tmp = config.skin.primary_skin.value[:tmp]
idx = 0
for skin in self.skinlist:
if skin == tmp:
break
idx += 1
if idx < len(self.skinlist):
self["SkinList"].moveToIndex(idx)
self.loadPreview()
def up(self):
self["SkinList"].up()
self.loadPreview()
def down(self):
self["SkinList"].down()
self.loadPreview()
def left(self):
self["SkinList"].pageUp()
self.loadPreview()
def right(self):
self["SkinList"].pageDown()
self.loadPreview()
def info(self):
aboutbox = self.session.open(MessageBox,_("STB-GUI Skinselector\n\nIf you experience any problems please contact\nstephan@reichholf.net\n\n\xA9 2006 - Stephan Reichholf"), MessageBox.TYPE_INFO)
aboutbox.setTitle(_("About..."))
def ok(self):
if self["SkinList"].getCurrent() == DEFAULTSKIN:
self.skinfile = "."
else:
self.skinfile = self["SkinList"].getCurrent()
self.skinfile = os.path.join(self.skinfile, SKINXML)
print "Skinselector: Selected Skin: "+self.root+self.skinfile
restartbox = self.session.openWithCallback(self.restartGUI,MessageBox,_("GUI needs a restart to apply a new skin\nDo you want to restart the GUI now?"), MessageBox.TYPE_YESNO)
restartbox.setTitle(_("Restart GUI now?"))
def loadPreview(self):
if self["SkinList"].getCurrent() == DEFAULTSKIN:
pngpath = "."
else:
pngpath = self["SkinList"].getCurrent()
pngpath = os.path.join(os.path.join(self.root, pngpath), "prev.png")
if not os.path.exists(pngpath):
pngpath = resolveFilename(SCOPE_PLUGINS, "SystemPlugins/SkinSelector/noprev.png")
if self.previewPath != pngpath:
self.previewPath = pngpath
self["Preview"].instance.setPixmapFromFile(self.previewPath)
def restartGUI(self, answer):
if answer is True:
config.skin.primary_skin.value = self.skinfile
config.skin.primary_skin.save()
self.session.open(TryQuitMainloop, 3)
def SkinSelMain(session, **kwargs):
	# Plugin entry point: open the skin-selection screen.
	session.open(SkinSelector)
def SkinSelSetup(menuid, **kwargs):
	# Menu hook: contribute a "Skin" entry only to the user-interface menu.
	if menuid != "ui_menu":
		return []
	return [(_("Skin"), SkinSelMain, "skin_selector", None)]
def Plugins(**kwargs):
	# Plugin registration: expose the skin selector via the menu hook above.
	return PluginDescriptor(name="Skinselector", description="Select Your Skin", where = PluginDescriptor.WHERE_MENU, needsRestart = False, fnc=SkinSelSetup)
|
kajgan/e2
|
lib/python/Plugins/SystemPlugins/SkinSelector/plugin.py
|
Python
|
gpl-2.0
| 4,208
|
/* Copyright (C) Olivier Bertrand 2004 - 2013
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
/*************** Mycat CC Program Source Code File (.CC) ***************/
/* PROGRAM NAME: MYCAT */
/* ------------- */
/* Version 1.4 */
/* */
/* Author: Olivier Bertrand 2012 - 2013 */
/* */
/* WHAT THIS PROGRAM DOES: */
/* ----------------------- */
/* This program are the DB description related routines. */
/***********************************************************************/
/***********************************************************************/
/* Include relevant MariaDB header file. */
/***********************************************************************/
#include <my_config.h>
#if defined(WIN32)
//#include <windows.h>
//#include <sqlext.h>
#elif defined(UNIX)
#include <sys/types.h>
#include <unistd.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#endif
#define DONT_DEFINE_VOID
//#include <mysql/plugin.h>
#include "handler.h"
#undef OFFSET
/***********************************************************************/
/* Include application header files */
/* */
/* global.h is header containing all global declarations. */
/* plgdbsem.h is header containing DB application declarations. */
/* tabdos.h is header containing TDBDOS classes declarations. */
/* MYCAT.h is header containing DB description declarations. */
/***********************************************************************/
#if defined(UNIX)
#include "osutil.h"
#endif // UNIX
#include "global.h"
#include "plgdbsem.h"
#include "reldef.h"
#include "tabcol.h"
#include "xtable.h"
#include "filamtxt.h"
#include "tabdos.h"
#include "tabfmt.h"
#include "tabvct.h"
#include "tabsys.h"
#if defined(WIN32)
#include "tabmac.h"
#include "tabwmi.h"
#endif // WIN32
//#include "tabtbl.h"
#include "tabxcl.h"
#include "tabtbl.h"
#include "taboccur.h"
#if defined(XML_SUPPORT)
#include "tabxml.h"
#endif // XML_SUPPORT
#include "tabmul.h"
#if defined(MYSQL_SUPPORT)
#include "tabmysql.h"
#endif // MYSQL_SUPPORT
#if defined(ODBC_SUPPORT)
#define NODBC
#include "tabodbc.h"
#endif // ODBC_SUPPORT
#if defined(PIVOT_SUPPORT)
#include "tabpivot.h"
#endif // PIVOT_SUPPORT
#include "tabvir.h"
#include "ha_connect.h"
#include "mycat.h"
/***********************************************************************/
/* Extern static variables. */
/***********************************************************************/
#if defined(WIN32)
extern "C" HINSTANCE s_hModule; // Saved module handle
#endif // !WIN32
PQRYRES OEMColumns(PGLOBAL g, PTOS topt, char *tab, char *db, bool info);
/***********************************************************************/
/*  Get a unique enum table type ID.                                   */
/*  Maps a table-type name (case-insensitive) to its TABTYPE enum      */
/*  value.  NULL yields TAB_UNDEF; an unknown name yields TAB_NIY.     */
/*  Some types are only recognized when the matching feature was       */
/*  compiled in (XML, ODBC, MYSQL, WIN32, PIVOT).                      */
/***********************************************************************/
TABTYPE GetTypeID(const char *type)
  {
  return (!type) ? TAB_UNDEF
                 : (!stricmp(type, "DOS"))   ? TAB_DOS
                 : (!stricmp(type, "FIX"))   ? TAB_FIX
                 : (!stricmp(type, "BIN"))   ? TAB_BIN
                 : (!stricmp(type, "CSV"))   ? TAB_CSV
                 : (!stricmp(type, "FMT"))   ? TAB_FMT
                 : (!stricmp(type, "DBF"))   ? TAB_DBF
#ifdef XML_SUPPORT
                 : (!stricmp(type, "XML"))   ? TAB_XML
#endif
                 : (!stricmp(type, "INI"))   ? TAB_INI
                 : (!stricmp(type, "VEC"))   ? TAB_VEC
#ifdef ODBC_SUPPORT
                 : (!stricmp(type, "ODBC"))  ? TAB_ODBC
#endif
#ifdef MYSQL_SUPPORT
                 : (!stricmp(type, "MYSQL")) ? TAB_MYSQL
                 : (!stricmp(type, "MYPRX")) ? TAB_MYSQL
#endif
                 : (!stricmp(type, "DIR"))   ? TAB_DIR
#ifdef WIN32
                 : (!stricmp(type, "MAC"))   ? TAB_MAC
                 : (!stricmp(type, "WMI"))   ? TAB_WMI
#endif
                 : (!stricmp(type, "TBL"))   ? TAB_TBL
                 : (!stricmp(type, "XCOL"))  ? TAB_XCL
                 : (!stricmp(type, "OCCUR")) ? TAB_OCCUR
                 : (!stricmp(type, "CATLG")) ? TAB_PRX  // Legacy
                 : (!stricmp(type, "PROXY")) ? TAB_PRX
#ifdef PIVOT_SUPPORT
                 : (!stricmp(type, "PIVOT")) ? TAB_PIVOT
#endif
                 : (!stricmp(type, "VIR"))   ? TAB_VIR
                 : (!stricmp(type, "OEM"))   ? TAB_OEM : TAB_NIY;
  } // end of GetTypeID
/***********************************************************************/
/*  Return true for table types based on file.                         */
/***********************************************************************/
bool IsFileType(TABTYPE type)
  {
  switch (type) {
    case TAB_DOS:
    case TAB_FIX:
    case TAB_BIN:
    case TAB_CSV:
    case TAB_FMT:
    case TAB_DBF:
    case TAB_XML:
    case TAB_INI:
    case TAB_VEC:
      return true;
    default:
      // all other types are not backed by a plain data file
      return false;
    } // endswitch type

  } // end of IsFileType
/***********************************************************************/
/* Return true for table types returning exact row count. */
/***********************************************************************/
/***********************************************************************/
/*  Return true for table types returning exact row count.             */
/***********************************************************************/
bool IsExactType(TABTYPE type)
  {
  // Fixed-record and virtual tables know their cardinality exactly.
  // Note: TAB_XML exactness depends on Multiple || Xpand || Coltype,
  // so it is deliberately not listed here.
  switch (type) {
    case TAB_FIX:
    case TAB_BIN:
    case TAB_DBF:
    case TAB_VEC:
    case TAB_VIR:
      return true;
    default:
      return false;
    } // endswitch type

  } // end of IsExactType
/***********************************************************************/
/* Return true for table types accepting null fields. */
/***********************************************************************/
/***********************************************************************/
/*  Return true for table types accepting null fields.                 */
/***********************************************************************/
bool IsTypeNullable(TABTYPE type)
  {
  // Only the MAC and DIR special tables refuse null fields.
  return type != TAB_MAC && type != TAB_DIR;
  } // end of IsTypeNullable
/***********************************************************************/
/* Return true for indexable table by XINDEX. */
/***********************************************************************/
/***********************************************************************/
/*  Return true for fixed record length table types.                   */
/***********************************************************************/
bool IsTypeFixed(TABTYPE type)
  {
  // Only FIX, BIN and VEC have a fixed record layout.
  // (Whether TAB_DBF should qualify was left open in the original.)
  return type == TAB_FIX || type == TAB_BIN || type == TAB_VEC;
  } // end of IsTypeFixed
/***********************************************************************/
/* Return true for table indexable by XINDEX. */
/***********************************************************************/
/***********************************************************************/
/*  Return true for table types indexable by XINDEX.                   */
/***********************************************************************/
bool IsTypeIndexable(TABTYPE type)
  {
  // The file-based record types below support an XINDEX.
  switch (type) {
    case TAB_DOS:
    case TAB_CSV:
    case TAB_FMT:
    case TAB_FIX:
    case TAB_BIN:
    case TAB_VEC:
    case TAB_DBF:
      return true;
    default:
      return false;
    } // endswitch type

  } // end of IsTypeIndexable
/***********************************************************************/
/* Return index type: 0 NO, 1 XINDEX, 2 REMOTE. */
/***********************************************************************/
/***********************************************************************/
/*  Return index type: 0 NO, 1 XINDEX, 2 REMOTE, 3 VIRTUAL.            */
/***********************************************************************/
int GetIndexType(TABTYPE type)
  {
  switch (type) {
    case TAB_DOS:
    case TAB_CSV:
    case TAB_FMT:
    case TAB_FIX:
    case TAB_BIN:
    case TAB_VEC:
    case TAB_DBF:
      return 1;                     // Local XINDEX
    case TAB_MYSQL:
      return 2;                     // Indexing done by the remote server
    case TAB_VIR:
      return 3;                     // Virtual table indexing
    case TAB_ODBC:                  // Not (yet) handled remotely
    default:
      return 0;                     // Not indexable
    } // endswitch type

  } // end of GetIndexType
/***********************************************************************/
/* Get a unique enum catalog function ID. */
/***********************************************************************/
/***********************************************************************/
/*  GetFuncID: map a catalog function name to its FNC enum value.      */
/*  Matching is case-insensitive and mostly prefix based; a NULL name  */
/*  means no catalog function (FNC_NO), an unknown one FNC_NIY.        */
/***********************************************************************/
uint GetFuncID(const char *func)
  {
  if (!func)
    return FNC_NO;

  if (!strnicmp(func, "col", 3))
    return FNC_COL;

  if (!strnicmp(func, "tab", 3))
    return FNC_TABLE;

  if (!stricmp(func, "dsn") ||
      !strnicmp(func, "datasource", 10) ||
      !strnicmp(func, "source", 6) ||
      !strnicmp(func, "sqldatasource", 13))
    return FNC_DSN;

  if (!strnicmp(func, "driver", 6) ||
      !strnicmp(func, "sqldriver", 9))
    return FNC_DRIVER;

  return FNC_NIY;
  } // end of GetFuncID
/***********************************************************************/
/* OEMColumn: Get table column info for an OEM table. */
/***********************************************************************/
/***********************************************************************/
/*  OEMColumns: Get table column info for an OEM table.                */
/*  Loads the external module named in the table options, resolves     */
/*  the exported "Col<SUBTYPE>" function and delegates column          */
/*  discovery to it.  Returns NULL on any failure, with an error       */
/*  message left in g->Message.                                        */
/***********************************************************************/
PQRYRES OEMColumns(PGLOBAL g, PTOS topt, char *tab, char *db, bool info)
  {
  typedef PQRYRES (__stdcall *XCOLDEF) (PGLOBAL, void*, char*, char*, bool);
  const char *module, *subtype;
  char    c, getname[40] = "Col";
#if defined(WIN32)
  HANDLE  hdll;               /* Handle to the external DLL            */
#else   // !WIN32
  void   *hdll;               /* Handle for the loaded shared library  */
#endif  // !WIN32
  XCOLDEF coldef = NULL;
  PQRYRES qrp = NULL;

  module = topt->module;
  subtype = topt->subtype;

  if (!module || !subtype)
    return NULL;

  // The exported name is always in uppercase.  Bound the copy so a
  // user-supplied SUBTYPE option cannot overflow getname:
  // "Col" (3) + subtype + terminating NUL must fit in sizeof(getname).
  if (strlen(subtype) + 4 > sizeof(getname)) {
    sprintf(g->Message, "OEM subtype %.32s... is too long", subtype);
    return NULL;
    } // endif subtype

  for (int i = 0; ; i++) {
    c = subtype[i];
    getname[i + 3] = toupper(c);
    if (!c) break;
    } // endfor i

#if defined(WIN32)
  // Load the Dll implementing the table
  if (!(hdll = LoadLibrary(module))) {
    char  buf[256];
    DWORD rc = GetLastError();

    sprintf(g->Message, MSG(DLL_LOAD_ERROR), rc, module);
    FormatMessage(FORMAT_MESSAGE_FROM_SYSTEM |
                  FORMAT_MESSAGE_IGNORE_INSERTS, NULL, rc, 0,
                  (LPTSTR)buf, sizeof(buf), NULL);
    strcat(strcat(g->Message, ": "), buf);
    return NULL;
    } // endif hDll

  // Get the function returning an instance of the external DEF class
  if (!(coldef = (XCOLDEF)GetProcAddress((HINSTANCE)hdll, getname))) {
    sprintf(g->Message, MSG(PROCADD_ERROR), GetLastError(), getname);
    FreeLibrary((HMODULE)hdll);
    return NULL;
    } // endif coldef
#else   // !WIN32
  const char *error = NULL;

  // Load the desired shared library
  if (!(hdll = dlopen(module, RTLD_LAZY))) {
    error = dlerror();
    sprintf(g->Message, MSG(SHARED_LIB_ERR), module, SVP(error));
    return NULL;
    } // endif Hdll

  // Get the function returning an instance of the external DEF class
  if (!(coldef = (XCOLDEF)dlsym(hdll, getname))) {
    error = dlerror();
    sprintf(g->Message, MSG(GET_FUNC_ERR), getname, SVP(error));
    dlclose(hdll);
    return NULL;
    } // endif coldef
#endif  // !WIN32

  // Just in case the external Get function does not set error messages
  sprintf(g->Message, "Error getting column info from %s", subtype);

  // Get the table column definition
  qrp = coldef(g, topt, tab, db, info);

  // Done with the module: release it in either case.
#if defined(WIN32)
  FreeLibrary((HMODULE)hdll);
#else   // !WIN32
  dlclose(hdll);
#endif  // !WIN32

  return qrp;
  } // end of OEMColumns
/* ------------------------- Class CATALOG --------------------------- */
/***********************************************************************/
/* CATALOG Constructor. */
/***********************************************************************/
/***********************************************************************/
/*  CATALOG Constructor.                                               */
/***********************************************************************/
CATALOG::CATALOG(void)
  {
  // DataPath initialization is intentionally disabled here
  // (it used to default to ".\\" on Windows and "./" elsewhere).
  Cbuf = NULL;
  Cblen = 0;
  DefHuge = false;
  memset(&Ctb, 0, sizeof(CURTAB));
  } // end of CATALOG constructor
/* -------------------------- Class MYCAT ---------------------------- */
/***********************************************************************/
/* MYCAT Constructor. */
/***********************************************************************/
/***********************************************************************/
/*  MYCAT Constructor.                                                 */
/*  hc is the ha_connect handler this catalog works for; it is kept    */
/*  to retrieve table options later (see GetTableDesc).                */
/***********************************************************************/
MYCAT::MYCAT(PHC hc) : CATALOG()
  {
  Hc= hc;
  DefHuge= false;  // Re-set here although the base constructor did it too
  } // end of MYCAT constructor
/***********************************************************************/
/* Nothing to do for CONNECT. */
/***********************************************************************/
/***********************************************************************/
/*  Nothing to do for CONNECT: this override is intentionally empty.   */
/***********************************************************************/
void MYCAT::Reset(void)
  {
  } // end of Reset
#if 0   // Dead code kept for reference only: not compiled.
/***********************************************************************/
/*  This function sets the current database path.                      */
/*  Absolute paths are copied as is; relative ones not starting with   */
/*  '.' are rewritten as "./<path>/" using the platform separator.     */
/***********************************************************************/
void MYCAT::SetPath(PGLOBAL g, LPCSTR *datapath, const char *path)
	{
	if (path) {
		size_t len= strlen(path) + (*path != '.' ? 4 : 1);
		char *buf= (char*)PlugSubAlloc(g, NULL, len);

		if (PlugIsAbsolutePath(path))
		{
			strcpy(buf, path);
			*datapath= buf;
			return;
		}

		if (*path != '.') {
#if defined(WIN32)
			char *s= "\\";
#else   // !WIN32
			char *s= "/";
#endif  // !WIN32
			strcat(strcat(strcat(strcpy(buf, "."), s), path), s);
		} else
			strcpy(buf, path);

		*datapath= buf;
		} // endif path

	} // end of SetDataPath
#endif // 0
/***********************************************************************/
/* GetTableDesc: retrieve a table descriptor. */
/* Look for a table descriptor matching the name and type. */
/***********************************************************************/
/***********************************************************************/
/*  GetTableDesc: retrieve a table descriptor.                         */
/*  If no type is passed, the "Type" option of the handler is used     */
/*  (defaulting to "*", i.e. proxy).  The prp parameter is unused      */
/*  here; it is kept for interface compatibility.                      */
/***********************************************************************/
PRELDEF MYCAT::GetTableDesc(PGLOBAL g, LPCSTR name,
                                       LPCSTR type, PRELDEF *prp)
  {
  if (trace)
    printf("GetTableDesc: name=%s am=%s\n", name, SVP(type));

  // If not specified get the type of this table
  if (!type)
    type= Hc->GetStringOption("Type","*");

  return MakeTableDesc(g, name, type);
  } // end of GetTableDesc
/***********************************************************************/
/* MakeTableDesc: make a table/view description. */
/* Note: caller must check if name already exists before calling it. */
/***********************************************************************/
/***********************************************************************/
/*  MakeTableDesc: make a table/view description.                      */
/*  Allocates (in the g memory pool) the DEF object matching the       */
/*  access-method string am, then lets it parse its options via        */
/*  Define().  Returns NULL and sets g->Message on unknown type or     */
/*  Define() failure.                                                  */
/*  Note: caller must check if name already exists before calling it.  */
/***********************************************************************/
PRELDEF MYCAT::MakeTableDesc(PGLOBAL g, LPCSTR name, LPCSTR am)
  {
  TABTYPE tc;
  PRELDEF tdp= NULL;

  if (trace)
    printf("MakeTableDesc: name=%s am=%s\n", name, SVP(am));

  /*********************************************************************/
  /*  Get a unique enum identifier for types.                          */
  /*********************************************************************/
  tc= GetTypeID(am);

  switch (tc) {
    // DOSDEF covers all plain-record file flavors
    case TAB_FIX:
    case TAB_BIN:
    case TAB_DBF:
    case TAB_DOS: tdp= new(g) DOSDEF;   break;
    case TAB_CSV:
    case TAB_FMT: tdp= new(g) CSVDEF;   break;
    case TAB_INI: tdp= new(g) INIDEF;   break;
    case TAB_DIR: tdp= new(g) DIRDEF;   break;
#if defined(XML_SUPPORT)
    case TAB_XML: tdp= new(g) XMLDEF;   break;
#endif   // XML_SUPPORT
    case TAB_VEC: tdp= new(g) VCTDEF;   break;
#if defined(ODBC_SUPPORT)
    case TAB_ODBC: tdp= new(g) ODBCDEF; break;
#endif   // ODBC_SUPPORT
#if defined(WIN32)
    case TAB_MAC: tdp= new(g) MACDEF;   break;
    case TAB_WMI: tdp= new(g) WMIDEF;   break;
#endif   // WIN32
    case TAB_OEM: tdp= new(g) OEMDEF;   break;
    case TAB_TBL: tdp= new(g) TBLDEF;   break;
    case TAB_XCL: tdp= new(g) XCLDEF;   break;
    case TAB_PRX: tdp= new(g) PRXDEF;   break;
    case TAB_OCCUR: tdp= new(g) OCCURDEF; break;
#if defined(MYSQL_SUPPORT)
    case TAB_MYSQL: tdp= new(g) MYSQLDEF; break;
#endif   // MYSQL_SUPPORT
#if defined(PIVOT_SUPPORT)
    case TAB_PIVOT: tdp= new(g) PIVOTDEF; break;
#endif   // PIVOT_SUPPORT
    case TAB_VIR: tdp= new(g) VIRDEF;   break;
    default:
      sprintf(g->Message, MSG(BAD_TABLE_TYPE), am, name);
    } // endswitch

  // Do make the table/view definition
  if (tdp && tdp->Define(g, this, name, am))
    tdp= NULL;   // Define failed; message already in g->Message

  return tdp;
  } // end of MakeTableDesc
/***********************************************************************/
/* Initialize a Table Description Block construction. */
/***********************************************************************/
/***********************************************************************/
/*  Initialize a Table Description Block construction.                 */
/*  Gets the descriptor for tablep, builds the TDB from it and links   */
/*  the TDB and the TABLE objects together.  Returns NULL on failure   */
/*  (the descriptor or TDB construction sets g->Message).              */
/***********************************************************************/
PTDB MYCAT::GetTable(PGLOBAL g, PTABLE tablep, MODE mode, LPCSTR type)
  {
  PRELDEF tdp;
  PTDB    tdbp= NULL;
  LPCSTR  name= tablep->GetName();

  if (trace)
    printf("GetTableDB: name=%s\n", name);

  // Look for the description of the requested table
  tdp= GetTableDesc(g, name, type);

  if (tdp) {
    if (trace)
      printf("tdb=%p type=%s\n", tdp, tdp->GetType());

    // A qualifier (database name) overrides the default data path
    if (tablep->GetQualifier())
      tdp->Database = SetPath(g, tablep->GetQualifier());

    tdbp= tdp->GetTable(g, mode);
    } // endif tdp

  if (tdbp) {
    if (trace)
      printf("tdbp=%p name=%s amtype=%d\n", tdbp, tdbp->GetName(),
                                            tdbp->GetAmType());
    // Cross-link the TABLE and TDB objects and record the open mode
    tablep->SetTo_Tdb(tdbp);
    tdbp->SetTable(tablep);
    tdbp->SetMode(mode);
    } // endif tdbp

  return (tdbp);
  } // end of GetTable
/***********************************************************************/
/* ClearDB: Terminates Database usage. */
/***********************************************************************/
/***********************************************************************/
/*  ClearDB: Terminates Database usage.  Nothing to do for CONNECT,    */
/*  so this override is intentionally empty.                           */
/***********************************************************************/
void MYCAT::ClearDB(PGLOBAL g)
  {
  } // end of ClearDB
/* ------------------------ End of MYCAT --------------------------- */
|
SunguckLee/MariaDB-PageCompression
|
storage/connect/mycat.cc
|
C++
|
gpl-2.0
| 18,631
|
#
# Makefile for the linux memory manager.
#
# Objects that depend on MMU support: MMU-less builds fall back to
# nommu.o, full-MMU builds get the mapping/page-table machinery.
mmu-y			:= nommu.o
mmu-$(CONFIG_MMU)	:= fremap.o highmem.o madvise.o memory.o mincore.o \
			   mlock.o mmap.o mprotect.o mremap.o msync.o rmap.o \
			   vmalloc.o pagewalk.o pgtable-generic.o \
			   process_vm_access.o

# Core memory management, always built.
obj-y			:= filemap.o mempool.o oom_kill.o fadvise.o \
			   maccess.o page_alloc.o page-writeback.o \
			   readahead.o swap.o truncate.o vmscan.o shmem.o \
			   prio_tree.o util.o mmzone.o vmstat.o backing-dev.o \
			   page_isolation.o mm_init.o mmu_context.o percpu.o \
			   compaction.o slab_common.o $(mmu-y)

obj-y += init-mm.o

# Early (boot-time) allocator: exactly one of the two implementations.
ifdef CONFIG_NO_BOOTMEM
	obj-y		+= nobootmem.o
else
	obj-y		+= bootmem.o
endif

# Optional features, each gated by its Kconfig symbol.
obj-$(CONFIG_HAVE_MEMBLOCK) += memblock.o

obj-$(CONFIG_BOUNCE)	+= bounce.o
obj-$(CONFIG_SWAP)	+= page_io.o swap_state.o swapfile.o
obj-$(CONFIG_FRONTSWAP)	+= frontswap.o
obj-$(CONFIG_HAS_DMA)	+= dmapool.o
obj-$(CONFIG_HUGETLBFS)	+= hugetlb.o
obj-$(CONFIG_NUMA) 	+= mempolicy.o
obj-$(CONFIG_SPARSEMEM)	+= sparse.o
obj-$(CONFIG_SPARSEMEM_VMEMMAP) += sparse-vmemmap.o
# Slab allocator flavors (SLOB/SLAB/SLUB are mutually exclusive).
obj-$(CONFIG_SLOB) += slob.o
obj-$(CONFIG_MMU_NOTIFIER) += mmu_notifier.o
obj-$(CONFIG_KSM_LEGACY) += ksm.o
obj-$(CONFIG_UKSM) += uksm.o
obj-$(CONFIG_PAGE_POISONING) += debug-pagealloc.o
obj-$(CONFIG_SLAB) += slab.o
obj-$(CONFIG_SLUB) += slub.o
obj-$(CONFIG_KMEMCHECK) += kmemcheck.o
obj-$(CONFIG_FAILSLAB) += failslab.o
obj-$(CONFIG_MEMORY_HOTPLUG) += memory_hotplug.o
obj-$(CONFIG_FS_XIP) += filemap_xip.o
obj-$(CONFIG_MIGRATION) += migrate.o
obj-$(CONFIG_QUICKLIST) += quicklist.o
obj-$(CONFIG_TRANSPARENT_HUGEPAGE) += huge_memory.o
obj-$(CONFIG_CGROUP_MEM_RES_CTLR) += memcontrol.o page_cgroup.o
# Hardware poison (memory failure) handling and debug helpers.
obj-$(CONFIG_MEMORY_FAILURE) += memory-failure.o
obj-$(CONFIG_HWPOISON_INJECT) += hwpoison-inject.o
obj-$(CONFIG_DEBUG_KMEMLEAK) += kmemleak.o
obj-$(CONFIG_DEBUG_KMEMLEAK_TEST) += kmemleak-test.o
obj-$(CONFIG_CLEANCACHE) += cleancache.o
obj-$(CONFIG_ZSMALLOC)	+= zsmalloc.o
|
ion-storm/Unleashed-N4
|
mm/Makefile
|
Makefile
|
gpl-2.0
| 1,924
|
<?
require_once("../../lib/general/fpdf/fpdf.php");
require_once("../../lib/general/Herramientas.class.php");
require_once("../../lib/bd/basedatosAdo.php");
require_once("../../lib/general/cabecera.php");
/**
 * PDF report listing disposition codes (BNDISBIE) between two bounds.
 *
 * Builds a landscape Letter document with a header row and one data
 * row per distinct disposition code found in the requested range.
 */
class pdfreporte extends fpdf
{
	var $bd;        // Database access object (basedatosAdo)
	var $titulos;   // Column header labels
	var $anchos;    // Column widths in mm
	var $campos;    // (declared but not populated in this class)
	var $sql;       // Query producing the report rows
	var $rep;
	var $numero;
	var $cab;       // Shared page-header helper (cabecera)
	var $coddis1;   // Lower bound of the disposition-code range (from POST)
	var $coddis2;   // Upper bound of the disposition-code range (from POST)
	/**
	 * Constructor: sets up page format, reads the code range from the
	 * request and prepares the SQL and column layout.
	 *
	 * NOTE(review): coddis1/coddis2 come straight from the HTTP request
	 * and are concatenated into the SQL string unescaped - SQL
	 * injection risk unless basedatosAdo sanitizes them; confirm and
	 * prefer parameterized queries.
	 */
	function pdfreporte()
	{
		$this->fpdf("l","mm","Letter");
		$this->arrp=array("no_vacio");
		$this->cab=new cabecera();
		$this->bd=new basedatosAdo();
		$this->titulos=array();
		$this->campos=array();
		$this->anchos=array();
		$this->coddis1=H::GetPost("coddis1");
		$this->coddis2=H::GetPost("coddis2");
		$this->sql="SELECT DISTINCT coddis as coddis,desdis as desdis, (case when afecon='S' then 'SI' else 'NO' end) as afecon, (case when desinc='S' then 'SI' else 'NO' end) as desinc, (case when adimej='S' then 'SI' else 'NO' end) as adimej
	            FROM BNDISBIE
	            WHERE ( coddis >='".$this->coddis1."' AND coddis <='".$this->coddis2."' )
	            ORDER BY coddis";
		$this->llenartitulosmaestro();
	}
	/**
	 * Defines the five column titles and their widths (mm).
	 */
	function llenartitulosmaestro()
	{
		$this->titulos[0]="Código de Disposición";
		$this->titulos[1]="Descripcion";
		$this->titulos[2]=" Afecta Contabilidad";
		$this->titulos[3]=" Desincorpora";
		$this->titulos[4]=" Mejora el bien";
		$this->anchos[0]=40;
		$this->anchos[1]=80;
		$this->anchos[2]=40;
		$this->anchos[3]=40;
		$this->anchos[4]=40;
	}
	/**
	 * Page header callback (invoked by FPDF on every page): draws the
	 * institutional header, the column titles and a separator line.
	 */
	function Header()
	{
		$this->cab->poner_cabecera($this,H::GetPost("titulo"),"l","s","Letter");
		$this->setFont("Arial","B",9);
		$ncampos=count($this->titulos);
		for($i=0;$i<$ncampos;$i++)
		{
			$this->cell($this->anchos[$i],10,$this->titulos[$i]);
		}
		$this->ln(4);
		$this->Line(10,$this->GetY()+5,270,$this->GetY()+5);
		$this->ln(5);
	}
	/**
	 * Runs the query and renders one table row per result record.
	 */
	function Cuerpo()
	{
		$this->setFont("Arial","B",7);
		$tb=$this->bd->select($this->sql);
		$this->SetWidths(array($this->anchos[0],$this->anchos[1],$this->anchos[2],$this->anchos[3],$this->anchos[4]));
		$this->SetAligns(array('L','L','C','C','C'));
		while(!$tb->EOF)
		{
			$this->setFont("Arial","",8);
			$this->Row(array($tb->fields["coddis"],$tb->fields["desdis"],$tb->fields["afecon"],$tb->fields["desinc"],$tb->fields["adimej"]));
			$this->ln();
			$tb->MoveNext();
		}
	}
}
?>
|
cidesa/siga-universitario
|
web/reportes/reportes/bienes/pdfbnrdefdis.php
|
PHP
|
gpl-2.0
| 2,387
|
<?php // $Id: chatMsgList.class.php 415 2008-03-31 13:32:19Z fragile_be $
if ( count( get_included_files() ) == 1 ) die( '---' );
/**
* CLAROLINE
*
* @version 1.8 $Revision: 415 $
*
* @copyright (c) 2001-2011, Universite catholique de Louvain (UCL)
*
* @license http://www.gnu.org/copyleft/gpl.html (GPL) GENERAL PUBLIC LICENSE
*
* @package CLTRACK
*
* @author Claro Team <cvs@claroline.net>
* @author Sebastien Piraux <pir@cerdecam.be>
*/
/**
 * This class defines main methods used in the tracking renderers for
 * course related tracking data
 *
 * @abstract
 */
abstract class CourseTrackingRenderer
{
    private $courseId;

    /**
     * @param mixed $courseId id of the course whose tracking data is rendered
     */
    public function __construct($courseId)
    {
        $this->courseId = $courseId;
    }

    /**
     * Backward-compatible wrapper for the historical misspelling of the
     * constructor name ("__contruct"), kept so any existing explicit
     * calls keep working.
     *
     * @deprecated use __construct() instead
     */
    public function __contruct($courseId)
    {
        $this->__construct($courseId);
    }

    /**
     * Assemble the statistics block from the header, content and footer
     * parts provided by the concrete renderer.
     *
     * @return string HTML markup of the whole block
     */
    public function render()
    {
        $html = '<div class="statBlock">' . "\n"
        . ' <div class="blockHeader">' . "\n"
        . $this->renderHeader()
        . ' </div>' . "\n"
        . ' <div class="blockContent">' . "\n"
        . $this->renderContent()
        . ' </div>' . "\n"
        . ' <div class="blockFooter">' . "\n"
        . $this->renderFooter()
        . ' </div>' . "\n"
        . '</div>' . "\n";
        return $html;
    }
    /**
     * Render part of display (header) used in render class
     * @abstract
     */
    abstract protected function renderHeader();
    /**
     * Render part of display (content) used in render class
     * @abstract
     */
    abstract protected function renderContent();
    /**
     * Render part of display (footer) used in render class
     * @abstract
     */
    abstract protected function renderFooter();
}
/**
 * This class defines main methods used in the tracking renderers for
 * user related tracking data in course
 *
 * @abstract
 */
abstract class UserTrackingRenderer
{
    private $courseId;
    private $userId;

    /**
     * @param mixed $courseId id of the course
     * @param mixed $userId   id of the user whose tracking data is rendered
     */
    public function __construct($courseId, $userId)
    {
        $this->courseId = $courseId;
        $this->userId = $userId;
    }

    /**
     * Backward-compatible wrapper for the historical misspelling of the
     * constructor name ("__contruct"), kept so any existing explicit
     * calls keep working.
     *
     * @deprecated use __construct() instead
     */
    public function __contruct($courseId, $userId)
    {
        $this->__construct($courseId, $userId);
    }

    /**
     * Assemble the statistics block from the header, content and footer
     * parts provided by the concrete renderer.
     *
     * @return string HTML markup of the whole block
     */
    public function render()
    {
        $html = '<div class="statBlock">' . "\n"
        . ' <div class="blockHeader">' . "\n"
        . $this->renderHeader()
        . ' </div>' . "\n"
        . ' <div class="blockContent">' . "\n"
        . $this->renderContent()
        . ' </div>' . "\n"
        . ' <div class="blockFooter">' . "\n"
        . $this->renderFooter()
        . ' </div>' . "\n"
        . '</div>' . "\n";
        return $html;
    }
    /**
     * Render part of display (header) used in render class
     * @abstract
     */
    abstract protected function renderHeader();
    /**
     * Render part of display (content) used in render class
     * @abstract
     */
    abstract protected function renderContent();
    /**
     * Render part of display (footer) used in render class
     * @abstract
     */
    abstract protected function renderFooter();
}
?>
|
TeamRocketScience/Claroline-TRS-Edition
|
claroline/tracking/lib/trackingRenderer.class.php
|
PHP
|
gpl-2.0
| 3,259
|
/* Language-level data type conversion for GNU C++.
Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
2011 Free Software Foundation, Inc.
Hacked by Michael Tiemann (tiemann@cygnus.com)
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.
GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
/* This file contains the functions for converting C++ expressions
to different data types. The only entry point is `convert'.
Every language front end must have a `convert' function
but what kind of conversions it does will depend on the language. */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "flags.h"
#include "cp-tree.h"
#include "intl.h"
#include "convert.h"
#include "decl.h"
#include "target.h"
static tree cp_convert_to_pointer (tree, tree);
static tree convert_to_pointer_force (tree, tree);
static tree build_type_conversion (tree, tree);
static tree build_up_reference (tree, tree, int, tree);
static void warn_ref_binding (tree, tree, tree);
/* Change of width--truncation and extension of integers or reals--
is represented with NOP_EXPR. Proper functioning of many things
assumes that no other conversions can be NOP_EXPRs.
Conversion between integer and pointer is represented with CONVERT_EXPR.
Converting integer to real uses FLOAT_EXPR
and real to integer uses FIX_TRUNC_EXPR.
Here is a list of all the functions that assume that widening and
narrowing is always done with a NOP_EXPR:
In convert.c, convert_to_integer.
In c-typeck.c, build_binary_op_nodefault (boolean ops),
and c_common_truthvalue_conversion.
In expr.c: expand_expr, for operands of a MULT_EXPR.
In fold-const.c: fold.
In tree.c: get_narrower and get_unwidened.
C++: in multiple-inheritance, converting between pointers may involve
adjusting them by a delta stored within the class definition. */
/* Subroutines of `convert'. */
/* if converting pointer to pointer
if dealing with classes, check for derived->base or vice versa
else if dealing with method pointers, delegate
else convert blindly
else if converting class, pass off to build_type_conversion
else try C-style pointer conversion. */
/* Convert EXPR to pointer (or pointer-to-member) TYPE.
   Handles: class operands via user-defined conversions, anachronistic
   method-pointer to void*/function-pointer casts, pointer-to-pointer
   (including derived<->base adjustment), pointer-to-member
   conversions, null pointer constants, and integer-to-pointer.
   Returns error_mark_node (usually after diagnosing) on failure.  */

static tree
cp_convert_to_pointer (tree type, tree expr)
{
  tree intype = TREE_TYPE (expr);
  enum tree_code form;
  tree rval;
  if (intype == error_mark_node)
    return error_mark_node;

  if (MAYBE_CLASS_TYPE_P (intype))
    {
      /* A class operand: try a user-defined conversion to TYPE.  */
      intype = complete_type (intype);
      if (!COMPLETE_TYPE_P (intype))
	{
	  error ("can%'t convert from incomplete type %qT to %qT",
		 intype, type);
	  return error_mark_node;
	}

      rval = build_type_conversion (type, expr);
      if (rval)
	{
	  if (rval == error_mark_node)
	    error ("conversion of %qE from %qT to %qT is ambiguous",
		   expr, intype, type);
	  return rval;
	}
    }

  /* Handle anachronistic conversions from (::*)() to cv void* or (*)().  */
  if (TREE_CODE (type) == POINTER_TYPE
      && (TREE_CODE (TREE_TYPE (type)) == FUNCTION_TYPE
	  || VOID_TYPE_P (TREE_TYPE (type))))
    {
      if (TYPE_PTRMEMFUNC_P (intype)
	  || TREE_CODE (intype) == METHOD_TYPE)
	return convert_member_func_to_ptr (type, expr);
      if (TREE_CODE (TREE_TYPE (expr)) == POINTER_TYPE)
	return build_nop (type, expr);
      intype = TREE_TYPE (expr);
    }

  if (expr == error_mark_node)
    return error_mark_node;

  form = TREE_CODE (intype);

  if (POINTER_TYPE_P (intype))
    {
      intype = TYPE_MAIN_VARIANT (intype);

      if (TYPE_MAIN_VARIANT (type) != intype
	  && TREE_CODE (type) == POINTER_TYPE
	  && TREE_CODE (TREE_TYPE (type)) == RECORD_TYPE
	  && MAYBE_CLASS_TYPE_P (TREE_TYPE (type))
	  && MAYBE_CLASS_TYPE_P (TREE_TYPE (intype))
	  && TREE_CODE (TREE_TYPE (intype)) == RECORD_TYPE)
	{
	  /* Pointer-to-class to pointer-to-class: a derived-to-base or
	     base-to-derived adjustment may be required.  */
	  enum tree_code code = PLUS_EXPR;
	  tree binfo;
	  tree intype_class;
	  tree type_class;
	  bool same_p;

	  intype_class = TREE_TYPE (intype);
	  type_class = TREE_TYPE (type);

	  same_p = same_type_p (TYPE_MAIN_VARIANT (intype_class),
				TYPE_MAIN_VARIANT (type_class));
	  binfo = NULL_TREE;
	  /* Try derived to base conversion.  */
	  if (!same_p)
	    binfo = lookup_base (intype_class, type_class, ba_check, NULL);
	  if (!same_p && !binfo)
	    {
	      /* Try base to derived conversion.  */
	      binfo = lookup_base (type_class, intype_class, ba_check, NULL);
	      code = MINUS_EXPR;
	    }
	  if (binfo == error_mark_node)
	    return error_mark_node;
	  if (binfo || same_p)
	    {
	      if (binfo)
		expr = build_base_path (code, expr, binfo, 0,
					tf_warning_or_error);
	      /* Add any qualifier conversions.  */
	      return build_nop (type, expr);
	    }
	}

      if (TYPE_PTRMEMFUNC_P (type))
	{
	  error ("cannot convert %qE from type %qT to type %qT",
		 expr, intype, type);
	  return error_mark_node;
	}

      return build_nop (type, expr);
    }
  else if ((TYPE_PTRMEM_P (type) && TYPE_PTRMEM_P (intype))
	   || (TYPE_PTRMEMFUNC_P (type) && TYPE_PTRMEMFUNC_P (intype)))
    return convert_ptrmem (type, expr, /*allow_inverse_p=*/false,
			   /*c_cast_p=*/false, tf_warning_or_error);
  else if (TYPE_PTRMEMFUNC_P (intype))
    {
      if (!warn_pmf2ptr)
	{
	  /* -Wno-pmf-conversions: silently extract the underlying
	     function pointer.  */
	  if (TREE_CODE (expr) == PTRMEM_CST)
	    return cp_convert_to_pointer (type,
					  PTRMEM_CST_MEMBER (expr));
	  else if (TREE_CODE (expr) == OFFSET_REF)
	    {
	      tree object = TREE_OPERAND (expr, 0);
	      return get_member_function_from_ptrfunc (&object,
						       TREE_OPERAND (expr, 1));
	    }
	}
      error ("cannot convert %qE from type %qT to type %qT",
	     expr, intype, type);
      return error_mark_node;
    }

  if (null_ptr_cst_p (expr))
    {
      if (c_inhibit_evaluation_warnings == 0
	  && !NULLPTR_TYPE_P (TREE_TYPE (expr)))
	warning (OPT_Wzero_as_null_pointer_constant,
		 "zero as null pointer constant");

      if (TYPE_PTRMEMFUNC_P (type))
	return build_ptrmemfunc (TYPE_PTRMEMFUNC_FN_TYPE (type), expr, 0,
				 /*c_cast_p=*/false, tf_warning_or_error);

      if (TYPE_PTRMEM_P (type))
	{
	  /* A NULL pointer-to-member is represented by -1, not by
	     zero.  */
	  expr = build_int_cst_type (type, -1);
	}
      else
	expr = build_int_cst (type, 0);

      return expr;
    }
  else if (TYPE_PTR_TO_MEMBER_P (type) && INTEGRAL_CODE_P (form))
    {
      error ("invalid conversion from %qT to %qT", intype, type);
      return error_mark_node;
    }

  if (INTEGRAL_CODE_P (form))
    {
      /* Integer to pointer: widen/narrow the integer to pointer width
	 first if needed, then convert.  */
      if (TYPE_PRECISION (intype) == POINTER_SIZE)
	return build1 (CONVERT_EXPR, type, expr);
      expr = cp_convert (c_common_type_for_size (POINTER_SIZE, 0), expr);
      /* Modes may be different but sizes should be the same.  There
	 is supposed to be some integral type that is the same width
	 as a pointer.  */
      gcc_assert (GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (expr)))
		  == GET_MODE_SIZE (TYPE_MODE (type)));

      return convert_to_pointer (type, expr);
    }

  if (type_unknown_p (expr))
    return instantiate_type (type, expr, tf_warning_or_error);

  error ("cannot convert %qE from type %qT to type %qT",
	 expr, intype, type);
  return error_mark_node;
}
/* Like convert, except permit conversions to take place which
are not normally allowed due to access restrictions
(such as conversion from sub-type to private super-type). */
/* Like cp_convert_to_pointer, except permit conversions to take place
   which are not normally allowed due to access restrictions (such as
   conversion from sub-type to private super-type): base lookup is done
   with ba_unique, i.e. without access checking.  */

static tree
convert_to_pointer_force (tree type, tree expr)
{
  tree intype = TREE_TYPE (expr);
  enum tree_code form = TREE_CODE (intype);

  if (form == POINTER_TYPE)
    {
      intype = TYPE_MAIN_VARIANT (intype);

      if (TYPE_MAIN_VARIANT (type) != intype
	  && TREE_CODE (TREE_TYPE (type)) == RECORD_TYPE
	  && MAYBE_CLASS_TYPE_P (TREE_TYPE (type))
	  && MAYBE_CLASS_TYPE_P (TREE_TYPE (intype))
	  && TREE_CODE (TREE_TYPE (intype)) == RECORD_TYPE)
	{
	  enum tree_code code = PLUS_EXPR;
	  tree binfo;

	  /* First try derived-to-base (PLUS_EXPR adjustment), then
	     base-to-derived (MINUS_EXPR).  */
	  binfo = lookup_base (TREE_TYPE (intype), TREE_TYPE (type),
			       ba_unique, NULL);
	  if (!binfo)
	    {
	      binfo = lookup_base (TREE_TYPE (type), TREE_TYPE (intype),
				   ba_unique, NULL);
	      code = MINUS_EXPR;
	    }
	  if (binfo == error_mark_node)
	    return error_mark_node;
	  if (binfo)
	    {
	      expr = build_base_path (code, expr, binfo, 0,
				      tf_warning_or_error);
	      if (expr == error_mark_node)
		return error_mark_node;
	      /* Add any qualifier conversions.  */
	      if (!same_type_p (TREE_TYPE (TREE_TYPE (expr)),
				TREE_TYPE (type)))
		expr = build_nop (type, expr);
	      return expr;
	    }
	}
    }

  /* Fall back to the ordinary (access-checked) conversion.  */
  return cp_convert_to_pointer (type, expr);
}
/* We are passing something to a function which requires a reference.
The type we are interested in is in TYPE. The initial
value we have to begin with is in ARG.
FLAGS controls how we manage access checking.
DIRECT_BIND in FLAGS controls how any temporaries are generated.
If DIRECT_BIND is set, DECL is the reference we're binding to. */
/* We are passing something to a function which requires a reference.
   The type we are interested in is in TYPE.  The initial
   value we have to begin with is in ARG.

   FLAGS controls how we manage access checking.
   DIRECT_BIND in FLAGS controls how any temporaries are generated.
     If DIRECT_BIND is set, DECL is the reference we're binding to.

   Result is the ADDRESS of ARG (possibly base-adjusted), wrapped in a
   NOP to the reference type.  */

static tree
build_up_reference (tree type, tree arg, int flags, tree decl)
{
  tree rval;
  tree argtype = TREE_TYPE (arg);
  tree target_type = TREE_TYPE (type);

  gcc_assert (TREE_CODE (type) == REFERENCE_TYPE);

  if ((flags & DIRECT_BIND) && ! real_lvalue_p (arg))
    {
      /* Create a new temporary variable.  We can't just use a TARGET_EXPR
	 here because it needs to live as long as DECL.  */
      tree targ = arg;

      arg = make_temporary_var_for_ref_to_temp (decl, target_type);

      /* Process the initializer for the declaration.  */
      DECL_INITIAL (arg) = targ;
      cp_finish_decl (arg, targ, /*init_const_expr_p=*/false, NULL_TREE,
		      LOOKUP_ONLYCONVERTING|DIRECT_BIND);
    }
  else if (!(flags & DIRECT_BIND) && ! lvalue_p (arg))
    return get_target_expr (arg);

  /* If we had a way to wrap this up, and say, if we ever needed its
     address, transform all occurrences of the register, into a memory
     reference we could win better.  */
  rval = cp_build_addr_expr (arg, tf_warning_or_error);
  if (rval == error_mark_node)
    return error_mark_node;

  if ((flags & LOOKUP_PROTECT)
      && TYPE_MAIN_VARIANT (argtype) != TYPE_MAIN_VARIANT (target_type)
      && MAYBE_CLASS_TYPE_P (argtype)
      && MAYBE_CLASS_TYPE_P (target_type))
    {
      /* We go through lookup_base for the access control.  */
      tree binfo = lookup_base (argtype, target_type, ba_check, NULL);
      if (binfo == error_mark_node)
	return error_mark_node;
      if (binfo == NULL_TREE)
	return error_not_base_type (target_type, argtype);
      rval = build_base_path (PLUS_EXPR, rval, binfo, 1,
			      tf_warning_or_error);
    }
  else
    /* No access check needed: force-convert the address.  */
    rval
      = convert_to_pointer_force (build_pointer_type (target_type), rval);
  return build_nop (type, rval);
}
/* Subroutine of convert_to_reference. REFTYPE is the target reference type.
INTYPE is the original rvalue type and DECL is an optional _DECL node
for diagnostics.
[dcl.init.ref] says that if an rvalue is used to
initialize a reference, then the reference must be to a
non-volatile const type. */
/* Subroutine of convert_to_reference.  REFTYPE is the target reference
   type, INTYPE the original rvalue type and DECL an optional _DECL node
   for diagnostics.

   [dcl.init.ref] says that if an rvalue is used to
   initialize a reference, then the reference must be to a
   non-volatile const type.  Issue a permerror otherwise, with wording
   chosen by whether the target is volatile and whether this is an
   initialization (DECL given) or a conversion.  */

static void
warn_ref_binding (tree reftype, tree intype, tree decl)
{
  tree ttl = TREE_TYPE (reftype);

  if (!CP_TYPE_CONST_NON_VOLATILE_P (ttl))
    {
      const char *msg;

      if (CP_TYPE_VOLATILE_P (ttl) && decl)
	msg = G_("initialization of volatile reference type %q#T from "
		 "rvalue of type %qT");
      else if (CP_TYPE_VOLATILE_P (ttl))
	msg = G_("conversion to volatile reference type %q#T "
		 "from rvalue of type %qT");
      else if (decl)
	msg = G_("initialization of non-const reference type %q#T from "
		 "rvalue of type %qT");
      else
	msg = G_("conversion to non-const reference type %q#T from "
		 "rvalue of type %qT");

      permerror (input_location, msg, reftype, intype);
    }
}
/* For C++: Only need to do one-level references, but cannot
get tripped up on signed/unsigned differences.
DECL is either NULL_TREE or the _DECL node for a reference that is being
initialized. It can be error_mark_node if we don't know the _DECL but
we know it's an initialization. */
/* For C++: Only need to do one-level references, but cannot
   get tripped up on signed/unsigned differences.

   REFTYPE is the reference type to convert EXPR to; CONVTYPE is a mask
   of CONV_* bits selecting which conversion kinds are permitted, FLAGS
   are LOOKUP_* bits.  DECL is either NULL_TREE or the _DECL node for a
   reference that is being initialized.  It can be error_mark_node if
   we don't know the _DECL but we know it's an initialization.  */

tree
convert_to_reference (tree reftype, tree expr, int convtype,
		      int flags, tree decl)
{
  tree type = TYPE_MAIN_VARIANT (TREE_TYPE (reftype));
  tree intype;
  tree rval = NULL_TREE;
  tree rval_as_conversion = NULL_TREE;
  bool can_convert_intype_to_type;

  if (TREE_CODE (type) == FUNCTION_TYPE
      && TREE_TYPE (expr) == unknown_type_node)
    /* An overloaded function: pick the overload matching TYPE.  */
    expr = instantiate_type (type, expr,
			     (flags & LOOKUP_COMPLAIN)
			     ? tf_warning_or_error : tf_none);

  if (expr == error_mark_node)
    return error_mark_node;

  intype = TREE_TYPE (expr);

  gcc_assert (TREE_CODE (intype) != REFERENCE_TYPE);
  gcc_assert (TREE_CODE (reftype) == REFERENCE_TYPE);

  intype = TYPE_MAIN_VARIANT (intype);

  can_convert_intype_to_type = can_convert (type, intype);
  if (!can_convert_intype_to_type
      && (convtype & CONV_IMPLICIT) && MAYBE_CLASS_TYPE_P (intype)
      && ! (flags & LOOKUP_NO_CONVERSION))
    {
      /* Look for a user-defined conversion to lvalue that we can use.  */

      rval_as_conversion
	= build_type_conversion (reftype, expr);

      if (rval_as_conversion && rval_as_conversion != error_mark_node
	  && real_lvalue_p (rval_as_conversion))
	{
	  expr = rval_as_conversion;
	  rval_as_conversion = NULL_TREE;
	  intype = type;
	  can_convert_intype_to_type = 1;
	}
    }

  if (((convtype & CONV_STATIC) && can_convert (intype, type))
      || ((convtype & CONV_IMPLICIT) && can_convert_intype_to_type))
    {
      if (flags & LOOKUP_COMPLAIN)
	{
	  tree ttl = TREE_TYPE (reftype);
	  tree ttr = lvalue_type (expr);

	  if (! real_lvalue_p (expr))
	    warn_ref_binding (reftype, intype, decl);

	  if (! (convtype & CONV_CONST)
	      && !at_least_as_qualified_p (ttl, ttr))
	    permerror (input_location, "conversion from %qT to %qT discards qualifiers",
		       ttr, reftype);
	}

      return build_up_reference (reftype, expr, flags, decl);
    }
  else if ((convtype & CONV_REINTERPRET) && lvalue_p (expr))
    {
      /* When casting an lvalue to a reference type, just convert into
	 a pointer to the new type and deference it.  This is allowed
	 by San Diego WP section 5.2.9 paragraph 12, though perhaps it
	 should be done directly (jason).  (int &)ri ---> *(int*)&ri */

      /* B* bp; A& ar = (A&)bp; is valid, but it's probably not what they
	 meant.  */
      if (TREE_CODE (intype) == POINTER_TYPE
	  && (comptypes (TREE_TYPE (intype), type,
			 COMPARE_BASE | COMPARE_DERIVED)))
	warning (0, "casting %qT to %qT does not dereference pointer",
		 intype, reftype);

      rval = cp_build_addr_expr (expr, tf_warning_or_error);
      if (rval != error_mark_node)
	rval = convert_force (build_pointer_type (TREE_TYPE (reftype)),
			      rval, 0);
      if (rval != error_mark_node)
	rval = build1 (NOP_EXPR, reftype, rval);
    }
  else
    {
      /* Last resort: convert as an initialization, then bind.  */
      rval = convert_for_initialization (NULL_TREE, type, expr, flags,
					 ICR_CONVERTING, 0, 0,
					 tf_warning_or_error);
      if (rval == NULL_TREE || rval == error_mark_node)
	return rval;
      warn_ref_binding (reftype, intype, decl);
      rval = build_up_reference (reftype, rval, flags, decl);
    }

  if (rval)
    {
      /* If we found a way to convert earlier, then use it.  */
      return rval;
    }

  if (flags & LOOKUP_COMPLAIN)
    error ("cannot convert type %qT to type %qT", intype, reftype);

  return error_mark_node;
}
/* We are using a reference VAL for its value. Bash that reference all the
way down to its lowest form. */
/* We are using a reference VAL for its value.  Bash that reference all
   the way down to its lowest form: if VAL has reference type, wrap it
   in an INDIRECT_REF carrying the referenced type's const/volatile
   qualifiers; otherwise return VAL unchanged.  */

tree
convert_from_reference (tree val)
{
  if (TREE_TYPE (val)
      && TREE_CODE (TREE_TYPE (val)) == REFERENCE_TYPE)
    {
      tree t = TREE_TYPE (TREE_TYPE (val));
      tree ref = build1 (INDIRECT_REF, t, val);

      mark_exp_read (val);
      /* We *must* set TREE_READONLY when dereferencing a pointer to const,
	 so that we get the proper error message if the result is used
	 to assign to.  Also, &* is supposed to be a no-op.  */
      TREE_READONLY (ref) = CP_TYPE_CONST_P (t);
      TREE_THIS_VOLATILE (ref) = CP_TYPE_VOLATILE_P (t);
      TREE_SIDE_EFFECTS (ref)
	= (TREE_THIS_VOLATILE (ref) || TREE_SIDE_EFFECTS (val));
      val = ref;
    }

  return val;
}
/* Really perform an lvalue-to-rvalue conversion, including copying an
argument of class type into a temporary. */
/* Really perform an lvalue-to-rvalue conversion, including copying an
   argument of class type into a temporary (via its copy/move
   constructor); non-class types just get the usual decay
   conversion.  */

tree
force_rvalue (tree expr, tsubst_flags_t complain)
{
  tree type = TREE_TYPE (expr);
  if (MAYBE_CLASS_TYPE_P (type) && TREE_CODE (expr) != TARGET_EXPR)
    {
      VEC(tree,gc) *args = make_tree_vector_single (expr);
      expr = build_special_member_call (NULL_TREE, complete_ctor_identifier,
					&args, type, LOOKUP_NORMAL, complain);
      release_tree_vector (args);
      expr = build_cplus_new (type, expr, complain);
    }
  else
    expr = decay_conversion (expr);

  return expr;
}
/* If EXPR and ORIG are INTEGER_CSTs, return a version of EXPR that has
   TREE_OVERFLOW set only if it is set in ORIG.  Otherwise, return EXPR
   unchanged.  */

static tree
ignore_overflows (tree expr, tree orig)
{
  bool both_cst = (TREE_CODE (expr) == INTEGER_CST
		   && TREE_CODE (orig) == INTEGER_CST);

  /* Only rebuild when the conversion itself introduced an overflow
     flag that ORIG did not carry.  */
  if (both_cst && TREE_OVERFLOW (expr) != TREE_OVERFLOW (orig))
    {
      gcc_assert (!TREE_OVERFLOW (orig));
      /* Rebuild through build_int_cst_wide to ensure constant sharing.  */
      expr = build_int_cst_wide (TREE_TYPE (expr),
				 TREE_INT_CST_LOW (expr),
				 TREE_INT_CST_HIGH (expr));
    }

  return expr;
}
/* Fold away simple conversions, but make sure TREE_OVERFLOW is set
   properly: any overflow flag introduced purely by the conversion is
   dropped so EXPR's original overflow status is preserved.  */

tree
cp_fold_convert (tree type, tree expr)
{
  return ignore_overflows (fold_convert (type, expr), expr);
}
/* C++ conversions, preference to static cast conversions.

   Convert EXPR to TYPE with old-style implicit-conversion semantics;
   a thin wrapper around ocp_convert with CONV_OLD_CONVERT and the
   default lookup flags.  */

tree
cp_convert (tree type, tree expr)
{
  return ocp_convert (type, expr, CONV_OLD_CONVERT, LOOKUP_NORMAL);
}
/* C++ equivalent of convert_and_check but using cp_convert as the
   conversion function.

   Convert EXPR to TYPE, warning about conversion problems with constants.
   Invoke this function on every expression that is converted implicitly,
   i.e. because of language rules and not because of an explicit cast.  */

tree
cp_convert_and_check (tree type, tree expr)
{
  tree converted;

  /* Nothing to do when the types already agree exactly.  */
  if (type == TREE_TYPE (expr))
    return expr;

  converted = cp_convert (type, expr);

  /* Warn about value changes, unless warnings are suppressed, the
     conversion failed, or the input already carried an overflow.  */
  if (!c_inhibit_evaluation_warnings
      && converted != error_mark_node
      && !TREE_OVERFLOW_P (expr))
    warnings_for_convert_and_check (type, expr, converted);

  return converted;
}
/* Conversion...

   Central conversion driver: convert EXPR to TYPE.  CONVTYPE is a mask
   of CONV_* bits selecting which conversion kinds are permitted
   (e.g. CONV_STATIC, CONV_C_CAST, CONV_FORCE_TEMP); FLAGS is a mask of
   LOOKUP_* bits and indicates how we should behave (in particular,
   LOOKUP_COMPLAIN enables diagnostics).  Returns the converted tree or
   error_mark_node.  */

tree
ocp_convert (tree type, tree expr, int convtype, int flags)
{
  tree e = expr;
  enum tree_code code = TREE_CODE (type);
  const char *invalid_conv_diag;
  tree e1;

  if (error_operand_p (e) || type == error_mark_node)
    return error_mark_node;

  complete_type (type);
  complete_type (TREE_TYPE (expr));

  /* Let the target veto conversions it considers invalid.  */
  if ((invalid_conv_diag
       = targetm.invalid_conversion (TREE_TYPE (expr), type)))
    {
      error (invalid_conv_diag);
      return error_mark_node;
    }

  /* FIXME remove when moving to c_fully_fold model.  */
  /* FIXME do we still need this test?  */
  if (!CLASS_TYPE_P (type))
    e = integral_constant_value (e);
  if (error_operand_p (e))
    return error_mark_node;

  /* Shortcut: when the types already match (modulo top-level
     qualifiers) we can often return E as-is or just re-type it.  */
  if (MAYBE_CLASS_TYPE_P (type) && (convtype & CONV_FORCE_TEMP))
    /* We need a new temporary; don't take this shortcut.  */;
  else if (same_type_ignoring_top_level_qualifiers_p (type, TREE_TYPE (e)))
    {
      if (same_type_p (type, TREE_TYPE (e)))
	/* The call to fold will not always remove the NOP_EXPR as
	   might be expected, since if one of the types is a typedef;
	   the comparison in fold is just equality of pointers, not a
	   call to comptypes.  We don't call fold in this case because
	   that can result in infinite recursion; fold will call
	   convert, which will call ocp_convert, etc.  */
	return e;
      /* For complex data types, we need to perform componentwise
	 conversion.  */
      else if (TREE_CODE (type) == COMPLEX_TYPE)
	return fold_if_not_in_template (convert_to_complex (type, e));
      else if (TREE_CODE (e) == TARGET_EXPR)
	{
	  /* Don't build a NOP_EXPR of class type.  Instead, change the
	     type of the temporary.  */
	  TREE_TYPE (e) = TREE_TYPE (TARGET_EXPR_SLOT (e)) = type;
	  return e;
	}
      else
	{
	  /* We shouldn't be treating objects of ADDRESSABLE type as
	     rvalues.  */
	  gcc_assert (!TREE_ADDRESSABLE (type));
	  return fold_if_not_in_template (build_nop (type, e));
	}
    }

  /* Give the target a chance to perform the whole conversion itself.  */
  e1 = targetm.convert_to_type (type, e);
  if (e1)
    return e1;

  /* Conversion to void (a discarded-value expression).  */
  if (code == VOID_TYPE && (convtype & CONV_STATIC))
    {
      e = convert_to_void (e, ICV_CAST, tf_warning_or_error);
      return e;
    }

  /* Conversions to integral and enumeration types.  */
  if (INTEGRAL_CODE_P (code))
    {
      tree intype = TREE_TYPE (e);
      tree converted;

      if (TREE_CODE (type) == ENUMERAL_TYPE)
	{
	  /* enum = enum, enum = int, enum = float, (enum)pointer are all
	     errors.  */
	  if (((INTEGRAL_OR_ENUMERATION_TYPE_P (intype)
		|| TREE_CODE (intype) == REAL_TYPE)
	       && ! (convtype & CONV_STATIC))
	      || TREE_CODE (intype) == POINTER_TYPE)
	    {
	      if (flags & LOOKUP_COMPLAIN)
		permerror (input_location, "conversion from %q#T to %q#T", intype, type);

	      if (!flag_permissive)
		return error_mark_node;
	    }

	  /* [expr.static.cast]

	     8. A value of integral or enumeration type can be explicitly
	     converted to an enumeration type. The value is unchanged if
	     the original value is within the range of the enumeration
	     values. Otherwise, the resulting enumeration value is
	     unspecified.  */
	  if (TREE_CODE (expr) == INTEGER_CST
	      && !int_fits_type_p (expr, ENUM_UNDERLYING_TYPE (type)))
	    warning (OPT_Wconversion,
		     "the result of the conversion is unspecified because "
		     "%qE is outside the range of type %qT",
		     expr, type);
	}
      /* A class operand needs a user-defined conversion operator.  */
      if (MAYBE_CLASS_TYPE_P (intype))
	{
	  tree rval;
	  rval = build_type_conversion (type, e);
	  if (rval)
	    return rval;
	  if (flags & LOOKUP_COMPLAIN)
	    error ("%q#T used where a %qT was expected", intype, type);
	  return error_mark_node;
	}
      if (code == BOOLEAN_TYPE)
	{
	  /* We can't implicitly convert a scoped enum to bool, so convert
	     to the underlying type first.  */
	  if (SCOPED_ENUM_P (intype) && (convtype & CONV_STATIC))
	    e = build_nop (ENUM_UNDERLYING_TYPE (intype), e);
	  return cp_truthvalue_conversion (e);
	}

      converted = fold_if_not_in_template (convert_to_integer (type, e));

      /* Ignore any integer overflow caused by the conversion.  */
      return ignore_overflows (converted, e);
    }
  /* Null pointer constant to std::nullptr_t.  */
  if (NULLPTR_TYPE_P (type) && e && null_ptr_cst_p (e))
    return nullptr_node;
  /* Pointer and pointer-to-member targets.  */
  if (POINTER_TYPE_P (type) || TYPE_PTR_TO_MEMBER_P (type))
    return fold_if_not_in_template (cp_convert_to_pointer (type, e));
  /* Vector targets; class operands again go through a conversion op.  */
  if (code == VECTOR_TYPE)
    {
      tree in_vtype = TREE_TYPE (e);
      if (MAYBE_CLASS_TYPE_P (in_vtype))
	{
	  tree ret_val;
	  ret_val = build_type_conversion (type, e);
	  if (ret_val)
	    return ret_val;
	  if (flags & LOOKUP_COMPLAIN)
	    error ("%q#T used where a %qT was expected", in_vtype, type);
	  return error_mark_node;
	}
      return fold_if_not_in_template (convert_to_vector (type, e));
    }
  /* Floating-point and complex targets.  */
  if (code == REAL_TYPE || code == COMPLEX_TYPE)
    {
      if (MAYBE_CLASS_TYPE_P (TREE_TYPE (e)))
	{
	  tree rval;
	  rval = build_type_conversion (type, e);
	  if (rval)
	    return rval;
	  else
	    if (flags & LOOKUP_COMPLAIN)
	      error ("%q#T used where a floating point value was expected",
		     TREE_TYPE (e));
	}
      if (code == REAL_TYPE)
	return fold_if_not_in_template (convert_to_real (type, e));
      else if (code == COMPLEX_TYPE)
	return fold_if_not_in_template (convert_to_complex (type, e));
    }

  /* New C++ semantics:  since assignment is now based on
     memberwise copying,  if the rhs type is derived from the
     lhs type, then we may still do a conversion.  */
  if (RECORD_OR_UNION_CODE_P (code))
    {
      tree dtype = TREE_TYPE (e);
      tree ctor = NULL_TREE;

      dtype = TYPE_MAIN_VARIANT (dtype);

      /* Conversion between aggregate types.  New C++ semantics allow
	 objects of derived type to be cast to objects of base type.
	 Old semantics only allowed this between pointers.

	 There may be some ambiguity between using a constructor
	 vs. using a type conversion operator when both apply.  */

      ctor = e;

      if (abstract_virtuals_error (NULL_TREE, type))
	return error_mark_node;

      if (BRACE_ENCLOSED_INITIALIZER_P (ctor))
	ctor = perform_implicit_conversion (type, ctor, tf_warning_or_error);
      else if ((flags & LOOKUP_ONLYCONVERTING)
	       && ! (CLASS_TYPE_P (dtype) && DERIVED_FROM_P (type, dtype)))
	/* For copy-initialization, first we create a temp of the proper type
	   with a user-defined conversion sequence, then we direct-initialize
	   the target with the temp (see [dcl.init]).  */
	ctor = build_user_type_conversion (type, ctor, flags);
      else
	{
	  VEC(tree,gc) *ctor_vec = make_tree_vector_single (ctor);
	  ctor = build_special_member_call (NULL_TREE,
					    complete_ctor_identifier,
					    &ctor_vec,
					    type, flags,
					    tf_warning_or_error);
	  release_tree_vector (ctor_vec);
	}
      if (ctor)
	return build_cplus_new (type, ctor, tf_warning_or_error);
    }

  /* Nothing matched: diagnose if requested and fail.  */
  if (flags & LOOKUP_COMPLAIN)
    {
      /* If the conversion failed and expr was an invalid use of pointer to
	 member function, try to report a meaningful error.  */
      if (invalid_nonstatic_memfn_p (expr, tf_warning_or_error))
	/* We displayed the error message.  */;
      else
	error ("conversion from %qT to non-scalar type %qT requested",
	       TREE_TYPE (expr), type);
    }
  return error_mark_node;
}
/* When an expression is used in a void context, its value is discarded and
   no lvalue-rvalue and similar conversions happen [expr.static.cast/4,
   stmt.expr/1, expr.comma/1].  This permits dereferencing an incomplete type
   in a void context.  The C++ standard does not define what an `access' to an
   object is, but there is reason to believe that it is the lvalue to rvalue
   conversion -- if it were not, `*&*p = 1' would violate [expr]/4 in that it
   accesses `*p' not to calculate the value to be stored.  But, dcl.type.cv/8
   indicates that volatile semantics should be the same between C and C++
   where ever possible.  C leaves it implementation defined as to what
   constitutes an access to a volatile.  So, we interpret `*vp' as a read of
   the volatile object `vp' points to, unless that is an incomplete type.  For
   volatile references we do not do this interpretation, because that would
   make it impossible to ignore the reference return value from functions.  We
   issue warnings in the confusing cases.

   The IMPLICIT is ICV_CAST when the user is explicitly converting an expression
   to void via a cast.  If an expression is being implicitly converted, IMPLICIT
   indicates the context of the implicit conversion.

   Returns the converted expression (possibly void_zero_node), or
   error_mark_node on failure.  COMPLAIN gates the diagnostics.  */

tree
convert_to_void (tree expr, impl_conv_void implicit, tsubst_flags_t complain)
{
  if (expr == error_mark_node
      || TREE_TYPE (expr) == error_mark_node)
    return error_mark_node;

  if (implicit == ICV_CAST)
    mark_exp_read (expr);
  else
    {
      /* Peel off COMPOUND_EXPRs so we mark the value-producing operand.  */
      tree exprv = expr;

      while (TREE_CODE (exprv) == COMPOUND_EXPR)
	exprv = TREE_OPERAND (exprv, 1);
      if (DECL_P (exprv)
	  || handled_component_p (exprv)
	  || TREE_CODE (exprv) == INDIRECT_REF)
	/* Expr is not being 'used' here, otherwise we would have
	   called mark_{rl}value_use use here, which would have in turn
	   called mark_exp_read.  Rather, we call mark_exp_read directly
	   to avoid some warnings when
	   -Wunused-but-set-{variable,parameter} is in effect.  */
	mark_exp_read (exprv);
    }

  if (!TREE_TYPE (expr))
    return expr;
  if (invalid_nonstatic_memfn_p (expr, complain))
    return error_mark_node;
  if (TREE_CODE (expr) == PSEUDO_DTOR_EXPR)
    {
      if (complain & tf_error)
	error ("pseudo-destructor is not called");
      return error_mark_node;
    }
  if (VOID_TYPE_P (TREE_TYPE (expr)))
    return expr;

  /* Recurse into composite expressions to convert their value operands,
     and decide whether an INDIRECT_REF constitutes an access.  */
  switch (TREE_CODE (expr))
    {
    case COND_EXPR:
      {
	/* The two parts of a cond expr might be separate lvalues.  */
	tree op1 = TREE_OPERAND (expr,1);
	tree op2 = TREE_OPERAND (expr,2);
	bool side_effects = ((op1 && TREE_SIDE_EFFECTS (op1))
			     || TREE_SIDE_EFFECTS (op2));
	tree new_op1, new_op2;
	new_op1 = NULL_TREE;
	if (implicit != ICV_CAST && !side_effects)
	  {
	    if (op1)
	      new_op1 = convert_to_void (op1, ICV_SECOND_OF_COND, complain);
	    new_op2 = convert_to_void (op2, ICV_THIRD_OF_COND, complain);
	  }
	else
	  {
	    if (op1)
	      new_op1 = convert_to_void (op1, ICV_CAST, complain);
	    new_op2 = convert_to_void (op2, ICV_CAST, complain);
	  }

	expr = build3 (COND_EXPR, TREE_TYPE (new_op2),
		       TREE_OPERAND (expr, 0), new_op1, new_op2);
	break;
      }

    case COMPOUND_EXPR:
      {
	/* The second part of a compound expr contains the value.  */
	tree op1 = TREE_OPERAND (expr,1);
	tree new_op1;
	if (implicit != ICV_CAST && !TREE_NO_WARNING (expr))
	  new_op1 = convert_to_void (op1, ICV_RIGHT_OF_COMMA, complain);
	else
	  new_op1 = convert_to_void (op1, ICV_CAST, complain);

	if (new_op1 != op1)
	  {
	    tree t = build2 (COMPOUND_EXPR, TREE_TYPE (new_op1),
			     TREE_OPERAND (expr, 0), new_op1);
	    expr = t;
	  }

	break;
      }

    case NON_LVALUE_EXPR:
    case NOP_EXPR:
      /* These have already decayed to rvalue.  */
      break;

    case CALL_EXPR:   /* We have a special meaning for volatile void fn().  */
      break;

    case INDIRECT_REF:
      {
	tree type = TREE_TYPE (expr);
	int is_reference = TREE_CODE (TREE_TYPE (TREE_OPERAND (expr, 0)))
			   == REFERENCE_TYPE;
	int is_volatile = TYPE_VOLATILE (type);
	int is_complete = COMPLETE_TYPE_P (complete_type (type));

	/* Can't load the value if we don't know the type.  */
	if (is_volatile && !is_complete)
	  {
	    if (complain & tf_warning)
	      switch (implicit)
		{
		  case ICV_CAST:
		    warning (0, "conversion to void will not access "
			     "object of incomplete type %qT", type);
		    break;
		  case ICV_SECOND_OF_COND:
		    warning (0, "indirection will not access object of "
			     "incomplete type %qT in second operand "
			     "of conditional expression", type);
		    break;
		  case ICV_THIRD_OF_COND:
		    warning (0, "indirection will not access object of "
			     "incomplete type %qT in third operand "
			     "of conditional expression", type);
		    break;
		  case ICV_RIGHT_OF_COMMA:
		    warning (0, "indirection will not access object of "
			     "incomplete type %qT in right operand of "
			     "comma operator", type);
		    break;
		  case ICV_LEFT_OF_COMMA:
		    warning (0, "indirection will not access object of "
			     "incomplete type %qT in left operand of "
			     "comma operator", type);
		    break;
		  case ICV_STATEMENT:
		    warning (0, "indirection will not access object of "
			     "incomplete type %qT in statement", type);
		     break;
		  case ICV_THIRD_IN_FOR:
		    warning (0, "indirection will not access object of "
			     "incomplete type %qT in for increment "
			     "expression", type);
		    break;
		  default:
		    gcc_unreachable ();
		}
	  }
	/* Don't load the value if this is an implicit dereference, or if
	   the type needs to be handled by ctors/dtors.  */
	else if (is_volatile && is_reference)
	  {
	    if (complain & tf_warning)
	      switch (implicit)
		{
		  case ICV_CAST:
		    warning (0, "conversion to void will not access "
			     "object of type %qT", type);
		    break;
		  case ICV_SECOND_OF_COND:
		    warning (0, "implicit dereference will not access object "
			     "of type %qT in second operand of "
			     "conditional expression", type);
		    break;
		  case ICV_THIRD_OF_COND:
		    warning (0, "implicit dereference will not access object "
			     "of type %qT in third operand of "
			     "conditional expression", type);
		    break;
		  case ICV_RIGHT_OF_COMMA:
		    warning (0, "implicit dereference will not access object "
			     "of type %qT in right operand of "
			     "comma operator", type);
		    break;
		  case ICV_LEFT_OF_COMMA:
		    warning (0, "implicit dereference will not access object "
			     "of type %qT in left operand of comma operator",
			     type);
		    break;
		  case ICV_STATEMENT:
		    warning (0, "implicit dereference will not access object "
			     "of type %qT in statement",  type);
		     break;
		  case ICV_THIRD_IN_FOR:
		    warning (0, "implicit dereference will not access object "
			     "of type %qT in for increment expression",
			     type);
		    break;
		  default:
		    gcc_unreachable ();
		}
	  }
	else if (is_volatile && TREE_ADDRESSABLE (type))
	  {
	    if (complain & tf_warning)
	      switch (implicit)
		{
		  case ICV_CAST:
		    warning (0, "conversion to void will not access "
			     "object of non-trivially-copyable type %qT",
			     type);
		    break;
		  case ICV_SECOND_OF_COND:
		    warning (0, "indirection will not access object of "
			     "non-trivially-copyable type %qT in second "
			     "operand of conditional expression", type);
		    break;
		  case ICV_THIRD_OF_COND:
		    warning (0, "indirection will not access object of "
			     "non-trivially-copyable type %qT in third "
			     "operand of conditional expression", type);
		    break;
		  case ICV_RIGHT_OF_COMMA:
		    warning (0, "indirection will not access object of "
			     "non-trivially-copyable type %qT in right "
			     "operand of comma operator", type);
		    break;
		  case ICV_LEFT_OF_COMMA:
		    warning (0, "indirection will not access object of "
			     "non-trivially-copyable type %qT in left "
			     "operand of comma operator", type);
		    break;
		  case ICV_STATEMENT:
		    warning (0, "indirection will not access object of "
			     "non-trivially-copyable type %qT in statement",
			      type);
		     break;
		  case ICV_THIRD_IN_FOR:
		    warning (0, "indirection will not access object of "
			     "non-trivially-copyable type %qT in for "
			     "increment expression", type);
		    break;
		  default:
		    gcc_unreachable ();
		}
	  }
	if (is_reference || !is_volatile || !is_complete || TREE_ADDRESSABLE (type))
	  {
	    /* Emit a warning (if enabled) when the "effect-less" INDIRECT_REF
	       operation is stripped off. Note that we don't warn about
	       - an expression with TREE_NO_WARNING set. (For an example of
	         such expressions, see build_over_call in call.c.)
	       - automatic dereferencing of references, since the user cannot
	         control it. (See also warn_if_unused_value() in stmt.c.)  */
	    if (warn_unused_value
		&& implicit != ICV_CAST
		&& (complain & tf_warning)
		&& !TREE_NO_WARNING (expr)
		&& !is_reference)
	      warning (OPT_Wunused_value, "value computed is not used");
	    expr = TREE_OPERAND (expr, 0);
	  }

	break;
      }

    case VAR_DECL:
      {
	/* External variables might be incomplete.  */
	tree type = TREE_TYPE (expr);
	int is_complete = COMPLETE_TYPE_P (complete_type (type));

	if (TYPE_VOLATILE (type) && !is_complete && (complain & tf_warning))
	  switch (implicit)
	    {
	      case ICV_CAST:
		warning (0, "conversion to void will not access "
			 "object %qE of incomplete type %qT", expr, type);
		break;
	      case ICV_SECOND_OF_COND:
	        warning (0, "variable %qE of incomplete type %qT will not "
			 "be accessed in second operand of "
			 "conditional expression", expr, type);
		break;
	      case ICV_THIRD_OF_COND:
	        warning (0, "variable %qE of incomplete type %qT will not "
			 "be accessed in third operand of "
			 "conditional expression", expr, type);
		break;
	      case ICV_RIGHT_OF_COMMA:
	        warning (0, "variable %qE of incomplete type %qT will not "
			 "be accessed in right operand of comma operator",
			 expr, type);
		break;
	      case ICV_LEFT_OF_COMMA:
	        warning (0, "variable %qE of incomplete type %qT will not "
			 "be accessed in left operand of comma operator",
			 expr, type);
		break;
	      case ICV_STATEMENT:
	        warning (0, "variable %qE of incomplete type %qT will not "
			 "be accessed in statement", expr, type);
		break;
	      case ICV_THIRD_IN_FOR:
	        warning (0, "variable %qE of incomplete type %qT will not "
			 "be accessed in for increment expression",
			 expr, type);
		break;
	      default:
	        gcc_unreachable ();
	    }

	break;
      }

    case TARGET_EXPR:
      /* Don't bother with the temporary object returned from a function if
	 we don't use it and don't need to destroy it.  We'll still
	 allocate space for it in expand_call or declare_return_variable,
	 but we don't need to track it through all the tree phases.  */
      if (TARGET_EXPR_IMPLICIT_P (expr)
	  && TYPE_HAS_TRIVIAL_DESTRUCTOR (TREE_TYPE (expr)))
	{
	  tree init = TARGET_EXPR_INITIAL (expr);
	  if (TREE_CODE (init) == AGGR_INIT_EXPR
	      && !AGGR_INIT_VIA_CTOR_P (init))
	    {
	      tree fn = AGGR_INIT_EXPR_FN (init);
	      expr = build_call_array_loc (input_location,
					   TREE_TYPE (TREE_TYPE (TREE_TYPE (fn))),
					   fn,
					   aggr_init_expr_nargs (init),
					   AGGR_INIT_EXPR_ARGP (init));
	    }
	}
      break;

    default:;
    }
  expr = resolve_nondeduced_context (expr);
  {
    /* Diagnose uses of an overloaded function name in a discarded-value
       context, with or without a leading '&'.  */
    tree probe = expr;

    if (TREE_CODE (probe) == ADDR_EXPR)
      probe = TREE_OPERAND (expr, 0);
    if (type_unknown_p (probe))
      {
	/* [over.over] enumerates the places where we can take the address
	   of an overloaded function, and this is not one of them.  */
	if (complain & tf_error)
	  switch (implicit)
	    {
	      case ICV_CAST:
		error ("conversion to void "
		       "cannot resolve address of overloaded function");
		break;
	      case ICV_SECOND_OF_COND:
		error ("second operand of conditional expression "
		       "cannot resolve address of overloaded function");
		break;
	      case ICV_THIRD_OF_COND:
		error ("third operand of conditional expression "
		       "cannot resolve address of overloaded function");
		break;
	      case ICV_RIGHT_OF_COMMA:
		error ("right operand of comma operator "
		       "cannot resolve address of overloaded function");
		break;
	      case ICV_LEFT_OF_COMMA:
		error ("left operand of comma operator "
		       "cannot resolve address of overloaded function");
		break;
	      case ICV_STATEMENT:
		error ("statement "
		       "cannot resolve address of overloaded function");
		break;
	      case ICV_THIRD_IN_FOR:
		error ("for increment expression "
		       "cannot resolve address of overloaded function");
		break;
	    }
	else
	  return error_mark_node;
	expr = void_zero_node;
      }
    else if (implicit != ICV_CAST && probe == expr && is_overloaded_fn (probe))
      {
	/* Only warn when there is no &.  */
	if (complain & tf_warning)
	  switch (implicit)
	    {
	      case ICV_SECOND_OF_COND:
	        warning (OPT_Waddress,
			 "second operand of conditional expression "
			 "is a reference, not call, to function %qE", expr);
		break;
	      case ICV_THIRD_OF_COND:
	        warning (OPT_Waddress,
			 "third operand of conditional expression "
			 "is a reference, not call, to function %qE", expr);
		break;
	      case ICV_RIGHT_OF_COMMA:
	        warning (OPT_Waddress,
			 "right operand of comma operator "
			 "is a reference, not call, to function %qE", expr);
		break;
	      case ICV_LEFT_OF_COMMA:
	        warning (OPT_Waddress,
			 "left operand of comma operator "
			 "is a reference, not call, to function %qE", expr);
		break;
	      case ICV_STATEMENT:
	        warning (OPT_Waddress,
			 "statement is a reference, not call, to function %qE",
			 expr);
		break;
	      case ICV_THIRD_IN_FOR:
	        warning (OPT_Waddress,
			 "for increment expression "
			 "is a reference, not call, to function %qE", expr);
		break;
	      default:
	        gcc_unreachable ();
	    }

	if (TREE_CODE (expr) == COMPONENT_REF)
	  expr = TREE_OPERAND (expr, 0);
      }
  }

  /* Warn about discarded-value expressions with no effect, then wrap the
     expression in a conversion to void.  */
  if (expr != error_mark_node && !VOID_TYPE_P (TREE_TYPE (expr)))
    {
      if (implicit != ICV_CAST
	  && warn_unused_value
	  && !TREE_NO_WARNING (expr)
	  && !processing_template_decl)
	{
	  /* The middle end does not warn about expressions that have
	     been explicitly cast to void, so we must do so here.  */
	  if (!TREE_SIDE_EFFECTS (expr)) {
	    if (complain & tf_warning)
	      switch (implicit)
		{
		  case ICV_SECOND_OF_COND:
		    warning (OPT_Wunused_value,
			     "second operand of conditional expression has no effect");
		    break;
		  case ICV_THIRD_OF_COND:
		    warning (OPT_Wunused_value,
			     "third operand of conditional expression has no effect");
		    break;
		  case ICV_RIGHT_OF_COMMA:
		    warning (OPT_Wunused_value,
			     "right operand of comma operator has no effect");
		    break;
		  case ICV_LEFT_OF_COMMA:
		    warning (OPT_Wunused_value,
			     "left operand of comma operator has no effect");
		    break;
		  case ICV_STATEMENT:
		    warning (OPT_Wunused_value,
			     "statement has no effect");
		    break;
		  case ICV_THIRD_IN_FOR:
		    warning (OPT_Wunused_value,
			     "for increment expression has no effect");
		    break;
		  default:
		    gcc_unreachable ();
		}
	  }
	  else
	    {
	      tree e;
	      enum tree_code code;
	      enum tree_code_class tclass;

	      e = expr;
	      /* We might like to warn about (say) "(int) f()", as the
		 cast has no effect, but the compiler itself will
		 generate implicit conversions under some
		 circumstances.  (For example a block copy will be
		 turned into a call to "__builtin_memcpy", with a
		 conversion of the return value to an appropriate
		 type.)  So, to avoid false positives, we strip
		 conversions.  Do not use STRIP_NOPs because it will
		 not strip conversions to "void", as that is not a
		 mode-preserving conversion.  */
	      while (TREE_CODE (e) == NOP_EXPR)
		e = TREE_OPERAND (e, 0);

	      code = TREE_CODE (e);
	      tclass = TREE_CODE_CLASS (code);
	      if ((tclass == tcc_comparison
		   || tclass == tcc_unary
		   || (tclass == tcc_binary
		       && !(code == MODIFY_EXPR
			    || code == INIT_EXPR
			    || code == PREDECREMENT_EXPR
			    || code == PREINCREMENT_EXPR
			    || code == POSTDECREMENT_EXPR
			    || code == POSTINCREMENT_EXPR)))
		  && (complain & tf_warning))
		warning (OPT_Wunused_value, "value computed is not used");
	    }
	}
      expr = build1 (CONVERT_EXPR, void_type_node, expr);
    }
  if (! TREE_SIDE_EFFECTS (expr))
    expr = void_zero_node;
  return expr;
}
/* Create an expression whose value is that of EXPR, converted to type
   TYPE.  The TREE_TYPE of the value is always TYPE.  This function
   implements all reasonable conversions; callers should filter out
   those that are not permitted by the language being compiled.

   Most of this routine is from build_reinterpret_cast.

   The back end cannot call cp_convert (what was convert) because
   conversions to/from basetypes may involve memory references (vbases)
   and adding or subtracting small values (multiple inheritance), but it
   calls convert from the constant folding code on subtrees of already
   built trees after it has ripped them apart.

   Also, if we ever support range variables, we'll probably also have to
   do a little bit more work.  */

tree
convert (tree type, tree expr)
{
  if (type == error_mark_node || expr == error_mark_node)
    return error_mark_node;

  /* A pointer-to-pointer conversion needs no representation change;
     simply re-type the expression.  */
  if (POINTER_TYPE_P (type) && POINTER_TYPE_P (TREE_TYPE (expr)))
    return fold_if_not_in_template (build_nop (type, expr));

  return ocp_convert (type, expr, CONV_OLD_CONVERT,
		      LOOKUP_NORMAL|LOOKUP_NO_CONVERSION);
}
/* Like cp_convert, except permit conversions to take place which
   are not normally allowed due to access restrictions
   (such as conversion from sub-type to private super-type).
   CONVTYPE supplies additional CONV_* bits OR'ed into the C-cast
   conversion mask passed to ocp_convert.  */

tree
convert_force (tree type, tree expr, int convtype)
{
  tree e = expr;
  enum tree_code code = TREE_CODE (type);

  /* Reference target: bind via convert_to_reference with C-cast rules.  */
  if (code == REFERENCE_TYPE)
    return (fold_if_not_in_template
	    (convert_to_reference (type, e, CONV_C_CAST, LOOKUP_COMPLAIN,
				   NULL_TREE)));

  if (code == POINTER_TYPE)
    return fold_if_not_in_template (convert_to_pointer_force (type, e));

  /* From typeck.c convert_for_assignment */
  /* NOTE(review): the first and third conjuncts below are identical --
     both test TREE_CODE (TREE_TYPE (e)) == POINTER_TYPE.  The first was
     presumably intended to test something else; confirm against upstream
     history before changing.  */
  if (((TREE_CODE (TREE_TYPE (e)) == POINTER_TYPE && TREE_CODE (e) == ADDR_EXPR
	&& TREE_CODE (TREE_TYPE (e)) == POINTER_TYPE
	&& TREE_CODE (TREE_TYPE (TREE_TYPE (e))) == METHOD_TYPE)
       || integer_zerop (e)
       || TYPE_PTRMEMFUNC_P (TREE_TYPE (e)))
      && TYPE_PTRMEMFUNC_P (type))
    /* compatible pointer to member functions.  */
    return build_ptrmemfunc (TYPE_PTRMEMFUNC_FN_TYPE (type), e, 1,
			     /*c_cast_p=*/1, tf_warning_or_error);

  return ocp_convert (type, e, CONV_C_CAST|convtype, LOOKUP_NORMAL);
}
/* Convert an aggregate EXPR to type XTYPE.  If a conversion
   exists, return the attempted conversion.  This may
   return ERROR_MARK_NODE if the conversion is not
   allowed (references private members, etc).
   If no conversion exists, NULL_TREE is returned.

   FIXME: Ambiguity checking is wrong.  Should choose one by the implicit
   object parameter, or by the second standard conversion sequence if
   that doesn't do it.  This will probably wait for an overloading rewrite.
   (jason 8/9/95)  */

/* Thin static wrapper: delegates entirely to the user-defined
   conversion machinery with normal lookup.  */

static tree
build_type_conversion (tree xtype, tree expr)
{
  /* C++: check to see if we can convert this aggregate type
     into the required type.  */
  return build_user_type_conversion (xtype, expr, LOOKUP_NORMAL);
}
/* Convert the given EXPR to one of a group of types suitable for use in an
   expression.  DESIRES is a combination of various WANT_* flags (q.v.)
   which indicates which types are suitable.  If COMPLAIN is true, complain
   about ambiguity; otherwise, the caller will deal with it.

   Returns the converted expression, NULL_TREE when no suitable
   conversion exists, or error_mark_node on an (ambiguity) error.  */

tree
build_expr_type_conversion (int desires, tree expr, bool complain)
{
  tree basetype = TREE_TYPE (expr);
  tree conv = NULL_TREE;
  tree winner = NULL_TREE;

  if (expr == null_node
      && (desires & WANT_INT)
      && !(desires & WANT_NULL))
    warning_at (input_location, OPT_Wconversion_null,
		"converting NULL to non-pointer type");

  basetype = TREE_TYPE (expr);

  if (basetype == error_mark_node)
    return error_mark_node;

  /* Non-class operands: answer directly from the operand's type code
     against the DESIRES mask; no user-defined conversions apply.  */
  if (! MAYBE_CLASS_TYPE_P (basetype))
    switch (TREE_CODE (basetype))
      {
      case INTEGER_TYPE:
	if ((desires & WANT_NULL) && null_ptr_cst_p (expr))
	  return expr;
	/* else fall through...  */

      case BOOLEAN_TYPE:
	return (desires & WANT_INT) ? expr : NULL_TREE;
      case ENUMERAL_TYPE:
	return (desires & WANT_ENUM) ? expr : NULL_TREE;
      case REAL_TYPE:
	return (desires & WANT_FLOAT) ? expr : NULL_TREE;
      case POINTER_TYPE:
	return (desires & WANT_POINTER) ? expr : NULL_TREE;

      case FUNCTION_TYPE:
      case ARRAY_TYPE:
	return (desires & WANT_POINTER) ? decay_conversion (expr)
					: NULL_TREE;

      case COMPLEX_TYPE:
      case VECTOR_TYPE:
	if ((desires & WANT_VECTOR_OR_COMPLEX) == 0)
	  return NULL_TREE;
	switch (TREE_CODE (TREE_TYPE (basetype)))
	  {
	  case INTEGER_TYPE:
	  case BOOLEAN_TYPE:
	    return (desires & WANT_INT) ? expr : NULL_TREE;
	  case ENUMERAL_TYPE:
	    return (desires & WANT_ENUM) ? expr : NULL_TREE;
	  case REAL_TYPE:
	    return (desires & WANT_FLOAT) ? expr : NULL_TREE;
	  default:
	    return NULL_TREE;
	  }

      default:
	return NULL_TREE;
      }

  /* The code for conversions from class type is currently only used for
     delete expressions.  Other expressions are handled by build_new_op.  */
  if (!complete_type_or_maybe_complain (basetype, expr, complain))
    return error_mark_node;
  if (!TYPE_HAS_CONVERSION (basetype))
    return NULL_TREE;

  /* Scan the class's conversion operators for one whose target type
     matches DESIRES; more than one match is an ambiguity.  */
  for (conv = lookup_conversions (basetype); conv; conv = TREE_CHAIN (conv))
    {
      int win = 0;
      tree candidate;
      tree cand = TREE_VALUE (conv);
      cand = OVL_CURRENT (cand);

      if (winner && winner == cand)
	continue;

      /* Explicit conversion operators never take part.  */
      if (DECL_NONCONVERTING_P (cand))
	continue;

      candidate = non_reference (TREE_TYPE (TREE_TYPE (cand)));

      switch (TREE_CODE (candidate))
	{
	case BOOLEAN_TYPE:
	case INTEGER_TYPE:
	  win = (desires & WANT_INT); break;
	case ENUMERAL_TYPE:
	  win = (desires & WANT_ENUM); break;
	case REAL_TYPE:
	  win = (desires & WANT_FLOAT); break;
	case POINTER_TYPE:
	  win = (desires & WANT_POINTER); break;

	case COMPLEX_TYPE:
	case VECTOR_TYPE:
	  if ((desires & WANT_VECTOR_OR_COMPLEX) == 0)
	    break;
	  switch (TREE_CODE (TREE_TYPE (candidate)))
	    {
	    case BOOLEAN_TYPE:
	    case INTEGER_TYPE:
	      win = (desires & WANT_INT); break;
	    case ENUMERAL_TYPE:
	      win = (desires & WANT_ENUM); break;
	    case REAL_TYPE:
	      win = (desires & WANT_FLOAT); break;
	    default:
	      break;
	    }
	  break;

	default:
	  break;
	}

      if (win)
	{
	  if (winner)
	    {
	      if (complain)
		{
		  error ("ambiguous default type conversion from %qT",
			 basetype);
		  error ("  candidate conversions include %qD and %qD",
			 winner, cand);
		}
	      return error_mark_node;
	    }
	  else
	    winner = cand;
	}
    }

  if (winner)
    {
      tree type = non_reference (TREE_TYPE (TREE_TYPE (winner)));
      return build_user_type_conversion (type, expr, LOOKUP_NORMAL);
    }

  return NULL_TREE;
}
/* Implements integral promotion (4.1) and float->double promotion.
   Returns the promoted type for TYPE (TYPE itself if no promotion
   applies, error_mark_node on error).  */

tree
type_promotes_to (tree type)
{
  tree promoted_type;

  if (type == error_mark_node)
    return error_mark_node;

  type = TYPE_MAIN_VARIANT (type);

  /* Check for promotions of target-defined types first.  */
  promoted_type = targetm.promoted_type (type);
  if (promoted_type)
    return promoted_type;

  /* bool always promotes to int (not unsigned), even if it's the same
     size.  */
  if (TREE_CODE (type) == BOOLEAN_TYPE)
    type = integer_type_node;

  /* Scoped enums don't promote, but pretend they do for backward ABI bug
     compatibility wrt varargs.  */
  else if (SCOPED_ENUM_P (type) && abi_version_at_least (6))
    ;

  /* Normally convert enums to int, but convert wide enums to something
     wider.  */
  else if (TREE_CODE (type) == ENUMERAL_TYPE
	   || type == char16_type_node
	   || type == char32_type_node
	   || type == wchar_type_node)
    {
      int precision = MAX (TYPE_PRECISION (type),
			   TYPE_PRECISION (integer_type_node));
      tree totype = c_common_type_for_size (precision, 0);
      if (SCOPED_ENUM_P (type))
	warning (OPT_Wabi, "scoped enum %qT will not promote to an integral "
		 "type in a future version of GCC", type);
      if (TREE_CODE (type) == ENUMERAL_TYPE)
	type = ENUM_UNDERLYING_TYPE (type);
      /* Prefer the signed type of the same precision, unless the
	 original (unsigned) range would not fit in it.  */
      if (TYPE_UNSIGNED (type)
	  && ! int_fits_type_p (TYPE_MAX_VALUE (type), totype))
	type = c_common_type_for_size (precision, 1);
      else
	type = totype;
    }
  else if (c_promoting_integer_type_p (type))
    {
      /* Retain unsignedness if really not getting bigger.  */
      if (TYPE_UNSIGNED (type)
	  && TYPE_PRECISION (type) == TYPE_PRECISION (integer_type_node))
	type = unsigned_type_node;
      else
	type = integer_type_node;
    }
  else if (type == float_type_node)
    type = double_type_node;

  return type;
}
/* The routines below this point are carefully written to conform to
   the standard.  They use the same terminology, and follow the rules
   closely.  Although they are used only in pt.c at the moment, they
   should presumably be used everywhere in the future.  */

/* Attempt to perform qualification conversions on EXPR to convert it
   to TYPE.  Return the resulting expression, or error_mark_node if
   the conversion was impossible.  */

tree
perform_qualification_conversions (tree type, tree expr)
{
  tree from = TREE_TYPE (expr);

  /* Identical types: nothing to convert.  */
  if (same_type_p (type, from))
    return expr;

  /* Object pointers whose pointed-to types differ only by qualifiers.  */
  if (TYPE_PTR_P (type) && TYPE_PTR_P (from)
      && comp_ptr_ttypes (TREE_TYPE (type), TREE_TYPE (from)))
    return build_nop (type, expr);

  /* Pointers to members of the same class, likewise
     qualification-compatible in the pointed-to type.  */
  if (TYPE_PTR_TO_MEMBER_P (type)
      && TYPE_PTR_TO_MEMBER_P (from)
      && same_type_p (TYPE_PTRMEM_CLASS_TYPE (type),
		      TYPE_PTRMEM_CLASS_TYPE (from))
      && comp_ptr_ttypes (TYPE_PTRMEM_POINTED_TO_TYPE (type),
			  TYPE_PTRMEM_POINTED_TO_TYPE (from)))
    return build_nop (type, expr);

  return error_mark_node;
}
|
embecosm/epiphany-gcc
|
gcc/cp/cvt.c
|
C
|
gpl-2.0
| 53,126
|
/*
* QLogic qlge NIC HBA Driver
* Copyright (c) 2003-2008 QLogic Corporation
* See LICENSE.qlge for copyright and licensing details.
* Author: Linux qlge network device driver by
* Ron Mercer <ron.mercer@qlogic.com>
*/
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dmapool.h>
#include <linux/mempool.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/ipv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/prefetch.h>
#include <net/ip6_checksum.h>
#include "qlge.h"
/* Driver identification strings used in module info and log output. */
char qlge_driver_name[] = DRV_NAME;
const char qlge_driver_version[] = DRV_VERSION;
MODULE_AUTHOR("Ron Mercer <ron.mercer@qlogic.com>");
MODULE_DESCRIPTION(DRV_STRING " ");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
/* Default netif message-level bitmap; the commented-out categories are
 * deliberately disabled by default because they are very noisy. */
static const u32 default_msg =
NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |
/* NETIF_MSG_TIMER |	*/
NETIF_MSG_IFDOWN |
NETIF_MSG_IFUP |
NETIF_MSG_RX_ERR |
NETIF_MSG_TX_ERR |
/*  NETIF_MSG_TX_QUEUED | */
/*  NETIF_MSG_INTR | NETIF_MSG_TX_DONE | NETIF_MSG_RX_STATUS | */
/* NETIF_MSG_PKTDATA | */
NETIF_MSG_HW | NETIF_MSG_WOL | 0;
/* -1 means "use default_msg above"; otherwise a netif msg bitmap. */
static int debug = -1;	/* defaults above */
module_param(debug, int, 0664);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
/* Interrupt delivery mechanisms, selectable via qlge_irq_type below. */
#define MSIX_IRQ 0
#define MSI_IRQ 1
#define LEG_IRQ 2
static int qlge_irq_type = MSIX_IRQ;
module_param(qlge_irq_type, int, 0664);
MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");
/* When set, memory for an MPI firmware coredump is pre-allocated. */
static int qlge_mpi_coredump;
module_param(qlge_mpi_coredump, int, 0);
MODULE_PARM_DESC(qlge_mpi_coredump,
"Option to enable MPI firmware dump. "
"Default is OFF - Do Not allocate memory. ");
/* When set, a firmware core dump may be forced. */
static int qlge_force_coredump;
module_param(qlge_force_coredump, int, 0);
MODULE_PARM_DESC(qlge_force_coredump,
"Option to allow force of firmware core dump. "
"Default is OFF - Do not allow.");
/* PCI IDs this driver binds to: the 8012 and 8000 series adapters. */
static DEFINE_PCI_DEVICE_TABLE(qlge_pci_tbl) = {
{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)},
/* required last entry */
{0,}
};
MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);
/* Forward declarations for routines defined later in this file. */
static int ql_wol(struct ql_adapter *);
static void qlge_set_multicast_list(struct net_device *);
static int ql_adapter_down(struct ql_adapter *);
static int ql_adapter_up(struct ql_adapter *);
/* This hardware semaphore causes exclusive access to
* resources shared between the NIC driver, MPI firmware,
* FCOE firmware and the FC driver.
*/
/* Make a single attempt to grab the hardware semaphore protecting the
 * resource(s) named by sem_mask.  The semaphore register is shared with
 * the MPI/FCoE firmware and the FC driver.
 *
 * Returns 0 when the semaphore was acquired, non-zero when another
 * agent holds it, or -EINVAL for an unknown mask.
 */
static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
{
	u32 shift;

	switch (sem_mask) {
	case SEM_XGMAC0_MASK:
		shift = SEM_XGMAC0_SHIFT;
		break;
	case SEM_XGMAC1_MASK:
		shift = SEM_XGMAC1_SHIFT;
		break;
	case SEM_ICB_MASK:
		shift = SEM_ICB_SHIFT;
		break;
	case SEM_MAC_ADDR_MASK:
		shift = SEM_MAC_ADDR_SHIFT;
		break;
	case SEM_FLASH_MASK:
		shift = SEM_FLASH_SHIFT;
		break;
	case SEM_PROBE_MASK:
		shift = SEM_PROBE_SHIFT;
		break;
	case SEM_RT_IDX_MASK:
		shift = SEM_RT_IDX_SHIFT;
		break;
	case SEM_PROC_REG_MASK:
		shift = SEM_PROC_REG_SHIFT;
		break;
	default:
		netif_alert(qdev, probe, qdev->ndev, "bad Semaphore mask!.\n");
		return -EINVAL;
	}

	/* Request ownership, then read back to see whether we got it. */
	ql_write32(qdev, SEM, (SEM_SET << shift) | sem_mask);
	return !(ql_read32(qdev, SEM) & (SEM_SET << shift));
}
/* Spin (up to 30 attempts, 100us apart) until the hardware semaphore in
 * sem_mask is acquired.  Returns 0 on success, -ETIMEDOUT otherwise.
 */
int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask)
{
	unsigned int attempts;

	for (attempts = 30; attempts; attempts--) {
		if (!ql_sem_trylock(qdev, sem_mask))
			return 0;
		udelay(100);
	}
	return -ETIMEDOUT;
}
/* Release a hardware semaphore taken via ql_sem_spinlock()/
 * ql_sem_trylock().  Writing the mask without the SEM_SET bits drops
 * our ownership (contrast with the write in ql_sem_trylock).
 */
void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask)
{
ql_write32(qdev, SEM, sem_mask);
ql_read32(qdev, SEM);	/* flush */
}
/* This function waits for a specific bit to come ready
* in a given register. It is used mostly by the initialize
* process, but is also used in kernel thread API such as
* netdev->set_multi, netdev->set_mac_address, netdev->vlan_rx_add_vid.
*/
/* Poll register 'reg' until 'bit' comes ready, an error bit is seen,
 * or the poll budget (UDELAY_COUNT * UDELAY_DELAY us) is exhausted.
 * Used mostly during initialization, but also from kernel-thread paths
 * such as set_multi / set_mac_address / vlan_rx_add_vid.
 *
 * Returns 0 when 'bit' is set, -EIO when 'err_bit' is set,
 * -ETIMEDOUT otherwise.
 */
int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
{
	int attempts;

	for (attempts = UDELAY_COUNT; attempts; attempts--) {
		u32 val = ql_read32(qdev, reg);

		/* Errors take precedence over readiness. */
		if (val & err_bit) {
			netif_alert(qdev, probe, qdev->ndev,
				    "register 0x%.08x access error, value = 0x%.08x!.\n",
				    reg, val);
			return -EIO;
		}
		if (val & bit)
			return 0;
		udelay(UDELAY_DELAY);
	}
	netif_alert(qdev, probe, qdev->ndev,
		    "Timed out waiting for reg %x to come ready.\n", reg);
	return -ETIMEDOUT;
}
/* The CFG register is used to download TX and RX control blocks
* to the chip. This function waits for an operation to complete.
*/
/* The CFG register is used to download TX and RX control blocks to the
 * chip.  Poll until the requested operation bit clears, an error is
 * latched, or the poll budget runs out.
 *
 * Returns 0 when 'bit' has cleared, -EIO on a latched CFG error,
 * -ETIMEDOUT otherwise.
 */
static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit)
{
	int attempts;

	for (attempts = UDELAY_COUNT; attempts; attempts--) {
		u32 val = ql_read32(qdev, CFG);

		if (val & CFG_LE)
			return -EIO;
		if (!(val & bit))
			return 0;
		udelay(UDELAY_DELAY);
	}
	return -ETIMEDOUT;
}
/* Used to issue init control blocks to hw. Maps control block,
* sets address, triggers download, waits for completion.
*/
/* Used to issue init control blocks to hw.  Maps the control block,
 * sets the address, triggers the download and waits for completion.
 *
 * Returns 0 on success, -ENOMEM when the DMA mapping fails, or the
 * error code from the semaphore/CFG waits.
 *
 * Bug fix: the original returned directly when ql_sem_spinlock()
 * failed, leaking the DMA mapping created just above; all failure
 * paths after the mapping now unmap it.
 */
int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
		 u16 q_id)
{
	u64 map;
	int status;
	int direction;
	u32 mask;
	u32 value;

	/* Loads (LRQ/LR/LCQ) move data toward the device; everything
	 * else reads it back.
	 */
	direction =
	    (bit & (CFG_LRQ | CFG_LR | CFG_LCQ)) ? PCI_DMA_TODEVICE :
	    PCI_DMA_FROMDEVICE;

	map = pci_map_single(qdev->pdev, ptr, size, direction);
	if (pci_dma_mapping_error(qdev->pdev, map)) {
		netif_err(qdev, ifup, qdev->ndev, "Couldn't map DMA area.\n");
		return -ENOMEM;
	}

	status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
	if (status)
		goto lock_failed;	/* must still unmap the DMA area */

	status = ql_wait_cfg(qdev, bit);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Timed out waiting for CFG to come ready.\n");
		goto exit;
	}

	/* Hand the control block's bus address to the chip. */
	ql_write32(qdev, ICB_L, (u32) map);
	ql_write32(qdev, ICB_H, (u32) (map >> 32));

	/* High half of CFG is a write mask; low half carries the op. */
	mask = CFG_Q_MASK | (bit << 16);
	value = bit | (q_id << CFG_Q_SHIFT);
	ql_write32(qdev, CFG, (mask | value));

	/*
	 * Wait for the bit to clear after signaling hw.
	 */
	status = ql_wait_cfg(qdev, bit);
exit:
	ql_sem_unlock(qdev, SEM_ICB_MASK);	/* does flush too */
lock_failed:
	pci_unmap_single(qdev->pdev, map, size, direction);
	return status;
}
/* Get a specific MAC address from the CAM. Used for debug and reg dump. */
/* Get a specific MAC address from the CAM.  Used for debug and reg dump.
 * Reads the address back word-by-word through the MAC_ADDR_IDX /
 * MAC_ADDR_DATA register pair: for each word we wait for the write-ready
 * bit, post the (offset, index, type) with the read strobe, wait for the
 * read-ready bit, then collect the data.  CAM entries carry a third
 * word (the routing/output word).  'value' must have room for 2 words
 * (3 for MAC_ADDR_TYPE_CAM_MAC).
 */
int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
u32 *value)
{
u32 offset = 0;
int status;
switch (type) {
case MAC_ADDR_TYPE_MULTI_MAC:
case MAC_ADDR_TYPE_CAM_MAC:
{
/* Word 0: wait writable, post request, wait readable, read. */
status =
ql_wait_reg_rdy(qdev,
MAC_ADDR_IDX, MAC_ADDR_MW, 0);
if (status)
goto exit;
ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
(index << MAC_ADDR_IDX_SHIFT) | /* index */
MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
status =
ql_wait_reg_rdy(qdev,
MAC_ADDR_IDX, MAC_ADDR_MR, 0);
if (status)
goto exit;
*value++ = ql_read32(qdev, MAC_ADDR_DATA);
/* Word 1: same dance at the next offset. */
status =
ql_wait_reg_rdy(qdev,
MAC_ADDR_IDX, MAC_ADDR_MW, 0);
if (status)
goto exit;
ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
(index << MAC_ADDR_IDX_SHIFT) | /* index */
MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
status =
ql_wait_reg_rdy(qdev,
MAC_ADDR_IDX, MAC_ADDR_MR, 0);
if (status)
goto exit;
*value++ = ql_read32(qdev, MAC_ADDR_DATA);
/* CAM entries have a third word (output/routing). */
if (type == MAC_ADDR_TYPE_CAM_MAC) {
status =
ql_wait_reg_rdy(qdev,
MAC_ADDR_IDX, MAC_ADDR_MW, 0);
if (status)
goto exit;
ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
(index << MAC_ADDR_IDX_SHIFT) | /* index */
MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
status =
ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
MAC_ADDR_MR, 0);
if (status)
goto exit;
*value++ = ql_read32(qdev, MAC_ADDR_DATA);
}
break;
}
case MAC_ADDR_TYPE_VLAN:
case MAC_ADDR_TYPE_MULTI_FLTR:
default:
netif_crit(qdev, ifup, qdev->ndev,
"Address type %d not yet supported.\n", type);
status = -EPERM;
}
exit:
return status;
}
/* Set up a MAC, multicast or VLAN address for the
* inbound frame matching.
*/
/* Set up a MAC, multicast or VLAN address for the
 * inbound frame matching.
 *
 * Programs one entry through the MAC_ADDR_IDX / MAC_ADDR_DATA register
 * pair: for each word we wait for the write-ready bit, post the
 * (offset, index, type) selector, then write the data word.
 * 'addr' is a 6-byte MAC for the MAC types; for the VLAN type it
 * instead carries an enable/disable bit (see below).
 */
static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
u16 index)
{
u32 offset = 0;
int status = 0;
switch (type) {
case MAC_ADDR_TYPE_MULTI_MAC:
{
/* Split the MAC into the hardware's hi-16/lo-32 layout. */
u32 upper = (addr[0] << 8) | addr[1];
u32 lower = (addr[2] << 24) | (addr[3] << 16) |
(addr[4] << 8) | (addr[5]);
status =
ql_wait_reg_rdy(qdev,
MAC_ADDR_IDX, MAC_ADDR_MW, 0);
if (status)
goto exit;
ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
(index << MAC_ADDR_IDX_SHIFT) |
type | MAC_ADDR_E);
ql_write32(qdev, MAC_ADDR_DATA, lower);
status =
ql_wait_reg_rdy(qdev,
MAC_ADDR_IDX, MAC_ADDR_MW, 0);
if (status)
goto exit;
ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
(index << MAC_ADDR_IDX_SHIFT) |
type | MAC_ADDR_E);
ql_write32(qdev, MAC_ADDR_DATA, upper);
status =
ql_wait_reg_rdy(qdev,
MAC_ADDR_IDX, MAC_ADDR_MW, 0);
if (status)
goto exit;
break;
}
case MAC_ADDR_TYPE_CAM_MAC:
{
u32 cam_output;
u32 upper = (addr[0] << 8) | addr[1];
u32 lower =
(addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
(addr[5]);
/* Words 0 and 1: the MAC address itself. */
status =
ql_wait_reg_rdy(qdev,
MAC_ADDR_IDX, MAC_ADDR_MW, 0);
if (status)
goto exit;
ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
(index << MAC_ADDR_IDX_SHIFT) | /* index */
type); /* type */
ql_write32(qdev, MAC_ADDR_DATA, lower);
status =
ql_wait_reg_rdy(qdev,
MAC_ADDR_IDX, MAC_ADDR_MW, 0);
if (status)
goto exit;
ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
(index << MAC_ADDR_IDX_SHIFT) | /* index */
type); /* type */
ql_write32(qdev, MAC_ADDR_DATA, upper);
status =
ql_wait_reg_rdy(qdev,
MAC_ADDR_IDX, MAC_ADDR_MW, 0);
if (status)
goto exit;
/* Word 2: the CAM output (routing) word. */
ql_write32(qdev, MAC_ADDR_IDX, (offset) | /* offset */
(index << MAC_ADDR_IDX_SHIFT) | /* index */
type); /* type */
/* This field should also include the queue id
and possibly the function id.  Right now we hardcode
the route field to NIC core.
*/
cam_output = (CAM_OUT_ROUTE_NIC |
(qdev->
func << CAM_OUT_FUNC_SHIFT) |
(0 << CAM_OUT_CQ_ID_SHIFT));
if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
cam_output |= CAM_OUT_RV;
/* route to NIC core */
ql_write32(qdev, MAC_ADDR_DATA, cam_output);
break;
}
case MAC_ADDR_TYPE_VLAN:
{
u32 enable_bit = *((u32 *) &addr[0]);
/* For VLAN, the addr actually holds a bit that
 * either enables or disables the vlan id we are
 * addressing. It's either MAC_ADDR_E on or off.
 * That's bit-27 we're talking about.
 */
status =
ql_wait_reg_rdy(qdev,
MAC_ADDR_IDX, MAC_ADDR_MW, 0);
if (status)
goto exit;
ql_write32(qdev, MAC_ADDR_IDX, offset | /* offset */
(index << MAC_ADDR_IDX_SHIFT) | /* index */
type | /* type */
enable_bit); /* enable/disable */
break;
}
case MAC_ADDR_TYPE_MULTI_FLTR:
default:
netif_crit(qdev, ifup, qdev->ndev,
"Address type %d not yet supported.\n", type);
status = -EPERM;
}
exit:
return status;
}
/* Set or clear MAC address in hardware. We sometimes
* have to clear it to prevent wrong frame routing
* especially in a bonding environment.
*/
/* Set or clear the MAC address in the hardware CAM.  Clearing (set == 0)
 * programs an all-zero address; this is sometimes required to prevent
 * wrong frame routing, especially in a bonding environment.
 * Returns 0 on success or the error from the semaphore/register path.
 */
static int ql_set_mac_addr(struct ql_adapter *qdev, int set)
{
	char zero_mac_addr[ETH_ALEN] = {0};
	char *addr = &zero_mac_addr[0];
	int status;

	if (set) {
		addr = &qdev->current_mac_addr[0];
		netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
			     "Set Mac addr %pM\n", addr);
	} else {
		netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
			     "Clearing MAC address\n");
	}

	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		return status;
	status = ql_set_mac_addr_reg(qdev, (u8 *) addr,
				     MAC_ADDR_TYPE_CAM_MAC,
				     qdev->func * MAX_CQ);
	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to init mac address.\n");
	return status;
}
/* Report carrier up and (re)program our MAC address into the CAM. */
void ql_link_on(struct ql_adapter *qdev)
{
netif_err(qdev, link, qdev->ndev, "Link is up.\n");
netif_carrier_on(qdev->ndev);
ql_set_mac_addr(qdev, 1);
}
/* Report carrier down and clear the CAM MAC entry so frames are not
 * mis-routed to a dead port (see comment above ql_set_mac_addr). */
void ql_link_off(struct ql_adapter *qdev)
{
netif_err(qdev, link, qdev->ndev, "Link is down.\n");
netif_carrier_off(qdev->ndev);
ql_set_mac_addr(qdev, 0);
}
/* Get a specific frame routing value from the CAM.
* Used for debug and reg dump.
*/
/* Get a specific frame-routing value from the CAM.  Used for debug and
 * register dump.  Posts the index with the read strobe through RT_IDX,
 * then collects the value from RT_DATA.  Returns 0 on success.
 */
int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value)
{
	int status;

	status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
	if (status)
		return status;
	ql_write32(qdev, RT_IDX,
		   RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT));
	status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0);
	if (status)
		return status;
	*value = ql_read32(qdev, RT_DATA);
	return 0;
}
/* The NIC function for this chip has 16 routing indexes. Each one can be used
* to route different frame types to various inbound queues. We send broadcast/
* multicast/error frames to the default queue for slow handling,
* and CAM hit/RSS frames to the fast handling queues.
*/
/* The NIC function for this chip has 16 routing indexes.  Each one can
 * be used to route different frame types to various inbound queues.
 * We send broadcast/multicast/error frames to the default queue for
 * slow handling, and CAM hit/RSS frames to the fast handling queues.
 *
 * The switch maps a routing mask to the (destination, type, slot)
 * value that is written to the RT_IDX/RT_DATA pair; 'enable' sets or
 * clears the entry's E-bit.  Returns 0 on success, -EINVAL when no
 * mask matched, -EPERM for an unsupported mask.
 */
static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
int enable)
{
int status = -EINVAL; /* Return error if no mask match. */
u32 value = 0;
switch (mask) {
case RT_IDX_CAM_HIT:
{
value = RT_IDX_DST_CAM_Q | /* dest */
RT_IDX_TYPE_NICQ | /* type */
(RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT);/* index */
break;
}
case RT_IDX_VALID: /* Promiscuous Mode frames. */
{
value = RT_IDX_DST_DFLT_Q | /* dest */
RT_IDX_TYPE_NICQ | /* type */
(RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT);/* index */
break;
}
case RT_IDX_ERR: /* Pass up MAC,IP,TCP/UDP error frames. */
{
value = RT_IDX_DST_DFLT_Q | /* dest */
RT_IDX_TYPE_NICQ | /* type */
(RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT);/* index */
break;
}
case RT_IDX_IP_CSUM_ERR: /* Pass up IP CSUM error frames. */
{
value = RT_IDX_DST_DFLT_Q | /* dest */
RT_IDX_TYPE_NICQ | /* type */
(RT_IDX_IP_CSUM_ERR_SLOT <<
RT_IDX_IDX_SHIFT); /* index */
break;
}
case RT_IDX_TU_CSUM_ERR: /* Pass up TCP/UDP CSUM error frames. */
{
value = RT_IDX_DST_DFLT_Q | /* dest */
RT_IDX_TYPE_NICQ | /* type */
(RT_IDX_TCP_UDP_CSUM_ERR_SLOT <<
RT_IDX_IDX_SHIFT); /* index */
break;
}
case RT_IDX_BCAST: /* Pass up Broadcast frames to default Q. */
{
value = RT_IDX_DST_DFLT_Q | /* dest */
RT_IDX_TYPE_NICQ | /* type */
(RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT);/* index */
break;
}
case RT_IDX_MCAST: /* Pass up All Multicast frames. */
{
value = RT_IDX_DST_DFLT_Q | /* dest */
RT_IDX_TYPE_NICQ | /* type */
(RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);/* index */
break;
}
case RT_IDX_MCAST_MATCH: /* Pass up matched Multicast frames. */
{
value = RT_IDX_DST_DFLT_Q | /* dest */
RT_IDX_TYPE_NICQ | /* type */
(RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
break;
}
case RT_IDX_RSS_MATCH: /* Pass up matched RSS frames. */
{
value = RT_IDX_DST_RSS | /* dest */
RT_IDX_TYPE_NICQ | /* type */
(RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
break;
}
case 0: /* Clear the E-bit on an entry. */
{
value = RT_IDX_DST_DFLT_Q | /* dest */
RT_IDX_TYPE_NICQ | /* type */
(index << RT_IDX_IDX_SHIFT);/* index */
break;
}
default:
netif_err(qdev, ifup, qdev->ndev,
"Mask type %d not yet supported.\n", mask);
status = -EPERM;
goto exit;
}
/* Every matched case above produces a non-zero value, so the write
 * below always happens for a recognized mask.
 */
if (value) {
status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
if (status)
goto exit;
value |= (enable ? RT_IDX_E : 0);
ql_write32(qdev, RT_IDX, value);
ql_write32(qdev, RT_DATA, enable ? mask : 0);
}
exit:
return status;
}
/* Globally enable interrupts.  NOTE(review): the upper 16 bits look
 * like a write-enable mask for the low half (same pattern as the CFG
 * write in ql_write_cfg) — confirm against the register spec. */
static void ql_enable_interrupts(struct ql_adapter *qdev)
{
ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);
}
/* Globally disable interrupts: write the mask half with the enable
 * bit clear in the value half. */
static void ql_disable_interrupts(struct ql_adapter *qdev)
{
ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
}
/* If we're running with multiple MSI-X vectors then we enable on the fly.
* Otherwise, we may have multiple outstanding workers and don't want to
* enable until the last one finishes. In this case, the irq_cnt gets
* incremented every time we queue a worker and decremented every time
* a worker finishes. Once it hits zero we enable the interrupt.
*/
/* Enable the completion interrupt for vector 'intr'.
 * MSI-X non-default vectors are enabled unconditionally; otherwise the
 * vector is shared by several workers and is only enabled when the
 * irq_cnt refcount drops to zero (see the comment block above).
 * Returns the STS register value read after enabling (0 when the
 * refcount has not yet reached zero).
 */
u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
{
u32 var = 0;
unsigned long hw_flags = 0;
struct intr_context *ctx = qdev->intr_context + intr;
if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) {
/* Always enable if we're MSIX multi interrupts and
 * it's not the default (zeroeth) interrupt.
 */
ql_write32(qdev, INTR_EN,
ctx->intr_en_mask);
/* Read STS to flush the write before returning. */
var = ql_read32(qdev, STS);
return var;
}
/* Shared-vector path: enable only when the last worker finishes. */
spin_lock_irqsave(&qdev->hw_lock, hw_flags);
if (atomic_dec_and_test(&ctx->irq_cnt)) {
ql_write32(qdev, INTR_EN,
ctx->intr_en_mask);
var = ql_read32(qdev, STS);
}
spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
return var;
}
/* Disable the completion interrupt for vector 'intr' and bump its
 * irq_cnt refcount.  For MSI-X non-default vectors the hardware
 * auto-disables, so this is a no-op returning 0.  Otherwise the
 * disable register write is only issued on the 0 -> 1 transition.
 * Returns the STS value read after disabling (0 when no write made).
 */
static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
{
u32 var = 0;
struct intr_context *ctx;
/* HW disables for us if we're MSIX multi interrupts and
 * it's not the default (zeroeth) interrupt.
 */
if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr))
return 0;
ctx = qdev->intr_context + intr;
spin_lock(&qdev->hw_lock);
/* Only the first disabler actually touches the hardware. */
if (!atomic_read(&ctx->irq_cnt)) {
ql_write32(qdev, INTR_EN,
ctx->intr_dis_mask);
var = ql_read32(qdev, STS);
}
atomic_inc(&ctx->irq_cnt);
spin_unlock(&qdev->hw_lock);
return var;
}
/* Enable the completion interrupt on every vector.  For vectors that
 * use the shared irq_cnt refcounting scheme (vector 0, or all vectors
 * when MSI-X is off), precharge the count to 1 so that the
 * atomic_dec_and_test inside ql_enable_completion_interrupt hits zero
 * and the enable actually takes effect.
 */
static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
{
	int vec;

	for (vec = 0; vec < qdev->intr_count; vec++) {
		if (unlikely(vec == 0 ||
			     !test_bit(QL_MSIX_ENABLED, &qdev->flags)))
			atomic_set(&qdev->intr_context[vec].irq_cnt, 1);
		ql_enable_completion_interrupt(qdev, vec);
	}
}
/* Validate the flash image cached in qdev->flash: the first four bytes
 * must match the signature 'str', and the 16-bit one's-style sum over
 * 'size' half-words must be zero.  Returns 0 when valid, otherwise the
 * non-zero strncmp result or the non-zero checksum.
 */
static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str)
{
	__le16 *half_words = (__le16 *)&qdev->flash;
	u16 sum = 0;
	u32 i;
	int sig_diff;

	sig_diff = strncmp((char *)&qdev->flash, str, 4);
	if (sig_diff) {
		netif_err(qdev, ifup, qdev->ndev, "Invalid flash signature.\n");
		return sig_diff;
	}

	for (i = 0; i < size; i++)
		sum += le16_to_cpu(half_words[i]);

	if (sum)
		netif_err(qdev, ifup, qdev->ndev,
			  "Invalid flash checksum, csum = 0x%.04x.\n", sum);
	return sum;
}
/* Read one 32-bit word from flash at 'offset' through the
 * FLASH_ADDR/FLASH_DATA register pair.  Returns 0 on success or the
 * error from the readiness waits.
 */
static int ql_read_flash_word(struct ql_adapter *qdev, int offset, __le32 *data)
{
	int status;

	/* Wait until the flash address register is ready. */
	status = ql_wait_reg_rdy(qdev,
				 FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
	if (status)
		return status;

	/* Kick off the read of the requested word. */
	ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);

	status = ql_wait_reg_rdy(qdev,
				 FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
	if (status)
		return status;

	/* Flash stores __le32; ql_read32() returns cpu endian, so swap
	 * the value back to little endian.
	 */
	*data = cpu_to_le32(ql_read32(qdev, FLASH_DATA));
	return 0;
}
/* Read and validate the 8000-series flash parameter block, then copy
 * the MAC address out of it into the netdev.  Takes the flash hardware
 * semaphore for the duration.  Returns 0 on success, -ETIMEDOUT when
 * the semaphore cannot be taken, -EINVAL for bad flash contents.
 */
static int ql_get_8000_flash_params(struct ql_adapter *qdev)
{
u32 i, size;
int status;
__le32 *p = (__le32 *)&qdev->flash;
u32 offset;
u8 mac_addr[6];
/* Get flash offset for function and adjust
 * for dword access.
 */
if (!qdev->port)
offset = FUNC0_FLASH_OFFSET / sizeof(u32);
else
offset = FUNC1_FLASH_OFFSET / sizeof(u32);
if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
return -ETIMEDOUT;
/* Pull the whole parameter block into qdev->flash, one word at a
 * time. */
size = sizeof(struct flash_params_8000) / sizeof(u32);
for (i = 0; i < size; i++, p++) {
status = ql_read_flash_word(qdev, i+offset, p);
if (status) {
netif_err(qdev, ifup, qdev->ndev,
"Error reading flash.\n");
goto exit;
}
}
status = ql_validate_flash(qdev,
sizeof(struct flash_params_8000) / sizeof(u16),
"8000");
if (status) {
netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
status = -EINVAL;
goto exit;
}
/* Extract either manufacturer or BOFM modified
 * MAC address.
 */
if (qdev->flash.flash_params_8000.data_type1 == 2)
memcpy(mac_addr,
qdev->flash.flash_params_8000.mac_addr1,
qdev->ndev->addr_len);
else
memcpy(mac_addr,
qdev->flash.flash_params_8000.mac_addr,
qdev->ndev->addr_len);
if (!is_valid_ether_addr(mac_addr)) {
netif_err(qdev, ifup, qdev->ndev, "Invalid MAC address.\n");
status = -EINVAL;
goto exit;
}
memcpy(qdev->ndev->dev_addr,
mac_addr,
qdev->ndev->addr_len);
exit:
ql_sem_unlock(qdev, SEM_FLASH_MASK);
return status;
}
/* Read and validate the 8012-series flash parameter block, then copy
 * its MAC address into the netdev.  Takes the flash hardware semaphore
 * for the duration.  Returns 0 on success, -ETIMEDOUT when the
 * semaphore cannot be taken, -EINVAL for bad flash contents.
 */
static int ql_get_8012_flash_params(struct ql_adapter *qdev)
{
int i;
int status;
__le32 *p = (__le32 *)&qdev->flash;
u32 offset = 0;
u32 size = sizeof(struct flash_params_8012) / sizeof(u32);
/* Second function's parameters follow the first
 * function's.
 */
if (qdev->port)
offset = size;
if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
return -ETIMEDOUT;
/* Pull the whole parameter block into qdev->flash. */
for (i = 0; i < size; i++, p++) {
status = ql_read_flash_word(qdev, i+offset, p);
if (status) {
netif_err(qdev, ifup, qdev->ndev,
"Error reading flash.\n");
goto exit;
}
}
status = ql_validate_flash(qdev,
sizeof(struct flash_params_8012) / sizeof(u16),
"8012");
if (status) {
netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
status = -EINVAL;
goto exit;
}
if (!is_valid_ether_addr(qdev->flash.flash_params_8012.mac_addr)) {
status = -EINVAL;
goto exit;
}
memcpy(qdev->ndev->dev_addr,
qdev->flash.flash_params_8012.mac_addr,
qdev->ndev->addr_len);
exit:
ql_sem_unlock(qdev, SEM_FLASH_MASK);
return status;
}
/* xgmac register are located behind the xgmac_addr and xgmac_data
* register pair. Each read/write requires us to wait for the ready
* bit before reading/writing the data.
*/
/* XGMAC registers live behind the XGMAC_ADDR/XGMAC_DATA register pair;
 * each access must wait for the ready bit first.  Write 'data' to
 * XGMAC register 'reg'.  Returns 0 on success.
 */
static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data)
{
	int status = ql_wait_reg_rdy(qdev,
				     XGMAC_ADDR, XGMAC_ADDR_RDY,
				     XGMAC_ADDR_XME);

	if (status)
		return status;
	/* Latch the data first, then trigger the write by storing the
	 * target register address.
	 */
	ql_write32(qdev, XGMAC_DATA, data);
	ql_write32(qdev, XGMAC_ADDR, reg);
	return 0;
}
/* xgmac register are located behind the xgmac_addr and xgmac_data
* register pair. Each read/write requires us to wait for the ready
* bit before reading/writing the data.
*/
/* XGMAC registers live behind the XGMAC_ADDR/XGMAC_DATA register pair;
 * each access must wait for the ready bit first.  Read XGMAC register
 * 'reg' into *data.  Returns 0 on success.
 */
int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
{
	int status;

	status = ql_wait_reg_rdy(qdev,
				 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
	if (status)
		return status;

	/* Post the register address with the read strobe set. */
	ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);

	status = ql_wait_reg_rdy(qdev,
				 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
	if (status)
		return status;

	*data = ql_read32(qdev, XGMAC_DATA);
	return 0;
}
/* This is used for reading the 64-bit statistics regs. */
/* Read a 64-bit XGMAC statistics register as two 32-bit halves (low
 * word at 'reg', high word at 'reg' + 4).  On failure *data is left
 * untouched.  Returns 0 on success.
 */
int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data)
{
	u32 lo, hi;
	int status;

	status = ql_read_xgmac_reg(qdev, reg, &lo);
	if (status)
		return status;
	status = ql_read_xgmac_reg(qdev, reg + 4, &hi);
	if (status)
		return status;

	*data = ((u64) hi << 32) | (u64) lo;
	return 0;
}
/* 8000-series port bring-up: fetch the MPI firmware version (used for
 * the driver banner and ethtool info), check the firmware state, then
 * hand the TX/RX frame-size configuration off to a worker.
 * Returns 0 on success or the first mailbox error encountered.
 */
static int ql_8000_port_initialize(struct ql_adapter *qdev)
{
	int status;

	status = ql_mb_about_fw(qdev);
	if (status)
		return status;

	status = ql_mb_get_fw_state(qdev);
	if (status)
		return status;

	/* Wake up a worker to get/set the TX/RX frame sizes. */
	queue_delayed_work(qdev->workqueue, &qdev->mpi_port_cfg_work, 0);
	return 0;
}
/* Take the MAC Core out of reset.
* Enable statistics counting.
* Take the transmitter/receiver out of reset.
* This functionality may be done in the MPI firmware at a
* later date.
*/
/* 8012-series port bring-up: take the MAC core out of reset, enable
 * statistics counting, enable jumbo frames, and bring the transmitter
 * and receiver out of reset.  This functionality may move into the MPI
 * firmware at a later date.  Protected by the per-port xgmac semaphore;
 * if another function already holds it we just wait for that function
 * to finish the initialization.  Returns 0 on success.
 */
static int ql_8012_port_initialize(struct ql_adapter *qdev)
{
int status = 0;
u32 data;
if (ql_sem_trylock(qdev, qdev->xg_sem_mask)) {
/* Another function has the semaphore, so
 * wait for the port init bit to come ready.
 */
netif_info(qdev, link, qdev->ndev,
"Another function has the semaphore, so wait for the port init bit to come ready.\n");
status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
if (status) {
netif_crit(qdev, link, qdev->ndev,
"Port initialize timed out.\n");
}
return status;
}
netif_info(qdev, link, qdev->ndev, "Got xgmac semaphore!.\n");
/* Set the core reset. */
status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
if (status)
goto end;
data |= GLOBAL_CFG_RESET;
status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
if (status)
goto end;
/* Clear the core reset and turn on jumbo for receiver. */
data &= ~GLOBAL_CFG_RESET;	/* Clear core reset. */
data |= GLOBAL_CFG_JUMBO;	/* Turn on jumbo. */
data |= GLOBAL_CFG_TX_STAT_EN;
data |= GLOBAL_CFG_RX_STAT_EN;
status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
if (status)
goto end;
/* Enable transmitter, and clear it's reset. */
status = ql_read_xgmac_reg(qdev, TX_CFG, &data);
if (status)
goto end;
data &= ~TX_CFG_RESET;	/* Clear the TX MAC reset. */
data |= TX_CFG_EN;	/* Enable the transmitter. */
status = ql_write_xgmac_reg(qdev, TX_CFG, data);
if (status)
goto end;
/* Enable receiver and clear it's reset. */
status = ql_read_xgmac_reg(qdev, RX_CFG, &data);
if (status)
goto end;
data &= ~RX_CFG_RESET;	/* Clear the RX MAC reset. */
data |= RX_CFG_EN;	/* Enable the receiver. */
status = ql_write_xgmac_reg(qdev, RX_CFG, data);
if (status)
goto end;
/* Turn on jumbo. */
status =
ql_write_xgmac_reg(qdev, MAC_TX_PARAMS, MAC_TX_PARAMS_JUMBO | (0x2580 << 16));
if (status)
goto end;
status =
ql_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580);
if (status)
goto end;
/* Signal to the world that the port is enabled. */
ql_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init));
end:
ql_sem_unlock(qdev, qdev->xg_sem_mask);
return status;
}
/* Size in bytes of one large-buffer master page block: PAGE_SIZE scaled
 * by the configured page allocation order. */
static inline unsigned int ql_lbq_block_size(struct ql_adapter *qdev)
{
return PAGE_SIZE << qdev->lbq_buf_order;
}
/* Get the next large buffer. */
/* Consume and return the next large-buffer descriptor, advancing the
 * consumer index (with wrap) and counting the slot as free for refill.
 */
static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
{
	struct bq_desc *desc = &rx_ring->lbq[rx_ring->lbq_curr_idx];

	if (++rx_ring->lbq_curr_idx == rx_ring->lbq_len)
		rx_ring->lbq_curr_idx = 0;
	rx_ring->lbq_free_cnt++;
	return desc;
}
/* Consume the next large-buffer descriptor and make its page chunk
 * visible to the CPU.  The master page is only unmapped once its last
 * chunk is consumed, since all chunks share one DMA mapping.
 */
static struct bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev,
struct rx_ring *rx_ring)
{
struct bq_desc *lbq_desc = ql_get_curr_lbuf(rx_ring);
pci_dma_sync_single_for_cpu(qdev->pdev,
dma_unmap_addr(lbq_desc, mapaddr),
rx_ring->lbq_buf_size,
PCI_DMA_FROMDEVICE);
/* If it's the last chunk of our master page then
 * we unmap it.
 */
if ((lbq_desc->p.pg_chunk.offset + rx_ring->lbq_buf_size)
== ql_lbq_block_size(qdev))
pci_unmap_page(qdev->pdev,
lbq_desc->p.pg_chunk.map,
ql_lbq_block_size(qdev),
PCI_DMA_FROMDEVICE);
return lbq_desc;
}
/* Get the next small buffer. */
/* Consume and return the next small-buffer descriptor, advancing the
 * consumer index (with wrap) and counting the slot as free for refill.
 */
static struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring)
{
	struct bq_desc *desc = &rx_ring->sbq[rx_ring->sbq_curr_idx];

	if (++rx_ring->sbq_curr_idx == rx_ring->sbq_len)
		rx_ring->sbq_curr_idx = 0;
	rx_ring->sbq_free_cnt++;
	return desc;
}
/* Update an rx ring index. */
/* Advance the completion queue consumer index and the entry pointer in
 * lock step, wrapping both back to the start of the ring when the end
 * is reached.
 */
static void ql_update_cq(struct rx_ring *rx_ring)
{
	rx_ring->curr_entry++;
	if (unlikely(++rx_ring->cnsmr_idx == rx_ring->cq_len)) {
		rx_ring->cnsmr_idx = 0;
		rx_ring->curr_entry = rx_ring->cq_base;
	}
}
/* Tell the hardware how far we have consumed the completion queue by
 * writing the consumer index to its doorbell register. */
static void ql_write_cq_idx(struct rx_ring *rx_ring)
{
ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
}
/* Carve the next lbq_buf_size chunk out of the rx_ring's master page
 * and store it in lbq_desc.  A fresh master page (and its DMA mapping)
 * is allocated when none is in progress.  An extra page reference is
 * taken for every chunk except the last, so each consumer can release
 * its own reference.  Returns 0 on success, -ENOMEM on allocation or
 * mapping failure.
 */
static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring,
struct bq_desc *lbq_desc)
{
if (!rx_ring->pg_chunk.page) {
u64 map;
rx_ring->pg_chunk.page = alloc_pages(__GFP_COLD | __GFP_COMP |
GFP_ATOMIC,
qdev->lbq_buf_order);
if (unlikely(!rx_ring->pg_chunk.page)) {
netif_err(qdev, drv, qdev->ndev,
"page allocation failed.\n");
return -ENOMEM;
}
rx_ring->pg_chunk.offset = 0;
map = pci_map_page(qdev->pdev, rx_ring->pg_chunk.page,
0, ql_lbq_block_size(qdev),
PCI_DMA_FROMDEVICE);
if (pci_dma_mapping_error(qdev->pdev, map)) {
__free_pages(rx_ring->pg_chunk.page,
qdev->lbq_buf_order);
rx_ring->pg_chunk.page = NULL;
netif_err(qdev, drv, qdev->ndev,
"PCI mapping failed.\n");
return -ENOMEM;
}
rx_ring->pg_chunk.map = map;
rx_ring->pg_chunk.va = page_address(rx_ring->pg_chunk.page);
}
/* Copy the current master pg_chunk info
 * to the current descriptor.
 */
lbq_desc->p.pg_chunk = rx_ring->pg_chunk;
/* Adjust the master page chunk for next
 * buffer get.
 */
rx_ring->pg_chunk.offset += rx_ring->lbq_buf_size;
if (rx_ring->pg_chunk.offset == ql_lbq_block_size(qdev)) {
/* Page exhausted: drop it from the ring; the last consumer
 * owns the remaining reference. */
rx_ring->pg_chunk.page = NULL;
lbq_desc->p.pg_chunk.last_flag = 1;
} else {
rx_ring->pg_chunk.va += rx_ring->lbq_buf_size;
get_page(rx_ring->pg_chunk.page);
lbq_desc->p.pg_chunk.last_flag = 0;
}
return 0;
}
/* Process (refill) a large buffer queue. */
/* Process (refill) a large buffer queue.  Refills in batches of 16
 * descriptors while more than 32 slots are free, assigning each slot a
 * fresh page chunk and syncing it for device use; the producer doorbell
 * is only rung once at the end if anything was refilled.
 */
static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
u32 clean_idx = rx_ring->lbq_clean_idx;
u32 start_idx = clean_idx;
struct bq_desc *lbq_desc;
u64 map;
int i;
while (rx_ring->lbq_free_cnt > 32) {
for (i = (rx_ring->lbq_clean_idx % 16); i < 16; i++) {
netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
"lbq: try cleaning clean_idx = %d.\n",
clean_idx);
lbq_desc = &rx_ring->lbq[clean_idx];
if (ql_get_next_chunk(qdev, rx_ring, lbq_desc)) {
/* Out of memory: stop refilling and retry later. */
rx_ring->lbq_clean_idx = clean_idx;
netif_err(qdev, ifup, qdev->ndev,
"Could not get a page chunk, i=%d, clean_idx =%d .\n",
i, clean_idx);
return;
}
map = lbq_desc->p.pg_chunk.map +
lbq_desc->p.pg_chunk.offset;
dma_unmap_addr_set(lbq_desc, mapaddr, map);
dma_unmap_len_set(lbq_desc, maplen,
rx_ring->lbq_buf_size);
*lbq_desc->addr = cpu_to_le64(map);
pci_dma_sync_single_for_device(qdev->pdev, map,
rx_ring->lbq_buf_size,
PCI_DMA_FROMDEVICE);
clean_idx++;
if (clean_idx == rx_ring->lbq_len)
clean_idx = 0;
}
rx_ring->lbq_clean_idx = clean_idx;
rx_ring->lbq_prod_idx += 16;
if (rx_ring->lbq_prod_idx == rx_ring->lbq_len)
rx_ring->lbq_prod_idx = 0;
rx_ring->lbq_free_cnt -= 16;
}
if (start_idx != clean_idx) {
netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
"lbq: updating prod idx = %d.\n",
rx_ring->lbq_prod_idx);
ql_write_db_reg(rx_ring->lbq_prod_idx,
rx_ring->lbq_prod_idx_db_reg);
}
}
/* Process (refill) a small buffer queue. */
/* Process (refill) a small buffer queue.  Refills in batches of 16
 * descriptors while more than 16 slots are free.  Slots that still own
 * an skb from a previous pass are reused as-is; empty slots get a fresh
 * skb that is DMA-mapped for the device.  The producer doorbell is only
 * rung once at the end if anything was refilled.
 */
static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
u32 clean_idx = rx_ring->sbq_clean_idx;
u32 start_idx = clean_idx;
struct bq_desc *sbq_desc;
u64 map;
int i;
while (rx_ring->sbq_free_cnt > 16) {
for (i = (rx_ring->sbq_clean_idx % 16); i < 16; i++) {
sbq_desc = &rx_ring->sbq[clean_idx];
netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
"sbq: try cleaning clean_idx = %d.\n",
clean_idx);
if (sbq_desc->p.skb == NULL) {
netif_printk(qdev, rx_status, KERN_DEBUG,
qdev->ndev,
"sbq: getting new skb for index %d.\n",
sbq_desc->index);
sbq_desc->p.skb =
netdev_alloc_skb(qdev->ndev,
SMALL_BUFFER_SIZE);
if (sbq_desc->p.skb == NULL) {
/* Allocation failed: stop and retry later. */
rx_ring->sbq_clean_idx = clean_idx;
return;
}
skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD);
map = pci_map_single(qdev->pdev,
sbq_desc->p.skb->data,
rx_ring->sbq_buf_size,
PCI_DMA_FROMDEVICE);
if (pci_dma_mapping_error(qdev->pdev, map)) {
netif_err(qdev, ifup, qdev->ndev,
"PCI mapping failed.\n");
rx_ring->sbq_clean_idx = clean_idx;
dev_kfree_skb_any(sbq_desc->p.skb);
sbq_desc->p.skb = NULL;
return;
}
dma_unmap_addr_set(sbq_desc, mapaddr, map);
dma_unmap_len_set(sbq_desc, maplen,
rx_ring->sbq_buf_size);
*sbq_desc->addr = cpu_to_le64(map);
}
clean_idx++;
if (clean_idx == rx_ring->sbq_len)
clean_idx = 0;
}
rx_ring->sbq_clean_idx = clean_idx;
rx_ring->sbq_prod_idx += 16;
if (rx_ring->sbq_prod_idx == rx_ring->sbq_len)
rx_ring->sbq_prod_idx = 0;
rx_ring->sbq_free_cnt -= 16;
}
if (start_idx != clean_idx) {
netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
"sbq: updating prod idx = %d.\n",
rx_ring->sbq_prod_idx);
ql_write_db_reg(rx_ring->sbq_prod_idx,
rx_ring->sbq_prod_idx_db_reg);
}
}
/* Refill both the small and large receive buffer queues of a ring. */
static void ql_update_buffer_queues(struct ql_adapter *qdev,
struct rx_ring *rx_ring)
{
ql_update_sbq(qdev, rx_ring);
ql_update_lbq(qdev, rx_ring);
}
/* Unmaps tx buffers. Can be called from send() if a pci mapping
* fails at some stage, or from the interrupt when a tx completes.
*/
/* Unmaps tx buffers.  Can be called from send() if a pci mapping
 * fails at some stage, or from the interrupt when a tx completes.
 * 'mapped' is the number of map[] entries that were successfully
 * created; entry 0 is the skb->data area and entry 7 (when more than 7
 * mappings exist) is the external sglist (OAL) — both of those were
 * created with pci_map_single(), everything else with pci_map_page().
 */
static void ql_unmap_send(struct ql_adapter *qdev,
struct tx_ring_desc *tx_ring_desc, int mapped)
{
int i;
for (i = 0; i < mapped; i++) {
if (i == 0 || (i == 7 && mapped > 7)) {
/*
 * Unmap the skb->data area, or the
 * external sglist (AKA the Outbound
 * Address List (OAL)).
 * If its the zeroeth element, then it's
 * the skb->data area. If it's the 7th
 * element and there is more than 6 frags,
 * then its an OAL.
 */
if (i == 7) {
netif_printk(qdev, tx_done, KERN_DEBUG,
qdev->ndev,
"unmapping OAL area.\n");
}
pci_unmap_single(qdev->pdev,
dma_unmap_addr(&tx_ring_desc->map[i],
mapaddr),
dma_unmap_len(&tx_ring_desc->map[i],
maplen),
PCI_DMA_TODEVICE);
} else {
netif_printk(qdev, tx_done, KERN_DEBUG, qdev->ndev,
"unmapping frag %d.\n", i);
pci_unmap_page(qdev->pdev,
dma_unmap_addr(&tx_ring_desc->map[i],
mapaddr),
dma_unmap_len(&tx_ring_desc->map[i],
maplen), PCI_DMA_TODEVICE);
}
}
}
/* Map the buffers for this transmit. This will return
 * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
 *
 * DMA-maps the skb linear area plus each frag into the 8 address
 * descriptors of the IOCB; if there are more than 7 frags the eighth
 * descriptor is pointed at the per-descriptor OAL (external sglist)
 * and the remaining frags continue there.  On any mapping failure the
 * already-mapped slots are unwound via ql_unmap_send().
 */
static int ql_map_send(struct ql_adapter *qdev,
		       struct ob_mac_iocb_req *mac_iocb_ptr,
		       struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc)
{
	int len = skb_headlen(skb);
	dma_addr_t map;
	int frag_idx, err, map_idx = 0;
	struct tx_buf_desc *tbd = mac_iocb_ptr->tbd;
	int frag_cnt = skb_shinfo(skb)->nr_frags;
	if (frag_cnt) {
		netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
			     "frag_cnt = %d.\n", frag_cnt);
	}
	/*
	 * Map the skb buffer first.
	 */
	map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);
	err = pci_dma_mapping_error(qdev->pdev, map);
	if (err) {
		netif_err(qdev, tx_queued, qdev->ndev,
			  "PCI mapping failed with error: %d\n", err);
		/* Nothing mapped yet, so nothing to unwind. */
		return NETDEV_TX_BUSY;
	}
	tbd->len = cpu_to_le32(len);
	tbd->addr = cpu_to_le64(map);
	/* Record the mapping so ql_unmap_send() can undo it later. */
	dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
	dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);
	map_idx++;
	/*
	 * This loop fills the remainder of the 8 address descriptors
	 * in the IOCB. If there are more than 7 fragments, then the
	 * eighth address desc will point to an external list (OAL).
	 * When this happens, the remainder of the frags will be stored
	 * in this list.
	 */
	for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx];
		tbd++;
		if (frag_idx == 6 && frag_cnt > 7) {
			/* Let's tack on an sglist.
			 * Our control block will now
			 * look like this:
			 * iocb->seg[0] = skb->data
			 * iocb->seg[1] = frag[0]
			 * iocb->seg[2] = frag[1]
			 * iocb->seg[3] = frag[2]
			 * iocb->seg[4] = frag[3]
			 * iocb->seg[5] = frag[4]
			 * iocb->seg[6] = frag[5]
			 * iocb->seg[7] = ptr to OAL (external sglist)
			 * oal->seg[0] = frag[6]
			 * oal->seg[1] = frag[7]
			 * oal->seg[2] = frag[8]
			 * oal->seg[3] = frag[9]
			 * oal->seg[4] = frag[10]
			 * etc...
			 */
			/* Tack on the OAL in the eighth segment of IOCB. */
			map = pci_map_single(qdev->pdev, &tx_ring_desc->oal,
					     sizeof(struct oal),
					     PCI_DMA_TODEVICE);
			err = pci_dma_mapping_error(qdev->pdev, map);
			if (err) {
				netif_err(qdev, tx_queued, qdev->ndev,
					  "PCI mapping outbound address list with error: %d\n",
					  err);
				goto map_error;
			}
			tbd->addr = cpu_to_le64(map);
			/*
			 * The length is the number of fragments
			 * that remain to be mapped times the length
			 * of our sglist (OAL).
			 */
			tbd->len =
			    cpu_to_le32((sizeof(struct tx_buf_desc) *
					 (frag_cnt - frag_idx)) | TX_DESC_C);
			dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
					   map);
			dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
					  sizeof(struct oal));
			/* Subsequent descriptors are written into the OAL. */
			tbd = (struct tx_buf_desc *)&tx_ring_desc->oal;
			map_idx++;
		}
		map = skb_frag_dma_map(&qdev->pdev->dev, frag, 0, skb_frag_size(frag),
				       DMA_TO_DEVICE);
		err = dma_mapping_error(&qdev->pdev->dev, map);
		if (err) {
			netif_err(qdev, tx_queued, qdev->ndev,
				  "PCI mapping frags failed with error: %d.\n",
				  err);
			goto map_error;
		}
		tbd->addr = cpu_to_le64(map);
		tbd->len = cpu_to_le32(skb_frag_size(frag));
		dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
		dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
				  skb_frag_size(frag));
	}
	/* Save the number of segments we've mapped. */
	tx_ring_desc->map_cnt = map_idx;
	/* Terminate the last segment. */
	tbd->len = cpu_to_le32(le32_to_cpu(tbd->len) | TX_DESC_E);
	return NETDEV_TX_OK;
map_error:
	/*
	 * If the first frag mapping failed, then i will be zero.
	 * This causes the unmap of the skb->data area. Otherwise
	 * we pass in the number of frags that mapped successfully
	 * so they can be umapped.
	 */
	ql_unmap_send(qdev, tx_ring_desc, map_idx);
	return NETDEV_TX_BUSY;
}
/* Categorizing receive firmware frame errors.
 *
 * Bumps the aggregate error counters, then attributes the error to a
 * specific statistic based on the firmware error code in @rx_err.
 * Unknown codes are counted only in the aggregate counters.
 */
static void ql_categorize_rx_err(struct ql_adapter *qdev, u8 rx_err,
					struct rx_ring *rx_ring)
{
	struct nic_stats *stats = &qdev->nic_stats;

	/* Every flagged frame counts once globally and once per ring. */
	stats->rx_err_count++;
	rx_ring->rx_errors++;

	switch (rx_err & IB_MAC_IOCB_RSP_ERR_MASK) {
	case IB_MAC_IOCB_RSP_ERR_CODE_ERR:
		stats->rx_code_err++;
		break;
	case IB_MAC_IOCB_RSP_ERR_OVERSIZE:
		stats->rx_oversize_err++;
		break;
	case IB_MAC_IOCB_RSP_ERR_UNDERSIZE:
		stats->rx_undersize_err++;
		break;
	case IB_MAC_IOCB_RSP_ERR_PREAMBLE:
		stats->rx_preamble_err++;
		break;
	case IB_MAC_IOCB_RSP_ERR_FRAME_LEN:
		stats->rx_frame_len_err++;
		break;
	case IB_MAC_IOCB_RSP_ERR_CRC:
		stats->rx_crc_err++;
		break;	/* was an implicit fallthrough into default */
	default:
		break;
	}
}
/**
 * ql_update_mac_hdr_len - helper routine to update the mac header length
 * based on vlan tags if present
 *
 * Adds one or two VLAN_HLEN to *@len when the completion reports a VLAN
 * and hardware tag stripping is disabled (the tag(s) are still in the
 * frame bytes at @page).
 */
static void ql_update_mac_hdr_len(struct ql_adapter *qdev,
				  struct ib_mac_iocb_rsp *ib_mac_rsp,
				  void *page, size_t *len)
{
	u16 *tags;

	/* HW stripping enabled: the tag was already removed from the frame. */
	if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
		return;
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) {
		tags = (u16 *)page;
		/* Look for stacked vlan tags in ethertype field.
		 * The frame bytes are in network order, so compare
		 * against the big-endian form of ETH_P_8021Q; a raw
		 * host-order compare would never match on LE hosts.
		 */
		if (tags[6] == htons(ETH_P_8021Q) &&
		    tags[8] == htons(ETH_P_8021Q))
			*len += 2 * VLAN_HLEN;
		else
			*len += VLAN_HLEN;
	}
}
/* Process an inbound completion from an rx ring.
 *
 * Fast path for checksummed TCP frames that arrived in a single large
 * buffer page chunk: the chunk is attached as a frag to the per-NAPI
 * GRO skb and handed to napi_gro_frags().  The page reference is
 * released here on any drop path.
 */
static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
				       struct rx_ring *rx_ring,
				       struct ib_mac_iocb_rsp *ib_mac_rsp,
				       u32 length,
				       u16 vlan_id)
{
	struct sk_buff *skb;
	struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
	struct napi_struct *napi = &rx_ring->napi;
	/* Frame error, so drop the packet. */
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
		ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
		put_page(lbq_desc->p.pg_chunk.page);
		return;
	}
	napi->dev = qdev->ndev;
	skb = napi_get_frags(napi);
	if (!skb) {
		netif_err(qdev, drv, qdev->ndev,
			  "Couldn't get an skb, exiting.\n");
		rx_ring->rx_dropped++;
		put_page(lbq_desc->p.pg_chunk.page);
		return;
	}
	prefetch(lbq_desc->p.pg_chunk.va);
	/* Append the page chunk as the next frag of the GRO skb and
	 * account its length by hand (no skb_put for paged data).
	 */
	__skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
			     lbq_desc->p.pg_chunk.page,
			     lbq_desc->p.pg_chunk.offset,
			     length);
	skb->len += length;
	skb->data_len += length;
	skb->truesize += length;
	skb_shinfo(skb)->nr_frags++;
	rx_ring->rx_packets++;
	rx_ring->rx_bytes += length;
	/* Only checksummed TCP frames are routed here (see
	 * ql_process_mac_rx_intr), so this is safe to assert.
	 */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rx_ring->cq_id);
	if (vlan_id != 0xffff)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
	napi_gro_frags(napi);
}
/* Process an inbound completion from an rx ring.
 *
 * Non-TCP frame that arrived in a single large buffer page chunk:
 * allocate an skb, copy the MAC (+VLAN) header into the linear area,
 * and attach the rest of the page chunk as a frag.
 */
static void ql_process_mac_rx_page(struct ql_adapter *qdev,
				   struct rx_ring *rx_ring,
				   struct ib_mac_iocb_rsp *ib_mac_rsp,
				   u32 length,
				   u16 vlan_id)
{
	struct net_device *ndev = qdev->ndev;
	struct sk_buff *skb = NULL;
	void *addr;
	struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
	struct napi_struct *napi = &rx_ring->napi;
	size_t hlen = ETH_HLEN;
	skb = netdev_alloc_skb(ndev, length);
	if (!skb) {
		rx_ring->rx_dropped++;
		put_page(lbq_desc->p.pg_chunk.page);
		return;
	}
	addr = lbq_desc->p.pg_chunk.va;
	prefetch(addr);
	/* Frame error, so drop the packet. */
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
		ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
		goto err_out;
	}
	/* Update the MAC header length*/
	ql_update_mac_hdr_len(qdev, ib_mac_rsp, addr, &hlen);
	/* The max framesize filter on this chip is set higher than
	 * MTU since FCoE uses 2k frames.
	 *
	 * NOTE(review): skb->len is still 0 here (nothing has been
	 * skb_put yet), so this oversize check appears to never fire;
	 * presumably it was meant to test 'length' — verify.
	 */
	if (skb->len > ndev->mtu + hlen) {
		netif_err(qdev, drv, qdev->ndev,
			  "Segment too small, dropping.\n");
		rx_ring->rx_dropped++;
		goto err_out;
	}
	/* Pull only the header into the linear area; the payload stays
	 * in the page frag below.
	 */
	memcpy(skb_put(skb, hlen), addr, hlen);
	netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
		     "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
		     length);
	skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
				lbq_desc->p.pg_chunk.offset + hlen,
				length - hlen);
	skb->len += length - hlen;
	skb->data_len += length - hlen;
	skb->truesize += length - hlen;
	rx_ring->rx_packets++;
	rx_ring->rx_bytes += skb->len;
	skb->protocol = eth_type_trans(skb, ndev);
	skb_checksum_none_assert(skb);
	/* Report hardware checksum results only when RXCSUM is enabled
	 * and the firmware flagged no checksum error.
	 */
	if ((ndev->features & NETIF_F_RXCSUM) &&
		!(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
		/* TCP frame. */
		if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "TCP checksum done!\n");
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		} else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
				(ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
			/* Unfragmented ipv4 UDP frame. */
			struct iphdr *iph =
				(struct iphdr *)((u8 *)addr + hlen);
			if (!(iph->frag_off &
				htons(IP_MF|IP_OFFSET))) {
				skb->ip_summed = CHECKSUM_UNNECESSARY;
				netif_printk(qdev, rx_status, KERN_DEBUG,
					     qdev->ndev,
					     "UDP checksum done!\n");
			}
		}
	}
	skb_record_rx_queue(skb, rx_ring->cq_id);
	if (vlan_id != 0xffff)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
	if (skb->ip_summed == CHECKSUM_UNNECESSARY)
		napi_gro_receive(napi, skb);
	else
		netif_receive_skb(skb);
	return;
err_out:
	dev_kfree_skb_any(skb);
	put_page(lbq_desc->p.pg_chunk.page);
}
/* Process an inbound completion from an rx ring.
 *
 * Frame small enough to fit entirely in one small buffer: copy it
 * into a freshly allocated skb so the small buffer can be recycled
 * in place, then hand the copy up the stack.
 */
static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
				  struct rx_ring *rx_ring,
				  struct ib_mac_iocb_rsp *ib_mac_rsp,
				  u32 length,
				  u16 vlan_id)
{
	struct net_device *ndev = qdev->ndev;
	struct sk_buff *skb = NULL;
	struct sk_buff *new_skb = NULL;
	struct bq_desc *sbq_desc = ql_get_curr_sbuf(rx_ring);
	skb = sbq_desc->p.skb;
	/* Allocate new_skb and copy */
	new_skb = netdev_alloc_skb(qdev->ndev, length + NET_IP_ALIGN);
	if (new_skb == NULL) {
		rx_ring->rx_dropped++;
		return;
	}
	skb_reserve(new_skb, NET_IP_ALIGN);
	/* NOTE(review): the small-buffer data is copied without a
	 * dma_sync_single_for_cpu() on its mapping first — presumably
	 * coherency is handled elsewhere; verify on non-coherent archs.
	 */
	memcpy(skb_put(new_skb, length), skb->data, length);
	skb = new_skb;
	/* Frame error, so drop the packet. */
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
		ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
		dev_kfree_skb_any(skb);
		return;
	}
	/* loopback self test for ethtool */
	if (test_bit(QL_SELFTEST, &qdev->flags)) {
		ql_check_lb_frame(qdev, skb);
		dev_kfree_skb_any(skb);
		return;
	}
	/* The max framesize filter on this chip is set higher than
	 * MTU since FCoE uses 2k frames.
	 */
	if (skb->len > ndev->mtu + ETH_HLEN) {
		dev_kfree_skb_any(skb);
		rx_ring->rx_dropped++;
		return;
	}
	prefetch(skb->data);
	if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "%s Multicast.\n",
			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			     IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			     IB_MAC_IOCB_RSP_M_REG ? "Registered" :
			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			     IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
	}
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P)
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "Promiscuous Packet.\n");
	rx_ring->rx_packets++;
	rx_ring->rx_bytes += skb->len;
	skb->protocol = eth_type_trans(skb, ndev);
	skb_checksum_none_assert(skb);
	/* If rx checksum is on, and there are no
	 * csum or frame errors.
	 */
	if ((ndev->features & NETIF_F_RXCSUM) &&
		!(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
		/* TCP frame. */
		if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "TCP checksum done!\n");
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		} else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
				(ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
			/* Unfragmented ipv4 UDP frame. */
			struct iphdr *iph = (struct iphdr *) skb->data;
			if (!(iph->frag_off &
				htons(IP_MF|IP_OFFSET))) {
				skb->ip_summed = CHECKSUM_UNNECESSARY;
				netif_printk(qdev, rx_status, KERN_DEBUG,
					     qdev->ndev,
					     "UDP checksum done!\n");
			}
		}
	}
	skb_record_rx_queue(skb, rx_ring->cq_id);
	if (vlan_id != 0xffff)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
	if (skb->ip_summed == CHECKSUM_UNNECESSARY)
		napi_gro_receive(&rx_ring->napi, skb);
	else
		netif_receive_skb(skb);
}
/* Shift @len bytes of skb data back toward the head so the IP header
 * ends up 2-byte aligned (NET_IP_ALIGN) after the QLGE_SB_PAD reserve
 * that was applied before handing the buffer to hardware.
 */
static void ql_realign_skb(struct sk_buff *skb, int len)
{
	void *temp_addr = skb->data;
	/* Undo the skb_reserve(skb,32) we did before
	 * giving to hardware, and realign data on
	 * a 2-byte boundary.
	 */
	skb->data -= QLGE_SB_PAD - NET_IP_ALIGN;
	skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN;
	/* memmove-style copy within the same linear buffer. */
	skb_copy_to_linear_data(skb, temp_addr,
		(unsigned int)len);
}
/*
 * This function builds an skb for the given inbound
 * completion. It will be rewritten for readability in the near
 * future, but for now it works well.
 *
 * The completion may describe any combination of: a header in a small
 * buffer, data in a small buffer, data in a single large buffer, or
 * data chained across multiple large buffers.  Returns the assembled
 * skb, or NULL on allocation failure.
 */
static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
				       struct rx_ring *rx_ring,
				       struct ib_mac_iocb_rsp *ib_mac_rsp)
{
	struct bq_desc *lbq_desc;
	struct bq_desc *sbq_desc;
	struct sk_buff *skb = NULL;
	u32 length = le32_to_cpu(ib_mac_rsp->data_len);
	u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);
	size_t hlen = ETH_HLEN;
	/*
	 * Handle the header buffer if present.
	 */
	if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV &&
	    ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "Header of %d bytes in small buffer.\n", hdr_len);
		/*
		 * Headers fit nicely into a small buffer.
		 */
		sbq_desc = ql_get_curr_sbuf(rx_ring);
		pci_unmap_single(qdev->pdev,
				 dma_unmap_addr(sbq_desc, mapaddr),
				 dma_unmap_len(sbq_desc, maplen),
				 PCI_DMA_FROMDEVICE);
		skb = sbq_desc->p.skb;
		ql_realign_skb(skb, hdr_len);
		skb_put(skb, hdr_len);
		/* Ownership of the skb moves to us; the sbq slot will be
		 * refilled by ql_update_buffer_queues().
		 */
		sbq_desc->p.skb = NULL;
	}
	/*
	 * Handle the data buffer(s).
	 */
	if (unlikely(!length)) {	/* Is there data too? */
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "No Data buffer in this packet.\n");
		return skb;
	}
	if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
		if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "Headers in small, data of %d bytes in small, combine them.\n",
				     length);
			/*
			 * Data is less than small buffer size so it's
			 * stuffed in a small buffer.
			 * For this case we append the data
			 * from the "data" small buffer to the "header" small
			 * buffer.
			 */
			sbq_desc = ql_get_curr_sbuf(rx_ring);
			pci_dma_sync_single_for_cpu(qdev->pdev,
						    dma_unmap_addr
						    (sbq_desc, mapaddr),
						    dma_unmap_len
						    (sbq_desc, maplen),
						    PCI_DMA_FROMDEVICE);
			memcpy(skb_put(skb, length),
			       sbq_desc->p.skb->data, length);
			/* Buffer stays mapped for reuse — hand it back. */
			pci_dma_sync_single_for_device(qdev->pdev,
						       dma_unmap_addr
						       (sbq_desc,
							mapaddr),
						       dma_unmap_len
						       (sbq_desc,
							maplen),
						       PCI_DMA_FROMDEVICE);
		} else {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "%d bytes in a single small buffer.\n",
				     length);
			sbq_desc = ql_get_curr_sbuf(rx_ring);
			skb = sbq_desc->p.skb;
			ql_realign_skb(skb, length);
			skb_put(skb, length);
			pci_unmap_single(qdev->pdev,
					 dma_unmap_addr(sbq_desc,
							mapaddr),
					 dma_unmap_len(sbq_desc,
						       maplen),
					 PCI_DMA_FROMDEVICE);
			sbq_desc->p.skb = NULL;
		}
	} else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
		if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "Header in small, %d bytes in large. Chain large to small!\n",
				     length);
			/*
			 * The data is in a single large buffer.  We
			 * chain it to the header buffer's skb and let
			 * it rip.
			 */
			lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "Chaining page at offset = %d, for %d bytes  to skb.\n",
				     lbq_desc->p.pg_chunk.offset, length);
			skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
						lbq_desc->p.pg_chunk.offset,
						length);
			skb->len += length;
			skb->data_len += length;
			skb->truesize += length;
		} else {
			/*
			 * The headers and data are in a single large buffer. We
			 * copy it to a new skb and let it go. This can happen with
			 * jumbo mtu on a non-TCP/UDP frame.
			 */
			lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
			skb = netdev_alloc_skb(qdev->ndev, length);
			if (skb == NULL) {
				netif_printk(qdev, probe, KERN_DEBUG, qdev->ndev,
					     "No skb available, drop the packet.\n");
				return NULL;
			}
			pci_unmap_page(qdev->pdev,
				       dma_unmap_addr(lbq_desc,
						      mapaddr),
				       dma_unmap_len(lbq_desc, maplen),
				       PCI_DMA_FROMDEVICE);
			skb_reserve(skb, NET_IP_ALIGN);
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
				     length);
			skb_fill_page_desc(skb, 0,
					   lbq_desc->p.pg_chunk.page,
					   lbq_desc->p.pg_chunk.offset,
					   length);
			skb->len += length;
			skb->data_len += length;
			skb->truesize += length;
			length -= length;
			/* Pull the (possibly VLAN-extended) header into
			 * the linear area for the stack.
			 */
			ql_update_mac_hdr_len(qdev, ib_mac_rsp,
					      lbq_desc->p.pg_chunk.va,
					      &hlen);
			__pskb_pull_tail(skb, hlen);
		}
	} else {
		/*
		 * The data is in a chain of large buffers
		 * pointed to by a small buffer.  We loop
		 * thru and chain them to the our small header
		 * buffer's skb.
		 * frags:  There are 18 max frags and our small
		 *         buffer will hold 32 of them. The thing is,
		 *         we'll use 3 max for our 9000 byte jumbo
		 *         frames.  If the MTU goes up we could
		 *          eventually be in trouble.
		 */
		int size, i = 0;
		sbq_desc = ql_get_curr_sbuf(rx_ring);
		pci_unmap_single(qdev->pdev,
				 dma_unmap_addr(sbq_desc, mapaddr),
				 dma_unmap_len(sbq_desc, maplen),
				 PCI_DMA_FROMDEVICE);
		if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
			/*
			 * This is an non TCP/UDP IP frame, so
			 * the headers aren't split into a small
			 * buffer.  We have to use the small buffer
			 * that contains our sg list as our skb to
			 * send upstairs. Copy the sg list here to
			 * a local buffer and use it to find the
			 * pages to chain.
			 */
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "%d bytes of headers & data in chain of large.\n",
				     length);
			skb = sbq_desc->p.skb;
			sbq_desc->p.skb = NULL;
			skb_reserve(skb, NET_IP_ALIGN);
		}
		/* Chain large-buffer chunks until 'length' is consumed. */
		while (length > 0) {
			lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
			size = (length < rx_ring->lbq_buf_size) ? length :
				rx_ring->lbq_buf_size;
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "Adding page %d to skb for %d bytes.\n",
				     i, size);
			skb_fill_page_desc(skb, i,
					   lbq_desc->p.pg_chunk.page,
					   lbq_desc->p.pg_chunk.offset,
					   size);
			skb->len += size;
			skb->data_len += size;
			skb->truesize += size;
			length -= size;
			i++;
		}
		ql_update_mac_hdr_len(qdev, ib_mac_rsp, lbq_desc->p.pg_chunk.va,
				      &hlen);
		__pskb_pull_tail(skb, hlen);
	}
	return skb;
}
/* Process an inbound completion from an rx ring.
 *
 * Handles completions whose headers/data were split across buffers:
 * assembles the skb via ql_build_rx_skb(), runs the drop checks
 * (frame error, oversize, ethtool selftest), records hardware
 * checksum results, and hands the skb to GRO or the regular path.
 */
static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
					 struct rx_ring *rx_ring,
					 struct ib_mac_iocb_rsp *ib_mac_rsp,
					 u16 vlan_id)
{
	struct net_device *ndev = qdev->ndev;
	struct sk_buff *skb = NULL;
	QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
	skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
	if (unlikely(!skb)) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "No skb available, drop packet.\n");
		rx_ring->rx_dropped++;
		return;
	}
	/* Frame error, so drop the packet. */
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
		ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
		dev_kfree_skb_any(skb);
		return;
	}
	/* The max framesize filter on this chip is set higher than
	 * MTU since FCoE uses 2k frames.
	 */
	if (skb->len > ndev->mtu + ETH_HLEN) {
		dev_kfree_skb_any(skb);
		rx_ring->rx_dropped++;
		return;
	}
	/* loopback self test for ethtool */
	if (test_bit(QL_SELFTEST, &qdev->flags)) {
		ql_check_lb_frame(qdev, skb);
		dev_kfree_skb_any(skb);
		return;
	}
	prefetch(skb->data);
	if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "%s Multicast.\n",
			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			     IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			     IB_MAC_IOCB_RSP_M_REG ? "Registered" :
			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			     IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
		rx_ring->rx_multicast++;
	}
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "Promiscuous Packet.\n");
	}
	skb->protocol = eth_type_trans(skb, ndev);
	skb_checksum_none_assert(skb);
	/* If rx checksum is on, and there are no
	 * csum or frame errors.
	 */
	if ((ndev->features & NETIF_F_RXCSUM) &&
		!(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
		/* TCP frame. */
		if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "TCP checksum done!\n");
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		} else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
				(ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
			/* Unfragmented ipv4 UDP frame. */
			struct iphdr *iph = (struct iphdr *) skb->data;
			if (!(iph->frag_off &
				htons(IP_MF|IP_OFFSET))) {
				skb->ip_summed = CHECKSUM_UNNECESSARY;
				/* Fixed copy/paste: this branch handles
				 * UDP, not TCP (matches the sibling
				 * receive paths' message).
				 */
				netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
					     "UDP checksum done!\n");
			}
		}
	}
	rx_ring->rx_packets++;
	rx_ring->rx_bytes += skb->len;
	skb_record_rx_queue(skb, rx_ring->cq_id);
	if (vlan_id != 0xffff)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
	if (skb->ip_summed == CHECKSUM_UNNECESSARY)
		napi_gro_receive(&rx_ring->napi, skb);
	else
		netif_receive_skb(skb);
}
/* Process an inbound completion from an rx ring.
 *
 * Dispatches the completion to the appropriate receive handler based
 * on how the hardware placed the frame (split header/data, single
 * small buffer, single large buffer, or chained buffers).  Returns
 * the frame data length for accounting by the caller.
 */
static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
					    struct rx_ring *rx_ring,
					    struct ib_mac_iocb_rsp *ib_mac_rsp)
{
	u32 length = le32_to_cpu(ib_mac_rsp->data_len);
	/* Extract the VLAN id only when the hardware stripped a tag and
	 * CTAG rx acceleration is on; 0xffff means "no tag to set".
	 */
	u16 vlan_id = ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) &&
			(qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)) ?
			((le16_to_cpu(ib_mac_rsp->vlan_id) &
			IB_MAC_IOCB_RSP_VLAN_MASK)) : 0xffff;
	QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
	if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
		/* The data and headers are split into
		 * separate buffers.
		 */
		ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
					     vlan_id);
	} else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
		/* The data fit in a single small buffer.
		 * Allocate a new skb, copy the data and
		 * return the buffer to the free pool.
		 */
		ql_process_mac_rx_skb(qdev, rx_ring, ib_mac_rsp,
				      length, vlan_id);
	} else if ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) &&
		!(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK) &&
		(ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T)) {
		/* TCP packet in a page chunk that's been checksummed.
		 * Tack it on to our GRO skb and let it go.
		 */
		ql_process_mac_rx_gro_page(qdev, rx_ring, ib_mac_rsp,
					   length, vlan_id);
	} else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
		/* Non-TCP packet in a page chunk. Allocate an
		 * skb, tack it on frags, and send it up.
		 */
		ql_process_mac_rx_page(qdev, rx_ring, ib_mac_rsp,
				       length, vlan_id);
	} else {
		/* Non-TCP/UDP large frames that span multiple buffers
		 * can be processed correctly by the split frame logic.
		 */
		ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
					     vlan_id);
	}
	return (unsigned long)length;
}
/* Process an outbound completion from an rx ring.
 *
 * Unmaps the tx buffers for the completed IOCB, updates tx stats,
 * frees the skb, logs any completion error flags, and returns the
 * descriptor slot to the tx ring (tx_count).
 */
static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
				   struct ob_mac_iocb_rsp *mac_rsp)
{
	struct tx_ring *tx_ring;
	struct tx_ring_desc *tx_ring_desc;
	QL_DUMP_OB_MAC_RSP(mac_rsp);
	/* txq_idx/tid identify the ring and slot this completion is for. */
	tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
	tx_ring_desc = &tx_ring->q[mac_rsp->tid];
	ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
	tx_ring->tx_bytes += (tx_ring_desc->skb)->len;
	tx_ring->tx_packets++;
	dev_kfree_skb(tx_ring_desc->skb);
	tx_ring_desc->skb = NULL;
	/* NOTE: OB_MAC_IOCB_RSP_P is tested in the combined mask but has
	 * no individual message below.
	 */
	if (unlikely(mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E |
					OB_MAC_IOCB_RSP_S |
					OB_MAC_IOCB_RSP_L |
					OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) {
		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
			netif_warn(qdev, tx_done, qdev->ndev,
				   "Total descriptor length did not match transfer length.\n");
		}
		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
			netif_warn(qdev, tx_done, qdev->ndev,
				   "Frame too short to be valid, not sent.\n");
		}
		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
			netif_warn(qdev, tx_done, qdev->ndev,
				   "Frame too long, but sent anyway.\n");
		}
		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) {
			netif_warn(qdev, tx_done, qdev->ndev,
				   "PCI backplane error. Frame not sent.\n");
		}
	}
	atomic_inc(&tx_ring->tx_count);
}
/* Fire up a handler to reset the MPI processor.
 * Takes the link down and schedules mpi_reset_work immediately.
 */
void ql_queue_fw_error(struct ql_adapter *qdev)
{
	ql_link_off(qdev);
	queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
}
/* Schedule a full ASIC recovery: link off, interrupts disabled, and
 * asic_reset_work queued.  Flag manipulation below tells the reset
 * worker this is fatal-error recovery, not a normal close.
 */
void ql_queue_asic_error(struct ql_adapter *qdev)
{
	ql_link_off(qdev);
	ql_disable_interrupts(qdev);
	/* Clear adapter up bit to signal the recovery
	 * process that it shouldn't kill the reset worker
	 * thread
	 */
	clear_bit(QL_ADAPTER_UP, &qdev->flags);
	/* Set asic recovery bit to indicate reset process that we are
	 * in fatal error recovery process rather than normal close
	 */
	set_bit(QL_ASIC_RECOVERY, &qdev->flags);
	queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
}
/* Handle an asynchronous-event IOCB from the chip.  A management
 * processor fatal error triggers a firmware reset; every other event
 * (known or not) is logged and escalates to a full ASIC recovery.
 */
static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
				    struct ib_ae_iocb_rsp *ib_ae_rsp)
{
	if (ib_ae_rsp->event == MGMT_ERR_EVENT) {
		netif_err(qdev, rx_err, qdev->ndev,
			  "Management Processor Fatal Error.\n");
		ql_queue_fw_error(qdev);
		return;
	}

	if (ib_ae_rsp->event == CAM_LOOKUP_ERR_EVENT) {
		netdev_err(qdev->ndev, "Multiple CAM hits lookup occurred.\n");
		netdev_err(qdev->ndev, "This event shouldn't occur.\n");
	} else if (ib_ae_rsp->event == SOFT_ECC_ERROR_EVENT) {
		netdev_err(qdev->ndev, "Soft ECC error detected.\n");
	} else if (ib_ae_rsp->event == PCI_ERR_ANON_BUF_RD) {
		netdev_err(qdev->ndev, "PCI error occurred when reading "
					"anonymous buffers from rx_ring %d.\n",
					ib_ae_rsp->q_id);
	} else {
		netif_err(qdev, drv, qdev->ndev, "Unexpected event %d.\n",
			  ib_ae_rsp->event);
	}

	/* All non-MGMT events end in ASIC recovery. */
	ql_queue_asic_error(qdev);
}
/* Drain tx-completion entries from an outbound completion ring.
 * Processes each completed send, advances the consumer index, and
 * wakes the corresponding tx subqueue once it is at least 25% empty.
 * Returns the number of completions handled (0 if the ring was empty).
 */
static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
{
	struct ql_adapter *qdev = rx_ring->qdev;
	u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
	struct ob_mac_iocb_rsp *net_rsp = NULL;
	int count = 0;
	struct tx_ring *tx_ring;
	/* While there are entries in the completion queue. */
	while (prod != rx_ring->cnsmr_idx) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "cq_id = %d, prod = %d, cnsmr = %d.\n.",
			     rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
		net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
		/* Order the entry read against the producer index read. */
		rmb();
		switch (net_rsp->opcode) {
		case OPCODE_OB_MAC_TSO_IOCB:
		case OPCODE_OB_MAC_IOCB:
			ql_process_mac_tx_intr(qdev, net_rsp);
			break;
		default:
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "Hit default case, not handled! dropping the packet, opcode = %x.\n",
				     net_rsp->opcode);
		}
		count++;
		ql_update_cq(rx_ring);
		prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
	}
	if (!net_rsp)
		return 0;
	ql_write_cq_idx(rx_ring);
	/* net_rsp points at the last completion processed; use it to
	 * find the tx ring whose subqueue may need waking.
	 */
	tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
	if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id)) {
		if ((atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
			/*
			 * The queue got stopped because the tx_ring was full.
			 * Wake it up, because it's now at least 25% empty.
			 */
			netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
	}
	return count;
}
/* Drain receive/async-event entries from an inbound completion ring,
 * up to @budget completions (NAPI budget).  Refills the buffer queues
 * and writes the consumer index back to hardware before returning the
 * number of completions processed.
 */
static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
{
	struct ql_adapter *qdev = rx_ring->qdev;
	u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
	struct ql_net_rsp_iocb *net_rsp;
	int count = 0;
	/* While there are entries in the completion queue. */
	while (prod != rx_ring->cnsmr_idx) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "cq_id = %d, prod = %d, cnsmr = %d.\n.",
			     rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
		net_rsp = rx_ring->curr_entry;
		/* Order the entry read against the producer index read. */
		rmb();
		switch (net_rsp->opcode) {
		case OPCODE_IB_MAC_IOCB:
			ql_process_mac_rx_intr(qdev, rx_ring,
					       (struct ib_mac_iocb_rsp *)
					       net_rsp);
			break;
		case OPCODE_IB_AE_IOCB:
			ql_process_chip_ae_intr(qdev, (struct ib_ae_iocb_rsp *)
						net_rsp);
			break;
		default:
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "Hit default case, not handled! dropping the packet, opcode = %x.\n",
				     net_rsp->opcode);
			break;
		}
		count++;
		ql_update_cq(rx_ring);
		prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
		if (count == budget)
			break;
	}
	ql_update_buffer_queues(qdev, rx_ring);
	ql_write_cq_idx(rx_ring);
	return count;
}
/* NAPI poll routine (MSI-X).  Services any non-empty TX completion
 * rings mapped to this vector first, then the RSS (rx) ring itself.
 * Re-enables the completion interrupt once work is under budget.
 */
static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
{
	struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi);
	struct ql_adapter *qdev = rx_ring->qdev;
	struct rx_ring *trx_ring;
	int i, work_done = 0;
	struct intr_context *ctx = &qdev->intr_context[rx_ring->cq_id];
	netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
		     "Enter, NAPI POLL cq_id = %d.\n", rx_ring->cq_id);
	/* Service the TX rings first.  They start
	 * right after the RSS rings. */
	for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) {
		trx_ring = &qdev->rx_ring[i];
		/* If this TX completion ring belongs to this vector and
		 * it's not empty then service it.
		 */
		if ((ctx->irq_mask & (1 << trx_ring->cq_id)) &&
			(ql_read_sh_reg(trx_ring->prod_idx_sh_reg) !=
					trx_ring->cnsmr_idx)) {
			netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
				     "%s: Servicing TX completion ring %d.\n",
				     __func__, trx_ring->cq_id);
			ql_clean_outbound_rx_ring(trx_ring);
		}
	}
	/*
	 * Now service the RSS ring if it's active.
	 */
	if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
					rx_ring->cnsmr_idx) {
		netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
			     "%s: Servicing RX completion ring %d.\n",
			     __func__, rx_ring->cq_id);
		work_done = ql_clean_inbound_rx_ring(rx_ring, budget);
	}
	/* Under budget means the ring is drained: stop polling and
	 * re-arm the interrupt.
	 */
	if (work_done < budget) {
		napi_complete(napi);
		ql_enable_completion_interrupt(qdev, rx_ring->irq);
	}
	return work_done;
}
/* Program the NIC receive-config register for the requested VLAN rx
 * mode: with CTAG rx acceleration on, also enable match-and-strip;
 * otherwise write only the VLAN mask bits.
 */
static void qlge_vlan_mode(struct net_device *ndev, netdev_features_t features)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	u32 rcv_cfg = NIC_RCV_CFG_VLAN_MASK;

	if (features & NETIF_F_HW_VLAN_CTAG_RX)
		rcv_cfg |= NIC_RCV_CFG_VLAN_MATCH_AND_NON;

	ql_write32(qdev, NIC_RCV_CFG, rcv_cfg);
}
/**
 * qlge_update_hw_vlan_features - helper routine to reinitialize the adapter
 * based on the features to enable/disable hardware vlan accel
 *
 * Bounces the adapter (down, update ndev->features, up) because the
 * VLAN acceleration setting only takes effect on re-init.
 * Returns 0 on success or the down/up error code.
 */
static int qlge_update_hw_vlan_features(struct net_device *ndev,
					netdev_features_t features)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	int status = 0;
	status = ql_adapter_down(qdev);
	if (status) {
		netif_err(qdev, link, qdev->ndev,
			  "Failed to bring down the adapter\n");
		return status;
	}
	/* update the features with resent change */
	ndev->features = features;
	status = ql_adapter_up(qdev);
	if (status) {
		netif_err(qdev, link, qdev->ndev,
			  "Failed to bring up the adapter\n");
		return status;
	}
	return status;
}
/* ndo_fix_features callback.
 *
 * NOTE(review): this callback performs a full adapter bounce as a
 * side effect and returns a negative errno cast to netdev_features_t
 * on failure; the ndo_fix_features contract expects only a (possibly
 * adjusted) feature mask — verify against netdevice.h.
 */
static netdev_features_t qlge_fix_features(struct net_device *ndev,
	netdev_features_t features)
{
	int err;
	/* Update the behavior of vlan accel in the adapter */
	err = qlge_update_hw_vlan_features(ndev, features);
	if (err)
		return err;
	return features;
}
/* ndo_set_features callback: reprogram the VLAN receive mode only
 * when the CTAG rx bit actually changed.  Always returns 0.
 */
static int qlge_set_features(struct net_device *ndev,
	netdev_features_t features)
{
	netdev_features_t changed = ndev->features ^ features;
	if (changed & NETIF_F_HW_VLAN_CTAG_RX)
		qlge_vlan_mode(ndev, features);
	return 0;
}
/* Enable a VLAN id in the MAC address CAM (caller holds the MAC
 * address semaphore).  Returns 0 on success or the register error.
 */
static int __qlge_vlan_rx_add_vid(struct ql_adapter *qdev, u16 vid)
{
	u32 enable_bit = MAC_ADDR_E;
	int status = ql_set_mac_addr_reg(qdev, (u8 *)&enable_bit,
					 MAC_ADDR_TYPE_VLAN, vid);

	if (status)
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to init vlan address.\n");
	return status;
}
/* ndo_vlan_rx_add_vid callback: register @vid with the hardware under
 * the MAC address semaphore and track it in active_vlans for restore
 * after a reset.
 */
static int qlge_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	int status;
	int err;
	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		return status;
	err = __qlge_vlan_rx_add_vid(qdev, vid);
	/* Track even on hardware failure so qlge_restore_vlan() retries. */
	set_bit(vid, qdev->active_vlans);
	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
	return err;
}
/* Disable a VLAN id in the MAC address CAM (caller holds the MAC
 * address semaphore).  Returns 0 on success or the register error.
 */
static int __qlge_vlan_rx_kill_vid(struct ql_adapter *qdev, u16 vid)
{
	u32 enable_bit = 0;
	int status = ql_set_mac_addr_reg(qdev, (u8 *)&enable_bit,
					 MAC_ADDR_TYPE_VLAN, vid);

	if (status)
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to clear vlan address.\n");
	return status;
}
/* ndo_vlan_rx_kill_vid callback: remove @vid from the hardware under
 * the MAC address semaphore and drop it from active_vlans tracking.
 */
static int qlge_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	int status;
	int err;
	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		return status;
	err = __qlge_vlan_rx_kill_vid(qdev, vid);
	clear_bit(vid, qdev->active_vlans);
	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
	return err;
}
/* Re-register every tracked VLAN id with the hardware, e.g. after an
 * adapter reset wiped the CAM.  Best-effort: per-vid errors are only
 * logged by the helper.
 */
static void qlge_restore_vlan(struct ql_adapter *qdev)
{
	int status;
	u16 vid;
	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		return;
	for_each_set_bit(vid, qdev->active_vlans, VLAN_N_VID)
		__qlge_vlan_rx_add_vid(qdev, vid);
	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
}
/* MSI-X Multiple Vector Interrupt Handler for inbound completions.
 * Just schedules NAPI for the ring; all work happens in the poll.
 */
static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
{
	struct rx_ring *rx_ring = dev_id;
	napi_schedule(&rx_ring->napi);
	return IRQ_HANDLED;
}
/* This handles a fatal error, MPI activity, and the default
 * rx_ring in an MSI-X multiple vector environment.
 * In MSI/Legacy environment it also process the rest of
 * the rx_rings.
 *
 * Returns IRQ_HANDLED if any source (fatal error, MPI, rx work) was
 * ours, IRQ_NONE for a shared interrupt that wasn't.
 */
static irqreturn_t qlge_isr(int irq, void *dev_id)
{
	struct rx_ring *rx_ring = dev_id;
	struct ql_adapter *qdev = rx_ring->qdev;
	struct intr_context *intr_context = &qdev->intr_context[0];
	u32 var;
	int work_done = 0;
	/* irq_cnt non-zero means our completion interrupt is already
	 * disabled/claimed, so this firing belongs to a sharer.
	 */
	spin_lock(&qdev->hw_lock);
	if (atomic_read(&qdev->intr_context[0].irq_cnt)) {
		netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
			     "Shared Interrupt, Not ours!\n");
		spin_unlock(&qdev->hw_lock);
		return IRQ_NONE;
	}
	spin_unlock(&qdev->hw_lock);
	/* Disabling also returns the interrupt status. */
	var = ql_disable_completion_interrupt(qdev, intr_context->intr);
	/*
	 * Check for fatal error.
	 */
	if (var & STS_FE) {
		ql_queue_asic_error(qdev);
		netdev_err(qdev->ndev, "Got fatal error, STS = %x.\n", var);
		var = ql_read32(qdev, ERR_STS);
		netdev_err(qdev->ndev, "Resetting chip. "
					"Error Status Register = 0x%x\n", var);
		return IRQ_HANDLED;
	}
	/*
	 * Check MPI processor activity.
	 */
	if ((var & STS_PI) &&
		(ql_read32(qdev, INTR_MASK) & INTR_MASK_PI)) {
		/*
		 * We've got an async event or mailbox completion.
		 * Handle it and clear the source of the interrupt.
		 */
		netif_err(qdev, intr, qdev->ndev,
			  "Got MPI processor interrupt.\n");
		ql_disable_completion_interrupt(qdev, intr_context->intr);
		ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
		queue_delayed_work_on(smp_processor_id(),
				qdev->workqueue, &qdev->mpi_work, 0);
		work_done++;
	}
	/*
	 * Get the bit-mask that shows the active queues for this
	 * pass.  Compare it to the queues that this irq services
	 * and call napi if there's a match.
	 */
	var = ql_read32(qdev, ISR1);
	if (var & intr_context->irq_mask) {
		netif_info(qdev, intr, qdev->ndev,
			   "Waking handler for rx_ring[0].\n");
		ql_disable_completion_interrupt(qdev, intr_context->intr);
		napi_schedule(&rx_ring->napi);
		work_done++;
	}
	ql_enable_completion_interrupt(qdev, intr_context->intr);
	return work_done ? IRQ_HANDLED : IRQ_NONE;
}
/* Fill in a TSO IOCB for a GSO skb.
 *
 * Returns 1 if TSO fields were populated, 0 if the skb is not GSO
 * (caller then falls back to plain checksum setup), or a negative
 * errno if the cloned header could not be made writable.
 */
static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
{
if (skb_is_gso(skb)) {
int err;
/* The TCP checksum field is written below, so a cloned
 * header must be un-shared first.
 */
if (skb_header_cloned(skb)) {
err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
if (err)
return err;
}
mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
mac_iocb_ptr->total_hdrs_len =
cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb));
/* Network and transport header offsets are packed into one
 * little-endian 16-bit field.
 */
mac_iocb_ptr->net_trans_offset =
cpu_to_le16(skb_network_offset(skb) |
skb_transport_offset(skb)
<< OB_MAC_TRANSPORT_HDR_SHIFT);
mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO;
if (likely(skb->protocol == htons(ETH_P_IP))) {
struct iphdr *iph = ip_hdr(skb);
iph->check = 0;
mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
/* Seed the TCP checksum with the pseudo-header (length
 * 0); the hardware completes it per segment.
 */
tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
iph->daddr, 0,
IPPROTO_TCP,
0);
} else if (skb->protocol == htons(ETH_P_IPV6)) {
mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6;
tcp_hdr(skb)->check =
~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
&ipv6_hdr(skb)->daddr,
0, IPPROTO_TCP, 0);
}
return 1;
}
return 0;
}
/* Set up hardware checksum offload fields for a non-TSO skb.
 *
 * Only IPv4 is handled here (OB_MAC_TSO_IOCB_IP4 is set
 * unconditionally); the protocol is assumed to be TCP or UDP —
 * anything that is not TCP falls into the UDP branch.
 */
static void ql_hw_csum_setup(struct sk_buff *skb,
struct ob_mac_tso_iocb_req *mac_iocb_ptr)
{
int len;
struct iphdr *iph = ip_hdr(skb);
__sum16 *check;
mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
mac_iocb_ptr->net_trans_offset =
cpu_to_le16(skb_network_offset(skb) |
skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT);
mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
/* L4 payload length = total IP length minus the IP header. */
len = (ntohs(iph->tot_len) - (iph->ihl << 2));
if (likely(iph->protocol == IPPROTO_TCP)) {
check = &(tcp_hdr(skb)->check);
mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_TC;
mac_iocb_ptr->total_hdrs_len =
cpu_to_le16(skb_transport_offset(skb) +
(tcp_hdr(skb)->doff << 2));
} else {
check = &(udp_hdr(skb)->check);
mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_UC;
mac_iocb_ptr->total_hdrs_len =
cpu_to_le16(skb_transport_offset(skb) +
sizeof(struct udphdr));
}
/* Seed the checksum field with the pseudo-header sum; the
 * hardware fills in the rest.
 */
*check = ~csum_tcpudp_magic(iph->saddr,
iph->daddr, len, iph->protocol, 0);
}
/* ndo_start_xmit handler: build an outbound MAC IOCB for the skb,
 * map its fragments for DMA, and ring the tx doorbell.
 *
 * Returns NETDEV_TX_OK on success (or after consuming an skb that
 * failed padding/TSO prep), NETDEV_TX_BUSY when the ring is full or
 * mapping fails.
 */
static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
{
struct tx_ring_desc *tx_ring_desc;
struct ob_mac_iocb_req *mac_iocb_ptr;
struct ql_adapter *qdev = netdev_priv(ndev);
int tso;
struct tx_ring *tx_ring;
u32 tx_ring_idx = (u32) skb->queue_mapping;
tx_ring = &qdev->tx_ring[tx_ring_idx];
/* skb_padto frees the skb on failure, so OK is correct here. */
if (skb_padto(skb, ETH_ZLEN))
return NETDEV_TX_OK;
/* Need at least 2 free slots; otherwise stop the subqueue. */
if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
netif_info(qdev, tx_queued, qdev->ndev,
"%s: BUG! shutting down tx queue %d due to lack of resources.\n",
__func__, tx_ring_idx);
netif_stop_subqueue(ndev, tx_ring->wq_id);
tx_ring->tx_errors++;
return NETDEV_TX_BUSY;
}
tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
mac_iocb_ptr = tx_ring_desc->queue_entry;
memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr));
mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
mac_iocb_ptr->tid = tx_ring_desc->index;
/* We use the upper 32-bits to store the tx queue for this IO.
 * When we get the completion we can use it to establish the context.
 */
mac_iocb_ptr->txq_idx = tx_ring_idx;
tx_ring_desc->skb = skb;
mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len);
if (vlan_tx_tag_present(skb)) {
netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
"Adding a vlan tag %d.\n", vlan_tx_tag_get(skb));
mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
mac_iocb_ptr->vlan_tci = cpu_to_le16(vlan_tx_tag_get(skb));
}
/* ql_tso: 1 = TSO set up, 0 = not GSO, <0 = header prep failed. */
tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
if (tso < 0) {
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
} else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) {
ql_hw_csum_setup(skb,
(struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
}
if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) !=
NETDEV_TX_OK) {
netif_err(qdev, tx_queued, qdev->ndev,
"Could not map the segments.\n");
tx_ring->tx_errors++;
return NETDEV_TX_BUSY;
}
QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr);
/* Advance producer index with manual wrap-around. */
tx_ring->prod_idx++;
if (tx_ring->prod_idx == tx_ring->wq_len)
tx_ring->prod_idx = 0;
/* Ensure the IOCB is visible to the device before the doorbell. */
wmb();
ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
"tx queued, slot %d, len %d\n",
tx_ring->prod_idx, skb->len);
atomic_dec(&tx_ring->tx_count);
if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
netif_stop_subqueue(ndev, tx_ring->wq_id);
/* Re-check after stopping: completions may have freed slots
 * concurrently, in which case the queue is woken right away.
 */
if ((atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
/*
 * The queue got stopped because the tx_ring was full.
 * Wake it up, because it's now at least 25% empty.
 */
netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
}
return NETDEV_TX_OK;
}
/* Release the one-page shadow-register areas for the RX and TX rings.
 * Safe to call when either (or both) area was never allocated.
 */
static void ql_free_shadow_space(struct ql_adapter *qdev)
{
	if (qdev->rx_ring_shadow_reg_area != NULL) {
		pci_free_consistent(qdev->pdev, PAGE_SIZE,
				    qdev->rx_ring_shadow_reg_area,
				    qdev->rx_ring_shadow_reg_dma);
		qdev->rx_ring_shadow_reg_area = NULL;
	}
	if (qdev->tx_ring_shadow_reg_area != NULL) {
		pci_free_consistent(qdev->pdev, PAGE_SIZE,
				    qdev->tx_ring_shadow_reg_area,
				    qdev->tx_ring_shadow_reg_dma);
		qdev->tx_ring_shadow_reg_area = NULL;
	}
}
/* Allocate one zeroed DMA page each for the RX and TX shadow registers.
 * Returns 0 on success, -ENOMEM on failure (the RX page is unwound if
 * the TX allocation fails).
 */
static int ql_alloc_shadow_space(struct ql_adapter *qdev)
{
	qdev->rx_ring_shadow_reg_area =
		pci_alloc_consistent(qdev->pdev, PAGE_SIZE,
				     &qdev->rx_ring_shadow_reg_dma);
	if (!qdev->rx_ring_shadow_reg_area) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Allocation of RX shadow space failed.\n");
		return -ENOMEM;
	}
	memset(qdev->rx_ring_shadow_reg_area, 0, PAGE_SIZE);

	qdev->tx_ring_shadow_reg_area =
		pci_alloc_consistent(qdev->pdev, PAGE_SIZE,
				     &qdev->tx_ring_shadow_reg_dma);
	if (!qdev->tx_ring_shadow_reg_area) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Allocation of TX shadow space failed.\n");
		/* Unwind the RX page before reporting failure. */
		pci_free_consistent(qdev->pdev, PAGE_SIZE,
				    qdev->rx_ring_shadow_reg_area,
				    qdev->rx_ring_shadow_reg_dma);
		return -ENOMEM;
	}
	memset(qdev->tx_ring_shadow_reg_area, 0, PAGE_SIZE);
	return 0;
}
static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
{
struct tx_ring_desc *tx_ring_desc;
int i;
struct ob_mac_iocb_req *mac_iocb_ptr;
mac_iocb_ptr = tx_ring->wq_base;
tx_ring_desc = tx_ring->q;
for (i = 0; i < tx_ring->wq_len; i++) {
tx_ring_desc->index = i;
tx_ring_desc->skb = NULL;
tx_ring_desc->queue_entry = mac_iocb_ptr;
mac_iocb_ptr++;
tx_ring_desc++;
}
atomic_set(&tx_ring->tx_count, tx_ring->wq_len);
}
/* Undo ql_alloc_tx_resources(): release the DMA work queue and the
 * descriptor array for one tx ring. Tolerates partially allocated
 * state.
 */
static void ql_free_tx_resources(struct ql_adapter *qdev,
				 struct tx_ring *tx_ring)
{
	if (tx_ring->wq_base != NULL) {
		pci_free_consistent(qdev->pdev, tx_ring->wq_size,
				    tx_ring->wq_base, tx_ring->wq_base_dma);
		tx_ring->wq_base = NULL;
	}
	/* kfree(NULL) is a no-op, so no guard is needed. */
	kfree(tx_ring->q);
	tx_ring->q = NULL;
}
/* Allocate the DMA work queue and descriptor array for one tx ring.
 *
 * The work queue base must satisfy the WQ_ADDR_ALIGN alignment
 * constraint. Returns 0 on success, -ENOMEM on any failure (with
 * everything allocated so far freed).
 */
static int ql_alloc_tx_resources(struct ql_adapter *qdev,
				 struct tx_ring *tx_ring)
{
	tx_ring->wq_base =
		pci_alloc_consistent(qdev->pdev, tx_ring->wq_size,
				     &tx_ring->wq_base_dma);
	if ((tx_ring->wq_base == NULL) ||
	    tx_ring->wq_base_dma & WQ_ADDR_ALIGN)
		goto pci_alloc_err;
	/* kmalloc_array checks the len * size multiply for overflow and
	 * matches the allocation style used for the rx descriptor arrays.
	 */
	tx_ring->q = kmalloc_array(tx_ring->wq_len,
				   sizeof(struct tx_ring_desc), GFP_KERNEL);
	if (tx_ring->q == NULL)
		goto err;
	return 0;
err:
	pci_free_consistent(qdev->pdev, tx_ring->wq_size,
			    tx_ring->wq_base, tx_ring->wq_base_dma);
	tx_ring->wq_base = NULL;
pci_alloc_err:
	netif_err(qdev, ifup, qdev->ndev, "tx_ring alloc failed.\n");
	return -ENOMEM;
}
/* Release every large-buffer page chunk still held by this ring,
 * walking from the current index to the clean index. A DMA mapping
 * spans a whole block of chunks, so it is unmapped only on the
 * descriptor flagged as the last chunk of its block (one unmap of
 * ql_lbq_block_size() per block).
 */
static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
struct bq_desc *lbq_desc;
uint32_t curr_idx, clean_idx;
curr_idx = rx_ring->lbq_curr_idx;
clean_idx = rx_ring->lbq_clean_idx;
while (curr_idx != clean_idx) {
lbq_desc = &rx_ring->lbq[curr_idx];
if (lbq_desc->p.pg_chunk.last_flag) {
pci_unmap_page(qdev->pdev,
lbq_desc->p.pg_chunk.map,
ql_lbq_block_size(qdev),
PCI_DMA_FROMDEVICE);
lbq_desc->p.pg_chunk.last_flag = 0;
}
/* Drop this descriptor's reference on the page. */
put_page(lbq_desc->p.pg_chunk.page);
lbq_desc->p.pg_chunk.page = NULL;
if (++curr_idx == rx_ring->lbq_len)
curr_idx = 0;
}
/* Also release the ring's partially consumed master chunk, if any. */
if (rx_ring->pg_chunk.page) {
pci_unmap_page(qdev->pdev, rx_ring->pg_chunk.map,
ql_lbq_block_size(qdev), PCI_DMA_FROMDEVICE);
put_page(rx_ring->pg_chunk.page);
rx_ring->pg_chunk.page = NULL;
}
}
/* Unmap and free every skb still parked in this ring's small buffer
 * queue. The caller (ql_free_rx_buffers) only invokes this when
 * rx_ring->sbq is non-NULL.
 */
static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	int i;
	struct bq_desc *sbq_desc;

	for (i = 0; i < rx_ring->sbq_len; i++) {
		/* &rx_ring->sbq[i] can never be NULL for a non-NULL sbq,
		 * so the old NULL check here was dead code and is removed.
		 */
		sbq_desc = &rx_ring->sbq[i];
		if (sbq_desc->p.skb) {
			pci_unmap_single(qdev->pdev,
					 dma_unmap_addr(sbq_desc, mapaddr),
					 dma_unmap_len(sbq_desc, maplen),
					 PCI_DMA_FROMDEVICE);
			dev_kfree_skb(sbq_desc->p.skb);
			sbq_desc->p.skb = NULL;
		}
	}
}
/* Free all large and small rx buffers associated
 * with the completion queues for this device.
 */
static void ql_free_rx_buffers(struct ql_adapter *qdev)
{
	int i;

	for (i = 0; i < qdev->rx_ring_count; i++) {
		struct rx_ring *rx_ring = &qdev->rx_ring[i];

		if (rx_ring->lbq)
			ql_free_lbq_buffers(qdev, rx_ring);
		if (rx_ring->sbq)
			ql_free_sbq_buffers(qdev, rx_ring);
	}
}
/* Replenish the buffer queues on every ring that receives frames,
 * i.e. everything except TX completion rings.
 */
static void ql_alloc_rx_buffers(struct ql_adapter *qdev)
{
	int i;

	for (i = 0; i < qdev->rx_ring_count; i++) {
		struct rx_ring *rx_ring = &qdev->rx_ring[i];

		if (rx_ring->type == TX_Q)
			continue;
		ql_update_buffer_queues(qdev, rx_ring);
	}
}
/* Reset the large-buffer descriptor array: zero every descriptor and
 * point each one at its slot in the DMA address table (lbq_base).
 */
static void ql_init_lbq_ring(struct ql_adapter *qdev,
			     struct rx_ring *rx_ring)
{
	__le64 *bq = rx_ring->lbq_base;
	int i;

	memset(rx_ring->lbq, 0, rx_ring->lbq_len * sizeof(struct bq_desc));
	for (i = 0; i < rx_ring->lbq_len; i++) {
		struct bq_desc *lbq_desc = &rx_ring->lbq[i];

		memset(lbq_desc, 0, sizeof(*lbq_desc));
		lbq_desc->index = i;
		lbq_desc->addr = &bq[i];
	}
}
/* Reset the small-buffer descriptor array: zero every descriptor and
 * point each one at its slot in the DMA address table (sbq_base).
 */
static void ql_init_sbq_ring(struct ql_adapter *qdev,
			     struct rx_ring *rx_ring)
{
	__le64 *bq = rx_ring->sbq_base;
	int i;

	memset(rx_ring->sbq, 0, rx_ring->sbq_len * sizeof(struct bq_desc));
	for (i = 0; i < rx_ring->sbq_len; i++) {
		struct bq_desc *sbq_desc = &rx_ring->sbq[i];

		memset(sbq_desc, 0, sizeof(*sbq_desc));
		sbq_desc->index = i;
		sbq_desc->addr = &bq[i];
	}
}
/* Undo ql_alloc_rx_resources() for one ring: release the small and
 * large buffer queues (DMA regions plus control blocks) and the
 * completion queue itself. Tolerates partially allocated state.
 */
static void ql_free_rx_resources(struct ql_adapter *qdev,
				 struct rx_ring *rx_ring)
{
	/* Small buffer queue DMA region. */
	if (rx_ring->sbq_base != NULL) {
		pci_free_consistent(qdev->pdev, rx_ring->sbq_size,
				    rx_ring->sbq_base, rx_ring->sbq_base_dma);
		rx_ring->sbq_base = NULL;
	}
	/* Small buffer control blocks; kfree(NULL) is harmless. */
	kfree(rx_ring->sbq);
	rx_ring->sbq = NULL;
	/* Large buffer queue DMA region. */
	if (rx_ring->lbq_base != NULL) {
		pci_free_consistent(qdev->pdev, rx_ring->lbq_size,
				    rx_ring->lbq_base, rx_ring->lbq_base_dma);
		rx_ring->lbq_base = NULL;
	}
	/* Large buffer control blocks. */
	kfree(rx_ring->lbq);
	rx_ring->lbq = NULL;
	/* Completion queue DMA region. */
	if (rx_ring->cq_base != NULL) {
		pci_free_consistent(qdev->pdev, rx_ring->cq_size,
				    rx_ring->cq_base, rx_ring->cq_base_dma);
		rx_ring->cq_base = NULL;
	}
}
/* Allocate queues and buffers for this completion queue based on the
 * values in the parameter structure: the completion queue itself plus,
 * when their lengths are non-zero, the small and large buffer queues
 * (each a DMA region and a control-block array).
 *
 * Returns 0 on success; -ENOMEM on failure, after freeing whatever
 * was already allocated via ql_free_rx_resources().
 */
static int ql_alloc_rx_resources(struct ql_adapter *qdev,
				 struct rx_ring *rx_ring)
{
	/* Completion queue DMA region. */
	rx_ring->cq_base = pci_alloc_consistent(qdev->pdev, rx_ring->cq_size,
						&rx_ring->cq_base_dma);
	if (!rx_ring->cq_base) {
		netif_err(qdev, ifup, qdev->ndev, "rx_ring alloc failed.\n");
		return -ENOMEM;
	}

	if (rx_ring->sbq_len) {
		/* Small buffer queue DMA region. */
		rx_ring->sbq_base =
			pci_alloc_consistent(qdev->pdev, rx_ring->sbq_size,
					     &rx_ring->sbq_base_dma);
		if (!rx_ring->sbq_base) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Small buffer queue allocation failed.\n");
			goto err_mem;
		}
		/* Small buffer queue control blocks. */
		rx_ring->sbq = kmalloc_array(rx_ring->sbq_len,
					     sizeof(struct bq_desc),
					     GFP_KERNEL);
		if (!rx_ring->sbq)
			goto err_mem;
		ql_init_sbq_ring(qdev, rx_ring);
	}

	if (rx_ring->lbq_len) {
		/* Large buffer queue DMA region. */
		rx_ring->lbq_base =
			pci_alloc_consistent(qdev->pdev, rx_ring->lbq_size,
					     &rx_ring->lbq_base_dma);
		if (!rx_ring->lbq_base) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Large buffer queue allocation failed.\n");
			goto err_mem;
		}
		/* Large buffer queue control blocks. */
		rx_ring->lbq = kmalloc_array(rx_ring->lbq_len,
					     sizeof(struct bq_desc),
					     GFP_KERNEL);
		if (!rx_ring->lbq)
			goto err_mem;
		ql_init_lbq_ring(qdev, rx_ring);
	}
	return 0;

err_mem:
	ql_free_rx_resources(qdev, rx_ring);
	return -ENOMEM;
}
/* Free any skbs left in flight on the tx rings (e.g. when the
 * interface goes down), unmapping their DMA segments first.
 */
static void ql_tx_ring_clean(struct ql_adapter *qdev)
{
	int i, j;

	for (j = 0; j < qdev->tx_ring_count; j++) {
		struct tx_ring *tx_ring = &qdev->tx_ring[j];

		for (i = 0; i < tx_ring->wq_len; i++) {
			struct tx_ring_desc *tx_ring_desc = &tx_ring->q[i];

			if (!tx_ring_desc || !tx_ring_desc->skb)
				continue;
			netif_err(qdev, ifdown, qdev->ndev,
				  "Freeing lost SKB %p, from queue %d, index %d.\n",
				  tx_ring_desc->skb, j,
				  tx_ring_desc->index);
			ql_unmap_send(qdev, tx_ring_desc,
				      tx_ring_desc->map_cnt);
			dev_kfree_skb(tx_ring_desc->skb);
			tx_ring_desc->skb = NULL;
		}
	}
}
/* Tear down all per-ring resources and the shadow register area.
 * Safe on partially initialized state: every helper called here
 * tolerates NULL/already-freed members.
 */
static void ql_free_mem_resources(struct ql_adapter *qdev)
{
	int ring;

	for (ring = 0; ring < qdev->tx_ring_count; ring++)
		ql_free_tx_resources(qdev, &qdev->tx_ring[ring]);
	for (ring = 0; ring < qdev->rx_ring_count; ring++)
		ql_free_rx_resources(qdev, &qdev->rx_ring[ring]);
	ql_free_shadow_space(qdev);
}
/* Allocate the shadow-register area, then all rx and tx ring
 * resources. On any failure everything allocated so far is torn
 * down and -ENOMEM is returned.
 */
static int ql_alloc_mem_resources(struct ql_adapter *qdev)
{
	int ring;

	/* Shadow registers first; the ring start-up code points into them. */
	if (ql_alloc_shadow_space(qdev))
		return -ENOMEM;
	for (ring = 0; ring < qdev->rx_ring_count; ring++) {
		if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[ring])) {
			netif_err(qdev, ifup, qdev->ndev,
				  "RX resource allocation failed.\n");
			goto err_mem;
		}
	}
	for (ring = 0; ring < qdev->tx_ring_count; ring++) {
		if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[ring])) {
			netif_err(qdev, ifup, qdev->ndev,
				  "TX resource allocation failed.\n");
			goto err_mem;
		}
	}
	return 0;
err_mem:
	ql_free_mem_resources(qdev);
	return -ENOMEM;
}
/* Set up the rx ring control block and pass it to the chip.
 * The control block is defined as
 * "Completion Queue Initialization Control Block", or cqicb.
 *
 * Carves out this ring's slice of the shared shadow-register page,
 * wires up the doorbell registers, fills in the CQICB (including the
 * lbq/sbq indirection tables), and downloads it with ql_write_cfg().
 * Returns 0 on success or the ql_write_cfg() error.
 */
static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
struct cqicb *cqicb = &rx_ring->cqicb;
void *shadow_reg = qdev->rx_ring_shadow_reg_area +
(rx_ring->cq_id * RX_RING_SHADOW_SPACE);
u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma +
(rx_ring->cq_id * RX_RING_SHADOW_SPACE);
/* Doorbell pages for rx rings start at page 128 — assumed from this
 * offset computation; confirm against the hardware manual.
 */
void __iomem *doorbell_area =
qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
int err = 0;
u16 bq_len;
u64 tmp;
__le64 *base_indirect_ptr;
int page_entries;
/* Set up the shadow registers for this ring: first a u64 for the
 * producer index, then the lbq and sbq indirection tables.
 */
rx_ring->prod_idx_sh_reg = shadow_reg;
rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
*rx_ring->prod_idx_sh_reg = 0;
shadow_reg += sizeof(u64);
shadow_reg_dma += sizeof(u64);
rx_ring->lbq_base_indirect = shadow_reg;
rx_ring->lbq_base_indirect_dma = shadow_reg_dma;
shadow_reg += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
shadow_reg_dma += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
rx_ring->sbq_base_indirect = shadow_reg;
rx_ring->sbq_base_indirect_dma = shadow_reg_dma;
/* PCI doorbell mem area + 0x00 for consumer index register */
rx_ring->cnsmr_idx_db_reg = (u32 __iomem *) doorbell_area;
rx_ring->cnsmr_idx = 0;
rx_ring->curr_entry = rx_ring->cq_base;
/* PCI doorbell mem area + 0x04 for valid register */
rx_ring->valid_db_reg = doorbell_area + 0x04;
/* PCI doorbell mem area + 0x18 for large buffer consumer */
rx_ring->lbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x18);
/* PCI doorbell mem area + 0x1c */
rx_ring->sbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x1c);
memset((void *)cqicb, 0, sizeof(struct cqicb));
cqicb->msix_vect = rx_ring->irq;
/* A 16-bit length field: 65536 is encoded as 0. */
bq_len = (rx_ring->cq_len == 65536) ? 0 : (u16) rx_ring->cq_len;
cqicb->len = cpu_to_le16(bq_len | LEN_V | LEN_CPP_CONT);
cqicb->addr = cpu_to_le64(rx_ring->cq_base_dma);
cqicb->prod_idx_addr = cpu_to_le64(rx_ring->prod_idx_sh_reg_dma);
/*
 * Set up the control block load flags.
 */
cqicb->flags = FLAGS_LC | /* Load queue base address */
FLAGS_LV | /* Load MSI-X vector */
FLAGS_LI; /* Load irq delay values */
if (rx_ring->lbq_len) {
cqicb->flags |= FLAGS_LL; /* Load lbq values */
/* Fill the indirection table with the DMA address of each
 * DB page of the large buffer queue.
 */
tmp = (u64)rx_ring->lbq_base_dma;
base_indirect_ptr = rx_ring->lbq_base_indirect;
page_entries = 0;
do {
*base_indirect_ptr = cpu_to_le64(tmp);
tmp += DB_PAGE_SIZE;
base_indirect_ptr++;
page_entries++;
} while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
cqicb->lbq_addr =
cpu_to_le64(rx_ring->lbq_base_indirect_dma);
bq_len = (rx_ring->lbq_buf_size == 65536) ? 0 :
(u16) rx_ring->lbq_buf_size;
cqicb->lbq_buf_size = cpu_to_le16(bq_len);
bq_len = (rx_ring->lbq_len == 65536) ? 0 :
(u16) rx_ring->lbq_len;
cqicb->lbq_len = cpu_to_le16(bq_len);
rx_ring->lbq_prod_idx = 0;
rx_ring->lbq_curr_idx = 0;
rx_ring->lbq_clean_idx = 0;
rx_ring->lbq_free_cnt = rx_ring->lbq_len;
}
if (rx_ring->sbq_len) {
cqicb->flags |= FLAGS_LS; /* Load sbq values */
/* Same indirection-table setup for the small buffer queue. */
tmp = (u64)rx_ring->sbq_base_dma;
base_indirect_ptr = rx_ring->sbq_base_indirect;
page_entries = 0;
do {
*base_indirect_ptr = cpu_to_le64(tmp);
tmp += DB_PAGE_SIZE;
base_indirect_ptr++;
page_entries++;
} while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->sbq_len));
cqicb->sbq_addr =
cpu_to_le64(rx_ring->sbq_base_indirect_dma);
cqicb->sbq_buf_size =
cpu_to_le16((u16)(rx_ring->sbq_buf_size));
bq_len = (rx_ring->sbq_len == 65536) ? 0 :
(u16) rx_ring->sbq_len;
cqicb->sbq_len = cpu_to_le16(bq_len);
rx_ring->sbq_prod_idx = 0;
rx_ring->sbq_curr_idx = 0;
rx_ring->sbq_clean_idx = 0;
rx_ring->sbq_free_cnt = rx_ring->sbq_len;
}
switch (rx_ring->type) {
case TX_Q:
/* TX completion rings use the tx coalescing parameters. */
cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames);
break;
case RX_Q:
/* Inbound completion handling rx_rings run in
 * separate NAPI contexts.
 */
netif_napi_add(qdev->ndev, &rx_ring->napi, ql_napi_poll_msix,
64);
cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
break;
default:
netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
"Invalid rx_ring->type = %d.\n", rx_ring->type);
}
/* Download the completed CQICB to the chip. */
err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
CFG_LCQ, rx_ring->cq_id);
if (err) {
netif_err(qdev, ifup, qdev->ndev, "Failed to load CQICB.\n");
return err;
}
return err;
}
/* Set up one tx ring's Work Queue Initialization Control Block
 * (wqicb), assign its doorbell and shadow registers, reset the ring's
 * software state, and download the control block to the chip.
 * Returns 0 on success or the ql_write_cfg() error.
 */
static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
{
/* NOTE(review): this cast assumes struct wqicb lives at offset 0 of
 * struct tx_ring — confirm against the structure definition.
 */
struct wqicb *wqicb = (struct wqicb *)tx_ring;
void __iomem *doorbell_area =
qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id);
void *shadow_reg = qdev->tx_ring_shadow_reg_area +
(tx_ring->wq_id * sizeof(u64));
u64 shadow_reg_dma = qdev->tx_ring_shadow_reg_dma +
(tx_ring->wq_id * sizeof(u64));
int err = 0;
/*
 * Assign doorbell registers for this tx_ring.
 */
/* TX PCI doorbell mem area for tx producer index */
tx_ring->prod_idx_db_reg = (u32 __iomem *) doorbell_area;
tx_ring->prod_idx = 0;
/* TX PCI doorbell mem area + 0x04 */
tx_ring->valid_db_reg = doorbell_area + 0x04;
/*
 * Assign shadow registers for this tx_ring.
 */
tx_ring->cnsmr_idx_sh_reg = shadow_reg;
tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma;
wqicb->len = cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT);
wqicb->flags = cpu_to_le16(Q_FLAGS_LC |
Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO);
/* Completion queue that reports this ring's completions. */
wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id);
wqicb->rid = 0;
wqicb->addr = cpu_to_le64(tx_ring->wq_base_dma)
;
wqicb->cnsmr_idx_addr = cpu_to_le64(tx_ring->cnsmr_idx_sh_reg_dma);
/* Reset software descriptors before the hardware goes live. */
ql_init_tx_ring(qdev, tx_ring);
err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ,
(u16) tx_ring->wq_id);
if (err) {
netif_err(qdev, ifup, qdev->ndev, "Failed to load tx_ring.\n");
return err;
}
return err;
}
/* Undo whichever interrupt mode ql_enable_msix() selected: MSI-X
 * (including the vector-entry array) or plain MSI. Legacy mode needs
 * no teardown.
 */
static void ql_disable_msix(struct ql_adapter *qdev)
{
	if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
		pci_disable_msix(qdev->pdev);
		clear_bit(QL_MSIX_ENABLED, &qdev->flags);
		kfree(qdev->msi_x_entry);
		qdev->msi_x_entry = NULL;
		return;
	}
	if (test_bit(QL_MSI_ENABLED, &qdev->flags)) {
		pci_disable_msi(qdev->pdev);
		clear_bit(QL_MSI_ENABLED, &qdev->flags);
	}
}
/* We start by trying to get the number of vectors
 * stored in qdev->intr_count. If we don't get that
 * many then we reduce the count and try again.
 *
 * Fallback chain: MSI-X -> MSI -> legacy INTx. Note this function
 * writes the module-scope qlge_irq_type variable to record the mode
 * that actually worked, and updates qdev->intr_count to the number of
 * vectors obtained (1 for MSI/legacy).
 */
static void ql_enable_msix(struct ql_adapter *qdev)
{
int i, err;
/* Get the MSIX vectors. */
if (qlge_irq_type == MSIX_IRQ) {
/* Try to alloc space for the msix struct,
 * if it fails then go to MSI/legacy.
 */
qdev->msi_x_entry = kcalloc(qdev->intr_count,
sizeof(struct msix_entry),
GFP_KERNEL);
if (!qdev->msi_x_entry) {
qlge_irq_type = MSI_IRQ;
goto msi;
}
for (i = 0; i < qdev->intr_count; i++)
qdev->msi_x_entry[i].entry = i;
/* Accept anywhere from 1 to intr_count vectors. */
err = pci_enable_msix_range(qdev->pdev, qdev->msi_x_entry,
1, qdev->intr_count);
if (err < 0) {
kfree(qdev->msi_x_entry);
qdev->msi_x_entry = NULL;
netif_warn(qdev, ifup, qdev->ndev,
"MSI-X Enable failed, trying MSI.\n");
qlge_irq_type = MSI_IRQ;
} else {
/* On success err is the vector count granted. */
qdev->intr_count = err;
set_bit(QL_MSIX_ENABLED, &qdev->flags);
netif_info(qdev, ifup, qdev->ndev,
"MSI-X Enabled, got %d vectors.\n",
qdev->intr_count);
return;
}
}
msi:
qdev->intr_count = 1;
if (qlge_irq_type == MSI_IRQ) {
if (!pci_enable_msi(qdev->pdev)) {
set_bit(QL_MSI_ENABLED, &qdev->flags);
netif_info(qdev, ifup, qdev->ndev,
"Running with MSI interrupts.\n");
return;
}
}
/* Last resort: legacy shared interrupt. */
qlge_irq_type = LEG_IRQ;
netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
"Running with legacy interrupts.\n");
}
/* Each vector services 1 RSS ring and 1 or more
* TX completion rings. This function loops through
* the TX completion rings and assigns the vector that
* will service it. An example would be if there are
* 2 vectors (so 2 RSS rings) and 8 TX completion rings.
* This would mean that vector 0 would service RSS ring 0
* and TX completion rings 0,1,2 and 3. Vector 1 would
* service RSS ring 1 and TX completion rings 4,5,6 and 7.
*/
static void ql_set_tx_vect(struct ql_adapter *qdev)
{
	u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
	int i, vect, grp;

	if (!test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
		/* Single vector: every ring is serviced by irq 0. */
		for (i = 0; i < qdev->rx_ring_count; i++)
			qdev->rx_ring[i].irq = 0;
		return;
	}
	/* Walk the TX completion rings (they follow the RSS rings in
	 * rx_ring[]), handing out tx_rings_per_vector rings per vector.
	 */
	for (i = qdev->rss_ring_count, vect = 0, grp = 0;
	     i < qdev->rx_ring_count; i++) {
		if (grp == tx_rings_per_vector) {
			vect++;
			grp = 0;
		}
		qdev->rx_ring[i].irq = vect;
		grp++;
	}
}
/* Set the interrupt mask for this vector. Each vector
 * will service 1 RSS ring and 1 or more TX completion
 * rings. This function sets up a bit mask per vector
 * that indicates which rings it services.
 */
static void ql_set_irq_mask(struct ql_adapter *qdev, struct intr_context *ctx)
{
	u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
	int vect = ctx->intr;
	int j;

	if (!test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
		/* Single vector: fold every ring's cq_id into the mask. */
		for (j = 0; j < qdev->rx_ring_count; j++)
			ctx->irq_mask |= (1 << qdev->rx_ring[j].cq_id);
		return;
	}
	/* This vector owns one RSS ring plus its group of TX
	 * completion rings.
	 */
	ctx->irq_mask = (1 << qdev->rx_ring[vect].cq_id);
	for (j = 0; j < tx_rings_per_vector; j++) {
		int ring = qdev->rss_ring_count +
			   (vect * tx_rings_per_vector) + j;

		ctx->irq_mask |= (1 << qdev->rx_ring[ring].cq_id);
	}
}
/*
 * Here we build the intr_context structures based on
 * our rx_ring count and intr vector count.
 * The intr_context structure is used to hook each vector
 * to possibly different handlers.
 *
 * In MSI-X mode each vector gets its own context, mask, and handler
 * (vector 0 gets the full qlge_isr, the rest the lightweight
 * qlge_msix_rx_isr); in MSI/legacy mode a single context serves all
 * rings via qlge_isr.
 */
static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev)
{
int i = 0;
struct intr_context *intr_context = &qdev->intr_context[0];
if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
/* Each rx_ring has it's
 * own intr_context since we have separate
 * vectors for each queue.
 */
for (i = 0; i < qdev->intr_count; i++, intr_context++) {
qdev->rx_ring[i].irq = i;
intr_context->intr = i;
intr_context->qdev = qdev;
/* Set up this vector's bit-mask that indicates
 * which queues it services.
 */
ql_set_irq_mask(qdev, intr_context);
/*
 * We set up each vectors enable/disable/read bits so
 * there's no bit/mask calculations in the critical path.
 */
intr_context->intr_en_mask =
INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK | INTR_EN_IHD
| i;
intr_context->intr_dis_mask =
INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
INTR_EN_IHD | i;
intr_context->intr_read_mask =
INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD |
i;
if (i == 0) {
/* The first vector/queue handles
 * broadcast/multicast, fatal errors,
 * and firmware events. This in addition
 * to normal inbound NAPI processing.
 */
intr_context->handler = qlge_isr;
sprintf(intr_context->name, "%s-rx-%d",
qdev->ndev->name, i);
} else {
/*
 * Inbound queues handle unicast frames only.
 */
intr_context->handler = qlge_msix_rx_isr;
sprintf(intr_context->name, "%s-rx-%d",
qdev->ndev->name, i);
}
}
} else {
/*
 * All rx_rings use the same intr_context since
 * there is only one vector.
 */
intr_context->intr = 0;
intr_context->qdev = qdev;
/*
 * We set up each vectors enable/disable/read bits so
 * there's no bit/mask calculations in the critical path.
 */
intr_context->intr_en_mask =
INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_ENABLE;
intr_context->intr_dis_mask =
INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
INTR_EN_TYPE_DISABLE;
intr_context->intr_read_mask =
INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_READ;
/*
 * Single interrupt means one handler for all rings.
 */
intr_context->handler = qlge_isr;
sprintf(intr_context->name, "%s-single_irq", qdev->ndev->name);
/* Set up this vector's bit-mask that indicates
 * which queues it services. In this case there is
 * a single vector so it will service all RSS and
 * TX completion rings.
 */
ql_set_irq_mask(qdev, intr_context);
}
/* Tell the TX completion rings which MSIx vector
 * they will be using.
 */
ql_set_tx_vect(qdev);
}
/* Release every interrupt that ql_request_irq() hooked, then tear
 * down MSI-X/MSI state.
 */
static void ql_free_irq(struct ql_adapter *qdev)
{
	struct intr_context *intr_context = &qdev->intr_context[0];
	int i;

	for (i = 0; i < qdev->intr_count; i++, intr_context++) {
		if (!intr_context->hooked)
			continue;
		if (test_bit(QL_MSIX_ENABLED, &qdev->flags))
			free_irq(qdev->msi_x_entry[i].vector,
				 &qdev->rx_ring[i]);
		else
			free_irq(qdev->pdev->irq, &qdev->rx_ring[0]);
	}
	ql_disable_msix(qdev);
}
/* Hook an interrupt handler for every vector resolved by
 * ql_resolve_queues_to_irqs(): one request_irq() per MSI-X vector, or
 * a single (possibly shared) line for MSI/legacy. On any failure all
 * previously hooked vectors are released.
 *
 * Returns 0 on success or the request_irq() error.
 */
static int ql_request_irq(struct ql_adapter *qdev)
{
	int i;
	int status = 0;
	struct pci_dev *pdev = qdev->pdev;
	struct intr_context *intr_context = &qdev->intr_context[0];

	ql_resolve_queues_to_irqs(qdev);

	for (i = 0; i < qdev->intr_count; i++, intr_context++) {
		atomic_set(&intr_context->irq_cnt, 0);
		if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
			status = request_irq(qdev->msi_x_entry[i].vector,
					     intr_context->handler,
					     0,
					     intr_context->name,
					     &qdev->rx_ring[i]);
			if (status) {
				netif_err(qdev, ifup, qdev->ndev,
					  "Failed request for MSIX interrupt %d.\n",
					  i);
				goto err_irq;
			}
		} else {
			netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
				     "trying msi or legacy interrupts.\n");
			netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
				     "%s: irq = %d.\n", __func__, pdev->irq);
			netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
				     "%s: context->name = %s.\n", __func__,
				     intr_context->name);
			netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
				     "%s: dev_id = 0x%p.\n", __func__,
				     &qdev->rx_ring[0]);
			/* Legacy lines may be shared; MSI is exclusive. */
			status =
				request_irq(pdev->irq, qlge_isr,
					    test_bit(QL_MSI_ENABLED,
						     &qdev->
						     flags) ? 0 : IRQF_SHARED,
					    intr_context->name, &qdev->rx_ring[0]);
			if (status)
				goto err_irq;
			netif_err(qdev, ifup, qdev->ndev,
				  "Hooked intr %d, queue type %s, with name %s.\n",
				  i,
				  qdev->rx_ring[0].type == DEFAULT_Q ?
				  "DEFAULT_Q" :
				  qdev->rx_ring[0].type == TX_Q ? "TX_Q" :
				  qdev->rx_ring[0].type == RX_Q ? "RX_Q" : "",
				  intr_context->name);
		}
		intr_context->hooked = 1;
	}
	return status;
err_irq:
	/* Fixed: message previously ended with a literal "/n" typo. */
	netif_err(qdev, ifup, qdev->ndev, "Failed to get the interrupts!!!\n");
	ql_free_irq(qdev);
	return status;
}
/* Build and download the RSS Initialization Control Block (ricb):
 * hash keys, a 1024-entry indirection table mapping hash values to
 * completion queues, and the enable flags.
 * Returns 0 on success or the ql_write_cfg() error.
 */
static int ql_start_rss(struct ql_adapter *qdev)
{
static const u8 init_hash_seed[] = {
0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa
};
struct ricb *ricb = &qdev->ricb;
int status = 0;
int i;
u8 *hash_id = (u8 *) ricb->hash_cq_id;
memset((void *)ricb, 0, sizeof(*ricb));
ricb->base_cq = RSS_L4K;
ricb->flags =
(RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RT4 | RSS_RT6);
/* 10-bit hash mask -> 1024 indirection entries. */
ricb->mask = cpu_to_le16((u16)(0x3ff));
/*
 * Fill out the Indirection Table.
 * NOTE(review): `i & (rss_ring_count - 1)` only spreads entries
 * evenly when rss_ring_count is a power of two — confirm callers
 * guarantee that.
 */
for (i = 0; i < 1024; i++)
hash_id[i] = (i & (qdev->rss_ring_count - 1));
/* 40-byte Toeplitz-style seed for IPv6, first 16 bytes for IPv4. */
memcpy((void *)&ricb->ipv6_hash_key[0], init_hash_seed, 40);
memcpy((void *)&ricb->ipv4_hash_key[0], init_hash_seed, 16);
status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0);
if (status) {
netif_err(qdev, ifup, qdev->ndev, "Failed to load RICB.\n");
return status;
}
return status;
}
/* Zero all 16 routing-table slots. The RT_IDX hardware semaphore is
 * held for the duration; returns 0 or the first failure code.
 */
static int ql_clear_routing_entries(struct ql_adapter *qdev)
{
	int err;
	int slot;

	err = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
	if (err)
		return err;
	for (slot = 0; slot < 16; slot++) {
		err = ql_set_routing_reg(qdev, slot, 0, 0);
		if (err) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Failed to init routing register for CAM packets.\n");
			break;
		}
	}
	ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
	return err;
}
/* Initialize the frame-to-queue routing. */
/* Programs the routing slots under the RT_IDX semaphore: checksum
 * error slots, broadcast, optional RSS match (only with >1 inbound
 * queue), and finally the CAM-hit slot. Note the semaphore is taken
 * twice: once inside ql_clear_routing_entries(), then again here for
 * the register writes. Returns 0 or the first failure code.
 */
static int ql_route_initialize(struct ql_adapter *qdev)
{
int status = 0;
/* Clear all the entries in the routing table. */
status = ql_clear_routing_entries(qdev);
if (status)
return status;
status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
if (status)
return status;
status = ql_set_routing_reg(qdev, RT_IDX_IP_CSUM_ERR_SLOT,
RT_IDX_IP_CSUM_ERR, 1);
if (status) {
netif_err(qdev, ifup, qdev->ndev,
"Failed to init routing register "
"for IP CSUM error packets.\n");
goto exit;
}
status = ql_set_routing_reg(qdev, RT_IDX_TCP_UDP_CSUM_ERR_SLOT,
RT_IDX_TU_CSUM_ERR, 1);
if (status) {
netif_err(qdev, ifup, qdev->ndev,
"Failed to init routing register "
"for TCP/UDP CSUM error packets.\n");
goto exit;
}
status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
if (status) {
netif_err(qdev, ifup, qdev->ndev,
"Failed to init routing register for broadcast packets.\n");
goto exit;
}
/* If we have more than one inbound queue, then turn on RSS in the
 * routing block.
 */
if (qdev->rss_ring_count > 1) {
status = ql_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT,
RT_IDX_RSS_MATCH, 1);
if (status) {
netif_err(qdev, ifup, qdev->ndev,
"Failed to init routing register for MATCH RSS packets.\n");
goto exit;
}
}
/* CAM hit last: failure here is logged but still falls through to
 * the common unlock/return path.
 */
status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT,
RT_IDX_CAM_HIT, 1);
if (status)
netif_err(qdev, ifup, qdev->ndev,
"Failed to init routing register for CAM packets.\n");
exit:
ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
return status;
}
/* Program the CAM MAC address (set or cleared depending on link
 * state) and then the frame routing table.
 * Returns 0 or the first failure code.
 */
int ql_cam_route_initialize(struct ql_adapter *qdev)
{
	int status;
	int set;

	/* Use the current link-up status bit to decide whether the MAC
	 * address is being set or cleared in the CAM.
	 */
	set = ql_read32(qdev, STS) & qdev->port_link_up;
	status = ql_set_mac_addr(qdev, set);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev, "Failed to init mac address.\n");
		return status;
	}
	status = ql_route_initialize(qdev);
	if (status)
		netif_err(qdev, ifup, qdev->ndev, "Failed to init routing table.\n");
	return status;
}
/* Bring the adapter hardware to an operational state.
 *
 * Programs the global control registers (error halt, VLAN/default-queue
 * behavior, MPI interrupt mask, function control, header split, RX
 * routing), then starts every RX ring, RSS (if multiple inbound queues),
 * every TX ring, the port, and the CAM/routing tables, and finally
 * enables NAPI on the RSS rings. Register write order follows the
 * hardware bring-up sequence and must not be rearranged.
 * Returns 0 on success or the first failing step's status.
 */
static int ql_adapter_initialize(struct ql_adapter *qdev)
{
    u32 value, mask;
    int i;
    int status = 0;

    /*
     * Set up the System register to halt on errors.
     * Upper 16 bits of these registers are write-enable masks.
     */
    value = SYS_EFE | SYS_FAE;
    mask = value << 16;
    ql_write32(qdev, SYS, mask | value);

    /* Set the default queue, and VLAN behavior. */
    value = NIC_RCV_CFG_DFQ;
    mask = NIC_RCV_CFG_DFQ_MASK;
    if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX) {
        value |= NIC_RCV_CFG_RV;
        mask |= (NIC_RCV_CFG_RV << 16);
    }
    ql_write32(qdev, NIC_RCV_CFG, (mask | value));

    /* Set the MPI interrupt to enabled. */
    ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);

    /* Enable the function, set pagesize, enable error checking. */
    value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
        FSC_EC | FSC_VM_PAGE_4K;
    value |= SPLT_SETTING;

    /* Set/clear header splitting. */
    mask = FSC_VM_PAGESIZE_MASK |
        FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
    ql_write32(qdev, FSC, mask | value);
    ql_write32(qdev, SPLT_HDR, SPLT_LEN);

    /* Set RX packet routing to use port/pci function on which the
     * packet arrived on in addition to usual frame routing.
     * This is helpful on bonding where both interfaces can have
     * the same MAC address.
     */
    ql_write32(qdev, RST_FO, RST_FO_RR_MASK | RST_FO_RR_RCV_FUNC_CQ);

    /* Reroute all packets to our Interface.
     * They may have been routed to MPI firmware
     * due to WOL.
     */
    value = ql_read32(qdev, MGMT_RCV_CFG);
    value &= ~MGMT_RCV_CFG_RM;
    mask = 0xffff0000;

    /* Sticky reg needs clearing due to WOL: write mask-only first. */
    ql_write32(qdev, MGMT_RCV_CFG, mask);
    ql_write32(qdev, MGMT_RCV_CFG, mask | value);

    /* Default WOL is enabled on Mezz cards (known subsystem IDs). */
    if (qdev->pdev->subsystem_device == 0x0068 ||
        qdev->pdev->subsystem_device == 0x0180)
        qdev->wol = WAKE_MAGIC;

    /* Start up the rx queues. */
    for (i = 0; i < qdev->rx_ring_count; i++) {
        status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]);
        if (status) {
            netif_err(qdev, ifup, qdev->ndev,
                      "Failed to start rx ring[%d].\n", i);
            return status;
        }
    }

    /* If there is more than one inbound completion queue
     * then download a RICB to configure RSS.
     */
    if (qdev->rss_ring_count > 1) {
        status = ql_start_rss(qdev);
        if (status) {
            netif_err(qdev, ifup, qdev->ndev, "Failed to start RSS.\n");
            return status;
        }
    }

    /* Start up the tx queues. */
    for (i = 0; i < qdev->tx_ring_count; i++) {
        status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]);
        if (status) {
            netif_err(qdev, ifup, qdev->ndev,
                      "Failed to start tx ring[%d].\n", i);
            return status;
        }
    }

    /* Initialize the port and set the max framesize.
     * NOTE(review): a port_initialize failure is only logged, not
     * returned - presumably intentional so bring-up continues; confirm.
     */
    status = qdev->nic_ops->port_initialize(qdev);
    if (status)
        netif_err(qdev, ifup, qdev->ndev, "Failed to start port.\n");

    /* Set up the MAC address and frame routing filter. */
    status = ql_cam_route_initialize(qdev);
    if (status) {
        netif_err(qdev, ifup, qdev->ndev,
                  "Failed to init CAM/Routing tables.\n");
        return status;
    }

    /* Start NAPI for the RSS queues. */
    for (i = 0; i < qdev->rss_ring_count; i++)
        napi_enable(&qdev->rx_ring[i].napi);

    return status;
}
/* Issue a soft (function) reset to the chip.
 *
 * Clears the routing table, quiesces management traffic (unless an ASIC
 * recovery is in progress), triggers the function reset and polls up to
 * the computed deadline for the reset bit to self-clear.
 * Returns 0 on success, -ETIMEDOUT if the chip never clears RST_FO_FR,
 * or the status from clearing the routing entries.
 */
static int ql_adapter_reset(struct ql_adapter *qdev)
{
    u32 value;
    int status = 0;
    unsigned long end_jiffies;

    /* Clear all the entries in the routing table. */
    status = ql_clear_routing_entries(qdev);
    if (status) {
        netif_err(qdev, ifup, qdev->ndev, "Failed to clear routing bits.\n");
        return status;
    }

    /* Poll deadline: at least one jiffy even on coarse HZ. */
    end_jiffies = jiffies +
        max((unsigned long)1, usecs_to_jiffies(30));

    /* Check if bit is set then skip the mailbox command and
     * clear the bit, else we are in normal reset process.
     */
    if (!test_bit(QL_ASIC_RECOVERY, &qdev->flags)) {
        /* Stop management traffic. */
        ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_STOP);

        /* Wait for the NIC and MGMNT FIFOs to empty. */
        ql_wait_fifo_empty(qdev);
    } else
        clear_bit(QL_ASIC_RECOVERY, &qdev->flags);

    /* Kick off the function reset and busy-poll for completion. */
    ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);

    do {
        value = ql_read32(qdev, RST_FO);
        if ((value & RST_FO_FR) == 0)
            break;
        cpu_relax();
    } while (time_before(jiffies, end_jiffies));

    if (value & RST_FO_FR) {
        netif_err(qdev, ifdown, qdev->ndev,
                  "ETIMEDOUT!!! errored out of resetting the chip!\n");
        status = -ETIMEDOUT;
    }

    /* Resume management traffic. */
    ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_RESUME);
    return status;
}
static void ql_display_dev_info(struct net_device *ndev)
{
struct ql_adapter *qdev = netdev_priv(ndev);
netif_info(qdev, probe, qdev->ndev,
"Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, "
"XG Roll = %d, XG Rev = %d.\n",
qdev->func,
qdev->port,
qdev->chip_rev_id & 0x0000000f,
qdev->chip_rev_id >> 4 & 0x0000000f,
qdev->chip_rev_id >> 8 & 0x0000000f,
qdev->chip_rev_id >> 12 & 0x0000000f);
netif_info(qdev, probe, qdev->ndev,
"MAC address %pM\n", ndev->dev_addr);
}
/* Arm wake-on-LAN per qdev->wol before powering down.
 *
 * Only WAKE_MAGIC is supported; any other requested WOL mode is
 * rejected with -EINVAL. Returns the mailbox status otherwise.
 */
static int ql_wol(struct ql_adapter *qdev)
{
    int status = 0;
    u32 wol = MB_WOL_DISABLE;

    /* The CAM is still intact after a reset, but if we
     * are doing WOL, then we may need to program the
     * routing regs. We would also need to issue the mailbox
     * commands to instruct the MPI what to do per the ethtool
     * settings.
     */
    if (qdev->wol & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_PHY | WAKE_UCAST |
                     WAKE_MCAST | WAKE_BCAST)) {
        netif_err(qdev, ifdown, qdev->ndev,
                  "Unsupported WOL parameter. qdev->wol = 0x%x.\n",
                  qdev->wol);
        return -EINVAL;
    }

    if (qdev->wol & WAKE_MAGIC) {
        status = ql_mb_wol_set_magic(qdev, 1);
        if (status) {
            netif_err(qdev, ifdown, qdev->ndev,
                      "Failed to set magic packet on %s.\n",
                      qdev->ndev->name);
            return status;
        } else
            netif_info(qdev, drv, qdev->ndev,
                       "Enabled magic packet successfully on %s.\n",
                       qdev->ndev->name);
        wol |= MB_WOL_MAGIC_PKT;
    }

    if (qdev->wol) {
        wol |= MB_WOL_MODE_ON;
        status = ql_mb_wol_mode(qdev, wol);
        /* NOTE(review): logged at error level even on success - looks
         * deliberate so the result is always visible; confirm. */
        netif_err(qdev, drv, qdev->ndev,
                  "WOL %s (wol code 0x%x) on %s\n",
                  (status == 0) ? "Successfully set" : "Failed",
                  wol, qdev->ndev->name);
    }

    return status;
}
/* Cancel (and wait for) all of the driver's delayed work items. */
static void ql_cancel_all_work_sync(struct ql_adapter *qdev)
{
    /* Don't kill the reset worker thread if we
     * are in the process of recovery.
     */
    if (test_bit(QL_ADAPTER_UP, &qdev->flags))
        cancel_delayed_work_sync(&qdev->asic_reset_work);
    cancel_delayed_work_sync(&qdev->mpi_reset_work);
    cancel_delayed_work_sync(&qdev->mpi_work);
    cancel_delayed_work_sync(&qdev->mpi_idc_work);
    cancel_delayed_work_sync(&qdev->mpi_core_to_log);
    cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
}
/* Bring the adapter down: stop the link, workers, NAPI and interrupts,
 * drain the TX rings, soft-reset the chip and free the RX buffers.
 * Returns the reset status (0 on success).
 */
static int ql_adapter_down(struct ql_adapter *qdev)
{
    int i, status = 0;

    ql_link_off(qdev);

    ql_cancel_all_work_sync(qdev);

    for (i = 0; i < qdev->rss_ring_count; i++)
        napi_disable(&qdev->rx_ring[i].napi);

    clear_bit(QL_ADAPTER_UP, &qdev->flags);

    ql_disable_interrupts(qdev);

    ql_tx_ring_clean(qdev);

    /* Call netif_napi_del() from common point.
     */
    for (i = 0; i < qdev->rss_ring_count; i++)
        netif_napi_del(&qdev->rx_ring[i].napi);

    status = ql_adapter_reset(qdev);
    if (status)
        netif_err(qdev, ifdown, qdev->ndev, "reset(func #%d) FAILED!\n",
                  qdev->func);
    ql_free_rx_buffers(qdev);
    return status;
}
/* Bring the adapter up: initialize the hardware, allocate RX buffers,
 * restore carrier/rx-mode/VLAN state, enable interrupts and start the
 * TX queues. On failure the chip is reset back down.
 * Returns 0 on success.
 */
static int ql_adapter_up(struct ql_adapter *qdev)
{
    int err = 0;

    err = ql_adapter_initialize(qdev);
    if (err) {
        netif_info(qdev, ifup, qdev->ndev, "Unable to initialize adapter.\n");
        goto err_init;
    }
    set_bit(QL_ADAPTER_UP, &qdev->flags);
    ql_alloc_rx_buffers(qdev);

    /* If the port is initialized and the
     * link is up the turn on the carrier.
     */
    if ((ql_read32(qdev, STS) & qdev->port_init) &&
        (ql_read32(qdev, STS) & qdev->port_link_up))
        ql_link_on(qdev);

    /* Restore rx mode. */
    clear_bit(QL_ALLMULTI, &qdev->flags);
    clear_bit(QL_PROMISCUOUS, &qdev->flags);
    qlge_set_multicast_list(qdev->ndev);

    /* Restore vlan setting. */
    qlge_restore_vlan(qdev);

    ql_enable_interrupts(qdev);
    ql_enable_all_completion_interrupts(qdev);
    netif_tx_start_all_queues(qdev->ndev);

    return 0;
err_init:
    ql_adapter_reset(qdev);
    return err;
}
/* Free the DMA/ring memory and then the IRQ vectors (reverse of
 * ql_get_adapter_resources()).
 */
static void ql_release_adapter_resources(struct ql_adapter *qdev)
{
    ql_free_mem_resources(qdev);
    ql_free_irq(qdev);
}
/* Allocate the ring/DMA memory and then request the IRQ vectors.
 * Returns 0 on success, -ENOMEM if memory allocation fails, or the
 * IRQ-request status.
 */
static int ql_get_adapter_resources(struct ql_adapter *qdev)
{
    if (ql_alloc_mem_resources(qdev)) {
        netif_err(qdev, ifup, qdev->ndev, "Unable to allocate memory.\n");
        return -ENOMEM;
    }
    return ql_request_irq(qdev);
}
/* net_device ndo_stop callback: take the interface down and release
 * its resources. Always returns 0.
 */
static int qlge_close(struct net_device *ndev)
{
    struct ql_adapter *qdev = netdev_priv(ndev);

    /* If we hit pci_channel_io_perm_failure
     * failure condition, then we already
     * brought the adapter down.
     */
    if (test_bit(QL_EEH_FATAL, &qdev->flags)) {
        netif_err(qdev, drv, qdev->ndev, "EEH fatal did unload.\n");
        clear_bit(QL_EEH_FATAL, &qdev->flags);
        return 0;
    }

    /*
     * Wait for device to recover from a reset.
     * (Rarely happens, but possible.)
     */
    while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
        msleep(1);
    ql_adapter_down(qdev);
    ql_release_adapter_resources(qdev);
    return 0;
}
/* Size and lay out the TX, RSS (inbound) and outbound-completion rings.
 *
 * Asks for one MSI-X vector per online CPU (capped at MAX_CPUS); the
 * actual vector count becomes the RSS ring count. The rx_ring array
 * holds the RSS rings first, followed by one outbound completion queue
 * per TX ring. Always returns 0.
 */
static int ql_configure_rings(struct ql_adapter *qdev)
{
    int i;
    struct rx_ring *rx_ring;
    struct tx_ring *tx_ring;
    int cpu_cnt = min(MAX_CPUS, (int)num_online_cpus());
    /* Large-buffer size depends on whether jumbo frames are in use. */
    unsigned int lbq_buf_len = (qdev->ndev->mtu > 1500) ?
        LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;

    qdev->lbq_buf_order = get_order(lbq_buf_len);

    /* In a perfect world we have one RSS ring for each CPU
     * and each has it's own vector. To do that we ask for
     * cpu_cnt vectors. ql_enable_msix() will adjust the
     * vector count to what we actually get. We then
     * allocate an RSS ring for each.
     * Essentially, we are doing min(cpu_count, msix_vector_count).
     */
    qdev->intr_count = cpu_cnt;
    ql_enable_msix(qdev);
    /* Adjust the RSS ring count to the actual vector count. */
    qdev->rss_ring_count = qdev->intr_count;
    qdev->tx_ring_count = cpu_cnt;
    qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count;

    for (i = 0; i < qdev->tx_ring_count; i++) {
        tx_ring = &qdev->tx_ring[i];
        memset((void *)tx_ring, 0, sizeof(*tx_ring));
        tx_ring->qdev = qdev;
        tx_ring->wq_id = i;
        tx_ring->wq_len = qdev->tx_ring_size;
        tx_ring->wq_size =
            tx_ring->wq_len * sizeof(struct ob_mac_iocb_req);

        /*
         * The completion queue ID for the tx rings start
         * immediately after the rss rings.
         */
        tx_ring->cq_id = qdev->rss_ring_count + i;
    }

    for (i = 0; i < qdev->rx_ring_count; i++) {
        rx_ring = &qdev->rx_ring[i];
        memset((void *)rx_ring, 0, sizeof(*rx_ring));
        rx_ring->qdev = qdev;
        rx_ring->cq_id = i;
        rx_ring->cpu = i % cpu_cnt;  /* CPU to run handler on. */
        if (i < qdev->rss_ring_count) {
            /*
             * Inbound (RSS) queues.
             */
            rx_ring->cq_len = qdev->rx_ring_size;
            rx_ring->cq_size =
                rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
            rx_ring->lbq_len = NUM_LARGE_BUFFERS;
            rx_ring->lbq_size =
                rx_ring->lbq_len * sizeof(__le64);
            rx_ring->lbq_buf_size = (u16)lbq_buf_len;
            rx_ring->sbq_len = NUM_SMALL_BUFFERS;
            rx_ring->sbq_size =
                rx_ring->sbq_len * sizeof(__le64);
            rx_ring->sbq_buf_size = SMALL_BUF_MAP_SIZE;
            rx_ring->type = RX_Q;
        } else {
            /*
             * Outbound queue handles outbound completions only.
             */
            /* outbound cq is same size as tx_ring it services. */
            rx_ring->cq_len = qdev->tx_ring_size;
            rx_ring->cq_size =
                rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
            rx_ring->lbq_len = 0;
            rx_ring->lbq_size = 0;
            rx_ring->lbq_buf_size = 0;
            rx_ring->sbq_len = 0;
            rx_ring->sbq_size = 0;
            rx_ring->sbq_buf_size = 0;
            rx_ring->type = TX_Q;
        }
    }
    return 0;
}
/* net_device ndo_open callback: reset the chip, size the rings,
 * allocate resources and bring the adapter up. Resources are released
 * again if the final bring-up steps fail.
 */
static int qlge_open(struct net_device *ndev)
{
    struct ql_adapter *qdev = netdev_priv(ndev);
    int rc;

    rc = ql_adapter_reset(qdev);
    if (rc)
        return rc;

    rc = ql_configure_rings(qdev);
    if (rc)
        return rc;

    rc = ql_get_adapter_resources(qdev);
    if (rc)
        goto release;

    rc = ql_adapter_up(qdev);
    if (rc)
        goto release;

    return 0;

release:
    ql_release_adapter_resources(qdev);
    return rc;
}
/* Cycle the adapter down and back up so the large RX buffers can be
 * re-sized after an MTU change. If an adapter reset is in flight, wait
 * up to three seconds for QL_ADAPTER_UP before proceeding.
 *
 * Fix: the timeout used to be detected with "if (!i)", which is wrong
 * on both loop exits - the counter is -1 (not 0) when the retries are
 * exhausted, so a real timeout was never reported, and it is 0 when the
 * flag came up on the very last try, reporting a spurious timeout.
 * Re-test the QL_ADAPTER_UP flag itself instead.
 *
 * Returns 0 on success; on failure the device is closed.
 */
static int ql_change_rx_buffers(struct ql_adapter *qdev)
{
    struct rx_ring *rx_ring;
    int i, status;
    u32 lbq_buf_len;

    /* Wait for an outstanding reset to complete. */
    if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) {
        int retries = 3;

        while (retries-- && !test_bit(QL_ADAPTER_UP, &qdev->flags)) {
            netif_err(qdev, ifup, qdev->ndev,
                      "Waiting for adapter UP...\n");
            ssleep(1);
        }

        if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) {
            netif_err(qdev, ifup, qdev->ndev,
                      "Timed out waiting for adapter UP\n");
            return -ETIMEDOUT;
        }
    }

    status = ql_adapter_down(qdev);
    if (status)
        goto error;

    /* Get the new rx buffer size. */
    lbq_buf_len = (qdev->ndev->mtu > 1500) ?
        LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
    qdev->lbq_buf_order = get_order(lbq_buf_len);

    for (i = 0; i < qdev->rss_ring_count; i++) {
        rx_ring = &qdev->rx_ring[i];
        /* Set the new size. */
        rx_ring->lbq_buf_size = lbq_buf_len;
    }

    status = ql_adapter_up(qdev);
    if (status)
        goto error;

    return status;
error:
    netif_alert(qdev, ifup, qdev->ndev,
                "Driver up/down cycle failed, closing device.\n");
    set_bit(QL_ADAPTER_UP, &qdev->flags);
    dev_close(qdev->ndev);
    return status;
}
/* net_device ndo_change_mtu callback. Only the two supported sizes
 * (1500 and 9000) may be exchanged for one another; anything else is
 * rejected with -EINVAL. A port-config worker is scheduled to inform
 * the firmware, and the RX buffers are re-sized if the interface is up.
 */
static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
{
    struct ql_adapter *qdev = netdev_priv(ndev);
    int status;

    if (ndev->mtu == 1500 && new_mtu == 9000)
        netif_err(qdev, ifup, qdev->ndev, "Changing to jumbo MTU.\n");
    else if (ndev->mtu == 9000 && new_mtu == 1500)
        netif_err(qdev, ifup, qdev->ndev, "Changing to normal MTU.\n");
    else
        return -EINVAL;

    queue_delayed_work(qdev->workqueue,
                       &qdev->mpi_port_cfg_work, 3*HZ);

    ndev->mtu = new_mtu;

    if (!netif_running(qdev->ndev))
        return 0;

    status = ql_change_rx_buffers(qdev);
    if (status)
        netif_err(qdev, ifup, qdev->ndev,
                  "Changing MTU failed.\n");

    return status;
}
/* net_device ndo_get_stats callback: fold the per-ring RX (RSS rings
 * only) and TX counters into ndev->stats and return it.
 */
static struct net_device_stats *qlge_get_stats(struct net_device
                                               *ndev)
{
    struct ql_adapter *qdev = netdev_priv(ndev);
    unsigned long pkts = 0, mcast = 0, dropped = 0, errors = 0, bytes = 0;
    int i;

    /* Accumulate RX stats across the RSS rings. */
    for (i = 0; i < qdev->rss_ring_count; i++) {
        struct rx_ring *rx_ring = &qdev->rx_ring[i];

        pkts += rx_ring->rx_packets;
        bytes += rx_ring->rx_bytes;
        dropped += rx_ring->rx_dropped;
        errors += rx_ring->rx_errors;
        mcast += rx_ring->rx_multicast;
    }
    ndev->stats.rx_packets = pkts;
    ndev->stats.rx_bytes = bytes;
    ndev->stats.rx_dropped = dropped;
    ndev->stats.rx_errors = errors;
    ndev->stats.multicast = mcast;

    /* Accumulate TX stats across the TX rings. */
    pkts = errors = bytes = 0;
    for (i = 0; i < qdev->tx_ring_count; i++) {
        struct tx_ring *tx_ring = &qdev->tx_ring[i];

        pkts += tx_ring->tx_packets;
        bytes += tx_ring->tx_bytes;
        errors += tx_ring->tx_errors;
    }
    ndev->stats.tx_packets = pkts;
    ndev->stats.tx_bytes = bytes;
    ndev->stats.tx_errors = errors;

    return &ndev->stats;
}
static void qlge_set_multicast_list(struct net_device *ndev)
{
struct ql_adapter *qdev = netdev_priv(ndev);
struct netdev_hw_addr *ha;
int i, status;
status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
if (status)
return;
/*
* Set or clear promiscuous mode if a
* transition is taking place.
*/
if (ndev->flags & IFF_PROMISC) {
if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) {
if (ql_set_routing_reg
(qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) {
netif_err(qdev, hw, qdev->ndev,
"Failed to set promiscuous mode.\n");
} else {
set_bit(QL_PROMISCUOUS, &qdev->flags);
}
}
} else {
if (test_bit(QL_PROMISCUOUS, &qdev->flags)) {
if (ql_set_routing_reg
(qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) {
netif_err(qdev, hw, qdev->ndev,
"Failed to clear promiscuous mode.\n");
} else {
clear_bit(QL_PROMISCUOUS, &qdev->flags);
}
}
}
/*
* Set or clear all multicast mode if a
* transition is taking place.
*/
if ((ndev->flags & IFF_ALLMULTI) ||
(netdev_mc_count(ndev) > MAX_MULTICAST_ENTRIES)) {
if (!test_bit(QL_ALLMULTI, &qdev->flags)) {
if (ql_set_routing_reg
(qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) {
netif_err(qdev, hw, qdev->ndev,
"Failed to set all-multi mode.\n");
} else {
set_bit(QL_ALLMULTI, &qdev->flags);
}
}
} else {
if (test_bit(QL_ALLMULTI, &qdev->flags)) {
if (ql_set_routing_reg
(qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) {
netif_err(qdev, hw, qdev->ndev,
"Failed to clear all-multi mode.\n");
} else {
clear_bit(QL_ALLMULTI, &qdev->flags);
}
}
}
if (!netdev_mc_empty(ndev)) {
status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
if (status)
goto exit;
i = 0;
netdev_for_each_mc_addr(ha, ndev) {
if (ql_set_mac_addr_reg(qdev, (u8 *) ha->addr,
MAC_ADDR_TYPE_MULTI_MAC, i)) {
netif_err(qdev, hw, qdev->ndev,
"Failed to loadmulticast address.\n");
ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
goto exit;
}
i++;
}
ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
if (ql_set_routing_reg
(qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) {
netif_err(qdev, hw, qdev->ndev,
"Failed to set multicast match mode.\n");
} else {
set_bit(QL_ALLMULTI, &qdev->flags);
}
}
exit:
ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
}
/* net_device ndo_set_mac_address callback: validate the new address,
 * record it in both the netdev and the driver's local copy, then load
 * it into the CAM under the MAC-address semaphore.
 */
static int qlge_set_mac_address(struct net_device *ndev, void *p)
{
    struct ql_adapter *qdev = netdev_priv(ndev);
    struct sockaddr *addr = p;
    int err;

    if (!is_valid_ether_addr(addr->sa_data))
        return -EADDRNOTAVAIL;

    memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
    /* Update local copy of current mac address. */
    memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);

    err = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
    if (err)
        return err;

    err = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
                              MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
    if (err)
        netif_err(qdev, hw, qdev->ndev, "Failed to load MAC address.\n");

    ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
    return err;
}
/* net_device ndo_tx_timeout callback: a stalled TX queue is treated as
 * a fatal ASIC condition and handed to the asic-error recovery path.
 */
static void qlge_tx_timeout(struct net_device *ndev)
{
    ql_queue_asic_error(netdev_priv(ndev));
}
/* Delayed-work handler for ASIC recovery: under the RTNL lock, cycle
 * the adapter down and back up and restore the RX mode. If either step
 * fails the device is closed.
 */
static void ql_asic_reset_work(struct work_struct *work)
{
    struct ql_adapter *qdev =
        container_of(work, struct ql_adapter, asic_reset_work.work);
    int status;
    rtnl_lock();
    status = ql_adapter_down(qdev);
    if (status)
        goto error;

    status = ql_adapter_up(qdev);
    if (status)
        goto error;

    /* Restore rx mode. */
    clear_bit(QL_ALLMULTI, &qdev->flags);
    clear_bit(QL_PROMISCUOUS, &qdev->flags);
    qlge_set_multicast_list(qdev->ndev);

    rtnl_unlock();
    return;
error:
    netif_alert(qdev, ifup, qdev->ndev,
                "Driver up/down cycle failed, closing device\n");

    /* Mark the adapter up so qlge_close() won't block forever. */
    set_bit(QL_ADAPTER_UP, &qdev->flags);
    dev_close(qdev->ndev);
    rtnl_unlock();
}
/* Chip-specific operations for the 8012 device. */
static const struct nic_operations qla8012_nic_ops = {
    .get_flash          = ql_get_8012_flash_params,
    .port_initialize    = ql_8012_port_initialize,
};
/* Chip-specific operations for the 8000 device. */
static const struct nic_operations qla8000_nic_ops = {
    .get_flash          = ql_get_8000_flash_params,
    .port_initialize    = ql_8000_port_initialize,
};
/* Find the pcie function number for the other NIC
 * on this chip. Since both NIC functions share a
 * common firmware we have the lowest enabled function
 * do any common work. Examples would be resetting
 * after a fatal firmware error, or doing a firmware
 * coredump.
 *
 * Stores the sibling function number in qdev->alt_func. Returns 0 on
 * success, the MPI read status on failure, or -EIO when our own
 * function number matches neither reported NIC function.
 */
static int ql_get_alt_pcie_func(struct ql_adapter *qdev)
{
    u32 cfg;
    u32 nic_func1, nic_func2;
    int rc;

    rc = ql_read_mpi_reg(qdev, MPI_TEST_FUNC_PORT_CFG, &cfg);
    if (rc)
        return rc;

    nic_func1 = (cfg >> MPI_TEST_NIC1_FUNC_SHIFT) &
        MPI_TEST_NIC_FUNC_MASK;
    nic_func2 = (cfg >> MPI_TEST_NIC2_FUNC_SHIFT) &
        MPI_TEST_NIC_FUNC_MASK;

    if (qdev->func == nic_func1) {
        qdev->alt_func = nic_func2;
        return 0;
    }
    if (qdev->func == nic_func2) {
        qdev->alt_func = nic_func1;
        return 0;
    }
    return -EIO;
}
/* Discover this function's identity and per-port register assignments.
 *
 * Reads the function number from STS, finds the sibling function,
 * derives the port number (lower-numbered function is port 0), and
 * picks the port-specific semaphore mask, link/init status bits,
 * mailbox addresses, chip revision and nic_ops table.
 * Returns 0 on success or -EIO / the sibling-lookup status.
 */
static int ql_get_board_info(struct ql_adapter *qdev)
{
    int status;
    qdev->func =
        (ql_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT;
    if (qdev->func > 3)
        return -EIO;

    status = ql_get_alt_pcie_func(qdev);
    if (status)
        return status;

    /* Lower-numbered of the two NIC functions is port 0. */
    qdev->port = (qdev->func < qdev->alt_func) ? 0 : 1;
    if (qdev->port) {
        qdev->xg_sem_mask = SEM_XGMAC1_MASK;
        qdev->port_link_up = STS_PL1;
        qdev->port_init = STS_PI1;
        qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBI;
        qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBO;
    } else {
        qdev->xg_sem_mask = SEM_XGMAC0_MASK;
        qdev->port_link_up = STS_PL0;
        qdev->port_init = STS_PI0;
        qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBI;
        qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO;
    }
    qdev->chip_rev_id = ql_read32(qdev, REV_ID);
    qdev->device_id = qdev->pdev->device;
    if (qdev->device_id == QLGE_DEVICE_ID_8012)
        qdev->nic_ops = &qla8012_nic_ops;
    else if (qdev->device_id == QLGE_DEVICE_ID_8000)
        qdev->nic_ops = &qla8000_nic_ops;
    return status;
}
/* Undo ql_init_device(): destroy the workqueue, unmap the register and
 * doorbell BARs, free the coredump buffer and release the PCI regions.
 * Safe to call on a partially initialized device (fields are checked or
 * NULL-tolerant).
 */
static void ql_release_all(struct pci_dev *pdev)
{
    struct net_device *ndev = pci_get_drvdata(pdev);
    struct ql_adapter *qdev = netdev_priv(ndev);

    if (qdev->workqueue) {
        destroy_workqueue(qdev->workqueue);
        qdev->workqueue = NULL;
    }

    if (qdev->reg_base)
        iounmap(qdev->reg_base);
    if (qdev->doorbell_area)
        iounmap(qdev->doorbell_area);
    vfree(qdev->mpi_coredump);
    pci_release_regions(pdev);
}
/* One-time PCI/adapter initialization for probe.
 *
 * Enables the PCI device, requests regions, sets the DMA masks, maps
 * the register and doorbell BARs, identifies the board, optionally
 * allocates the MPI coredump buffer, reads the flash, seeds the default
 * ring/coalescing parameters and creates the work items.
 *
 * Fixes: (1) a pci_request_regions() failure used to "return err;"
 * directly, leaking the enabled PCI device - it now takes the err_out1
 * path (err_out2 would wrongly release regions that were never
 * requested); (2) the create_singlethread_workqueue() result is now
 * checked before the workqueue is used.
 *
 * Returns 0 on success or a negative errno; on failure everything
 * acquired so far is released.
 */
static int ql_init_device(struct pci_dev *pdev, struct net_device *ndev,
                          int cards_found)
{
    struct ql_adapter *qdev = netdev_priv(ndev);
    int err = 0;

    memset((void *)qdev, 0, sizeof(*qdev));
    err = pci_enable_device(pdev);
    if (err) {
        dev_err(&pdev->dev, "PCI device enable failed.\n");
        return err;
    }

    qdev->ndev = ndev;
    qdev->pdev = pdev;
    pci_set_drvdata(pdev, ndev);

    /* Set PCIe read request size */
    err = pcie_set_readrq(pdev, 4096);
    if (err) {
        dev_err(&pdev->dev, "Set readrq failed.\n");
        goto err_out1;
    }

    err = pci_request_regions(pdev, DRV_NAME);
    if (err) {
        dev_err(&pdev->dev, "PCI region request failed.\n");
        /* Regions were not acquired, so skip ql_release_all(). */
        goto err_out1;
    }

    pci_set_master(pdev);
    if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
        set_bit(QL_DMA64, &qdev->flags);
        err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
    } else {
        err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
        if (!err)
            err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
    }

    if (err) {
        dev_err(&pdev->dev, "No usable DMA configuration.\n");
        goto err_out2;
    }

    /* Set PCIe reset type for EEH to fundamental. */
    pdev->needs_freset = 1;
    pci_save_state(pdev);

    /* BAR 1: control/status registers. */
    qdev->reg_base =
        ioremap_nocache(pci_resource_start(pdev, 1),
                        pci_resource_len(pdev, 1));
    if (!qdev->reg_base) {
        dev_err(&pdev->dev, "Register mapping failed.\n");
        err = -ENOMEM;
        goto err_out2;
    }

    /* BAR 3: doorbell registers. */
    qdev->doorbell_area_size = pci_resource_len(pdev, 3);
    qdev->doorbell_area =
        ioremap_nocache(pci_resource_start(pdev, 3),
                        pci_resource_len(pdev, 3));
    if (!qdev->doorbell_area) {
        dev_err(&pdev->dev, "Doorbell register mapping failed.\n");
        err = -ENOMEM;
        goto err_out2;
    }

    err = ql_get_board_info(qdev);
    if (err) {
        dev_err(&pdev->dev, "Register access failed.\n");
        err = -EIO;
        goto err_out2;
    }
    qdev->msg_enable = netif_msg_init(debug, default_msg);
    spin_lock_init(&qdev->hw_lock);
    spin_lock_init(&qdev->stats_lock);

    if (qlge_mpi_coredump) {
        qdev->mpi_coredump =
            vmalloc(sizeof(struct ql_mpi_coredump));
        if (qdev->mpi_coredump == NULL) {
            err = -ENOMEM;
            goto err_out2;
        }
        if (qlge_force_coredump)
            set_bit(QL_FRC_COREDUMP, &qdev->flags);
    }
    /* make sure the EEPROM is good */
    err = qdev->nic_ops->get_flash(qdev);
    if (err) {
        dev_err(&pdev->dev, "Invalid FLASH.\n");
        goto err_out2;
    }

    /* Keep local copy of current mac address. */
    memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);

    /* Set up the default ring sizes. */
    qdev->tx_ring_size = NUM_TX_RING_ENTRIES;
    qdev->rx_ring_size = NUM_RX_RING_ENTRIES;

    /* Set up the coalescing parameters. */
    qdev->rx_coalesce_usecs = DFLT_COALESCE_WAIT;
    qdev->tx_coalesce_usecs = DFLT_COALESCE_WAIT;
    qdev->rx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
    qdev->tx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;

    /*
     * Set up the operating parameters.
     */
    qdev->workqueue = create_singlethread_workqueue(ndev->name);
    if (!qdev->workqueue) {
        err = -ENOMEM;
        goto err_out2;
    }
    INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work);
    INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work);
    INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
    INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work);
    INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work);
    INIT_DELAYED_WORK(&qdev->mpi_core_to_log, ql_mpi_core_to_log);
    init_completion(&qdev->ide_completion);
    mutex_init(&qdev->mpi_mutex);

    if (!cards_found) {
        dev_info(&pdev->dev, "%s\n", DRV_STRING);
        dev_info(&pdev->dev, "Driver name: %s, Version: %s.\n",
                 DRV_NAME, DRV_VERSION);
    }
    return 0;
err_out2:
    ql_release_all(pdev);
err_out1:
    pci_disable_device(pdev);
    return err;
}
/* net_device operations for the qlge interface. */
static const struct net_device_ops qlge_netdev_ops = {
    .ndo_open               = qlge_open,
    .ndo_stop               = qlge_close,
    .ndo_start_xmit         = qlge_send,
    .ndo_change_mtu         = qlge_change_mtu,
    .ndo_get_stats          = qlge_get_stats,
    .ndo_set_rx_mode        = qlge_set_multicast_list,
    .ndo_set_mac_address    = qlge_set_mac_address,
    .ndo_validate_addr      = eth_validate_addr,
    .ndo_tx_timeout         = qlge_tx_timeout,
    .ndo_fix_features       = qlge_fix_features,
    .ndo_set_features       = qlge_set_features,
    .ndo_vlan_rx_add_vid    = qlge_vlan_rx_add_vid,
    .ndo_vlan_rx_kill_vid   = qlge_vlan_rx_kill_vid,
};
/* Periodic (5 s) timer that pokes the STS register so the PCI core can
 * detect a dead bus (EEH). If the channel has gone offline the timer is
 * simply not re-armed.
 */
static void ql_timer(unsigned long data)
{
    struct ql_adapter *qdev = (struct ql_adapter *)data;
    u32 sts = ql_read32(qdev, STS);

    if (pci_channel_offline(qdev->pdev)) {
        netif_err(qdev, ifup, qdev->ndev, "EEH STS = 0x%.08x.\n", sts);
        return;
    }

    mod_timer(&qdev->timer, jiffies + (5*HZ));
}
/* PCI probe callback: allocate the multiqueue netdev, initialize the
 * device, advertise offload features, register the netdev and start the
 * EEH watchdog timer. Returns 0 on success or a negative errno with
 * everything rolled back.
 */
static int qlge_probe(struct pci_dev *pdev,
                      const struct pci_device_id *pci_entry)
{
    struct net_device *ndev = NULL;
    struct ql_adapter *qdev = NULL;
    static int cards_found = 0;
    int err = 0;

    ndev = alloc_etherdev_mq(sizeof(struct ql_adapter),
                             min(MAX_CPUS, netif_get_num_default_rss_queues()));
    if (!ndev)
        return -ENOMEM;

    err = ql_init_device(pdev, ndev, cards_found);
    if (err < 0) {
        free_netdev(ndev);
        return err;
    }

    qdev = netdev_priv(ndev);
    SET_NETDEV_DEV(ndev, &pdev->dev);
    ndev->hw_features = NETIF_F_SG |
        NETIF_F_IP_CSUM |
        NETIF_F_TSO |
        NETIF_F_TSO_ECN |
        NETIF_F_HW_VLAN_CTAG_TX |
        NETIF_F_HW_VLAN_CTAG_RX |
        NETIF_F_HW_VLAN_CTAG_FILTER |
        NETIF_F_RXCSUM;
    ndev->features = ndev->hw_features;
    ndev->vlan_features = ndev->hw_features;
    /* vlan gets same features (except vlan filter) */
    ndev->vlan_features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;

    if (test_bit(QL_DMA64, &qdev->flags))
        ndev->features |= NETIF_F_HIGHDMA;

    /*
     * Set up net_device structure.
     */
    ndev->tx_queue_len = qdev->tx_ring_size;
    ndev->irq = pdev->irq;

    ndev->netdev_ops = &qlge_netdev_ops;
    SET_ETHTOOL_OPS(ndev, &qlge_ethtool_ops);
    ndev->watchdog_timeo = 10 * HZ;

    err = register_netdev(ndev);
    if (err) {
        dev_err(&pdev->dev, "net device registration failed.\n");
        ql_release_all(pdev);
        pci_disable_device(pdev);
        free_netdev(ndev);
        return err;
    }
    /* Start up the timer to trigger EEH if
     * the bus goes dead
     */
    init_timer_deferrable(&qdev->timer);
    qdev->timer.data = (unsigned long)qdev;
    qdev->timer.function = ql_timer;
    qdev->timer.expires = jiffies + (5*HZ);
    add_timer(&qdev->timer);
    ql_link_off(qdev);
    ql_display_dev_info(ndev);
    atomic_set(&qdev->lb_count, 0);
    cards_found++;
    return 0;
}
/* Loopback-test transmit hook (used by ethtool self-test): forwards to
 * the normal transmit path.
 */
netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev)
{
    return qlge_send(skb, ndev);
}
/* Loopback-test receive hook: drains up to 'budget' completions from
 * the given inbound ring via the normal cleanup path.
 */
int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget)
{
    return ql_clean_inbound_rx_ring(rx_ring, budget);
}
/* PCI remove callback: stop the EEH timer and workers, unregister the
 * netdev and release every resource acquired at probe time.
 */
static void qlge_remove(struct pci_dev *pdev)
{
    struct net_device *ndev = pci_get_drvdata(pdev);
    struct ql_adapter *qdev = netdev_priv(ndev);
    del_timer_sync(&qdev->timer);
    ql_cancel_all_work_sync(qdev);
    unregister_netdev(ndev);
    ql_release_all(pdev);
    pci_disable_device(pdev);
    free_netdev(ndev);
}
/* Clean up resources without touching hardware.
 * Used from the EEH error paths where register access may be unsafe:
 * stops the queues, timer, workers and NAPI, drains the TX rings and
 * frees all adapter resources.
 */
static void ql_eeh_close(struct net_device *ndev)
{
    int i;
    struct ql_adapter *qdev = netdev_priv(ndev);

    if (netif_carrier_ok(ndev)) {
        netif_carrier_off(ndev);
        netif_stop_queue(ndev);
    }

    /* Disabling the timer */
    del_timer_sync(&qdev->timer);
    ql_cancel_all_work_sync(qdev);

    for (i = 0; i < qdev->rss_ring_count; i++)
        netif_napi_del(&qdev->rx_ring[i].napi);

    clear_bit(QL_ADAPTER_UP, &qdev->flags);
    ql_tx_ring_clean(qdev);
    ql_free_rx_buffers(qdev);
    ql_release_adapter_resources(qdev);
}
/*
 * This callback is called by the PCI subsystem whenever
 * a PCI bus error is detected. Returns the recovery action the PCI
 * core should take for the given channel state.
 */
static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
                                               enum pci_channel_state state)
{
    struct net_device *ndev = pci_get_drvdata(pdev);
    struct ql_adapter *qdev = netdev_priv(ndev);

    switch (state) {
    case pci_channel_io_normal:
        return PCI_ERS_RESULT_CAN_RECOVER;
    case pci_channel_io_frozen:
        /* MMIO is disabled: detach and tear down without HW access. */
        netif_device_detach(ndev);
        if (netif_running(ndev))
            ql_eeh_close(ndev);
        pci_disable_device(pdev);
        return PCI_ERS_RESULT_NEED_RESET;
    case pci_channel_io_perm_failure:
        dev_err(&pdev->dev,
                "%s: pci_channel_io_perm_failure.\n", __func__);
        ql_eeh_close(ndev);
        /* Flag consumed by qlge_close() to skip a second teardown. */
        set_bit(QL_EEH_FATAL, &qdev->flags);
        return PCI_ERS_RESULT_DISCONNECT;
    }

    /* Request a slot reset. */
    return PCI_ERS_RESULT_NEED_RESET;
}
/*
 * This callback is called after the PCI bus has been reset.
 * Basically, this tries to restart the card from scratch.
 * This is a shortened version of the device probe/discovery code,
 * it resembles the first half of the qlge_probe() routine.
 */
static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
{
    struct net_device *ndev = pci_get_drvdata(pdev);
    struct ql_adapter *qdev = netdev_priv(ndev);

    pdev->error_state = pci_channel_io_normal;

    pci_restore_state(pdev);
    if (pci_enable_device(pdev)) {
        netif_err(qdev, ifup, qdev->ndev,
                  "Cannot re-enable PCI device after reset.\n");
        return PCI_ERS_RESULT_DISCONNECT;
    }
    pci_set_master(pdev);

    if (ql_adapter_reset(qdev)) {
        netif_err(qdev, drv, qdev->ndev, "reset FAILED!\n");
        set_bit(QL_EEH_FATAL, &qdev->flags);
        return PCI_ERS_RESULT_DISCONNECT;
    }

    return PCI_ERS_RESULT_RECOVERED;
}
/* Final EEH recovery step: re-open the interface if it was running
 * before the error, restart the watchdog timer and re-attach the
 * device.
 */
static void qlge_io_resume(struct pci_dev *pdev)
{
    struct net_device *ndev = pci_get_drvdata(pdev);
    struct ql_adapter *qdev = netdev_priv(ndev);

    if (!netif_running(ndev)) {
        netif_err(qdev, ifup, qdev->ndev,
                  "Device was not running prior to EEH.\n");
    } else if (qlge_open(ndev)) {
        netif_err(qdev, ifup, qdev->ndev,
                  "Device initialization failed after reset.\n");
        return;
    }

    mod_timer(&qdev->timer, jiffies + (5*HZ));
    netif_device_attach(ndev);
}
/* PCI EEH (error recovery) callbacks. */
static const struct pci_error_handlers qlge_err_handler = {
    .error_detected = qlge_io_error_detected,
    .slot_reset = qlge_io_slot_reset,
    .resume = qlge_io_resume,
};
/* PCI suspend callback (also reused by qlge_shutdown()).
 *
 * Detaches the netdev, stops the EEH timer, brings the adapter down if
 * the interface was running, arms wake-on-LAN per qdev->wol, saves PCI
 * state and powers the device down.
 *
 * Fix: the adapter-down result was tested as "if (!err) return err;",
 * i.e. the function returned early on *success* and only continued to
 * the WOL/power-down steps when ql_adapter_down() failed. The test is
 * inverted so errors abort the suspend and success completes it.
 */
static int qlge_suspend(struct pci_dev *pdev, pm_message_t state)
{
    struct net_device *ndev = pci_get_drvdata(pdev);
    struct ql_adapter *qdev = netdev_priv(ndev);
    int err;

    netif_device_detach(ndev);
    del_timer_sync(&qdev->timer);

    if (netif_running(ndev)) {
        err = ql_adapter_down(qdev);
        if (err)
            return err;
    }

    ql_wol(qdev);
    err = pci_save_state(pdev);
    if (err)
        return err;

    pci_disable_device(pdev);
    pci_set_power_state(pdev, pci_choose_state(pdev, state));

    return 0;
}
#ifdef CONFIG_PM
/* PCI resume callback: restore power state and PCI config, re-enable
 * the device, disable wake sources, bring the adapter back up if the
 * interface was running, and restart the EEH timer.
 */
static int qlge_resume(struct pci_dev *pdev)
{
    struct net_device *ndev = pci_get_drvdata(pdev);
    struct ql_adapter *qdev = netdev_priv(ndev);
    int err;

    pci_set_power_state(pdev, PCI_D0);
    pci_restore_state(pdev);
    err = pci_enable_device(pdev);
    if (err) {
        netif_err(qdev, ifup, qdev->ndev, "Cannot enable PCI device from suspend\n");
        return err;
    }
    pci_set_master(pdev);

    pci_enable_wake(pdev, PCI_D3hot, 0);
    pci_enable_wake(pdev, PCI_D3cold, 0);

    if (netif_running(ndev)) {
        err = ql_adapter_up(qdev);
        if (err)
            return err;
    }

    mod_timer(&qdev->timer, jiffies + (5*HZ));
    netif_device_attach(ndev);

    return 0;
}
#endif /* CONFIG_PM */
/* PCI shutdown callback: reuse the suspend path so WOL gets armed and
 * the device is powered down cleanly.
 */
static void qlge_shutdown(struct pci_dev *pdev)
{
    qlge_suspend(pdev, PMSG_SUSPEND);
}
/* PCI driver registration for the qlge devices. */
static struct pci_driver qlge_driver = {
    .name = DRV_NAME,
    .id_table = qlge_pci_tbl,
    .probe = qlge_probe,
    .remove = qlge_remove,
#ifdef CONFIG_PM
    .suspend = qlge_suspend,
    .resume = qlge_resume,
#endif
    .shutdown = qlge_shutdown,
    .err_handler = &qlge_err_handler
};

/* Generates the module init/exit boilerplate for the driver above. */
module_pci_driver(qlge_driver);
|
uoaerg/linux-ecn
|
drivers/net/ethernet/qlogic/qlge/qlge_main.c
|
C
|
gpl-2.0
| 137,302
|
/**
* @package AcyMailing for Joomla!
* @version 4.5.1
* @author acyba.com
* @copyright (C) 2009-2013 ACYBA S.A.R.L. All rights reserved.
* @license GNU/GPLv3 http://www.gnu.org/licenses/gpl-3.0.html
*/
@import url("component_default_shadow_black.css");
/* AcyMailing action buttons (archive list, modify form, unsubscribe).
 * Fix: the unprefixed gradient lines used pre-standard syntax
 * ("bottom" start keyword, and "top, ellipse cover" inside
 * linear/radial-gradient), which current browsers reject, so only the
 * flat background-color was ever applied. The standard-syntax
 * declaration is rewritten as a valid linear-gradient and placed last
 * so it wins where supported; legacy-prefixed fallbacks are kept. */
#acyarchivelisting .button, #acymodifyform .button, #unsubbutton_div .button {
    color: #fff !important;
    background-color: #730028 !important;
    /* Legacy prefixed fallbacks (old gradient syntax). */
    background-image: -o-linear-gradient(bottom, #777059 21%, #aca489 58%) !important;
    background-image: -moz-linear-gradient(bottom, #777059 21%, #aca489 58%) !important;
    background-image: -webkit-linear-gradient(bottom, #777059 21%, #aca489 58%) !important;
    background-image: -ms-linear-gradient(bottom, #777059 21%, #aca489 58%) !important;
    /* Standard syntax: old "bottom" start point is "to top". */
    background-image: linear-gradient(to top, #777059 21%, #aca489 58%) !important;
    /* IE<10 fallback. */
    filter: progid:DXImageTransform.Microsoft.gradient( startColorstr='#aca489', endColorstr='#777059',GradientType=0 ) !important;
}
#acyarchivelisting .button:hover, #acymodifyform .button:hover, #unsubbutton_div .button:hover{
color:#fff !important;
background-color:#730028 !important;
background-image: linear-gradient(bottom, #aca489 21%, #d0c9b3 58%) !important;
background-image: -o-linear-gradient(bottom, #aca489 21%, #d0c9b3 58%) !important;
background-image: -moz-linear-gradient(bottom, #aca489 21%, #d0c9b3 58%) !important;
background-image: -webkit-linear-gradient(bottom, #aca489 21%, #d0c9b3 58%) !important;
background-image: -ms-linear-gradient(bottom, #aca489 21%, #d0c9b3 58%) !important;
background: -ms-linear-gradient(top, ellipse cover, #aca489 0%, #d0c9b3 100%);
background: radial-gradient(top, ellipse cover,#aca489 0%, #d0c9b3 100%);
filter: progid:DXImageTransform.Microsoft.gradient( startColorstr='#d0c9b3', endColorstr='#aca489',GradientType=0 ) !important;
}
#acyarchivelisting .contentheading{
color:#777059;
border-bottom:1px solid #777059;
}
#acyarchivelisting .contentpane .contentdescription{
color:#aca489;
}
#acyarchivelisting .sectiontableheader a:hover{
color:#aca489;
}
#acyarchivelisting .contentpane tbody .sectiontableentry1 a:hover{
color:#777059;
}
#acyarchivelisting .contentpane tbody .sectiontableentry2 a:hover{
color:#777059;
}
#acyarchivelisting .contentpane tbody .sectiontableentry1{
background-color:#ece9e0;}
#acyarchivelisting .contentpane tbody .sectiontableentry1:hover{
background-color:#e8e4d6;}
#acyarchivelisting .contentpane tbody .sectiontableentry2{
background-color:#f2f0e8;}
#acyarchivelisting .contentpane tbody .sectiontableentry2:hover{
background-color:#e8e4d6;}
#acyarchiveview .contentheading{
color:#aca489;}
#acylistslisting .componentheading{
color:#777059;
border-bottom:1px solid #777059;
}
#acylistslisting .list_name a{
color:#aca489;
}
#acylistslisting .list_name a:hover, #acylistslisting .list_name a:focus {
color:#aca489;
}
div.acymailing_list:hover{
background-color:#f2f0e8;}
#acymodifyform legend{
color:#777059;
border-bottom:1px solid #777059;
}
#acyusersubscription .list_name{
color: #aca489;
}
#unsubpage .unsubintro{
color:#aca489;
border-bottom: 1px solid #aca489;
}
#unsubpage .unsubsurveytext{
border-bottom: 1px solid #aca489;
color: #aca489;
}
|
hscomp2002/edmansport
|
media/com_acymailing/css/component_default_shadow_sand.css
|
CSS
|
gpl-2.0
| 3,559
|
<!-- Glazed page-builder element: "simple call to action" fragment.
     Loads the element stylesheet, then renders an editable lead paragraph
     and a button linking to the pricing page. -->
<link href="../../css/elements/cta.css" rel="stylesheet" type="text/css">
<p class="lead az-editable">Thousands of Drupal Professionals Love Glazed. Glazed replaces all other themes by providing dozens of full demos, with unique branding and amazing Drag and Drop features.</p>
<p>
  <a class="btn btn-primary btn-lg clear-both clearfix az-editable" href="http://sooperthemes.com/pricing">
    Try it now
  </a>
</p>
|
eggla/LA-glazed
|
profiles/cms/modules/sooperthemes_premium/glazed_builder/glazed_elements/Call to action/cta-simple.html
|
HTML
|
gpl-2.0
| 423
|
// Copyright (C) 2007, 2008, 2009, 2012 EPITA Research and Development
// Laboratory (LRDE)
//
// This file is part of Olena.
//
// Olena is free software: you can redistribute it and/or modify it under
// the terms of the GNU General Public License as published by the Free
// Software Foundation, version 2 of the License.
//
// Olena is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with Olena. If not, see <http://www.gnu.org/licenses/>.
//
// As a special exception, you may use this file as part of a free
// software project without restriction. Specifically, if other files
// instantiate templates or use macros or inline functions from this
// file, or you compile this file and link it with other files to produce
// an executable, this file does not by itself cause the resulting
// executable to be covered by the GNU General Public License. This
// exception does not however invalidate any other reasons why the
// executable file might be covered by the GNU General Public License.
#include <iostream>
#include <vector>
#include <mln/core/alias/point2d.hh>
#include <mln/core/site_set/p_vertices.hh>
#include <mln/fun/i2v/array.hh>
#include <mln/util/graph.hh>
#include <mln/core/image/graph_elt_neighborhood.hh>
// Vertex ids expected around vertex 1, in forward and backward
// iteration order (checked by the niter loops in main below).
unsigned fwd_neighb[] = { 0, 2, 3 };
unsigned bkd_neighb[] = { 3, 2, 0 };
// Test: iterate over the graph-element neighborhood of vertex 1 and
// check that forward/backward traversal yields the expected vertex ids.
// NOTE: vertex/edge ids depend on the exact insertion order below.
int main()
{
  using namespace mln;

  typedef point2d p_t;

  /*--------.
  | Graph.  |
  `--------*/

  /* The graph is as follows:

            0 1 2 3 4
         .-----------
         |
       0 |  0       2
       1 |    \    /  |
       2 |      1     |
       3 |       \    |
       4 |        3-4

  */

  // Points associated to vertices.
  typedef fun::i2v::array<p_t> F;
  F points(5);
  points(0) = point2d(0,0); // Point associated to vertex 0.
  points(1) = point2d(2,2); // Point associated to vertex 1.
  points(2) = point2d(0,4); // Point associated to vertex 2.
  points(3) = point2d(4,3); // Point associated to vertex 3.
  points(4) = point2d(4,4); // Point associated to vertex 4.

  // Edges.
  typedef mln::util::graph G;
  G g;
  // Populate the graph with vertices.
  g.add_vertices(points.size());
  // Populate the graph with edges (order fixes edge ids).
  g.add_edge(0, 1);
  g.add_edge(1, 2);
  g.add_edge(1, 3);
  g.add_edge(3, 4);
  g.add_edge(4, 2);

  /*-------------------------.
  | Graph and neighborhood.  |
  `-------------------------*/

  // Graph psite set.
  typedef p_vertices<G, F> pv_t;
  pv_t pg(g, points);

  // Graph point site centered on vertex 1.
  mln_psite_(pv_t) p(pg, 1);

  // ``Sliding'' neighborhood of a psite of PG.
  typedef graph_elt_neighborhood<G, pv_t> nbh_t;
  nbh_t nbh;

  // Forward browsing must visit neighbors in fwd_neighb order.
  unsigned i = 0;
  mln_fwd_niter_(nbh_t) fq(nbh, p);
  for_all(fq)
    mln_assertion(fq.element().id() == fwd_neighb[i++]);

  // Backward browsing must visit them in reverse (bkd_neighb) order.
  i = 0;
  mln_bkd_niter_(nbh_t) bq(nbh, p);
  for_all(bq)
    mln_assertion(bq.element().id() == bkd_neighb[i++]);
}
|
codingforfun/Olena-Mirror
|
milena/tests/core/other/graph_elt_neighborhood.cc
|
C++
|
gpl-2.0
| 3,113
|
package sc
// Gendy3 is a dynamic stochastic synthesis generator.
// See Gendy1 for background.
// This variant of GENDYN normalises the durations in each period
// to force oscillation at the desired pitch.
// The breakpoints still get perturbed as in Gendy1.
// There is some glitching in the oscillator caused by the stochastic effects:
// control points as they vary cause big local jumps of amplitude.
// Put ampscale and durscalelow to minimise the rate of this.
type Gendy3 struct {
	// Choice of probability distribution for the next perturbation
	// of the amplitude of a control point.
	AmpDist Input

	// Choice of distribution for the perturbation of the current inter control point duration.
	DurDist Input

	// A parameter for the shape of the amplitude probability distribution,
	// requires values in the range 0.0001 to 1 (there are safety checks
	// in the code so don't worry too much if you want to modulate!).
	ADParam Input

	// A parameter for the shape of the duration probability distribution,
	// requires values in the range 0.0001 to 1.
	DDParam Input

	// Oscillation frequency.
	Freq Input

	// Normally 0.0 to 1.0, multiplier for the distribution's delta value for amplitude.
	// An ampscale of 1.0 allows the full range of -1 to 1 for a change of amplitude.
	AmpScale Input

	// Normally 0.0 to 1.0, multiplier for the distribution's delta value for duration.
	// A durscale of 1.0 allows the full range of -1 to 1 for a change of duration.
	DurScale Input

	// Initialise the number of control points in the memory.
	// Xenakis specifies 12.
	// There would be this number of control points per cycle of the oscillator,
	// though the oscillator's period will constantly change due to the duration distribution.
	InitCPs Input

	// Current number of utilised control points, allows modulation.
	KNum Input
}
// defaults substitutes the standard SuperCollider default for every
// input that the caller left nil.
func (g *Gendy3) defaults() {
	if g.AmpDist == nil {
		g.AmpDist = DistCauchy
	}
	if g.DurDist == nil {
		g.DurDist = DistCauchy
	}
	if g.ADParam == nil {
		g.ADParam = C(1)
	}
	if g.DDParam == nil {
		g.DDParam = C(1)
	}
	if g.Freq == nil {
		g.Freq = C(440)
	}
	if g.AmpScale == nil {
		g.AmpScale = C(0.5)
	}
	if g.DurScale == nil {
		g.DurScale = C(0.5)
	}
	if g.InitCPs == nil {
		g.InitCPs = C(12)
	}
	if g.KNum == nil {
		g.KNum = C(12)
	}
}
// Rate creates a new ugen at a specific rate.
// If rate is an unsupported value this method will cause a runtime panic.
func (g Gendy3) Rate(rate int8) Input {
	CheckRate(rate)
	// g is addressable here, so the pointer-receiver call is implicit.
	g.defaults()
	return NewInput("Gendy3", rate, 0, 1,
		g.AmpDist, g.DurDist, g.ADParam, g.DDParam, g.Freq,
		g.AmpScale, g.DurScale, g.InitCPs, g.KNum)
}
|
scgolang/dx7
|
vendor/github.com/scgolang/sc/gendy3.go
|
GO
|
gpl-2.0
| 2,625
|
#region License
/*
* Copyright (C) 1999-2015 John Källén.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; see the file COPYING. If not, write to
* the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#endregion
using Reko.Core.Expressions;
using System;
using System.Collections.Generic;
using System.IO;
using System.Text;
namespace Reko.Core.Rtl
{
/// <summary>
/// Base class for RTL instructions that transfer control to a target
/// expression (carrying an <see cref="RtlClass"/> classification).
/// </summary>
public abstract class RtlTransfer : RtlInstruction
{
	public RtlTransfer(Expression target, RtlClass rtlClass)
	{
		this.Target = target;
		this.Class = rtlClass;
	}

	/// <summary>The destination of the control transfer.</summary>
	public Expression Target { get; private set; }
}
}
|
chubbymaggie/reko
|
src/Core/Rtl/RtlTransfer.cs
|
C#
|
gpl-2.0
| 1,202
|
/*
* linux/drivers/mmc/host/sdhci.c - Secure Digital Host Controller Interface driver
*
* Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or (at
* your option) any later version.
*
* Thanks to the following companies for their support:
*
* - JMicron (hardware and technical support)
*/
#include <linux/delay.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/regulator/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/leds.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/sdio.h>
#include "sdhci.h"
#define DRIVER_NAME "sdhci"
#define SDHCI_SUSPEND_TIMEOUT 300 /* 300 ms */
#define DBG(f, x...) \
pr_debug(DRIVER_NAME " [%s()]: " f, __func__,## x)
#if defined(CONFIG_LEDS_CLASS) || (defined(CONFIG_LEDS_CLASS_MODULE) && \
defined(CONFIG_MMC_SDHCI_MODULE))
#define SDHCI_USE_LEDS_CLASS
#endif
#define MAX_TUNING_LOOP 40
static unsigned int debug_quirks = 0;
static unsigned int debug_quirks2;
static void sdhci_finish_data(struct sdhci_host *);
static void sdhci_send_command(struct sdhci_host *, struct mmc_command *);
static void sdhci_finish_command(struct sdhci_host *);
static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode);
static void sdhci_tuning_timer(unsigned long data);
static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable);
static bool sdhci_check_state(struct sdhci_host *);
#ifdef CONFIG_PM_RUNTIME
static int sdhci_runtime_pm_get(struct sdhci_host *host);
static int sdhci_runtime_pm_put(struct sdhci_host *host);
#else
/* Without runtime PM the get/put helpers collapse to successful no-ops. */
static inline int sdhci_runtime_pm_get(struct sdhci_host *host)
{
	return 0;
}
static inline int sdhci_runtime_pm_put(struct sdhci_host *host)
{
	return 0;
}
#endif
static inline int sdhci_get_async_int_status(struct sdhci_host *host)
{
return (sdhci_readw(host, SDHCI_HOST_CONTROL2) &
SDHCI_CTRL_ASYNC_INT_ENABLE) >> 14;
}
/* Log clock, claimer, power and runtime-PM bookkeeping for debugging. */
static void sdhci_dump_state(struct sdhci_host *host)
{
	struct mmc_host *mmc = host->mmc;

	pr_info("%s: clk: %d clk-gated: %d claimer: %s pwr: %d\n",
		mmc_hostname(mmc), host->clock, mmc->clk_gated,
		mmc->claimer->comm, host->pwr);
	pr_info("%s: rpmstatus[pltfm](runtime-suspend:usage_count:disable_depth)(%d:%d:%d)\n",
		mmc_hostname(mmc), mmc->parent->power.runtime_status,
		atomic_read(&mmc->parent->power.usage_count),
		mmc->parent->power.disable_depth);
}
/*
 * Dump the full SDHCI register file (plus any vendor registers and the
 * driver's own state) to the kernel log; called on errors and timeouts.
 */
static void sdhci_dumpregs(struct sdhci_host *host)
{
	pr_info(DRIVER_NAME ": =========== REGISTER DUMP (%s)===========\n",
		mmc_hostname(host->mmc));

	pr_info(DRIVER_NAME ": Sys addr: 0x%08x | Version: 0x%08x\n",
		sdhci_readl(host, SDHCI_DMA_ADDRESS),
		sdhci_readw(host, SDHCI_HOST_VERSION));
	pr_info(DRIVER_NAME ": Blk size: 0x%08x | Blk cnt: 0x%08x\n",
		sdhci_readw(host, SDHCI_BLOCK_SIZE),
		sdhci_readw(host, SDHCI_BLOCK_COUNT));
	pr_info(DRIVER_NAME ": Argument: 0x%08x | Trn mode: 0x%08x\n",
		sdhci_readl(host, SDHCI_ARGUMENT),
		sdhci_readw(host, SDHCI_TRANSFER_MODE));
	pr_info(DRIVER_NAME ": Present: 0x%08x | Host ctl: 0x%08x\n",
		sdhci_readl(host, SDHCI_PRESENT_STATE),
		sdhci_readb(host, SDHCI_HOST_CONTROL));
	pr_info(DRIVER_NAME ": Power: 0x%08x | Blk gap: 0x%08x\n",
		sdhci_readb(host, SDHCI_POWER_CONTROL),
		sdhci_readb(host, SDHCI_BLOCK_GAP_CONTROL));
	pr_info(DRIVER_NAME ": Wake-up: 0x%08x | Clock: 0x%08x\n",
		sdhci_readb(host, SDHCI_WAKE_UP_CONTROL),
		sdhci_readw(host, SDHCI_CLOCK_CONTROL));
	pr_info(DRIVER_NAME ": Timeout: 0x%08x | Int stat: 0x%08x\n",
		sdhci_readb(host, SDHCI_TIMEOUT_CONTROL),
		sdhci_readl(host, SDHCI_INT_STATUS));
	pr_info(DRIVER_NAME ": Int enab: 0x%08x | Sig enab: 0x%08x\n",
		sdhci_readl(host, SDHCI_INT_ENABLE),
		sdhci_readl(host, SDHCI_SIGNAL_ENABLE));
	/* AC12 error status is the value latched by the interrupt handler. */
	pr_info(DRIVER_NAME ": AC12 err: 0x%08x | Slot int: 0x%08x\n",
		host->auto_cmd_err_sts,
		sdhci_readw(host, SDHCI_SLOT_INT_STATUS));
	pr_info(DRIVER_NAME ": Caps: 0x%08x | Caps_1: 0x%08x\n",
		sdhci_readl(host, SDHCI_CAPABILITIES),
		sdhci_readl(host, SDHCI_CAPABILITIES_1));
	pr_info(DRIVER_NAME ": Cmd: 0x%08x | Max curr: 0x%08x\n",
		sdhci_readw(host, SDHCI_COMMAND),
		sdhci_readl(host, SDHCI_MAX_CURRENT));
	pr_info(DRIVER_NAME ": Resp 1: 0x%08x | Resp 0: 0x%08x\n",
		sdhci_readl(host, SDHCI_RESPONSE + 0x4),
		sdhci_readl(host, SDHCI_RESPONSE));
	pr_info(DRIVER_NAME ": Resp 3: 0x%08x | Resp 2: 0x%08x\n",
		sdhci_readl(host, SDHCI_RESPONSE + 0xC),
		sdhci_readl(host, SDHCI_RESPONSE + 0x8));
	pr_info(DRIVER_NAME ": Host ctl2: 0x%08x\n",
		sdhci_readw(host, SDHCI_HOST_CONTROL2));

	/* ADMA error/pointer registers only exist when ADMA is in use. */
	if (host->flags & SDHCI_USE_ADMA)
		pr_info(DRIVER_NAME ": ADMA Err: 0x%08x | ADMA Ptr: 0x%08x\n",
			readl(host->ioaddr + SDHCI_ADMA_ERROR),
			readl(host->ioaddr + SDHCI_ADMA_ADDRESS));

	if (host->ops->dump_vendor_regs)
		host->ops->dump_vendor_regs(host);
	sdhci_dump_state(host);
	pr_info(DRIVER_NAME ": ===========================================\n");
}
#define MAX_PM_QOS_TIMEOUT_VALUE 100000 /* 100 ms */
/* sysfs 'show': report the PM QoS timeout in microseconds. */
static ssize_t
show_sdhci_pm_qos_tout(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	struct sdhci_host *host = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%d us\n", host->pm_qos_timeout_us);
}
/*
 * sysfs 'store': parse an unsigned PM QoS timeout (us) and apply it under
 * the host lock; out-of-range or unparsable values are silently ignored.
 */
static ssize_t
store_sdhci_pm_qos_tout(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct sdhci_host *host = dev_get_drvdata(dev);
	unsigned long flags;
	uint32_t value;

	if (kstrtou32(buf, 0, &value))
		return count;

	spin_lock_irqsave(&host->lock, flags);
	if (value <= MAX_PM_QOS_TIMEOUT_VALUE)
		host->pm_qos_timeout_us = value;
	spin_unlock_irqrestore(&host->lock, flags);

	return count;
}
/*****************************************************************************\
* *
* Low level functions *
* *
\*****************************************************************************/
/*
 * Rewrite the interrupt enable mask: drop the bits in @clear, add the
 * bits in @set, and mirror the result into the signal-enable register.
 */
static void sdhci_clear_set_irqs(struct sdhci_host *host, u32 clear, u32 set)
{
	u32 ier = sdhci_readl(host, SDHCI_INT_ENABLE);

	ier = (ier & ~clear) | set;
	sdhci_writel(host, ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, ier, SDHCI_SIGNAL_ENABLE);
}
/* Enable the interrupts in @irqs without touching any other bits. */
static void sdhci_unmask_irqs(struct sdhci_host *host, u32 irqs)
{
	sdhci_clear_set_irqs(host, 0, irqs);
}
/* Disable the interrupts in @irqs without touching any other bits. */
static void sdhci_mask_irqs(struct sdhci_host *host, u32 irqs)
{
	sdhci_clear_set_irqs(host, irqs, 0);
}
/*
 * Enable or disable the card insert/remove interrupt that is relevant for
 * the card's current presence state. Skipped entirely for hosts with
 * broken card detection or non-removable cards.
 */
static void sdhci_set_card_detection(struct sdhci_host *host, bool enable)
{
	bool present;
	u32 irqs;

	if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) ||
	    (host->mmc->caps & MMC_CAP_NONREMOVABLE))
		return;

	present = !!(sdhci_readl(host, SDHCI_PRESENT_STATE) &
		     SDHCI_CARD_PRESENT);
	/* Card present -> watch for removal; absent -> watch for insertion. */
	irqs = present ? SDHCI_INT_CARD_REMOVE : SDHCI_INT_CARD_INSERT;

	if (enable)
		sdhci_unmask_irqs(host, irqs);
	else
		sdhci_mask_irqs(host, irqs);
}
/* Convenience wrapper: enable card insert/remove interrupts. */
static void sdhci_enable_card_detection(struct sdhci_host *host)
{
	sdhci_set_card_detection(host, true);
}
/* Convenience wrapper: disable card insert/remove interrupts. */
static void sdhci_disable_card_detection(struct sdhci_host *host)
{
	sdhci_set_card_detection(host, false);
}
/*
 * Issue a software reset (@mask selects CMD/DATA/ALL lines) and poll up to
 * 100 ms for the hardware to clear the reset bit. Honors the no-card and
 * restore-IRQs quirks and the platform reset enter/exit hooks.
 */
static void sdhci_reset(struct sdhci_host *host, u8 mask)
{
	unsigned long timeout;
	u32 uninitialized_var(ier);

	/* Some controllers hang if reset while no card is present. */
	if (host->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) {
		if (!(sdhci_readl(host, SDHCI_PRESENT_STATE) &
			SDHCI_CARD_PRESENT))
			return;
	}

	/* Snapshot the IRQ mask so it can be restored after a full reset. */
	if (host->quirks & SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET)
		ier = sdhci_readl(host, SDHCI_INT_ENABLE);

	if (host->ops->platform_reset_enter)
		host->ops->platform_reset_enter(host, mask);

	sdhci_writeb(host, mask, SDHCI_SOFTWARE_RESET);

	/* A full reset clobbers the clock configuration too. */
	if (mask & SDHCI_RESET_ALL)
		host->clock = 0;

	/* Wait max 100 ms */
	timeout = 100;

	if (host->ops->check_power_status && host->pwr &&
	    (mask & SDHCI_RESET_ALL))
		host->ops->check_power_status(host, REQ_BUS_OFF);

	/* hw clears the bit when it's done */
	while (sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask) {
		if (timeout == 0) {
			pr_err("%s: Reset 0x%x never completed.\n",
				mmc_hostname(host->mmc), (int)mask);
			sdhci_dumpregs(host);
			return;
		}
		timeout--;
		mdelay(1);
	}

	if (host->ops->platform_reset_exit)
		host->ops->platform_reset_exit(host, mask);

	/* clear pending normal/error interrupt status */
	sdhci_writel(host, sdhci_readl(host, SDHCI_INT_STATUS),
		SDHCI_INT_STATUS);

	if (host->quirks & SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET)
		sdhci_clear_set_irqs(host, SDHCI_INT_ALL_MASK, ier);

	/* Re-enable DMA after a full reset if this host uses it. */
	if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
		if ((host->ops->enable_dma) && (mask & SDHCI_RESET_ALL))
			host->ops->enable_dma(host);
	}
}
static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios);
/*
 * (Re)initialise the controller: soft resets only CMD+DATA, otherwise a
 * full reset is done. Then program the default interrupt set and, for a
 * soft init, re-apply the current ios so the clock is reprogrammed.
 */
static void sdhci_init(struct sdhci_host *host, int soft)
{
	if (soft)
		sdhci_reset(host, SDHCI_RESET_CMD|SDHCI_RESET_DATA);
	else
		sdhci_reset(host, SDHCI_RESET_ALL);

	sdhci_clear_set_irqs(host, SDHCI_INT_ALL_MASK,
		SDHCI_INT_BUS_POWER | SDHCI_INT_DATA_END_BIT |
		SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT | SDHCI_INT_INDEX |
		SDHCI_INT_END_BIT | SDHCI_INT_CRC | SDHCI_INT_TIMEOUT |
		SDHCI_INT_DATA_END | SDHCI_INT_RESPONSE |
		SDHCI_INT_AUTO_CMD_ERR);

	if (soft) {
		/* force clock reconfiguration */
		host->clock = 0;
		sdhci_set_ios(host->mmc, &host->mmc->ios);
	}
}
/* Full re-init (e.g. on card change) plus reset of retuning state. */
static void sdhci_reinit(struct sdhci_host *host)
{
	sdhci_init(host, 0);
	/*
	 * Retuning stuffs are affected by different cards inserted and only
	 * applicable to UHS-I cards. So reset these fields to their initial
	 * value when card is removed.
	 */
	if (host->flags & SDHCI_USING_RETUNING_TIMER) {
		host->flags &= ~SDHCI_USING_RETUNING_TIMER;

		del_timer_sync(&host->tuning_timer);
		host->flags &= ~SDHCI_NEEDS_RETUNING;
		host->mmc->max_blk_count =
			(host->quirks & SDHCI_QUIRK_NO_MULTIBLOCK) ? 1 : 65535;
	}
	sdhci_enable_card_detection(host);
}
/* Light the activity LED by setting the LED bit in HOST_CONTROL. */
static void sdhci_activate_led(struct sdhci_host *host)
{
	u8 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);

	ctrl |= SDHCI_CTRL_LED;
	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
}
/* Extinguish the activity LED by clearing the LED bit in HOST_CONTROL. */
static void sdhci_deactivate_led(struct sdhci_host *host)
{
	u8 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);

	ctrl &= ~SDHCI_CTRL_LED;
	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
}
#ifdef SDHCI_USE_LEDS_CLASS
/*
 * LED-class brightness callback: toggle the controller LED under the host
 * lock, unless the host is runtime-suspended or in a bad state.
 */
static void sdhci_led_control(struct led_classdev *led,
	enum led_brightness brightness)
{
	struct sdhci_host *host = container_of(led, struct sdhci_host, led);
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	/* Don't touch the register while the host is suspended/unstable. */
	if (host->runtime_suspended || sdhci_check_state(host))
		goto out;

	if (brightness == LED_OFF)
		sdhci_deactivate_led(host);
	else
		sdhci_activate_led(host);
out:
	spin_unlock_irqrestore(&host->lock, flags);
}
#endif
/*****************************************************************************\
* *
* Core functions *
* *
\*****************************************************************************/
/*
 * PIO-read one block: pull 32-bit words from the data port and scatter
 * them byte-by-byte into the request's sg list via the sg_miter.
 * Runs with local IRQs disabled around the sg_miter usage.
 */
static void sdhci_read_block_pio(struct sdhci_host *host)
{
	unsigned long flags;
	size_t blksize, len, chunk;
	u32 uninitialized_var(scratch);
	u8 *buf;

	DBG("PIO reading\n");

	blksize = host->data->blksz;
	chunk = 0;	/* bytes still valid in 'scratch' */

	local_irq_save(flags);

	while (blksize) {
		if (!sg_miter_next(&host->sg_miter))
			BUG();

		len = min(host->sg_miter.length, blksize);

		blksize -= len;
		host->sg_miter.consumed = len;

		buf = host->sg_miter.addr;

		while (len) {
			/* Refill the 4-byte scratch word from the FIFO. */
			if (chunk == 0) {
				scratch = sdhci_readl(host, SDHCI_BUFFER);
				chunk = 4;
			}

			*buf = scratch & 0xFF;

			buf++;
			scratch >>= 8;
			chunk--;
			len--;
		}
	}

	sg_miter_stop(&host->sg_miter);

	local_irq_restore(flags);
}
/*
 * PIO-write one block: gather bytes from the sg list into 32-bit words
 * and push them to the data port; the final (possibly partial) word is
 * flushed when both the segment and the block are exhausted.
 */
static void sdhci_write_block_pio(struct sdhci_host *host)
{
	unsigned long flags;
	size_t blksize, len, chunk;
	u32 scratch;
	u8 *buf;

	DBG("PIO writing\n");

	blksize = host->data->blksz;
	chunk = 0;	/* bytes accumulated in 'scratch' */
	scratch = 0;

	local_irq_save(flags);

	while (blksize) {
		if (!sg_miter_next(&host->sg_miter))
			BUG();

		len = min(host->sg_miter.length, blksize);

		blksize -= len;
		host->sg_miter.consumed = len;

		buf = host->sg_miter.addr;

		while (len) {
			scratch |= (u32)*buf << (chunk * 8);

			buf++;
			chunk++;
			len--;

			/* Full word, or last bytes of the block: flush. */
			if ((chunk == 4) || ((len == 0) && (blksize == 0))) {
				sdhci_writel(host, scratch, SDHCI_BUFFER);
				chunk = 0;
				scratch = 0;
			}
		}
	}

	sg_miter_stop(&host->sg_miter);

	local_irq_restore(flags);
}
/*
 * Drive a PIO transfer: while the controller signals buffer availability,
 * read or write one block at a time until all blocks are done.
 */
static void sdhci_transfer_pio(struct sdhci_host *host)
{
	u32 mask;

	BUG_ON(!host->data);

	if (host->blocks == 0)
		return;

	if (host->data->flags & MMC_DATA_READ)
		mask = SDHCI_DATA_AVAILABLE;
	else
		mask = SDHCI_SPACE_AVAILABLE;

	/*
	 * Some controllers (JMicron JMB38x) mess up the buffer bits
	 * for transfers < 4 bytes. As long as it is just one block,
	 * we can ignore the bits.
	 */
	if ((host->quirks & SDHCI_QUIRK_BROKEN_SMALL_PIO) &&
		(host->data->blocks == 1))
		mask = ~0;

	while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
		if (host->quirks & SDHCI_QUIRK_PIO_NEEDS_DELAY)
			udelay(100);

		if (host->data->flags & MMC_DATA_READ)
			sdhci_read_block_pio(host);
		else
			sdhci_write_block_pio(host);

		host->blocks--;
		if (host->blocks == 0)
			break;
	}

	DBG("PIO transfer complete.\n");
}
/*
 * Map an sg entry atomically (IRQs disabled into *flags) and return a
 * pointer to its data; pair with sdhci_kunmap_atomic().
 */
static char *sdhci_kmap_atomic(struct scatterlist *sg, unsigned long *flags)
{
	local_irq_save(*flags);
	return kmap_atomic(sg_page(sg)) + sg->offset;
}
/* Undo sdhci_kmap_atomic(): unmap the page and restore IRQ state. */
static void sdhci_kunmap_atomic(void *buffer, unsigned long *flags)
{
	kunmap_atomic(buffer);
	local_irq_restore(*flags);
}
/*
 * Fill one 8-byte ADMA2 descriptor: 16-bit attributes (@cmd), 16-bit
 * length, then a 32-bit little-endian buffer address.
 */
static void sdhci_set_adma_desc(u8 *desc, u32 addr, int len, unsigned cmd)
{
	__le32 *dataddr = (__le32 __force *)(desc + 4);
	__le16 *cmdlen = (__le16 __force *)desc;

	/* SDHCI specification says ADMA descriptors should be 4 byte
	 * aligned, so using 16 or 32bit operations should be safe. */

	cmdlen[0] = cpu_to_le16(cmd);
	cmdlen[1] = cpu_to_le16(len);

	dataddr[0] = cpu_to_le32(addr);
}
/*
 * Map (or reuse a pre-mapped) scatterlist for a DMA transfer.
 *
 * When @next is non-NULL the mapping is being prepared ahead of time and
 * the result (segment count + cookie) is stashed in @next for a later
 * request. When @next is NULL, a previously prepared mapping is reused if
 * the request's cookie matches host->next_data; otherwise the scatterlist
 * is mapped now.
 *
 * Returns the number of mapped segments, or -EINVAL if dma_map_sg()
 * mapped nothing.
 */
static int sdhci_pre_dma_transfer(struct sdhci_host *host,
				struct mmc_data *data,
				struct sdhci_next *next)
{
	int sg_count;

	/* A stale cookie on a real request means the prepared mapping
	 * belongs to some other request; drop it and map from scratch. */
	if (!next && data->host_cookie &&
	    data->host_cookie != host->next_data.cookie) {
		printk(KERN_WARNING "[%s] invalid cookie: data->host_cookie %d"
		       " host->next_data.cookie %d\n",
		       __func__, data->host_cookie, host->next_data.cookie);
		data->host_cookie = 0;
	}

	/*
	 * Map now unless a matching pre-prepared mapping can be reused.
	 * (The original condition was "next || (!next && cookie mismatch)";
	 * the "!next &&" was redundant inside the second disjunct.)
	 */
	if (next || data->host_cookie != host->next_data.cookie) {
		sg_count = dma_map_sg(mmc_dev(host->mmc), data->sg,
				      data->sg_len,
				      (data->flags & MMC_DATA_WRITE) ?
				      DMA_TO_DEVICE : DMA_FROM_DEVICE);
	} else {
		sg_count = host->next_data.sg_count;
		host->next_data.sg_count = 0;	/* consume the prepared map */
	}

	if (sg_count == 0)
		return -EINVAL;

	if (next) {
		next->sg_count = sg_count;
		/* Keep the cookie positive across counter wrap-around. */
		data->host_cookie = ++next->cookie < 0 ? 1 : next->cookie;
	} else
		host->sg_count = sg_count;

	return sg_count;
}
/*
 * Build the ADMA2 descriptor table for @data: map the scatterlist, emit
 * one "tran" descriptor per segment, bounce up to 3 leading bytes of any
 * non-4-byte-aligned segment through the align buffer, and terminate the
 * table. Returns 0 on success, -EINVAL if mapping failed.
 */
static int sdhci_adma_table_pre(struct sdhci_host *host,
	struct mmc_data *data)
{
	u8 *desc;
	u8 *align;
	dma_addr_t addr;
	dma_addr_t align_addr;
	int len, offset;

	struct scatterlist *sg;
	int i;
	char *buffer;
	unsigned long flags;

	/*
	 * The spec does not specify endianness of descriptor table.
	 * We currently guess that it is LE.
	 */

	host->sg_count = sdhci_pre_dma_transfer(host, data, NULL);
	if (host->sg_count < 0)
		goto fail;

	desc = host->adma_desc;
	align = host->align_buffer;

	align_addr = host->align_addr;

	for_each_sg(data->sg, sg, host->sg_count, i) {
		addr = sg_dma_address(sg);
		len = sg_dma_len(sg);

		/*
		 * The SDHCI specification states that ADMA
		 * addresses must be 32-bit aligned. If they
		 * aren't, then we use a bounce buffer for
		 * the (up to three) bytes that screw up the
		 * alignment.
		 */
		offset = (4 - (addr & 0x3)) & 0x3;
		if (offset) {
			/* For writes, copy the misaligned head into the
			 * bounce buffer before pointing a descriptor at it. */
			if (data->flags & MMC_DATA_WRITE) {
				buffer = sdhci_kmap_atomic(sg, &flags);
				WARN_ON(((long)buffer & PAGE_MASK) > (PAGE_SIZE - 3));
				memcpy(align, buffer, offset);
				sdhci_kunmap_atomic(buffer, &flags);
			}

			/* tran, valid */
			sdhci_set_adma_desc(desc, align_addr, offset, 0x21);

			BUG_ON(offset > 65536);

			align += 4;
			align_addr += 4;

			desc += 8;

			addr += offset;
			len -= offset;
		}

		BUG_ON(len > 65536);

		/* tran, valid */
		sdhci_set_adma_desc(desc, addr, len, 0x21);
		desc += 8;

		/*
		 * If this triggers then we have a calculation bug
		 * somewhere. :/
		 */
		WARN_ON((desc - host->adma_desc) > host->adma_desc_sz);
	}

	if (host->quirks & SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC) {
		/*
		 * Mark the last descriptor as the terminating descriptor
		 */

		if (desc != host->adma_desc) {
			desc -= 8;
			desc[0] |= 0x2; /* end */
		}
	} else {
		/*
		 * Add a terminating entry.
		 */

		/* nop, end, valid */
		sdhci_set_adma_desc(desc, 0, 0, 0x3);
	}

	return 0;

fail:
	return -EINVAL;
}
/*
 * Tear down after an ADMA transfer: for reads, copy back any bytes that
 * were bounced through the align buffer for unaligned segments, then
 * unmap the scatterlist (unless it is owned by a pre-prepared cookie).
 */
static void sdhci_adma_table_post(struct sdhci_host *host,
	struct mmc_data *data)
{
	int direction;

	struct scatterlist *sg;
	int i, size;
	u8 *align;
	char *buffer;
	unsigned long flags;
	bool has_unaligned = false;

	if (data->flags & MMC_DATA_READ) {
		direction = DMA_FROM_DEVICE;

		/* Do a quick scan of the SG list for any unaligned mappings */
		for_each_sg(data->sg, sg, host->sg_count, i) {
			if (sg_dma_address(sg) & 0x3) {
				has_unaligned = true;
				break;
			}
		}
	} else {
		direction = DMA_TO_DEVICE;
	}

	if (has_unaligned) {
		/* Make the DMA'd data visible to the CPU before copying. */
		dma_sync_sg_for_cpu(mmc_dev(host->mmc), data->sg,
			data->sg_len, direction);

		align = host->align_buffer;

		for_each_sg(data->sg, sg, host->sg_count, i) {
			if (sg_dma_address(sg) & 0x3) {
				size = 4 - (sg_dma_address(sg) & 0x3);

				buffer = sdhci_kmap_atomic(sg, &flags);
				WARN_ON(((long)buffer & PAGE_MASK) > (PAGE_SIZE - 3));
				memcpy(buffer, align, size);
				sdhci_kunmap_atomic(buffer, &flags);

				align += 4;
			}
		}
	}

	if (!data->host_cookie)
		dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
			direction);
}
/*
 * Compute the 4-bit data timeout counter for @cmd: derive the target
 * timeout from the command/data, then find the smallest power-of-two
 * multiple of the timeout clock that covers it. 0xE is used as the
 * fallback/maximum unless the reserved-max-timeout quirk is set.
 */
static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd)
{
	u8 count;
	struct mmc_data *data = cmd->data;
	unsigned target_timeout, current_timeout;
	u32 curr_clk = 0; /* In KHz */

	/*
	 * If the host controller provides us with an incorrect timeout
	 * value, just skip the check and use 0xE. The hardware may take
	 * longer to time out, but that's much better than having a too-short
	 * timeout value.
	 */
	if (host->quirks & SDHCI_QUIRK_BROKEN_TIMEOUT_VAL)
		return 0xE;

	/* Unspecified timeout, assume max */
	if (!data && !cmd->cmd_timeout_ms)
		return 0xE;

	/* timeout in us */
	if (!data)
		target_timeout = cmd->cmd_timeout_ms * 1000;
	else {
		target_timeout = data->timeout_ns / 1000;
		if (host->clock)
			target_timeout += data->timeout_clks / host->clock;
	}

	/*
	 * Figure out needed cycles.
	 * We do this in steps in order to fit inside a 32 bit int.
	 * The first step is the minimum timeout, which will have a
	 * minimum resolution of 6 bits:
	 * (1) 2^13*1000 > 2^22,
	 * (2) host->timeout_clk < 2^16
	 *     =>
	 *     (1) / (2) > 2^6
	 */
	count = 0;
	if (host->quirks2 & SDHCI_QUIRK2_ALWAYS_USE_BASE_CLOCK) {
		curr_clk = host->clock / 1000;
		if (host->quirks2 & SDHCI_QUIRK2_DIVIDE_TOUT_BY_4)
			curr_clk /= 4;
		current_timeout = (1 << 13) * 1000 / curr_clk;
	} else {
		current_timeout = (1 << 13) * 1000 / host->timeout_clk;
	}
	while (current_timeout < target_timeout) {
		count++;
		current_timeout <<= 1;
		if (count >= 0xF)
			break;
	}

	if (!(host->quirks2 & SDHCI_QUIRK2_USE_RESERVED_MAX_TIMEOUT)) {
		/* Clamp to the largest non-reserved counter value. */
		if (count >= 0xF) {
			DBG("%s: Too large timeout 0x%x requested for CMD%d!\n",
			    mmc_hostname(host->mmc), count, cmd->opcode);
			count = 0xE;
		}
	}

	return count;
}
static void sdhci_set_transfer_irqs(struct sdhci_host *host)
{
u32 pio_irqs = SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL;
u32 dma_irqs = SDHCI_INT_DMA_END | SDHCI_INT_ADMA_ERROR;
if (host->flags & SDHCI_REQ_USE_DMA)
sdhci_clear_set_irqs(host, pio_irqs, dma_irqs);
else
sdhci_clear_set_irqs(host, dma_irqs, pio_irqs);
}
/*
 * Program the controller for @cmd's data phase: set the timeout, pick
 * DMA (SDMA/ADMA) or PIO — falling back to PIO on quirk-driven size or
 * alignment problems — set up the chosen transfer path, and write the
 * block size/count registers.
 */
static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
{
	u8 count;
	u8 ctrl;
	struct mmc_data *data = cmd->data;
	int ret;

	WARN_ON(host->data);

	/* Timeout applies to data transfers and busy-signalling commands. */
	if (data || (cmd->flags & MMC_RSP_BUSY)) {
		count = sdhci_calc_timeout(host, cmd);
		sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL);
	}

	if (!data)
		return;

	/* Sanity checks */
	BUG_ON(data->blksz * data->blocks > host->mmc->max_req_size);
	BUG_ON(data->blksz > host->mmc->max_blk_size);
	BUG_ON(data->blocks > 65535);

	host->data = data;
	host->data_early = 0;
	host->data->bytes_xfered = 0;

	if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA))
		host->flags |= SDHCI_REQ_USE_DMA;

	/*
	 * FIXME: This doesn't account for merging when mapping the
	 * scatterlist.
	 */
	if (host->flags & SDHCI_REQ_USE_DMA) {
		int broken, i;
		struct scatterlist *sg;

		/* Size quirks: fall back to PIO for non-word-sized segments. */
		broken = 0;
		if (host->flags & SDHCI_USE_ADMA) {
			if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE)
				broken = 1;
		} else {
			if (host->quirks & SDHCI_QUIRK_32BIT_DMA_SIZE)
				broken = 1;
		}

		if (unlikely(broken)) {
			for_each_sg(data->sg, sg, data->sg_len, i) {
				if (sg->length & 0x3) {
					DBG("Reverting to PIO because of "
						"transfer size (%d)\n",
						sg->length);
					host->flags &= ~SDHCI_REQ_USE_DMA;
					break;
				}
			}
		}
	}

	/*
	 * The assumption here being that alignment is the same after
	 * translation to device address space.
	 */
	if (host->flags & SDHCI_REQ_USE_DMA) {
		int broken, i;
		struct scatterlist *sg;

		/* Address quirks: fall back to PIO for misaligned offsets. */
		broken = 0;
		if (host->flags & SDHCI_USE_ADMA) {
			/*
			 * As we use 3 byte chunks to work around
			 * alignment problems, we need to check this
			 * quirk.
			 */
			if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE)
				broken = 1;
		} else {
			if (host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR)
				broken = 1;
		}

		if (unlikely(broken)) {
			for_each_sg(data->sg, sg, data->sg_len, i) {
				if (sg->offset & 0x3) {
					DBG("Reverting to PIO because of "
						"bad alignment\n");
					host->flags &= ~SDHCI_REQ_USE_DMA;
					break;
				}
			}
		}
	}

	if (host->flags & SDHCI_REQ_USE_DMA) {
		if (host->flags & SDHCI_USE_ADMA) {
			ret = sdhci_adma_table_pre(host, data);
			if (ret) {
				/*
				 * This only happens when someone fed
				 * us an invalid request.
				 */
				WARN_ON(1);
				host->flags &= ~SDHCI_REQ_USE_DMA;
			} else {
				sdhci_writel(host, host->adma_addr,
					SDHCI_ADMA_ADDRESS);
			}
		} else {
			int sg_cnt;

			sg_cnt = sdhci_pre_dma_transfer(host, data, NULL);
			if (sg_cnt == 0) {
				/*
				 * This only happens when someone fed
				 * us an invalid request.
				 */
				WARN_ON(1);
				host->flags &= ~SDHCI_REQ_USE_DMA;
			} else {
				/* SDMA can only handle a single segment. */
				WARN_ON(sg_cnt != 1);
				sdhci_writel(host, sg_dma_address(data->sg),
					SDHCI_DMA_ADDRESS);
			}
		}
	}

	/*
	 * Always adjust the DMA selection as some controllers
	 * (e.g. JMicron) can't do PIO properly when the selection
	 * is ADMA.
	 */
	if (host->version >= SDHCI_SPEC_200) {
		ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
		ctrl &= ~SDHCI_CTRL_DMA_MASK;
		if ((host->flags & SDHCI_REQ_USE_DMA) &&
			(host->flags & SDHCI_USE_ADMA))
			ctrl |= SDHCI_CTRL_ADMA32;
		else
			ctrl |= SDHCI_CTRL_SDMA;
		sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
	}

	if (!(host->flags & SDHCI_REQ_USE_DMA)) {
		int flags;

		/* Set up the sg_miter used by the PIO block routines. */
		flags = SG_MITER_ATOMIC;
		if (host->data->flags & MMC_DATA_READ)
			flags |= SG_MITER_TO_SG;
		else
			flags |= SG_MITER_FROM_SG;
		sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
		host->blocks = data->blocks;
	}

	sdhci_set_transfer_irqs(host);

	/* Set the DMA boundary value and block size */
	sdhci_writew(host, SDHCI_MAKE_BLKSZ(SDHCI_DEFAULT_BOUNDARY_ARG,
		data->blksz), SDHCI_BLOCK_SIZE);
	sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT);
}
/*
 * Compose and write the TRANSFER_MODE register for @cmd: multi-block and
 * Auto-CMD12/CMD23 selection, read/write direction (with the vendor CDR
 * toggle hook for tuning/write cases), and the DMA enable bit.
 */
static void sdhci_set_transfer_mode(struct sdhci_host *host,
	struct mmc_command *cmd)
{
	u16 mode;
	struct mmc_data *data = cmd->data;

	if (data == NULL)
		return;

	WARN_ON(!host->data);

	mode = SDHCI_TRNS_BLK_CNT_EN;
	if (mmc_op_multi(cmd->opcode) || data->blocks > 1) {
		mode |= SDHCI_TRNS_MULTI;
		/*
		 * If we are sending CMD23, CMD12 never gets sent
		 * on successful completion (so no Auto-CMD12).
		 */
		if (!host->mrq->sbc && (host->flags & SDHCI_AUTO_CMD12))
			mode |= SDHCI_TRNS_AUTO_CMD12;
		else if (host->mrq->sbc && (host->flags & SDHCI_AUTO_CMD23)) {
			mode |= SDHCI_TRNS_AUTO_CMD23;
			sdhci_writel(host, host->mrq->sbc->arg, SDHCI_ARGUMENT2);
		}
	}

	if (data->flags & MMC_DATA_READ) {
		mode |= SDHCI_TRNS_READ;
		if (host->ops->toggle_cdr) {
			/* CDR off for tuning commands, on for normal reads. */
			if ((cmd->opcode == MMC_SEND_TUNING_BLOCK_HS200) ||
				(cmd->opcode == MMC_SEND_TUNING_BLOCK_HS400) ||
				(cmd->opcode == MMC_SEND_TUNING_BLOCK))
				host->ops->toggle_cdr(host, false);
			else
				host->ops->toggle_cdr(host, true);
		}
	}
	if (host->ops->toggle_cdr && (data->flags & MMC_DATA_WRITE))
		host->ops->toggle_cdr(host, false);
	if (host->flags & SDHCI_REQ_USE_DMA)
		mode |= SDHCI_TRNS_DMA;

	sdhci_writew(host, mode, SDHCI_TRANSFER_MODE);
}
/*
 * Complete the data phase of the current request: tear down DMA
 * mappings, account transferred bytes, and either issue the stop
 * command (CMD12) or schedule the finish tasklet.
 */
static void sdhci_finish_data(struct sdhci_host *host)
{
	struct mmc_data *data;

	BUG_ON(!host->data);

	data = host->data;
	host->data = NULL;

	if (host->flags & SDHCI_REQ_USE_DMA) {
		if (host->flags & SDHCI_USE_ADMA)
			sdhci_adma_table_post(host, data);
		else {
			/*
			 * host_cookie set means the mapping was done in
			 * pre_req and will be undone in post_req.
			 */
			if (!data->host_cookie)
				dma_unmap_sg(mmc_dev(host->mmc), data->sg,
					data->sg_len,
					(data->flags & MMC_DATA_READ) ?
					DMA_FROM_DEVICE : DMA_TO_DEVICE);
		}
	}

	/*
	 * The specification states that the block count register must
	 * be updated, but it does not specify at what point in the
	 * data flow. That makes the register entirely useless to read
	 * back so we have to assume that nothing made it to the card
	 * in the event of an error.
	 */
	if (data->error)
		data->bytes_xfered = 0;
	else
		data->bytes_xfered = data->blksz * data->blocks;

	/*
	 * Need to send CMD12 if -
	 * a) open-ended multiblock transfer (no CMD23)
	 * b) error in multiblock transfer
	 */
	if (data->stop &&
	    (data->error ||
	     !host->mrq->sbc)) {
		/*
		 * The controller needs a reset of internal state machines
		 * upon error conditions.
		 */
		if (data->error) {
			sdhci_reset(host, SDHCI_RESET_CMD);
			sdhci_reset(host, SDHCI_RESET_DATA);
		}

		sdhci_send_command(host, data->stop);
	} else
		tasklet_schedule(&host->finish_tasklet);
}
#define SDHCI_REQUEST_TIMEOUT 10 /* Default request timeout in seconds */

/*
 * Issue @cmd on the controller: wait for the CMD/DAT inhibit bits to
 * clear, arm the software request timer, program argument, data and
 * transfer mode, then write the command register (which starts the
 * command on the bus).
 */
static void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
{
	int flags;
	u32 mask;
	unsigned long timeout;

	WARN_ON(host->cmd);

	/* Wait max 10 ms */
	timeout = 10;

	mask = SDHCI_CMD_INHIBIT;
	if ((cmd->data != NULL) || (cmd->flags & MMC_RSP_BUSY))
		mask |= SDHCI_DATA_INHIBIT;

	/* We shouldn't wait for data inhibit for stop commands, even
	   though they might use busy signaling */
	if (host->mrq->data && (cmd == host->mrq->data->stop))
		mask &= ~SDHCI_DATA_INHIBIT;

	while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
		if (timeout == 0) {
			pr_err("%s: Controller never released "
				"inhibit bit(s).\n", mmc_hostname(host->mmc));
			sdhci_dumpregs(host);
			cmd->error = -EIO;
			tasklet_schedule(&host->finish_tasklet);
			return;
		}
		timeout--;
		mdelay(1);
	}

	mod_timer(&host->timer, jiffies + SDHCI_REQUEST_TIMEOUT * HZ);
	/* Commands with a longer declared timeout get double that as margin. */
	if (cmd->cmd_timeout_ms > SDHCI_REQUEST_TIMEOUT * MSEC_PER_SEC)
		mod_timer(&host->timer, jiffies +
			(msecs_to_jiffies(cmd->cmd_timeout_ms * 2)));

	host->cmd = cmd;

	sdhci_prepare_data(host, cmd);

	sdhci_writel(host, cmd->arg, SDHCI_ARGUMENT);

	sdhci_set_transfer_mode(host, cmd);

	/* R2 (136-bit) responses cannot signal busy per the SDHC spec. */
	if ((cmd->flags & MMC_RSP_136) && (cmd->flags & MMC_RSP_BUSY)) {
		pr_err("%s: Unsupported response type!\n",
			mmc_hostname(host->mmc));
		cmd->error = -EINVAL;
		tasklet_schedule(&host->finish_tasklet);
		return;
	}

	if (!(cmd->flags & MMC_RSP_PRESENT))
		flags = SDHCI_CMD_RESP_NONE;
	else if (cmd->flags & MMC_RSP_136)
		flags = SDHCI_CMD_RESP_LONG;
	else if (cmd->flags & MMC_RSP_BUSY)
		flags = SDHCI_CMD_RESP_SHORT_BUSY;
	else
		flags = SDHCI_CMD_RESP_SHORT;

	if (cmd->flags & MMC_RSP_CRC)
		flags |= SDHCI_CMD_CRC;
	if (cmd->flags & MMC_RSP_OPCODE)
		flags |= SDHCI_CMD_INDEX;

	/* CMD19 is special in that the Data Present Select should be set */
	if (cmd->data || cmd->opcode == MMC_SEND_TUNING_BLOCK ||
	    cmd->opcode == MMC_SEND_TUNING_BLOCK_HS400 ||
	    cmd->opcode == MMC_SEND_TUNING_BLOCK_HS200)
		flags |= SDHCI_CMD_DATA;

	if (cmd->data)
		host->data_start_time = ktime_get();
	/* Writing the command register starts execution. */
	sdhci_writew(host, SDHCI_MAKE_CMD(cmd->opcode, flags), SDHCI_COMMAND);
}
/*
 * Read back the response of the just-completed command and advance the
 * request state machine: after CMD23 (sbc) send the real command; after
 * the real command finish early-completed data or schedule the tasklet.
 */
static void sdhci_finish_command(struct sdhci_host *host)
{
	int i;

	BUG_ON(host->cmd == NULL);

	if (host->cmd->flags & MMC_RSP_PRESENT) {
		if (host->cmd->flags & MMC_RSP_136) {
			/* CRC is stripped so we need to do some shifting. */
			for (i = 0;i < 4;i++) {
				host->cmd->resp[i] = sdhci_readl(host,
					SDHCI_RESPONSE + (3-i)*4) << 8;
				if (i != 3)
					host->cmd->resp[i] |=
						sdhci_readb(host,
						SDHCI_RESPONSE + (3-i)*4-1);
			}
		} else {
			host->cmd->resp[0] = sdhci_readl(host, SDHCI_RESPONSE);
		}
	}

	host->cmd->error = 0;

	/* Finished CMD23, now send actual command. */
	if (host->cmd == host->mrq->sbc) {
		host->cmd = NULL;
		sdhci_send_command(host, host->mrq->cmd);
	} else {

		/* Processed actual command. */
		if (host->data && host->data_early)
			sdhci_finish_data(host);

		if (!host->cmd->data)
			tasklet_schedule(&host->finish_tasklet);

		host->cmd = NULL;
	}
}
/*
 * Return the preset value register matching the UHS mode currently
 * selected in HOST_CONTROL2.  Falls back to the SDR12 preset (with a
 * warning) for an unrecognized mode.
 */
static u16 sdhci_get_preset_value(struct sdhci_host *host)
{
	u16 ctrl, preset = 0;

	ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);

	switch (ctrl & SDHCI_CTRL_UHS_MASK) {
	case SDHCI_CTRL_UHS_SDR12:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
		break;
	case SDHCI_CTRL_UHS_SDR25:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR25);
		break;
	case SDHCI_CTRL_UHS_SDR50:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR50);
		break;
	case SDHCI_CTRL_UHS_SDR104:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR104);
		break;
	case SDHCI_CTRL_UHS_DDR50:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_DDR50);
		break;
	default:
		pr_warn("%s: Invalid UHS-I mode selected\n",
			mmc_hostname(host->mmc));
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
		break;
	}
	return preset;
}
/*
 * Set the SD bus clock to @clock Hz (0 turns the clock off).
 *
 * Handles platform set_clock hooks, preset-value divisors and both the
 * v3.00 (10-bit divisor / programmable clock) and v2.00 (power-of-two
 * divisor) schemes, then waits for the internal clock to stabilise
 * before enabling the card clock.  Takes and releases host->lock;
 * host->clock is updated to the requested value on success.
 */
static void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
{
	int div = 0; /* Initialized for compiler warning */
	int real_div = div, clk_mul = 1;
	u16 clk = 0;
	unsigned long timeout;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	/* Already running at the requested frequency - nothing to do. */
	if (clock && clock == host->clock)
		goto ret;

	host->mmc->actual_clock = 0;

	if (host->ops->set_clock) {
		/* Platform hook may sleep; call it without the lock. */
		spin_unlock_irqrestore(&host->lock, flags);
		host->ops->set_clock(host, clock);
		spin_lock_irqsave(&host->lock, flags);
		if (host->quirks & SDHCI_QUIRK_NONSTANDARD_CLOCK)
			goto ret;
	}

	if (host->clock)
		sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);

	if (clock == 0)
		goto out;

	if (host->version >= SDHCI_SPEC_300) {
		if (sdhci_readw(host, SDHCI_HOST_CONTROL2) &
			SDHCI_CTRL_PRESET_VAL_ENABLE) {
			u16 pre_val;

			/* Divisor comes from the per-mode preset register. */
			clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
			pre_val = sdhci_get_preset_value(host);
			div = (pre_val & SDHCI_PRESET_SDCLK_FREQ_MASK)
				>> SDHCI_PRESET_SDCLK_FREQ_SHIFT;
			if (host->clk_mul &&
				(pre_val & SDHCI_PRESET_CLKGEN_SEL_MASK)) {
				clk = SDHCI_PROG_CLOCK_MODE;
				real_div = div + 1;
				clk_mul = host->clk_mul;
			} else {
				real_div = max_t(int, 1, div << 1);
			}
			goto clock_set;
		}

		/*
		 * Check if the Host Controller supports Programmable Clock
		 * Mode.
		 */
		if (host->clk_mul) {
			for (div = 1; div <= 1024; div++) {
				if ((host->max_clk * host->clk_mul / div)
					<= clock)
					break;
			}
			/*
			 * Set Programmable Clock Mode in the Clock
			 * Control register.
			 */
			clk = SDHCI_PROG_CLOCK_MODE;
			real_div = div;
			clk_mul = host->clk_mul;
			div--;
		} else {
			/* Version 3.00 divisors must be a multiple of 2. */
			if (host->max_clk <= clock)
				div = 1;
			else {
				for (div = 2; div < SDHCI_MAX_DIV_SPEC_300;
				     div += 2) {
					if ((host->max_clk / div) <= clock)
						break;
				}
			}
			real_div = div;
			div >>= 1;
		}
	} else {
		/* Version 2.00 divisors must be a power of 2. */
		for (div = 1; div < SDHCI_MAX_DIV_SPEC_200; div *= 2) {
			if ((host->max_clk / div) <= clock)
				break;
		}
		real_div = div;
		div >>= 1;
	}

clock_set:
	if (real_div)
		host->mmc->actual_clock = (host->max_clk * clk_mul) / real_div;

	if (host->quirks2 & SDHCI_QUIRK2_ALWAYS_USE_BASE_CLOCK)
		div = 0;

	/* The 10-bit divisor is split across two fields in the register. */
	clk |= (div & SDHCI_DIV_MASK) << SDHCI_DIVIDER_SHIFT;
	clk |= ((div & SDHCI_DIV_HI_MASK) >> SDHCI_DIV_MASK_LEN)
		<< SDHCI_DIVIDER_HI_SHIFT;
	clk |= SDHCI_CLOCK_INT_EN;
	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

	/* Wait max 20 ms */
	timeout = 20;
	while (!((clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL))
		& SDHCI_CLOCK_INT_STABLE)) {
		if (timeout == 0) {
			pr_err("%s: Internal clock never "
				"stabilised.\n", mmc_hostname(host->mmc));
			sdhci_dumpregs(host);
			goto ret;
		}
		timeout--;
		mdelay(1);
	}

	clk |= SDHCI_CLOCK_CARD_EN;
	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

out:
	host->clock = clock;
ret:
	spin_unlock_irqrestore(&host->lock, flags);
}
/*
 * Re-program the current bus clock by forcing host->clock to 0 and
 * calling sdhci_set_clock() with the saved value.
 *
 * Called with host->lock held; the lock is dropped around the
 * sdhci_set_clock() call (which takes the lock itself) and re-acquired,
 * so the refreshed IRQ @flags are returned to the caller.
 */
static inline unsigned long sdhci_update_clock(struct sdhci_host *host,
		unsigned long flags)
{
	unsigned int clock;

	clock = host->clock;
	host->clock = 0;
	spin_unlock_irqrestore(&host->lock, flags);
	sdhci_set_clock(host, clock);
	spin_lock_irqsave(&host->lock, flags);
	return flags;
}
/*
 * Program the bus power register for the voltage selected by @power
 * (an MMC_VDD_* bit number, or (unsigned short)-1 to power off).
 *
 * Returns @power when the register was changed (so the caller can pass
 * the bit to the regulator layer), 0 after powering off, and -1 when
 * the requested setting was already in effect.
 */
static int sdhci_set_power(struct sdhci_host *host, unsigned short power)
{
	u8 pwr = 0;

	if (power != (unsigned short)-1) {
		switch (1 << power) {
		case MMC_VDD_165_195:
			pwr = SDHCI_POWER_180;
			break;
		case MMC_VDD_29_30:
		case MMC_VDD_30_31:
			pwr = SDHCI_POWER_300;
			break;
		case MMC_VDD_32_33:
		case MMC_VDD_33_34:
			pwr = SDHCI_POWER_330;
			break;
		default:
			BUG();
		}
	}

	if (host->pwr == pwr)
		return -1;

	host->pwr = pwr;

	if (pwr == 0) {
		sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
		if (host->ops->check_power_status)
			host->ops->check_power_status(host, REQ_BUS_OFF);
		return 0;
	}

	/*
	 * Spec says that we should clear the power reg before setting
	 * a new value. Some controllers don't seem to like this though.
	 */
	if (!(host->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE)) {
		sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
		if (host->ops->check_power_status)
			host->ops->check_power_status(host, REQ_BUS_OFF);
	}

	/*
	 * At least the Marvell CaFe chip gets confused if we set the voltage
	 * and set turn on power at the same time, so set the voltage first.
	 */
	if (host->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER) {
		sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
		if (host->ops->check_power_status)
			host->ops->check_power_status(host, REQ_BUS_ON);
	}

	pwr |= SDHCI_POWER_ON;

	sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
	if (host->ops->check_power_status)
		host->ops->check_power_status(host, REQ_BUS_ON);

	/*
	 * Some controllers need an extra 10 ms delay before they
	 * can apply clock after applying power
	 */
	if (host->quirks & SDHCI_QUIRK_DELAY_AFTER_POWER)
		mdelay(10);

	return power;
}
/*****************************************************************************\
* *
* MMC callbacks *
* *
\*****************************************************************************/
/*
 * mmc_host_ops enable hook: raise the CPU DMA latency QoS vote and cast
 * the platform bus vote so the host is ready to process requests.
 * Always returns 0.
 */
static int sdhci_enable(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);

	if (host->cpu_dma_latency_us)
		pm_qos_update_request(&host->pm_qos_req_dma,
					host->cpu_dma_latency_us);
	if (host->ops->platform_bus_voting)
		host->ops->platform_bus_voting(host, 1);

	return 0;
}
/*
 * mmc_host_ops disable hook: release the CPU DMA latency QoS vote
 * (deferred by a timeout in performance mode) and drop the platform bus
 * vote.  Always returns 0.
 */
static int sdhci_disable(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);

	if (host->cpu_dma_latency_us) {
		/*
		 * In performance mode, release QoS vote after a timeout to
		 * make sure back-to-back requests don't suffer from latencies
		 * that are involved to wake CPU from low power modes in cases
		 * where the CPU goes into low power mode as soon as QoS vote is
		 * released.
		 */
		if (host->power_policy == SDHCI_PERFORMANCE_MODE)
			pm_qos_update_request_timeout(&host->pm_qos_req_dma,
					host->cpu_dma_latency_us,
					host->pm_qos_timeout_us);
		else
			pm_qos_update_request(&host->pm_qos_req_dma,
					PM_QOS_DEFAULT_VALUE);
	}

	if (host->ops->platform_bus_voting)
		host->ops->platform_bus_voting(host, 0);

	return 0;
}
/* Record the host's current power policy (consulted by sdhci_disable()). */
static inline void sdhci_update_power_policy(struct sdhci_host *host,
		enum sdhci_power_policy policy)
{
	host->power_policy = policy;
}
/*
 * mmc_host_ops notify_load hook: translate the core's load hint into
 * the corresponding host power policy.
 *
 * Returns 0 on success, -EINVAL for an unknown load state.
 */
static int sdhci_notify_load(struct mmc_host *mmc, enum mmc_load state)
{
	struct sdhci_host *host = mmc_priv(mmc);

	switch (state) {
	case MMC_LOAD_HIGH:
		sdhci_update_power_policy(host, SDHCI_PERFORMANCE_MODE);
		return 0;
	case MMC_LOAD_INIT:
		sdhci_update_power_policy(host, SDHCI_PERFORMANCE_MODE_INIT);
		return 0;
	case MMC_LOAD_LOW:
		sdhci_update_power_policy(host, SDHCI_POWER_SAVE_MODE);
		return 0;
	default:
		return -EINVAL;
	}
}
/*
 * mmc_host_ops pre_req hook: pre-map the request's scatterlist for DMA
 * ahead of sdhci_request().  host_cookie is cleared when no mapping was
 * (or could be) done, so the request path falls back to mapping itself.
 */
static void sdhci_pre_req(struct mmc_host *mmc, struct mmc_request *mrq,
			  bool is_first_req)
{
	struct sdhci_host *host = mmc_priv(mmc);

	if (mrq->data->host_cookie) {
		mrq->data->host_cookie = 0;
		return;
	}

	if (host->flags & SDHCI_REQ_USE_DMA)
		if (sdhci_pre_dma_transfer(host, mrq->data, &host->next_data) < 0)
			mrq->data->host_cookie = 0;
}
/*
 * mmc_host_ops post_req hook: undo the DMA mapping created in
 * sdhci_pre_req() once the request has completed.
 */
static void sdhci_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
			   int err)
{
	struct sdhci_host *host = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (host->flags & SDHCI_REQ_USE_DMA) {
		dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
			     (data->flags & MMC_DATA_WRITE) ?
			     DMA_TO_DEVICE : DMA_FROM_DEVICE);
		data->host_cookie = 0;
	}
}
/*
 * Return true when the host is in no state to accept a request: the
 * clock or bus power is off, or (when core runtime PM is used) the
 * parent device is runtime-suspended.
 */
static bool sdhci_check_state(struct sdhci_host *host)
{
	struct mmc_host *mmc = host->mmc;

	if (!host->clock || !host->pwr)
		return true;

	return mmc_use_core_runtime_pm(mmc) ?
		pm_runtime_suspended(mmc->parent) : false;
}
/*
 * Decide whether auto-tuning should be engaged for @cmd: only for
 * read-type data commands at >= 100 MHz in HS200 or SDR104 timing.
 */
static bool sdhci_check_auto_tuning(struct sdhci_host *host,
				  struct mmc_command *cmd)
{
	bool read_cmd = (cmd->opcode == MMC_READ_SINGLE_BLOCK) ||
			(cmd->opcode == MMC_READ_MULTIPLE_BLOCK) ||
			(cmd->opcode == SD_IO_RW_EXTENDED);

	if (!read_cmd || (host->clock < 100000000))
		return false;

	return (host->mmc->ios.timing == MMC_TIMING_MMC_HS200) ||
	       (host->mmc->ios.timing == MMC_TIMING_UHS_SDR104);
}
/*
 * Pick the tuning opcode for the attached card: CMD21 for eMMC,
 * CMD19 for SD/SDIO.  Returns 0 when no card is present.
 */
static int sdhci_get_tuning_cmd(struct sdhci_host *host)
{
	if (!host->mmc || !host->mmc->card)
		return 0;

	/*
	 * If we are here, all conditions have already been true
	 * and the card can either be an eMMC or SD/SDIO
	 */
	return mmc_card_mmc(host->mmc->card) ?
		MMC_SEND_TUNING_BLOCK_HS200 : MMC_SEND_TUNING_BLOCK;
}
/*
 * Enable or disable the host's interrupt line, tracking the state in
 * host->irq_enabled so repeated calls are idempotent.  @sync selects
 * disable_irq() (waits for in-flight handlers) vs disable_irq_nosync().
 */
void sdhci_cfg_irq(struct sdhci_host *host, bool enable, bool sync)
{
	if (enable && !host->irq_enabled) {
		enable_irq(host->irq);
		host->irq_enabled = true;
	} else if (!enable && host->irq_enabled) {
		if (sync)
			disable_irq(host->irq);
		else
			disable_irq_nosync(host->irq);
		host->irq_enabled = false;
	}
}
EXPORT_SYMBOL(sdhci_cfg_irq);
/*
 * mmc_host_ops request hook: entry point for every MMC request.
 * Bails out with -EIO if the host is in a bad state, strips the stop
 * command when Auto-CMD12 covers it, fails fast with -ENOMEDIUM if no
 * card is present, runs a pending re-tuning cycle if needed, and
 * finally issues CMD23 (sbc) or the main command.
 */
static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct sdhci_host *host;
	bool present;
	unsigned long flags;
	u32 tuning_opcode;

	host = mmc_priv(mmc);

	sdhci_runtime_pm_get(host);
	if (sdhci_check_state(host)) {
		sdhci_dump_state(host);
		WARN(1, "sdhci in bad state");
		mrq->cmd->error = -EIO;
		if (mrq->data)
			mrq->data->error = -EIO;
		mmc_request_done(host->mmc, mrq);
		sdhci_runtime_pm_put(host);
		return;
	}
	spin_lock_irqsave(&host->lock, flags);

	WARN_ON(host->mrq != NULL);

#ifndef SDHCI_USE_LEDS_CLASS
	sdhci_activate_led(host);
#endif

	/*
	 * Ensure we don't send the STOP for non-SET_BLOCK_COUNTED
	 * requests if Auto-CMD12 is enabled.
	 */
	if (!mrq->sbc && (host->flags & SDHCI_AUTO_CMD12)) {
		if (mrq->stop) {
			mrq->data->stop = NULL;
			mrq->stop = NULL;
		}
	}

	host->mrq = mrq;

	/* If polling, assume that the card is always present. */
	if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
		present = true;
	else
		present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
				SDHCI_CARD_PRESENT;

	if (!present || host->flags & SDHCI_DEVICE_DEAD) {
		host->mrq->cmd->error = -ENOMEDIUM;
		tasklet_schedule(&host->finish_tasklet);
	} else {
		u32 present_state;

		present_state = sdhci_readl(host, SDHCI_PRESENT_STATE);
		/*
		 * Check if the re-tuning timer has already expired and there
		 * is no on-going data transfer. If so, we need to execute
		 * tuning procedure before sending command.
		 */
		if ((mrq->cmd->opcode != MMC_SEND_TUNING_BLOCK) &&
		    (mrq->cmd->opcode != MMC_SEND_TUNING_BLOCK_HS400) &&
		    (mrq->cmd->opcode != MMC_SEND_TUNING_BLOCK_HS200) &&
		    (host->flags & SDHCI_NEEDS_RETUNING) &&
		    !(present_state & (SDHCI_DOING_WRITE | SDHCI_DOING_READ))) {
			if (mmc->card) {
				/* eMMC uses cmd21 but sd and sdio use cmd19 */
				tuning_opcode =
					mmc->card->type == MMC_TYPE_MMC ?
					MMC_SEND_TUNING_BLOCK_HS200 :
					MMC_SEND_TUNING_BLOCK;
				host->mrq = NULL;
				host->flags &= ~SDHCI_NEEDS_RETUNING;
				/* Tuning takes its own lock/IRQ management. */
				spin_unlock_irqrestore(&host->lock, flags);
				sdhci_execute_tuning(mmc, tuning_opcode);
				spin_lock_irqsave(&host->lock, flags);

				/* Restore original mmc_request structure */
				host->mrq = mrq;
			}
		}

		if (host->ops->config_auto_tuning_cmd) {
			if (sdhci_check_auto_tuning(host, mrq->cmd))
				host->ops->config_auto_tuning_cmd(host, true,
					sdhci_get_tuning_cmd(host));
			else
				host->ops->config_auto_tuning_cmd(host, false,
					sdhci_get_tuning_cmd(host));
		}

		if (mrq->sbc && !(host->flags & SDHCI_AUTO_CMD23))
			sdhci_send_command(host, mrq->sbc);
		else
			sdhci_send_command(host, mrq->cmd);
	}

	mmiowb();
	spin_unlock_irqrestore(&host->lock, flags);
}
/*
 * Toggle the asynchronous interrupt enable bit in HOST_CONTROL2.
 * No-op on hosts that do not support async interrupts.
 */
static void sdhci_cfg_async_intr(struct sdhci_host *host, bool enable)
{
	if (!host->async_int_supp)
		return;

	if (enable)
		sdhci_writew(host,
			     sdhci_readw(host, SDHCI_HOST_CONTROL2) |
			     SDHCI_CTRL_ASYNC_INT_ENABLE,
			     SDHCI_HOST_CONTROL2);
	else
		sdhci_writew(host, sdhci_readw(host, SDHCI_HOST_CONTROL2) &
			     ~SDHCI_CTRL_ASYNC_INT_ENABLE,
			     SDHCI_HOST_CONTROL2);
}
/*
 * Apply the I/O settings in @ios to the controller: clock, bus power
 * and regulators, bus width, high-speed bit, UHS signalling mode,
 * driver strength / preset values, and async-interrupt configuration.
 *
 * The host interrupt line is disabled for the duration because
 * host->lock is released and re-taken at several points.  Serialised
 * against concurrent callers by host->ios_mutex.
 */
static void sdhci_do_set_ios(struct sdhci_host *host, struct mmc_ios *ios)
{
	unsigned long flags;
	int vdd_bit = -1;
	u8 ctrl;
	int ret;

	mutex_lock(&host->ios_mutex);
	if (host->flags & SDHCI_DEVICE_DEAD) {
		if (host->vmmc && ios->power_mode == MMC_POWER_OFF)
			mmc_regulator_set_ocr(host->mmc, host->vmmc, 0);
		mutex_unlock(&host->ios_mutex);
		return;
	}

	spin_lock_irqsave(&host->lock, flags);
	/* lock is being released intermittently below, hence disable irq */
	sdhci_cfg_irq(host, false, false);
	spin_unlock_irqrestore(&host->lock, flags);

	if (ios->clock) {
		sdhci_set_clock(host, ios->clock);
		if (host->async_int_supp && sdhci_get_async_int_status(host)) {
			/* Clock is back: flush any deferred SDIO-irq disable. */
			if (host->disable_sdio_irq_deferred) {
				pr_debug("%s: %s: disable sdio irq\n",
					 mmc_hostname(host->mmc), __func__);
				host->mmc->ops->enable_sdio_irq(host->mmc, 0);
				host->disable_sdio_irq_deferred = false;
			}
			spin_lock_irqsave(&host->lock, flags);
			sdhci_cfg_async_intr(host, false);
			spin_unlock_irqrestore(&host->lock, flags);
			pr_debug("%s: %s: unconfig async intr\n",
				 mmc_hostname(host->mmc), __func__);
		}
	}

	/*
	 * The controller clocks may be off during power-up and we may end up
	 * enabling card clock before giving power to the card. Hence, during
	 * MMC_POWER_UP enable the controller clock and turn-on the regulators.
	 * The mmc_power_up would provide the necessary delay before turning on
	 * the clocks to the card.
	 */
	if (ios->power_mode & MMC_POWER_UP) {
		if (host->ops->enable_controller_clock) {
			ret = host->ops->enable_controller_clock(host);
			if (ret) {
				pr_err("%s: enabling controller clock: failed: %d\n",
				       mmc_hostname(host->mmc), ret);
			} else {
				vdd_bit = sdhci_set_power(host, ios->vdd);
				if (host->vmmc && vdd_bit != -1)
					mmc_regulator_set_ocr(host->mmc,
							      host->vmmc,
							      vdd_bit);
			}
		}
		/*
		 * make sure interrupts are enabled, these are the same
		 * interrupts which get enabled in sdhci_init().
		 */
		sdhci_clear_set_irqs(host, 0,
			SDHCI_INT_BUS_POWER | SDHCI_INT_DATA_END_BIT |
			SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT |
			SDHCI_INT_INDEX |
			SDHCI_INT_END_BIT | SDHCI_INT_CRC | SDHCI_INT_TIMEOUT |
			SDHCI_INT_DATA_END | SDHCI_INT_RESPONSE |
			SDHCI_INT_AUTO_CMD_ERR);
	}

	/* Nothing more to program with the clock off; re-enable irq and exit. */
	spin_lock_irqsave(&host->lock, flags);
	if (!host->clock) {
		sdhci_cfg_irq(host, true, false);
		spin_unlock_irqrestore(&host->lock, flags);
		mutex_unlock(&host->ios_mutex);
		return;
	}
	spin_unlock_irqrestore(&host->lock, flags);

	if (host->version >= SDHCI_SPEC_300 &&
		(ios->power_mode == MMC_POWER_UP))
		sdhci_enable_preset_value(host, false);

	if (!host->ops->enable_controller_clock && (ios->power_mode &
						    (MMC_POWER_UP |
						     MMC_POWER_ON))) {
		vdd_bit = sdhci_set_power(host, ios->vdd);
		if (host->vmmc && vdd_bit != -1)
			mmc_regulator_set_ocr(host->mmc, host->vmmc, vdd_bit);
	}
	spin_lock_irqsave(&host->lock, flags);

	if (host->ops->platform_send_init_74_clocks)
		host->ops->platform_send_init_74_clocks(host, ios->power_mode);

	/*
	 * If your platform has 8-bit width support but is not a v3 controller,
	 * or if it requires special setup code, you should implement that in
	 * platform_8bit_width().
	 */
	if (host->ops->platform_8bit_width)
		host->ops->platform_8bit_width(host, ios->bus_width);
	else {
		ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
		if (ios->bus_width == MMC_BUS_WIDTH_8) {
			ctrl &= ~SDHCI_CTRL_4BITBUS;
			if (host->version >= SDHCI_SPEC_300)
				ctrl |= SDHCI_CTRL_8BITBUS;
		} else {
			if (host->version >= SDHCI_SPEC_300)
				ctrl &= ~SDHCI_CTRL_8BITBUS;
			if (ios->bus_width == MMC_BUS_WIDTH_4)
				ctrl |= SDHCI_CTRL_4BITBUS;
			else
				ctrl &= ~SDHCI_CTRL_4BITBUS;
		}
		sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
	}

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);

	if ((ios->timing == MMC_TIMING_SD_HS ||
	     ios->timing == MMC_TIMING_MMC_HS)
	    && !(host->quirks & SDHCI_QUIRK_NO_HISPD_BIT))
		ctrl |= SDHCI_CTRL_HISPD;
	else
		ctrl &= ~SDHCI_CTRL_HISPD;

	if (host->version >= SDHCI_SPEC_300) {
		u16 clk, ctrl_2;

		/* In case of UHS-I modes, set High Speed Enable */
		if ((ios->timing == MMC_TIMING_MMC_HS400) ||
		    (ios->timing == MMC_TIMING_MMC_HS200) ||
		    (ios->timing == MMC_TIMING_UHS_SDR50) ||
		    (ios->timing == MMC_TIMING_UHS_SDR104) ||
		    (ios->timing == MMC_TIMING_UHS_DDR50) ||
		    (ios->timing == MMC_TIMING_UHS_SDR25))
			ctrl |= SDHCI_CTRL_HISPD;

		ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
		if (!(ctrl_2 & SDHCI_CTRL_PRESET_VAL_ENABLE)) {
			sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
			/*
			 * We only need to set Driver Strength if the
			 * preset value enable is not set.
			 */
			ctrl_2 &= ~SDHCI_CTRL_DRV_TYPE_MASK;
			if (ios->drv_type == MMC_SET_DRIVER_TYPE_A)
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_A;
			else if (ios->drv_type == MMC_SET_DRIVER_TYPE_C)
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_C;

			sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
		} else {
			/*
			 * According to SDHC Spec v3.00, if the Preset Value
			 * Enable in the Host Control 2 register is set, we
			 * need to reset SD Clock Enable before changing High
			 * Speed Enable to avoid generating clock gliches.
			 */

			/* Reset SD Clock Enable */
			clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
			clk &= ~SDHCI_CLOCK_CARD_EN;
			sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

			sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);

			/* Re-enable SD Clock */
			if (ios->clock)
				flags = sdhci_update_clock(host, flags);
		}

		/* Reset SD Clock Enable */
		clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
		clk &= ~SDHCI_CLOCK_CARD_EN;
		sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

		if (host->ops->set_uhs_signaling)
			host->ops->set_uhs_signaling(host, ios->timing);
		else {
			ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
			/* Select Bus Speed Mode for host */
			ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
			if (ios->timing == MMC_TIMING_MMC_HS400)
				ctrl_2 |= SDHCI_CTRL_HS_SDR200;
			else if (ios->timing == MMC_TIMING_MMC_HS200)
				ctrl_2 |= SDHCI_CTRL_HS_SDR200;
			else if (ios->timing == MMC_TIMING_UHS_SDR12)
				ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
			else if (ios->timing == MMC_TIMING_UHS_SDR25)
				ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
			else if (ios->timing == MMC_TIMING_UHS_SDR50)
				ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
			else if (ios->timing == MMC_TIMING_UHS_SDR104)
				ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
			else if (ios->timing == MMC_TIMING_UHS_DDR50)
				ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
			sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
		}

		if (!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN) &&
		    ((ios->timing == MMC_TIMING_UHS_SDR12) ||
		     (ios->timing == MMC_TIMING_UHS_SDR25) ||
		     (ios->timing == MMC_TIMING_UHS_SDR50) ||
		     (ios->timing == MMC_TIMING_UHS_SDR104) ||
		     (ios->timing == MMC_TIMING_UHS_DDR50))) {
			u16 preset;

			sdhci_enable_preset_value(host, true);
			preset = sdhci_get_preset_value(host);
			ios->drv_type = (preset & SDHCI_PRESET_DRV_MASK)
				>> SDHCI_PRESET_DRV_SHIFT;
		}

		/* Re-enable SD Clock */
		if (ios->clock)
			flags = sdhci_update_clock(host, flags);
	} else
		sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);

	spin_unlock_irqrestore(&host->lock, flags);

	/*
	 * Some (ENE) controllers go apeshit on some ios operation,
	 * signalling timeout and CRC errors even on CMD0. Resetting
	 * it on each ios seems to solve the problem.
	 */
	if(host->quirks & SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS)
		sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);

	/*
	 * Reset the chip on each power off.
	 * Should clear out any weird states.
	 */
	if (ios->power_mode == MMC_POWER_OFF) {
		sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
		sdhci_reinit(host);
		vdd_bit = sdhci_set_power(host, -1);
		if (host->vmmc && vdd_bit != -1)
			mmc_regulator_set_ocr(host->mmc, host->vmmc, vdd_bit);
		if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) ||
		    (host->mmc->caps & MMC_CAP_NONREMOVABLE)) {
			/* clear all the pending interrupts */
			sdhci_writel(host, sdhci_readl(host, SDHCI_INT_STATUS),
				     SDHCI_INT_STATUS);
			sdhci_mask_irqs(host, SDHCI_INT_ALL_MASK);
		}
	}
	if (!ios->clock) {
		/* For SDIO cards, arm the async interrupt before gating. */
		if (host->async_int_supp && host->mmc->card &&
		    mmc_card_sdio(host->mmc->card)) {
			sdhci_cfg_async_intr(host, true);
			pr_debug("%s: %s: config async intr\n",
				 mmc_hostname(host->mmc), __func__);
		}
		sdhci_set_clock(host, ios->clock);
	}
	if (ios->power_mode == MMC_POWER_OFF) {
		/* Keep interrupt line disabled as card is anyway powered off */
		if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) ||
		    (host->mmc->caps & MMC_CAP_NONREMOVABLE))
			goto out;
	}
	spin_lock_irqsave(&host->lock, flags);
	sdhci_cfg_irq(host, true, false);
	spin_unlock_irqrestore(&host->lock, flags);
out:
	mmiowb();
	mutex_unlock(&host->ios_mutex);
}
/*
 * mmc_host_ops set_ios hook: runtime-PM-bracketed wrapper around
 * sdhci_do_set_ios().
 */
static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct sdhci_host *host = mmc_priv(mmc);

	sdhci_runtime_pm_get(host);
	sdhci_do_set_ios(host, ios);
	sdhci_runtime_pm_put(host);
}
/*
 * Sample the card's write-protect state once, under host->lock.
 * A dead host reads as writable; platforms may override via get_ro.
 * Honours the inverted-write-protect quirk.
 */
static int sdhci_check_ro(struct sdhci_host *host)
{
	unsigned long flags;
	int is_readonly;

	spin_lock_irqsave(&host->lock, flags);

	if (host->flags & SDHCI_DEVICE_DEAD)
		is_readonly = 0;
	else if (host->ops->get_ro)
		is_readonly = host->ops->get_ro(host);
	else
		is_readonly = !(sdhci_readl(host, SDHCI_PRESENT_STATE)
				& SDHCI_WRITE_PROTECT);

	spin_unlock_irqrestore(&host->lock, flags);

	/* This quirk needs to be replaced by a callback-function later */
	return host->quirks & SDHCI_QUIRK_INVERTED_WRITE_PROTECT ?
		!is_readonly : is_readonly;
}
#define SAMPLE_COUNT	5

/*
 * Determine the write-protect state.  Hosts with a stable RO line are
 * sampled once; hosts flagged SDHCI_QUIRK_UNSTABLE_RO_DETECT are
 * sampled SAMPLE_COUNT times (30 ms apart) and decided by majority.
 */
static int sdhci_do_get_ro(struct sdhci_host *host)
{
	int sample, hits = 0;

	if (!(host->quirks & SDHCI_QUIRK_UNSTABLE_RO_DETECT))
		return sdhci_check_ro(host);

	for (sample = 0; sample < SAMPLE_COUNT; sample++) {
		/* Early out as soon as a strict majority reads RO. */
		if (sdhci_check_ro(host) && ++hits > SAMPLE_COUNT / 2)
			return 1;
		msleep(30);
	}
	return 0;
}
/*
 * mmc_host_ops hw_reset hook: delegate to the platform's hardware
 * reset callback when one is provided; otherwise do nothing.
 */
static void sdhci_hw_reset(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);

	if (!host->ops || !host->ops->hw_reset)
		return;

	host->ops->hw_reset(host);
}
/*
 * mmc_host_ops get_ro hook: runtime-PM-bracketed wrapper around
 * sdhci_do_get_ro().  Returns non-zero when the card is read-only.
 */
static int sdhci_get_ro(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);
	int is_ro;

	sdhci_runtime_pm_get(host);
	is_ro = sdhci_do_get_ro(host);
	sdhci_runtime_pm_put(host);

	return is_ro;
}
/*
 * Enable/disable the SDIO card interrupt.  Caller holds host->lock.
 * A disable while the bus clock is off is deferred (flagged via
 * disable_sdio_irq_deferred) and flushed later from sdhci_do_set_ios().
 * While runtime-suspended only the flag is tracked; the IRQ mask is
 * applied on resume.
 */
static void sdhci_enable_sdio_irq_nolock(struct sdhci_host *host, int enable)
{
	if (host->flags & SDHCI_DEVICE_DEAD)
		goto out;

	if (!enable && !host->clock) {
		pr_debug("%s: %s: defered disabling card intr\n",
			 host->mmc ? mmc_hostname(host->mmc) : "null",
			 __func__);
		host->disable_sdio_irq_deferred = true;
		return;
	}

	if (enable)
		host->flags |= SDHCI_SDIO_IRQ_ENABLED;
	else
		host->flags &= ~SDHCI_SDIO_IRQ_ENABLED;

	/* SDIO IRQ will be enabled as appropriate in runtime resume */
	if (host->runtime_suspended)
		goto out;

	if (enable)
		sdhci_unmask_irqs(host, SDHCI_INT_CARD_INT);
	else
		sdhci_mask_irqs(host, SDHCI_INT_CARD_INT);
out:
	mmiowb();
}
/*
 * mmc_host_ops enable_sdio_irq hook: lock-taking wrapper around
 * sdhci_enable_sdio_irq_nolock().
 */
static void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
	struct sdhci_host *host = mmc_priv(mmc);
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	sdhci_enable_sdio_irq_nolock(host, enable);
	spin_unlock_irqrestore(&host->lock, flags);
}
/*
 * Perform the 3.3V <-> 1.8V signal voltage switch sequence mandated by
 * the SDHC v3.00 spec (clock gating, DAT[3:0] level checks, settle
 * delays).  Returns 0 on success, -EIO when the 3.3V switch fails, and
 * -EAGAIN after power-cycling the card when the 1.8V switch fails (the
 * caller retries initialisation with S18R = 0).
 */
static int sdhci_do_start_signal_voltage_switch(struct sdhci_host *host,
						struct mmc_ios *ios)
{
	u8 pwr;
	u16 clk, ctrl;
	u32 present_state;

	/*
	 * Signal Voltage Switching is only applicable for Host Controllers
	 * v3.00 and above.
	 */
	if (host->version < SDHCI_SPEC_300)
		return 0;

	/*
	 * We first check whether the request is to set signalling voltage
	 * to 3.3V. If so, we change the voltage to 3.3V and return quickly.
	 */
	ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
	if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330) {
		/* Set 1.8V Signal Enable in the Host Control2 register to 0 */
		ctrl &= ~SDHCI_CTRL_VDD_180;
		sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
		if (host->ops->check_power_status)
			host->ops->check_power_status(host, REQ_IO_HIGH);

		/* Wait for 5ms */
		usleep_range(5000, 5500);

		/* 3.3V regulator output should be stable within 5 ms */
		ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
		if (!(ctrl & SDHCI_CTRL_VDD_180))
			return 0;
		else {
			pr_info(DRIVER_NAME ": Switching to 3.3V "
				"signalling voltage failed\n");
			return -EIO;
		}
	} else if (!(ctrl & SDHCI_CTRL_VDD_180) &&
		   (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180)) {
		/* Stop SDCLK */
		clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
		clk &= ~SDHCI_CLOCK_CARD_EN;
		sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

		/* Check whether DAT[3:0] is 0000 */
		present_state = sdhci_readl(host, SDHCI_PRESENT_STATE);
		if (!((present_state & SDHCI_DATA_LVL_MASK) >>
		      SDHCI_DATA_LVL_SHIFT)) {
			/*
			 * Enable 1.8V Signal Enable in the Host Control2
			 * register
			 */
			ctrl |= SDHCI_CTRL_VDD_180;
			sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
			if (host->ops->check_power_status)
				host->ops->check_power_status(host, REQ_IO_LOW);

			/* Wait for 5ms */
			usleep_range(5000, 5500);

			ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
			if (ctrl & SDHCI_CTRL_VDD_180) {
				/* Provide SDCLK again and wait for 1ms*/
				clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
				clk |= SDHCI_CLOCK_CARD_EN;
				sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
				usleep_range(1000, 1500);

				/*
				 * If DAT[3:0] level is 1111b, then the card
				 * was successfully switched to 1.8V signaling.
				 */
				present_state = sdhci_readl(host,
							SDHCI_PRESENT_STATE);
				if ((present_state & SDHCI_DATA_LVL_MASK) ==
				     SDHCI_DATA_LVL_MASK)
					return 0;
			}
		}

		/*
		 * If we are here, that means the switch to 1.8V signaling
		 * failed. We power cycle the card, and retry initialization
		 * sequence by setting S18R to 0.
		 */
		pwr = sdhci_readb(host, SDHCI_POWER_CONTROL);
		pwr &= ~SDHCI_POWER_ON;
		sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
		if (host->ops->check_power_status)
			host->ops->check_power_status(host, REQ_BUS_OFF);

		/* Wait for 1ms as per the spec */
		usleep_range(1000, 1500);
		pwr |= SDHCI_POWER_ON;
		sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
		if (host->ops->check_power_status)
			host->ops->check_power_status(host, REQ_BUS_ON);

		pr_info(DRIVER_NAME ": Switching to 1.8V signalling "
			"voltage failed, retrying with S18R set to 0\n");
		return -EAGAIN;
	} else
		/* No signal voltage switch required */
		return 0;
}
/*
 * mmc_host_ops start_signal_voltage_switch hook: runtime-PM-bracketed
 * wrapper around sdhci_do_start_signal_voltage_switch().  Signal
 * voltage switching only exists on v3.00+ controllers; older hosts
 * report success without doing anything.
 */
static int sdhci_start_signal_voltage_switch(struct mmc_host *mmc,
	struct mmc_ios *ios)
{
	struct sdhci_host *host = mmc_priv(mmc);
	int ret = 0;

	if (host->version >= SDHCI_SPEC_300) {
		sdhci_runtime_pm_get(host);
		ret = sdhci_do_start_signal_voltage_switch(host, ios);
		sdhci_runtime_pm_put(host);
	}

	return ret;
}
/*
 * sdhci_execute_tuning - mmc_host_ops ->execute_tuning callback.
 *
 * Runs the SDHCI v3.00 sampling-clock tuning sequence: repeatedly issues
 * the tuning command (@opcode, e.g. CMD19/CMD21) until the controller
 * clears the Execute Tuning bit, a loop/timeout budget is exhausted, or a
 * platform-specific ->execute_tuning hook takes over.  On success it may
 * also arm the periodic re-tuning timer (tuning mode 1).
 *
 * Runs with the host IRQ disabled and host->lock held for the register
 * sequence; the lock/IRQ are dropped around the blocking wait for the
 * Buffer Read Ready event and around the platform hook.
 *
 * Returns 0 on success or when no tuning is required, negative errno on
 * failure (suppressed to 0 when a re-tuning timer can retry later).
 */
static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
struct sdhci_host *host;
u16 ctrl;
u32 ier = 0;
int tuning_loop_counter = MAX_TUNING_LOOP;
unsigned long timeout;
int err = 0;
bool requires_tuning_nonuhs = false;
host = mmc_priv(mmc);
/* Keep the controller powered for the whole tuning sequence. */
sdhci_runtime_pm_get(host);
disable_irq(host->irq);
spin_lock(&host->lock);
ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
/*
 * The Host Controller needs tuning only in case of SDR104 mode
 * and for SDR50 mode when Use Tuning for SDR50 is set in the
 * Capabilities register.
 * If the Host Controller supports the HS400/HS200 mode then the
 * tuning function has to be executed.
 */
if ((((ctrl & SDHCI_CTRL_UHS_MASK) == SDHCI_CTRL_UHS_SDR50) &&
(host->flags & SDHCI_SDR50_NEEDS_TUNING)) ||
(host->flags & SDHCI_HS200_NEEDS_TUNING) ||
(host->flags & SDHCI_HS400_NEEDS_TUNING))
requires_tuning_nonuhs = true;
if (((ctrl & SDHCI_CTRL_UHS_MASK) == SDHCI_CTRL_UHS_SDR104) ||
requires_tuning_nonuhs)
ctrl |= SDHCI_CTRL_EXEC_TUNING;
else {
/* Current mode does not need tuning - nothing to do. */
spin_unlock(&host->lock);
enable_irq(host->irq);
sdhci_runtime_pm_put(host);
return 0;
}
/* Platform-specific tuning takes precedence over the generic loop. */
if (host->ops->execute_tuning) {
spin_unlock(&host->lock);
enable_irq(host->irq);
err = host->ops->execute_tuning(host, opcode);
disable_irq(host->irq);
spin_lock(&host->lock);
goto out;
}
sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
/*
 * As per the Host Controller spec v3.00, tuning command
 * generates Buffer Read Ready interrupt, so enable that.
 *
 * Note: The spec clearly says that when tuning sequence
 * is being performed, the controller does not generate
 * interrupts other than Buffer Read Ready interrupt. But
 * to make sure we don't hit a controller bug, we _only_
 * enable Buffer Read Ready interrupt here.
 */
ier = sdhci_readl(host, SDHCI_INT_ENABLE);
sdhci_clear_set_irqs(host, ier, SDHCI_INT_DATA_AVAIL);
/*
 * Issue CMD19 repeatedly till Execute Tuning is set to 0 or the number
 * of loops reaches 40 times or a timeout of 150ms occurs.
 */
timeout = 150;
do {
struct mmc_command cmd = {0};
struct mmc_request mrq = {NULL};
/* Both budgets exhausted - give up on this iteration. */
if (!tuning_loop_counter && !timeout)
break;
cmd.opcode = opcode;
cmd.arg = 0;
cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
cmd.retries = 0;
cmd.data = NULL;
cmd.error = 0;
mrq.cmd = &cmd;
host->mrq = &mrq;
/*
 * In response to CMD19, the card sends 64 bytes of tuning
 * block to the Host Controller. So we set the block size
 * to 64 here.
 */
if ((cmd.opcode == MMC_SEND_TUNING_BLOCK_HS400) ||
(cmd.opcode == MMC_SEND_TUNING_BLOCK_HS200)) {
/* eMMC tuning block is 128 bytes in 8-bit bus mode. */
if (mmc->ios.bus_width == MMC_BUS_WIDTH_8)
sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 128),
SDHCI_BLOCK_SIZE);
else if (mmc->ios.bus_width == MMC_BUS_WIDTH_4)
sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 64),
SDHCI_BLOCK_SIZE);
} else {
sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 64),
SDHCI_BLOCK_SIZE);
}
/*
 * The tuning block is sent by the card to the host controller.
 * So we set the TRNS_READ bit in the Transfer Mode register.
 * This also takes care of setting DMA Enable and Multi Block
 * Select in the same register to 0.
 */
sdhci_writew(host, SDHCI_TRNS_READ, SDHCI_TRANSFER_MODE);
sdhci_send_command(host, &cmd);
host->cmd = NULL;
host->mrq = NULL;
/* Drop the lock so the IRQ handler can signal buf_ready_int. */
spin_unlock(&host->lock);
enable_irq(host->irq);
/* Wait for Buffer Read Ready interrupt */
wait_event_interruptible_timeout(host->buf_ready_int,
(host->tuning_done == 1),
msecs_to_jiffies(50));
disable_irq(host->irq);
spin_lock(&host->lock);
if (!host->tuning_done) {
/* No Buffer Read Ready - abort and revert to fixed clock. */
pr_info(DRIVER_NAME ": Timeout waiting for "
"Buffer Read Ready interrupt during tuning "
"procedure, falling back to fixed sampling "
"clock\n");
ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
ctrl &= ~SDHCI_CTRL_TUNED_CLK;
ctrl &= ~SDHCI_CTRL_EXEC_TUNING;
sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
err = -EIO;
goto out;
}
host->tuning_done = 0;
ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
tuning_loop_counter--;
timeout--;
mdelay(1);
} while (ctrl & SDHCI_CTRL_EXEC_TUNING);
/*
 * The Host Driver has exhausted the maximum number of loops allowed,
 * so use fixed sampling frequency.
 */
if (!tuning_loop_counter || !timeout) {
ctrl &= ~SDHCI_CTRL_TUNED_CLK;
sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
} else {
if (!(ctrl & SDHCI_CTRL_TUNED_CLK)) {
pr_info(DRIVER_NAME ": Tuning procedure"
" failed, falling back to fixed sampling"
" clock\n");
err = -EIO;
}
}
out:
/*
 * If this is the very first time we are here, we start the retuning
 * timer. Since only during the first time, SDHCI_NEEDS_RETUNING
 * flag won't be set, we check this condition before actually starting
 * the timer.
 */
if (!(host->flags & SDHCI_NEEDS_RETUNING) && host->tuning_count &&
(host->tuning_mode == SDHCI_TUNING_MODE_1)) {
host->flags |= SDHCI_USING_RETUNING_TIMER;
mod_timer(&host->tuning_timer, jiffies +
host->tuning_count * HZ);
/* Tuning mode 1 limits the maximum data length to 4MB */
mmc->max_blk_count = (4 * 1024 * 1024) / mmc->max_blk_size;
} else {
host->flags &= ~SDHCI_NEEDS_RETUNING;
/* Reload the new initial value for timer */
if (host->tuning_mode == SDHCI_TUNING_MODE_1)
mod_timer(&host->tuning_timer, jiffies +
host->tuning_count * HZ);
}
/*
 * In case tuning fails, host controllers which support re-tuning can
 * try tuning again at a later time, when the re-tuning timer expires.
 * So for these controllers, we return 0. Since there might be other
 * controllers who do not have this capability, we return error for
 * them. SDHCI_USING_RETUNING_TIMER means the host is currently using
 * a retuning timer to do the retuning for the card.
 */
if (err && (host->flags & SDHCI_USING_RETUNING_TIMER))
err = 0;
/* Restore the interrupt-enable mask saved before the tuning loop. */
sdhci_clear_set_irqs(host, SDHCI_INT_DATA_AVAIL, ier);
spin_unlock(&host->lock);
enable_irq(host->irq);
sdhci_runtime_pm_put(host);
return err;
}
/*
 * sdhci_enable_preset_value - toggle the Preset Value Enable bit.
 * @host:   controller instance
 * @enable: true to turn preset values on, false to turn them off
 *
 * Preset value registers exist only from SDHCI spec v3.00 onwards and
 * are skipped entirely when the controller advertises the broken-preset
 * quirk.  The SDHCI_PV_ENABLED software flag is kept in sync with the
 * hardware bit; nothing is written when the state already matches.
 */
static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable)
{
	u16 reg;

	/* Host Controller v3.00 defines preset value registers */
	if (host->version < SDHCI_SPEC_300)
		return;
	if (host->quirks2 & SDHCI_QUIRK2_BROKEN_PRESET_VALUE)
		return;

	reg = sdhci_readw(host, SDHCI_HOST_CONTROL2);

	/* Bail out when the hardware already matches the request. */
	if (enable == !!(reg & SDHCI_CTRL_PRESET_VAL_ENABLE))
		return;

	if (enable) {
		reg |= SDHCI_CTRL_PRESET_VAL_ENABLE;
		sdhci_writew(host, reg, SDHCI_HOST_CONTROL2);
		host->flags |= SDHCI_PV_ENABLED;
	} else {
		reg &= ~SDHCI_CTRL_PRESET_VAL_ENABLE;
		sdhci_writew(host, reg, SDHCI_HOST_CONTROL2);
		host->flags &= ~SDHCI_PV_ENABLED;
	}
}
/*
 * sdhci_stop_request - forcibly abort the in-flight data request.
 *
 * Resets the CMD and DATA lines, unmaps any DMA resources attached to
 * the current transfer and clears the host's request bookkeeping, all
 * under host->lock.  Returns 0 on success or MMC_BLK_NO_REQ_TO_STOP
 * when there is no active data request to abort.
 */
static int sdhci_stop_request(struct mmc_host *mmc)
{
struct sdhci_host *host = mmc_priv(mmc);
unsigned long flags;
struct mmc_data *data;
int ret = 0;
spin_lock_irqsave(&host->lock, flags);
if (!host->mrq || !host->data) {
ret = MMC_BLK_NO_REQ_TO_STOP;
goto out;
}
data = host->data;
/* Give the platform a chance to quiesce its data path first. */
if (host->ops->disable_data_xfer)
host->ops->disable_data_xfer(host);
sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
if (host->flags & SDHCI_REQ_USE_DMA) {
if (host->flags & SDHCI_USE_ADMA) {
sdhci_adma_table_post(host, data);
} else {
/* host_cookie != 0 means post_req will do the unmap later. */
if (!data->host_cookie)
dma_unmap_sg(mmc_dev(host->mmc), data->sg,
data->sg_len,
(data->flags & MMC_DATA_READ) ?
DMA_FROM_DEVICE : DMA_TO_DEVICE);
}
}
/* The request is gone; cancel its software timeout as well. */
del_timer(&host->timer);
host->mrq = NULL;
host->cmd = NULL;
host->data = NULL;
out:
spin_unlock_irqrestore(&host->lock, flags);
return ret;
}
/*
 * sdhci_get_xfer_remain - report whether a write transfer is still active.
 *
 * Returns the SDHCI_DOING_WRITE bit of the Present State register:
 * non-zero while the controller is driving write data, 0 otherwise.
 */
static unsigned int sdhci_get_xfer_remain(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);

	return sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_DOING_WRITE;
}
/*
 * mmc_host_ops table binding the MMC core callbacks to this driver's
 * implementations.  Registered with the core via mmc->ops in
 * sdhci_add_host().
 */
static const struct mmc_host_ops sdhci_ops = {
.pre_req = sdhci_pre_req,
.post_req = sdhci_post_req,
.request = sdhci_request,
.set_ios = sdhci_set_ios,
.get_ro = sdhci_get_ro,
.hw_reset = sdhci_hw_reset,
.enable_sdio_irq = sdhci_enable_sdio_irq,
.start_signal_voltage_switch = sdhci_start_signal_voltage_switch,
.execute_tuning = sdhci_execute_tuning,
.enable = sdhci_enable,
.disable = sdhci_disable,
.stop_request = sdhci_stop_request,
.get_xfer_remain = sdhci_get_xfer_remain,
.notify_load = sdhci_notify_load,
};
/*****************************************************************************\
* *
* Tasklets *
* *
\*****************************************************************************/
/*
 * sdhci_tasklet_card - card insert/remove tasklet.
 *
 * Scheduled from the interrupt handler on card-detect events.  If a
 * request was in flight when the card disappeared, the controller is
 * reset and the request is failed with -ENOMEDIUM via the finish
 * tasklet.  Finally the MMC core is told to rescan the slot.
 */
static void sdhci_tasklet_card(unsigned long param)
{
struct sdhci_host *host;
unsigned long flags;
host = (struct sdhci_host*)param;
spin_lock_irqsave(&host->lock, flags);
/* Check host->mrq first in case we are runtime suspended */
if (host->mrq &&
!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT)) {
pr_err("%s: Card removed during transfer!\n",
mmc_hostname(host->mmc));
pr_err("%s: Resetting controller.\n",
mmc_hostname(host->mmc));
sdhci_reset(host, SDHCI_RESET_CMD);
sdhci_reset(host, SDHCI_RESET_DATA);
host->mrq->cmd->error = -ENOMEDIUM;
tasklet_schedule(&host->finish_tasklet);
}
spin_unlock_irqrestore(&host->lock, flags);
/* Debounce: let the core re-probe the slot after 200ms. */
mmc_detect_change(host->mmc, msecs_to_jiffies(200));
}
/*
 * sdhci_tasklet_finish - request completion tasklet.
 *
 * Completes the current mmc_request: cancels the software timeout,
 * resets the controller state machines on error (or when quirks demand
 * it), clears the host's request pointers and hands the request back to
 * the MMC core.  Drops the runtime-PM reference taken when the request
 * was started.
 */
static void sdhci_tasklet_finish(unsigned long param)
{
struct sdhci_host *host;
unsigned long flags;
struct mmc_request *mrq;
host = (struct sdhci_host*)param;
spin_lock_irqsave(&host->lock, flags);
/*
 * If this tasklet gets rescheduled while running, it will
 * be run again afterwards but without any active request.
 */
if (!host->mrq) {
spin_unlock_irqrestore(&host->lock, flags);
return;
}
del_timer(&host->timer);
mrq = host->mrq;
/*
 * The controller needs a reset of internal state machines
 * upon error conditions.
 */
if (!(host->flags & SDHCI_DEVICE_DEAD) &&
((mrq->cmd && mrq->cmd->error) ||
(mrq->data && (mrq->data->error ||
(mrq->data->stop && mrq->data->stop->error))) ||
(host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST))) {
/* Some controllers need this kick or reset won't work here */
if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET)
/* This is to force an update */
sdhci_update_clock(host, flags);
/* Spec says we should do both at the same time, but Ricoh
controllers do not like that. */
sdhci_reset(host, SDHCI_RESET_CMD);
sdhci_reset(host, SDHCI_RESET_DATA);
} else {
if (host->quirks2 & SDHCI_QUIRK2_RDWR_TX_ACTIVE_EOT)
sdhci_reset(host, SDHCI_RESET_DATA);
}
host->mrq = NULL;
host->cmd = NULL;
host->data = NULL;
host->auto_cmd_err_sts = 0;
#ifndef SDHCI_USE_LEDS_CLASS
sdhci_deactivate_led(host);
#endif
/* Ensure all MMIO writes are posted before releasing the lock. */
mmiowb();
spin_unlock_irqrestore(&host->lock, flags);
/* Notify the core outside the lock - it may submit a new request. */
mmc_request_done(host->mmc, mrq);
sdhci_runtime_pm_put(host);
}
/*
 * sdhci_timeout_timer - software watchdog for a stuck request.
 *
 * Fires when the hardware never raised a completion interrupt for the
 * current request.  Marks the active data/command with -ETIMEDOUT and
 * pushes completion through sdhci_finish_data() or the finish tasklet.
 */
static void sdhci_timeout_timer(unsigned long data)
{
struct sdhci_host *host;
unsigned long flags;
host = (struct sdhci_host*)data;
spin_lock_irqsave(&host->lock, flags);
if (host->mrq) {
/* Callers may opt out of the noisy dump via ignore_timeout. */
if (!host->mrq->cmd->ignore_timeout) {
pr_err("%s: Timeout waiting for hardware interrupt.\n",
mmc_hostname(host->mmc));
sdhci_dumpregs(host);
}
if (host->data) {
pr_info("%s: bytes to transfer: %d transferred: %d\n",
mmc_hostname(host->mmc),
(host->data->blksz * host->data->blocks),
(sdhci_readw(host, SDHCI_BLOCK_SIZE) & 0xFFF) *
sdhci_readw(host, SDHCI_BLOCK_COUNT));
host->data->error = -ETIMEDOUT;
sdhci_finish_data(host);
} else {
/* No data phase: fail whichever command is outstanding. */
if (host->cmd)
host->cmd->error = -ETIMEDOUT;
else
host->mrq->cmd->error = -ETIMEDOUT;
tasklet_schedule(&host->finish_tasklet);
}
}
mmiowb();
spin_unlock_irqrestore(&host->lock, flags);
}
/*
 * sdhci_tuning_timer - re-tuning timer callback.
 *
 * When the periodic re-tuning interval expires, flag the host so the
 * next request triggers a fresh tuning sequence.
 */
static void sdhci_tuning_timer(unsigned long data)
{
	struct sdhci_host *host = (struct sdhci_host *)data;
	unsigned long irq_flags;

	spin_lock_irqsave(&host->lock, irq_flags);
	host->flags |= SDHCI_NEEDS_RETUNING;
	spin_unlock_irqrestore(&host->lock, irq_flags);
}
/*****************************************************************************\
* *
* Interrupt handling *
* *
\*****************************************************************************/
/*
 * sdhci_cmd_irq - handle command-related interrupt status bits.
 * @host:    controller instance (host->lock held by caller)
 * @intmask: command bits of SDHCI_INT_STATUS (already acked)
 *
 * Translates the hardware status into host->cmd->error, flags a
 * re-tuning need on CRC-type errors (except for tuning commands
 * themselves), and completes the command on SDHCI_INT_RESPONSE.
 */
static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask)
{
u16 auto_cmd_status;
u32 command;
BUG_ON(intmask == 0);
if (!host->cmd) {
pr_err("%s: Got command interrupt 0x%08x even "
"though no command operation was in progress.\n",
mmc_hostname(host->mmc), (unsigned)intmask);
sdhci_dumpregs(host);
return;
}
if (intmask & SDHCI_INT_TIMEOUT)
host->cmd->error = -ETIMEDOUT;
else if (intmask & (SDHCI_INT_CRC | SDHCI_INT_END_BIT |
SDHCI_INT_INDEX))
host->cmd->error = -EILSEQ;
if (intmask & SDHCI_INT_AUTO_CMD_ERR) {
/* Status latched by the top-level IRQ handler before the ack. */
auto_cmd_status = host->auto_cmd_err_sts;
pr_err("%s: %s: AUTO CMD err sts 0x%08x\n",
mmc_hostname(host->mmc), __func__, auto_cmd_status);
if (auto_cmd_status & (SDHCI_AUTO_CMD12_NOT_EXEC |
SDHCI_AUTO_CMD_INDEX_ERR |
SDHCI_AUTO_CMD_ENDBIT_ERR))
host->cmd->error = -EIO;
else if (auto_cmd_status & SDHCI_AUTO_CMD_TIMEOUT_ERR)
host->cmd->error = -ETIMEDOUT;
else if (auto_cmd_status & SDHCI_AUTO_CMD_CRC_ERR)
host->cmd->error = -EILSEQ;
}
if (host->cmd->error) {
command = SDHCI_GET_CMD(sdhci_readw(host,
SDHCI_COMMAND));
/* CRC errors outside the tuning commands mean the clock drifted. */
if (host->cmd->error == -EILSEQ &&
(command != MMC_SEND_TUNING_BLOCK_HS400) &&
(command != MMC_SEND_TUNING_BLOCK_HS200) &&
(command != MMC_SEND_TUNING_BLOCK))
host->flags |= SDHCI_NEEDS_RETUNING;
tasklet_schedule(&host->finish_tasklet);
return;
}
/*
 * The host can send an interrupt when the busy state has
 * ended, allowing us to wait without wasting CPU cycles.
 * Unfortunately this is overloaded on the "data complete"
 * interrupt, so we need to take some care when handling
 * it.
 *
 * Note: The 1.0 specification is a bit ambiguous about this
 * feature so there might be some problems with older
 * controllers.
 */
if (host->cmd->flags & MMC_RSP_BUSY) {
if (host->cmd->data)
DBG("Cannot wait for busy signal when also "
"doing a data transfer");
else if (!(host->quirks & SDHCI_QUIRK_NO_BUSY_IRQ))
return;
/* The controller does not support the end-of-busy IRQ,
 * fall through and take the SDHCI_INT_RESPONSE */
}
if (intmask & SDHCI_INT_RESPONSE)
sdhci_finish_command(host);
}
/*
 * sdhci_show_adma_error - dump the ADMA2 descriptor table for debugging.
 *
 * Walks the 8-byte descriptor entries (attr/len/address layout) and
 * logs each one until the End attribute bit (bit 1) is reached.  Also
 * dumps the controller registers up front.
 */
static void sdhci_show_adma_error(struct sdhci_host *host)
{
	const char *name = mmc_hostname(host->mmc);
	u8 *entry = host->adma_desc;
	u8 attr;

	sdhci_dumpregs(host);

	do {
		attr = entry[0];
		pr_info("%s: %p: DMA 0x%08x, LEN 0x%04x, Attr=0x%02x\n",
			name, entry,
			le32_to_cpu(*(__le32 *)(entry + 4)),
			le16_to_cpu(*(__le16 *)(entry + 2)), attr);
		entry += 8;
	} while (!(attr & 2));	/* stop after the End descriptor */
}
/*
 * sdhci_data_irq - handle data-related interrupt status bits.
 * @host:    controller instance (host->lock held by caller)
 * @intmask: data bits of SDHCI_INT_STATUS (already acked)
 *
 * Responsibilities, in order: wake the tuning waiter on Buffer Read
 * Ready for tuning commands; resolve the busy-end case when no data
 * transfer is active; translate error bits into host->data->error;
 * otherwise service PIO, SDMA boundary restarts and transfer-complete
 * events.
 */
static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
{
u32 command;
bool pr_msg = false;
BUG_ON(intmask == 0);
/* CMD19 generates _only_ Buffer Read Ready interrupt */
if (intmask & SDHCI_INT_DATA_AVAIL) {
command = SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND));
if (command == MMC_SEND_TUNING_BLOCK ||
command == MMC_SEND_TUNING_BLOCK_HS200 ||
command == MMC_SEND_TUNING_BLOCK_HS400) {
host->tuning_done = 1;
wake_up(&host->buf_ready_int);
return;
}
}
if (!host->data) {
/*
 * The "data complete" interrupt is also used to
 * indicate that a busy state has ended. See comment
 * above in sdhci_cmd_irq().
 */
if (host->cmd && (host->cmd->flags & MMC_RSP_BUSY)) {
if (intmask & SDHCI_INT_DATA_END) {
sdhci_finish_command(host);
return;
}
if (host->quirks2 &
SDHCI_QUIRK2_IGNORE_DATATOUT_FOR_R1BCMD)
return;
}
pr_err("%s: Got data interrupt 0x%08x even "
"though no data operation was in progress.\n",
mmc_hostname(host->mmc), (unsigned)intmask);
sdhci_dumpregs(host);
return;
}
if (intmask & SDHCI_INT_DATA_TIMEOUT)
host->data->error = -ETIMEDOUT;
else if (intmask & SDHCI_INT_DATA_END_BIT)
host->data->error = -EILSEQ;
else if ((intmask & SDHCI_INT_DATA_CRC) &&
SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))
!= MMC_BUS_TEST_R)
host->data->error = -EILSEQ;
else if (intmask & SDHCI_INT_ADMA_ERROR) {
pr_err("%s: ADMA error\n", mmc_hostname(host->mmc));
sdhci_show_adma_error(host);
host->data->error = -EIO;
}
if (host->data->error) {
if (intmask & (SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT)) {
command = SDHCI_GET_CMD(sdhci_readw(host,
SDHCI_COMMAND));
/* Tuning commands are expected to fail - stay quiet for them. */
if ((command != MMC_SEND_TUNING_BLOCK_HS400) &&
(command != MMC_SEND_TUNING_BLOCK_HS200) &&
(command != MMC_SEND_TUNING_BLOCK)) {
pr_msg = true;
if (intmask & SDHCI_INT_DATA_CRC)
host->flags |= SDHCI_NEEDS_RETUNING;
}
} else {
pr_msg = true;
}
if (pr_msg) {
pr_err("%s: data txfr (0x%08x) error: %d after %lld ms\n",
mmc_hostname(host->mmc), intmask,
host->data->error, ktime_to_ms(ktime_sub(
ktime_get(), host->data_start_time)));
sdhci_dumpregs(host);
}
sdhci_finish_data(host);
} else {
if (intmask & (SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL))
sdhci_transfer_pio(host);
/*
 * We currently don't do anything fancy with DMA
 * boundaries, but as we can't disable the feature
 * we need to at least restart the transfer.
 *
 * According to the spec sdhci_readl(host, SDHCI_DMA_ADDRESS)
 * should return a valid address to continue from, but as
 * some controllers are faulty, don't trust them.
 */
if (intmask & SDHCI_INT_DMA_END) {
u32 dmastart, dmanow;
dmastart = sg_dma_address(host->data->sg);
dmanow = dmastart + host->data->bytes_xfered;
/*
 * Force update to the next DMA block boundary.
 */
dmanow = (dmanow &
~(SDHCI_DEFAULT_BOUNDARY_SIZE - 1)) +
SDHCI_DEFAULT_BOUNDARY_SIZE;
host->data->bytes_xfered = dmanow - dmastart;
DBG("%s: DMA base 0x%08x, transferred 0x%06x bytes,"
" next 0x%08x\n",
mmc_hostname(host->mmc), dmastart,
host->data->bytes_xfered, dmanow);
sdhci_writel(host, dmanow, SDHCI_DMA_ADDRESS);
}
if (intmask & SDHCI_INT_DATA_END) {
if (host->cmd) {
/*
 * Data managed to finish before the
 * command completed. Make sure we do
 * things in the proper order.
 */
host->data_early = 1;
} else {
sdhci_finish_data(host);
}
}
}
}
/*
 * sdhci_irq - top-level interrupt service routine.
 *
 * Reads SDHCI_INT_STATUS and dispatches card-detect, command, data,
 * bus-power and SDIO card interrupt bits, acking each group as it is
 * handled.  Loops (bounded by max_loops) while new status keeps
 * arriving.  Handles two special early-exit cases: the controller being
 * runtime-suspended, and a level-sensitive SDIO async interrupt arriving
 * with the clock gated (registers inaccessible).
 */
static irqreturn_t sdhci_irq(int irq, void *dev_id)
{
irqreturn_t result;
struct sdhci_host *host = dev_id;
u32 intmask, unexpected = 0;
int cardint = 0, max_loops = 16;
spin_lock(&host->lock);
if (host->runtime_suspended) {
spin_unlock(&host->lock);
pr_warning("%s: got irq while runtime suspended\n",
mmc_hostname(host->mmc));
return IRQ_HANDLED;
}
if (!host->clock && host->mmc->card &&
mmc_card_sdio(host->mmc->card)) {
/* SDIO async. interrupt is level-sensitive */
/* Mask our IRQ line until ksdioirqd has serviced the card. */
sdhci_cfg_irq(host, false, false);
pr_debug("%s: got async-irq: clocks: %d gated: %d host-irq[en:1/dis:0]: %d\n",
mmc_hostname(host->mmc), host->clock,
host->mmc->clk_gated, host->irq_enabled);
spin_unlock(&host->lock);
/* prevent suspend till the ksdioirqd runs or resume happens */
if ((host->mmc->dev_status == DEV_SUSPENDING) ||
(host->mmc->dev_status == DEV_SUSPENDED))
pm_wakeup_event(&host->mmc->card->dev,
SDHCI_SUSPEND_TIMEOUT);
else
mmc_signal_sdio_irq(host->mmc);
return IRQ_HANDLED;
} else if (!host->clock) {
/*
 * As clocks are disabled, controller registers might not be
 * accessible hence return from here.
 */
spin_unlock(&host->lock);
return IRQ_HANDLED;
}
intmask = sdhci_readl(host, SDHCI_INT_STATUS);
/* 0xffffffff usually means the device has dropped off the bus. */
if (!intmask || intmask == 0xffffffff) {
result = IRQ_NONE;
goto out;
}
again:
DBG("*** %s got interrupt: 0x%08x\n",
mmc_hostname(host->mmc), intmask);
if (intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
u32 present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
SDHCI_CARD_PRESENT;
/*
 * There is a observation on i.mx esdhc. INSERT bit will be
 * immediately set again when it gets cleared, if a card is
 * inserted. We have to mask the irq to prevent interrupt
 * storm which will freeze the system. And the REMOVE gets
 * the same situation.
 *
 * More testing are needed here to ensure it works for other
 * platforms though.
 */
sdhci_mask_irqs(host, present ? SDHCI_INT_CARD_INSERT :
SDHCI_INT_CARD_REMOVE);
sdhci_unmask_irqs(host, present ? SDHCI_INT_CARD_REMOVE :
SDHCI_INT_CARD_INSERT);
sdhci_writel(host, intmask & (SDHCI_INT_CARD_INSERT |
SDHCI_INT_CARD_REMOVE), SDHCI_INT_STATUS);
intmask &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE);
tasklet_schedule(&host->card_tasklet);
}
if (intmask & SDHCI_INT_CMD_MASK) {
/* Latch Auto-CMD error status before the ack clears it. */
if (intmask & SDHCI_INT_AUTO_CMD_ERR)
host->auto_cmd_err_sts = sdhci_readw(host,
SDHCI_AUTO_CMD_ERR);
sdhci_writel(host, intmask & SDHCI_INT_CMD_MASK,
SDHCI_INT_STATUS);
/* Some controllers clear status slowly at low clock rates. */
if ((host->quirks2 & SDHCI_QUIRK2_SLOW_INT_CLR) &&
(host->clock <= 400000))
udelay(40);
sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK);
}
if (intmask & SDHCI_INT_DATA_MASK) {
sdhci_writel(host, intmask & SDHCI_INT_DATA_MASK,
SDHCI_INT_STATUS);
if ((host->quirks2 & SDHCI_QUIRK2_SLOW_INT_CLR) &&
(host->clock <= 400000))
udelay(40);
sdhci_data_irq(host, intmask & SDHCI_INT_DATA_MASK);
}
intmask &= ~(SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK);
intmask &= ~SDHCI_INT_ERROR;
if (intmask & SDHCI_INT_BUS_POWER) {
pr_err("%s: Card is consuming too much power!\n",
mmc_hostname(host->mmc));
sdhci_writel(host, SDHCI_INT_BUS_POWER, SDHCI_INT_STATUS);
}
intmask &= ~SDHCI_INT_BUS_POWER;
if (intmask & SDHCI_INT_CARD_INT)
cardint = 1;
intmask &= ~SDHCI_INT_CARD_INT;
if (intmask) {
/* Anything left over is unexpected; ack it and report below. */
unexpected |= intmask;
sdhci_writel(host, intmask, SDHCI_INT_STATUS);
}
result = IRQ_HANDLED;
/* Re-read: new events may have been raised while we serviced these. */
intmask = sdhci_readl(host, SDHCI_INT_STATUS);
if (intmask && --max_loops)
goto again;
out:
spin_unlock(&host->lock);
if (unexpected) {
pr_err("%s: Unexpected interrupt 0x%08x.\n",
mmc_hostname(host->mmc), unexpected);
sdhci_dumpregs(host);
}
/*
 * We have to delay this as it calls back into the driver.
 */
if (cardint) {
/* clks are on, but suspend may be in progress */
if (host->mmc->dev_status == DEV_SUSPENDING)
pm_wakeup_event(&host->mmc->card->dev,
SDHCI_SUSPEND_TIMEOUT);
mmc_signal_sdio_irq(host->mmc);
}
return result;
}
/*****************************************************************************\
* *
* Suspend/resume *
* *
\*****************************************************************************/
#ifdef CONFIG_PM
/*
 * sdhci_enable_irq_wakeups - arm wakeup sources in WAKE_UP_CONTROL.
 *
 * Enables wake-on-insert, wake-on-remove and wake-on-SDIO-interrupt.
 * Hosts with broken card-detect keep the insert/remove sources off so
 * a floating line cannot produce spurious wakeups.
 */
void sdhci_enable_irq_wakeups(struct sdhci_host *host)
{
	u8 wake_ctrl;

	wake_ctrl = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
	wake_ctrl |= SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE |
		     SDHCI_WAKE_ON_INT;
	/* Avoid fake wake up */
	if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
		wake_ctrl &= ~(SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE);
	sdhci_writeb(host, wake_ctrl, SDHCI_WAKE_UP_CONTROL);
}
EXPORT_SYMBOL_GPL(sdhci_enable_irq_wakeups);
/*
 * sdhci_disable_irq_wakeups - disarm all wakeup sources.
 *
 * Clears the insert, remove and SDIO-interrupt wakeup enables in the
 * WAKE_UP_CONTROL register; the mirror image of
 * sdhci_enable_irq_wakeups().
 */
void sdhci_disable_irq_wakeups(struct sdhci_host *host)
{
	u8 wake_ctrl;

	wake_ctrl = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
	wake_ctrl &= ~(SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE |
		       SDHCI_WAKE_ON_INT);
	sdhci_writeb(host, wake_ctrl, SDHCI_WAKE_UP_CONTROL);
}
EXPORT_SYMBOL_GPL(sdhci_disable_irq_wakeups);
/*
 * sdhci_suspend_host - system-suspend entry point for SDHCI hosts.
 *
 * Stops re-tuning, suspends the attached card via the MMC core, then
 * either frees the IRQ (no wakeup capability) or arms the controller's
 * wakeup sources.  On core suspend failure the re-tuning state and card
 * detection are restored before returning the error.
 */
int sdhci_suspend_host(struct sdhci_host *host)
{
int ret;
if (host->ops->platform_suspend)
host->ops->platform_suspend(host);
sdhci_disable_card_detection(host);
/* Disable tuning since we are suspending */
if (host->flags & SDHCI_USING_RETUNING_TIMER) {
del_timer_sync(&host->tuning_timer);
host->flags &= ~SDHCI_NEEDS_RETUNING;
}
ret = mmc_suspend_host(host->mmc);
if (ret) {
/* Suspend failed - undo the tuning/cd changes made above. */
if (host->flags & SDHCI_USING_RETUNING_TIMER) {
host->flags |= SDHCI_NEEDS_RETUNING;
mod_timer(&host->tuning_timer, jiffies +
host->tuning_count * HZ);
}
sdhci_enable_card_detection(host);
return ret;
}
if (!device_may_wakeup(mmc_dev(host->mmc))) {
sdhci_mask_irqs(host, SDHCI_INT_ALL_MASK);
free_irq(host->irq, host);
} else {
sdhci_enable_irq_wakeups(host);
enable_irq_wake(host->irq);
}
return ret;
}
EXPORT_SYMBOL_GPL(sdhci_suspend_host);
/*
 * sdhci_resume_host - system-resume counterpart of sdhci_suspend_host.
 *
 * Re-enables DMA, reclaims the IRQ (or disarms wakeups), re-initialises
 * the controller - fully or partially depending on whether the card
 * kept power - and resumes the card through the MMC core.  Flags a
 * re-tune if a re-tuning timer was in use before suspend.
 */
int sdhci_resume_host(struct sdhci_host *host)
{
int ret;
if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
if (host->ops->enable_dma)
host->ops->enable_dma(host);
}
if (!device_may_wakeup(mmc_dev(host->mmc))) {
/* IRQ was freed during suspend; request it again. */
ret = request_irq(host->irq, sdhci_irq, IRQF_SHARED,
mmc_hostname(host->mmc), host);
if (ret)
return ret;
} else {
sdhci_disable_irq_wakeups(host);
disable_irq_wake(host->irq);
}
if ((host->mmc->pm_flags & MMC_PM_KEEP_POWER) &&
(host->quirks2 & SDHCI_QUIRK2_HOST_OFF_CARD_ON)) {
/* Card keeps power but host controller does not */
sdhci_init(host, 0);
/* Force clock and power to be reprogrammed by set_ios. */
host->pwr = 0;
host->clock = 0;
sdhci_do_set_ios(host, &host->mmc->ios);
} else {
sdhci_init(host, (host->mmc->pm_flags & MMC_PM_KEEP_POWER));
mmiowb();
}
ret = mmc_resume_host(host->mmc);
sdhci_enable_card_detection(host);
if (host->ops->platform_resume)
host->ops->platform_resume(host);
/* Set the re-tuning expiration flag */
if (host->flags & SDHCI_USING_RETUNING_TIMER)
host->flags |= SDHCI_NEEDS_RETUNING;
return ret;
}
EXPORT_SYMBOL_GPL(sdhci_resume_host);
#endif /* CONFIG_PM */
#ifdef CONFIG_PM_RUNTIME
/*
 * sdhci_runtime_pm_get - take a runtime-PM reference on the parent device.
 *
 * When the MMC core manages runtime PM for this host, this is a no-op
 * returning 0; otherwise forwards to pm_runtime_get_sync().
 */
static int sdhci_runtime_pm_get(struct sdhci_host *host)
{
	if (mmc_use_core_runtime_pm(host->mmc))
		return 0;

	return pm_runtime_get_sync(host->mmc->parent);
}
/*
 * sdhci_runtime_pm_put - drop the runtime-PM reference taken by
 * sdhci_runtime_pm_get().
 *
 * No-op (returns 0) when the MMC core owns runtime PM; otherwise marks
 * the parent device busy and schedules an autosuspend put.
 */
static int sdhci_runtime_pm_put(struct sdhci_host *host)
{
	struct device *parent;

	if (mmc_use_core_runtime_pm(host->mmc))
		return 0;

	parent = host->mmc->parent;
	pm_runtime_mark_last_busy(parent);
	return pm_runtime_put_autosuspend(parent);
}
/*
 * sdhci_runtime_suspend_host - quiesce the controller for runtime suspend.
 *
 * Cancels re-tuning, masks every controller interrupt, waits for any
 * in-flight IRQ handler to finish, then marks the host runtime
 * suspended so sdhci_irq() bails out early.  Always returns 0.
 */
int sdhci_runtime_suspend_host(struct sdhci_host *host)
{
unsigned long flags;
int ret = 0;
/* Disable tuning since we are suspending */
if (host->flags & SDHCI_USING_RETUNING_TIMER) {
del_timer_sync(&host->tuning_timer);
host->flags &= ~SDHCI_NEEDS_RETUNING;
}
spin_lock_irqsave(&host->lock, flags);
sdhci_mask_irqs(host, SDHCI_INT_ALL_MASK);
spin_unlock_irqrestore(&host->lock, flags);
/* Let any handler already running on another CPU drain out. */
synchronize_irq(host->irq);
spin_lock_irqsave(&host->lock, flags);
host->runtime_suspended = true;
spin_unlock_irqrestore(&host->lock, flags);
return ret;
}
EXPORT_SYMBOL_GPL(sdhci_runtime_suspend_host);
/*
 * sdhci_runtime_resume_host - bring the controller back from runtime
 * suspend.
 *
 * Re-enables DMA, re-initialises the controller, forces clock/power and
 * signal voltage to be reprogrammed from the cached ios, restores the
 * preset-value enable if it was active, flags a re-tune when a
 * re-tuning timer is in use, and finally clears runtime_suspended and
 * re-enables SDIO IRQ and card detection.  Always returns 0.
 */
int sdhci_runtime_resume_host(struct sdhci_host *host)
{
unsigned long flags;
int ret = 0, host_flags = host->flags;
if (host_flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
if (host->ops->enable_dma)
host->ops->enable_dma(host);
}
sdhci_init(host, 0);
/* Force clock and power re-program */
host->pwr = 0;
host->clock = 0;
sdhci_do_set_ios(host, &host->mmc->ios);
sdhci_do_start_signal_voltage_switch(host, &host->mmc->ios);
if ((host_flags & SDHCI_PV_ENABLED) &&
!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN)) {
spin_lock_irqsave(&host->lock, flags);
sdhci_enable_preset_value(host, true);
spin_unlock_irqrestore(&host->lock, flags);
}
/* Set the re-tuning expiration flag */
if (host->flags & SDHCI_USING_RETUNING_TIMER)
host->flags |= SDHCI_NEEDS_RETUNING;
spin_lock_irqsave(&host->lock, flags);
host->runtime_suspended = false;
/* Enable SDIO IRQ */
if ((host->flags & SDHCI_SDIO_IRQ_ENABLED))
sdhci_enable_sdio_irq_nolock(host, true);
/* Enable Card Detection */
sdhci_enable_card_detection(host);
spin_unlock_irqrestore(&host->lock, flags);
return ret;
}
EXPORT_SYMBOL_GPL(sdhci_runtime_resume_host);
#endif
/*****************************************************************************\
* *
* Device allocation/registration *
* *
\*****************************************************************************/
/*
 * sdhci_alloc_host - allocate an mmc_host with an embedded sdhci_host.
 * @dev:       parent device
 * @priv_size: extra bytes reserved after the sdhci_host for driver data
 *
 * Initialises the host spinlock and ios mutex.  Returns the sdhci_host
 * on success or ERR_PTR(-ENOMEM) when allocation fails.  The caller
 * frees it with the matching mmc_free_host()-based teardown.
 */
struct sdhci_host *sdhci_alloc_host(struct device *dev,
	size_t priv_size)
{
	struct sdhci_host *host;
	struct mmc_host *mmc;

	WARN_ON(dev == NULL);

	mmc = mmc_alloc_host(sizeof(*host) + priv_size, dev);
	if (!mmc)
		return ERR_PTR(-ENOMEM);

	host = mmc_priv(mmc);
	host->mmc = mmc;
	spin_lock_init(&host->lock);
	mutex_init(&host->ios_mutex);
	return host;
}
EXPORT_SYMBOL_GPL(sdhci_alloc_host);
int sdhci_add_host(struct sdhci_host *host)
{
struct mmc_host *mmc;
u32 caps[2];
u32 max_current_caps;
unsigned int ocr_avail;
int ret;
WARN_ON(host == NULL);
if (host == NULL)
return -EINVAL;
mmc = host->mmc;
if (debug_quirks)
host->quirks = debug_quirks;
if (debug_quirks2)
host->quirks2 = debug_quirks2;
sdhci_reset(host, SDHCI_RESET_ALL);
host->version = sdhci_readw(host, SDHCI_HOST_VERSION);
host->version = (host->version & SDHCI_SPEC_VER_MASK)
>> SDHCI_SPEC_VER_SHIFT;
if (host->version > SDHCI_SPEC_300) {
pr_err("%s: Unknown controller version (%d). "
"You may experience problems.\n", mmc_hostname(mmc),
host->version);
}
caps[0] = (host->quirks & SDHCI_QUIRK_MISSING_CAPS) ? host->caps :
sdhci_readl(host, SDHCI_CAPABILITIES);
caps[1] = (host->version >= SDHCI_SPEC_300) ?
sdhci_readl(host, SDHCI_CAPABILITIES_1) : 0;
if (host->quirks & SDHCI_QUIRK_FORCE_DMA)
host->flags |= SDHCI_USE_SDMA;
else if (!(caps[0] & SDHCI_CAN_DO_SDMA))
DBG("Controller doesn't have SDMA capability\n");
else
host->flags |= SDHCI_USE_SDMA;
if ((host->quirks & SDHCI_QUIRK_BROKEN_DMA) &&
(host->flags & SDHCI_USE_SDMA)) {
DBG("Disabling DMA as it is marked broken\n");
host->flags &= ~SDHCI_USE_SDMA;
}
if ((host->version >= SDHCI_SPEC_200) &&
(caps[0] & SDHCI_CAN_DO_ADMA2))
host->flags |= SDHCI_USE_ADMA;
if ((host->quirks & SDHCI_QUIRK_BROKEN_ADMA) &&
(host->flags & SDHCI_USE_ADMA)) {
DBG("Disabling ADMA as it is marked broken\n");
host->flags &= ~SDHCI_USE_ADMA;
}
if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
if (host->ops->enable_dma) {
if (host->ops->enable_dma(host)) {
pr_warning("%s: No suitable DMA "
"available. Falling back to PIO.\n",
mmc_hostname(mmc));
host->flags &=
~(SDHCI_USE_SDMA | SDHCI_USE_ADMA);
}
}
}
if (host->flags & SDHCI_USE_ADMA) {
/*
* We need to allocate descriptors for all sg entries
* (128/max_segments) and potentially one alignment transfer for
* each of those entries.
*/
if (host->ops->get_max_segments)
host->adma_max_desc = host->ops->get_max_segments();
else
host->adma_max_desc = 128;
host->adma_desc_sz = (host->adma_max_desc * 2 + 1) * 4;
host->align_buf_sz = host->adma_max_desc * 4;
pr_debug("%s: %s: dma_desc_size: %d\n",
mmc_hostname(host->mmc), __func__, host->adma_desc_sz);
host->adma_desc = dma_alloc_coherent(mmc_dev(host->mmc),
host->adma_desc_sz,
&host->adma_addr,
GFP_KERNEL);
host->align_buffer = dma_alloc_coherent(mmc_dev(host->mmc),
host->align_buf_sz,
&host->align_addr,
GFP_KERNEL);
if (!host->adma_desc || !host->align_buffer) {
dma_free_coherent(mmc_dev(host->mmc),
host->adma_desc_sz,
host->adma_desc,
host->adma_addr);
dma_free_coherent(mmc_dev(host->mmc),
host->align_buf_sz,
host->align_buffer,
host->align_addr);
pr_warn("%s: Unable to allocate ADMA "
"buffers. Falling back to standard DMA.\n",
mmc_hostname(mmc));
host->flags &= ~SDHCI_USE_ADMA;
host->adma_desc = NULL;
host->align_buffer = NULL;
} else if ((host->adma_addr & 0x3) ||
(host->align_addr & 0x3)) {
dma_free_coherent(mmc_dev(host->mmc),
host->adma_desc_sz,
host->adma_desc,
host->adma_addr);
dma_free_coherent(mmc_dev(host->mmc),
host->align_buf_sz,
host->align_buffer,
host->align_addr);
pr_warn("%s: Unable to allocate aligned ADMA buffers.\n",
mmc_hostname(mmc));
host->flags &= ~SDHCI_USE_ADMA;
host->adma_desc = NULL;
host->align_buffer = NULL;
}
}
host->next_data.cookie = 1;
/*
* If we use DMA, then it's up to the caller to set the DMA
* mask, but PIO does not need the hw shim so we set a new
* mask here in that case.
*/
if (!(host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA))) {
host->dma_mask = DMA_BIT_MASK(64);
mmc_dev(host->mmc)->dma_mask = &host->dma_mask;
}
if (host->version >= SDHCI_SPEC_300)
host->max_clk = (caps[0] & SDHCI_CLOCK_V3_BASE_MASK)
>> SDHCI_CLOCK_BASE_SHIFT;
else
host->max_clk = (caps[0] & SDHCI_CLOCK_BASE_MASK)
>> SDHCI_CLOCK_BASE_SHIFT;
host->max_clk *= 1000000;
sdhci_update_power_policy(host, SDHCI_PERFORMANCE_MODE_INIT);
if (host->max_clk == 0 || host->quirks &
SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN) {
if (!host->ops->get_max_clock) {
pr_err("%s: Hardware doesn't specify base clock "
"frequency.\n", mmc_hostname(mmc));
return -ENODEV;
}
host->max_clk = host->ops->get_max_clock(host);
}
/*
* In case of Host Controller v3.00, find out whether clock
* multiplier is supported.
*/
host->clk_mul = (caps[1] & SDHCI_CLOCK_MUL_MASK) >>
SDHCI_CLOCK_MUL_SHIFT;
/*
* In case the value in Clock Multiplier is 0, then programmable
* clock mode is not supported, otherwise the actual clock
* multiplier is one more than the value of Clock Multiplier
* in the Capabilities Register.
*/
if (host->clk_mul)
host->clk_mul += 1;
/*
* Set host parameters.
*/
mmc->ops = &sdhci_ops;
mmc->f_max = host->max_clk;
if (host->ops->get_min_clock)
mmc->f_min = host->ops->get_min_clock(host);
else if (host->version >= SDHCI_SPEC_300) {
if (host->clk_mul) {
mmc->f_min = (host->max_clk * host->clk_mul) / 1024;
mmc->f_max = host->max_clk * host->clk_mul;
} else
mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300;
} else
mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200;
host->timeout_clk =
(caps[0] & SDHCI_TIMEOUT_CLK_MASK) >> SDHCI_TIMEOUT_CLK_SHIFT;
if (host->timeout_clk == 0) {
if (host->ops->get_timeout_clock) {
host->timeout_clk = host->ops->get_timeout_clock(host);
} else if (!(host->quirks &
SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) {
pr_err("%s: Hardware doesn't specify timeout clock "
"frequency.\n", mmc_hostname(mmc));
return -ENODEV;
}
}
if (caps[0] & SDHCI_TIMEOUT_CLK_UNIT)
host->timeout_clk *= 1000;
if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)
host->timeout_clk = mmc->f_max / 1000;
if (!(host->quirks2 & SDHCI_QUIRK2_USE_MAX_DISCARD_SIZE))
mmc->max_discard_to = (1 << 27) / host->timeout_clk;
mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_ERASE | MMC_CAP_CMD23;
if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12)
host->flags |= SDHCI_AUTO_CMD12;
/* Auto-CMD23 stuff only works in ADMA or PIO. */
if ((host->version >= SDHCI_SPEC_300) &&
((host->flags & SDHCI_USE_ADMA) ||
!(host->flags & SDHCI_USE_SDMA))) {
host->flags |= SDHCI_AUTO_CMD23;
DBG("%s: Auto-CMD23 available\n", mmc_hostname(mmc));
} else {
DBG("%s: Auto-CMD23 unavailable\n", mmc_hostname(mmc));
}
/*
* A controller may support 8-bit width, but the board itself
* might not have the pins brought out. Boards that support
* 8-bit width must set "mmc->caps |= MMC_CAP_8_BIT_DATA;" in
* their platform code before calling sdhci_add_host(), and we
* won't assume 8-bit width for hosts without that CAP.
*/
if (!(host->quirks & SDHCI_QUIRK_FORCE_1_BIT_DATA))
mmc->caps |= MMC_CAP_4_BIT_DATA;
if (caps[0] & SDHCI_CAN_DO_HISPD)
mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) &&
mmc_card_is_removable(mmc))
mmc->caps |= MMC_CAP_NEEDS_POLL;
/* Any UHS-I mode in caps implies SDR12 and SDR25 support. */
if (caps[1] & (SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
SDHCI_SUPPORT_DDR50))
mmc->caps |= MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25;
/* SDR104 supports also implies SDR50 support */
if (caps[1] & SDHCI_SUPPORT_SDR104)
mmc->caps |= MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_SDR50;
else if (caps[1] & SDHCI_SUPPORT_SDR50)
mmc->caps |= MMC_CAP_UHS_SDR50;
if (caps[1] & SDHCI_SUPPORT_DDR50)
mmc->caps |= MMC_CAP_UHS_DDR50;
/* Does the host need tuning for SDR50? */
if (caps[1] & SDHCI_USE_SDR50_TUNING)
host->flags |= SDHCI_SDR50_NEEDS_TUNING;
/* Does the host need tuning for HS200? */
if (mmc->caps2 & MMC_CAP2_HS200)
host->flags |= SDHCI_HS200_NEEDS_TUNING;
/* Does the host need tuning for HS400? */
if (mmc->caps2 & MMC_CAP2_HS400)
host->flags |= SDHCI_HS400_NEEDS_TUNING;
/* Driver Type(s) (A, C, D) supported by the host */
if (caps[1] & SDHCI_DRIVER_TYPE_A)
mmc->caps |= MMC_CAP_DRIVER_TYPE_A;
if (caps[1] & SDHCI_DRIVER_TYPE_C)
mmc->caps |= MMC_CAP_DRIVER_TYPE_C;
if (caps[1] & SDHCI_DRIVER_TYPE_D)
mmc->caps |= MMC_CAP_DRIVER_TYPE_D;
/* Initial value for re-tuning timer count */
host->tuning_count = (caps[1] & SDHCI_RETUNING_TIMER_COUNT_MASK) >>
SDHCI_RETUNING_TIMER_COUNT_SHIFT;
/*
* In case Re-tuning Timer is not disabled, the actual value of
* re-tuning timer will be 2 ^ (n - 1).
*/
if (host->tuning_count)
host->tuning_count = 1 << (host->tuning_count - 1);
/* Re-tuning mode supported by the Host Controller */
host->tuning_mode = (caps[1] & SDHCI_RETUNING_MODE_MASK) >>
SDHCI_RETUNING_MODE_SHIFT;
ocr_avail = 0;
/*
* According to SD Host Controller spec v3.00, if the Host System
* can afford more than 150mA, Host Driver should set XPC to 1. Also
* the value is meaningful only if Voltage Support in the Capabilities
* register is set. The actual current value is 4 times the register
* value.
*/
max_current_caps = sdhci_readl(host, SDHCI_MAX_CURRENT);
if (caps[0] & SDHCI_CAN_VDD_330) {
int max_current_330;
ocr_avail |= MMC_VDD_32_33 | MMC_VDD_33_34;
max_current_330 = ((max_current_caps &
SDHCI_MAX_CURRENT_330_MASK) >>
SDHCI_MAX_CURRENT_330_SHIFT) *
SDHCI_MAX_CURRENT_MULTIPLIER;
if (max_current_330 > 150)
mmc->caps |= MMC_CAP_SET_XPC_330;
}
if (caps[0] & SDHCI_CAN_VDD_300) {
int max_current_300;
ocr_avail |= MMC_VDD_29_30 | MMC_VDD_30_31;
max_current_300 = ((max_current_caps &
SDHCI_MAX_CURRENT_300_MASK) >>
SDHCI_MAX_CURRENT_300_SHIFT) *
SDHCI_MAX_CURRENT_MULTIPLIER;
if (max_current_300 > 150)
mmc->caps |= MMC_CAP_SET_XPC_300;
}
if (caps[0] & SDHCI_CAN_VDD_180) {
int max_current_180;
ocr_avail |= MMC_VDD_165_195;
max_current_180 = ((max_current_caps &
SDHCI_MAX_CURRENT_180_MASK) >>
SDHCI_MAX_CURRENT_180_SHIFT) *
SDHCI_MAX_CURRENT_MULTIPLIER;
if (max_current_180 > 150)
mmc->caps |= MMC_CAP_SET_XPC_180;
/* Maximum current capabilities of the host at 1.8V */
if (max_current_180 >= 800)
mmc->caps |= MMC_CAP_MAX_CURRENT_800;
else if (max_current_180 >= 600)
mmc->caps |= MMC_CAP_MAX_CURRENT_600;
else if (max_current_180 >= 400)
mmc->caps |= MMC_CAP_MAX_CURRENT_400;
else
mmc->caps |= MMC_CAP_MAX_CURRENT_200;
}
mmc->ocr_avail = ocr_avail;
mmc->ocr_avail_sdio = ocr_avail;
if (host->ocr_avail_sdio)
mmc->ocr_avail_sdio &= host->ocr_avail_sdio;
mmc->ocr_avail_sd = ocr_avail;
if (host->ocr_avail_sd)
mmc->ocr_avail_sd &= host->ocr_avail_sd;
else /* normal SD controllers don't support 1.8V */
mmc->ocr_avail_sd &= ~MMC_VDD_165_195;
mmc->ocr_avail_mmc = ocr_avail;
if (host->ocr_avail_mmc)
mmc->ocr_avail_mmc &= host->ocr_avail_mmc;
if (mmc->ocr_avail == 0) {
pr_err("%s: Hardware doesn't report any "
"support voltages.\n", mmc_hostname(mmc));
return -ENODEV;
}
/*
* Maximum number of segments. Depends on if the hardware
* can do scatter/gather or not.
*/
if (host->flags & SDHCI_USE_ADMA)
mmc->max_segs = host->adma_max_desc;
else if (host->flags & SDHCI_USE_SDMA)
mmc->max_segs = 1;
else/* PIO */
mmc->max_segs = host->adma_max_desc;
/*
* Maximum number of sectors in one transfer. Limited by DMA boundary
* size (512KiB), unless specified by platform specific driver. Each
* descriptor can transfer a maximum of 64KB.
*/
if (host->ops->get_max_segments)
mmc->max_req_size = (host->adma_max_desc * 65536);
else
mmc->max_req_size = 524288;
/*
* Maximum segment size. Could be one segment with the maximum number
* of bytes. When doing hardware scatter/gather, each entry cannot
* be larger than 64 KiB though.
*/
if (host->flags & SDHCI_USE_ADMA) {
if (host->quirks & SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC)
mmc->max_seg_size = 65535;
else
mmc->max_seg_size = 65536;
} else {
mmc->max_seg_size = mmc->max_req_size;
}
/*
* Maximum block size. This varies from controller to controller and
* is specified in the capabilities register.
*/
if (host->quirks & SDHCI_QUIRK_FORCE_BLK_SZ_2048) {
mmc->max_blk_size = 2;
} else {
mmc->max_blk_size = (caps[0] & SDHCI_MAX_BLOCK_MASK) >>
SDHCI_MAX_BLOCK_SHIFT;
if (mmc->max_blk_size >= 3) {
pr_warning("%s: Invalid maximum block size, "
"assuming 512 bytes\n", mmc_hostname(mmc));
mmc->max_blk_size = 0;
}
}
mmc->max_blk_size = 512 << mmc->max_blk_size;
/*
* Maximum block count.
*/
mmc->max_blk_count = (host->quirks & SDHCI_QUIRK_NO_MULTIBLOCK) ? 1 : 65535;
/*
* Init tasklets.
*/
tasklet_init(&host->card_tasklet,
sdhci_tasklet_card, (unsigned long)host);
tasklet_init(&host->finish_tasklet,
sdhci_tasklet_finish, (unsigned long)host);
setup_timer(&host->timer, sdhci_timeout_timer, (unsigned long)host);
if (host->version >= SDHCI_SPEC_300) {
init_waitqueue_head(&host->buf_ready_int);
/* Initialize re-tuning timer */
init_timer(&host->tuning_timer);
host->tuning_timer.data = (unsigned long)host;
host->tuning_timer.function = sdhci_tuning_timer;
}
ret = request_irq(host->irq, sdhci_irq, IRQF_SHARED,
mmc_hostname(mmc), host);
if (ret)
goto untasklet;
host->irq_enabled = true;
host->vmmc = regulator_get(mmc_dev(mmc), "vmmc");
if (IS_ERR(host->vmmc)) {
pr_info("%s: no vmmc regulator found\n", mmc_hostname(mmc));
host->vmmc = NULL;
}
host->irq_enabled = true;
sdhci_init(host, 0);
#ifdef CONFIG_MMC_DEBUG
sdhci_dumpregs(host);
#endif
#ifdef SDHCI_USE_LEDS_CLASS
snprintf(host->led_name, sizeof(host->led_name),
"%s::", mmc_hostname(mmc));
host->led.name = host->led_name;
host->led.brightness = LED_OFF;
host->led.default_trigger = mmc_hostname(mmc);
host->led.brightness_set = sdhci_led_control;
ret = led_classdev_register(mmc_dev(mmc), &host->led);
if (ret)
goto reset;
#endif
mmiowb();
if (host->cpu_dma_latency_us) {
host->pm_qos_timeout_us = 10000; /* default value */
pm_qos_add_request(&host->pm_qos_req_dma,
PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
host->pm_qos_tout.show = show_sdhci_pm_qos_tout;
host->pm_qos_tout.store = store_sdhci_pm_qos_tout;
sysfs_attr_init(&host->pm_qos_tout.attr);
host->pm_qos_tout.attr.name = "pm_qos_unvote_delay";
host->pm_qos_tout.attr.mode = S_IRUGO | S_IWUSR;
ret = device_create_file(mmc_dev(mmc), &host->pm_qos_tout);
if (ret)
pr_err("%s: cannot create pm_qos_unvote_delay %d\n",
mmc_hostname(mmc), ret);
}
if (caps[0] & SDHCI_ASYNC_INTR)
host->async_int_supp = true;
mmc_add_host(mmc);
if (host->quirks2 & SDHCI_QUIRK2_IGN_DATA_END_BIT_ERROR)
sdhci_clear_set_irqs(host, SDHCI_INT_DATA_END_BIT, 0);
pr_info("%s: SDHCI controller on %s [%s] using %s\n",
mmc_hostname(mmc), host->hw_name, dev_name(mmc_dev(mmc)),
(host->flags & SDHCI_USE_ADMA) ? "ADMA" :
(host->flags & SDHCI_USE_SDMA) ? "DMA" : "PIO");
sdhci_enable_card_detection(host);
return 0;
#ifdef SDHCI_USE_LEDS_CLASS
reset:
sdhci_reset(host, SDHCI_RESET_ALL);
sdhci_mask_irqs(host, SDHCI_INT_ALL_MASK);
free_irq(host->irq, host);
#endif
untasklet:
tasklet_kill(&host->card_tasklet);
tasklet_kill(&host->finish_tasklet);
return ret;
}
EXPORT_SYMBOL_GPL(sdhci_add_host);
/*
 * sdhci_remove_host - tear down and unregister an SDHCI host
 * @host: the host to remove
 * @dead: non-zero when the controller hardware is already gone
 *        (e.g. hot-unplugged) and its registers must not be touched
 *
 * Reverses sdhci_add_host(): fails any request still in flight,
 * unregisters from the MMC core, quiesces interrupts, timers and
 * tasklets, and releases the regulator and the coherent DMA buffers
 * used for ADMA descriptors and alignment.
 */
void sdhci_remove_host(struct sdhci_host *host, int dead)
{
	unsigned long flags;

	if (dead) {
		/* Hardware is gone: mark the host dead and complete any
		 * outstanding request with -ENOMEDIUM so callers unblock. */
		spin_lock_irqsave(&host->lock, flags);
		host->flags |= SDHCI_DEVICE_DEAD;
		if (host->mrq) {
			pr_err("%s: Controller removed during "
				" transfer!\n", mmc_hostname(host->mmc));
			host->mrq->cmd->error = -ENOMEDIUM;
			tasklet_schedule(&host->finish_tasklet);
		}
		spin_unlock_irqrestore(&host->lock, flags);
	}

	sdhci_update_power_policy(host, SDHCI_POWER_SAVE_MODE);
	sdhci_disable_card_detection(host);

	if (host->cpu_dma_latency_us)
		pm_qos_remove_request(&host->pm_qos_req_dma);
	mmc_remove_host(host->mmc);

#ifdef SDHCI_USE_LEDS_CLASS
	led_classdev_unregister(&host->led);
#endif

	/* Only reset the controller if it is still present. */
	if (!dead)
		sdhci_reset(host, SDHCI_RESET_ALL);

	sdhci_mask_irqs(host, SDHCI_INT_ALL_MASK);
	free_irq(host->irq, host);

	del_timer_sync(&host->timer);

	tasklet_kill(&host->card_tasklet);
	tasklet_kill(&host->finish_tasklet);

	if (host->vmmc)
		regulator_put(host->vmmc);

	/* Free ADMA descriptor table and bounce/alignment buffer. */
	if (host->adma_desc)
		dma_free_coherent(mmc_dev(host->mmc), host->adma_desc_sz,
			host->adma_desc, host->adma_addr);
	if (host->align_buffer)
		dma_free_coherent(mmc_dev(host->mmc), host->align_buf_sz,
			host->align_buffer, host->align_addr);

	host->adma_desc = NULL;
	host->align_buffer = NULL;
}
EXPORT_SYMBOL_GPL(sdhci_remove_host);
/*
 * sdhci_free_host - free the host structure
 * @host: the host to free
 *
 * Releases the mmc_host that embeds this sdhci_host.  Call only after
 * sdhci_remove_host() has quiesced the hardware.
 */
void sdhci_free_host(struct sdhci_host *host)
{
	mmc_free_host(host->mmc);
}
EXPORT_SYMBOL_GPL(sdhci_free_host);
/*****************************************************************************\
* *
* Driver init/exit *
* *
\*****************************************************************************/
/*
 * Module init: only prints a banner.  Actual hosts are registered by
 * the bus/platform glue drivers calling sdhci_add_host().
 */
static int __init sdhci_drv_init(void)
{
	pr_info(DRIVER_NAME
		": Secure Digital Host Controller Interface driver\n");
	pr_info(DRIVER_NAME ": Copyright(c) Pierre Ossman\n");

	return 0;
}
/* Module exit: nothing to undo — init only printed a banner. */
static void __exit sdhci_drv_exit(void)
{
}
module_init(sdhci_drv_init);
module_exit(sdhci_drv_exit);

/* Read-only module parameters to force quirk bits for debugging. */
module_param(debug_quirks, uint, 0444);
module_param(debug_quirks2, uint, 0444);

MODULE_AUTHOR("Pierre Ossman <pierre@ossman.eu>");
MODULE_DESCRIPTION("Secure Digital Host Controller Interface core driver");
MODULE_LICENSE("GPL");

MODULE_PARM_DESC(debug_quirks, "Force certain quirks.");
MODULE_PARM_DESC(debug_quirks2, "Force certain other quirks.");
|
farchanrifai/kernel_cancro
|
drivers/mmc/host/sdhci.c
|
C
|
gpl-2.0
| 105,152
|
/*
* Written by Bastien Chevreux (BaCh)
*
* Copyright (C) 2000 and later by Bastien Chevreux
*
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the
* Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
*
*/
// $Id$
#include "io/fasta.H"
// for boost::trim, split
#include <boost/algorithm/string.hpp>
using namespace std;
// Plain vanilla constructor: reset all members, then run one-time init.
FASTA::FASTA()
{
  FUNCSTART("FASTA::FASTA()");

  zeroVars();
  init();

  FUNCEND();
}
// Reset all data members (names, sequence, integer values) to empty.
void FASTA::zeroVars()
{
  FUNCSTART("void FASTA::zeroVars()");

  FA_fastaseqname.clear();
  FA_qualseqname.clear();
  FA_sequence.clear();
  FA_intvalues.clear();

  FUNCEND();
}
// One-time initialisation hook; currently nothing to set up.
void FASTA::init()
{
  FUNCSTART("void FASTA::init()");
  FUNCEND();
}
// Destructor: release all held data via discard().
FASTA::~FASTA()
{
  FUNCSTART("FASTA::~FASTA()");

  discard();

  FUNCEND();
}
// Drop all loaded data; the object can be reused afterwards.
void FASTA::discard()
{
  FUNCSTART("FASTA::discard()");

  zeroVars();

  FUNCEND();
}
/*************************************************************************
*
* data downloaded from NCBI has one problem: the name of the sequences
* in fasta file is the gnl|ti number ... the real read name is some-
* what later, preceded by the string " name:"
* this function searches for reads with that characteristics and sets
* the name of the string given to the "real" one
*
*************************************************************************/
void FASTA::adjustNameNCBIHack(string & name)
{
  FUNCSTART("void FASTA::adjustNameNCBIHack(string & name)");

  const string blanks(" \t");

  // NCBI trace archive style: "gnl|ti|1234 ... name:realname ..."
  // If a " name:" tag is present, the token following it becomes the name.
  if(name.size()>7 && name.compare(0,7,"gnl|ti|")==0){
    string::size_type tokenstart=name.find(" name:",0);
    if(tokenstart!=string::npos){
      tokenstart+=6;
      string::size_type tokenend=name.find_first_of(blanks,tokenstart);
      if(tokenend==string::npos) tokenend=name.size();
      string realname(name,tokenstart,tokenend-tokenstart);
      swap(name,realname);
      return;
    }
  }

  // NCBI GI style: "gi|number|db|accession|description" — if the line
  // splits into exactly five '|' separated fields, field 4 is the name.
  if(name.size() > 3 && name.compare(0,3,"gi|")==0){
    vector<string> subnames;
    boost::split(subnames, name, boost::is_any_of("|"));
    if(subnames.size()==5){
      swap(name,subnames[3]);
      return;
    }
  }

  // Neither NCBI style applied: keep only the first whitespace
  // delimited token of the line.
  string::size_type tokenend=name.find_first_of(blanks,0);
  if(tokenend!=string::npos){
    name.erase(tokenend);
  }

  // there might still be "|" in the name, replace those
  //for(uint32 i=0; i<name.size(); i++){
  //  if(name[i]=='|') name[i]='_';
  //}

  FUNCEND();
}
/*************************************************************************
*
*
*
*
*************************************************************************/
/*
 * Read the next FASTA entry from the stream into FA_fastaseqname and
 * FA_sequence.  The parser is character based: it tolerates MS-DOS
 * line endings, skips blanks, normalises '-' gaps to '*', and collects
 * illegal characters for one combined fatal report.  On return the
 * stream is positioned so that the '>' of the following entry (if any)
 * is seen again by the next call.
 */
void FASTA::loadNextSeq(ifstream & fin)
{
  FUNCSTART("void FASTA::loadNextSeq(ifstream & fin)");

  FA_fastaseqname.clear();
  FA_sequence.clear();

  bool read_read=false;   // set once a complete entry has been consumed
  bool nl=true;           // true directly after a line break
  bool inseq=false;       // true after a '>' header line has been parsed

  char inchar;

  vector<char> badchars;        // illegal sequence characters seen ...
  vector<streampos> badpos;     // ... and their file positions

  //cout << "------------\n";
  while(!(fin.get(inchar)).eof() && !read_read) {
    //cout << "++" << inchar << "++ "; cout.flush();
    switch(inchar) {
    case ' ' : continue;
    case '\t' : continue;
    case '\r' : continue; // that's from MS-DOS Files, where \n is \r\n
    case '\n' : {
      nl=true;
      continue;
    }
    case '>' : {
      if(inseq==true) {
        // start of the NEXT entry: push it back and stop
        read_read=true;
        fin.unget();
        // FIXME: why? where does this char get eaten?
        fin.unget();
        //fin.putback('>');
        //// FIXME: why? where does this char get eaten?
        //fin.putback(' ');
        break;
      }
      if(nl==true) {
        // collect the name line up to the next line break
        while(!(fin.get(inchar)).eof()) {
          //cout << "--" << inchar << "-- "; cout.flush();
          if(inchar!='\n' && inchar!='\r'){
            FA_fastaseqname+=inchar;
          } else {
            fin.unget();
            //fin.putback(inchar);
            break;
          }
        }
        if(FA_fastaseqname.size() == 0) {
          MIRANOTIFY(Notify::FATAL,"Missing name of fasta sequence at file byte position " << fin.tellg());
        }
        //cout << "RawName: " << FA_fastaseqname << endl;
        adjustNameNCBIHack(FA_fastaseqname);
        nl=true;
        inseq=true;
      } else {
        MIRANOTIFY(Notify::FATAL,"Illegal character (" << inchar << ": " << hex << static_cast<uint16>(inchar) << dec << ") in fasta sequence name at file byte position " << fin.tellg() << endl);
      }
      break;
    }
    default : {
      // first character of the sequence body
      if(inseq==false || nl==false) {
        MIRANOTIFY(Notify::FATAL,"Illegal character (" << inchar << ": " << hex << static_cast<uint16>(inchar) << dec << ") at begin of fasta sequence at file byte position " << fin.tellg());
      }
      fin.unget();
      //fin.putback(inchar);
      badchars.clear();
      badpos.clear();
      // slurp sequence characters until the next '>' header or EOF
      while(!(fin.get(inchar)).eof()) {
        //cout << "IC: " << inchar<<endl;
        if(inchar== '>' && nl==true) break;
        if(inchar== ' ') continue;
        if(inchar== '\t') continue;
        if(inchar== '\r' ) continue; // from MS-DOS ... bla bla *sigh*
        nl=false;
        if(inchar== '\n') {
          nl=true;
          //cout << "dingNL:" << nl;
          continue;
        }
        //cout <<"...";
        switch(toupper(inchar)){
        case 'A' :
        case 'C' :
        case 'G' :
        case 'T' :
        case 'N' :
        case 'X' :
        case 'R' :
        case 'Y' :
        case 'M' :
        case 'S' :
        case 'K' :
        case 'W' :
        case 'H' :
        case 'B' :
        case 'V' :
        case 'D' :
        case '*' : break;         // valid IUPAC code or gap
        case '-' :
          inchar='*';             // normalise '-' gaps to '*'
          break;
        default : {
          // remember offending characters for the combined report
          // NOTE(review): '<=100' allows up to 101 entries while the
          // "more errors" hint below tests '==100' — looks like an
          // off-by-one; confirm the intended cap.
          if(badchars.size()<=100){
            badchars.push_back(inchar);
            badpos.push_back(fin.tellg());
          }
        }
        }
        FA_sequence+=inchar;
      }
      inseq=false;
      if(!fin.eof()) {
        fin.unget();
        // FIXME: why? where does this character get eaten?
        fin.unget();
        // that doesn't work with gcc3.2
        //fin.putback(inchar);
        //// FIXME: why? where does this character get eaten?
        //fin.putback(' ');
      }
      //cout << FA_sequence << endl;
      if(badchars.size()>0){
        cerr << '\n';
        for(uint32 i=0; i<badchars.size(); i++){
          cerr << "-- 2 Illegal character (" << badchars[i] << ": " << hex << static_cast<uint16>(badchars[i]) << dec << ") in fasta sequence at file byte position " << badpos[i] << endl;
        }
        if(badchars.size()==100){
          cerr << "\nThere may be more errors like the above, but stopping reporting here.\n";
        }
        cerr << "This happened in sequence: " << FA_fastaseqname << "\nPlease fix your file.\n";
        MIRANOTIFY(Notify::FATAL,"The sequence " << FA_sequence << " in file " << FA_fastaseqname << " showed unrecoverable errors while trying to load it (see also log above). Is it a valid FASTA sequence? Please double check ... and fix your file if necessary.");
      }
      read_read=true;
    }
    }
    // cout << inchar;
  }

  FUNCEND();
}
/*************************************************************************
*
*
*
*
*************************************************************************/
// Convenience wrapper: quality files are integer files capped at 255.
void FASTA::loadQual(const char * qualin)
{
  loadINT(qualin,255);
}
/*
 * Load one FASTA-style integer entry (e.g. quality values) from the
 * file 'qualin' into FA_qualseqname / FA_intvalues.  Values above
 * 'maxvalue' abort the load fatally (see loadNextINTSeq()); a missing
 * or zero-length file raises a MIRANOTIFY as well.
 */
void FASTA::loadINT(const char * qualin, int32 maxvalue)
{
  // BUGFIX: the trace tag wrongly said "loadQual" (copy & paste error);
  // it now names this function.
  FUNCSTART("void FASTA::loadINT(const char * qualin, int32 maxvalue)");

  ifstream qualfin;
  // open positioned at the end so tellg() directly yields the file size
  qualfin.open(qualin, ios::in|ios::ate);
  if(!qualfin){
    MIRANOTIFY(Notify::WARNING, "File not found: " << qualin);
  }
  if(!qualfin.tellg()){
    MIRANOTIFY(Notify::FATAL, "Zero length file: " << qualin);
  }
  qualfin.seekg(0, ios::beg);

  loadNextINTSeq(qualfin,maxvalue);

  qualfin.close();

  FUNCEND();
}
/*************************************************************************
*
*
*
*
*************************************************************************/
/*
 * Read the next FASTA-style integer entry from the stream into
 * FA_qualseqname / FA_intvalues.  The header line is parsed character
 * by character; the values themselves are read as whitespace separated
 * tokens.  Values may be negative; any value above 'maxvalue' aborts
 * fatally.  On return the stream is positioned so that the next '>'
 * header (if any) is read again by the following call.
 */
void FASTA::loadNextINTSeq(ifstream & fin, int32 maxvalue)
{
  FUNCSTART("void FASTA::loadNextINTSeq(ifstream & fin, int32 maxvalue)");

  FA_qualseqname.clear();
  FA_intvalues.clear();

  bool read_read=false;   // set once a complete entry has been consumed
  bool nl=true;           // true directly after a line break
  bool inseq=false;       // true after a '>' header line has been parsed
  bool isnegative=false;  // pending '-' sign for the next value

  char inchar=' ';
  while(!(fin.get(inchar)).eof() && !read_read) {
    switch(inchar) {
    case ' ' :
    case '\t' :
    case '\r' : {  // that's from MS-DOS Files, where \n is \r\n
      isnegative=false;
      break;
    }
    case '\n' : {
      nl=true;
      isnegative=false;
      continue;
    }
    case '-' : {
      isnegative=true;
      break;
    }
    case '>' : {
      if(inseq==true) {
        // start of the NEXT entry: push it back and stop
        read_read=true;
        fin.unget();
        // FIXME: why? where does this char get eaten?
        fin.unget();
        break;
      }
      if(nl==true) {
        // collect the name line up to the next line break
        while(!(fin.get(inchar)).eof()) {
          if(inchar!='\n' && inchar != '\r'){
            FA_qualseqname+=inchar;
          } else {
            fin.unget();
            break;
          }
        }
        if(FA_qualseqname.size() == 0) {
          MIRANOTIFY(Notify::FATAL,"Missing name of fasta sequence in quality file at byte position " << fin.tellg());
        }
        //cout << "Name: " << FA_qualseqname << endl;
        adjustNameNCBIHack(FA_qualseqname);
        nl=true;
        inseq=true;
      } else {
        MIRANOTIFY(Notify::FATAL,"Illegal character (" << inchar << ": " << hex << static_cast<uint16>(inchar) << dec << ") in fasta sequence name in integer value file at byte position " << fin.tellg());
      }
      break;
    }
    default : {
      if(inseq==false || nl==false) {
        MIRANOTIFY(Notify::FATAL,"Illegal character (" << inchar << ": " << hex << static_cast<uint16>(inchar) << dec << ") at begin of fasta integer value sequence in file at byte position " << fin.tellg());
      }
      fin.unget();
      string tmp;
      // token based from here on: whitespace separated integers
      while(!fin.eof()) {
        tmp.clear();
        fin >> tmp;
        if(tmp.size()==0) break;
        if(tmp[0]=='>') break;    // next entry reached
        if(tmp[0]=='-') {
          isnegative=true;
          tmp=tmp.substr(1);
        }
        if(tmp[0]<'0' || tmp[0]>'9') {
          MIRANOTIFY(Notify::FATAL,"Illegal character (" << tmp[0] << ": " << hex << static_cast<uint16>(tmp[0]) << dec << ") in integer value sequence in file at byte position " << (static_cast<uint32>(fin.tellg()))-tmp.size());
        }
        int32 thequal=static_cast<int32>(atoi(tmp.c_str()));
        if(thequal>maxvalue){
          MIRANOTIFY(Notify::FATAL,"Illegal value " << thequal << " (>" << maxvalue << ") in sequence " << FA_qualseqname << " at byte position in file " << fin.tellg() << "\nPlease fix your file.");
        }
        if(isnegative) thequal=-thequal;
        isnegative=false;
        FA_intvalues.push_back(thequal);
      }
      if(!fin.eof() && tmp.size()) {
        // push the token of the next entry back onto the stream
        //cout << "Putting back:\n";
        for(size_t tmpi=tmp.size()-1; tmpi!=0; tmpi--){
          fin.unget();
          //cout << tmp[tmpi];
        }
        fin.unget();
        // FIXME: why? where does this char get eaten?
        fin.unget();
      }
      read_read=true;
    }
    }
    // cout << inchar;
  }

  FUNCEND();
}
/*************************************************************************
*
*
*
*
*************************************************************************/
/*
 * Sanity check after loading a sequence/quality pair: the names of the
 * two entries and the number of bases vs. values must agree, otherwise
 * a fatal MIRANOTIFY is raised.
 */
void FASTA::testIfSeqAndQualMatch()
{
  FUNCSTART("void FASTA::testIfSeqAndQualMatch()");

  if(FA_fastaseqname!=FA_qualseqname) {
    MIRANOTIFY(Notify::FATAL,"Name of read in fasta file (" << FA_fastaseqname << ") and in quality file (" << FA_qualseqname << ") do not match.");
  }
  if(FA_sequence.size()!=FA_intvalues.size()){
    MIRANOTIFY(Notify::FATAL,"Read " << FA_fastaseqname << " has " << FA_sequence.size() << " bases in fasta file, but " << FA_intvalues.size() << " quality values. Cannot be.");
  }

  FUNCEND();
}
/*************************************************************************
*
*
*
*
*************************************************************************/
// True when the object currently holds no data at all: no sequence
// name, no quality name, no bases and no integer values.
bool FASTA::testIfEmpty()
{
  return FA_fastaseqname.empty()
         && FA_qualseqname.empty()
         && FA_sequence.empty()
         && FA_intvalues.empty();
}
/*************************************************************************
*
*
*
*
*************************************************************************/
/*
 * Load the next sequence/quality pair from the two streams and verify
 * that they belong together.
 *
 * If reading the sequence throws, the quality entry is still consumed
 * first so both streams stay in sync for subsequent calls; the original
 * error is then propagated.
 */
void FASTA::loadNext(ifstream & fastafin, ifstream & qualin)
{
  FUNCSTART("void FASTA::loadNext(ifstream & fastafin, ifstream & qualin)");

  try {
    loadNextSeq(fastafin);
  }
  catch(Notify &){
    // BUGFIX: catch by reference and re-throw with "throw;" instead of
    // catching Notify by value and throwing a copy — avoids copying and
    // potential slicing of the exception object.
    loadNextINTSeq(qualin,255);
    throw;
  }

  loadNextINTSeq(qualin,255);
  testIfSeqAndQualMatch();

  FUNCEND();
}
/*************************************************************************
*
*
*
*
*************************************************************************/
/*
 * Load the first sequence of the given FASTA file.  Missing or
 * zero-length files raise a MIRANOTIFY.
 */
void FASTA::load(const char * fastain)
{
  FUNCSTART("void FASTA::load(const char * fastain)");

  // open positioned at the end so tellg() directly yields the file size
  ifstream fin(fastain, ios::in|ios::ate);
  if(!fin){
    MIRANOTIFY(Notify::WARNING, "File not found: " << fastain);
  }
  if(!fin.tellg()){
    MIRANOTIFY(Notify::FATAL, "Zero length file: " << fastain);
  }
  fin.seekg(0, ios::beg);

  loadNextSeq(fin);

  fin.close();

  FUNCEND();
}
/*************************************************************************
*
*
*
*
*************************************************************************/
void FASTA::load(const char * fastain, const char * qualin)
{
FUNCSTART("void FASTA::load(const char * fastain, const char * qualin)");
ifstream fin1;
fin1.open(fastain, ios::in|ios::ate);
if(!fin1){
MIRANOTIFY(Notify::WARNING, "File not found: " << fastain);
}
if(!fin1.tellg()){
MIRANOTIFY(Notify::FATAL, "Zero length file: " << fastain);
}
fin1.seekg(0, ios::beg);
ifstream fin2;
fin2.open(qualin, ios::in|ios::ate);
if(!fin2){
MIRANOTIFY(Notify::WARNING, "File not found: " << qualin);
}
if(!fin2.tellg()){
MIRANOTIFY(Notify::FATAL, "Zero length file: " << qualin);
}
fin2.seekg(0, ios::beg);
loadNext(fin1,fin2);
fin1.close();
fin2.close();
FUNCEND();
}
/*************************************************************************
*
*
*
*
*************************************************************************/
/*
 * Write the current sequence in FASTA format (60 bases per line) to
 * the given stream.  Nothing is written when no sequence is loaded.
 */
void FASTA::dumpSequence(ostream & fout)
{
  FUNCSTART("void FASTA::dumpSequence(ofstream & fout)");

  if(!FA_fastaseqname.empty()){
    fout << ">" << FA_fastaseqname;
    string::size_type pos=0;
    while(pos<FA_sequence.size()){
      if(pos%60==0) fout << '\n';    // line break before each 60-base block
      fout << FA_sequence[pos];
      ++pos;
    }
    fout << endl;
  }

  FUNCEND();
}
/*************************************************************************
*
*
*
*
*************************************************************************/
/*
 * Write the currently loaded integer values in FASTA quality format
 * (25 values per line) to the given stream.  Nothing is written when
 * no quality entry is loaded.
 */
void FASTA::dumpQuality(ostream & fout)
{
  FUNCSTART("void FASTA::dumpQuality(ofstream & fout)");

  if(!FA_qualseqname.empty()){
    fout << ">" << FA_qualseqname;
    size_t pos=0;
    while(pos<FA_intvalues.size()){
      if(pos%25==0) fout << '\n';    // line break before each 25-value block
      fout << FA_intvalues[pos] << ' ';
      ++pos;
    }
    fout << endl;
  }

  FUNCEND();
}
//// Copy constructor
//// no discard needed as this object will be freshly created when
//// called through this constructor
//FASTA::FASTA(FASTA const &other)
//{
// FUNCSTART("FASTA::FASTA(FASTA const &other)");
//
// FA_valid=0;
//
// *this=other; // call the copy operator
//
// FUNCEND();
//}
//
//// Copy operator, needed by copy-constructor
//FASTA const & FASTA::operator=(FASTA const & other)
//{
// FUNCSTART("FASTA const & FASTA::operator=(FASTA const & other)");
// ERROR("Not implemented yet.");
// FUNCEND();
// return *this;
//}
//ostream & operator<<(ostream &ostr, FASTA const &fas)
//{
// FUNCSTART("friend ostream & FASTA::operator<<(ostream &ostr, const &fas)");
// ERROR("Not implemented yet.");
//
// FUNCEND();
// return ostr;
//}
|
sidney/mira4-assembler-patches
|
src/io/fasta.C
|
C++
|
gpl-2.0
| 16,164
|
/*
* Copyright (c) 2007, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package com.sun.max.vm;
/**
 * The build level of a VM image: {@code PRODUCT} or {@code DEBUG}.
 */
public enum BuildLevel {
    PRODUCT, DEBUG;
}
|
arodchen/MaxSim
|
maxine/com.oracle.max.vm/src/com/sun/max/vm/BuildLevel.java
|
Java
|
gpl-2.0
| 1,130
|
/*
Copyright (c) 2001, 2013, Oracle and/or its affiliates.
Copyright (c) 2010, 2015, MariaDB
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
/* By Jani Tolonen, 2001-04-20, MySQL Development Team */
#define CHECK_VERSION "2.7.4-MariaDB"
#include "client_priv.h"
#include <m_ctype.h>
#include <mysql_version.h>
#include <mysqld_error.h>
#include <sslopt-vars.h>
#include <welcome_copyright_notice.h> /* ORACLE_WELCOME_COPYRIGHT_NOTICE */
/* Exit codes */
#define EX_USAGE 1
#define EX_MYSQLERR 2

/* ALTER instead of repair. */
#define MAX_ALTER_STR_SIZE 128 * 1024
#define KEY_PARTITIONING_CHANGED_STR "KEY () partitioning changed"

/* Connection handle; 'sock' points to mysql_connection once connected. */
static MYSQL mysql_connection, *sock = 0;

/* Boolean command-line option flags (targets of my_long_options below). */
static my_bool opt_alldbs = 0, opt_check_only_changed = 0, opt_extended = 0,
               opt_compress = 0, opt_databases = 0, opt_fast = 0,
               opt_medium_check = 0, opt_quick = 0, opt_all_in_1 = 0,
               opt_silent = 0, opt_auto_repair = 0, ignore_errors = 0,
               tty_password= 0, opt_frm= 0, debug_info_flag= 0, debug_check_flag= 0,
               opt_fix_table_names= 0, opt_fix_db_names= 0, opt_upgrade= 0,
               opt_do_tables= 1;
static my_bool opt_write_binlog= 1, opt_flush_tables= 0;
static uint verbose = 0, opt_mysql_port=0;
static int my_end_arg;
static char * opt_mysql_unix_port = 0;
static char *opt_password = 0, *current_user = 0,
            *default_charset= 0, *current_host= 0;
static char *opt_plugin_dir= 0, *opt_default_auth= 0;
static int first_error = 0;
static char *opt_skip_database;

/* Tables/views collected for later repair or rebuild passes. */
DYNAMIC_ARRAY tables4repair, tables4rebuild, alter_table_cmds;
DYNAMIC_ARRAY views4repair;
static char *shared_memory_base_name=0;
static uint opt_protocol=0;

/* Operation selected on the command line; operation_name[] indexes by
   the enum value (slot 0 is a placeholder). */
enum operations { DO_CHECK=1, DO_REPAIR, DO_ANALYZE, DO_OPTIMIZE, DO_FIX_NAMES };
const char *operation_name[]=
{
  "???", "check", "repair", "analyze", "optimize", "fix names"
};

/* How views are processed: not at all, checked, or upgraded from MySQL. */
typedef enum { DO_VIEWS_NO, DO_VIEWS_YES, DO_VIEWS_FROM_MYSQL } enum_do_views;
const char *do_views_opts[]= {"NO", "YES", "UPGRADE_FROM_MYSQL", NullS};
TYPELIB do_views_typelib= { array_elements(do_views_opts) - 1, "",
                            do_views_opts, NULL };
static ulong opt_do_views= DO_VIEWS_NO;
static struct my_option my_long_options[] =
{
{"all-databases", 'A',
"Check all the databases. This is the same as --databases with all databases selected.",
&opt_alldbs, &opt_alldbs, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0,
0, 0},
{"analyze", 'a', "Analyze given tables.", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0,
0, 0, 0, 0},
{"all-in-1", '1',
"Instead of issuing one query for each table, use one query per database, naming all tables in the database in a comma-separated list.",
&opt_all_in_1, &opt_all_in_1, 0, GET_BOOL, NO_ARG, 0, 0, 0,
0, 0, 0},
{"auto-repair", OPT_AUTO_REPAIR,
"If a checked table is corrupted, automatically fix it. Repairing will be done after all tables have been checked, if corrupted ones were found.",
&opt_auto_repair, &opt_auto_repair, 0, GET_BOOL, NO_ARG, 0,
0, 0, 0, 0, 0},
{"character-sets-dir", OPT_CHARSETS_DIR,
"Directory for character set files.", (char**) &charsets_dir,
(char**) &charsets_dir, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
{"check", 'c', "Check table for errors.", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0,
0, 0, 0, 0},
{"check-only-changed", 'C',
"Check only tables that have changed since last check or haven't been closed properly.",
0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0},
{"check-upgrade", 'g',
"Check tables for version-dependent changes. May be used with --auto-repair to correct tables requiring version-dependent updates.",
0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0},
{"compress", OPT_COMPRESS, "Use compression in server/client protocol.",
&opt_compress, &opt_compress, 0, GET_BOOL, NO_ARG, 0, 0, 0,
0, 0, 0},
{"databases", 'B',
"Check several databases. Note the difference in usage; in this case no tables are given. All name arguments are regarded as database names.",
&opt_databases, &opt_databases, 0, GET_BOOL, NO_ARG,
0, 0, 0, 0, 0, 0},
#ifdef DBUG_OFF
{"debug", '#', "This is a non-debug version. Catch this and exit.",
0, 0, 0, GET_DISABLED, OPT_ARG, 0, 0, 0, 0, 0, 0},
#else
{"debug", '#', "Output debug log. Often this is 'd:t:o,filename'.",
0, 0, 0, GET_STR, OPT_ARG, 0, 0, 0, 0, 0, 0},
#endif
{"debug-check", OPT_DEBUG_CHECK, "Check memory and open file usage at exit.",
&debug_check_flag, &debug_check_flag, 0,
GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
{"debug-info", OPT_DEBUG_INFO, "Print some debug info at exit.",
&debug_info_flag, &debug_info_flag,
0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
{"default-character-set", OPT_DEFAULT_CHARSET,
"Set the default character set.", &default_charset,
&default_charset, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
{"default_auth", OPT_DEFAULT_AUTH,
"Default authentication client-side plugin to use.",
&opt_default_auth, &opt_default_auth, 0,
GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
{"fast",'F', "Check only tables that haven't been closed properly.",
&opt_fast, &opt_fast, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0,
0},
{"fix-db-names", OPT_FIX_DB_NAMES, "Fix database names.",
&opt_fix_db_names, &opt_fix_db_names,
0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
{"fix-table-names", OPT_FIX_TABLE_NAMES, "Fix table names.",
&opt_fix_table_names, &opt_fix_table_names,
0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
{"force", 'f', "Continue even if we get an SQL error.",
&ignore_errors, &ignore_errors, 0, GET_BOOL, NO_ARG, 0, 0,
0, 0, 0, 0},
{"extended", 'e',
"If you are using this option with CHECK TABLE, it will ensure that the table is 100 percent consistent, but will take a long time. If you are using this option with REPAIR TABLE, it will force using old slow repair with keycache method, instead of much faster repair by sorting.",
&opt_extended, &opt_extended, 0, GET_BOOL, NO_ARG, 0, 0, 0,
0, 0, 0},
{"flush", OPT_FLUSH_TABLES, "Flush each table after check. This is useful if you don't want to have the checked tables take up space in the caches after the check",
&opt_flush_tables, &opt_flush_tables, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0,
0, 0 },
{"help", '?', "Display this help message and exit.", 0, 0, 0, GET_NO_ARG,
NO_ARG, 0, 0, 0, 0, 0, 0},
{"host",'h', "Connect to host.", ¤t_host,
¤t_host, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
{"medium-check", 'm',
"Faster than extended-check, but only finds 99.99 percent of all errors. Should be good enough for most cases.",
0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0},
{"write-binlog", OPT_WRITE_BINLOG,
"Log ANALYZE, OPTIMIZE and REPAIR TABLE commands. Use --skip-write-binlog "
"when commands should not be sent to replication slaves.",
&opt_write_binlog, &opt_write_binlog, 0, GET_BOOL, NO_ARG,
1, 0, 0, 0, 0, 0},
{"optimize", 'o', "Optimize table.", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0,
0, 0},
{"password", 'p',
"Password to use when connecting to server. If password is not given, it's solicited on the tty.",
0, 0, 0, GET_STR, OPT_ARG, 0, 0, 0, 0, 0, 0},
#ifdef __WIN__
{"pipe", 'W', "Use named pipes to connect to server.", 0, 0, 0, GET_NO_ARG,
NO_ARG, 0, 0, 0, 0, 0, 0},
#endif
{"plugin_dir", OPT_PLUGIN_DIR, "Directory for client-side plugins.",
&opt_plugin_dir, &opt_plugin_dir, 0,
GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
{"port", 'P', "Port number to use for connection or 0 for default to, in "
"order of preference, my.cnf, $MYSQL_TCP_PORT, "
#if MYSQL_PORT_DEFAULT == 0
"/etc/services, "
#endif
"built-in default (" STRINGIFY_ARG(MYSQL_PORT) ").",
&opt_mysql_port, &opt_mysql_port, 0, GET_UINT, REQUIRED_ARG, 0, 0, 0, 0, 0,
0},
{"protocol", OPT_MYSQL_PROTOCOL, "The protocol to use for connection (tcp, socket, pipe, memory).",
0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
{"quick", 'q',
"If you are using this option with CHECK TABLE, it prevents the check from scanning the rows to check for wrong links. This is the fastest check. If you are using this option with REPAIR TABLE, it will try to repair only the index tree. This is the fastest repair method for a table.",
&opt_quick, &opt_quick, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0,
0},
{"repair", 'r',
"Can fix almost anything except unique keys that aren't unique.",
0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0},
#ifdef HAVE_SMEM
{"shared-memory-base-name", OPT_SHARED_MEMORY_BASE_NAME,
"Base name of shared memory.", &shared_memory_base_name, &shared_memory_base_name,
0, GET_STR_ALLOC, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
#endif
{"silent", 's', "Print only error messages.", &opt_silent,
&opt_silent, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
{"skip_database", 0, "Don't process the database specified as argument",
&opt_skip_database, &opt_skip_database, 0, GET_STR, REQUIRED_ARG,
0, 0, 0, 0, 0, 0},
{"socket", 'S', "The socket file to use for connection.",
&opt_mysql_unix_port, &opt_mysql_unix_port, 0, GET_STR,
REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
#include <sslopt-longopts.h>
{"tables", OPT_TABLES, "Overrides option --databases (-B).", 0, 0, 0,
GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0},
{"use-frm", OPT_FRM,
"When used with REPAIR, get table structure from .frm file, so the table can be repaired even if .MYI header is corrupted.",
&opt_frm, &opt_frm, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0,
0},
#ifndef DONT_ALLOW_USER_CHANGE
{"user", 'u', "User for login if not current user.", ¤t_user,
¤t_user, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
#endif
{"verbose", 'v', "Print info about the various stages; Using it 3 times will print out all CHECK, RENAME and ALTER TABLE during the check phase.",
0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0},
{"version", 'V', "Output version information and exit.", 0, 0, 0, GET_NO_ARG,
NO_ARG, 0, 0, 0, 0, 0, 0},
{"process-views", 0,
"Perform the requested operation (check or repair) on views. "
"One of: NO, YES (correct the checksum, if necessary, add the "
"mariadb-version field), UPGRADE_FROM_MYSQL (same as YES and toggle "
"the algorithm MERGE<->TEMPTABLE.", &opt_do_views, &opt_do_views,
&do_views_typelib, GET_ENUM, OPT_ARG, 0, 0, 0, 0, 0, 0},
{"process-tables", 0, "Perform the requested operation on tables.",
&opt_do_tables, &opt_do_tables, 0, GET_BOOL, NO_ARG, 1, 0, 0, 0, 0, 0},
{0, 0, 0, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}
};
/* Option groups read from my.cnf-style configuration files. */
static const char *load_default_groups[]=
{ "mysqlcheck", "client", "client-server", "client-mariadb", 0 };

/* Forward declarations for the helpers defined later in this file. */
static void print_version(void);
static void usage(void);
static int get_options(int *argc, char ***argv);
static int process_all_databases();
static int process_databases(char **db_names);
static int process_selected_tables(char *db, char **table_names, int tables);
static int process_all_tables_in_db(char *database);
static int process_one_db(char *database);
static int use_db(char *database);
static int handle_request_for_tables(char *tables, size_t length, my_bool view);
static int dbConnect(char *host, char *user,char *passwd);
static void dbDisconnect(char *host);
static void DBerror(MYSQL *mysql, const char *when);
static void safe_exit(int error);
static void print_result();
static uint fixed_name_length(const char *name);
static char *fix_table_name(char *dest, char *src);

/* Selected operation (DO_CHECK, DO_REPAIR, ...); 0 until options are parsed. */
int what_to_do = 0;
/* Print the program and server-library version banner. */
static void print_version(void)
{
  printf("%s Ver %s Distrib %s, for %s (%s)\n", my_progname, CHECK_VERSION,
         MYSQL_SERVER_VERSION, SYSTEM_TYPE, MACHINE_TYPE);
} /* print_version */
/* Print the full --help text: supported commands, defaults and examples. */
static void usage(void)
{
  DBUG_ENTER("usage");
  print_version();
  puts(ORACLE_WELCOME_COPYRIGHT_NOTICE("2000"));
  puts("This program can be used to CHECK (-c, -m, -C), REPAIR (-r), ANALYZE (-a),");
  puts("or OPTIMIZE (-o) tables. Some of the options (like -e or -q) can be");
  puts("used at the same time. Not all options are supported by all storage engines.");
  puts("The options -c, -r, -a, and -o are exclusive to each other, which");
  puts("means that the last option will be used, if several was specified.\n");
  puts("The option -c (--check) will be used by default, if none was specified.");
  puts("You can change the default behavior by making a symbolic link, or");
  puts("copying this file somewhere with another name, the alternatives are:");
  puts("mysqlrepair:   The default option will be -r");
  puts("mysqlanalyze:  The default option will be -a");
  puts("mysqloptimize: The default option will be -o\n");
  printf("Usage: %s [OPTIONS] database [tables]\n", my_progname);
  printf("OR     %s [OPTIONS] --databases DB1 [DB2 DB3...]\n",
         my_progname);
  puts("Please consult the MariaDB/MySQL knowledgebase at");
  puts("http://kb.askmonty.org/v/mysqlcheck for latest information about");
  puts("this program.");
  print_defaults("my", load_default_groups);
  puts("");
  my_print_help(my_long_options);
  my_print_variables(my_long_options);
  DBUG_VOID_RETURN;
} /* usage */
/*
  handle_options() callback: process a single command-line option.
  Returns 1 (error) when the user mixes contradicting commands, else 0.
*/
static my_bool
get_one_option(int optid, const struct my_option *opt __attribute__((unused)),
               char *argument)
{
  int orig_what_to_do= what_to_do;   /* to detect contradicting commands below */
  DBUG_ENTER("get_one_option");
  switch(optid) {
  case 'a':
    what_to_do = DO_ANALYZE;
    break;
  case 'c':
    what_to_do = DO_CHECK;
    break;
  case 'C':
    /* --check-only-changed: restrict CHECK to tables changed since last check */
    what_to_do = DO_CHECK;
    opt_check_only_changed = 1;
    break;
  case 'I': /* Fall through */
  case '?':
    usage();
    exit(0);
  case 'm':
    what_to_do = DO_CHECK;
    opt_medium_check = 1;
    break;
  case 'o':
    what_to_do = DO_OPTIMIZE;
    break;
  case OPT_FIX_DB_NAMES:
    what_to_do= DO_FIX_NAMES;
    opt_databases= 1;
    break;
  case OPT_FIX_TABLE_NAMES:
    what_to_do= DO_FIX_NAMES;
    break;
  case 'p':
    if (argument == disabled_my_option)
      argument= (char*) "";			/* Don't require password */
    if (argument)
    {
      /* Copy the password, then scrub argv so it is not visible in 'ps'. */
      char *start = argument;
      my_free(opt_password);
      opt_password = my_strdup(argument, MYF(MY_FAE));
      while (*argument) *argument++= 'x';		/* Destroy argument */
      if (*start)
	start[1] = 0;                             /* Cut length of argument */
      tty_password= 0;
    }
    else
      tty_password = 1;
    break;
  case 'r':
    what_to_do = DO_REPAIR;
    break;
  case 'g':
    /* --check-upgrade: CHECK ... FOR UPGRADE */
    what_to_do= DO_CHECK;
    opt_upgrade= 1;
    break;
  case 'W':
#ifdef __WIN__
    opt_protocol = MYSQL_PROTOCOL_PIPE;
#endif
    break;
  case '#':
    /* Debug trace control (dbug library) */
    DBUG_PUSH(argument ? argument : "d:t:o");
    debug_check_flag= 1;
    break;
#include <sslopt-case.h>
  case OPT_TABLES:
    opt_databases = 0;
    break;
  case 'v':
    verbose++;
    break;
  case 'V':
    print_version(); exit(0);
    break;
  case OPT_MYSQL_PROTOCOL:
    opt_protocol= find_type_or_exit(argument, &sql_protocol_typelib,
                                    opt->name);
    break;
  }
  /* Only one of -c/-m/-C/-r/-a/-o may be in effect; reject combinations. */
  if (orig_what_to_do && (what_to_do != orig_what_to_do))
  {
    fprintf(stderr, "Error: %s doesn't support multiple contradicting commands.\n",
            my_progname);
    DBUG_RETURN(1);
  }
  DBUG_RETURN(0);
}
/*
  Parse command-line and configuration-file options, validate the
  resulting combination, and resolve the default command.
  May exit() directly on fatal usage errors; returns 0 on success.
*/
static int get_options(int *argc, char ***argv)
{
  int ho_error;
  DBUG_ENTER("get_options");
  if (*argc == 1)
  {
    usage();
    exit(0);
  }
  if ((ho_error=handle_options(argc, argv, my_long_options, get_one_option)))
    exit(ho_error);
  if (what_to_do == DO_REPAIR && !opt_do_views && !opt_do_tables)
  {
    fprintf(stderr, "Error: Nothing to repair when both "
            "--process-tables=NO and --process-views=NO\n");
    exit(1);
  }
  if (!what_to_do)
  {
    /*
      No explicit command given: derive the default from the program name
      (the mysqlrepair/mysqlanalyze/mysqloptimize symlink convention),
      falling back to CHECK.
    */
    size_t pnlen= strlen(my_progname);
    if (pnlen < 6) /* name too short */
      what_to_do = DO_CHECK;
    else if (!strcmp("repair", my_progname + pnlen - 6))
      what_to_do = DO_REPAIR;
    else if (!strcmp("analyze", my_progname + pnlen - 7))
      what_to_do = DO_ANALYZE;
    else if (!strcmp("optimize", my_progname + pnlen - 8))
      what_to_do = DO_OPTIMIZE;
    else
      what_to_do = DO_CHECK;
  }
  /* Views can only be checked or repaired, not analyzed/optimized. */
  if (opt_do_views && what_to_do != DO_REPAIR && what_to_do != DO_CHECK)
  {
    fprintf(stderr, "Error: %s doesn't support %s for views.\n",
            my_progname, operation_name[what_to_do]);
    exit(1);
  }
  /*
    If there's no --default-character-set option given with
    --fix-table-name or --fix-db-name set the default character set to "utf8".
  */
  if (!default_charset)
  {
    if (opt_fix_db_names || opt_fix_table_names)
      default_charset= (char*) "utf8";
    else
      default_charset= (char*) MYSQL_AUTODETECT_CHARSET_NAME;
  }
  if (strcmp(default_charset, MYSQL_AUTODETECT_CHARSET_NAME) &&
      !get_charset_by_csname(default_charset, MY_CS_PRIMARY, MYF(MY_WME)))
  {
    printf("Unsupported character set: %s\n", default_charset);
    DBUG_RETURN(1);
  }
  if (*argc > 0 && opt_alldbs)
  {
    printf("You should give only options, no arguments at all, with option\n");
    printf("--all-databases. Please see %s --help for more information.\n",
           my_progname);
    DBUG_RETURN(1);
  }
  if (*argc < 1 && !opt_alldbs)
  {
    printf("You forgot to give the arguments! Please see %s --help\n",
           my_progname);
    printf("for more information.\n");
    DBUG_RETURN(1);
  }
  if (tty_password)
    opt_password = get_tty_password(NullS);
  if (debug_info_flag)
    my_end_arg= MY_CHECK_ERROR | MY_GIVE_INFO;
  if (debug_check_flag)
    my_end_arg= MY_CHECK_ERROR;
  DBUG_RETURN((0));
} /* get_options */
/*
  Enumerate every database on the server (SHOW DATABASES) and run the
  requested operation on each.  Returns 1 if any database failed.
*/
static int process_all_databases()
{
  MYSQL_RES *dbres;
  MYSQL_ROW db_row;
  int failures= 0;
  DBUG_ENTER("process_all_databases");
  if (mysql_query(sock, "SHOW DATABASES") ||
      !(dbres= mysql_store_result(sock)))
  {
    my_printf_error(0, "Error: Couldn't execute 'SHOW DATABASES': %s",
                    MYF(0), mysql_error(sock));
    DBUG_RETURN(1);
  }
  if (verbose)
    printf("Processing databases\n");
  while ((db_row= mysql_fetch_row(dbres)) != NULL)
  {
    if (process_one_db(db_row[0]))
      failures= 1;
  }
  mysql_free_result(dbres);
  DBUG_RETURN(failures);
}
/* process_all_databases */
/*
  Run the requested operation on each database in the NULL-terminated
  list 'db_names'.  Returns 1 if any of them failed.
*/
static int process_databases(char **db_names)
{
  char **db;
  int failures= 0;
  DBUG_ENTER("process_databases");
  if (verbose)
    printf("Processing databases\n");
  for (db= db_names; *db != NULL; db++)
  {
    if (process_one_db(*db))
      failures= 1;
  }
  DBUG_RETURN(failures);
} /* process_databases */
/*
  Determine whether 'table' is a view by inspecting the first column name
  of SHOW CREATE TABLE output ("View" for views, "Table" for base tables).
  returns: -1 for error, 1 for view, 0 for table
*/
static int is_view(const char *table)
{
  char query[1024];
  MYSQL_RES *res;
  MYSQL_FIELD *field;
  int view;
  DBUG_ENTER("is_view");
  my_snprintf(query, sizeof(query), "SHOW CREATE TABLE %`s", table);
  if (mysql_query(sock, query))
  {
    fprintf(stderr, "Failed to %s\n", query);
    fprintf(stderr, "Error: %s\n", mysql_error(sock));
    /*
      Bug fix: the old code called my_free(query) here, but 'query' is a
      stack array, not heap memory - freeing it was undefined behavior.
      Nothing needs to be released on this path.
    */
    DBUG_RETURN(-1);
  }
  /* Guard against a NULL result set / metadata (e.g. lost connection). */
  if (!(res= mysql_store_result(sock)) || !(field= mysql_fetch_field(res)))
  {
    fprintf(stderr, "Error: %s\n", mysql_error(sock));
    if (res)
      mysql_free_result(res);
    DBUG_RETURN(-1);
  }
  view= (strcmp(field->name,"View") == 0) ? 1 : 0;
  mysql_free_result(res);
  DBUG_RETURN(view);
}
/*
  Run the requested operation on an explicit list of tables in 'db'.
  With --all-in-1 all names are quoted into one comma-separated statement;
  otherwise each table is handled individually (with per-table view
  detection via is_view()).
*/
static int process_selected_tables(char *db, char **table_names, int tables)
{
  int view;
  char *table;
  uint table_len;
  DBUG_ENTER("process_selected_tables");
  if (use_db(db))
    DBUG_RETURN(1);
  if (opt_all_in_1 && what_to_do != DO_FIX_NAMES)
  {
    /*
      We need table list in form `a`, `b`, `c`
      that's why we need 2 more chars added to to each table name
      space is for more readable output in logs and in case of error
    */
    char *table_names_comma_sep, *end;
    size_t tot_length= 0;
    int i= 0;
    if (opt_do_tables && opt_do_views)
    {
      fprintf(stderr, "Error: %s cannot process both tables and views "
              "in one command (--process-tables=YES "
              "--process-views=YES --all-in-1).\n",
              my_progname);
      DBUG_RETURN(1);
    }
    for (i = 0; i < tables; i++)
      tot_length+= fixed_name_length(*(table_names + i)) + 2;
    if (!(table_names_comma_sep = (char *)
          my_malloc((sizeof(char) * tot_length) + 4, MYF(MY_WME))))
      DBUG_RETURN(1);
    /* Build the quoted list; the first byte is skipped so there is room
       to back up over the final comma. */
    for (end = table_names_comma_sep + 1; tables > 0;
         tables--, table_names++)
    {
      end= fix_table_name(end, *table_names);
      *end++= ',';
    }
    *--end = 0;                       /* replace the trailing ',' with NUL */
    handle_request_for_tables(table_names_comma_sep + 1, tot_length - 1,
                              opt_do_views != 0);
    my_free(table_names_comma_sep);
  }
  else
    for (; tables > 0; tables--, table_names++)
    {
      table= *table_names;
      table_len= fixed_name_length(*table_names);
      view= is_view(table);
      if (view < 0)
        continue;                     /* could not classify; skip this name */
      handle_request_for_tables(table, table_len, (view == 1));
    }
  DBUG_RETURN(0);
} /* process_selected_tables */
/*
  Length of 'name' after backtick quoting: the raw length plus the two
  surrounding backticks, plus one extra byte per embedded backtick
  (which fix_table_name() doubles).
*/
static uint fixed_name_length(const char *name)
{
  const char *scan;
  uint quote_overhead= 2;           /* count the first/last backticks */
  DBUG_ENTER("fixed_name_length");
  for (scan= name; *scan; scan++)
  {
    if (*scan == '`')
      quote_overhead++;
  }
  DBUG_RETURN((uint) ((scan - name) + quote_overhead));
}
/*
  Copy 'src' into 'dest' as a backtick-quoted identifier, doubling any
  embedded backticks.  Returns the position just past the written text.
*/
static char *fix_table_name(char *dest, char *src)
{
  char *out= dest;
  DBUG_ENTER("fix_table_name");
  *out++= '`';
  while (*src)
  {
    if (*src == '`')          /* escape backtick character */
      *out++= '`';
    *out++= *src++;
  }
  *out++= '`';
  DBUG_RETURN(out);
}
/*
  Run the requested operation on every table (and, when enabled, every
  view) in 'database'.  With --all-in-1 separate comma-separated lists
  are built for tables and views and each list is sent as one statement;
  otherwise tables are processed one by one.  The general_log/slow_log
  tables in the mysql schema are always skipped.
*/
static int process_all_tables_in_db(char *database)
{
  MYSQL_RES *UNINIT_VAR(res);
  MYSQL_ROW row;
  uint num_columns;
  my_bool system_database= 0;
  my_bool view= FALSE;
  DBUG_ENTER("process_all_tables_in_db");
  if (use_db(database))
    DBUG_RETURN(1);
  /* FULL gives a second column with the table type (BASE TABLE / VIEW);
     fall back to plain SHOW TABLES on pre-5.0.2 servers. */
  if ((mysql_query(sock, "SHOW /*!50002 FULL*/ TABLES") &&
       mysql_query(sock, "SHOW TABLES")) ||
      !(res= mysql_store_result(sock)))
  {
    my_printf_error(0, "Error: Couldn't get table list for database %s: %s",
                    MYF(0), database, mysql_error(sock));
    DBUG_RETURN(1);
  }
  if (!strcmp(database, "mysql") || !strcmp(database, "MYSQL"))
    system_database= 1;
  /* 2 columns means the server told us which entries are views. */
  num_columns= mysql_num_fields(res);
  if (opt_all_in_1 && what_to_do != DO_FIX_NAMES)
  {
    /*
      We need table list in form `a`, `b`, `c`
      that's why we need 2 more chars added to to each table name
      space is for more readable output in logs and in case of error
    */
    char *tables, *end;
    uint tot_length = 0;
    char *views, *views_end;
    uint tot_views_length = 0;
    /* First pass: size the two buffers. */
    while ((row = mysql_fetch_row(res)))
    {
      if ((num_columns == 2) && (strcmp(row[1], "VIEW") == 0) &&
          opt_do_views)
        tot_views_length+= fixed_name_length(row[0]) + 2;
      else if (opt_do_tables)
        tot_length+= fixed_name_length(row[0]) + 2;
    }
    mysql_data_seek(res, 0);        /* rewind for the second pass */
    if (!(tables=(char *) my_malloc(sizeof(char)*tot_length+4, MYF(MY_WME))))
    {
      mysql_free_result(res);
      DBUG_RETURN(1);
    }
    if (!(views=(char *) my_malloc(sizeof(char)*tot_views_length+4, MYF(MY_WME))))
    {
      my_free(tables);
      mysql_free_result(res);
      DBUG_RETURN(1);
    }
    /* Second pass: build the quoted comma-separated lists (first byte of
       each buffer is skipped to leave room to back up over the comma). */
    for (end = tables + 1, views_end= views + 1; (row = mysql_fetch_row(res)) ;)
    {
      if ((num_columns == 2) && (strcmp(row[1], "VIEW") == 0))
      {
        if (!opt_do_views)
          continue;
        views_end= fix_table_name(views_end, row[0]);
        *views_end++= ',';
      }
      else
      {
        if (!opt_do_tables)
          continue;
        end= fix_table_name(end, row[0]);
        *end++= ',';
      }
    }
    *--end = 0;                     /* strip trailing comma (tables) */
    *--views_end = 0;               /* strip trailing comma (views)  */
    if (tot_length)
      handle_request_for_tables(tables + 1, tot_length - 1, FALSE);
    if (tot_views_length)
      handle_request_for_tables(views + 1, tot_views_length - 1, TRUE);
    my_free(tables);
    my_free(views);
  }
  else
  {
    while ((row = mysql_fetch_row(res)))
    {
      /* Skip views if we don't perform renaming. */
      if ((what_to_do != DO_FIX_NAMES) && (num_columns == 2) && (strcmp(row[1], "VIEW") == 0))
      {
        if (!opt_do_views)
          continue;
        view= TRUE;
      }
      else
      {
        if (!opt_do_tables)
          continue;
        view= FALSE;
      }
      if (system_database &&
          (!strcmp(row[0], "general_log") ||
           !strcmp(row[0], "slow_log")))
        continue;                           /* Skip logging tables */
      handle_request_for_tables(row[0], fixed_name_length(row[0]), view);
    }
  }
  mysql_free_result(res);
  DBUG_RETURN(0);
} /* process_all_tables_in_db */
/*
  Send one SQL statement; at verbosity 3+ (and when 'log_query' is set)
  echo it first.  Returns 0 on success, 1 on failure (error printed).
*/
static int run_query(const char *query, my_bool log_query)
{
  if (log_query && verbose >= 3)
    puts(query);
  if (!mysql_query(sock, query))
    return 0;
  fprintf(stderr, "Failed to %s\n", query);
  fprintf(stderr, "Error: %s\n", mysql_error(sock));
  return 1;
}
/*
  Strip the "#mysql50#" upgrade prefix from a table by renaming it.
  Returns 1 when the name has no such prefix or the rename failed.
*/
static int fix_table_storage_name(const char *name)
{
  char qbuf[100 + NAME_LEN*4];
  int rc;
  DBUG_ENTER("fix_table_storage_name");
  if (strncmp(name, "#mysql50#", 9) != 0)
    DBUG_RETURN(1);
  sprintf(qbuf, "RENAME TABLE `%s` TO `%s`", name, name + 9);
  rc= run_query(qbuf, 1);
  if (verbose)
    printf("%-50s %s\n", name, rc ? "FAILED" : "OK");
  DBUG_RETURN(rc);
}
/*
  Strip the "#mysql50#" upgrade prefix from a database by asking the
  server to upgrade its data directory name.  Returns 1 when the name
  has no such prefix or the statement failed.
*/
static int fix_database_storage_name(const char *name)
{
  char qbuf[100 + NAME_LEN*4];
  int rc;
  DBUG_ENTER("fix_database_storage_name");
  if (strncmp(name, "#mysql50#", 9) != 0)
    DBUG_RETURN(1);
  sprintf(qbuf, "ALTER DATABASE `%s` UPGRADE DATA DIRECTORY NAME", name);
  rc= run_query(qbuf, 1);
  if (verbose)
    printf("%-50s %s\n", name, rc ? "FAILED" : "OK");
  DBUG_RETURN(rc);
}
/*
  Rebuild a table with "ALTER TABLE ... FORCE" (used by --auto-repair
  for tables whose errors suggested an ALTER TABLE).  Returns 0 on
  success, 1 on allocation or query failure.
*/
static int rebuild_table(char *name)
{
  char *query, *ptr;
  int rc= 0;
  DBUG_ENTER("rebuild_table");
  /* "ALTER TABLE " (12) + quoted name + " FORCE" (6) + NUL */
  query= (char*)my_malloc(sizeof(char) * (12 + fixed_name_length(name) + 6 + 1),
                          MYF(MY_WME));
  if (!query)
    DBUG_RETURN(1);
  ptr= strmov(query, "ALTER TABLE ");
  ptr= fix_table_name(ptr, name);
  ptr= strxmov(ptr, " FORCE", NullS);
  if (verbose >= 3)
    puts(query);
  if (mysql_real_query(sock, query, (uint)(ptr - query)))
  {
    fprintf(stderr, "Failed to %s\n", query);
    fprintf(stderr, "Error: %s\n", mysql_error(sock));
    rc= 1;
  }
  if (verbose)
    printf("%-50s %s\n", name, rc ? "FAILED" : "FIXED");
  my_free(query);
  DBUG_RETURN(rc);
}
/*
  Process a single database: honour --skip_database, optionally fix a
  "#mysql50#"-prefixed database name first, then process its tables.
*/
static int process_one_db(char *database)
{
  DBUG_ENTER("process_one_db");
  if (opt_skip_database && strcmp(database, opt_skip_database) == 0)
    DBUG_RETURN(0);
  if (verbose)
    puts(database);
  if (what_to_do == DO_FIX_NAMES)
  {
    int fix_rc= 0;
    if (opt_fix_db_names && strncmp(database, "#mysql50#", 9) == 0)
    {
      fix_rc= fix_database_storage_name(database);
      database+= 9;            /* continue with the prefix stripped */
    }
    /* Stop here unless table names should be fixed too. */
    if (fix_rc || !opt_fix_table_names)
      DBUG_RETURN(fix_rc);
  }
  DBUG_RETURN(process_all_tables_in_db(database));
}
/*
  Make 'database' the current database.  Returns 1 (so the caller skips
  it) for the virtual information_schema / performance_schema databases
  on servers that have them, or if the server rejects the selection.
*/
static int use_db(char *database)
{
  DBUG_ENTER("use_db");
  if (mysql_get_server_version(sock) >= FIRST_INFORMATION_SCHEMA_VERSION &&
      !my_strcasecmp(&my_charset_latin1, database, INFORMATION_SCHEMA_DB_NAME))
    DBUG_RETURN(1);
  if (mysql_get_server_version(sock) >= FIRST_PERFORMANCE_SCHEMA_VERSION &&
      !my_strcasecmp(&my_charset_latin1, database, PERFORMANCE_SCHEMA_DB_NAME))
    DBUG_RETURN(1);
  if (mysql_select_db(sock, database))
  {
    DBerror(sock, "when selecting the database");
    DBUG_RETURN(1);
  }
  DBUG_RETURN(0);
} /* use_db */
/* Turn off binary logging for this session (--skip-write-binlog). */
static int disable_binlog()
{
  const char *stmt= "SET SQL_LOG_BIN=0";
  return run_query(stmt, 0);
}
/*
  Build and execute the CHECK/REPAIR/ANALYZE/OPTIMIZE statement for
  'tables' - either a single raw table name, or (with --all-in-1) an
  already backtick-quoted comma-separated list of 'length' bytes.
  'view' selects the CHECK/REPAIR VIEW syntax.  Returns 0 on success.
*/
static int handle_request_for_tables(char *tables, size_t length, my_bool view)
{
  char *query, *end, options[100], message[100];
  char table_name_buff[NAME_CHAR_LEN*2*2+1], *table_name;
  uint query_length= 0;
  const char *op = 0;
  const char *tab_view;
  DBUG_ENTER("handle_request_for_tables");
  options[0] = 0;
  tab_view= view ? " VIEW " : " TABLE ";
  end = options;
  /* Pick the operation keyword and accumulate its option suffixes. */
  switch (what_to_do) {
  case DO_CHECK:
    op = "CHECK";
    if (view)
    {
      /* Fast/changed-only checks do not apply to views. */
      if (opt_fast || opt_check_only_changed)
        DBUG_RETURN(0);
    }
    else
    {
      if (opt_quick)              end = strmov(end, " QUICK");
      if (opt_fast)               end = strmov(end, " FAST");
      if (opt_extended)           end = strmov(end, " EXTENDED");
      if (opt_medium_check)       end = strmov(end, " MEDIUM"); /* Default */
      if (opt_check_only_changed) end = strmov(end, " CHANGED");
    }
    if (opt_upgrade)            end = strmov(end, " FOR UPGRADE");
    break;
  case DO_REPAIR:
    op= opt_write_binlog ? "REPAIR" : "REPAIR NO_WRITE_TO_BINLOG";
    if (view)
    {
      if (opt_do_views == DO_VIEWS_FROM_MYSQL) end = strmov(end, " FROM MYSQL");
    }
    else
    {
      if (opt_quick)              end = strmov(end, " QUICK");
      if (opt_extended)           end = strmov(end, " EXTENDED");
      if (opt_frm)                end = strmov(end, " USE_FRM");
    }
    break;
  case DO_ANALYZE:
    DBUG_ASSERT(!view);
    op= (opt_write_binlog) ? "ANALYZE" : "ANALYZE NO_WRITE_TO_BINLOG";
    break;
  case DO_OPTIMIZE:
    DBUG_ASSERT(!view);
    op= (opt_write_binlog) ? "OPTIMIZE" : "OPTIMIZE NO_WRITE_TO_BINLOG";
    break;
  case DO_FIX_NAMES:
    DBUG_ASSERT(!view);
    DBUG_RETURN(fix_table_storage_name(tables));
  }
  if (!(query =(char *) my_malloc((sizeof(char)*(length+110)), MYF(MY_WME))))
    DBUG_RETURN(1);
  if (opt_all_in_1)
  {
    /* No backticks here as we added them before */
    query_length= sprintf(query, "%s%s%s %s", op,
                          tab_view, tables, options);
    table_name= tables;
  }
  else
  {
    /* Quote the single name; remember it for the FLUSH TABLES below. */
    char *ptr, *org;
    org= ptr= strmov(strmov(query, op), tab_view);
    ptr= fix_table_name(ptr, tables);
    strmake(table_name_buff, org, MY_MIN((int) sizeof(table_name_buff)-1,
                                         (int) (ptr - org)));
    table_name= table_name_buff;
    ptr= strxmov(ptr, " ", options, NullS);
    query_length= (uint) (ptr - query);
  }
  if (verbose >= 3)
    puts(query);
  if (mysql_real_query(sock, query, query_length))
  {
    sprintf(message, "when executing '%s%s... %s'", op, tab_view, options);
    DBerror(sock, message);
    my_free(query);
    DBUG_RETURN(1);
  }
  print_result();
  if (opt_flush_tables)
  {
    /* Drop the just-processed table(s) from the server caches (--flush). */
    query_length= sprintf(query, "FLUSH TABLES %s", table_name);
    if (mysql_real_query(sock, query, query_length))
    {
      DBerror(sock, query);
      my_free(query);
      DBUG_RETURN(1);
    }
  }
  my_free(query);
  DBUG_RETURN(0);
}
/*
  Consume and display the result rows of the last CHECK/REPAIR/... call.
  Rows come as (table, op, msg_type, msg_text).  A "status" row closes a
  table's output; when the status is not OK and --auto-repair is active
  (and this was not already a repair), the table is queued in
  tables4repair / views4repair, in tables4rebuild, or as a stored ALTER
  TABLE command, for the second pass in main().
*/
static void print_result()
{
  MYSQL_RES *res;
  MYSQL_ROW row;
  char prev[(NAME_LEN+9)*3+2];        /* previous table name (db.name form) */
  char prev_alter[MAX_ALTER_STR_SIZE];
  char *db_name;
  uint length_of_db;
  uint i;
  my_bool found_error=0, table_rebuild=0;
  DYNAMIC_ARRAY *array4repair= &tables4repair;
  DBUG_ENTER("print_result");
  res = mysql_use_result(sock);
  db_name= sock->db;
  length_of_db= strlen(db_name);
  prev[0] = '\0';
  prev_alter[0]= 0;
  for (i = 0; (row = mysql_fetch_row(res)); i++)
  {
    int changed = strcmp(prev, row[0]);       /* new table started? */
    my_bool status = !strcmp(row[2], "status");
    if (status)
    {
      /*
        if there was an error with the table, we have --auto-repair set,
        and this isn't a repair op, then add the table to the tables4repair
        list
      */
      if (found_error && opt_auto_repair && what_to_do != DO_REPAIR &&
          strcmp(row[3],"OK"))
      {
        if (table_rebuild)
        {
          if (prev_alter[0])
            insert_dynamic(&alter_table_cmds, (uchar*) prev_alter);
          else
          {
            /* strip "db." so only the table name is queued */
            char *table_name= prev + (length_of_db+1);
            insert_dynamic(&tables4rebuild, (uchar*) table_name);
          }
        }
        else
        {
          char *table_name= prev + (length_of_db+1);
          insert_dynamic(array4repair, table_name);
        }
      }
      /* Reset per-table state for the next table. */
      array4repair= &tables4repair;
      found_error=0;
      table_rebuild=0;
      prev_alter[0]= 0;
      if (opt_silent)
        continue;
    }
    if (status && changed)
      printf("%-50s %s", row[0], row[3]);
    else if (!status && changed)
    {
      /*
        If the error message includes REPAIR TABLE, we assume it means
        we have to run upgrade on it. In this case we write a nicer message
        than "Please do "REPAIR TABLE""...
      */
      if (!strcmp(row[2],"error") && strstr(row[3],"REPAIR "))
      {
        printf("%-50s %s", row[0], "Needs upgrade");
        array4repair= strstr(row[3], "VIEW") ? &views4repair : &tables4repair;
      }
      else
        printf("%s\n%-9s: %s", row[0], row[2], row[3]);
      if (opt_auto_repair && strcmp(row[2],"note"))
      {
        found_error=1;
        if (opt_auto_repair && strstr(row[3], "ALTER TABLE") != NULL)
          table_rebuild=1;
      }
    }
    else
      printf("%-9s: %s", row[2], row[3]);
    strmov(prev, row[0]);
    putchar('\n');
  }
  /* add the last table to be repaired to the list */
  if (found_error && opt_auto_repair && what_to_do != DO_REPAIR)
  {
    if (table_rebuild)
    {
      if (prev_alter[0])
        insert_dynamic(&alter_table_cmds, prev_alter);
      else
      {
        char *table_name= prev + (length_of_db+1);
        insert_dynamic(&tables4rebuild, table_name);
      }
    }
    else
    {
      char *table_name= prev + (length_of_db+1);
      insert_dynamic(array4repair, table_name);
    }
  }
  mysql_free_result(res);
  DBUG_VOID_RETURN;
}
/*
  Establish the server connection (global 'sock'), applying the
  compression / SSL / protocol / plugin / charset options gathered
  during option parsing.  Returns 0 on success, 1 on failure.
*/
static int dbConnect(char *host, char *user, char *passwd)
{
  DBUG_ENTER("dbConnect");
  if (verbose > 1)
  {
    fprintf(stderr, "# Connecting to %s...\n", host ? host : "localhost");
  }
  mysql_init(&mysql_connection);
  if (opt_compress)
    mysql_options(&mysql_connection, MYSQL_OPT_COMPRESS, NullS);
#ifdef HAVE_OPENSSL
  if (opt_use_ssl)
  {
    mysql_ssl_set(&mysql_connection, opt_ssl_key, opt_ssl_cert, opt_ssl_ca,
                  opt_ssl_capath, opt_ssl_cipher);
    mysql_options(&mysql_connection, MYSQL_OPT_SSL_CRL, opt_ssl_crl);
    mysql_options(&mysql_connection, MYSQL_OPT_SSL_CRLPATH, opt_ssl_crlpath);
  }
#endif
  if (opt_protocol)
    mysql_options(&mysql_connection,MYSQL_OPT_PROTOCOL,(char*)&opt_protocol);
  if (shared_memory_base_name)
    mysql_options(&mysql_connection,MYSQL_SHARED_MEMORY_BASE_NAME,shared_memory_base_name);
  if (opt_plugin_dir && *opt_plugin_dir)
    mysql_options(&mysql_connection, MYSQL_PLUGIN_DIR, opt_plugin_dir);
  if (opt_default_auth && *opt_default_auth)
    mysql_options(&mysql_connection, MYSQL_DEFAULT_AUTH, opt_default_auth);
  mysql_options(&mysql_connection, MYSQL_SET_CHARSET_NAME, default_charset);
  /* Identify this client to the server's connection-attribute tables. */
  mysql_options(&mysql_connection, MYSQL_OPT_CONNECT_ATTR_RESET, 0);
  mysql_options4(&mysql_connection, MYSQL_OPT_CONNECT_ATTR_ADD,
                 "program_name", "mysqlcheck");
  if (!(sock = mysql_real_connect(&mysql_connection, host, user, passwd,
                                  NULL, opt_mysql_port, opt_mysql_unix_port, 0)))
  {
    DBerror(&mysql_connection, "when trying to connect");
    DBUG_RETURN(1);
  }
  mysql_connection.reconnect= 1;
  DBUG_RETURN(0);
} /* dbConnect */
/* Close the connection opened by dbConnect(). */
static void dbDisconnect(char *host)
{
  DBUG_ENTER("dbDisconnect");
  if (verbose > 1)
    fprintf(stderr, "# Disconnecting from %s...\n", host ? host : "localhost");
  mysql_close(sock);
  DBUG_VOID_RETURN;
} /* dbDisconnect */
/*
  Report the last error on 'mysql' with context 'when', then call
  safe_exit() (which returns only under --force / ignore_errors).
*/
static void DBerror(MYSQL *mysql, const char *when)
{
  DBUG_ENTER("DBerror");
  my_printf_error(0,"Got error: %d: %s %s", MYF(0),
                  mysql_errno(mysql), mysql_error(mysql), when);
  safe_exit(EX_MYSQLERR);
  DBUG_VOID_RETURN;
} /* DBerror */
/*
  Remember 'error' as the first error seen and terminate the program -
  unless --force (ignore_errors) was given, in which case processing
  continues and the saved code is reported from main().
*/
static void safe_exit(int error)
{
  DBUG_ENTER("safe_exit");
  if (!first_error)
    first_error= error;
  if (ignore_errors)
    DBUG_VOID_RETURN;
  if (sock)
    mysql_close(sock);
  sf_leaking_memory= 1; /* don't check for memory leaks */
  exit(error);
  DBUG_VOID_RETURN;     /* not reached; keeps the dbug stack balanced */
}
/*
  Entry point: parse options, connect, dispatch to the all-databases /
  selected-tables / database-list processing path, then (with
  --auto-repair) run a second pass repairing or rebuilding everything
  queued by print_result().  Cleanup uses a goto ladder so each exit
  point releases exactly what was acquired.
*/
int main(int argc, char **argv)
{
  int ret= EX_USAGE;
  char **defaults_argv;
  MY_INIT(argv[0]);
  sf_leaking_memory=1; /* don't report memory leaks on early exits */
  /*
  ** Check out the args
  */
  if (load_defaults("my", load_default_groups, &argc, &argv))
    goto end2;
  defaults_argv= argv;
  if (get_options(&argc, &argv))
    goto end1;
  sf_leaking_memory=0; /* from now on we cleanup properly */
  ret= EX_MYSQLERR;
  if (dbConnect(current_host, current_user, opt_password))
    goto end1;
  ret= 1;
  if (!opt_write_binlog)
  {
    if (disable_binlog())
      goto end;
  }
  /* Arrays that collect work for the --auto-repair second pass. */
  if (opt_auto_repair &&
      (my_init_dynamic_array(&tables4repair, sizeof(char)*(NAME_LEN*2+2),16,
                             64, MYF(0)) ||
       my_init_dynamic_array(&views4repair, sizeof(char)*(NAME_LEN*2+2),16,
                             64, MYF(0)) ||
       my_init_dynamic_array(&tables4rebuild, sizeof(char)*(NAME_LEN*2+2),16,
                             64, MYF(0)) ||
       my_init_dynamic_array(&alter_table_cmds, MAX_ALTER_STR_SIZE, 0, 1,
                             MYF(0))))
    goto end;
  if (opt_alldbs)
    process_all_databases();
  /* Only one database and selected table(s) */
  else if (argc > 1 && !opt_databases)
    process_selected_tables(*argv, (argv + 1), (argc - 1));
  /* One or more databases, all tables */
  else
    process_databases(argv);
  if (opt_auto_repair)
  {
    uint i;
    if (!opt_silent && (tables4repair.elements || tables4rebuild.elements))
      puts("\nRepairing tables");
    what_to_do = DO_REPAIR;       /* switch mode for the second pass */
    for (i = 0; i < tables4repair.elements ; i++)
    {
      char *name= (char*) dynamic_array_ptr(&tables4repair, i);
      handle_request_for_tables(name, fixed_name_length(name), FALSE);
    }
    for (i = 0; i < tables4rebuild.elements ; i++)
      rebuild_table((char*) dynamic_array_ptr(&tables4rebuild, i));
    for (i = 0; i < alter_table_cmds.elements ; i++)
      run_query((char*) dynamic_array_ptr(&alter_table_cmds, i), 1);
    if (!opt_silent && views4repair.elements)
      puts("\nRepairing views");
    for (i = 0; i < views4repair.elements ; i++)
    {
      char *name= (char*) dynamic_array_ptr(&views4repair, i);
      handle_request_for_tables(name, fixed_name_length(name), TRUE);
    }
  }
  ret= MY_TEST(first_error);
 end:
  dbDisconnect(current_host);
  if (opt_auto_repair)
  {
    delete_dynamic(&views4repair);
    delete_dynamic(&tables4repair);
    delete_dynamic(&tables4rebuild);
    delete_dynamic(&alter_table_cmds);
  }
 end1:
  my_free(opt_password);
  my_free(shared_memory_base_name);
  mysql_library_end();
  free_defaults(defaults_argv);
 end2:
  my_end(my_end_arg);
  return ret;
} /* main */
|
jb-boin/mariadb-10.0
|
client/mysqlcheck.c
|
C
|
gpl-2.0
| 38,847
|
// SPDX-License-Identifier: GPL-2.0+
/*------------------------------------------------------------------------
. smc91111.c
. This is a driver for SMSC's 91C111 single-chip Ethernet device.
.
. (C) Copyright 2002
. Sysgo Real-Time Solutions, GmbH <www.elinos.com>
. Rolf Offermanns <rof@sysgo.de>
.
. Copyright (C) 2001 Standard Microsystems Corporation (SMSC)
. Developed by Simple Network Magic Corporation (SNMC)
. Copyright (C) 1996 by Erik Stahlman (ES)
.
.
. Information contained in this file was obtained from the LAN91C111
. manual from SMC. To get a copy, if you really want one, you can find
. information under www.smsc.com.
.
.
. "Features" of the SMC chip:
. Integrated PHY/MAC for 10/100BaseT Operation
. Supports internal and external MII
. Integrated 8K packet memory
. EEPROM interface for configuration
.
. Arguments:
. io = for the base address
. irq = for the IRQ
.
. author:
. Erik Stahlman ( erik@vt.edu )
. Daris A Nevil ( dnevil@snmc.com )
.
.
. Hardware multicast code from Peter Cammaert ( pc@denkart.be )
.
. Sources:
. o SMSC LAN91C111 databook (www.smsc.com)
. o smc9194.c by Erik Stahlman
. o skeleton.c by Donald Becker ( becker@cesdis.gsfc.nasa.gov )
.
. History:
. 06/19/03 Richard Woodruff Made u-boot environment aware and added mac addr checks.
. 10/17/01 Marco Hasewinkel Modify for DNP/1110
. 07/25/01 Woojung Huh Modify for ADS Bitsy
. 04/25/01 Daris A Nevil Initial public release through SMSC
. 03/16/01 Daris A Nevil Modified smc9194.c for use with LAN91C111
----------------------------------------------------------------------------*/
#include <common.h>
#include <command.h>
#include <config.h>
#include <malloc.h>
#include "smc91111.h"
#include <net.h>
/* Use power-down feature of the chip */
#define POWER_DOWN 0
#define NO_AUTOPROBE
#define SMC_DEBUG 0
#if SMC_DEBUG > 1
static const char version[] =
"smc91111.c:v1.0 04/25/01 by Daris A Nevil (dnevil@snmc.com)\n";
#endif
/* Autonegotiation timeout in seconds */
#ifndef CONFIG_SMC_AUTONEG_TIMEOUT
#define CONFIG_SMC_AUTONEG_TIMEOUT 10
#endif
/*------------------------------------------------------------------------
.
. Configuration options, for the experienced user to change.
.
-------------------------------------------------------------------------*/
/*
. Wait time for memory to be free. This probably shouldn't be
. tuned that much, as waiting for this means nothing else happens
. in the system
*/
#define MEMORY_WAIT_TIME 16
#if (SMC_DEBUG > 2 )
#define PRINTK3(args...) printf(args)
#else
#define PRINTK3(args...)
#endif
#if SMC_DEBUG > 1
#define PRINTK2(args...) printf(args)
#else
#define PRINTK2(args...)
#endif
#ifdef SMC_DEBUG
#define PRINTK(args...) printf(args)
#else
#define PRINTK(args...)
#endif
/*------------------------------------------------------------------------
.
. The internal workings of the driver. If you are changing anything
. here with the SMC stuff, you should have the datasheet and know
. what you are doing.
.
-------------------------------------------------------------------------*/
/* Memory sizing constant */
#define LAN91C111_MEMORY_MULTIPLIER (1024*2)
#ifndef CONFIG_SMC91111_BASE
#error "SMC91111 Base address must be passed to initialization funciton"
/* #define CONFIG_SMC91111_BASE 0x20000300 */
#endif
#define SMC_DEV_NAME "SMC91111"
#define SMC_PHY_ADDR 0x0000
#define SMC_ALLOC_MAX_TRY 5
#define SMC_TX_TIMEOUT 30
#define SMC_PHY_CLOCK_DELAY 1000
#define ETH_ZLEN 60
#ifdef CONFIG_SMC_USE_32_BIT
#define USE_32_BIT 1
#else
#undef USE_32_BIT
#endif
#ifdef SHARED_RESOURCES
extern void swap_to(int device_id);
#else
# define swap_to(x)
#endif
#ifndef CONFIG_SMC91111_EXT_PHY
static void smc_phy_configure(struct eth_device *dev);
#endif /* !CONFIG_SMC91111_EXT_PHY */
/*
------------------------------------------------------------
.
. Internal routines
.
------------------------------------------------------------
*/
#ifdef CONFIG_SMC_USE_IOFUNCS
/*
* input and output functions
*
* Implemented due to inx,outx macros accessing the device improperly
* and putting the device into an unkown state.
*
* For instance, on Sharp LPD7A400 SDK, affects were chip memory
* could not be free'd (hence the alloc failures), duplicate packets,
* packets being corrupt (shifted) on the wire, etc. Switching to the
* inx,outx functions fixed this problem.
*/
/*
 * Read a 16-bit chip register at 'offset' from the device's I/O base.
 * NOTE(review): the extra read of address 0xc0000000 after the access
 * looks like a board-specific bus-synchronisation workaround (see the
 * comment above about inx/outx misbehaving) -- confirm against the
 * board errata before touching it.
 */
static inline word SMC_inw(struct eth_device *dev, dword offset)
{
	word v;
	v = *((volatile word*)(dev->iobase + offset));
	barrier(); *(volatile u32*)(0xc0000000);
	return v;
}
/*
 * Write a 16-bit chip register at 'offset'.  The trailing dummy read
 * mirrors SMC_inw() -- presumably a bus-sync workaround; verify before
 * removing.
 */
static inline void SMC_outw(struct eth_device *dev, word value, dword offset)
{
	*((volatile word*)(dev->iobase + offset)) = value;
	barrier(); *(volatile u32*)(0xc0000000);
}
/*
 * Read one byte: fetch the aligned 16-bit word containing 'offset',
 * then return its high or low half depending on the offset parity.
 */
static inline byte SMC_inb(struct eth_device *dev, dword offset)
{
	word w;

	w = SMC_inw(dev, offset & ~((dword)1));
	if (offset & 1)
		return (byte)(w >> 8);
	return (byte)w;
}
/*
 * Write one byte via read-modify-write of the containing 16-bit word,
 * preserving the other byte of the pair.
 */
static inline void SMC_outb(struct eth_device *dev, byte value, dword offset)
{
	word  _w;

	_w = SMC_inw(dev, offset & ~((dword)1));
	if (offset & 1)
		*((volatile word*)(dev->iobase + (offset & ~((dword)1)))) =
			(value<<8) | (_w & 0x00ff);
	else
		*((volatile word*)(dev->iobase + offset)) =
			value | (_w & 0xff00);
}
static inline void SMC_insw(struct eth_device *dev, dword offset,
volatile uchar* buf, dword len)
{
volatile word *p = (volatile word *)buf;
while (len-- > 0) {
*p++ = SMC_inw(dev, offset);
barrier();
*((volatile u32*)(0xc0000000));
}
}
static inline void SMC_outsw(struct eth_device *dev, dword offset,
uchar* buf, dword len)
{
volatile word *p = (volatile word *)buf;
while (len-- > 0) {
SMC_outw(dev, *p++, offset);
barrier();
*(volatile u32*)(0xc0000000);
}
}
#endif /* CONFIG_SMC_USE_IOFUNCS */
/*
. A rather simple routine to print out a packet for debugging purposes.
*/
#if SMC_DEBUG > 2
static void print_packet( byte *, int );
#endif
#define tx_done(dev) 1
/*
 * Poll the bank-2 interrupt status register until one of the bits in
 * 'mask' is raised, or until 'timeout' seconds have elapsed.
 * The previously selected register bank is restored before returning.
 * Returns 0 when an interrupt bit was seen, 1 on timeout.
 */
static int poll4int (struct eth_device *dev, byte mask, int timeout)
{
	word previous_bank = SMC_inw (dev, BSR_REG);
	int deadline = get_timer (0) + timeout * CONFIG_SYS_HZ;
	int timed_out = 0;

	PRINTK2 ("Polling...\n");
	SMC_SELECT_BANK (dev, 2);
	for (;;) {
		if (SMC_inw (dev, SMC91111_INT_REG) & mask)
			break;
		if (get_timer (0) >= deadline) {
			timed_out = 1;
			break;
		}
	}

	/* restore old bank selection */
	SMC_SELECT_BANK (dev, previous_bank);

	return timed_out;
}
/* Only one release command at a time, please */
/*
 * Spin until the MMU command register reports not-busy, but give up
 * after ~200 us so a wedged chip cannot hang the boot.  Bank 2 is
 * assumed to be selected by the caller.
 */
static inline void smc_wait_mmu_release_complete (struct eth_device *dev)
{
	int attempts = 0;

	while (SMC_inw (dev, MMU_CMD_REG) & MC_BUSY) {
		udelay (1);	/* Wait until not busy */
		attempts++;
		if (attempts > 200)
			break;
	}
}
/*
. Function: smc_reset( void )
. Purpose:
. This sets the SMC91111 chip to its normal state, hopefully from whatever
. mess that any other DOS driver has put it in.
.
. Maybe I should reset more registers to defaults in here? SOFTRST should
. do that for me.
.
. Method:
. 1. send a SOFT RESET
. 2. wait for it to finish
. 3. enable autorelease mode
. 4. reset the memory management unit
. 5. clear all interrupts
.
*/
static void smc_reset (struct eth_device *dev)
{
	PRINTK2 ("%s: smc_reset\n", SMC_DEV_NAME);

	/* Bank 0: issue the soft reset.  This resets the registers mostly
	   to defaults, but doesn't affect EEPROM.  That seems unnecessary */
	SMC_SELECT_BANK (dev, 0);
	SMC_outw (dev, RCR_SOFTRST, RCR_REG);

	/* Setup the Configuration Register */
	/* This is necessary because the CONFIG_REG is not affected */
	/* by a soft reset */
	SMC_SELECT_BANK (dev, 1);
#if defined(CONFIG_SMC91111_EXT_PHY)
	/* Board uses an external PHY: route the MII to it. */
	SMC_outw (dev, CONFIG_DEFAULT | CONFIG_EXT_PHY, CONFIG_REG);
#else
	SMC_outw (dev, CONFIG_DEFAULT, CONFIG_REG);
#endif

	/* Release from possible power-down state */
	/* Configuration register is not affected by Soft Reset */
	SMC_outw (dev, SMC_inw (dev, CONFIG_REG) | CONFIG_EPH_POWER_EN,
		CONFIG_REG);
	SMC_SELECT_BANK (dev, 0);

	/* this should pause enough for the chip to be happy */
	udelay (10);

	/* Disable transmit and receive functionality */
	SMC_outw (dev, RCR_CLEAR, RCR_REG);
	SMC_outw (dev, TCR_CLEAR, TCR_REG);

	/* set the control register (auto-release etc., see CTL_DEFAULT) */
	SMC_SELECT_BANK (dev, 1);
	SMC_outw (dev, CTL_DEFAULT, CTL_REG);

	/* Reset the MMU */
	SMC_SELECT_BANK (dev, 2);
	smc_wait_mmu_release_complete (dev);
	SMC_outw (dev, MC_RESET, MMU_CMD_REG);
	while (SMC_inw (dev, MMU_CMD_REG) & MC_BUSY)
		udelay (1);	/* Wait until not busy */

	/* Note:  It doesn't seem that waiting for the MMU busy is needed here,
	   but this is a place where future chipsets _COULD_ break.  Be wary
	   of issuing another MMU command right after this */

	/* Disable all interrupts */
	SMC_outb (dev, 0, IM_REG);
}
/*
. Function: smc_enable
. Purpose: let the chip talk to the outside work
. Method:
. 1. Enable the transmitter
. 2. Enable the receiver
. 3. Enable interrupts
*/
/* Enable TX and RX by programming the default TCR/RCR values (bank 0).
 * Interrupts stay masked; this driver operates by polling. */
static void smc_enable(struct eth_device *dev)
{
	PRINTK2("%s: smc_enable\n", SMC_DEV_NAME);
	SMC_SELECT_BANK( dev, 0 );
	/* see the header file for options in TCR/RCR DEFAULT*/
	SMC_outw( dev, TCR_DEFAULT, TCR_REG );
	SMC_outw( dev, RCR_DEFAULT, RCR_REG );

	/* clear MII_DIS */
/*	smc_write_phy_register(PHY_CNTL_REG, 0x0000); */
}
/*
. Function: smc_halt
. Purpose: closes down the SMC91xxx chip.
. Method:
. 1. zero the interrupt mask
. 2. clear the enable receive flag
. 3. clear the enable xmit flags
.
. TODO:
. (1) maybe utilize power down mode.
. Why not yet? Because while the chip will go into power down mode,
. the manual says that it will wake up in response to any I/O requests
. in the register space. Empirical results do not show this working.
*/
static void smc_halt(struct eth_device *dev)
{
	PRINTK2("%s: smc_halt\n", SMC_DEV_NAME);

	/* no more interrupts for me */
	SMC_SELECT_BANK( dev, 2 );
	SMC_outb( dev, 0, IM_REG );

	/* and tell the card to stay away from that nasty outside world */
	SMC_SELECT_BANK( dev, 0 );
	/* NOTE(review): RCR_CLEAR/TCR_CLEAR are written with byte accesses
	 * here although the registers are 16 bits wide; presumably only the
	 * low-byte enable bits matter — confirm against the datasheet. */
	SMC_outb( dev, RCR_CLEAR, RCR_REG );
	SMC_outb( dev, TCR_CLEAR, TCR_REG );

	/* on shared-bus boards, hand the bus back to the flash */
	swap_to(FLASH);
}
/*
. Function: smc_send(struct net_device * )
. Purpose:
. This sends the actual packet to the SMC9xxx chip.
.
. Algorithm:
. First, see if a saved_skb is available.
. ( this should NOT be called if there is no 'saved_skb'
. Now, find the packet number that the chip allocated
. Point the data pointers at it in memory
. Set the length word in the chip's memory
. Dump the packet to chip memory
. Check if a last byte is needed ( odd length packet )
. if so, set the control flag right
. Tell the card to send it
. Enable the transmit interrupt, so I know if it failed
. Free the kernel data if I actually sent it.
*/
/*
 * Transmit one frame.
 * @packet:        frame data (padded to ETH_ZLEN if shorter)
 * @packet_length: frame length in bytes
 * Returns the number of bytes queued on success, 0 on failure
 * (oversized frame, chip memory allocation failure, or TX timeout).
 *
 * Fix vs. original: the "write data fifo not empty" guard looped on the
 * stale 'saved_ptr' snapshot, which can never change inside the loop, so
 * once PTR_NOTEMPTY was set it printed and spun forever.  The pointer
 * register is now re-read on each iteration.
 */
static int smc_send(struct eth_device *dev, void *packet, int packet_length)
{
	byte packet_no;
	byte *buf;
	int length;
	int numPages;
	int try = 0;
	int time_out;
	byte status;
	byte saved_pnr;
	word saved_ptr;

	/* save PTR and PNR registers before manipulation */
	SMC_SELECT_BANK (dev, 2);
	saved_pnr = SMC_inb( dev, PN_REG );
	saved_ptr = SMC_inw( dev, PTR_REG );

	PRINTK3 ("%s: smc_hardware_send_packet\n", SMC_DEV_NAME);

	/* Pad undersized frames up to the ethernet minimum. */
	length = ETH_ZLEN < packet_length ? packet_length : ETH_ZLEN;

	/* allocate memory
	** The MMU wants the number of pages to be the number of 256 bytes
	** 'pages', minus 1 ( since a packet can't ever have 0 pages :) )
	**
	** The 91C111 ignores the size bits, but the code is left intact
	** for backwards and future compatibility.
	**
	** Pkt size for allocating is data length +6 (for additional status
	** words, length and ctl!)
	**
	** If odd size then last byte is included in this header.
	*/
	numPages = ((length & 0xfffe) + 6);
	numPages >>= 8;		/* Divide by 256 */

	if (numPages > 7) {
		printf ("%s: Far too big packet error. \n", SMC_DEV_NAME);
		return 0;
	}

	/* now, try to allocate the memory */
	SMC_SELECT_BANK (dev, 2);
	SMC_outw (dev, MC_ALLOC | numPages, MMU_CMD_REG);

	/* FIXME: the ALLOC_INT bit never gets set *
	 * so the following will always give a	   *
	 * memory allocation error.		   *
	 * same code works in armboot though	   *
	 * -ro
	 */

again:
	try++;
	time_out = MEMORY_WAIT_TIME;
	do {
		status = SMC_inb (dev, SMC91111_INT_REG);
		if (status & IM_ALLOC_INT) {
			/* acknowledge the interrupt */
			SMC_outb (dev, IM_ALLOC_INT, SMC91111_INT_REG);
			break;
		}
	} while (--time_out);

	if (!time_out) {
		PRINTK2 ("%s: memory allocation, try %d failed ...\n",
			 SMC_DEV_NAME, try);
		if (try < SMC_ALLOC_MAX_TRY)
			goto again;
		else
			return 0;
	}

	PRINTK2 ("%s: memory allocation, try %d succeeded ...\n",
		 SMC_DEV_NAME, try);

	buf = (byte *) packet;

	/* If I get here, I _know_ there is a packet slot waiting for me */
	packet_no = SMC_inb (dev, AR_REG);
	if (packet_no & AR_FAILED) {
		/* or isn't there?  BAD CHIP! */
		printf ("%s: Memory allocation failed. \n", SMC_DEV_NAME);
		return 0;
	}

	/* we have a packet address, so tell the card to use it */
	SMC_outb (dev, packet_no, PN_REG);

	/* Do not program a new pointer while the write data FIFO is still
	 * draining.  BUGFIX: re-read PTR_REG each iteration — the original
	 * tested the stale 'saved_ptr' copy, which never changes, so the
	 * loop could never terminate once the flag was observed set. */
	while (SMC_inw (dev, PTR_REG) & PTR_NOTEMPTY)
		printf ("Write data fifo not empty!\n");

	/* point to the beginning of the packet */
	SMC_outw (dev, PTR_AUTOINC, PTR_REG);

	PRINTK3 ("%s: Trying to xmit packet of length %x\n",
		 SMC_DEV_NAME, length);

#if SMC_DEBUG > 2
	printf ("Transmitting Packet\n");
	print_packet (buf, length);
#endif

	/* send the packet length ( +6 for status, length and ctl byte )
	   and the status word ( set to zeros ) */
#ifdef USE_32_BIT
	SMC_outl (dev, (length + 6) << 16, SMC91111_DATA_REG);
#else
	SMC_outw (dev, 0, SMC91111_DATA_REG);
	/* send the packet length ( +6 for status words, length, and ctl */
	SMC_outw (dev, (length + 6), SMC91111_DATA_REG);
#endif

	/* send the actual data
	 . I _think_ it's faster to send the longs first, and then
	 . mop up by sending the last word.  It depends heavily
	 . on alignment, at least on the 486.	Maybe it would be
	 . a good idea to check which is optimal?  But that could take
	 . almost as much time as is saved?
	 */
#ifdef USE_32_BIT
	SMC_outsl (dev, SMC91111_DATA_REG, buf, length >> 2);
	if (length & 0x2)
		SMC_outw (dev, *((word *) (buf + (length & 0xFFFFFFFC))),
			  SMC91111_DATA_REG);
#else
	SMC_outsw (dev, SMC91111_DATA_REG, buf, (length) >> 1);
#endif /* USE_32_BIT */

	/* Send the last byte, if there is one.  An even-length frame sends
	   a zero control word; an odd-length frame carries the final byte
	   in the low half with the ODD bit (0x2000) in the control byte. */
	if ((length & 1) == 0) {
		SMC_outw (dev, 0, SMC91111_DATA_REG);
	} else {
		SMC_outw (dev, buf[length - 1] | 0x2000, SMC91111_DATA_REG);
	}

	/* and let the chipset deal with it */
	SMC_outw (dev, MC_ENQUEUE, MMU_CMD_REG);

	/* poll for TX INT */
	/* if (poll4int (dev, IM_TX_INT, SMC_TX_TIMEOUT)) { */
	/* poll for TX_EMPTY INT - autorelease enabled */
	if (poll4int(dev, IM_TX_EMPTY_INT, SMC_TX_TIMEOUT)) {
		/* sending failed */
		PRINTK2 ("%s: TX timeout, sending failed...\n", SMC_DEV_NAME);

		/* release packet */
		/* no need to release, MMU does that now */

		/* wait for MMU getting ready (low) */
		while (SMC_inw (dev, MMU_CMD_REG) & MC_BUSY) {
			udelay (10);
		}

		PRINTK2 ("MMU ready\n");

		/* NOTE(review): error paths return without restoring
		 * saved_pnr/saved_ptr — kept as-is to preserve the original
		 * behaviour; confirm whether a restore is wanted here. */
		return 0;
	} else {
		/* ack. int */
		SMC_outb (dev, IM_TX_EMPTY_INT, SMC91111_INT_REG);
		/* SMC_outb (IM_TX_INT, SMC91111_INT_REG); */
		PRINTK2 ("%s: Sent packet of length %d \n", SMC_DEV_NAME,
			 length);

		/* release packet */
		/* no need to release, MMU does that now */

		/* wait for MMU getting ready (low) */
		while (SMC_inw (dev, MMU_CMD_REG) & MC_BUSY) {
			udelay (10);
		}

		PRINTK2 ("MMU ready\n");
	}

	/* restore previously saved registers */
	SMC_outb( dev, saved_pnr, PN_REG );
	SMC_outw( dev, saved_ptr, PTR_REG );

	return length;
}
/* Program the station (MAC) address registers (bank 1) from
 * dev->enetaddr.  Always returns 0. */
static int smc_write_hwaddr(struct eth_device *dev)
{
	int idx;

	swap_to(ETHERNET);
	SMC_SELECT_BANK (dev, 1);
#ifdef USE_32_BIT
	/* Write the six address bytes as three little-endian word pairs. */
	for (idx = 0; idx < 6; idx += 2) {
		word pair;

		pair = (word)(dev->enetaddr[idx + 1] << 8) |
			dev->enetaddr[idx];
		SMC_outw(dev, pair, (ADDR0_REG + idx));
	}
#else
	/* Write the six address bytes one register at a time. */
	for (idx = 0; idx < 6; idx++)
		SMC_outb(dev, dev->enetaddr[idx], (ADDR0_REG + idx));
#endif
	swap_to(FLASH);
	return 0;
}
/*
* Open and Initialize the board
*
* Set up everything, reset the card, etc ..
*
*/
/* Bring the interface up: reset the chip, enable TX/RX and configure
 * the internal PHY.  'bd' is unused (required by the eth_device init
 * callback signature).  Always returns 0. */
static int smc_init(struct eth_device *dev, bd_t *bd)
{
	swap_to(ETHERNET);

	PRINTK2 ("%s: smc_init\n", SMC_DEV_NAME);

	/* reset the hardware */
	smc_reset (dev);
	smc_enable (dev);

	/* Configure the PHY */
#ifndef CONFIG_SMC91111_EXT_PHY
	smc_phy_configure (dev);
#endif

	/* conservative setting (10Mbps, HalfDuplex, no AutoNeg.) */
/*	SMC_SELECT_BANK(dev, 0); */
/*	SMC_outw(dev, 0, RPC_REG); */

	printf(SMC_DEV_NAME ": MAC %pM\n", dev->enetaddr);

	return 0;
}
/*-------------------------------------------------------------
.
. smc_rcv - receive a packet from the card
.
. There is ( at least ) a packet waiting to be read from
. chip-memory.
.
. o Read the status
. o If an error, record it
. o otherwise, read in the packet
--------------------------------------------------------------
*/
static int smc_rcv(struct eth_device *dev)
{
	int packet_number;
	word status;
	word packet_length;
	int is_error = 0;
#ifdef USE_32_BIT
	dword stat_len;
#endif
	byte saved_pnr;
	word saved_ptr;

	SMC_SELECT_BANK(dev, 2);
	/* save PNR and PTR registers so they can be restored afterwards */
	saved_pnr = SMC_inb( dev, PN_REG );
	saved_ptr = SMC_inw( dev, PTR_REG );

	packet_number = SMC_inw( dev, RXFIFO_REG );

	if ( packet_number & RXFIFO_REMPTY ) {
		/* RX FIFO empty: no packet waiting */
		return 0;
	}

	PRINTK3("%s: smc_rcv\n", SMC_DEV_NAME);
	/* start reading from the start of the packet */
	SMC_outw( dev, PTR_READ | PTR_RCV | PTR_AUTOINC, PTR_REG );

	/* First two words are status and packet_length */
#ifdef	USE_32_BIT
	stat_len = SMC_inl(dev, SMC91111_DATA_REG);
	status = stat_len & 0xffff;
	packet_length = stat_len >> 16;
#else
	status		= SMC_inw( dev, SMC91111_DATA_REG );
	packet_length	= SMC_inw( dev, SMC91111_DATA_REG );
#endif

	packet_length &= 0x07ff;  /* mask off top bits */

	PRINTK2("RCV: STATUS %4x LENGTH %4x\n", status, packet_length );

	if ( !(status & RS_ERRORS ) ){
		/* Adjust for having already read the first two words */
		packet_length -= 4; /*4; */

		/* set odd length for bug in LAN91C111, */
		/* which never sets RS_ODDFRAME */
		/* TODO ? */

#ifdef	USE_32_BIT
		PRINTK3(" Reading %d dwords (and %d bytes)\n",
			packet_length >> 2, packet_length & 3 );
		/* QUESTION:  Like in the TX routine, do I want
		   to send the DWORDs or the bytes first, or some
		   mixture.  A mixture might improve already slow PIO
		   performance  */
		SMC_insl(dev, SMC91111_DATA_REG, net_rx_packets[0],
			 packet_length >> 2);
		/* read the left over bytes (little-endian unpack of the
		   final partial dword) */
		if (packet_length & 3) {
			int i;

			byte *tail = (byte *)(net_rx_packets[0] +
				(packet_length & ~3));
			dword leftover = SMC_inl(dev, SMC91111_DATA_REG);
			for (i=0; i<(packet_length & 3); i++)
				*tail++ = (byte) (leftover >> (8*i)) & 0xff;
		}
#else
		PRINTK3(" Reading %d words and %d byte(s)\n",
			(packet_length >> 1 ), packet_length & 1 );
		SMC_insw(dev, SMC91111_DATA_REG , net_rx_packets[0],
			 packet_length >> 1);
#endif /* USE_32_BIT */

#if	SMC_DEBUG > 2
		printf("Receiving Packet\n");
		print_packet(net_rx_packets[0], packet_length);
#endif
	} else {
		/* error ... */
		/* TODO ? */
		is_error = 1;
	}

	while ( SMC_inw( dev, MMU_CMD_REG ) & MC_BUSY )
		udelay(1); /* Wait until not busy */

	/* error or good, tell the card to get rid of this packet */
	SMC_outw( dev, MC_RELEASE, MMU_CMD_REG );

	while ( SMC_inw( dev, MMU_CMD_REG ) & MC_BUSY )
		udelay(1); /* Wait until not busy */

	/* restore saved registers */
	SMC_outb( dev, saved_pnr, PN_REG );
	SMC_outw( dev, saved_ptr, PTR_REG );

	if (!is_error) {
		/* Pass the packet up to the protocol layers. */
		net_process_received_packet(net_rx_packets[0], packet_length);
		return packet_length;
	} else {
		return 0;
	}
}
#if 0
/*------------------------------------------------------------
. Modify a bit in the LAN91C111 register set
.-------------------------------------------------------------*/
/* Set (val != 0) or clear (val == 0) 'bit' in register 'reg' of the
 * given bank, returning the value written back.
 * Fix: the read-modify-write result was previously written to register
 * offset 0 instead of back to 'reg'.
 * NOTE(review): 'ioaddr' is unused — accesses go through dev->iobase. */
static word smc_modify_regbit(struct eth_device *dev, int bank, int ioaddr, int reg,
	unsigned int bit, int val)
{
	word regval;

	SMC_SELECT_BANK( dev, bank );
	regval = SMC_inw( dev, reg );

	if (val)
		regval |= bit;
	else
		regval &= ~bit;

	SMC_outw( dev, regval, reg );
	return(regval);
}
/*------------------------------------------------------------
 . Retrieve a bit in the LAN91C111 register set
 .-------------------------------------------------------------*/
static int smc_get_regbit(struct eth_device *dev, int bank, int ioaddr, int reg, unsigned int bit)
{
	/* Returns 1 if 'bit' is set in register 'reg' of 'bank', else 0.
	 * NOTE(review): 'ioaddr' is unused; accesses go via dev->iobase. */
	SMC_SELECT_BANK( dev, bank );
	if ( SMC_inw( dev, reg ) & bit)
		return(1);
	else
		return(0);
}

/*------------------------------------------------------------
 . Modify a LAN91C111 register (word access only)
 .-------------------------------------------------------------*/
static void smc_modify_reg(struct eth_device *dev, int bank, int ioaddr, int reg, word val)
{
	/* Unconditional 16-bit write of 'val' to register 'reg' of 'bank'. */
	SMC_SELECT_BANK( dev, bank );
	SMC_outw( dev, val, reg );
}

/*------------------------------------------------------------
 . Retrieve a LAN91C111 register (word access only)
 .-------------------------------------------------------------*/
static int smc_get_reg(struct eth_device *dev, int bank, int ioaddr, int reg)
{
	/* Read and return register 'reg' of 'bank' as a 16-bit value. */
	SMC_SELECT_BANK( dev, bank );
	return(SMC_inw( dev, reg ));
}
#endif /* 0 */
/*---PHY CONTROL AND CONFIGURATION----------------------------------------- */
#if (SMC_DEBUG > 2 )
/*------------------------------------------------------------
. Debugging function for viewing MII Management serial bitstream
.-------------------------------------------------------------*/
/* Debug helper: dump an MII management serial bitstream, one row per
 * signal (MDOE / MDO / MDI), one column per clock cycle. */
static void smc_dump_mii_stream (byte * bits, int size)
{
	int cycle;

	printf ("BIT#:");
	for (cycle = 0; cycle < size; ++cycle)
		printf ("%d", cycle % 10);

	printf ("\nMDOE:");
	for (cycle = 0; cycle < size; ++cycle)
		putchar ((bits[cycle] & MII_MDOE) ? '1' : '0');

	printf ("\nMDO :");
	for (cycle = 0; cycle < size; ++cycle)
		putchar ((bits[cycle] & MII_MDO) ? '1' : '0');

	printf ("\nMDI :");
	for (cycle = 0; cycle < size; ++cycle)
		putchar ((bits[cycle] & MII_MDI) ? '1' : '0');

	printf ("\n");
}
#endif
/*------------------------------------------------------------
. Reads a register from the MII Management serial interface
.-------------------------------------------------------------*/
#ifndef CONFIG_SMC91111_EXT_PHY
/*
 * Bit-bang a read of PHY register 'phyreg' over the MII management
 * pins (bank 3, MII_REG).  The serial frame is assembled in bits[]
 * first (32 sync ones, start <01>, read op <10>, 5-bit PHY address,
 * 5-bit register number, turnaround, 16 data clocks), then clocked out
 * with one low/high MCLK pair per entry, sampling MDI on each cycle.
 * Returns the 16-bit value read.  The original bank selection is
 * restored before returning.
 */
static word smc_read_phy_register (struct eth_device *dev, byte phyreg)
{
	int oldBank;
	int i;
	byte mask;
	word mii_reg;
	byte bits[64];
	int clk_idx = 0;
	int input_idx;
	word phydata;
	byte phyaddr = SMC_PHY_ADDR;

	/* 32 consecutive ones on MDO to establish sync */
	for (i = 0; i < 32; ++i)
		bits[clk_idx++] = MII_MDOE | MII_MDO;

	/* Start code <01> */
	bits[clk_idx++] = MII_MDOE;
	bits[clk_idx++] = MII_MDOE | MII_MDO;

	/* Read command <10> */
	bits[clk_idx++] = MII_MDOE | MII_MDO;
	bits[clk_idx++] = MII_MDOE;

	/* Output the PHY address, msb first */
	mask = (byte) 0x10;
	for (i = 0; i < 5; ++i) {
		if (phyaddr & mask)
			bits[clk_idx++] = MII_MDOE | MII_MDO;
		else
			bits[clk_idx++] = MII_MDOE;

		/* Shift to next lowest bit */
		mask >>= 1;
	}

	/* Output the phy register number, msb first */
	mask = (byte) 0x10;
	for (i = 0; i < 5; ++i) {
		if (phyreg & mask)
			bits[clk_idx++] = MII_MDOE | MII_MDO;
		else
			bits[clk_idx++] = MII_MDOE;

		/* Shift to next lowest bit */
		mask >>= 1;
	}

	/* Tristate and turnaround (2 bit times) — only one cycle is
	 * emitted here so the total comes out to exactly 64 entries. */
	bits[clk_idx++] = 0;
	/*bits[clk_idx++] = 0; */

	/* Input starts at this bit time */
	input_idx = clk_idx;

	/* Will input 16 bits */
	for (i = 0; i < 16; ++i)
		bits[clk_idx++] = 0;

	/* Final clock bit */
	bits[clk_idx++] = 0;

	/* Save the current bank */
	oldBank = SMC_inw (dev, BANK_SELECT);

	/* Select bank 3 */
	SMC_SELECT_BANK (dev, 3);

	/* Get the current MII register value */
	mii_reg = SMC_inw (dev, MII_REG);

	/* Turn off all MII Interface bits */
	mii_reg &= ~(MII_MDOE | MII_MCLK | MII_MDI | MII_MDO);

	/* Clock all 64 cycles */
	for (i = 0; i < sizeof bits; ++i) {
		/* Clock Low - output data */
		SMC_outw (dev, mii_reg | bits[i], MII_REG);
		udelay (SMC_PHY_CLOCK_DELAY);

		/* Clock Hi - input data */
		SMC_outw (dev, mii_reg | bits[i] | MII_MCLK, MII_REG);
		udelay (SMC_PHY_CLOCK_DELAY);
		bits[i] |= SMC_inw (dev, MII_REG) & MII_MDI;
	}

	/* Return to idle state */
	/* Set clock to low, data to low, and output tristated */
	SMC_outw (dev, mii_reg, MII_REG);
	udelay (SMC_PHY_CLOCK_DELAY);

	/* Restore original bank select */
	SMC_SELECT_BANK (dev, oldBank);

	/* Recover input data: assemble the 16 sampled MDI bits, msb first */
	phydata = 0;
	for (i = 0; i < 16; ++i) {
		phydata <<= 1;

		if (bits[input_idx++] & MII_MDI)
			phydata |= 0x0001;
	}

#if (SMC_DEBUG > 2 )
	printf ("smc_read_phy_register(): phyaddr=%x,phyreg=%x,phydata=%x\n",
		phyaddr, phyreg, phydata);
	smc_dump_mii_stream (bits, sizeof bits);
#endif

	return (phydata);
}
/*------------------------------------------------------------
. Writes a register to the MII Management serial interface
.-------------------------------------------------------------*/
/*
 * Bit-bang a write of 'phydata' to PHY register 'phyreg' over the MII
 * management pins (bank 3, MII_REG).  Mirrors smc_read_phy_register():
 * the frame (32 sync ones, start <01>, write op <01>, 5-bit PHY
 * address, 5-bit register number, 2-cycle turnaround, 16 data bits,
 * final tristate cycle = 65 entries) is assembled in bits[] and then
 * clocked out one low/high MCLK pair per entry.  The original bank
 * selection is restored before returning.
 */
static void smc_write_phy_register (struct eth_device *dev, byte phyreg,
	word phydata)
{
	int oldBank;
	int i;
	word mask;
	word mii_reg;
	byte bits[65];
	int clk_idx = 0;
	byte phyaddr = SMC_PHY_ADDR;

	/* 32 consecutive ones on MDO to establish sync */
	for (i = 0; i < 32; ++i)
		bits[clk_idx++] = MII_MDOE | MII_MDO;

	/* Start code <01> */
	bits[clk_idx++] = MII_MDOE;
	bits[clk_idx++] = MII_MDOE | MII_MDO;

	/* Write command <01> */
	bits[clk_idx++] = MII_MDOE;
	bits[clk_idx++] = MII_MDOE | MII_MDO;

	/* Output the PHY address, msb first */
	mask = (byte) 0x10;
	for (i = 0; i < 5; ++i) {
		if (phyaddr & mask)
			bits[clk_idx++] = MII_MDOE | MII_MDO;
		else
			bits[clk_idx++] = MII_MDOE;

		/* Shift to next lowest bit */
		mask >>= 1;
	}

	/* Output the phy register number, msb first */
	mask = (byte) 0x10;
	for (i = 0; i < 5; ++i) {
		if (phyreg & mask)
			bits[clk_idx++] = MII_MDOE | MII_MDO;
		else
			bits[clk_idx++] = MII_MDOE;

		/* Shift to next lowest bit */
		mask >>= 1;
	}

	/* Tristate and turnaround (2 bit times) */
	bits[clk_idx++] = 0;
	bits[clk_idx++] = 0;

	/* Write out 16 bits of data, msb first */
	mask = 0x8000;
	for (i = 0; i < 16; ++i) {
		if (phydata & mask)
			bits[clk_idx++] = MII_MDOE | MII_MDO;
		else
			bits[clk_idx++] = MII_MDOE;

		/* Shift to next lowest bit */
		mask >>= 1;
	}

	/* Final clock bit (tristate) */
	bits[clk_idx++] = 0;

	/* Save the current bank */
	oldBank = SMC_inw (dev, BANK_SELECT);

	/* Select bank 3 */
	SMC_SELECT_BANK (dev, 3);

	/* Get the current MII register value */
	mii_reg = SMC_inw (dev, MII_REG);

	/* Turn off all MII Interface bits */
	mii_reg &= ~(MII_MDOE | MII_MCLK | MII_MDI | MII_MDO);

	/* Clock all cycles */
	for (i = 0; i < sizeof bits; ++i) {
		/* Clock Low - output data */
		SMC_outw (dev, mii_reg | bits[i], MII_REG);
		udelay (SMC_PHY_CLOCK_DELAY);

		/* Clock Hi - input data */
		SMC_outw (dev, mii_reg | bits[i] | MII_MCLK, MII_REG);
		udelay (SMC_PHY_CLOCK_DELAY);
		bits[i] |= SMC_inw (dev, MII_REG) & MII_MDI;
	}

	/* Return to idle state */
	/* Set clock to low, data to low, and output tristated */
	SMC_outw (dev, mii_reg, MII_REG);
	udelay (SMC_PHY_CLOCK_DELAY);

	/* Restore original bank select */
	SMC_SELECT_BANK (dev, oldBank);

#if (SMC_DEBUG > 2 )
	printf ("smc_write_phy_register(): phyaddr=%x,phyreg=%x,phydata=%x\n",
		phyaddr, phyreg, phydata);
	smc_dump_mii_stream (bits, sizeof bits);
#endif
}
#endif /* !CONFIG_SMC91111_EXT_PHY */
/*------------------------------------------------------------
. Configures the specified PHY using Autonegotiation. Calls
. smc_phy_fixed() if the user has requested a certain config.
.-------------------------------------------------------------*/
#ifndef CONFIG_SMC91111_EXT_PHY
/*
 * Reset the internal PHY, advertise every capability the PHY reports
 * (T4, 100TX full/half, 10T full/half), and run auto-negotiation.
 * Failures (reset timeout, autoneg timeout, remote fault) are reported
 * on the console but not returned to the caller — the link is simply
 * left in whatever state was reached.
 */
static void smc_phy_configure (struct eth_device *dev)
{
	int timeout;
	word my_phy_caps;	/* My PHY capabilities */
	word my_ad_caps;	/* My Advertised capabilities */
	word status = 0;	/*;my status = 0 */

	PRINTK3 ("%s: smc_program_phy()\n", SMC_DEV_NAME);

	/* Reset the PHY, setting all other bits to zero */
	smc_write_phy_register (dev, PHY_CNTL_REG, PHY_CNTL_RST);

	/* Wait for the reset to complete, or time out */
	timeout = 6;		/* Wait up to 3 seconds */
	while (timeout--) {
		if (!(smc_read_phy_register (dev, PHY_CNTL_REG)
		      & PHY_CNTL_RST)) {
			/* reset complete */
			break;
		}

		mdelay(500);	/* wait 500 millisecs */
	}

	if (timeout < 1) {
		printf ("%s:PHY reset timed out\n", SMC_DEV_NAME);
		goto smc_phy_configure_exit;
	}

	/* Read PHY Register 18, Status Output */
	/* lp->lastPhy18 = smc_read_phy_register(PHY_INT_REG); */

	/* Enable PHY Interrupts (for register 18) */
	/* Interrupts listed here are disabled */
	smc_write_phy_register (dev, PHY_MASK_REG, 0xffff);

	/* Configure the Receive/Phy Control register */
	SMC_SELECT_BANK (dev, 0);
	SMC_outw (dev, RPC_DEFAULT, RPC_REG);

	/* Copy our capabilities from PHY_STAT_REG to PHY_AD_REG */
	my_phy_caps = smc_read_phy_register (dev, PHY_STAT_REG);
	my_ad_caps = PHY_AD_CSMA;	/* I am CSMA capable */

	if (my_phy_caps & PHY_STAT_CAP_T4)
		my_ad_caps |= PHY_AD_T4;

	if (my_phy_caps & PHY_STAT_CAP_TXF)
		my_ad_caps |= PHY_AD_TX_FDX;

	if (my_phy_caps & PHY_STAT_CAP_TXH)
		my_ad_caps |= PHY_AD_TX_HDX;

	if (my_phy_caps & PHY_STAT_CAP_TF)
		my_ad_caps |= PHY_AD_10_FDX;

	if (my_phy_caps & PHY_STAT_CAP_TH)
		my_ad_caps |= PHY_AD_10_HDX;

	/* Update our Auto-Neg Advertisement Register */
	smc_write_phy_register (dev, PHY_AD_REG, my_ad_caps);

	/* Read the register back.  Without this, it appears that when */
	/* auto-negotiation is restarted, sometimes it isn't ready and */
	/* the link does not come up. */
	smc_read_phy_register(dev, PHY_AD_REG);

	PRINTK2 ("%s: phy caps=%x\n", SMC_DEV_NAME, my_phy_caps);
	PRINTK2 ("%s: phy advertised caps=%x\n", SMC_DEV_NAME, my_ad_caps);

	/* Restart auto-negotiation process in order to advertise my caps */
	smc_write_phy_register (dev, PHY_CNTL_REG,
				PHY_CNTL_ANEG_EN | PHY_CNTL_ANEG_RST);

	/* Wait for the auto-negotiation to complete.  This may take from */
	/* 2 to 3 seconds. */
	/* Wait for the reset to complete, or time out */
	timeout = CONFIG_SMC_AUTONEG_TIMEOUT * 2;
	while (timeout--) {
		status = smc_read_phy_register (dev, PHY_STAT_REG);
		if (status & PHY_STAT_ANEG_ACK) {
			/* auto-negotiate complete */
			break;
		}

		mdelay(500);	/* wait 500 millisecs */

		/* Restart auto-negotiation if remote fault */
		if (status & PHY_STAT_REM_FLT) {
			printf ("%s: PHY remote fault detected\n",
				SMC_DEV_NAME);

			/* Restart auto-negotiation */
			printf ("%s: PHY restarting auto-negotiation\n",
				SMC_DEV_NAME);
			smc_write_phy_register (dev, PHY_CNTL_REG,
						PHY_CNTL_ANEG_EN |
						PHY_CNTL_ANEG_RST |
						PHY_CNTL_SPEED |
						PHY_CNTL_DPLX);
		}
	}

	if (timeout < 1) {
		printf ("%s: PHY auto-negotiate timed out\n", SMC_DEV_NAME);
	}

	/* Fail if we detected an auto-negotiate remote fault */
	if (status & PHY_STAT_REM_FLT) {
		printf ("%s: PHY remote fault detected\n", SMC_DEV_NAME);
	}

	/* Re-Configure the Receive/Phy Control register */
	SMC_outw (dev, RPC_DEFAULT, RPC_REG);

smc_phy_configure_exit:	;
}
#endif /* !CONFIG_SMC91111_EXT_PHY */
#if SMC_DEBUG > 2
/* Debug helper: print the packet length and, when SMC_DEBUG > 3, a hex
 * dump of the payload in rows of 16 bytes (8 byte-pairs per row).
 * Fixes vs. original: the last byte of an odd-length packet was never
 * printed (only remainder/2 pairs were dumped), and 'lines'/'remainder'
 * were declared even when SMC_DEBUG <= 3, causing unused-variable
 * warnings. */
static void print_packet( byte * buf, int length )
{
	printf("Packet of length %d \n", length );

#if SMC_DEBUG > 3
	{
		int lines = length / 16;
		int remainder = length % 16;
		int i;

		/* full 16-byte rows */
		for ( i = 0; i < lines ; i ++ ) {
			int cur;

			for ( cur = 0; cur < 8; cur ++ ) {
				byte a, b;

				a = *(buf ++ );
				b = *(buf ++ );
				printf("%02x%02x ", a, b );
			}
			printf("\n");
		}

		/* trailing partial row, printed as byte pairs */
		for ( i = 0; i < remainder/2 ; i++ ) {
			byte a, b;

			a = *(buf ++ );
			b = *(buf ++ );
			printf("%02x%02x ", a, b );
		}

		/* final byte of an odd-length packet */
		if (remainder & 1)
			printf("%02x ", *buf );

		printf("\n");
	}
#endif
}
#endif
/* Allocate, populate and register one SMC91111 ethernet device bound
 * to the given register base address.  The MAC address already held by
 * the chip is read back into dev->enetaddr.  Always returns 0 (also on
 * allocation failure). */
int smc91111_initialize(u8 dev_num, int base_addr)
{
	struct smc91111_priv *priv;
	struct eth_device *dev;
	int idx;

	priv = malloc(sizeof(*priv));
	if (!priv)
		return 0;

	dev = malloc(sizeof(*dev));
	if (!dev) {
		free(priv);
		return 0;
	}
	memset(dev, 0, sizeof(*dev));

	priv->dev_num = dev_num;
	dev->priv = priv;
	dev->iobase = base_addr;

	/* Fetch the MAC address the chip already holds (bank 1). */
	swap_to(ETHERNET);
	SMC_SELECT_BANK(dev, 1);
	for (idx = 0; idx < 6; ++idx)
		dev->enetaddr[idx] = SMC_inb(dev, (ADDR0_REG + idx));
	swap_to(FLASH);

	dev->init = smc_init;
	dev->halt = smc_halt;
	dev->send = smc_send;
	dev->recv = smc_rcv;
	dev->write_hwaddr = smc_write_hwaddr;

	sprintf(dev->name, "%s-%hu", SMC_DEV_NAME, dev_num);

	eth_register(dev);
	return 0;
}
|
ev3dev/u-boot
|
drivers/net/smc91111.c
|
C
|
gpl-2.0
| 32,578
|
<?php
/**
* Licensed to Jasig under one or more contributor license
* agreements. See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*
* Jasig licenses this file to you under the Apache License,
* Version 2.0 (the "License"); you may not use this file except in
* compliance with the License. You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* PHP Version 5
*
* @file CAS/AuthenticationException.php
* @category Authentication
* @package PhpCAS
* @author Joachim Fritschi <jfritschi@freenet.de>
* @license http://www.apache.org/licenses/LICENSE-2.0 Apache License 2.0
* @link https://wiki.jasig.org/display/CASC/phpCAS
*/
/**
* This interface defines methods that allow proxy-authenticated service handlers
* to interact with phpCAS.
*
* Proxy service handlers must implement this interface as well as call
* phpCAS::initializeProxiedService($this) at some point in their implementation.
*
* While not required, proxy-authenticated service handlers are encouraged to
* implement the CAS_ProxiedService_Testable interface to facilitate unit testing.
*
* @class CAS_AuthenticationException
* @category Authentication
* @package PhpCAS
* @author Joachim Fritschi <jfritschi@freenet.de>
* @license http://www.apache.org/licenses/LICENSE-2.0 Apache License 2.0
* @link https://wiki.jasig.org/display/CASC/phpCAS
*/
class CAS_AuthenticationException
extends RuntimeException
implements CAS_Exception
{
    /**
     * This method is used to print the HTML output when the user was not
     * authenticated.  Note: constructing this exception renders the full
     * failure page (header, message, footer) and writes trace output as
     * a side effect.
     *
     * @param CAS_Client $client       phpcas client
     * @param string     $failure      the failure that occurred
     * @param string     $cas_url      the URL the CAS server was asked for
     * @param bool       $no_response  the response from the CAS server (other
     * parameters are ignored if TRUE)
     * @param bool       $bad_response bad response from the CAS server ($err_code
     * and $err_msg ignored if TRUE)
     * @param string     $cas_response the response of the CAS server
     * @param int        $err_code     the error code given by the CAS server
     * @param string     $err_msg      the error message given by the CAS server
     */
    public function __construct($client,$failure,$cas_url,$no_response,
        $bad_response='',$cas_response='',$err_code='',$err_msg=''
    ) {
        phpCAS::traceBegin();
        $lang = $client->getLangObj();
        // Render the "authentication failed" page for the end user.
        // NOTE(review): $_SERVER['SERVER_ADMIN'] may be unset on some
        // SAPIs — confirm the message template tolerates an empty value.
        $client->printHTMLHeader($lang->getAuthenticationFailed());
        printf(
            $lang->getYouWereNotAuthenticated(),
            htmlentities($client->getURL()),
            $_SERVER['SERVER_ADMIN']
        );
        // The remainder only produces trace/debug output describing why
        // authentication failed.
        phpCAS::trace('CAS URL: '.$cas_url);
        phpCAS::trace('Authentication failure: '.$failure);
        if ( $no_response ) {
            phpCAS::trace('Reason: no response from the CAS server');
        } else {
            if ( $bad_response ) {
                phpCAS::trace('Reason: bad response from the CAS server');
            } else {
                // Interpret the CAS server's answer per protocol version.
                switch ($client->getServerVersion()) {
                case CAS_VERSION_1_0:
                    phpCAS::trace('Reason: CAS error');
                    break;
                case CAS_VERSION_2_0:
                    if ( empty($err_code) ) {
                        phpCAS::trace('Reason: no CAS error');
                    } else {
                        phpCAS::trace('Reason: ['.$err_code.'] CAS error: '.$err_msg);
                    }
                    break;
                }
            }
            phpCAS::trace('CAS response: '.$cas_response);
        }
        $client->printHTMLFooter();
        phpCAS::traceExit();
    }
}
?>
|
CasaJasmina/website
|
wp-content/plugins/uthsc-wpcas/phpCAS-1.3-stable/source/CAS/AuthenticationException.php
|
PHP
|
gpl-2.0
| 4,228
|
/*
* osApi.h
*
* Copyright(c) 1998 - 2010 Texas Instruments. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name Texas Instruments nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*--------------------------------------------------------------------------*/
/* Module: OSAPI.H*/
/**/
/* Purpose: This module defines unified interface to the OS specific*/
/* sources and services.*/
/**/
/*--------------------------------------------------------------------------*/
#ifndef __OS_API_H__
#define __OS_API_H__
/** \file osApi.h
* \brief Operating System APIs \n
* This module defines unified interface to the OS specific sources and services
*/
#include "tidef.h"
#include "TI_IPC_Api.h"
#ifdef __cplusplus
extern "C" {
#endif
/** \struct TI_CONNECTION_STATUS
* \struct *PTI_CONNECTION_STATUS
* \brief Ti Connection Status
*
* \par Description
*
* \sa
*/
/* Connection-status notification passed between driver and OS layer. */
typedef struct
{
	TI_UINT32 Event;	/* event identifier */
	TI_UINT8* Data;		/* event payload; format presumably depends on Event — TODO confirm */
} TI_CONNECTION_STATUS, *PTI_CONNECTION_STATUS;
/* Fixed-size header of a trace message (parameters follow in memory,
 * up to TRACE_MSG_MAX_PARAMS — see TRACE_MSG_MAX_LENGTH). */
typedef struct
{
	TI_UINT8 uFormat;	/* parameter encoding; presumably one of the TRACE_FORMAT_*_BITS_PARAMS values — TODO confirm */
	TI_UINT8 uLevel;	/* trace severity level */
	TI_UINT8 uParamsNum;	/* number of parameters that follow this header */
	TI_UINT8 uReserved;	/* reserved / padding */
	TI_UINT16 uFileId;	/* originating source-file identifier */
	TI_UINT16 uLineNum;	/* originating source-line number */
} TTraceMsg;
#define OS_PAGE_SIZE 4096
#define MAX_MESSAGE_SIZE 500
#define MICROSECOND_IN_SECONDS 1000000
#define UINT16_MAX_VAL 0xffff
#define UINT8_MAX_VAL 0xff
#define TRACE_FORMAT_8_BITS_PARAMS 2
#define TRACE_FORMAT_16_BITS_PARAMS 4
#define TRACE_FORMAT_32_BITS_PARAMS 6
#define TRACE_MSG_MAX_PARAMS 32
#define TRACE_MSG_MIN_LENGTH (sizeof(TTraceMsg))
#define TRACE_MSG_MAX_LENGTH ((TRACE_MSG_MAX_PARAMS * 4) + sizeof(TTraceMsg))
/* Serialization helpers: store a value of the given width at pBuf and
 * advance pBuf past it.  Each macro is wrapped in do { } while (0) so it
 * behaves as a single statement (safe inside an un-braced if/else), and
 * the arguments are parenthesized against precedence surprises; the
 * original expansions were two bare statements joined by ';'.
 * NOTE(review): the casts assume pBuf is suitably aligned for the target
 * width, exactly as in the original code - confirm on platforms that
 * fault on unaligned access. */
#define INSERT_BYTE(pBuf, dataByte)     do { *((TI_UINT8  *)(pBuf)) = (TI_UINT8 )(dataByte );  (pBuf) += 1; } while (0)
#define INSERT_2_BYTES(pBuf, dataBytes) do { *((TI_UINT16 *)(pBuf)) = (TI_UINT16)(dataBytes); (pBuf) += 2; } while (0)
#define INSERT_4_BYTES(pBuf, dataBytes) do { *((TI_UINT32 *)(pBuf)) = (TI_UINT32)(dataBytes); (pBuf) += 4; } while (0)
/****************************************************************************************
START OF OS API (Common to all GWSI LIB, Driver and TI Driver)
*****************************************************************************************/
/****************************************************************************************
OS HW API NEEDED BY DRIVER
*****************************************************************************************/
/** \brief OS Disable IRQ
*
* \param OsContext - Handle to the OS object
* \return void
*
* \par Description
* This function disables the Interrupts
*
* \sa
*/
void os_disableIrq (TI_HANDLE OsContext);
/** \brief OS Enable IRQ
*
* \param OsContext - Handle to the OS object
* \return void
*
* \par Description
* This function enables the Interrupts
*
* \sa
*/
void os_enableIrq (TI_HANDLE OsContext);
/** \brief OS IRQ Serviced
*
* \param OsContext - Handle to the OS object
* \return void
*
* \par Description
* This function is used in Level IRQ only. At this point the interrupt line is not asserted anymore
* and we can inform the OS to enable IRQ again.
*
* \sa
*/
void os_InterruptServiced (TI_HANDLE OsContext);
/****************************************************************************************
* OS Report API *
****************************************************************************************/
/** \brief OS Set Debug Mode
*
* \param enable - Indicates if debug mode should be enabled or disabled ( TI_TRUE | TI_FALSE )
* \return void
*
* \par Description
* This function sets the Debug Mode flag to True or False - according to user's request
*
* \sa
*/
void os_setDebugMode (TI_BOOL enable);
/** \brief OS Printf
*
 * \param  format - String to print (with formatted parameters in the string if needed) and parameter values
 *					if formatted parameters are used in the string
* \return void
*
* \par Description
* This function prints formatted output using OS available printf method
*
* \sa
*/
void os_printf (const char *format ,...);
/****************************************************************************************
* OS Memory API *
****************************************************************************************/
/** \brief OS Memory Allocation
*
* \param OsContext - Handle to the OS object
* \param Size - Size (in bytes) to be allocated
* \return Pointer to the allocated memory on success ; NULL on failure (there isn't enough memory available)
*
* \par Description
* This function allocates resident (nonpaged) system-space memory with calling specific OS allocation function. \n
* It is assumed that this function will never be called in an interrupt context since the OS allocation function
* has the potential to put the caller to sleep while waiting for memory to become available.
*
* \sa
*/
void *os_memoryAlloc (TI_HANDLE OsContext,TI_UINT32 Size);
/** \brief OS Memory CAllocation
*
* \param OsContext - Handle to the OS object
* \param Number - Number of element to be allocated
* \param Size - Size (in bytes) of one element
* \return Pointer to the allocated memory on success ; NULL on failure (there isn't enough memory available)
*
* \par Description
* This function allocates an array in memory with elements initialized to 0.
* Allocates resident (nonpaged) system-space memory for an array with elements initialized to 0,
* with specific OS allocation function.
* It is assumed that this function will never be called in an interrupt context since the OS allocation function
* has the potential to put the caller to sleep while waiting for memory to become available.
*
* \sa
*/
void *os_memoryCAlloc (TI_HANDLE OsContext, TI_UINT32 Number, TI_UINT32 Size);
/** \brief OS Memory Set
*
* \param OsContext - Handle to the OS object
* \param pMemPtr - Pointer to the base address of a memory block
* \param Value - Value to set to memory block
* \param Length - Length (in bytes) of memory block
* \return void
*
* \par Description
* This function fills a block of memory with a given value
*
* \sa
*/
void os_memorySet (TI_HANDLE OsContext, void *pMemPtr, TI_INT32 Value, TI_UINT32 Length);
/** \brief OS Memory Zero
*
* \param OsContext - Handle to the OS object
* \param pMemPtr - Pointer to the base address of a memory block
* \param Length - Length (in bytes) of memory block
* \return void
*
* \par Description
* This function fills a block of memory with zeros
*
* \sa
*/
void os_memoryZero (TI_HANDLE OsContext, void *pMemPtr, TI_UINT32 Length);
/** \brief OS Memory Copy
*
* \param OsContext - Handle to the OS object
* \param pDestination - Pointer to destination buffer
* \param pSource - Pointer to Source buffer
* \param Size - Size (in bytes) to copy
* \return void
*
* \par Description
* This function copies a specified number of bytes from one caller-supplied location (source buffer) to another (destination buffer)
*
* \sa
*/
void os_memoryCopy (TI_HANDLE OsContext, void *pDestination, void *pSource, TI_UINT32 Size);
/** \brief OS Memory Free
*
* \param OsContext - Handle to the OS object
* \param pMemPtr - Pointer to the base address of a memory block
* \param Size - Size (in bytes) to free
* \return void
*
* \par Description
* This function releases a block of memory which was previously allocated by user
*
* \sa
*/
void os_memoryFree (TI_HANDLE OsContext, void *pMemPtr, TI_UINT32 Size);
/** \brief OS Memory Compare
*
* \param OsContext - Handle to the OS object
 * \param  Buf1 	- Pointer to the first buffer in the comparison
 * \param  Buf2 	- Pointer to the second buffer in the comparison
* \param Count - Count (in bytes) to compare
* \return A value which indicates the relationship between the two compared buffers:
* value < 0: Buf1 less than Buf2
* value == 0: Buf1 identical to Buf2
* value > 0: Buf1 greater than Buf2
*
* \par Description
* This function compares between two given buffers
*
* \sa
*/
TI_INT32 os_memoryCompare (TI_HANDLE OsContext, TI_UINT8* Buf1, TI_UINT8* Buf2, TI_INT32 Count);
/** \brief OS Memory Allocation for HW DMA
*
* \param pOsContext - Handle to the OS object
* \param Size - Size (in bytes) to allocate
* \return Pointer to the allocated memory on success ; NULL on failure (there isn't enough memory available)
*
* \par Description
* This function allocates resident (nonpaged) system-space memory for HW DMA operations
*
* \sa
*/
void *os_memoryAlloc4HwDma (TI_HANDLE pOsContext, TI_UINT32 Size);
/** \brief OS Memory for HW DMA Free
*
* \param pOsContext - Handle to the OS object
* \param pMem_ptr - Pointer to the base virtual address of allocated memory block
* This is the address that was returned to user when he allocated the memory for HW DMA usage
* \param Size - Size (in bytes) of the memory block to be released. This parameter must be identical to the Length
* which was given by the user when he allocated the memory block for HW DMA usage
* \return Pointer to the allocated memory on success ; NULL on failure (there isn't enough memory available)
*
* \par Description
* This function releases a block of memory previously allocated by user for HW DMA operations
*
* \sa
*/
void os_memory4HwDmaFree (TI_HANDLE pOsContext, void *pMem_ptr, TI_UINT32 Size);
/** \brief OS Memory Copy from User
*
* \param OsContext - Handle to the OS object
* \param pDstPtr - Pointer to destination buffer
* \param pSrcPtr - Pointer to Source buffer
* \param Size - Size (in bytes) to copy
* \return TI_OK on success ; TI_NOK otherwise
*
* \par Description
* This function copies a specified number of bytes from one caller-supplied location (Source) to another (Destination)
*
* \sa
*/
int os_memoryCopyFromUser (TI_HANDLE OsContext, void *pDstPtr, void *pSrcPtr, TI_UINT32 Size);
/** \brief OS Memory Copy To User
*
* \param OsContext - Handle to the OS object
* \param pDstPtr - Pointer to destination buffer
* \param pSrcPtr - Pointer to Source buffer
* \param Size - Size (in bytes) to copy
* \return TI_OK on success ; TI_NOK otherwise
*
* \par Description
* This function copies a specified number of bytes from one caller-supplied location (Source) to another (Destination)
*
* \sa
*/
int os_memoryCopyToUser (TI_HANDLE OsContext, void *pDstPtr, void *pSrcPtr, TI_UINT32 Size);
/****************************************************************************************
* OS TIMER API *
****************************************************************************************/
/** \brief Timer Callback Function
*
* \param Context - Handle to the OS object
* \return void
*
* \par Description
* This callback is passed by user to OS timer when created, and is called directly from OS timer context when expired.
 * E.g. the user uses the timer so that this function runs after a defined time expires
*
*/
typedef void (*fTimerFunction)(TI_HANDLE Context);
/** \brief OS Timer Create
*
* \param OsContext - Handle to the OS object
* \param pRoutine - Pointer to user's Timer Callback function
* \param hFuncHandle - Handle to user's Timer Callback function parameters
* \return Handle to timer object on success ; NULL on failure
*
* \par Description
* This function creates and initializes an OS timer object associated with a user's Timer Callback function \n
* \note 1) The user's callback is called directly from OS timer context when expired.
* \note 2) In some OSs, it may be needed to use an intermediate callback in the
* \note osapi layer (use os_timerHandlr for that).
*
* \sa
*/
TI_HANDLE os_timerCreate (TI_HANDLE OsContext, fTimerFunction pRoutine, TI_HANDLE hFuncHandle);
/** \brief OS Timer Destroy
*
* \param OsContext - Handle to the OS object
* \param TimerHandle - Handle to timer object which user got when created the timer
* \return void
*
* \par Description
* This function destroys the OS timer object which was previously created by user
*
* \sa
*/
void os_timerDestroy (TI_HANDLE OsContext, TI_HANDLE TimerHandle);
/** \brief OS Timer Start
*
* \param OsContext - Handle to the OS object
* \param TimerHandle - Handle to timer object which user got when created the timer
 * \param  DelayMs 	- The time in ms until the timer expires
* \return void
*
* \par Description
* This function Start the OS timer object which was previously created by user
*
* \sa
*/
void os_timerStart (TI_HANDLE OsContext, TI_HANDLE TimerHandle, TI_UINT32 DelayMs);
/** \brief OS Timer Stop
*
* \param OsContext - Handle to the OS object
* \param TimerHandle - Handle to timer object which user got when created the timer
* \return void
*
* \par Description
* This function Stops the OS timer object which was previously created by user
*
* \sa
*/
void os_timerStop (TI_HANDLE OsContext, TI_HANDLE TimerHandle);
/** \brief OS Periodic Interrupt Timer Start
*
* \param OsContext - Handle to the OS object
* \return void
*
* \par Description
* This function starts the periodic interrupt mechanism. This function is used when PRIODIC_INTERRUPT mode is used.
* This Mode is enabled when interrupts that are usually received from the FW are masked,
* and there is need to check- in a given time periods - if handling of any FW interrupt is needed.
*
* \sa
*/
#ifdef PRIODIC_INTERRUPT
void os_periodicIntrTimerStart (TI_HANDLE OsContext);
#endif
/** \brief OS Time Stamp Ms
*
* \param OsContext - Handle to the OS object
* \return The number of milliseconds that have elapsed since the system was booted
*
* \par Description
* This function returns the number of milliseconds that have elapsed since the system was booted.
*/
TI_UINT32 os_timeStampMs (TI_HANDLE OsContext);
/** \brief OS Time Stamp Us
*
* \param OsContext - Handle to the OS object
* \return The number of microseconds that have elapsed since the system was booted
*
* \par Description
* This function returns the number of microseconds that have elapsed since the system was booted. \n
* Note that sometimes this function will be called with NULL(!!!) as argument!
*/
TI_UINT32 os_timeStampUs (TI_HANDLE OsContext);
/** \brief OS Stall uSec
*
* \param OsContext - Handle to the OS object
* \param uSec - The time to delay in microseconds
* \return void
*
* \par Description
* This function makes delay in microseconds
*
* \sa
*/
void os_StalluSec (TI_HANDLE OsContext, TI_UINT32 uSec);
/****************************************************************************************
* Protection services API *
****************************************************************************************/
/** \brief OS Protect Create
*
* \param OsContext - Handle to the OS object
* \return Handle of the created mutex/spin lock object on Success ; NULL on Failure (not enough memory available or problems to initializing the mutex)
*
* \par Description
* This function allocates a mutex/spin lock object.
* The mutex/spinlock object which is created by this function is used for mutual-exclusion and protection of resources which are shared between
* multi-Tasks/Threads
*
* \sa
*/
TI_HANDLE os_protectCreate (TI_HANDLE OsContext);
/** \brief OS Protect Destroy
*
* \param OsContext - Handle to the OS object
* \param ProtectContext - Handle to the mutex/spin lock object
* \return void
*
* \par Description
* This function destroys s a mutex/spin lock object which was previously created by user:
* it frees the mutex/spin lock and then frees the object's memory
*
* \sa
*/
void os_protectDestroy (TI_HANDLE OsContext, TI_HANDLE ProtectContext);
/** \brief OS Protect Lock
*
* \param OsContext - Handle to the OS object
* \param ProtectContext - Handle to the mutex/spin lock object
* \return void
*
* \par Description
* This function locks the mutex/spin lock object. E.g. the caller acquires a mutex/spin lock and gains exclusive
* access to the shared resources, that the mutex/spin lock protects of.
*
* \sa
*/
void os_protectLock (TI_HANDLE OsContext, TI_HANDLE ProtectContext);
/** \brief OS Protect Unlock
*
* \param OsContext - Handle to the OS object
* \param ProtectContext - Handle to the mutex/spin lock object
* \return void
*
* \par Description
* This function unlocks the mutex/spin lock object.
*
* \sa
*/
void os_protectUnlock (TI_HANDLE OsContext, TI_HANDLE ProtectContext);
/* Wakelock functionality */
int os_wake_lock (TI_HANDLE OsContext);
int os_wake_unlock (TI_HANDLE OsContext);
int os_wake_lock_timeout (TI_HANDLE OsContext);
int os_wake_lock_timeout_enable (TI_HANDLE OsContext);
#define os_profile(hos,fn,par)
/****************************************************************************************
START OF GWSI DRIVER API
*****************************************************************************************/
/** \brief OS Signaling Object Create
*
* \param OsContext - Handle to the OS object
* \return Pointer to Signal Object on Success ; NULL on Failure
*
* \par Description
 * This function creates a new Signaling Object or opens an already existing Signaling Object.
* The Signaling Object created by this function is used for mutual-exclusion and protection
* of resources which are shared between multi-Tasks/Threads by using a signaling mechanism
*
* \sa
*/
void *os_SignalObjectCreate (TI_HANDLE OsContext);
/** \brief OS Signaling Object Wait
*
* \param OsContext - Handle to the OS object
* \param ptr - Pointer to Signaling Object previously created by user
* \return TI_OK (0) on Success ; TI_NOK (1) on Failure
*
* \par Description
 * This function waits on the Signaling Object. The caller blocks until signaled or until a timeout
*
* \sa
*/
int os_SignalObjectWait (TI_HANDLE OsContext, void *ptr);
/** \brief OS Signaling Object Set
*
* \param OsContext - Handle to the OS object
* \param ptr - Pointer to Signaling Object previously created by user
* \return TI_OK (0) on Success ; TI_NOK (1) on Failure
*
* \par Description
 * This function sets a Signaling Object to the signaled state (i.e. the signaling object is released)
*
* \sa
*/
int os_SignalObjectSet (TI_HANDLE OsContext, void *ptr);
/** \brief OS Signaling Object Free
*
* \param OsContext - Handle to the OS object
* \param ptr - Pointer to Signaling Object previously created by user
* \return TI_OK (0) on Success ; TI_NOK (1) on Failure
*
* \par Description
* This function frees (closes) a Signaling Object Handle
*
* \sa
*/
int os_SignalObjectFree (TI_HANDLE OsContext, void *ptr);
/** \brief OS Schedule Request
*
* \param OsContext - Handle to the OS object
* \return TI_OK (0) on Success ; TI_NOK (1) on Failure
*
* \par Description
* This function performs scheduling (context switch) according to user request
*
* \sa
*/
int os_RequestSchedule (TI_HANDLE OsContext);
/****************************************************************************************
START OF TI DRIVER API
*****************************************************************************************/
/** \brief OS Read Memory Register UINT32
*
* \param OsContext - Handle to the OS object
* \param Register - Pointer to register address
* \param Data - Pointer to output read data
* \return void
*
* \par Description
* This function reads register in 32 bit length
*
* \sa
*/
void os_hwReadMemRegisterUINT32 (TI_HANDLE OsContext, TI_UINT32* Register, TI_UINT32* Data);
/** \brief OS Write Memory Register UINT32
*
* \param OsContext - Handle to the OS object
* \param Register - Pointer to register address
* \param Data - Data to write to register
* \return void
*
* \par Description
* This function reads register in 32 bit length
*
* \sa
*/
void os_hwWriteMemRegisterUINT32 (TI_HANDLE OsContext, TI_UINT32* Register, TI_UINT32 Data);
/** \brief OS Receive Packet
*
* \param OsContext - Handle to the OS object
* \param pPacket - Pointer to received packet data
* \param Length - Length of received packet
* \return TI_TRUE on Success ; TI_FALSE on Failure
*
* \par Description
* This function transfers a packet from WLAN driver to OS
*
* \sa
*/
TI_BOOL os_receivePacket(TI_HANDLE OsContext, void *pRxDesc ,void *pPacket, TI_UINT16 Length);
/** \brief OS Indicate Event
*
* \param OsContext - Handle to the OS object
* \param pData - Pointer to event data
* \return TI_OK (0) on Success ;
*
* \par Description
* This function indicate the OS about different connection driver's events,
* The function performs the rewuired operations for the event - in the OS side
*
* \sa
*/
TI_INT32 os_IndicateEvent (TI_HANDLE OsContext, IPC_EV_DATA *pData);
/** \brief OS Send Trace Message to Logger
*
* \param OsContext - The OS handle
* \param uLevel - Severity level of the trace message
* \param uFileId - Source file ID of the trace message
* \param uLineNum - Line number of the trace message
* \param uParamsNum - Number of parameters in the trace message
* \param ... - The trace message parameters
* \return void
*
* \par Description
* This function sends trace message to logger
*
* \sa
*/
void os_Trace (TI_HANDLE OsContext, TI_UINT32 uLevel, TI_UINT32 uFileId, TI_UINT32 uLineNum, TI_UINT32 uParamsNum, ...);
/**
* \fn os_SetDrvThreadPriority
* \brief Called upon init to set WLAN driver thread priority.
*
* \param OsContext - The OS handle
* \param uWlanDrvThreadPriority - The WLAN driver thread priority
* \return
*/
void os_SetDrvThreadPriority (TI_HANDLE OsContext, TI_UINT32 uWlanDrvThreadPriority);
#ifdef __cplusplus
}
#endif
#endif /* __OS_API_H__ */
|
jjm2473/AMLOGIC_M8
|
drivers/net/wifi-fw-kk-amlogic/wgt7310/wl1271_m603/platforms/os/common/inc/osApi.h
|
C
|
gpl-2.0
| 24,803
|
/*
* Copyright (c) International Business Machines Corp., 2001
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
* the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
* NAME
* fork08.c
*
* DESCRIPTION
* Check if the parent's file descriptors are affected by
* actions in the child; they should not be.
*
* ALGORITHM
* Parent opens a file.
* Forks a child which closes a file.
* Parent forks a second child which attempts to read the (closed)
* file.
*
* USAGE
* fork08
*
* HISTORY
* 07/2001 Ported by Wayne Boyer
*
* RESTRICTIONS
* None
*/
#include <sys/types.h>
#include <sys/wait.h>
#include <sys/stat.h>
#include <stdio.h>
#include "test.h"
#include "usctest.h"
char *TCID = "fork08";
int TST_TOTAL = 1;
static void setup(void);
static void cleanup(void);
static char pbuf[10];
static char fnamebuf[40];
int main(int ac, char **av)
{
int status, count, forks, pid1;
int ch_r_stat;
FILE *rea, *writ;
int lc;
const char *msg;
msg = parse_opts(ac, av, NULL, NULL);
if (msg != NULL)
tst_brkm(TBROK, NULL, "OPTION PARSING ERROR - %s", msg);
setup();
for (lc = 0; TEST_LOOPING(lc); lc++) {
tst_count = 0;
writ = fopen(fnamebuf, "w");
if (writ == NULL)
tst_resm(TFAIL, "failed to fopen file for write");
rea = fopen(fnamebuf, "r");
if (rea == NULL)
tst_resm(TFAIL, "failed to fopen file for read");
fprintf(writ, "abcdefghijklmnopqrstuv");
fflush(writ);
sleep(1);
if ((getc(rea)) != 'a')
tst_resm(TFAIL, "getc from read side was confused");
forks = 0;
forkone:
++forks;
pid1 = fork();
if (pid1 != 0) {
tst_resm(TINFO, "parent forksval: %d", forks);
if ((pid1 != (-1)) && (forks < 2))
goto forkone;
else if (pid1 < 0)
tst_resm(TINFO, "Fork failed");
} else { /* child */
/*
* If first child close the file descriptor for the
* read stream
*/
if (forks == 1) {
if ((fclose(rea)) == -1) {
tst_resm(TFAIL, "error in first child"
" closing fildes");
}
_exit(0);
}
/*
* If second child attempt to read from the file
*/
else if (forks == 2) {
ch_r_stat = getc(rea);
tst_resm(TINFO, "second child got char: %c",
ch_r_stat);
if (ch_r_stat == 'b') {
tst_resm(TPASS, "Test passed in child"
"number %d", forks);
exit(0);
} else if (ch_r_stat == EOF) {
tst_resm(TFAIL, "Second child got "
"EOF");
exit(-1);
} else {
tst_resm(TFAIL, "test failed in child"
"no %d", forks);
exit(-1);
}
} else { /* end of second child */
tst_resm(TINFO, "forksnumber: %d", forks);
exit(3);
}
}
for (count = 0; count <= forks; count++) {
wait(&status);
tst_resm(TINFO, "exit status of wait "
" expected 0 got %d", status);
status >>= 8;
if (status == 0)
tst_resm(TPASS, "parent test PASSED");
else
tst_resm(TFAIL, "parent test FAILED");
}
tst_resm(TINFO, "Number of processes forked is %d", forks);
fclose(rea);
fclose(writ);
}
cleanup();
tst_exit();
}
/*
 * setup() - install the LTP signal handlers, create the temporary working
 * directory, and build the name of the test file: "fork07.<pid>" (the
 * "fork07." prefix is kept from the original fork07 test this one was
 * derived from; NOTE(review): consider renaming to "fork08." upstream).
 */
static void setup(void)
{
	tst_sig(FORK, DEF_HANDLER, cleanup);
	umask(0);
	TEST_PAUSE;
	tst_tmpdir();

	/*
	 * Bounded formatting: the original used sprintf/strcpy/strcat into
	 * fixed-size buffers; snprintf cannot overflow and always
	 * NUL-terminates.
	 */
	snprintf(pbuf, sizeof(pbuf), "%d", getpid());
	snprintf(fnamebuf, sizeof(fnamebuf), "fork07.%s", pbuf);
}
/*
 * cleanup() - perform the standard LTP teardown (TEST_CLEANUP reporting)
 * and remove the temporary directory created in setup().
 */
static void cleanup(void)
{
	TEST_CLEANUP;
	tst_rmdir();
}
|
heluxie/LTP
|
testcases/kernel/syscalls/fork/fork08.c
|
C
|
gpl-2.0
| 3,921
|
<?php
/**
* This file prints the Email Settings tab in the admin
*
 * @since 0.3.6
* @package IT_Exchange
*/
global $wp_version;
?>
<div class="wrap">
<?php
ITUtility::screen_icon( 'it-exchange' );
$this->print_general_settings_tabs();
do_action( 'it_exchange_general_settings_email_page_top' );
$form->start_form( $form_options, 'exchange-email-settings' );
do_action( 'it_exchange_general_settings_email_form_top' );
?>
<table class="form-table">
<?php do_action( 'it_exchange_general_settings_email_top' ); ?>
<tr valign="top">
<th scope="row"><strong><?php _e( 'Admin Notifications', 'it-l10n-ithemes-exchange' ); ?></strong></th>
<td></td>
</tr>
<tr valign="top">
<th scope="row"><label for="notification-email-address"><?php _e( 'Sales Notification Email Address', 'it-l10n-ithemes-exchange' ) ?></label></th>
<td>
<?php $form->add_text_box( 'notification-email-address', array( 'class' => 'large-text' ) ); ?>
<br /><span class="description"><?php _e( 'Enter the email address(es) that should receive a notification anytime a sale is made, comma separated', 'it-l10n-ithemes-exchange' ); ?></span>
</td>
</tr>
<tr valign="top">
<th scope="row"><label for="admin-email-address"><?php _e( 'Email Address', 'it-l10n-ithemes-exchange' ) ?></label></th>
<td>
<?php $form->add_text_box( 'admin-email-address', array( 'class' => 'normal-text' ) ); ?>
<br /><span class="description"><?php _e( 'Email address used for admin notification emails.', 'it-l10n-ithemes-exchange' ); ?></span>
</td>
</tr>
<tr valign="top">
<th scope="row"><label for="admin-email-name"><?php _e( 'Email Name', 'it-l10n-ithemes-exchange' ) ?></label></th>
<td>
<?php $form->add_text_box( 'admin-email-name', array( 'class' => 'normal-text' ) ); ?>
<br /><span class="description"><?php _e( 'Name used for account that sends admin notification emails.', 'it-l10n-ithemes-exchange' ); ?></span>
</td>
</tr>
<tr valign="top">
<th scope="row"><label for="admin-email-subject"><?php _e( 'Notification Subject Line', 'it-l10n-ithemes-exchange' ) ?></label></th>
<td>
<?php $form->add_text_box( 'admin-email-subject', array( 'class' => 'large-text' ) ); ?>
<br /><span class="description"><?php _e( 'Subject line used for admin notification emails.', 'it-l10n-ithemes-exchange' ); ?></span>
</td>
</tr>
<tr valign="top">
<th scope="row"><label for="admin-email-template"><?php _e( 'Notification Email Template', 'it-l10n-ithemes-exchange' ) ?></label></th>
<td>
<?php
if ( $wp_version >= 3.3 && function_exists( 'wp_editor' ) ) {
echo wp_editor( $settings['admin-email-template'], 'admin-email-template', array( 'textarea_name' => 'it_exchange_email_settings-admin-email-template', 'textarea_rows' => 10, 'textarea_cols' => 30, 'editor_class' => 'large-text' ) );
//We do this for some ITForm trickery... just to add receipt-email-template to the used inputs field
$form->get_text_area( 'admin-email-template', array( 'rows' => 10, 'cols' => 30, 'class' => 'large-text' ) );
} else {
$form->add_text_area( 'admin-email-template', array( 'rows' => 10, 'cols' => 30, 'class' => 'large-text' ) );
}
?>
<p class="description">
<?php
_e( 'Enter the email that is sent to administrator after a customer completes a successful purchase. HTML is accepted. Available shortcode functions:', 'it-l10n-ithemes-exchange' );
echo '<br />';
printf( __( 'You call these shortcode functions like this: %s', 'it-l10n-ithemes-exchange' ), '[it_exchange_email show=order_table option=purchase_message]' );
echo '<ul>';
echo '<li>download_list - ' . __( 'A list of download links for each download purchased', 'it-l10n-ithemes-exchange' ) . '</li>';
echo '<li>name - ' . __( "The buyer's first name", 'it-l10n-ithemes-exchange' ) . '</li>';
echo '<li>fullname - ' . __( "The buyer's full name, first and last", 'it-l10n-ithemes-exchange' ) . '</li>';
echo '<li>username - ' . __( "The buyer's username on the site, if they registered an account", 'it-l10n-ithemes-exchange' ) . '</li>';
echo '<li>order_table - ' . __( 'A table of the order details. Accepts "purchase_message" option.', 'it-l10n-ithemes-exchange' ) . '</li>';
echo '<li>purchase_date - ' . __( 'The date of the purchase', 'it-l10n-ithemes-exchange' ) . '</li>';
echo '<li>total - ' . __( 'The total price of the purchase', 'it-l10n-ithemes-exchange' ) . '</li>';
echo '<li>payment_id - ' . __( 'The unique ID number for this purchase', 'it-l10n-ithemes-exchange' ) . '</li>';
echo '<li>receipt_id - ' . __( 'The unique ID number for this transaction', 'it-l10n-ithemes-exchange' ) . '</li>';
echo '<li>payment_method - ' . __( 'The method of payment used for this purchase', 'it-l10n-ithemes-exchange' ) . '</li>';
echo '<li>shipping_address - ' . __( 'The shipping address for this product. Blank if shipping is not required. Also accepts "before" and "after" arguments.', 'it-l10n-ithemes-exchange' ) . '</li>';
echo '<li>billing_address - ' . __( 'The billing address for this product. Blank if shipping is not required. Also accepts "before" and "after" arguments.', 'it-l10n-ithemes-exchange' ) . '</li>';
echo '<li>sitename - ' . __( 'Your site name', 'it-l10n-ithemes-exchange' ) . '</li>';
echo '<li>receipt_link - ' . __( 'Adds a link so users can view their receipt directly on your website if they are unable to view it in the email correctly.', 'it-l10n-ithemes-exchange' ) . '</li>';
echo '<li>login_link - ' . __( 'Adds a link to the login page on your website.', 'it-l10n-ithemes-exchange' ) . '</li>';
echo '<li>account_link - ' . __( 'Adds a link to the customer\'s account page on your website.', 'it-l10n-ithemes-exchange' ) . '</li>';
do_action( 'it_exchange_email_template_tags_list' );
echo '</ul>';
?>
</p>
</td>
</tr>
<tr valign="top">
<th scope="row"><strong><?php _e( 'Customer Receipt Emails', 'it-l10n-ithemes-exchange' ); ?></strong></th>
<td></td>
</tr>
<tr valign="top">
<th scope="row"><label for="receipt-email-address"><?php _e( 'Email Address', 'it-l10n-ithemes-exchange' ) ?></label></th>
<td>
<?php $form->add_text_box( 'receipt-email-address', array( 'class' => 'normal-text' ) ); ?>
<br /><span class="description"><?php _e( 'Email address used for customer receipt emails.', 'it-l10n-ithemes-exchange' ); ?></span>
</td>
</tr>
<tr valign="top">
<th scope="row"><label for="receipt-email-name"><?php _e( 'Email Name', 'it-l10n-ithemes-exchange' ) ?></label></th>
<td>
<?php $form->add_text_box( 'receipt-email-name', array( 'class' => 'normal-text' ) ); ?>
<br /><span class="description"><?php _e( 'Name used for account that sends customer receipt emails.', 'it-l10n-ithemes-exchange' ); ?></span>
</td>
</tr>
<tr valign="top">
<th scope="row"><label for="receipt-email-subject"><?php _e( 'Subject Line', 'it-l10n-ithemes-exchange' ) ?></label></th>
<td>
<?php $form->add_text_box( 'receipt-email-subject', array( 'class' => 'large-text' ) ); ?>
<br /><span class="description"><?php _e( 'Subject line used for customer receipt emails.', 'it-l10n-ithemes-exchange' ); ?></span>
</td>
</tr>
<tr valign="top">
<th scope="row"><label for="receipt-email-template"><?php _e( 'Email Template', 'it-l10n-ithemes-exchange' ) ?></label></th>
<td>
<?php
if ( $wp_version >= 3.3 && function_exists( 'wp_editor' ) ) {
echo wp_editor( $settings['receipt-email-template'], 'receipt-email-template', array( 'textarea_name' => 'it_exchange_email_settings-receipt-email-template', 'textarea_rows' => 10, 'textarea_cols' => 30, 'editor_class' => 'large-text' ) );
//We do this for some ITForm trickery... just to add receipt-email-template to the used inputs field
$form->get_text_area( 'receipt-email-template', array( 'rows' => 10, 'cols' => 30, 'class' => 'large-text' ) );
} else {
$form->add_text_area( 'receipt-email-template', array( 'rows' => 10, 'cols' => 30, 'class' => 'large-text' ) );
}
?>
<p class="description">
<?php
_e( 'Enter the email that is sent to users after completing a successful purchase. HTML is accepted. Available shortcode functions:', 'it-l10n-ithemes-exchange' );
echo '<br />';
printf( __( 'You call these shortcode functions like this: %s', 'it-l10n-ithemes-exchange' ), '[it_exchange_email show=order_table option=purchase_message]' );
echo '<ul>';
echo '<li>download_list - ' . __( 'A list of download links for each download purchased', 'it-l10n-ithemes-exchange' ) . '</li>';
echo '<li>name - ' . __( "The buyer's first name", 'it-l10n-ithemes-exchange' ) . '</li>';
echo '<li>fullname - ' . __( "The buyer's full name, first and last", 'it-l10n-ithemes-exchange' ) . '</li>';
echo '<li>username - ' . __( "The buyer's username on the site, if they registered an account", 'it-l10n-ithemes-exchange' ) . '</li>';
echo '<li>order_table - ' . __( 'A table of the order details. Accept "purchase_message" option.', 'it-l10n-ithemes-exchange' ) . '</li>';
echo '<li>purchase_date - ' . __( 'The date of the purchase', 'it-l10n-ithemes-exchange' ) . '</li>';
echo '<li>total - ' . __( 'The total price of the purchase', 'it-l10n-ithemes-exchange' ) . '</li>';
echo '<li>payment_id - ' . __( 'The unique ID number for this purchase', 'it-l10n-ithemes-exchange' ) . '</li>';
echo '<li>receipt_id - ' . __( 'The unique ID number for this transaction', 'it-l10n-ithemes-exchange' ) . '</li>';
echo '<li>payment_method - ' . __( 'The method of payment used for this purchase', 'it-l10n-ithemes-exchange' ) . '</li>';
echo '<li>shipping_address - ' . __( 'The shipping address for this product. Blank if shipping is not required. Also accepts "before" and "after" arguments.', 'it-l10n-ithemes-exchange' ) . '</li>';
echo '<li>billing_address - ' . __( 'The billing address for this product. Blank if shipping is not required. Also accepts "before" and "after" arguments.', 'it-l10n-ithemes-exchange' ) . '</li>';
echo '<li>sitename - ' . __( 'Your site name', 'it-l10n-ithemes-exchange' ) . '</li>';
echo '<li>receipt_link - ' . __( 'Adds a link so users can view their receipt directly on your website if they are unable to view it in the email correctly.', 'it-l10n-ithemes-exchange' ) . '</li>';
echo '<li>login_link - ' . __( 'Adds a link to the login page on your website.', 'it-l10n-ithemes-exchange' ) . '</li>';
echo '<li>account_link - ' . __( 'Adds a link to the customer\'s account page on your website.', 'it-l10n-ithemes-exchange' ) . '</li>';
do_action( 'it_exchange_email_template_tags_list' );
echo '</ul>';
?>
</p>
</td>
</tr>
<?php do_action( 'it_exchange_general_settings_email_table_bottom' ); ?>
</table>
<?php wp_nonce_field( 'save-email-settings', 'exchange-email-settings' ); ?>
<p class="submit"><input type="submit" value="<?php _e( 'Save Changes', 'it-l10n-ithemes-exchange' ); ?>" class="button button-primary" /></p>
<?php
do_action( 'it_exchange_general_settings_email_form_bottom' );
$form->end_form();
do_action( 'it_exchange_general_settings_email_page_bottom' );
?>
</div>
|
aukrainskii/SNDW
|
wp-content/plugins/ithemes-exchange/lib/admin/views/admin-email-settings.php
|
PHP
|
gpl-2.0
| 11,501
|
/*
* Copyright (C) 2011 Department of Robotics Brain and Cognitive Sciences - Istituto Italiano di Tecnologia
* Authors: Vadim Tikhanoff
* email: vadim.tikhanoff@iit.it
* website: www.robotcub.org
* Permission is granted to copy, distribute, and/or modify this program
* under the terms of the GNU General Public License, version 2 or any
* later version published by the Free Software Foundation.
*
* A copy of the license can be found at
 * http://www.robotcub.org/icub/license/gpl.txt
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
* Public License for more details
*/
#include <cv.h>
#include <highgui.h>
#include <yarp/os/RateThread.h>
#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
#include <yarp/os/all.h>
#define DENSE 1
using namespace cv;
/**
* \ingroup StereoVisionLib
*
* The base class defining the 2D optical flow.
* It computes the 2D motion field in the image.
*/
class OpticalFlowThread : public yarp::os::RateThread
{
private:
    cv::Mat optFlow;    // most recently computed flow field (retrieved via getOptFlow)
    cv::Mat leftPrev;   // previous left-camera frame of the pair (set by setImages)
    cv::Mat leftNext;   // next/current left-camera frame of the pair
    bool done;          // NOTE(review): presumably "computation finished" flag -- confirm in the .cpp
    bool work;          // NOTE(review): presumably "new image pair pending" flag -- confirm in the .cpp
    bool dense;         // selects dense vs. sparse flow (see setFlow and the DENSE macro)

    // Compute sparse optical flow between two frames, writing the result into optFlow.
    void computeFlowSparse(IplImage* previous, IplImage* current, Mat &optFlow);
public:
    // rf carries the module configuration (usage defined in the .cpp).
    OpticalFlowThread(yarp::os::ResourceFinder &rf);
    ~OpticalFlowThread() {};

    // Supply the image pair to be processed by the next flow computation.
    void setImages(cv::Mat &_leftPrev, cv::Mat &_leftNext);
    // Retrieve the most recent flow field into _optFlow.
    void getOptFlow(cv::Mat &_optFlow);
    // Select the flow algorithm (e.g. DENSE for the dense variant).
    void setFlow(int flowType);
    // Poll whether the pending computation has completed.
    bool checkDone();

    // yarp::os::RateThread interface.
    bool threadInit();
    void threadRelease();
    void run();
    void onStop(void);
};
|
GiuliaP/stereo-vision
|
lib/include/iCub/stereoVision/opticalFlowThread.h
|
C
|
gpl-2.0
| 1,737
|
<?php
/**
* Magento
*
* NOTICE OF LICENSE
*
* This source file is subject to the Open Software License (OSL 3.0)
* that is bundled with this package in the file LICENSE.txt.
* It is also available through the world-wide-web at this URL:
* http://opensource.org/licenses/osl-3.0.php
* If you did not receive a copy of the license and are unable to
* obtain it through the world-wide-web, please send an email
* to license@magentocommerce.com so we can send you a copy immediately.
*
* DISCLAIMER
*
* Do not edit or add to this file if you wish to upgrade Magento to newer
* versions in the future. If you wish to customize Magento for your
* needs please refer to http://www.magentocommerce.com for more information.
*
* @category Mage
* @package Mage_Catalog
* @copyright Copyright (c) 2009 Irubin Consulting Inc. DBA Varien (http://www.varien.com)
* @license http://opensource.org/licenses/osl-3.0.php Open Software License (OSL 3.0)
*/
/**
* Catalog Product Flat Indexer Model
*
* @category Mage
* @package Mage_Catalog
* @author Magento Core Team <core@magentocommerce.com>
*/
class Mage_Catalog_Model_Product_Flat_Indexer extends Mage_Core_Model_Abstract
{
    /**
     * Bind this model to the catalog product flat indexer resource model.
     */
    protected function _construct()
    {
        $this->_init('catalog/product_flat_indexer');
    }

    /**
     * Retrieve the indexer resource instance.
     *
     * @return Mage_Catalog_Model_Resource_Eav_Mysql4_Product_Flat_Indexer
     */
    protected function _getResource()
    {
        return parent::_getResource();
    }

    /**
     * Rebuild the Catalog Product Flat data.
     *
     * @param mixed $store store to rebuild, or null for all
     * @return Mage_Catalog_Model_Product_Flat_Indexer
     */
    public function rebuild($store = null)
    {
        $this->_getResource()->rebuild($store);

        return $this;
    }

    /**
     * Update a single attribute's data in the flat table.
     * With a null $store the update is repeated for every store view.
     *
     * @param string $attributeCode
     * @param int $store
     * @return Mage_Catalog_Model_Product_Flat_Indexer
     */
    public function updateAttribute($attributeCode, $store = null)
    {
        if (null === $store) {
            foreach (Mage::app()->getStores() as $eachStore) {
                $this->updateAttribute($attributeCode, $eachStore->getId());
            }

            return $this;
        }

        $resource = $this->_getResource();
        $resource->prepareFlatTable($store);
        $attribute = $resource->getAttribute($attributeCode);
        $resource->updateAttribute($attribute, $store);
        $resource->updateChildrenDataFromParent($store);

        return $this;
    }

    /**
     * Prepare the flat-table data storage for catalog product data.
     * With a null $store the preparation runs for every store view.
     *
     * @param int $store
     * @return Mage_Catalog_Model_Product_Flat_Indexer
     */
    public function prepareDataStorage($store = null)
    {
        if (null === $store) {
            foreach (Mage::app()->getStores() as $eachStore) {
                $this->prepareDataStorage($eachStore->getId());
            }

            return $this;
        }

        $this->_getResource()->prepareFlatTable($store);

        return $this;
    }

    /**
     * Update event-observer attributes and product relations in the flat table.
     * With a null $store the update runs for every store view.
     *
     * @param int $store
     * @return Mage_Catalog_Model_Product_Flat_Indexer
     */
    public function updateEventAttributes($store = null)
    {
        if (null === $store) {
            foreach (Mage::app()->getStores() as $eachStore) {
                $this->updateEventAttributes($eachStore->getId());
            }

            return $this;
        }

        $resource = $this->_getResource();
        $resource->prepareFlatTable($store);
        $resource->updateEventAttributes($store);
        $resource->updateRelationProducts($store);

        return $this;
    }

    /**
     * Reflect a product status change in the flat table: enabled products are
     * (re-)written, any other status removes the product from the table.
     * With a null $store the change is applied to every store view.
     *
     * @param int $productId
     * @param int $status
     * @param int $store
     * @return Mage_Catalog_Model_Product_Flat_Indexer
     */
    public function updateProductStatus($productId, $status, $store = null)
    {
        if (null === $store) {
            foreach (Mage::app()->getStores() as $eachStore) {
                $this->updateProductStatus($productId, $status, $eachStore->getId());
            }

            return $this;
        }

        if ($status != Mage_Catalog_Model_Product_Status::STATUS_ENABLED) {
            $this->_getResource()->removeProduct($productId, $store);

            return $this;
        }

        $this->_getResource()->updateProduct($productId, $store);
        $this->_getResource()->updateChildrenDataFromParent($store, $productId);

        return $this;
    }

    /**
     * Update Catalog Product Flat data for the given product(s): removes the
     * stale rows, rewrites them, then refreshes relation data.
     * With a null $store the update runs for every store view.
     *
     * @param int|array $productIds
     * @param int $store
     * @return Mage_Catalog_Model_Product_Flat_Indexer
     */
    public function updateProduct($productIds, $store = null)
    {
        if (null === $store) {
            foreach (Mage::app()->getStores() as $eachStore) {
                $this->updateProduct($productIds, $eachStore->getId());
            }

            return $this;
        }

        $resource = $this->_getResource();
        $resource->removeProduct($productIds, $store);
        $resource->updateProduct($productIds, $store);
        $resource->updateRelationProducts($store, $productIds);

        return $this;
    }

    /**
     * Save Catalog Product Flat data for the given product(s): removes the
     * stale rows, saves them anew, then refreshes relation data.
     * With a null $store the save runs for every store view.
     *
     * @param int|array $productIds
     * @param int $store
     * @return Mage_Catalog_Model_Product_Flat_Indexer
     */
    public function saveProduct($productIds, $store = null)
    {
        if (null === $store) {
            foreach (Mage::app()->getStores() as $eachStore) {
                $this->saveProduct($productIds, $eachStore->getId());
            }

            return $this;
        }

        $resource = $this->_getResource();
        $resource->removeProduct($productIds, $store);
        $resource->saveProduct($productIds, $store);
        $resource->updateRelationProducts($store, $productIds);

        return $this;
    }

    /**
     * Remove the given product(s) from the flat table.
     * With a null $store the removal runs for every store view.
     *
     * @param int|array $productIds
     * @param int $store
     * @return Mage_Catalog_Model_Product_Flat_Indexer
     */
    public function removeProduct($productIds, $store = null)
    {
        if (null === $store) {
            foreach (Mage::app()->getStores() as $eachStore) {
                $this->removeProduct($productIds, $eachStore->getId());
            }

            return $this;
        }

        $this->_getResource()->removeProduct($productIds, $store);

        return $this;
    }

    /**
     * Drop the flat table belonging to a deleted store.
     *
     * @param int $store
     * @return Mage_Catalog_Model_Product_Flat_Indexer
     */
    public function deleteStore($store)
    {
        $this->_getResource()->deleteFlatTable($store);

        return $this;
    }
}
|
tonio-44/tikflak
|
shop/app/code/core/Mage/Catalog/Model/Product/Flat/Indexer.php
|
PHP
|
gpl-2.0
| 6,822
|
#ifndef __NET_IP_TUNNELS_H
#define __NET_IP_TUNNELS_H 1
#include <linux/if_tunnel.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/socket.h>
#include <linux/types.h>
#include <linux/u64_stats_sync.h>
#include <linux/bitops.h>
#include <net/dsfield.h>
#include <net/gro_cells.h>
#include <net/inet_ecn.h>
#include <net/netns/generic.h>
#include <net/rtnetlink.h>
#include <net/lwtunnel.h>
#include <net/dst_cache.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <net/ipv6.h>
#include <net/ip6_fib.h>
#include <net/ip6_route.h>
#endif
/* Keep error state on tunnel for 30 sec */
#define IPTUNNEL_ERR_TIMEO (30*HZ)
/* Used to memset ip_tunnel padding. */
#define IP_TUNNEL_KEY_SIZE offsetofend(struct ip_tunnel_key, tp_dst)
/* Used to memset ipv4 address padding. */
#define IP_TUNNEL_KEY_IPV4_PAD offsetofend(struct ip_tunnel_key, u.ipv4.dst)
#define IP_TUNNEL_KEY_IPV4_PAD_LEN \
(FIELD_SIZEOF(struct ip_tunnel_key, u) - \
FIELD_SIZEOF(struct ip_tunnel_key, u.ipv4))
struct ip_tunnel_key {
__be64 tun_id;
union {
struct {
__be32 src;
__be32 dst;
} ipv4;
struct {
struct in6_addr src;
struct in6_addr dst;
} ipv6;
} u;
__be16 tun_flags;
u8 tos; /* TOS for IPv4, TC for IPv6 */
u8 ttl; /* TTL for IPv4, HL for IPv6 */
__be32 label; /* Flow Label for IPv6 */
__be16 tp_src;
__be16 tp_dst;
};
/* Flags for ip_tunnel_info mode. */
#define IP_TUNNEL_INFO_TX 0x01 /* represents tx tunnel parameters */
#define IP_TUNNEL_INFO_IPV6 0x02 /* key contains IPv6 addresses */
/* Maximum tunnel options length. */
#define IP_TUNNEL_OPTS_MAX \
GENMASK((FIELD_SIZEOF(struct ip_tunnel_info, \
options_len) * BITS_PER_BYTE) - 1, 0)
struct ip_tunnel_info {
struct ip_tunnel_key key;
#ifdef CONFIG_DST_CACHE
struct dst_cache dst_cache;
#endif
u8 options_len;
u8 mode;
};
/* 6rd prefix/relay information */
#ifdef CONFIG_IPV6_SIT_6RD
struct ip_tunnel_6rd_parm {
struct in6_addr prefix;
__be32 relay_prefix;
u16 prefixlen;
u16 relay_prefixlen;
};
#endif
struct ip_tunnel_encap {
u16 type;
u16 flags;
__be16 sport;
__be16 dport;
};
struct ip_tunnel_prl_entry {
struct ip_tunnel_prl_entry __rcu *next;
__be32 addr;
u16 flags;
struct rcu_head rcu_head;
};
struct metadata_dst;
struct ip_tunnel {
struct ip_tunnel __rcu *next;
struct hlist_node hash_node;
struct net_device *dev;
struct net *net; /* netns for packet i/o */
unsigned long err_time; /* Time when the last ICMP error
* arrived */
int err_count; /* Number of arrived ICMP errors */
/* These four fields used only by GRE */
u32 i_seqno; /* The last seen seqno */
u32 o_seqno; /* The last output seqno */
int tun_hlen; /* Precalculated header length */
struct dst_cache dst_cache;
struct ip_tunnel_parm parms;
int mlink;
int encap_hlen; /* Encap header length (FOU,GUE) */
int hlen; /* tun_hlen + encap_hlen */
struct ip_tunnel_encap encap;
/* for SIT */
#ifdef CONFIG_IPV6_SIT_6RD
struct ip_tunnel_6rd_parm ip6rd;
#endif
struct ip_tunnel_prl_entry __rcu *prl; /* potential router list */
unsigned int prl_count; /* # of entries in PRL */
unsigned int ip_tnl_net_id;
struct gro_cells gro_cells;
bool collect_md;
bool ignore_df;
};
#define TUNNEL_CSUM __cpu_to_be16(0x01)
#define TUNNEL_ROUTING __cpu_to_be16(0x02)
#define TUNNEL_KEY __cpu_to_be16(0x04)
#define TUNNEL_SEQ __cpu_to_be16(0x08)
#define TUNNEL_STRICT __cpu_to_be16(0x10)
#define TUNNEL_REC __cpu_to_be16(0x20)
#define TUNNEL_VERSION __cpu_to_be16(0x40)
#define TUNNEL_NO_KEY __cpu_to_be16(0x80)
#define TUNNEL_DONT_FRAGMENT __cpu_to_be16(0x0100)
#define TUNNEL_OAM __cpu_to_be16(0x0200)
#define TUNNEL_CRIT_OPT __cpu_to_be16(0x0400)
#define TUNNEL_GENEVE_OPT __cpu_to_be16(0x0800)
#define TUNNEL_VXLAN_OPT __cpu_to_be16(0x1000)
#define TUNNEL_NOCACHE __cpu_to_be16(0x2000)
#define TUNNEL_OPTIONS_PRESENT (TUNNEL_GENEVE_OPT | TUNNEL_VXLAN_OPT)
struct tnl_ptk_info {
__be16 flags;
__be16 proto;
__be32 key;
__be32 seq;
int hdr_len;
};
#define PACKET_RCVD 0
#define PACKET_REJECT 1
#define PACKET_NEXT 2
#define IP_TNL_HASH_BITS 7
#define IP_TNL_HASH_SIZE (1 << IP_TNL_HASH_BITS)
struct ip_tunnel_net {
struct net_device *fb_tunnel_dev;
struct hlist_head tunnels[IP_TNL_HASH_SIZE];
struct ip_tunnel __rcu *collect_md_tun;
};
/*
 * Initialize an IPv4 tunnel key.
 *
 * Besides filling in the address pair, TOS/TTL/label, ports, id and flags,
 * this explicitly zeroes both the IPv6 tail of the address union and any
 * trailing struct padding, so no indeterminate bytes remain in the key
 * (presumably so keys can be compared/hashed bytewise -- confirm at callers).
 */
static inline void ip_tunnel_key_init(struct ip_tunnel_key *key,
				      __be32 saddr, __be32 daddr,
				      u8 tos, u8 ttl, __be32 label,
				      __be16 tp_src, __be16 tp_dst,
				      __be64 tun_id, __be16 tun_flags)
{
	key->tun_id = tun_id;
	key->u.ipv4.src = saddr;
	key->u.ipv4.dst = daddr;
	/* Zero the part of the union beyond the IPv4 addresses. */
	memset((unsigned char *)key + IP_TUNNEL_KEY_IPV4_PAD,
	       0, IP_TUNNEL_KEY_IPV4_PAD_LEN);
	key->tos = tos;
	key->ttl = ttl;
	key->label = label;
	key->tun_flags = tun_flags;
	/* For the tunnel types on the top of IPsec, the tp_src and tp_dst of
	 * the upper tunnel are used.
	 * E.g: GRE over IPSEC, the tp_src and tp_dst are zero.
	 */
	key->tp_src = tp_src;
	key->tp_dst = tp_dst;
	/* Clear struct padding. */
	if (sizeof(*key) != IP_TUNNEL_KEY_SIZE)
		memset((unsigned char *)key + IP_TUNNEL_KEY_SIZE,
		       0, sizeof(*key) - IP_TUNNEL_KEY_SIZE);
}
/*
 * Decide whether the cached dst may be reused for this skb.
 * Marked skbs (skb->mark set) and tunnels with TUNNEL_NOCACHE in their key
 * flags must do a fresh route lookup; everything else may use the cache.
 */
static inline bool
ip_tunnel_dst_cache_usable(const struct sk_buff *skb,
			   const struct ip_tunnel_info *info)
{
	if (skb->mark)
		return false;
	if (!info)
		return true;
	if (info->key.tun_flags & TUNNEL_NOCACHE)
		return false;

	return true;
}
static inline unsigned short ip_tunnel_info_af(const struct ip_tunnel_info
*tun_info)
{
return tun_info->mode & IP_TUNNEL_INFO_IPV6 ? AF_INET6 : AF_INET;
}
/* Widen a 32-bit tunnel key into a __be64 tunnel id, placing the key in the
 * least-significant 32 bits when viewed in network byte order (the inverse
 * of tunnel_id_to_key32()). */
static inline __be64 key32_to_tunnel_id(__be32 key)
{
#ifdef __BIG_ENDIAN
	return (__force __be64)key;
#else
	return (__force __be64)((__force u64)key << 32);
#endif
}
/* Returns the least-significant 32 bits of a __be64 tunnel id (the inverse
 * of key32_to_tunnel_id()); the result stays in network byte order. */
static inline __be32 tunnel_id_to_key32(__be64 tun_id)
{
#ifdef __BIG_ENDIAN
	return (__force __be32)tun_id;
#else
	return (__force __be32)((__force u64)tun_id >> 32);
#endif
}
#ifdef CONFIG_INET
int ip_tunnel_init(struct net_device *dev);
void ip_tunnel_uninit(struct net_device *dev);
void ip_tunnel_dellink(struct net_device *dev, struct list_head *head);
struct net *ip_tunnel_get_link_net(const struct net_device *dev);
int ip_tunnel_get_iflink(const struct net_device *dev);
int ip_tunnel_init_net(struct net *net, unsigned int ip_tnl_net_id,
struct rtnl_link_ops *ops, char *devname);
void ip_tunnel_delete_net(struct ip_tunnel_net *itn, struct rtnl_link_ops *ops);
void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
const struct iphdr *tnl_params, const u8 protocol);
void ip_md_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
const u8 proto);
int ip_tunnel_ioctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd);
int __ip_tunnel_change_mtu(struct net_device *dev, int new_mtu, bool strict);
int ip_tunnel_change_mtu(struct net_device *dev, int new_mtu);
void ip_tunnel_get_stats64(struct net_device *dev,
struct rtnl_link_stats64 *tot);
struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
int link, __be16 flags,
__be32 remote, __be32 local,
__be32 key);
int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb,
const struct tnl_ptk_info *tpi, struct metadata_dst *tun_dst,
bool log_ecn_error);
int ip_tunnel_changelink(struct net_device *dev, struct nlattr *tb[],
struct ip_tunnel_parm *p);
int ip_tunnel_newlink(struct net_device *dev, struct nlattr *tb[],
struct ip_tunnel_parm *p);
void ip_tunnel_setup(struct net_device *dev, unsigned int net_id);
struct ip_tunnel_encap_ops {
size_t (*encap_hlen)(struct ip_tunnel_encap *e);
int (*build_header)(struct sk_buff *skb, struct ip_tunnel_encap *e,
u8 *protocol, struct flowi4 *fl4);
};
#define MAX_IPTUN_ENCAP_OPS 8
extern const struct ip_tunnel_encap_ops __rcu *
iptun_encaps[MAX_IPTUN_ENCAP_OPS];
int ip_tunnel_encap_add_ops(const struct ip_tunnel_encap_ops *op,
unsigned int num);
int ip_tunnel_encap_del_ops(const struct ip_tunnel_encap_ops *op,
unsigned int num);
int ip_tunnel_encap_setup(struct ip_tunnel *t,
struct ip_tunnel_encap *ipencap);
/*
 * Return the extra header length added by the configured encapsulation
 * (looked up in the iptun_encaps registry, e.g. FOU/GUE): 0 when no
 * encapsulation is configured, -EINVAL for an out-of-range or
 * unregistered type.
 */
static inline int ip_encap_hlen(struct ip_tunnel_encap *e)
{
	const struct ip_tunnel_encap_ops *ops;
	int hlen = -EINVAL;

	if (e->type == TUNNEL_ENCAP_NONE)
		return 0;

	if (e->type >= MAX_IPTUN_ENCAP_OPS)
		return -EINVAL;

	/* Registry entries are RCU-protected. */
	rcu_read_lock();
	ops = rcu_dereference(iptun_encaps[e->type]);
	if (likely(ops && ops->encap_hlen))
		hlen = ops->encap_hlen(e);
	rcu_read_unlock();

	return hlen;
}
/*
 * Build the encapsulation header for @skb according to the tunnel's
 * configured encap type, delegating to the registered
 * ip_tunnel_encap_ops->build_header().  Returns 0 when no encapsulation is
 * configured, the build_header() result on success, or -EINVAL for an
 * out-of-range or unregistered type.  @protocol and @fl4 are passed through
 * to build_header(), presumably so the handler can adjust the outer
 * protocol and flow -- see the individual encap implementations.
 */
static inline int ip_tunnel_encap(struct sk_buff *skb, struct ip_tunnel *t,
				  u8 *protocol, struct flowi4 *fl4)
{
	const struct ip_tunnel_encap_ops *ops;
	int ret = -EINVAL;

	if (t->encap.type == TUNNEL_ENCAP_NONE)
		return 0;

	if (t->encap.type >= MAX_IPTUN_ENCAP_OPS)
		return -EINVAL;

	/* Registry entries are RCU-protected. */
	rcu_read_lock();
	ops = rcu_dereference(iptun_encaps[t->encap.type]);
	if (likely(ops && ops->build_header))
		ret = ops->build_header(skb, &t->encap, protocol, fl4);
	rcu_read_unlock();

	return ret;
}
/* Extract the DS field (IPv4 TOS / IPv6 traffic class) from the inner
 * packet.  @iph points at the inner network header and is reinterpreted as
 * an IPv6 header when skb->protocol says the payload is IPv6; any other
 * protocol yields 0. */
static inline u8 ip_tunnel_get_dsfield(const struct iphdr *iph,
				       const struct sk_buff *skb)
{
	if (skb->protocol == htons(ETH_P_IP))
		return iph->tos;
	else if (skb->protocol == htons(ETH_P_IPV6))
		return ipv6_get_dsfield((const struct ipv6hdr *)iph);
	else
		return 0;
}
/* Propagate ECN bits out: combine the requested outer @tos with the inner
 * packet's DS field via INET_ECN_encapsulate() to produce the outer
 * header's DS field. */
static inline u8 ip_tunnel_ecn_encap(u8 tos, const struct iphdr *iph,
				     const struct sk_buff *skb)
{
	u8 inner = ip_tunnel_get_dsfield(iph, skb);

	return INET_ECN_encapsulate(tos, inner);
}
int __iptunnel_pull_header(struct sk_buff *skb, int hdr_len,
__be16 inner_proto, bool raw_proto, bool xnet);
static inline int iptunnel_pull_header(struct sk_buff *skb, int hdr_len,
__be16 inner_proto, bool xnet)
{
return __iptunnel_pull_header(skb, hdr_len, inner_proto, false, xnet);
}
void iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb,
__be32 src, __be32 dst, u8 proto,
u8 tos, u8 ttl, __be16 df, bool xnet);
struct metadata_dst *iptunnel_metadata_reply(struct metadata_dst *md,
gfp_t flags);
int iptunnel_handle_offloads(struct sk_buff *skb, int gso_type_mask);
/*
 * Drop encapsulation-level offload state after the outer header has been
 * removed: clears the encap-related GSO type bits and the encapsulation
 * flag.  For GSO skbs the head is uncloned first (presumably to get a
 * private copy of the shared info before editing gso_type); returns the
 * skb_unclone() error on failure, 0 otherwise.
 */
static inline int iptunnel_pull_offloads(struct sk_buff *skb)
{
	if (skb_is_gso(skb)) {
		int err;

		err = skb_unclone(skb, GFP_ATOMIC);
		if (unlikely(err))
			return err;
		skb_shinfo(skb)->gso_type &= ~(NETIF_F_GSO_ENCAP_ALL >>
					       NETIF_F_GSO_SHIFT);
	}

	skb->encapsulation = 0;
	return 0;
}
/*
 * Account one transmitted tunnel packet on @dev:
 *   pkt_len > 0  -> tx_bytes/tx_packets (per-cpu, u64_stats protected)
 *   pkt_len < 0  -> tx_errors and tx_aborted_errors
 *   pkt_len == 0 -> tx_dropped
 */
static inline void iptunnel_xmit_stats(struct net_device *dev, int pkt_len)
{
	if (pkt_len > 0) {
		struct pcpu_sw_netstats *tstats = get_cpu_ptr(dev->tstats);

		u64_stats_update_begin(&tstats->syncp);
		tstats->tx_bytes += pkt_len;
		tstats->tx_packets++;
		u64_stats_update_end(&tstats->syncp);
		put_cpu_ptr(tstats);
	} else {
		struct net_device_stats *err_stats = &dev->stats;

		if (pkt_len < 0) {
			err_stats->tx_errors++;
			err_stats->tx_aborted_errors++;
		} else {
			err_stats->tx_dropped++;
		}
	}
}
/* Tunnel options are stored immediately after the ip_tunnel_info struct;
 * return a pointer to that trailing area. */
static inline void *ip_tunnel_info_opts(struct ip_tunnel_info *info)
{
	return info + 1;
}
/* Copy @info's trailing options area (options_len bytes) into @to; the
 * caller must provide a buffer of at least info->options_len bytes. */
static inline void ip_tunnel_info_opts_get(void *to,
					   const struct ip_tunnel_info *info)
{
	memcpy(to, info + 1, info->options_len);
}
/* Store @len bytes from @from as @info's trailing options and record the
 * length; the caller must have allocated @info with room for @len extra
 * bytes (bounded by IP_TUNNEL_OPTS_MAX). */
static inline void ip_tunnel_info_opts_set(struct ip_tunnel_info *info,
					   const void *from, int len)
{
	memcpy(ip_tunnel_info_opts(info), from, len);
	info->options_len = len;
}
static inline struct ip_tunnel_info *lwt_tun_info(struct lwtunnel_state *lwtstate)
{
return (struct ip_tunnel_info *)lwtstate->data;
}
extern struct static_key ip_tunnel_metadata_cnt;
/* Returns > 0 if metadata should be collected */
static inline int ip_tunnel_collect_metadata(void)
{
return static_key_false(&ip_tunnel_metadata_cnt);
}
void __init ip_tunnel_core_init(void);
void ip_tunnel_need_metadata(void);
void ip_tunnel_unneed_metadata(void);
#else /* CONFIG_INET */
static inline struct ip_tunnel_info *lwt_tun_info(struct lwtunnel_state *lwtstate)
{
return NULL;
}
static inline void ip_tunnel_need_metadata(void)
{
}
static inline void ip_tunnel_unneed_metadata(void)
{
}
static inline void ip_tunnel_info_opts_get(void *to,
const struct ip_tunnel_info *info)
{
}
static inline void ip_tunnel_info_opts_set(struct ip_tunnel_info *info,
const void *from, int len)
{
info->options_len = 0;
}
#endif /* CONFIG_INET */
#endif /* __NET_IP_TUNNELS_H */
|
aospan/linux-next-bcm4708-edgecore-ecw7220-l
|
include/net/ip_tunnels.h
|
C
|
gpl-2.0
| 12,552
|
/*
* Copyright (C) 2008-2012 TrinityCore <http://www.trinitycore.org/>
* Copyright (C) 2006-2009 ScriptDev2 <https://scriptdev2.svn.sourceforge.net/>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along
* with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/* ScriptData
SDName: Boss_Nefarian
SD%Complete: 80
SDComment: Some issues with class calls effecting more than one class
SDCategory: Blackwing Lair
EndScriptData */
#include "ScriptPCH.h"
#define SAY_AGGRO -1469007
#define SAY_XHEALTH -1469008
#define SAY_SHADOWFLAME -1469009
#define SAY_RAISE_SKELETONS -1469010
#define SAY_SLAY -1469011
#define SAY_DEATH -1469012
#define SAY_MAGE -1469013
#define SAY_WARRIOR -1469014
#define SAY_DRUID -1469015
#define SAY_PRIEST -1469016
#define SAY_PALADIN -1469017
#define SAY_SHAMAN -1469018
#define SAY_WARLOCK -1469019
#define SAY_HUNTER -1469020
#define SAY_ROGUE -1469021
#define SPELL_SHADOWFLAME_INITIAL 22972
#define SPELL_SHADOWFLAME 22539
#define SPELL_BELLOWINGROAR 22686
#define SPELL_VEILOFSHADOW 7068
#define SPELL_CLEAVE 20691
#define SPELL_TAILLASH 23364
#define SPELL_BONECONTRUST 23363 //23362, 23361
#define SPELL_MAGE 23410 //wild magic
#define SPELL_WARRIOR 23397 //beserk
#define SPELL_DRUID 23398 // cat form
#define SPELL_PRIEST 23401 // corrupted healing
#define SPELL_PALADIN 23418 //syphon blessing
#define SPELL_SHAMAN 23425 //totems
#define SPELL_WARLOCK 23427 //infernals
#define SPELL_HUNTER 23436 //bow broke
#define SPELL_ROGUE 23414 //Paralise
class boss_nefarian : public CreatureScript
{
public:
    boss_nefarian() : CreatureScript("boss_nefarian") { }

    // Hand the core a fresh AI instance for each spawned Nefarian.
    CreatureAI* GetAI(Creature* creature) const
    {
        return new boss_nefarianAI (creature);
    }

    struct boss_nefarianAI : public ScriptedAI
    {
        boss_nefarianAI(Creature* c) : ScriptedAI(c) {}

        // Ability cooldowns, in milliseconds (counted down in UpdateAI).
        uint32 ShadowFlame_Timer;
        uint32 BellowingRoar_Timer;
        uint32 VeilOfShadow_Timer;
        uint32 Cleave_Timer;
        uint32 TailLash_Timer;
        uint32 ClassCall_Timer;
        bool Phase3;            // latched once health drops below 20%
        uint32 DespawnTimer;    // interval of the out-of-combat despawn check

        void Reset()
        {
            ShadowFlame_Timer = 12000; //These times are probably wrong
            BellowingRoar_Timer = 30000;
            VeilOfShadow_Timer = 15000;
            Cleave_Timer = 7000;
            TailLash_Timer = 10000;
            ClassCall_Timer = 35000; //35-40 seconds
            Phase3 = false;
            DespawnTimer = 5000;
        }

        // Yell on a player kill only ~1 time in 5 (rand()%5 is non-zero 4/5 of the time).
        void KilledUnit(Unit* Victim)
        {
            if (rand()%5)
                return;

            DoScriptText(SAY_SLAY, me, Victim);
        }

        void JustDied(Unit* /*Killer*/)
        {
            DoScriptText(SAY_DEATH, me);
        }

        // On aggro: random intro yell, opening shadowflame on the puller,
        // then pull the whole zone into combat.
        void EnterCombat(Unit* who)
        {
            DoScriptText(RAND(SAY_XHEALTH, SAY_AGGRO, SAY_SHADOWFLAME), me);
            DoCast(who, SPELL_SHADOWFLAME_INITIAL);
            DoZoneInCombat();
        }

        void UpdateAI(const uint32 diff)
        {
            // Every 5s: if no valid victim remains (evade), despawn Nefarian.
            if (DespawnTimer <= diff)
            {
                if (!UpdateVictim())
                    me->DespawnOrUnsummon();
                DespawnTimer = 5000;
            } else DespawnTimer -= diff;

            if (!UpdateVictim())
                return;

            //ShadowFlame_Timer
            if (ShadowFlame_Timer <= diff)
            {
                DoCast(me->getVictim(), SPELL_SHADOWFLAME);
                ShadowFlame_Timer = 12000;
            } else ShadowFlame_Timer -= diff;

            //BellowingRoar_Timer
            if (BellowingRoar_Timer <= diff)
            {
                DoCast(me->getVictim(), SPELL_BELLOWINGROAR);
                BellowingRoar_Timer = 30000;
            } else BellowingRoar_Timer -= diff;

            //VeilOfShadow_Timer
            if (VeilOfShadow_Timer <= diff)
            {
                DoCast(me->getVictim(), SPELL_VEILOFSHADOW);
                VeilOfShadow_Timer = 15000;
            } else VeilOfShadow_Timer -= diff;

            //Cleave_Timer
            if (Cleave_Timer <= diff)
            {
                DoCast(me->getVictim(), SPELL_CLEAVE);
                Cleave_Timer = 7000;
            } else Cleave_Timer -= diff;

            //TailLash_Timer
            if (TailLash_Timer <= diff)
            {
                //Cast NYI since we need a better check for behind target
                //DoCast(me->getVictim(), SPELL_TAILLASH);
                TailLash_Timer = 10000;
            } else TailLash_Timer -= diff;

            //ClassCall_Timer
            if (ClassCall_Timer <= diff)
            {
                //Cast a random class call.
                //On official it is based on what classes are currently on the hostile list,
                //but we can't do that yet so just randomly call one.
                switch (urand(0, 8))
                {
                    case 0:
                        DoScriptText(SAY_MAGE, me);
                        DoCast(me, SPELL_MAGE);
                        break;
                    case 1:
                        DoScriptText(SAY_WARRIOR, me);
                        DoCast(me, SPELL_WARRIOR);
                        break;
                    case 2:
                        DoScriptText(SAY_DRUID, me);
                        DoCast(me, SPELL_DRUID);
                        break;
                    case 3:
                        DoScriptText(SAY_PRIEST, me);
                        DoCast(me, SPELL_PRIEST);
                        break;
                    case 4:
                        DoScriptText(SAY_PALADIN, me);
                        DoCast(me, SPELL_PALADIN);
                        break;
                    case 5:
                        DoScriptText(SAY_SHAMAN, me);
                        DoCast(me, SPELL_SHAMAN);
                        break;
                    case 6:
                        DoScriptText(SAY_WARLOCK, me);
                        DoCast(me, SPELL_WARLOCK);
                        break;
                    case 7:
                        DoScriptText(SAY_HUNTER, me);
                        DoCast(me, SPELL_HUNTER);
                        break;
                    case 8:
                        DoScriptText(SAY_ROGUE, me);
                        DoCast(me, SPELL_ROGUE);
                        break;
                }

                // Re-arm randomly in the 35-40s window.
                ClassCall_Timer = 35000 + (rand() % 5000);
            } else ClassCall_Timer -= diff;

            //Phase3 begins when we are below X health
            if (!Phase3 && HealthBelowPct(20))
            {
                Phase3 = true;
                DoScriptText(SAY_RAISE_SKELETONS, me);
            }

            DoMeleeAttackIfReady();
        }
    };
};
// Script registration entry point; the CreatureScript base constructor
// performs the actual registration (not visible in this file).
void AddSC_boss_nefarian()
{
    new boss_nefarian();
}
|
rebirth-core/Rebirth---old
|
src/server/scripts/EasternKingdoms/BlackwingLair/boss_nefarian.cpp
|
C++
|
gpl-2.0
| 7,916
|
/*
* QEMU Block backends
*
* Copyright (C) 2014-2016 Red Hat, Inc.
*
* Authors:
* Markus Armbruster <armbru@redhat.com>,
*
* This work is licensed under the terms of the GNU LGPL, version 2.1
* or later. See the COPYING.LIB file in the top-level directory.
*/
#include "qemu/osdep.h"
#include "sysemu/block-backend.h"
#include "block/block_int.h"
#include "block/blockjob.h"
#include "block/throttle-groups.h"
#include "sysemu/blockdev.h"
#include "sysemu/sysemu.h"
#include "qapi-event.h"
#include "qemu/id.h"
#include "trace.h"
/* Number of coroutines to reserve per attached device model */
#define COROUTINE_POOL_RESERVATION 64
#define NOT_DONE 0x7fffffff /* used while emulated sync operation in progress */
static AioContext *blk_aiocb_get_aio_context(BlockAIOCB *acb);
struct BlockBackend {
char *name;
int refcnt;
BdrvChild *root;
DriveInfo *legacy_dinfo; /* null unless created by drive_new() */
QTAILQ_ENTRY(BlockBackend) link; /* for block_backends */
QTAILQ_ENTRY(BlockBackend) monitor_link; /* for monitor_block_backends */
BlockBackendPublic public;
void *dev; /* attached device model, if any */
bool legacy_dev; /* true if dev is not a DeviceState */
/* TODO change to DeviceState when all users are qdevified */
const BlockDevOps *dev_ops;
void *dev_opaque;
/* the block size for which the guest device expects atomicity */
int guest_block_size;
/* If the BDS tree is removed, some of its options are stored here (which
* can be used to restore those options in the new BDS on insert) */
BlockBackendRootState root_state;
bool enable_write_cache;
/* I/O stats (display with "info blockstats"). */
BlockAcctStats stats;
BlockdevOnError on_read_error, on_write_error;
bool iostatus_enabled;
BlockDeviceIoStatus iostatus;
bool allow_write_beyond_eof;
NotifierList remove_bs_notifiers, insert_bs_notifiers;
};
typedef struct BlockBackendAIOCB {
BlockAIOCB common;
BlockBackend *blk;
int ret;
} BlockBackendAIOCB;
static const AIOCBInfo block_backend_aiocb_info = {
.get_aio_context = blk_aiocb_get_aio_context,
.aiocb_size = sizeof(BlockBackendAIOCB),
};
static void drive_info_del(DriveInfo *dinfo);
static BlockBackend *bdrv_first_blk(BlockDriverState *bs);
/* All BlockBackends */
static QTAILQ_HEAD(, BlockBackend) block_backends =
QTAILQ_HEAD_INITIALIZER(block_backends);
/* All BlockBackends referenced by the monitor and which are iterated through by
* blk_next() */
static QTAILQ_HEAD(, BlockBackend) monitor_block_backends =
QTAILQ_HEAD_INITIALIZER(monitor_block_backends);
/* BdrvChild callback for option inheritance.  A BlockBackend only ever
 * owns root nodes, whose options are never inherited from a parent node,
 * so reaching this function is a programming error. */
static void blk_root_inherit_options(int *child_flags, QDict *child_options,
                                     int parent_flags, QDict *parent_options)
{
    /* We're not supposed to call this function for root nodes */
    abort();
}
static void blk_root_drained_begin(BdrvChild *child);
static void blk_root_drained_end(BdrvChild *child);
static void blk_root_change_media(BdrvChild *child, bool load);
static void blk_root_resize(BdrvChild *child);
/* BdrvChild callback: a root child is named after its owning BlockBackend
 * (child->opaque holds the BlockBackend pointer). */
static const char *blk_root_get_name(BdrvChild *child)
{
    return blk_name(child->opaque);
}
static const BdrvChildRole child_root = {
.inherit_options = blk_root_inherit_options,
.change_media = blk_root_change_media,
.resize = blk_root_resize,
.get_name = blk_root_get_name,
.drained_begin = blk_root_drained_begin,
.drained_end = blk_root_drained_end,
};
/*
 * Create a new BlockBackend with a reference count of one and no medium
 * attached, and link it into the global @block_backends list.
 * Cannot fail: g_new0() does not return on allocation failure.
 */
BlockBackend *blk_new(void)
{
    BlockBackend *blk;
    blk = g_new0(BlockBackend, 1);
    blk->refcnt = 1;
    /* Write cache (writeback) is enabled by default; devices can change
     * this later via blk_set_enable_write_cache(). */
    blk_set_enable_write_cache(blk, true);
    /* Two throttled-request queues; index presumably distinguishes the I/O
     * direction (compare the is_write argument of
     * throttle_group_co_io_limits_intercept() below) -- TODO confirm. */
    qemu_co_queue_init(&blk->public.throttled_reqs[0]);
    qemu_co_queue_init(&blk->public.throttled_reqs[1]);
    notifier_list_init(&blk->remove_bs_notifiers);
    notifier_list_init(&blk->insert_bs_notifiers);
    QTAILQ_INSERT_TAIL(&block_backends, blk, link);
    return blk;
}
/*
 * Creates a new BlockBackend, opens a new BlockDriverState, and connects both.
 * Returns the new BlockBackend on success; on failure the new backend is
 * destroyed again and null is returned through normal return, with the error
 * stored through @errp.
 *
 * Just as with bdrv_open(), after having called this function the reference to
 * @options belongs to the block layer (even on failure).
 *
 * TODO: Remove @filename and @flags; it should be possible to specify a whole
 * BDS tree just by specifying the @options QDict (or @reference,
 * alternatively). At the time of adding this function, this is not possible,
 * though, so callers of this function have to be able to specify @filename and
 * @flags.
 */
BlockBackend *blk_new_open(const char *filename, const char *reference,
                           QDict *options, int flags, Error **errp)
{
    BlockBackend *blk;
    BlockDriverState *bs;
    blk = blk_new();
    bs = bdrv_open(filename, reference, options, flags, errp);
    if (!bs) {
        /* bdrv_open() set @errp; drop our only reference to the backend */
        blk_unref(blk);
        return NULL;
    }
    blk->root = bdrv_root_attach_child(bs, "root", &child_root, blk);
    return blk;
}
/*
 * Destroy @blk: detach its root BDS (if any), unlink it from the global
 * backend list and free all owned state.  Reached via blk_unref() when the
 * reference count drops to zero; by then the name and any device model
 * must already have been removed.
 */
static void blk_delete(BlockBackend *blk)
{
    assert(!blk->refcnt);
    assert(!blk->name);
    assert(!blk->dev);
    if (blk->root) {
        blk_remove_bs(blk);
    }
    /* Notifier owners must have unregistered before the backend dies */
    assert(QLIST_EMPTY(&blk->remove_bs_notifiers.notifiers));
    assert(QLIST_EMPTY(&blk->insert_bs_notifiers.notifiers));
    QTAILQ_REMOVE(&block_backends, blk, link);
    drive_info_del(blk->legacy_dinfo);
    block_acct_cleanup(&blk->stats);
    g_free(blk);
}
/* Free @dinfo and the resources it owns; a null @dinfo is a no-op. */
static void drive_info_del(DriveInfo *dinfo)
{
    if (dinfo) {
        qemu_opts_del(dinfo->opts);
        g_free(dinfo->serial);
        g_free(dinfo);
    }
}
/* Current reference count of @blk; 0 for a null @blk. */
int blk_get_refcnt(BlockBackend *blk)
{
    if (!blk) {
        return 0;
    }
    return blk->refcnt;
}
/*
* Increment @blk's reference count.
* @blk must not be null.
*/
void blk_ref(BlockBackend *blk)
{
blk->refcnt++;
}
/*
 * Drop one reference to @blk, destroying it via blk_delete() when the last
 * reference goes away.  A null @blk is silently ignored for convenience.
 */
void blk_unref(BlockBackend *blk)
{
    if (!blk) {
        return;
    }
    assert(blk->refcnt > 0);
    blk->refcnt--;
    if (blk->refcnt == 0) {
        blk_delete(blk);
    }
}
/*
 * Like blk_next(), but walks the complete @block_backends list, including
 * backends that are not referenced by the monitor.
 */
static BlockBackend *blk_all_next(BlockBackend *blk)
{
    if (!blk) {
        return QTAILQ_FIRST(&block_backends);
    }
    return QTAILQ_NEXT(blk, link);
}
void blk_remove_all_bs(void)
{
BlockBackend *blk = NULL;
while ((blk = blk_all_next(blk)) != NULL) {
AioContext *ctx = blk_get_aio_context(blk);
aio_context_acquire(ctx);
if (blk->root) {
blk_remove_bs(blk);
}
aio_context_release(ctx);
}
}
/*
 * Return the monitor-owned BlockBackend after @blk, the first one when @blk
 * is null, or null once the end of the list is reached.
 *
 * Iterate over all monitor-owned backends with
 *    for (blk = blk_next(NULL); blk; blk = blk_next(blk)) {
 *        ...
 *    }
 */
BlockBackend *blk_next(BlockBackend *blk)
{
    if (!blk) {
        return QTAILQ_FIRST(&monitor_block_backends);
    }
    return QTAILQ_NEXT(blk, monitor_link);
}
/* Iterates over all top-level BlockDriverStates, i.e. BDSs that are owned by
 * the monitor or attached to a BlockBackend.  @it carries the iteration
 * state across calls; initialize it with bdrv_first(). */
BlockDriverState *bdrv_next(BdrvNextIterator *it)
{
    BlockDriverState *bs;
    /* First, return all root nodes of BlockBackends. In order to avoid
     * returning a BDS twice when multiple BBs refer to it, we only return it
     * if the BB is the first one in the parent list of the BDS. */
    if (it->phase == BDRV_NEXT_BACKEND_ROOTS) {
        do {
            it->blk = blk_all_next(it->blk);
            bs = it->blk ? blk_bs(it->blk) : NULL;
        } while (it->blk && (bs == NULL || bdrv_first_blk(bs) != it->blk));
        if (bs) {
            return bs;
        }
        /* Backend phase exhausted; fall through to monitor-owned BDSes on
         * this and subsequent calls */
        it->phase = BDRV_NEXT_MONITOR_OWNED;
    }
    /* Then return the monitor-owned BDSes without a BB attached. Ignore all
     * BDSes that are attached to a BlockBackend here; they have been handled
     * by the above block already */
    do {
        it->bs = bdrv_next_monitor_owned(it->bs);
        bs = it->bs;
    } while (bs && bdrv_has_blk(bs));
    return bs;
}
/* Reset @it and return the first top-level BDS (see bdrv_next()). */
BlockDriverState *bdrv_first(BdrvNextIterator *it)
{
    BdrvNextIterator start = {
        .phase = BDRV_NEXT_BACKEND_ROOTS,
    };

    *it = start;
    return bdrv_next(it);
}
/*
 * Add a BlockBackend into the list of backends referenced by the monitor, with
 * the given @name acting as the handle for the monitor.
 * Strictly for use by blockdev.c.
 *
 * @name must not be null or empty, must be a well-formed ID, and must not
 * collide with an existing backend name or node name.
 *
 * Returns true on success and false on failure. In the latter case, an Error
 * object is returned through @errp.
 */
bool monitor_add_blk(BlockBackend *blk, const char *name, Error **errp)
{
    assert(!blk->name);
    assert(name && name[0]);
    if (!id_wellformed(name)) {
        error_setg(errp, "Invalid device name");
        return false;
    }
    if (blk_by_name(name)) {
        error_setg(errp, "Device with id '%s' already exists", name);
        return false;
    }
    /* Backend names and node names share one namespace */
    if (bdrv_find_node(name)) {
        error_setg(errp,
                   "Device name '%s' conflicts with an existing node name",
                   name);
        return false;
    }
    blk->name = g_strdup(name);
    QTAILQ_INSERT_TAIL(&monitor_block_backends, blk, monitor_link);
    return true;
}
/*
 * Drop @blk from the monitor-owned backend list and release its name.
 * A backend without a name is not monitor-owned, so nothing to do then.
 * Strictly for use by blockdev.c.
 */
void monitor_remove_blk(BlockBackend *blk)
{
    if (blk->name) {
        QTAILQ_REMOVE(&monitor_block_backends, blk, monitor_link);
        g_free(blk->name);
        blk->name = NULL;
    }
}
/*
 * Return @blk's name; never null.  The result is the empty string exactly
 * when @blk is not referenced by the monitor (no name assigned).
 */
const char *blk_name(BlockBackend *blk)
{
    return blk->name ? blk->name : "";
}
/*
 * Look up the monitor-owned BlockBackend named @name; null if none exists.
 * @name must not be null.
 */
BlockBackend *blk_by_name(const char *name)
{
    BlockBackend *blk;

    assert(name);
    for (blk = blk_next(NULL); blk; blk = blk_next(blk)) {
        if (strcmp(blk->name, name) == 0) {
            return blk;
        }
    }
    return NULL;
}
/*
 * Return the BlockDriverState attached to @blk, or null when there is none.
 */
BlockDriverState *blk_bs(BlockBackend *blk)
{
    if (!blk->root) {
        return NULL;
    }
    return blk->root->bs;
}
static BlockBackend *bdrv_first_blk(BlockDriverState *bs)
{
BdrvChild *child;
QLIST_FOREACH(child, &bs->parents, next_parent) {
if (child->role == &child_root) {
return child->opaque;
}
}
return NULL;
}
/*
 * Returns true if at least one BlockBackend is attached to @bs.
 */
bool bdrv_has_blk(BlockDriverState *bs)
{
    BlockBackend *blk = bdrv_first_blk(bs);

    return blk != NULL;
}
/*
 * Returns true if every parent of @bs is a BlockBackend (i.e. @bs is a
 * root node).  A node with no parents trivially satisfies this.
 */
bool bdrv_is_root_node(BlockDriverState *bs)
{
    BdrvChild *c;
    bool only_blk_parents = true;

    QLIST_FOREACH(c, &bs->parents, next_parent) {
        if (c->role != &child_root) {
            only_blk_parents = false;
            break;
        }
    }
    return only_blk_parents;
}
/*
* Return @blk's DriveInfo if any, else null.
*/
DriveInfo *blk_legacy_dinfo(BlockBackend *blk)
{
return blk->legacy_dinfo;
}
/*
* Set @blk's DriveInfo to @dinfo, and return it.
* @blk must not have a DriveInfo set already.
* No other BlockBackend may have the same DriveInfo set.
*/
DriveInfo *blk_set_legacy_dinfo(BlockBackend *blk, DriveInfo *dinfo)
{
assert(!blk->legacy_dinfo);
return blk->legacy_dinfo = dinfo;
}
/*
* Return the BlockBackend with DriveInfo @dinfo.
* It must exist.
*/
BlockBackend *blk_by_legacy_dinfo(DriveInfo *dinfo)
{
BlockBackend *blk = NULL;
while ((blk = blk_next(blk)) != NULL) {
if (blk->legacy_dinfo == dinfo) {
return blk;
}
}
abort();
}
/*
* Returns a pointer to the publicly accessible fields of @blk.
*/
BlockBackendPublic *blk_get_public(BlockBackend *blk)
{
return &blk->public;
}
/*
* Returns a BlockBackend given the associated @public fields.
*/
BlockBackend *blk_by_public(BlockBackendPublic *public)
{
return container_of(public, BlockBackend, public);
}
/*
* Disassociates the currently associated BlockDriverState from @blk.
*/
void blk_remove_bs(BlockBackend *blk)
{
notifier_list_notify(&blk->remove_bs_notifiers, blk);
if (blk->public.throttle_state) {
throttle_timers_detach_aio_context(&blk->public.throttle_timers);
}
blk_update_root_state(blk);
bdrv_root_unref_child(blk->root);
blk->root = NULL;
}
/*
* Associates a new BlockDriverState with @blk.
*/
void blk_insert_bs(BlockBackend *blk, BlockDriverState *bs)
{
bdrv_ref(bs);
blk->root = bdrv_root_attach_child(bs, "root", &child_root, blk);
notifier_list_notify(&blk->insert_bs_notifiers, blk);
if (blk->public.throttle_state) {
throttle_timers_attach_aio_context(
&blk->public.throttle_timers, bdrv_get_aio_context(bs));
}
}
static int blk_do_attach_dev(BlockBackend *blk, void *dev)
{
if (blk->dev) {
return -EBUSY;
}
blk_ref(blk);
blk->dev = dev;
blk->legacy_dev = false;
blk_iostatus_reset(blk);
return 0;
}
/*
* Attach device model @dev to @blk.
* Return 0 on success, -EBUSY when a device model is attached already.
*/
int blk_attach_dev(BlockBackend *blk, DeviceState *dev)
{
return blk_do_attach_dev(blk, dev);
}
/*
 * Attach the non-qdevified device model @dev to @blk and mark the backend
 * as having a legacy device.  @blk must not have a device attached yet;
 * attachment failure is a programming error and aborts.
 * TODO qdevified devices don't use this, remove when devices are qdevified
 */
void blk_attach_dev_legacy(BlockBackend *blk, void *dev)
{
    int ret = blk_do_attach_dev(blk, dev);

    if (ret < 0) {
        abort();
    }
    blk->legacy_dev = true;
}
/*
* Detach device model @dev from @blk.
* @dev must be currently attached to @blk.
*/
void blk_detach_dev(BlockBackend *blk, void *dev)
/* TODO change to DeviceState *dev when all users are qdevified */
{
assert(blk->dev == dev);
blk->dev = NULL;
blk->dev_ops = NULL;
blk->dev_opaque = NULL;
blk->guest_block_size = 512;
blk_unref(blk);
}
/*
* Return the device model attached to @blk if any, else null.
*/
void *blk_get_attached_dev(BlockBackend *blk)
/* TODO change to return DeviceState * when all users are qdevified */
{
return blk->dev;
}
/* Return the qdev ID, or if no ID is assigned the QOM path, of the block
 * device attached to the BlockBackend.  Returns an allocated empty string
 * when no device is attached; the caller frees the result. */
static char *blk_get_attached_dev_id(BlockBackend *blk)
{
    DeviceState *dev;
    /* Only valid for qdevified devices: blk->dev must be a DeviceState */
    assert(!blk->legacy_dev);
    dev = blk->dev;
    if (!dev) {
        return g_strdup("");
    } else if (dev->id) {
        return g_strdup(dev->id);
    }
    return object_get_canonical_path(OBJECT(dev));
}
/*
* Return the BlockBackend which has the device model @dev attached if it
* exists, else null.
*
* @dev must not be null.
*/
BlockBackend *blk_by_dev(void *dev)
{
BlockBackend *blk = NULL;
assert(dev != NULL);
while ((blk = blk_all_next(blk)) != NULL) {
if (blk->dev == dev) {
return blk;
}
}
return NULL;
}
/*
* Set @blk's device model callbacks to @ops.
* @opaque is the opaque argument to pass to the callbacks.
* This is for use by device models.
*/
void blk_set_dev_ops(BlockBackend *blk, const BlockDevOps *ops,
void *opaque)
{
/* All drivers that use blk_set_dev_ops() are qdevified and we want to keep
* it that way, so we can assume blk->dev is a DeviceState if blk->dev_ops
* is set. */
assert(!blk->legacy_dev);
blk->dev_ops = ops;
blk->dev_opaque = opaque;
}
/*
* Notify @blk's attached device model of media change.
* If @load is true, notify of media load.
* Else, notify of media eject.
* Also send DEVICE_TRAY_MOVED events as appropriate.
*/
void blk_dev_change_media_cb(BlockBackend *blk, bool load)
{
if (blk->dev_ops && blk->dev_ops->change_media_cb) {
bool tray_was_open, tray_is_open;
assert(!blk->legacy_dev);
tray_was_open = blk_dev_is_tray_open(blk);
blk->dev_ops->change_media_cb(blk->dev_opaque, load);
tray_is_open = blk_dev_is_tray_open(blk);
if (tray_was_open != tray_is_open) {
char *id = blk_get_attached_dev_id(blk);
qapi_event_send_device_tray_moved(blk_name(blk), id, tray_is_open,
&error_abort);
g_free(id);
}
}
}
static void blk_root_change_media(BdrvChild *child, bool load)
{
blk_dev_change_media_cb(child->opaque, load);
}
/*
 * Does @blk's attached device model have removable media?
 * %true if no device model is attached at all.
 */
bool blk_dev_has_removable_media(BlockBackend *blk)
{
    if (!blk->dev) {
        return true;
    }
    return blk->dev_ops && blk->dev_ops->change_media_cb;
}
/*
* Does @blk's attached device model have a tray?
*/
bool blk_dev_has_tray(BlockBackend *blk)
{
return blk->dev_ops && blk->dev_ops->is_tray_open;
}
/*
* Notify @blk's attached device model of a media eject request.
* If @force is true, the medium is about to be yanked out forcefully.
*/
void blk_dev_eject_request(BlockBackend *blk, bool force)
{
if (blk->dev_ops && blk->dev_ops->eject_request_cb) {
blk->dev_ops->eject_request_cb(blk->dev_opaque, force);
}
}
/*
 * Does @blk's attached device model have a tray, and is it open?
 * %false when there is no tray at all.
 */
bool blk_dev_is_tray_open(BlockBackend *blk)
{
    if (!blk_dev_has_tray(blk)) {
        return false;
    }
    return blk->dev_ops->is_tray_open(blk->dev_opaque);
}
/*
* Does @blk's attached device model have the medium locked?
* %false if the device model has no such lock.
*/
bool blk_dev_is_medium_locked(BlockBackend *blk)
{
if (blk->dev_ops && blk->dev_ops->is_medium_locked) {
return blk->dev_ops->is_medium_locked(blk->dev_opaque);
}
return false;
}
/*
* Notify @blk's attached device model of a backend size change.
*/
static void blk_root_resize(BdrvChild *child)
{
BlockBackend *blk = child->opaque;
if (blk->dev_ops && blk->dev_ops->resize_cb) {
blk->dev_ops->resize_cb(blk->dev_opaque);
}
}
void blk_iostatus_enable(BlockBackend *blk)
{
blk->iostatus_enabled = true;
blk->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
}
/* The I/O status is only enabled if the drive explicitly
* enables it _and_ the VM is configured to stop on errors */
bool blk_iostatus_is_enabled(const BlockBackend *blk)
{
return (blk->iostatus_enabled &&
(blk->on_write_error == BLOCKDEV_ON_ERROR_ENOSPC ||
blk->on_write_error == BLOCKDEV_ON_ERROR_STOP ||
blk->on_read_error == BLOCKDEV_ON_ERROR_STOP));
}
BlockDeviceIoStatus blk_iostatus(const BlockBackend *blk)
{
return blk->iostatus;
}
void blk_iostatus_disable(BlockBackend *blk)
{
blk->iostatus_enabled = false;
}
void blk_iostatus_reset(BlockBackend *blk)
{
if (blk_iostatus_is_enabled(blk)) {
BlockDriverState *bs = blk_bs(blk);
blk->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
if (bs && bs->job) {
block_job_iostatus_reset(bs->job);
}
}
}
/*
 * Record an I/O error in @blk's iostatus.  Only the first error sticks:
 * once the status has left OK it is never overwritten here.
 */
void blk_iostatus_set_err(BlockBackend *blk, int error)
{
    assert(blk_iostatus_is_enabled(blk));
    if (blk->iostatus != BLOCK_DEVICE_IO_STATUS_OK) {
        return;
    }
    blk->iostatus = (error == ENOSPC) ? BLOCK_DEVICE_IO_STATUS_NOSPACE
                                      : BLOCK_DEVICE_IO_STATUS_FAILED;
}
void blk_set_allow_write_beyond_eof(BlockBackend *blk, bool allow)
{
blk->allow_write_beyond_eof = allow;
}
/*
 * Validate the byte range [@offset, @offset + @size) against @blk.
 * Returns 0 when the request is acceptable, otherwise:
 *   -EIO       @size > INT_MAX, @offset negative, or range past EOF while
 *              writes beyond EOF are not allowed
 *   -ENOMEDIUM backend not available (blk_is_available() is false)
 *   other <0   error propagated from blk_getlength()
 */
static int blk_check_byte_request(BlockBackend *blk, int64_t offset,
                                  size_t size)
{
    int64_t len;
    if (size > INT_MAX) {
        return -EIO;
    }
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }
    if (offset < 0) {
        return -EIO;
    }
    if (!blk->allow_write_beyond_eof) {
        len = blk_getlength(blk);
        if (len < 0) {
            return len;
        }
        /* Checking offset > len first makes len - offset non-negative in
         * the second operand, avoiding signed underflow */
        if (offset > len || len - offset < size) {
            return -EIO;
        }
    }
    return 0;
}
/*
 * Coroutine read: read @bytes starting at @offset into @qiov.
 * Validates the range, applies I/O throttling when configured, and keeps
 * the in-flight counter bumped around the actual read.
 * Returns the bdrv_co_preadv() result, or a negative errno when the
 * request fails validation.
 */
int coroutine_fn blk_co_preadv(BlockBackend *blk, int64_t offset,
                               unsigned int bytes, QEMUIOVector *qiov,
                               BdrvRequestFlags flags)
{
    int ret;
    BlockDriverState *bs = blk_bs(blk);
    trace_blk_co_preadv(blk, bs, offset, bytes, flags);
    ret = blk_check_byte_request(blk, offset, bytes);
    if (ret < 0) {
        return ret;
    }
    bdrv_inc_in_flight(bs);
    /* throttling disk I/O */
    if (blk->public.throttle_state) {
        throttle_group_co_io_limits_intercept(blk, bytes, false);
    }
    ret = bdrv_co_preadv(blk->root, offset, bytes, qiov, flags);
    bdrv_dec_in_flight(bs);
    return ret;
}
/*
 * Coroutine write: write @bytes starting at @offset from @qiov.
 * Validates the range, applies I/O throttling when configured, and keeps
 * the in-flight counter bumped around the actual write.  When the write
 * cache is disabled, BDRV_REQ_FUA is forced so data reaches stable
 * storage.  Returns the bdrv_co_pwritev() result, or a negative errno
 * when the request fails validation.
 */
int coroutine_fn blk_co_pwritev(BlockBackend *blk, int64_t offset,
                                unsigned int bytes, QEMUIOVector *qiov,
                                BdrvRequestFlags flags)
{
    int ret;
    BlockDriverState *bs = blk_bs(blk);
    trace_blk_co_pwritev(blk, bs, offset, bytes, flags);
    ret = blk_check_byte_request(blk, offset, bytes);
    if (ret < 0) {
        return ret;
    }
    bdrv_inc_in_flight(bs);
    /* throttling disk I/O */
    if (blk->public.throttle_state) {
        throttle_group_co_io_limits_intercept(blk, bytes, true);
    }
    if (!blk->enable_write_cache) {
        flags |= BDRV_REQ_FUA;
    }
    ret = bdrv_co_pwritev(blk->root, offset, bytes, qiov, flags);
    bdrv_dec_in_flight(bs);
    return ret;
}
typedef struct BlkRwCo {
BlockBackend *blk;
int64_t offset;
QEMUIOVector *qiov;
int ret;
BdrvRequestFlags flags;
} BlkRwCo;
static void blk_read_entry(void *opaque)
{
BlkRwCo *rwco = opaque;
rwco->ret = blk_co_preadv(rwco->blk, rwco->offset, rwco->qiov->size,
rwco->qiov, rwco->flags);
}
static void blk_write_entry(void *opaque)
{
BlkRwCo *rwco = opaque;
rwco->ret = blk_co_pwritev(rwco->blk, rwco->offset, rwco->qiov->size,
rwco->qiov, rwco->flags);
}
/*
 * Synchronous request helper: wrap @buf/@bytes in a one-element
 * QEMUIOVector, run @co_entry (e.g. blk_read_entry, blk_write_entry) in a
 * new coroutine and poll until the coroutine stores its result in
 * rwco.ret.  Returns that result.
 */
static int blk_prw(BlockBackend *blk, int64_t offset, uint8_t *buf,
                   int64_t bytes, CoroutineEntry co_entry,
                   BdrvRequestFlags flags)
{
    QEMUIOVector qiov;
    struct iovec iov;
    Coroutine *co;
    BlkRwCo rwco;
    iov = (struct iovec) {
        .iov_base = buf,
        .iov_len = bytes,
    };
    qemu_iovec_init_external(&qiov, &iov, 1);
    rwco = (BlkRwCo) {
        .blk    = blk,
        .offset = offset,
        .qiov   = &qiov,
        .flags  = flags,
        /* Sentinel: co_entry overwrites this when it completes */
        .ret    = NOT_DONE,
    };
    co = qemu_coroutine_create(co_entry, &rwco);
    qemu_coroutine_enter(co);
    BDRV_POLL_WHILE(blk_bs(blk), rwco.ret == NOT_DONE);
    return rwco.ret;
}
int blk_pread_unthrottled(BlockBackend *blk, int64_t offset, uint8_t *buf,
int count)
{
int ret;
ret = blk_check_byte_request(blk, offset, count);
if (ret < 0) {
return ret;
}
blk_root_drained_begin(blk->root);
ret = blk_pread(blk, offset, buf, count);
blk_root_drained_end(blk->root);
return ret;
}
int blk_pwrite_zeroes(BlockBackend *blk, int64_t offset,
int count, BdrvRequestFlags flags)
{
return blk_prw(blk, offset, NULL, count, blk_write_entry,
flags | BDRV_REQ_ZERO_WRITE);
}
int blk_make_zero(BlockBackend *blk, BdrvRequestFlags flags)
{
return bdrv_make_zero(blk->root, flags);
}
static void error_callback_bh(void *opaque)
{
struct BlockBackendAIOCB *acb = opaque;
bdrv_dec_in_flight(acb->common.bs);
acb->common.cb(acb->common.opaque, acb->ret);
qemu_aio_unref(acb);
}
BlockAIOCB *blk_abort_aio_request(BlockBackend *blk,
BlockCompletionFunc *cb,
void *opaque, int ret)
{
struct BlockBackendAIOCB *acb;
bdrv_inc_in_flight(blk_bs(blk));
acb = blk_aio_get(&block_backend_aiocb_info, blk, cb, opaque);
acb->blk = blk;
acb->ret = ret;
aio_bh_schedule_oneshot(blk_get_aio_context(blk), error_callback_bh, acb);
return &acb->common;
}
typedef struct BlkAioEmAIOCB {
BlockAIOCB common;
BlkRwCo rwco;
int bytes;
bool has_returned;
} BlkAioEmAIOCB;
static const AIOCBInfo blk_aio_em_aiocb_info = {
.aiocb_size = sizeof(BlkAioEmAIOCB),
};
static void blk_aio_complete(BlkAioEmAIOCB *acb)
{
if (acb->has_returned) {
bdrv_dec_in_flight(acb->common.bs);
acb->common.cb(acb->common.opaque, acb->rwco.ret);
qemu_aio_unref(acb);
}
}
static void blk_aio_complete_bh(void *opaque)
{
BlkAioEmAIOCB *acb = opaque;
assert(acb->has_returned);
blk_aio_complete(acb);
}
/*
 * Common AIO emulation helper: run @co_entry in a coroutine and return a
 * BlockAIOCB for it.  blk_aio_complete() only invokes the completion
 * callback once @has_returned is set, so if the coroutine finishes before
 * this function returns, completion is deferred to a bottom half
 * (blk_aio_complete_bh) -- the callback never fires before the caller has
 * received the AIOCB.
 */
static BlockAIOCB *blk_aio_prwv(BlockBackend *blk, int64_t offset, int bytes,
                                QEMUIOVector *qiov, CoroutineEntry co_entry,
                                BdrvRequestFlags flags,
                                BlockCompletionFunc *cb, void *opaque)
{
    BlkAioEmAIOCB *acb;
    Coroutine *co;
    bdrv_inc_in_flight(blk_bs(blk));
    acb = blk_aio_get(&blk_aio_em_aiocb_info, blk, cb, opaque);
    acb->rwco = (BlkRwCo) {
        .blk    = blk,
        .offset = offset,
        .qiov   = qiov,
        .flags  = flags,
        .ret    = NOT_DONE,
    };
    acb->bytes = bytes;
    acb->has_returned = false;
    co = qemu_coroutine_create(co_entry, acb);
    qemu_coroutine_enter(co);
    acb->has_returned = true;
    if (acb->rwco.ret != NOT_DONE) {
        /* Coroutine already finished: complete from a bottom half */
        aio_bh_schedule_oneshot(blk_get_aio_context(blk),
                                blk_aio_complete_bh, acb);
    }
    return &acb->common;
}
static void blk_aio_read_entry(void *opaque)
{
BlkAioEmAIOCB *acb = opaque;
BlkRwCo *rwco = &acb->rwco;
assert(rwco->qiov->size == acb->bytes);
rwco->ret = blk_co_preadv(rwco->blk, rwco->offset, acb->bytes,
rwco->qiov, rwco->flags);
blk_aio_complete(acb);
}
static void blk_aio_write_entry(void *opaque)
{
BlkAioEmAIOCB *acb = opaque;
BlkRwCo *rwco = &acb->rwco;
assert(!rwco->qiov || rwco->qiov->size == acb->bytes);
rwco->ret = blk_co_pwritev(rwco->blk, rwco->offset, acb->bytes,
rwco->qiov, rwco->flags);
blk_aio_complete(acb);
}
BlockAIOCB *blk_aio_pwrite_zeroes(BlockBackend *blk, int64_t offset,
int count, BdrvRequestFlags flags,
BlockCompletionFunc *cb, void *opaque)
{
return blk_aio_prwv(blk, offset, count, NULL, blk_aio_write_entry,
flags | BDRV_REQ_ZERO_WRITE, cb, opaque);
}
/*
 * Synchronous read of @count bytes at @offset into @buf.
 * Returns @count on success, a negative errno on failure.
 */
int blk_pread(BlockBackend *blk, int64_t offset, void *buf, int count)
{
    int ret = blk_prw(blk, offset, buf, count, blk_read_entry, 0);

    return ret < 0 ? ret : count;
}
/*
 * Synchronous write of @count bytes from @buf at @offset.
 * Returns @count on success, a negative errno on failure.
 */
int blk_pwrite(BlockBackend *blk, int64_t offset, const void *buf, int count,
               BdrvRequestFlags flags)
{
    int ret = blk_prw(blk, offset, (void *) buf, count, blk_write_entry,
                      flags);

    return ret < 0 ? ret : count;
}
/*
 * Length of @blk's medium in bytes (bdrv_getlength() of the attached BDS),
 * or -ENOMEDIUM when the backend is not available.
 */
int64_t blk_getlength(BlockBackend *blk)
{
    return blk_is_available(blk) ? bdrv_getlength(blk_bs(blk)) : -ENOMEDIUM;
}
/*
 * Store @blk's sector count into *@nb_sectors_ptr; 0 when no medium is
 * attached.
 */
void blk_get_geometry(BlockBackend *blk, uint64_t *nb_sectors_ptr)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        bdrv_get_geometry(bs, nb_sectors_ptr);
    } else {
        *nb_sectors_ptr = 0;
    }
}
int64_t blk_nb_sectors(BlockBackend *blk)
{
if (!blk_is_available(blk)) {
return -ENOMEDIUM;
}
return bdrv_nb_sectors(blk_bs(blk));
}
BlockAIOCB *blk_aio_preadv(BlockBackend *blk, int64_t offset,
QEMUIOVector *qiov, BdrvRequestFlags flags,
BlockCompletionFunc *cb, void *opaque)
{
return blk_aio_prwv(blk, offset, qiov->size, qiov,
blk_aio_read_entry, flags, cb, opaque);
}
BlockAIOCB *blk_aio_pwritev(BlockBackend *blk, int64_t offset,
QEMUIOVector *qiov, BdrvRequestFlags flags,
BlockCompletionFunc *cb, void *opaque)
{
return blk_aio_prwv(blk, offset, qiov->size, qiov,
blk_aio_write_entry, flags, cb, opaque);
}
static void blk_aio_flush_entry(void *opaque)
{
BlkAioEmAIOCB *acb = opaque;
BlkRwCo *rwco = &acb->rwco;
rwco->ret = blk_co_flush(rwco->blk);
blk_aio_complete(acb);
}
BlockAIOCB *blk_aio_flush(BlockBackend *blk,
BlockCompletionFunc *cb, void *opaque)
{
return blk_aio_prwv(blk, 0, 0, NULL, blk_aio_flush_entry, 0, cb, opaque);
}
static void blk_aio_pdiscard_entry(void *opaque)
{
BlkAioEmAIOCB *acb = opaque;
BlkRwCo *rwco = &acb->rwco;
rwco->ret = blk_co_pdiscard(rwco->blk, rwco->offset, acb->bytes);
blk_aio_complete(acb);
}
BlockAIOCB *blk_aio_pdiscard(BlockBackend *blk,
int64_t offset, int count,
BlockCompletionFunc *cb, void *opaque)
{
return blk_aio_prwv(blk, offset, count, NULL, blk_aio_pdiscard_entry, 0,
cb, opaque);
}
void blk_aio_cancel(BlockAIOCB *acb)
{
bdrv_aio_cancel(acb);
}
void blk_aio_cancel_async(BlockAIOCB *acb)
{
bdrv_aio_cancel_async(acb);
}
int blk_co_ioctl(BlockBackend *blk, unsigned long int req, void *buf)
{
if (!blk_is_available(blk)) {
return -ENOMEDIUM;
}
return bdrv_co_ioctl(blk_bs(blk), req, buf);
}
static void blk_ioctl_entry(void *opaque)
{
BlkRwCo *rwco = opaque;
rwco->ret = blk_co_ioctl(rwco->blk, rwco->offset,
rwco->qiov->iov[0].iov_base);
}
int blk_ioctl(BlockBackend *blk, unsigned long int req, void *buf)
{
return blk_prw(blk, req, buf, 0, blk_ioctl_entry, 0);
}
static void blk_aio_ioctl_entry(void *opaque)
{
BlkAioEmAIOCB *acb = opaque;
BlkRwCo *rwco = &acb->rwco;
rwco->ret = blk_co_ioctl(rwco->blk, rwco->offset,
rwco->qiov->iov[0].iov_base);
blk_aio_complete(acb);
}
BlockAIOCB *blk_aio_ioctl(BlockBackend *blk, unsigned long int req, void *buf,
BlockCompletionFunc *cb, void *opaque)
{
QEMUIOVector qiov;
struct iovec iov;
iov = (struct iovec) {
.iov_base = buf,
.iov_len = 0,
};
qemu_iovec_init_external(&qiov, &iov, 1);
return blk_aio_prwv(blk, req, 0, &qiov, blk_aio_ioctl_entry, 0, cb, opaque);
}
int blk_co_pdiscard(BlockBackend *blk, int64_t offset, int count)
{
int ret = blk_check_byte_request(blk, offset, count);
if (ret < 0) {
return ret;
}
return bdrv_co_pdiscard(blk_bs(blk), offset, count);
}
int blk_co_flush(BlockBackend *blk)
{
if (!blk_is_available(blk)) {
return -ENOMEDIUM;
}
return bdrv_co_flush(blk_bs(blk));
}
static void blk_flush_entry(void *opaque)
{
BlkRwCo *rwco = opaque;
rwco->ret = blk_co_flush(rwco->blk);
}
int blk_flush(BlockBackend *blk)
{
return blk_prw(blk, 0, NULL, 0, blk_flush_entry, 0);
}
/* Wait for all in-flight requests on @blk's BDS to finish; no-op without
 * an attached BDS. */
void blk_drain(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);

    if (bs) {
        bdrv_drain(bs);
    }
}
void blk_drain_all(void)
{
bdrv_drain_all();
}
void blk_set_on_error(BlockBackend *blk, BlockdevOnError on_read_error,
BlockdevOnError on_write_error)
{
blk->on_read_error = on_read_error;
blk->on_write_error = on_write_error;
}
BlockdevOnError blk_get_on_error(BlockBackend *blk, bool is_read)
{
return is_read ? blk->on_read_error : blk->on_write_error;
}
/*
 * Map @blk's configured on-error policy to the action to take for @error.
 * @is_read selects between the read and write policy; @error is a positive
 * errno value.  BLOCKDEV_ON_ERROR_AUTO must have been resolved by the
 * caller before this point -- hitting it (or any unknown value) aborts.
 */
BlockErrorAction blk_get_error_action(BlockBackend *blk, bool is_read,
                                      int error)
{
    BlockdevOnError on_err = blk_get_on_error(blk, is_read);
    switch (on_err) {
    case BLOCKDEV_ON_ERROR_ENOSPC:
        /* Stop only for out-of-space; report everything else */
        return (error == ENOSPC) ?
               BLOCK_ERROR_ACTION_STOP : BLOCK_ERROR_ACTION_REPORT;
    case BLOCKDEV_ON_ERROR_STOP:
        return BLOCK_ERROR_ACTION_STOP;
    case BLOCKDEV_ON_ERROR_REPORT:
        return BLOCK_ERROR_ACTION_REPORT;
    case BLOCKDEV_ON_ERROR_IGNORE:
        return BLOCK_ERROR_ACTION_IGNORE;
    case BLOCKDEV_ON_ERROR_AUTO:
    default:
        abort();
    }
}
static void send_qmp_error_event(BlockBackend *blk,
BlockErrorAction action,
bool is_read, int error)
{
IoOperationType optype;
optype = is_read ? IO_OPERATION_TYPE_READ : IO_OPERATION_TYPE_WRITE;
qapi_event_send_block_io_error(blk_name(blk),
bdrv_get_node_name(blk_bs(blk)), optype,
action, blk_iostatus_is_enabled(blk),
error == ENOSPC, strerror(error),
&error_abort);
}
/* This is done by device models because, while the block layer knows
* about the error, it does not know whether an operation comes from
* the device or the block layer (from a job, for example).
*/
void blk_error_action(BlockBackend *blk, BlockErrorAction action,
bool is_read, int error)
{
assert(error >= 0);
if (action == BLOCK_ERROR_ACTION_STOP) {
/* First set the iostatus, so that "info block" returns an iostatus
* that matches the events raised so far (an additional error iostatus
* is fine, but not a lost one).
*/
blk_iostatus_set_err(blk, error);
/* Then raise the request to stop the VM and the event.
* qemu_system_vmstop_request_prepare has two effects. First,
* it ensures that the STOP event always comes after the
* BLOCK_IO_ERROR event. Second, it ensures that even if management
* can observe the STOP event and do a "cont" before the STOP
* event is issued, the VM will not stop. In this case, vm_start()
* also ensures that the STOP/RESUME pair of events is emitted.
*/
qemu_system_vmstop_request_prepare();
send_qmp_error_event(blk, action, is_read, error);
qemu_system_vmstop_request(RUN_STATE_IO_ERROR);
} else {
send_qmp_error_event(blk, action, is_read, error);
}
}
/*
 * Whether @blk is read-only; falls back to the saved root state when no
 * BDS is currently attached.
 */
int blk_is_read_only(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);

    return bs ? bdrv_is_read_only(bs) : blk->root_state.read_only;
}
int blk_is_sg(BlockBackend *blk)
{
BlockDriverState *bs = blk_bs(blk);
if (!bs) {
return 0;
}
return bdrv_is_sg(bs);
}
int blk_enable_write_cache(BlockBackend *blk)
{
return blk->enable_write_cache;
}
void blk_set_enable_write_cache(BlockBackend *blk, bool wce)
{
blk->enable_write_cache = wce;
}
void blk_invalidate_cache(BlockBackend *blk, Error **errp)
{
BlockDriverState *bs = blk_bs(blk);
if (!bs) {
error_setg(errp, "Device '%s' has no medium", blk->name);
return;
}
bdrv_invalidate_cache(bs, errp);
}
bool blk_is_inserted(BlockBackend *blk)
{
BlockDriverState *bs = blk_bs(blk);
return bs && bdrv_is_inserted(bs);
}
/* A backend is available when a medium is inserted and the tray (if any)
 * is closed. */
bool blk_is_available(BlockBackend *blk)
{
    if (!blk_is_inserted(blk)) {
        return false;
    }
    return !blk_dev_is_tray_open(blk);
}
void blk_lock_medium(BlockBackend *blk, bool locked)
{
BlockDriverState *bs = blk_bs(blk);
if (bs) {
bdrv_lock_medium(bs, locked);
}
}
void blk_eject(BlockBackend *blk, bool eject_flag)
{
BlockDriverState *bs = blk_bs(blk);
char *id;
/* blk_eject is only called by qdevified devices */
assert(!blk->legacy_dev);
if (bs) {
bdrv_eject(bs, eject_flag);
}
/* Whether or not we ejected on the backend,
* the frontend experienced a tray event. */
id = blk_get_attached_dev_id(blk);
qapi_event_send_device_tray_moved(blk_name(blk), id,
eject_flag, &error_abort);
g_free(id);
}
int blk_get_flags(BlockBackend *blk)
{
BlockDriverState *bs = blk_bs(blk);
if (bs) {
return bdrv_get_flags(bs);
} else {
return blk->root_state.open_flags;
}
}
/* Returns the maximum transfer length, in bytes; guaranteed nonzero */
uint32_t blk_get_max_transfer(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);
    uint32_t limit = bs ? bs->bl.max_transfer : 0;

    /* A limit of 0 means "unlimited"; clamp to INT_MAX in that case */
    return MIN_NON_ZERO(limit, INT_MAX);
}
int blk_get_max_iov(BlockBackend *blk)
{
return blk->root->bs->bl.max_iov;
}
void blk_set_guest_block_size(BlockBackend *blk, int align)
{
blk->guest_block_size = align;
}
void *blk_try_blockalign(BlockBackend *blk, size_t size)
{
return qemu_try_blockalign(blk ? blk_bs(blk) : NULL, size);
}
void *blk_blockalign(BlockBackend *blk, size_t size)
{
return qemu_blockalign(blk ? blk_bs(blk) : NULL, size);
}
bool blk_op_is_blocked(BlockBackend *blk, BlockOpType op, Error **errp)
{
BlockDriverState *bs = blk_bs(blk);
if (!bs) {
return false;
}
return bdrv_op_is_blocked(bs, op, errp);
}
void blk_op_unblock(BlockBackend *blk, BlockOpType op, Error *reason)
{
BlockDriverState *bs = blk_bs(blk);
if (bs) {
bdrv_op_unblock(bs, op, reason);
}
}
void blk_op_block_all(BlockBackend *blk, Error *reason)
{
BlockDriverState *bs = blk_bs(blk);
if (bs) {
bdrv_op_block_all(bs, reason);
}
}
void blk_op_unblock_all(BlockBackend *blk, Error *reason)
{
BlockDriverState *bs = blk_bs(blk);
if (bs) {
bdrv_op_unblock_all(bs, reason);
}
}
/* AioContext of @blk's BDS, or the global context when no BDS is
 * attached. */
AioContext *blk_get_aio_context(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);

    return bs ? bdrv_get_aio_context(bs) : qemu_get_aio_context();
}
static AioContext *blk_aiocb_get_aio_context(BlockAIOCB *acb)
{
BlockBackendAIOCB *blk_acb = DO_UPCAST(BlockBackendAIOCB, common, acb);
return blk_get_aio_context(blk_acb->blk);
}
void blk_set_aio_context(BlockBackend *blk, AioContext *new_context)
{
BlockDriverState *bs = blk_bs(blk);
if (bs) {
if (blk->public.throttle_state) {
throttle_timers_detach_aio_context(&blk->public.throttle_timers);
}
bdrv_set_aio_context(bs, new_context);
if (blk->public.throttle_state) {
throttle_timers_attach_aio_context(&blk->public.throttle_timers,
new_context);
}
}
}
void blk_add_aio_context_notifier(BlockBackend *blk,
void (*attached_aio_context)(AioContext *new_context, void *opaque),
void (*detach_aio_context)(void *opaque), void *opaque)
{
BlockDriverState *bs = blk_bs(blk);
if (bs) {
bdrv_add_aio_context_notifier(bs, attached_aio_context,
detach_aio_context, opaque);
}
}
void blk_remove_aio_context_notifier(BlockBackend *blk,
void (*attached_aio_context)(AioContext *,
void *),
void (*detach_aio_context)(void *),
void *opaque)
{
BlockDriverState *bs = blk_bs(blk);
if (bs) {
bdrv_remove_aio_context_notifier(bs, attached_aio_context,
detach_aio_context, opaque);
}
}
void blk_add_remove_bs_notifier(BlockBackend *blk, Notifier *notify)
{
notifier_list_add(&blk->remove_bs_notifiers, notify);
}
void blk_add_insert_bs_notifier(BlockBackend *blk, Notifier *notify)
{
notifier_list_add(&blk->insert_bs_notifiers, notify);
}
void blk_io_plug(BlockBackend *blk)
{
BlockDriverState *bs = blk_bs(blk);
if (bs) {
bdrv_io_plug(bs);
}
}
void blk_io_unplug(BlockBackend *blk)
{
BlockDriverState *bs = blk_bs(blk);
if (bs) {
bdrv_io_unplug(bs);
}
}
BlockAcctStats *blk_get_stats(BlockBackend *blk)
{
return &blk->stats;
}
void *blk_aio_get(const AIOCBInfo *aiocb_info, BlockBackend *blk,
BlockCompletionFunc *cb, void *opaque)
{
return qemu_aio_get(aiocb_info, blk_bs(blk), cb, opaque);
}
int coroutine_fn blk_co_pwrite_zeroes(BlockBackend *blk, int64_t offset,
int count, BdrvRequestFlags flags)
{
return blk_co_pwritev(blk, offset, count, NULL,
flags | BDRV_REQ_ZERO_WRITE);
}
int blk_pwrite_compressed(BlockBackend *blk, int64_t offset, const void *buf,
int count)
{
return blk_prw(blk, offset, (void *) buf, count, blk_write_entry,
BDRV_REQ_WRITE_COMPRESSED);
}
int blk_truncate(BlockBackend *blk, int64_t offset)
{
if (!blk_is_available(blk)) {
return -ENOMEDIUM;
}
return bdrv_truncate(blk_bs(blk), offset);
}
/* Coroutine entry point used by blk_pdiscard() via blk_prw(). */
static void blk_pdiscard_entry(void *opaque)
{
    BlkRwCo *rwco = opaque;
    rwco->ret = blk_co_pdiscard(rwco->blk, rwco->offset, rwco->qiov->size);
}
/* Synchronous discard of @count bytes at @offset, built on blk_prw(). */
int blk_pdiscard(BlockBackend *blk, int64_t offset, int count)
{
    return blk_prw(blk, offset, NULL, count, blk_pdiscard_entry, 0);
}
/*
 * Write @size bytes of VM state from @buf at @pos.  When the whole buffer
 * was written and writeback caching is disabled, flush before returning.
 * Returns @size on success, a negative errno value on failure.
 */
int blk_save_vmstate(BlockBackend *blk, const uint8_t *buf,
                     int64_t pos, int size)
{
    int ret;

    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }

    ret = bdrv_save_vmstate(blk_bs(blk), buf, pos, size);
    if (ret < 0) {
        return ret;
    }

    if (ret == size && !blk->enable_write_cache) {
        ret = bdrv_flush(blk_bs(blk));
        if (ret < 0) {
            return ret;
        }
    }

    return size;
}
/*
 * Read @size bytes of VM state at @pos into @buf.
 * Fails with -ENOMEDIUM when no medium is available.
 */
int blk_load_vmstate(BlockBackend *blk, uint8_t *buf, int64_t pos, int size)
{
    return blk_is_available(blk)
           ? bdrv_load_vmstate(blk_bs(blk), buf, pos, size)
           : -ENOMEDIUM;
}
/*
 * Probe the block sizes of the root BDS into @bsz.
 * Fails with -ENOMEDIUM when no medium is available.
 */
int blk_probe_blocksizes(BlockBackend *blk, BlockSizes *bsz)
{
    return blk_is_available(blk) ? bdrv_probe_blocksizes(blk_bs(blk), bsz)
                                 : -ENOMEDIUM;
}
/*
 * Probe the hard-disk geometry of the root BDS into @geo.
 * Fails with -ENOMEDIUM when no medium is available.
 */
int blk_probe_geometry(BlockBackend *blk, HDGeometry *geo)
{
    return blk_is_available(blk) ? bdrv_probe_geometry(blk_bs(blk), geo)
                                 : -ENOMEDIUM;
}
/*
 * Updates the BlockBackendRootState object with data from the currently
 * attached BlockDriverState.  The backend must have a root child.
 */
void blk_update_root_state(BlockBackend *blk)
{
    BlockDriverState *bs;

    assert(blk->root);
    bs = blk->root->bs;

    blk->root_state.open_flags    = bs->open_flags;
    blk->root_state.read_only     = bs->read_only;
    blk->root_state.detect_zeroes = bs->detect_zeroes;
}
/*
 * Returns the detect-zeroes setting to be used for bdrv_open() of a
 * BlockDriverState which is supposed to inherit the root state.
 *
 * NOTE(review): the return type is bool while root_state.detect_zeroes is
 * copied from bs->detect_zeroes in blk_update_root_state(); confirm no
 * information is lost in the implicit conversion.
 */
bool blk_get_detect_zeroes_from_root_state(BlockBackend *blk)
{
    return blk->root_state.detect_zeroes;
}
/*
 * Returns the flags to be used for bdrv_open() of a BlockDriverState which is
 * supposed to inherit the root state: the saved open flags, with the RDWR
 * bit derived from the saved read-only setting.
 */
int blk_get_open_flags_from_root_state(BlockBackend *blk)
{
    int flags = blk->root_state.open_flags & ~BDRV_O_RDWR;

    if (!blk->root_state.read_only) {
        flags |= BDRV_O_RDWR;
    }

    return flags;
}
/* Return a pointer to @blk's saved root state (see blk_update_root_state()). */
BlockBackendRootState *blk_get_root_state(BlockBackend *blk)
{
    return &blk->root_state;
}
/*
 * Run bdrv_commit() on every backend whose inserted root BDS has a backing
 * file.  Each backend's AioContext is held across its commit.  Stops at the
 * first failure and returns that error; returns 0 when all commits succeed.
 */
int blk_commit_all(void)
{
    BlockBackend *blk = NULL;

    /* blk_all_next(NULL) starts the iteration over all backends. */
    while ((blk = blk_all_next(blk)) != NULL) {
        AioContext *aio_context = blk_get_aio_context(blk);

        aio_context_acquire(aio_context);
        if (blk_is_inserted(blk) && blk->root->bs->backing) {
            int ret = bdrv_commit(blk->root->bs);
            if (ret < 0) {
                aio_context_release(aio_context);
                return ret;
            }
        }
        aio_context_release(aio_context);
    }
    return 0;
}
/* throttling disk I/O limits */

/* Apply the throttle configuration @cfg to @blk's throttle group. */
void blk_set_io_limits(BlockBackend *blk, ThrottleConfig *cfg)
{
    throttle_group_config(blk, cfg);
}
/*
 * Remove @blk from its throttle group.  The backend must currently be
 * throttled.  The root BDS is drained around the unregister so no request
 * is left queued on the throttle state being torn down.
 */
void blk_io_limits_disable(BlockBackend *blk)
{
    assert(blk->public.throttle_state);
    bdrv_drained_begin(blk_bs(blk));
    throttle_group_unregister_blk(blk);
    bdrv_drained_end(blk_bs(blk));
}
/* should be called before blk_set_io_limits if a limit is set */
void blk_io_limits_enable(BlockBackend *blk, const char *group)
{
    /* Must not already be throttled; joins the throttle group named @group. */
    assert(!blk->public.throttle_state);
    throttle_group_register_blk(blk, group);
}
/*
 * Move @blk to the throttle group named @group.  Nothing happens when the
 * backend is not throttled at all, or is already a member of that group.
 */
void blk_io_limits_update_group(BlockBackend *blk, const char *group)
{
    /* Act only when throttling is enabled and the target group differs
     * from the current one. */
    if (blk->public.throttle_state &&
        g_strcmp0(throttle_group_get_name(blk), group) != 0) {
        blk_io_limits_disable(blk);
        blk_io_limits_enable(blk, group);
    }
}
/*
 * BdrvChild callback: a drained section begins on the root child.
 * On the first (outermost) begin, suspend throttling and restart any
 * requests queued on the throttle state.
 */
static void blk_root_drained_begin(BdrvChild *child)
{
    BlockBackend *blk = child->opaque;

    /* Note that blk->root may not be accessible here yet if we are just
     * attaching to a BlockDriverState that is drained. Use child instead. */

    if (blk->public.io_limits_disabled++ == 0) {
        throttle_group_restart_blk(blk);
    }
}
/*
 * BdrvChild callback: a drained section ends on the root child.
 * Counterpart of blk_root_drained_begin(); throttling resumes once the
 * disable counter drops back to zero.
 */
static void blk_root_drained_end(BdrvChild *child)
{
    BlockBackend *blk = child->opaque;

    assert(blk->public.io_limits_disabled);
    --blk->public.io_limits_disabled;
}
|
gongleiarei/qemu
|
block/block-backend.c
|
C
|
gpl-2.0
| 45,510
|
# -*- coding: utf-8 -*-
"""
/***************************************************************************
Name : Dockable MirrorMap
Description : Creates a dockable map canvas
Date : February 1, 2011
copyright : (C) 2011 by Giuseppe Sucameli (Faunalia)
email : brush.tyler@gmail.com
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from qgis.core import *
from qgis.gui import *
import resources_rc
class DockableMirrorMapPlugin:
    """QGIS plugin entry point managing a set of dockable mirror-map widgets.

    Mirrors are created on demand, persisted into the project file on save
    and re-created when a project is loaded.
    """

    def __init__(self, iface):
        # Save a reference to the QGIS iface
        self.iface = iface

    def initGui(self):
        # Open mirror widgets and the counter used to number the next one.
        self.dockableMirrors = []
        self.lastDockableMirror = 0
        self.dockableAction = QAction(QIcon(":/plugins/DockableMirrorMap/icons/dockablemirrormap.png"), "Dockable MirrorMap", self.iface.mainWindow())
        QObject.connect(self.dockableAction, SIGNAL("triggered()"), self.runDockableMirror)
        self.aboutAction = QAction(QIcon(":/plugins/DockableMirrorMap/icons/about.png"), "About", self.iface.mainWindow())
        QObject.connect(self.aboutAction, SIGNAL("triggered()"), self.about)
        # Add to the plugin menu and toolbar
        self.iface.addPluginToMenu("Dockable MirrorMap", self.dockableAction)
        self.iface.addPluginToMenu("Dockable MirrorMap", self.aboutAction)
        self.iface.addToolBarIcon(self.dockableAction)
        # Restore mirrors when a project is read; persist them on save.
        QObject.connect(self.iface, SIGNAL("projectRead()"), self.onProjectLoaded)
        QObject.connect(QgsProject.instance(), SIGNAL("writeProject(QDomDocument &)"), self.onWriteProject)

    def unload(self):
        QObject.disconnect(self.iface, SIGNAL("projectRead()"), self.onProjectLoaded)
        QObject.disconnect(QgsProject.instance(), SIGNAL("writeProject(QDomDocument &)"), self.onWriteProject)
        self.removeDockableMirrors()
        # Remove the plugin
        self.iface.removePluginMenu("Dockable MirrorMap", self.dockableAction)
        self.iface.removePluginMenu("Dockable MirrorMap", self.aboutAction)
        self.iface.removeToolBarIcon(self.dockableAction)

    def about(self):
        # Imported lazily so the dialog module is only loaded on demand.
        from DlgAbout import DlgAbout
        DlgAbout(self.iface.mainWindow()).exec_()

    def removeDockableMirrors(self):
        # Iterate over a copy: closing a mirror mutates self.dockableMirrors
        # through the closed() signal handled by onCloseDockableMirror().
        for d in list(self.dockableMirrors):
            d.close()
        self.dockableMirrors = []
        self.lastDockableMirror = 0

    def runDockableMirror(self):
        from dockableMirrorMap import DockableMirrorMap
        wdg = DockableMirrorMap(self.iface.mainWindow(), self.iface)
        # Remember size constraints: setupDockWidget() temporarily pins
        # width/height to force the initial dock geometry, restored below.
        minsize = wdg.minimumSize()
        maxsize = wdg.maximumSize()
        self.setupDockWidget(wdg)
        self.addDockWidget(wdg)
        wdg.setMinimumSize(minsize)
        wdg.setMaximumSize(maxsize)
        if wdg.isFloating():
            wdg.move(50, 50)    # move the widget to the center

    def setupDockWidget(self, wdg):
        # Choose dock area and initial size from how many mirrors are open:
        # 1st -> right half, 2nd -> split the right column, 3rd -> bottom
        # half, any further -> floating.
        othersize = QGridLayout().verticalSpacing()
        if len(self.dockableMirrors) <= 0:
            width = self.iface.mapCanvas().size().width() / 2 - othersize
            wdg.setLocation(Qt.RightDockWidgetArea)
            wdg.setMinimumWidth(width)
            wdg.setMaximumWidth(width)
        elif len(self.dockableMirrors) == 1:
            height = self.dockableMirrors[0].size().height() / 2 - othersize / 2
            wdg.setLocation(Qt.RightDockWidgetArea)
            wdg.setMinimumHeight(height)
            wdg.setMaximumHeight(height)
        elif len(self.dockableMirrors) == 2:
            height = self.iface.mapCanvas().size().height() / 2 - othersize / 2
            wdg.setLocation(Qt.BottomDockWidgetArea)
            wdg.setMinimumHeight(height)
            wdg.setMaximumHeight(height)
        else:
            wdg.setLocation(Qt.BottomDockWidgetArea)
            wdg.setFloating(True)

    def addDockWidget(self, wdg, position=None):
        # When no position is given, use the one stored on the widget.
        if position == None:
            position = wdg.getLocation()
        else:
            wdg.setLocation(position)
        mapCanvas = self.iface.mapCanvas()
        oldSize = mapCanvas.size()
        # Suspend canvas rendering while the dock layout changes.
        prevFlag = mapCanvas.renderFlag()
        mapCanvas.setRenderFlag(False)
        self.iface.addDockWidget(position, wdg)
        wdg.setNumber(self.lastDockableMirror)
        self.lastDockableMirror = self.lastDockableMirror + 1
        self.dockableMirrors.append(wdg)
        QObject.connect(wdg, SIGNAL("closed(PyQt_PyObject)"), self.onCloseDockableMirror)
        newSize = mapCanvas.size()
        if newSize != oldSize:
            # trick: update the canvas size by resizing twice
            mapCanvas.resize(newSize.width() - 1, newSize.height())
            mapCanvas.setRenderFlag(prevFlag)
            mapCanvas.resize(newSize)
        else:
            mapCanvas.setRenderFlag(prevFlag)

    def onCloseDockableMirror(self, wdg):
        # Drop the closed mirror; reset numbering when none remain.
        if self.dockableMirrors.count(wdg) > 0:
            self.dockableMirrors.remove(wdg)
        if len(self.dockableMirrors) <= 0:
            self.lastDockableMirror = 0

    def onWriteProject(self, domproject):
        # Persist every open mirror (geometry, layer set, scale factor)
        # into the project file.
        if len(self.dockableMirrors) <= 0:
            return
        QgsProject.instance().writeEntry("DockableMirrorMap", "/numMirrors", len(self.dockableMirrors))
        for i, dockwidget in enumerate(self.dockableMirrors):
            # save position and geometry
            floating = dockwidget.isFloating()
            QgsProject.instance().writeEntry("DockableMirrorMap", "/mirror%s/floating" % i, floating)
            if floating:
                position = "%s %s" % (dockwidget.pos().x(), dockwidget.pos().y())
            else:
                position = u"%s" % dockwidget.getLocation()
            QgsProject.instance().writeEntry("DockableMirrorMap", "/mirror%s/position" % i, str(position))
            size = "%s %s" % (dockwidget.size().width(), dockwidget.size().height())
            QgsProject.instance().writeEntry("DockableMirrorMap", "/mirror%s/size" % i, str(size))
            # save the layer list
            layerIds = dockwidget.getMirror().getLayerSet()
            QgsProject.instance().writeEntry("DockableMirrorMap", "/mirror%s/layers" % i, layerIds)
            scaleFactor = dockwidget.getMirror().scaleFactor.value()
            QgsProject.instance().writeEntryDouble("DockableMirrorMap", "/mirror%s/scaleFactor" % i, scaleFactor)

    def onProjectLoaded(self):
        # restore mirrors?
        num, ok = QgsProject.instance().readNumEntry("DockableMirrorMap", "/numMirrors")
        if not ok or num <= 0:
            return
        # remove all mirrors
        self.removeDockableMirrors()
        mirror2lids = {}    # NOTE(review): written here but never used below
        # load mirrors
        for i in range(num):
            if num >= 2:
                # Freeze canvas rendering while the first mirrors are
                # restored; re-enable it when restoring the last one.
                # NOTE(review): prevFlag is saved but never restored.
                if i == 0:
                    prevFlag = self.iface.mapCanvas().renderFlag()
                    self.iface.mapCanvas().setRenderFlag(False)
                elif i == num - 1:
                    self.iface.mapCanvas().setRenderFlag(True)
            from dockableMirrorMap import DockableMirrorMap
            dockwidget = DockableMirrorMap(self.iface.mainWindow(), self.iface)
            minsize = dockwidget.minimumSize()
            maxsize = dockwidget.maximumSize()
            # restore position
            floating, ok = QgsProject.instance().readBoolEntry("DockableMirrorMap", "/mirror%s/floating" % i)
            if ok:
                dockwidget.setFloating(floating)
            position, ok = QgsProject.instance().readEntry("DockableMirrorMap", "/mirror%s/position" % i)
            if ok:
                try:
                    if floating:
                        # Floating widgets store "x y" screen coordinates.
                        parts = position.split(" ")
                        if len(parts) >= 2:
                            dockwidget.move(int(parts[0]), int(parts[1]))
                    else:
                        # Docked widgets store the dock-area enum value.
                        dockwidget.setLocation(int(position))
                except ValueError:
                    pass
            # restore geometry
            dockwidget.setFixedSize(dockwidget.geometry().width(), dockwidget.geometry().height())
            size, ok = QgsProject.instance().readEntry("DockableMirrorMap", "/mirror%s/size" % i)
            if ok:
                try:
                    parts = size.split(" ")
                    dockwidget.setFixedSize(int(parts[0]), int(parts[1]))
                except ValueError:
                    pass
            scaleFactor, ok = QgsProject.instance().readDoubleEntry("DockableMirrorMap", "/mirror%s/scaleFactor" % i, 1.0)
            if ok: dockwidget.getMirror().scaleFactor.setValue(scaleFactor)
            # get layer list
            layerIds, ok = QgsProject.instance().readListEntry("DockableMirrorMap", "/mirror%s/layers" % i)
            if ok: dockwidget.getMirror().setLayerSet(layerIds)
            self.addDockWidget(dockwidget)
            dockwidget.setMinimumSize(minsize)
            dockwidget.setMaximumSize(maxsize)
|
alfanugraha/LUMENS-repo
|
processing/DockableMirrorMap/dockableMirrorMapPlugin.py
|
Python
|
gpl-2.0
| 8,306
|
#!/bin/bash
#
# Bootstrap the Vagrant guest: install Ansible from the upstream PPA and
# run the local provisioning playbook against the guest itself.

echo "
------------------------- -----------------
| _ _ || __ __ |
| | | | | || \ \ / / |
| | |__| | __ || \ \ / / |
| | __ |/ _\` \ || \ \/ / |
| | | | | (_| | || \ / |
| |_| |_|\__, | || \/ |
| __/ |___ __ || ___ ____ |
| |___/( _ )/ \ || |_ )__ / |
| / _ \ () ||| / / |_ \ |
| \___/\__/ || /___|___/ |
------------------------- -----------------
"

echo
echo "Updating APT sources."
echo
apt-get update > /dev/null

echo
echo "Installing for Ansible."
echo
apt-get -y install software-properties-common
add-apt-repository -y ppa:ansible/ansible
apt-get update
apt-get -y install ansible

ansible_version=$(dpkg -s ansible 2>&1 | grep Version | cut -f2 -d' ')
echo
echo "Ansible installed ($ansible_version)"

ANS_BIN=$(which ansible-playbook)
if [[ -z $ANS_BIN ]]
then
	echo "Whoops, can't find Ansible anywhere. Aborting run."
	echo
	# Exit non-zero so the provisioner reports the failure (a bare
	# "exit" would return the status of the preceding echo, i.e. 0).
	exit 1
fi

echo
echo "Validating Ansible hostfile permissions."
echo
chmod 644 /vagrant/provisioning/hosts

# More continuous scroll of the ansible standard output buffer
export PYTHONUNBUFFERED=1

# $ANS_BIN /vagrant/provisioning/playbook.yml -i /vagrant/provisioning/hosts
"$ANS_BIN" /vagrant/provisioning/playbook.yml -i'127.0.0.1,'
echo
|
zamoose/hgv
|
bin/hgv-init.sh
|
Shell
|
gpl-2.0
| 1,462
|
-- Extend profile_phones.link_type with the new 'group' value while keeping
-- the existing values and the 'user' default intact.
ALTER TABLE profile_phones CHANGE COLUMN link_type link_type ENUM('address','pro','user','hq', 'group') NOT NULL DEFAULT 'user';
-- vim:set syntax=mysql:
|
Polytechnique-org/platal
|
upgrade/1.1.0/08_phones.sql
|
SQL
|
gpl-2.0
| 155
|
/*
* Copyright (c) 2005, 2006, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation. Oracle designates this
* particular file as subject to the "Classpath" exception as provided
* by Oracle in the LICENSE file that accompanied this code.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package com.sun.tools.internal.ws.processor.modeler.wsdl;
import com.sun.codemodel.internal.JType;
import com.sun.istack.internal.SAXParseException2;
import com.sun.tools.internal.ws.api.wsdl.TWSDLExtensible;
import com.sun.tools.internal.ws.processor.generator.Names;
import com.sun.tools.internal.ws.processor.model.*;
import com.sun.tools.internal.ws.processor.model.Fault;
import com.sun.tools.internal.ws.processor.model.Operation;
import com.sun.tools.internal.ws.processor.model.Port;
import com.sun.tools.internal.ws.processor.model.Service;
import com.sun.tools.internal.ws.processor.model.java.*;
import com.sun.tools.internal.ws.processor.model.jaxb.*;
import com.sun.tools.internal.ws.processor.modeler.JavaSimpleTypeCreator;
import com.sun.tools.internal.ws.processor.util.ClassNameCollector;
import com.sun.tools.internal.ws.resources.ModelerMessages;
import com.sun.tools.internal.ws.wscompile.ErrorReceiver;
import com.sun.tools.internal.ws.wscompile.WsimportOptions;
import com.sun.tools.internal.ws.wsdl.document.*;
import com.sun.tools.internal.ws.wsdl.document.Message;
import com.sun.tools.internal.ws.wsdl.document.jaxws.CustomName;
import com.sun.tools.internal.ws.wsdl.document.jaxws.JAXWSBinding;
import com.sun.tools.internal.ws.wsdl.document.mime.MIMEContent;
import com.sun.tools.internal.ws.wsdl.document.schema.SchemaKinds;
import com.sun.tools.internal.ws.wsdl.document.soap.*;
import com.sun.tools.internal.ws.wsdl.framework.*;
import com.sun.tools.internal.ws.wsdl.parser.WSDLParser;
import com.sun.tools.internal.xjc.api.S2JJAXBModel;
import com.sun.tools.internal.xjc.api.TypeAndAnnotation;
import com.sun.tools.internal.xjc.api.XJC;
import com.sun.xml.internal.bind.api.JAXBRIContext;
import com.sun.xml.internal.ws.util.xml.XmlUtil;
import org.xml.sax.InputSource;
import org.xml.sax.Locator;
import org.xml.sax.SAXException;
import org.xml.sax.SAXParseException;
import org.xml.sax.helpers.LocatorImpl;
import javax.jws.WebParam.Mode;
import javax.xml.namespace.QName;
import java.util.*;
import java.io.IOException;
/**
* The WSDLModeler processes a WSDL to create a Model.
*
* @author WS Development Team
*/
public class WSDLModeler extends WSDLModelerBase {
//map of wsdl:operation QName to <soapenv:Body> child, as per BP it must be unique in a port
private final Map<QName, Operation> uniqueBodyBlocks = new HashMap<QName, Operation>();
private final QName VOID_BODYBLOCK = new QName("");
private ClassNameCollector classNameCollector;
private final String explicitDefaultPackage;
/**
 * @param options  wsimport invocation options (extension mode, default
 *                 package, ...)
 * @param receiver sink for modeler errors and warnings
 */
public WSDLModeler(WsimportOptions options, ErrorReceiver receiver) {
    super(options, receiver);
    this.classNameCollector = new ClassNameCollector();
    this.explicitDefaultPackage = options.defaultPackage;
}
/* Binding styles the modeler distinguishes: rpc/literal vs document/literal. */
protected enum StyleAndUse {
    RPC_LITERAL, DOC_LITERAL
}
private JAXBModelBuilder jaxbModelBuilder;
/**
 * Entry point of the modeler: parses and validates the WSDL, builds the
 * {@link Model}, and retries once with conflict resolution when Java
 * class-name clashes are detected.
 *
 * @return the completed model, or null if parsing/validation failed or the
 *         naming conflicts could not be resolved
 */
public Model buildModel() {
    try {
        parser = new WSDLParser(options, errReceiver);
        parser.addParserListener(new ParserListener() {
            public void ignoringExtension(Entity entity, QName name, QName parent) {
                if (parent.equals(WSDLConstants.QNAME_TYPES)) {
                    // check for a schema element with the wrong namespace URI
                    if (name.getLocalPart().equals("schema")
                            && !name.getNamespaceURI().equals("")) {
                        warning(entity, ModelerMessages.WSDLMODELER_WARNING_IGNORING_UNRECOGNIZED_SCHEMA_EXTENSION(name.getNamespaceURI()));
                    }
                }
            }

            public void doneParsingEntity(QName element, Entity entity) {
            }
        });
        document = parser.parse();
        if (document == null || document.getDefinitions() == null)
            return null;
        document.validateLocally();
        forest = parser.getDOMForest();

        // First pass: build the model and check for class-name conflicts.
        Model model = internalBuildModel(document);
        if (model == null || errReceiver.hadError())
            return null;
        //ClassNameCollector classNameCollector = new ClassNameCollector();
        classNameCollector.process(model);
        if (classNameCollector.getConflictingClassNames().isEmpty()) {
            if (errReceiver.hadError())
                return null;
            return model;
        }
        // do another pass, this time with conflict resolution enabled
        model = internalBuildModel(document);
        classNameCollector.process(model);
        if (classNameCollector.getConflictingClassNames().isEmpty()) {
            // we're done
            if (errReceiver.hadError())
                return null;
            return model;
        }
        // give up: report the remaining conflicts as a comma-separated list
        StringBuffer conflictList = new StringBuffer();
        boolean first = true;
        for (Iterator iter =
                classNameCollector.getConflictingClassNames().iterator();
             iter.hasNext();
        ) {
            if (!first) {
                conflictList.append(", ");
            } else {
                first = false;
            }
            conflictList.append((String) iter.next());
        }
        error(document.getDefinitions(), ModelerMessages.WSDLMODELER_UNSOLVABLE_NAMING_CONFLICTS(conflictList.toString()));
    } catch (ModelException e) {
        reportError(document.getDefinitions(), e.getMessage(), e);
    } catch (ParseException e) {
        errReceiver.error(e);
    } catch (ValidationException e) {
        errReceiver.error(e.getMessage(), e);
    } catch (SAXException e) {
        errReceiver.error(e);
    } catch (IOException e) {
        errReceiver.error(e);
    }
    //should never reach here
    return null;
}
/**
 * Builds the model for one parsed WSDL document: creates the JAXB model,
 * the Model shell, and processes every wsdl:service found.
 */
private Model internalBuildModel(WSDLDocument document) {
    numPasses++;
    //build the jaxbModel to be used later
    buildJAXBModel(document);
    QName modelName =
        new QName(
            document.getDefinitions().getTargetNamespaceURI(),
            document.getDefinitions().getName() == null
                ? "model"
                : document.getDefinitions().getName());
    Model model = new Model(modelName, document.getDefinitions());
    model.setJAXBModel(getJAXBModelBuilder().getJAXBModel());
    // This fails with the changed classname (WSDLModeler to WSDLModeler11 etc.)
    // with this source compatibility change the WSDL Modeler class name is changed. Right now hardcoding the
    // modeler class name to the same one being checked in WSDLGenerator.
    model.setProperty(
        ModelProperties.PROPERTY_MODELER_NAME,
        ModelProperties.WSDL_MODELER_NAME);
    _javaTypes = new JavaSimpleTypeCreator();
    _javaExceptions = new HashMap<String, JavaException>();
    _bindingNameToPortMap = new HashMap<QName, Port>();
    // grab target namespace
    model.setTargetNamespaceURI(document.getDefinitions().getTargetNamespaceURI());
    setDocumentationIfPresent(model,
        document.getDefinitions().getDocumentation());
    boolean hasServices = document.getDefinitions().services().hasNext();
    if (hasServices) {
        for (Iterator iter = document.getDefinitions().services();
             iter.hasNext();
        ) {
            processService((com.sun.tools.internal.ws.wsdl.document.Service) iter.next(),
                model, document);
            hasServices = true; // NOTE(review): redundant, already true here
        }
    } else {
        // emit a warning if there are no service definitions
        warning(model.getEntity(), ModelerMessages.WSDLMODELER_WARNING_NO_SERVICE_DEFINITIONS_FOUND());
    }
    return model;
}
/* (non-Javadoc)
 * @see WSDLModelerBase#processService(Service, Model, WSDLDocument)
 *
 * Maps a wsdl:service to a model Service: derives the Java interface name
 * (appending "_Service" on a class-name clash), processes each port, and
 * only adds the service to the model when at least one port was usable.
 */
protected void processService(com.sun.tools.internal.ws.wsdl.document.Service wsdlService, Model model, WSDLDocument document) {
    String serviceInterface = "";
    QName serviceQName = getQNameOf(wsdlService);
    serviceInterface = getServiceInterfaceName(serviceQName, wsdlService);
    if (isConflictingServiceClassName(serviceInterface)) {
        serviceInterface += "_Service";
    }
    Service service =
        new Service(
            serviceQName,
            new JavaInterface(serviceInterface, serviceInterface + "Impl"), wsdlService);
    setDocumentationIfPresent(service, wsdlService.getDocumentation());
    boolean hasPorts = false;
    for (Iterator iter = wsdlService.ports(); iter.hasNext();) {
        boolean processed =
            processPort(
                (com.sun.tools.internal.ws.wsdl.document.Port) iter.next(),
                service,
                document);
        hasPorts = hasPorts || processed;
    }
    if (!hasPorts) {
        // emit a warning if there are no ports
        warning(wsdlService, ModelerMessages.WSDLMODELER_WARNING_NO_PORTS_IN_SERVICE(wsdlService.getName()));
    } else {
        model.addService(service);
    }
}
/* (non-Javadoc)
 * @see WSDLModelerBase#processPort(WSDLPort, Service, WSDLDocument)
 *
 * Maps a wsdl:port to a model Port: resolves its SOAP address/binding,
 * validates the binding operations against the portType, and builds one
 * model Operation per usable binding operation.
 *
 * @return true when the port was added to @service, false when it was
 *         skipped (non-SOAP, non-HTTP, no operations, ...)
 */
protected boolean processPort(com.sun.tools.internal.ws.wsdl.document.Port wsdlPort,
                              Service service, WSDLDocument document) {
    try {
        //clear the unique block map
        uniqueBodyBlocks.clear();
        QName portQName = getQNameOf(wsdlPort);
        Port port = new Port(portQName, wsdlPort);
        setDocumentationIfPresent(port, wsdlPort.getDocumentation());
        SOAPAddress soapAddress =
            (SOAPAddress) getExtensionOfType(wsdlPort, SOAPAddress.class);
        if (soapAddress == null) {
            if (options.isExtensionMode()) {
                warning(wsdlPort, ModelerMessages.WSDLMODELER_WARNING_NO_SOAP_ADDRESS(wsdlPort.getName()));
            } else {
                // not a SOAP port, ignore it
                warning(wsdlPort, ModelerMessages.WSDLMODELER_WARNING_IGNORING_NON_SOAP_PORT_NO_ADDRESS(wsdlPort.getName()));
                return false;
            }
        }
        if (soapAddress != null)
            port.setAddress(soapAddress.getLocation());
        Binding binding = wsdlPort.resolveBinding(document);
        QName bindingName = getQNameOf(binding);
        PortType portType = binding.resolvePortType(document);
        port.setProperty(
            ModelProperties.PROPERTY_WSDL_PORT_NAME,
            getQNameOf(wsdlPort));
        port.setProperty(
            ModelProperties.PROPERTY_WSDL_PORT_TYPE_NAME,
            getQNameOf(portType));
        port.setProperty(
            ModelProperties.PROPERTY_WSDL_BINDING_NAME,
            bindingName);
        boolean isProvider = isProvider(wsdlPort);
        if (_bindingNameToPortMap.containsKey(bindingName) && !isProvider) {
            // this binding has been processed before: reuse its results
            Port existingPort =
                _bindingNameToPortMap.get(bindingName);
            port.setOperations(existingPort.getOperations());
            port.setJavaInterface(existingPort.getJavaInterface());
            port.setStyle(existingPort.getStyle());
            port.setWrapped(existingPort.isWrapped());
        } else {
            // find out the SOAP binding extension, if any (SOAP 1.1 first,
            // then SOAP 1.2 which is only usable in extension mode)
            SOAPBinding soapBinding =
                (SOAPBinding) getExtensionOfType(binding, SOAPBinding.class);
            if (soapBinding == null) {
                soapBinding =
                    (SOAPBinding) getExtensionOfType(binding, SOAP12Binding.class);
                if (soapBinding == null) {
                    if (!options.isExtensionMode()) {
                        // cannot deal with non-SOAP ports
                        warning(wsdlPort, ModelerMessages.WSDLMODELER_WARNING_IGNORING_NON_SOAP_PORT(wsdlPort.getName()));
                        return false;
                    } else {
                        warning(wsdlPort, ModelerMessages.WSDLMODELER_WARNING_NON_SOAP_PORT(wsdlPort.getName()));
                    }
                } else {
                    // we can only do soap1.2 if extensions are on
                    if (options.isExtensionMode()) {
                        warning(wsdlPort, ModelerMessages.WSDLMODELER_WARNING_PORT_SOAP_BINDING_12(wsdlPort.getName()));
                    } else {
                        warning(wsdlPort, ModelerMessages.WSDLMODELER_WARNING_IGNORING_SOAP_BINDING_12(wsdlPort.getName()));
                        return false;
                    }
                }
            }
            if (soapBinding != null && (soapBinding.getTransport() == null
                    || (!soapBinding.getTransport().equals(
                        SOAPConstants.URI_SOAP_TRANSPORT_HTTP) && !soapBinding.getTransport().equals(
                        SOAP12Constants.URI_SOAP_TRANSPORT_HTTP)))) {
                if (!options.isExtensionMode()) {
                    // cannot deal with non-HTTP ports
                    warning(wsdlPort, ModelerMessages.WSDLMODELER_WARNING_IGNORING_SOAP_BINDING_NON_HTTP_TRANSPORT(wsdlPort.getName()));
                    return false;
                }
            }
            /**
             * validate wsdl:binding uniqueness in style, e.g. rpclit or doclit
             * ref: WSI BP 1.1 R 2705
             */
            if (soapBinding != null && !validateWSDLBindingStyle(binding)) {
                if (options.isExtensionMode()) {
                    warning(wsdlPort, ModelerMessages.WSDLMODELER_WARNING_PORT_SOAP_BINDING_MIXED_STYLE(wsdlPort.getName()));
                } else {
                    error(wsdlPort, ModelerMessages.WSDLMODELER_WARNING_IGNORING_SOAP_BINDING_MIXED_STYLE(wsdlPort.getName()));
                }
            }
            if (soapBinding != null) {
                port.setStyle(soapBinding.getStyle());
            }
            // Detect overloaded portType operations and verify every
            // portType operation has a matching binding operation.
            boolean hasOverloadedOperations = false;
            Set<String> operationNames = new HashSet<String>();
            for (Iterator iter = portType.operations(); iter.hasNext();) {
                com.sun.tools.internal.ws.wsdl.document.Operation operation =
                    (com.sun.tools.internal.ws.wsdl.document.Operation) iter.next();
                if (operationNames.contains(operation.getName())) {
                    hasOverloadedOperations = true;
                    break;
                }
                operationNames.add(operation.getName());
                /* FIX: the loop condition used to test the OUTER iterator
                 * (iter.hasNext()), so itr.next() could run past the end and
                 * throw NoSuchElementException, and the "not found" check on
                 * the last element never fired reliably. */
                for (Iterator itr = binding.operations();
                     itr.hasNext();
                ) {
                    BindingOperation bindingOperation =
                        (BindingOperation) itr.next();
                    if (operation
                            .getName()
                            .equals(bindingOperation.getName())) {
                        break;
                    } else if (!itr.hasNext()) {
                        error(bindingOperation, ModelerMessages.WSDLMODELER_INVALID_BINDING_OPERATION_NOT_FOUND(operation.getName(), bindingOperation.getName()));
                    }
                }
            }
            Map headers = new HashMap();
            boolean hasOperations = false;
            for (Iterator iter = binding.operations(); iter.hasNext();) {
                BindingOperation bindingOperation =
                    (BindingOperation) iter.next();
                com.sun.tools.internal.ws.wsdl.document.Operation portTypeOperation =
                    null;
                Set operations =
                    portType.getOperationsNamed(bindingOperation.getName());
                if (operations.size() == 0) {
                    // the WSDL document is invalid
                    error(bindingOperation, ModelerMessages.WSDLMODELER_INVALID_BINDING_OPERATION_NOT_IN_PORT_TYPE(bindingOperation.getName(), binding.getName()));
                } else if (operations.size() == 1) {
                    portTypeOperation =
                        (com.sun.tools.internal.ws.wsdl.document.Operation) operations
                            .iterator()
                            .next();
                } else {
                    // Overloaded: disambiguate by input/output message names.
                    boolean found = false;
                    String expectedInputName =
                        bindingOperation.getInput().getName();
                    String expectedOutputName =
                        bindingOperation.getOutput().getName();
                    for (Iterator iter2 = operations.iterator(); iter2.hasNext();) {
                        com.sun.tools.internal.ws.wsdl.document.Operation candidateOperation =
                            (com.sun.tools.internal.ws.wsdl.document.Operation) iter2
                                .next();
                        if (expectedInputName == null) {
                            // the WSDL document is invalid
                            error(bindingOperation, ModelerMessages.WSDLMODELER_INVALID_BINDING_OPERATION_MISSING_INPUT_NAME(bindingOperation.getName()));
                        }
                        if (expectedOutputName == null) {
                            // the WSDL document is invalid
                            error(bindingOperation, ModelerMessages.WSDLMODELER_INVALID_BINDING_OPERATION_MISSING_OUTPUT_NAME(bindingOperation.getName()));
                        }
                        if (expectedInputName
                                .equals(candidateOperation.getInput().getName())
                                && expectedOutputName.equals(
                                    candidateOperation
                                        .getOutput()
                                        .getName())) {
                            if (found) {
                                // the WSDL document is invalid
                                error(bindingOperation, ModelerMessages.WSDLMODELER_INVALID_BINDING_OPERATION_MULTIPLE_MATCHING_OPERATIONS(bindingOperation.getName(), bindingOperation.getName()));
                            }
                            // got it!
                            found = true;
                            portTypeOperation = candidateOperation;
                        }
                    }
                    if (!found) {
                        // the WSDL document is invalid
                        error(bindingOperation, ModelerMessages.WSDLMODELER_INVALID_BINDING_OPERATION_NOT_FOUND(bindingOperation.getName(), binding.getName()));
                    }
                }
                if (!isProvider) {
                    this.info =
                        new ProcessSOAPOperationInfo(
                            port,
                            wsdlPort,
                            portTypeOperation,
                            bindingOperation,
                            soapBinding,
                            document,
                            hasOverloadedOperations,
                            headers);
                    Operation operation;
                    if (soapBinding != null)
                        operation = processSOAPOperation();
                    else {
                        operation = processNonSOAPOperation();
                    }
                    if (operation != null) {
                        port.addOperation(operation);
                        hasOperations = true;
                    }
                }
            }
            if (!isProvider && !hasOperations) {
                // emit a warning if there are no operations, except when its a provider port
                warning(wsdlPort, ModelerMessages.WSDLMODELER_WARNING_NO_OPERATIONS_IN_PORT(wsdlPort.getName()));
                return false;
            }
            createJavaInterfaceForPort(port, isProvider);
            PortType pt = binding.resolvePortType(document);
            String jd = (pt.getDocumentation() != null) ? pt.getDocumentation().getContent() : null;
            port.getJavaInterface().setJavaDoc(jd);
            _bindingNameToPortMap.put(bindingName, port);
        }
        service.addPort(port);
        applyPortMethodCustomization(port, wsdlPort);
        applyWrapperStyleCustomization(port, binding.resolvePortType(document));
        return true;
    } catch (NoSuchEntityException e) {
        warning(document.getDefinitions(), e.getMessage());
        // should not happen
        return false;
    }
}
/**
 * Returns an operation purely from abstract operation.
 *
 * Used when the binding has no (usable) SOAP extension: builds the model
 * Operation, request/response, parameter lists and literal faults from the
 * portType operation alone.
 *
 * @return the completed operation, or null when the style is unsupported
 *         (extension mode) or the parameter names fail validation
 */
private Operation processNonSOAPOperation() {
    Operation operation =
        new Operation(new QName(null, info.bindingOperation.getName()), info.bindingOperation);
    setDocumentationIfPresent(
        operation,
        info.portTypeOperation.getDocumentation());
    // Only request-response and one-way styles are supported.
    if (info.portTypeOperation.getStyle()
            != OperationStyle.REQUEST_RESPONSE
            && info.portTypeOperation.getStyle() != OperationStyle.ONE_WAY) {
        if (options.isExtensionMode()) {
            warning(info.portTypeOperation, ModelerMessages.WSDLMODELER_WARNING_IGNORING_OPERATION_NOT_SUPPORTED_STYLE(info.portTypeOperation.getName()));
            return null;
        } else {
            error(info.portTypeOperation, ModelerMessages.WSDLMODELER_INVALID_OPERATION_NOT_SUPPORTED_STYLE(info.portTypeOperation.getName(),
                info.port.resolveBinding(document).resolvePortType(document).getName()));
        }
    }
    boolean isRequestResponse = info.portTypeOperation.getStyle() == OperationStyle.REQUEST_RESPONSE;
    Message inputMessage = getInputMessage();
    Request request = new Request(inputMessage, errReceiver);
    request.setErrorReceiver(errReceiver);
    info.operation = operation;
    info.operation.setWSDLPortTypeOperation(info.portTypeOperation);
    Response response = null;
    Message outputMessage = null;
    if (isRequestResponse) {
        outputMessage = getOutputMessage();
        response = new Response(outputMessage, errReceiver);
    } else {
        // one-way: keep an empty Response so fault handling below works
        response = new Response(null, errReceiver);
    }
    //set the style based on heuristic that message has either all parts defined
    // using type(RPC) or element(DOCUMENT)
    setNonSoapStyle(inputMessage, outputMessage);
    // Process parameterOrder and get the parameterList
    List<MessagePart> parameterList = getParameterOrder();
    List<Parameter> params = null;
    boolean unwrappable = isUnwrappable();
    info.operation.setWrapped(unwrappable);
    params = getDoclitParameters(request, response, parameterList);
    if (!validateParameterName(params)) {
        return null;
    }
    // create a definitive list of parameters to match what we'd like to get
    // in the java interface (which is generated much later), parameterOrder
    List<Parameter> definitiveParameterList = new ArrayList<Parameter>();
    for (Parameter param : params) {
        if (param.isReturn()) {
            // the return value is tracked separately, not in the order list
            info.operation.setProperty(WSDL_RESULT_PARAMETER, param);
            response.addParameter(param);
            continue;
        }
        if (param.isIN()) {
            request.addParameter(param);
        } else if (param.isOUT()) {
            response.addParameter(param);
        } else if (param.isINOUT()) {
            request.addParameter(param);
            response.addParameter(param);
        }
        definitiveParameterList.add(param);
    }
    info.operation.setRequest(request);
    if (isRequestResponse) {
        info.operation.setResponse(response);
    }
    // faults with duplicate names
    Set duplicateNames = getDuplicateFaultNames();
    // handle soap:fault
    handleLiteralSOAPFault(response, duplicateNames);
    info.operation.setProperty(
        WSDL_PARAMETER_ORDER,
        definitiveParameterList);
    Binding binding = info.port.resolveBinding(document);
    PortType portType = binding.resolvePortType(document);
    if (isAsync(portType, info.portTypeOperation)) {
        warning(portType, "Can not generate Async methods for non-soap binding!");
    }
    return info.operation;
}
/**
 * This method was added to support a J2EE SE use case: for a non-SOAP WSDL we
 * determine whether the style should be RPC or document based on the parts of
 * the message.
 *
 * We assume that the message parts either all carry a type attribute (RPC)
 * or all carry an element attribute (DOCUMENT).
 *
 * TODO: should this check for mixed part kinds and report an error?
 */
private void setNonSoapStyle(Message inputMessage, Message outputMessage) {
    // Heuristic: a part described by an XSD type indicates RPC style, a part
    // described by an XSD element indicates DOCUMENT style. Each part seen
    // overwrites the running verdict, so the last part examined decides.
    SOAPStyle style = SOAPStyle.DOCUMENT;
    for (MessagePart part : inputMessage.getParts()) {
        style = (part.getDescriptorKind() == SchemaKinds.XSD_TYPE)
                ? SOAPStyle.RPC
                : SOAPStyle.DOCUMENT;
    }
    // Apply the same heuristic to the output message, when one exists.
    if (outputMessage != null) {
        for (MessagePart part : outputMessage.getParts()) {
            style = (part.getDescriptorKind() == SchemaKinds.XSD_TYPE)
                    ? SOAPStyle.RPC
                    : SOAPStyle.DOCUMENT;
        }
    }
    info.modelPort.setStyle(style);
}
/* (non-Javadoc)
* @see WSDLModelerBase#processSOAPOperation()
*/
/**
 * Builds the model {@code Operation} for the current SOAP-bound binding operation
 * held in {@code info}.
 * <p>
 * Validates the operation style (only one-way and request/response are supported),
 * resolves the effective SOAP style (binding default, possibly overridden per
 * operation by soap:operation), records the SOAPAction, and then delegates to
 * {@link #processLiteralSOAPOperation} for the literal processing.
 *
 * @return the populated model operation, or {@code null} when the operation is
 *         ignored in extension mode.
 */
protected Operation processSOAPOperation() {
    // Model operation is named after the wsdl:binding operation (no namespace).
    Operation operation =
        new Operation(new QName(null, info.bindingOperation.getName()), info.bindingOperation);
    setDocumentationIfPresent(
        operation,
        info.portTypeOperation.getDocumentation());
    // Notification / solicit-response styles are not supported: skip the
    // operation with a warning in extension mode, otherwise report an error.
    if (info.portTypeOperation.getStyle()
        != OperationStyle.REQUEST_RESPONSE
        && info.portTypeOperation.getStyle() != OperationStyle.ONE_WAY) {
        if (options.isExtensionMode()) {
            warning(info.portTypeOperation, ModelerMessages.WSDLMODELER_WARNING_IGNORING_OPERATION_NOT_SUPPORTED_STYLE(info.portTypeOperation.getName()));
            return null;
        } else {
            error(info.portTypeOperation, ModelerMessages.WSDLMODELER_INVALID_OPERATION_NOT_SUPPORTED_STYLE(info.portTypeOperation.getName(),
                info.port.resolveBinding(document).resolvePortType(document).getName()));
        }
    }
    // Start from the soap:binding style; a soap:operation extension may
    // override it and may also supply the SOAPAction value.
    SOAPStyle soapStyle = info.soapBinding.getStyle();
    // find out the SOAP operation extension, if any
    SOAPOperation soapOperation =
        (SOAPOperation) getExtensionOfType(info.bindingOperation,
            SOAPOperation.class);
    if (soapOperation != null) {
        if (soapOperation.getStyle() != null) {
            soapStyle = soapOperation.getStyle();
        }
        if (soapOperation.getSOAPAction() != null) {
            operation.setSOAPAction(soapOperation.getSOAPAction());
        }
    }
    operation.setStyle(soapStyle);
    // Overloaded wsdl:operations get a uniquified name for Java method mapping.
    String uniqueOperationName =
        getUniqueName(info.portTypeOperation, info.hasOverloadedOperations);
    if (info.hasOverloadedOperations) {
        operation.setUniqueName(uniqueOperationName);
    }
    info.operation = operation;
    info.uniqueOperationName = uniqueOperationName;
    //attachment
    SOAPBody soapRequestBody = getSOAPRequestBody();
    if (soapRequestBody == null) {
        // the WSDL document is invalid: input binding must contain a soap:body
        error(info.bindingOperation, ModelerMessages.WSDLMODELER_INVALID_BINDING_OPERATION_INPUT_MISSING_SOAP_BODY(info.bindingOperation.getName()));
    }
    if (soapStyle == SOAPStyle.RPC) {
        // rpc/encoded is unsupported: warn and fall back to the non-SOAP path
        // in extension mode, otherwise fail.
        if (soapRequestBody.isEncoded()) {
            if(options.isExtensionMode()){
                warning(soapRequestBody, ModelerMessages.WSDLMODELER_20_RPCENC_NOT_SUPPORTED());
                processNonSOAPOperation();
            }else{
                error(soapRequestBody, ModelerMessages.WSDLMODELER_20_RPCENC_NOT_SUPPORTED());
            }
        }
        return processLiteralSOAPOperation(StyleAndUse.RPC_LITERAL);
    }
    // document style
    return processLiteralSOAPOperation(StyleAndUse.DOC_LITERAL);
}
/**
 * Processes a literal (doc/literal or rpc/literal) SOAP operation and fills in
 * the request/response model of {@code info.operation}.
 * <p>
 * Validates names and part bindings, builds the parameter lists (wrapper-style
 * unwrapping for doc/literal, an RpcLitStructure body block for rpc/literal),
 * checks body-block uniqueness across the port, maps additional headers when
 * requested, processes faults, and finally adds async variants when async
 * mapping is enabled.
 *
 * @param styleAndUse either {@code RPC_LITERAL} or {@code DOC_LITERAL}
 * @return the populated operation, or {@code null} when the operation must be
 *         ignored (invalid names, invalid part bindings, duplicate body blocks
 *         in extension mode, ...).
 */
protected Operation processLiteralSOAPOperation(StyleAndUse styleAndUse) {
    //returns false if the operation name is not acceptable
    if (!applyOperationNameCustomization())
        return null;
    boolean isRequestResponse = info.portTypeOperation.getStyle() == OperationStyle.REQUEST_RESPONSE;
    Message inputMessage = getInputMessage();
    Request request = new Request(inputMessage, errReceiver);
    request.setErrorReceiver(errReceiver);
    info.operation.setUse(SOAPUse.LITERAL);
    info.operation.setWSDLPortTypeOperation(info.portTypeOperation);
    SOAPBody soapRequestBody = getSOAPRequestBody();
    // WS-I BP R2716: doc/literal soap:body must not carry a namespace attribute.
    if ((StyleAndUse.DOC_LITERAL == styleAndUse) && (soapRequestBody.getNamespace() != null)) {
        warning(soapRequestBody, ModelerMessages.WSDLMODELER_WARNING_R_2716("soapbind:body", info.bindingOperation.getName()));
    }
    Response response = null;
    SOAPBody soapResponseBody = null;
    Message outputMessage = null;
    if (isRequestResponse) {
        soapResponseBody = getSOAPResponseBody();
        if (isOperationDocumentLiteral(styleAndUse) && (soapResponseBody.getNamespace() != null)) {
            warning(soapResponseBody, ModelerMessages.WSDLMODELER_WARNING_R_2716("soapbind:body", info.bindingOperation.getName()));
        }
        outputMessage = getOutputMessage();
        response = new Response(outputMessage, errReceiver);
    }else{
        // one-way: response carries no message but is still created
        response = new Response(null, errReceiver);
    }
    //ignore operation if there are more than one root part
    if (!validateMimeParts(getMimeParts(info.bindingOperation.getInput())) ||
        !validateMimeParts(getMimeParts(info.bindingOperation.getOutput())))
        return null;
    if (!validateBodyParts(info.bindingOperation)) {
        // BP 1.1
        // R2204 A document-literal binding in a DESCRIPTION MUST refer, in each of its soapbind:body element(s),
        // only to wsdl:part element(s) that have been defined using the element attribute.
        // R2203 An rpc-literal binding in a DESCRIPTION MUST refer, in its soapbind:body element(s),
        // only to wsdNl:part element(s) that have been defined using the type attribute.
        if (isOperationDocumentLiteral(styleAndUse))
            if (options.isExtensionMode())
                warning(info.portTypeOperation, ModelerMessages.WSDLMODELER_WARNING_IGNORING_OPERATION_CANNOT_HANDLE_TYPE_MESSAGE_PART(info.portTypeOperation.getName()));
            else
                error(info.portTypeOperation, ModelerMessages.WSDLMODELER_INVALID_DOCLITOPERATION(info.portTypeOperation.getName()));
        else if (isOperationRpcLiteral(styleAndUse)) {
            if (options.isExtensionMode())
                warning(info.portTypeOperation, ModelerMessages.WSDLMODELER_WARNING_IGNORING_OPERATION_CANNOT_HANDLE_ELEMENT_MESSAGE_PART(info.portTypeOperation.getName()));
            else
                error(info.portTypeOperation, ModelerMessages.WSDLMODELER_INVALID_RPCLITOPERATION(info.portTypeOperation.getName()));
        }
        return null;
    }
    // Process parameterOrder and get the parameterList
    List<MessagePart> parameterList = getParameterOrder();
    //binding is invalid in the wsdl, ignore the operation.
    if (!setMessagePartsBinding(styleAndUse))
        return null;
    List<Parameter> params = null;
    boolean unwrappable = isUnwrappable();
    info.operation.setWrapped(unwrappable);
    if (isOperationDocumentLiteral(styleAndUse)) {
        params = getDoclitParameters(request, response, parameterList);
    } else if (isOperationRpcLiteral(styleAndUse)) {
        // rpc/literal: wrap the parts in synthetic RpcLitStructure body blocks
        // named after the operation (request) and operation + "Response".
        String operationName = info.bindingOperation.getName();
        Block reqBlock = null;
        if (inputMessage != null) {
            QName name = new QName(getRequestNamespaceURI(soapRequestBody), operationName);
            RpcLitStructure rpcStruct = new RpcLitStructure(name, getJAXBModelBuilder().getJAXBModel());
            rpcStruct.setJavaType(new JavaSimpleType("com.sun.xml.internal.ws.encoding.jaxb.RpcLitPayload", null));
            reqBlock = new Block(name, rpcStruct, inputMessage);
            request.addBodyBlock(reqBlock);
        }
        Block resBlock = null;
        if (isRequestResponse && outputMessage != null) {
            QName name = new QName(getResponseNamespaceURI(soapResponseBody), operationName + "Response");
            RpcLitStructure rpcStruct = new RpcLitStructure(name, getJAXBModelBuilder().getJAXBModel());
            rpcStruct.setJavaType(new JavaSimpleType("com.sun.xml.internal.ws.encoding.jaxb.RpcLitPayload", null));
            resBlock = new Block(name, rpcStruct, outputMessage);
            response.addBodyBlock(resBlock);
        }
        params = getRpcLitParameters(request, response, reqBlock, resBlock, parameterList);
    }
    if (!validateParameterName(params)) {
        return null;
    }
    // create a definitive list of parameters to match what we'd like to get
    // in the java interface (which is generated much later), parameterOrder
    List<Parameter> definitiveParameterList = new ArrayList<Parameter>();
    for (Parameter param : params) {
        // the return parameter goes only into the response, not the ordered list
        if (param.isReturn()) {
            info.operation.setProperty(WSDL_RESULT_PARAMETER, param);
            response.addParameter(param);
            continue;
        }
        if (param.isIN()) {
            request.addParameter(param);
        } else if (param.isOUT()) {
            response.addParameter(param);
        } else if (param.isINOUT()) {
            // INOUT parameters appear on both sides
            request.addParameter(param);
            response.addParameter(param);
        }
        definitiveParameterList.add(param);
    }
    info.operation.setRequest(request);
    if (isRequestResponse) {
        info.operation.setResponse(response);
    }
    // Enforce uniqueness of the request body block's element QName across the
    // port: two operations dispatching on the same body element are ambiguous.
    Iterator<Block> bb = request.getBodyBlocks();
    QName body = VOID_BODYBLOCK;
    QName opName = null;
    Operation thatOp;
    if (bb.hasNext()) {
        body = bb.next().getName();
        thatOp = uniqueBodyBlocks.get(body);
    } else {
        //there is no body block
        body = VOID_BODYBLOCK;
        thatOp = uniqueBodyBlocks.get(VOID_BODYBLOCK);
    }
    if(thatOp != null){
        if(options.isExtensionMode()){
            warning(info.port, ModelerMessages.WSDLMODELER_NON_UNIQUE_BODY_WARNING(info.port.getName(), info.operation.getName(), thatOp.getName(), body));
        }else{
            error(info.port, ModelerMessages.WSDLMODELER_NON_UNIQUE_BODY_ERROR(info.port.getName(), info.operation.getName(), thatOp.getName(), body));
        }
    }else{
        uniqueBodyBlocks.put(body, info.operation);
    }
    //Add additional headers
    if (options.additionalHeaders) {
        List<Parameter> additionalHeaders = new ArrayList<Parameter>();
        if (inputMessage != null) {
            for (MessagePart part : getAdditionHeaderParts(inputMessage, true)) {
                QName name = part.getDescriptor();
                JAXBType jaxbType = getJAXBType(part);
                Block block = new Block(name, jaxbType, part);
                Parameter param = ModelerUtils.createParameter(part.getName(), jaxbType, block);
                additionalHeaders.add(param);
                request.addHeaderBlock(block);
                request.addParameter(param);
                definitiveParameterList.add(param);
            }
        }
        if (isRequestResponse && outputMessage != null) {
            List<Parameter> outParams = new ArrayList<Parameter>();
            for (MessagePart part : getAdditionHeaderParts(outputMessage, false)) {
                QName name = part.getDescriptor();
                JAXBType jaxbType = getJAXBType(part);
                Block block = new Block(name, jaxbType, part);
                Parameter param = ModelerUtils.createParameter(part.getName(), jaxbType, block);
                param.setMode(Mode.OUT);
                outParams.add(param);
                response.addHeaderBlock(block);
                response.addParameter(param);
            }
            // Pair output headers with input headers of the same name and
            // block QName: such pairs become a single INOUT parameter.
            for (Parameter outParam : outParams) {
                for (Parameter inParam : additionalHeaders) {
                    if (inParam.getName().equals(outParam.getName()) &&
                        inParam.getBlock().getName().equals(outParam.getBlock().getName())) {
                        //it is INOUT
                        inParam.setMode(Mode.INOUT);
                        outParam.setMode(Mode.INOUT);
                        break;
                    }
                }
                if (outParam.isOUT()) {
                    definitiveParameterList.add(outParam);
                }
            }
        }
    }
    // faults with duplicate names
    Set duplicateNames = getDuplicateFaultNames();
    // handle soap:fault
    handleLiteralSOAPFault(response, duplicateNames);
    info.operation.setProperty(
        WSDL_PARAMETER_ORDER,
        definitiveParameterList);
    //set Async property
    Binding binding = info.port.resolveBinding(document);
    PortType portType = binding.resolvePortType(document);
    if (isAsync(portType, info.portTypeOperation)) {
        addAsyncOperations(info.operation, styleAndUse);
    }
    return info.operation;
}
/**
 * Validates that no parameter (or its customized name) is a Java reserved
 * word; such names cannot be used in the generated Java method signatures.
 * In extension mode the check is skipped entirely.
 *
 * @param params the parameters built for the current operation
 * @return {@code true} if the names are acceptable, {@code false} if an
 *         offending name was found (an error has already been reported)
 */
private boolean validateParameterName(List<Parameter> params) {
    if (options.isExtensionMode())
        return true;
    Message msg = getInputMessage();
    // First pass: check input-side parameters (IN and INOUT).
    for (Parameter param : params) {
        if (param.isOUT())
            continue;
        if (param.getCustomName() != null) {
            if (Names.isJavaReservedWord(param.getCustomName())) {
                error(param.getEntity(), ModelerMessages.WSDLMODELER_INVALID_OPERATION_JAVA_RESERVED_WORD_NOT_ALLOWED_CUSTOM_NAME(info.operation.getName(), param.getCustomName()));
                return false;
            }
            // NOTE(review): returning true here short-circuits validation of the
            // remaining parameters once one customized name passes — presumably
            // intentional (customization implies the user chose the names), but
            // it does skip the reserved-word check for later parameters. Verify.
            return true;
        }
        //process doclit wrapper style
        if (param.isEmbedded() && !(param.getBlock().getType() instanceof RpcLitStructure)) {
            if (Names.isJavaReservedWord(param.getName())) {
                error(param.getEntity(), ModelerMessages.WSDLMODELER_INVALID_OPERATION_JAVA_RESERVED_WORD_NOT_ALLOWED_WRAPPER_STYLE(info.operation.getName(), param.getName(), param.getBlock().getName()));
                return false;
            }
        } else {
            //non-wrapper style and rpclit
            if (Names.isJavaReservedWord(param.getName())) {
                error(param.getEntity(), ModelerMessages.WSDLMODELER_INVALID_OPERATION_JAVA_RESERVED_WORD_NOT_ALLOWED_NON_WRAPPER_STYLE(info.operation.getName(), msg.getName(), param.getName()));
                return false;
            }
        }
    }
    // Second pass (request/response only): check output-side parameters.
    boolean isRequestResponse = info.portTypeOperation.getStyle() == OperationStyle.REQUEST_RESPONSE;
    if (isRequestResponse) {
        msg = getOutputMessage();
        for (Parameter param : params) {
            if (param.isIN())
                continue;
            if (param.getCustomName() != null) {
                if (Names.isJavaReservedWord(param.getCustomName())) {
                    error(param.getEntity(), ModelerMessages.WSDLMODELER_INVALID_OPERATION_JAVA_RESERVED_WORD_NOT_ALLOWED_CUSTOM_NAME(info.operation.getName(), param.getCustomName()));
                    return false;
                }
                // NOTE(review): same early-return short-circuit as in the first pass.
                return true;
            }
            //process doclit wrapper style
            if (param.isEmbedded() && !(param.getBlock().getType() instanceof RpcLitStructure)) {
                // the return value does not appear in the Java parameter list
                if (param.isReturn())
                    continue;
                if (!param.getName().equals("return") && Names.isJavaReservedWord(param.getName())) {
                    error(param.getEntity(), ModelerMessages.WSDLMODELER_INVALID_OPERATION_JAVA_RESERVED_WORD_NOT_ALLOWED_WRAPPER_STYLE(info.operation.getName(), param.getName(), param.getBlock().getName()));
                    return false;
                }
            } else {
                if (param.isReturn())
                    continue;
                //non-wrapper style and rpclit
                if (Names.isJavaReservedWord(param.getName())) {
                    error(param.getEntity(), ModelerMessages.WSDLMODELER_INVALID_OPERATION_JAVA_RESERVED_WORD_NOT_ALLOWED_NON_WRAPPER_STYLE(info.operation.getName(), msg.getName(), param.getName()));
                    return false;
                }
            }
        }
    }
    return true;
}
/**
 * Determines whether MIME content mapping is enabled for the current binding
 * operation via the jaxws customization, searching in precedence order:
 * wsdl:binding operation, then wsdl:binding, then wsdl:definitions.
 * Defaults to {@code false} when no customization specifies it.
 */
private boolean enableMimeContent() {
    //first we look at binding operation
    JAXWSBinding customization =
        (JAXWSBinding) getExtensionOfType(info.bindingOperation, JAXWSBinding.class);
    if (customization != null) {
        Boolean enabled = customization.isEnableMimeContentMapping();
        if (enabled != null) {
            return enabled;
        }
    }
    //then in wsdl:binding
    Binding binding = info.port.resolveBinding(info.document);
    customization = (JAXWSBinding) getExtensionOfType(binding, JAXWSBinding.class);
    if (customization != null) {
        Boolean enabled = customization.isEnableMimeContentMapping();
        if (enabled != null) {
            return enabled;
        }
    }
    //at last look in wsdl:definitions
    customization =
        (JAXWSBinding) getExtensionOfType(info.document.getDefinitions(), JAXWSBinding.class);
    if (customization != null) {
        Boolean enabled = customization.isEnableMimeContentMapping();
        if (enabled != null) {
            return enabled;
        }
    }
    return false;
}
/**
 * Applies a jaxws:method name customization to the current operation, if one
 * is present, and rejects operation names that are Java reserved words.
 *
 * @return {@code false} when the (customized or default) name is a Java
 *         reserved word — a warning (extension mode) or error has been
 *         reported; {@code true} otherwise.
 */
private boolean applyOperationNameCustomization() {
    JAXWSBinding customization =
        (JAXWSBinding) getExtensionOfType(info.portTypeOperation, JAXWSBinding.class);
    String customName = null;
    if (customization != null && customization.getMethodName() != null) {
        customName = customization.getMethodName().getName();
    }
    if (customName != null) {
        // A customized name that is a reserved word cannot be used.
        if (Names.isJavaReservedWord(customName)) {
            if (options.isExtensionMode()) {
                warning(info.portTypeOperation, ModelerMessages.WSDLMODELER_WARNING_IGNORING_OPERATION_JAVA_RESERVED_WORD_NOT_ALLOWED_CUSTOMIZED_OPERATION_NAME(info.operation.getName(), customName));
            } else {
                error(info.portTypeOperation, ModelerMessages.WSDLMODELER_INVALID_OPERATION_JAVA_RESERVED_WORD_NOT_ALLOWED_CUSTOMIZED_OPERATION_NAME(info.operation.getName(), customName));
            }
            return false;
        }
        info.operation.setCustomizedName(customName);
    }
    // The effective Java method name must not be a reserved word either.
    if (Names.isJavaReservedWord(info.operation.getJavaMethodName())) {
        if (options.isExtensionMode()) {
            warning(info.portTypeOperation, ModelerMessages.WSDLMODELER_WARNING_IGNORING_OPERATION_JAVA_RESERVED_WORD_NOT_ALLOWED_OPERATION_NAME(info.operation.getName()));
        } else {
            error(info.portTypeOperation, ModelerMessages.WSDLMODELER_INVALID_OPERATION_JAVA_RESERVED_WORD_NOT_ALLOWED_OPERATION_NAME(info.operation.getName()));
        }
        return false;
    }
    return true;
}
/**
 * Returns the name used for the async variants of an operation: the
 * customized name when present, otherwise the unique generated name.
 */
protected String getAsyncOperationName(Operation operation) {
    String customized = operation.getCustomizedName();
    return (customized != null) ? customized : operation.getUniqueName();
}
/**
* @param styleAndUse
*/
/**
 * Adds the polling and callback async variants of a synchronous operation to
 * the model port. Variants that cannot be created (one-way operations) are
 * silently skipped.
 */
private void addAsyncOperations(Operation syncOperation, StyleAndUse styleAndUse) {
    for (AsyncOperationType asyncType :
            new AsyncOperationType[]{AsyncOperationType.POLLING, AsyncOperationType.CALLBACK}) {
        Operation asyncOp = createAsyncOperation(syncOperation, styleAndUse, asyncType);
        if (asyncOp != null) {
            info.modelPort.addOperation(asyncOp);
        }
    }
}
/**
 * Creates an async (polling or callback) variant of a request/response
 * operation, reusing the sync operation's response blocks and building the
 * async response bean and, for callbacks, the asyncHandler parameter.
 *
 * @param syncOperation the synchronous operation the async variant mirrors
 * @param styleAndUse   doc/literal or rpc/literal processing mode
 * @param asyncType     POLLING or CALLBACK
 * @return the async operation, or {@code null} for one-way operations or when
 *         a named parameter cannot be resolved
 */
private Operation createAsyncOperation(Operation syncOperation, StyleAndUse styleAndUse, AsyncOperationType asyncType) {
    boolean isRequestResponse = info.portTypeOperation.getStyle() == OperationStyle.REQUEST_RESPONSE;
    // Async variants only make sense for request/response operations.
    if (!isRequestResponse)
        return null;
    //create async operations
    AsyncOperation operation = new AsyncOperation(info.operation, info.bindingOperation);
    //creation the async operation name: operationName+Async or customized name
    //operation.setName(new QName(operation.getName().getNamespaceURI(), getAsyncOperationName(info.portTypeOperation, operation)));
    if (asyncType.equals(AsyncOperationType.CALLBACK))
        operation.setUniqueName(info.operation.getUniqueName() + "_async_callback");
    else if (asyncType.equals(AsyncOperationType.POLLING))
        operation.setUniqueName(info.operation.getUniqueName() + "_async_polling");
    setDocumentationIfPresent(
        operation,
        info.portTypeOperation.getDocumentation());
    operation.setAsyncType(asyncType);
    operation.setSOAPAction(info.operation.getSOAPAction());
    boolean unwrappable = info.operation.isWrapped();
    operation.setWrapped(unwrappable);
    SOAPBody soapRequestBody = getSOAPRequestBody();
    Message inputMessage = getInputMessage();
    Request request = new Request(inputMessage, errReceiver);
    Response response = new Response(null, errReceiver);
    SOAPBody soapResponseBody = null;
    Message outputMessage = null;
    // isRequestResponse is guaranteed true here (early return above).
    if (isRequestResponse) {
        soapResponseBody = getSOAPResponseBody();
        outputMessage = getOutputMessage();
        response = new Response(outputMessage, errReceiver);
    }
    // Process parameterOrder and get the parameterList
    java.util.List<String> parameterList = getAsynParameterOrder();
    List<Parameter> inParameters = null;
    if (isOperationDocumentLiteral(styleAndUse)) {
        inParameters = getRequestParameters(request, parameterList);
        // outParameters = getResponseParameters(response);
        // re-create parameterList with unwrapped parameters
        // For wrapper style, the parameter names become the local names of the
        // wrapper bean's child elements.
        if (unwrappable) {
            List<String> unwrappedParameterList = new ArrayList<String>();
            if (inputMessage != null) {
                Iterator<MessagePart> parts = inputMessage.parts();
                if (parts.hasNext()) {
                    MessagePart part = parts.next();
                    JAXBType jaxbType = getJAXBType(part);
                    List<JAXBProperty> memberList = jaxbType.getWrapperChildren();
                    Iterator<JAXBProperty> props = memberList.iterator();
                    while (props.hasNext()) {
                        JAXBProperty prop = props.next();
                        unwrappedParameterList.add(prop.getElementName().getLocalPart());
                    }
                }
            }
            parameterList.clear();
            parameterList.addAll(unwrappedParameterList);
        }
    } else if (isOperationRpcLiteral(styleAndUse)) {
        // rpc/literal: build the synthetic RpcLitStructure request body block,
        // mirroring processLiteralSOAPOperation.
        String operationName = info.bindingOperation.getName();
        Block reqBlock = null;
        if (inputMessage != null) {
            QName name = new QName(getRequestNamespaceURI(soapRequestBody), operationName);
            RpcLitStructure rpcStruct = new RpcLitStructure(name, getJAXBModelBuilder().getJAXBModel());
            rpcStruct.setJavaType(new JavaSimpleType("com.sun.xml.internal.ws.encoding.jaxb.RpcLitPayload", null));
            reqBlock = new Block(name, rpcStruct, inputMessage);
            request.addBodyBlock(reqBlock);
        }
        inParameters = createRpcLitRequestParameters(request, parameterList, reqBlock);
    }
    // add response blocks, we dont need to create respnse parameters, just blocks will be fine, lets
    // copy them from sync optraions
    //copy the response blocks from the sync operation
    Iterator<Block> blocks = info.operation.getResponse().getBodyBlocks();
    while (blocks.hasNext()) {
        response.addBodyBlock(blocks.next());
    }
    blocks = info.operation.getResponse().getHeaderBlocks();
    while (blocks.hasNext()) {
        response.addHeaderBlock(blocks.next());
    }
    blocks = info.operation.getResponse().getAttachmentBlocks();
    while (blocks.hasNext()) {
        response.addAttachmentBlock(blocks.next());
    }
    List<MessagePart> outputParts = outputMessage.getParts();
    // handle headers
    // Response bean selection: a single output part maps directly; multiple
    // parts require a pre-generated <operation>Response bean from JAXB.
    int numOfOutMsgParts = outputParts.size();
    if (isRequestResponse) {
        if (numOfOutMsgParts == 1) {
            MessagePart part = outputParts.get(0);
            if (isOperationDocumentLiteral(styleAndUse)) {
                JAXBType type = getJAXBType(part);
                operation.setResponseBean(type);
            } else if (isOperationRpcLiteral(styleAndUse)) {
                String operationName = info.bindingOperation.getName();
                Block resBlock = null;
                if (isRequestResponse && outputMessage != null) {
                    resBlock = info.operation.getResponse().getBodyBlocksMap().get(new QName(getResponseNamespaceURI(soapResponseBody),
                        operationName + "Response"));
                }
                RpcLitStructure resBean = (resBlock == null) ? null : (RpcLitStructure) resBlock.getType();
                List<RpcLitMember> members = resBean.getRpcLitMembers();
                operation.setResponseBean(members.get(0));
            }
        } else {
            //create response bean
            String nspace = "";
            QName responseBeanName = new QName(nspace, getAsyncOperationName(info.operation) + "Response");
            JAXBType responseBeanType = jaxbModelBuilder.getJAXBType(responseBeanName);
            if(responseBeanType == null){
                error(info.operation.getEntity(), ModelerMessages.WSDLMODELER_RESPONSEBEAN_NOTFOUND(info.operation.getName()));
            }
            operation.setResponseBean(responseBeanType);
        }
    }
    // Wrap the response bean in a block/parameter with index -1 (return value).
    QName respBeanName = new QName(soapResponseBody.getNamespace(), getAsyncOperationName(info.operation) + "Response");
    Block block = new Block(respBeanName, operation.getResponseBeanType(), outputMessage);
    JavaType respJavaType = operation.getResponseBeanJavaType();
    JAXBType respType = new JAXBType(respBeanName, respJavaType);
    Parameter respParam = ModelerUtils.createParameter(info.operation.getName() + "Response", respType, block);
    respParam.setParameterIndex(-1);
    response.addParameter(respParam);
    operation.setProperty(WSDL_RESULT_PARAMETER, respParam.getName());
    // Wire the ordered request parameters; a missing part is a warning in
    // extension mode (operation skipped) and an error otherwise.
    List<String> definitiveParameterList = new ArrayList<String>();
    int parameterOrderPosition = 0;
    for (String name : parameterList) {
        Parameter inParameter = null;
        inParameter = ModelerUtils.getParameter(name, inParameters);
        if (inParameter == null) {
            if (options.isExtensionMode())
                warning(info.operation.getEntity(), ModelerMessages.WSDLMODELER_WARNING_IGNORING_OPERATION_PART_NOT_FOUND(info.operation.getName().getLocalPart(), name));
            else
                error(info.operation.getEntity(), ModelerMessages.WSDLMODELER_ERROR_PART_NOT_FOUND(info.operation.getName().getLocalPart(), name));
            return null;
        }
        request.addParameter(inParameter);
        inParameter.setParameterIndex(parameterOrderPosition);
        definitiveParameterList.add(name);
        parameterOrderPosition++;
    }
    if (isRequestResponse) {
        operation.setResponse(response);
    }
    // add callback handlerb Parameter to request
    if (operation.getAsyncType().equals(AsyncOperationType.CALLBACK)) {
        JavaType cbJavaType = operation.getCallBackType();
        JAXBType callbackType = new JAXBType(respBeanName, cbJavaType);
        Parameter cbParam = ModelerUtils.createParameter("asyncHandler", callbackType, block);
        request.addParameter(cbParam);
    }
    operation.setRequest(request);
    return operation;
}
/**
 * Determines whether async method mapping is enabled for the given operation.
 * The jaxws customization is looked up in precedence order: wsdl:operation,
 * then wsdl:portType, then wsdl:definitions; the first non-null
 * enableAsyncMapping value wins. Defaults to {@code false}.
 *
 * @param portType      the portType declaring the operation
 * @param wsdlOperation the abstract wsdl:operation
 * @return {@code true} if async mapping is enabled for this operation
 */
protected boolean isAsync(com.sun.tools.internal.ws.wsdl.document.PortType portType, com.sun.tools.internal.ws.wsdl.document.Operation wsdlOperation) {
    //First look into wsdl:operation
    JAXWSBinding jaxwsCustomization = (JAXWSBinding) getExtensionOfType(wsdlOperation, JAXWSBinding.class);
    Boolean isAsync = (jaxwsCustomization != null) ? jaxwsCustomization.isEnableAsyncMapping() : null;
    if (isAsync != null)
        return isAsync;
    // then into wsdl:portType
    // (The original code built a QName for the portType and null-checked it;
    // `new QName(...)` can never return null, so that guard was dead code and
    // the QName itself was unused — both removed.)
    jaxwsCustomization = (JAXWSBinding) getExtensionOfType(portType, JAXWSBinding.class);
    isAsync = (jaxwsCustomization != null) ? jaxwsCustomization.isEnableAsyncMapping() : null;
    if (isAsync != null)
        return isAsync;
    //then wsdl:definitions
    jaxwsCustomization = (JAXWSBinding) getExtensionOfType(document.getDefinitions(), JAXWSBinding.class);
    isAsync = (jaxwsCustomization != null) ? jaxwsCustomization.isEnableAsyncMapping() : null;
    if (isAsync != null)
        return isAsync;
    return false;
}
/**
 * Processes literal soap:header parts for the request or response side,
 * adding a header block and parameter per part. Response-side headers that
 * match a request parameter by name are linked into a single INOUT parameter
 * sharing the same parameter index.
 *
 * @param request                 the request being built
 * @param response                the response being built
 * @param headerParts             iterator over the header {@code MessagePart}s
 * @param duplicateNames          set of duplicate header names (currently unused here)
 * @param definitiveParameterList ordered list of parameter names; appended to
 * @param processRequest          {@code true} for input headers, {@code false} for output
 */
protected void handleLiteralSOAPHeaders(Request request, Response response, Iterator headerParts, Set duplicateNames, List<String> definitiveParameterList, boolean processRequest) {
    QName headerName = null;
    Block headerBlock = null;
    JAXBType jaxbType = null;
    // New header parameters are appended after the existing ones.
    int parameterOrderPosition = definitiveParameterList.size();
    while (headerParts.hasNext()) {
        MessagePart part = (MessagePart) headerParts.next();
        headerName = part.getDescriptor();
        jaxbType = getJAXBType(part);
        headerBlock = new Block(headerName, jaxbType, part);
        // The soap:header extension lives on the binding input or output.
        TWSDLExtensible ext;
        if (processRequest) {
            ext = info.bindingOperation.getInput();
        } else {
            ext = info.bindingOperation.getOutput();
        }
        Message headerMessage = getHeaderMessage(part, ext);
        if (processRequest) {
            request.addHeaderBlock(headerBlock);
        } else {
            response.addHeaderBlock(headerBlock);
        }
        Parameter parameter = ModelerUtils.createParameter(part.getName(), jaxbType, headerBlock);
        parameter.setParameterIndex(parameterOrderPosition);
        setCustomizedParameterName(info.bindingOperation, headerMessage, part, parameter, false);
        if (processRequest && definitiveParameterList != null) {
            request.addParameter(parameter);
            definitiveParameterList.add(parameter.getName());
        } else {
            // Response side: link with a same-named request parameter (INOUT).
            if (definitiveParameterList != null) {
                for (Iterator iterInParams = definitiveParameterList.iterator(); iterInParams.hasNext();) {
                    String inParamName =
                        (String) iterInParams.next();
                    if (inParamName.equals(parameter.getName())) {
                        Parameter inParam = request.getParameterByName(inParamName);
                        parameter.setLinkedParameter(inParam);
                        inParam.setLinkedParameter(parameter);
                        //its in/out parameter, input and output parameter have the same order position.
                        parameter.setParameterIndex(inParam.getParameterIndex());
                    }
                }
                if (!definitiveParameterList.contains(parameter.getName())) {
                    definitiveParameterList.add(parameter.getName());
                }
            }
            response.addParameter(parameter);
        }
        parameterOrderPosition++;
    }
}
/**
 * Processes the soap:fault bindings of the current binding operation and adds
 * a model {@code Fault} (with its block) to the response and operation.
 * <p>
 * Validates that each binding fault is bound exactly once, that a
 * soapbind:fault is present and literal, that its name matches the wsdl:fault,
 * and that the fault message has exactly one element-described part. Faults
 * whose soap:fault names collide ({@code duplicateNames}) are skipped.
 *
 * @param response       the response to receive the fault blocks
 * @param duplicateNames QNames of fault parts with non-unique soap:fault names
 */
protected void handleLiteralSOAPFault(Response response, Set duplicateNames) {
    for (BindingFault bindingFault : info.bindingOperation.faults()) {
        // Locate the abstract wsdl:fault matching this binding fault by name.
        com.sun.tools.internal.ws.wsdl.document.Fault portTypeFault = null;
        for (com.sun.tools.internal.ws.wsdl.document.Fault aFault : info.portTypeOperation.faults()) {
            if (aFault.getName().equals(bindingFault.getName())) {
                if (portTypeFault != null) {
                    // the WSDL document is invalid, a wsld:fault in a wsdl:operation of a portType can be bound only once
                    error(portTypeFault, ModelerMessages.WSDLMODELER_INVALID_BINDING_FAULT_NOT_UNIQUE(bindingFault.getName(), info.bindingOperation.getName()));
                }
                portTypeFault = aFault;
            }
        }
        // The WSDL document is invalid, the wsdl:fault in abstract operation is does not have any binding
        if (portTypeFault == null) {
            error(bindingFault, ModelerMessages.WSDLMODELER_INVALID_BINDING_FAULT_NOT_FOUND(bindingFault.getName(), info.bindingOperation.getName()));
        }
        // wsdl:fault message name is used to create the java exception name later on
        String faultName = getFaultClassName(portTypeFault);
        Fault fault = new Fault(faultName, portTypeFault);
        fault.setWsdlFaultName(portTypeFault.getName());
        setDocumentationIfPresent(fault, portTypeFault.getDocumentation());
        //get the soapbind:fault from wsdl:fault in the binding
        SOAPFault soapFault = (SOAPFault) getExtensionOfType(bindingFault, SOAPFault.class);
        // The WSDL document is invalid, can't have wsdl:fault without soapbind:fault
        if (soapFault == null) {
            if(options.isExtensionMode()){
                // Extension mode: warn and substitute an empty soap:fault so
                // processing can continue.
                warning(bindingFault, ModelerMessages.WSDLMODELER_INVALID_BINDING_FAULT_OUTPUT_MISSING_SOAP_FAULT(bindingFault.getName(), info.bindingOperation.getName()));
                soapFault = new SOAPFault(new LocatorImpl());
            }else{
                error(bindingFault, ModelerMessages.WSDLMODELER_INVALID_BINDING_FAULT_OUTPUT_MISSING_SOAP_FAULT(bindingFault.getName(), info.bindingOperation.getName()));
            }
        }
        //the soapbind:fault must have use="literal" or no use attribute, in that case its assumed "literal"
        if (!soapFault.isLiteral()) {
            if (options.isExtensionMode())
                warning(soapFault, ModelerMessages.WSDLMODELER_WARNING_IGNORING_FAULT_NOT_LITERAL(bindingFault.getName(), info.bindingOperation.getName()));
            else
                error(soapFault, ModelerMessages.WSDLMODELER_INVALID_OPERATION_FAULT_NOT_LITERAL(bindingFault.getName(), info.bindingOperation.getName()));
            continue;
        }
        // the soapFault name must be present and match the wsdl:fault name;
        // mismatches and namespace attributes (WS-I R2716/R2726) only warn.
        if (soapFault.getName() == null) {
            warning(bindingFault, ModelerMessages.WSDLMODELER_INVALID_BINDING_FAULT_NO_SOAP_FAULT_NAME(bindingFault.getName(), info.bindingOperation.getName()));
        } else if (!soapFault.getName().equals(bindingFault.getName())) {
            warning(soapFault, ModelerMessages.WSDLMODELER_INVALID_BINDING_FAULT_WRONG_SOAP_FAULT_NAME(soapFault.getName(), bindingFault.getName(), info.bindingOperation.getName()));
        } else if (soapFault.getNamespace() != null) {
            warning(soapFault, ModelerMessages.WSDLMODELER_WARNING_R_2716_R_2726("soapbind:fault", soapFault.getName()));
        }
        // NOTE(review): faultNamespaceURI is computed but not used below in
        // this block — presumably consumed elsewhere historically; verify.
        String faultNamespaceURI = soapFault.getNamespace();
        if (faultNamespaceURI == null) {
            faultNamespaceURI = portTypeFault.getMessage().getNamespaceURI();
        }
        com.sun.tools.internal.ws.wsdl.document.Message faultMessage = portTypeFault.resolveMessage(info.document);
        Iterator iter2 = faultMessage.parts();
        if (!iter2.hasNext()) {
            // the WSDL document is invalid: fault message must have one part
            error(faultMessage, ModelerMessages.WSDLMODELER_INVALID_BINDING_FAULT_EMPTY_MESSAGE(bindingFault.getName(), faultMessage.getName()));
        }
        MessagePart faultPart = (MessagePart) iter2.next();
        QName faultQName = faultPart.getDescriptor();
        // Don't include fault messages with non-unique soap:fault names
        if (duplicateNames.contains(faultQName)) {
            warning(faultPart, ModelerMessages.WSDLMODELER_DUPLICATE_FAULT_SOAP_NAME(bindingFault.getName(), info.portTypeOperation.getName(), faultPart.getName()));
            continue;
        }
        if (iter2.hasNext()) {
            // the WSDL document is invalid: at most one part per fault message
            error(faultMessage, ModelerMessages.WSDLMODELER_INVALID_BINDING_FAULT_MESSAGE_HAS_MORE_THAN_ONE_PART(bindingFault.getName(), faultMessage.getName()));
        }
        // Literal faults must be described by an element, not a type.
        if (faultPart.getDescriptorKind() != SchemaKinds.XSD_ELEMENT) {
            error(faultPart, ModelerMessages.WSDLMODELER_INVALID_MESSAGE_PART_MUST_HAVE_ELEMENT_DESCRIPTOR(faultMessage.getName(), faultPart.getName()));
        }
        JAXBType jaxbType = getJAXBType(faultPart);
        fault.setElementName(faultPart.getDescriptor());
        fault.setJavaMemberName(Names.getExceptionClassMemberName());
        Block faultBlock = new Block(faultQName, jaxbType, faultPart);
        fault.setBlock(faultBlock);
        //createParentFault(fault);
        //createSubfaults(fault);
        // Avoid registering the same fault block QName twice on the response.
        if (!response.getFaultBlocksMap().containsKey(faultBlock.getName()))
            response.addFaultBlock(faultBlock);
        info.operation.addFault(fault);
    }
}
/**
 * Resolves the Java exception class name for a wsdl:fault: a jaxws:class
 * customization wins; otherwise the local part of the fault's message QName
 * is used.
 */
private String getFaultClassName(com.sun.tools.internal.ws.wsdl.document.Fault portTypeFault) {
    JAXWSBinding customization = (JAXWSBinding) getExtensionOfType(portTypeFault, JAXWSBinding.class);
    if (customization != null && customization.getClassName() != null) {
        return customization.getClassName().getName();
    }
    return portTypeFault.getMessage().getLocalPart();
}
/**
 * Assigns binding kinds (body/header/mime) to the parts of the input message
 * and, for request/response operations, the output message.
 *
 * @param styleAndUse doc/literal or rpc/literal processing mode
 * @return {@code false} when the WSDL binding is invalid and the operation
 *         should be ignored, {@code true} otherwise
 */
protected boolean setMessagePartsBinding(StyleAndUse styleAndUse) {
    // Input side first; bail out immediately on an invalid binding.
    if (!setMessagePartsBinding(getSOAPRequestBody(), getInputMessage(), styleAndUse, true)) {
        return false;
    }
    // One-way operations have no output side to bind.
    return !isRequestResponse()
            || setMessagePartsBinding(getSOAPResponseBody(), getOutputMessage(), styleAndUse, false);
}
// Returns false if the WSDL is invalid and the operation should be ignored.
/**
 * Classifies every part of {@code message} as SOAP body, SOAP header, MIME
 * content, or unbound, recording the classification on each part via
 * {@code setBindingExtensibilityElementKind}.
 *
 * @param body       the soap:body extension of the binding input/output
 * @param message    the abstract wsdl:message being bound
 * @param styleAndUse doc/literal vs rpc/literal — doc/literal allows at most one body part
 * @param isInput    true when binding the input message, false for the output
 * @return false if the WSDL is invalid and the operation should be ignored
 */
protected boolean setMessagePartsBinding(SOAPBody body, Message message, StyleAndUse styleAndUse, boolean isInput) {
    List<MessagePart> parts = new ArrayList<MessagePart>();
    // Collect the parts already claimed by mime:content and soap:header bindings.
    List<MessagePart> mimeParts = null;
    List<MessagePart> headerParts = null;
    List<MessagePart> bodyParts = getBodyParts(body, message);
    if (isInput) {
        headerParts = getHeaderPartsFromMessage(message, isInput);
        mimeParts = getMimeContentParts(message, info.bindingOperation.getInput());
    } else {
        headerParts = getHeaderPartsFromMessage(message, isInput);
        mimeParts = getMimeContentParts(message, info.bindingOperation.getOutput());
    }
    // As of now WSDL MIME binding is not supported, so throw the exception when such binding is encountered
    // if(mimeParts.size() > 0){
    //     fail("wsdlmodeler.unsupportedBinding.mime", new Object[]{});
    // }
    // If the soap:body parts attribute is absent, every part that is not bound
    // to a header, mime content, or fault defaults to the soap body.
    if (bodyParts == null) {
        bodyParts = new ArrayList<MessagePart>();
        for (Iterator<MessagePart> iter = message.parts(); iter.hasNext();) {
            MessagePart mPart = iter.next();
            // It's a safe assumption that the parts in the message not belonging to header or mime will
            // belong to the body.
            if (mimeParts.contains(mPart) || headerParts.contains(mPart) || boundToFault(mPart.getName())) {
                // A part can't be bound multiple times; not ignoring the operation here —
                // if there is a real conflict it will fail later.
                if (options.isExtensionMode())
                    warning(mPart, ModelerMessages.WSDLMODELER_WARNING_BINDING_OPERATION_MULTIPLE_PART_BINDING(info.bindingOperation.getName(), mPart.getName()));
                else
                    error(mPart, ModelerMessages.WSDLMODELER_INVALID_BINDING_OPERATION_MULTIPLE_PART_BINDING(info.bindingOperation.getName(), mPart.getName()));
            }
            bodyParts.add(mPart);
        }
    }
    // Now build the final parts list, tagging each part with the binding it resolved to.
    for (Iterator iter = message.parts(); iter.hasNext();) {
        MessagePart mPart = (MessagePart) iter.next();
        if (mimeParts.contains(mPart)) {
            mPart.setBindingExtensibilityElementKind(MessagePart.WSDL_MIME_BINDING);
            parts.add(mPart);
        } else if (headerParts.contains(mPart)) {
            mPart.setBindingExtensibilityElementKind(MessagePart.SOAP_HEADER_BINDING);
            parts.add(mPart);
        } else if (bodyParts.contains(mPart)) {
            mPart.setBindingExtensibilityElementKind(MessagePart.SOAP_BODY_BINDING);
            parts.add(mPart);
        } else {
            mPart.setBindingExtensibilityElementKind(MessagePart.PART_NOT_BOUNDED);
        }
    }
    // Doc/literal allows at most one soap:body part (WS-I BP); warn in extension mode.
    if (isOperationDocumentLiteral(styleAndUse) && bodyParts.size() > 1) {
        if (options.isExtensionMode())
            warning(message, ModelerMessages.WSDLMODELER_WARNING_OPERATION_MORE_THAN_ONE_PART_IN_MESSAGE(info.portTypeOperation.getName()));
        else
            error(message, ModelerMessages.WSDLMODELER_INVALID_OPERATION_MORE_THAN_ONE_PART_IN_MESSAGE(info.portTypeOperation.getName()));
        return false;
    }
    return true;
}
/**
 * Returns true when a soap:fault in the current binding operation carries
 * the same name as the given part.
 */
private boolean boundToFault(String partName) {
    for (BindingFault fault : info.bindingOperation.faults()) {
        if (partName.equals(fault.getName())) {
            return true;
        }
    }
    return false;
}
/**
 * Resolves the MessagePart(s) referenced by the "parts" attribute of the
 * soap:body element. Each resolved part is marked as SOAP_BODY_BINDING.
 *
 * @return the referenced parts, or null when no parts attribute is present
 *         (the caller then treats every unbound part as a body part)
 */
private List<MessagePart> getBodyParts(SOAPBody body, Message message) {
    String partsAttr = body.getParts();
    if (partsAttr == null) {
        return null;
    }
    List<MessagePart> bound = new ArrayList<MessagePart>();
    for (StringTokenizer tokens = new StringTokenizer(partsAttr.trim(), " "); tokens.hasMoreTokens();) {
        String partName = tokens.nextToken();
        MessagePart resolved = message.getPart(partName);
        if (null == resolved) {
            // error() aborts processing of this WSDL; presumably it throws — TODO confirm.
            error(message, ModelerMessages.WSDLMODELER_ERROR_PARTS_NOT_FOUND(partName, message.getName()));
        }
        resolved.setBindingExtensibilityElementKind(MessagePart.SOAP_BODY_BINDING);
        bound.add(resolved);
    }
    return bound;
}
/**
 * Returns the header-bound parts that are NOT declared in the given message —
 * i.e. headers pulled in from other messages via soap:header/@message.
 */
private List<MessagePart> getAdditionHeaderParts(Message message, boolean isInput){
    List<MessagePart> additional = new ArrayList<MessagePart>();
    List<MessagePart> declaredParts = message.getParts();
    for (MessagePart headerPart : getHeaderParts(isInput)) {
        if (!declaredParts.contains(headerPart)) {
            additional.add(headerPart);
        }
    }
    return additional;
}
/**
 * Returns the parts of the given message that are bound to soap:header
 * elements of the current binding operation.
 */
private List<MessagePart> getHeaderPartsFromMessage(Message message, boolean isInput) {
    List<MessagePart> result = new ArrayList<MessagePart>();
    Iterator<MessagePart> partIter = message.parts();
    List<MessagePart> boundHeaders = getHeaderParts(isInput);
    while (partIter.hasNext()) {
        MessagePart candidate = partIter.next();
        if (boundHeaders.contains(candidate)) {
            result.add(candidate);
        }
    }
    return result;
}
/**
 * Finds the wsdl:message that contributes the given part via a literal
 * soap:header on the supplied extensible element.
 *
 * @return the contributing message, or null if no literal header references
 *         this exact part instance
 */
private Message getHeaderMessage(MessagePart part, TWSDLExtensible ext) {
    for (SOAPHeader header : getHeaderExtensions(ext)) {
        if (!header.isLiteral()) {
            continue;
        }
        com.sun.tools.internal.ws.wsdl.document.Message headerMessage = findMessage(header.getMessage(), info);
        if (headerMessage == null) {
            continue;
        }
        // Identity comparison on purpose: the part instance must come from this message.
        if (headerMessage.getPart(header.getPart()) == part) {
            return headerMessage;
        }
    }
    return null;
}
/**
 * Collects the MessagePart(s) bound via soap:header on the input or output of
 * the current binding operation, validating each header on the way. Each
 * resolved part is marked as SOAP_HEADER_BINDING.
 * <p>
 * NOTE(review): the code dereferences {@code headerMessage}/{@code part}
 * right after reporting them via {@code error(...)} — this relies on
 * {@code error} throwing (aborting) rather than returning; confirm before
 * restructuring.
 *
 * @param isInput true to inspect the binding input, false for the output
 */
private List<MessagePart> getHeaderParts(boolean isInput) {
    TWSDLExtensible ext;
    if (isInput) {
        ext = info.bindingOperation.getInput();
    } else {
        ext = info.bindingOperation.getOutput();
    }
    List<MessagePart> parts = new ArrayList<MessagePart>();
    Iterator<SOAPHeader> headers = getHeaderExtensions(ext).iterator();
    while (headers.hasNext()) {
        SOAPHeader header = headers.next();
        // Only literal headers are supported.
        if (!header.isLiteral()) {
            error(header, ModelerMessages.WSDLMODELER_INVALID_HEADER_NOT_LITERAL(header.getPart(), info.bindingOperation.getName()));
        }
        // WS-I BP R2716/R2726: namespace attribute must not appear on a literal header.
        if (header.getNamespace() != null) {
            warning(header, ModelerMessages.WSDLMODELER_WARNING_R_2716_R_2726("soapbind:header", info.bindingOperation.getName()));
        }
        com.sun.tools.internal.ws.wsdl.document.Message headerMessage = findMessage(header.getMessage(), info);
        if (headerMessage == null) {
            error(header, ModelerMessages.WSDLMODELER_INVALID_HEADER_CANT_RESOLVE_MESSAGE(header.getMessage(), info.bindingOperation.getName()));
        }
        MessagePart part = headerMessage.getPart(header.getPart());
        if (part == null) {
            error(header, ModelerMessages.WSDLMODELER_INVALID_HEADER_NOT_FOUND(header.getPart(), info.bindingOperation.getName()));
        }
        // Literal headers must reference an element declaration, not a type.
        if (part.getDescriptorKind() != SchemaKinds.XSD_ELEMENT) {
            error(part, ModelerMessages.WSDLMODELER_INVALID_HEADER_MESSAGE_PART_MUST_HAVE_ELEMENT_DESCRIPTOR(part.getName(), info.bindingOperation.getName()));
        }
        part.setBindingExtensibilityElementKind(MessagePart.SOAP_HEADER_BINDING);
        parts.add(part);
    }
    return parts;
}
/** True when the operation resolved to document/literal style. */
private boolean isOperationDocumentLiteral(StyleAndUse styleAndUse) {
    // Reference equality is intentional — StyleAndUse values are singletons.
    return styleAndUse == StyleAndUse.DOC_LITERAL;
}
/** True when the operation resolved to rpc/literal style. */
private boolean isOperationRpcLiteral(StyleAndUse styleAndUse) {
    // Reference equality is intentional — StyleAndUse values are singletons.
    return styleAndUse == StyleAndUse.RPC_LITERAL;
}
/**
 * Resolves the JAXB type bound to a message part.
 * <p>
 * Element-described parts are looked up in the JAXB model by element QName;
 * type-described parts are resolved to a Java type and wrapped in a synthetic
 * JAXBType keyed by the part name. Reports an error when the JAXB model has
 * no mapping.
 *
 * @param part the wsdl:part to resolve
 * @return the corresponding JAXBType
 */
private JAXBType getJAXBType(MessagePart part) {
    QName descriptor = part.getDescriptor();
    if (part.getDescriptorKind().equals(SchemaKinds.XSD_ELEMENT)) {
        JAXBType elementType = jaxbModelBuilder.getJAXBType(descriptor);
        if (elementType == null) {
            error(part, ModelerMessages.WSDLMODELER_JAXB_JAVATYPE_NOTFOUND(descriptor, part.getName()));
        }
        return elementType;
    }
    // Part described by a type: ask the schema-to-java model for the Java mapping.
    S2JJAXBModel jaxbModel = getJAXBModelBuilder().getJAXBModel().getS2JJAXBModel();
    TypeAndAnnotation typeAnno = jaxbModel.getJavaType(descriptor);
    if (typeAnno == null) {
        error(part, ModelerMessages.WSDLMODELER_JAXB_JAVATYPE_NOTFOUND(descriptor, part.getName()));
    }
    JavaType javaType = new JavaSimpleType(new JAXBTypeAndAnnotation(typeAnno));
    return new JAXBType(new QName("", part.getName()), javaType);
}
/**
 * Builds the doc/literal {@link Parameter} list for an operation, populating
 * the request/response with body, header, attachment, and unbound blocks as
 * each part's binding dictates.
 * <p>
 * Two modes:
 * <ul>
 *   <li>wrapper style (unwrappable): the single body element is exploded into
 *       its child elements, each becoming a parameter;</li>
 *   <li>bare style: each part becomes one parameter, with {@code isReturn()}
 *       parts taking index -1.</li>
 * </ul>
 * For wrapper style, matching IN and OUT child parameters (same element local
 * name, Java type, and annotations) are merged into a single INOUT parameter.
 *
 * @param req the request being populated (blocks are added as a side effect)
 * @param res the response being populated (blocks are added as a side effect)
 * @param parameterList the message parts to convert
 * @return the resulting parameters, in declaration order
 */
private List<Parameter> getDoclitParameters(Request req, Response res, List<MessagePart> parameterList) {
    if (parameterList.size() == 0)
        return new ArrayList<Parameter>();
    List<Parameter> params = new ArrayList<Parameter>();
    Message inMsg = getInputMessage();
    Message outMsg = getOutputMessage();
    boolean unwrappable = isUnwrappable();
    List<Parameter> outParams = null;
    int pIndex = 0;
    for (MessagePart part : parameterList) {
        QName reqBodyName = part.getDescriptor();
        JAXBType jaxbType = getJAXBType(part);
        Block block = new Block(reqBodyName, jaxbType, part);
        if (unwrappable) {
            // Wrapper style: build a structured type so the wrapper's children
            // can be exposed as individual parameters.
            JAXBStructuredType jaxbStructType = ModelerUtils.createJAXBStructureType(jaxbType);
            block = new Block(reqBodyName, jaxbStructType, part);
            if (ModelerUtils.isBoundToSOAPBody(part)) {
                if (part.isIN()) {
                    req.addBodyBlock(block);
                } else if (part.isOUT()) {
                    res.addBodyBlock(block);
                } else if (part.isINOUT()) {
                    req.addBodyBlock(block);
                    res.addBodyBlock(block);
                }
            } else if (ModelerUtils.isUnbound(part)) {
                if (part.isIN())
                    req.addUnboundBlock(block);
                else if (part.isOUT())
                    res.addUnboundBlock(block);
                else if (part.isINOUT()) {
                    req.addUnboundBlock(block);
                    res.addUnboundBlock(block);
                }
            }
            if (part.isIN() || part.isINOUT()) {
                // Explode the wrapper into child parameters and index them.
                params = ModelerUtils.createUnwrappedParameters(jaxbStructType, block);
                int index = 0;
                Mode mode = part.isINOUT() ? Mode.INOUT : Mode.IN;
                for (Parameter param : params) {
                    param.setParameterIndex(index++);
                    param.setMode(mode);
                    setCustomizedParameterName(info.portTypeOperation, inMsg, part, param, unwrappable);
                }
            } else if (part.isOUT()) {
                // OUT children are merged with IN children below (INOUT detection).
                outParams = ModelerUtils.createUnwrappedParameters(jaxbStructType, block);
                for (Parameter param : outParams) {
                    param.setMode(Mode.OUT);
                    setCustomizedParameterName(info.portTypeOperation, outMsg, part, param, unwrappable);
                }
            }
        } else {
            // Bare style: one parameter per part, routed by its binding kind.
            if (ModelerUtils.isBoundToSOAPBody(part)) {
                if (part.isIN()) {
                    req.addBodyBlock(block);
                } else if (part.isOUT()) {
                    res.addBodyBlock(block);
                } else if (part.isINOUT()) {
                    req.addBodyBlock(block);
                    res.addBodyBlock(block);
                }
            } else if (ModelerUtils.isBoundToSOAPHeader(part)) {
                if (part.isIN()) {
                    req.addHeaderBlock(block);
                } else if (part.isOUT()) {
                    res.addHeaderBlock(block);
                } else if (part.isINOUT()) {
                    req.addHeaderBlock(block);
                    res.addHeaderBlock(block);
                }
            } else if (ModelerUtils.isBoundToMimeContent(part)) {
                List<MIMEContent> mimeContents = null;
                if (part.isIN()) {
                    mimeContents = getMimeContents(info.bindingOperation.getInput(),
                            getInputMessage(), part.getName());
                    jaxbType = getAttachmentType(mimeContents, part);
                    block = new Block(jaxbType.getName(), jaxbType, part);
                    req.addAttachmentBlock(block);
                } else if (part.isOUT()) {
                    mimeContents = getMimeContents(info.bindingOperation.getOutput(),
                            getOutputMessage(), part.getName());
                    jaxbType = getAttachmentType(mimeContents, part);
                    block = new Block(jaxbType.getName(), jaxbType, part);
                    res.addAttachmentBlock(block);
                } else if (part.isINOUT()) {
                    mimeContents = getMimeContents(info.bindingOperation.getInput(),
                            getInputMessage(), part.getName());
                    jaxbType = getAttachmentType(mimeContents, part);
                    block = new Block(jaxbType.getName(), jaxbType, part);
                    req.addAttachmentBlock(block);
                    res.addAttachmentBlock(block);
                    mimeContents = getMimeContents(info.bindingOperation.getOutput(),
                            getOutputMessage(), part.getName());
                    JAXBType outJaxbType = getAttachmentType(mimeContents, part);
                    String inType = jaxbType.getJavaType().getType().getName();
                    String outType = outJaxbType.getJavaType().getType().getName();
                    TypeAndAnnotation inTa = jaxbType.getJavaType().getType().getTypeAnn();
                    TypeAndAnnotation outTa = outJaxbType.getJavaType().getType().getTypeAnn();
                    // If the in/out attachment Java types disagree, fall back to DataHandler.
                    if ((((inTa != null) && (outTa != null) && inTa.equals(outTa))) && !inType.equals(outType)) {
                        String javaType = "javax.activation.DataHandler";
                        S2JJAXBModel jaxbModel = getJAXBModelBuilder().getJAXBModel().getS2JJAXBModel();
                        //JCodeModel cm = jaxbModel.generateCode(null, errReceiver);
                        JType jt = null;
                        jt = options.getCodeModel().ref(javaType);
                        JAXBTypeAndAnnotation jaxbTa = jaxbType.getJavaType().getType();
                        jaxbTa.setType(jt);
                    }
                }
            } else if (ModelerUtils.isUnbound(part)) {
                if (part.isIN()) {
                    req.addUnboundBlock(block);
                } else if (part.isOUT()) {
                    res.addUnboundBlock(block);
                } else if (part.isINOUT()) {
                    req.addUnboundBlock(block);
                    res.addUnboundBlock(block);
                }
            }
            Parameter param = ModelerUtils.createParameter(part.getName(), jaxbType, block);
            param.setMode(part.getMode());
            if (part.isReturn()) {
                param.setParameterIndex(-1);
            } else {
                param.setParameterIndex(pIndex++);
            }
            if (part.isIN())
                setCustomizedParameterName(info.portTypeOperation, inMsg, part, param, false);
            else if (outMsg != null)
                setCustomizedParameterName(info.portTypeOperation, outMsg, part, param, false);
            params.add(param);
        }
    }
    // Wrapper style: merge OUT wrapper children into the IN list, collapsing
    // matching IN/OUT pairs into a single INOUT parameter.
    if (unwrappable && (outParams != null)) {
        int index = params.size();
        for (Parameter param : outParams) {
            if (JAXBRIContext.mangleNameToVariableName(param.getName()).equals("return")) {
                param.setParameterIndex(-1);
            } else {
                Parameter inParam = ModelerUtils.getParameter(param.getName(), params);
                if ((inParam != null) && inParam.isIN()) {
                    QName inElementName = inParam.getType().getName();
                    QName outElementName = param.getType().getName();
                    String inJavaType = inParam.getTypeName();
                    String outJavaType = param.getTypeName();
                    TypeAndAnnotation inTa = inParam.getType().getJavaType().getType().getTypeAnn();
                    TypeAndAnnotation outTa = param.getType().getJavaType().getType().getTypeAnn();
                    if (inElementName.getLocalPart().equals(outElementName.getLocalPart()) && inJavaType.equals(outJavaType) &&
                            (inTa == null || outTa == null || inTa.equals(outTa))) {
                        inParam.setMode(Mode.INOUT);
                        continue;
                    }
                }
                if (outParams.size() == 1) {
                    param.setParameterIndex(-1);
                } else {
                    param.setParameterIndex(index++);
                }
            }
            params.add(param);
        }
    }
    return params;
}
/**
 * Builds the rpc/literal {@link Parameter} list for an operation. Body parts
 * reuse the parameters created by {@code ModelerUtils.createRpcLitParameters};
 * header, MIME, and unbound parts get fresh blocks added to the request or
 * response as a side effect.
 *
 * @param req      the request being populated
 * @param res      the response being populated
 * @param reqBlock the rpc/lit request wrapper block (provides the JAXB model)
 * @param resBlock the rpc/lit response wrapper block
 * @param paramList the message parts to convert
 * @return the resulting parameters, indexed in declaration order
 *         ({@code isReturn()} parts take index -1)
 */
private List<Parameter> getRpcLitParameters(Request req, Response res, Block reqBlock, Block resBlock, List<MessagePart> paramList) {
    List<Parameter> params = new ArrayList<Parameter>();
    Message inMsg = getInputMessage();
    Message outMsg = getOutputMessage();
    S2JJAXBModel jaxbModel = ((RpcLitStructure) reqBlock.getType()).getJaxbModel().getS2JJAXBModel();
    List<Parameter> inParams = ModelerUtils.createRpcLitParameters(inMsg, reqBlock, jaxbModel, errReceiver);
    List<Parameter> outParams = null;
    if (outMsg != null)
        outParams = ModelerUtils.createRpcLitParameters(outMsg, resBlock, jaxbModel, errReceiver);
    // Create parameters for header and mime parts.
    int index = 0;
    for (MessagePart part : paramList) {
        Parameter param = null;
        if (ModelerUtils.isBoundToSOAPBody(part)) {
            // Body parts were already converted above; just look them up.
            if (part.isIN()) {
                param = ModelerUtils.getParameter(part.getName(), inParams);
            } else if (outParams != null) {
                param = ModelerUtils.getParameter(part.getName(), outParams);
            }
        } else if (ModelerUtils.isBoundToSOAPHeader(part)) {
            QName headerName = part.getDescriptor();
            JAXBType jaxbType = getJAXBType(part);
            Block headerBlock = new Block(headerName, jaxbType, part);
            param = ModelerUtils.createParameter(part.getName(), jaxbType, headerBlock);
            if (part.isIN()) {
                req.addHeaderBlock(headerBlock);
            } else if (part.isOUT()) {
                res.addHeaderBlock(headerBlock);
            } else if (part.isINOUT()) {
                req.addHeaderBlock(headerBlock);
                res.addHeaderBlock(headerBlock);
            }
        } else if (ModelerUtils.isBoundToMimeContent(part)) {
            List<MIMEContent> mimeContents = null;
            if (part.isIN() || part.isINOUT())
                mimeContents = getMimeContents(info.bindingOperation.getInput(),
                        getInputMessage(), part.getName());
            else
                mimeContents = getMimeContents(info.bindingOperation.getOutput(),
                        getOutputMessage(), part.getName());
            JAXBType type = getAttachmentType(mimeContents, part);
            // Create Parameters in request or response.
            //Block mimeBlock = new Block(new QName(part.getName()), type);
            Block mimeBlock = new Block(type.getName(), type, part);
            param = ModelerUtils.createParameter(part.getName(), type, mimeBlock);
            if (part.isIN()) {
                req.addAttachmentBlock(mimeBlock);
            } else if (part.isOUT()) {
                res.addAttachmentBlock(mimeBlock);
            } else if (part.isINOUT()) {
                mimeContents = getMimeContents(info.bindingOperation.getOutput(),
                        getOutputMessage(), part.getName());
                JAXBType outJaxbType = getAttachmentType(mimeContents, part);
                String inType = type.getJavaType().getType().getName();
                String outType = outJaxbType.getJavaType().getType().getName();
                // Mismatched in/out attachment types degrade to DataHandler.
                if (!inType.equals(outType)) {
                    String javaType = "javax.activation.DataHandler";
                    JType jt = null;
                    jt = options.getCodeModel().ref(javaType);
                    JAXBTypeAndAnnotation jaxbTa = type.getJavaType().getType();
                    jaxbTa.setType(jt);
                }
                req.addAttachmentBlock(mimeBlock);
                res.addAttachmentBlock(mimeBlock);
            }
        } else if (ModelerUtils.isUnbound(part)) {
            QName name = part.getDescriptor();
            JAXBType type = getJAXBType(part);
            Block unboundBlock = new Block(name, type, part);
            if (part.isIN()) {
                req.addUnboundBlock(unboundBlock);
            } else if (part.isOUT()) {
                res.addUnboundBlock(unboundBlock);
            } else if (part.isINOUT()) {
                req.addUnboundBlock(unboundBlock);
                res.addUnboundBlock(unboundBlock);
            }
            param = ModelerUtils.createParameter(part.getName(), type, unboundBlock);
        }
        if (param != null) {
            if (part.isReturn()) {
                param.setParameterIndex(-1);
            } else {
                param.setParameterIndex(index++);
            }
            param.setMode(part.getMode());
            params.add(param);
        }
    }
    // Apply jaxws:parameter name customizations from whichever message owns each part.
    for (Parameter param : params) {
        if (param.isIN())
            setCustomizedParameterName(info.portTypeOperation, inMsg, inMsg.getPart(param.getName()), param, false);
        else if (outMsg != null)
            setCustomizedParameterName(info.portTypeOperation, outMsg, outMsg.getPart(param.getName()), param, false);
    }
    return params;
}
/**
 * Creates the request-side parameters for the named parts of the input
 * message, adding body/header/attachment/unbound blocks to {@code request}
 * as a side effect.
 * <p>
 * NOTE(review): the guard below reads {@code inputMessage != null && ...} but
 * the comment says "there is no input message" — for a null input message the
 * loop body would dereference it. Presumably {@code parameterList} is empty
 * in that case; confirm against callers before changing.
 *
 * @param request       the request being populated
 * @param parameterList names of the input-message parts to convert
 * @return the created parameters, or null if nothing was created
 */
private List<Parameter> getRequestParameters(Request request, List<String> parameterList) {
    Message inputMessage = getInputMessage();
    // There is no input message part: return zero parameters.
    if (inputMessage != null && !inputMessage.parts().hasNext())
        return new ArrayList<Parameter>();
    List<Parameter> inParameters = null;
    QName reqBodyName = null;
    Block reqBlock = null;
    JAXBType jaxbReqType = null;
    boolean unwrappable = isUnwrappable();
    boolean doneSOAPBody = false;
    // Set up request parameters.
    for (String inParamName : parameterList) {
        MessagePart part = inputMessage.getPart(inParamName);
        if (part == null)
            continue;
        reqBodyName = part.getDescriptor();
        jaxbReqType = getJAXBType(part);
        if (unwrappable) {
            // Wrapper style: explode the single body element into child parameters.
            JAXBStructuredType jaxbRequestType = ModelerUtils.createJAXBStructureType(jaxbReqType);
            reqBlock = new Block(reqBodyName, jaxbRequestType, part);
            if (ModelerUtils.isBoundToSOAPBody(part)) {
                request.addBodyBlock(reqBlock);
            } else if (ModelerUtils.isUnbound(part)) {
                request.addUnboundBlock(reqBlock);
            }
            inParameters = ModelerUtils.createUnwrappedParameters(jaxbRequestType, reqBlock);
            for (Parameter param : inParameters) {
                setCustomizedParameterName(info.portTypeOperation, inputMessage, part, param, unwrappable);
            }
        } else {
            // Bare style: one parameter per part; only the first body part is
            // registered as the body block (doneSOAPBody flag).
            reqBlock = new Block(reqBodyName, jaxbReqType, part);
            if (ModelerUtils.isBoundToSOAPBody(part) && !doneSOAPBody) {
                doneSOAPBody = true;
                request.addBodyBlock(reqBlock);
            } else if (ModelerUtils.isBoundToSOAPHeader(part)) {
                request.addHeaderBlock(reqBlock);
            } else if (ModelerUtils.isBoundToMimeContent(part)) {
                List<MIMEContent> mimeContents = getMimeContents(info.bindingOperation.getInput(),
                        getInputMessage(), part.getName());
                jaxbReqType = getAttachmentType(mimeContents, part);
                //reqBlock = new Block(new QName(part.getName()), jaxbReqType);
                reqBlock = new Block(jaxbReqType.getName(), jaxbReqType, part);
                request.addAttachmentBlock(reqBlock);
            } else if (ModelerUtils.isUnbound(part)) {
                request.addUnboundBlock(reqBlock);
            }
            if (inParameters == null)
                inParameters = new ArrayList<Parameter>();
            Parameter param = ModelerUtils.createParameter(part.getName(), jaxbReqType, reqBlock);
            setCustomizedParameterName(info.portTypeOperation, inputMessage, part, param, false);
            inParameters.add(param);
        }
    }
    return inParameters;
}
/**
 * Applies a jaxws:parameter name customization, if one exists, to the given
 * parameter.
 *
 * @param extension    the WSDL element carrying the JAXWSBinding customization
 * @param msg          the message owning the part
 * @param part         the part the parameter was created from
 * @param param        the parameter to (possibly) rename
 * @param wrapperStyle when true, the wrapper child element name is used for
 *                     the customization lookup instead of the part descriptor
 */
private void setCustomizedParameterName(TWSDLExtensible extension, Message msg, MessagePart part, Parameter param, boolean wrapperStyle) {
    JAXWSBinding jaxwsBinding = (JAXWSBinding) getExtensionOfType(extension, JAXWSBinding.class);
    if (jaxwsBinding == null) {
        return;
    }
    QName elementName = wrapperStyle ? param.getType().getName() : part.getDescriptor();
    String customName = jaxwsBinding.getParameterName(msg.getName(), part.getName(), elementName, wrapperStyle);
    if (customName != null && customName.length() > 0) {
        param.setCustomName(customName);
    }
}
/**
 * Hook for subclasses to report a clash between a would-be SEI class name and
 * another generated artifact; callers append "_PortType" when this returns
 * true. This base implementation never reports a conflict.
 */
protected boolean isConflictingPortClassName(String name) {
    return false;
}
/**
 * Decides whether the current operation qualifies for wrapper (unwrapped)
 * style, applying the JAX-WS wrapper-style rules in sequence:
 * customization switch, single-part messages, input element named after the
 * operation, element descriptors, soap:body binding, and finally whether the
 * JAXB types themselves are unwrappable.
 */
protected boolean isUnwrappable() {
    // Rule 0: the jaxws:enableWrapperStyle customization can veto outright.
    if (!getWrapperStyleCustomization())
        return false;
    com.sun.tools.internal.ws.wsdl.document.Message inputMessage = getInputMessage();
    com.sun.tools.internal.ws.wsdl.document.Message outputMessage = getOutputMessage();
    // Wrapper style if the operation's input and output messages each contain
    // only a single part.
    if ((inputMessage != null && inputMessage.numParts() != 1)
            || (outputMessage != null && outputMessage.numParts() != 1)) {
        return false;
    }
    MessagePart inputPart = inputMessage != null
            ? inputMessage.parts().next() : null;
    MessagePart outputPart = outputMessage != null
            ? outputMessage.parts().next() : null;
    String operationName = info.portTypeOperation.getName();
    // Wrapper style if the input message part refers to a global element declaration whose localname
    // is equal to the operation name.
    // Wrapper style if the output message part refers to a global element declaration.
    if ((inputPart != null && !inputPart.getDescriptor().getLocalPart().equals(operationName)) ||
            (outputPart != null && outputPart.getDescriptorKind() != SchemaKinds.XSD_ELEMENT))
        return false;
    // Check to see if either input or output message part is not bound to soapbind:body;
    // in that case the operation is not wrapper style.
    if (((inputPart != null) && (inputPart.getBindingExtensibilityElementKind() != MessagePart.SOAP_BODY_BINDING)) ||
            ((outputPart != null) && (outputPart.getBindingExtensibilityElementKind() != MessagePart.SOAP_BODY_BINDING)))
        return false;
    // Wrapper style if the elements referred to by the input and output message parts
    // (henceforth referred to as wrapper elements) are both complex types defined
    // using the xsd:sequence compositor.
    // Wrapper style if the wrapper elements only contain child elements; they must not
    // contain other structures such as xsd:choice, substitution groups, or attributes.
    // These checks are done by JAXB; we just ask whether JAXB considers the type unwrappable.
    //if(inputPart != null && outputPart != null){
    if (inputPart != null) {
        boolean inputWrappable = false;
        JAXBType inputType = getJAXBType(inputPart);
        if (inputType != null) {
            inputWrappable = inputType.isUnwrappable();
        }
        // If there is no output part (one-way), the operation can still be wrapper style.
        if (outputPart == null) {
            return inputWrappable;
        }
        JAXBType outputType = getJAXBType(outputPart);
        if ((inputType != null) && (outputType != null))
            return inputType.isUnwrappable() && outputType.isUnwrappable();
    }
    return false;
}
/**
 * Resolves the effective jaxws:enableWrapperStyle customization for the
 * current operation, checking the most specific scope first:
 * wsdl:portType/wsdl:operation, then wsdl:portType, then wsdl:definitions.
 * Defaults to true (wrapper style enabled) when no scope sets it.
 */
private boolean getWrapperStyleCustomization() {
    // First look into wsdl:portType/wsdl:operation.
    Boolean isWrappable = getEnableWrapperStyle(info.portTypeOperation);
    if (isWrappable != null)
        return isWrappable;
    // Then into wsdl:portType.
    PortType portType = info.port.resolveBinding(document).resolvePortType(document);
    isWrappable = getEnableWrapperStyle(portType);
    if (isWrappable != null)
        return isWrappable;
    // Then wsdl:definitions.
    isWrappable = getEnableWrapperStyle(document.getDefinitions());
    if (isWrappable != null)
        return isWrappable;
    return true;
}

/**
 * Reads the enableWrapperStyle flag from a JAXWSBinding attached to the given
 * WSDL element, or null when no customization is present at that scope.
 */
private Boolean getEnableWrapperStyle(TWSDLExtensible extensible) {
    JAXWSBinding jaxwsBinding = (JAXWSBinding) getExtensionOfType(extensible, JAXWSBinding.class);
    if (jaxwsBinding != null) {
        return jaxwsBinding.isEnableWrapperStyle();
    }
    return null;
}
/* (non-Javadoc)
 * @see WSDLModelerBase#isSingleInOutPart(Set, MessagePart)
 */
/**
 * Returns true when the given output part mirrors an input part (same name
 * and descriptor), i.e. should be modeled as a single IN/OUT part.
 * <p>
 * NOTE(review): the rpc branch condition
 * {@code soapOperation != null && soapOperation.isRPC() || info.soapBinding.isRPC()}
 * parses as {@code (a && b) || c} — unlike the document branch, which
 * parenthesizes the binding-level fallback. Presumably intentional
 * (binding-level RPC wins even without a soap:operation); confirm before
 * restructuring.
 */
protected boolean isSingleInOutPart(Set inputParameterNames,
                                    MessagePart outputPart) {
    // As of now, we don't have support for in/out in doc-lit. So return false.
    SOAPOperation soapOperation =
            (SOAPOperation) getExtensionOfType(info.bindingOperation,
                    SOAPOperation.class);
    if ((soapOperation != null) && (soapOperation.isDocument() || info.soapBinding.isDocument())) {
        Iterator iter = getInputMessage().parts();
        while (iter.hasNext()) {
            MessagePart part = (MessagePart) iter.next();
            if (outputPart.getName().equals(part.getName()) && outputPart.getDescriptor().equals(part.getDescriptor()))
                return true;
        }
    } else if (soapOperation != null && soapOperation.isRPC() || info.soapBinding.isRPC()) {
        com.sun.tools.internal.ws.wsdl.document.Message inputMessage = getInputMessage();
        if (inputParameterNames.contains(outputPart.getName())) {
            if (inputMessage.getPart(outputPart.getName()).getDescriptor().equals(outputPart.getDescriptor())) {
                return true;
            }
        }
    }
    return false;
}
/**
 * Creates the rpc/literal request parameters: body parts come from
 * {@code ModelerUtils.createRpcLitParameters}; header, MIME, and unbound
 * parts get fresh blocks added to {@code request} and parameters of their own.
 *
 * @param request       the request being populated
 * @param parameterList names of the input-message parts to convert
 * @param block         the rpc/lit request wrapper block (provides the JAXB model)
 * @return the created parameters; may be null if nothing was created
 */
private List<Parameter> createRpcLitRequestParameters(Request request, List<String> parameterList, Block block) {
    Message message = getInputMessage();
    S2JJAXBModel jaxbModel = ((RpcLitStructure) block.getType()).getJaxbModel().getS2JJAXBModel();
    List<Parameter> parameters = ModelerUtils.createRpcLitParameters(message, block, jaxbModel, errReceiver);
    // Create parameters for header and mime parts.
    for (String paramName : parameterList) {
        MessagePart part = message.getPart(paramName);
        if (part == null)
            continue;
        if (ModelerUtils.isBoundToSOAPHeader(part)) {
            if (parameters == null)
                parameters = new ArrayList<Parameter>();
            QName headerName = part.getDescriptor();
            JAXBType jaxbType = getJAXBType(part);
            Block headerBlock = new Block(headerName, jaxbType, part);
            request.addHeaderBlock(headerBlock);
            Parameter param = ModelerUtils.createParameter(part.getName(), jaxbType, headerBlock);
            if (param != null) {
                parameters.add(param);
            }
        } else if (ModelerUtils.isBoundToMimeContent(part)) {
            if (parameters == null)
                parameters = new ArrayList<Parameter>();
            List<MIMEContent> mimeContents = getMimeContents(info.bindingOperation.getInput(),
                    getInputMessage(), paramName);
            JAXBType type = getAttachmentType(mimeContents, part);
            // Create Parameters in request or response.
            //Block mimeBlock = new Block(new QName(part.getName()), type);
            Block mimeBlock = new Block(type.getName(), type, part);
            request.addAttachmentBlock(mimeBlock);
            Parameter param = ModelerUtils.createParameter(part.getName(), type, mimeBlock);
            if (param != null) {
                parameters.add(param);
            }
        } else if (ModelerUtils.isUnbound(part)) {
            if (parameters == null)
                parameters = new ArrayList<Parameter>();
            QName name = part.getDescriptor();
            JAXBType type = getJAXBType(part);
            Block unboundBlock = new Block(name, type, part);
            request.addUnboundBlock(unboundBlock);
            Parameter param = ModelerUtils.createParameter(part.getName(), type, unboundBlock);
            if (param != null) {
                parameters.add(param);
            }
        }
    }
    // FIX: parameters can still be null here (the in-loop null checks above
    // show createRpcLitParameters may return null) — the old code NPE'd on
    // the for-each below. Guard it and keep the original return contract.
    if (parameters != null) {
        for (Parameter param : parameters) {
            setCustomizedParameterName(info.portTypeOperation, message, message.getPart(param.getName()), param, false);
        }
    }
    return parameters;
}
/**
 * Maps a MIME content type to the Java type used for the attachment:
 * images become java.awt.Image, XML becomes javax.xml.transform.Source,
 * and everything else falls back to javax.activation.DataHandler.
 */
private String getJavaTypeForMimeType(String mimeType) {
    if ("image/jpeg".equals(mimeType) || "image/gif".equals(mimeType)) {
        return "java.awt.Image";
    }
    if ("text/xml".equals(mimeType) || "application/xml".equals(mimeType)) {
        return "javax.xml.transform.Source";
    }
    return "javax.activation.DataHandler";
}
/**
 * Builds the JAXBType for a MIME-bound part. When MIME content processing is
 * disabled, falls back to the plain JAXB type. Otherwise picks the Java type
 * from the part's mime:content alternatives (DataHandler when there is more
 * than one alternative) and resolves the type/annotation from the JAXB model.
 *
 * @param mimeContents the mime:content alternatives declared for the part
 * @param part         the wsdl:part being bound
 * @return a JAXBType carrying the attachment's Java type
 */
private JAXBType getAttachmentType(List<MIMEContent> mimeContents, MessagePart part) {
    if (!enableMimeContent()) {
        return getJAXBType(part);
    }
    String javaType = null;
    List<String> mimeTypes = getAlternateMimeTypes(mimeContents);
    if (mimeTypes.size() > 1) {
        // Multiple alternatives: only DataHandler can represent all of them.
        javaType = "javax.activation.DataHandler";
    } else {
        javaType = getJavaTypeForMimeType(mimeTypes.get(0));
    }
    S2JJAXBModel jaxbModel = getJAXBModelBuilder().getJAXBModel().getS2JJAXBModel();
    JType jt = null;
    jt = options.getCodeModel().ref(javaType);
    QName desc = part.getDescriptor();
    TypeAndAnnotation typeAnno = null;
    if (part.getDescriptorKind() == SchemaKinds.XSD_TYPE) {
        typeAnno = jaxbModel.getJavaType(desc);
        // Type-described parts are keyed by the part name, not the type QName.
        desc = new QName("", part.getName());
    } else if (part.getDescriptorKind() == SchemaKinds.XSD_ELEMENT) {
        typeAnno = getJAXBModelBuilder().getElementTypeAndAnn(desc);
        if(typeAnno == null){
            error(part, ModelerMessages.WSDLMODELER_JAXB_JAVATYPE_NOTFOUND(part.getDescriptor(), part.getName()));
        }
        for (Iterator mimeTypeIter = mimeTypes.iterator(); mimeTypeIter.hasNext();) {
            String mimeType = (String) mimeTypeIter.next();
            if ((!mimeType.equals("text/xml") &&
                    !mimeType.equals("application/xml"))) {
                // According to AP 1.0,
                // RZZZZ: In a DESCRIPTION, if a wsdl:part element refers to a
                // global element declaration (via the element attribute of the wsdl:part
                // element) then the value of the type attribute of a mime:content element
                // that binds that part MUST be a content type suitable for carrying an
                // XML serialization.
                // Should we throw a warning?
                //type = MimeHelper.javaType.DATA_HANDLER_JAVATYPE;
                warning(part, ModelerMessages.MIMEMODELER_ELEMENT_PART_INVALID_ELEMENT_MIME_TYPE(part.getName(), mimeType));
            }
        }
    }
    if (typeAnno == null) {
        error(part, ModelerMessages.WSDLMODELER_JAXB_JAVATYPE_NOTFOUND(desc, part.getName()));
    }
    return new JAXBType(desc, new JavaSimpleType(new JAXBTypeAndAnnotation(typeAnno, jt)),
            null, getJAXBModelBuilder().getJAXBModel());
}
/**
 * Creates and binds the JAXB model for the WSDL's schemas, including pseudo
 * schemas generated for async operation response beans, and stores the
 * builder in the {@code jaxbModelBuilder} field.
 */
protected void buildJAXBModel(WSDLDocument wsdlDocument) {
    // FIX: renamed the local so it no longer shadows the jaxbModelBuilder field.
    JAXBModelBuilder builder = new JAXBModelBuilder(options, classNameCollector, forest, errReceiver);
    // Set the Java package where WSDL artifacts will be generated.
    // If the user provided a package name using the -p switch (or the package
    // property on the wsimport ant task), ignore the package customization in
    // the WSDL and schema bindings; force the -p option only in the first pass.
    if (explicitDefaultPackage != null) {
        builder.getJAXBSchemaCompiler().forcePackageName(options.defaultPackage);
    } else {
        options.defaultPackage = getJavaPackage();
    }
    // Create pseudo schemas for async operations' (if any) response beans.
    List<InputSource> schemas = PseudoSchemaBuilder.build(this, options, errReceiver);
    for (InputSource schema : schemas) {
        builder.getJAXBSchemaCompiler().parseSchema(schema);
    }
    builder.bind();
    this.jaxbModelBuilder = builder;
}
/**
 * Determines the Java package for generated artifacts: a jaxws:package
 * customization on wsdl:definitions wins; otherwise the package is derived
 * from the WSDL's target namespace via XJC's default mapping.
 */
protected String getJavaPackage() {
    String customPackage = null;
    JAXWSBinding customization = (JAXWSBinding) getExtensionOfType(document.getDefinitions(), JAXWSBinding.class);
    if (customization != null && customization.getJaxwsPackage() != null) {
        customPackage = customization.getJaxwsPackage().getName();
    }
    if (customPackage != null) {
        return customPackage;
    }
    // No usable customization: derive from targetNamespace.
    return XJC.getDefaultPackageName(document.getDefinitions().getTargetNamespaceURI());
}
/**
 * Assigns the fixed javax.xml.ws.Provider interface to a provider-based port.
 */
protected void createJavaInterfaceForProviderPort(Port port) {
    port.setJavaInterface(new JavaInterface("javax.xml.ws.Provider"));
}
/**
 * Creates the Java SEI for a port: provider ports get javax.xml.ws.Provider;
 * otherwise a Java method is generated for each operation and any
 * jaxws:parameter name customizations are applied to the generated
 * Java parameters.
 */
protected void createJavaInterfaceForPort(Port port, boolean isProvider) {
    if (isProvider) {
        createJavaInterfaceForProviderPort(port);
        return;
    }
    String seiName = getJavaNameOfSEI(port);
    if (isConflictingPortClassName(seiName)) {
        seiName = seiName + "_PortType";
    }
    JavaInterface sei = new JavaInterface(seiName);
    for (Operation operation : port.getOperations()) {
        createJavaMethodForOperation(port, operation, sei);
        // Propagate customized parameter names onto the generated Java parameters.
        for (JavaParameter javaParam : operation.getJavaMethod().getParametersList()) {
            Parameter modelParam = javaParam.getParameter();
            if (modelParam.getCustomName() != null) {
                javaParam.setName(modelParam.getCustomName());
            }
        }
    }
    port.setJavaInterface(sei);
}
/**
 * Computes the fully-qualified name of the generated Service class: a
 * jaxws:class customization on wsdl:service overrides the wsdl:service name,
 * which is then mangled into a class name inside the default package.
 */
protected String getServiceInterfaceName(QName serviceQName, com.sun.tools.internal.ws.wsdl.document.Service wsdlService) {
    String serviceName = wsdlService.getName();
    JAXWSBinding jaxwsCust = (JAXWSBinding) getExtensionOfType(wsdlService, JAXWSBinding.class);
    if (jaxwsCust != null && jaxwsCust.getClassName() != null) {
        CustomName name = jaxwsCust.getClassName();
        // FIX: the old guard compared the CustomName object to "" via equals(),
        // which can never match a String — check the underlying name instead
        // so an empty customization is ignored.
        if (name.getName() != null && !name.getName().equals("")) {
            serviceName = name.getName();
        }
    }
    return options.defaultPackage + "." + JAXBRIContext.mangleNameToClassName(serviceName);
}
/**
 * Computes the fully-qualified Java name of the SEI for a port: a jaxws:class
 * customization on the portType wins; otherwise the portType (or, failing
 * that, the port) local name is mangled into a class name.
 */
protected String getJavaNameOfSEI(Port port) {
    QName portTypeName =
            (QName) port.getProperty(
                    ModelProperties.PROPERTY_WSDL_PORT_TYPE_NAME);
    PortType pt = (PortType) document.find(Kinds.PORT_TYPE, portTypeName);
    // Populate the portType map here. We should get rid of all these properties;
    // let's not do it as it may break NB.
    // TODO: clean all this stuff as part of the NB RFE.
    port.portTypes.put(portTypeName, pt);
    JAXWSBinding jaxwsCust = (JAXWSBinding) getExtensionOfType(pt, JAXWSBinding.class);
    if (jaxwsCust != null && jaxwsCust.getClassName() != null) {
        CustomName name = jaxwsCust.getClassName();
        // FIX: the old guard compared the CustomName object to "" via equals(),
        // which can never match a String — check the underlying name instead
        // so an empty customization is ignored.
        if (name.getName() != null && !name.getName().equals("")) {
            return makePackageQualified(name.getName());
        }
    }
    if (portTypeName != null) {
        // Got portType information from the WSDL; use it to name the interface.
        return makePackageQualified(JAXBRIContext.mangleNameToClassName(portTypeName.getLocalPart()));
    }
    // Somehow we only got the port name, so we use that.
    return makePackageQualified(JAXBRIContext.mangleNameToClassName(port.getName().getLocalPart()));
}
/**
 * Builds the JavaMethod for an asynchronous (polling/callback) operation and
 * attaches it to both the operation and the SEI.
 *
 * Request parameters map one-to-one to Java parameters; the response part
 * named by the WSDL_RESULT_PARAMETER property becomes the return type.
 */
private void createJavaMethodForAsyncOperation(Port port, Operation operation,
                                               JavaInterface intf) {
    String candidateName = getJavaNameForOperation(operation);
    JavaMethod method = new JavaMethod(candidateName, options, errReceiver);
    Request request = operation.getRequest();
    Iterator requestBodyBlocks = request.getBodyBlocks();
    Block requestBlock =
        (requestBodyBlocks.hasNext()
            ? (Block) request.getBodyBlocks().next()
            : null);
    Response response = operation.getResponse();
    Iterator responseBodyBlocks = null;
    Block responseBlock;
    if (response != null) {
        responseBodyBlocks = response.getBodyBlocks();
        responseBlock =
            responseBodyBlocks.hasNext()
                ? (Block) response.getBodyBlocks().next()
                : null;
    }
    // build a signature of the form "opName%arg1type%arg2type%...%argntype so that we
    // detect overloading conflicts in the generated java interface/classes
    String signature = candidateName;
    for (Iterator iter = request.getParameters(); iter.hasNext();) {
        Parameter parameter = (Parameter) iter.next();
        // A parameter already bound to another JavaParameter indicates an
        // invalid (re-used) operation model.
        if (parameter.getJavaParameter() != null) {
            error(operation.getEntity(), ModelerMessages.WSDLMODELER_INVALID_OPERATION(operation.getName().getLocalPart()));
        }
        JavaType parameterType = parameter.getType().getJavaType();
        // A linked parameter marks an IN/OUT pair, which becomes a Holder.
        JavaParameter javaParameter =
            new JavaParameter(
                JAXBRIContext.mangleNameToVariableName(parameter.getName()),
                parameterType,
                parameter,
                parameter.getLinkedParameter() != null);
        if (javaParameter.isHolder()) {
            javaParameter.setHolderName(javax.xml.ws.Holder.class.getName());
        }
        method.addParameter(javaParameter);
        parameter.setJavaParameter(javaParameter);
        signature += "%" + parameterType.getName();
    }
    if (response != null) {
        String resultParameterName =
            (String) operation.getProperty(WSDL_RESULT_PARAMETER);
        Parameter resultParameter =
            response.getParameterByName(resultParameterName);
        JavaType returnType = resultParameter.getType().getJavaType();
        method.setReturnType(returnType);
    }
    operation.setJavaMethod(method);
    intf.addMethod(method);
}
/* (non-Javadoc)
 * Builds the JavaMethod for a synchronous operation: return type from the
 * WSDL_RESULT_PARAMETER property (void when absent), parameters in
 * WSDL_PARAMETER_ORDER order, and one Java exception per declared fault.
 * @see WSDLModelerBase#createJavaMethodForOperation(WSDLPort, WSDLOperation, JavaInterface, Set, Set)
 */
protected void createJavaMethodForOperation(Port port, Operation operation, JavaInterface intf) {
    // Async operations get the polling/callback signature shape instead.
    if ((operation instanceof AsyncOperation)) {
        createJavaMethodForAsyncOperation(port, operation, intf);
        return;
    }
    String candidateName = getJavaNameForOperation(operation);
    JavaMethod method = new JavaMethod(candidateName, options, errReceiver);
    Request request = operation.getRequest();
    Parameter returnParam = (Parameter) operation.getProperty(WSDL_RESULT_PARAMETER);
    if (returnParam != null) {
        JavaType parameterType = returnParam.getType().getJavaType();
        method.setReturnType(parameterType);
    } else {
        JavaType ret = new JavaSimpleTypeCreator().VOID_JAVATYPE;
        method.setReturnType(ret);
    }
    List<Parameter> parameterOrder = (List<Parameter>) operation.getProperty(WSDL_PARAMETER_ORDER);
    for (Parameter param : parameterOrder) {
        JavaType parameterType = param.getType().getJavaType();
        String name = (param.getCustomName() != null) ? param.getCustomName() : param.getName();
        name = JAXBRIContext.mangleNameToVariableName(name);
        //if its a java keyword after name mangling, then we simply put underscore as there is no
        //need to ask user to customize the parameter name if its java keyword
        if(Names.isJavaReservedWord(name)){
            name = "_"+name;
        }
        // OUT and INOUT parameters become Holder<T> parameters.
        JavaParameter javaParameter =
            new JavaParameter(
                name,
                parameterType,
                param,
                param.isINOUT() || param.isOUT());
        if (javaParameter.isHolder()) {
            javaParameter.setHolderName(javax.xml.ws.Holder.class.getName());
        }
        method.addParameter(javaParameter);
        param.setJavaParameter(javaParameter);
    }
    operation.setJavaMethod(method);
    intf.addMethod(method);
    String opName = JAXBRIContext.mangleNameToVariableName(operation.getName().getLocalPart());
    // First pass: materialize a Java exception class for each fault.
    for (Iterator iter = operation.getFaults();
         iter != null && iter.hasNext();
        ) {
        Fault fault = (Fault) iter.next();
        createJavaExceptionFromLiteralType(fault, port, opName);
    }
    // Second pass: declare the throws clause on the method.
    JavaException javaException;
    Fault fault;
    for (Iterator iter = operation.getFaults(); iter.hasNext();) {
        fault = (Fault) iter.next();
        javaException = fault.getJavaException();
        method.addException(javaException.getName());
    }
}
/**
 * Creates (or reuses) the Java exception class modeling the given WSDL
 * fault, recording it via {@code fault.setJavaException(...)}.
 *
 * @return false when an equivalent exception had already been generated and
 *         was reused; true when a new exception class was created
 */
protected boolean createJavaExceptionFromLiteralType(Fault fault, com.sun.tools.internal.ws.processor.model.Port port, String operationName) {
    JAXBType faultType = (JAXBType) fault.getBlock().getType();
    String exceptionName =
        makePackageQualified(JAXBRIContext.mangleNameToClassName(fault.getName()));
    // use fault namespace attribute
    JAXBStructuredType jaxbStruct = new JAXBStructuredType(new QName(
        fault.getBlock().getName().getNamespaceURI(),
        fault.getName()));
    QName memberName = fault.getElementName();
    JAXBElementMember jaxbMember =
        new JAXBElementMember(memberName, faultType);
    //jaxbMember.setNillable(faultType.isNillable());
    String javaMemberName = getLiteralJavaMemberName(fault);
    JavaStructureMember javaMember = new JavaStructureMember(
        javaMemberName,
        faultType.getJavaType(),
        jaxbMember);
    jaxbMember.setJavaStructureMember(javaMember);
    javaMember.setReadMethod(Names.getJavaMemberReadMethod(javaMember));
    javaMember.setInherited(false);
    jaxbMember.setJavaStructureMember(javaMember);
    jaxbStruct.add(jaxbMember);
    if (isConflictingExceptionClassName(exceptionName)) {
        exceptionName += "_Exception";
    }
    // Reuse a previously generated exception when this fault maps to the
    // same or a structurally equivalent JAXB type.
    JavaException existingJavaException = (JavaException) _javaExceptions.get(exceptionName);
    if (existingJavaException != null) {
        if (existingJavaException.getName().equals(exceptionName)) {
            if (((JAXBType) existingJavaException.getOwner()).getName().equals(jaxbStruct.getName())
                || ModelerUtils.isEquivalentLiteralStructures(jaxbStruct, (JAXBStructuredType) existingJavaException.getOwner())) {
                // we have mapped this fault already
                if (faultType instanceof JAXBStructuredType) {
                    fault.getBlock().setType((JAXBType) existingJavaException.getOwner());
                }
                fault.setJavaException(existingJavaException);
                return false;
            }
        }
    }
    JavaException javaException = new JavaException(exceptionName, false, jaxbStruct);
    javaException.add(javaMember);
    jaxbStruct.setJavaType(javaException);
    _javaExceptions.put(javaException.getName(), javaException);
    fault.setJavaException(javaException);
    return true;
}
protected boolean isRequestResponse() {
    // Two-way (request/response) vs. one-way operation check.
    OperationStyle style = info.portTypeOperation.getStyle();
    return style == OperationStyle.REQUEST_RESPONSE;
}
protected java.util.List<String> getAsynParameterOrder() {
    // Async operations ignore any wsdl:parameterOrder hint; the input
    // message's part order is used as-is.
    java.util.List<String> names = new ArrayList<String>();
    for (MessagePart part : getInputMessage().getParts()) {
        names.add(part.getName());
    }
    return names;
}
/**
 * Computes the ordered list of message parts that become the Java method
 * parameters of the current operation (info.portTypeOperation).
 *
 * With a valid wsdl:parameterOrder attribute, listed parts are emitted in
 * that order followed by unlisted input parts and unlisted output parts;
 * a single unlisted output part becomes the return value. When the
 * attribute is absent or invalid, input parts come first (message order)
 * followed by output-only parts, a lone output part becoming the return
 * value. Input/output parts with matching descriptors collapse into a
 * single INOUT parameter.
 */
protected List<MessagePart> getParameterOrder() {
    List<MessagePart> params = new ArrayList<MessagePart>();
    String parameterOrder = info.portTypeOperation.getParameterOrder();
    java.util.List<String> parameterList = new ArrayList<String>();
    boolean parameterOrderPresent = false;
    if ((parameterOrder != null) && !(parameterOrder.trim().equals(""))) {
        parameterList = XmlUtil.parseTokenList(parameterOrder);
        parameterOrderPresent = true;
    } else {
        parameterList = new ArrayList<String>();
    }
    Message inputMessage = getInputMessage();
    Message outputMessage = getOutputMessage();
    List<MessagePart> outputParts = null;
    List<MessagePart> inputParts = inputMessage.getParts();
    //reset the mode and ret flag, as MEssagePArts aer shared across ports
    for (MessagePart part : inputParts) {
        part.setMode(Mode.IN);
        part.setReturn(false);
    }
    if (isRequestResponse()) {
        outputParts = outputMessage.getParts();
        for (MessagePart part : outputParts) {
            part.setMode(Mode.OUT);
            part.setReturn(false);
        }
    }
    if (parameterOrderPresent) {
        boolean validParameterOrder = true;
        Iterator<String> paramOrders = parameterList.iterator();
        // If any part in the parameterOrder is not present in the request or
        // response message, we completely ignore the parameterOrder hint
        while (paramOrders.hasNext()) {
            String param = paramOrders.next();
            boolean partFound = false;
            for (MessagePart part : inputParts) {
                if (param.equals(part.getName())) {
                    partFound = true;
                    break;
                }
            }
            // if not found, check in output parts
            // NOTE(review): outputParts is null for one-way operations; a
            // parameterOrder entry missing from the inputs would NPE here --
            // confirm that one-way + parameterOrder is rejected upstream.
            if (!partFound) {
                for (MessagePart part : outputParts) {
                    if (param.equals(part.getName())) {
                        partFound = true;
                        break;
                    }
                }
            }
            if (!partFound) {
                warning(info.operation.getEntity(), ModelerMessages.WSDLMODELER_INVALID_PARAMETERORDER_PARAMETER(param, info.operation.getName().getLocalPart()));
                validParameterOrder = false;
            }
        }
        List<MessagePart> inputUnlistedParts = new ArrayList<MessagePart>();
        List<MessagePart> outputUnlistedParts = new ArrayList<MessagePart>();
        //gather input Parts
        if (validParameterOrder) {
            // Listed parts first, in parameterOrder order; an input part
            // shadows an output part with the same name.
            for (String param : parameterList) {
                MessagePart part = inputMessage.getPart(param);
                if (part != null) {
                    params.add(part);
                    continue;
                }
                if (isRequestResponse()) {
                    MessagePart outPart = outputMessage.getPart(param);
                    if (outPart != null) {
                        params.add(outPart);
                        continue;
                    }
                }
            }
            for (MessagePart part : inputParts) {
                if (!parameterList.contains(part.getName())) {
                    inputUnlistedParts.add(part);
                }
            }
            if (isRequestResponse()) {
                // at most one output part should be unlisted
                for (MessagePart part : outputParts) {
                    if (!parameterList.contains(part.getName())) {
                        MessagePart inPart = inputMessage.getPart(part.getName());
                        //dont add inout as unlisted part
                        if ((inPart != null) && inPart.getDescriptor().equals(part.getDescriptor())) {
                            inPart.setMode(Mode.INOUT);
                        } else {
                            outputUnlistedParts.add(part);
                        }
                    } else {
                        //param list may contain it, check if its INOUT
                        MessagePart inPart = inputMessage.getPart(part.getName());
                        //dont add inout as unlisted part
                        if ((inPart != null) && inPart.getDescriptor().equals(part.getDescriptor())) {
                            inPart.setMode(Mode.INOUT);
                        } else if (!params.contains(part)) {
                            params.add(part);
                        }
                    }
                }
                if (outputUnlistedParts.size() == 1) {
                    // exactly one unlisted output part: it is the return value
                    MessagePart resultPart = outputUnlistedParts.get(0);
                    resultPart.setReturn(true);
                    params.add(resultPart);
                    outputUnlistedParts.clear();
                }
            }
            //add the input and output unlisted parts
            for (MessagePart part : inputUnlistedParts) {
                params.add(part);
            }
            for (MessagePart part : outputUnlistedParts) {
                params.add(part);
            }
            return params;
        }
        //parameterOrder attribute is not valid, we ignore it
        warning(info.operation.getEntity(), ModelerMessages.WSDLMODELER_INVALID_PARAMETER_ORDER_INVALID_PARAMETER_ORDER(info.operation.getName().getLocalPart()));
        parameterOrderPresent = false;
        parameterList.clear();
    }
    // Fallback ordering: inputs in message order, then output-only parts.
    List<MessagePart> outParts = new ArrayList<MessagePart>();
    //construct input parameter list with the same order as in input message
    for (MessagePart part : inputParts) {
        params.add(part);
    }
    if (isRequestResponse()) {
        for (MessagePart part : outputParts) {
            MessagePart inPart = inputMessage.getPart(part.getName());
            if (inPart != null && part.getDescriptorKind() == inPart.getDescriptorKind() &&
                part.getDescriptor().equals(inPart.getDescriptor())) {
                inPart.setMode(Mode.INOUT);
                continue;
            }
            outParts.add(part);
        }
        //append the out parts to the parameterList
        for (MessagePart part : outParts) {
            if (outParts.size() == 1)
                part.setReturn(true);
            params.add(part);
        }
    }
    return params;
}
/**
 * Builds the fully-qualified Java class name for a port.
 *
 * @param port   the WSDL port
 * @param suffix appended to the mangled port name (e.g. "Impl")
 * @return the package-qualified class name for the port
 */
protected String getClassName(Port port, String suffix) {
    String localName = port.getName().getLocalPart();
    String mangled = JAXBRIContext.mangleNameToClassName(localName);
    return options.defaultPackage + "." + mangled + suffix;
}
protected boolean isConflictingServiceClassName(String name) {
    // A service class must not clash with any SEI, JAXB, or exception class.
    return conflictsWithSEIClass(name)
        || conflictsWithJAXBClass(name)
        || conflictsWithExceptionClass(name);
}
private boolean conflictsWithSEIClass(String name) {
    // True when the collector already recorded this name as an SEI.
    Set<String> seiNames = classNameCollector.getSeiClassNames();
    if (seiNames == null) {
        return false;
    }
    return seiNames.contains(name);
}
private boolean conflictsWithJAXBClass(String name) {
    // True when JAXB schema compilation already generated this class name.
    Set<String> jaxbNames = classNameCollector.getJaxbGeneratedClassNames();
    if (jaxbNames == null) {
        return false;
    }
    return jaxbNames.contains(name);
}
private boolean conflictsWithExceptionClass(String name) {
    // True when a generated fault exception already uses this class name.
    Set<String> exceptionNames = classNameCollector.getExceptionClassNames();
    if (exceptionNames == null) {
        return false;
    }
    return exceptionNames.contains(name);
}
protected boolean isConflictingExceptionClassName(String name) {
    // Exception names only need to avoid SEI and JAXB class names.
    if (conflictsWithSEIClass(name)) {
        return true;
    }
    return conflictsWithJAXBClass(name);
}
/** @return the JAXB model builder used by this modeler for schema binding */
protected JAXBModelBuilder getJAXBModelBuilder() {
    return jaxbModelBuilder;
}
/**
 * Checks that every operation of a SOAP binding shares a single style.
 *
 * @return true when the binding is SOAP 1.1 or 1.2 and all soap:operation
 *         styles agree with the binding-level style; false for non-SOAP
 *         bindings or mixed-style bindings
 */
protected boolean validateWSDLBindingStyle(Binding binding) {
    SOAPBinding soapBinding =
        (SOAPBinding) getExtensionOfType(binding, SOAPBinding.class);
    //dont process the binding
    if (soapBinding == null)
        soapBinding =
            (SOAPBinding) getExtensionOfType(binding, SOAP12Binding.class);
    if (soapBinding == null)
        return false;
    //if soapbind:binding has no style attribute, the default is DOCUMENT
    if (soapBinding.getStyle() == null)
        soapBinding.setStyle(SOAPStyle.DOCUMENT);
    SOAPStyle opStyle = soapBinding.getStyle();
    for (Iterator iter = binding.operations(); iter.hasNext();) {
        BindingOperation bindingOperation =
            (BindingOperation) iter.next();
        SOAPOperation soapOperation =
            (SOAPOperation) getExtensionOfType(bindingOperation,
                SOAPOperation.class);
        if (soapOperation != null) {
            // Operation-level style overrides the binding-level default.
            SOAPStyle currOpStyle = (soapOperation.getStyle() != null) ? soapOperation.getStyle() : soapBinding.getStyle();
            //dont check for the first operation
            if (!currOpStyle.equals(opStyle))
                return false;
        }
    }
    return true;
}
/**
 * Honors a jaxws:enableWrapperStyle customization declared on the portType.
 *
 * @param port     the model port to update
 * @param portType the WSDL portType that may carry the customization
 */
private void applyWrapperStyleCustomization(Port port, PortType portType) {
    JAXWSBinding binding = (JAXWSBinding) getExtensionOfType(portType, JAXWSBinding.class);
    if (binding == null) {
        return;
    }
    Boolean wrapperStyle = binding.isEnableWrapperStyle();
    if (wrapperStyle != null) {
        port.setWrapped(wrapperStyle);
    }
}
protected static void setDocumentationIfPresent(
    ModelObject obj,
    Documentation documentation) {
    // Copy WSDL documentation text into the model's javadoc, when present.
    if (documentation == null) {
        return;
    }
    String content = documentation.getContent();
    if (content != null) {
        obj.setJavaDoc(content);
    }
}
protected String getJavaNameForOperation(Operation operation) {
    // Prefix Java reserved words with '_' so the generated method compiles.
    String methodName = operation.getJavaMethodName();
    return Names.isJavaReservedWord(methodName) ? "_" + methodName : methodName;
}
private void reportError(Entity entity,
    String formattedMsg, Exception nestedException ) {
    // entity may be null when no source location is available.
    Locator locator = (entity != null) ? entity.getLocator() : null;
    errReceiver.error(new SAXParseException2(formattedMsg, locator, nestedException));
}
}
|
samskivert/ikvm-openjdk
|
build/linux-amd64/impsrc/com/sun/tools/internal/ws/processor/modeler/wsdl/WSDLModeler.java
|
Java
|
gpl-2.0
| 131,427
|
/*
translation.c : irssi
Copyright (C) 1999-2000 Timo Sirainen
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
#include "module.h"
#include "module-formats.h"
#include "signals.h"
#include "line-split.h"
#include "misc.h"
#include "levels.h"
#include "settings.h"
#include "printtext.h"
unsigned char translation_in[256], translation_out[256];
static char *current_translation;
void translation_reset(void)
{
	int n;

	/* identity mapping: every byte translates to itself, both ways */
	for (n = 0; n < 256; n++) {
		translation_in[n] = (unsigned char) n;
		translation_out[n] = (unsigned char) n;
	}
}
void translate_output(char *text)
{
	/* Map each outgoing byte through the output translation table,
	   in place, up to (not including) the NUL terminator. */
	unsigned char *p;

	for (p = (unsigned char *) text; *p != '\0'; p++)
		*p = translation_out[*p];
}
/* Convert one hex digit character to its numeric value (0-15).
 * NOTE(review): evaluates the argument twice -- do not pass expressions
 * with side effects. Assumes the character is a valid hex digit. */
#define gethex(a) \
        (i_isdigit(a) ? ((a)-'0') : (i_toupper(a)-'A'+10))
/* Parses one line of a translation file: a comma-separated list of 0xNN
 * byte values. The first 256 values fill translation_in[], the next 256
 * fill translation_out[]; *pos is the running index carried across calls. */
void translation_parse_line(const char *str, int *pos)
{
	const char *ptr;
	int value;

	for (ptr = str; *ptr != '\0'; ptr++) {
		if (ptr[0] != '0' || ptr[1] != 'x')
			break;
		ptr += 2;

		/* don't read past the terminator after a truncated "0x" token */
		if (ptr[0] == '\0' || ptr[1] == '\0')
			break;

		value = (gethex(ptr[0]) << 4) + gethex(ptr[1]);
		/* bound check: a line with more than 512 total entries used to
		   write past the end of translation_out[] */
		if (*pos < 256)
			translation_in[*pos] = (unsigned char) value;
		else if (*pos < 512)
			translation_out[*pos-256] = (unsigned char) value;
		(*pos)++;

		ptr += 2;
		if (*ptr != ',') break;
	}
}
/* Loads a 512-entry translation table from `file` (with "~" expansion).
 * Returns TRUE on success; on failure prints an error and, for a malformed
 * file, resets the tables to identity. */
int translation_read(const char *file)
{
	char tmpbuf[1024], *str, *path;
	LINEBUF_REC *buffer;
	int f, pos, ret, recvlen;

	g_return_val_if_fail(file != NULL, FALSE);

	/* BUG FIX: the home-expanded `path` was computed but the original
	   unexpanded `file` was opened, so "~/..." names never worked. */
	path = convert_home(file);
	f = open(path, O_RDONLY);
	g_free(path);

	if (f == -1) {
		printformat(NULL, NULL, MSGLEVEL_CLIENTERROR,
			    TXT_TRANSLATION_NOT_FOUND, file,
			    g_strerror(errno));
		return FALSE;
	}

	pos = 0; buffer = NULL;
	while (pos < 512) {
		recvlen = read(f, tmpbuf, sizeof(tmpbuf));

		ret = line_split(tmpbuf, recvlen, &str, &buffer);
		if (ret <= 0) break;

		translation_parse_line(str, &pos);
	}
	line_split_free(buffer);

	close(f);
	if (pos != 512) {
		/* short or malformed file: fall back to identity mapping */
		translation_reset();
		printformat(NULL, NULL, MSGLEVEL_CLIENTERROR,
			    TXT_TRANSLATION_FILE_ERROR, file);
	}

	return pos == 512;
}
/* "setup changed" handler: (re)load the translation table named by the
 * "translation" setting. An empty setting resets to the identity map. */
static void read_settings(void)
{
	const char *translation;

	translation = settings_get_str("translation");
	if (*translation == '\0') {
		/* setting cleared: drop the cached name and reset tables */
		if (current_translation != NULL) {
			g_free_and_null(current_translation);
			translation_reset();
		}
		return;
	}

	/* reload only when the configured file name actually changed */
	if (current_translation == NULL ||
	    strcmp(translation, current_translation) != 0) {
		g_free_not_null(current_translation);
		current_translation = g_strdup(translation);
		translation_read(translation);
	}
}
void translation_init(void)
{
	/* Start from the identity mapping, register the setting and its
	   change handler, then apply whatever is currently configured. */
	current_translation = NULL;
	translation_reset();

	settings_add_str("misc", "translation", "");
	signal_add("setup changed", (SIGNAL_FUNC) read_settings);
	read_settings();
}
void translation_deinit(void)
{
	/* NOTE(review): calling read_settings() during deinit only re-applies
	 * the current setting and looks redundant -- kept as-is; confirm no
	 * cleanup side effect is expected here. */
	read_settings();
	signal_remove("setup changed", (SIGNAL_FUNC) read_settings);
}
|
ipwndev/DSLinux-Mirror
|
user/irssi/src/src/fe-common/core/translation.c
|
C
|
gpl-2.0
| 3,533
|
/*
* Copyright (C) 1996-2015 The Squid Software Foundation and contributors
*
* Squid software is distributed under GPLv2+ license and includes
* contributions from numerous individuals and organizations.
* Please see the COPYING and CONTRIBUTORS files for details.
*/
#include "squid.h"
#include "mem_node.h"
#define STUB_API "mem_node.cc"
#include "tests/STUB.h"
/* Linker stubs for unit tests: these mem_node members are not expected to
 * be exercised. STUB / STUB_RETVAL come from tests/STUB.h -- presumably
 * they report the unexpected call; confirm against that header. */
mem_node::mem_node(int64_t offset):nodeBuffer(0,offset,data) STUB
size_t mem_node::InUseCount() STUB_RETVAL(0)
|
krichter722/squid
|
src/tests/stub_mem_node.cc
|
C++
|
gpl-2.0
| 490
|
/******************************************************************************
*
* Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
*
*
******************************************************************************/
#define _IOCTL_CFG80211_C_
#include <drv_conf.h>
#include <osdep_service.h>
#include <drv_types.h>
#include <rtw_ioctl.h>
#include <rtw_ioctl_set.h>
#include <rtw_ioctl_query.h>
#include <xmit_osdep.h>
#ifdef CONFIG_IOCTL_CFG80211
#include "ioctl_cfg80211.h"
#define RTW_MAX_MGMT_TX_CNT (8)
#define RTW_SCAN_IE_LEN_MAX 2304
#define RTW_MAX_REMAIN_ON_CHANNEL_DURATION 65535 //ms
#define RTW_MAX_NUM_PMKIDS 4
#define RTW_CH_MAX_2G_CHANNEL 14 /* Max channel in 2G band */
/* Cipher suites advertised to cfg80211 (WEP40/WEP104/TKIP/CCMP). */
static const u32 rtw_cipher_suites[] = {
	WLAN_CIPHER_SUITE_WEP40,
	WLAN_CIPHER_SUITE_WEP104,
	WLAN_CIPHER_SUITE_TKIP,
	WLAN_CIPHER_SUITE_CCMP,
};
#define RATETAB_ENT(_rate, _rateid, _flags) \
{ \
.bitrate = (_rate), \
.hw_value = (_rateid), \
.flags = (_flags), \
}
#define CHAN2G(_channel, _freq, _flags) { \
.band = IEEE80211_BAND_2GHZ, \
.center_freq = (_freq), \
.hw_value = (_channel), \
.flags = (_flags), \
.max_antenna_gain = 0, \
.max_power = 30, \
}
#define CHAN5G(_channel, _flags) { \
.band = IEEE80211_BAND_5GHZ, \
.center_freq = 5000 + (5 * (_channel)), \
.hw_value = (_channel), \
.flags = (_flags), \
.max_antenna_gain = 0, \
.max_power = 30, \
}
static struct ieee80211_rate rtw_rates[] = {
RATETAB_ENT(10, 0x1, 0),
RATETAB_ENT(20, 0x2, 0),
RATETAB_ENT(55, 0x4, 0),
RATETAB_ENT(110, 0x8, 0),
RATETAB_ENT(60, 0x10, 0),
RATETAB_ENT(90, 0x20, 0),
RATETAB_ENT(120, 0x40, 0),
RATETAB_ENT(180, 0x80, 0),
RATETAB_ENT(240, 0x100, 0),
RATETAB_ENT(360, 0x200, 0),
RATETAB_ENT(480, 0x400, 0),
RATETAB_ENT(540, 0x800, 0),
};
#define rtw_a_rates (rtw_rates + 4)
#define RTW_A_RATES_NUM 8
#define rtw_g_rates (rtw_rates + 0)
#define RTW_G_RATES_NUM 12
#define RTW_2G_CHANNELS_NUM 14
#define RTW_5G_CHANNELS_NUM 37
static struct ieee80211_channel rtw_2ghz_channels[] = {
CHAN2G(1, 2412, 0),
CHAN2G(2, 2417, 0),
CHAN2G(3, 2422, 0),
CHAN2G(4, 2427, 0),
CHAN2G(5, 2432, 0),
CHAN2G(6, 2437, 0),
CHAN2G(7, 2442, 0),
CHAN2G(8, 2447, 0),
CHAN2G(9, 2452, 0),
CHAN2G(10, 2457, 0),
CHAN2G(11, 2462, 0),
CHAN2G(12, 2467, 0),
CHAN2G(13, 2472, 0),
CHAN2G(14, 2484, 0),
};
static struct ieee80211_channel rtw_5ghz_a_channels[] = {
CHAN5G(34, 0), CHAN5G(36, 0),
CHAN5G(38, 0), CHAN5G(40, 0),
CHAN5G(42, 0), CHAN5G(44, 0),
CHAN5G(46, 0), CHAN5G(48, 0),
CHAN5G(52, 0), CHAN5G(56, 0),
CHAN5G(60, 0), CHAN5G(64, 0),
CHAN5G(100, 0), CHAN5G(104, 0),
CHAN5G(108, 0), CHAN5G(112, 0),
CHAN5G(116, 0), CHAN5G(120, 0),
CHAN5G(124, 0), CHAN5G(128, 0),
CHAN5G(132, 0), CHAN5G(136, 0),
CHAN5G(140, 0), CHAN5G(149, 0),
CHAN5G(153, 0), CHAN5G(157, 0),
CHAN5G(161, 0), CHAN5G(165, 0),
CHAN5G(184, 0), CHAN5G(188, 0),
CHAN5G(192, 0), CHAN5G(196, 0),
CHAN5G(200, 0), CHAN5G(204, 0),
CHAN5G(208, 0), CHAN5G(212, 0),
CHAN5G(216, 0),
};
void rtw_2g_channels_init(struct ieee80211_channel *channels)
{
	/* Fill the caller's array from the static 2.4GHz channel template. */
	_rtw_memcpy((void *)channels, (void *)rtw_2ghz_channels,
		    RTW_2G_CHANNELS_NUM * sizeof(struct ieee80211_channel));
}
void rtw_5g_channels_init(struct ieee80211_channel *channels)
{
	/* Fill the caller's array from the static 5GHz channel template. */
	_rtw_memcpy((void *)channels, (void *)rtw_5ghz_a_channels,
		    RTW_5G_CHANNELS_NUM * sizeof(struct ieee80211_channel));
}
void rtw_2g_rates_init(struct ieee80211_rate *rates)
{
	/* Copy the 2.4GHz (802.11b/g) rate table into the caller's array. */
	_rtw_memcpy(rates, rtw_g_rates,
		    RTW_G_RATES_NUM * sizeof(struct ieee80211_rate));
}
void rtw_5g_rates_init(struct ieee80211_rate *rates)
{
	/* Copy the 5GHz (802.11a) rate table into the caller's array. */
	_rtw_memcpy(rates, rtw_a_rates,
		    RTW_A_RATES_NUM * sizeof(struct ieee80211_rate));
}
/* Allocates an ieee80211_supported_band for the given band as ONE
 * contiguous block: [band struct][n_channels channels][n_bitrates rates].
 * The channel/rate arrays are carved out of the same allocation and filled
 * from the static templates above. Returns NULL for an unknown band or on
 * allocation failure; release with rtw_spt_band_free(). */
struct ieee80211_supported_band *rtw_spt_band_alloc(
	enum ieee80211_band band
	)
{
	struct ieee80211_supported_band *spt_band = NULL;
	int n_channels, n_bitrates;

	if(band == IEEE80211_BAND_2GHZ)
	{
		n_channels = RTW_2G_CHANNELS_NUM;
		n_bitrates = RTW_G_RATES_NUM;
	}
	else if(band == IEEE80211_BAND_5GHZ)
	{
		n_channels = RTW_5G_CHANNELS_NUM;
		n_bitrates = RTW_A_RATES_NUM;
	}
	else
	{
		goto exit;
	}

	spt_band = (struct ieee80211_supported_band *)rtw_zmalloc(
		sizeof(struct ieee80211_supported_band)
		+ sizeof(struct ieee80211_channel)*n_channels
		+ sizeof(struct ieee80211_rate)*n_bitrates
	);
	if(!spt_band)
		goto exit;

	/* carve the trailing arrays out of the single allocation */
	spt_band->channels = (struct ieee80211_channel*)(((u8*)spt_band)+sizeof(struct ieee80211_supported_band));
	spt_band->bitrates= (struct ieee80211_rate*)(((u8*)spt_band->channels)+sizeof(struct ieee80211_channel)*n_channels);
	spt_band->band = band;
	spt_band->n_channels = n_channels;
	spt_band->n_bitrates = n_bitrates;

	if(band == IEEE80211_BAND_2GHZ)
	{
		rtw_2g_channels_init(spt_band->channels);
		rtw_2g_rates_init(spt_band->bitrates);
	}
	else if(band == IEEE80211_BAND_5GHZ)
	{
		rtw_5g_channels_init(spt_band->channels);
		rtw_5g_rates_init(spt_band->bitrates);
	}

	//spt_band.ht_cap

exit:
	return spt_band;
}
/* Frees a band allocated by rtw_spt_band_alloc(). The size must be
 * recomputed from the band because the whole object (struct + channel and
 * rate arrays) is a single allocation. */
void rtw_spt_band_free(struct ieee80211_supported_band *spt_band)
{
	u32 size;

	if(!spt_band)
		return;

	if(spt_band->band == IEEE80211_BAND_2GHZ)
	{
		size = sizeof(struct ieee80211_supported_band)
			+ sizeof(struct ieee80211_channel)*RTW_2G_CHANNELS_NUM
			+ sizeof(struct ieee80211_rate)*RTW_G_RATES_NUM;
	}
	else if(spt_band->band == IEEE80211_BAND_5GHZ)
	{
		size = sizeof(struct ieee80211_supported_band)
			+ sizeof(struct ieee80211_channel)*RTW_5G_CHANNELS_NUM
			+ sizeof(struct ieee80211_rate)*RTW_A_RATES_NUM;
	}
	else
	{
		/* BUG FIX: the old code fell through here and called
		 * rtw_mfree() with an uninitialized size (undefined
		 * behavior). An unknown band cannot be sized, so bail out. */
		return;
	}

	rtw_mfree((u8*)spt_band, size);
}
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37)) || defined(COMPAT_KERNEL_RELEASE)
static const struct ieee80211_txrx_stypes
rtw_cfg80211_default_mgmt_stypes[NUM_NL80211_IFTYPES] = {
[NL80211_IFTYPE_ADHOC] = {
.tx = 0xffff,
.rx = BIT(IEEE80211_STYPE_ACTION >> 4)
},
[NL80211_IFTYPE_STATION] = {
.tx = 0xffff,
.rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
BIT(IEEE80211_STYPE_PROBE_REQ >> 4)
},
[NL80211_IFTYPE_AP] = {
.tx = 0xffff,
.rx = BIT(IEEE80211_STYPE_ASSOC_REQ >> 4) |
BIT(IEEE80211_STYPE_REASSOC_REQ >> 4) |
BIT(IEEE80211_STYPE_PROBE_REQ >> 4) |
BIT(IEEE80211_STYPE_DISASSOC >> 4) |
BIT(IEEE80211_STYPE_AUTH >> 4) |
BIT(IEEE80211_STYPE_DEAUTH >> 4) |
BIT(IEEE80211_STYPE_ACTION >> 4)
},
[NL80211_IFTYPE_AP_VLAN] = {
/* copy AP */
.tx = 0xffff,
.rx = BIT(IEEE80211_STYPE_ASSOC_REQ >> 4) |
BIT(IEEE80211_STYPE_REASSOC_REQ >> 4) |
BIT(IEEE80211_STYPE_PROBE_REQ >> 4) |
BIT(IEEE80211_STYPE_DISASSOC >> 4) |
BIT(IEEE80211_STYPE_AUTH >> 4) |
BIT(IEEE80211_STYPE_DEAUTH >> 4) |
BIT(IEEE80211_STYPE_ACTION >> 4)
},
[NL80211_IFTYPE_P2P_CLIENT] = {
.tx = 0xffff,
.rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
BIT(IEEE80211_STYPE_PROBE_REQ >> 4)
},
[NL80211_IFTYPE_P2P_GO] = {
.tx = 0xffff,
.rx = BIT(IEEE80211_STYPE_ASSOC_REQ >> 4) |
BIT(IEEE80211_STYPE_REASSOC_REQ >> 4) |
BIT(IEEE80211_STYPE_PROBE_REQ >> 4) |
BIT(IEEE80211_STYPE_DISASSOC >> 4) |
BIT(IEEE80211_STYPE_AUTH >> 4) |
BIT(IEEE80211_STYPE_DEAUTH >> 4) |
BIT(IEEE80211_STYPE_ACTION >> 4)
},
};
#endif
static int rtw_ieee80211_channel_to_frequency(int chan, int band)
{
	/* See 802.11 17.3.8.3.2 and Annex J: channel numbers overlap between
	 * the 2.4GHz and 5GHz bands, so the band must be supplied. */
	if (band == IEEE80211_BAND_5GHZ) {
		return (chan >= 182 && chan <= 196) ? 4000 + chan * 5
						    : 5000 + chan * 5;
	}

	/* IEEE80211_BAND_2GHZ */
	if (chan == 14)
		return 2484;
	if (chan < 14)
		return 2407 + chan * 5;
	return 0; /* not supported */
}
#define MAX_BSSINFO_LEN 1000
/* Reports a scanned/known BSS to cfg80211.
 *
 * Synthesizes a fake 802.11 management frame (beacon when
 * network.Reserved[0] == 1, otherwise probe response) in a stack buffer
 * from the cached IEs and hands it to cfg80211_inform_bss_frame().
 * Signal strength is reported in mBm (100*dBm), matching the wiphy's
 * CFG80211_SIGNAL_TYPE_MBM. Returns 0 on success (also when the IEs are
 * too long to report), -EINVAL when cfg80211 rejects the frame. */
static int rtw_cfg80211_inform_bss(_adapter *padapter, struct wlan_network *pnetwork)
{
	int ret=0;
	struct ieee80211_channel *notify_channel;
	struct cfg80211_bss *bss;
	//struct ieee80211_supported_band *band;
	u16 channel;
	u32 freq;
	u64 notify_timestamp;
	u16 notify_capability;
	u16 notify_interval;
	u8 *notify_ie;
	size_t notify_ielen;
	s32 notify_signal;
	u8 buf[MAX_BSSINFO_LEN], *pbuf;
	size_t len,bssinf_len=0;
	struct rtw_ieee80211_hdr *pwlanhdr;
	unsigned short *fctrl;
	u8 bc_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};

	struct wireless_dev *wdev = padapter->rtw_wdev;
	struct wiphy *wiphy = wdev->wiphy;
	struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);

	//DBG_8192C("%s\n", __func__);

	/* the 3-address header plus all IEs must fit in the local buffer */
	bssinf_len = pnetwork->network.IELength+sizeof (struct rtw_ieee80211_hdr_3addr);
	if(bssinf_len > MAX_BSSINFO_LEN){
		DBG_871X("%s IE Length too long > %d byte \n",__FUNCTION__,MAX_BSSINFO_LEN);
		goto exit;
	}

	channel = pnetwork->network.Configuration.DSConfig;
	if (channel <= RTW_CH_MAX_2G_CHANNEL)
		freq = rtw_ieee80211_channel_to_frequency(channel, IEEE80211_BAND_2GHZ);
	else
		freq = rtw_ieee80211_channel_to_frequency(channel, IEEE80211_BAND_5GHZ);

	notify_channel = ieee80211_get_channel(wiphy, freq);

	//rtw_get_timestampe_from_ie()
	notify_timestamp = jiffies_to_msecs(jiffies)*1000; /* uSec */

	notify_interval = le16_to_cpu(*(u16*)rtw_get_beacon_interval_from_ie(pnetwork->network.IEs));
	notify_capability = le16_to_cpu(*(u16*)rtw_get_capability_from_ie(pnetwork->network.IEs));

	notify_ie = pnetwork->network.IEs+_FIXED_IE_LENGTH_;
	notify_ielen = pnetwork->network.IELength-_FIXED_IE_LENGTH_;

	//We've set wiphy's signal_type as CFG80211_SIGNAL_TYPE_MBM: signal strength in mBm (100*dBm)
	if ( check_fwstate(pmlmepriv, _FW_LINKED)== _TRUE &&
		is_same_network(&pmlmepriv->cur_network.network, &pnetwork->network)) {
		/* currently associated BSS: use the live RSSI */
		notify_signal = 100*translate_percentage_to_dbm(padapter->recvpriv.signal_strength);//dbm
	} else {
		notify_signal = 100*translate_percentage_to_dbm(pnetwork->network.PhyInfo.SignalStrength);//dbm
	}

	/*
	DBG_8192C("bssid: %2.2X:%2.2X:%2.2X:%2.2X:%2.2X:%2.2X\n",
			pnetwork->network.MacAddress[0], pnetwork->network.MacAddress[1], pnetwork->network.MacAddress[2],
			pnetwork->network.MacAddress[3], pnetwork->network.MacAddress[4], pnetwork->network.MacAddress[5]);
	DBG_8192C("Channel: %d(%d)\n", channel, freq);
	DBG_8192C("Capability: %X\n", notify_capability);
	DBG_8192C("Beacon interval: %d\n", notify_interval);
	DBG_8192C("Signal: %d\n", notify_signal);
	DBG_8192C("notify_timestamp: %#018llx\n", notify_timestamp);
	*/

	/* build the synthetic management frame header */
	pbuf = buf;

	pwlanhdr = (struct rtw_ieee80211_hdr *)pbuf;
	fctrl = &(pwlanhdr->frame_ctl);
	*(fctrl) = 0;

	SetSeqNum(pwlanhdr, 0/*pmlmeext->mgnt_seq*/);
	//pmlmeext->mgnt_seq++;

	if (pnetwork->network.Reserved[0] == 1) { // WIFI_BEACON
		/* beacons are broadcast */
		_rtw_memcpy(pwlanhdr->addr1, bc_addr, ETH_ALEN);
		SetFrameSubType(pbuf, WIFI_BEACON);
	} else {
		/* probe responses are addressed to our own MAC */
		_rtw_memcpy(pwlanhdr->addr1, myid(&(padapter->eeprompriv)), ETH_ALEN);
		SetFrameSubType(pbuf, WIFI_PROBERSP);
	}

	_rtw_memcpy(pwlanhdr->addr2, pnetwork->network.MacAddress, ETH_ALEN);
	_rtw_memcpy(pwlanhdr->addr3, pnetwork->network.MacAddress, ETH_ALEN);

	pbuf += sizeof(struct rtw_ieee80211_hdr_3addr);
	len = sizeof (struct rtw_ieee80211_hdr_3addr);

	_rtw_memcpy(pbuf, pnetwork->network.IEs, pnetwork->network.IELength);
	len += pnetwork->network.IELength;

	//#ifdef CONFIG_P2P
	//if(rtw_get_p2p_ie(pnetwork->network.IEs+12, pnetwork->network.IELength-12, NULL, NULL))
	//{
	//	DBG_8192C("%s, got p2p_ie\n", __func__);
	//}
	//#endif

#if 1
	bss = cfg80211_inform_bss_frame(wiphy, notify_channel, (struct ieee80211_mgmt *)buf,
		len, notify_signal, GFP_ATOMIC);
#else
	bss = cfg80211_inform_bss(wiphy, notify_channel, (const u8 *)pnetwork->network.MacAddress,
		notify_timestamp, notify_capability, notify_interval, notify_ie,
		notify_ielen, notify_signal, GFP_ATOMIC/*GFP_KERNEL*/);
#endif

	if (unlikely(!bss)) {
		DBG_8192C("rtw_cfg80211_inform_bss error\n");
		return -EINVAL;
	}

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38))
#ifndef COMPAT_KERNEL_RELEASE
	//patch for cfg80211, update beacon ies to information_elements
	if (pnetwork->network.Reserved[0] == 1) { // WIFI_BEACON

		if(bss->len_information_elements != bss->len_beacon_ies)
		{
			bss->information_elements = bss->beacon_ies;
			bss->len_information_elements = bss->len_beacon_ies;
		}
	}
#endif //COMPAT_KERNEL_RELEASE
#endif //LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38)

	/*
	{
		if( bss->information_elements == bss->proberesp_ies)
		{
			if( bss->len_information_elements != bss->len_proberesp_ies)
			{
				DBG_8192C("error!, len_information_elements != bss->len_proberesp_ies\n");
			}
		}
		else if(bss->len_information_elements < bss->len_beacon_ies)
		{
			bss->information_elements = bss->beacon_ies;
			bss->len_information_elements = bss->len_beacon_ies;
		}
	}
	*/

	/* drop the reference taken by cfg80211_inform_bss_frame() */
	//cfg80211_put_bss(bss);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0)
	cfg80211_put_bss(wiphy, bss);
#else
	cfg80211_put_bss(bss);
#endif

exit:
	return ret;
}
/*
 * rtw_cfg80211_indicate_connect - report an association result to cfg80211
 * @padapter: adapter whose mlme state holds the association outcome
 *
 * Only station (and, on kernels >= 2.6.37, P2P client) interfaces report
 * through this path; AP mode and other iftypes return early.  On a roam
 * (CONFIG_LAYER2_ROAMING and rtw_to_roaming() > 0) cfg80211_roamed() is
 * issued, otherwise cfg80211_connect_result() with WLAN_STATUS_SUCCESS.
 * The assoc request/response IEs handed to cfg80211 skip the 802.11
 * 3-address header plus the fixed fields (2 bytes for the request,
 * 6 bytes for the response) so only the tagged IEs remain.
 */
void rtw_cfg80211_indicate_connect(_adapter *padapter)
{
	struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
	struct wlan_network *cur_network = &(pmlmepriv->cur_network);
	struct wireless_dev *pwdev = padapter->rtw_wdev;
#ifdef CONFIG_P2P
	struct wifidirect_info *pwdinfo= &(padapter->wdinfo);
#endif

	DBG_8192C("%s(padapter=%p)\n", __func__, padapter);

	/* Only STA / P2P-client iftypes use the connect-result path. */
	if (pwdev->iftype != NL80211_IFTYPE_STATION
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37)) || defined(COMPAT_KERNEL_RELEASE)
		&& pwdev->iftype != NL80211_IFTYPE_P2P_CLIENT
#endif
	) {
		return;
	}

	if(check_fwstate(pmlmepriv, WIFI_AP_STATE) == _TRUE)
		return;

#ifdef CONFIG_P2P
	if(pwdinfo->driver_interface == DRIVER_CFG80211 )
	{
		if(!rtw_p2p_chk_state(pwdinfo, P2P_STATE_NONE))
		{
			/* P2P negotiation succeeded: remember the previous state
			 * and take the client role. */
			rtw_p2p_set_pre_state(pwdinfo, rtw_p2p_state(pwdinfo));
			rtw_p2p_set_role(pwdinfo, P2P_ROLE_CLIENT);
			rtw_p2p_set_state(pwdinfo, P2P_STATE_GONEGO_OK);
			DBG_8192C("%s, role=%d, p2p_state=%d, pre_p2p_state=%d\n", __func__, rtw_p2p_role(pwdinfo), rtw_p2p_state(pwdinfo), rtw_p2p_pre_state(pwdinfo));
		}
	}
#endif //CONFIG_P2P

#ifdef CONFIG_LAYER2_ROAMING
	if (rtw_to_roaming(padapter) > 0) {
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39) || defined(COMPAT_KERNEL_RELEASE)
		/* Kernels after 2.6.39 expect the channel the roam landed on. */
		struct wiphy *wiphy = pwdev->wiphy;
		struct ieee80211_channel *notify_channel;
		u32 freq;
		u16 channel = cur_network->network.Configuration.DSConfig;

		if (channel <= RTW_CH_MAX_2G_CHANNEL)
			freq = rtw_ieee80211_channel_to_frequency(channel, IEEE80211_BAND_2GHZ);
		else
			freq = rtw_ieee80211_channel_to_frequency(channel, IEEE80211_BAND_5GHZ);

		notify_channel = ieee80211_get_channel(wiphy, freq);
#endif
		DBG_871X("%s call cfg80211_roamed\n", __FUNCTION__);
		cfg80211_roamed(padapter->pnetdev
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39) || defined(COMPAT_KERNEL_RELEASE)
			, notify_channel
#endif
			, cur_network->network.MacAddress
			, pmlmepriv->assoc_req+sizeof(struct rtw_ieee80211_hdr_3addr)+2
			, pmlmepriv->assoc_req_len-sizeof(struct rtw_ieee80211_hdr_3addr)-2
			, pmlmepriv->assoc_rsp+sizeof(struct rtw_ieee80211_hdr_3addr)+6
			, pmlmepriv->assoc_rsp_len-sizeof(struct rtw_ieee80211_hdr_3addr)-6
			, GFP_ATOMIC);
	}
	else
#endif
	{
		DBG_8192C("pwdev->sme_state(b)=%d\n", pwdev->sme_state);
		cfg80211_connect_result(padapter->pnetdev, cur_network->network.MacAddress
			, pmlmepriv->assoc_req+sizeof(struct rtw_ieee80211_hdr_3addr)+2
			, pmlmepriv->assoc_req_len-sizeof(struct rtw_ieee80211_hdr_3addr)-2
			, pmlmepriv->assoc_rsp+sizeof(struct rtw_ieee80211_hdr_3addr)+6
			, pmlmepriv->assoc_rsp_len-sizeof(struct rtw_ieee80211_hdr_3addr)-6
			, WLAN_STATUS_SUCCESS, GFP_ATOMIC);
		DBG_8192C("pwdev->sme_state(a)=%d\n", pwdev->sme_state);
	}
}
/*
 * rtw_cfg80211_indicate_disconnect - report link loss / connect failure
 * @padapter: adapter whose connection was dropped or failed
 *
 * Only relevant for station (and P2P client) iftypes; AP mode returns
 * early.  Depending on pwdev->sme_state this either reports a failed
 * connect attempt (SME_CONNECTING -> cfg80211_connect_result with
 * WLAN_STATUS_UNSPECIFIED_FAILURE) or a disconnection of an established
 * link (SME_CONNECTED -> cfg80211_disconnected).  The notification is
 * suppressed while mlmepriv.not_indic_disco is set.  Any active P2P
 * state is rolled back to its pre-connection value first.
 */
void rtw_cfg80211_indicate_disconnect(_adapter *padapter)
{
	struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
	struct wireless_dev *pwdev = padapter->rtw_wdev;
#ifdef CONFIG_P2P
	struct wifidirect_info *pwdinfo= &(padapter->wdinfo);
#endif

	DBG_8192C("%s(padapter=%p)\n", __func__, padapter);

	if (pwdev->iftype != NL80211_IFTYPE_STATION
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37)) || defined(COMPAT_KERNEL_RELEASE)
		&& pwdev->iftype != NL80211_IFTYPE_P2P_CLIENT
#endif
	) {
		return;
	}

	if(check_fwstate(pmlmepriv, WIFI_AP_STATE) == _TRUE)
		return;

#ifdef CONFIG_P2P
	if( pwdinfo->driver_interface == DRIVER_CFG80211 )
	{
		if(!rtw_p2p_chk_state(pwdinfo, P2P_STATE_NONE))
		{
			/* Stop pending P2P timers and fall back to the saved
			 * pre-connection state/role. */
			_cancel_timer_ex( &pwdinfo->find_phase_timer );
			_cancel_timer_ex( &pwdinfo->restore_p2p_state_timer );
			_cancel_timer_ex( &pwdinfo->pre_tx_scan_timer);

			rtw_p2p_set_state(pwdinfo, rtw_p2p_pre_state(pwdinfo));
			rtw_p2p_set_role(pwdinfo, P2P_ROLE_DEVICE);

			DBG_8192C("%s, role=%d, p2p_state=%d, pre_p2p_state=%d\n", __func__, rtw_p2p_role(pwdinfo), rtw_p2p_state(pwdinfo), rtw_p2p_pre_state(pwdinfo));
		}
	}
#endif //CONFIG_P2P

	if (!padapter->mlmepriv.not_indic_disco) {
		DBG_8192C("pwdev->sme_state(b)=%d\n", pwdev->sme_state);

		if(pwdev->sme_state==CFG80211_SME_CONNECTING)
			cfg80211_connect_result(padapter->pnetdev, NULL, NULL, 0, NULL, 0,
				WLAN_STATUS_UNSPECIFIED_FAILURE, GFP_ATOMIC/*GFP_KERNEL*/);
		else if(pwdev->sme_state==CFG80211_SME_CONNECTED)
			cfg80211_disconnected(padapter->pnetdev, 0, NULL, 0, GFP_ATOMIC);
		//else
			//DBG_8192C("pwdev->sme_state=%d\n", pwdev->sme_state);

		DBG_8192C("pwdev->sme_state(a)=%d\n", pwdev->sme_state);
	}
}
#ifdef CONFIG_AP_MODE
/*
 * set_pairwise_key - queue a firmware command installing a STA's unicast key
 * @padapter: adapter owning the command queue
 * @psta: station whose negotiated pairwise key and algorithm are installed
 *
 * Builds a _SetStaKey_CMD_ H2C command from the station's hwaddr,
 * dot118021XPrivacy algorithm and 16-byte unicast key, then enqueues it.
 * Returns _SUCCESS when the command was queued, _FAIL on allocation failure.
 */
static u8 set_pairwise_key(_adapter *padapter, struct sta_info *psta)
{
	struct cmd_priv *cmdpriv = &padapter->cmdpriv;
	struct cmd_obj *cmd;
	struct set_stakey_parm *parm;

	cmd = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
	if (cmd == NULL)
		return _FAIL;

	parm = (struct set_stakey_parm *)rtw_zmalloc(sizeof(struct set_stakey_parm));
	if (parm == NULL) {
		/* release the half-built command before bailing out */
		rtw_mfree((u8 *)cmd, sizeof(struct cmd_obj));
		return _FAIL;
	}

	init_h2fwcmd_w_parm_no_rsp(cmd, parm, _SetStaKey_CMD_);

	parm->algorithm = (u8)psta->dot118021XPrivacy;
	_rtw_memcpy(parm->addr, psta->hwaddr, ETH_ALEN);
	_rtw_memcpy(parm->key, &psta->dot118021x_UncstKey, 16);

	return rtw_enqueue_cmd(cmdpriv, cmd);
}
/*
 * set_group_key - queue a firmware command installing a group (broadcast) key
 * @padapter: adapter owning the command queue
 * @key: key material (up to 16 bytes are consumed, per algorithm)
 * @alg: cipher (_WEP40_/_WEP104_/_TKIP_/_TKIP_WTMIC_/_AES_/...)
 * @keyid: key index programmed into the parameter block
 *
 * Builds a _SetKey_CMD_ H2C command and enqueues it.  For WEP ciphers the
 * key index is also recorded in mlmepriv.key_mask.  Returns _SUCCESS when
 * queued, _FAIL on allocation failure.
 *
 * Fix: the original switch let `case _AES_:` fall through into `default:`
 * without a break.  Both arms happened to assign keylen = 16 so behavior
 * was unchanged, but the fallthrough was a latent bug waiting for either
 * arm to diverge; the cases are now merged explicitly and terminated.
 */
static int set_group_key(_adapter *padapter, u8 *key, u8 alg, int keyid)
{
	u8 keylen;
	struct cmd_obj* pcmd;
	struct setkey_parm *psetkeyparm;
	struct cmd_priv *pcmdpriv=&(padapter->cmdpriv);
	int res=_SUCCESS;

	DBG_8192C("%s\n", __FUNCTION__);

	pcmd = (struct cmd_obj*)rtw_zmalloc(sizeof(struct cmd_obj));
	if(pcmd==NULL){
		res= _FAIL;
		goto exit;
	}

	psetkeyparm=(struct setkey_parm*)rtw_zmalloc(sizeof(struct setkey_parm));
	if(psetkeyparm==NULL){
		rtw_mfree((unsigned char *)pcmd, sizeof(struct cmd_obj));
		res= _FAIL;
		goto exit;
	}

	_rtw_memset(psetkeyparm, 0, sizeof(struct setkey_parm));

	psetkeyparm->keyid=(u8)keyid;
	if (is_wep_enc(alg))
		padapter->mlmepriv.key_mask |= BIT(psetkeyparm->keyid);

	psetkeyparm->algorithm = alg;
	psetkeyparm->set_tx = 1;

	/* Key length follows the cipher; anything beyond WEP uses 16 bytes. */
	switch(alg)
	{
		case _WEP40_:
			keylen = 5;
			break;
		case _WEP104_:
			keylen = 13;
			break;
		case _TKIP_:
		case _TKIP_WTMIC_:
		case _AES_:
		default:
			keylen = 16;
			break;
	}

	_rtw_memcpy(&(psetkeyparm->key[0]), key, keylen);

	pcmd->cmdcode = _SetKey_CMD_;
	pcmd->parmbuf = (u8 *)psetkeyparm;
	pcmd->cmdsz = (sizeof(struct setkey_parm));
	pcmd->rsp = NULL;
	pcmd->rspsz = 0;

	_rtw_init_listhead(&pcmd->list);

	res = rtw_enqueue_cmd(pcmdpriv, pcmd);

exit:
	return res;
}
/*
 * set_wep_key - install a WEP key as a group key
 * @padapter: adapter owning the command queue
 * @key: WEP key material
 * @keylen: 5 for WEP40, 13 for WEP104; anything else maps to no privacy
 * @keyid: WEP key index
 *
 * Translates the key length into the matching cipher constant and
 * delegates to set_group_key().
 */
static int set_wep_key(_adapter *padapter, u8 *key, u8 keylen, int keyid)
{
	u8 cipher;

	if (keylen == 5)
		cipher = _WEP40_;
	else if (keylen == 13)
		cipher = _WEP104_;
	else
		cipher = _NO_PRIVACY_;	/* unsupported length: install no cipher */

	return set_group_key(padapter, key, cipher, keyid);
}
/*
 * rtw_cfg80211_ap_set_encryption - install keys while operating as AP/GO
 * @dev: net device in AP mode
 * @param: hostapd-style ieee_param holding algorithm name, key index,
 *         set_tx flag and key material
 * @param_len: total length of @param including the trailing key bytes
 *
 * Handles three key classes in order: static WEP default keys, group
 * (broadcast) keys (set_tx == 0, broadcast sta_addr) and per-station
 * pairwise keys (802.1X/PSK, set_tx == 1).  For TKIP the 32-byte key
 * blob carries the TX MIC key at offset 16 and the RX MIC key at
 * offset 24.  Returns 0 on success or -EINVAL on malformed input.
 */
static int rtw_cfg80211_ap_set_encryption(struct net_device *dev, struct ieee_param *param, u32 param_len)
{
	int ret = 0;
	u32 wep_key_idx, wep_key_len,wep_total_len;
	struct sta_info *psta = NULL, *pbcmc_sta = NULL;
	_adapter *padapter = (_adapter *)rtw_netdev_priv(dev);
	struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
	struct security_priv* psecuritypriv=&(padapter->securitypriv);
	struct sta_priv *pstapriv = &padapter->stapriv;

	DBG_8192C("%s\n", __FUNCTION__);

	param->u.crypt.err = 0;
	/* force a NUL-terminated algorithm name before any strcmp */
	param->u.crypt.alg[IEEE_CRYPT_ALG_NAME_LEN - 1] = '\0';

	//sizeof(struct ieee_param) = 64 bytes;
	//if (param_len != (u32) ((u8 *) param->u.crypt.key - (u8 *) param) + param->u.crypt.key_len)
	if (param_len != sizeof(struct ieee_param) + param->u.crypt.key_len)
	{
		ret = -EINVAL;
		goto exit;
	}

	/* Broadcast sta_addr selects a default/group key; otherwise the
	 * target station must already be known. */
	if (param->sta_addr[0] == 0xff && param->sta_addr[1] == 0xff &&
	    param->sta_addr[2] == 0xff && param->sta_addr[3] == 0xff &&
	    param->sta_addr[4] == 0xff && param->sta_addr[5] == 0xff)
	{
		if (param->u.crypt.idx >= WEP_KEYS)
		{
			ret = -EINVAL;
			goto exit;
		}
	}
	else
	{
		psta = rtw_get_stainfo(pstapriv, param->sta_addr);
		if(!psta)
		{
			//ret = -EINVAL;
			DBG_8192C("rtw_set_encryption(), sta has already been removed or never been added\n");
			goto exit;
		}
	}

	if (strcmp(param->u.crypt.alg, "none") == 0 && (psta==NULL))
	{
		//todo:clear default encryption keys
		DBG_8192C("clear default encryption keys, keyid=%d\n", param->u.crypt.idx);
		goto exit;
	}

	/* Static WEP default key (no station given). */
	if (strcmp(param->u.crypt.alg, "WEP") == 0 && (psta==NULL))
	{
		DBG_8192C("r871x_set_encryption, crypt.alg = WEP\n");

		wep_key_idx = param->u.crypt.idx;
		wep_key_len = param->u.crypt.key_len;

		DBG_8192C("r871x_set_encryption, wep_key_idx=%d, len=%d\n", wep_key_idx, wep_key_len);

		if((wep_key_idx >= WEP_KEYS) || (wep_key_len<=0))
		{
			ret = -EINVAL;
			goto exit;
		}

		if (wep_key_len > 0)
		{
			/* normalize to the two legal WEP sizes */
			wep_key_len = wep_key_len <= 5 ? 5 : 13;
		}

		if (psecuritypriv->bWepDefaultKeyIdxSet == 0)
		{
			//wep default key has not been set, so use this key index as default key.
			psecuritypriv->ndisencryptstatus = Ndis802_11Encryption1Enabled;
			psecuritypriv->dot11PrivacyAlgrthm=_WEP40_;
			psecuritypriv->dot118021XGrpPrivacy=_WEP40_;

			if(wep_key_len == 13)
			{
				psecuritypriv->dot11PrivacyAlgrthm=_WEP104_;
				psecuritypriv->dot118021XGrpPrivacy=_WEP104_;
			}

			psecuritypriv->dot11PrivacyKeyIndex = wep_key_idx;
		}

		_rtw_memcpy(&(psecuritypriv->dot11DefKey[wep_key_idx].skey[0]), param->u.crypt.key, wep_key_len);

		psecuritypriv->dot11DefKeylen[wep_key_idx] = wep_key_len;

		set_wep_key(padapter, param->u.crypt.key, wep_key_len, wep_key_idx);

		goto exit;
	}

	/* Group key path (broadcast addr, AP mode). */
	if(!psta && check_fwstate(pmlmepriv, WIFI_AP_STATE)) // //group key
	{
		if(param->u.crypt.set_tx == 0) //group key
		{
			if(strcmp(param->u.crypt.alg, "WEP") == 0)
			{
				DBG_8192C("%s, set group_key, WEP\n", __FUNCTION__);

				_rtw_memcpy(psecuritypriv->dot118021XGrpKey[param->u.crypt.idx].skey, param->u.crypt.key, (param->u.crypt.key_len>16 ?16:param->u.crypt.key_len));

				psecuritypriv->dot118021XGrpPrivacy = _WEP40_;
				if(param->u.crypt.key_len==13)
				{
					psecuritypriv->dot118021XGrpPrivacy = _WEP104_;
				}
			}
			else if(strcmp(param->u.crypt.alg, "TKIP") == 0)
			{
				DBG_8192C("%s, set group_key, TKIP\n", __FUNCTION__);

				psecuritypriv->dot118021XGrpPrivacy = _TKIP_;

				_rtw_memcpy(psecuritypriv->dot118021XGrpKey[param->u.crypt.idx].skey, param->u.crypt.key, (param->u.crypt.key_len>16 ?16:param->u.crypt.key_len));

				//DEBUG_ERR("set key length :param->u.crypt.key_len=%d\n", param->u.crypt.key_len);
				//set mic key
				/* TKIP MIC keys live at offsets 16 (TX) and 24 (RX). */
				_rtw_memcpy(psecuritypriv->dot118021XGrptxmickey[param->u.crypt.idx].skey, &(param->u.crypt.key[16]), 8);
				_rtw_memcpy(psecuritypriv->dot118021XGrprxmickey[param->u.crypt.idx].skey, &(param->u.crypt.key[24]), 8);

				psecuritypriv->busetkipkey = _TRUE;
			}
			else if(strcmp(param->u.crypt.alg, "CCMP") == 0)
			{
				DBG_8192C("%s, set group_key, CCMP\n", __FUNCTION__);

				psecuritypriv->dot118021XGrpPrivacy = _AES_;

				_rtw_memcpy(psecuritypriv->dot118021XGrpKey[param->u.crypt.idx].skey, param->u.crypt.key, (param->u.crypt.key_len>16 ?16:param->u.crypt.key_len));
			}
			else
			{
				DBG_8192C("%s, set group_key, none\n", __FUNCTION__);

				psecuritypriv->dot118021XGrpPrivacy = _NO_PRIVACY_;
			}

			psecuritypriv->dot118021XGrpKeyid = param->u.crypt.idx;

			psecuritypriv->binstallGrpkey = _TRUE;

			psecuritypriv->dot11PrivacyAlgrthm = psecuritypriv->dot118021XGrpPrivacy;//!!!

			set_group_key(padapter, param->u.crypt.key, psecuritypriv->dot118021XGrpPrivacy, param->u.crypt.idx);

			pbcmc_sta=rtw_get_bcmc_stainfo(padapter);
			if(pbcmc_sta)
			{
				pbcmc_sta->ieee8021x_blocked = _FALSE;
				pbcmc_sta->dot118021XPrivacy= psecuritypriv->dot118021XGrpPrivacy;//rx will use bmc_sta's dot118021XPrivacy
			}
		}

		goto exit;
	}

	/* Per-station key path (802.1X / PSK authentication). */
	if(psecuritypriv->dot11AuthAlgrthm == dot11AuthAlgrthm_8021X && psta) // psk/802_1x
	{
		if(check_fwstate(pmlmepriv, WIFI_AP_STATE))
		{
			if(param->u.crypt.set_tx ==1) //pairwise key
			{
				_rtw_memcpy(psta->dot118021x_UncstKey.skey, param->u.crypt.key, (param->u.crypt.key_len>16 ?16:param->u.crypt.key_len));

				if(strcmp(param->u.crypt.alg, "WEP") == 0)
				{
					DBG_8192C("%s, set pairwise key, WEP\n", __FUNCTION__);

					psta->dot118021XPrivacy = _WEP40_;
					if(param->u.crypt.key_len==13)
					{
						psta->dot118021XPrivacy = _WEP104_;
					}
				}
				else if(strcmp(param->u.crypt.alg, "TKIP") == 0)
				{
					DBG_8192C("%s, set pairwise key, TKIP\n", __FUNCTION__);

					psta->dot118021XPrivacy = _TKIP_;

					//DEBUG_ERR("set key length :param->u.crypt.key_len=%d\n", param->u.crypt.key_len);
					//set mic key
					_rtw_memcpy(psta->dot11tkiptxmickey.skey, &(param->u.crypt.key[16]), 8);
					_rtw_memcpy(psta->dot11tkiprxmickey.skey, &(param->u.crypt.key[24]), 8);

					psecuritypriv->busetkipkey = _TRUE;
				}
				else if(strcmp(param->u.crypt.alg, "CCMP") == 0)
				{
					DBG_8192C("%s, set pairwise key, CCMP\n", __FUNCTION__);

					psta->dot118021XPrivacy = _AES_;
				}
				else
				{
					DBG_8192C("%s, set pairwise key, none\n", __FUNCTION__);

					psta->dot118021XPrivacy = _NO_PRIVACY_;
				}

				set_pairwise_key(padapter, psta);

				psta->ieee8021x_blocked = _FALSE;

				psta->bpairwise_key_installed = _TRUE;
			}
			else//group key???
			{
				if(strcmp(param->u.crypt.alg, "WEP") == 0)
				{
					_rtw_memcpy(psecuritypriv->dot118021XGrpKey[param->u.crypt.idx].skey, param->u.crypt.key, (param->u.crypt.key_len>16 ?16:param->u.crypt.key_len));

					psecuritypriv->dot118021XGrpPrivacy = _WEP40_;
					if(param->u.crypt.key_len==13)
					{
						psecuritypriv->dot118021XGrpPrivacy = _WEP104_;
					}
				}
				else if(strcmp(param->u.crypt.alg, "TKIP") == 0)
				{
					psecuritypriv->dot118021XGrpPrivacy = _TKIP_;

					_rtw_memcpy(psecuritypriv->dot118021XGrpKey[param->u.crypt.idx].skey, param->u.crypt.key, (param->u.crypt.key_len>16 ?16:param->u.crypt.key_len));

					//DEBUG_ERR("set key length :param->u.crypt.key_len=%d\n", param->u.crypt.key_len);
					//set mic key
					_rtw_memcpy(psecuritypriv->dot118021XGrptxmickey[param->u.crypt.idx].skey, &(param->u.crypt.key[16]), 8);
					_rtw_memcpy(psecuritypriv->dot118021XGrprxmickey[param->u.crypt.idx].skey, &(param->u.crypt.key[24]), 8);

					psecuritypriv->busetkipkey = _TRUE;
				}
				else if(strcmp(param->u.crypt.alg, "CCMP") == 0)
				{
					psecuritypriv->dot118021XGrpPrivacy = _AES_;

					_rtw_memcpy(psecuritypriv->dot118021XGrpKey[param->u.crypt.idx].skey, param->u.crypt.key, (param->u.crypt.key_len>16 ?16:param->u.crypt.key_len));
				}
				else
				{
					psecuritypriv->dot118021XGrpPrivacy = _NO_PRIVACY_;
				}

				psecuritypriv->dot118021XGrpKeyid = param->u.crypt.idx;

				psecuritypriv->binstallGrpkey = _TRUE;

				psecuritypriv->dot11PrivacyAlgrthm = psecuritypriv->dot118021XGrpPrivacy;//!!!

				set_group_key(padapter, param->u.crypt.key, psecuritypriv->dot118021XGrpPrivacy, param->u.crypt.idx);

				pbcmc_sta=rtw_get_bcmc_stainfo(padapter);
				if(pbcmc_sta)
				{
					pbcmc_sta->ieee8021x_blocked = _FALSE;
					pbcmc_sta->dot118021XPrivacy= psecuritypriv->dot118021XGrpPrivacy;//rx will use bmc_sta's dot118021XPrivacy
				}
			}
		}
	}

exit:
	return ret;
}
#endif
/*
 * rtw_cfg80211_set_encryption - install keys while operating as station
 * @dev: net device in station/adhoc mode
 * @param: wpa_supplicant-style ieee_param with algorithm name, key index,
 *         set_tx flag and key material
 * @param_len: total length of @param including the trailing key bytes
 *
 * Station-side counterpart of rtw_cfg80211_ap_set_encryption().  Handles
 * static WEP default keys, 802.1X pairwise keys (set_tx == 1) and group
 * keys (set_tx == 0), plus WAPI (SMS4) keys when CONFIG_WAPI_SUPPORT is
 * enabled.  TKIP key blobs carry the TX MIC key at offset 16 and the RX
 * MIC key at offset 24.  Returns 0 on success, -EINVAL on bad input.
 */
static int rtw_cfg80211_set_encryption(struct net_device *dev, struct ieee_param *param, u32 param_len)
{
	int ret = 0;
	u32 wep_key_idx, wep_key_len,wep_total_len;
	_adapter *padapter = (_adapter *)rtw_netdev_priv(dev);
	struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
	struct security_priv *psecuritypriv = &padapter->securitypriv;
#ifdef CONFIG_P2P
	struct wifidirect_info* pwdinfo = &padapter->wdinfo;
#endif //CONFIG_P2P

_func_enter_;

	DBG_8192C("%s\n", __func__);

	param->u.crypt.err = 0;
	/* force a NUL-terminated algorithm name before any strcmp */
	param->u.crypt.alg[IEEE_CRYPT_ALG_NAME_LEN - 1] = '\0';

	if (param_len < (u32) ((u8 *) param->u.crypt.key - (u8 *) param) + param->u.crypt.key_len)
	{
		ret = -EINVAL;
		goto exit;
	}

	/* Broadcast sta_addr: default/group key.  A unicast address is only
	 * accepted for WAPI (SMS4); everything else is rejected. */
	if (param->sta_addr[0] == 0xff && param->sta_addr[1] == 0xff &&
	    param->sta_addr[2] == 0xff && param->sta_addr[3] == 0xff &&
	    param->sta_addr[4] == 0xff && param->sta_addr[5] == 0xff)
	{
		if (param->u.crypt.idx >= WEP_KEYS)
		{
			ret = -EINVAL;
			goto exit;
		}
	} else {
#ifdef CONFIG_WAPI_SUPPORT
		if (strcmp(param->u.crypt.alg, "SMS4"))
#endif
		{
			ret = -EINVAL;
			goto exit;
		}
	}

	if (strcmp(param->u.crypt.alg, "WEP") == 0)
	{
		RT_TRACE(_module_rtl871x_ioctl_os_c,_drv_err_,("wpa_set_encryption, crypt.alg = WEP\n"));
		DBG_8192C("wpa_set_encryption, crypt.alg = WEP\n");

		wep_key_idx = param->u.crypt.idx;
		wep_key_len = param->u.crypt.key_len;

		if ((wep_key_idx > WEP_KEYS) || (wep_key_len <= 0))
		{
			ret = -EINVAL;
			goto exit;
		}

		if (psecuritypriv->bWepDefaultKeyIdxSet == 0)
		{
			//wep default key has not been set, so use this key index as default key.

			wep_key_len = wep_key_len <= 5 ? 5 : 13;

			psecuritypriv->ndisencryptstatus = Ndis802_11Encryption1Enabled;
			psecuritypriv->dot11PrivacyAlgrthm = _WEP40_;
			psecuritypriv->dot118021XGrpPrivacy = _WEP40_;

			if(wep_key_len==13)
			{
				psecuritypriv->dot11PrivacyAlgrthm = _WEP104_;
				psecuritypriv->dot118021XGrpPrivacy = _WEP104_;
			}

			psecuritypriv->dot11PrivacyKeyIndex = wep_key_idx;
		}

		_rtw_memcpy(&(psecuritypriv->dot11DefKey[wep_key_idx].skey[0]), param->u.crypt.key, wep_key_len);

		psecuritypriv->dot11DefKeylen[wep_key_idx] = wep_key_len;

		rtw_set_key(padapter, psecuritypriv, wep_key_idx, 0);

		goto exit;
	}

	if(padapter->securitypriv.dot11AuthAlgrthm == dot11AuthAlgrthm_8021X) // 802_1x
	{
		struct sta_info * psta,*pbcmc_sta;
		struct sta_priv * pstapriv = &padapter->stapriv;

		//DBG_8192C("%s, : dot11AuthAlgrthm == dot11AuthAlgrthm_8021X \n", __func__);

		if (check_fwstate(pmlmepriv, WIFI_STATION_STATE | WIFI_MP_STATE) == _TRUE) //sta mode
		{
			psta = rtw_get_stainfo(pstapriv, get_bssid(pmlmepriv));
			if (psta == NULL) {
				//DEBUG_ERR( ("Set wpa_set_encryption: Obtain Sta_info fail \n"));
				DBG_8192C("%s, : Obtain Sta_info fail \n", __func__);
			}
			else
			{
				//Jeff: don't disable ieee8021x_blocked while clearing key
				if (strcmp(param->u.crypt.alg, "none") != 0)
					psta->ieee8021x_blocked = _FALSE;

				if((padapter->securitypriv.ndisencryptstatus == Ndis802_11Encryption2Enabled)||
				   (padapter->securitypriv.ndisencryptstatus == Ndis802_11Encryption3Enabled))
				{
					psta->dot118021XPrivacy = padapter->securitypriv.dot11PrivacyAlgrthm;
				}

				if(param->u.crypt.set_tx ==1)//pairwise key
				{
					DBG_8192C("%s, : param->u.crypt.set_tx ==1 \n", __func__);

					_rtw_memcpy(psta->dot118021x_UncstKey.skey, param->u.crypt.key, (param->u.crypt.key_len>16 ?16:param->u.crypt.key_len));

					if(strcmp(param->u.crypt.alg, "TKIP") == 0)//set mic key
					{
						//DEBUG_ERR(("\nset key length :param->u.crypt.key_len=%d\n", param->u.crypt.key_len));
						_rtw_memcpy(psta->dot11tkiptxmickey.skey, &(param->u.crypt.key[16]), 8);
						_rtw_memcpy(psta->dot11tkiprxmickey.skey, &(param->u.crypt.key[24]), 8);

						padapter->securitypriv.busetkipkey=_FALSE;
						//_set_timer(&padapter->securitypriv.tkip_timer, 50);
					}

					//DEBUG_ERR((" param->u.crypt.key_len=%d\n",param->u.crypt.key_len));
					DBG_871X(" ~~~~set sta key:unicastkey\n");

					rtw_setstakey_cmd(padapter, (unsigned char *)psta, _TRUE);
				}
				else//group key
				{
					_rtw_memcpy(padapter->securitypriv.dot118021XGrpKey[param->u.crypt.idx].skey, param->u.crypt.key,(param->u.crypt.key_len>16 ?16:param->u.crypt.key_len));
					_rtw_memcpy(padapter->securitypriv.dot118021XGrptxmickey[param->u.crypt.idx].skey,&(param->u.crypt.key[16]),8);
					_rtw_memcpy(padapter->securitypriv.dot118021XGrprxmickey[param->u.crypt.idx].skey,&(param->u.crypt.key[24]),8);
					padapter->securitypriv.binstallGrpkey = _TRUE;
					//DEBUG_ERR((" param->u.crypt.key_len=%d\n", param->u.crypt.key_len));
					DBG_871X(" ~~~~set sta key:groupkey\n");

					padapter->securitypriv.dot118021XGrpKeyid = param->u.crypt.idx;

					rtw_set_key(padapter,&padapter->securitypriv,param->u.crypt.idx, 1);
#ifdef CONFIG_P2P
					if(pwdinfo->driver_interface == DRIVER_CFG80211 )
					{
						/* group key delivery completes WPS provisioning */
						if(rtw_p2p_chk_state(pwdinfo, P2P_STATE_PROVISIONING_ING))
						{
							rtw_p2p_set_state(pwdinfo, P2P_STATE_PROVISIONING_DONE);
						}
					}
#endif //CONFIG_P2P
				}
			}

			pbcmc_sta=rtw_get_bcmc_stainfo(padapter);
			if(pbcmc_sta==NULL)
			{
				//DEBUG_ERR( ("Set OID_802_11_ADD_KEY: bcmc stainfo is null \n"));
			}
			else
			{
				//Jeff: don't disable ieee8021x_blocked while clearing key
				if (strcmp(param->u.crypt.alg, "none") != 0)
					pbcmc_sta->ieee8021x_blocked = _FALSE;

				if((padapter->securitypriv.ndisencryptstatus == Ndis802_11Encryption2Enabled)||
				   (padapter->securitypriv.ndisencryptstatus == Ndis802_11Encryption3Enabled))
				{
					pbcmc_sta->dot118021XPrivacy = padapter->securitypriv.dot11PrivacyAlgrthm;
				}
			}
		}
		else if(check_fwstate(pmlmepriv, WIFI_ADHOC_STATE)) //adhoc mode
		{
		}
	}

#ifdef CONFIG_WAPI_SUPPORT
	if (strcmp(param->u.crypt.alg, "SMS4") == 0)
	{
		PRT_WAPI_T pWapiInfo = &padapter->wapiInfo;
		PRT_WAPI_STA_INFO pWapiSta;
		/* initial packet-number seeds defined by the WAPI spec */
		u8 WapiASUEPNInitialValueSrc[16] = {0x36,0x5C,0x36,0x5C,0x36,0x5C,0x36,0x5C,0x36,0x5C,0x36,0x5C,0x36,0x5C,0x36,0x5C} ;
		u8 WapiAEPNInitialValueSrc[16] = {0x37,0x5C,0x36,0x5C,0x36,0x5C,0x36,0x5C,0x36,0x5C,0x36,0x5C,0x36,0x5C,0x36,0x5C} ;
		u8 WapiAEMultiCastPNInitialValueSrc[16] = {0x36,0x5C,0x36,0x5C,0x36,0x5C,0x36,0x5C,0x36,0x5C,0x36,0x5C,0x36,0x5C,0x36,0x5C} ;

		if(param->u.crypt.set_tx == 1)
		{
			/* unicast session key (USK) for the matching peer */
			list_for_each_entry(pWapiSta, &pWapiInfo->wapiSTAUsedList, list) {
				if(_rtw_memcmp(pWapiSta->PeerMacAddr,param->sta_addr,6))
				{
					_rtw_memcpy(pWapiSta->lastTxUnicastPN,WapiASUEPNInitialValueSrc,16);

					pWapiSta->wapiUsk.bSet = true;
					_rtw_memcpy(pWapiSta->wapiUsk.dataKey,param->u.crypt.key,16);
					_rtw_memcpy(pWapiSta->wapiUsk.micKey,param->u.crypt.key+16,16);
					pWapiSta->wapiUsk.keyId = param->u.crypt.idx ;
					pWapiSta->wapiUsk.bTxEnable = true;

					_rtw_memcpy(pWapiSta->lastRxUnicastPNBEQueue,WapiAEPNInitialValueSrc,16);
					_rtw_memcpy(pWapiSta->lastRxUnicastPNBKQueue,WapiAEPNInitialValueSrc,16);
					_rtw_memcpy(pWapiSta->lastRxUnicastPNVIQueue,WapiAEPNInitialValueSrc,16);
					_rtw_memcpy(pWapiSta->lastRxUnicastPNVOQueue,WapiAEPNInitialValueSrc,16);
					_rtw_memcpy(pWapiSta->lastRxUnicastPN,WapiAEPNInitialValueSrc,16);
					pWapiSta->wapiUskUpdate.bTxEnable = false;
					pWapiSta->wapiUskUpdate.bSet = false;

					if (psecuritypriv->sw_encrypt== false || psecuritypriv->sw_decrypt == false)
					{
						//set unicast key for ASUE
						rtw_wapi_set_key(padapter, &pWapiSta->wapiUsk, pWapiSta, false, false);
					}
				}
			}
		}
		else
		{
			/* multicast session key (MSK) learned from the BSSID */
			list_for_each_entry(pWapiSta, &pWapiInfo->wapiSTAUsedList, list) {
				if(_rtw_memcmp(pWapiSta->PeerMacAddr,get_bssid(pmlmepriv),6))
				{
					pWapiSta->wapiMsk.bSet = true;
					_rtw_memcpy(pWapiSta->wapiMsk.dataKey,param->u.crypt.key,16);
					_rtw_memcpy(pWapiSta->wapiMsk.micKey,param->u.crypt.key+16,16);
					pWapiSta->wapiMsk.keyId = param->u.crypt.idx ;
					pWapiSta->wapiMsk.bTxEnable = false;
					if(!pWapiSta->bSetkeyOk)
						pWapiSta->bSetkeyOk = true;
					pWapiSta->bAuthenticateInProgress = false;

					_rtw_memcpy(pWapiSta->lastRxMulticastPN, WapiAEMultiCastPNInitialValueSrc, 16);

					if (psecuritypriv->sw_decrypt == false)
					{
						//set rx broadcast key for ASUE
						rtw_wapi_set_key(padapter, &pWapiSta->wapiMsk, pWapiSta, true, false);
					}
				}
			}
		}
	}
#endif

exit:

	DBG_8192C("%s, ret=%d\n", __func__, ret);

	_func_exit_;

	return ret;
}
/*
 * cfg80211_rtw_add_key - cfg80211 .add_key op
 *
 * Translates the nl80211 cipher suite and key material into the driver's
 * hostapd-style ieee_param and dispatches to rtw_cfg80211_set_encryption()
 * (station mode) or rtw_cfg80211_ap_set_encryption() (AP mode).  A NULL
 * or broadcast @mac_addr selects a group key (set_tx = 0); a unicast
 * address selects a pairwise key (set_tx = 1).  Returns 0 on success,
 * -1 on allocation failure or -ENOTSUPP for unknown ciphers.
 */
static int cfg80211_rtw_add_key(struct wiphy *wiphy, struct net_device *ndev,
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37)) || defined(COMPAT_KERNEL_RELEASE)
				u8 key_index, bool pairwise, const u8 *mac_addr,
#else	// (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))
				u8 key_index, const u8 *mac_addr,
#endif	// (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))
				struct key_params *params)
{
	char *alg_name;
	u32 param_len;
	struct ieee_param *param = NULL;
	int ret=0;
	struct wireless_dev *rtw_wdev = wiphy_to_wdev(wiphy);
	_adapter *padapter = wiphy_to_adapter(wiphy);
	struct mlme_priv *pmlmepriv = &padapter->mlmepriv;

	DBG_871X(FUNC_NDEV_FMT" adding key for %pM\n", FUNC_NDEV_ARG(ndev), mac_addr);
	DBG_871X("cipher=0x%x\n", params->cipher);
	DBG_871X("key_len=0x%x\n", params->key_len);
	DBG_871X("seq_len=0x%x\n", params->seq_len);
	DBG_871X("key_index=%d\n", key_index);
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37)) || defined(COMPAT_KERNEL_RELEASE)
	DBG_871X("pairwise=%d\n", pairwise);
#endif	// (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))

	/* ieee_param carries the key bytes inline after the struct */
	param_len = sizeof(struct ieee_param) + params->key_len;
	param = (struct ieee_param *)rtw_malloc(param_len);
	if (param == NULL)
		return -1;

	_rtw_memset(param, 0, param_len);

	param->cmd = IEEE_CMD_SET_ENCRYPTION;
	_rtw_memset(param->sta_addr, 0xff, ETH_ALEN);

	/* map the nl80211 cipher suite to the legacy algorithm name */
	switch (params->cipher) {
	case IW_AUTH_CIPHER_NONE:
		//todo: remove key
		//remove = 1;
		alg_name = "none";
		break;
	case WLAN_CIPHER_SUITE_WEP40:
	case WLAN_CIPHER_SUITE_WEP104:
		alg_name = "WEP";
		break;
	case WLAN_CIPHER_SUITE_TKIP:
		alg_name = "TKIP";
		break;
	case WLAN_CIPHER_SUITE_CCMP:
		alg_name = "CCMP";
		break;
#ifdef CONFIG_WAPI_SUPPORT
	case WLAN_CIPHER_SUITE_SMS4:
		alg_name= "SMS4";
		if(pairwise == NL80211_KEYTYPE_PAIRWISE) {
			/* WAPI only allows unicast key indices 0 and 1 */
			if (key_index != 0 && key_index != 1) {
				ret = -ENOTSUPP;
				goto addkey_end;
			}
			_rtw_memcpy((void*)param->sta_addr, (void*)mac_addr, ETH_ALEN);
		} else {
			DBG_871X("mac_addr is null \n");
		}
		DBG_871X("rtw_wx_set_enc_ext: SMS4 case \n");
		break;
#endif

	default:
		ret = -ENOTSUPP;
		goto addkey_end;
	}

	strncpy((char *)param->u.crypt.alg, alg_name, IEEE_CRYPT_ALG_NAME_LEN);

	if (!mac_addr || is_broadcast_ether_addr(mac_addr))
	{
		param->u.crypt.set_tx = 0; //for wpa/wpa2 group key
	} else {
		param->u.crypt.set_tx = 1; //for wpa/wpa2 pairwise key
	}

	//param->u.crypt.idx = key_index - 1;
	param->u.crypt.idx = key_index;

	if (params->seq_len && params->seq)
	{
		_rtw_memcpy(param->u.crypt.seq, params->seq, params->seq_len);
	}

	if(params->key_len && params->key)
	{
		param->u.crypt.key_len = params->key_len;
		_rtw_memcpy(param->u.crypt.key, params->key, params->key_len);
	}

	if(check_fwstate(pmlmepriv, WIFI_STATION_STATE) == _TRUE)
	{
		ret = rtw_cfg80211_set_encryption(ndev, param, param_len);
	}
	else if(check_fwstate(pmlmepriv, WIFI_AP_STATE) == _TRUE)
	{
#ifdef CONFIG_AP_MODE
		if(mac_addr)
			_rtw_memcpy(param->sta_addr, (void*)mac_addr, ETH_ALEN);

		ret = rtw_cfg80211_ap_set_encryption(ndev, param, param_len);
#endif
	}
	else
	{
		DBG_8192C("error! fw_state=0x%x, iftype=%d\n", pmlmepriv->fw_state, rtw_wdev->iftype);
	}

addkey_end:
	if(param)
	{
		rtw_mfree((u8*)param, param_len);
	}

	return ret;
}
/*
 * cfg80211_rtw_get_key - cfg80211 .get_key op (intentional stub)
 *
 * Key retrieval is not implemented by this driver; the callback is kept
 * only so cfg80211 can invoke the op without oopsing, and always reports
 * success without invoking @callback.
 *
 * Fix: removed the dead `#if 0` block — it was code copied verbatim from
 * the Intel iwmc3200 (iwm) driver, referenced types (`struct iwm_priv`)
 * that do not exist in this driver, and could never compile if enabled.
 */
static int cfg80211_rtw_get_key(struct wiphy *wiphy, struct net_device *ndev,
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37)) || defined(COMPAT_KERNEL_RELEASE)
				u8 key_index, bool pairwise, const u8 *mac_addr,
#else	// (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))
				u8 key_index, const u8 *mac_addr,
#endif	// (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))
				void *cookie,
				void (*callback)(void *cookie,
						 struct key_params*))
{
	DBG_871X(FUNC_NDEV_FMT"\n", FUNC_NDEV_ARG(ndev));
	return 0;
}
/*
 * cfg80211_rtw_del_key - cfg80211 .del_key op
 *
 * Hardware key slots are not torn down here; the only state touched is
 * the WEP bookkeeping: when the deleted index is the current default WEP
 * key, the "default key index set" flag is cleared so the next add_key
 * may claim a new default.  Always returns 0.
 */
static int cfg80211_rtw_del_key(struct wiphy *wiphy, struct net_device *ndev,
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37)) || defined(COMPAT_KERNEL_RELEASE)
				u8 key_index, bool pairwise, const u8 *mac_addr)
#else	// (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))
				u8 key_index, const u8 *mac_addr)
#endif	// (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))
{
	_adapter *adapter = (_adapter *)rtw_netdev_priv(ndev);
	struct security_priv *sec = &adapter->securitypriv;

	DBG_871X(FUNC_NDEV_FMT" key_index=%d\n", FUNC_NDEV_ARG(ndev), key_index);

	/* Deleting the active WEP default key invalidates the default-set flag. */
	if (key_index == sec->dot11PrivacyKeyIndex)
		sec->bWepDefaultKeyIdxSet = 0;

	return 0;
}
/*
 * cfg80211_rtw_set_default_key - cfg80211 .set_default_key op
 *
 * Only meaningful for static WEP: when @key_index is a valid WEP slot and
 * the current privacy algorithm is WEP40/WEP104, the slot becomes the
 * default TX key and the cipher width is refreshed from the stored key
 * length (13 bytes -> WEP104).  Other ciphers are ignored.  Always
 * returns 0.
 */
static int cfg80211_rtw_set_default_key(struct wiphy *wiphy,
					struct net_device *ndev, u8 key_index
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,38)) || defined(COMPAT_KERNEL_RELEASE)
					, bool unicast, bool multicast
#endif
					)
{
	_adapter *padapter = (_adapter *)rtw_netdev_priv(ndev);
	struct security_priv *psecuritypriv = &padapter->securitypriv;

	DBG_871X(FUNC_NDEV_FMT" key_index=%d"
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,38)) || defined(COMPAT_KERNEL_RELEASE)
		", unicast=%d, multicast=%d"
#endif
		".\n", FUNC_NDEV_ARG(ndev), key_index
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,38)) || defined(COMPAT_KERNEL_RELEASE)
		, unicast, multicast
#endif
		);

	if ((key_index < WEP_KEYS) && ((psecuritypriv->dot11PrivacyAlgrthm == _WEP40_) || (psecuritypriv->dot11PrivacyAlgrthm == _WEP104_))) //set wep default key
	{
		psecuritypriv->ndisencryptstatus = Ndis802_11Encryption1Enabled;

		psecuritypriv->dot11PrivacyKeyIndex = key_index;

		psecuritypriv->dot11PrivacyAlgrthm = _WEP40_;
		psecuritypriv->dot118021XGrpPrivacy = _WEP40_;
		if (psecuritypriv->dot11DefKeylen[key_index] == 13)
		{
			psecuritypriv->dot11PrivacyAlgrthm = _WEP104_;
			psecuritypriv->dot118021XGrpPrivacy = _WEP104_;
		}

		psecuritypriv->bWepDefaultKeyIdxSet = 1; //set the flag to represent that wep default key has been set
	}

	return 0;
}
/*
 * cfg80211_rtw_get_station - cfg80211 .get_station op
 *
 * Looks the peer up in the driver's station table and, for a linked
 * infrastructure/P2P-client connection whose BSSID matches @mac, fills
 * @sinfo with signal strength, current max TX rate and RX/TX packet
 * counters.  Returns -ENOENT when @mac is NULL, unknown, or does not
 * match the associated BSSID.  Ad-hoc/AP peers are not yet reported
 * (see the TODO below).
 */
static int cfg80211_rtw_get_station(struct wiphy *wiphy,
				    struct net_device *ndev,
				    u8 *mac, struct station_info *sinfo)
{
	int ret = 0;
	_adapter *padapter = wiphy_to_adapter(wiphy);
	struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
	struct sta_info *psta = NULL;
	struct sta_priv *pstapriv = &padapter->stapriv;

	sinfo->filled = 0;

	if (!mac) {
		DBG_871X(FUNC_NDEV_FMT" mac==%p\n", FUNC_NDEV_ARG(ndev), mac);
		ret = -ENOENT;
		goto exit;
	}

	psta = rtw_get_stainfo(pstapriv, mac);
	if (psta == NULL) {
		DBG_8192C("%s, sta_info is null\n", __func__);
		ret = -ENOENT;
		goto exit;
	}

#ifdef CONFIG_DEBUG_CFG80211
	DBG_871X(FUNC_NDEV_FMT" mac="MAC_FMT"\n", FUNC_NDEV_ARG(ndev), MAC_ARG(mac));
#endif

	//for infra./P2PClient mode
	if( check_fwstate(pmlmepriv, WIFI_STATION_STATE)
	    && check_fwstate(pmlmepriv, _FW_LINKED)
	)
	{
		struct wlan_network *cur_network = &(pmlmepriv->cur_network);

		/* only the associated BSSID is a valid query target */
		if (_rtw_memcmp(mac, cur_network->network.MacAddress, ETH_ALEN) == _FALSE) {
			DBG_871X("%s, mismatch bssid="MAC_FMT"\n", __func__, MAC_ARG(cur_network->network.MacAddress));
			ret = -ENOENT;
			goto exit;
		}

		sinfo->filled |= STATION_INFO_SIGNAL;
		sinfo->signal = translate_percentage_to_dbm(padapter->recvpriv.signal_strength);

		sinfo->filled |= STATION_INFO_TX_BITRATE;
		sinfo->txrate.legacy = rtw_get_cur_max_rate(padapter);

		sinfo->filled |= STATION_INFO_RX_PACKETS;
		sinfo->rx_packets = sta_rx_data_pkts(psta);

		sinfo->filled |= STATION_INFO_TX_PACKETS;
		sinfo->tx_packets = psta->sta_stats.tx_pkts;
	}

	//for Ad-Hoc/AP mode
	if ((check_fwstate(pmlmepriv, WIFI_ADHOC_STATE)
	     ||check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE)
	     ||check_fwstate(pmlmepriv, WIFI_AP_STATE))
	    && check_fwstate(pmlmepriv, _FW_LINKED)
	)
	{
		//TODO: should acquire station info...
	}

exit:
	return ret;
}
extern int netdev_open(struct net_device *pnetdev);
#ifdef CONFIG_CONCURRENT_MODE
extern int netdev_if2_open(struct net_device *pnetdev);
#endif
/*
enum nl80211_iftype {
NL80211_IFTYPE_UNSPECIFIED,
NL80211_IFTYPE_ADHOC, //1
NL80211_IFTYPE_STATION, //2
NL80211_IFTYPE_AP, //3
NL80211_IFTYPE_AP_VLAN,
NL80211_IFTYPE_WDS,
NL80211_IFTYPE_MONITOR, //6
NL80211_IFTYPE_MESH_POINT,
NL80211_IFTYPE_P2P_CLIENT, //8
NL80211_IFTYPE_P2P_GO, //9
//keep last
NUM_NL80211_IFTYPES,
NL80211_IFTYPE_MAX = NUM_NL80211_IFTYPES - 1
};
*/
/*
 * cfg80211_rtw_change_iface - cfg80211 .change_virtual_intf op
 *
 * Switches the interface between ad-hoc, station/P2P-client and
 * AP/P2P-GO operation.  The netdev is (re)opened first and the device
 * woken from power save; the requested nl80211 iftype is then mapped to
 * the driver's NDIS infrastructure mode and pushed to firmware via
 * rtw_set_802_11_infrastructure_mode() + rtw_setopmode_cmd().  P2P role
 * and state are adjusted when the type change crosses the GO/client
 * boundary.  On failure the previous iftype is restored and -EPERM (or
 * -EOPNOTSUPP for unsupported types) is returned.
 */
static int cfg80211_rtw_change_iface(struct wiphy *wiphy,
				     struct net_device *ndev,
				     enum nl80211_iftype type, u32 *flags,
				     struct vif_params *params)
{
	enum nl80211_iftype old_type;
	NDIS_802_11_NETWORK_INFRASTRUCTURE networkType ;
	_adapter *padapter = wiphy_to_adapter(wiphy);
	struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
	struct wireless_dev *rtw_wdev = wiphy_to_wdev(wiphy);
#ifdef CONFIG_P2P
	struct wifidirect_info *pwdinfo= &(padapter->wdinfo);
#endif
	int ret = 0;
	u8 change = _FALSE;

	/* Make sure the right netdev is open before touching modes. */
#ifdef CONFIG_CONCURRENT_MODE
	if(padapter->adapter_type == SECONDARY_ADAPTER)
	{
		DBG_871X(FUNC_NDEV_FMT" call netdev_if2_open\n", FUNC_NDEV_ARG(ndev));
		if(netdev_if2_open(ndev) != 0) {
			ret= -EPERM;
			goto exit;
		}
	}
	else if(padapter->adapter_type == PRIMARY_ADAPTER)
#endif //CONFIG_CONCURRENT_MODE
	{
		DBG_871X(FUNC_NDEV_FMT" call netdev_open\n", FUNC_NDEV_ARG(ndev));
		if(netdev_open(ndev) != 0) {
			ret= -EPERM;
			goto exit;
		}
	}

	if(_FAIL == rtw_pwr_wakeup(padapter)) {
		ret= -EPERM;
		goto exit;
	}

	old_type = rtw_wdev->iftype;
	DBG_871X(FUNC_NDEV_FMT" old_iftype=%d, new_iftype=%d\n",
		FUNC_NDEV_ARG(ndev), old_type, type);

	if(old_type != type)
	{
		change = _TRUE;
		/* reset public-action dedup state on a real mode change */
		pmlmeext->action_public_rxseq = 0xffff;
		pmlmeext->action_public_dialog_token = 0xff;
	}

	switch (type) {
	case NL80211_IFTYPE_ADHOC:
		networkType = Ndis802_11IBSS;
		break;
#if defined(CONFIG_P2P) && ((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37)) || defined(COMPAT_KERNEL_RELEASE))
	case NL80211_IFTYPE_P2P_CLIENT:
#endif
	case NL80211_IFTYPE_STATION:
		networkType = Ndis802_11Infrastructure;
#ifdef CONFIG_P2P
		if(pwdinfo->driver_interface == DRIVER_CFG80211 )
		{
			if(change && rtw_p2p_chk_role(pwdinfo, P2P_ROLE_GO))
			{
				_cancel_timer_ex( &pwdinfo->find_phase_timer );
				_cancel_timer_ex( &pwdinfo->restore_p2p_state_timer );
				_cancel_timer_ex( &pwdinfo->pre_tx_scan_timer);

				//it means remove GO and change mode from AP(GO) to station(P2P DEVICE)
				rtw_p2p_set_role(pwdinfo, P2P_ROLE_DEVICE);
				rtw_p2p_set_state(pwdinfo, rtw_p2p_pre_state(pwdinfo));

				DBG_8192C("%s, role=%d, p2p_state=%d, pre_p2p_state=%d\n", __func__, rtw_p2p_role(pwdinfo), rtw_p2p_state(pwdinfo), rtw_p2p_pre_state(pwdinfo));
			}
		}
#endif //CONFIG_P2P
		break;
#if defined(CONFIG_P2P) && ((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37)) || defined(COMPAT_KERNEL_RELEASE))
	case NL80211_IFTYPE_P2P_GO:
#endif
	case NL80211_IFTYPE_AP:
		networkType = Ndis802_11APMode;
#ifdef CONFIG_P2P
		if(pwdinfo->driver_interface == DRIVER_CFG80211 )
		{
			if(change && !rtw_p2p_chk_state(pwdinfo, P2P_STATE_NONE))
			{
				//it means P2P Group created, we will be GO and change mode from  P2P DEVICE to AP(GO)
				rtw_p2p_set_role(pwdinfo, P2P_ROLE_GO);
			}
		}
#endif //CONFIG_P2P
		break;
	default:
		return -EOPNOTSUPP;
	}

	rtw_wdev->iftype = type;

	if (rtw_set_802_11_infrastructure_mode(padapter, networkType) ==_FALSE)
	{
		/* roll back the advertised iftype on failure */
		rtw_wdev->iftype = old_type;
		ret = -EPERM;
		goto exit;
	}

	rtw_setopmode_cmd(padapter, networkType);

exit:
	return ret;
}
/*
 * Complete any pending cfg80211 scan request.
 *
 * @pwdev_priv: per-wdev private data holding the cached scan request
 * @aborted: passed through to cfg80211_scan_done() to tell userspace
 *           whether the scan finished normally or was aborted
 *
 * Takes scan_req_lock, reports the cached request (if any) back to
 * cfg80211, and clears the cache so the request is completed only once.
 */
void rtw_cfg80211_indicate_scan_done(struct rtw_wdev_priv *pwdev_priv, bool aborted)
{
	_irqL flags;
	struct cfg80211_scan_request *req;

	_enter_critical_bh(&pwdev_priv->scan_req_lock, &flags);

	req = pwdev_priv->scan_request;
	if (req == NULL) {
#ifdef CONFIG_DEBUG_CFG80211
		DBG_871X("%s without scan req\n", __FUNCTION__);
#endif
	} else {
#ifdef CONFIG_DEBUG_CFG80211
		DBG_871X("%s with scan req\n", __FUNCTION__);
#endif
		/* guard against completing a request that belongs to a
		 * different wiphy (avoids WARN_ON inside cfg80211) */
		if (req->wiphy != pwdev_priv->rtw_wdev->wiphy)
			DBG_8192C("error wiphy compare\n");
		else
			cfg80211_scan_done(req, aborted);

		pwdev_priv->scan_request = NULL;
	}

	_exit_critical_bh(&pwdev_priv->scan_req_lock, &flags);
}
/*
 * Report every network in the driver's scanned queue up to cfg80211,
 * then complete the pending cfg80211 scan request (as not aborted).
 *
 * @padapter: adapter whose mlme scanned_queue is walked
 *
 * Each entry is forwarded via rtw_cfg80211_inform_bss() only if its
 * channel is present in the adapter's current channel set (and, with
 * CONFIG_VALIDATE_SSID, the SSID validates).
 *
 * Fix: removed six locals that were declared but never used
 * (cnt, wait_for_surveydone, wait_status, pwdinfo, pwdev_priv, pwrpriv).
 */
void rtw_cfg80211_surveydone_event_callback(_adapter *padapter)
{
	_irqL irqL;
	_list *plist, *phead;
	struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
	_queue *queue = &(pmlmepriv->scanned_queue);
	struct wlan_network *pnetwork = NULL;

#ifdef CONFIG_DEBUG_CFG80211
	DBG_8192C("%s\n", __func__);
#endif

	_enter_critical_bh(&(pmlmepriv->scanned_queue.lock), &irqL);

	phead = get_list_head(queue);
	plist = get_next(phead);

	while (1) {
		if (rtw_end_of_queue_search(phead, plist) == _TRUE)
			break;

		pnetwork = LIST_CONTAINOR(plist, struct wlan_network, list);

		/* report network only if the current channel set contains
		 * the channel to which this network belongs */
		if (rtw_ch_set_search_ch(padapter->mlmeextpriv.channel_set, pnetwork->network.Configuration.DSConfig) >= 0
#ifdef CONFIG_VALIDATE_SSID
			&& _TRUE == rtw_validate_ssid(&(pnetwork->network.Ssid))
#endif
		)
		{
			rtw_cfg80211_inform_bss(padapter, pnetwork);
		}

		plist = get_next(plist);
	}

	_exit_critical_bh(&(pmlmepriv->scanned_queue.lock), &irqL);

	/* call this after other things have been done */
	rtw_cfg80211_indicate_scan_done(wdev_to_priv(padapter->rtw_wdev), _FALSE);
}
/*
 * Cache the WPS / P2P / WFD IEs that userspace supplied for probe
 * requests, so the driver can append them when it scans.
 *
 * @padapter: adapter owning the mlme_priv caches
 * @buf: IE buffer from the cfg80211 scan request
 * @len: length of @buf in bytes
 *
 * For each IE type found in @buf the previously cached copy (if any) is
 * freed and replaced. Returns 0 on success.
 * NOTE(review): allocation failure returns -EINVAL rather than the
 * conventional -ENOMEM — kept as-is, callers may depend on it.
 */
static int rtw_cfg80211_set_probe_req_wpsp2pie(_adapter *padapter, char *buf, int len)
{
	int ret = 0;
	uint wps_ielen = 0;
	u8 *wps_ie;
	u32 p2p_ielen = 0;
	u8 *p2p_ie;
	u32 wfd_ielen = 0;
	u8 *wfd_ie;
	struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);

#ifdef CONFIG_DEBUG_CFG80211
	DBG_8192C("%s, ielen=%d\n", __func__, len);
#endif

	if(len>0)
	{
		/* WPS IE: drop any stale cached copy, then cache the new one */
		if((wps_ie = rtw_get_wps_ie(buf, len, NULL, &wps_ielen)))
		{
#ifdef CONFIG_DEBUG_CFG80211
			DBG_8192C("probe_req_wps_ielen=%d\n", wps_ielen);
#endif
			if(pmlmepriv->wps_probe_req_ie)
			{
				u32 free_len = pmlmepriv->wps_probe_req_ie_len;
				/* zero the length before freeing so readers never see
				 * a non-zero length with a freed pointer */
				pmlmepriv->wps_probe_req_ie_len = 0;
				rtw_mfree(pmlmepriv->wps_probe_req_ie, free_len);
				pmlmepriv->wps_probe_req_ie = NULL;
			}

			pmlmepriv->wps_probe_req_ie = rtw_malloc(wps_ielen);
			if ( pmlmepriv->wps_probe_req_ie == NULL) {
				DBG_8192C("%s()-%d: rtw_malloc() ERROR!\n", __FUNCTION__, __LINE__);
				return -EINVAL;
			}
			_rtw_memcpy(pmlmepriv->wps_probe_req_ie, wps_ie, wps_ielen);
			pmlmepriv->wps_probe_req_ie_len = wps_ielen;
		}

		//buf += wps_ielen;
		//len -= wps_ielen;

#ifdef CONFIG_P2P
		/* P2P IE: same replace-in-cache pattern as WPS above */
		if((p2p_ie=rtw_get_p2p_ie(buf, len, NULL, &p2p_ielen)))
		{
#ifdef CONFIG_DEBUG_CFG80211
			DBG_8192C("probe_req_p2p_ielen=%d\n", p2p_ielen);
#endif
			if(pmlmepriv->p2p_probe_req_ie)
			{
				u32 free_len = pmlmepriv->p2p_probe_req_ie_len;
				pmlmepriv->p2p_probe_req_ie_len = 0;
				rtw_mfree(pmlmepriv->p2p_probe_req_ie, free_len);
				pmlmepriv->p2p_probe_req_ie = NULL;
			}

			pmlmepriv->p2p_probe_req_ie = rtw_malloc(p2p_ielen);
			if ( pmlmepriv->p2p_probe_req_ie == NULL) {
				DBG_8192C("%s()-%d: rtw_malloc() ERROR!\n", __FUNCTION__, __LINE__);
				return -EINVAL;
			}
			_rtw_memcpy(pmlmepriv->p2p_probe_req_ie, p2p_ie, p2p_ielen);
			pmlmepriv->p2p_probe_req_ie_len = p2p_ielen;
		}
#endif //CONFIG_P2P

		//buf += p2p_ielen;
		//len -= p2p_ielen;

#ifdef CONFIG_WFD
		/* WFD IE: probed first for length, then extracted directly
		 * into the freshly allocated cache buffer */
		if(rtw_get_wfd_ie(buf, len, NULL, &wfd_ielen))
		{
#ifdef CONFIG_DEBUG_CFG80211
			DBG_8192C("probe_req_wfd_ielen=%d\n", wfd_ielen);
#endif
			if(pmlmepriv->wfd_probe_req_ie)
			{
				u32 free_len = pmlmepriv->wfd_probe_req_ie_len;
				pmlmepriv->wfd_probe_req_ie_len = 0;
				rtw_mfree(pmlmepriv->wfd_probe_req_ie, free_len);
				pmlmepriv->wfd_probe_req_ie = NULL;
			}

			pmlmepriv->wfd_probe_req_ie = rtw_malloc(wfd_ielen);
			if ( pmlmepriv->wfd_probe_req_ie == NULL) {
				DBG_8192C("%s()-%d: rtw_malloc() ERROR!\n", __FUNCTION__, __LINE__);
				return -EINVAL;
			}
			rtw_get_wfd_ie(buf, len, pmlmepriv->wfd_probe_req_ie, &pmlmepriv->wfd_probe_req_ie_len);
		}
#endif //CONFIG_WFD
	}

	return ret;
}
/*
 * cfg80211 scan handler.
 *
 * Validates driver state (MP mode, power, busy traffic, concurrent-mode
 * buddy interface, ongoing survey/link), caches any probe-request IEs,
 * handles the P2P "DIRECT-" wildcard scan special case, then issues a
 * site-survey command. When the scan cannot be started, the cached
 * cfg80211 request is completed immediately via
 * rtw_cfg80211_surveydone_event_callback() so userspace is not left
 * hanging.
 *
 * Fixes:
 *  - request->ssids may be NULL when n_ssids == 0 (passive scan); the
 *    old check `ssids->ssid != NULL` dereferenced it unconditionally
 *    (and was always true anyway, since `ssid` is an array member).
 *  - removed unused locals (wps_ie, wps_ielen, p2p_ie, p2p_ielen, j).
 */
static int cfg80211_rtw_scan(struct wiphy *wiphy
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0))
	, struct net_device *ndev
#endif
	, struct cfg80211_scan_request *request)
{
	int i;
	u8 _status = _FALSE;
	int ret = 0;
	_adapter *padapter = wiphy_to_adapter(wiphy);
	struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
	NDIS_802_11_SSID ssid[RTW_SSID_SCAN_AMOUNT];
	struct rtw_ieee80211_channel ch[RTW_CHANNEL_SCAN_AMOUNT];
	_irqL irqL;
	u8 survey_times = 3;
#ifdef CONFIG_P2P
	struct wifidirect_info *pwdinfo = &(padapter->wdinfo);
#endif //CONFIG_P2P
	struct rtw_wdev_priv *pwdev_priv = wdev_to_priv(padapter->rtw_wdev);
	struct cfg80211_ssid *ssids = request->ssids;
	int social_channel = 0;
	bool need_indicate_scan_done = _FALSE;
#ifdef CONFIG_CONCURRENT_MODE
	PADAPTER pbuddy_adapter = NULL;
	struct mlme_priv *pbuddy_mlmepriv = NULL;
#endif //CONFIG_CONCURRENT_MODE

#ifdef CONFIG_DEBUG_CFG80211
	DBG_871X(FUNC_ADPT_FMT"\n", FUNC_ADPT_ARG(padapter));
#endif

#ifdef CONFIG_CONCURRENT_MODE
	if (padapter->pbuddy_adapter) {
		pbuddy_adapter = padapter->pbuddy_adapter;
		pbuddy_mlmepriv = &(pbuddy_adapter->mlmepriv);
	}
#endif //CONFIG_CONCURRENT_MODE

#ifdef CONFIG_MP_INCLUDED
	/* normal scans are refused while in manufacturing-test mode */
	if (padapter->registrypriv.mp_mode == 1) {
		if (check_fwstate(pmlmepriv, WIFI_MP_STATE) == _TRUE) {
			ret = -EPERM;
			goto exit;
		}
	}
#endif

	/* cache the request so it can be completed later (scan-done path) */
	_enter_critical_bh(&pwdev_priv->scan_req_lock, &irqL);
	pwdev_priv->scan_request = request;
	_exit_critical_bh(&pwdev_priv->scan_req_lock, &irqL);

	if (check_fwstate(pmlmepriv, WIFI_AP_STATE) == _TRUE) {
#ifdef CONFIG_DEBUG_CFG80211
		DBG_871X("%s under WIFI_AP_STATE\n", __FUNCTION__);
#endif
		//need_indicate_scan_done = _TRUE;
		//goto check_need_indicate_scan_done;
	}

	if (_FAIL == rtw_pwr_wakeup(padapter)) {
		need_indicate_scan_done = _TRUE;
		goto check_need_indicate_scan_done;
	}

#ifdef CONFIG_P2P
	if (pwdinfo->driver_interface == DRIVER_CFG80211) {
		/* 'ssids' is NULL for a passive scan (n_ssids == 0); guard
		 * before dereferencing it for the P2P wildcard check */
		if (ssids != NULL && request->n_ssids > 0
			&& _rtw_memcmp(ssids->ssid, "DIRECT-", 7)
			&& rtw_get_p2p_ie((u8 *)request->ie, request->ie_len, NULL, NULL)
		)
		{
			if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_NONE)) {
				rtw_p2p_enable(padapter, P2P_ROLE_DEVICE);
				wdev_to_priv(padapter->rtw_wdev)->p2p_enabled = _TRUE;
			} else {
				rtw_p2p_set_pre_state(pwdinfo, rtw_p2p_state(pwdinfo));
#ifdef CONFIG_DEBUG_CFG80211
				DBG_8192C("%s, role=%d, p2p_state=%d\n", __func__, rtw_p2p_role(pwdinfo), rtw_p2p_state(pwdinfo));
#endif
			}
			rtw_p2p_set_state(pwdinfo, P2P_STATE_LISTEN);

			/* channels 1/6/11 only => P2P social-channel search */
			if (request->n_channels == 3 &&
				request->channels[0]->hw_value == 1 &&
				request->channels[1]->hw_value == 6 &&
				request->channels[2]->hw_value == 11
			)
			{
				social_channel = 1;
			}
		}
	}
#endif //CONFIG_P2P

	if (request->ie && request->ie_len > 0) {
		rtw_cfg80211_set_probe_req_wpsp2pie(padapter, (u8 *)request->ie, request->ie_len);
	}

	if (pmlmepriv->LinkDetectInfo.bBusyTraffic == _TRUE) {
		DBG_8192C("%s, bBusyTraffic == _TRUE\n", __func__);
		need_indicate_scan_done = _TRUE;
		goto check_need_indicate_scan_done;
	}
	if (rtw_is_scan_deny(padapter)) {
		DBG_871X(FUNC_ADPT_FMT ": scan deny\n", FUNC_ADPT_ARG(padapter));
		need_indicate_scan_done = _TRUE;
		goto check_need_indicate_scan_done;
	}

#ifdef CONFIG_CONCURRENT_MODE
	if (pbuddy_mlmepriv && (pbuddy_mlmepriv->LinkDetectInfo.bBusyTraffic == _TRUE)) {
		DBG_8192C("%s, bBusyTraffic == _TRUE at buddy_intf\n", __func__);
		need_indicate_scan_done = _TRUE;
		goto check_need_indicate_scan_done;
	}
#endif //CONFIG_CONCURRENT_MODE

	if (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY|_FW_UNDER_LINKING) == _TRUE) {
		DBG_8192C("%s, fwstate=0x%x\n", __func__, pmlmepriv->fw_state);
		need_indicate_scan_done = _TRUE;
		goto check_need_indicate_scan_done;
	}

#ifdef CONFIG_CONCURRENT_MODE
	if (check_buddy_fwstate(padapter,
		_FW_UNDER_SURVEY|_FW_UNDER_LINKING|WIFI_UNDER_WPS) == _TRUE)
	{
		if (check_buddy_fwstate(padapter, _FW_UNDER_SURVEY)) {
			DBG_8192C("scanning_via_buddy_intf\n");
			pmlmepriv->scanning_via_buddy_intf = _TRUE;
		}
		DBG_8192C("buddy_intf's mlme state:0x%x\n", pbuddy_mlmepriv->fw_state);
		need_indicate_scan_done = _TRUE;
		goto check_need_indicate_scan_done;
	}
#endif

#ifdef CONFIG_P2P
	if (pwdinfo->driver_interface == DRIVER_CFG80211) {
		if (!rtw_p2p_chk_state(pwdinfo, P2P_STATE_NONE) && !rtw_p2p_chk_state(pwdinfo, P2P_STATE_IDLE)) {
			rtw_p2p_set_state(pwdinfo, P2P_STATE_FIND_PHASE_SEARCH);
			rtw_free_network_queue(padapter, _TRUE);

			if (social_channel == 0)
				rtw_p2p_findphase_ex_set(pwdinfo, P2P_FINDPHASE_EX_NONE);
			else
				rtw_p2p_findphase_ex_set(pwdinfo, P2P_FINDPHASE_EX_SOCIAL_LAST);
		}
	}
#endif //CONFIG_P2P

	/* parsing request ssids, n_ssids */
	_rtw_memset(ssid, 0, sizeof(NDIS_802_11_SSID)*RTW_SSID_SCAN_AMOUNT);
	for (i = 0; ssids != NULL && i < request->n_ssids && i < RTW_SSID_SCAN_AMOUNT; i++) {
#ifdef CONFIG_DEBUG_CFG80211
		DBG_8192C("ssid=%s, len=%d\n", ssids[i].ssid, ssids[i].ssid_len);
#endif
		_rtw_memcpy(ssid[i].Ssid, ssids[i].ssid, ssids[i].ssid_len);
		ssid[i].SsidLength = ssids[i].ssid_len;
	}

	/* parsing channels, n_channels */
	_rtw_memset(ch, 0, sizeof(struct rtw_ieee80211_channel)*RTW_CHANNEL_SCAN_AMOUNT);
	for (i = 0; i < request->n_channels && i < RTW_CHANNEL_SCAN_AMOUNT; i++) {
#ifdef CONFIG_DEBUG_CFG80211
		DBG_871X(FUNC_ADPT_FMT CHAN_FMT"\n", FUNC_ADPT_ARG(padapter), CHAN_ARG(request->channels[i]));
#endif
		ch[i].hw_value = request->channels[i]->hw_value;
		ch[i].flags = request->channels[i]->flags;
	}

	_enter_critical_bh(&pmlmepriv->lock, &irqL);
	if (request->n_channels == 1) {
		/* single-channel scan: survey the same channel several times */
		for (i = 1; i < survey_times; i++)
			_rtw_memcpy(&ch[i], &ch[0], sizeof(struct rtw_ieee80211_channel));
		_status = rtw_sitesurvey_cmd(padapter, ssid, RTW_SSID_SCAN_AMOUNT, ch, survey_times);
	} else if (request->n_channels == 2) {
		/* two-channel scan: interleave survey_times repeats of each */
		_rtw_memcpy(&ch[3], &ch[1], sizeof(struct rtw_ieee80211_channel));
		for (i = 1; i < survey_times; i++) {
			_rtw_memcpy(&ch[i], &ch[0], sizeof(struct rtw_ieee80211_channel));
			_rtw_memcpy(&ch[i+3], &ch[3], sizeof(struct rtw_ieee80211_channel));
		}
		_status = rtw_sitesurvey_cmd(padapter, ssid, RTW_SSID_SCAN_AMOUNT, ch, survey_times * 2);
	} else {
		/* full scan: let the command layer pick channels */
		_status = rtw_sitesurvey_cmd(padapter, ssid, RTW_SSID_SCAN_AMOUNT, NULL, 0);
	}
	_exit_critical_bh(&pmlmepriv->lock, &irqL);

	if (_status == _FALSE) {
		ret = -1;
	}

check_need_indicate_scan_done:
	if (need_indicate_scan_done)
		rtw_cfg80211_surveydone_event_callback(padapter);

exit:
	return ret;
}
/*
 * cfg80211 set_wiphy_params handler (stub).
 *
 * The driver does not currently apply RTS/fragmentation threshold (or
 * any other) wiphy parameter changes to hardware; it logs the call and
 * returns success so userspace requests do not fail. Removed a dead
 * '#if 0' reference-driver (iwm) code block.
 */
static int cfg80211_rtw_set_wiphy_params(struct wiphy *wiphy, u32 changed)
{
	DBG_8192C("%s\n", __func__);
	return 0;
}
/*
 * cfg80211 join_ibss handler (stub).
 *
 * IBSS join via this path is not implemented; the call is logged and
 * success returned. Removed a dead '#if 0' reference-driver (iwm)
 * code block.
 */
static int cfg80211_rtw_join_ibss(struct wiphy *wiphy, struct net_device *ndev,
	struct cfg80211_ibss_params *params)
{
	DBG_871X(FUNC_NDEV_FMT"\n", FUNC_NDEV_ARG(ndev));
	return 0;
}
/*
 * cfg80211 leave_ibss handler (stub).
 *
 * IBSS leave via this path is not implemented; the call is logged and
 * success returned. Removed a dead '#if 0' reference-driver (iwm)
 * code block.
 */
static int cfg80211_rtw_leave_ibss(struct wiphy *wiphy, struct net_device *ndev)
{
	DBG_871X(FUNC_NDEV_FMT"\n", FUNC_NDEV_ARG(ndev));
	return 0;
}
/*
 * Translate the nl80211 WPA version bitmask into the driver's NDIS
 * auth-type setting.
 *
 * @psecuritypriv: security state to update
 * @wpa_version: bitmask of NL80211_WPA_VERSION_* flags (0 = no WPA)
 *
 * 0 selects open authentication; any WPA1/WPA2 bit selects WPA-PSK.
 * Always returns 0.
 */
static int rtw_cfg80211_set_wpa_version(struct security_priv *psecuritypriv, u32 wpa_version)
{
	DBG_8192C("%s, wpa_version=%d\n", __func__, wpa_version);

	if (wpa_version == 0) {
		/* no WPA requested -> open authentication */
		psecuritypriv->ndisauthtype = Ndis802_11AuthModeOpen;
	} else if (wpa_version & (NL80211_WPA_VERSION_1 | NL80211_WPA_VERSION_2)) {
		psecuritypriv->ndisauthtype = Ndis802_11AuthModeWPAPSK;
	}

	return 0;
}
/*
 * Translate the nl80211 authentication type into the driver's 802.11
 * auth algorithm setting.
 *
 * @psecuritypriv: security state to update
 * @sme_auth_type: NL80211_AUTHTYPE_* value from the connect request
 *
 * OPEN_SYSTEM is promoted to 802.1X (or WAPI, when configured) based on
 * the NDIS auth type already chosen by rtw_cfg80211_set_wpa_version().
 * Unknown types fall back to open auth. Always returns 0.
 */
static int rtw_cfg80211_set_auth_type(struct security_priv *psecuritypriv,
	enum nl80211_auth_type sme_auth_type)
{
	DBG_8192C("%s, nl80211_auth_type=%d\n", __func__, sme_auth_type);

	if (sme_auth_type == NL80211_AUTHTYPE_AUTOMATIC) {
		psecuritypriv->dot11AuthAlgrthm = dot11AuthAlgrthm_Auto;
	} else if (sme_auth_type == NL80211_AUTHTYPE_OPEN_SYSTEM) {
		psecuritypriv->dot11AuthAlgrthm = dot11AuthAlgrthm_Open;

		/* WPA/WPA2 on top of open auth means 802.1X key exchange */
		if (psecuritypriv->ndisauthtype > Ndis802_11AuthModeWPA)
			psecuritypriv->dot11AuthAlgrthm = dot11AuthAlgrthm_8021X;
#ifdef CONFIG_WAPI_SUPPORT
		if (psecuritypriv->ndisauthtype == Ndis802_11AuthModeWAPI)
			psecuritypriv->dot11AuthAlgrthm = dot11AuthAlgrthm_WAPI;
#endif
	} else if (sme_auth_type == NL80211_AUTHTYPE_SHARED_KEY) {
		psecuritypriv->dot11AuthAlgrthm = dot11AuthAlgrthm_Shared;
		psecuritypriv->ndisencryptstatus = Ndis802_11Encryption1Enabled;
	} else {
		/* unknown/unsupported type: default to open rather than fail */
		psecuritypriv->dot11AuthAlgrthm = dot11AuthAlgrthm_Open;
	}

	return 0;
}
/*
 * Translate an nl80211 cipher suite into the driver's privacy algorithm
 * and (for unicast) NDIS encryption status.
 *
 * @psecuritypriv: security state to update
 * @cipher: WLAN_CIPHER_SUITE_* / IW_AUTH_CIPHER_NONE value (0 = none)
 * @ucast: true for the pairwise cipher, false for the group cipher
 *
 * Returns 0 on success, -ENOTSUPP for an unknown cipher.
 */
static int rtw_cfg80211_set_cipher(struct security_priv *psecuritypriv, u32 cipher, bool ucast)
{
	u32 enc_status = Ndis802_11EncryptionDisabled;
	u32 *profile_cipher;

	/* pick the field this call configures: pairwise or group */
	profile_cipher = ucast ? &psecuritypriv->dot11PrivacyAlgrthm
			       : &psecuritypriv->dot118021XGrpPrivacy;

	DBG_8192C("%s, ucast=%d, cipher=0x%x\n", __func__, ucast, cipher);

	if (cipher == 0) {
		*profile_cipher = _NO_PRIVACY_;
		psecuritypriv->ndisencryptstatus = enc_status;
		return 0;
	}

	switch (cipher) {
	case IW_AUTH_CIPHER_NONE:
		*profile_cipher = _NO_PRIVACY_;
		enc_status = Ndis802_11EncryptionDisabled;
#ifdef CONFIG_WAPI_SUPPORT
		/* preserve an already-selected WAPI (SMS4) cipher */
		if (psecuritypriv->dot11PrivacyAlgrthm == _SMS4_)
			*profile_cipher = _SMS4_;
#endif
		break;
	case WLAN_CIPHER_SUITE_WEP40:
		*profile_cipher = _WEP40_;
		enc_status = Ndis802_11Encryption1Enabled;
		break;
	case WLAN_CIPHER_SUITE_WEP104:
		*profile_cipher = _WEP104_;
		enc_status = Ndis802_11Encryption1Enabled;
		break;
	case WLAN_CIPHER_SUITE_TKIP:
		*profile_cipher = _TKIP_;
		enc_status = Ndis802_11Encryption2Enabled;
		break;
	case WLAN_CIPHER_SUITE_CCMP:
		*profile_cipher = _AES_;
		enc_status = Ndis802_11Encryption3Enabled;
		break;
#ifdef CONFIG_WAPI_SUPPORT
	case WLAN_CIPHER_SUITE_SMS4:
		*profile_cipher = _SMS4_;
		enc_status = Ndis802_11_EncrypteionWAPI;
		break;
#endif
	default:
		DBG_8192C("Unsupported cipher: 0x%x\n", cipher);
		return -ENOTSUPP;
	}

	/* the NDIS encryption status tracks the pairwise cipher only */
	if (ucast)
		psecuritypriv->ndisencryptstatus = enc_status;

	return 0;
}
/*
 * Translate an nl80211 AKM suite into the driver's auth algorithm.
 *
 * @psecuritypriv: security state to update
 * @key_mgt: WLAN_AKM_SUITE_* value from the connect request
 *
 * 802.1X and PSK both map to the 802.1X auth algorithm here (the two
 * branches in the original code set the same value). Unknown suites are
 * logged and ignored. Always returns 0.
 */
static int rtw_cfg80211_set_key_mgt(struct security_priv *psecuritypriv, u32 key_mgt)
{
	DBG_8192C("%s, key_mgt=0x%x\n", __func__, key_mgt);

	switch (key_mgt) {
	case WLAN_AKM_SUITE_8021X:
	case WLAN_AKM_SUITE_PSK:
		psecuritypriv->dot11AuthAlgrthm = dot11AuthAlgrthm_8021X;
		break;
#ifdef CONFIG_WAPI_SUPPORT
	case WLAN_AKM_SUITE_WAPI_PSK:
	case WLAN_AKM_SUITE_WAPI_CERT:
		psecuritypriv->dot11AuthAlgrthm = dot11AuthAlgrthm_WAPI;
		break;
#endif
	default:
		/* tolerated, not fatal — matches the original behavior */
		DBG_8192C("Invalid key mgt: 0x%x\n", key_mgt);
		break;
	}

	return 0;
}
/*
 * Parse the WPA/RSN/WPS/P2P/WFD IEs supplied by userspace in a connect
 * request and program the resulting security settings.
 *
 * @padapter: adapter whose securitypriv/mlmepriv are updated
 * @pie: IE buffer from cfg80211 (may be NULL)
 * @ielen: length of @pie in bytes
 *
 * Returns 0 on success (NULL/empty @pie counts as success and clears
 * WIFI_UNDER_WPS), negative on bad input or allocation failure. On any
 * error the WIFI_UNDER_WPS flag is cleared.
 *
 * Fixes:
 *  - the debug dump used a fixed 8-byte stride and read up to 7 bytes
 *    past the 'ielen'-byte buffer when ielen % 8 != 0; now byte-wise.
 *  - removed unused locals (pos, left, wfd_ie); size_t printed with %zu.
 */
static int rtw_cfg80211_set_wpa_ie(_adapter *padapter, u8 *pie, size_t ielen)
{
	u8 *buf = NULL;
	int group_cipher = 0, pairwise_cipher = 0;
	int ret = 0;
	int wpa_ielen = 0;
	int wpa2_ielen = 0;
	u8 *pwpa, *pwpa2;
	u8 null_addr[] = {0, 0, 0, 0, 0, 0};

	if (pie == NULL || !ielen) {
		/* Treat this as normal case, but need to clear WIFI_UNDER_WPS */
		_clr_fwstate_(&padapter->mlmepriv, WIFI_UNDER_WPS);
		goto exit;
	}

	if (ielen > MAX_WPA_IE_LEN + MAX_WPS_IE_LEN + MAX_P2P_IE_LEN) {
		ret = -EINVAL;
		goto exit;
	}

	buf = rtw_zmalloc(ielen);
	if (buf == NULL) {
		ret = -ENOMEM;
		goto exit;
	}
	_rtw_memcpy(buf, pie, ielen);

	/* debug dump — byte-wise so we never read past the 'ielen' bytes
	 * actually allocated */
	{
		size_t i;
		DBG_8192C("set wpa_ie(length:%zu):\n", ielen);
		for (i = 0; i < ielen; i++)
			DBG_8192C("0x%.2x ", buf[i]);
	}

	if (ielen < RSN_HEADER_LEN) {
		RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_err_, ("Ie len too short %zu\n", ielen));
		ret = -1;
		goto exit;
	}

	/* WPA (vendor) IE: selects 802.1X auth + WPA-PSK mode */
	pwpa = rtw_get_wpa_ie(buf, &wpa_ielen, ielen);
	if (pwpa && wpa_ielen > 0) {
		if (rtw_parse_wpa_ie(pwpa, wpa_ielen + 2, &group_cipher, &pairwise_cipher, NULL) == _SUCCESS) {
			padapter->securitypriv.dot11AuthAlgrthm = dot11AuthAlgrthm_8021X;
			padapter->securitypriv.ndisauthtype = Ndis802_11AuthModeWPAPSK;
			/* assumes supplicant_ie holds at least wpa_ielen+2
			 * bytes — TODO confirm against the struct definition */
			_rtw_memcpy(padapter->securitypriv.supplicant_ie, &pwpa[0], wpa_ielen + 2);

			DBG_8192C("got wpa_ie, wpa_ielen:%u\n", wpa_ielen);
		}
	}

	/* RSN (WPA2) IE: overrides the auth mode chosen above */
	pwpa2 = rtw_get_wpa2_ie(buf, &wpa2_ielen, ielen);
	if (pwpa2 && wpa2_ielen > 0) {
		if (rtw_parse_wpa2_ie(pwpa2, wpa2_ielen + 2, &group_cipher, &pairwise_cipher, NULL) == _SUCCESS) {
			padapter->securitypriv.dot11AuthAlgrthm = dot11AuthAlgrthm_8021X;
			padapter->securitypriv.ndisauthtype = Ndis802_11AuthModeWPA2PSK;
			_rtw_memcpy(padapter->securitypriv.supplicant_ie, &pwpa2[0], wpa2_ielen + 2);

			DBG_8192C("got wpa2_ie, wpa2_ielen:%u\n", wpa2_ielen);
		}
	}

	if (group_cipher == 0)
		group_cipher = WPA_CIPHER_NONE;
	if (pairwise_cipher == 0)
		pairwise_cipher = WPA_CIPHER_NONE;

	/* map the parsed group cipher onto driver settings */
	switch (group_cipher) {
	case WPA_CIPHER_NONE:
		padapter->securitypriv.dot118021XGrpPrivacy = _NO_PRIVACY_;
		padapter->securitypriv.ndisencryptstatus = Ndis802_11EncryptionDisabled;
		break;
	case WPA_CIPHER_WEP40:
		padapter->securitypriv.dot118021XGrpPrivacy = _WEP40_;
		padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption1Enabled;
		break;
	case WPA_CIPHER_TKIP:
		padapter->securitypriv.dot118021XGrpPrivacy = _TKIP_;
		padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption2Enabled;
		break;
	case WPA_CIPHER_CCMP:
		padapter->securitypriv.dot118021XGrpPrivacy = _AES_;
		padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption3Enabled;
		break;
	case WPA_CIPHER_WEP104:
		padapter->securitypriv.dot118021XGrpPrivacy = _WEP104_;
		padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption1Enabled;
		break;
	}

	/* map the parsed pairwise cipher onto driver settings */
	switch (pairwise_cipher) {
	case WPA_CIPHER_NONE:
		padapter->securitypriv.dot11PrivacyAlgrthm = _NO_PRIVACY_;
		padapter->securitypriv.ndisencryptstatus = Ndis802_11EncryptionDisabled;
		break;
	case WPA_CIPHER_WEP40:
		padapter->securitypriv.dot11PrivacyAlgrthm = _WEP40_;
		padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption1Enabled;
		break;
	case WPA_CIPHER_TKIP:
		padapter->securitypriv.dot11PrivacyAlgrthm = _TKIP_;
		padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption2Enabled;
		break;
	case WPA_CIPHER_CCMP:
		padapter->securitypriv.dot11PrivacyAlgrthm = _AES_;
		padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption3Enabled;
		break;
	case WPA_CIPHER_WEP104:
		padapter->securitypriv.dot11PrivacyAlgrthm = _WEP104_;
		padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption1Enabled;
		break;
	}

	{ /* handle wps_ie */
		uint wps_ielen;
		u8 *wps_ie;

		wps_ie = rtw_get_wps_ie(buf, ielen, NULL, &wps_ielen);
		if (wps_ie && wps_ielen > 0) {
			DBG_8192C("got wps_ie, wps_ielen:%u\n", wps_ielen);
			/* clamp to the cache size before copying */
			padapter->securitypriv.wps_ie_len = wps_ielen < MAX_WPS_IE_LEN ? wps_ielen : MAX_WPS_IE_LEN;
			_rtw_memcpy(padapter->securitypriv.wps_ie, wps_ie, padapter->securitypriv.wps_ie_len);
			set_fwstate(&padapter->mlmepriv, WIFI_UNDER_WPS);
		} else {
			_clr_fwstate_(&padapter->mlmepriv, WIFI_UNDER_WPS);
		}
	}

#ifdef CONFIG_P2P
	{ /* check p2p_ie for assoc req */
		uint p2p_ielen = 0;
		u8 *p2p_ie;
		struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);

		if ((p2p_ie = rtw_get_p2p_ie(buf, ielen, NULL, &p2p_ielen))) {
#ifdef CONFIG_DEBUG_CFG80211
			DBG_8192C("%s p2p_assoc_req_ielen=%d\n", __FUNCTION__, p2p_ielen);
#endif
			if (pmlmepriv->p2p_assoc_req_ie) {
				u32 free_len = pmlmepriv->p2p_assoc_req_ie_len;
				pmlmepriv->p2p_assoc_req_ie_len = 0;
				rtw_mfree(pmlmepriv->p2p_assoc_req_ie, free_len);
				pmlmepriv->p2p_assoc_req_ie = NULL;
			}

			pmlmepriv->p2p_assoc_req_ie = rtw_malloc(p2p_ielen);
			if (pmlmepriv->p2p_assoc_req_ie == NULL) {
				/* NOTE(review): returns with ret == 0, so the
				 * caller never learns the P2P IE was dropped —
				 * preserved original behavior */
				DBG_8192C("%s()-%d: rtw_malloc() ERROR!\n", __FUNCTION__, __LINE__);
				goto exit;
			}
			_rtw_memcpy(pmlmepriv->p2p_assoc_req_ie, p2p_ie, p2p_ielen);
			pmlmepriv->p2p_assoc_req_ie_len = p2p_ielen;
		}
	}
#endif //CONFIG_P2P

#ifdef CONFIG_WFD
	{ /* check wfd_ie for assoc req */
		uint wfd_ielen = 0;
		struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);

		if (rtw_get_wfd_ie(buf, ielen, NULL, &wfd_ielen)) {
#ifdef CONFIG_DEBUG_CFG80211
			DBG_8192C("%s wfd_assoc_req_ielen=%d\n", __FUNCTION__, wfd_ielen);
#endif
			if (pmlmepriv->wfd_assoc_req_ie) {
				u32 free_len = pmlmepriv->wfd_assoc_req_ie_len;
				pmlmepriv->wfd_assoc_req_ie_len = 0;
				rtw_mfree(pmlmepriv->wfd_assoc_req_ie, free_len);
				pmlmepriv->wfd_assoc_req_ie = NULL;
			}

			pmlmepriv->wfd_assoc_req_ie = rtw_malloc(wfd_ielen);
			if (pmlmepriv->wfd_assoc_req_ie == NULL) {
				DBG_8192C("%s()-%d: rtw_malloc() ERROR!\n", __FUNCTION__, __LINE__);
				goto exit;
			}
			rtw_get_wfd_ie(buf, ielen, pmlmepriv->wfd_assoc_req_ie, &pmlmepriv->wfd_assoc_req_ie_len);
		}
	}
#endif //CONFIG_WFD

	/* TKIP and AES disallow multicast packets until installing group key */
	if (padapter->securitypriv.dot11PrivacyAlgrthm == _TKIP_
		|| padapter->securitypriv.dot11PrivacyAlgrthm == _TKIP_WTMIC_
		|| padapter->securitypriv.dot11PrivacyAlgrthm == _AES_)
		rtw_hal_set_hwreg(padapter, HW_VAR_OFF_RCR_AM, null_addr);

	RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_info_,
		("rtw_set_wpa_ie: pairwise_cipher=0x%08x padapter->securitypriv.ndisencryptstatus=%d padapter->securitypriv.ndisauthtype=%d\n",
		pairwise_cipher, padapter->securitypriv.ndisencryptstatus, padapter->securitypriv.ndisauthtype));

exit:
	if (buf)
		rtw_mfree(buf, ielen);
	if (ret)
		_clr_fwstate_(&padapter->mlmepriv, WIFI_UNDER_WPS);
	return ret;
}
/*
 * cfg80211 connect handler.
 *
 * Validates driver state, locates the target network in the scanned
 * queue (by BSSID and/or SSID), programs security settings (WPA version,
 * auth type, IEs, ciphers, AKM, optional WEP shared key), and finally
 * issues the SSID-join request.
 *
 * Returns 0 on success; negative errno on failure.
 *
 * Fixes:
 *  - sme->key_idx is a 0-based WEP key index: valid values are
 *    0 .. WEP_KEYS-1, so the bound check must be '>=' (was '>', an
 *    off-by-one that let an out-of-range index through).
 *  - the group-cipher failure path now takes 'goto exit' so the exit
 *    trace runs, matching every other failure path.
 *  - the WAPI AKM check no longer reads akm_suites[0] when
 *    n_akm_suites == 0.
 */
static int cfg80211_rtw_connect(struct wiphy *wiphy, struct net_device *ndev,
	struct cfg80211_connect_params *sme)
{
	int ret = 0;
	_irqL irqL;
	_list *phead;
	struct wlan_network *pnetwork = NULL;
	NDIS_802_11_AUTHENTICATION_MODE authmode;
	NDIS_802_11_SSID ndis_ssid;
	u8 *dst_ssid, *src_ssid;
	u8 *dst_bssid, *src_bssid;
	u8 matched = _FALSE;
	_adapter *padapter = wiphy_to_adapter(wiphy);
	struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
	struct security_priv *psecuritypriv = &padapter->securitypriv;
	_queue *queue = &pmlmepriv->scanned_queue;

	DBG_871X("=>"FUNC_NDEV_FMT"\n", FUNC_NDEV_ARG(ndev));
	DBG_871X("privacy=%d, key=%p, key_len=%d, key_idx=%d\n",
		sme->privacy, sme->key, sme->key_len, sme->key_idx);

	if (wdev_to_priv(padapter->rtw_wdev)->block == _TRUE) {
		ret = -EBUSY;
		DBG_871X("%s wdev_priv.block is set\n", __FUNCTION__);
		goto exit;
	}

#ifdef CONFIG_PLATFORM_MSTAR_TITANIA12
	printk("MStar Android!\n");
	if ((wdev_to_priv(padapter->rtw_wdev))->bandroid_scan == _FALSE) {
#ifdef CONFIG_P2P
		struct wifidirect_info *pwdinfo = &(padapter->wdinfo);
		if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_NONE))
#endif //CONFIG_P2P
		{
			ret = -EBUSY;
			printk("Android hasn't attached yet!\n");
			goto exit;
		}
	}
#endif

	if (_FAIL == rtw_pwr_wakeup(padapter)) {
		ret = -EPERM;
		goto exit;
	}

	/* connecting as a station is invalid while acting as an AP */
	if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) {
		ret = -EPERM;
		goto exit;
	}

#ifdef CONFIG_CONCURRENT_MODE
	if (check_buddy_fwstate(padapter, _FW_UNDER_LINKING) == _TRUE) {
		DBG_8192C("%s, but buddy_intf is under linking\n", __FUNCTION__);
		ret = -EINVAL;
		goto exit;
	}
	if (check_buddy_fwstate(padapter, _FW_UNDER_SURVEY) == _TRUE) {
		rtw_scan_abort(padapter->pbuddy_adapter);
	}
#endif

	if (!sme->ssid || !sme->ssid_len) {
		ret = -EINVAL;
		goto exit;
	}

	if (sme->ssid_len > IW_ESSID_MAX_SIZE) {
		ret = -E2BIG;
		goto exit;
	}

	_rtw_memset(&ndis_ssid, 0, sizeof(NDIS_802_11_SSID));
	ndis_ssid.SsidLength = sme->ssid_len;
	_rtw_memcpy(ndis_ssid.Ssid, sme->ssid, sme->ssid_len);

	DBG_8192C("ssid=%s, len=%zu\n", ndis_ssid.Ssid, sme->ssid_len);

	if (sme->bssid)
		DBG_8192C("bssid="MAC_FMT"\n", MAC_ARG(sme->bssid));

	if (check_fwstate(pmlmepriv, _FW_UNDER_LINKING) == _TRUE) {
		ret = -EBUSY;
		DBG_8192C("%s, fw_state=0x%x, goto exit\n", __FUNCTION__, pmlmepriv->fw_state);
		goto exit;
	}
	if (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY) == _TRUE) {
		rtw_scan_abort(padapter);
	}

	/* locate the target network in the scanned queue */
	_enter_critical_bh(&queue->lock, &irqL);
	phead = get_list_head(queue);
	pmlmepriv->pscanned = get_next(phead);

	while (1) {
		if (rtw_end_of_queue_search(phead, pmlmepriv->pscanned) == _TRUE) {
			break;
		}

		pnetwork = LIST_CONTAINOR(pmlmepriv->pscanned, struct wlan_network, list);
		pmlmepriv->pscanned = get_next(pmlmepriv->pscanned);

		dst_ssid = pnetwork->network.Ssid.Ssid;
		dst_bssid = pnetwork->network.MacAddress;

		if (sme->bssid) {
			if (_rtw_memcmp(pnetwork->network.MacAddress, sme->bssid, ETH_ALEN) == _FALSE)
				continue;
		}

		if (sme->ssid && sme->ssid_len) {
			if (pnetwork->network.Ssid.SsidLength != sme->ssid_len
				|| _rtw_memcmp(pnetwork->network.Ssid.Ssid, sme->ssid, sme->ssid_len) == _FALSE
			)
				continue;
		}

		if (sme->bssid) {
			src_bssid = sme->bssid;

			if ((_rtw_memcmp(dst_bssid, src_bssid, ETH_ALEN)) == _TRUE) {
				DBG_8192C("matched by bssid\n");

				/* adopt the SSID of the matched BSS */
				ndis_ssid.SsidLength = pnetwork->network.Ssid.SsidLength;
				_rtw_memcpy(ndis_ssid.Ssid, pnetwork->network.Ssid.Ssid, pnetwork->network.Ssid.SsidLength);

				matched = _TRUE;
				break;
			}
		} else if (sme->ssid && sme->ssid_len) {
			src_ssid = ndis_ssid.Ssid;

			if ((_rtw_memcmp(dst_ssid, src_ssid, ndis_ssid.SsidLength) == _TRUE) &&
				(pnetwork->network.Ssid.SsidLength == ndis_ssid.SsidLength)) {
				DBG_8192C("matched by ssid\n");
				matched = _TRUE;
				break;
			}
		}
	}

	_exit_critical_bh(&queue->lock, &irqL);

	if ((matched == _FALSE) || (pnetwork == NULL)) {
		ret = -ENOENT;
		DBG_8192C("connect, matched == _FALSE, goto exit\n");
		goto exit;
	}

	if (rtw_set_802_11_infrastructure_mode(padapter, pnetwork->network.InfrastructureMode) == _FALSE) {
		ret = -EPERM;
		goto exit;
	}

	/* reset security state to open/no-privacy before applying the
	 * settings from this connect request */
	psecuritypriv->ndisencryptstatus = Ndis802_11EncryptionDisabled;
	psecuritypriv->dot11PrivacyAlgrthm = _NO_PRIVACY_;
	psecuritypriv->dot118021XGrpPrivacy = _NO_PRIVACY_;
	psecuritypriv->dot11AuthAlgrthm = dot11AuthAlgrthm_Open; /* open system */
	psecuritypriv->ndisauthtype = Ndis802_11AuthModeOpen;

#ifdef CONFIG_WAPI_SUPPORT
	padapter->wapiInfo.bWapiEnable = false;
#endif

	ret = rtw_cfg80211_set_wpa_version(psecuritypriv, sme->crypto.wpa_versions);
	if (ret < 0)
		goto exit;

#ifdef CONFIG_WAPI_SUPPORT
	if (sme->crypto.wpa_versions & NL80211_WAPI_VERSION_1) {
		padapter->wapiInfo.bWapiEnable = true;
		padapter->wapiInfo.extra_prefix_len = WAPI_EXT_LEN;
		padapter->wapiInfo.extra_postfix_len = SMS4_MIC_LEN;
	}
#endif

	ret = rtw_cfg80211_set_auth_type(psecuritypriv, sme->auth_type);

#ifdef CONFIG_WAPI_SUPPORT
	if (psecuritypriv->dot11AuthAlgrthm == dot11AuthAlgrthm_WAPI)
		padapter->mlmeextpriv.mlmext_info.auth_algo = psecuritypriv->dot11AuthAlgrthm;
#endif

	if (ret < 0)
		goto exit;

	DBG_8192C("%s, ie_len=%zu\n", __func__, sme->ie_len);

	ret = rtw_cfg80211_set_wpa_ie(padapter, sme->ie, sme->ie_len);
	if (ret < 0)
		goto exit;

	if (sme->crypto.n_ciphers_pairwise) {
		ret = rtw_cfg80211_set_cipher(psecuritypriv, sme->crypto.ciphers_pairwise[0], _TRUE);
		if (ret < 0)
			goto exit;
	}

	/* For WEP Shared auth */
	if ((psecuritypriv->dot11AuthAlgrthm == dot11AuthAlgrthm_Shared
		|| psecuritypriv->dot11AuthAlgrthm == dot11AuthAlgrthm_Auto) && sme->key)
	{
		u32 wep_key_idx, wep_key_len, wep_total_len;
		NDIS_802_11_WEP *pwep = NULL;
		DBG_871X("%s(): Shared/Auto WEP\n", __FUNCTION__);

		wep_key_idx = sme->key_idx;
		wep_key_len = sme->key_len;

		/* key_idx is 0-based; valid values are 0 .. WEP_KEYS-1
		 * (was '> WEP_KEYS', off-by-one) */
		if (sme->key_idx >= WEP_KEYS) {
			ret = -EINVAL;
			goto exit;
		}

		if (wep_key_len > 0) {
			/* WEP keys are exactly 40-bit (5B) or 104-bit (13B) */
			wep_key_len = wep_key_len <= 5 ? 5 : 13;
			wep_total_len = wep_key_len + FIELD_OFFSET(NDIS_802_11_WEP, KeyMaterial);
			pwep = (NDIS_802_11_WEP *)rtw_malloc(wep_total_len);
			if (pwep == NULL) {
				DBG_871X(" wpa_set_encryption: pwep allocate fail !!!\n");
				ret = -ENOMEM;
				goto exit;
			}

			_rtw_memset(pwep, 0, wep_total_len);

			pwep->KeyLength = wep_key_len;
			pwep->Length = wep_total_len;

			if (wep_key_len == 13) {
				padapter->securitypriv.dot11PrivacyAlgrthm = _WEP104_;
				padapter->securitypriv.dot118021XGrpPrivacy = _WEP104_;
			}
		} else {
			ret = -EINVAL;
			goto exit;
		}

		pwep->KeyIndex = wep_key_idx;
		pwep->KeyIndex |= 0x80000000;	/* mark as transmit key */

		_rtw_memcpy(pwep->KeyMaterial, (void *)sme->key, pwep->KeyLength);

		if (rtw_set_802_11_add_wep(padapter, pwep) == (u8)_FAIL) {
			ret = -EOPNOTSUPP;
		}

		rtw_mfree((u8 *)pwep, wep_total_len);

		if (ret < 0)
			goto exit;
	}

	ret = rtw_cfg80211_set_cipher(psecuritypriv, sme->crypto.cipher_group, _FALSE);
	if (ret < 0)
		goto exit;	/* was 'return ret', which skipped the exit trace */

	if (sme->crypto.n_akm_suites) {
		ret = rtw_cfg80211_set_key_mgt(psecuritypriv, sme->crypto.akm_suites[0]);
		if (ret < 0)
			goto exit;
	}

#ifdef CONFIG_WAPI_SUPPORT
	/* only inspect akm_suites[0] when userspace actually supplied one */
	if (sme->crypto.n_akm_suites) {
		if (sme->crypto.akm_suites[0] == WLAN_AKM_SUITE_WAPI_PSK)
			padapter->wapiInfo.bWapiPSK = true;
		else if (sme->crypto.akm_suites[0] == WLAN_AKM_SUITE_WAPI_CERT)
			padapter->wapiInfo.bWapiPSK = false;
	}
#endif

	authmode = psecuritypriv->ndisauthtype;
	rtw_set_802_11_authentication_mode(padapter, authmode);

	if (rtw_set_802_11_ssid(padapter, &ndis_ssid) == _FALSE) {
		ret = -1;
		goto exit;
	}

	DBG_8192C("set ssid:dot11AuthAlgrthm=%d, dot11PrivacyAlgrthm=%d, dot118021XGrpPrivacy=%d\n", psecuritypriv->dot11AuthAlgrthm, psecuritypriv->dot11PrivacyAlgrthm, psecuritypriv->dot118021XGrpPrivacy);

exit:
	DBG_8192C("<=%s, ret %d\n", __FUNCTION__, ret);

	return ret;
}
/*
 * cfg80211 disconnect handler.
 *
 * @reason_code: cfg80211-provided reason; not used by this driver.
 *
 * If currently linked, tears down the association in a fixed order:
 * abort any scan, leave power save, send the disassoc command, indicate
 * disconnect locally (suppressing the userspace event via
 * not_indic_disco, since cfg80211 initiated this disconnect itself),
 * and free association resources. Always returns 0.
 */
static int cfg80211_rtw_disconnect(struct wiphy *wiphy, struct net_device *ndev,
	u16 reason_code)
{
	_adapter *padapter = wiphy_to_adapter(wiphy);

	DBG_871X(FUNC_NDEV_FMT"\n", FUNC_NDEV_ARG(ndev));

	/* clear roaming state before tearing down the link */
	rtw_set_roaming(padapter, 0);

	if(check_fwstate(&padapter->mlmepriv, _FW_LINKED))
	{
		rtw_scan_abort(padapter);
		LeaveAllPowerSaveMode(padapter);
		rtw_disassoc_cmd(padapter, 500, _FALSE);

		DBG_871X("%s...call rtw_indicate_disconnect\n", __FUNCTION__);

		/* suppress the disconnect indication to userspace while the
		 * internal teardown runs — cfg80211 already knows */
		padapter->mlmepriv.not_indic_disco = _TRUE;
		rtw_indicate_disconnect(padapter);
		padapter->mlmepriv.not_indic_disco = _FALSE;

		rtw_free_assoc_resources(padapter, 1);
	}

	return 0;
}
/*
 * cfg80211 set_tx_power handler (stub).
 *
 * TX power changes from userspace are not applied to hardware; the call
 * is logged and success returned. The signature varies with kernel
 * version (wdev parameter added in 3.8; mbm vs dbm units changed in
 * 2.6.36). Removed a dead '#if 0' reference-driver (iwm) code block.
 */
static int cfg80211_rtw_set_txpower(struct wiphy *wiphy,
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,8,0))
	struct wireless_dev *wdev,
#endif
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36)) || defined(COMPAT_KERNEL_RELEASE)
	enum nl80211_tx_power_setting type, int mbm)
#else
	enum tx_power_setting type, int dbm)
#endif
{
	DBG_8192C("%s\n", __func__);
	return 0;
}
/*
 * cfg80211_rtw_get_txpower - cfg80211 .get_tx_power handler.
 *
 * Reports a fixed 12 dBm regardless of the actual hardware setting.
 */
static int cfg80211_rtw_get_txpower(struct wiphy *wiphy,
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,8,0))
	struct wireless_dev *wdev,
#endif
	int *dbm)
{
	DBG_8192C("%s\n", __func__);

	*dbm = 12;

	return 0;
}
/* Report the power-management state last requested via cfg80211. */
inline bool rtw_cfg80211_pwr_mgmt(_adapter *adapter)
{
	return wdev_to_priv(adapter->rtw_wdev)->power_mgmt;
}
/*
 * cfg80211_rtw_set_power_mgmt - cfg80211 .set_power_mgmt handler.
 * @enabled: requested power-save state
 * @timeout: dynamic-PS timeout from userspace (recorded only in the log)
 *
 * Stores the requested state so rtw_cfg80211_pwr_mgmt() can report it and,
 * when power save is being turned off, leaves Leisure Power Save at once.
 */
static int cfg80211_rtw_set_power_mgmt(struct wiphy *wiphy,
	struct net_device *ndev,
	bool enabled, int timeout)
{
	_adapter *padapter = wiphy_to_adapter(wiphy);
	struct rtw_wdev_priv *wdev_priv = wdev_to_priv(padapter->rtw_wdev);

	DBG_871X(FUNC_NDEV_FMT" enabled:%u, timeout:%d\n", FUNC_NDEV_ARG(ndev),
		enabled, timeout);

	wdev_priv->power_mgmt = enabled;

#ifdef CONFIG_LPS
	if (!enabled)
		LPS_Leave(padapter);
#endif

	return 0;
}
/*
 * cfg80211_rtw_set_pmksa - cfg80211 .set_pmksa handler.
 *
 * Caches the PMKID for pmksa->bssid.  An existing entry for the same BSSID
 * is overwritten; otherwise the PMKID is stored in the next free slot and
 * the write index wraps around the cache.
 *
 * Fix vs. original: the wraparound compared against a hard-coded 16 while
 * the lookup loop and the PMKIDList[] array are sized by NUM_PMKID_CACHE;
 * if those ever diverge the old code wrote past the array.  The bound now
 * uses NUM_PMKID_CACHE consistently.
 *
 * Returns 0 on success, -EINVAL for an all-zero BSSID.
 */
static int cfg80211_rtw_set_pmksa(struct wiphy *wiphy,
	struct net_device *netdev,
	struct cfg80211_pmksa *pmksa)
{
	u8 index, blInserted = _FALSE;
	_adapter *padapter = wiphy_to_adapter(wiphy);
	struct security_priv *psecuritypriv = &padapter->securitypriv;
	u8 strZeroMacAddress[ETH_ALEN] = { 0x00 };

	DBG_871X(FUNC_NDEV_FMT"\n", FUNC_NDEV_ARG(netdev));

	if (_rtw_memcmp(pmksa->bssid, strZeroMacAddress, ETH_ALEN) == _TRUE)
		return -EINVAL;

	blInserted = _FALSE;

	/* Overwrite the PMKID if this BSSID is already cached. */
	for (index = 0; index < NUM_PMKID_CACHE; index++) {
		if (_rtw_memcmp(psecuritypriv->PMKIDList[index].Bssid, pmksa->bssid, ETH_ALEN) == _TRUE) {
			/* BSSID matched: same AP, rewrite with the new PMKID. */
			DBG_871X(FUNC_NDEV_FMT" BSSID exists in the PMKList.\n", FUNC_NDEV_ARG(netdev));
			_rtw_memcpy(psecuritypriv->PMKIDList[index].PMKID, pmksa->pmkid, WLAN_PMKID_LEN);
			psecuritypriv->PMKIDList[index].bUsed = _TRUE;
			psecuritypriv->PMKIDIndex = index + 1;
			blInserted = _TRUE;
			break;
		}
	}

	if (!blInserted) {
		/* Not cached yet: store in the next slot. */
		DBG_871X(FUNC_NDEV_FMT" Use the new entry index = %d for this PMKID.\n",
			FUNC_NDEV_ARG(netdev), psecuritypriv->PMKIDIndex);

		_rtw_memcpy(psecuritypriv->PMKIDList[psecuritypriv->PMKIDIndex].Bssid, pmksa->bssid, ETH_ALEN);
		_rtw_memcpy(psecuritypriv->PMKIDList[psecuritypriv->PMKIDIndex].PMKID, pmksa->pmkid, WLAN_PMKID_LEN);
		psecuritypriv->PMKIDList[psecuritypriv->PMKIDIndex].bUsed = _TRUE;

		/* Advance and wrap at the real cache size (was hard-coded 16). */
		psecuritypriv->PMKIDIndex++;
		if (psecuritypriv->PMKIDIndex == NUM_PMKID_CACHE)
			psecuritypriv->PMKIDIndex = 0;
	}

	return 0;
}
/*
 * cfg80211_rtw_del_pmksa - cfg80211 .del_pmksa handler.
 *
 * Removes the cached PMKID entry matching pmksa->bssid, wiping both the
 * BSSID and the PMKID and marking the slot unused.
 * Returns 0 on success, -EINVAL when no entry matches.
 */
static int cfg80211_rtw_del_pmksa(struct wiphy *wiphy,
	struct net_device *netdev,
	struct cfg80211_pmksa *pmksa)
{
	u8 idx;
	u8 found = _FALSE;
	_adapter *padapter = wiphy_to_adapter(wiphy);
	struct security_priv *sec = &padapter->securitypriv;

	DBG_871X(FUNC_NDEV_FMT"\n", FUNC_NDEV_ARG(netdev));

	for (idx = 0; idx < NUM_PMKID_CACHE; idx++) {
		if (_rtw_memcmp(sec->PMKIDList[idx].Bssid, pmksa->bssid, ETH_ALEN) != _TRUE)
			continue;
		/* Matching AP found: clear the slot and stop searching. */
		_rtw_memset(sec->PMKIDList[idx].Bssid, 0x00, ETH_ALEN);
		_rtw_memset(sec->PMKIDList[idx].PMKID, 0x00, WLAN_PMKID_LEN);
		sec->PMKIDList[idx].bUsed = _FALSE;
		found = _TRUE;
		break;
	}

	if (found == _FALSE) {
		DBG_871X(FUNC_NDEV_FMT" do not have matched BSSID\n"
			, FUNC_NDEV_ARG(netdev));
		return -EINVAL;
	}

	return 0;
}
/* cfg80211 .flush_pmksa handler: wipe the entire PMKID cache. */
static int cfg80211_rtw_flush_pmksa(struct wiphy *wiphy,
	struct net_device *netdev)
{
	_adapter *padapter = wiphy_to_adapter(wiphy);
	struct security_priv *sec = &padapter->securitypriv;

	DBG_871X(FUNC_NDEV_FMT"\n", FUNC_NDEV_ARG(netdev));

	_rtw_memset(&sec->PMKIDList[0], 0x00, sizeof(RT_PMKID_LIST) * NUM_PMKID_CACHE);
	sec->PMKIDIndex = 0;

	return 0;
}
#ifdef CONFIG_AP_MODE
/*
 * rtw_cfg80211_indicate_sta_assoc - report a newly associated STA (AP mode).
 * @padapter:    adapter operating as AP
 * @pmgmt_frame: received (re)association request management frame
 * @frame_len:   length of @pmgmt_frame in bytes
 *
 * On kernels providing the cfg80211 STA event API the association request
 * IEs are forwarded via cfg80211_new_sta(); on older kernels the raw
 * management frame is delivered instead.
 *
 * Fix vs. original: 'struct station_info sinfo' was on the stack with only
 * 'filled' and the assoc_req fields set, so cfg80211_new_sta() could read
 * uninitialized garbage from the remaining members.  The struct is now
 * zeroed first (this also replaces the redundant 'sinfo.filled = 0;' that
 * was immediately overwritten).
 */
void rtw_cfg80211_indicate_sta_assoc(_adapter *padapter, u8 *pmgmt_frame, uint frame_len)
{
	s32 freq;
	int channel;
	struct wireless_dev *pwdev = padapter->rtw_wdev;
	struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
	struct net_device *ndev = padapter->pnetdev;

	DBG_8192C("%s(padapter=%p,%s)\n", __func__, padapter, ndev->name);

#if defined(RTW_USE_CFG80211_STA_EVENT) || defined(COMPAT_KERNEL_RELEASE)
	{
		struct station_info sinfo;
		u8 ie_offset;

		if (GetFrameSubType(pmgmt_frame) == WIFI_ASSOCREQ)
			ie_offset = _ASOCREQ_IE_OFFSET_;
		else /* WIFI_REASSOCREQ */
			ie_offset = _REASOCREQ_IE_OFFSET_;

		/* Zero everything so no stack garbage reaches cfg80211. */
		_rtw_memset(&sinfo, 0, sizeof(sinfo));
		sinfo.filled = STATION_INFO_ASSOC_REQ_IES;
		sinfo.assoc_req_ies = pmgmt_frame + WLAN_HDR_A3_LEN + ie_offset;
		sinfo.assoc_req_ies_len = frame_len - WLAN_HDR_A3_LEN - ie_offset;
		cfg80211_new_sta(ndev, GetAddr2Ptr(pmgmt_frame), &sinfo, GFP_ATOMIC);
	}
#else /* defined(RTW_USE_CFG80211_STA_EVENT) */
	channel = pmlmeext->cur_channel;
	if (channel <= RTW_CH_MAX_2G_CHANNEL)
		freq = rtw_ieee80211_channel_to_frequency(channel, IEEE80211_BAND_2GHZ);
	else
		freq = rtw_ieee80211_channel_to_frequency(channel, IEEE80211_BAND_5GHZ);

#ifdef COMPAT_KERNEL_RELEASE
	rtw_cfg80211_rx_mgmt(padapter, freq, 0, pmgmt_frame, frame_len, GFP_ATOMIC);
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37)) && !defined(CONFIG_CFG80211_FORCE_COMPATIBLE_2_6_37_UNDER)
	rtw_cfg80211_rx_mgmt(padapter, freq, 0, pmgmt_frame, frame_len, GFP_ATOMIC);
#else //COMPAT_KERNEL_RELEASE
	{
		//to avoid WARN_ON(wdev->iftype != NL80211_IFTYPE_STATION) when calling cfg80211_send_rx_assoc()
#ifndef CONFIG_PLATFORM_MSTAR_TITANIA12
		pwdev->iftype = NL80211_IFTYPE_STATION;
#endif //CONFIG_PLATFORM_MSTAR_TITANIA12
		DBG_8192C("iftype=%d before call cfg80211_send_rx_assoc()\n", pwdev->iftype);
		rtw_cfg80211_send_rx_assoc(padapter, NULL, pmgmt_frame, frame_len);
		DBG_8192C("iftype=%d after call cfg80211_send_rx_assoc()\n", pwdev->iftype);
		pwdev->iftype = NL80211_IFTYPE_AP;
	}
#endif //COMPAT_KERNEL_RELEASE
#endif /* defined(RTW_USE_CFG80211_STA_EVENT) */
}
/*
 * rtw_cfg80211_indicate_sta_disassoc - report a STA leaving (AP mode).
 * @padapter: adapter operating as AP
 * @da:       MAC address of the departing station
 * @reason:   802.11 reason code
 *
 * With the cfg80211 STA event API the departure is reported directly via
 * cfg80211_del_sta().  Otherwise a deauth management frame is synthesized
 * in a local buffer and handed to cfg80211 as a received frame.
 */
void rtw_cfg80211_indicate_sta_disassoc(_adapter *padapter, unsigned char *da, unsigned short reason)
{
s32 freq;
int channel;
u8 *pmgmt_frame;
uint frame_len;
struct rtw_ieee80211_hdr *pwlanhdr;
unsigned short *fctrl;
u8 mgmt_buf[128] = {0};
struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
struct net_device *ndev = padapter->pnetdev;
DBG_8192C("%s(padapter=%p,%s)\n", __func__, padapter, ndev->name);
#if defined(RTW_USE_CFG80211_STA_EVENT) || defined(COMPAT_KERNEL_RELEASE)
cfg80211_del_sta(ndev, da, GFP_ATOMIC);
#else /* defined(RTW_USE_CFG80211_STA_EVENT) */
channel = pmlmeext->cur_channel;
if (channel <= RTW_CH_MAX_2G_CHANNEL)
freq = rtw_ieee80211_channel_to_frequency(channel, IEEE80211_BAND_2GHZ);
else
freq = rtw_ieee80211_channel_to_frequency(channel, IEEE80211_BAND_5GHZ);
/* Build a minimal 3-address deauth frame in mgmt_buf. */
pmgmt_frame = mgmt_buf;
pwlanhdr = (struct rtw_ieee80211_hdr *)pmgmt_frame;
fctrl = &(pwlanhdr->frame_ctl);
*(fctrl) = 0;
//_rtw_memcpy(pwlanhdr->addr1, da, ETH_ALEN);
//_rtw_memcpy(pwlanhdr->addr2, myid(&(padapter->eeprompriv)), ETH_ALEN);
/* NOTE(review): addr1 = own MAC, addr2 = departing STA — the reverse of
 * the commented-out order above; the frame is presented as received by
 * us, which appears intentional.  Confirm against the cfg80211 consumer. */
_rtw_memcpy(pwlanhdr->addr1, myid(&(padapter->eeprompriv)), ETH_ALEN);
_rtw_memcpy(pwlanhdr->addr2, da, ETH_ALEN);
_rtw_memcpy(pwlanhdr->addr3, get_my_bssid(&(pmlmeinfo->network)), ETH_ALEN);
SetSeqNum(pwlanhdr, pmlmeext->mgnt_seq);
pmlmeext->mgnt_seq++;
SetFrameSubType(pmgmt_frame, WIFI_DEAUTH);
pmgmt_frame += sizeof(struct rtw_ieee80211_hdr_3addr);
frame_len = sizeof(struct rtw_ieee80211_hdr_3addr);
/* Append the little-endian reason code as the fixed deauth IE. */
reason = cpu_to_le16(reason);
pmgmt_frame = rtw_set_fixed_ie(pmgmt_frame, _RSON_CODE_ , (unsigned char *)&reason, &frame_len);
#ifdef COMPAT_KERNEL_RELEASE
rtw_cfg80211_rx_mgmt(padapter, freq, 0, mgmt_buf, frame_len, GFP_ATOMIC);
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37)) && !defined(CONFIG_CFG80211_FORCE_COMPATIBLE_2_6_37_UNDER)
rtw_cfg80211_rx_mgmt(padapter, freq, 0, mgmt_buf, frame_len, GFP_ATOMIC);
#else //COMPAT_KERNEL_RELEASE
cfg80211_send_disassoc(padapter->pnetdev, mgmt_buf, frame_len);
//cfg80211_rx_action(padapter->pnetdev, freq, mgmt_buf, frame_len, GFP_ATOMIC);
#endif //COMPAT_KERNEL_RELEASE
#endif /* defined(RTW_USE_CFG80211_STA_EVENT) */
}
/* Monitor interface .ndo_open: nothing to set up, always succeeds. */
static int rtw_cfg80211_monitor_if_open(struct net_device *ndev)
{
	DBG_8192C("%s\n", __func__);
	return 0;
}
/* Monitor interface .ndo_stop: nothing to tear down, always succeeds. */
static int rtw_cfg80211_monitor_if_close(struct net_device *ndev)
{
	DBG_8192C("%s\n", __func__);
	return 0;
}
/*
 * rtw_cfg80211_monitor_if_xmit_entry - .ndo_start_xmit for the monitor if.
 *
 * Accepts radiotap-encapsulated frames injected by userspace:
 *  - data frames have the 802.11 header stripped and replaced by an
 *    Ethernet-style DA/SA pair, then go out via the real net_device
 *    (expected to be EAPOL during WPS/P2P provisioning);
 *  - action (management) frames are copied into a management xmit frame
 *    and dumped directly to the hardware queue;
 *  - everything else is dropped.
 * The skb is always consumed; returns 0 except for the data path, which
 * returns rtw_xmit_entry()'s result.
 */
static int rtw_cfg80211_monitor_if_xmit_entry(struct sk_buff *skb, struct net_device *ndev)
{
int ret = 0;
int rtap_len;
int qos_len = 0;
int dot11_hdr_len = 24;
int snap_len = 6;
unsigned char *pdata;
u16 frame_ctl;
unsigned char src_mac_addr[6];
unsigned char dst_mac_addr[6];
struct ieee80211_hdr *dot11_hdr;
struct ieee80211_radiotap_header *rtap_hdr;
_adapter *padapter = (_adapter *)rtw_netdev_priv(ndev);
DBG_871X(FUNC_NDEV_FMT"\n", FUNC_NDEV_ARG(ndev));
/* Sanity-check the radiotap header before trusting its length field. */
if (unlikely(skb->len < sizeof(struct ieee80211_radiotap_header)))
goto fail;
rtap_hdr = (struct ieee80211_radiotap_header *)skb->data;
if (unlikely(rtap_hdr->it_version))
goto fail;
rtap_len = ieee80211_get_radiotap_len(skb->data);
if (unlikely(skb->len < rtap_len))
goto fail;
/* Only the fixed 14-byte radiotap layout produced by wpa_supplicant's
 * injection path is accepted. */
if(rtap_len != 14)
{
DBG_8192C("radiotap len (should be 14): %d\n", rtap_len);
goto fail;
}
/* Skip the ratio tap header */
skb_pull(skb, rtap_len);
dot11_hdr = (struct ieee80211_hdr *)skb->data;
frame_ctl = le16_to_cpu(dot11_hdr->frame_control);
/* Check if the QoS bit is set */
if ((frame_ctl & RTW_IEEE80211_FCTL_FTYPE) == RTW_IEEE80211_FTYPE_DATA) {
/* Check if this ia a Wireless Distribution System (WDS) frame
 * which has 4 MAC addresses
 */
/* NOTE(review): both masks below are applied to the raw le16
 * frame_control rather than the byte-swapped frame_ctl — verify on
 * big-endian targets. */
if (dot11_hdr->frame_control & 0x0080)
qos_len = 2;
if ((dot11_hdr->frame_control & 0x0300) == 0x0300)
dot11_hdr_len += 6;
memcpy(dst_mac_addr, dot11_hdr->addr1, sizeof(dst_mac_addr));
memcpy(src_mac_addr, dot11_hdr->addr2, sizeof(src_mac_addr));
/* Skip the 802.11 header, QoS (if any) and SNAP, but leave spaces for
 * for two MAC addresses
 */
skb_pull(skb, dot11_hdr_len + qos_len + snap_len - sizeof(src_mac_addr) * 2);
pdata = (unsigned char*)skb->data;
memcpy(pdata, dst_mac_addr, sizeof(dst_mac_addr));
memcpy(pdata + sizeof(dst_mac_addr), src_mac_addr, sizeof(src_mac_addr));
DBG_8192C("should be eapol packet\n");
/* Use the real net device to transmit the packet */
ret = rtw_xmit_entry(skb, padapter->pnetdev);
return ret;
}
else if ((frame_ctl & (RTW_IEEE80211_FCTL_FTYPE|RTW_IEEE80211_FCTL_STYPE))
== (RTW_IEEE80211_FTYPE_MGMT|RTW_IEEE80211_STYPE_ACTION)
)
{
//only for action frames
struct xmit_frame *pmgntframe;
struct pkt_attrib *pattrib;
unsigned char *pframe;
//u8 category, action, OUI_Subtype, dialogToken=0;
//unsigned char *frame_body;
struct rtw_ieee80211_hdr *pwlanhdr;
struct xmit_priv *pxmitpriv = &(padapter->xmitpriv);
struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
u8 *buf = skb->data;
u32 len = skb->len;
u8 category, action;
int type = -1;
if (rtw_action_frame_parse(buf, len, &category, &action) == _FALSE) {
DBG_8192C(FUNC_NDEV_FMT" frame_control:0x%x\n", FUNC_NDEV_ARG(ndev),
le16_to_cpu(((struct rtw_ieee80211_hdr_3addr *)buf)->frame_ctl));
goto fail;
}
DBG_8192C("RTW_Tx:da="MAC_FMT" via "FUNC_NDEV_FMT"\n",
MAC_ARG(GetAddr1Ptr(buf)), FUNC_NDEV_ARG(ndev));
#ifdef CONFIG_P2P
if((type = rtw_p2p_check_frames(padapter, buf, len, _TRUE)) >= 0)
goto dump;
#endif
if (category == RTW_WLAN_CATEGORY_PUBLIC)
DBG_871X("RTW_Tx:%s\n", action_public_str(action));
else
DBG_871X("RTW_Tx:category(%u), action(%u)\n", category, action);
/* Control also falls through to 'dump' when CONFIG_P2P is unset. */
dump:
//starting alloc mgmt frame to dump it
if ((pmgntframe = alloc_mgtxmitframe(pxmitpriv)) == NULL)
{
goto fail;
}
//update attribute
pattrib = &pmgntframe->attrib;
update_mgntframe_attrib(padapter, pattrib);
pattrib->retry_ctrl = _FALSE;
_rtw_memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
_rtw_memcpy(pframe, (void*)buf, len);
#ifdef CONFIG_WFD
if (type >= 0)
{
struct wifi_display_info *pwfd_info;
pwfd_info = padapter->wdinfo.wfd_info;
if ( _TRUE == pwfd_info->wfd_enable )
{
rtw_append_wfd_ie( padapter, pframe, &len );
}
}
#endif // CONFIG_WFD
pattrib->pktlen = len;
pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
//update seq number
pmlmeext->mgnt_seq = GetSequence(pwlanhdr);
pattrib->seqnum = pmlmeext->mgnt_seq;
pmlmeext->mgnt_seq++;
pattrib->last_txcmdsz = pattrib->pktlen;
dump_mgntframe(padapter, pmgntframe);
}
else
{
DBG_8192C("frame_ctl=0x%x\n", frame_ctl & (RTW_IEEE80211_FCTL_FTYPE|RTW_IEEE80211_FCTL_STYPE));
}
/* All non-data paths (including errors) consume the skb here. */
fail:
dev_kfree_skb(skb);
return 0;
}
/* Monitor interface multicast list callback: intentionally a no-op. */
static void rtw_cfg80211_monitor_if_set_multicast_list(struct net_device *ndev)
{
	DBG_8192C("%s\n", __func__);
}
/* Monitor interface MAC-address callback: address changes are ignored. */
static int rtw_cfg80211_monitor_if_set_mac_address(struct net_device *ndev, void *addr)
{
	DBG_8192C("%s\n", __func__);
	return 0;
}
#if (LINUX_VERSION_CODE>=KERNEL_VERSION(2,6,29))
/* net_device_ops table for the radiotap monitor interface (kernels that
 * use the ops structure; older kernels assign the callbacks directly in
 * rtw_cfg80211_add_monitor_if()). */
static const struct net_device_ops rtw_cfg80211_monitor_if_ops = {
.ndo_open = rtw_cfg80211_monitor_if_open,
.ndo_stop = rtw_cfg80211_monitor_if_close,
.ndo_start_xmit = rtw_cfg80211_monitor_if_xmit_entry,
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,2,0))
/* .ndo_set_multicast_list was removed from net_device_ops in 3.2. */
.ndo_set_multicast_list = rtw_cfg80211_monitor_if_set_multicast_list,
#endif
.ndo_set_mac_address = rtw_cfg80211_monitor_if_set_mac_address,
};
#endif
/*
 * rtw_cfg80211_add_monitor_if - create a radiotap monitor net_device.
 * @padapter: owning adapter
 * @name:     requested interface name (NUL-terminated)
 * @ndev:     out: the registered monitor net_device on success
 *
 * Only one monitor interface may exist at a time.  Returns 0 on success
 * or a negative errno; on failure all partially created objects are freed.
 *
 * Fix vs. original: the interface name was recorded with
 * _rtw_memcpy(ifname_mon, name, IFNAMSIZ+1), reading IFNAMSIZ+1 bytes
 * from the caller's string and over-reading when the name is shorter.
 * The copy now takes mon_ndev->name, which was already truncated and
 * NUL-terminated within IFNAMSIZ bytes.
 */
static int rtw_cfg80211_add_monitor_if(_adapter *padapter, char *name, struct net_device **ndev)
{
	int ret = 0;
	struct net_device *mon_ndev = NULL;
	struct wireless_dev *mon_wdev = NULL;
	struct rtw_netdev_priv_indicator *pnpi;
	struct rtw_wdev_priv *pwdev_priv = wdev_to_priv(padapter->rtw_wdev);

	if (!name) {
		DBG_871X(FUNC_ADPT_FMT" without specific name\n", FUNC_ADPT_ARG(padapter));
		ret = -EINVAL;
		goto out;
	}

	/* Only one monitor interface is supported at a time. */
	if (pwdev_priv->pmon_ndev) {
		DBG_871X(FUNC_ADPT_FMT" monitor interface exist: "NDEV_FMT"\n",
			FUNC_ADPT_ARG(padapter), NDEV_ARG(pwdev_priv->pmon_ndev));
		ret = -EBUSY;
		goto out;
	}

	mon_ndev = alloc_etherdev(sizeof(struct rtw_netdev_priv_indicator));
	if (!mon_ndev) {
		DBG_871X(FUNC_ADPT_FMT" allocate ndev fail\n", FUNC_ADPT_ARG(padapter));
		ret = -ENOMEM;
		goto out;
	}

	mon_ndev->type = ARPHRD_IEEE80211_RADIOTAP;
	strncpy(mon_ndev->name, name, IFNAMSIZ);
	mon_ndev->name[IFNAMSIZ - 1] = 0;
	mon_ndev->destructor = rtw_ndev_destructor;

#if (LINUX_VERSION_CODE>=KERNEL_VERSION(2,6,29))
	mon_ndev->netdev_ops = &rtw_cfg80211_monitor_if_ops;
#else
	mon_ndev->open = rtw_cfg80211_monitor_if_open;
	mon_ndev->stop = rtw_cfg80211_monitor_if_close;
	mon_ndev->hard_start_xmit = rtw_cfg80211_monitor_if_xmit_entry;
	mon_ndev->set_mac_address = rtw_cfg80211_monitor_if_set_mac_address;
#endif

	/* Point the private indicator back at the owning adapter. */
	pnpi = netdev_priv(mon_ndev);
	pnpi->priv = padapter;
	pnpi->sizeof_priv = sizeof(_adapter);

	/* wdev */
	mon_wdev = (struct wireless_dev *)rtw_zmalloc(sizeof(struct wireless_dev));
	if (!mon_wdev) {
		DBG_871X(FUNC_ADPT_FMT" allocate mon_wdev fail\n", FUNC_ADPT_ARG(padapter));
		ret = -ENOMEM;
		goto out;
	}

	mon_wdev->wiphy = padapter->rtw_wdev->wiphy;
	mon_wdev->netdev = mon_ndev;
	mon_wdev->iftype = NL80211_IFTYPE_MONITOR;
	mon_ndev->ieee80211_ptr = mon_wdev;

	ret = register_netdevice(mon_ndev);
	if (ret) {
		goto out;
	}

	*ndev = pwdev_priv->pmon_ndev = mon_ndev;
	/* mon_ndev->name is bounded and NUL-terminated within IFNAMSIZ. */
	_rtw_memcpy(pwdev_priv->ifname_mon, mon_ndev->name, IFNAMSIZ);

out:
	if (ret && mon_wdev) {
		rtw_mfree((u8*)mon_wdev, sizeof(struct wireless_dev));
		mon_wdev = NULL;
	}
	if (ret && mon_ndev) {
		free_netdev(mon_ndev);
		*ndev = mon_ndev = NULL;
	}
	return ret;
}
/*
 * cfg80211_rtw_add_virtual_intf - cfg80211 .add_virtual_intf handler.
 *
 * The return type varies with kernel version: a wireless_dev pointer
 * (>= 3.6), a net_device pointer (>= 2.6.38 / compat), or an int.
 * Only NL80211_IFTYPE_MONITOR is actually supported (delegated to
 * rtw_cfg80211_add_monitor_if()); every other type yields -ENODEV.
 */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0))
static struct wireless_dev *
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,38)) || defined(COMPAT_KERNEL_RELEASE)
static struct net_device *
#else
static int
#endif
cfg80211_rtw_add_virtual_intf(
struct wiphy *wiphy,
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0))
const char *name,
#else
char *name,
#endif
enum nl80211_iftype type, u32 *flags, struct vif_params *params)
{
int ret = 0;
struct net_device* ndev = NULL;
_adapter *padapter = wiphy_to_adapter(wiphy);
DBG_871X(FUNC_ADPT_FMT " wiphy:%s, name:%s, type:%d\n",
FUNC_ADPT_ARG(padapter), wiphy_name(wiphy), name, type);
switch (type) {
case NL80211_IFTYPE_ADHOC:
case NL80211_IFTYPE_AP_VLAN:
case NL80211_IFTYPE_WDS:
case NL80211_IFTYPE_MESH_POINT:
ret = -ENODEV;
break;
case NL80211_IFTYPE_MONITOR:
ret = rtw_cfg80211_add_monitor_if(padapter, (char *)name, &ndev);
break;
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37)) || defined(COMPAT_KERNEL_RELEASE)
case NL80211_IFTYPE_P2P_CLIENT:
#endif
case NL80211_IFTYPE_STATION:
ret = -ENODEV;
break;
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37)) || defined(COMPAT_KERNEL_RELEASE)
case NL80211_IFTYPE_P2P_GO:
#endif
case NL80211_IFTYPE_AP:
ret = -ENODEV;
break;
default:
ret = -ENODEV;
DBG_871X("Unsupported interface type\n");
break;
}
DBG_871X(FUNC_ADPT_FMT" ndev:%p, ret:%d\n", FUNC_ADPT_ARG(padapter), ndev, ret);
/* Translate (ndev, ret) into the per-kernel-version return convention. */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0))
return ndev ? ndev->ieee80211_ptr : ERR_PTR(ret);
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,38)) || defined(COMPAT_KERNEL_RELEASE)
return ndev ? ndev : ERR_PTR(ret);
#else
return ret;
#endif
}
/*
 * cfg80211_rtw_del_virtual_intf - cfg80211 .del_virtual_intf handler.
 *
 * On kernels >= 3.6 the argument is a wireless_dev from which the
 * net_device is derived; earlier kernels pass the net_device directly.
 * Unregisters the device and clears the monitor bookkeeping if it was
 * the monitor interface.  Always returns 0.
 */
static int cfg80211_rtw_del_virtual_intf(struct wiphy *wiphy,
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0))
struct wireless_dev *wdev
#else
struct net_device *ndev
#endif
)
{
struct rtw_wdev_priv *pwdev_priv = (struct rtw_wdev_priv *)wiphy_priv(wiphy);
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0))
struct net_device *ndev;
ndev = wdev ? wdev->netdev : NULL;
#endif
if (!ndev)
goto exit;
unregister_netdevice(ndev);
if (ndev == pwdev_priv->pmon_ndev) {
/* Forget the monitor interface so a new one may be created. */
pwdev_priv->pmon_ndev = NULL;
pwdev_priv->ifname_mon[0] = '\0';
DBG_871X(FUNC_NDEV_FMT" remove monitor interface\n", FUNC_NDEV_ARG(ndev));
}
exit:
return 0;
}
/*
 * rtw_add_beacon - install beacon IEs supplied by hostapd (AP mode).
 * @adapter:  adapter in AP state
 * @head:     beacon head (802.11 header + fixed fields + IEs up to TIM)
 * @head_len: length of @head; must be > 24 (the 802.11 header)
 * @tail:     beacon tail IEs
 * @tail_len: length of @tail
 *
 * Concatenates head (minus the 24-byte 802.11 header) and tail into a
 * temporary buffer, strips P2P/WFD vendor IEs, and hands the result to
 * rtw_check_beacon_data().  If a P2P IE was present, P2P GO mode is
 * enabled/updated as a side effect.
 * Returns 0 on success, -EINVAL on bad input or rejected beacon data,
 * -ENOMEM on allocation failure.
 */
static int rtw_add_beacon(_adapter *adapter, const u8 *head, size_t head_len, const u8 *tail, size_t tail_len)
{
int ret=0;
u8 *pbuf = NULL;
uint len, wps_ielen=0;
uint p2p_ielen=0;
u8 *p2p_ie;
u8 got_p2p_ie = _FALSE;
struct mlme_priv *pmlmepriv = &(adapter->mlmepriv);
//struct sta_priv *pstapriv = &padapter->stapriv;
DBG_8192C("%s beacon_head_len=%zu, beacon_tail_len=%zu\n", __FUNCTION__, head_len, tail_len);
if(check_fwstate(pmlmepriv, WIFI_AP_STATE) != _TRUE)
return -EINVAL;
if(head_len<24)
return -EINVAL;
pbuf = rtw_zmalloc(head_len+tail_len);
if(!pbuf)
return -ENOMEM;
//_rtw_memcpy(&pstapriv->max_num_sta, param->u.bcn_ie.reserved, 2);
//if((pstapriv->max_num_sta>NUM_STA) || (pstapriv->max_num_sta<=0))
// pstapriv->max_num_sta = NUM_STA;
_rtw_memcpy(pbuf, (void *)head+24, head_len-24);// 24=beacon header len.
_rtw_memcpy(pbuf+head_len-24, (void *)tail, tail_len);
len = head_len+tail_len-24;
//check wps ie if inclued
if(rtw_get_wps_ie(pbuf+_FIXED_IE_LENGTH_, len-_FIXED_IE_LENGTH_, NULL, &wps_ielen))
DBG_8192C("add bcn, wps_ielen=%d\n", wps_ielen);
#ifdef CONFIG_P2P
//check p2p ie if inclued
if( adapter->wdinfo.driver_interface == DRIVER_CFG80211 )
{
if(rtw_get_p2p_ie(pbuf+_FIXED_IE_LENGTH_, len-_FIXED_IE_LENGTH_, NULL, &p2p_ielen))
{
DBG_8192C("got p2p_ie, len=%d\n", p2p_ielen);
got_p2p_ie = _TRUE;
}
}
#endif
/* pbss_network->IEs will not include p2p_ie, wfd ie */
rtw_ies_remove_ie(pbuf, &len, _BEACON_IE_OFFSET_, _VENDOR_SPECIFIC_IE_, P2P_OUI, 4);
rtw_ies_remove_ie(pbuf, &len, _BEACON_IE_OFFSET_, _VENDOR_SPECIFIC_IE_, WFD_OUI, 4);
if (rtw_check_beacon_data(adapter, pbuf, len) == _SUCCESS) {
#ifdef CONFIG_P2P
//check p2p if enable
if(got_p2p_ie == _TRUE)
{
struct mlme_ext_priv *pmlmeext = &adapter->mlmeextpriv;
struct wifidirect_info *pwdinfo= &(adapter->wdinfo);
if(rtw_p2p_chk_state(pwdinfo, P2P_STATE_NONE))
{
DBG_8192C("Enable P2P function for the first time\n");
rtw_p2p_enable(adapter, P2P_ROLE_GO);
wdev_to_priv(adapter->rtw_wdev)->p2p_enabled = _TRUE;
}
else
{
/* P2P already running: stop discovery timers and switch to GO. */
_cancel_timer_ex( &pwdinfo->find_phase_timer );
_cancel_timer_ex( &pwdinfo->restore_p2p_state_timer );
_cancel_timer_ex( &pwdinfo->pre_tx_scan_timer);
DBG_8192C("enter GO Mode, p2p_ielen=%d\n", p2p_ielen);
rtw_p2p_set_role(pwdinfo, P2P_ROLE_GO);
rtw_p2p_set_state(pwdinfo, P2P_STATE_GONEGO_OK);
pwdinfo->intent = 15;
}
pwdinfo->operating_channel = pmlmeext->cur_channel;
}
#endif //CONFIG_P2P
ret = 0;
}
else
{
ret = -EINVAL;
}
/* Temporary IE buffer is no longer needed either way. */
rtw_mfree(pbuf, head_len+tail_len);
return ret;
}
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,4,0)) && !defined(COMPAT_KERNEL_RELEASE)
/* cfg80211 .add_beacon (pre-3.4 kernels): forward the head/tail IEs to
 * the common rtw_add_beacon() helper and return its result. */
static int cfg80211_rtw_add_beacon(struct wiphy *wiphy, struct net_device *ndev,
	struct beacon_parameters *info)
{
	_adapter *adapter = wiphy_to_adapter(wiphy);

	DBG_871X(FUNC_NDEV_FMT"\n", FUNC_NDEV_ARG(ndev));

	return rtw_add_beacon(adapter, info->head, info->head_len, info->tail, info->tail_len);
}
/*
 * cfg80211_rtw_set_beacon - cfg80211 .set_beacon handler (pre-3.4 kernels).
 *
 * Marks the BSS as started and re-installs the beacon IEs.
 *
 * Fix vs. original: the result of cfg80211_rtw_add_beacon() was discarded
 * and 0 returned unconditionally, silently hiding beacon parse/allocation
 * failures from userspace.  The error is now propagated.
 */
static int cfg80211_rtw_set_beacon(struct wiphy *wiphy, struct net_device *ndev,
	struct beacon_parameters *info)
{
	_adapter *padapter = wiphy_to_adapter(wiphy);
	struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);

	DBG_871X(FUNC_NDEV_FMT"\n", FUNC_NDEV_ARG(ndev));

	pmlmeext->bstart_bss = _TRUE;

	return cfg80211_rtw_add_beacon(wiphy, ndev, info);
}
/* cfg80211 .del_beacon handler (pre-3.4 kernels): accepted but a no-op. */
static int cfg80211_rtw_del_beacon(struct wiphy *wiphy, struct net_device *ndev)
{
	DBG_871X(FUNC_NDEV_FMT"\n", FUNC_NDEV_ARG(ndev));
	return 0;
}
#else
/*
 * cfg80211_rtw_start_ap - cfg80211 .start_ap handler (kernels >= 3.4).
 *
 * Installs the beacon IEs via rtw_add_beacon(), records the hidden-SSID
 * mode, and copies the SSID into both the MLME and MLME-ext copies of the
 * network so they stay in sync.  Returns the beacon-installation result.
 *
 * Cleanup vs. original: two 'if(0)' debug dumps (dead code) were removed;
 * behavior is unchanged.
 */
static int cfg80211_rtw_start_ap(struct wiphy *wiphy, struct net_device *ndev,
	struct cfg80211_ap_settings *settings)
{
	int ret = 0;
	_adapter *adapter = wiphy_to_adapter(wiphy);

	DBG_871X(FUNC_NDEV_FMT" hidden_ssid:%d, auth_type:%d\n", FUNC_NDEV_ARG(ndev),
		settings->hidden_ssid, settings->auth_type);

	ret = rtw_add_beacon(adapter, settings->beacon.head, settings->beacon.head_len,
		settings->beacon.tail, settings->beacon.tail_len);

	adapter->mlmeextpriv.mlmext_info.hidden_ssid_mode = settings->hidden_ssid;

	if (settings->ssid && settings->ssid_len) {
		WLAN_BSSID_EX *pbss_network = &adapter->mlmepriv.cur_network.network;
		WLAN_BSSID_EX *pbss_network_ext = &adapter->mlmeextpriv.mlmext_info.network;

		/* nl80211 validates ssid_len (<= 32) before this handler runs;
		 * NOTE(review): confirm the Ssid buffers hold at least 32 bytes. */
		_rtw_memcpy(pbss_network->Ssid.Ssid, (void *)settings->ssid, settings->ssid_len);
		pbss_network->Ssid.SsidLength = settings->ssid_len;
		_rtw_memcpy(pbss_network_ext->Ssid.Ssid, (void *)settings->ssid, settings->ssid_len);
		pbss_network_ext->Ssid.SsidLength = settings->ssid_len;
	}

	return ret;
}
/* cfg80211 .change_beacon handler (kernels >= 3.4): re-install the
 * updated beacon IEs and return the result. */
static int cfg80211_rtw_change_beacon(struct wiphy *wiphy, struct net_device *ndev,
	struct cfg80211_beacon_data *info)
{
	_adapter *adapter = wiphy_to_adapter(wiphy);

	DBG_871X(FUNC_NDEV_FMT"\n", FUNC_NDEV_ARG(ndev));

	return rtw_add_beacon(adapter, info->head, info->head_len, info->tail, info->tail_len);
}
/* cfg80211 .stop_ap handler: accepted but a no-op in this driver. */
static int cfg80211_rtw_stop_ap(struct wiphy *wiphy, struct net_device *ndev)
{
	DBG_871X(FUNC_NDEV_FMT"\n", FUNC_NDEV_ARG(ndev));
	return 0;
}
#endif //(LINUX_VERSION_CODE < KERNEL_VERSION(3,4,0))
/* cfg80211 .add_station handler: STAs are created on association by the
 * driver itself, so this is a no-op. */
static int cfg80211_rtw_add_station(struct wiphy *wiphy, struct net_device *ndev,
	u8 *mac, struct station_parameters *params)
{
	DBG_871X(FUNC_NDEV_FMT"\n", FUNC_NDEV_ARG(ndev));
	return 0;
}
/*
 * cfg80211_rtw_del_station - cfg80211 .del_station handler (AP mode).
 * @mac: STA MAC address; NULL flushes every associated STA and the CAM.
 *
 * Searches the association list for @mac and frees the matching STA
 * (unless it is an 802.1X STA whose pairwise key is not yet installed),
 * then refreshes the associated-clients state.
 * Returns 0 on success, -EINVAL when not a linked AP or for the
 * broadcast address.
 *
 * Fix vs. original: 'updated' was uninitialized, so when no entry matched
 * (or the list was empty) associated_clients_update() received stack
 * garbage — undefined behavior.  It now starts as _FALSE.
 */
static int cfg80211_rtw_del_station(struct wiphy *wiphy, struct net_device *ndev,
	u8 *mac)
{
	int ret = 0;
	_irqL irqL;
	_list *phead, *plist;
	u8 updated = _FALSE;	/* set only when a STA is actually freed */
	struct sta_info *psta = NULL;
	_adapter *padapter = (_adapter *)rtw_netdev_priv(ndev);
	struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
	struct sta_priv *pstapriv = &padapter->stapriv;

	DBG_871X("+"FUNC_NDEV_FMT"\n", FUNC_NDEV_ARG(ndev));

	if (check_fwstate(pmlmepriv, (_FW_LINKED|WIFI_AP_STATE)) != _TRUE) {
		DBG_8192C("%s, fw_state != FW_LINKED|WIFI_AP_STATE\n", __func__);
		return -EINVAL;
	}

	if (!mac) {
		DBG_8192C("flush all sta, and cam_entry\n");
		flush_all_cam_entry(padapter);	/* clear CAM */
		ret = rtw_sta_flush(padapter);
		return ret;
	}

	DBG_8192C("free sta macaddr =" MAC_FMT "\n", MAC_ARG(mac));

	/* Refuse the broadcast address. */
	if (mac[0] == 0xff && mac[1] == 0xff &&
	    mac[2] == 0xff && mac[3] == 0xff &&
	    mac[4] == 0xff && mac[5] == 0xff)
	{
		return -EINVAL;
	}

	_enter_critical_bh(&pstapriv->asoc_list_lock, &irqL);

	phead = &pstapriv->asoc_list;
	plist = get_next(phead);

	/* Walk the association list looking for the requested MAC. */
	while ((rtw_end_of_queue_search(phead, plist)) == _FALSE) {
		psta = LIST_CONTAINOR(plist, struct sta_info, asoc_list);
		plist = get_next(plist);

		if (_rtw_memcmp(mac, psta->hwaddr, ETH_ALEN)) {
			if (psta->dot8021xalg == 1 && psta->bpairwise_key_installed == _FALSE) {
				/* 802.1X STA still mid-handshake: leave it alone. */
				DBG_8192C("%s, sta's dot8021xalg = 1 and key_installed = _FALSE\n", __func__);
			} else {
				DBG_8192C("free psta=%p, aid=%d\n", psta, psta->aid);

				rtw_list_delete(&psta->asoc_list);
				pstapriv->asoc_list_cnt--;

				updated = ap_free_sta(padapter, psta, _TRUE, WLAN_REASON_DEAUTH_LEAVING);

				psta = NULL;
				break;
			}
		}
	}

	_exit_critical_bh(&pstapriv->asoc_list_lock, &irqL);

	associated_clients_update(padapter, updated);

	DBG_871X("-"FUNC_NDEV_FMT"\n", FUNC_NDEV_ARG(ndev));

	return ret;
}
/* cfg80211 .change_station handler: parameter updates are ignored. */
static int cfg80211_rtw_change_station(struct wiphy *wiphy, struct net_device *ndev,
	u8 *mac, struct station_parameters *params)
{
	DBG_871X(FUNC_NDEV_FMT"\n", FUNC_NDEV_ARG(ndev));
	return 0;
}
/* cfg80211 .dump_station handler: not implemented, reports no entries. */
static int cfg80211_rtw_dump_station(struct wiphy *wiphy, struct net_device *ndev,
	int idx, u8 *mac, struct station_info *sinfo)
{
	DBG_871X(FUNC_NDEV_FMT"\n", FUNC_NDEV_ARG(ndev));

	//TODO: dump scanned queue
	return -ENOENT;
}
/*
 * cfg80211_rtw_change_bss - cfg80211 .change_bss handler.
 *
 * BSS parameter changes (CTS protection, preamble, slot time, isolation,
 * basic rates) are accepted but not applied by this driver.
 *
 * Cleanup vs. original: the unused local 'u8 i;' — referenced only by a
 * commented-out parameter dump — was removed along with that dead code.
 */
static int cfg80211_rtw_change_bss(struct wiphy *wiphy, struct net_device *ndev,
	struct bss_parameters *params)
{
	DBG_871X(FUNC_NDEV_FMT"\n", FUNC_NDEV_ARG(ndev));
	return 0;
}
/*
 * cfg80211_rtw_set_channel - cfg80211 .set_channel handler.
 *
 * Accepted but a no-op: the driver sets its channel through its own MLME
 * paths.  Kernels >= 2.6.35 pass the target net_device as well.
 */
static int cfg80211_rtw_set_channel(struct wiphy *wiphy
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
, struct net_device *ndev
#endif
, struct ieee80211_channel *chan, enum nl80211_channel_type channel_type)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
DBG_871X(FUNC_NDEV_FMT"\n", FUNC_NDEV_ARG(ndev));
#endif
return 0;
}
/* cfg80211 .auth handler: authentication is driven by .connect instead,
 * so this only logs and succeeds. */
static int cfg80211_rtw_auth(struct wiphy *wiphy, struct net_device *ndev,
	struct cfg80211_auth_request *req)
{
	DBG_871X(FUNC_NDEV_FMT"\n", FUNC_NDEV_ARG(ndev));
	return 0;
}
/* cfg80211 .assoc handler: association is driven by .connect instead,
 * so this only logs and succeeds. */
static int cfg80211_rtw_assoc(struct wiphy *wiphy, struct net_device *ndev,
	struct cfg80211_assoc_request *req)
{
	DBG_871X(FUNC_NDEV_FMT"\n", FUNC_NDEV_ARG(ndev));
	return 0;
}
#endif //CONFIG_AP_MODE
/*
 * rtw_cfg80211_rx_action_p2p - forward a received P2P action frame.
 * @padapter:    receiving adapter
 * @pmgmt_frame: raw management frame
 * @frame_len:   frame length in bytes
 *
 * With CONFIG_P2P, recognized P2P frames skip the generic category/action
 * log (goto indicate); all frames are then mapped to the current operating
 * channel's frequency and handed to cfg80211.
 */
void rtw_cfg80211_rx_action_p2p(_adapter *padapter, u8 *pmgmt_frame, uint frame_len)
{
int type;
s32 freq;
int channel;
struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
u8 category, action;
channel = rtw_get_oper_ch(padapter);
DBG_8192C("RTW_Rx:cur_ch=%d\n", channel);
#ifdef CONFIG_P2P
type = rtw_p2p_check_frames(padapter, pmgmt_frame, frame_len, _FALSE);
if (type >= 0)
goto indicate;
#endif
rtw_action_frame_parse(pmgmt_frame, frame_len, &category, &action);
DBG_871X("RTW_Rx:category(%u), action(%u)\n", category, action);
/* Without CONFIG_P2P control simply falls through to this label. */
indicate:
if (channel <= RTW_CH_MAX_2G_CHANNEL)
freq = rtw_ieee80211_channel_to_frequency(channel, IEEE80211_BAND_2GHZ);
else
freq = rtw_ieee80211_channel_to_frequency(channel, IEEE80211_BAND_5GHZ);
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37)) || defined(COMPAT_KERNEL_RELEASE)
rtw_cfg80211_rx_mgmt(padapter, freq, 0, pmgmt_frame, frame_len, GFP_ATOMIC);
#else
cfg80211_rx_action(padapter->pnetdev, freq, pmgmt_frame, frame_len, GFP_ATOMIC);
#endif
}
/*
 * rtw_cfg80211_rx_p2p_action_public - forward a received P2P public
 * action frame to cfg80211.
 *
 * With CONFIG_P2P, a recognized frame that concludes a negotiation
 * (GO-nego confirm or provision-discovery response) also clears the scan
 * deny state before being indicated.  The frame is then reported on the
 * current operating channel's frequency.
 */
void rtw_cfg80211_rx_p2p_action_public(_adapter *padapter, u8 *pmgmt_frame, uint frame_len)
{
int type;
s32 freq;
int channel;
struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
u8 category, action;
channel = rtw_get_oper_ch(padapter);
DBG_8192C("RTW_Rx:cur_ch=%d\n", channel);
#ifdef CONFIG_P2P
type = rtw_p2p_check_frames(padapter, pmgmt_frame, frame_len, _FALSE);
if (type >= 0) {
switch (type) {
case P2P_GO_NEGO_CONF:
case P2P_PROVISION_DISC_RESP:
/* Negotiation finished: scanning may resume. */
rtw_clear_scan_deny(padapter);
}
goto indicate;
}
#endif
rtw_action_frame_parse(pmgmt_frame, frame_len, &category, &action);
DBG_871X("RTW_Rx:category(%u), action(%u)\n", category, action);
/* Without CONFIG_P2P control simply falls through to this label. */
indicate:
if (channel <= RTW_CH_MAX_2G_CHANNEL)
freq = rtw_ieee80211_channel_to_frequency(channel, IEEE80211_BAND_2GHZ);
else
freq = rtw_ieee80211_channel_to_frequency(channel, IEEE80211_BAND_5GHZ);
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37)) || defined(COMPAT_KERNEL_RELEASE)
rtw_cfg80211_rx_mgmt(padapter, freq, 0, pmgmt_frame, frame_len, GFP_ATOMIC);
#else
cfg80211_rx_action(padapter->pnetdev, freq, pmgmt_frame, frame_len, GFP_ATOMIC);
#endif
}
/*
 * rtw_cfg80211_rx_action - forward a generic received action frame.
 * @adapter:   receiving adapter
 * @frame:     raw management frame
 * @frame_len: frame length in bytes
 * @msg:       optional label for the log; NULL logs category/action instead
 *
 * Parses the frame for logging, maps the current operating channel to a
 * center frequency and hands the frame to cfg80211.
 *
 * Cleanup vs. original: removed the unused locals 'pmlmeext' and
 * 'pwdev_priv' (the latter only called the side-effect-free accessor
 * wdev_to_priv()); behavior is unchanged.
 */
void rtw_cfg80211_rx_action(_adapter *adapter, u8 *frame, uint frame_len, const char*msg)
{
	s32 freq;
	int channel;
	u8 category, action;

	channel = rtw_get_oper_ch(adapter);

	rtw_action_frame_parse(frame, frame_len, &category, &action);

	DBG_8192C("RTW_Rx:cur_ch=%d\n", channel);
	if (msg)
		DBG_871X("RTW_Rx:%s\n", msg);
	else
		DBG_871X("RTW_Rx:category(%u), action(%u)\n", category, action);

	if (channel <= RTW_CH_MAX_2G_CHANNEL)
		freq = rtw_ieee80211_channel_to_frequency(channel, IEEE80211_BAND_2GHZ);
	else
		freq = rtw_ieee80211_channel_to_frequency(channel, IEEE80211_BAND_5GHZ);

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37)) || defined(COMPAT_KERNEL_RELEASE)
	rtw_cfg80211_rx_mgmt(adapter, freq, 0, frame, frame_len, GFP_ATOMIC);
#else
	cfg80211_rx_action(adapter->pnetdev, freq, frame, frame_len, GFP_ATOMIC);
#endif
}
#ifdef CONFIG_P2P
void rtw_cfg80211_issue_p2p_provision_request(_adapter *padapter, const u8 *buf, size_t len)
{
u16 wps_devicepassword_id = 0x0000;
uint wps_devicepassword_id_len = 0;
u8 wpsie[ 255 ] = { 0x00 }, p2p_ie[ 255 ] = { 0x00 };
uint p2p_ielen = 0;
uint wpsielen = 0;
u32 devinfo_contentlen = 0;
u8 devinfo_content[64] = { 0x00 };
u16 capability = 0;
uint capability_len = 0;
unsigned char category = RTW_WLAN_CATEGORY_PUBLIC;
u8 action = P2P_PUB_ACTION_ACTION;
u8 dialogToken = 1;
u32 p2poui = cpu_to_be32(P2POUI);
u8 oui_subtype = P2P_PROVISION_DISC_REQ;
u32 p2pielen = 0;
#ifdef CONFIG_WFD
u32 wfdielen = 0;
#endif //CONFIG_WFD
struct xmit_frame *pmgntframe;
struct pkt_attrib *pattrib;
unsigned char *pframe;
struct rtw_ieee80211_hdr *pwlanhdr;
unsigned short *fctrl;
struct xmit_priv *pxmitpriv = &(padapter->xmitpriv);
struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
struct wifidirect_info *pwdinfo = &(padapter->wdinfo);
u8 *frame_body = (unsigned char *)(buf + sizeof(struct rtw_ieee80211_hdr_3addr));
size_t frame_body_len = len - sizeof(struct rtw_ieee80211_hdr_3addr);
DBG_871X( "[%s] In\n", __FUNCTION__ );
//prepare for building provision_request frame
_rtw_memcpy(pwdinfo->tx_prov_disc_info.peerIFAddr, GetAddr1Ptr(buf), ETH_ALEN);
_rtw_memcpy(pwdinfo->tx_prov_disc_info.peerDevAddr, GetAddr1Ptr(buf), ETH_ALEN);
pwdinfo->tx_prov_disc_info.wps_config_method_request = WPS_CM_PUSH_BUTTON;
rtw_get_wps_ie( frame_body + _PUBLIC_ACTION_IE_OFFSET_, frame_body_len - _PUBLIC_ACTION_IE_OFFSET_, wpsie, &wpsielen);
rtw_get_wps_attr_content( wpsie, wpsielen, WPS_ATTR_DEVICE_PWID, (u8*) &wps_devicepassword_id, &wps_devicepassword_id_len);
wps_devicepassword_id = be16_to_cpu( wps_devicepassword_id );
switch(wps_devicepassword_id)
{
case WPS_DPID_PIN:
pwdinfo->tx_prov_disc_info.wps_config_method_request = WPS_CM_LABEL;
break;
case WPS_DPID_USER_SPEC:
pwdinfo->tx_prov_disc_info.wps_config_method_request = WPS_CM_DISPLYA;
break;
case WPS_DPID_MACHINE_SPEC:
break;
case WPS_DPID_REKEY:
break;
case WPS_DPID_PBC:
pwdinfo->tx_prov_disc_info.wps_config_method_request = WPS_CM_PUSH_BUTTON;
break;
case WPS_DPID_REGISTRAR_SPEC:
pwdinfo->tx_prov_disc_info.wps_config_method_request = WPS_CM_KEYPAD;
break;
default:
break;
}
if ( rtw_get_p2p_ie( frame_body + _PUBLIC_ACTION_IE_OFFSET_, frame_body_len - _PUBLIC_ACTION_IE_OFFSET_, p2p_ie, &p2p_ielen ) )
{
rtw_get_p2p_attr_content( p2p_ie, p2p_ielen, P2P_ATTR_DEVICE_INFO, devinfo_content, &devinfo_contentlen);
rtw_get_p2p_attr_content( p2p_ie, p2p_ielen, P2P_ATTR_CAPABILITY, (u8*)&capability, &capability_len);
}
//start to build provision_request frame
_rtw_memset(wpsie, 0, sizeof(wpsie));
_rtw_memset(p2p_ie, 0, sizeof(p2p_ie));
p2p_ielen = 0;
if ((pmgntframe = alloc_mgtxmitframe(pxmitpriv)) == NULL)
{
return;
}
//update attribute
pattrib = &pmgntframe->attrib;
update_mgntframe_attrib(padapter, pattrib);
_rtw_memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
fctrl = &(pwlanhdr->frame_ctl);
*(fctrl) = 0;
_rtw_memcpy(pwlanhdr->addr1, pwdinfo->tx_prov_disc_info.peerDevAddr, ETH_ALEN);
_rtw_memcpy(pwlanhdr->addr2, myid(&(padapter->eeprompriv)), ETH_ALEN);
_rtw_memcpy(pwlanhdr->addr3, pwdinfo->tx_prov_disc_info.peerDevAddr, ETH_ALEN);
SetSeqNum(pwlanhdr, pmlmeext->mgnt_seq);
pmlmeext->mgnt_seq++;
SetFrameSubType(pframe, WIFI_ACTION);
pframe += sizeof(struct rtw_ieee80211_hdr_3addr);
pattrib->pktlen = sizeof(struct rtw_ieee80211_hdr_3addr);
pframe = rtw_set_fixed_ie(pframe, 1, &(category), &(pattrib->pktlen));
pframe = rtw_set_fixed_ie(pframe, 1, &(action), &(pattrib->pktlen));
pframe = rtw_set_fixed_ie(pframe, 4, (unsigned char *) &(p2poui), &(pattrib->pktlen));
pframe = rtw_set_fixed_ie(pframe, 1, &(oui_subtype), &(pattrib->pktlen));
pframe = rtw_set_fixed_ie(pframe, 1, &(dialogToken), &(pattrib->pktlen));
//build_prov_disc_request_p2p_ie
// P2P OUI
p2pielen = 0;
p2p_ie[ p2pielen++ ] = 0x50;
p2p_ie[ p2pielen++ ] = 0x6F;
p2p_ie[ p2pielen++ ] = 0x9A;
p2p_ie[ p2pielen++ ] = 0x09; // WFA P2P v1.0
// Commented by Albert 20110301
// According to the P2P Specification, the provision discovery request frame should contain 3 P2P attributes
// 1. P2P Capability
// 2. Device Info
// 3. Group ID ( When joining an operating P2P Group )
// P2P Capability ATTR
// Type:
p2p_ie[ p2pielen++ ] = P2P_ATTR_CAPABILITY;
// Length:
//*(u16*) ( p2pie + p2pielen ) = cpu_to_le16( 0x0002 );
RTW_PUT_LE16(p2p_ie + p2pielen, 0x0002);
p2pielen += 2;
// Value:
// Device Capability Bitmap, 1 byte
// Group Capability Bitmap, 1 byte
_rtw_memcpy(p2p_ie + p2pielen, &capability, 2);
p2pielen += 2;
// Device Info ATTR
// Type:
p2p_ie[ p2pielen++ ] = P2P_ATTR_DEVICE_INFO;
// Length:
// 21 -> P2P Device Address (6bytes) + Config Methods (2bytes) + Primary Device Type (8bytes)
// + NumofSecondDevType (1byte) + WPS Device Name ID field (2bytes) + WPS Device Name Len field (2bytes)
//*(u16*) ( p2pie + p2pielen ) = cpu_to_le16( 21 + pwdinfo->device_name_len );
RTW_PUT_LE16(p2p_ie + p2pielen, devinfo_contentlen);
p2pielen += 2;
// Value:
_rtw_memcpy(p2p_ie + p2pielen, devinfo_content, devinfo_contentlen);
p2pielen += devinfo_contentlen;
pframe = rtw_set_ie(pframe, _VENDOR_SPECIFIC_IE_, p2pielen, (unsigned char *) p2p_ie, &p2p_ielen);
//p2pielen = build_prov_disc_request_p2p_ie( pwdinfo, pframe, NULL, 0, pwdinfo->tx_prov_disc_info.peerDevAddr);
//pframe += p2pielen;
pattrib->pktlen += p2p_ielen;
wpsielen = 0;
// WPS OUI
*(u32*) ( wpsie ) = cpu_to_be32( WPSOUI );
wpsielen += 4;
// WPS version
// Type:
*(u16*) ( wpsie + wpsielen ) = cpu_to_be16( WPS_ATTR_VER1 );
wpsielen += 2;
// Length:
*(u16*) ( wpsie + wpsielen ) = cpu_to_be16( 0x0001 );
wpsielen += 2;
// Value:
wpsie[wpsielen++] = WPS_VERSION_1; // Version 1.0
// Config Method
// Type:
*(u16*) ( wpsie + wpsielen ) = cpu_to_be16( WPS_ATTR_CONF_METHOD );
wpsielen += 2;
// Length:
*(u16*) ( wpsie + wpsielen ) = cpu_to_be16( 0x0002 );
wpsielen += 2;
// Value:
*(u16*) ( wpsie + wpsielen ) = cpu_to_be16( pwdinfo->tx_prov_disc_info.wps_config_method_request );
wpsielen += 2;
pframe = rtw_set_ie(pframe, _VENDOR_SPECIFIC_IE_, wpsielen, (unsigned char *) wpsie, &pattrib->pktlen );
#ifdef CONFIG_WFD
wfdielen = build_provdisc_req_wfd_ie(pwdinfo, pframe);
pframe += wfdielen;
pattrib->pktlen += wfdielen;
#endif //CONFIG_WFD
pattrib->last_txcmdsz = pattrib->pktlen;
//dump_mgntframe(padapter, pmgntframe);
if (dump_mgntframe_and_wait_ack(padapter, pmgntframe) != _SUCCESS)
DBG_8192C("%s, ack to\n", __func__);
//if(wps_devicepassword_id == WPS_DPID_REGISTRAR_SPEC)
//{
// DBG_8192C("waiting for p2p peer key-in PIN CODE\n");
// rtw_msleep_os(15000); // 15 sec for key in PIN CODE, workaround for GS2 before issuing Nego Req.
//}
}
/*
 * cfg80211 .remain_on_channel handler: switch to @channel and stay there
 * for roughly @duration ms so P2P action frames can be exchanged.
 * The duration is stretched below for short requests and (in concurrent
 * mode) when the buddy interface is associated.
 * Returns 0 on success or a negative errno (-EFAULT if wakeup failed).
 */
static s32 cfg80211_rtw_remain_on_channel(struct wiphy *wiphy,
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0))
	struct wireless_dev *wdev,
#else
	struct net_device *ndev,
#endif
	struct ieee80211_channel * channel,
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,8,0))
	enum nl80211_channel_type channel_type,
#endif
	unsigned int duration, u64 *cookie)
{
	s32 err = 0;
	_adapter *padapter = wiphy_to_adapter(wiphy);
	struct rtw_wdev_priv *pwdev_priv = wdev_to_priv(padapter->rtw_wdev);
	struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
	struct wifidirect_info *pwdinfo = &padapter->wdinfo;
	struct cfg80211_wifidirect_info *pcfg80211_wdinfo = &padapter->cfg80211_wdinfo;
	u8 remain_ch = (u8) ieee80211_frequency_to_channel(channel->center_freq);
	u8 ready_on_channel = _FALSE;

	DBG_871X(FUNC_ADPT_FMT" ch:%u duration:%d\n", FUNC_ADPT_ARG(padapter), remain_ch, duration);

	/* A previous listen period is still running: cancel its timer and let
	 * the P2P protocol worker restore the pre-listen state first. */
	if(pcfg80211_wdinfo->is_ro_ch == _TRUE)
	{
		DBG_8192C("%s, cancel ro ch timer\n", __func__);

		_cancel_timer_ex(&padapter->cfg80211_wdinfo.remain_on_ch_timer);

#ifdef CONFIG_CONCURRENT_MODE
		ATOMIC_SET(&pwdev_priv->ro_ch_to, 1);
#endif //CONFIG_CONCURRENT_MODE

		p2p_protocol_wk_hdl(padapter, P2P_RO_CH_WK);
	}

	pcfg80211_wdinfo->is_ro_ch = _TRUE;

	if(_FAIL == rtw_pwr_wakeup(padapter)) {
		err = -EFAULT;
		goto exit;
	}

	/* Remember what cfg80211 asked for so the expiry path can report it back. */
	_rtw_memcpy(&pcfg80211_wdinfo->remain_on_ch_channel, channel, sizeof(struct ieee80211_channel));
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,8,0))
	pcfg80211_wdinfo->remain_on_ch_type= channel_type;
#endif
	pcfg80211_wdinfo->remain_on_ch_cookie= *cookie;

	rtw_scan_abort(padapter);
#ifdef CONFIG_CONCURRENT_MODE
	if(rtw_buddy_adapter_up(padapter))
		rtw_scan_abort(padapter->pbuddy_adapter);
#endif //CONFIG_CONCURRENT_MODE

	//if(!rtw_p2p_chk_role(pwdinfo, P2P_ROLE_CLIENT) && !rtw_p2p_chk_role(pwdinfo, P2P_ROLE_GO))
	if(rtw_p2p_chk_state(pwdinfo, P2P_STATE_NONE))
	{
		/* P2P not running yet: bring it up as a P2P Device. */
		rtw_p2p_enable(padapter, P2P_ROLE_DEVICE);
		wdev_to_priv(padapter->rtw_wdev)->p2p_enabled = _TRUE;
	}
	else
	{
		/* Save current state so cancel_remain_on_channel can restore it. */
		rtw_p2p_set_pre_state(pwdinfo, rtw_p2p_state(pwdinfo));
#ifdef CONFIG_DEBUG_CFG80211
		DBG_8192C("%s, role=%d, p2p_state=%d\n", __func__, rtw_p2p_role(pwdinfo), rtw_p2p_state(pwdinfo));
#endif
	}

	rtw_p2p_set_state(pwdinfo, P2P_STATE_LISTEN);

	/* Empirical extension of short listen intervals. */
	if(duration < 400)
		duration = duration*3;//extend from exper.

#ifdef CONFIG_CONCURRENT_MODE
	/* Buddy interface associated: reserve extra time for channel ping-pong. */
	if(check_buddy_fwstate(padapter, _FW_LINKED) &&
		(duration<pwdinfo->ext_listen_interval))
	{
		duration = duration + pwdinfo->ext_listen_interval;
	}
#endif

	pcfg80211_wdinfo->restore_channel = pmlmeext->cur_channel;

	if(rtw_ch_set_search_ch(pmlmeext->channel_set, remain_ch) >= 0) {
#ifdef CONFIG_CONCURRENT_MODE
		if ( check_buddy_fwstate(padapter, _FW_LINKED ) )
		{
			PADAPTER pbuddy_adapter = padapter->pbuddy_adapter;
			struct mlme_ext_priv *pbuddy_mlmeext = &pbuddy_adapter->mlmeextpriv;

			if(remain_ch != pbuddy_mlmeext->cur_channel)
			{
				if(ATOMIC_READ(&pwdev_priv->switch_ch_to)==1 ||
					(remain_ch != pmlmeext->cur_channel))
				{
					/* Tell the AP we are entering power-save before
					 * leaving the buddy interface's channel. */
					DBG_8192C("%s, issue nulldata pwrbit=1\n", __func__);
					issue_nulldata(padapter->pbuddy_adapter, NULL, 1, 3, 500);

					ATOMIC_SET(&pwdev_priv->switch_ch_to, 0);

					DBG_8192C("%s, set switch ch timer, duration=%d\n", __func__, duration-pwdinfo->ext_listen_interval);
					_set_timer(&pwdinfo->ap_p2p_switch_timer, duration-pwdinfo->ext_listen_interval);
				}
			}

			ready_on_channel = _TRUE;
			//pmlmeext->cur_channel = remain_ch;
			//set_channel_bwmode(padapter, remain_ch, HAL_PRIME_CHNL_OFFSET_DONT_CARE, HT_CHANNEL_WIDTH_20);
		}else
#endif //CONFIG_CONCURRENT_MODE
		if(remain_ch != pmlmeext->cur_channel )
		{
			ready_on_channel = _TRUE;
			//pmlmeext->cur_channel = remain_ch;
			//set_channel_bwmode(padapter, remain_ch, HAL_PRIME_CHNL_OFFSET_DONT_CARE, HT_CHANNEL_WIDTH_20);
		}
	} else {
		DBG_871X("%s remain_ch:%u not in channel plan!!!!\n", __FUNCTION__, remain_ch);
	}

	//call this after other things have been done
#ifdef CONFIG_CONCURRENT_MODE
	if(ATOMIC_READ(&pwdev_priv->ro_ch_to)==1 ||
		(remain_ch != pmlmeext->cur_channel))
	{
		u8 co_channel = 0xff;
		ATOMIC_SET(&pwdev_priv->ro_ch_to, 0);
#endif

		/* Only actually switch channel when not associated on this iface. */
		if(ready_on_channel == _TRUE)
		{
			if ( !check_fwstate(&padapter->mlmepriv, _FW_LINKED ) )
			{
				pmlmeext->cur_channel = remain_ch;
				set_channel_bwmode(padapter, remain_ch, HAL_PRIME_CHNL_OFFSET_DONT_CARE, HT_CHANNEL_WIDTH_20);
			}
		}
		DBG_8192C("%s, set ro ch timer, duration=%d\n", __func__, duration);
		_set_timer( &pcfg80211_wdinfo->remain_on_ch_timer, duration);

#ifdef CONFIG_CONCURRENT_MODE
	}
#endif

	/* NOTE(review): on kernels >= 3.8 'channel_type' is not declared in this
	 * scope (the parameter above is compiled out) -- confirm that
	 * rtw_cfg80211_ready_on_channel() drops this argument under a matching
	 * #if, otherwise this call will not build on those kernels. */
	rtw_cfg80211_ready_on_channel(padapter, *cookie, channel, channel_type, duration, GFP_KERNEL);

	pwdinfo->listen_channel = pmlmeext->cur_channel;

exit:
	if (err)
		pcfg80211_wdinfo->is_ro_ch = _FALSE;

	return err;
}
/*
 * cfg80211 .cancel_remain_on_channel handler: abort an active P2P listen
 * period, restore the previous P2P state and clear the listen flag.
 * Always returns 0.
 *
 * Changes vs. original: dropped the unused local 'pwrpriv' and a long
 * '#if 0' dead-code branch that unconditionally fell through to the
 * state-restore path anyway.
 */
static s32 cfg80211_rtw_cancel_remain_on_channel(struct wiphy *wiphy,
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0))
	struct wireless_dev *wdev,
#else
	struct net_device *ndev,
#endif
	u64 cookie)
{
	s32 err = 0;
	_adapter *padapter = wiphy_to_adapter(wiphy);
	struct rtw_wdev_priv *pwdev_priv = wdev_to_priv(padapter->rtw_wdev);
	struct wifidirect_info *pwdinfo = &padapter->wdinfo;
	struct cfg80211_wifidirect_info *pcfg80211_wdinfo = &padapter->cfg80211_wdinfo;

	DBG_871X(FUNC_ADPT_FMT"\n", FUNC_ADPT_ARG(padapter));

	/* Listen period still active: stop its timer and let the P2P protocol
	 * worker perform the channel/state rollback. */
	if (pcfg80211_wdinfo->is_ro_ch == _TRUE) {
		DBG_8192C("%s, cancel ro ch timer\n", __func__);

		_cancel_timer_ex(&padapter->cfg80211_wdinfo.remain_on_ch_timer);

#ifdef CONFIG_CONCURRENT_MODE
		ATOMIC_SET(&pwdev_priv->ro_ch_to, 1);
#endif

		p2p_protocol_wk_hdl(padapter, P2P_RO_CH_WK);
	}

	/* Fall back to the P2P state recorded when the listen started. */
	rtw_p2p_set_state(pwdinfo, rtw_p2p_pre_state(pwdinfo));
#ifdef CONFIG_DEBUG_CFG80211
	DBG_8192C("%s, role=%d, p2p_state=%d\n", __func__, rtw_p2p_role(pwdinfo), rtw_p2p_state(pwdinfo));
#endif

	pcfg80211_wdinfo->is_ro_ch = _FALSE;

	return err;
}
#endif //CONFIG_P2P
/*
 * Low-level worker for cfg80211_rtw_mgmt_tx(): switch to @tx_ch if
 * necessary, copy the raw 802.11 frame in @buf/@len into a management
 * xmit frame and transmit it, waiting for the hardware ACK.
 *
 * Returns _SUCCESS on acked transmission, _FAIL on TX/alloc failure, or
 * -EFAULT if the device could not be woken up.
 *
 * Changes vs. original: removed the unused local 'pmlmepriv' and
 * commented-out dead code; behavior is unchanged.
 */
static int _cfg80211_rtw_mgmt_tx(_adapter *padapter, u8 tx_ch, const u8 *buf, size_t len)
{
	struct xmit_frame *pmgntframe;
	struct pkt_attrib *pattrib;
	unsigned char *pframe;
	int ret = _FAIL;
	bool ack = _TRUE;
	struct rtw_ieee80211_hdr *pwlanhdr;
	struct rtw_wdev_priv *pwdev_priv = wdev_to_priv(padapter->rtw_wdev);
	struct xmit_priv *pxmitpriv = &(padapter->xmitpriv);
	struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
	struct wifidirect_info *pwdinfo = &padapter->wdinfo;

	if(_FAIL == rtw_pwr_wakeup(padapter)) {
		ret = -EFAULT;
		goto exit;
	}

	/* Keep scans away while the action-frame exchange is in flight. */
	rtw_set_scan_deny(padapter, 1000);

	rtw_scan_abort(padapter);
#ifdef CONFIG_CONCURRENT_MODE
	if(rtw_buddy_adapter_up(padapter))
		rtw_scan_abort(padapter->pbuddy_adapter);
#endif /* CONFIG_CONCURRENT_MODE */

	if (padapter->cfg80211_wdinfo.is_ro_ch == _TRUE) {
#ifdef CONFIG_CONCURRENT_MODE
		/* Extend the active remain-on-channel period rather than
		 * cancelling it. */
		DBG_8192C("%s, extend ro ch time\n", __func__);
		_set_timer( &padapter->cfg80211_wdinfo.remain_on_ch_timer, pwdinfo->ext_listen_period);
#endif //CONFIG_CONCURRENT_MODE
	}

#ifdef CONFIG_CONCURRENT_MODE
	if (check_buddy_fwstate(padapter, _FW_LINKED )) {
		u8 co_channel=0xff;
		PADAPTER pbuddy_adapter = padapter->pbuddy_adapter;
		struct mlme_ext_priv *pbuddy_mlmeext = &pbuddy_adapter->mlmeextpriv;

		co_channel = rtw_get_oper_ch(padapter);

		if (tx_ch != pbuddy_mlmeext->cur_channel) {
			if (ATOMIC_READ(&pwdev_priv->switch_ch_to)==1) {
				/* Tell the AP we enter power-save before leaving
				 * the buddy interface's channel. */
				DBG_8192C("%s, issue nulldata pwrbit=1\n", __func__);
				issue_nulldata(padapter->pbuddy_adapter, NULL, 1, 3, 500);

				ATOMIC_SET(&pwdev_priv->switch_ch_to, 0);
			}

			DBG_8192C("%s, set switch ch timer, period=%d\n", __func__, pwdinfo->ext_listen_period);
			_set_timer(&pwdinfo->ap_p2p_switch_timer, pwdinfo->ext_listen_period);
		}

		if (!check_fwstate(&padapter->mlmepriv, _FW_LINKED ))
			pmlmeext->cur_channel = tx_ch;

		if (tx_ch != co_channel)
			set_channel_bwmode(padapter, tx_ch, HAL_PRIME_CHNL_OFFSET_DONT_CARE, HT_CHANNEL_WIDTH_20);
	}else
#endif //CONFIG_CONCURRENT_MODE
	if(tx_ch != rtw_get_oper_ch(padapter)) {
		if (!check_fwstate(&padapter->mlmepriv, _FW_LINKED ))
			pmlmeext->cur_channel = tx_ch;
		set_channel_bwmode(padapter, tx_ch, HAL_PRIME_CHNL_OFFSET_DONT_CARE, HT_CHANNEL_WIDTH_20);
	}

	/* Allocate a management frame and copy the caller's raw frame into it. */
	if ((pmgntframe = alloc_mgtxmitframe(pxmitpriv)) == NULL)
	{
		ret = _FAIL;
		goto exit;
	}

	pattrib = &pmgntframe->attrib;
	update_mgntframe_attrib(padapter, pattrib);
	pattrib->retry_ctrl = _FALSE;

	_rtw_memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);

	pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;

	_rtw_memcpy(pframe, (void*)buf, len);
	pattrib->pktlen = len;

	pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;

	/* Re-stamp the sequence number from the driver's own counter. */
	pmlmeext->mgnt_seq = GetSequence(pwlanhdr);
	pattrib->seqnum = pmlmeext->mgnt_seq;
	pmlmeext->mgnt_seq++;

#ifdef CONFIG_WFD
	{
		struct wifi_display_info *pwfd_info;

		pwfd_info = padapter->wdinfo.wfd_info;

		if ( _TRUE == pwfd_info->wfd_enable )
		{
			rtw_append_wfd_ie( padapter, pframe, &pattrib->pktlen );
		}
	}
#endif // CONFIG_WFD

	pattrib->last_txcmdsz = pattrib->pktlen;

	if (dump_mgntframe_and_wait_ack(padapter, pmgntframe) != _SUCCESS)
	{
		ack = _FALSE;
		ret = _FAIL;

#ifdef CONFIG_DEBUG_CFG80211
		DBG_8192C("%s, ack == _FAIL\n", __func__);
#endif
	}
	else
	{
#ifdef CONFIG_DEBUG_CFG80211
		DBG_8192C("%s, ack=%d, ok!\n", __func__, ack);
#endif
		ret = _SUCCESS;
	}

exit:

#ifdef CONFIG_DEBUG_CFG80211
	DBG_8192C("%s, ret=%d\n", __func__, ret);
#endif

	return ret;
}
/*
 * cfg80211 .mgmt_tx (pre-2.6.37: .action) handler: transmit a management
 * frame (typically a P2P public action frame) on @chan, retrying up to
 * RTW_MAX_MGMT_TX_CNT times.  TX status is indicated to cfg80211 BEFORE
 * the frame is sent, to avoid racing with the peer's response frame.
 * The parameter list varies heavily with the kernel's cfg80211 API.
 */
static int cfg80211_rtw_mgmt_tx(struct wiphy *wiphy,
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0))
	struct wireless_dev *wdev,
#else
	struct net_device *ndev,
#endif
	struct ieee80211_channel *chan,
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,38)) || defined(COMPAT_KERNEL_RELEASE)
	bool offchan,
#endif
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,8,0))
	enum nl80211_channel_type channel_type,
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37)) || defined(COMPAT_KERNEL_RELEASE)
	bool channel_type_valid,
#endif
#endif
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,38)) || defined(COMPAT_KERNEL_RELEASE)
	unsigned int wait,
#endif
	const u8 *buf, size_t len,
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0))
	bool no_cck,
#endif
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
	bool dont_wait_for_ack,
#endif
	u64 *cookie)
{
	_adapter *padapter = (_adapter *)wiphy_to_adapter(wiphy);
	struct rtw_wdev_priv *pwdev_priv = wdev_to_priv(padapter->rtw_wdev);
	int ret = 0;
	int tx_ret;
	u32 dump_limit = RTW_MAX_MGMT_TX_CNT;
	u32 dump_cnt = 0;
	bool ack = _TRUE;
	u8 tx_ch = (u8)ieee80211_frequency_to_channel(chan->center_freq);
	u8 category, action;
	int type = (-1);
	u32 start = rtw_get_current_time();

	/* cookie generation: the buffer address doubles as the TX cookie. */
	*cookie = (unsigned long) buf;

#ifdef CONFIG_DEBUG_CFG80211
	DBG_871X(FUNC_ADPT_FMT" len=%zu, ch=%d"
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,8,0))
		", ch_type=%d"
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37)) || defined(COMPAT_KERNEL_RELEASE)
		", channel_type_valid=%d"
#endif
#endif
		"\n", FUNC_ADPT_ARG(padapter),
		len, tx_ch
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,8,0))
		, channel_type
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37)) || defined(COMPAT_KERNEL_RELEASE)
		, channel_type_valid
#endif
#endif
	);
#endif /* CONFIG_DEBUG_CFG80211 */

	/* indicate ack before issue frame to avoid racing with rsp frame */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37)) || defined(COMPAT_KERNEL_RELEASE)
	rtw_cfg80211_mgmt_tx_status(padapter, *cookie, buf, len, ack, GFP_KERNEL);
#elif (LINUX_VERSION_CODE>=KERNEL_VERSION(2,6,34) && LINUX_VERSION_CODE<=KERNEL_VERSION(2,6,35))
	cfg80211_action_tx_status(ndev, *cookie, buf, len, ack, GFP_KERNEL);
#endif

	/* Only action frames are transmitted; everything else is dropped here. */
	if (rtw_action_frame_parse(buf, len, &category, &action) == _FALSE) {
		DBG_8192C(FUNC_ADPT_FMT" frame_control:0x%x\n", FUNC_ADPT_ARG(padapter),
			le16_to_cpu(((struct rtw_ieee80211_hdr_3addr *)buf)->frame_ctl));
		goto exit;
	}

	DBG_8192C("RTW_Tx:tx_ch=%d, da="MAC_FMT"\n", tx_ch, MAC_ARG(GetAddr1Ptr(buf)));
#ifdef CONFIG_P2P
	/* Classify the P2P frame (also updates internal P2P bookkeeping). */
	if((type = rtw_p2p_check_frames(padapter, buf, len, _TRUE)) >= 0)
		goto dump;
#endif
	if (category == RTW_WLAN_CATEGORY_PUBLIC)
		DBG_871X("RTW_Tx:%s\n", action_public_str(action));
	else
		DBG_871X("RTW_Tx:category(%u), action(%u)\n", category, action);

dump:
	/* Retry the transmission until it is ACKed or the limit is reached. */
	do {
		dump_cnt++;
		tx_ret = _cfg80211_rtw_mgmt_tx(padapter, tx_ch, buf, len);
	} while (dump_cnt < dump_limit && tx_ret != _SUCCESS);

	if (tx_ret != _SUCCESS || dump_cnt > 1) {
		DBG_871X(FUNC_ADPT_FMT" %s (%d/%d) in %d ms\n", FUNC_ADPT_ARG(padapter),
			tx_ret==_SUCCESS?"OK":"FAIL", dump_cnt, dump_limit, rtw_get_passing_time_ms(start));
	}

	/* Post-TX handling for specific P2P frame types. */
	switch (type) {
	case P2P_GO_NEGO_CONF:
		rtw_clear_scan_deny(padapter);
		break;
	case P2P_INVIT_RESP:
		/* Peer accepted a persistent-group invitation: hold off scans
		 * and keep the device awake while the group is brought up. */
		if (pwdev_priv->invit_info.flags & BIT(0)
			&& pwdev_priv->invit_info.status == 0)
		{
			DBG_871X(FUNC_ADPT_FMT" agree with invitation of persistent group\n",
				FUNC_ADPT_ARG(padapter));
			rtw_set_scan_deny(padapter, 5000);
			rtw_pwr_wakeup_ex(padapter, 5000);
			rtw_clear_scan_deny(padapter);
		}
		break;
	}

exit:
	return ret;
}
/*
 * cfg80211 .mgmt_frame_register handler: userspace tells us which
 * management frame subtypes it wants forwarded.  This driver has nothing
 * to (un)configure per subtype, so the callback is a deliberate no-op
 * apart from optional debug logging.
 *
 * Changes vs. original: removed a redundant
 * "if (frame_type != PROBE_REQ) return; return;" sequence -- both paths
 * returned immediately, so the conditional was dead code.
 */
static void cfg80211_rtw_mgmt_frame_register(struct wiphy *wiphy,
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0))
	struct wireless_dev *wdev,
#else
	struct net_device *ndev,
#endif
	u16 frame_type, bool reg)
{
	_adapter *adapter = wiphy_to_adapter(wiphy);

#ifdef CONFIG_DEBUG_CFG80211
	DBG_871X(FUNC_ADPT_FMT" frame_type:%x, reg:%d\n", FUNC_ADPT_ARG(adapter),
		frame_type, reg);
#endif
}
/*
 * Cache the WPS / P2P / WFD IEs supplied by userspace for beacon frames
 * and trigger a beacon update.  Each cached IE buffer is freed and
 * reallocated on every call.
 *
 * Returns 0 on success (or when @len <= 0), -EINVAL if an IE buffer
 * could not be allocated.
 *
 * Changes vs. original: removed the unused local 'wfd_ie' and stale
 * commented-out code; behavior is unchanged.
 */
static int rtw_cfg80211_set_beacon_wpsp2pie(struct net_device *ndev, char *buf, int len)
{
	int ret = 0;
	uint wps_ielen = 0;
	u8 *wps_ie;
	u32 p2p_ielen = 0;
	u8 wps_oui[8]={0x0,0x50,0xf2,0x04};
	u8 *p2p_ie;
	u32 wfd_ielen = 0;
	_adapter *padapter = (_adapter *)rtw_netdev_priv(ndev);
	struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
	struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);

	DBG_871X(FUNC_NDEV_FMT" ielen=%d\n", FUNC_NDEV_ARG(ndev), len);

	if(len>0)
	{
		/* --- WPS IE --- */
		if((wps_ie = rtw_get_wps_ie(buf, len, NULL, &wps_ielen)))
		{
#ifdef CONFIG_DEBUG_CFG80211
			DBG_8192C("bcn_wps_ielen=%d\n", wps_ielen);
#endif
			/* Drop any previously cached beacon WPS IE. */
			if(pmlmepriv->wps_beacon_ie)
			{
				u32 free_len = pmlmepriv->wps_beacon_ie_len;
				pmlmepriv->wps_beacon_ie_len = 0;
				rtw_mfree(pmlmepriv->wps_beacon_ie, free_len);
				pmlmepriv->wps_beacon_ie = NULL;
			}

			pmlmepriv->wps_beacon_ie = rtw_malloc(wps_ielen);
			if ( pmlmepriv->wps_beacon_ie == NULL) {
				DBG_8192C("%s()-%d: rtw_malloc() ERROR!\n", __FUNCTION__, __LINE__);
				return -EINVAL;
			}

			_rtw_memcpy(pmlmepriv->wps_beacon_ie, wps_ie, wps_ielen);
			pmlmepriv->wps_beacon_ie_len = wps_ielen;

			update_beacon(padapter, _VENDOR_SPECIFIC_IE_, wps_oui, _TRUE);
		}

#ifdef CONFIG_P2P
		/* --- P2P IE --- */
		if((p2p_ie=rtw_get_p2p_ie(buf, len, NULL, &p2p_ielen)))
		{
#ifdef CONFIG_DEBUG_CFG80211
			DBG_8192C("bcn_p2p_ielen=%d\n", p2p_ielen);
#endif
			if(pmlmepriv->p2p_beacon_ie)
			{
				u32 free_len = pmlmepriv->p2p_beacon_ie_len;
				pmlmepriv->p2p_beacon_ie_len = 0;
				rtw_mfree(pmlmepriv->p2p_beacon_ie, free_len);
				pmlmepriv->p2p_beacon_ie = NULL;
			}

			pmlmepriv->p2p_beacon_ie = rtw_malloc(p2p_ielen);
			if ( pmlmepriv->p2p_beacon_ie == NULL) {
				DBG_8192C("%s()-%d: rtw_malloc() ERROR!\n", __FUNCTION__, __LINE__);
				return -EINVAL;
			}

			_rtw_memcpy(pmlmepriv->p2p_beacon_ie, p2p_ie, p2p_ielen);
			pmlmepriv->p2p_beacon_ie_len = p2p_ielen;
		}
#endif //CONFIG_P2P

#ifdef CONFIG_WFD
		/* --- WFD IE --- */
		if(rtw_get_wfd_ie(buf, len, NULL, &wfd_ielen))
		{
#ifdef CONFIG_DEBUG_CFG80211
			DBG_8192C("bcn_wfd_ielen=%d\n", wfd_ielen);
#endif
			if(pmlmepriv->wfd_beacon_ie)
			{
				u32 free_len = pmlmepriv->wfd_beacon_ie_len;
				pmlmepriv->wfd_beacon_ie_len = 0;
				rtw_mfree(pmlmepriv->wfd_beacon_ie, free_len);
				pmlmepriv->wfd_beacon_ie = NULL;
			}

			pmlmepriv->wfd_beacon_ie = rtw_malloc(wfd_ielen);
			if ( pmlmepriv->wfd_beacon_ie == NULL) {
				DBG_8192C("%s()-%d: rtw_malloc() ERROR!\n", __FUNCTION__, __LINE__);
				return -EINVAL;
			}

			/* Second pass actually copies the IE into the cache. */
			rtw_get_wfd_ie(buf, len, pmlmepriv->wfd_beacon_ie, &pmlmepriv->wfd_beacon_ie_len);
		}
#endif //CONFIG_WFD

		pmlmeext->bstart_bss = _TRUE;
	}

	return ret;
}
/*
 * Cache the WPS / P2P / WFD IEs supplied by userspace for probe response
 * frames.  The P2P IE is stored in a different slot depending on whether
 * the P2P Capability attribute marks us as Group Owner.  The WPS IE is
 * patched in place to advertise the PUSH_BUTTON config method before it
 * is cached.
 *
 * Returns 0 on success (or when @len <= 0), -EINVAL if an IE buffer
 * could not be allocated.
 *
 * Changes vs. original: removed the unused local 'wfd_ie' and stale
 * commented-out code; behavior is unchanged.
 */
static int rtw_cfg80211_set_probe_resp_wpsp2pie(struct net_device *net, char *buf, int len)
{
	int ret = 0;
	uint wps_ielen = 0;
	u8 *wps_ie;
	u32 p2p_ielen = 0;
	u8 *p2p_ie;
	u32 wfd_ielen = 0;
	_adapter *padapter = (_adapter *)rtw_netdev_priv(net);
	struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);

#ifdef CONFIG_DEBUG_CFG80211
	DBG_8192C("%s, ielen=%d\n", __func__, len);
#endif

	if(len>0)
	{
		/* --- WPS IE --- */
		if((wps_ie = rtw_get_wps_ie(buf, len, NULL, &wps_ielen)))
		{
			uint attr_contentlen = 0;
			u16 uconfig_method, *puconfig_method = NULL;

#ifdef CONFIG_DEBUG_CFG80211
			DBG_8192C("probe_resp_wps_ielen=%d\n", wps_ielen);
#endif
			/* Drop any previously cached probe-resp WPS IE. */
			if(pmlmepriv->wps_probe_resp_ie)
			{
				u32 free_len = pmlmepriv->wps_probe_resp_ie_len;
				pmlmepriv->wps_probe_resp_ie_len = 0;
				rtw_mfree(pmlmepriv->wps_probe_resp_ie, free_len);
				pmlmepriv->wps_probe_resp_ie = NULL;
			}

			pmlmepriv->wps_probe_resp_ie = rtw_malloc(wps_ielen);
			if ( pmlmepriv->wps_probe_resp_ie == NULL) {
				DBG_8192C("%s()-%d: rtw_malloc() ERROR!\n", __FUNCTION__, __LINE__);
				return -EINVAL;
			}

			/* Add the PUSH_BUTTON config method into the WPS IE (patched
			 * in the source buffer, before the copy below).  WPS
			 * attributes are big-endian, hence cpu_to_be16(). */
			if ( (puconfig_method = (u16*)rtw_get_wps_attr_content( wps_ie, wps_ielen, WPS_ATTR_CONF_METHOD , NULL, &attr_contentlen)) != NULL )
			{
				uconfig_method = WPS_CM_PUSH_BUTTON;
				uconfig_method = cpu_to_be16( uconfig_method );

				*puconfig_method |= uconfig_method;
			}

			_rtw_memcpy(pmlmepriv->wps_probe_resp_ie, wps_ie, wps_ielen);
			pmlmepriv->wps_probe_resp_ie_len = wps_ielen;
		}

#ifdef CONFIG_P2P
		/* --- P2P IE --- */
		if((p2p_ie=rtw_get_p2p_ie(buf, len, NULL, &p2p_ielen)))
		{
			u8 is_GO = _FALSE;
			u32 attr_contentlen = 0;
			u16 cap_attr=0;

#ifdef CONFIG_DEBUG_CFG80211
			DBG_8192C("probe_resp_p2p_ielen=%d\n", p2p_ielen);
#endif

			/* Check the P2P Capability attribute: bit 0 of the Group
			 * Capability byte marks Group Owner. */
			if( rtw_get_p2p_attr_content( p2p_ie, p2p_ielen, P2P_ATTR_CAPABILITY, (u8*)&cap_attr, (uint*) &attr_contentlen) )
			{
				u8 grp_cap=0;

				cap_attr = le16_to_cpu(cap_attr);
				grp_cap = (u8)((cap_attr >> 8)&0xff);
				is_GO = (grp_cap&BIT(0)) ? _TRUE:_FALSE;

				if(is_GO)
					DBG_8192C("Got P2P Capability Attr, grp_cap=0x%x, is_GO\n", grp_cap);
			}

			if(is_GO == _FALSE)
			{
				if(pmlmepriv->p2p_probe_resp_ie)
				{
					u32 free_len = pmlmepriv->p2p_probe_resp_ie_len;
					pmlmepriv->p2p_probe_resp_ie_len = 0;
					rtw_mfree(pmlmepriv->p2p_probe_resp_ie, free_len);
					pmlmepriv->p2p_probe_resp_ie = NULL;
				}

				pmlmepriv->p2p_probe_resp_ie = rtw_malloc(p2p_ielen);
				if ( pmlmepriv->p2p_probe_resp_ie == NULL) {
					DBG_8192C("%s()-%d: rtw_malloc() ERROR!\n", __FUNCTION__, __LINE__);
					return -EINVAL;
				}
				_rtw_memcpy(pmlmepriv->p2p_probe_resp_ie, p2p_ie, p2p_ielen);
				pmlmepriv->p2p_probe_resp_ie_len = p2p_ielen;
			}
			else
			{
				if(pmlmepriv->p2p_go_probe_resp_ie)
				{
					u32 free_len = pmlmepriv->p2p_go_probe_resp_ie_len;
					pmlmepriv->p2p_go_probe_resp_ie_len = 0;
					rtw_mfree(pmlmepriv->p2p_go_probe_resp_ie, free_len);
					pmlmepriv->p2p_go_probe_resp_ie = NULL;
				}

				pmlmepriv->p2p_go_probe_resp_ie = rtw_malloc(p2p_ielen);
				if ( pmlmepriv->p2p_go_probe_resp_ie == NULL) {
					DBG_8192C("%s()-%d: rtw_malloc() ERROR!\n", __FUNCTION__, __LINE__);
					return -EINVAL;
				}
				_rtw_memcpy(pmlmepriv->p2p_go_probe_resp_ie, p2p_ie, p2p_ielen);
				pmlmepriv->p2p_go_probe_resp_ie_len = p2p_ielen;
			}
		}
#endif //CONFIG_P2P

#ifdef CONFIG_WFD
		/* --- WFD IE --- */
		if(rtw_get_wfd_ie(buf, len, NULL, &wfd_ielen))
		{
#ifdef CONFIG_DEBUG_CFG80211
			DBG_8192C("probe_resp_wfd_ielen=%d\n", wfd_ielen);
#endif

			if(pmlmepriv->wfd_probe_resp_ie)
			{
				u32 free_len = pmlmepriv->wfd_probe_resp_ie_len;
				pmlmepriv->wfd_probe_resp_ie_len = 0;
				rtw_mfree(pmlmepriv->wfd_probe_resp_ie, free_len);
				pmlmepriv->wfd_probe_resp_ie = NULL;
			}

			pmlmepriv->wfd_probe_resp_ie = rtw_malloc(wfd_ielen);
			if ( pmlmepriv->wfd_probe_resp_ie == NULL) {
				DBG_8192C("%s()-%d: rtw_malloc() ERROR!\n", __FUNCTION__, __LINE__);
				return -EINVAL;
			}
			/* Second pass actually copies the IE into the cache. */
			rtw_get_wfd_ie(buf, len, pmlmepriv->wfd_probe_resp_ie, &pmlmepriv->wfd_probe_resp_ie_len);
		}
#endif //CONFIG_WFD

	}

	return ret;
}
static int rtw_cfg80211_set_assoc_resp_wpsp2pie(struct net_device *net, char *buf, int len)
{
int ret = 0;
_adapter *padapter = (_adapter *)rtw_netdev_priv(net);
struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
DBG_8192C("%s, ielen=%d\n", __func__, len);
if(len>0)
{
if(pmlmepriv->wps_assoc_resp_ie)
{
u32 free_len = pmlmepriv->wps_assoc_resp_ie_len;
pmlmepriv->wps_assoc_resp_ie_len = 0;
rtw_mfree(pmlmepriv->wps_assoc_resp_ie, free_len);
pmlmepriv->wps_assoc_resp_ie = NULL;
}
pmlmepriv->wps_assoc_resp_ie = rtw_malloc(len);
if ( pmlmepriv->wps_assoc_resp_ie == NULL) {
DBG_8192C("%s()-%d: rtw_malloc() ERROR!\n", __FUNCTION__, __LINE__);
return -EINVAL;
}
_rtw_memcpy(pmlmepriv->wps_assoc_resp_ie, buf, len);
pmlmepriv->wps_assoc_resp_ie_len = len;
}
return ret;
}
/*
 * Dispatch a WPS/P2P IE blob from userspace to the per-frame-type cache
 * setter.  @type selects the target frame: 0x1 beacon, 0x2 probe
 * response, 0x4 association response.  The blob is only dispatched when
 * it actually contains a WPS (or, with CONFIG_P2P, a P2P) IE.
 *
 * Returns 0 when nothing was dispatched, otherwise the setter's result.
 *
 * Changes vs. original: added an explicit 'default' to the switch so
 * unknown type values are visibly ignored rather than silently falling
 * off the end.
 */
int rtw_cfg80211_set_mgnt_wpsp2pie(struct net_device *net, char *buf, int len,
	int type)
{
	int ret = 0;
	uint wps_ielen = 0;
	u32 p2p_ielen = 0;

#ifdef CONFIG_DEBUG_CFG80211
	DBG_8192C("%s, ielen=%d\n", __func__, len);
#endif

	if( (rtw_get_wps_ie(buf, len, NULL, &wps_ielen) && (wps_ielen>0))
#ifdef CONFIG_P2P
		|| (rtw_get_p2p_ie(buf, len, NULL, &p2p_ielen) && (p2p_ielen>0))
#endif
		)
	{
		if (net != NULL)
		{
			switch (type)
			{
			case 0x1: /* BEACON */
				ret = rtw_cfg80211_set_beacon_wpsp2pie(net, buf, len);
				break;
			case 0x2: /* PROBE_RESP */
				ret = rtw_cfg80211_set_probe_resp_wpsp2pie(net, buf, len);
				break;
			case 0x4: /* ASSOC_RESP */
				ret = rtw_cfg80211_set_assoc_resp_wpsp2pie(net, buf, len);
				break;
			default: /* unknown frame type: ignore, keep ret == 0 */
				break;
			}
		}
	}

	return ret;
}
/*
 * cfg80211 operation table registered with the wireless core.  Entries
 * are gated on kernel version / build options so the table matches the
 * cfg80211 API of the target kernel.
 */
static struct cfg80211_ops rtw_cfg80211_ops = {
	/* interface and key management */
	.change_virtual_intf = cfg80211_rtw_change_iface,
	.add_key = cfg80211_rtw_add_key,
	.get_key = cfg80211_rtw_get_key,
	.del_key = cfg80211_rtw_del_key,
	.set_default_key = cfg80211_rtw_set_default_key,
	/* station info, scan, connection */
	.get_station = cfg80211_rtw_get_station,
	.scan = cfg80211_rtw_scan,
	.set_wiphy_params = cfg80211_rtw_set_wiphy_params,
	.connect = cfg80211_rtw_connect,
	.disconnect = cfg80211_rtw_disconnect,
	.join_ibss = cfg80211_rtw_join_ibss,
	.leave_ibss = cfg80211_rtw_leave_ibss,
	/* power / PMKSA */
	.set_tx_power = cfg80211_rtw_set_txpower,
	.get_tx_power = cfg80211_rtw_get_txpower,
	.set_power_mgmt = cfg80211_rtw_set_power_mgmt,
	.set_pmksa = cfg80211_rtw_set_pmksa,
	.del_pmksa = cfg80211_rtw_del_pmksa,
	.flush_pmksa = cfg80211_rtw_flush_pmksa,

#ifdef CONFIG_AP_MODE
	.add_virtual_intf = cfg80211_rtw_add_virtual_intf,
	.del_virtual_intf = cfg80211_rtw_del_virtual_intf,

	/* beacon API changed to start_ap/change_beacon/stop_ap in 3.4 */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 4, 0)) && !defined(COMPAT_KERNEL_RELEASE)
	.add_beacon = cfg80211_rtw_add_beacon,
	.set_beacon = cfg80211_rtw_set_beacon,
	.del_beacon = cfg80211_rtw_del_beacon,
#else
	.start_ap = cfg80211_rtw_start_ap,
	.change_beacon = cfg80211_rtw_change_beacon,
	.stop_ap = cfg80211_rtw_stop_ap,
#endif

	.add_station = cfg80211_rtw_add_station,
	.del_station = cfg80211_rtw_del_station,
	.change_station = cfg80211_rtw_change_station,
	.dump_station = cfg80211_rtw_dump_station,
	.change_bss = cfg80211_rtw_change_bss,
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0))
	.set_channel = cfg80211_rtw_set_channel,
#endif
	//.auth = cfg80211_rtw_auth,
	//.assoc = cfg80211_rtw_assoc,
#endif //CONFIG_AP_MODE

#ifdef CONFIG_P2P
	.remain_on_channel = cfg80211_rtw_remain_on_channel,
	.cancel_remain_on_channel = cfg80211_rtw_cancel_remain_on_channel,
#endif

	/* mgmt_tx appeared in 2.6.37; older kernels used .action */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37)) || defined(COMPAT_KERNEL_RELEASE)
	.mgmt_tx = cfg80211_rtw_mgmt_tx,
	.mgmt_frame_register = cfg80211_rtw_mgmt_frame_register,
#elif (LINUX_VERSION_CODE>=KERNEL_VERSION(2,6,34) && LINUX_VERSION_CODE<=KERNEL_VERSION(2,6,35))
	.action = cfg80211_rtw_mgmt_tx,
#endif
};
/*
 * Fill in the HT capability advertisement for one wiphy band, based on
 * the RF chain configuration reported by the HAL.
 *
 * @ht_cap:  HT capability struct to populate (lives in the wiphy band)
 * @band:    band being initialised (currently unused; both bands get the
 *           same HT caps -- kept for interface compatibility)
 * @rf_type: RF chain layout (RF_1T1R / RF_1T2R / RF_2T2R)
 *
 * Fix vs. original: mcs.rx_highest is a little-endian field
 * (struct ieee80211_mcs_info uses __le16), so the Mbps constants are now
 * wrapped in cpu_to_le16() -- the original stored host-order values,
 * which is wrong on big-endian machines.
 */
static void rtw_cfg80211_init_ht_capab(struct ieee80211_sta_ht_cap *ht_cap, enum ieee80211_band band, u8 rf_type)
{
#define MAX_BIT_RATE_40MHZ_MCS15	300	/* Mbps */
#define MAX_BIT_RATE_40MHZ_MCS7	150	/* Mbps */

	ht_cap->ht_supported = _TRUE;

	ht_cap->cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
					IEEE80211_HT_CAP_SGI_40 | IEEE80211_HT_CAP_SGI_20 |
					IEEE80211_HT_CAP_DSSSCCK40 | IEEE80211_HT_CAP_MAX_AMSDU;

	/*
	 * Maximum length of AMPDU that the STA can receive.
	 * Length = 2 ^ (13 + max_ampdu_length_exp) - 1 (octets)
	 */
	ht_cap->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;

	/* Minimum MPDU start spacing. */
	ht_cap->ampdu_density = IEEE80211_HT_MPDU_DENSITY_16;

	ht_cap->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;

	/*
	 * rx_mask depends on the RX antenna count:
	 *   1 chain  -> rx_mask[0]=0xff (MCS0-7)
	 *   2 chains -> rx_mask[1]=0xff (MCS8-15)
	 * rx_mask[4] bit0 advertises 40MHz MCS32 support.
	 * rx_highest is the highest supported RX rate in Mbps.
	 */
	if(rf_type == RF_1T1R)
	{
		ht_cap->mcs.rx_mask[0] = 0xFF;
		ht_cap->mcs.rx_mask[1] = 0x00;
		ht_cap->mcs.rx_mask[4] = 0x01;

		ht_cap->mcs.rx_highest = cpu_to_le16(MAX_BIT_RATE_40MHZ_MCS7);
	}
	else if((rf_type == RF_1T2R) || (rf_type==RF_2T2R))
	{
		ht_cap->mcs.rx_mask[0] = 0xFF;
		ht_cap->mcs.rx_mask[1] = 0xFF;
		ht_cap->mcs.rx_mask[4] = 0x01;

		ht_cap->mcs.rx_highest = cpu_to_le16(MAX_BIT_RATE_40MHZ_MCS15);
	}
	else
	{
		DBG_8192C("%s, error rf_type=%d\n", __func__, rf_type);
	}
}
/*
 * Populate the HT capability information for every band the wiphy
 * exposes, using the RF chain configuration read from the HAL.
 */
void rtw_cfg80211_init_wiphy(_adapter *padapter)
{
	u8 rf_type;
	struct ieee80211_supported_band *sband;
	struct wireless_dev *pwdev = padapter->rtw_wdev;
	struct wiphy *wiphy = pwdev->wiphy;

	rtw_hal_get_hwreg(padapter, HW_VAR_RF_TYPE, (u8 *)(&rf_type));

	DBG_8192C("%s:rf_type=%d\n", __func__, rf_type);

	/* 2.4 GHz band, if present. */
	sband = wiphy->bands[IEEE80211_BAND_2GHZ];
	if (sband)
		rtw_cfg80211_init_ht_capab(&sband->ht_cap, IEEE80211_BAND_2GHZ, rf_type);

	/* 5 GHz band, if present. */
	sband = wiphy->bands[IEEE80211_BAND_5GHZ];
	if (sband)
		rtw_cfg80211_init_ht_capab(&sband->ht_cap, IEEE80211_BAND_5GHZ, rf_type);
}
/*
struct ieee80211_iface_limit rtw_limits[] = {
{ .max = 1, .types = BIT(NL80211_IFTYPE_STATION)
| BIT(NL80211_IFTYPE_ADHOC)
#ifdef CONFIG_AP_MODE
| BIT(NL80211_IFTYPE_AP)
#endif
#if defined(CONFIG_P2P) && ((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37)) || defined(COMPAT_KERNEL_RELEASE))
| BIT(NL80211_IFTYPE_P2P_CLIENT)
| BIT(NL80211_IFTYPE_P2P_GO)
#endif
},
{.max = 1, .types = BIT(NL80211_IFTYPE_MONITOR)},
};
struct ieee80211_iface_combination rtw_combinations = {
.limits = rtw_limits,
.n_limits = ARRAY_SIZE(rtw_limits),
.max_interfaces = 2,
.num_different_channels = 1,
};
*/
/*
 * One-time wiphy setup performed before wiphy_register(): limits,
 * supported interface modes, cipher suites, band tables and feature
 * flags, all gated on the kernel's cfg80211 API level.
 */
static void rtw_cfg80211_preinit_wiphy(_adapter *padapter, struct wiphy *wiphy)
{
	wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;

	wiphy->max_scan_ssids = RTW_SSID_SCAN_AMOUNT;
	wiphy->max_scan_ie_len = RTW_SCAN_IE_LEN_MAX;
	wiphy->max_num_pmkids = RTW_MAX_NUM_PMKIDS;

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,38)) || defined(COMPAT_KERNEL_RELEASE)
	wiphy->max_remain_on_channel_duration = RTW_MAX_REMAIN_ON_CHANNEL_DURATION;
#endif

	/* Interface modes this driver can operate. */
	wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION)
								| BIT(NL80211_IFTYPE_ADHOC)
#ifdef CONFIG_AP_MODE
								| BIT(NL80211_IFTYPE_AP)
								| BIT(NL80211_IFTYPE_MONITOR)
#endif
#if defined(CONFIG_P2P) && ((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37)) || defined(COMPAT_KERNEL_RELEASE))
								| BIT(NL80211_IFTYPE_P2P_CLIENT)
								| BIT(NL80211_IFTYPE_P2P_GO)
#endif
								;

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37)) || defined(COMPAT_KERNEL_RELEASE)
#ifdef CONFIG_AP_MODE
	wiphy->mgmt_stypes = rtw_cfg80211_default_mgmt_stypes;
#endif //CONFIG_AP_MODE
#endif

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,0))
	wiphy->software_iftypes |= BIT(NL80211_IFTYPE_MONITOR);
#endif

	/*
	wiphy->iface_combinations = &rtw_combinations;
	wiphy->n_iface_combinations = 1;
	*/

	wiphy->cipher_suites = rtw_cipher_suites;
	wiphy->n_cipher_suites = ARRAY_SIZE(rtw_cipher_suites);

	/* Band tables are allocated here and filled in rtw_cfg80211_init_wiphy(). */
	/* if (padapter->registrypriv.wireless_mode & WIRELESS_11G) */
	wiphy->bands[IEEE80211_BAND_2GHZ] = rtw_spt_band_alloc(IEEE80211_BAND_2GHZ);
	/* if (padapter->registrypriv.wireless_mode & WIRELESS_11A) */
	wiphy->bands[IEEE80211_BAND_5GHZ] = rtw_spt_band_alloc(IEEE80211_BAND_5GHZ);

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,38) && LINUX_VERSION_CODE < KERNEL_VERSION(3,0,0))
	wiphy->flags |= WIPHY_FLAG_SUPPORTS_SEPARATE_DEFAULT_KEYS;
#endif

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
	wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
	wiphy->flags |= WIPHY_FLAG_OFFCHAN_TX | WIPHY_FLAG_HAVE_AP_SME;
#endif

	/* Advertise power-save default according to the driver's PM setting. */
	if(padapter->registrypriv.power_mgnt != PS_MODE_ACTIVE)
		wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT;
	else
		wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
}
/*
 * rtw_wdev_alloc - allocate and register the wiphy, allocate the
 * wireless_dev, and initialize the driver's per-wdev private data.
 *
 * @padapter: driver adapter context; on success padapter->rtw_wdev and
 *            pnetdev->ieee80211_ptr are set to the new wdev.
 * @dev:      parent struct device for set_wiphy_dev().
 *
 * Returns 0 on success or a negative errno.  On failure all partially
 * acquired resources are released via the goto unwind chain.
 *
 * Fix vs. original: removed an unreachable rtw_mfree(wdev) statement that
 * sat after the success "return ret;" with no label in front of it — no
 * goto could ever reach it (wdev allocation is the last failure point, so
 * there is nothing to free on that path anyway).
 */
int rtw_wdev_alloc(_adapter *padapter, struct device *dev)
{
	int ret = 0;
	struct wiphy *wiphy;
	struct wireless_dev *wdev;
	struct rtw_wdev_priv *pwdev_priv;
	struct net_device *pnetdev = padapter->pnetdev;

	DBG_8192C("%s(padapter=%p)\n", __func__, padapter);

	/* wiphy: the rtw_wdev_priv lives in the wiphy's private area. */
	wiphy = wiphy_new(&rtw_cfg80211_ops, sizeof(struct rtw_wdev_priv));
	if (!wiphy) {
		DBG_8192C("Couldn't allocate wiphy device\n");
		ret = -ENOMEM;
		goto exit;
	}
	set_wiphy_dev(wiphy, dev);
	rtw_cfg80211_preinit_wiphy(padapter, wiphy);

	ret = wiphy_register(wiphy);
	if (ret < 0) {
		DBG_8192C("Couldn't register wiphy device\n");
		goto free_wiphy;
	}

	/* wdev */
	wdev = (struct wireless_dev *)rtw_zmalloc(sizeof(struct wireless_dev));
	if (!wdev) {
		DBG_8192C("Couldn't allocate wireless device\n");
		ret = -ENOMEM;
		goto unregister_wiphy;
	}
	wdev->wiphy = wiphy;
	wdev->netdev = pnetdev;
	//wdev->iftype = NL80211_IFTYPE_STATION;
	wdev->iftype = NL80211_IFTYPE_MONITOR; // for rtw_setopmode_cmd() in cfg80211_rtw_change_iface()
	padapter->rtw_wdev = wdev;
	pnetdev->ieee80211_ptr = wdev;

	/* init pwdev_priv */
	pwdev_priv = wdev_to_priv(wdev);
	pwdev_priv->rtw_wdev = wdev;
	pwdev_priv->pmon_ndev = NULL;
	pwdev_priv->ifname_mon[0] = '\0';
	pwdev_priv->padapter = padapter;
	pwdev_priv->scan_request = NULL;
	_rtw_spinlock_init(&pwdev_priv->scan_req_lock);

	pwdev_priv->p2p_enabled = _FALSE;
	pwdev_priv->provdisc_req_issued = _FALSE;
	rtw_wdev_invit_info_init(&pwdev_priv->invit_info);

	pwdev_priv->bandroid_scan = _FALSE;

	/* Mirror the registry power-save policy into the wdev private data. */
	if (padapter->registrypriv.power_mgnt != PS_MODE_ACTIVE)
		pwdev_priv->power_mgmt = _TRUE;
	else
		pwdev_priv->power_mgmt = _FALSE;

#ifdef CONFIG_CONCURRENT_MODE
	ATOMIC_SET(&pwdev_priv->switch_ch_to, 1);
	ATOMIC_SET(&pwdev_priv->ro_ch_to, 1);
#endif

	return ret;

	/* ---- error unwind ---- */
unregister_wiphy:
	wiphy_unregister(wiphy);
free_wiphy:
	wiphy_free(wiphy);
exit:
	return ret;
}
/*
 * rtw_wdev_free - release the wireless_dev allocated by rtw_wdev_alloc().
 *
 * Frees the per-band data allocated in rtw_cfg80211_preinit_wiphy(), the
 * wiphy itself, and finally the wdev structure.  Safe to call with NULL.
 *
 * Fix vs. original: removed the local pwdev_priv, which was assigned via
 * wdev_to_priv() but never used.
 */
void rtw_wdev_free(struct wireless_dev *wdev)
{
	DBG_8192C("%s(wdev=%p)\n", __func__, wdev);

	if (!wdev)
		return;

	rtw_spt_band_free(wdev->wiphy->bands[IEEE80211_BAND_2GHZ]);
	rtw_spt_band_free(wdev->wiphy->bands[IEEE80211_BAND_5GHZ]);

	wiphy_free(wdev->wiphy);

	rtw_mfree((u8*)wdev, sizeof(struct wireless_dev));
}
void rtw_wdev_unregister(struct wireless_dev *wdev)
{
struct rtw_wdev_priv *pwdev_priv;
DBG_8192C("%s(wdev=%p)\n", __func__, wdev);
if (!wdev)
return;
pwdev_priv = wdev_to_priv(wdev);
rtw_cfg80211_indicate_scan_done(pwdev_priv, _TRUE);
if (pwdev_priv->pmon_ndev) {
DBG_8192C("%s, unregister monitor interface\n", __func__);
unregister_netdev(pwdev_priv->pmon_ndev);
}
wiphy_unregister(wdev->wiphy);
}
#endif //CONFIG_IOCTL_CFG80211
|
cottsay/linux
|
drivers/net/wireless/realtek/8188eu/rtl8xxx_EU_MP/os_dep/linux/ioctl_cfg80211.c
|
C
|
gpl-2.0
| 155,396
|
{namespace n=Tx_News_ViewHelpers}
<f:comment>
	Partial Detail/MediaVideo: renders a single video media element of a news
	record via the n:mediaFactory view helper, sized by the TypoScript settings
	settings.detail.media.video.{width,height}.  An optional caption is wrapped
	below the player.
	NOTE(review): the caption wrapper class "medialement-alternative-content"
	looks like a typo of "mediaelement-..." — verify against the shipped CSS
	before renaming, as stylesheets may match the misspelled class.
</f:comment>
<div class="mediaelement">
<div class="mediaelement-video">
<n:mediaFactory classes="{settings.interfaces.media.video}" element="{mediaElement}" width="{settings.detail.media.video.width}"
height="{settings.detail.media.video.height}" />
</div>
<f:if condition="{mediaElement.caption}">
<div class="medialement-alternative-content">
<p class="news-img-caption">
{mediaElement.caption}
</p>
</div>
</f:if>
</div>
|
tonglin/pdPm
|
public_html/typo3conf/ext/news/Resources/Private/Partials/Detail/MediaVideo.html
|
HTML
|
gpl-2.0
| 489
|
# Assemble a "relive" (run-in-place) ejabberd environment under _build/relive/:
# build the relive control script, copy example configs and test certificates,
# then patch the examples with the cert paths and an admin ACL.
#
# Fixes vs. original: all expansions are quoted (paths with spaces no longer
# word-split, ShellCheck SC2086), `$(pwd)` replaces backticks, and `cd` failure
# now aborts instead of silently running the sed edits in the wrong directory
# (ShellCheck SC2164).
PWD_DIR=$(pwd)
REL_DIR="$PWD_DIR/_build/relive/"
CON_DIR="$REL_DIR/conf/"

# REL_DIR_TEMP lets callers stage the config edits in a different directory.
[ -z "$REL_DIR_TEMP" ] && REL_DIR_TEMP="$REL_DIR"
CON_DIR_TEMP="$REL_DIR_TEMP/conf/"

make ejabberdctl.relive
chmod +x ejabberdctl.relive
mv ejabberdctl.relive "$REL_DIR/ejabberdctl"

cp inetrc "$CON_DIR/"
cp ejabberdctl.cfg.example "$CON_DIR/ejabberdctl.cfg.example"
cp ejabberd.yml.example "$CON_DIR/ejabberd.yml.example"
cp test/ejabberd_SUITE_data/ca.pem "$CON_DIR"
cp test/ejabberd_SUITE_data/cert.pem "$CON_DIR"

cd "$CON_DIR_TEMP" || exit 1

# Point the example config at the test certificates and grant 'admin' the
# admin ACL.
sed -i "s|# certfiles:|certfiles:\n - $CON_DIR/cert.pem|g" ejabberd.yml.example
sed -i "s|certfiles:|ca_file: $CON_DIR/ca.pem\ncertfiles:|g" ejabberd.yml.example
sed -i 's|^acl:$|acl:\n admin: [user: admin]|g' ejabberd.yml.example

# Only install the patched examples if the user has no config of their own.
[ ! -f "$CON_DIR/ejabberd.yml" ] \
 && echo -n "ejabberd.yml " \
 && mv ejabberd.yml.example ejabberd.yml

sed -i "s|#' POLL|EJABBERD_BYPASS_WARNINGS=true\n\n#' POLL|g" ejabberdctl.cfg.example

[ ! -f "$CON_DIR/ejabberdctl.cfg" ] \
 && echo -n "ejabberdctl.cfg " \
 && mv ejabberdctl.cfg.example ejabberdctl.cfg
|
weiss/ejabberd
|
rel/setup-relive.sh
|
Shell
|
gpl-2.0
| 1,047
|
#ifndef __TUNER_E4000_H
#define __TUNER_E4000_H

/**

@file

@brief   E4000 tuner module declaration

One can manipulate E4000 tuner through E4000 module.
E4000 module is derived from tuner module.



@par Example:
@code

// The example is the same as the tuner example in tuner_base.h except the listed lines.
// NOTE(review): the example has been aligned with the prototypes declared
// below; the original showed an extra AGC-mode argument to BuildE4000Module()
// that the declared prototype does not take, a cast to the nonexistent type
// T2266_EXTRA_MODULE, and bandwidth names that are not declared here.



#include "tuner_e4000.h"


...



int main(void)
{
	TUNER_MODULE        *pTuner;
	E4000_EXTRA_MODULE  *pTunerExtra;

	TUNER_MODULE          TunerModuleMemory;
	BASE_INTERFACE_MODULE BaseInterfaceModuleMemory;
	I2C_BRIDGE_MODULE     I2cBridgeModuleMemory;

	unsigned long BandwidthMode;


	...



	// Build E4000 tuner module.
	BuildE4000Module(
		&pTuner,
		&TunerModuleMemory,
		&BaseInterfaceModuleMemory,
		&I2cBridgeModuleMemory,
		0xac,                     // I2C device address is 0xac in 8-bit format.
		CRYSTAL_FREQ_16384000HZ   // Crystal frequency is 16.384 MHz.
		);





	// Get E4000 tuner extra module.
	pTunerExtra = (E4000_EXTRA_MODULE *)(pTuner->pExtra);


	// ==== Initialize tuner and set its parameters =====

	...

	// Set E4000 bandwidth.
	pTunerExtra->SetBandwidthHz(pTuner, E4000_BANDWIDTH_6000000HZ);


	// ==== Get tuner information =====

	...

	// Get E4000 bandwidth.
	pTunerExtra->GetBandwidthHz(pTuner, &BandwidthMode);


	// See the example for other tuner functions in tuner_base.h


	return 0;
}


@endcode

*/


#include "tuner_base.h"




// The following context is implemented for E4000 source code.


// Definition (implemented for E4000)
#define E4000_1_SUCCESS			1
#define E4000_1_FAIL			0
#define E4000_I2C_SUCCESS		1
#define E4000_I2C_FAIL			0


// Function (implemented for E4000)
// Low-level register access helpers; the NoUse argument is kept only for
// signature compatibility with the vendor code.
int
I2CReadByte(
	TUNER_MODULE *pTuner,
	unsigned char NoUse,
	unsigned char RegAddr,
	unsigned char *pReadingByte
	);

int
I2CWriteByte(
	TUNER_MODULE *pTuner,
	unsigned char NoUse,
	unsigned char RegAddr,
	unsigned char WritingByte
	);

int
I2CWriteArray(
	TUNER_MODULE *pTuner,
	unsigned char NoUse,
	unsigned char RegStartAddr,
	unsigned char ByteNum,
	unsigned char *pWritingBytes
	);


// Functions (from E4000 source code)
// Hardware bring-up and configuration steps used by the tuner API below.
int tunerreset (TUNER_MODULE *pTuner);
int Tunerclock(TUNER_MODULE *pTuner);
int Qpeak(TUNER_MODULE *pTuner);
int DCoffloop(TUNER_MODULE *pTuner);
int GainControlinit(TUNER_MODULE *pTuner);

int Gainmanual(TUNER_MODULE *pTuner);
int E4000_gain_freq(TUNER_MODULE *pTuner, int frequency);
int PLL(TUNER_MODULE *pTuner, int Ref_clk, int Freq);
int LNAfilter(TUNER_MODULE *pTuner, int Freq);
int IFfilter(TUNER_MODULE *pTuner, int bandwidth, int Ref_clk);
int freqband(TUNER_MODULE *pTuner, int Freq);
int DCoffLUT(TUNER_MODULE *pTuner);
int GainControlauto(TUNER_MODULE *pTuner);

int E4000_sensitivity(TUNER_MODULE *pTuner, int Freq, int bandwidth);
int E4000_linearity(TUNER_MODULE *pTuner, int Freq, int bandwidth);
int E4000_high_linearity(TUNER_MODULE *pTuner);
int E4000_nominal(TUNER_MODULE *pTuner, int Freq, int bandwidth);



// The following context is E4000 tuner API source code


// Definitions

// Bandwidth in Hz
enum E4000_BANDWIDTH_HZ
{
	E4000_BANDWIDTH_6000000HZ = 6000000,
	E4000_BANDWIDTH_7000000HZ = 7000000,
	E4000_BANDWIDTH_8000000HZ = 8000000,
};



// Builder
void
BuildE4000Module(
	TUNER_MODULE **ppTuner,
	TUNER_MODULE *pTunerModuleMemory,
	BASE_INTERFACE_MODULE *pBaseInterfaceModuleMemory,
	I2C_BRIDGE_MODULE *pI2cBridgeModuleMemory,
	unsigned char DeviceAddr,
	unsigned long CrystalFreqHz
	);



// Manipulating functions
void
e4000_GetTunerType(
	TUNER_MODULE *pTuner,
	int *pTunerType
	);

void
e4000_GetDeviceAddr(
	TUNER_MODULE *pTuner,
	unsigned char *pDeviceAddr
	);

int
e4000_Initialize(
	TUNER_MODULE *pTuner
	);

int
e4000_SetRfFreqHz(
	TUNER_MODULE *pTuner,
	unsigned long RfFreqHz
	);

int
e4000_GetRfFreqHz(
	TUNER_MODULE *pTuner,
	unsigned long *pRfFreqHz
	);


// Extra manipulating functions
int
e4000_GetRegByte(
	TUNER_MODULE *pTuner,
	unsigned char RegAddr,
	unsigned char *pReadingByte
	);

int
e4000_SetBandwidthHz(
	TUNER_MODULE *pTuner,
	unsigned long BandwidthHz
	);

int
e4000_GetBandwidthHz(
	TUNER_MODULE *pTuner,
	unsigned long *pBandwidthHz
	);



#endif
|
vetzki/kernel_tegra
|
drivers/media/dvb/dvb-usb/tuner_e4000.h
|
C
|
gpl-2.0
| 4,190
|
use strict;
use warnings;
=head1 NAME
Algorithm::Evolutionary::Op::Uniform_Crossover - interchanges a set of atoms
from one parent to the other.
=head1 SYNOPSIS
#Create from XML description using EvoSpec
my $xmlStr3=<<EOC;
<op name='Uniform_Crossover' type='binary' rate='1'>
<param name='numPoints' value='3' /> #Max is 2, anyways
</op>
EOC
my $op3 = Algorithm::Evolutionary::Op::Base->fromXML( $xmlStr3 );
print $op3->asXML(), "\n";
#Apply to 2 Individuals of the String class
my $indi = new Algorithm::Evolutionary::Individual::BitString 10;
my $indi2 = $indi->clone();
my $indi3 = $indi->clone();
my $offspring = $op3->apply( $indi2, $indi3 ); #$indi2 == $offspring
#Initialize using OO interface
my $op4 = new Algorithm::Evolutionary::Op::Uniform_Crossover 0.5;# Crossover rate
=head1 Base Class
L<Algorithm::Evolutionary::Op::Base|Algorithm::Evolutionary::Op::Base>
=head1 DESCRIPTION
General purpose uniform crossover operator
=head1 METHODS
=cut
package Algorithm::Evolutionary::Op::Uniform_Crossover;
use lib qw(../../..);
our ($VERSION) = ( '$Revision: 3.2 $ ' =~ /(\d+\.\d+)/ );
use Clone qw(clone);
use Carp;
use base 'Algorithm::Evolutionary::Op::Base';
#Class-wide constants
our $APPLIESTO = 'Algorithm::Evolutionary::Individual::String';
our $ARITY = 2;
our %parameters = ( crossover_rate => 2 );
=head2 new( [$options_hash] [, $operation_priority] )
Creates a new n-point crossover operator, with 2 as the default number
of points, that is, the default would be
my $options_hash = { crossover_rate => 0.5 };
my $priority = 1;
=cut
sub new {
    # Construct a uniform-crossover operator.  Accepts an optional crossover
    # rate (default 0.5; must be < 1) and an optional application priority
    # (default 1), then delegates storage to the Op::Base constructor.
    my ( $class, $rate, $priority ) = ( shift, shift, shift );

    # Keep || (not //) so a rate of 0 also falls back to 0.5, as before.
    $rate     = $rate     || 0.5;
    $priority = $priority || 1;

    croak "Crossover probability must be less than 1" if $rate >= 1;

    return Algorithm::Evolutionary::Op::Base::new( $class, $priority,
                                                   { crossover_rate => $rate } );
}
=head2 apply( $chromosome_1, $chromosome_2 )
Applies xover operator to a "Chromosome", a string, really. Can be
applied only to I<victims> with the C<_str> instance variable; but
it checks before application that both operands are of type
L<String|Algorithm::Evolutionary::Individual::String>.
Changes the first parent, and returns it. If you want to change both
parents at the same time, check
L<QuadXOver|Algorithm::Evolutionary::Op::QuadXOver>
=cut
sub apply ($$$){
    # Uniform crossover: clone the first parent, then for each shared
    # position independently copy the second parent's atom with probability
    # _crossover_rate.  Returns the (modified) clone; parents are untouched.
    my $self   = shift;
    my $first  = shift || croak "No victim here!";
    my $child  = clone($first);
    my $second = shift || croak "No victim here!";

    # Only iterate over positions both chromosomes actually have.
    my $len = $child->size() > $second->size() ? $second->size() : $child->size();

    for my $pos ( 0 .. $len - 1 ) {
        next if rand() >= $self->{'_crossover_rate'};
        $child->Atom( $pos, $second->Atom($pos) );
    }

    # The clone's cached fitness is stale after mutation.
    $child->{'_fitness'} = undef;
    return $child;
}
=head1 Copyright
This file is released under the GPL. See the LICENSE file included in this distribution,
or go to http://www.fsf.org/licenses/gpl.txt
CVS Info: $Date: 2011/02/14 06:55:36 $
$Header: /media/Backup/Repos/opeal/opeal/Algorithm-Evolutionary/lib/Algorithm/Evolutionary/Op/Uniform_Crossover.pm,v 3.2 2011/02/14 06:55:36 jmerelo Exp $
$Author: jmerelo $
$Revision: 3.2 $
$Name $
=cut
|
JJ/Algorithm-Evolutionary
|
lib/Algorithm/Evolutionary/Op/Uniform_Crossover.pm
|
Perl
|
gpl-2.0
| 3,258
|
#region License
// Copyright (c) 2007 James Newton-King
//
// Permission is hereby granted, free of charge, to any person
// obtaining a copy of this software and associated documentation
// files (the "Software"), to deal in the Software without
// restriction, including without limitation the rights to use,
// copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following
// conditions:
//
// The above copyright notice and this permission notice shall be
// included in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
// OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
// HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
// WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
// OTHER DEALINGS IN THE SOFTWARE.
#endregion
using System;
using System.Collections;
using System.Collections.Generic;
using OsmSharp.IO.Json.Utilities;
using System.Globalization;
namespace OsmSharp.IO.Json.Linq
{
/// <summary>
/// Represents a JSON constructor.
/// </summary>
public class JConstructor : JContainer
{
private string _name;
private readonly List<JToken> _values = new List<JToken>();
/// <summary>
/// Gets the container's children tokens.
/// </summary>
/// <value>The container's children tokens.</value>
protected override IList<JToken> ChildrenTokens
{
get { return _values; }
}
internal override void MergeItem(object content, JsonMergeSettings settings)
{
JConstructor c = content as JConstructor;
if (c == null)
return;
if (c.Name != null)
Name = c.Name;
MergeEnumerableContent(this, c, settings);
}
/// <summary>
/// Gets or sets the name of this constructor.
/// </summary>
/// <value>The constructor name.</value>
public string Name
{
get { return _name; }
set { _name = value; }
}
/// <summary>
/// Gets the node type for this <see cref="JToken"/>.
/// </summary>
/// <value>The type.</value>
public override JTokenType Type
{
get { return JTokenType.Constructor; }
}
/// <summary>
/// Initializes a new instance of the <see cref="JConstructor"/> class.
/// </summary>
public JConstructor()
{
}
/// <summary>
/// Initializes a new instance of the <see cref="JConstructor"/> class from another <see cref="JConstructor"/> object.
/// </summary>
/// <param name="other">A <see cref="JConstructor"/> object to copy from.</param>
public JConstructor(JConstructor other)
: base(other)
{
_name = other.Name;
}
/// <summary>
/// Initializes a new instance of the <see cref="JConstructor"/> class with the specified name and content.
/// </summary>
/// <param name="name">The constructor name.</param>
/// <param name="content">The contents of the constructor.</param>
public JConstructor(string name, params object[] content)
: this(name, (object)content)
{
}
/// <summary>
/// Initializes a new instance of the <see cref="JConstructor"/> class with the specified name and content.
/// </summary>
/// <param name="name">The constructor name.</param>
/// <param name="content">The contents of the constructor.</param>
public JConstructor(string name, object content)
: this(name)
{
Add(content);
}
/// <summary>
/// Initializes a new instance of the <see cref="JConstructor"/> class with the specified name.
/// </summary>
/// <param name="name">The constructor name.</param>
public JConstructor(string name)
{
ValidationUtils.ArgumentNotNullOrEmpty(name, "name");
_name = name;
}
internal override bool DeepEquals(JToken node)
{
JConstructor c = node as JConstructor;
return (c != null && _name == c.Name && ContentsEqual(c));
}
internal override JToken CloneToken()
{
return new JConstructor(this);
}
/// <summary>
/// Writes this token to a <see cref="JsonWriter"/>.
/// </summary>
/// <param name="writer">A <see cref="JsonWriter"/> into which this method will write.</param>
/// <param name="converters">A collection of <see cref="JsonConverter"/> which will be used when writing the token.</param>
public override void WriteTo(JsonWriter writer, params JsonConverter[] converters)
{
writer.WriteStartConstructor(_name);
foreach (JToken token in Children())
{
token.WriteTo(writer, converters);
}
writer.WriteEndConstructor();
}
/// <summary>
/// Gets the <see cref="JToken"/> with the specified key.
/// </summary>
/// <value>The <see cref="JToken"/> with the specified key.</value>
public override JToken this[object key]
{
get
{
ValidationUtils.ArgumentNotNull(key, "o");
if (!(key is int))
throw new ArgumentException("Accessed JConstructor values with invalid key value: {0}. Argument position index expected.".FormatWith(CultureInfo.InvariantCulture, MiscellaneousUtils.ToString(key)));
return GetItem((int)key);
}
set
{
ValidationUtils.ArgumentNotNull(key, "o");
if (!(key is int))
throw new ArgumentException("Set JConstructor values with invalid key value: {0}. Argument position index expected.".FormatWith(CultureInfo.InvariantCulture, MiscellaneousUtils.ToString(key)));
SetItem((int)key, value);
}
}
internal override int GetDeepHashCode()
{
return _name.GetHashCode() ^ ContentsHashCode();
}
/// <summary>
/// Loads an <see cref="JConstructor"/> from a <see cref="JsonReader"/>.
/// </summary>
/// <param name="reader">A <see cref="JsonReader"/> that will be read for the content of the <see cref="JConstructor"/>.</param>
/// <returns>A <see cref="JConstructor"/> that contains the JSON that was read from the specified <see cref="JsonReader"/>.</returns>
public new static JConstructor Load(JsonReader reader)
{
if (reader.TokenType == JsonToken.None)
{
if (!reader.Read())
throw JsonReaderException.Create(reader, "Error reading JConstructor from JsonReader.");
}
while (reader.TokenType == JsonToken.Comment)
{
reader.Read();
}
if (reader.TokenType != JsonToken.StartConstructor)
throw JsonReaderException.Create(reader, "Error reading JConstructor from JsonReader. Current JsonReader item is not a constructor: {0}".FormatWith(CultureInfo.InvariantCulture, reader.TokenType));
JConstructor c = new JConstructor((string)reader.Value);
c.SetLineInfo(reader as IJsonLineInfo);
c.ReadTokenFrom(reader);
return c;
}
}
}
|
ryfx/OsmSharp
|
OsmSharp/IO/Json/Linq/JConstructor.cs
|
C#
|
gpl-2.0
| 7,918
|
/*
* lanstreamer.c -- driver for the IBM Auto LANStreamer PCI Adapter
*
* Written By: Mike Sullivan, IBM Corporation
*
* Copyright (C) 1999 IBM Corporation
*
* Linux driver for IBM PCI tokenring cards based on the LanStreamer MPC
* chipset.
*
* This driver is based on the olympic driver for IBM PCI TokenRing cards (Pit/Pit-Phy/Olympic
* chipsets) written by:
* 1999 Peter De Schrijver All Rights Reserved
* 1999 Mike Phillips (phillim@amtrak.com)
*
* Base Driver Skeleton:
* Written 1993-94 by Donald Becker.
*
* Copyright 1993 United States Government as represented by the
* Director, National Security Agency.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* NO WARRANTY
* THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
* LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
* solely responsible for determining the appropriateness of using and
* distributing the Program and assumes all risks associated with its
* exercise of rights under this Agreement, including but not limited to
* the risks and costs of program errors, damage to or loss of data,
* programs or equipment, and unavailability or interruption of operations.
*
* DISCLAIMER OF LIABILITY
* NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
* TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
* USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
* HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*
* 12/10/99 - Alpha Release 0.1.0
* First release to the public
* 03/03/00 - Merged to kernel, indented -kr -i8 -bri0, fixed some missing
* malloc free checks, reviewed code. <alan@redhat.com>
* 03/13/00 - Added spinlocks for smp
* 03/08/01 - Added support for module_init() and module_exit()
* 08/15/01 - Added ioctl() functionality for debugging, changed netif_*_queue
* calls and other incorrectness - Kent Yoder <yoder1@us.ibm.com>
* 11/05/01 - Restructured the interrupt function, added delays, reduced the
* the number of TX descriptors to 1, which together can prevent
* the card from locking up the box - <yoder1@us.ibm.com>
* 09/27/02 - New PCI interface + bug fix. - <yoder1@us.ibm.com>
*
* To Do:
*
*
* If Problems do Occur
* Most problems can be rectified by either closing and opening the interface
* (ifconfig down and up) or rmmod and insmod'ing the driver (a bit difficult
* if compiled into the kernel).
*/
/* Change STREAMER_DEBUG to 1 to get verbose, and I mean really verbose, messages */
#define STREAMER_DEBUG 0
#define STREAMER_DEBUG_PACKETS 0
/* Change STREAMER_NETWORK_MONITOR to receive mac frames through the arb channel.
* Will also create a /proc/net/streamer_tr entry if proc_fs is compiled into the
* kernel.
* Intended to be used to create a ring-error reporting network module
* i.e. it will give you the source address of beaconers on the ring
*/
#define STREAMER_NETWORK_MONITOR 0
/* #define CONFIG_PROC_FS */
/*
* Allow or disallow ioctl's for debugging
*/
#define STREAMER_IOCTL 0
#include <linux/config.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/in.h>
#include <linux/ioport.h>
#include <linux/string.h>
#include <linux/proc_fs.h>
#include <linux/ptrace.h>
#include <linux/skbuff.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/trdevice.h>
#include <linux/stddef.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/version.h>
#include <net/checksum.h>
#include <asm/io.h>
#include <asm/system.h>
#include <asm/bitops.h>
#include "lanstreamer.h"
/* I've got to put some intelligence into the version number so that Peter and I know
* which version of the code somebody has got.
* Version Number = a.b.c.d where a.b.c is the level of code and d is the latest author.
* So 0.0.1.pds = Peter, 0.0.1.mlp = Mike
*
* Official releases will only have an a.b.c version number format.
*/
/* Driver banner printed once at probe time. */
static char version[] = "LanStreamer.c v0.4.0 03/08/01 - Mike Sullivan\n"
                        " v0.5.2 09/30/02 - Kent Yoder";

/* PCI IDs this driver binds to (IBM LanStreamer token ring). */
static struct pci_device_id streamer_pci_tbl[] __initdata = {
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_TR, PCI_ANY_ID, PCI_ANY_ID,},
	{}	/* terminating entry */
};
MODULE_DEVICE_TABLE(pci,streamer_pci_tbl);

/* Human-readable decode tables for the adapter's open-error major/minor
 * codes; indexed by the stage / reason fields of the open response. */
static char *open_maj_error[] = {
	"No error", "Lobe Media Test", "Physical Insertion",
	"Address Verification", "Neighbor Notification (Ring Poll)",
	"Request Parameters", "FDX Registration Request",
	"FDX Lobe Media Test", "FDX Duplicate Address Check",
	"Unknown stage"
};

static char *open_min_error[] = {
	"No error", "Function Failure", "Signal Lost", "Wire Fault",
	"Ring Speed Mismatch", "Timeout", "Ring Failure", "Ring Beaconing",
	"Duplicate Node Address", "Request Parameters", "Remove Received",
	"Reserved", "Reserved", "No Monitor Detected for RPL",
	"Monitor Contention failer for RPL", "FDX Protocol Error"
};

/* Module paramters */

/* Ring Speed 0,4,16
 * 0 = Autosense
 * 4,16 = Selected speed only, no autosense
 * This allows the card to be the first on the ring
 * and become the active monitor.
 *
 * WARNING: Some hubs will allow you to insert
 * at the wrong speed
 */
static int ringspeed[STREAMER_MAX_ADAPTERS] = { 0, };

MODULE_PARM(ringspeed, "1-" __MODULE_STRING(STREAMER_MAX_ADAPTERS) "i");

/* Packet buffer size */

static int pkt_buf_sz[STREAMER_MAX_ADAPTERS] = { 0, };

MODULE_PARM(pkt_buf_sz, "1-" __MODULE_STRING(STREAMER_MAX_ADAPTERS) "i");

/* Message Level */

static int message_level[STREAMER_MAX_ADAPTERS] = { 1, };

MODULE_PARM(message_level,
	    "1-" __MODULE_STRING(STREAMER_MAX_ADAPTERS) "i");

/* Forward declarations for the net_device operations and helpers. */
#if STREAMER_IOCTL
static int streamer_ioctl(struct net_device *, struct ifreq *, int);
#endif

static int streamer_reset(struct net_device *dev);
static int streamer_open(struct net_device *dev);
static int streamer_xmit(struct sk_buff *skb, struct net_device *dev);
static int streamer_close(struct net_device *dev);
static void streamer_set_rx_mode(struct net_device *dev);
static void streamer_interrupt(int irq, void *dev_id,
			       struct pt_regs *regs);
static struct net_device_stats *streamer_get_stats(struct net_device *dev);
static int streamer_set_mac_address(struct net_device *dev, void *addr);
static void streamer_arb_cmd(struct net_device *dev);
static int streamer_change_mtu(struct net_device *dev, int mtu);
static void streamer_srb_bh(struct net_device *dev);
static void streamer_asb_bh(struct net_device *dev);
#if STREAMER_NETWORK_MONITOR
#ifdef CONFIG_PROC_FS
static int streamer_proc_info(char *buffer, char **start, off_t offset,
			      int length, int *eof, void *data);
static int sprintf_info(char *buffer, struct net_device *dev);
struct streamer_private *dev_streamer=NULL;
#endif
#endif
/*
 * streamer_init_one - PCI probe routine: allocate the token-ring netdev,
 * map the card's PIO/MMIO resources, wire up the net_device ops, configure
 * the PCI cache line size / command register, and reset the adapter.
 *
 * Returns 0 on success, negative errno on failure; on failure all resources
 * acquired so far are released via the goto unwind chain.
 *
 * Fix vs. original: when streamer_reset() failed, the code fell through to
 * the cleanup path with rc still 0, so the probe freed everything yet
 * reported success to the PCI core.  rc is now set to -EIO on that path.
 */
static int __devinit streamer_init_one(struct pci_dev *pdev,
				       const struct pci_device_id *ent)
{
	struct net_device *dev=NULL;
	struct streamer_private *streamer_priv;
	__u32 pio_start, pio_end, pio_flags, pio_len;
	__u32 mmio_start, mmio_end, mmio_flags, mmio_len;
	int rc=0;
	static int card_no=-1;
	u16 pcr;
	u8 cls = 0;

#if STREAMER_DEBUG
	printk("lanstreamer::streamer_init_one, entry pdev %p\n",pdev);
#endif

	card_no++;
	dev=init_trdev(dev, sizeof(*streamer_priv));
	if(dev==NULL) {
		printk(KERN_ERR "lanstreamer: out of memory.\n");
		return -ENOMEM;
	}
	SET_MODULE_OWNER(dev);
	streamer_priv=dev->priv;

#if STREAMER_NETWORK_MONITOR
#ifdef CONFIG_PROC_FS
	/* First adapter creates the shared /proc entry; chain privates. */
	if (!dev_streamer) {
		create_proc_read_entry("net/streamer_tr",0,0,streamer_proc_info,NULL);
	}
	streamer_priv->next=dev_streamer;
	dev_streamer=streamer_priv;
#endif
#endif

	/* Card only does 32-bit DMA. */
	if(pci_set_dma_mask(pdev, 0xFFFFFFFF)) {
		printk(KERN_ERR "%s: No suitable PCI mapping available.\n", dev->name);
		rc = -ENODEV;
		goto err_out;
	}

	if (pci_enable_device(pdev)) {
		printk(KERN_ERR "lanstreamer: unable to enable pci device\n");
		rc=-EIO;
		goto err_out;
	}

	pci_set_master(pdev);

	/* BAR 0: I/O ports, BAR 1: memory-mapped registers. */
	pio_start = pci_resource_start(pdev, 0);
	pio_end = pci_resource_end(pdev, 0);
	pio_flags = pci_resource_flags(pdev, 0);
	pio_len = pci_resource_len(pdev, 0);

	mmio_start = pci_resource_start(pdev, 1);
	mmio_end = pci_resource_end(pdev, 1);
	mmio_flags = pci_resource_flags(pdev, 1);
	mmio_len = pci_resource_len(pdev, 1);

#if STREAMER_DEBUG
	printk("lanstreamer: pio_start %x pio_end %x pio_len %x pio_flags %x\n",
		pio_start, pio_end, pio_len, pio_flags);
	printk("lanstreamer: mmio_start %x mmio_end %x mmio_len %x mmio_flags %x\n",
		mmio_start, mmio_end, mmio_flags, mmio_len);
#endif

	if (!request_region(pio_start, pio_len, "lanstreamer")) {
		printk(KERN_ERR "lanstreamer: unable to get pci io addr %x\n",pio_start);
		rc= -EBUSY;
		goto err_out;
	}

	if (!request_mem_region(mmio_start, mmio_len, "lanstreamer")) {
		printk(KERN_ERR "lanstreamer: unable to get pci mmio addr %x\n",mmio_start);
		rc= -EBUSY;
		goto err_out_free_pio;
	}

	streamer_priv->streamer_mmio=ioremap(mmio_start, mmio_len);
	if (streamer_priv->streamer_mmio == NULL) {
		printk(KERN_ERR "lanstreamer: unable to remap MMIO %x\n",mmio_start);
		rc= -EIO;
		goto err_out_free_mmio;
	}

	init_waitqueue_head(&streamer_priv->srb_wait);
	init_waitqueue_head(&streamer_priv->trb_wait);

	/* net_device operations. */
	dev->open = &streamer_open;
	dev->hard_start_xmit = &streamer_xmit;
	dev->change_mtu = &streamer_change_mtu;
	dev->stop = &streamer_close;
#if STREAMER_IOCTL
	dev->do_ioctl = &streamer_ioctl;
#else
	dev->do_ioctl = NULL;
#endif
	dev->set_multicast_list = &streamer_set_rx_mode;
	dev->get_stats = &streamer_get_stats;
	dev->set_mac_address = &streamer_set_mac_address;
	dev->irq = pdev->irq;
	dev->base_addr=pio_start;
	streamer_priv->streamer_card_name = (char *)pdev->resource[0].name;
	streamer_priv->pci_dev=pdev;

	/* Per-card module parameters (with range check on the buffer size). */
	if ((pkt_buf_sz[card_no] < 100) || (pkt_buf_sz[card_no] > 18000))
		streamer_priv->pkt_buf_sz = PKT_BUF_SZ;
	else
		streamer_priv->pkt_buf_sz = pkt_buf_sz[card_no];

	streamer_priv->streamer_ring_speed = ringspeed[card_no];
	streamer_priv->streamer_message_level = message_level[card_no];

	pci_set_drvdata(pdev, dev);

	spin_lock_init(&streamer_priv->streamer_lock);

	/* Correct a BIOS/firmware-misprogrammed PCI cache line size. */
	pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &cls);
	cls <<= 2;
	if (cls != SMP_CACHE_BYTES) {
		printk(KERN_INFO "  PCI cache line size set incorrectly "
		       "(%i bytes) by BIOS/FW, ", cls);
		if (cls > SMP_CACHE_BYTES)
			printk("expecting %i\n", SMP_CACHE_BYTES);
		else {
			printk("correcting to %i\n", SMP_CACHE_BYTES);
			pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
					      SMP_CACHE_BYTES >> 2);
		}
	}

	/* Enable memory-write-invalidate and SERR reporting. */
	pci_read_config_word (pdev, PCI_COMMAND, &pcr);
	pcr |= (PCI_COMMAND_INVALIDATE | PCI_COMMAND_SERR);
	pci_write_config_word (pdev, PCI_COMMAND, pcr);
	pci_read_config_word (pdev, PCI_COMMAND, &pcr);

	printk("%s \n", version);
	printk("%s: %s. I/O at %hx, MMIO at %p, using irq %d\n",dev->name,
		streamer_priv->streamer_card_name,
		(unsigned int) dev->base_addr,
		streamer_priv->streamer_mmio,
		dev->irq);

	if (!streamer_reset(dev)) {
		return 0;
	}

	/* streamer_reset() failed: report the failure instead of silently
	 * returning 0 while tearing the device down (original bug). */
	rc = -EIO;

	iounmap(streamer_priv->streamer_mmio);
err_out_free_mmio:
	release_mem_region(mmio_start, mmio_len);
err_out_free_pio:
	release_region(pio_start, pio_len);
err_out:
	unregister_trdev(dev);
	kfree(dev);
#if STREAMER_DEBUG
	printk("lanstreamer: Exit error %x\n",rc);
#endif
	return rc;
}
/*
 * streamer_remove_one - PCI remove routine: unlink the adapter from the
 * proc-monitor list, unregister the netdev, and release all resources
 * acquired in streamer_init_one().
 *
 * Fix vs. original: the MMIO mapping created with ioremap() in
 * streamer_init_one() was never unmapped here, leaking the mapping on
 * every hot-unplug / module unload.  iounmap() is now called before the
 * regions are released.
 */
static void __devexit streamer_remove_one(struct pci_dev *pdev) {
	struct net_device *dev=pci_get_drvdata(pdev);
	struct streamer_private *streamer_priv;

#if STREAMER_DEBUG
	printk("lanstreamer::streamer_remove_one entry pdev %p\n",pdev);
#endif

	if (dev == NULL) {
		printk(KERN_ERR "lanstreamer::streamer_remove_one, ERROR dev is NULL\n");
		return;
	}

	streamer_priv=dev->priv;
	if (streamer_priv == NULL) {
		printk(KERN_ERR "lanstreamer::streamer_remove_one, ERROR dev->priv is NULL\n");
		return;
	}

#if STREAMER_NETWORK_MONITOR
#ifdef CONFIG_PROC_FS
	{
		/* Unlink this adapter from the singly linked proc list; drop
		 * the shared /proc entry when the last adapter goes away. */
		struct streamer_private *slast;
		struct streamer_private *scurrent;
		if (streamer_priv == dev_streamer) {
			dev_streamer=dev_streamer->next;
		} else {
			for(slast=scurrent=dev_streamer; dev_streamer; slast=scurrent, scurrent=scurrent->next) {
				if (scurrent == streamer_priv) {
					slast->next=scurrent->next;
					break;
				}
			}
		}
		if (!dev_streamer) {
			remove_proc_entry("net/streamer_tr", NULL);
		}
	}
#endif
#endif

	unregister_trdev(dev);
	iounmap(streamer_priv->streamer_mmio);	/* was leaked in the original */
	release_region(pci_resource_start(pdev, 0), pci_resource_len(pdev,0));
	release_mem_region(pci_resource_start(pdev, 1), pci_resource_len(pdev,1));
	kfree(dev);
	pci_set_drvdata(pdev, NULL);
}
/*
 * streamer_reset - soft-reset the adapter and run its self-initialisation.
 *
 * Pulses BCTL_SOFTRESET, optionally posts one diagnostic rx buffer,
 * waits for the SRB init reply, checks the init return code and then
 * reads the burnt-in MAC address into dev->dev_addr.
 *
 * Returns 0 on success, -1 if the card never replies or reports an
 * initialisation error.  May sleep (schedule_timeout), so callers must
 * be in process context.
 */
static int streamer_reset(struct net_device *dev)
{
struct streamer_private *streamer_priv;
__u8 *streamer_mmio;
unsigned long t;
unsigned int uaa_addr;
struct sk_buff *skb = 0;
__u16 misr;
streamer_priv = (struct streamer_private *) dev->priv;
streamer_mmio = streamer_priv->streamer_mmio;
/* Assert the soft-reset bit in the bus control register */
writew(readw(streamer_mmio + BCTL) | BCTL_SOFTRESET, streamer_mmio + BCTL);
t = jiffies;
/* Hold soft reset bit for a while */
current->state = TASK_UNINTERRUPTIBLE;
schedule_timeout(HZ);
/* Release reset */
writew(readw(streamer_mmio + BCTL) & ~BCTL_SOFTRESET,
streamer_mmio + BCTL);
#if STREAMER_DEBUG
printk("BCTL: %x\n", readw(streamer_mmio + BCTL));
printk("GPR: %x\n", readw(streamer_mmio + GPR));
printk("SISRMASK: %x\n", readw(streamer_mmio + SISR_MASK));
#endif
/* Select 8-deep rx/tx FIFO thresholds */
writew(readw(streamer_mmio + BCTL) | (BCTL_RX_FIFO_8 | BCTL_TX_FIFO_8), streamer_mmio + BCTL );
/* Program the requested ring speed (0 = autosense) into GPR */
if (streamer_priv->streamer_ring_speed == 0) {	/* Autosense */
writew(readw(streamer_mmio + GPR) | GPR_AUTOSENSE,
streamer_mmio + GPR);
if (streamer_priv->streamer_message_level)
printk(KERN_INFO "%s: Ringspeed autosense mode on\n",
dev->name);
} else if (streamer_priv->streamer_ring_speed == 16) {
if (streamer_priv->streamer_message_level)
printk(KERN_INFO "%s: Trying to open at 16 Mbps as requested\n",
dev->name);
writew(GPR_16MBPS, streamer_mmio + GPR);
} else if (streamer_priv->streamer_ring_speed == 4) {
if (streamer_priv->streamer_message_level)
printk(KERN_INFO "%s: Trying to open at 4 Mbps as requested\n",
dev->name);
writew(0, streamer_mmio + GPR);
}
/* Post a single throw-away rx buffer so the card's self-test has
 * somewhere to DMA.  The skb doubles as descriptor + data: the
 * descriptor lives at the start of skb->data and the 512-byte data
 * area follows it.  Failure here is non-fatal.
 */
skb = dev_alloc_skb(streamer_priv->pkt_buf_sz);
if (!skb) {
printk(KERN_INFO "%s: skb allocation for diagnostics failed...proceeding\n",
dev->name);
} else {
struct streamer_rx_desc *rx_ring;
u8 *data;
rx_ring=(struct streamer_rx_desc *)skb->data;
data=((u8 *)skb->data)+sizeof(struct streamer_rx_desc);
rx_ring->forward=0;
rx_ring->status=0;
rx_ring->buffer=cpu_to_le32(pci_map_single(streamer_priv->pci_dev, data,
512, PCI_DMA_FROMDEVICE));
rx_ring->framelen_buflen=512;
/* Hand the descriptor's bus address to the adapter */
writel(cpu_to_le32(pci_map_single(streamer_priv->pci_dev, rx_ring, 512, PCI_DMA_FROMDEVICE)),
streamer_mmio+RXBDA);
}
#if STREAMER_DEBUG
printk("GPR = %x\n", readw(streamer_mmio + GPR));
#endif
/* start solo init */
writew(SISR_MI, streamer_mmio + SISR_MASK_SUM);
/* Poll (sleeping 100ms per pass) for the SRB init-complete reply.
 * NOTE(review): "jiffies - t > 40 * HZ" is not wraparound-safe; the
 * time_after() macro would be the robust form - confirm before
 * changing, as 't' was taken before the 1-second reset hold above.
 */
while (!((readw(streamer_mmio + SISR)) & SISR_SRB_REPLY)) {
current->state = TASK_INTERRUPTIBLE;
schedule_timeout(HZ/10);
if (jiffies - t > 40 * HZ) {
printk(KERN_ERR
"IBM PCI tokenring card not responding\n");
release_region(dev->base_addr, STREAMER_IO_SPACE);
if (skb)
dev_kfree_skb(skb);
return -1;
}
}
/* Acknowledge the SRB reply and clear any pending bus-master ints */
writew(~SISR_SRB_REPLY, streamer_mmio + SISR_RUM);
misr = readw(streamer_mmio + MISR_RUM);
writew(~misr, streamer_mmio + MISR_RUM);
if (skb)
dev_kfree_skb(skb);	/* release skb used for diagnostics */
#if STREAMER_DEBUG
printk("LAPWWO: %x, LAPA: %x LAPE: %x\n",
readw(streamer_mmio + LAPWWO), readw(streamer_mmio + LAPA),
readw(streamer_mmio + LAPE));
#endif
#if STREAMER_DEBUG
{
int i;
writew(readw(streamer_mmio + LAPWWO),
streamer_mmio + LAPA);
printk("initialization response srb dump: ");
for (i = 0; i < 10; i++)
printk("%x:",
ntohs(readw(streamer_mmio + LAPDINC)));
printk("\n");
}
#endif
/* Word at SRB offset 6 is the init return code; non-zero = failure */
writew(readw(streamer_mmio + LAPWWO) + 6, streamer_mmio + LAPA);
if (readw(streamer_mmio + LAPD)) {
printk(KERN_INFO "tokenring card intialization failed. errorcode : %x\n",
ntohs(readw(streamer_mmio + LAPD)));
release_region(dev->base_addr, STREAMER_IO_SPACE);
return -1;
}
/* SRB offset 8 onwards: UAA address, (skipped) level.addr, address
 * table offset and parms offset - all in adapter shared RAM.
 */
writew(readw(streamer_mmio + LAPWWO) + 8, streamer_mmio + LAPA);
uaa_addr = ntohs(readw(streamer_mmio + LAPDINC));
readw(streamer_mmio + LAPDINC);	/* skip over Level.Addr field */
streamer_priv->streamer_addr_table_addr = ntohs(readw(streamer_mmio + LAPDINC));
streamer_priv->streamer_parms_addr = ntohs(readw(streamer_mmio + LAPDINC));
#if STREAMER_DEBUG
printk("UAA resides at %x\n", uaa_addr);
#endif
/* setup uaa area for access with LAPD */
{
int i;
__u16 addr;
writew(uaa_addr, streamer_mmio + LAPA);
/* MAC address is read as three big-endian 16-bit words */
for (i = 0; i < 6; i += 2) {
addr=ntohs(readw(streamer_mmio+LAPDINC));
dev->dev_addr[i]= (addr >> 8) & 0xff;
dev->dev_addr[i+1]= addr & 0xff;
}
#if STREAMER_DEBUG
printk("Adapter address: ");
for (i = 0; i < 6; i++) {
printk("%02x:", dev->dev_addr[i]);
}
printk("\n");
#endif
}
return 0;
}
/*
 * streamer_open - bring the adapter up (net_device->open).
 *
 * Issues the OPEN.ADAPTER SRB, waits for the reply (retrying once at a
 * different ring speed when autosensing), parses the open response,
 * then allocates and publishes the rx/tx DMA descriptor rings and
 * unmasks the interrupt sources.
 *
 * Returns 0 on success or a negative errno.  Fixes applied: several
 * error paths returned -EIO without releasing the IRQ acquired by
 * request_irq() (open-timeout path, rx-ring and tx-ring allocation
 * failures), and the "no rx buffers" path leaked the freshly
 * allocated rx descriptor ring.
 */
static int streamer_open(struct net_device *dev)
{
	struct streamer_private *streamer_priv = (struct streamer_private *) dev->priv;
	__u8 *streamer_mmio = streamer_priv->streamer_mmio;
	unsigned long flags;
	char open_error[255];
	int i, open_finished = 1;
	__u16 srb_word;
	__u16 srb_open;

	/* If the rx channel is still enabled from a previous life, reset the
	 * adapter first.  The result is deliberately ignored, as before.
	 */
	if (readw(streamer_mmio + BMCTL_SUM) & BMCTL_RX_ENABLED) {
		(void) streamer_reset(dev);
	}

	if (request_irq(dev->irq, &streamer_interrupt, SA_SHIRQ, "lanstreamer", dev)) {
		return -EAGAIN;
	}
#if STREAMER_DEBUG
	printk("BMCTL: %x\n", readw(streamer_mmio + BMCTL_SUM));
	printk("pending ints: %x\n", readw(streamer_mmio + SISR));
#endif
	writew(SISR_MI | SISR_SRB_REPLY, streamer_mmio + SISR_MASK);	/* more ints later, doesn't stop arb cmd interrupt */
	writew(LISR_LIE, streamer_mmio + LISR);	/* more ints later */
	/* adapter is closed, so SRB is pointed to by LAPWWO */
	writew(readw(streamer_mmio + LAPWWO), streamer_mmio + LAPA);
#if STREAMER_DEBUG
	printk("LAPWWO: %x, LAPA: %x\n", readw(streamer_mmio + LAPWWO),
	       readw(streamer_mmio + LAPA));
	printk("LAPE: %x\n", readw(streamer_mmio + LAPE));
	printk("SISR Mask = %04x\n", readw(streamer_mmio + SISR_MASK));
#endif
	do {
		int i;

		/* Clear the SRB command area, then build the OPEN.ADAPTER SRB */
		for (i = 0; i < SRB_COMMAND_SIZE; i += 2) {
			writew(0, streamer_mmio + LAPDINC);
		}
		writew(readw(streamer_mmio + LAPWWO), streamer_mmio + LAPA);
		writew(htons(SRB_OPEN_ADAPTER << 8), streamer_mmio + LAPDINC);	/* open */
		writew(htons(STREAMER_CLEAR_RET_CODE << 8), streamer_mmio + LAPDINC);
		writew(STREAMER_CLEAR_RET_CODE, streamer_mmio + LAPDINC);

		writew(readw(streamer_mmio + LAPWWO) + 8, streamer_mmio + LAPA);
#if STREAMER_NETWORK_MONITOR
		/* If Network Monitor, instruct card to copy MAC frames through the ARB */
		writew(htons(OPEN_ADAPTER_ENABLE_FDX | OPEN_ADAPTER_PASS_ADC_MAC | OPEN_ADAPTER_PASS_ATT_MAC | OPEN_ADAPTER_PASS_BEACON), streamer_mmio + LAPDINC);	/* offset 8 word contains open options */
#else
		writew(htons(OPEN_ADAPTER_ENABLE_FDX), streamer_mmio + LAPDINC);	/* Offset 8 word contains Open.Options */
#endif
		/* A locally administered address overrides the burnt-in one */
		if (streamer_priv->streamer_laa[0]) {
			writew(readw(streamer_mmio + LAPWWO) + 12, streamer_mmio + LAPA);
			writew(htons((streamer_priv->streamer_laa[0] << 8) |
				     streamer_priv->streamer_laa[1]), streamer_mmio + LAPDINC);
			writew(htons((streamer_priv->streamer_laa[2] << 8) |
				     streamer_priv->streamer_laa[3]), streamer_mmio + LAPDINC);
			writew(htons((streamer_priv->streamer_laa[4] << 8) |
				     streamer_priv->streamer_laa[5]), streamer_mmio + LAPDINC);
			memcpy(dev->dev_addr, streamer_priv->streamer_laa, dev->addr_len);
		}

		/* save off srb open offset */
		srb_open = readw(streamer_mmio + LAPWWO);
#if STREAMER_DEBUG
		writew(readw(streamer_mmio + LAPWWO),
		       streamer_mmio + LAPA);
		printk("srb open request: \n");
		for (i = 0; i < 16; i++) {
			printk("%x:", ntohs(readw(streamer_mmio + LAPDINC)));
		}
		printk("\n");
#endif
		spin_lock_irqsave(&streamer_priv->streamer_lock, flags);
		streamer_priv->srb_queued = 1;	/* interrupt handler will wake us */

		/* signal solo that SRB command has been issued */
		writew(LISR_SRB_CMD, streamer_mmio + LISR_SUM);
		spin_unlock_irqrestore(&streamer_priv->streamer_lock, flags);

		while (streamer_priv->srb_queued) {
			interruptible_sleep_on_timeout(&streamer_priv->srb_wait, 5 * HZ);
			if (signal_pending(current)) {
				printk(KERN_WARNING "%s: SRB timed out.\n", dev->name);
				printk(KERN_WARNING "SISR=%x MISR=%x, LISR=%x\n",
				       readw(streamer_mmio + SISR),
				       readw(streamer_mmio + MISR_RUM),
				       readw(streamer_mmio + LISR));
				streamer_priv->srb_queued = 0;
				break;
			}
		}

#if STREAMER_DEBUG
		printk("SISR_MASK: %x\n", readw(streamer_mmio + SISR_MASK));
		printk("srb open response:\n");
		writew(srb_open, streamer_mmio + LAPA);
		for (i = 0; i < 10; i++) {
			printk("%x:",
			       ntohs(readw(streamer_mmio + LAPDINC)));
		}
#endif

		/* If we get the same return response as we set, the interrupt wasn't raised and the open
		 * timed out.
		 */
		writew(srb_open + 2, streamer_mmio + LAPA);
		srb_word = ntohs(readw(streamer_mmio + LAPD)) >> 8;
		if (srb_word == STREAMER_CLEAR_RET_CODE) {
			printk(KERN_WARNING "%s: Adapter Open time out or error.\n",
			       dev->name);
			free_irq(dev->irq, dev);	/* fix: was leaked on this path */
			return -EIO;
		}

		if (srb_word != 0) {
			if (srb_word == 0x07) {	/* open failed: ring speed problem */
				if (!streamer_priv->streamer_ring_speed && open_finished) {	/* Autosense , first time around */
					printk(KERN_WARNING "%s: Retrying at different ring speed \n",
					       dev->name);
					open_finished = 0;
				} else {
					__u16 error_code;

					writew(srb_open + 6, streamer_mmio + LAPA);
					error_code = ntohs(readw(streamer_mmio + LAPD));
					strcpy(open_error, open_maj_error[(error_code & 0xf0) >> 4]);
					strcat(open_error, " - ");
					strcat(open_error, open_min_error[(error_code & 0x0f)]);

					if (!streamer_priv->streamer_ring_speed
					    && ((error_code & 0x0f) == 0x0d)) {
						printk(KERN_WARNING "%s: Tried to autosense ring speed with no monitors present\n", dev->name);
						printk(KERN_WARNING "%s: Please try again with a specified ring speed \n", dev->name);
						free_irq(dev->irq, dev);
						return -EIO;
					}
					printk(KERN_WARNING "%s: %s\n",
					       dev->name, open_error);
					free_irq(dev->irq, dev);
					return -EIO;
				}	/* if autosense && open_finished */
			} else {
				printk(KERN_WARNING "%s: Bad OPEN response: %x\n",
				       dev->name, srb_word);
				free_irq(dev->irq, dev);
				return -EIO;
			}
		} else
			open_finished = 1;
	} while (!(open_finished));	/* Will only loop if ring speed mismatch re-open attempted && autosense is on */

	/* Open status word at offset 18: bit 3 = FDX, bit 0 = 16 Mbps */
	writew(srb_open + 18, streamer_mmio + LAPA);
	srb_word = ntohs(readw(streamer_mmio + LAPD)) >> 8;
	if (srb_word & (1 << 3))
		if (streamer_priv->streamer_message_level)
			printk(KERN_INFO "%s: Opened in FDX Mode\n", dev->name);

	if (srb_word & 1)
		streamer_priv->streamer_ring_speed = 16;
	else
		streamer_priv->streamer_ring_speed = 4;

	if (streamer_priv->streamer_message_level)
		printk(KERN_INFO "%s: Opened in %d Mbps mode\n",
		       dev->name,
		       streamer_priv->streamer_ring_speed);

	/* Offsets 8..16 of the reply: asb/srb/arb/(rsvd)/trb shared-memory
	 * offsets used by all later adapter commands.
	 */
	writew(srb_open + 8, streamer_mmio + LAPA);
	streamer_priv->asb = ntohs(readw(streamer_mmio + LAPDINC));
	streamer_priv->srb = ntohs(readw(streamer_mmio + LAPDINC));
	streamer_priv->arb = ntohs(readw(streamer_mmio + LAPDINC));
	readw(streamer_mmio + LAPDINC);	/* offset 14 word is rsvd */
	streamer_priv->trb = ntohs(readw(streamer_mmio + LAPDINC));

	streamer_priv->streamer_receive_options = 0x00;
	streamer_priv->streamer_copy_all_options = 0;

	/* setup rx ring */
	/* enable rx channel */
	writew(~BMCTL_RX_DIS, streamer_mmio + BMCTL_RUM);

	/* setup rx descriptors */
	streamer_priv->streamer_rx_ring =
	    kmalloc(sizeof(struct streamer_rx_desc) *
		    STREAMER_RX_RING_SIZE, GFP_KERNEL);
	if (!streamer_priv->streamer_rx_ring) {
		printk(KERN_WARNING "%s ALLOC of streamer rx ring FAILED!!\n", dev->name);
		free_irq(dev->irq, dev);	/* fix: was leaked on this path */
		return -EIO;
	}

	for (i = 0; i < STREAMER_RX_RING_SIZE; i++) {
		struct sk_buff *skb;

		skb = dev_alloc_skb(streamer_priv->pkt_buf_sz);
		if (skb == NULL)
			break;
		skb->dev = dev;

		streamer_priv->streamer_rx_ring[i].forward =
		    cpu_to_le32(pci_map_single(streamer_priv->pci_dev, &streamer_priv->streamer_rx_ring[i + 1],
					       sizeof(struct streamer_rx_desc), PCI_DMA_FROMDEVICE));
		streamer_priv->streamer_rx_ring[i].status = 0;
		streamer_priv->streamer_rx_ring[i].buffer =
		    cpu_to_le32(pci_map_single(streamer_priv->pci_dev, skb->data,
					       streamer_priv->pkt_buf_sz, PCI_DMA_FROMDEVICE));
		streamer_priv->streamer_rx_ring[i].framelen_buflen = streamer_priv->pkt_buf_sz;
		streamer_priv->rx_ring_skb[i] = skb;
	}
	/* close the ring: last descriptor points back at the first */
	streamer_priv->streamer_rx_ring[STREAMER_RX_RING_SIZE - 1].forward =
	    cpu_to_le32(pci_map_single(streamer_priv->pci_dev, &streamer_priv->streamer_rx_ring[0],
				       sizeof(struct streamer_rx_desc), PCI_DMA_FROMDEVICE));

	if (i == 0) {
		printk(KERN_WARNING "%s: Not enough memory to allocate rx buffers. Adapter disabled\n", dev->name);
		free_irq(dev->irq, dev);
		kfree(streamer_priv->streamer_rx_ring);	/* fix: ring was leaked here */
		streamer_priv->streamer_rx_ring = NULL;
		return -EIO;
	}

	streamer_priv->rx_ring_last_received = STREAMER_RX_RING_SIZE - 1;	/* last processed rx status */

	/* hand the ring to the adapter: first and last descriptor */
	writel(cpu_to_le32(pci_map_single(streamer_priv->pci_dev, &streamer_priv->streamer_rx_ring[0],
					  sizeof(struct streamer_rx_desc), PCI_DMA_TODEVICE)),
	       streamer_mmio + RXBDA);
	writel(cpu_to_le32(pci_map_single(streamer_priv->pci_dev, &streamer_priv->streamer_rx_ring[STREAMER_RX_RING_SIZE - 1],
					  sizeof(struct streamer_rx_desc), PCI_DMA_TODEVICE)),
	       streamer_mmio + RXLBDA);

	/* set bus master interrupt event mask */
	writew(MISR_RX_NOBUF | MISR_RX_EOF, streamer_mmio + MISR_MASK);

	/* setup tx ring */
	streamer_priv->streamer_tx_ring = kmalloc(sizeof(struct streamer_tx_desc) *
						  STREAMER_TX_RING_SIZE, GFP_KERNEL);
	if (!streamer_priv->streamer_tx_ring) {
		printk(KERN_WARNING "%s ALLOC of streamer_tx_ring FAILED\n", dev->name);
		free_irq(dev->irq, dev);	/* fix: was leaked on this path */
		/* NOTE(review): the rx ring and its skbs are deliberately NOT
		 * freed here - the adapter has already been given the ring via
		 * RXBDA/RXLBDA with the rx channel enabled, so releasing that
		 * memory would invite DMA into freed pages.  A full fix would
		 * quiesce the rx channel first.
		 */
		return -EIO;
	}

	writew(~BMCTL_TX2_DIS, streamer_mmio + BMCTL_RUM);	/* Enables TX channel 2 */
	for (i = 0; i < STREAMER_TX_RING_SIZE; i++) {
		streamer_priv->streamer_tx_ring[i].forward = cpu_to_le32(pci_map_single(streamer_priv->pci_dev,
											&streamer_priv->streamer_tx_ring[i + 1],
											sizeof(struct streamer_tx_desc),
											PCI_DMA_TODEVICE));
		streamer_priv->streamer_tx_ring[i].status = 0;
		streamer_priv->streamer_tx_ring[i].bufcnt_framelen = 0;
		streamer_priv->streamer_tx_ring[i].buffer = 0;
		streamer_priv->streamer_tx_ring[i].buflen = 0;
		streamer_priv->streamer_tx_ring[i].rsvd1 = 0;
		streamer_priv->streamer_tx_ring[i].rsvd2 = 0;
		streamer_priv->streamer_tx_ring[i].rsvd3 = 0;
	}
	streamer_priv->streamer_tx_ring[STREAMER_TX_RING_SIZE - 1].forward =
	    cpu_to_le32(pci_map_single(streamer_priv->pci_dev, &streamer_priv->streamer_tx_ring[0],
				       sizeof(struct streamer_tx_desc), PCI_DMA_TODEVICE));

	streamer_priv->free_tx_ring_entries = STREAMER_TX_RING_SIZE;
	streamer_priv->tx_ring_free = 0;	/* next entry in tx ring to use */
	streamer_priv->tx_ring_last_status = STREAMER_TX_RING_SIZE - 1;

	/* set Busmaster interrupt event mask (handle receives on interrupt only */
	writew(MISR_TX2_EOF | MISR_RX_NOBUF | MISR_RX_EOF, streamer_mmio + MISR_MASK);
	/* set system event interrupt mask */
	writew(SISR_ADAPTER_CHECK | SISR_ARB_CMD | SISR_TRB_REPLY | SISR_ASB_FREE, streamer_mmio + SISR_MASK_SUM);

#if STREAMER_DEBUG
	printk("BMCTL: %x\n", readw(streamer_mmio + BMCTL_SUM));
	printk("SISR MASK: %x\n", readw(streamer_mmio + SISR_MASK));
#endif

#if STREAMER_NETWORK_MONITOR
	writew(streamer_priv->streamer_addr_table_addr, streamer_mmio + LAPA);
	printk("%s: Node Address: %04x:%04x:%04x\n", dev->name,
	       ntohs(readw(streamer_mmio + LAPDINC)),
	       ntohs(readw(streamer_mmio + LAPDINC)),
	       ntohs(readw(streamer_mmio + LAPDINC)));
	readw(streamer_mmio + LAPDINC);
	readw(streamer_mmio + LAPDINC);
	printk("%s: Functional Address: %04x:%04x\n", dev->name,
	       ntohs(readw(streamer_mmio + LAPDINC)),
	       ntohs(readw(streamer_mmio + LAPDINC)));

	writew(streamer_priv->streamer_parms_addr + 4,
	       streamer_mmio + LAPA);
	printk("%s: NAUN Address: %04x:%04x:%04x\n", dev->name,
	       ntohs(readw(streamer_mmio + LAPDINC)),
	       ntohs(readw(streamer_mmio + LAPDINC)),
	       ntohs(readw(streamer_mmio + LAPDINC)));
#endif

	netif_start_queue(dev);
	return 0;
}
/*
* When we enter the rx routine we do not know how many frames have been
* queued on the rx channel. Therefore we start at the next rx status
* position and travel around the receive ring until we have completed
* all the frames.
*
* This means that we may process the frame before we receive the end
* of frame interrupt. This is why we always test the status instead
* of blindly processing the next frame.
*
*/
/*
 * streamer_rx - drain completed rx descriptors and pass frames up.
 *
 * Called from the interrupt handler on MISR_RX_EOF.  Walks the rx ring
 * starting one past the last processed descriptor and handles every
 * descriptor whose "processed" status bit (0x01000000) is set.  Frames
 * that fit one buffer are handed up zero-copy (the skb is swapped for a
 * fresh one); multi-buffer frames are assembled by copying fragments.
 */
static void streamer_rx(struct net_device *dev)
{
struct streamer_private *streamer_priv =
(struct streamer_private *) dev->priv;
__u8 *streamer_mmio = streamer_priv->streamer_mmio;
struct streamer_rx_desc *rx_desc;
int rx_ring_last_received, length, frame_length, buffer_cnt = 0;
struct sk_buff *skb, *skb2;
/* setup the next rx descriptor to be received */
rx_desc = &streamer_priv->streamer_rx_ring[(streamer_priv->rx_ring_last_received + 1) & (STREAMER_RX_RING_SIZE - 1)];
rx_ring_last_received = streamer_priv->rx_ring_last_received;
while (rx_desc->status & 0x01000000) {	/* While processed descriptors are available */
/* Sanity check: local copy must track the priv counter */
if (rx_ring_last_received != streamer_priv->rx_ring_last_received)
{
printk(KERN_WARNING "RX Error 1 rx_ring_last_received not the same %x %x\n",
rx_ring_last_received, streamer_priv->rx_ring_last_received);
}
streamer_priv->rx_ring_last_received = (streamer_priv->rx_ring_last_received + 1) & (STREAMER_RX_RING_SIZE - 1);
rx_ring_last_received = streamer_priv->rx_ring_last_received;
/* low 16 bits: this buffer's length; high 16 bits: whole frame */
length = rx_desc->framelen_buflen & 0xffff;	/* buffer length */
frame_length = (rx_desc->framelen_buflen >> 16) & 0xffff;
if (rx_desc->status & 0x7E830000) {	/* errors */
if (streamer_priv->streamer_message_level) {
printk(KERN_WARNING "%s: Rx Error %x \n",
dev->name, rx_desc->status);
}
} else {	/* received without errors */
/* status bit 31 set = frame fully contained in this buffer:
 * allocate a full-size replacement skb and hand the filled one
 * up zero-copy.  Otherwise allocate a frame-sized skb to copy
 * the fragments into.
 */
if (rx_desc->status & 0x80000000) {	/* frame complete */
buffer_cnt = 1;
skb = dev_alloc_skb(streamer_priv->pkt_buf_sz);
} else {
skb = dev_alloc_skb(frame_length);
}
if (skb == NULL)
{
printk(KERN_WARNING "%s: Not enough memory to copy packet to upper layers. \n", dev->name);
streamer_priv->streamer_stats.rx_dropped++;
} else {	/* we allocated an skb OK */
skb->dev = dev;
if (buffer_cnt == 1) {
/* release the DMA mapping */
pci_unmap_single(streamer_priv->pci_dev,
le32_to_cpu(streamer_priv->streamer_rx_ring[rx_ring_last_received].buffer),
streamer_priv->pkt_buf_sz,
PCI_DMA_FROMDEVICE);
skb2 = streamer_priv->rx_ring_skb[rx_ring_last_received];
#if STREAMER_DEBUG_PACKETS
{
int i;
printk("streamer_rx packet print: skb->data2 %p skb->head %p\n", skb2->data, skb2->head);
for (i = 0; i < frame_length; i++)
{
printk("%x:", skb2->data[i]);
if (((i + 1) % 16) == 0)
printk("\n");
}
printk("\n");
}
#endif
skb_put(skb2, length);
skb2->protocol = tr_type_trans(skb2, dev);
/* recycle this descriptor */
streamer_priv->streamer_rx_ring[rx_ring_last_received].status = 0;
streamer_priv->streamer_rx_ring[rx_ring_last_received].framelen_buflen = streamer_priv->pkt_buf_sz;
streamer_priv->streamer_rx_ring[rx_ring_last_received].buffer =
cpu_to_le32(pci_map_single(streamer_priv->pci_dev, skb->data, streamer_priv->pkt_buf_sz,
PCI_DMA_FROMDEVICE));
streamer_priv->rx_ring_skb[rx_ring_last_received] = skb;
/* place recycled descriptor back on the adapter */
writel(cpu_to_le32(pci_map_single(streamer_priv->pci_dev,
&streamer_priv->streamer_rx_ring[rx_ring_last_received],
sizeof(struct streamer_rx_desc), PCI_DMA_FROMDEVICE)),
streamer_mmio + RXLBDA);
/* pass the received skb up to the protocol */
netif_rx(skb2);
} else {
do {	/* Walk the buffers */
/* NOTE(review): rx_desc->buffer holds a little-endian
 * *bus* address; casting it to a virtual pointer for
 * memcpy looks wrong on any platform where bus !=
 * virtual (and the comma operator here is suspicious)
 * - confirm against a known-good version of this
 * driver before relying on the multi-fragment path.
 */
pci_unmap_single(streamer_priv->pci_dev, le32_to_cpu(rx_desc->buffer), length, PCI_DMA_FROMDEVICE),
memcpy(skb_put(skb, length), (void *)rx_desc->buffer, length);	/* copy this fragment */
streamer_priv->streamer_rx_ring[rx_ring_last_received].status = 0;
streamer_priv->streamer_rx_ring[rx_ring_last_received].framelen_buflen = streamer_priv->pkt_buf_sz;
/* give descriptor back to the adapter */
writel(cpu_to_le32(pci_map_single(streamer_priv->pci_dev,
&streamer_priv->streamer_rx_ring[rx_ring_last_received],
length, PCI_DMA_FROMDEVICE)),
streamer_mmio + RXLBDA);
if (rx_desc->status & 0x80000000)
break;	/* this descriptor completes the frame */
/* else get the next pending descriptor */
if (rx_ring_last_received!= streamer_priv->rx_ring_last_received)
{
printk("RX Error rx_ring_last_received not the same %x %x\n",
rx_ring_last_received,
streamer_priv->rx_ring_last_received);
}
rx_desc = &streamer_priv->streamer_rx_ring[(streamer_priv->rx_ring_last_received+1) & (STREAMER_RX_RING_SIZE-1)];
length = rx_desc->framelen_buflen & 0xffff;	/* buffer length */
streamer_priv->rx_ring_last_received = (streamer_priv->rx_ring_last_received+1) & (STREAMER_RX_RING_SIZE - 1);
rx_ring_last_received = streamer_priv->rx_ring_last_received;
} while (1);
skb->protocol = tr_type_trans(skb, dev);
/* send up to the protocol */
netif_rx(skb);
}
dev->last_rx = jiffies;
streamer_priv->streamer_stats.rx_packets++;
streamer_priv->streamer_stats.rx_bytes += length;
}	/* if skb == null */
}	/* end received without errors */
/* try the next one */
rx_desc = &streamer_priv->streamer_rx_ring[(rx_ring_last_received + 1) & (STREAMER_RX_RING_SIZE - 1)];
}	/* end for all completed rx descriptors */
}
/*
 * streamer_interrupt - shared interrupt handler.
 *
 * Loops on the system interrupt status register (SISR), servicing one
 * event source per pass and re-reading SISR, for at most MAX_INTR
 * passes (guards against a stuck interrupt line).  Each serviced bit
 * is acknowledged via SISR_RUM/MISR_RUM followed by a read-back to
 * flush the posted write.
 */
static void streamer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
struct net_device *dev = (struct net_device *) dev_id;
struct streamer_private *streamer_priv =
(struct streamer_private *) dev->priv;
__u8 *streamer_mmio = streamer_priv->streamer_mmio;
__u16 sisr;
__u16 misr;
u8 max_intr = MAX_INTR;
spin_lock(&streamer_priv->streamer_lock);
sisr = readw(streamer_mmio + SISR);
while((sisr & (SISR_MI | SISR_SRB_REPLY | SISR_ADAPTER_CHECK | SISR_ASB_FREE |
SISR_ARB_CMD | SISR_TRB_REPLY | SISR_PAR_ERR | SISR_SERR_ERR))
&& (max_intr > 0)) {
/* PCI parity / system error: just acknowledge */
if(sisr & SISR_PAR_ERR) {
writew(~SISR_PAR_ERR, streamer_mmio + SISR_RUM);
(void)readw(streamer_mmio + SISR_RUM);
}
else if(sisr & SISR_SERR_ERR) {
writew(~SISR_SERR_ERR, streamer_mmio + SISR_RUM);
(void)readw(streamer_mmio + SISR_RUM);
}
/* Bus-master interrupt: tx completion and/or rx events */
else if(sisr & SISR_MI) {
misr = readw(streamer_mmio + MISR_RUM);
if (misr & MISR_TX2_EOF) {
/* Reap every completed tx descriptor: account stats,
 * free the skb and scrub the descriptor for reuse.
 */
while(streamer_priv->streamer_tx_ring[(streamer_priv->tx_ring_last_status + 1) & (STREAMER_TX_RING_SIZE - 1)].status) {
streamer_priv->tx_ring_last_status = (streamer_priv->tx_ring_last_status + 1) & (STREAMER_TX_RING_SIZE - 1);
streamer_priv->free_tx_ring_entries++;
streamer_priv->streamer_stats.tx_bytes += streamer_priv->tx_ring_skb[streamer_priv->tx_ring_last_status]->len;
streamer_priv->streamer_stats.tx_packets++;
dev_kfree_skb_irq(streamer_priv->tx_ring_skb[streamer_priv->tx_ring_last_status]);
streamer_priv->streamer_tx_ring[streamer_priv->tx_ring_last_status].buffer = 0xdeadbeef;
streamer_priv->streamer_tx_ring[streamer_priv->tx_ring_last_status].status = 0;
streamer_priv->streamer_tx_ring[streamer_priv->tx_ring_last_status].bufcnt_framelen = 0;
streamer_priv->streamer_tx_ring[streamer_priv->tx_ring_last_status].buflen = 0;
streamer_priv->streamer_tx_ring[streamer_priv->tx_ring_last_status].rsvd1 = 0;
streamer_priv->streamer_tx_ring[streamer_priv->tx_ring_last_status].rsvd2 = 0;
streamer_priv->streamer_tx_ring[streamer_priv->tx_ring_last_status].rsvd3 = 0;
}
netif_wake_queue(dev);
}
if (misr & MISR_RX_EOF) {
streamer_rx(dev);
}
/* MISR_RX_EOF */
if (misr & MISR_RX_NOBUF) {
/* According to the documentation, we don't have to do anything,
 * but trapping it keeps it out of /var/log/messages.
 */
}	/* SISR_RX_NOBUF */
writew(~misr, streamer_mmio + MISR_RUM);
(void)readw(streamer_mmio + MISR_RUM);
}
/* SRB reply: wake the sleeping issuer, or run the bottom half */
else if (sisr & SISR_SRB_REPLY) {
if (streamer_priv->srb_queued == 1) {
wake_up_interruptible(&streamer_priv->srb_wait);
} else if (streamer_priv->srb_queued == 2) {
streamer_srb_bh(dev);
}
streamer_priv->srb_queued = 0;
writew(~SISR_SRB_REPLY, streamer_mmio + SISR_RUM);
(void)readw(streamer_mmio + SISR_RUM);
}
else if (sisr & SISR_ADAPTER_CHECK) {
printk(KERN_WARNING "%s: Adapter Check Interrupt Raised, 8 bytes of information follow:\n", dev->name);
writel(readl(streamer_mmio + LAPWWO), streamer_mmio + LAPA);
printk(KERN_WARNING "%s: Words %x:%x:%x:%x:\n",
dev->name, readw(streamer_mmio + LAPDINC),
ntohs(readw(streamer_mmio + LAPDINC)),
ntohs(readw(streamer_mmio + LAPDINC)),
ntohs(readw(streamer_mmio + LAPDINC)));
/* NOTE(review): free_irq() from hard interrupt context, with the
 * driver spinlock held, is not permitted by the kernel irq API -
 * this path should defer the teardown; confirm before relying on
 * adapter-check recovery.
 */
free_irq(dev->irq, dev);
}
/* SISR_ADAPTER_CHECK */
else if (sisr & SISR_ASB_FREE) {
/* Wake up anything that is waiting for the asb response */
if (streamer_priv->asb_queued) {
streamer_asb_bh(dev);
}
writew(~SISR_ASB_FREE, streamer_mmio + SISR_RUM);
(void)readw(streamer_mmio + SISR_RUM);
}
/* SISR_ASB_FREE */
else if (sisr & SISR_ARB_CMD) {
streamer_arb_cmd(dev);
writew(~SISR_ARB_CMD, streamer_mmio + SISR_RUM);
(void)readw(streamer_mmio + SISR_RUM);
}
/* SISR_ARB_CMD */
else if (sisr & SISR_TRB_REPLY) {
/* Wake up anything that is waiting for the trb response */
if (streamer_priv->trb_queued) {
wake_up_interruptible(&streamer_priv->
trb_wait);
}
streamer_priv->trb_queued = 0;
writew(~SISR_TRB_REPLY, streamer_mmio + SISR_RUM);
(void)readw(streamer_mmio + SISR_RUM);
}
/* SISR_TRB_REPLY */
sisr = readw(streamer_mmio + SISR);
max_intr--;
}	/* while() */
spin_unlock(&streamer_priv->streamer_lock) ;
}
/*
 * streamer_xmit - queue one skb for transmission (hard_start_xmit).
 *
 * Fills the next free tx descriptor, DMA-maps the skb data, then rings
 * the TX2LFDA doorbell (the read-back flushes the posted write so the
 * descriptor is visible before the adapter fetches it).  Returns 0 on
 * success; returns 1 with the queue stopped when the ring is full,
 * per the old netdevice API (the core will requeue the skb).
 */
static int streamer_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct streamer_private *streamer_priv =
(struct streamer_private *) dev->priv;
__u8 *streamer_mmio = streamer_priv->streamer_mmio;
unsigned long flags ;
spin_lock_irqsave(&streamer_priv->streamer_lock, flags);
if (streamer_priv->free_tx_ring_entries) {
/* Descriptor fields must all be written before the doorbell below */
streamer_priv->streamer_tx_ring[streamer_priv->tx_ring_free].status = 0;
streamer_priv->streamer_tx_ring[streamer_priv->tx_ring_free].bufcnt_framelen = 0x00020000 | skb->len;
streamer_priv->streamer_tx_ring[streamer_priv->tx_ring_free].buffer =
cpu_to_le32(pci_map_single(streamer_priv->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE));
streamer_priv->streamer_tx_ring[streamer_priv->tx_ring_free].rsvd1 = skb->len;
streamer_priv->streamer_tx_ring[streamer_priv->tx_ring_free].rsvd2 = 0;
streamer_priv->streamer_tx_ring[streamer_priv->tx_ring_free].rsvd3 = 0;
streamer_priv->tx_ring_skb[streamer_priv->tx_ring_free] = skb;
streamer_priv->free_tx_ring_entries--;
#if STREAMER_DEBUG_PACKETS
{
int i;
printk("streamer_xmit packet print:\n");
for (i = 0; i < skb->len; i++) {
printk("%x:", skb->data[i]);
if (((i + 1) % 16) == 0)
printk("\n");
}
printk("\n");
}
#endif
/* Doorbell: hand the descriptor's bus address to tx channel 2 */
writel(cpu_to_le32(pci_map_single(streamer_priv->pci_dev,
&streamer_priv->streamer_tx_ring[streamer_priv->tx_ring_free],
sizeof(struct streamer_tx_desc), PCI_DMA_TODEVICE)),
streamer_mmio + TX2LFDA);
(void)readl(streamer_mmio + TX2LFDA);	/* flush the posted write */
streamer_priv->tx_ring_free = (streamer_priv->tx_ring_free + 1) & (STREAMER_TX_RING_SIZE - 1);
spin_unlock_irqrestore(&streamer_priv->streamer_lock,flags);
return 0;
} else {
/* Ring full: stop the queue; the interrupt handler's tx-complete
 * path calls netif_wake_queue() once entries free up.
 */
netif_stop_queue(dev);
spin_unlock_irqrestore(&streamer_priv->streamer_lock,flags);
return 1;
}
}
static int streamer_close(struct net_device *dev)
{
struct streamer_private *streamer_priv =
(struct streamer_private *) dev->priv;
__u8 *streamer_mmio = streamer_priv->streamer_mmio;
unsigned long flags;
int i;
netif_stop_queue(dev);
writew(streamer_priv->srb, streamer_mmio + LAPA);
writew(htons(SRB_CLOSE_ADAPTER << 8),streamer_mmio+LAPDINC);
writew(htons(STREAMER_CLEAR_RET_CODE << 8), streamer_mmio+LAPDINC);
spin_lock_irqsave(&streamer_priv->streamer_lock, flags);
streamer_priv->srb_queued = 1;
writew(LISR_SRB_CMD, streamer_mmio + LISR_SUM);
spin_unlock_irqrestore(&streamer_priv->streamer_lock, flags);
while (streamer_priv->srb_queued)
{
interruptible_sleep_on_timeout(&streamer_priv->srb_wait,
jiffies + 60 * HZ);
if (signal_pending(current))
{
printk(KERN_WARNING "%s: SRB timed out.\n", dev->name);
printk(KERN_WARNING "SISR=%x MISR=%x LISR=%x\n",
readw(streamer_mmio + SISR),
readw(streamer_mmio + MISR_RUM),
readw(streamer_mmio + LISR));
streamer_priv->srb_queued = 0;
break;
}
}
streamer_priv->rx_ring_last_received = (streamer_priv->rx_ring_last_received + 1) & (STREAMER_RX_RING_SIZE - 1);
for (i = 0; i < STREAMER_RX_RING_SIZE; i++) {
if (streamer_priv->rx_ring_skb[streamer_priv->rx_ring_last_received]) {
dev_kfree_skb(streamer_priv->rx_ring_skb[streamer_priv->rx_ring_last_received]);
}
streamer_priv->rx_ring_last_received = (streamer_priv->rx_ring_last_received + 1) & (STREAMER_RX_RING_SIZE - 1);
}
/* reset tx/rx fifo's and busmaster logic */
/* TBD. Add graceful way to reset the LLC channel without doing a soft reset.
writel(readl(streamer_mmio+BCTL)|(3<<13),streamer_mmio+BCTL);
udelay(1);
writel(readl(streamer_mmio+BCTL)&~(3<<13),streamer_mmio+BCTL);
*/
#if STREAMER_DEBUG
writew(streamer_priv->srb, streamer_mmio + LAPA);
printk("srb): ");
for (i = 0; i < 2; i++) {
printk("%x ", ntohs(readw(streamer_mmio + LAPDINC)));
}
printk("\n");
#endif
free_irq(dev->irq, dev);
return 0;
}
/*
 * streamer_set_rx_mode - net_device->set_multicast_list handler.
 *
 * Two jobs, done one SRB at a time: (1) if the promiscuous setting
 * changed, issue SRB_MODIFY_RECEIVE_OPTIONS and return - the multicast
 * update will be picked up on a later call; (2) otherwise OR all
 * multicast addresses into a 4-byte functional address and issue
 * SRB_SET_FUNC_ADDRESS.  Both SRBs complete asynchronously via
 * streamer_srb_bh (srb_queued == 2), so this never sleeps.
 */
static void streamer_set_rx_mode(struct net_device *dev)
{
struct streamer_private *streamer_priv =
(struct streamer_private *) dev->priv;
__u8 *streamer_mmio = streamer_priv->streamer_mmio;
__u8 options = 0;
struct dev_mc_list *dmi;
unsigned char dev_mc_address[5];
int i;
/* NOTE(review): LAPA is written with writew() everywhere else in this
 * driver; the 32-bit writel() here (and below) looks inconsistent -
 * confirm against the adapter register spec before changing.
 */
writel(streamer_priv->srb, streamer_mmio + LAPA);
options = streamer_priv->streamer_copy_all_options;
if (dev->flags & IFF_PROMISC)
options |= (3 << 5);	/* All LLC and MAC frames, all through the main rx channel */
else
options &= ~(3 << 5);
/* Only issue the srb if there is a change in options */
if ((options ^ streamer_priv->streamer_copy_all_options))
{
/* Now to issue the srb command to alter the copy.all.options */
writew(htons(SRB_MODIFY_RECEIVE_OPTIONS << 8), streamer_mmio+LAPDINC);
writew(htons(STREAMER_CLEAR_RET_CODE << 8), streamer_mmio+LAPDINC);
writew(htons((streamer_priv->streamer_receive_options << 8) | options),streamer_mmio+LAPDINC);
/* Correlator text "JAMES   " identifying this SRB */
writew(htons(0x4a41),streamer_mmio+LAPDINC);
writew(htons(0x4d45),streamer_mmio+LAPDINC);
writew(htons(0x5320),streamer_mmio+LAPDINC);
writew(0x2020, streamer_mmio + LAPDINC);
streamer_priv->srb_queued = 2;	/* Can't sleep, use srb_bh */
writel(LISR_SRB_CMD, streamer_mmio + LISR_SUM);
streamer_priv->streamer_copy_all_options = options;
return;
}
/* Set the functional addresses we need for multicast */
writel(streamer_priv->srb,streamer_mmio+LAPA);
/* OR bytes 2..5 of every multicast address into the functional mask */
dev_mc_address[0] = dev_mc_address[1] = dev_mc_address[2] = dev_mc_address[3] = 0 ;
for (i=0,dmi=dev->mc_list;i < dev->mc_count; i++,dmi = dmi->next)
{
dev_mc_address[0] |= dmi->dmi_addr[2] ;
dev_mc_address[1] |= dmi->dmi_addr[3] ;
dev_mc_address[2] |= dmi->dmi_addr[4] ;
dev_mc_address[3] |= dmi->dmi_addr[5] ;
}
writew(htons(SRB_SET_FUNC_ADDRESS << 8),streamer_mmio+LAPDINC);
writew(htons(STREAMER_CLEAR_RET_CODE << 8), streamer_mmio+LAPDINC);
writew(0,streamer_mmio+LAPDINC);
writew(htons( (dev_mc_address[0] << 8) | dev_mc_address[1]),streamer_mmio+LAPDINC);
writew(htons( (dev_mc_address[2] << 8) | dev_mc_address[3]),streamer_mmio+LAPDINC);
streamer_priv->srb_queued = 2 ;
writel(LISR_SRB_CMD,streamer_mmio+LISR_SUM);
}
/*
 * streamer_srb_bh - bottom half run after a System Request Block completes
 * @dev: network device whose queued SRB command has finished
 *
 * The adapter keeps the SRB in card memory; the LAPA register selects a
 * window and the LAPDINC data port is read repeatedly to walk through it
 * (the access pattern throughout this file shows LAPDINC auto-advances).
 * Byte 0 of the SRB identifies the command that completed, byte 1 is its
 * return code.  Each command this driver issues gets its return code
 * decoded into a printk; no recovery action is taken on errors.
 */
static void streamer_srb_bh(struct net_device *dev)
{
struct streamer_private *streamer_priv = (struct streamer_private *) dev->priv;
__u8 *streamer_mmio = streamer_priv->streamer_mmio;
__u16 srb_word;
/* Point the LAP window at the SRB and fetch the command byte. */
writew(streamer_priv->srb, streamer_mmio + LAPA);
srb_word=ntohs(readw(streamer_mmio+LAPDINC)) >> 8;
switch (srb_word) {
/* SRB_MODIFY_RECEIVE_OPTIONS i.e. set_multicast_list options (promiscuous)
* At some point we should do something if we get an error, such as
* resetting the IFF_PROMISC flag in dev
*/
case SRB_MODIFY_RECEIVE_OPTIONS:
/* Second SRB byte is the return code for the command. */
srb_word=ntohs(readw(streamer_mmio+LAPDINC)) >> 8;
switch (srb_word) {
case 0x01:
printk(KERN_WARNING "%s: Unrecognized srb command\n", dev->name);
break;
case 0x04:
printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n", dev->name);
break;
default:
if (streamer_priv->streamer_message_level)
printk(KERN_WARNING "%s: Receive Options Modified to %x,%x\n",
dev->name,
streamer_priv->streamer_copy_all_options,
streamer_priv->streamer_receive_options);
break;
} /* switch srb[2] */
break;
/* SRB_SET_GROUP_ADDRESS - Multicast group setting
*/
case SRB_SET_GROUP_ADDRESS:
srb_word=ntohs(readw(streamer_mmio+LAPDINC)) >> 8;
switch (srb_word) {
case 0x00:
break;
case 0x01:
printk(KERN_WARNING "%s: Unrecognized srb command \n",dev->name);
break;
case 0x04:
printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n", dev->name);
break;
case 0x3c:
printk(KERN_WARNING "%s: Group/Functional address indicator bits not set correctly\n", dev->name);
break;
case 0x3e: /* If we ever implement individual multicast addresses, will need to deal with this */
printk(KERN_WARNING "%s: Group address registers full\n", dev->name);
break;
case 0x55:
printk(KERN_INFO "%s: Group Address already set.\n", dev->name);
break;
default:
break;
} /* switch srb[2] */
break;
/* SRB_RESET_GROUP_ADDRESS - Remove a multicast address from group list
*/
case SRB_RESET_GROUP_ADDRESS:
srb_word=ntohs(readw(streamer_mmio+LAPDINC)) >> 8;
switch (srb_word) {
case 0x00:
break;
case 0x01:
printk(KERN_WARNING "%s: Unrecognized srb command \n", dev->name);
break;
case 0x04:
printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n", dev->name);
break;
case 0x39: /* Must deal with this if individual multicast addresses used */
printk(KERN_INFO "%s: Group address not found \n", dev->name);
break;
default:
break;
} /* switch srb[2] */
break;
/* SRB_SET_FUNC_ADDRESS - Called by the set_rx_mode
*/
case SRB_SET_FUNC_ADDRESS:
srb_word=ntohs(readw(streamer_mmio+LAPDINC)) >> 8;
switch (srb_word) {
case 0x00:
if (streamer_priv->streamer_message_level)
printk(KERN_INFO "%s: Functional Address Mask Set \n", dev->name);
break;
case 0x01:
printk(KERN_WARNING "%s: Unrecognized srb command \n", dev->name);
break;
case 0x04:
printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n", dev->name);
break;
default:
break;
} /* switch srb[2] */
break;
/* SRB_READ_LOG - Read and reset the adapter error counters
*/
case SRB_READ_LOG:
srb_word=ntohs(readw(streamer_mmio+LAPDINC)) >> 8;
switch (srb_word) {
case 0x00:
{
int i;
if (streamer_priv->streamer_message_level)
printk(KERN_INFO "%s: Read Log command complete\n", dev->name);
printk("Read Log statistics: ");
/* Counters live at offset 6 in the SRB; dump five 16-bit values. */
writew(streamer_priv->srb + 6,
streamer_mmio + LAPA);
for (i = 0; i < 5; i++) {
printk("%x:", ntohs(readw(streamer_mmio + LAPDINC)));
}
printk("\n");
}
break;
case 0x01:
printk(KERN_WARNING "%s: Unrecognized srb command \n", dev->name);
break;
case 0x04:
printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n", dev->name);
break;
} /* switch srb[2] */
break;
/* SRB_READ_SR_COUNTERS - Read and reset the source routing bridge related counters */
case SRB_READ_SR_COUNTERS:
srb_word=ntohs(readw(streamer_mmio+LAPDINC)) >> 8;
switch (srb_word) {
case 0x00:
if (streamer_priv->streamer_message_level)
printk(KERN_INFO "%s: Read Source Routing Counters issued\n", dev->name);
break;
case 0x01:
printk(KERN_WARNING "%s: Unrecognized srb command \n", dev->name);
break;
case 0x04:
printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n", dev->name);
break;
default:
break;
} /* switch srb[2] */
break;
default:
printk(KERN_WARNING "%s: Unrecognized srb bh return value.\n", dev->name);
break;
} /* switch srb[0] */
}
/* Hand back the per-adapter statistics kept in the private data area. */
static struct net_device_stats *streamer_get_stats(struct net_device *dev)
{
	struct streamer_private *priv = (struct streamer_private *) dev->priv;

	return (struct net_device_stats *) &priv->streamer_stats;
}
/*
 * Record a new locally administered address (LAA) for the adapter.
 * The change is refused while the interface is up; the stored address
 * is used by the open path, so it only takes effect on the next open.
 */
static int streamer_set_mac_address(struct net_device *dev, void *addr)
{
	struct sockaddr *sa = addr;
	struct streamer_private *priv = (struct streamer_private *) dev->priv;

	if (netif_running(dev)) {
		printk(KERN_WARNING "%s: Cannot set mac/laa address while card is open\n", dev->name);
		return -EIO;
	}

	memcpy(priv->streamer_laa, sa->sa_data, dev->addr_len);

	if (priv->streamer_message_level) {
		printk(KERN_INFO "%s: MAC/LAA Set to = %x.%x.%x.%x.%x.%x\n",
		       dev->name,
		       priv->streamer_laa[0], priv->streamer_laa[1],
		       priv->streamer_laa[2], priv->streamer_laa[3],
		       priv->streamer_laa[4], priv->streamer_laa[5]);
	}

	return 0;
}
/*
 * streamer_arb_cmd - handle an Adapter Request Block from the card
 * @dev: device whose ARB interrupt fired
 *
 * Two ARB commands are handled: ARB_RECEIVE_DATA delivers a MAC frame
 * (chained card-memory buffers are copied out word-by-word through the
 * LAPDINC auto-advancing data port, assembled into an skb and pushed up
 * the stack), and ARB_LAN_CHANGE_STATUS reports ring status changes,
 * which are decoded into printks and, for counter overflows, trigger
 * READ.LOG / READ.SR.COUNTERS SRB commands.  Anything else just warns.
 */
static void streamer_arb_cmd(struct net_device *dev)
{
struct streamer_private *streamer_priv =
(struct streamer_private *) dev->priv;
__u8 *streamer_mmio = streamer_priv->streamer_mmio;
__u8 header_len;
__u16 frame_len, buffer_len;
struct sk_buff *mac_frame;
__u8 frame_data[256];
__u16 buff_off;
__u16 lan_status = 0, lan_status_diff; /* Initialize to stop compiler warning */
__u8 fdx_prot_error;
__u16 next_ptr;
__u16 arb_word;
#if STREAMER_NETWORK_MONITOR
struct trh_hdr *mac_hdr;
#endif
/* Window onto the ARB; byte 0 identifies the request. */
writew(streamer_priv->arb, streamer_mmio + LAPA);
arb_word=ntohs(readw(streamer_mmio+LAPD)) >> 8;
if (arb_word == ARB_RECEIVE_DATA) { /* Receive.data, MAC frames */
/* Offset 6 of the ARB: buffer offset, header length, frame length. */
writew(streamer_priv->arb + 6, streamer_mmio + LAPA);
streamer_priv->mac_rx_buffer = buff_off = ntohs(readw(streamer_mmio + LAPDINC));
header_len=ntohs(readw(streamer_mmio+LAPDINC)) >> 8; /* 802.5 Token-Ring Header Length */
frame_len = ntohs(readw(streamer_mmio + LAPDINC));
#if STREAMER_DEBUG
{
int i;
__u16 next;
__u8 status;
__u16 len;
writew(ntohs(buff_off), streamer_mmio + LAPA); /*setup window to frame data */
next = htons(readw(streamer_mmio + LAPDINC));
status =
ntohs(readw(streamer_mmio + LAPDINC)) & 0xff;
len = ntohs(readw(streamer_mmio + LAPDINC));
/* print out 1st 14 bytes of frame data */
for (i = 0; i < 7; i++) {
printk("Loc %d = %04x\n", i,
ntohs(readw
(streamer_mmio + LAPDINC)));
}
printk("next %04x, fs %02x, len %04x \n", next,
status, len);
}
#endif
if (!(mac_frame = dev_alloc_skb(frame_len))) {
printk(KERN_WARNING "%s: Memory squeeze, dropping frame.\n",
dev->name);
goto drop_frame;
}
/* Walk the buffer chain, creating the frame */
/* Each buffer: next-pointer word, status word, length word, then data.
* Data is staged through frame_data[] because the card is read 16 bits
* at a time; buffers longer than the 256-byte staging area are skipped. */
do {
int i;
__u16 rx_word;
writew(htons(buff_off), streamer_mmio + LAPA); /* setup window to frame data */
next_ptr = ntohs(readw(streamer_mmio + LAPDINC));
readw(streamer_mmio + LAPDINC); /* read thru status word */
buffer_len = ntohs(readw(streamer_mmio + LAPDINC));
if (buffer_len > 256)
break;
i = 0;
while (i < buffer_len) {
rx_word=ntohs(readw(streamer_mmio+LAPDINC));
frame_data[i]=rx_word >> 8;
frame_data[i+1]=rx_word & 0xff;
i += 2;
}
/* NOTE(review): frame_data is an ordinary stack buffer, not I/O
* memory, so plain memcpy would seem sufficient here - confirm. */
memcpy_fromio(skb_put(mac_frame, buffer_len),
frame_data, buffer_len);
} while (next_ptr && (buff_off = next_ptr));
#if STREAMER_NETWORK_MONITOR
printk(KERN_WARNING "%s: Received MAC Frame, details: \n",
dev->name);
mac_hdr = (struct trh_hdr *) mac_frame->data;
printk(KERN_WARNING
"%s: MAC Frame Dest. Addr: %02x:%02x:%02x:%02x:%02x:%02x \n",
dev->name, mac_hdr->daddr[0], mac_hdr->daddr[1],
mac_hdr->daddr[2], mac_hdr->daddr[3],
mac_hdr->daddr[4], mac_hdr->daddr[5]);
printk(KERN_WARNING
"%s: MAC Frame Srce. Addr: %02x:%02x:%02x:%02x:%02x:%02x \n",
dev->name, mac_hdr->saddr[0], mac_hdr->saddr[1],
mac_hdr->saddr[2], mac_hdr->saddr[3],
mac_hdr->saddr[4], mac_hdr->saddr[5]);
#endif
mac_frame->dev = dev;
mac_frame->protocol = tr_type_trans(mac_frame, dev);
netif_rx(mac_frame);
/* Now tell the card we have dealt with the received frame */
drop_frame:
/* Set LISR Bit 1 */
writel(LISR_ARB_FREE, streamer_priv->streamer_mmio + LISR_SUM);
/* Is the ASB free ? */
/* If not, streamer_asb_bh() finishes the receive acknowledgement
* later (asb_queued == 1); otherwise build the ASB reply here. */
if (!(readl(streamer_priv->streamer_mmio + SISR) & SISR_ASB_FREE))
{
streamer_priv->asb_queued = 1;
writel(LISR_ASB_FREE_REQ, streamer_priv->streamer_mmio + LISR_SUM);
return;
/* Drop out and wait for the bottom half to be run */
}
writew(streamer_priv->asb, streamer_mmio + LAPA);
writew(htons(ASB_RECEIVE_DATA << 8), streamer_mmio+LAPDINC);
writew(htons(STREAMER_CLEAR_RET_CODE << 8), streamer_mmio+LAPDINC);
writew(0, streamer_mmio + LAPDINC);
writew(htons(streamer_priv->mac_rx_buffer), streamer_mmio + LAPD);
writel(LISR_ASB_REPLY | LISR_ASB_FREE_REQ, streamer_priv->streamer_mmio + LISR_SUM);
streamer_priv->asb_queued = 2;
return;
} else if (arb_word == ARB_LAN_CHANGE_STATUS) { /* Lan.change.status */
writew(streamer_priv->arb + 6, streamer_mmio + LAPA);
lan_status = ntohs(readw(streamer_mmio + LAPDINC));
fdx_prot_error = ntohs(readw(streamer_mmio+LAPD)) >> 8;
/* Issue ARB Free */
/* NOTE(review): the same LISR_ARB_FREE bit is set with writel() in the
* receive path above but writew() here - confirm the intended width. */
writew(LISR_ARB_FREE, streamer_priv->streamer_mmio + LISR_SUM);
/* Only newly-raised status bits are reported below. */
lan_status_diff = (streamer_priv->streamer_lan_status ^ lan_status) &
lan_status;
if (lan_status_diff & (LSC_LWF | LSC_ARW | LSC_FPE | LSC_RR))
{
if (lan_status_diff & LSC_LWF)
printk(KERN_WARNING "%s: Short circuit detected on the lobe\n", dev->name);
if (lan_status_diff & LSC_ARW)
printk(KERN_WARNING "%s: Auto removal error\n", dev->name);
if (lan_status_diff & LSC_FPE)
printk(KERN_WARNING "%s: FDX Protocol Error\n", dev->name);
if (lan_status_diff & LSC_RR)
printk(KERN_WARNING "%s: Force remove MAC frame received\n", dev->name);
/* Adapter has been closed by the hardware */
/* reset tx/rx fifo's and busmaster logic */
/* @TBD. no llc reset on autostreamer writel(readl(streamer_mmio+BCTL)|(3<<13),streamer_mmio+BCTL);
udelay(1);
writel(readl(streamer_mmio+BCTL)&~(3<<13),streamer_mmio+BCTL); */
netif_stop_queue(dev);
free_irq(dev->irq, dev);
printk(KERN_WARNING "%s: Adapter has been closed \n", dev->name);
}
/* If serious error */
if (streamer_priv->streamer_message_level) {
if (lan_status_diff & LSC_SIG_LOSS)
printk(KERN_WARNING "%s: No receive signal detected \n", dev->name);
if (lan_status_diff & LSC_HARD_ERR)
printk(KERN_INFO "%s: Beaconing \n", dev->name);
if (lan_status_diff & LSC_SOFT_ERR)
printk(KERN_WARNING "%s: Adapter transmitted Soft Error Report Mac Frame \n", dev->name);
if (lan_status_diff & LSC_TRAN_BCN)
printk(KERN_INFO "%s: We are tranmitting the beacon, aaah\n", dev->name);
if (lan_status_diff & LSC_SS)
printk(KERN_INFO "%s: Single Station on the ring \n", dev->name);
if (lan_status_diff & LSC_RING_REC)
printk(KERN_INFO "%s: Ring recovery ongoing\n", dev->name);
if (lan_status_diff & LSC_FDX_MODE)
printk(KERN_INFO "%s: Operating in FDX mode\n", dev->name);
}
if (lan_status_diff & LSC_CO) {
if (streamer_priv->streamer_message_level)
printk(KERN_INFO "%s: Counter Overflow \n", dev->name);
/* Issue READ.LOG command */
writew(streamer_priv->srb, streamer_mmio + LAPA);
writew(htons(SRB_READ_LOG << 8),streamer_mmio+LAPDINC);
writew(htons(STREAMER_CLEAR_RET_CODE << 8), streamer_mmio+LAPDINC);
writew(0, streamer_mmio + LAPDINC);
streamer_priv->srb_queued = 2; /* Can't sleep, use srb_bh */
/* NOTE(review): LISR_SRB_CMD is issued with writel() elsewhere in this
* file but writew() here and below - confirm the intended width. */
writew(LISR_SRB_CMD, streamer_mmio + LISR_SUM);
}
if (lan_status_diff & LSC_SR_CO) {
if (streamer_priv->streamer_message_level)
printk(KERN_INFO "%s: Source routing counters overflow\n", dev->name);
/* Issue a READ.SR.COUNTERS */
writew(streamer_priv->srb, streamer_mmio + LAPA);
writew(htons(SRB_READ_SR_COUNTERS << 8),
streamer_mmio+LAPDINC);
writew(htons(STREAMER_CLEAR_RET_CODE << 8),
streamer_mmio+LAPDINC);
streamer_priv->srb_queued = 2; /* Can't sleep, use srb_bh */
writew(LISR_SRB_CMD, streamer_mmio + LISR_SUM);
}
streamer_priv->streamer_lan_status = lan_status;
} /* Lan.change.status */
else
printk(KERN_WARNING "%s: Unknown arb command \n", dev->name);
}
/*
 * streamer_asb_bh - finish or check an Adapter Status Block exchange
 * @dev: device whose ASB-related interrupt fired
 *
 * Runs in two phases tracked by asb_queued:
 *   1 - the ASB was busy when streamer_arb_cmd() wanted to acknowledge a
 *       received MAC frame; build the RECEIVE_DATA reply now and move to
 *       phase 2.
 *   2 - the reply was issued earlier; read back the return code at ASB
 *       offset 2 and report anything other than the 0xFF success value.
 */
static void streamer_asb_bh(struct net_device *dev)
{
struct streamer_private *streamer_priv =
(struct streamer_private *) dev->priv;
__u8 *streamer_mmio = streamer_priv->streamer_mmio;
if (streamer_priv->asb_queued == 1)
{
/* Dropped through the first time */
writew(streamer_priv->asb, streamer_mmio + LAPA);
writew(htons(ASB_RECEIVE_DATA << 8),streamer_mmio+LAPDINC);
writew(htons(STREAMER_CLEAR_RET_CODE << 8), streamer_mmio+LAPDINC);
writew(0, streamer_mmio + LAPDINC);
writew(htons(streamer_priv->mac_rx_buffer), streamer_mmio + LAPD);
writel(LISR_ASB_REPLY | LISR_ASB_FREE_REQ, streamer_priv->streamer_mmio + LISR_SUM);
streamer_priv->asb_queued = 2;
return;
}
if (streamer_priv->asb_queued == 2) {
__u8 rc;
/* Return code lives at offset 2 in the ASB. */
writew(streamer_priv->asb + 2, streamer_mmio + LAPA);
rc=ntohs(readw(streamer_mmio+LAPD)) >> 8;
switch (rc) {
case 0x01:
printk(KERN_WARNING "%s: Unrecognized command code \n", dev->name);
break;
case 0x26:
printk(KERN_WARNING "%s: Unrecognized buffer address \n", dev->name);
break;
case 0xFF:
/* Valid response, everything should be ok again */
break;
default:
printk(KERN_WARNING "%s: Invalid return code in asb\n", dev->name);
break;
}
}
streamer_priv->asb_queued = 0;
}
/*
 * Validate and apply a new MTU.  The upper bound depends on the ring
 * speed recorded in the private data (4500 octets when the speed field
 * is 4, otherwise 18000); anything below 100 is rejected.  The packet
 * buffer size tracks the MTU plus the token ring header length.
 */
static int streamer_change_mtu(struct net_device *dev, int mtu)
{
	struct streamer_private *priv = (struct streamer_private *) dev->priv;
	__u16 max_mtu = (priv->streamer_ring_speed == 4) ? 4500 : 18000;

	if (mtu > max_mtu || mtu < 100)
		return -EINVAL;

	dev->mtu = mtu;
	priv->pkt_buf_sz = mtu + TR_HLEN;

	return 0;
}
#if STREAMER_NETWORK_MONITOR
#ifdef CONFIG_PROC_FS
/*
 * streamer_proc_info - procfs read handler listing all streamer adapters
 * @buffer: page supplied by procfs to fill
 * @start: out-parameter, start of the data the caller asked for
 * @offset: file offset the reader wants
 * @length: maximum number of bytes wanted
 * @eof: end-of-file flag (NOTE(review): never set here - confirm whether
 *       the classic read_proc contract needs it for short files)
 * @data: unused
 *
 * Walks the dev_streamer list, appending one sprintf_info() dump per
 * adapter, using the traditional pos/begin bookkeeping to serve only the
 * [offset, offset+length) window of the generated text.
 */
static int streamer_proc_info(char *buffer, char **start, off_t offset,
int length, int *eof, void *data)
{
struct streamer_private *sdev=NULL;
struct pci_dev *pci_device = NULL;
int len = 0;
off_t begin = 0;
off_t pos = 0;
int size;
struct net_device *dev;
size = sprintf(buffer, "IBM LanStreamer/MPC Chipset Token Ring Adapters\n");
pos += size;
len += size;
for(sdev=dev_streamer; sdev; sdev=sdev->next) {
pci_device=sdev->pci_dev;
dev=pci_get_drvdata(pci_device);
size = sprintf_info(buffer + len, dev);
len += size;
pos = begin + len;
/* Discard output that lies entirely before the requested offset. */
if (pos < offset) {
len = 0;
begin = pos;
}
/* Enough generated to satisfy the request - stop early. */
if (pos > offset + length)
break;
} /* for */
*start = buffer + (offset - begin); /* Start of wanted data */
len -= (offset - begin); /* Start slop */
if (len > length)
len = length; /* Ending slop */
return len;
}
/*
 * sprintf_info - format one adapter's address and parameter tables
 * @buffer: destination for the formatted text (procfs page)
 * @dev: device whose card-memory tables are dumped
 *
 * Copies the 14-byte adapter address table and the 68-byte token ring
 * parameters table out of MMIO space (16 bits at a time through the
 * auto-advancing LAPDINC port) into local structures and pretty-prints
 * them.  Returns the number of characters written.
 *
 * Fixes over the previous version: both copy loops indexed the
 * destination with the constant 'size' (always 0 at that point), so only
 * the first two bytes of each table were ever filled and the rest was
 * printed uninitialized - the index now follows the loop counter.  The
 * sixth byte of the upstream node address was also printed as
 * up_node_addr[4] twice; it is now up_node_addr[5].
 */
static int sprintf_info(char *buffer, struct net_device *dev)
{
	struct streamer_private *streamer_priv =
	    (struct streamer_private *) dev->priv;
	__u8 *streamer_mmio = streamer_priv->streamer_mmio;
	struct streamer_adapter_addr_table sat;
	struct streamer_parameters_table spt;
	int size = 0;
	int i;

	/* Copy the adapter address table (14 bytes) out of card memory. */
	writew(streamer_priv->streamer_addr_table_addr, streamer_mmio + LAPA);
	for (i = 0; i < 14; i += 2) {
		__u16 io_word;
		__u8 *datap = (__u8 *) & sat;
		io_word = ntohs(readw(streamer_mmio + LAPDINC));
		datap[i] = io_word >> 8;
		datap[i + 1] = io_word & 0xff;
	}

	/* Copy the parameters table (68 bytes) the same way. */
	writew(streamer_priv->streamer_parms_addr, streamer_mmio + LAPA);
	for (i = 0; i < 68; i += 2) {
		__u16 io_word;
		__u8 *datap = (__u8 *) & spt;
		io_word = ntohs(readw(streamer_mmio + LAPDINC));
		datap[i] = io_word >> 8;
		datap[i + 1] = io_word & 0xff;
	}

	size = sprintf(buffer, "\n%6s: Adapter Address : Node Address : Functional Addr\n", dev->name);
	size += sprintf(buffer + size,
			"%6s: %02x:%02x:%02x:%02x:%02x:%02x : %02x:%02x:%02x:%02x:%02x:%02x : %02x:%02x:%02x:%02x\n",
			dev->name, dev->dev_addr[0], dev->dev_addr[1],
			dev->dev_addr[2], dev->dev_addr[3], dev->dev_addr[4],
			dev->dev_addr[5], sat.node_addr[0], sat.node_addr[1],
			sat.node_addr[2], sat.node_addr[3], sat.node_addr[4],
			sat.node_addr[5], sat.func_addr[0], sat.func_addr[1],
			sat.func_addr[2], sat.func_addr[3]);
	size += sprintf(buffer + size, "\n%6s: Token Ring Parameters Table:\n", dev->name);
	size += sprintf(buffer + size, "%6s: Physical Addr : Up Node Address : Poll Address : AccPri : Auth Src : Att Code :\n", dev->name);
	size += sprintf(buffer + size,
			"%6s: %02x:%02x:%02x:%02x : %02x:%02x:%02x:%02x:%02x:%02x : %02x:%02x:%02x:%02x:%02x:%02x : %04x : %04x : %04x :\n",
			dev->name, spt.phys_addr[0], spt.phys_addr[1],
			spt.phys_addr[2], spt.phys_addr[3],
			spt.up_node_addr[0], spt.up_node_addr[1],
			spt.up_node_addr[2], spt.up_node_addr[3],
			spt.up_node_addr[4], spt.up_node_addr[5],
			spt.poll_addr[0], spt.poll_addr[1], spt.poll_addr[2],
			spt.poll_addr[3], spt.poll_addr[4], spt.poll_addr[5],
			ntohs(spt.acc_priority), ntohs(spt.auth_source_class),
			ntohs(spt.att_code));
	size += sprintf(buffer + size, "%6s: Source Address : Bcn T : Maj. V : Lan St : Lcl Rg : Mon Err : Frame Correl : \n", dev->name);
	size += sprintf(buffer + size,
			"%6s: %02x:%02x:%02x:%02x:%02x:%02x : %04x : %04x : %04x : %04x : %04x : %04x : \n",
			dev->name, spt.source_addr[0], spt.source_addr[1],
			spt.source_addr[2], spt.source_addr[3],
			spt.source_addr[4], spt.source_addr[5],
			ntohs(spt.beacon_type), ntohs(spt.major_vector),
			ntohs(spt.lan_status), ntohs(spt.local_ring),
			ntohs(spt.mon_error), ntohs(spt.frame_correl));
	size += sprintf(buffer + size, "%6s: Beacon Details : Tx : Rx : NAUN Node Address : NAUN Node Phys : \n",
			dev->name);
	size += sprintf(buffer + size,
			"%6s: : %02x : %02x : %02x:%02x:%02x:%02x:%02x:%02x : %02x:%02x:%02x:%02x : \n",
			dev->name, ntohs(spt.beacon_transmit),
			ntohs(spt.beacon_receive), spt.beacon_naun[0],
			spt.beacon_naun[1], spt.beacon_naun[2],
			spt.beacon_naun[3], spt.beacon_naun[4],
			spt.beacon_naun[5], spt.beacon_phys[0],
			spt.beacon_phys[1], spt.beacon_phys[2],
			spt.beacon_phys[3]);
	return size;
}
#endif
#endif
#if STREAMER_IOCTL && (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
/*
 * streamer_ioctl - debug-only ioctl dispatcher (compiled in only when
 * STREAMER_IOCTL is set on pre-2.5 kernels)
 * @dev: target device
 * @ifr: ioctl request block (unused by these commands)
 * @cmd: one of the IOCTL_* debug commands below
 *
 * Each command either pokes a register, prints register/ring state, or
 * exercises a code path (spinlock, rx handler) for debugging.  Always
 * returns 0, even for unknown commands.
 */
static int streamer_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
int i;
struct streamer_private *streamer_priv = (struct streamer_private *) dev->priv;
u8 *streamer_mmio = streamer_priv->streamer_mmio;
switch(cmd) {
case IOCTL_SISR_MASK:
/* Unmask the MI bit in the SISR mask register. */
writew(SISR_MI, streamer_mmio + SISR_MASK_SUM);
break;
case IOCTL_SPIN_LOCK_TEST:
/* Sanity-check that the driver lock can be taken and released. */
printk(KERN_INFO "spin_lock() called.\n");
spin_lock(&streamer_priv->streamer_lock);
spin_unlock(&streamer_priv->streamer_lock);
printk(KERN_INFO "spin_unlock() finished.\n");
break;
case IOCTL_PRINT_BDAS:
/* Dump the rx/tx buffer descriptor address registers. */
printk(KERN_INFO "bdas: RXBDA: %x RXLBDA: %x TX2FDA: %x TX2LFDA: %x\n",
readw(streamer_mmio + RXBDA),
readw(streamer_mmio + RXLBDA),
readw(streamer_mmio + TX2FDA),
readw(streamer_mmio + TX2LFDA));
break;
case IOCTL_PRINT_REGISTERS:
printk(KERN_INFO "registers:\n");
printk(KERN_INFO "SISR: %04x MISR: %04x LISR: %04x BCTL: %04x BMCTL: %04x\nmask %04x mask %04x\n",
readw(streamer_mmio + SISR),
readw(streamer_mmio + MISR_RUM),
readw(streamer_mmio + LISR),
readw(streamer_mmio + BCTL),
readw(streamer_mmio + BMCTL_SUM),
readw(streamer_mmio + SISR_MASK),
readw(streamer_mmio + MISR_MASK));
break;
case IOCTL_PRINT_RX_BUFS:
/* Dump the status word of every rx ring descriptor. */
printk(KERN_INFO "Print rx bufs:\n");
for(i=0; i<STREAMER_RX_RING_SIZE; i++)
printk(KERN_INFO "rx_ring %d status: 0x%x\n", i,
streamer_priv->streamer_rx_ring[i].status);
break;
case IOCTL_PRINT_TX_BUFS:
/* Dump the status word of every tx ring descriptor. */
printk(KERN_INFO "Print tx bufs:\n");
for(i=0; i<STREAMER_TX_RING_SIZE; i++)
printk(KERN_INFO "tx_ring %d status: 0x%x\n", i,
streamer_priv->streamer_tx_ring[i].status);
break;
case IOCTL_RX_CMD:
/* Run the receive handler directly. */
streamer_rx(dev);
printk(KERN_INFO "Sent rx command.\n");
break;
default:
printk(KERN_INFO "Bad ioctl!\n");
}
return 0;
}
#endif
/* PCI glue: device match table plus probe/remove entry points. */
static struct pci_driver streamer_pci_driver = {
	.name     = "lanstreamer",
	.id_table = streamer_pci_tbl,
	.probe    = streamer_init_one,
	.remove   = __devexit_p(streamer_remove_one),
};
/* Module entry point: register the PCI driver with the PCI core. */
static int __init streamer_init_module(void) {
return pci_module_init(&streamer_pci_driver);
}
/* Module exit: unregister the PCI driver; per-device teardown happens
 * through the driver's remove callback. */
static void __exit streamer_cleanup_module(void) {
pci_unregister_driver(&streamer_pci_driver);
}
module_init(streamer_init_module);
module_exit(streamer_cleanup_module);
MODULE_LICENSE("GPL");
|
robacklin/celinux
|
drivers/net/tokenring/lanstreamer.c
|
C
|
gpl-2.0
| 68,659
|
/*
* Copyright (C) 1996 Universidade de Lisboa
*
* Written by Pedro Roque Marques (roque@di.fc.ul.pt)
*
* This software may be used and distributed according to the terms of
* the GNU Public License, incorporated herein by reference.
*/
/*
* PCBIT-D interface with isdn4linux
*/
/*
* Fixes:
*
* Nuno Grilo <l38486@alfa.ist.utl.pt>
* fixed msn_list NULL pointer dereference.
*
*/
#define __NO_VERSION__
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/malloc.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/string.h>
#include <linux/skbuff.h>
#include <linux/isdnif.h>
#include <asm/string.h>
#include <asm/io.h>
#include "pcbit.h"
#include "edss1.h"
#include "layer2.h"
#include "capi.h"
extern ushort last_ref_num;
static int pcbit_ioctl(isdn_ctrl* ctl);
static char* pcbit_devname[MAX_PCBIT_CARDS] = {
"pcbit0",
"pcbit1",
"pcbit2",
"pcbit3"
};
/*
* prototypes
*/
int pcbit_command(isdn_ctrl* ctl);
int pcbit_stat(u_char* buf, int len, int user, int, int);
int pcbit_xmit(int driver, int chan, int ack, struct sk_buff *skb);
int pcbit_writecmd(const u_char*, int, int, int, int);
static int set_protocol_running(struct pcbit_dev * dev);
static void pcbit_clear_msn(struct pcbit_dev *dev);
static void pcbit_set_msn(struct pcbit_dev *dev, char *list);
static int pcbit_check_msn(struct pcbit_dev *dev, char *msn);
extern void pcbit_deliver(void * data);
int pcbit_init_dev(int board, int mem_base, int irq)
{
struct pcbit_dev *dev;
isdn_if *dev_if;
if ((dev=kmalloc(sizeof(struct pcbit_dev), GFP_KERNEL)) == NULL)
{
printk("pcbit_init: couldn't malloc pcbit_dev struct\n");
return -ENOMEM;
}
dev_pcbit[board] = dev;
memset(dev, 0, sizeof(struct pcbit_dev));
if (mem_base >= 0xA0000 && mem_base <= 0xFFFFF )
dev->sh_mem = (unsigned char*) mem_base;
else
{
printk("memory address invalid");
kfree(dev);
dev_pcbit[board] = NULL;
return -EACCES;
}
dev->b1 = kmalloc(sizeof(struct pcbit_chan), GFP_KERNEL);
if (!dev->b1) {
printk("pcbit_init: couldn't malloc pcbit_chan struct\n");
kfree(dev);
return -ENOMEM;
}
dev->b2 = kmalloc(sizeof(struct pcbit_chan), GFP_KERNEL);
if (!dev->b2) {
printk("pcbit_init: couldn't malloc pcbit_chan struct\n");
kfree(dev->b1);
kfree(dev);
return -ENOMEM;
}
memset(dev->b1, 0, sizeof(struct pcbit_chan));
memset(dev->b2, 0, sizeof(struct pcbit_chan));
dev->b2->id = 1;
dev->qdelivery.next = NULL;
dev->qdelivery.sync = 0;
dev->qdelivery.routine = pcbit_deliver;
dev->qdelivery.data = dev;
/*
* interrupts
*/
if (request_irq(irq, &pcbit_irq_handler, 0, pcbit_devname[board], dev) != 0)
{
kfree(dev->b1);
kfree(dev->b2);
kfree(dev);
dev_pcbit[board] = NULL;
return -EIO;
}
dev->irq = irq;
/* next frame to be received */
dev->rcv_seq = 0;
dev->send_seq = 0;
dev->unack_seq = 0;
dev->hl_hdrlen = 10;
dev_if = kmalloc(sizeof(isdn_if), GFP_KERNEL);
if (!dev_if) {
free_irq(irq, dev);
kfree(dev->b1);
kfree(dev->b2);
kfree(dev);
dev_pcbit[board] = NULL;
return -EIO;
}
dev->dev_if = dev_if;
dev_if->channels = 2;
dev_if->features = (ISDN_FEATURE_P_EURO | ISDN_FEATURE_L3_TRANS |
ISDN_FEATURE_L2_HDLC | ISDN_FEATURE_L2_TRANS );
dev_if->writebuf_skb = pcbit_xmit;
dev_if->hl_hdrlen = 10;
dev_if->maxbufsize = MAXBUFSIZE;
dev_if->command = pcbit_command;
dev_if->writecmd = pcbit_writecmd;
dev_if->readstat = pcbit_stat;
strcpy(dev_if->id, pcbit_devname[board]);
if (!register_isdn(dev_if)) {
free_irq(irq, dev);
kfree(dev->b1);
kfree(dev->b2);
kfree(dev);
dev_pcbit[board] = NULL;
return -EIO;
}
dev->id = dev_if->channels;
dev->l2_state = L2_DOWN;
dev->free = 511;
/*
* set_protocol_running(dev);
*/
return 0;
}
#ifdef MODULE
/*
 * pcbit_terminate - release one board's resources on module unload
 * @board: index into dev_pcbit[]
 *
 * Frees the IRQ, the MSN list, the isdn_if structure, any armed FSM
 * timers and the channel/device structures.  A NULL slot is a no-op.
 * NOTE(review): dev_pcbit[board] is not reset to NULL afterwards -
 * confirm no caller can re-enter with the stale pointer.
 */
void pcbit_terminate(int board)
{
struct pcbit_dev * dev;
dev = dev_pcbit[board];
if (dev) {
/* unregister_isdn(dev->dev_if); */
free_irq(dev->irq, dev);
pcbit_clear_msn(dev);
kfree(dev->dev_if);
/* A non-NULL timer function means the timer was armed. */
if (dev->b1->fsm_timer.function)
del_timer(&dev->b1->fsm_timer);
if (dev->b2->fsm_timer.function)
del_timer(&dev->b2->fsm_timer);
kfree(dev->b1);
kfree(dev->b2);
kfree(dev);
}
}
#endif
/*
 * pcbit_command - isdn4linux command entry point for this driver
 * @ctl: command block; ->driver selects the board, ->command the action,
 *       ->arg usually encodes the channel in its low nibble
 *
 * Dispatches each ISDN_CMD_* either into the call-control state machine
 * (pcbit_fsm_event), into a local setter, or into pcbit_ioctl().
 * Returns 0, a command-specific value (GETL2/GETL3), or -1 for an
 * unknown device.
 */
int pcbit_command(isdn_ctrl* ctl)
{
struct pcbit_dev *dev;
struct pcbit_chan *chan;
struct callb_data info;
dev = finddev(ctl->driver);
if (!dev)
{
printk("pcbit_command: unknown device\n");
return -1;
}
/* Low nibble of arg selects the B channel: non-zero -> b2, zero -> b1. */
chan = (ctl->arg & 0x0F) ? dev->b2 : dev->b1;
switch(ctl->command) {
case ISDN_CMD_IOCTL:
return pcbit_ioctl(ctl);
break;
case ISDN_CMD_DIAL:
info.type = EV_USR_SETUP_REQ;
info.data.setup.CalledPN = (char *) &ctl->parm.setup.phone;
pcbit_fsm_event(dev, chan, EV_USR_SETUP_REQ, &info);
break;
case ISDN_CMD_ACCEPTD:
pcbit_fsm_event(dev, chan, EV_USR_SETUP_RESP, NULL);
break;
case ISDN_CMD_ACCEPTB:
printk("ISDN_CMD_ACCEPTB - not really needed\n");
break;
case ISDN_CMD_HANGUP:
pcbit_fsm_event(dev, chan, EV_USR_RELEASE_REQ, NULL);
break;
case ISDN_CMD_SETL2:
/* Layer-2 protocol id travels in the upper bits of arg. */
chan->proto = (ctl->arg >> 8);
break;
case ISDN_CMD_GETL2:
return chan->proto;
break;
case ISDN_CMD_LOCK:
MOD_INC_USE_COUNT;
break;
case ISDN_CMD_UNLOCK:
MOD_DEC_USE_COUNT;
break;
case ISDN_CMD_CLREAZ:
pcbit_clear_msn(dev);
break;
case ISDN_CMD_SETEAZ:
pcbit_set_msn(dev, ctl->parm.num);
break;
case ISDN_CMD_SETL3:
/* Only transparent layer 3 is supported; anything else is noted. */
if ((ctl->arg >> 8) != ISDN_PROTO_L3_TRANS)
printk(KERN_DEBUG "L3 protocol unknown\n");
break;
case ISDN_CMD_GETL3:
return ISDN_PROTO_L3_TRANS;
break;
case ISDN_CMD_GETEAZ:
case ISDN_CMD_SETSIL:
case ISDN_CMD_GETSIL:
printk(KERN_DEBUG "pcbit_command: code %d not implemented yet\n", ctl->command);
break;
default:
printk(KERN_DEBUG "pcbit_command: unknown command\n");
break;
};
return 0;
}
/*
* Another Hack :-(
* on some conditions the board stops sending TDATA_CONFs
* let's see if we can turn around the problem
*/
#ifdef BLOCK_TIMER
/*
 * pcbit_block_timer - watchdog for a stalled transmit channel
 * @data: the struct pcbit_chan * cast to unsigned long
 *
 * Armed by pcbit_xmit() when the per-channel queue fills up (see the
 * comment above: the board sometimes stops sending TDATA_CONFs).  On
 * expiry the queue count is forcibly cleared and ISDN_STAT_BSENT is
 * reported so the upper layer resumes transmitting.
 */
static void pcbit_block_timer(unsigned long data)
{
struct pcbit_chan *chan;
struct pcbit_dev * dev;
isdn_ctrl ictl;
chan = (struct pcbit_chan *) data;
dev = chan2dev(chan);
if (dev == NULL) {
printk(KERN_DEBUG "pcbit: chan2dev failed\n");
return;
}
/* Disarm and mark the timer idle (a NULL function means "not armed"). */
del_timer(&chan->block_timer);
chan->block_timer.function = NULL;
#ifdef DEBUG
printk(KERN_DEBUG "pcbit_block_timer\n");
#endif
chan->queued = 0;
ictl.driver = dev->id;
ictl.command = ISDN_STAT_BSENT;
ictl.arg = chan->id;
dev->dev_if->statcallb(&ictl);
}
#endif
/*
 * pcbit_xmit - isdn4linux transmit hook for B-channel data
 * @driver: driver id used to look up the board
 * @chnum: channel number (non-zero -> b2, zero -> b1)
 * @ack: acknowledgement flag from the caller (unused here)
 * @skb: payload to send
 *
 * Prepends the CAPI TDATA_REQ header and queues the frame to layer 2.
 * Returns the payload length on success, 0 when the per-channel queue
 * is full (the skb stays on the device queue and, with BLOCK_TIMER, a
 * watchdog is armed to unstick the channel), or -1 when the device is
 * unknown or the channel is not active.
 */
int pcbit_xmit(int driver, int chnum, int ack, struct sk_buff *skb)
{
ushort hdrlen;
int refnum, len;
struct pcbit_chan * chan;
struct pcbit_dev *dev;
dev = finddev(driver);
if (dev == NULL)
{
printk("finddev returned NULL");
return -1;
}
chan = chnum ? dev->b2 : dev->b1;
if (chan->fsm_state != ST_ACTIVE)
return -1;
if (chan->queued >= MAX_QUEUED )
{
#ifdef DEBUG_QUEUE
printk(KERN_DEBUG
"pcbit: %d packets already in queue - write fails\n",
chan->queued);
#endif
/*
* packet stays on the head of the device queue
* since dev_start_xmit will fail
* see net/core/dev.c
*/
#ifdef BLOCK_TIMER
/* Arm the unstick watchdog once; NULL function means "not armed". */
if (chan->block_timer.function == NULL) {
init_timer(&chan->block_timer);
chan->block_timer.function = &pcbit_block_timer;
chan->block_timer.data = (long) chan;
chan->block_timer.expires = jiffies + 1 * HZ;
add_timer(&chan->block_timer);
}
#endif
return 0;
}
chan->queued++;
len = skb->len;
hdrlen = capi_tdata_req(chan, skb);
/* Message reference numbers wrap at 15 bits. */
refnum = last_ref_num++ & 0x7fffU;
chan->s_refnum = refnum;
pcbit_l2_write(dev, MSG_TDATA_REQ, refnum, skb, hdrlen);
return len;
}
/*
 * pcbit_writecmd - download data (loader mode) or firmware bytes to the
 * board's shared memory
 * @buf: source buffer (kernel or user space, per @user)
 * @len: number of bytes to write
 * @user: non-zero when @buf is a user-space pointer
 * @driver: driver id used to look up the board
 * @channel: unused
 *
 * In L2_LWMODE the buffer is copied verbatim into the shared-memory
 * window; in L2_FWMODE each byte is fed through the board's load
 * handshake area (write byte, set ready flag, wait for the board to
 * clear it).  Returns the number of bytes written or a negative errno.
 *
 * Fixes over the previous version: the LWMODE user path copied 'len'
 * bytes (bounded only by BANK4 + 1) into a fixed 1024-byte stack array,
 * allowing a stack overflow from user space - it now copies through the
 * staging buffer in bounded chunks; copy_from_user() results are
 * checked; and a negative 'len' is rejected in LWMODE as well.
 */
int pcbit_writecmd(const u_char* buf, int len, int user, int driver, int channel)
{
	struct pcbit_dev *dev;
	int i, j;
	const u_char *loadbuf;
	u_char *ptr = NULL;
	int errstat;

	dev = finddev(driver);
	if (!dev) {
		printk("pcbit_writecmd: couldn't find device");
		return -ENODEV;
	}

	switch (dev->l2_state) {
	case L2_LWMODE:
		/* check (size <= rdp_size); write buf into board */
		if (len < 0 || len > BANK4 + 1) {
			printk("pcbit_writecmd: invalid length %d\n", len);
			return -EFAULT;
		}
		if (user) {
			/* Stage user data through a bounded stack buffer,
			 * chunk by chunk, instead of one unbounded copy. */
			u_char cbuf[1024];
			int done = 0;

			while (done < len) {
				int chunk = len - done;

				if (chunk > (int) sizeof(cbuf))
					chunk = sizeof(cbuf);
				if (copy_from_user(cbuf, buf + done, chunk))
					return -EFAULT;
				for (i = 0; i < chunk; i++)
					writeb(cbuf[i], dev->sh_mem + done + i);
				done += chunk;
			}
		}
		else
			memcpy_toio(dev->sh_mem, buf, len);
		return len;
		break;
	case L2_FWMODE:
		/* this is the hard part */
		/* dumb board */
		if (len < 0)
			return -EINVAL;
		if (user) {
			/* get it into kernel space */
			if ((ptr = kmalloc(len, GFP_KERNEL)) == NULL)
				return -ENOMEM;
			if (copy_from_user(ptr, buf, len)) {
				kfree(ptr);
				return -EFAULT;
			}
			loadbuf = ptr;
		}
		else
			loadbuf = buf;

		errstat = 0;
		for (i = 0; i < len; i++) {
			/* Wait for the board to consume the previous byte. */
			for (j = 0; j < LOAD_RETRY; j++)
				if (!(readb(dev->sh_mem + dev->loadptr)))
					break;
			if (j == LOAD_RETRY) {
				errstat = -ETIME;
				printk("TIMEOUT i=%d\n", i);
				break;
			}
			/* Data byte first, then the ready flag. */
			writeb(loadbuf[i], dev->sh_mem + dev->loadptr + 1);
			writeb(0x01, dev->sh_mem + dev->loadptr);

			dev->loadptr += 2;
			if (dev->loadptr > LOAD_ZONE_END)
				dev->loadptr = LOAD_ZONE_START;
		}
		if (user)
			kfree(ptr);
		return errstat ? errstat : len;
		break;
	default:
		return -EBUSY;
	}
	return 0;
}
/*
* demultiplexing of messages
*
*/
void pcbit_l3_receive(struct pcbit_dev * dev, ulong msg,
struct sk_buff * skb,
ushort hdr_len, ushort refnum)
{
struct pcbit_chan *chan;
struct sk_buff *skb2;
unsigned short len;
struct callb_data cbdata;
int complete, err;
isdn_ctrl ictl;
#ifdef DEBUG
struct msg_fmt * fmsg;
#endif
switch(msg) {
case MSG_TDATA_IND:
if (!(chan = capi_channel(dev, skb))) {
printk(KERN_WARNING
"CAPI header: unknown channel id\n");
break;
}
chan->r_refnum = skb->data[7];
skb_pull(skb, 8);
dev->dev_if->rcvcallb_skb(dev->id, chan->id, skb);
if (capi_tdata_resp(chan, &skb2) > 0)
pcbit_l2_write(dev, MSG_TDATA_RESP, refnum,
skb2, skb2->len);
return;
break;
case MSG_TDATA_CONF:
if (!(chan = capi_channel(dev, skb))) {
printk(KERN_WARNING
"CAPI header: unknown channel id\n");
break;
}
#ifdef DEBUG
if ( (*((ushort *) (skb->data + 2) )) != 0) {
printk(KERN_DEBUG "TDATA_CONF error\n");
}
#endif
#ifdef BLOCK_TIMER
if (chan->queued == MAX_QUEUED) {
del_timer(&chan->block_timer);
chan->block_timer.function = NULL;
}
#endif
chan->queued--;
ictl.driver = dev->id;
ictl.command = ISDN_STAT_BSENT;
ictl.arg = chan->id;
dev->dev_if->statcallb(&ictl);
break;
case MSG_CONN_IND:
/*
* channel: 1st not used will do
* if both are used we're in trouble
*/
if (!dev->b1->fsm_state)
chan = dev->b1;
else if (!dev->b2->fsm_state)
chan = dev->b2;
else {
printk(KERN_INFO
"Incoming connection: no channels available");
if ((len = capi_disc_req(*(ushort*)(skb->data), &skb2, CAUSE_NOCHAN)) > 0)
pcbit_l2_write(dev, MSG_DISC_REQ, refnum, skb2, len);
break;
}
cbdata.data.setup.CalledPN = NULL;
cbdata.data.setup.CallingPN = NULL;
capi_decode_conn_ind(chan, skb, &cbdata);
cbdata.type = EV_NET_SETUP;
pcbit_fsm_event(dev, chan, EV_NET_SETUP, NULL);
if (pcbit_check_msn(dev, cbdata.data.setup.CallingPN))
pcbit_fsm_event(dev, chan, EV_USR_PROCED_REQ, &cbdata);
else
pcbit_fsm_event(dev, chan, EV_USR_RELEASE_REQ, NULL);
if (cbdata.data.setup.CalledPN)
kfree(cbdata.data.setup.CalledPN);
if (cbdata.data.setup.CallingPN)
kfree(cbdata.data.setup.CallingPN);
break;
case MSG_CONN_CONF:
/*
* We should be able to find the channel by the message
* reference number. The current version of the firmware
* doesn't sent the ref number correctly.
*/
#ifdef DEBUG
printk(KERN_DEBUG "refnum=%04x b1=%04x b2=%04x\n", refnum,
dev->b1->s_refnum,
dev->b2->s_refnum);
#endif
#if 0
if (dev->b1->s_refnum == refnum)
chan = dev->b1;
else {
if (dev->b2->s_refnum == refnum)
chan = dev->b2;
else {
chan = NULL;
printk(KERN_WARNING "Connection Confirm - refnum doesn't match chan\n");
break;
}
}
#else
/* We just try to find a channel in the right state */
if (dev->b1->fsm_state == ST_CALL_INIT)
chan = dev->b1;
else {
if (dev->b2->s_refnum == ST_CALL_INIT)
chan = dev->b2;
else {
chan = NULL;
printk(KERN_WARNING "Connection Confirm - no channel in Call Init state\n");
break;
}
}
#endif
if (capi_decode_conn_conf(chan, skb, &complete)) {
printk(KERN_DEBUG "conn_conf indicates error\n");
pcbit_fsm_event(dev, chan, EV_ERROR, NULL);
}
else
if (complete)
pcbit_fsm_event(dev, chan, EV_NET_CALL_PROC, NULL);
else
pcbit_fsm_event(dev, chan, EV_NET_SETUP_ACK, NULL);
break;
case MSG_CONN_ACTV_IND:
if (!(chan = capi_channel(dev, skb))) {
printk(KERN_WARNING
"CAPI header: unknown channel id\n");
break;
}
if (capi_decode_conn_actv_ind(chan, skb)) {
printk("error in capi_decode_conn_actv_ind\n");
/* pcbit_fsm_event(dev, chan, EV_ERROR, NULL); */
break;
}
chan->r_refnum = refnum;
pcbit_fsm_event(dev, chan, EV_NET_CONN, NULL);
break;
case MSG_CONN_ACTV_CONF:
if (!(chan = capi_channel(dev, skb))) {
printk(KERN_WARNING
"CAPI header: unknown channel id\n");
break;
}
if (capi_decode_conn_actv_conf(chan, skb) == 0)
pcbit_fsm_event(dev, chan, EV_NET_CONN_ACK, NULL);
else
printk(KERN_DEBUG "decode_conn_actv_conf failed\n");
break;
case MSG_SELP_CONF:
if (!(chan = capi_channel(dev, skb))) {
printk(KERN_WARNING
"CAPI header: unknown channel id\n");
break;
}
if (!(err = capi_decode_sel_proto_conf(chan, skb)))
pcbit_fsm_event(dev, chan, EV_NET_SELP_RESP, NULL);
else {
/* Error */
printk("error %d - capi_decode_sel_proto_conf\n", err);
}
break;
case MSG_ACT_TRANSP_CONF:
if (!(chan = capi_channel(dev, skb))) {
printk(KERN_WARNING
"CAPI header: unknown channel id\n");
break;
}
if (!capi_decode_actv_trans_conf(chan, skb))
pcbit_fsm_event(dev, chan, EV_NET_ACTV_RESP, NULL);
break;
case MSG_DISC_IND:
if (!(chan = capi_channel(dev, skb))) {
printk(KERN_WARNING
"CAPI header: unknown channel id\n");
break;
}
if (!capi_decode_disc_ind(chan, skb))
pcbit_fsm_event(dev, chan, EV_NET_DISC, NULL);
else
printk(KERN_WARNING "capi_decode_disc_ind - error\n");
break;
case MSG_DISC_CONF:
if (!(chan = capi_channel(dev, skb))) {
printk(KERN_WARNING
"CAPI header: unknown channel id\n");
break;
}
if (!capi_decode_disc_ind(chan, skb))
pcbit_fsm_event(dev, chan, EV_NET_RELEASE, NULL);
else
printk(KERN_WARNING "capi_decode_disc_conf - error\n");
break;
case MSG_INFO_IND:
#ifdef DEBUG
printk(KERN_DEBUG "received Info Indication - discarded\n");
#endif
break;
#ifdef DEBUG
case MSG_DEBUG_188:
capi_decode_debug_188(skb->data, skb->len);
break;
default:
printk(KERN_DEBUG "pcbit_l3_receive: unknown message %08lx\n",
msg);
fmsg = (struct msg_fmt *) &msg;
printk(KERN_DEBUG "cmd=%02x sub=%02x\n",
fmsg->cmd, fmsg->scmd);
break;
#endif
}
kfree_skb(skb);
}
/*
* Single statbuf
* should be a statbuf per device
*/
static char statbuf[STATBUF_LEN];
static int stat_st = 0;
static int stat_end = 0;
/*
 * Copy LEN bytes from S to D.  FLAG selects the destination space:
 * nonzero means D is a user-space buffer (copy_to_user), zero means
 * a plain kernel-to-kernel memcpy.
 */
static __inline void
memcpy_to_COND(int flag, char *d, const char *s, int len) {
	if (flag) {
		copy_to_user(d, s, len);
		return;
	}
	memcpy(d, s, len);
}
/*
 * Drain up to LEN bytes of status text from the circular buffer
 * `statbuf' into BUF.  USER selects user-space vs. kernel copy
 * (see memcpy_to_COND).  DRIVER and CHANNEL are part of the isdn
 * interface and unused here.  Returns the number of bytes copied.
 *
 * Fix: when the stored data wraps around the end of statbuf, the
 * second chunk was copied to BUF again instead of to
 * BUF + (STATBUF_LEN - stat_st), overwriting the first chunk.
 */
int pcbit_stat(u_char* buf, int len, int user, int driver, int channel)
{
	int stat_count;

	/* Number of bytes currently buffered (buffer may wrap). */
	stat_count = stat_end - stat_st;

	if (stat_count < 0)
		stat_count = STATBUF_LEN - stat_st + stat_end;

	/* FIXME: should we sleep and wait for more cookies ? */
	if (len > stat_count)
		len = stat_count;

	if (stat_st < stat_end)
	{
		/* Contiguous region: single copy. */
		memcpy_to_COND(user, buf, statbuf + stat_st, len);
		stat_st += len;
	}
	else
	{
		if (len > STATBUF_LEN - stat_st)
		{
			/* Wrapped: copy tail of statbuf, then the head,
			   advancing the destination for the second chunk. */
			memcpy_to_COND(user, buf, statbuf + stat_st,
				       STATBUF_LEN - stat_st);
			memcpy_to_COND(user, buf + (STATBUF_LEN - stat_st),
				       statbuf,
				       len - (STATBUF_LEN - stat_st));

			stat_st = len - (STATBUF_LEN - stat_st);
		}
		else
		{
			memcpy_to_COND(user, buf, statbuf + stat_st,
				       len);

			stat_st += len;

			if (stat_st == STATBUF_LEN)
				stat_st = 0;
		}
	}

	/* Buffer fully drained: reset both indices. */
	if (stat_st == stat_end)
		stat_st = stat_end = 0;

	return len;
}
/*
 * Append STR to the circular status buffer and notify the upper layer
 * that status data is available (ISDN_STAT_STAVAIL).
 *
 * Fix: the old loop ran i from stat_end to strlen(str) and used the
 * same index for both statbuf and str, so it copied the wrong slice of
 * STR to the wrong place and never honoured the ring wrap for the
 * write position.  Write each byte at stat_end and read STR from 0.
 */
static void pcbit_logstat(struct pcbit_dev *dev, char *str)
{
	int i;
	int len = strlen(str);
	isdn_ctrl ictl;

	for (i = 0; i < len; i++)
	{
		statbuf[stat_end] = str[i];
		stat_end = (stat_end + 1) % STATBUF_LEN;
		/* Buffer full: drop the oldest byte. */
		if (stat_end == stat_st)
			stat_st = (stat_st + 1) % STATBUF_LEN;
	}

	ictl.command = ISDN_STAT_STAVAIL;
	ictl.driver = dev->id;
	ictl.arg = len;
	dev->dev_if->statcallb(&ictl);
}
extern char * isdn_state_table[];
extern char * strisdnevent(unsigned short);
/*
 * Log a channel state-machine transition (initial state I, event EV,
 * final state F) to the status buffer via pcbit_logstat(), and to the
 * console when DEBUG is set.
 *
 * NOTE(review): buf is 256 bytes and filled with sprintf from three
 * table strings plus two ints — assumes the table entries are short
 * enough never to overflow; confirm against isdn_state_table.
 */
void pcbit_state_change(struct pcbit_dev * dev, struct pcbit_chan * chan,
			unsigned short i, unsigned short ev, unsigned short f)
{
	char buf[256];

	sprintf(buf, "change on device: %d channel:%d\n%s -> %s -> %s\n",
		dev->id, chan->id,
		isdn_state_table[i], strisdnevent(ev), isdn_state_table[f]
		);

#ifdef DEBUG
	printk("%s", buf);
#endif

	pcbit_logstat(dev, buf);
}
/*
 * Timer callback armed by set_protocol_running(): wakes the task
 * sleeping on dev->set_running_wq when the board does not answer
 * within SET_RUN_TIMEOUT.  PTR carries the struct pcbit_dev pointer.
 */
static void set_running_timeout(unsigned long ptr)
{
	struct pcbit_dev *dev = (struct pcbit_dev *) ptr;

#ifdef DEBUG
	printk(KERN_DEBUG "set_running_timeout\n");
#endif
	wake_up_interruptible(&dev->set_running_wq);
}
/*
 * Kick the board firmware into the running state and sleep (with a
 * watchdog timer) until the layer-2 state machine either reaches
 * L2_RUNNING or the timeout fires.  Returns 0 on success, -EL2HLT
 * when the firmware did not come up.
 *
 * Fix: the failure path poked the shared-memory control byte with a
 * plain pointer dereference (*(dev->sh_mem + BANK4) = 0x40U) while
 * every other access in this file goes through readb()/writeb().
 * Direct dereference of ioremapped memory is not portable; use
 * writeb() here too.
 */
static int set_protocol_running(struct pcbit_dev * dev)
{
	isdn_ctrl ctl;

	init_timer(&dev->set_running_timer);

	dev->set_running_timer.function = &set_running_timeout;
	dev->set_running_timer.data = (ulong) dev;
	dev->set_running_timer.expires = jiffies + SET_RUN_TIMEOUT;

	/* kick it */

	dev->l2_state = L2_STARTING;

	writeb((0x80U | ((dev->rcv_seq & 0x07) << 3) | (dev->send_seq & 0x07)),
	       dev->sh_mem + BANK4);

	add_timer(&dev->set_running_timer);

	interruptible_sleep_on(&dev->set_running_wq);

	del_timer(&dev->set_running_timer);

	if (dev->l2_state == L2_RUNNING)
	{
		printk(KERN_DEBUG "pcbit: running\n");

		dev->unack_seq = dev->send_seq;

		dev->writeptr = dev->sh_mem;
		dev->readptr = dev->sh_mem + BANK2;

		/* tell the good news to the upper layer */
		ctl.driver = dev->id;
		ctl.command = ISDN_STAT_RUN;

		dev->dev_if->statcallb(&ctl);
	}
	else
	{
		printk(KERN_DEBUG "pcbit: initialization failed\n");
		printk(KERN_DEBUG "pcbit: firmware not loaded\n");

		dev->l2_state = L2_DOWN;

#ifdef DEBUG
		printk(KERN_DEBUG "Bank3 = %02x\n",
		       readb(dev->sh_mem + BANK3));
#endif
		writeb(0x40U, dev->sh_mem + BANK4);

		/* warn the upper layer */
		ctl.driver = dev->id;
		ctl.command = ISDN_STAT_STOP;

		dev->dev_if->statcallb(&ctl);

		return -EL2HLT;	/* Level 2 halted */
	}
	return 0;
}
/*
 * Driver-private ioctl entry point, dispatched on ctl->arg.  Used by
 * the firmware loader utility to drive the download protocol and to
 * query/poke board state.  Returns 0 or a negative errno.
 */
static int pcbit_ioctl(isdn_ctrl* ctl)
{
	struct pcbit_dev * dev;
	struct pcbit_ioctl *cmd;

	dev = finddev(ctl->driver);

	if (!dev)
	{
		printk(KERN_DEBUG "pcbit_ioctl: unknown device\n");
		return -ENODEV;
	}

	/* User parameters arrive through ctl->parm.num. */
	cmd = (struct pcbit_ioctl *) ctl->parm.num;

	switch(ctl->arg) {
	case PCBIT_IOCTL_GETSTAT:
		/* Report the current layer-2 state. */
		cmd->info.l2_status = dev->l2_state;
		break;

	case PCBIT_IOCTL_STRLOAD:
		/* Begin firmware download: reset sequence numbers and
		   the shared-memory read/write pointers. */
		if (dev->l2_state == L2_RUNNING)
			return -EBUSY;

		dev->unack_seq = dev->send_seq = dev->rcv_seq = 0;

		dev->writeptr = dev->sh_mem;
		dev->readptr = dev->sh_mem + BANK2;

		dev->l2_state = L2_LOADING;
		break;

	case PCBIT_IOCTL_LWMODE:
		/* Switch to "low-level write" mode; only valid mid-load. */
		if (dev->l2_state != L2_LOADING)
			return -EINVAL;

		dev->l2_state = L2_LWMODE;
		break;

	case PCBIT_IOCTL_FWMODE:
		/* Switch to firmware-write mode. */
		if (dev->l2_state == L2_RUNNING)
			return -EBUSY;

		dev->loadptr = LOAD_ZONE_START;
		dev->l2_state = L2_FWMODE;

		break;
	case PCBIT_IOCTL_ENDLOAD:
		if (dev->l2_state == L2_RUNNING)
			return -EBUSY;
		dev->l2_state = L2_DOWN;
		break;

	case PCBIT_IOCTL_SETBYTE:
		/* Poke one byte into board shared memory. */
		if (dev->l2_state == L2_RUNNING)
			return -EBUSY;

		/* check addr */
		if (cmd->info.rdp_byte.addr > BANK4)
			return -EFAULT;

		writeb(cmd->info.rdp_byte.value, dev->sh_mem + cmd->info.rdp_byte.addr);
		break;
	case PCBIT_IOCTL_GETBYTE:
		/* Peek one byte from board shared memory. */
		if (dev->l2_state == L2_RUNNING)
			return -EBUSY;

		/* check addr */

		if (cmd->info.rdp_byte.addr > BANK4)
		{
			printk("getbyte: invalid addr %04x\n", cmd->info.rdp_byte.addr);
			return -EFAULT;
		}

		cmd->info.rdp_byte.value = readb(dev->sh_mem + cmd->info.rdp_byte.addr);

		break;
	case PCBIT_IOCTL_RUNNING:
		/* Start the downloaded protocol stack. */
		if (dev->l2_state == L2_RUNNING)
			return -EBUSY;

		return set_protocol_running(dev);
		break;
	case PCBIT_IOCTL_WATCH188:
		/* The three cases below are firmware self-test messages,
		   only meaningful while loading. */
		if (dev->l2_state != L2_LOADING)
			return -EINVAL;
		pcbit_l2_write(dev, MSG_WATCH188, 0x0001, NULL, 0);
		break;
	case PCBIT_IOCTL_PING188:
		if (dev->l2_state != L2_LOADING)
			return -EINVAL;
		pcbit_l2_write(dev, MSG_PING188_REQ, 0x0001, NULL, 0);
		break;
	case PCBIT_IOCTL_APION:
		if (dev->l2_state != L2_LOADING)
			return -EINVAL;
		pcbit_l2_write(dev, MSG_API_ON, 0x0001, NULL, 0);
		break;
	case PCBIT_IOCTL_STOP:
		/* Halt the board and clear all sequence state. */
		dev->l2_state = L2_DOWN;
		writeb(0x40, dev->sh_mem + BANK4);
		dev->rcv_seq = 0;
		dev->send_seq = 0;
		dev->unack_seq = 0;
		break;
	default:
		printk("error: unknown ioctl\n");
		break;
	};
	return 0;
}
/*
* MSN list handling
*
* if null reject all calls
* if first entry has null MSN accept all calls
*/
/*
 * Free the entire MSN list of DEV.
 *
 * Fix: the old code freed only the list nodes and leaked the number
 * strings (ptr->msn) allocated in pcbit_set_msn().  Free them too;
 * wildcard entries carry msn == NULL and are skipped.
 */
static void pcbit_clear_msn(struct pcbit_dev *dev)
{
	struct msn_entry *ptr, *back;

	for (ptr=dev->msn_list; ptr; )
	{
		back = ptr->next;
		if (ptr->msn)
			kfree(ptr->msn);
		kfree(ptr);
		ptr = back;
	}

	dev->msn_list = NULL;
}
/*
 * Parse LIST, a comma-separated string of MSNs, and append one
 * msn_entry per number to dev->msn_list.  An empty LIST installs a
 * single wildcard entry (msn == NULL) that accepts all calls.
 *
 * Fixes:
 *  - ptr->msn was kmalloc'd with len bytes but written at index len
 *    (one-byte heap overflow); allocate len + 1.
 *  - memcpy copied len - 1 bytes, silently dropping the last digit;
 *    copy len bytes.
 *  - sp += len left sp on the ',' so every subsequent iteration
 *    parsed a zero-length entry; skip the separator with len + 1.
 *  - the freshly allocated msn_entry leaked when the string
 *    allocation failed; kfree it on that path.
 */
static void pcbit_set_msn(struct pcbit_dev *dev, char *list)
{
	struct msn_entry *ptr;
	struct msn_entry *back = NULL;
	char *cp, *sp;
	int len;

	if (strlen(list) == 0) {
		/* Wildcard entry: accept every incoming call. */
		ptr = kmalloc(sizeof(struct msn_entry), GFP_ATOMIC);
		if (!ptr) {
			printk(KERN_WARNING "kmalloc failed\n");
			return;
		}

		ptr->msn = NULL;

		ptr->next = dev->msn_list;
		dev->msn_list = ptr;

		return;
	}

	/* Find the current tail so new entries are appended. */
	if (dev->msn_list)
		for (back=dev->msn_list; back->next; back=back->next);

	sp = list;

	do {
		cp=strchr(sp, ',');
		if (cp)
			len = cp - sp;
		else
			len = strlen(sp);

		ptr = kmalloc(sizeof(struct msn_entry), GFP_ATOMIC);

		if (!ptr) {
			printk(KERN_WARNING "kmalloc failed\n");
			return;
		}
		ptr->next = NULL;

		ptr->msn = kmalloc(len + 1, GFP_ATOMIC);
		if (!ptr->msn) {
			printk(KERN_WARNING "kmalloc failed\n");
			kfree(ptr);
			return;
		}

		memcpy(ptr->msn, sp, len);
		ptr->msn[len] = 0;

#ifdef DEBUG
		printk(KERN_DEBUG "msn: %s\n", ptr->msn);
#endif
		if (dev->msn_list == NULL)
			dev->msn_list = ptr;
		else
			back->next = ptr;
		back = ptr;
		/* Step past this number and its trailing separator. */
		sp += len + 1;
	} while(cp);
}
/*
* check if we do signal or reject an incoming call
*/
/*
 * Decide whether an incoming call for MSN should be signalled.
 * Returns 1 when the list contains a wildcard entry (msn == NULL) or
 * an exact match; 0 otherwise, in which case the call is rejected.
 */
static int pcbit_check_msn(struct pcbit_dev *dev, char *msn)
{
	struct msn_entry *entry = dev->msn_list;

	while (entry != NULL) {
		if (entry->msn == NULL || strcmp(entry->msn, msn) == 0)
			return 1;
		entry = entry->next;
	}

	return 0;
}
|
jur/linux-2.2.1-ps2
|
drivers/isdn/pcbit/drv.c
|
C
|
gpl-2.0
| 23,053
|
/* Kernel-side API of the picture-quality (PQ) driver: per-module
   parameter get/set entry points plus whole-parameter-set helpers.
   All PQ_IO_S arguments carry the ioctl payload. */
#ifndef __DRV_PQ_H__
#define __DRV_PQ_H__

#include "hi_type.h"
#include "drv_pq_ext.h"
#include "drv_pq_define.h"

#ifdef __cplusplus
extern "C" {
#endif

/* Generic option access. */
HI_VOID PQ_DRV_GetOption(PQ_IO_S* arg);
HI_VOID PQ_DRV_SetOption(PQ_IO_S* arg);

/* DEI (de-interlace) parameters. */
HI_VOID PQ_DRV_GetDeiParam(PQ_IO_S* arg);
HI_VOID PQ_DRV_SetDeiParam(PQ_IO_S* arg);

/* FMD (film-mode detection) parameters. */
HI_VOID PQ_DRV_GetFmdParam(PQ_IO_S* arg);
HI_VOID PQ_DRV_SetFmdParam(PQ_IO_S* arg);

/* DNR (noise reduction) parameters. */
HI_VOID PQ_DRV_GetDnrParam(PQ_IO_S* arg);
HI_VOID PQ_DRV_SetDnrParam(PQ_IO_S* arg);

/* Sharpness: separate setters for the VPSS and graphics paths. */
HI_VOID PQ_DRV_GetSharpParam(PQ_IO_S* arg);
HI_VOID PQ_DRV_SetVpssSharpParam(PQ_IO_S* arg);
HI_VOID PQ_DRV_SetGfxSharpParam(PQ_IO_S* arg);

/* CSC (colour-space conversion), one setter per output path. */
HI_VOID PQ_DRV_GetCscParam(PQ_IO_S* arg);
HI_VOID PQ_DRV_SetVoCscParam(PQ_IO_S* arg);
HI_VOID PQ_DRV_SetDispCscParam(PQ_IO_S* arg);
HI_VOID PQ_DRV_SetGfxCscParam(PQ_IO_S* arg);

/* ACC parameters. */
HI_VOID PQ_DRV_GetAccParam(PQ_IO_S* arg);
HI_VOID PQ_DRV_SetAccParam(PQ_IO_S* arg);

/* ACM parameters. */
HI_VOID PQ_DRV_GetAcmParam(PQ_IO_S* arg);
HI_VOID PQ_DRV_SetAcmParam(PQ_IO_S* arg);

/* Gamma table and gamma control. */
HI_VOID PQ_DRV_GetGammaParam(PQ_IO_S* arg);
HI_VOID PQ_DRV_SetGammaParam(PQ_IO_S* arg);
HI_VOID PQ_DRV_GetGammaCtrlParam(PQ_IO_S* arg);
HI_VOID PQ_DRV_SetGammaCtrlParam(PQ_IO_S* arg);

/* Dither parameters. */
HI_VOID PQ_DRV_GetDitherParam(PQ_IO_S* arg);
HI_VOID PQ_DRV_SetDitherParam(PQ_IO_S* arg);

/* Persist the current parameters to flash. */
HI_S32 PQ_DRV_FixParaToFlash(PQ_IO_S* arg);

/* Push parameter updates to the individual hardware modules. */
HI_S32 PQ_DRV_UpdateVpss(HI_U32 u32UpdateType, PQ_PARAM_S* pstPqParam);
HI_S32 PQ_DRV_UpdateVo(HI_U32 u32UpdateType, PQ_PARAM_S* pstPqParam);
HI_S32 PQ_DRV_UpdateDisp(HI_U32 u32UpdateType, PQ_PARAM_S* pstPqParam);
HI_S32 PQ_DRV_UpdateGfx(HI_U32 u32UpdateType, PQ_PARAM_S* pstPqParam);

/* Whole parameter-set access and PQ-bin import helpers. */
HI_S32 PQ_DRV_GetPqParam(PQ_PARAM_S** pstPqParam);
HI_S32 PQ_DRV_SetPqParam(PQ_PARAM_S* pstPqParam);
HI_S32 PQ_DRV_GetFlashPqBin(PQ_PARAM_S* pstPqParam);
HI_S32 PQ_DRV_CopyPqBinToPqApp(PQ_PARAM_S* pstPqParam);

#ifdef __cplusplus
}
#endif

#endif
|
caps-liu/hisi-driverlibs
|
source/msp/drv/pq/drv_pq.h
|
C
|
gpl-2.0
| 1,873
|
/*
* CDE - Common Desktop Environment
*
* Copyright (c) 1993-2012, The Open Group. All rights reserved.
*
* These libraries and programs are free software; you can
* redistribute them and/or modify them under the terms of the GNU
* Lesser General Public License as published by the Free Software
* Foundation; either version 2 of the License, or (at your option)
* any later version.
*
* These libraries and programs are distributed in the hope that
* they will be useful, but WITHOUT ANY WARRANTY; without even the
* implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
* PURPOSE. See the GNU Lesser General Public License for more
* details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with these librararies and programs; if not, write
* to the Free Software Foundation, Inc., 51 Franklin Street, Fifth
* Floor, Boston, MA 02110-1301 USA
*/
/* $XConsortium: Group.C /main/1 1996/07/29 16:52:27 cde-hp $ */
// Copyright (c) 1994 James Clark
// See the file COPYING for copying permission.
#ifdef __GNUG__
#pragma implementation
#endif
#include "splib.h"
#include "Group.h"
#include "MessageBuilder.h"
#include "ParserMessages.h"
#include "macros.h"
#ifdef SP_NAMESPACE
namespace SP_NAMESPACE {
#endif
// Build the permitted-token mask from exactly four token types.
AllowedGroupTokens::AllowedGroupTokens(GroupToken::Type t1, GroupToken::Type t2,
				       GroupToken::Type t3, GroupToken::Type t4)
: flags_(0)
{
  const GroupToken::Type permitted[4] = { t1, t2, t3, t4 };
  for (int i = 0; i < 4; i++)
    allow(permitted[i]);
}
// Build the permitted-connector mask from one connector type.
AllowedGroupConnectors::AllowedGroupConnectors(GroupConnector::Type c1)
: flags_(0)
{
  allow(c1);
}

// Build the permitted-connector mask from two connector types.
AllowedGroupConnectors::AllowedGroupConnectors(GroupConnector::Type c1,
					       GroupConnector::Type c2)
: flags_(0)
{
  const GroupConnector::Type permitted[2] = { c1, c2 };
  for (int i = 0; i < 2; i++)
    allow(permitted[i]);
}

// Build the permitted-connector mask from three connector types.
AllowedGroupConnectors::AllowedGroupConnectors(GroupConnector::Type c1,
					       GroupConnector::Type c2,
					       GroupConnector::Type c3)
: flags_(0)
{
  const GroupConnector::Type permitted[3] = { c1, c2, c3 };
  for (int i = 0; i < 3; i++)
    allow(permitted[i]);
}

// Build the permitted-connector mask from four connector types.
AllowedGroupConnectors::AllowedGroupConnectors(GroupConnector::Type c1,
					       GroupConnector::Type c2,
					       GroupConnector::Type c3,
					       GroupConnector::Type c4)
: flags_(0)
{
  const GroupConnector::Type permitted[4] = { c1, c2, c3, c4 };
  for (int i = 0; i < 4; i++)
    allow(permitted[i]);
}
// Message argument wrapping the set of connectors allowed at the point
// where a parse error occurred, plus the syntax used to spell them.
AllowedGroupConnectorsMessageArg::AllowedGroupConnectorsMessageArg(
  const AllowedGroupConnectors &allow,
  const ConstPtr<Syntax> &syntax)
: allow_(allow),
  syntax_(syntax)
{
}

// Polymorphic clone used by the message machinery.
MessageArg *AllowedGroupConnectorsMessageArg::copy() const
{
  return new AllowedGroupConnectorsMessageArg(*this);
}
// Append to BUILDER a comma-separated list of the delimiters for every
// connector type permitted by allow_, each one wrapped in the
// delimStart/delimEnd message fragments.
void AllowedGroupConnectorsMessageArg::append(MessageBuilder &builder) const
{
  // Parallel tables: connector kind -> the general delimiter that spells it.
  static GroupConnector::Type types[] = {
    GroupConnector::andGC, GroupConnector::orGC, GroupConnector::seqGC,
    GroupConnector::grpcGC, GroupConnector::dtgcGC
  };
  static Syntax::DelimGeneral delims[] = {
    Syntax::dAND, Syntax::dOR, Syntax::dSEQ,
    Syntax::dGRPC, Syntax::dDTGC
  };
  Boolean first = 1;
  for (size_t i = 0; i < SIZEOF(types); i++)
    if (allow_.groupConnector(types[i])) {
      // Emit the separator before every item except the first.
      if (!first)
	builder.appendFragment(ParserMessages::listSep);
      else
	first = 0;
      const StringC &delim = syntax_->delimGeneral(delims[i]);
      builder.appendFragment(ParserMessages::delimStart);
      builder.appendChars(delim.data(), delim.size());
      builder.appendFragment(ParserMessages::delimEnd);
    }
}
// Message argument wrapping the set of group tokens allowed at the
// point where a parse error occurred, plus the active syntax.
AllowedGroupTokensMessageArg::AllowedGroupTokensMessageArg(
  const AllowedGroupTokens &allow,
  const ConstPtr<Syntax> &syntax)
: allow_(allow),
  syntax_(syntax)
{
}

// Polymorphic clone used by the message machinery.
MessageArg *AllowedGroupTokensMessageArg::copy() const
{
  return new AllowedGroupTokensMessageArg(*this);
}
// Append to BUILDER a comma-separated, human-readable list of the
// token kinds permitted by allow_: literals, groups, name variants,
// and finally #PCDATA spelled with the current syntax's RNI delimiter.
void AllowedGroupTokensMessageArg::append(MessageBuilder &builder) const
{
  // Collect the applicable message fragments first (at most 4).
  const MessageFragment *fragment[4];
  int nFragments = 0;
  if (allow_.groupToken(GroupToken::dataTagLiteral))
    fragment[nFragments++] = &ParserMessages::parameterLiteral;
  if (allow_.groupToken(GroupToken::dataTagGroup))
    fragment[nFragments++] = &ParserMessages::dataTagGroup;
  // Which bracketed-group kind, if any, is allowed.
  switch (allow_.group()) {
  case GroupToken::modelGroup:
    fragment[nFragments++] = &ParserMessages::modelGroup;
    break;
  case GroupToken::dataTagTemplateGroup:
    fragment[nFragments++] = &ParserMessages::dataTagTemplateGroup;
    break;
  default:
    break;
  }
  // Which name-start token kind, if any, is allowed.
  switch (allow_.nameStart()) {
  case GroupToken::name:
    fragment[nFragments++] = &ParserMessages::name;
    break;
  case GroupToken::nameToken:
    fragment[nFragments++] = &ParserMessages::nameToken;
    break;
  case GroupToken::elementToken:
    fragment[nFragments++] = &ParserMessages::elementToken;
    break;
  default:
    break;
  }
  // Emit the collected fragments, comma-separated.
  Boolean first = 1;
  for (int i = 0; i < nFragments; i++) {
    if (!first)
      builder.appendFragment(ParserMessages::listSep);
    else
      first = 0;
    builder.appendFragment(*fragment[i]);
  }
  // #PCDATA is spelled from the syntax (RNI delimiter + reserved name).
  if (allow_.groupToken(GroupToken::pcdata)) {
    if (!first)
      builder.appendFragment(ParserMessages::listSep);
    StringC pcdata(syntax_->delimGeneral(Syntax::dRNI));
    pcdata += syntax_->reservedName(Syntax::rPCDATA);
    builder.appendChars(pcdata.data(), pcdata.size());
  }
}
#ifdef SP_NAMESPACE
}
#endif
|
sTeeLM/MINIME
|
toolkit/srpm/SOURCES/cde-2.2.4/programs/nsgmls/Group.C
|
C++
|
gpl-2.0
| 5,099
|
<?php
/**
* WURFL API
*
* LICENSE
*
* This file is released under the GNU General Public License. Refer to the
* COPYING file distributed with this package.
*
* Copyright (c) 2008-2009, WURFL-Pro S.r.l., Rome, Italy
*
*
*
* @category WURFL
* @package WURFL
* @copyright WURFL-PRO SRL, Rome, Italy
* @license
* @version 1.0.0
*/
/**
 * Persistence provider backed by the PHP APC extension.
 *
 * Fix: the exception thrown when the extension is missing wrongly
 * referred to "the Memcached" (copy-paste from the memcache provider);
 * it now names the APC persistence provider.
 */
class WURFL_Xml_PersistenceProvider_APCPersistenceProvider extends WURFL_Xml_PersistenceProvider_AbstractPersistenceProvider {

	const EXTENSION_MODULE_NAME = "apc";
	protected $persistenceIdentifier = "APC_PERSISTENCE_PROVIDER";

	public function __construct() {
	}

	/**
	 * Verifies the APC extension is available before any use.
	 */
	public function initialize() {
		$this->_ensureModuleExistance();
	}

	/**
	 * Stores $object in APC under the encoded $objectId.
	 *
	 * @return bool TRUE on success (apc_store result)
	 */
	public function save($objectId, $object) {
		return apc_store($this->encode($objectId), $object);
	}

	/**
	 * Fetches the object stored under the encoded $objectId.
	 *
	 * @return mixed the stored value, or FALSE on miss (apc_fetch result)
	 */
	public function find($objectId) {
		return apc_fetch($this->encode($objectId));
	}

	/** Not supported by this backend; intentionally a no-op. */
	public function getAllData() {
	}

	/** Not supported by this backend; intentionally a no-op. */
	public function cleanAllData() {
	}

	/**
	 * Ensures the existence of the PHP extension apc.
	 *
	 * @throws WURFL_Xml_PersistenceProvider_Exception when apc is not loaded
	 */
	private function _ensureModuleExistance() {
		if(!extension_loaded(self::EXTENSION_MODULE_NAME)) {
			throw new WURFL_Xml_PersistenceProvider_Exception("The PHP extension apc must be installed and loaded in order to use the APC persistence provider.");
		}
	}
}
|
eusholli/drupal
|
sites/all/modules/wurfl/lib/wurfl/WURFL/Xml/PersistenceProvider/APCPersistanceProvider.php
|
PHP
|
gpl-2.0
| 1,399
|
/*
* INET An implementation of the TCP/IP protocol suite for the LINUX
* operating system. INET is implemented using the BSD Socket
* interface as the means of communication with the user level.
*
* Definitions for the HIPPI handlers.
*
* Version: @(#)hippidevice.h 1.0.0 05/26/97
*
* Author: Jes Sorensen, <Jes.Sorensen@cern.ch>
*
* hippidevice.h is based on previous fddidevice.h work by
* Ross Biro, <bir7@leland.Stanford.Edu>
* Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
* Alan Cox, <gw4pts@gw4pts.ampr.org>
* Lawrence V. Stefani, <stefani@lkg.dec.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#ifndef _LINUX_HIPPIDEVICE_H
#define _LINUX_HIPPIDEVICE_H

#include <linux/if_hippi.h>

#ifdef __KERNEL__
/* Hardware-header construction for outgoing HIPPI frames. */
extern int hippi_header(struct sk_buff *skb,
			struct device *dev,
			unsigned short type,
			void *daddr,
			void *saddr,
			unsigned len);

extern int hippi_rebuild_header(struct sk_buff *skb);

/* Classify an incoming frame's protocol type for netif delivery. */
extern unsigned short hippi_type_trans(struct sk_buff *skb,
				       struct device *dev);

/* Hardware-header cache maintenance hooks. */
extern void hippi_header_cache_bind(struct hh_cache ** hhp,
				    struct device *dev,
				    unsigned short htype,
				    __u32 daddr);

extern void hippi_header_cache_update(struct hh_cache *hh,
				      struct device *dev,
				      unsigned char * haddr);
extern int hippi_header_parse(struct sk_buff *skb, unsigned char *haddr);

/* Device registration/setup helpers. */
extern void hippi_net_init(void);
void hippi_setup(struct device *dev);
extern struct device *init_hippi_dev(struct device *, int);
#endif

#endif	/* _LINUX_HIPPIDEVICE_H */
|
jur/linux-2.2.1-ps2
|
include/linux/hippidevice.h
|
C
|
gpl-2.0
| 1,766
|
/* Copyright (C) 1999-2008, 2009, 2010, 2011 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Andreas Jaeger <aj@suse.de>, 1999.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published
by the Free Software Foundation; version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software Foundation,
Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */
#define PROCINFO_CLASS static
#include <alloca.h>
#include <argp.h>
#include <dirent.h>
#include <elf.h>
#include <error.h>
#include <errno.h>
#include <inttypes.h>
#include <libintl.h>
#include <locale.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdio_ext.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/fcntl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <glob.h>
#include <libgen.h>
#include <ldconfig.h>
#include <dl-cache.h>
#include <dl-procinfo.h>
#ifdef _DL_FIRST_PLATFORM
# define _DL_FIRST_EXTRA (_DL_FIRST_PLATFORM + _DL_PLATFORMS_COUNT)
#else
# define _DL_FIRST_EXTRA _DL_HWCAP_COUNT
#endif
#ifndef LD_SO_CONF
# define LD_SO_CONF SYSCONFDIR "/ld.so.conf"
#endif
/* Get libc version number. */
#include <version.h>
#define PACKAGE _libc_intl_domainname
static const struct
{
const char *name;
int flag;
} lib_types[] =
{
{"libc4", FLAG_LIBC4},
{"libc5", FLAG_ELF_LIBC5},
{"libc6", FLAG_ELF_LIBC6},
{"glibc2", FLAG_ELF_LIBC6}
};
/* List of directories to handle. */
struct dir_entry
{
char *path;
int flag;
ino64_t ino;
dev_t dev;
struct dir_entry *next;
};
/* The list is unsorted, contains no duplicates. Entries are added at
the end. */
static struct dir_entry *dir_entries;
/* Flags for different options. */
/* Print Cache. */
static int opt_print_cache;
/* Be verbose. */
int opt_verbose;
/* Format to support. */
/* 0: only libc5/glibc2; 1: both; 2: only glibc 2.2. */
int opt_format = 1;
/* Build cache. */
static int opt_build_cache = 1;
/* Generate links. */
static int opt_link = 1;
/* Only process directories specified on the command line. */
static int opt_only_cline;
/* Path to root for chroot. */
static char *opt_chroot;
/* Manually link given shared libraries. */
static int opt_manual_link;
/* Should we ignore an old auxiliary cache file? */
static int opt_ignore_aux_cache;
/* Cache file to use. */
static char *cache_file;
/* Configuration file. */
static const char *config_file;
/* Mask to use for important hardware capabilities. */
static unsigned long int hwcap_mask = HWCAP_IMPORTANT;
/* Configuration-defined capabilities defined in kernel vDSOs. */
static const char *hwcap_extra[64 - _DL_FIRST_EXTRA];
/* Name and version of program. */
static void print_version (FILE *stream, struct argp_state *state);
void (*argp_program_version_hook) (FILE *, struct argp_state *)
= print_version;
/* Function to print some extra text in the help message. */
static char *more_help (int key, const char *text, void *input);
/* Definitions of arguments for argp functions. */
static const struct argp_option options[] =
{
{ "print-cache", 'p', NULL, 0, N_("Print cache"), 0},
{ "verbose", 'v', NULL, 0, N_("Generate verbose messages"), 0},
{ NULL, 'N', NULL, 0, N_("Don't build cache"), 0},
{ NULL, 'X', NULL, 0, N_("Don't generate links"), 0},
{ NULL, 'r', N_("ROOT"), 0, N_("Change to and use ROOT as root directory"), 0},
{ NULL, 'C', N_("CACHE"), 0, N_("Use CACHE as cache file"), 0},
{ NULL, 'f', N_("CONF"), 0, N_("Use CONF as configuration file"), 0},
{ NULL, 'n', NULL, 0, N_("Only process directories specified on the command line. Don't build cache."), 0},
{ NULL, 'l', NULL, 0, N_("Manually link individual libraries."), 0},
{ "format", 'c', N_("FORMAT"), 0, N_("Format to use: new, old or compat (default)"), 0},
{ "ignore-aux-cache", 'i', NULL, 0, N_("Ignore auxiliary cache file"), 0},
{ NULL, 0, NULL, 0, NULL, 0 }
};
#define PROCINFO_CLASS static
#include <dl-procinfo.c>
/* Short description of program. */
static const char doc[] = N_("Configure Dynamic Linker Run Time Bindings.");
/* Prototype for option handler. */
static error_t parse_opt (int key, char *arg, struct argp_state *state);
/* Data structure to communicate with argp functions. */
static struct argp argp =
{
options, parse_opt, NULL, doc, NULL, more_help, NULL
};
/* Check if NAME corresponds to an important hardware capability, a
   platform name, or one of the configuration-defined extra
   capabilities.  Returns 1 if so, 0 otherwise.  */
static int
is_hwcap_platform (const char *name)
{
  int idx = _dl_string_hwcap (name);

  if (idx != -1 && ((1 << idx) & hwcap_mask))
    return 1;

  if (_dl_string_platform (name) != -1)
    return 1;

  for (idx = _DL_FIRST_EXTRA; idx < 64; ++idx)
    {
      const char *extra = hwcap_extra[idx - _DL_FIRST_EXTRA];
      if (extra != NULL && strcmp (name, extra) == 0)
	return 1;
    }

  return 0;
}
/* Get hwcap (including platform) encoding of PATH: every trailing
   path component that names a hwcap, platform, or extra capability
   contributes one bit to the returned mask.

   Fix: the trailing-slash strip tested str[len], which is always the
   NUL terminator, so a path ending in '/' never had its last
   component recognized; test the character before the NUL.  */
static uint64_t
path_hwcap (const char *path)
{
  char *str = xstrdup (path);
  char *ptr;
  uint64_t hwcap = 0;
  uint64_t h;

  size_t len;

  len = strlen (str);
  if (len > 0 && str[len - 1] == '/')
    str[len - 1] = '\0';

  /* Search pathname from the end and check for hwcap strings.  */
  for (;;)
    {
      ptr = strrchr (str, '/');

      if (ptr == NULL)
	break;

      h = _dl_string_hwcap (ptr + 1);

      if (h == (uint64_t) -1)
	{
	  h = _dl_string_platform (ptr + 1);
	  if (h == (uint64_t) -1)
	    {
	      for (h = _DL_FIRST_EXTRA; h < 64; ++h)
		if (hwcap_extra[h - _DL_FIRST_EXTRA] != NULL
		    && !strcmp (ptr + 1, hwcap_extra[h - _DL_FIRST_EXTRA]))
		  break;
	      if (h == 64)
		break;
	    }
	}
      hwcap += 1ULL << h;

      /* Search the next part of the path.  */
      *ptr = '\0';
    }

  free (str);
  return hwcap;
}
/* Handle program arguments.  argp callback: translates each option
   KEY into the corresponding global flag/setting.  Returns 0 when the
   key was consumed, ARGP_ERR_UNKNOWN otherwise.  */
static error_t
parse_opt (int key, char *arg, struct argp_state *state)
{
  switch (key)
    {
    case 'C':
      cache_file = arg;
      /* Ignore auxiliary cache since we use non-standard cache.  */
      opt_ignore_aux_cache = 1;
      break;
    case 'f':
      config_file = arg;
      break;
    case 'i':
      opt_ignore_aux_cache = 1;
      break;
    case 'l':
      opt_manual_link = 1;
      break;
    case 'N':
      opt_build_cache = 0;
      break;
    case 'n':
      /* -n implies -N: command-line dirs only, no cache rebuild.  */
      opt_build_cache = 0;
      opt_only_cline = 1;
      break;
    case 'p':
      opt_print_cache = 1;
      break;
    case 'r':
      opt_chroot = arg;
      break;
    case 'v':
      opt_verbose = 1;
      break;
    case 'X':
      opt_link = 0;
      break;
    case 'c':
      /* Cache format: 0 = old (libc5), 1 = compat (both), 2 = new.
	 Unrecognized values silently keep the current setting.  */
      if (strcmp (arg, "old") == 0)
	opt_format = 0;
      else if (strcmp (arg, "compat") == 0)
	opt_format = 1;
      else if (strcmp (arg, "new") == 0)
	opt_format = 2;
      break;
    default:
      return ARGP_ERR_UNKNOWN;
    }

  return 0;
}
/* Print bug-reporting information in the help message.  argp help
   filter: substitutes the extra-help text, passes everything else
   through untouched.  */
static char *
more_help (int key, const char *text, void *input)
{
  if (key != ARGP_KEY_HELP_EXTRA)
    return (char *) text;

  /* We print some extra information.  */
  return strdup (gettext ("\
For bug reporting instructions, please see:\n\
<http://www.gnu.org/software/libc/bugs.html>.\n"));
}
/* Print the version information.  Installed as
   argp_program_version_hook, so argp calls it for --version.
   PACKAGE and VERSION come from the libc build.  */
static void
print_version (FILE *stream, struct argp_state *state)
{
  fprintf (stream, "ldconfig (GNU %s) %s\n", PACKAGE, VERSION);
  fprintf (stream, gettext ("\
Copyright (C) %s Free Software Foundation, Inc.\n\
This is free software; see the source for copying conditions.  There is NO\n\
warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n\
"), "2011");
  fprintf (stream, gettext ("Written by %s.\n"),
	   "Andreas Jaeger");
}
/* Add a single directory entry to the global list.  If an entry with
   the same inode/device already exists, its flag is refreshed and the
   new ENTRY is freed (warning when VERBOSE and opt_verbose are set);
   otherwise ENTRY is appended at the end.  */
static void
add_single_dir (struct dir_entry *entry, int verbose)
{
  struct dir_entry *ptr = dir_entries;
  struct dir_entry *prev = NULL;

  while (ptr != NULL)
    {
      /* Check for duplicates.  */
      if (ptr->ino == entry->ino && ptr->dev == entry->dev)
	{
	  if (opt_verbose && verbose)
	    error (0, 0, _("Path `%s' given more than once"), entry->path);
	  /* Use the newer information.  */
	  ptr->flag = entry->flag;
	  free (entry->path);
	  free (entry);
	  return;
	}
      prev = ptr;
      ptr = ptr->next;
    }

  if (prev == NULL)
    dir_entries = entry;
  else
    prev->next = entry;
}
/* Add one directory (optionally "PATH=libtype") to the list of
   directories to process.

   Fix: when canonicalization reduced the path to the empty string the
   function returned without releasing the freshly allocated entry,
   leaking both the struct and its path copy.  */
static void
add_dir (const char *line)
{
  unsigned int i;
  struct dir_entry *entry = xmalloc (sizeof (struct dir_entry));
  entry->next = NULL;

  /* Search for an '=' sign.  */
  entry->path = xstrdup (line);
  char *equal_sign = strchr (entry->path, '=');
  if (equal_sign)
    {
      *equal_sign = '\0';
      ++equal_sign;
      entry->flag = FLAG_ANY;
      for (i = 0; i < sizeof (lib_types) / sizeof (lib_types[0]); ++i)
	if (strcmp (equal_sign, lib_types[i].name) == 0)
	  {
	    entry->flag = lib_types[i].flag;
	    break;
	  }
      if (entry->flag == FLAG_ANY)
	error (0, 0, _("%s is not a known library type"), equal_sign);
    }
  else
    {
      entry->flag = FLAG_ANY;
    }

  /* Canonify path: for now only remove leading and trailing
     whitespace and the trailing slashes.  */
  i = strlen (entry->path);
  while (i > 0 && isspace (entry->path[i - 1]))
    entry->path[--i] = '\0';

  while (i > 0 && entry->path[i - 1] == '/')
    entry->path[--i] = '\0';

  if (i == 0)
    {
      /* Nothing left: free the entry instead of leaking it.  */
      free (entry->path);
      free (entry);
      return;
    }

  char *path = entry->path;
  if (opt_chroot)
    path = chroot_canon (opt_chroot, path);

  struct stat64 stat_buf;
  if (path == NULL || stat64 (path, &stat_buf))
    {
      if (opt_verbose)
	error (0, errno, _("Can't stat %s"), entry->path);
      free (entry->path);
      free (entry);
    }
  else
    {
      entry->ino = stat_buf.st_ino;
      entry->dev = stat_buf.st_dev;

      add_single_dir (entry, 1);
    }

  if (opt_chroot)
    free (path);
}
/* stat64 REAL_PATH, resolving it inside the chroot when one is
   active: if REAL_PATH is a symlink under the chroot, canonicalize
   PATH relative to the chroot and stat the result.  Returns the
   stat/lstat return value, or -1 if canonicalization fails.  */
static int
chroot_stat (const char *real_path, const char *path, struct stat64 *st)
{
  if (!opt_chroot)
    return stat64 (real_path, st);

  int ret = lstat64 (real_path, st);
  if (ret != 0 || !S_ISLNK (st->st_mode))
    return ret;

  char *canon_path = chroot_canon (opt_chroot, path);
  if (canon_path == NULL)
    return -1;

  ret = stat64 (canon_path, st);
  free (canon_path);

  return ret;
}
/* Create a symbolic link from soname to libname in directory path.
   REAL_PATH/real_* variants are the chroot-resolved counterparts used
   for actual filesystem access, while PATH/plain names are used for
   messages.  An existing correct link is left alone; an existing
   non-symlink with the soname's name is reported and preserved.  */
static void
create_links (const char *real_path, const char *path, const char *libname,
	      const char *soname)
{
  char *full_libname, *full_soname;
  char *real_full_libname, *real_full_soname;
  struct stat64 stat_lib, stat_so, lstat_so;
  int do_link = 1;
  int do_remove = 1;
  /* XXX: The logics in this function should be simplified.  */

  /* Get complete path.  */
  full_libname = alloca (strlen (path) + strlen (libname) + 2);
  full_soname = alloca (strlen (path) + strlen (soname) + 2);
  sprintf (full_libname, "%s/%s", path, libname);
  sprintf (full_soname, "%s/%s", path, soname);
  if (opt_chroot)
    {
      real_full_libname = alloca (strlen (real_path) + strlen (libname) + 2);
      real_full_soname = alloca (strlen (real_path) + strlen (soname) + 2);
      sprintf (real_full_libname, "%s/%s", real_path, libname);
      sprintf (real_full_soname, "%s/%s", real_path, soname);
    }
  else
    {
      real_full_libname = full_libname;
      real_full_soname = full_soname;
    }

  /* Does soname already exist and point to the right library?  */
  if (chroot_stat (real_full_soname, full_soname, &stat_so) == 0)
    {
      if (chroot_stat (real_full_libname, full_libname, &stat_lib))
	{
	  error (0, 0, _("Can't stat %s\n"), full_libname);
	  return;
	}
      if (stat_lib.st_dev == stat_so.st_dev
	  && stat_lib.st_ino == stat_so.st_ino)
	/* Link is already correct.  */
	do_link = 0;
      else if (lstat64 (full_soname, &lstat_so) == 0
	       && !S_ISLNK (lstat_so.st_mode))
	{
	  /* The soname exists but is a real file — never clobber it.  */
	  error (0, 0, _("%s is not a symbolic link\n"), full_soname);
	  do_link = 0;
	  do_remove = 0;
	}
    }
  else if (lstat64 (real_full_soname, &lstat_so) != 0
	   || !S_ISLNK (lstat_so.st_mode))
    /* Unless it is a stale symlink, there is no need to remove.  */
    do_remove = 0;

  if (opt_verbose)
    printf ("\t%s -> %s", soname, libname);

  if (do_link && opt_link)
    {
      /* Remove old link.  */
      if (do_remove)
	if (unlink (real_full_soname))
	  {
	    error (0, 0, _("Can't unlink %s"), full_soname);
	    do_link = 0;
	  }
      /* Create symbolic link.  */
      if (do_link && symlink (libname, real_full_soname))
	{
	  error (0, 0, _("Can't link %s to %s"), full_soname, libname);
	  do_link = 0;
	}
      if (opt_verbose)
	{
	  if (do_link)
	    fputs (_(" (changed)\n"), stdout);
	  else
	    fputs (_(" (SKIPPED)\n"), stdout);
	}
    }
  else if (opt_verbose)
    fputs ("\n", stdout);
}
/* Manually link the given library: split LIBRARY into directory and
   file name, determine its soname with process_file, and create the
   soname symlink via create_links.

   Fix: when a chroot is active, real_path is allocated by
   chroot_canon and was never freed on any exit path; use goto-based
   cleanup (CERT MEM12-C) so both path and real_path are released.  */
static void
manual_link (char *library)
{
  char *path;
  char *real_path;
  char *real_library;
  char *libname;
  char *soname;
  struct stat64 stat_buf;
  int flag;
  unsigned int osversion;

  /* Prepare arguments for create_links call.  Split library name in
     directory and filename first.  Since path is allocated, we've got
     to be careful to free at the end.  */
  path = xstrdup (library);
  libname = strrchr (path, '/');

  if (libname)
    {
      /* Successfully split names.  Check if path is just "/" to avoid
	 an empty path.  */
      if (libname == path)
	{
	  libname = library + 1;
	  path = xrealloc (path, 2);
	  strcpy (path, "/");
	}
      else
	{
	  *libname = '\0';
	  ++libname;
	}
    }
  else
    {
      /* There's no path, construct one.  */
      libname = library;
      path = xrealloc (path, 2);
      strcpy (path, ".");
    }

  if (opt_chroot)
    {
      real_path = chroot_canon (opt_chroot, path);
      if (real_path == NULL)
	{
	  error (0, errno, _("Can't find %s"), path);
	  free (path);
	  return;
	}
      real_library = alloca (strlen (real_path) + strlen (libname) + 2);
      sprintf (real_library, "%s/%s", real_path, libname);
    }
  else
    {
      real_path = path;
      real_library = library;
    }

  /* Do some sanity checks first.  */
  if (lstat64 (real_library, &stat_buf))
    {
      error (0, errno, _("Cannot lstat %s"), library);
      goto out;
    }
  /* We don't want links here!  */
  else if (!S_ISREG (stat_buf.st_mode))
    {
      error (0, 0, _("Ignored file %s since it is not a regular file."),
	     library);
      goto out;
    }

  if (process_file (real_library, library, libname, &flag, &osversion,
		    &soname, 0, &stat_buf))
    {
      error (0, 0, _("No link created since soname could not be found for %s"),
	     library);
      goto out;
    }
  if (soname == NULL)
    soname = implicit_soname (libname, flag);
  create_links (real_path, path, libname, soname);
  free (soname);

out:
  if (real_path != path)
    free (real_path);
  free (path);
}
/* Read a whole directory and search for libraries.
The purpose is two-fold:
- search for libraries which will be added to the cache
- create symbolic links to the soname for each library
This has to be done separatly for each directory.
To keep track of which libraries to add to the cache and which
links to create, we save a list of all libraries.
The algorithm is basically:
for all libraries in the directory do
get soname of library
if soname is already in list
if new library is newer, replace entry
otherwise ignore this library
otherwise add library to list
For example, if the two libraries libxy.so.1.1 and libxy.so.1.2
exist and both have the same soname, e.g. libxy.so, a symbolic link
is created from libxy.so.1.2 (the newer one) to libxy.so.
libxy.so.1.2 and libxy.so are added to the cache - but not
libxy.so.1.1. */
/* Information for one library found in a directory; one node of the
   per-directory singly linked list built by search_dir.  */
struct dlib_entry
{
  char *name;			/* File name within the directory (xstrdup'd).  */
  char *soname;			/* Soname of the DSO; dynamically allocated,
				   freed when the list is torn down.  */
  int flag;			/* Library type flag (FLAG_ELF_LIBC6 etc.).  */
  int is_link;			/* Nonzero if the file is a symlink standing in
				   for its soname (no link is created then).  */
  unsigned int osversion;	/* OS version value reported by process_file
				   or the auxiliary cache.  */
  struct dlib_entry *next;	/* Next entry in the list.  */
};
/* Scan the single search directory ENTRY: collect candidate shared
   libraries, queue hwcap/platform subdirectories for later scanning,
   and keep for each soname only the "best" file (a real file over a
   symlink, otherwise the one with the higher version).  Afterwards the
   soname symlinks are created and, if requested, the entries are added
   to the cache.
   NOTE(review): the file-name buffers are alloca'd and may be
   re-alloca'd inside the readdir loop without releasing the old ones,
   so stack usage grows with the longest names seen in this call.  */
static void
search_dir (const struct dir_entry *entry)
{
  uint64_t hwcap = path_hwcap (entry->path);
  if (opt_verbose)
    {
      if (hwcap != 0)
	printf ("%s: (hwcap: %#.16" PRIx64 ")\n", entry->path, hwcap);
      else
	printf ("%s:\n", entry->path);
    }

  char *dir_name;
  char *real_file_name;
  size_t real_file_name_len;
  size_t file_name_len = PATH_MAX;
  char *file_name = alloca (file_name_len);
  if (opt_chroot)
    {
      /* Outside a chroot the two names coincide; inside, FILE_NAME is
	 the logical path (for messages and the cache) and
	 REAL_FILE_NAME the physical one used for file operations.  */
      dir_name = chroot_canon (opt_chroot, entry->path);
      real_file_name_len = PATH_MAX;
      real_file_name = alloca (real_file_name_len);
    }
  else
    {
      dir_name = entry->path;
      real_file_name_len = 0;
      real_file_name = file_name;
    }

  DIR *dir;
  if (dir_name == NULL || (dir = opendir (dir_name)) == NULL)
    {
      if (opt_verbose)
	error (0, errno, _("Can't open directory %s"), entry->path);
      if (opt_chroot && dir_name)
	free (dir_name);
      return;
    }

  struct dirent64 *direntry;
  struct dlib_entry *dlibs = NULL;
  while ((direntry = readdir64 (dir)) != NULL)
    {
      int flag;
#ifdef _DIRENT_HAVE_D_TYPE
      /* We only look at links and regular files.  */
      if (direntry->d_type != DT_UNKNOWN
	  && direntry->d_type != DT_LNK
	  && direntry->d_type != DT_REG
	  && direntry->d_type != DT_DIR)
	continue;
#endif /* _DIRENT_HAVE_D_TYPE  */
      /* Does this file look like a shared library or is it a hwcap
	 subdirectory?  The dynamic linker is also considered as
	 shared library.  */
      if (((strncmp (direntry->d_name, "lib", 3) != 0
	    && strncmp (direntry->d_name, "ld-", 3) != 0)
	   || strstr (direntry->d_name, ".so") == NULL)
	  && (
#ifdef _DIRENT_HAVE_D_TYPE
	      direntry->d_type == DT_REG ||
#endif
	      !is_hwcap_platform (direntry->d_name)))
	continue;

      size_t len = strlen (direntry->d_name);
      /* Skip temporary files created by the prelink program.  Files with
	 names like these are never really DSOs we want to look at.  */
      if (len >= sizeof (".#prelink#") - 1)
	{
	  if (strcmp (direntry->d_name + len - sizeof (".#prelink#") + 1,
		      ".#prelink#") == 0)
	    continue;
	  if (len >= sizeof (".#prelink#.XXXXXX") - 1
	      && memcmp (direntry->d_name + len - sizeof (".#prelink#.XXXXXX")
			 + 1, ".#prelink#.", sizeof (".#prelink#.") - 1) == 0)
	    continue;
	}
      /* Grow the name buffers on demand (never shrunk; see note above).  */
      len += strlen (entry->path) + 2;
      if (len > file_name_len)
	{
	  file_name_len = len;
	  file_name = alloca (file_name_len);
	  if (!opt_chroot)
	    real_file_name = file_name;
	}
      sprintf (file_name, "%s/%s", entry->path, direntry->d_name);
      if (opt_chroot)
	{
	  len = strlen (dir_name) + strlen (direntry->d_name) + 2;
	  if (len > real_file_name_len)
	    {
	      real_file_name_len = len;
	      real_file_name = alloca (real_file_name_len);
	    }
	  sprintf (real_file_name, "%s/%s", dir_name, direntry->d_name);
	}

      struct stat64 lstat_buf;
#ifdef _DIRENT_HAVE_D_TYPE
      /* We optimize and try to do the lstat call only if needed.
	 Only st_mode is valid in LSTAT_BUF in this case.  */
      if (direntry->d_type != DT_UNKNOWN)
	lstat_buf.st_mode = DTTOIF (direntry->d_type);
      else
#endif
	if (__builtin_expect (lstat64 (real_file_name, &lstat_buf), 0))
	  {
	    error (0, errno, _("Cannot lstat %s"), file_name);
	    continue;
	  }

      struct stat64 stat_buf;
      int is_dir;
      int is_link = S_ISLNK (lstat_buf.st_mode);
      if (is_link)
	{
	  /* In case of symlink, we check if the symlink refers to
	     a directory. */
	  char *target_name = real_file_name;
	  if (opt_chroot)
	    {
	      target_name = chroot_canon (opt_chroot, file_name);
	      if (target_name == NULL)
		{
		  if (strstr (file_name, ".so") == NULL)
		    error (0, 0, _("Input file %s not found.\n"), file_name);
		  continue;
		}
	    }
	  if (__builtin_expect (stat64 (target_name, &stat_buf), 0))
	    {
	      if (opt_verbose)
		error (0, errno, _("Cannot stat %s"), file_name);

	      /* Remove stale symlinks.  */
	      if (strstr (direntry->d_name, ".so."))
		unlink (real_file_name);
	      continue;
	    }
	  is_dir = S_ISDIR (stat_buf.st_mode);

	  /* lstat_buf is later stored, update contents with the link
	     target's identity so the aux cache keys on the target.  */
	  lstat_buf.st_dev = stat_buf.st_dev;
	  lstat_buf.st_ino = stat_buf.st_ino;
	  lstat_buf.st_size = stat_buf.st_size;
	  lstat_buf.st_ctime = stat_buf.st_ctime;
	}
      else
	is_dir = S_ISDIR (lstat_buf.st_mode);

      if (is_dir && is_hwcap_platform (direntry->d_name))
	{
	  /* Handle subdirectory later.  */
	  struct dir_entry *new_entry;

	  new_entry = xmalloc (sizeof (struct dir_entry));
	  new_entry->path = xstrdup (file_name);
	  new_entry->flag = entry->flag;
	  new_entry->next = NULL;
#ifdef _DIRENT_HAVE_D_TYPE
	  /* We have filled in lstat only #ifndef
	     _DIRENT_HAVE_D_TYPE.  Fill it in if needed.  */
	  if (!is_link
	      && direntry->d_type != DT_UNKNOWN
	      && __builtin_expect (lstat64 (real_file_name, &lstat_buf), 0))
	    {
	      error (0, errno, _("Cannot lstat %s"), file_name);
	      free (new_entry->path);
	      free (new_entry);
	      continue;
	    }
#endif
	  new_entry->ino = lstat_buf.st_ino;
	  new_entry->dev = lstat_buf.st_dev;
	  add_single_dir (new_entry, 0);
	  continue;
	}
      else if (!S_ISREG (lstat_buf.st_mode) && !is_link)
	continue;

      char *real_name;
      if (opt_chroot && is_link)
	{
	  real_name = chroot_canon (opt_chroot, file_name);
	  if (real_name == NULL)
	    {
	      if (strstr (file_name, ".so") == NULL)
		error (0, 0, _("Input file %s not found.\n"), file_name);
	      continue;
	    }
	}
      else
	real_name = real_file_name;

#ifdef _DIRENT_HAVE_D_TYPE
      /* Call lstat64 if not done yet.  */
      if (!is_link
	  && direntry->d_type != DT_UNKNOWN
	  && __builtin_expect (lstat64 (real_file_name, &lstat_buf), 0))
	{
	  error (0, errno, _("Cannot lstat %s"), file_name);
	  continue;
	}
#endif

      /* First search whether the auxiliary cache contains this
	 library already and it's not changed.  */
      char *soname;
      unsigned int osversion;
      if (!search_aux_cache (&lstat_buf, &flag, &osversion, &soname))
	{
	  if (process_file (real_name, file_name, direntry->d_name, &flag,
			    &osversion, &soname, is_link, &lstat_buf))
	    {
	      if (real_name != real_file_name)
		free (real_name);
	      continue;
	    }
	  else if (opt_build_cache)
	    add_to_aux_cache (&lstat_buf, flag, osversion, soname);
	}

      if (soname == NULL)
	soname = implicit_soname (direntry->d_name, flag);

      /* A link may just point to itself.  */
      if (is_link)
	{
	  /* If the path the link points to isn't its soname and it is not
	     .so symlink for ld(1) only, we treat it as a normal file.  */
	  const char *real_base_name = basename (real_file_name);

	  if (strcmp (real_base_name, soname) != 0)
	    {
	      len = strlen (real_base_name);
	      if (len < strlen (".so")
		  || strcmp (real_base_name + len - strlen (".so"), ".so") != 0
		  || strncmp (real_base_name, soname, len) != 0)
		is_link = 0;
	    }
	}

      if (real_name != real_file_name)
	free (real_name);

      if (is_link)
	{
	  /* For the soname-symlink case record the link's own name.  */
	  free (soname);
	  soname = xstrdup (direntry->d_name);
	}

      /* A generic ELF flag is refined to the directory's libc flavor.  */
      if (flag == FLAG_ELF
	  && (entry->flag == FLAG_ELF_LIBC5
	      || entry->flag == FLAG_ELF_LIBC6))
	flag = entry->flag;

      /* Some sanity checks to print warnings.  */
      if (opt_verbose)
	{
	  if (flag == FLAG_ELF_LIBC5 && entry->flag != FLAG_ELF_LIBC5
	      && entry->flag != FLAG_ANY)
	    error (0, 0, _("libc5 library %s in wrong directory"), file_name);
	  if (flag == FLAG_ELF_LIBC6 && entry->flag != FLAG_ELF_LIBC6
	      && entry->flag != FLAG_ANY)
	    error (0, 0, _("libc6 library %s in wrong directory"), file_name);
	  if (flag == FLAG_LIBC4 && entry->flag != FLAG_LIBC4
	      && entry->flag != FLAG_ANY)
	    error (0, 0, _("libc4 library %s in wrong directory"), file_name);
	}

      /* Add library to list.  */
      struct dlib_entry *dlib_ptr;
      for (dlib_ptr = dlibs; dlib_ptr != NULL; dlib_ptr = dlib_ptr->next)
	{
	  /* Is soname already in list?  */
	  if (strcmp (dlib_ptr->soname, soname) == 0)
	    {
	      /* Prefer a file to a link, otherwise check which one
		 is newer.  */
	      if ((!is_link && dlib_ptr->is_link)
		  || (is_link == dlib_ptr->is_link
		      && _dl_cache_libcmp (dlib_ptr->name, direntry->d_name) < 0))
		{
		  /* It's newer - add it.  */
		  /* Flag should be the same - sanity check.  */
		  if (dlib_ptr->flag != flag)
		    {
		      if (dlib_ptr->flag == FLAG_ELF
			  && (flag == FLAG_ELF_LIBC5 || flag == FLAG_ELF_LIBC6))
			dlib_ptr->flag = flag;
		      else if ((dlib_ptr->flag == FLAG_ELF_LIBC5
				|| dlib_ptr->flag == FLAG_ELF_LIBC6)
			       && flag == FLAG_ELF)
			dlib_ptr->flag = flag;
		      else
			error (0, 0, _("libraries %s and %s in directory %s have same soname but different type."),
			       dlib_ptr->name, direntry->d_name,
			       entry->path);
		    }
		  free (dlib_ptr->name);
		  dlib_ptr->name = xstrdup (direntry->d_name);
		  dlib_ptr->is_link = is_link;
		  dlib_ptr->osversion = osversion;
		}
	      /* Don't add this library, abort loop.  */
	      /* Also free soname, since it's dynamically allocated.  */
	      free (soname);
	      break;
	    }
	}
      /* Add the library if it's not already in.  The list entry takes
	 ownership of SONAME.  */
      if (dlib_ptr == NULL)
	{
	  dlib_ptr = (struct dlib_entry *)xmalloc (sizeof (struct dlib_entry));
	  dlib_ptr->name = xstrdup (direntry->d_name);
	  dlib_ptr->soname = soname;
	  dlib_ptr->flag = flag;
	  dlib_ptr->is_link = is_link;
	  dlib_ptr->osversion = osversion;
	  /* Add at head of list.  */
	  dlib_ptr->next = dlibs;
	  dlibs = dlib_ptr;
	}
    }

  closedir (dir);

  /* Now dlibs contains a list of all libs - add those to the cache
     and created all symbolic links.  */
  struct dlib_entry *dlib_ptr;
  for (dlib_ptr = dlibs; dlib_ptr != NULL; dlib_ptr = dlib_ptr->next)
    {
      /* Don't create links to links.  */
      if (dlib_ptr->is_link == 0)
	create_links (dir_name, entry->path, dlib_ptr->name,
		      dlib_ptr->soname);
      if (opt_build_cache)
	add_to_cache (entry->path, dlib_ptr->soname, dlib_ptr->flag,
		      dlib_ptr->osversion, hwcap);
    }

  /* Free all resources.  */
  while (dlibs)
    {
      dlib_ptr = dlibs;
      free (dlib_ptr->soname);
      free (dlib_ptr->name);
      dlibs = dlibs->next;
      free (dlib_ptr);
    }

  if (opt_chroot && dir_name)
    free (dir_name);
}
/* Search through all libraries. */
static void
search_dirs (void)
{
struct dir_entry *entry;
for (entry = dir_entries; entry != NULL; entry = entry->next)
search_dir (entry);
/* Free all allocated memory. */
while (dir_entries)
{
entry = dir_entries;
dir_entries = dir_entries->next;
free (entry->path);
free (entry);
}
}
static void parse_conf_include (const char *config_file, unsigned int lineno,
bool do_chroot, const char *pattern);
/* Parse configuration file FILENAME line by line.  Each line is either
   an "include" directive (glob patterns of further config files), a
   "hwcap" definition assigning a name to an extra hwcap bit, or a
   directory to add to the search list.  '#' starts a comment.
   DO_CHROOT selects whether FILENAME is resolved inside opt_chroot.  */
static void
parse_conf (const char *filename, bool do_chroot)
{
  FILE *file = NULL;
  char *line = NULL;
  const char *canon;
  size_t len = 0;
  unsigned int lineno;

  if (do_chroot && opt_chroot)
    {
      /* CANON is a separate allocation only when chroot_canon
	 succeeded; otherwise it aliases FILENAME.  */
      canon = chroot_canon (opt_chroot, filename);
      if (canon)
	file = fopen (canon, "r");
      else
	canon = filename;
    }
  else
    {
      canon = filename;
      file = fopen (filename, "r");
    }

  if (file == NULL)
    {
      error (0, errno, _("Can't open configuration file %s"), canon);
      if (canon != filename)
	free ((char *) canon);
      return;
    }

  /* No threads use this stream.  */
  __fsetlocking (file, FSETLOCKING_BYCALLER);

  if (canon != filename)
    free ((char *) canon);

  lineno = 0;
  do
    {
      /* getline reuses and grows LINE across iterations.  */
      ssize_t n = getline (&line, &len, file);
      if (n < 0)
	break;

      ++lineno;
      if (line[n - 1] == '\n')
	line[n - 1] = '\0';

      /* Because the file format does not know any form of quoting we
	 can search forward for the next '#' character and if found
	 make it terminating the line.  */
      *strchrnul (line, '#') = '\0';

      /* Remove leading whitespace.  NUL is no whitespace character.  */
      char *cp = line;
      while (isspace (*cp))
	++cp;

      /* If the line is blank it is ignored.  */
      if (cp[0] == '\0')
	continue;

      if (!strncmp (cp, "include", 7) && isblank (cp[7]))
	{
	  /* "include PATTERN..." — each whitespace-separated pattern is
	     expanded and the matching files parsed recursively.  */
	  char *dir;
	  cp += 8;
	  while ((dir = strsep (&cp, " \t")) != NULL)
	    if (dir[0] != '\0')
	      parse_conf_include (filename, lineno, do_chroot, dir);
	}
      else if (!strncasecmp (cp, "hwcap", 5) && isblank (cp[5]))
	{
	  /* "hwcap INDEX NAME" — exactly one name must follow the
	     numeric bit index.  */
	  cp += 6;
	  char *p, *name = NULL;
	  unsigned long int n = strtoul (cp, &cp, 0);
	  if (cp != NULL && isblank (*cp))
	    while ((p = strsep (&cp, " \t")) != NULL)
	      if (p[0] != '\0')
		{
		  if (name == NULL)
		    name = p;
		  else
		    {
		      /* More than one name on the line: syntax error.  */
		      name = NULL;
		      break;
		    }
		}
	  if (name == NULL)
	    {
	      error (EXIT_FAILURE, 0, _("%s:%u: bad syntax in hwcap line"),
		     filename, lineno);
	      break;
	    }
	  if (n >= (64 - _DL_FIRST_EXTRA))
	    error (EXIT_FAILURE, 0,
		   _("%s:%u: hwcap index %lu above maximum %u"),
		   filename, lineno, n, 64 - _DL_FIRST_EXTRA - 1);
	  if (hwcap_extra[n] == NULL)
	    {
	      /* New index: make sure the name is not already used for a
		 different index.  */
	      for (unsigned long int h = 0; h < (64 - _DL_FIRST_EXTRA); ++h)
		if (hwcap_extra[h] != NULL && !strcmp (name, hwcap_extra[h]))
		  error (EXIT_FAILURE, 0,
			 _("%s:%u: hwcap index %lu already defined as %s"),
			 filename, lineno, h, name);
	      hwcap_extra[n] = xstrdup (name);
	    }
	  else
	    {
	      /* Redefinition is only allowed with the identical name.  */
	      if (strcmp (name, hwcap_extra[n]))
		error (EXIT_FAILURE, 0,
		       _("%s:%u: hwcap index %lu already defined as %s"),
		       filename, lineno, n, hwcap_extra[n]);
	      if (opt_verbose)
		error (0, 0, _("%s:%u: duplicate hwcap %lu %s"),
		       filename, lineno, n, name);
	    }
	}
      else
	/* Anything else is a directory specification.  */
	add_dir (cp);
    }
  while (!feof_unlocked (file));

  /* Free buffer and close file.  */
  free (line);
  fclose (file);
}
/* Handle one word in an `include' line, a glob pattern of additional
   config files to read.  CONFIG_FILE/LINENO identify the directive's
   origin for diagnostics; each match is fed back into parse_conf with
   do_chroot == false since the pattern is expanded here already.  */
static void
parse_conf_include (const char *config_file, unsigned int lineno,
		    bool do_chroot, const char *pattern)
{
  if (opt_chroot && pattern[0] != '/')
    error (EXIT_FAILURE, 0,
	   _("need absolute file name for configuration file when using -r"));

  char *copy = NULL;
  /* A relative pattern is interpreted relative to the including file's
     directory.  */
  if (pattern[0] != '/' && strchr (config_file, '/') != NULL)
    {
      if (asprintf (&copy, "%s/%s", dirname (strdupa (config_file)),
		    pattern) < 0)
	error (EXIT_FAILURE, 0, _("memory exhausted"));
      pattern = copy;
    }

  glob64_t gl;
  int result;
  if (do_chroot && opt_chroot)
    {
      char *canon = chroot_canon (opt_chroot, pattern);
      if (canon == NULL)
	return;
      result = glob64 (canon, 0, NULL, &gl);
      free (canon);
    }
  else
    result = glob64 (pattern, 0, NULL, &gl);

  switch (result)
    {
    case 0:
      for (size_t i = 0; i < gl.gl_pathc; ++i)
	parse_conf (gl.gl_pathv[i], false);
      globfree64 (&gl);
      break;

    case GLOB_NOMATCH:
      /* Nothing matched: silently ignore.  */
      break;

    case GLOB_NOSPACE:
      errno = ENOMEM;
      /* Fall through: report like a read failure.  */
    case GLOB_ABORTED:
      if (opt_verbose)
	error (0, errno, _("%s:%u: cannot read directory %s"),
	       config_file, lineno, pattern);
      break;

    default:
      abort ();
      break;
    }

  free (copy);
}
/* Honour the LD_HWCAP_MASK environment variable: when present, its
   numeric value (any base accepted by strtoul) overrides hwcap_mask.  */
static void
set_hwcap (void)
{
  const char *env = getenv ("LD_HWCAP_MASK");
  if (env != NULL)
    hwcap_mask = strtoul (env, NULL, 0);
}
/* ldconfig entry point: parse options, normalize the chroot setting,
   resolve the cache file location, then either print the cache, link
   libraries manually, or scan all directories and rebuild the cache.  */
int
main (int argc, char **argv)
{
  /* Set locale via LC_ALL.  */
  setlocale (LC_ALL, "");
  /* Set the text message domain.  */
  textdomain (_libc_intl_domainname);

  /* Parse and process arguments.  */
  int remaining;
  argp_parse (&argp, argc, argv, 0, &remaining, NULL);

  /* Remaining arguments are additional directories if opt_manual_link
     is not set.  */
  if (remaining != argc && !opt_manual_link)
    {
      int i;
      for (i = remaining; i < argc; ++i)
	if (opt_build_cache && argv[i][0] != '/')
	  error (EXIT_FAILURE, 0,
		 _("relative path `%s' used to build cache"),
		 argv[i]);
	else
	  add_dir (argv[i]);
    }

  /* The top extra-hwcap bit is reserved for "tls".  */
  hwcap_extra[63 - _DL_FIRST_EXTRA] = "tls";

  set_hwcap ();

  if (opt_chroot)
    {
      /* Normalize the path a bit, we might need it for printing later.
	 Strip trailing slashes; an empty result disables chroot.  */
      char *endp = rawmemchr (opt_chroot, '\0');
      while (endp > opt_chroot && endp[-1] == '/')
	--endp;
      *endp = '\0';
      if (endp == opt_chroot)
	opt_chroot = NULL;

      if (opt_chroot)
	{
	  /* It is faster to use chroot if we can.  After a successful
	     chroot all further path handling is plain.  */
	  if (!chroot (opt_chroot))
	    {
	      if (chdir ("/"))
		error (EXIT_FAILURE, errno, _("Can't chdir to /"));
	      opt_chroot = NULL;
	    }
	}
    }

  if (cache_file == NULL)
    {
      cache_file = alloca (strlen (LD_SO_CACHE) + 1);
      strcpy (cache_file, LD_SO_CACHE);
    }

  if (config_file == NULL)
    config_file = LD_SO_CONF;

  if (opt_print_cache)
    {
      if (opt_chroot)
	{
	  char *p = chroot_canon (opt_chroot, cache_file);
	  if (p == NULL)
	    error (EXIT_FAILURE, errno, _("Can't open cache file %s\n"),
		   cache_file);
	  cache_file = p;
	}
      print_cache (cache_file);
      if (opt_chroot)
	free (cache_file);
      exit (0);
    }

  if (opt_chroot)
    {
      /* Canonicalize the directory name of cache_file, not cache_file,
	 because we'll rename a temporary cache file to it.  */
      char *p = strrchr (cache_file, '/');
      char *canon = chroot_canon (opt_chroot,
				  p ? (*p = '\0', cache_file) : "/");

      if (canon == NULL)
	error (EXIT_FAILURE, errno,
	       _("Can't open cache file directory %s\n"),
	       p ? cache_file : "/");

      if (p)
	++p;
      else
	p = cache_file;

      cache_file = alloca (strlen (canon) + strlen (p) + 2);
      sprintf (cache_file, "%s/%s", canon, p);
      free (canon);
    }

  if (opt_manual_link)
    {
      /* Link all given libraries manually.  */
      int i;
      for (i = remaining; i < argc; ++i)
	manual_link (argv[i]);
      exit (0);
    }

  if (opt_build_cache)
    init_cache ();

  if (!opt_only_cline)
    {
      parse_conf (config_file, true);
      /* Always add the standard search paths.  */
      add_system_dir (SLIBDIR);
      if (strcmp (SLIBDIR, LIBDIR))
	add_system_dir (LIBDIR);
    }

  /* The auxiliary cache memoizes per-file results between runs.  */
  const char *aux_cache_file = _PATH_LDCONFIG_AUX_CACHE;
  if (opt_chroot)
    aux_cache_file = chroot_canon (opt_chroot, aux_cache_file);
  if (! opt_ignore_aux_cache && aux_cache_file)
    load_aux_cache (aux_cache_file);
  else
    init_aux_cache ();

  search_dirs ();

  if (opt_build_cache)
    {
      save_cache (cache_file);
      if (aux_cache_file)
	save_aux_cache (aux_cache_file);
    }

  return 0;
}
|
cdepillabout/glibc
|
elf/ldconfig.c
|
C
|
gpl-2.0
| 35,211
|
// OsmSharp - OpenStreetMap (OSM) SDK
// Copyright (C) 2013 Abelshausen Ben
//
// This file is part of OsmSharp.
//
// OsmSharp is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 2 of the License, or
// (at your option) any later version.
//
// OsmSharp is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with OsmSharp. If not, see <http://www.gnu.org/licenses/>.
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using OsmSharp.Math.AI.Genetic;
using OsmSharp.Math.AI.Genetic.Operations;
using OsmSharp.Math.AI.Genetic.Solvers;
using OsmSharp.Math.Random;
using OsmSharp.Math.VRP.MultiSalesman.Genetic;
using OsmSharp.Math.VRP.MultiSalesman.Genetic.Helpers;
namespace OsmSharp.Math.VRP.MultiSalesman.Solver.Operations.Mutation
{
internal class RoundCutMutationOperation : IMutationOperation<List<Genome>, Problem, Fitness>
{
public string Name
{
get
{
return "NotSet";
}
}
public Individual<List<Genome>, Problem, Fitness> Mutate(
Solver<List<Genome>, Problem, Fitness> solver, Individual<List<Genome>, Problem, Fitness> mutating)
{
//Individual<List<Genome>, Problem, Fitness> copy = mutating.Copy();
//if (!copy.FitnessCalculated)
//{
// copy.CalculateFitness(solver.Problem, solver.FitnessCalculator);
//}
//// randomly select a small genome.
//Genome big = IndividualHelper.SelectRandom(copy, false);
//// make sure there are at least two.
//if (big.Count < 3)
//{
// return copy;
//}
//// cut in two
//int random_idx = StaticRandomGenerator.Get().Generate(
// big.Count - 2) + 1;
//// remove the old round.
//copy.Genomes.Remove(big);
//// create new rounds.
//Genome new_round1 = new Genome();
//Genome new_round2 = new Genome();
//new_round1.AddRange(
// big.GetRange(0, random_idx));
//new_round2.AddRange(
// big.GetRange(random_idx, big.Count - random_idx));
//// do best placement
//random_idx = StaticRandomGenerator.Get().Generate(
// new_round1.Count);
//Genome new_round1_1 = new Genome();
//new_round1_1.Add(new_round1[random_idx]);
//new_round1.RemoveAt(random_idx);
//new_round1_1 = BestPlacementHelper.DoFast(
// solver.Problem,
// solver.FitnessCalculator as FitnessCalculator,
// new_round1_1,
// new_round1);
//random_idx = StaticRandomGenerator.Get().Generate(
// new_round2.Count);
//Genome new_round2_1 = new Genome();
//new_round2_1.Add(new_round2[random_idx]);
//new_round2.RemoveAt(random_idx);
//new_round2_1 = BestPlacementHelper.DoFast(
// solver.Problem,
// solver.FitnessCalculator as FitnessCalculator,
// new_round2_1,
// new_round2);
//copy.Genomes.Add(new_round1_1);
//copy.Genomes.Add(new_round2_1);
//copy.CalculateFitness(solver.Problem, solver.FitnessCalculator);
//return copy;
throw new NotImplementedException("Not re-implemented after refactoring GA");
}
}
}
|
ryfx/OsmSharp
|
OsmSharp/Math/VRP/MultiSalesman/Genetic/Operations/Mutation/RoundCutMutationOperation.cs
|
C#
|
gpl-2.0
| 3,915
|
/***************************************************************************
qgsnativealgorithms.cpp
---------------------
begin : April 2017
copyright : (C) 2017 by Nyall Dawson
email : nyall dot dawson at gmail dot com
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
#include "qgsnativealgorithms.h"
#include "qgsruntimeprofiler.h"
#include "qgsalgorithmaddincrementalfield.h"
#include "qgsalgorithmaddtablefield.h"
#include "qgsalgorithmaddxyfields.h"
#include "qgsalgorithmaffinetransform.h"
#include "qgsalgorithmaggregate.h"
#include "qgsalgorithmangletonearest.h"
#include "qgsalgorithmapplylayerstyle.h"
#include "qgsalgorithmarraytranslatedfeatures.h"
#include "qgsalgorithmaspect.h"
#include "qgsalgorithmassignprojection.h"
#include "qgsalgorithmattributeindex.h"
#include "qgsalgorithmbatchnominatimgeocode.h"
#include "qgsalgorithmboundary.h"
#include "qgsalgorithmboundingbox.h"
#include "qgsalgorithmbuffer.h"
#include "qgsalgorithmcalculateoverlaps.h"
#include "qgsalgorithmcategorizeusingstyle.h"
#include "qgsalgorithmcellstatistics.h"
#include "qgsalgorithmcentroid.h"
#include "qgsalgorithmclip.h"
#include "qgsalgorithmconditionalbranch.h"
#include "qgsalgorithmconstantraster.h"
#include "qgsalgorithmconverttocurves.h"
#include "qgsalgorithmconvexhull.h"
#include "qgsalgorithmcreatedirectory.h"
#include "qgsalgorithmdbscanclustering.h"
#include "qgsalgorithmdeleteduplicategeometries.h"
#include "qgsalgorithmdensifygeometriesbycount.h"
#include "qgsalgorithmdensifygeometriesbyinterval.h"
#include "qgsalgorithmdetectdatasetchanges.h"
#include "qgsalgorithmdifference.h"
#include "qgsalgorithmdissolve.h"
#include "qgsalgorithmdistancewithin.h"
#include "qgsalgorithmdrape.h"
#include "qgsalgorithmdropfields.h"
#include "qgsalgorithmdropgeometry.h"
#include "qgsalgorithmdropmzvalues.h"
#include "qgsalgorithmdxfexport.h"
#include "qgsalgorithmexecutepostgisquery.h"
#include "qgsalgorithmexecutespatialitequery.h"
#include "qgsalgorithmexecutespatialitequeryregistered.h"
#include "qgsalgorithmexportmesh.h"
#include "qgsalgorithmexporttospreadsheet.h"
#include "qgsalgorithmexplode.h"
#include "qgsalgorithmexplodehstore.h"
#include "qgsalgorithmexportlayersinformation.h"
#include "qgsalgorithmextendlines.h"
#include "qgsalgorithmextentfromlayer.h"
#include "qgsalgorithmextenttolayer.h"
#include "qgsalgorithmextractbinary.h"
#include "qgsalgorithmextractbyattribute.h"
#include "qgsalgorithmextractbyexpression.h"
#include "qgsalgorithmextractbyextent.h"
#include "qgsalgorithmextractbylocation.h"
#include "qgsalgorithmextractlayoutmapextent.h"
#include "qgsalgorithmextractvertices.h"
#include "qgsalgorithmextractspecificvertices.h"
#include "qgsalgorithmextractzmvalues.h"
#include "qgsalgorithmfieldcalculator.h"
#include "qgsalgorithmfiledownloader.h"
#include "qgsalgorithmfillnodata.h"
#include "qgsalgorithmfilter.h"
#include "qgsalgorithmfilterbygeometry.h"
#include "qgsalgorithmfiltervertices.h"
#include "qgsalgorithmfixgeometries.h"
#include "qgsalgorithmflattenrelationships.h"
#include "qgsalgorithmforcerhr.h"
#include "qgsalgorithmfuzzifyraster.h"
#include "qgsalgorithmgeometrybyexpression.h"
#include "qgsalgorithmgpsbabeltools.h"
#include "qgsalgorithmgrid.h"
#include "qgsalgorithmhillshade.h"
#include "qgsalgorithmjoinbyattribute.h"
#include "qgsalgorithmjoinbylocation.h"
#include "qgsalgorithmjoinbynearest.h"
#include "qgsalgorithmjoinwithlines.h"
#include "qgsalgorithmimportphotos.h"
#include "qgsalgorithminterpolatepoint.h"
#include "qgsalgorithmintersection.h"
#include "qgsalgorithmkmeansclustering.h"
#ifndef QT_NO_PRINTER
#include "qgsalgorithmlayoutatlastoimage.h"
#include "qgsalgorithmlayoutatlastopdf.h"
#include "qgsalgorithmlayouttoimage.h"
#include "qgsalgorithmlayouttopdf.h"
#endif
#include "qgsalgorithmlinedensity.h"
#include "qgsalgorithmlineintersection.h"
#include "qgsalgorithmlinesubstring.h"
#include "qgsalgorithmloadlayer.h"
#include "qgsalgorithmmeancoordinates.h"
#include "qgsalgorithmmergelines.h"
#include "qgsalgorithmmergevector.h"
#include "qgsalgorithmminimumenclosingcircle.h"
#include "qgsalgorithmmultiparttosinglepart.h"
#include "qgsalgorithmmultiringconstantbuffer.h"
#include "qgsalgorithmnearestneighbouranalysis.h"
#include "qgsalgorithmoffsetlines.h"
#include "qgsalgorithmorderbyexpression.h"
#include "qgsalgorithmorientedminimumboundingbox.h"
#include "qgsalgorithmorthogonalize.h"
#include "qgsalgorithmpackage.h"
#include "qgsalgorithmpixelcentroidsfrompolygons.h"
#include "qgsalgorithmarrayoffsetlines.h"
#include "qgsalgorithmpointsinpolygon.h"
#include "qgsalgorithmpointonsurface.h"
#include "qgsalgorithmpointtolayer.h"
#include "qgsalgorithmpointsalonggeometry.h"
#include "qgsalgorithmpointslayerfromtable.h"
#include "qgsalgorithmpointstopaths.h"
#include "qgsalgorithmpoleofinaccessibility.h"
#include "qgsalgorithmpolygonize.h"
#include "qgsalgorithmprojectpointcartesian.h"
#include "qgsalgorithmpromotetomultipart.h"
#include "qgsalgorithmraiseexception.h"
#include "qgsalgorithmrandomextract.h"
#include "qgsalgorithmrandompointsextent.h"
#include "qgsalgorithmrandompointsinpolygons.h"
#include "qgsalgorithmrandompointsonlines.h"
#include "qgsalgorithmrandomraster.h"
#include "qgsalgorithmrasterfrequencybycomparisonoperator.h"
#include "qgsalgorithmrasterlayerproperties.h"
#include "qgsalgorithmrasterlayeruniquevalues.h"
#include "qgsalgorithmrasterlogicalop.h"
#include "qgsalgorithmrasterize.h"
#include "qgsalgorithmrastersampling.h"
#include "qgsalgorithmrasterstackposition.h"
#include "qgsalgorithmrasterstatistics.h"
#include "qgsalgorithmrastersurfacevolume.h"
#include "qgsalgorithmrasterzonalstats.h"
#include "qgsalgorithmreclassifybylayer.h"
#include "qgsalgorithmrectanglesovalsdiamonds.h"
#include "qgsalgorithmrefactorfields.h"
#include "qgsalgorithmremoveduplicatesbyattribute.h"
#include "qgsalgorithmremoveduplicatevertices.h"
#include "qgsalgorithmremoveholes.h"
#include "qgsalgorithmremovenullgeometry.h"
#include "qgsalgorithmrenamelayer.h"
#include "qgsalgorithmrenametablefield.h"
#include "qgsalgorithmrepairshapefile.h"
#include "qgsalgorithmrescaleraster.h"
#include "qgsalgorithmreverselinedirection.h"
#include "qgsalgorithmrotate.h"
#include "qgsalgorithmroundrastervalues.h"
#include "qgsalgorithmruggedness.h"
#include "qgsalgorithmsavefeatures.h"
#include "qgsalgorithmsavelog.h"
#include "qgsalgorithmsaveselectedfeatures.h"
#include "qgsalgorithmsegmentize.h"
#include "qgsalgorithmserviceareafromlayer.h"
#include "qgsalgorithmserviceareafrompoint.h"
#include "qgsalgorithmsetlayerencoding.h"
#include "qgsalgorithmsetmvalue.h"
#include "qgsalgorithmsetvariable.h"
#include "qgsalgorithmsetzvalue.h"
#include "qgsalgorithmshortestpathlayertopoint.h"
#include "qgsalgorithmshortestpathpointtolayer.h"
#include "qgsalgorithmshortestpathpointtopoint.h"
#include "qgsalgorithmshpencodinginfo.h"
#include "qgsalgorithmsimplify.h"
#include "qgsalgorithmsinglesidedbuffer.h"
#include "qgsalgorithmslope.h"
#include "qgsalgorithmsmooth.h"
#include "qgsalgorithmsnapgeometries.h"
#include "qgsalgorithmsnaptogrid.h"
#include "qgsalgorithmspatialindex.h"
#include "qgsalgorithmsplitfeaturesbyattributecharacter.h"
#include "qgsalgorithmsplitlineantimeridian.h"
#include "qgsalgorithmsplitlinesbylength.h"
#include "qgsalgorithmsplitvectorlayer.h"
#include "qgsalgorithmsplitwithlines.h"
#include "qgsalgorithmstdbscanclustering.h"
#include "qgsalgorithmstringconcatenation.h"
#include "qgsalgorithmsubdivide.h"
#include "qgsalgorithmsumlinelength.h"
#include "qgsalgorithmswapxy.h"
#include "qgsalgorithmsymmetricaldifference.h"
#include "qgsalgorithmtaperedbuffer.h"
#include "qgsalgorithmtinmeshcreation.h"
#include "qgsalgorithmtransect.h"
#include "qgsalgorithmtransform.h"
#include "qgsalgorithmtranslate.h"
#include "qgsalgorithmtruncatetable.h"
#include "qgsalgorithmunion.h"
#include "qgsalgorithmuniquevalueindex.h"
#include "qgsalgorithmvectorize.h"
#include "qgsalgorithmwedgebuffers.h"
#include "qgsalgorithmwritevectortiles.h"
#include "qgsalgorithmzonalhistogram.h"
#include "qgsalgorithmzonalstatistics.h"
#include "qgsalgorithmzonalstatisticsfeaturebased.h"
#include "qgsalgorithmpolygonstolines.h"
#include "qgsbookmarkalgorithms.h"
#include "qgsprojectstylealgorithms.h"
#include "qgsstylealgorithms.h"
///@cond PRIVATE
// Construct the native (C++) algorithm provider; all state is held by the
// QgsProcessingProvider base class.
QgsNativeAlgorithms::QgsNativeAlgorithms( QObject *parent )
  : QgsProcessingProvider( parent )
{}
// Icon used to represent this provider in the Processing UI.
QIcon QgsNativeAlgorithms::icon() const
{
  return QgsApplication::getThemeIcon( QStringLiteral( "/providerQgis.svg" ) );
}
// Path to the provider's SVG icon, resolved through the active theme.
QString QgsNativeAlgorithms::svgIconPath() const
{
  return QgsApplication::iconPath( QStringLiteral( "providerQgis.svg" ) );
}
// Stable provider id used as the algorithm-id prefix (e.g. "native:buffer").
QString QgsNativeAlgorithms::id() const
{
  return QStringLiteral( "native" );
}
// Identifier used when looking up help/documentation for this provider.
QString QgsNativeAlgorithms::helpId() const
{
  return QStringLiteral( "qgis" );
}
// Translated, human-readable provider name shown in the toolbox.
QString QgsNativeAlgorithms::name() const
{
  return tr( "QGIS (native c++)" );
}
// Native algorithms can write to non-file outputs (e.g. memory layers).
bool QgsNativeAlgorithms::supportsNonFileBasedOutput() const
{
  return true;
}
// Registers every native (C++) Processing algorithm with this provider.
// Called by the Processing framework when the provider is (re)loaded.
void QgsNativeAlgorithms::loadAlgorithms()
{
// Records the registration cost of this provider in the runtime profiler.
const QgsScopedRuntimeProfile profile( QObject::tr( "QGIS native provider" ) );
addAlgorithm( new QgsAddIncrementalFieldAlgorithm() );
addAlgorithm( new QgsAddTableFieldAlgorithm() );
addAlgorithm( new QgsAddXYFieldsAlgorithm() );
addAlgorithm( new QgsAddUniqueValueIndexAlgorithm() );
addAlgorithm( new QgsAffineTransformationAlgorithm() );
addAlgorithm( new QgsAggregateAlgorithm() );
addAlgorithm( new QgsAngleToNearestAlgorithm() );
addAlgorithm( new QgsApplyLayerStyleAlgorithm() );
addAlgorithm( new QgsArrayTranslatedFeaturesAlgorithm() );
addAlgorithm( new QgsAspectAlgorithm() );
addAlgorithm( new QgsAssignProjectionAlgorithm() );
addAlgorithm( new QgsAttributeIndexAlgorithm() );
addAlgorithm( new QgsBatchNominatimGeocodeAlgorithm() );
addAlgorithm( new QgsBookmarksToLayerAlgorithm() );
addAlgorithm( new QgsBoundaryAlgorithm() );
addAlgorithm( new QgsBoundingBoxAlgorithm() );
addAlgorithm( new QgsBufferAlgorithm() );
addAlgorithm( new QgsCalculateVectorOverlapsAlgorithm() );
addAlgorithm( new QgsCategorizeUsingStyleAlgorithm() );
addAlgorithm( new QgsCellStatisticsAlgorithm() );
addAlgorithm( new QgsCellStatisticsPercentileAlgorithm() );
addAlgorithm( new QgsCellStatisticsPercentRankFromRasterAlgorithm() );
addAlgorithm( new QgsCellStatisticsPercentRankFromValueAlgorithm() );
addAlgorithm( new QgsCentroidAlgorithm() );
addAlgorithm( new QgsClipAlgorithm() );
addAlgorithm( new QgsCollectAlgorithm() );
addAlgorithm( new QgsCombineStylesAlgorithm() );
addAlgorithm( new QgsConditionalBranchAlgorithm() );
addAlgorithm( new QgsConstantRasterAlgorithm() );
addAlgorithm( new QgsConvertToCurvesAlgorithm() );
addAlgorithm( new QgsConvexHullAlgorithm() );
addAlgorithm( new QgsCreateDirectoryAlgorithm() );
addAlgorithm( new QgsDbscanClusteringAlgorithm() );
addAlgorithm( new QgsDeleteDuplicateGeometriesAlgorithm() );
addAlgorithm( new QgsDetectVectorChangesAlgorithm() );
addAlgorithm( new QgsDifferenceAlgorithm() );
addAlgorithm( new QgsDissolveAlgorithm() );
addAlgorithm( new QgsDrapeToMAlgorithm() );
addAlgorithm( new QgsDrapeToZAlgorithm() );
addAlgorithm( new QgsDropTableFieldsAlgorithm() );
addAlgorithm( new QgsDropGeometryAlgorithm() );
addAlgorithm( new QgsDropMZValuesAlgorithm() );
addAlgorithm( new QgsDxfExportAlgorithm() );
addAlgorithm( new QgsExecutePostgisQueryAlgorithm() );
addAlgorithm( new QgsExecuteRegisteredSpatialiteQueryAlgorithm() );
addAlgorithm( new QgsExecuteSpatialiteQueryAlgorithm() );
addAlgorithm( new QgsExplodeAlgorithm() );
addAlgorithm( new QgsExplodeHstoreAlgorithm() );
addAlgorithm( new QgsExportLayersInformationAlgorithm() );
addAlgorithm( new QgsExportMeshVerticesAlgorithm );
addAlgorithm( new QgsExportMeshFacesAlgorithm );
addAlgorithm( new QgsExportMeshEdgesAlgorithm );
addAlgorithm( new QgsExportMeshOnGridAlgorithm );
addAlgorithm( new QgsExportToSpreadsheetAlgorithm() );
addAlgorithm( new QgsExtendLinesAlgorithm() );
addAlgorithm( new QgsExtentFromLayerAlgorithm() );
addAlgorithm( new QgsExtentToLayerAlgorithm() );
addAlgorithm( new QgsExtractBinaryFieldAlgorithm() );
addAlgorithm( new QgsExtractByAttributeAlgorithm() );
addAlgorithm( new QgsExtractByExpressionAlgorithm() );
addAlgorithm( new QgsExtractByExtentAlgorithm() );
addAlgorithm( new QgsExtractByLocationAlgorithm() );
addAlgorithm( new QgsExtractMValuesAlgorithm() );
addAlgorithm( new QgsExtractVerticesAlgorithm() );
addAlgorithm( new QgsExtractSpecificVerticesAlgorithm() );
addAlgorithm( new QgsExtractWithinDistanceAlgorithm() );
addAlgorithm( new QgsExtractZValuesAlgorithm() );
addAlgorithm( new QgsFieldCalculatorAlgorithm() );
addAlgorithm( new QgsFileDownloaderAlgorithm() );
addAlgorithm( new QgsFillNoDataAlgorithm() );
addAlgorithm( new QgsFilterAlgorithm() );
addAlgorithm( new QgsFilterByGeometryAlgorithm() );
addAlgorithm( new QgsFilterByLayerTypeAlgorithm() );
addAlgorithm( new QgsFilterVerticesByM() );
addAlgorithm( new QgsFilterVerticesByZ() );
addAlgorithm( new QgsFixGeometriesAlgorithm() );
addAlgorithm( new QgsFlattenRelationshipsAlgorithm() );
addAlgorithm( new QgsForceRHRAlgorithm() );
addAlgorithm( new QgsFuzzifyRasterLinearMembershipAlgorithm() );
addAlgorithm( new QgsFuzzifyRasterPowerMembershipAlgorithm() );
addAlgorithm( new QgsFuzzifyRasterLargeMembershipAlgorithm() );
addAlgorithm( new QgsFuzzifyRasterSmallMembershipAlgorithm() );
addAlgorithm( new QgsFuzzifyRasterGaussianMembershipAlgorithm() );
addAlgorithm( new QgsFuzzifyRasterNearMembershipAlgorithm() );
addAlgorithm( new QgsGeometryByExpressionAlgorithm() );
addAlgorithm( new QgsConvertGpxFeatureTypeAlgorithm() );
addAlgorithm( new QgsConvertGpsDataAlgorithm() );
addAlgorithm( new QgsDownloadGpsDataAlgorithm() );
addAlgorithm( new QgsUploadGpsDataAlgorithm() );
addAlgorithm( new QgsGridAlgorithm() );
addAlgorithm( new QgsHillshadeAlgorithm() );
addAlgorithm( new QgsImportPhotosAlgorithm() );
addAlgorithm( new QgsInterpolatePointAlgorithm() );
addAlgorithm( new QgsIntersectionAlgorithm() );
addAlgorithm( new QgsJoinByAttributeAlgorithm() );
addAlgorithm( new QgsJoinByLocationAlgorithm() );
addAlgorithm( new QgsJoinByNearestAlgorithm() );
addAlgorithm( new QgsJoinWithLinesAlgorithm() );
addAlgorithm( new QgsKMeansClusteringAlgorithm() );
addAlgorithm( new QgsLayerToBookmarksAlgorithm() );
addAlgorithm( new QgsLayoutMapExtentToLayerAlgorithm() );
// Layout export algorithms require print support in the Qt build.
#ifndef QT_NO_PRINTER
addAlgorithm( new QgsLayoutAtlasToImageAlgorithm() );
addAlgorithm( new QgsLayoutAtlasToPdfAlgorithm() );
addAlgorithm( new QgsLayoutToImageAlgorithm() );
addAlgorithm( new QgsLayoutToPdfAlgorithm() );
#endif
addAlgorithm( new QgsLineDensityAlgorithm() );
addAlgorithm( new QgsLineIntersectionAlgorithm() );
addAlgorithm( new QgsLineSubstringAlgorithm() );
addAlgorithm( new QgsLoadLayerAlgorithm() );
addAlgorithm( new QgsMeanCoordinatesAlgorithm() );
addAlgorithm( new QgsMergeLinesAlgorithm() );
addAlgorithm( new QgsMergeVectorAlgorithm() );
addAlgorithm( new QgsMeshRasterizeAlgorithm );
addAlgorithm( new QgsMeshContoursAlgorithm );
addAlgorithm( new QgsMeshExportCrossSection );
addAlgorithm( new QgsMeshExportTimeSeries );
addAlgorithm( new QgsMinimumEnclosingCircleAlgorithm() );
addAlgorithm( new QgsMultipartToSinglepartAlgorithm() );
addAlgorithm( new QgsMultiRingConstantBufferAlgorithm() );
addAlgorithm( new QgsNearestNeighbourAnalysisAlgorithm() );
addAlgorithm( new QgsOffsetLinesAlgorithm() );
addAlgorithm( new QgsOrderByExpressionAlgorithm() );
addAlgorithm( new QgsOrientedMinimumBoundingBoxAlgorithm() );
addAlgorithm( new QgsOrthogonalizeAlgorithm() );
addAlgorithm( new QgsPackageAlgorithm() );
addAlgorithm( new QgsPixelCentroidsFromPolygonsAlgorithm() );
addAlgorithm( new QgsCreateArrayOffsetLinesAlgorithm() );
addAlgorithm( new QgsPointsInPolygonAlgorithm() );
addAlgorithm( new QgsPointOnSurfaceAlgorithm() );
addAlgorithm( new QgsPointToLayerAlgorithm() );
addAlgorithm( new QgsPointsAlongGeometryAlgorithm() );
addAlgorithm( new QgsPointsLayerFromTableAlgorithm() );
addAlgorithm( new QgsPointsToPathsAlgorithm() );
addAlgorithm( new QgsPoleOfInaccessibilityAlgorithm() );
addAlgorithm( new QgsPolygonizeAlgorithm() );
addAlgorithm( new QgsProjectPointCartesianAlgorithm() );
addAlgorithm( new QgsPromoteToMultipartAlgorithm() );
addAlgorithm( new QgsRaiseExceptionAlgorithm() );
addAlgorithm( new QgsRaiseWarningAlgorithm() );
addAlgorithm( new QgsRandomBinomialRasterAlgorithm() );
addAlgorithm( new QgsRandomExponentialRasterAlgorithm() );
addAlgorithm( new QgsRandomExtractAlgorithm() );
addAlgorithm( new QgsRandomGammaRasterAlgorithm() );
addAlgorithm( new QgsRandomGeometricRasterAlgorithm() );
addAlgorithm( new QgsRandomNegativeBinomialRasterAlgorithm() );
addAlgorithm( new QgsRandomNormalRasterAlgorithm() );
addAlgorithm( new QgsRandomPointsExtentAlgorithm() );
addAlgorithm( new QgsRandomPointsInPolygonsAlgorithm() );
addAlgorithm( new QgsRandomPointsOnLinesAlgorithm() );
addAlgorithm( new QgsRandomPoissonRasterAlgorithm() );
addAlgorithm( new QgsRandomUniformRasterAlgorithm() );
addAlgorithm( new QgsRasterFrequencyByEqualOperatorAlgorithm() );
addAlgorithm( new QgsRasterFrequencyByGreaterThanOperatorAlgorithm() );
addAlgorithm( new QgsRasterFrequencyByLessThanOperatorAlgorithm() );
addAlgorithm( new QgsRasterLayerPropertiesAlgorithm() );
addAlgorithm( new QgsRasterLayerUniqueValuesReportAlgorithm() );
addAlgorithm( new QgsRasterLayerZonalStatsAlgorithm() );
addAlgorithm( new QgsRasterLogicalAndAlgorithm() );
addAlgorithm( new QgsRasterLogicalOrAlgorithm() );
addAlgorithm( new QgsRasterizeAlgorithm() );
addAlgorithm( new QgsRasterPixelsToPointsAlgorithm() );
addAlgorithm( new QgsRasterPixelsToPolygonsAlgorithm() );
addAlgorithm( new QgsRasterSamplingAlgorithm() );
addAlgorithm( new QgsRasterStackHighestPositionAlgorithm() );
addAlgorithm( new QgsRasterStackLowestPositionAlgorithm() );
addAlgorithm( new QgsRasterStatisticsAlgorithm() );
addAlgorithm( new QgsRasterSurfaceVolumeAlgorithm() );
addAlgorithm( new QgsAlgorithmRemoveDuplicateVertices() );
addAlgorithm( new QgsReclassifyByLayerAlgorithm() );
addAlgorithm( new QgsReclassifyByTableAlgorithm() );
addAlgorithm( new QgsRectanglesOvalsDiamondsAlgorithm() );
addAlgorithm( new QgsRefactorFieldsAlgorithm() );
addAlgorithm( new QgsRemoveDuplicatesByAttributeAlgorithm() );
addAlgorithm( new QgsRemoveHolesAlgorithm() );
addAlgorithm( new QgsRemoveNullGeometryAlgorithm() );
addAlgorithm( new QgsRenameLayerAlgorithm() );
addAlgorithm( new QgsRenameTableFieldAlgorithm() );
addAlgorithm( new QgsRepairShapefileAlgorithm() );
addAlgorithm( new QgsRescaleRasterAlgorithm() );
addAlgorithm( new QgsRetainTableFieldsAlgorithm() );
addAlgorithm( new QgsReverseLineDirectionAlgorithm() );
addAlgorithm( new QgsRotateFeaturesAlgorithm() );
addAlgorithm( new QgsRoundRasterValuesAlgorithm() );
addAlgorithm( new QgsRuggednessAlgorithm() );
addAlgorithm( new QgsSaveFeaturesAlgorithm() );
addAlgorithm( new QgsSaveLogToFileAlgorithm() );
addAlgorithm( new QgsSaveSelectedFeatures() );
addAlgorithm( new QgsSegmentizeByMaximumAngleAlgorithm() );
addAlgorithm( new QgsSegmentizeByMaximumDistanceAlgorithm() );
addAlgorithm( new QgsSelectByLocationAlgorithm() );
addAlgorithm( new QgsSelectWithinDistanceAlgorithm() );
addAlgorithm( new QgsServiceAreaFromLayerAlgorithm() );
addAlgorithm( new QgsServiceAreaFromPointAlgorithm() );
addAlgorithm( new QgsSetLayerEncodingAlgorithm() );
addAlgorithm( new QgsSetMValueAlgorithm() );
addAlgorithm( new QgsSetProjectVariableAlgorithm() );
addAlgorithm( new QgsSetZValueAlgorithm() );
addAlgorithm( new QgsShapefileEncodingInfoAlgorithm() );
addAlgorithm( new QgsShortestPathLayerToPointAlgorithm() );
addAlgorithm( new QgsShortestPathPointToLayerAlgorithm() );
addAlgorithm( new QgsShortestPathPointToPointAlgorithm() );
addAlgorithm( new QgsSimplifyAlgorithm() );
addAlgorithm( new QgsSingleSidedBufferAlgorithm() );
addAlgorithm( new QgsSlopeAlgorithm() );
addAlgorithm( new QgsSmoothAlgorithm() );
addAlgorithm( new QgsSnapGeometriesAlgorithm() );
addAlgorithm( new QgsSnapToGridAlgorithm() );
addAlgorithm( new QgsSpatialIndexAlgorithm() );
addAlgorithm( new QgsSplitFeaturesByAttributeCharacterAlgorithm() );
addAlgorithm( new QgsSplitGeometryAtAntimeridianAlgorithm() );
addAlgorithm( new QgsSplitLinesByLengthAlgorithm() );
addAlgorithm( new QgsSplitVectorLayerAlgorithm() );
addAlgorithm( new QgsSplitWithLinesAlgorithm() );
addAlgorithm( new QgsStDbscanClusteringAlgorithm() );
addAlgorithm( new QgsStringConcatenationAlgorithm() );
addAlgorithm( new QgsStyleFromProjectAlgorithm() );
addAlgorithm( new QgsSubdivideAlgorithm() );
addAlgorithm( new QgsSumLineLengthAlgorithm() );
addAlgorithm( new QgsSwapXYAlgorithm() );
addAlgorithm( new QgsSymmetricalDifferenceAlgorithm() );
addAlgorithm( new QgsTaperedBufferAlgorithm() );
addAlgorithm( new QgsTinMeshCreationAlgorithm() );
addAlgorithm( new QgsTransectAlgorithm() );
addAlgorithm( new QgsTransformAlgorithm() );
addAlgorithm( new QgsTranslateAlgorithm() );
addAlgorithm( new QgsTruncateTableAlgorithm() );
addAlgorithm( new QgsUnionAlgorithm() );
addAlgorithm( new QgsVariableWidthBufferByMAlgorithm() );
addAlgorithm( new QgsWedgeBuffersAlgorithm() );
addAlgorithm( new QgsWriteVectorTilesXyzAlgorithm() );
addAlgorithm( new QgsWriteVectorTilesMbtilesAlgorithm() );
addAlgorithm( new QgsZonalHistogramAlgorithm() );
addAlgorithm( new QgsZonalStatisticsAlgorithm() );
addAlgorithm( new QgsZonalStatisticsFeatureBasedAlgorithm() );
addAlgorithm( new QgsPolygonsToLinesAlgorithm() );
addAlgorithm( new QgsDensifyGeometriesByIntervalAlgorithm() );
addAlgorithm( new QgsDensifyGeometriesByCountAlgorithm() );
}
///@endcond
|
nyalldawson/QGIS
|
src/analysis/processing/qgsnativealgorithms.cpp
|
C++
|
gpl-2.0
| 22,818
|
<?php
/**
* @class FLButtonModule
*/
class FLButtonModule extends FLBuilderModule {

	/**
	 * Registers the Button module with the builder.
	 *
	 * @method __construct
	 */
	public function __construct()
	{
		parent::__construct( array(
			'name'        => __('Button', 'fl-builder'),
			'description' => __('A simple call to action button.', 'fl-builder'),
			'category'    => __('Advanced Modules', 'fl-builder')
		) );
	}

	/**
	 * Migrates settings saved by older versions of the module.
	 *
	 * @method update
	 * @param object $settings
	 * @return object
	 */
	public function update( $settings )
	{
		// The three_d option is no longer supported; drop it if present.
		if ( isset( $settings->three_d ) ) {
			unset( $settings->three_d );
		}

		return $settings;
	}

	/**
	 * Builds the wrapper CSS class list from the current settings.
	 *
	 * @method get_classname
	 * @return string
	 */
	public function get_classname()
	{
		$classes = array( 'fl-button-wrap' );

		if ( ! empty( $this->settings->width ) ) {
			$classes[] = 'fl-button-width-' . $this->settings->width;
		}
		if ( ! empty( $this->settings->align ) ) {
			$classes[] = 'fl-button-' . $this->settings->align;
		}
		if ( ! empty( $this->settings->icon ) ) {
			$classes[] = 'fl-button-has-icon';
		}

		return implode( ' ', $classes );
	}
}
/**
* Register the module and its form settings.
*/
// Registers FLButtonModule and declares its settings form.
// Top-level keys are form tabs; each tab contains titled sections of fields.
FLBuilder::register_module('FLButtonModule', array(
// "General" tab: button text, icon and link settings.
'general' => array(
'title' => __('General', 'fl-builder'),
'sections' => array(
'general' => array(
'title' => '',
'fields' => array(
'text' => array(
'type' => 'text',
'label' => __('Text', 'fl-builder'),
'default' => __('Click Here', 'fl-builder'),
'preview' => array(
'type' => 'text',
'selector' => '.fl-button-text'
)
),
'icon' => array(
'type' => 'icon',
'label' => __('Icon', 'fl-builder'),
'show_remove' => true
),
'icon_position' => array(
'type' => 'select',
'label' => __('Icon Position', 'fl-builder'),
'default' => 'before',
'options' => array(
'before' => __('Before Text', 'fl-builder'),
'after' => __('After Text', 'fl-builder')
)
)
)
),
'link' => array(
'title' => __('Link', 'fl-builder'),
'fields' => array(
'link' => array(
'type' => 'link',
'label' => __('Link', 'fl-builder'),
'placeholder' => __( 'http://www.example.com', 'fl-builder' ),
'preview' => array(
'type' => 'none'
)
),
'link_target' => array(
'type' => 'select',
'label' => __('Link Target', 'fl-builder'),
'default' => '_self',
'options' => array(
'_self' => __('Same Window', 'fl-builder'),
'_blank' => __('New Window', 'fl-builder')
),
'preview' => array(
'type' => 'none'
)
)
)
)
)
),
// "Style" tab: colors, visual style and structural sizing.
'style' => array(
'title' => __('Style', 'fl-builder'),
'sections' => array(
'colors' => array(
'title' => __('Colors', 'fl-builder'),
'fields' => array(
'bg_color' => array(
'type' => 'color',
'label' => __('Background Color', 'fl-builder'),
'default' => '',
'show_reset' => true
),
'bg_hover_color' => array(
'type' => 'color',
'label' => __('Background Hover Color', 'fl-builder'),
'default' => '',
'show_reset' => true,
'preview' => array(
'type' => 'none'
)
),
'text_color' => array(
'type' => 'color',
'label' => __('Text Color', 'fl-builder'),
'default' => '',
'show_reset' => true
),
'text_hover_color' => array(
'type' => 'color',
'label' => __('Text Hover Color', 'fl-builder'),
'default' => '',
'show_reset' => true,
'preview' => array(
'type' => 'none'
)
)
)
),
'style' => array(
'title' => __('Style', 'fl-builder'),
'fields' => array(
'style' => array(
'type' => 'select',
'label' => __('Style', 'fl-builder'),
'default' => 'flat',
'options' => array(
'flat' => __('Flat', 'fl-builder'),
'gradient' => __('Gradient', 'fl-builder'),
'transparent' => __('Transparent', 'fl-builder')
),
// Opacity/border fields are only shown for the transparent style.
'toggle' => array(
'transparent' => array(
'fields' => array('bg_opacity', 'border_size')
)
)
),
'border_size' => array(
'type' => 'text',
'label' => __('Border Size', 'fl-builder'),
'default' => '2',
'description' => 'px',
'maxlength' => '3',
'size' => '5',
'placeholder' => '0'
),
'bg_opacity' => array(
'type' => 'text',
'label' => __('Background Opacity', 'fl-builder'),
'default' => '0',
'description' => '%',
'maxlength' => '3',
'size' => '5',
'placeholder' => '0'
)
)
),
'formatting' => array(
'title' => __('Structure', 'fl-builder'),
'fields' => array(
'width' => array(
'type' => 'select',
'label' => __('Width', 'fl-builder'),
'default' => 'auto',
'options' => array(
'auto' => _x( 'Auto', 'Width.', 'fl-builder' ),
'full' => __('Full Width', 'fl-builder'),
'custom' => __('Custom', 'fl-builder')
),
// Alignment applies to auto/custom widths; custom width only to "custom".
'toggle' => array(
'auto' => array(
'fields' => array('align')
),
'full' => array(),
'custom' => array(
'fields' => array('align', 'custom_width')
)
)
),
'custom_width' => array(
'type' => 'text',
'label' => __('Custom Width', 'fl-builder'),
'default' => '200',
'maxlength' => '3',
'size' => '4',
'description' => 'px'
),
'align' => array(
'type' => 'select',
'label' => __('Alignment', 'fl-builder'),
'default' => 'left',
'options' => array(
'center' => __('Center', 'fl-builder'),
'left' => __('Left', 'fl-builder'),
'right' => __('Right', 'fl-builder')
)
),
'font_size' => array(
'type' => 'text',
'label' => __('Font Size', 'fl-builder'),
'default' => '16',
'maxlength' => '3',
'size' => '4',
'description' => 'px'
),
'padding' => array(
'type' => 'text',
'label' => __('Padding', 'fl-builder'),
'default' => '12',
'maxlength' => '3',
'size' => '4',
'description' => 'px'
),
'border_radius' => array(
'type' => 'text',
'label' => __('Round Corners', 'fl-builder'),
'default' => '4',
'maxlength' => '3',
'size' => '4',
'description' => 'px'
)
)
)
)
)
));
|
LunaLisa/www.dogtraining.dev
|
wp-content/plugins/bb-plugin/modules/button/button.php
|
PHP
|
gpl-2.0
| 7,372
|
/*
Place all the styles related to the matching controller here.
They will automatically be included in application.css.
*/
/* Row holding the append/add controls. */
.appends_container{
min-height: 65px;
margin-top: 1%;
}
.appends_container .plus_box{
float: left;
margin: 5px;
}
/* Card-like panels for source appends and product references. */
.source_appends, .product_references{
padding: 1%;
margin-top: 1%;
background-color: #FBFBFB;
box-shadow: 5px 5px 5px #bac4ce;
}
/* Fixed-size tile for an appended document. */
.documents_appends_box{
margin-bottom: 1%; /* note: overridden by the 'margin: 5px' shorthand below */
height: 150px;
width: 160px;
display: inline-block;
background-color: #E8F0F8;
margin: 5px;
}
.header_append{
display: inline;
position: relative;
top: 5px;
left: 0px;
}
.header_append img{
height: 30px;
}
.content_append p{
padding: 3%;
overflow: hidden;
font-family: Roboto;
font-size: 14px;
margin-top: 20px;
}
/* Embedded snippet preview container and its iframe. */
.snippet_view_append{
margin: 5px 5px;
width: 26%;
height: 275px;
}
.snippet_view_append iframe{
overflow: hidden;
margin-left: 1%;
}
.content_append{
position: relative;
margin: 30px 0px 0px 0px;
border-radius: 10px;
}
.delete_append{
height: 24px;
margin-bottom: 3px;
cursor: pointer;
}
/* Translucent selector bar centered at the bottom of a tile. */
.document_selector{
position: absolute;
background-color: rgba(70,130,180,0.6);
cursor: pointer;
bottom: 0;
text-align: center;
width: 50%;
left: 25%;
}
.record_document img{
width: 30px;
}
.document_presentation{
margin-top: 1.2%;
}
|
UvigoEduArea/EduAreaBeta2
|
app/assets/stylesheets/contents.css
|
CSS
|
gpl-2.0
| 1,309
|
<?php
/**
* @file
* Contains \Drupal\quicktabs\Plugin\TabType\ViewContent.
*/
namespace Drupal\quicktabs\Plugin\TabType;
use Drupal\Core\Form\FormStateInterface;
use Drupal\Core\Ajax\AjaxResponse;
use Drupal\Core\Ajax\ReplaceCommand;
use Drupal\quicktabs\TabTypeBase;
use Drupal\views\Views;
/**
* Provides a 'view content' tab type.
*
* @TabType(
* id = "view_content",
* name = @Translation("view"),
* )
*/
class ViewContent extends TabTypeBase {

  /**
   * {@inheritdoc}
   *
   * Builds the per-tab settings sub-form: a view selector whose change event
   * refreshes the display dropdown over AJAX, a display selector, and an
   * optional arguments textfield.
   */
  public function optionsForm(array $tab) {
    $plugin_id = $this->getPluginDefinition()['id'];
    $views = $this->getViews();
    $views_keys = array_keys($views);
    // Use the saved view if any, otherwise fall back to the first enabled view.
    $selected_view = (isset($tab['content'][$plugin_id]['options']['vid']) ? $tab['content'][$plugin_id]['options']['vid'] : (isset($views_keys[0]) ? $views_keys[0] : ''));
    $form = [];
    $form['vid'] = array(
      '#type' => 'select',
      '#options' => $views,
      '#default_value' => $selected_view,
      '#title' => t('Select a view'),
      '#ajax' => array(
        'callback' => 'Drupal\quicktabs\Plugin\TabType\ViewContent::viewsDisplaysAjaxCallback',
        'event' => 'change',
        'progress' => array(
          'type' => 'throbber',
          'message' => 'Please wait...',
        ),
        'effect' => 'fade',
      ),
    );
    $form['display'] = array(
      '#type' => 'select',
      '#title' => 'display',
      '#options' => ViewContent::getViewDisplays($selected_view),
      '#default_value' => isset($tab['content'][$plugin_id]['options']['display']) ? $tab['content'][$plugin_id]['options']['display'] : '',
      // This wrapper id is the target replaced by viewsDisplaysAjaxCallback().
      '#prefix' => '<div id="view-display-dropdown-' . $tab['delta'] . '">',
      '#suffix' => '</div>'
    );
    $form['args'] = array(
      '#type' => 'textfield',
      '#title' => 'arguments',
      '#size' => '40',
      '#required' => FALSE,
      '#default_value' => isset($tab['content'][$plugin_id]['options']['args']) ? $tab['content'][$plugin_id]['options']['args'] : '',
      '#description' => t('Additional arguments to send to the view as if they were part of the URL in the form of arg1/arg2/arg3. You may use %0, %1, ..., %N to grab arguments from the URL.'),
    );
    return $form;
  }

  /**
   * {@inheritdoc}
   *
   * Renders the configured view display, or an empty render array when the
   * view no longer exists or the current user has no access to it.
   */
  public function render(array $tab) {
    $options = $tab['content'][$tab['type']]['options'];
    $args = empty($options['args']) ? [] : array_map('trim', explode(',', $options['args']));
    $view = Views::getView($options['vid']);
    // Guard against a deleted or renamed view: getView() returns NULL then,
    // and calling access() on NULL would be a fatal error.
    if (!$view) {
      return [];
    }
    // Return empty render array if user doesn't have access.
    if (!$view->access($options['display'], \Drupal::currentUser())) {
      return [];
    }
    $render = $view->buildRenderable($options['display'], $args);
    return $render;
  }

  /**
   * Ajax callback to change views displays when view is selected.
   *
   * Declared static because the Form API invokes it via the class-qualified
   * string set in '#ajax' above; calling a non-static method statically is a
   * fatal error as of PHP 8.
   */
  public static function viewsDisplaysAjaxCallback(array &$form, FormStateInterface $form_state) {
    $tab_index = $form_state->getTriggeringElement()['#array_parents'][2];
    $element_id = '#view-display-dropdown-' . $tab_index;
    $ajax_response = new AjaxResponse();
    $ajax_response->addCommand(new ReplaceCommand($element_id, $form['configuration_data_wrapper']['configuration_data'][$tab_index]['content']['view_content']['options']['display']));
    return $ajax_response;
  }

  /**
   * Returns enabled views keyed by machine name, labelled "Label (name)".
   */
  private function getViews() {
    $views = [];
    foreach (Views::getEnabledViews() as $view_name => $view) {
      $views[$view_name] = $view->label() . ' (' . $view_name . ')';
    }
    ksort($views);
    return $views;
  }

  /**
   * Get displays for a given view.
   *
   * Declared static because optionsForm() already calls it statically
   * (ViewContent::getViewDisplays()).
   */
  public static function getViewDisplays($view_name) {
    $displays = [];
    if (empty($view_name)) {
      return $displays;
    }
    $view = \Drupal::entityTypeManager()->getStorage('view')->load($view_name);
    foreach ($view->get('display') as $id => $display) {
      // A display counts as enabled unless it is explicitly disabled.
      $enabled = !empty($display['display_options']['enabled']) || !array_key_exists('enabled', $display['display_options']);
      if ($enabled) {
        $displays[$id] = $id .': '. $display['display_title'];
      }
    }
    return $displays;
  }

}
|
systemick3/systemick.co.uk
|
modules/contrib/quicktabs/src/Plugin/TabType/ViewContent.php
|
PHP
|
gpl-2.0
| 4,111
|
/***************************************************************************
NWNX FOR LINUX Vaultster plugin Class interface
Copyright (C) 2009 John Klar (plasmajohn at avlis dot org)
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
***************************************************************************/
#ifndef NWNXVaultster_h_
#define NWNXVaultster_h_
#include <cstdlib>
#include <iostream>
#include <map>
#include <string>
#include <boost/asio.hpp>
#include <boost/bind.hpp>
#include <boost/thread.hpp>
#include "NWNXBase.h"
#include "client.h"
#include "NWNXLib.h"
using namespace::std;
using namespace boost::asio;
// NWNX "Vaultster" plugin class: services vault transfer requests from the
// game server (implementation in the corresponding .cpp).
class CNWNXVaultster : public CNWNXBase
{
public:
CNWNXVaultster();
~CNWNXVaultster();
// Plugin initialisation hook; receives the NWNX config and log directory.
bool OnCreate(gline *nwnxConfig, const char *LogDir=NULL);
// Request dispatch hook invoked from NWScript with a request name and params.
char *OnRequest(char* gameObject, char* Request, char* Parameters);
// bool OnRelease();
protected:
// Reads plugin settings; presumably populates the members below -- confirm in .cpp.
bool Configure();
private:
string key_, pass_, path_;
int numclients_; // number of entries in the clients_ array
int lastclient_; // index of the last client slot handed out
request *clients_; // dynamically allocated array of client requests
};
#endif
|
mirko1978/nwnx2-linux-old
|
plugins/vaultster/NWNXVaultster.h
|
C
|
gpl-2.0
| 1,640
|
#include <linux/hardirq.h>
/*
* may_use_simd - whether it is allowable at this time to issue SIMD
* instructions or access the SIMD register file
*
* As architectures typically don't preserve the SIMD register file when
* taking an interrupt, !in_interrupt() should be a reasonable default.
*/
static __must_check inline bool may_use_simd(void)
{
return !in_interrupt();
}
|
MattCrystal/freezing-octo-ironman
|
include/asm-generic/simd.h
|
C
|
gpl-2.0
| 398
|
tspan {
font-size: 20px;
}
#svg-electoral h3 {
margin:0;
padding:0;
}
#svg-electoral p {
margin: 7px 0;
}
#svg-electoral .y {
margin: 0 0 27px 0;
}
#svg-electoral .y a, #svg-electoral .y a:visited {
margin:0 14px 0 0;
padding: 17px 0;
text-decoration: none!important;
border-bottom: none;
}
#svg-electoral .y a:hover {
}
/* Party colors for the year links. */
#svg-electoral .y .d {
color: rgb(36, 73, 153)!important;
}
#svg-electoral .y .r {
color: rgb(210, 37, 50)!important;
}
/* SVG fill colors for states. */
#svg-electoral .red {
fill: rgb(210, 37, 50)!important;
}
#svg-electoral .blue {
fill: rgb(36, 73, 153)!important;
}
/* Duplicate declarations removed below: each background-* declaration was
   repeated verbatim inside the same rule, which has no effect in the cascade. */
#svg-electoral .purple {
fill: rgb(138, 19, 153) !important;
background-color: white!important;
background-image: linear-gradient(90deg, rgba(200,0,0,.5) 50%, transparent 50%), linear-gradient(rgba(200,0,0,.5) 50%, transparent 50%)!important;
background-size: 50px 50px!important;
}
#svg-electoral .purple-diag {
background-color: gray!important;
background-image: repeating-linear-gradient(45deg, transparent, transparent 35px, rgba(255,255,255,.5) 35px, rgba(255,255,255,.5) 70px)!important;
}
|
amprog/cap-maps
|
wp-content/plugins/cap-graphics/packages/svg/us_bases_d3/index.css
|
CSS
|
gpl-2.0
| 1,567
|
<?php
/**
* @package Regular Labs Library
* @version 18.10.16084
*
* @author Peter van Westen <info@regularlabs.com>
* @link http://www.regularlabs.com
* @copyright Copyright © 2018 Regular Labs All Rights Reserved
* @license http://www.gnu.org/licenses/gpl-2.0.html GNU/GPL
*/
namespace RegularLabs\Plugin\System\RegularLabs;
defined('_JEXEC') or die;
use Joomla\CMS\Factory as JFactory;
use RegularLabs\Library\Document as RL_Document;
class DownloadKey
{
	/**
	 * Copies the download key saved via the Regular Labs Extension Manager
	 * configuration into the extra_query of the Regular Labs update sites.
	 */
	public static function update()
	{
		// Only act on the admin request that saves the manager component config.
		if (RL_Document::isClient('site'))
		{
			return;
		}

		$input = JFactory::getApplication()->input;

		if ($input->get('option') != 'com_config'
			|| $input->get('task') != 'config.save.component.apply'
			|| $input->get('component') != 'com_regularlabsmanager')
		{
			return;
		}

		$form = JFactory::getApplication()->input->post->get('jform', [], 'array');

		if ( ! isset($form['key']))
		{
			return;
		}

		$db = JFactory::getDbo();

		// Attach the key as "k=<key>" to every Regular Labs download location.
		$query = $db->getQuery(true)
			->update('#__update_sites')
			->set($db->quoteName('extra_query') . ' = ' . $db->quote('k=' . $form['key']))
			->where($db->quoteName('location') . ' LIKE ' . $db->quote('%download.regularlabs.com%'));
		$db->setQuery($query);
		$db->execute();
	}
}
|
lyrasoft/lyrasoft.github.io
|
plugins/system/regularlabs/src/DownloadKey.php
|
PHP
|
gpl-2.0
| 1,419
|
/*
* (C) Copyright 2012
* Joe Hershberger, National Instruments, joe.hershberger@ni.com
*
* See file CREDITS for list of people who contributed to this
* project.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of
* the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston,
* MA 02111-1307 USA
*/
#ifndef __ENV_CALLBACK_H__
#define __ENV_CALLBACK_H__
#include <env_flags.h>
#include <linker_lists.h>
#include <search.h>
/* Name of the environment variable holding variable->callback associations. */
#define ENV_CALLBACK_VAR ".callbacks"
/* Board configs can define additional static callback bindings */
#ifndef CONFIG_ENV_CALLBACK_LIST_STATIC
#define CONFIG_ENV_CALLBACK_LIST_STATIC
#endif
/* The "silent" binding only exists when the silent console is compiled in. */
#ifdef CONFIG_SILENT_CONSOLE
#define SILENT_CALLBACK "silent:silent,"
#else
#define SILENT_CALLBACK
#endif
/*
 * This list of callback bindings is static, but may be overridden by defining
 * a new association in the ".callbacks" environment variable.
 */
#define ENV_CALLBACK_LIST_STATIC ENV_CALLBACK_VAR ":callbacks," \
ENV_FLAGS_VAR ":flags," \
"baudrate:baudrate," \
"bootfile:bootfile," \
"loadaddr:loadaddr," \
SILENT_CALLBACK \
"stdin:console,stdout:console,stderr:console," \
CONFIG_ENV_CALLBACK_LIST_STATIC
/* Binds a callback name to its handler function. */
struct env_clbk_tbl {
const char *name; /* Callback name */
int (*callback)(const char *name, const char *value, enum env_op op,
int flags);
};
void env_callback_init(ENTRY *var_entry);
/*
 * Define a callback that can be associated with variables.
 * when associated through the ".callbacks" environment variable, the callback
 * will be executed any time the variable is inserted, overwritten, or deleted.
 */
#ifdef CONFIG_SPL_BUILD
/* SPL builds compile callbacks out; the no-op function just references the
 * callback symbol to avoid unused warnings. */
#define U_BOOT_ENV_CALLBACK(name, callback) \
static inline void _u_boot_env_noop_##name(void) \
{ \
(void)callback; \
}
#else
#define U_BOOT_ENV_CALLBACK(name, callback) \
ll_entry_declare(struct env_clbk_tbl, name, env_clbk, env_clbk) = \
{#name, callback}
#endif
#endif /* __ENV_CALLBACK_H__ */
|
ninjablocks/u-boot-kern3.2
|
include/env_callback.h
|
C
|
gpl-2.0
| 2,483
|
RC_LEVEL_FIRST=/etc/init.d/rc2
RC_LEVEL_START=/etc/init.d/rc2
RC_LEVEL_PROC=/etc/init.d/rc10
RC_LEVEL_PON=/etc/init.d/rc32
RC_LEVEL_IPV6=/etc/init.d/rc14
RC_LEVEL_SLAVE=/etc/init.d/rc3
RC_LEVEL_FINISH=/etc/init.d/rc35
-include $(LINUX_CONFIG)
all:
.PHONY: rcX
rcX: rcX_kernel
$(ROMFSINST) -a "/bin/mount -t proc proc /proc" $(RC_LEVEL_FIRST)
$(ROMFSINST) -a "/bin/mount -t ramfs ramfs /var" $(RC_LEVEL_FIRST)
$(ROMFSINST) -a "/bin/mount -t sysfs sysfs /sys" $(RC_LEVEL_FIRST)
ifndef CONFIG_USER_NFBI_SLAVE
ifndef CONFIG_ARCH_LUNA_SLAVE
$(ROMFSINST) -a "/bin/echo /sbin/mdev > /proc/sys/kernel/hotplug" $(RC_LEVEL_FIRST)
endif
$(ROMFSINST) -a "/bin/mount -t tmpfs mdev /dev" $(RC_LEVEL_FIRST)
ifndef CONFIG_ARCH_LUNA_SLAVE
# $(ROMFSINST) -a "/bin/mknod /dev/adsl0 c 100 0" $(RC_LEVEL_FIRST)
endif
# $(ROMFSINST) mdev.conf /etc/mdev.conf
ifdef CONFIG_USER_NFBI_MASTER
$(ROMFSINST) -a "/bin/mknod /dev/rtl_nfbi c 14 0" $(RC_LEVEL_FIRST)
endif #CONFIG_USER_NFBI_MASTER
ifdef CONFIG_USER_NFBI_SLAVE
$(ROMFSINST) -a "/bin/mknod /dev/rtl_mdio c 14 0" $(RC_LEVEL_FIRST)
endif #CONFIG_USER_NFBI_SLAVE
$(ROMFSINST) -a "mdev -s" $(RC_LEVEL_FIRST)
endif
[ -f $(ROMFSDIR)$(RC_PATH) ] || (touch $(ROMFSDIR)$(RC_PATH) && chmod 744 $(ROMFSDIR)$(RC_PATH))
$(ROMFSINST) -a "mkdir /var/tmp" $(RC_LEVEL_START)
$(ROMFSINST) -a "mkdir /var/log" $(RC_LEVEL_START)
$(ROMFSINST) -a "mkdir /var/run" $(RC_LEVEL_START)
$(ROMFSINST) -a "mkdir /var/lock" $(RC_LEVEL_START)
$(ROMFSINST) -a "mkdir /var/ppp" $(RC_LEVEL_START)
$(ROMFSINST) -a "mkdir /var/config" $(RC_LEVEL_START)
ifdef CONFIG_MTD_NAND
ifdef CONFIG_YAFFS_FS
$(ROMFSINST) -a "mount -t yaffs2 -o tags-ecc-off /dev/mtdblock6 /var/config/" $(RC_LEVEL_START)
else
ifdef CONFIG_JFFS2_FS
$(ROMFSINST) -a "mount -t jffs2 /dev/mtdblock1 /var/config/" $(RC_LEVEL_START)
else
echo "ERROR: No valid file system"
exit 0
endif
endif
endif
ifdef CONFIG_MTD_LUNA_NOR_SPI
$(ROMFSINST) -e CONFIG_JFFS2_FS -a "mount -t jffs2 /dev/mtdblock1 /var/config/" $(RC_LEVEL_START)
endif
$(ROMFSINST) -a "mkdir /var/udhcpd" $(RC_LEVEL_START)
$(ROMFSINST) -a "mkdir /var/udhcpc" $(RC_LEVEL_START)
$(ROMFSINST) -a "mkdir /var/mnt" $(RC_LEVEL_START)
$(ROMFSINST) -a "mkdir /var/ct" $(RC_LEVEL_START)
$(ROMFSINST) -a "mkdir /var/ct/tmp" $(RC_LEVEL_START)
$(ROMFSINST) -a "ifconfig lo 127.0.0.1" $(RC_LEVEL_START)
$(ROMFSINST) -a "route add -net 127.0.0.0 netmask 255.255.255.0 lo" $(RC_LEVEL_START)
$(ROMFSINST) -a "PATH=.:$\PATH" $(RC_LEVEL_START)
$(ROMFSINST) -a "cd /etc/scripts" $(RC_LEVEL_START)
ifeq ($(CONFIG_GPON_FEATURE),y)
$(ROMFSINST) -a "/etc/runsdk.sh" $(RC_LEVEL_PON)
endif
rcX_kernel:
ifdef CONFIG_LUNA_DUAL_LINUX
ifndef CONFIG_ARCH_LUNA_SLAVE
$(ROMFSINST) -a "echo 1 > /proc/sys/vm/drop_caches" $(RC_LEVEL_FINISH)
$(ROMFSINST) -a "echo 'Bootup Slave CPU'" $(RC_LEVEL_SLAVE)
$(ROMFSINST) -a "/etc/rc_boot_dual_linux" $(RC_LEVEL_SLAVE)
$(ROMFSINST) -a "mkdir -p /tmp/slave" $(RC_LEVEL_SLAVE)
$(ROMFSINST) -s /var/ethers /etc/ethers
endif
endif
$(ROMFSINST) -a "echo 200 > /proc/sys/net/core/netdev_max_backlog" $(RC_LEVEL_PROC)
ifndef CONFIG_ARCH_LUNA_SLAVE
$(ROMFSINST) -a "echo 600 > /proc/sys/net/ipv4/netfilter/ip_conntrack_tcp_timeout_established" $(RC_LEVEL_PROC)
endif
ifneq ($(CONFIG_MODULE_BUILD_IN),y)
$(ROMFSINST) -a "/etc/runsdk.sh" /etc/rc
endif
ifdef CONFIG_8M_SDRAM
# $(ROMFSINST) -a "echo 256 > /proc/sys/net/ipv4/ip_conntrack_max" $(RC_LEVEL_PROC)
$(ROMFSINST) -a "echo 768 > /proc/sys/net/ipv4/route/max_size" $(RC_LEVEL_PROC)
$(ROMFSINST) -a "echo 256 > /proc/sys/net/ipv4/route/gc_thresh" $(RC_LEVEL_PROC)
$(ROMFSINST) -a "echo 1 > /proc/sys/net/ipv4/route/gc_elasticity" $(RC_LEVEL_PROC)
$(ROMFSINST) -a "echo 60 > /proc/sys/net/ipv4/route/gc_interval" $(RC_LEVEL_PROC)
$(ROMFSINST) -a "echo 20 > /proc/sys/net/ipv4/route/gc_timeout" $(RC_LEVEL_PROC)
else
ifdef CONFIG_MEM_LIMITATION
$(ROMFSINST) -a "echo 1500 > /proc/sys/net/ipv4/route/max_size" $(RC_LEVEL_PROC)
$(ROMFSINST) -a "echo 1500 > /proc/sys/net/ipv4/inet_peer_threshold" $(RC_LEVEL_PROC)
$(ROMFSINST) -a "echo 20 > /proc/sys/net/ipv4/inet_peer_minttl" $(RC_LEVEL_PROC)
$(ROMFSINST) -a "echo 60 > /proc/sys/net/ipv4/inet_peer_maxttl" $(RC_LEVEL_PROC)
$(ROMFSINST) -a "echo 50 > /proc/sys/net/ipv4/netfilter/ip_conntrack_generic_timeout" $(RC_LEVEL_PROC)
endif
endif
$(ROMFSINST) -a "echo 10 > /proc/sys/net/ipv4/tcp_keepalive_intvl" $(RC_LEVEL_PROC)
$(ROMFSINST) -a "echo 3 > /proc/sys/net/ipv4/tcp_keepalive_probes" $(RC_LEVEL_PROC)
$(ROMFSINST) -a "echo 10 > /proc/sys/net/ipv4/tcp_keepalive_time" $(RC_LEVEL_PROC)
ifdef CONFIG_ARCH_LUNA_SLAVE # specific networking interface configuration with Slave CPU
$(ROMFSINST) -a "echo 'Config Network Interface...'" $(RC_LEVEL_SLAVE)
$(ROMFSINST) -a "/bin/brctl addbr br0" $(RC_LEVEL_SLAVE)
$(ROMFSINST) -a "/bin/ifconfig vwlan hw ether $(CONFIG_DEFAULT_SLAVE_IPC_MAC_ADDRESS)" $(RC_LEVEL_SLAVE)
$(ROMFSINST) -a "/bin/ifconfig vwlan 10.253.253.2 netmask 255.255.255.252 up" $(RC_LEVEL_SLAVE)
$(ROMFSINST) -a "arp -s 10.253.253.1 $(CONFIG_DEFAULT_MASTER_IPC_MAC_ADDRESS)" $(RC_LEVEL_SLAVE)
endif
# Added by Mason Yu.
ifdef CONFIG_IPV6
$(ROMFSINST) -a "echo 2 > /proc/sys/net/ipv6/conf/default/accept_dad" $(RC_LEVEL_IPV6)
endif
|
ysleu/RTL8685
|
uClinux-dist/user2/proc_var/Makefile
|
Makefile
|
gpl-2.0
| 5,254
|
/*
* Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2011 Apple Inc. All rights reserved.
* Copyright (C) 2007 Eric Seidel <eric@webkit.org>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
*/
#include "config.h"
#include "Heap.h"
#include "CodeBlock.h"
#include "ConservativeRoots.h"
#include "CopiedSpace.h"
#include "CopiedSpaceInlines.h"
#include "CopyVisitorInlines.h"
#include "GCActivityCallback.h"
#include "HeapRootVisitor.h"
#include "HeapStatistics.h"
#include "IncrementalSweeper.h"
#include "Interpreter.h"
#include "VM.h"
#include "JSGlobalObject.h"
#include "JSLock.h"
#include "JSONObject.h"
#include "Operations.h"
#include "Tracing.h"
#include "UnlinkedCodeBlock.h"
#include "WeakSetInlines.h"
#include <algorithm>
#include <wtf/RAMSize.h>
#include <wtf/CurrentTime.h>
using namespace std;
using namespace JSC;
namespace JSC {
namespace {
static const size_t largeHeapSize = 32 * MB; // About 1.5X the average webpage.
static const size_t smallHeapSize = 1 * MB; // Matches the FastMalloc per-thread cache.
#if ENABLE(GC_LOGGING)
#if COMPILER(CLANG)
#define DEFINE_GC_LOGGING_GLOBAL(type, name, arguments) \
_Pragma("clang diagnostic push") \
_Pragma("clang diagnostic ignored \"-Wglobal-constructors\"") \
_Pragma("clang diagnostic ignored \"-Wexit-time-destructors\"") \
static type name arguments; \
_Pragma("clang diagnostic pop")
#else
#define DEFINE_GC_LOGGING_GLOBAL(type, name, arguments) \
static type name arguments;
#endif // COMPILER(CLANG)
struct GCTimer {
GCTimer(const char* name)
: m_time(0)
, m_min(100000000)
, m_max(0)
, m_count(0)
, m_name(name)
{
}
~GCTimer()
{
dataLogF("%s: %.2lfms (avg. %.2lf, min. %.2lf, max. %.2lf)\n", m_name, m_time * 1000, m_time * 1000 / m_count, m_min*1000, m_max*1000);
}
double m_time;
double m_min;
double m_max;
size_t m_count;
const char* m_name;
};
struct GCTimerScope {
GCTimerScope(GCTimer* timer)
: m_timer(timer)
, m_start(WTF::currentTime())
{
}
~GCTimerScope()
{
double delta = WTF::currentTime() - m_start;
if (delta < m_timer->m_min)
m_timer->m_min = delta;
if (delta > m_timer->m_max)
m_timer->m_max = delta;
m_timer->m_count++;
m_timer->m_time += delta;
}
GCTimer* m_timer;
double m_start;
};
struct GCCounter {
GCCounter(const char* name)
: m_name(name)
, m_count(0)
, m_total(0)
, m_min(10000000)
, m_max(0)
{
}
void count(size_t amount)
{
m_count++;
m_total += amount;
if (amount < m_min)
m_min = amount;
if (amount > m_max)
m_max = amount;
}
~GCCounter()
{
dataLogF("%s: %zu values (avg. %zu, min. %zu, max. %zu)\n", m_name, m_total, m_total / m_count, m_min, m_max);
}
const char* m_name;
size_t m_count;
size_t m_total;
size_t m_min;
size_t m_max;
};
#define GCPHASE(name) DEFINE_GC_LOGGING_GLOBAL(GCTimer, name##Timer, (#name)); GCTimerScope name##TimerScope(&name##Timer)
#define COND_GCPHASE(cond, name1, name2) DEFINE_GC_LOGGING_GLOBAL(GCTimer, name1##Timer, (#name1)); DEFINE_GC_LOGGING_GLOBAL(GCTimer, name2##Timer, (#name2)); GCTimerScope name1##CondTimerScope(cond ? &name1##Timer : &name2##Timer)
#define GCCOUNTER(name, value) do { DEFINE_GC_LOGGING_GLOBAL(GCCounter, name##Counter, (#name)); name##Counter.count(value); } while (false)
#else
#define GCPHASE(name) do { } while (false)
#define COND_GCPHASE(cond, name1, name2) do { } while (false)
#define GCCOUNTER(name, value) do { } while (false)
#endif
static inline size_t minHeapSize(HeapType heapType, size_t ramSize)
{
if (heapType == LargeHeap)
return min(largeHeapSize, ramSize / 4);
return smallHeapSize;
}
static inline size_t proportionalHeapSize(size_t heapSize, size_t ramSize)
{
// Try to stay under 1/2 RAM size to leave room for the DOM, rendering, networking, etc.
if (heapSize < ramSize / 4)
return 2 * heapSize;
if (heapSize < ramSize / 2)
return 1.5 * heapSize;
return 1.25 * heapSize;
}
static inline bool isValidSharedInstanceThreadState(VM* vm)
{
return vm->apiLock().currentThreadIsHoldingLock();
}
static inline bool isValidThreadState(VM* vm)
{
if (vm->identifierTable != wtfThreadData().currentIdentifierTable())
return false;
if (vm->isSharedInstance() && !isValidSharedInstanceThreadState(vm))
return false;
return true;
}
struct MarkObject : public MarkedBlock::VoidFunctor {
void operator()(JSCell* cell)
{
if (cell->isZapped())
return;
Heap::heap(cell)->setMarked(cell);
}
};
struct Count : public MarkedBlock::CountFunctor {
void operator()(JSCell*) { count(1); }
};
struct CountIfGlobalObject : MarkedBlock::CountFunctor {
void operator()(JSCell* cell) {
if (!cell->isObject())
return;
if (!asObject(cell)->isGlobalObject())
return;
count(1);
}
};
class RecordType {
public:
typedef PassOwnPtr<TypeCountSet> ReturnType;
RecordType();
void operator()(JSCell*);
ReturnType returnValue();
private:
const char* typeName(JSCell*);
OwnPtr<TypeCountSet> m_typeCountSet;
};
inline RecordType::RecordType()
: m_typeCountSet(adoptPtr(new TypeCountSet))
{
}
inline const char* RecordType::typeName(JSCell* cell)
{
const ClassInfo* info = cell->classInfo();
if (!info || !info->className)
return "[unknown]";
return info->className;
}
inline void RecordType::operator()(JSCell* cell)
{
m_typeCountSet->add(typeName(cell));
}
inline PassOwnPtr<TypeCountSet> RecordType::returnValue()
{
return m_typeCountSet.release();
}
} // anonymous namespace
Heap::Heap(VM* vm, HeapType heapType)
: m_heapType(heapType)
, m_ramSize(ramSize())
, m_minBytesPerCycle(minHeapSize(m_heapType, m_ramSize))
, m_sizeAfterLastCollect(0)
, m_bytesAllocatedLimit(m_minBytesPerCycle)
, m_bytesAllocated(0)
, m_bytesAbandoned(0)
, m_operationInProgress(NoOperation)
, m_blockAllocator()
, m_objectSpace(this)
, m_storageSpace(this)
, m_machineThreads(this)
, m_sharedData(vm)
, m_slotVisitor(m_sharedData)
, m_copyVisitor(m_sharedData)
, m_handleSet(vm)
, m_isSafeToCollect(false)
, m_vm(vm)
, m_lastGCLength(0)
, m_lastCodeDiscardTime(WTF::currentTime())
, m_activityCallback(DefaultGCActivityCallback::create(this))
, m_sweeper(IncrementalSweeper::create(this))
{
m_storageSpace.init();
}
Heap::~Heap()
{
}
bool Heap::isPagedOut(double deadline)
{
return m_objectSpace.isPagedOut(deadline) || m_storageSpace.isPagedOut(deadline);
}
// The VM is being destroyed and the collector will never run again.
// Run all pending finalizers now because we won't get another chance.
void Heap::lastChanceToFinalize()
{
RELEASE_ASSERT(!m_vm->dynamicGlobalObject);
RELEASE_ASSERT(m_operationInProgress == NoOperation);
m_objectSpace.lastChanceToFinalize();
#if ENABLE(SIMPLE_HEAP_PROFILING)
m_slotVisitor.m_visitedTypeCounts.dump(WTF::dataFile(), "Visited Type Counts");
m_destroyedTypeCounts.dump(WTF::dataFile(), "Destroyed Type Counts");
#endif
}
void Heap::reportExtraMemoryCostSlowCase(size_t cost)
{
// Our frequency of garbage collection tries to balance memory use against speed
// by collecting based on the number of newly created values. However, for values
// that hold on to a great deal of memory that's not in the form of other JS values,
// that is not good enough - in some cases a lot of those objects can pile up and
// use crazy amounts of memory without a GC happening. So we track these extra
// memory costs. Only unusually large objects are noted, and we only keep track
// of this extra cost until the next GC. In garbage collected languages, most values
// are either very short lived temporaries, or have extremely long lifetimes. So
// if a large value survives one garbage collection, there is not much point to
// collecting more frequently as long as it stays alive.
didAllocate(cost);
if (shouldCollect())
collect(DoNotSweep);
}
void Heap::reportAbandonedObjectGraph()
{
// Our clients don't know exactly how much memory they
// are abandoning so we just guess for them.
double abandonedBytes = 0.10 * m_sizeAfterLastCollect;
// We want to accelerate the next collection. Because memory has just
// been abandoned, the next collection has the potential to
// be more profitable. Since allocation is the trigger for collection,
// we hasten the next collection by pretending that we've allocated more memory.
didAbandon(abandonedBytes);
}
void Heap::didAbandon(size_t bytes)
{
m_activityCallback->didAllocate(m_bytesAllocated + m_bytesAbandoned);
m_bytesAbandoned += bytes;
}
void Heap::protect(JSValue k)
{
ASSERT(k);
ASSERT(m_vm->apiLock().currentThreadIsHoldingLock());
if (!k.isCell())
return;
m_protectedValues.add(k.asCell());
}
bool Heap::unprotect(JSValue k)
{
ASSERT(k);
ASSERT(m_vm->apiLock().currentThreadIsHoldingLock());
if (!k.isCell())
return false;
return m_protectedValues.remove(k.asCell());
}
void Heap::jettisonDFGCodeBlock(PassOwnPtr<CodeBlock> codeBlock)
{
m_dfgCodeBlocks.jettison(codeBlock);
}
void Heap::markProtectedObjects(HeapRootVisitor& heapRootVisitor)
{
ProtectCountSet::iterator end = m_protectedValues.end();
for (ProtectCountSet::iterator it = m_protectedValues.begin(); it != end; ++it)
heapRootVisitor.visit(&it->key);
}
void Heap::pushTempSortVector(Vector<ValueStringPair, 0, UnsafeVectorOverflow>* tempVector)
{
m_tempSortingVectors.append(tempVector);
}
void Heap::popTempSortVector(Vector<ValueStringPair, 0, UnsafeVectorOverflow>* tempVector)
{
ASSERT_UNUSED(tempVector, tempVector == m_tempSortingVectors.last());
m_tempSortingVectors.removeLast();
}
void Heap::markTempSortVectors(HeapRootVisitor& heapRootVisitor)
{
typedef Vector<Vector<ValueStringPair, 0, UnsafeVectorOverflow>* > VectorOfValueStringVectors;
VectorOfValueStringVectors::iterator end = m_tempSortingVectors.end();
for (VectorOfValueStringVectors::iterator it = m_tempSortingVectors.begin(); it != end; ++it) {
Vector<ValueStringPair, 0, UnsafeVectorOverflow>* tempSortingVector = *it;
Vector<ValueStringPair>::iterator vectorEnd = tempSortingVector->end();
for (Vector<ValueStringPair>::iterator vectorIt = tempSortingVector->begin(); vectorIt != vectorEnd; ++vectorIt) {
if (vectorIt->first)
heapRootVisitor.visit(&vectorIt->first);
}
}
}
void Heap::harvestWeakReferences()
{
m_slotVisitor.harvestWeakReferences();
}
void Heap::finalizeUnconditionalFinalizers()
{
m_slotVisitor.finalizeUnconditionalFinalizers();
}
inline JSStack& Heap::stack()
{
return m_vm->interpreter->stack();
}
void Heap::canonicalizeCellLivenessData()
{
m_objectSpace.canonicalizeCellLivenessData();
}
void Heap::getConservativeRegisterRoots(HashSet<JSCell*>& roots)
{
ASSERT(isValidThreadState(m_vm));
ConservativeRoots stackRoots(&m_objectSpace.blocks(), &m_storageSpace);
stack().gatherConservativeRoots(stackRoots);
size_t stackRootCount = stackRoots.size();
JSCell** registerRoots = stackRoots.roots();
for (size_t i = 0; i < stackRootCount; i++) {
setMarked(registerRoots[i]);
roots.add(registerRoots[i]);
}
}
void Heap::markRoots()
{
SamplingRegion samplingRegion("Garbage Collection: Tracing");
GCPHASE(MarkRoots);
ASSERT(isValidThreadState(m_vm));
#if ENABLE(OBJECT_MARK_LOGGING)
double gcStartTime = WTF::currentTime();
#endif
void* dummy;
// We gather conservative roots before clearing mark bits because conservative
// gathering uses the mark bits to determine whether a reference is valid.
ConservativeRoots machineThreadRoots(&m_objectSpace.blocks(), &m_storageSpace);
m_jitStubRoutines.clearMarks();
{
GCPHASE(GatherConservativeRoots);
m_machineThreads.gatherConservativeRoots(machineThreadRoots, &dummy);
}
ConservativeRoots stackRoots(&m_objectSpace.blocks(), &m_storageSpace);
m_dfgCodeBlocks.clearMarks();
{
GCPHASE(GatherStackRoots);
stack().gatherConservativeRoots(
stackRoots, m_jitStubRoutines, m_dfgCodeBlocks);
}
#if ENABLE(DFG_JIT)
ConservativeRoots scratchBufferRoots(&m_objectSpace.blocks(), &m_storageSpace);
{
GCPHASE(GatherScratchBufferRoots);
m_vm->gatherConservativeRoots(scratchBufferRoots);
}
#endif
{
GCPHASE(clearMarks);
m_objectSpace.clearMarks();
}
m_sharedData.didStartMarking();
SlotVisitor& visitor = m_slotVisitor;
visitor.setup();
HeapRootVisitor heapRootVisitor(visitor);
{
ParallelModeEnabler enabler(visitor);
if (m_vm->codeBlocksBeingCompiled.size()) {
GCPHASE(VisitActiveCodeBlock);
for (size_t i = 0; i < m_vm->codeBlocksBeingCompiled.size(); i++)
m_vm->codeBlocksBeingCompiled[i]->visitAggregate(visitor);
}
m_vm->smallStrings.visitStrongReferences(visitor);
{
GCPHASE(VisitMachineRoots);
MARK_LOG_ROOT(visitor, "C++ Stack");
visitor.append(machineThreadRoots);
visitor.donateAndDrain();
}
{
GCPHASE(VisitStackRoots);
MARK_LOG_ROOT(visitor, "Stack");
visitor.append(stackRoots);
visitor.donateAndDrain();
}
#if ENABLE(DFG_JIT)
{
GCPHASE(VisitScratchBufferRoots);
MARK_LOG_ROOT(visitor, "Scratch Buffers");
visitor.append(scratchBufferRoots);
visitor.donateAndDrain();
}
#endif
{
GCPHASE(VisitProtectedObjects);
MARK_LOG_ROOT(visitor, "Protected Objects");
markProtectedObjects(heapRootVisitor);
visitor.donateAndDrain();
}
{
GCPHASE(VisitTempSortVectors);
MARK_LOG_ROOT(visitor, "Temp Sort Vectors");
markTempSortVectors(heapRootVisitor);
visitor.donateAndDrain();
}
{
GCPHASE(MarkingArgumentBuffers);
if (m_markListSet && m_markListSet->size()) {
MARK_LOG_ROOT(visitor, "Argument Buffers");
MarkedArgumentBuffer::markLists(heapRootVisitor, *m_markListSet);
visitor.donateAndDrain();
}
}
if (m_vm->exception) {
GCPHASE(MarkingException);
MARK_LOG_ROOT(visitor, "Exceptions");
heapRootVisitor.visit(&m_vm->exception);
visitor.donateAndDrain();
}
{
GCPHASE(VisitStrongHandles);
MARK_LOG_ROOT(visitor, "Strong Handles");
m_handleSet.visitStrongHandles(heapRootVisitor);
visitor.donateAndDrain();
}
{
GCPHASE(HandleStack);
MARK_LOG_ROOT(visitor, "Handle Stack");
m_handleStack.visit(heapRootVisitor);
visitor.donateAndDrain();
}
{
GCPHASE(TraceCodeBlocksAndJITStubRoutines);
MARK_LOG_ROOT(visitor, "Trace Code Blocks and JIT Stub Routines");
m_dfgCodeBlocks.traceMarkedCodeBlocks(visitor);
m_jitStubRoutines.traceMarkedStubRoutines(visitor);
visitor.donateAndDrain();
}
#if ENABLE(PARALLEL_GC)
{
GCPHASE(Convergence);
visitor.drainFromShared(SlotVisitor::MasterDrain);
}
#endif
}
// Weak references must be marked last because their liveness depends on
// the liveness of the rest of the object graph.
{
GCPHASE(VisitingLiveWeakHandles);
MARK_LOG_ROOT(visitor, "Live Weak Handles");
while (true) {
m_objectSpace.visitWeakSets(heapRootVisitor);
harvestWeakReferences();
if (visitor.isEmpty())
break;
{
ParallelModeEnabler enabler(visitor);
visitor.donateAndDrain();
#if ENABLE(PARALLEL_GC)
visitor.drainFromShared(SlotVisitor::MasterDrain);
#endif
}
}
}
GCCOUNTER(VisitedValueCount, visitor.visitCount());
m_sharedData.didFinishMarking();
#if ENABLE(OBJECT_MARK_LOGGING)
size_t visitCount = visitor.visitCount();
#if ENABLE(PARALLEL_GC)
visitCount += m_sharedData.childVisitCount();
#endif
MARK_LOG_MESSAGE2("\nNumber of live Objects after full GC %lu, took %.6f secs\n", visitCount, WTF::currentTime() - gcStartTime);
#endif
visitor.reset();
#if ENABLE(PARALLEL_GC)
m_sharedData.resetChildren();
#endif
m_sharedData.reset();
}
void Heap::copyBackingStores()
{
m_storageSpace.startedCopying();
if (m_storageSpace.shouldDoCopyPhase()) {
m_sharedData.didStartCopying();
m_copyVisitor.startCopying();
m_copyVisitor.copyFromShared();
m_copyVisitor.doneCopying();
// We need to wait for everybody to finish and return their CopiedBlocks
// before signaling that the phase is complete.
m_storageSpace.doneCopying();
m_sharedData.didFinishCopying();
} else
m_storageSpace.doneCopying();
}
size_t Heap::objectCount()
{
return m_objectSpace.objectCount();
}
size_t Heap::size()
{
return m_objectSpace.size() + m_storageSpace.size();
}
size_t Heap::capacity()
{
return m_objectSpace.capacity() + m_storageSpace.capacity();
}
size_t Heap::protectedGlobalObjectCount()
{
return forEachProtectedCell<CountIfGlobalObject>();
}
size_t Heap::globalObjectCount()
{
return m_objectSpace.forEachLiveCell<CountIfGlobalObject>();
}
size_t Heap::protectedObjectCount()
{
return forEachProtectedCell<Count>();
}
PassOwnPtr<TypeCountSet> Heap::protectedObjectTypeCounts()
{
return forEachProtectedCell<RecordType>();
}
PassOwnPtr<TypeCountSet> Heap::objectTypeCounts()
{
return m_objectSpace.forEachLiveCell<RecordType>();
}
void Heap::deleteAllCompiledCode()
{
// If JavaScript is running, it's not safe to delete code, since we'll end
// up deleting code that is live on the stack.
if (m_vm->dynamicGlobalObject)
return;
for (ExecutableBase* current = m_compiledCode.head(); current; current = current->next()) {
if (!current->isFunctionExecutable())
continue;
static_cast<FunctionExecutable*>(current)->clearCodeIfNotCompiling();
}
m_dfgCodeBlocks.clearMarks();
m_dfgCodeBlocks.deleteUnmarkedJettisonedCodeBlocks();
}
void Heap::deleteUnmarkedCompiledCode()
{
ExecutableBase* next;
for (ExecutableBase* current = m_compiledCode.head(); current; current = next) {
next = current->next();
if (isMarked(current))
continue;
// We do this because executable memory is limited on some platforms and because
// CodeBlock requires eager finalization.
ExecutableBase::clearCodeVirtual(current);
m_compiledCode.remove(current);
}
m_dfgCodeBlocks.deleteUnmarkedJettisonedCodeBlocks();
m_jitStubRoutines.deleteUnmarkedJettisonedStubRoutines();
}
void Heap::collectAllGarbage()
{
if (!m_isSafeToCollect)
return;
collect(DoSweep);
}
static double minute = 60.0;
void Heap::collect(SweepToggle sweepToggle)
{
SamplingRegion samplingRegion("Garbage Collection");
GCPHASE(Collect);
ASSERT(vm()->apiLock().currentThreadIsHoldingLock());
RELEASE_ASSERT(vm()->identifierTable == wtfThreadData().currentIdentifierTable());
ASSERT(m_isSafeToCollect);
JAVASCRIPTCORE_GC_BEGIN();
RELEASE_ASSERT(m_operationInProgress == NoOperation);
m_operationInProgress = Collection;
m_activityCallback->willCollect();
double lastGCStartTime = WTF::currentTime();
if (lastGCStartTime - m_lastCodeDiscardTime > minute) {
deleteAllCompiledCode();
m_lastCodeDiscardTime = WTF::currentTime();
}
{
GCPHASE(Canonicalize);
m_objectSpace.canonicalizeCellLivenessData();
}
markRoots();
{
GCPHASE(ReapingWeakHandles);
m_objectSpace.reapWeakSets();
}
JAVASCRIPTCORE_GC_MARKED();
{
m_blockSnapshot.resize(m_objectSpace.blocks().set().size());
MarkedBlockSnapshotFunctor functor(m_blockSnapshot);
m_objectSpace.forEachBlock(functor);
}
copyBackingStores();
{
GCPHASE(FinalizeUnconditionalFinalizers);
finalizeUnconditionalFinalizers();
}
{
GCPHASE(finalizeSmallStrings);
m_vm->smallStrings.finalizeSmallStrings();
}
{
GCPHASE(DeleteCodeBlocks);
deleteUnmarkedCompiledCode();
}
{
GCPHASE(DeleteSourceProviderCaches);
m_vm->clearSourceProviderCaches();
}
if (sweepToggle == DoSweep) {
SamplingRegion samplingRegion("Garbage Collection: Sweeping");
GCPHASE(Sweeping);
m_objectSpace.sweep();
m_objectSpace.shrink();
}
m_sweeper->startSweeping(m_blockSnapshot);
m_bytesAbandoned = 0;
{
GCPHASE(ResetAllocators);
m_objectSpace.resetAllocators();
}
size_t currentHeapSize = size();
if (Options::gcMaxHeapSize() && currentHeapSize > Options::gcMaxHeapSize())
HeapStatistics::exitWithFailure();
m_sizeAfterLastCollect = currentHeapSize;
// To avoid pathological GC churn in very small and very large heaps, we set
// the new allocation limit based on the current size of the heap, with a
// fixed minimum.
size_t maxHeapSize = max(minHeapSize(m_heapType, m_ramSize), proportionalHeapSize(currentHeapSize, m_ramSize));
m_bytesAllocatedLimit = maxHeapSize - currentHeapSize;
m_bytesAllocated = 0;
double lastGCEndTime = WTF::currentTime();
m_lastGCLength = lastGCEndTime - lastGCStartTime;
if (Options::recordGCPauseTimes())
HeapStatistics::recordGCPauseTime(lastGCStartTime, lastGCEndTime);
RELEASE_ASSERT(m_operationInProgress == Collection);
m_operationInProgress = NoOperation;
JAVASCRIPTCORE_GC_END();
if (Options::useZombieMode())
zombifyDeadObjects();
if (Options::objectsAreImmortal())
markDeadObjects();
if (Options::showObjectStatistics())
HeapStatistics::showObjectStatistics(this);
}
void Heap::markDeadObjects()
{
m_objectSpace.forEachDeadCell<MarkObject>();
}
void Heap::setActivityCallback(PassOwnPtr<GCActivityCallback> activityCallback)
{
m_activityCallback = activityCallback;
}
GCActivityCallback* Heap::activityCallback()
{
return m_activityCallback.get();
}
IncrementalSweeper* Heap::sweeper()
{
return m_sweeper.get();
}
void Heap::setGarbageCollectionTimerEnabled(bool enable)
{
activityCallback()->setEnabled(enable);
}
void Heap::didAllocate(size_t bytes)
{
m_activityCallback->didAllocate(m_bytesAllocated + m_bytesAbandoned);
m_bytesAllocated += bytes;
}
bool Heap::isValidAllocation(size_t)
{
if (!isValidThreadState(m_vm))
return false;
if (m_operationInProgress != NoOperation)
return false;
return true;
}
void Heap::addFinalizer(JSCell* cell, Finalizer finalizer)
{
WeakSet::allocate(cell, &m_finalizerOwner, reinterpret_cast<void*>(finalizer)); // Balanced by FinalizerOwner::finalize().
}
void Heap::FinalizerOwner::finalize(Handle<Unknown> handle, void* context)
{
HandleSlot slot = handle.slot();
Finalizer finalizer = reinterpret_cast<Finalizer>(context);
finalizer(slot->asCell());
WeakSet::deallocate(WeakImpl::asWeakImpl(slot));
}
void Heap::addCompiledCode(ExecutableBase* executable)
{
m_compiledCode.append(executable);
}
class Zombify : public MarkedBlock::VoidFunctor {
public:
void operator()(JSCell* cell)
{
void** current = reinterpret_cast<void**>(cell);
// We want to maintain zapped-ness because that's how we know if we've called
// the destructor.
if (cell->isZapped())
current++;
void* limit = static_cast<void*>(reinterpret_cast<char*>(cell) + MarkedBlock::blockFor(cell)->cellSize());
for (; current < limit; current++)
*current = reinterpret_cast<void*>(0xbbadbeef);
}
};
void Heap::zombifyDeadObjects()
{
// Sweep now because destructors will crash once we're zombified.
m_objectSpace.sweep();
m_objectSpace.forEachDeadCell<Zombify>();
}
} // namespace JSC
|
166MMX/openjdk.java.net-openjfx-8u40-rt
|
modules/web/src/main/native/Source/JavaScriptCore/heap/Heap.cpp
|
C++
|
gpl-2.0
| 25,580
|
/*
Theme Name: Pytheas
Description: Adds support for languages written in a Right To Left (RTL) direction.
It's easy, just a matter of overwriting all the horizontal positioning attributes
of your CSS stylesheet in a separate stylesheet file named rtl.css.
See http://codex.wordpress.org/Right_to_Left_Language_Support
*/
|
rkarpeles/readernew
|
wp-content/themes/Pytheas/rtl.css
|
CSS
|
gpl-2.0
| 322
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='StaticPage',
fields=[
('id', models.AutoField(serialize=False, auto_created=True, primary_key=True, verbose_name='ID')),
('url', models.CharField(verbose_name='URL', db_index=True, max_length=100)),
('title', models.CharField(verbose_name='title', max_length=200)),
('title_ru', models.CharField(null=True, verbose_name='title', max_length=200)),
('title_en', models.CharField(null=True, verbose_name='title', max_length=200)),
('content', models.TextField(blank=True, verbose_name='content')),
('content_ru', models.TextField(null=True, blank=True, verbose_name='content')),
('content_en', models.TextField(null=True, blank=True, verbose_name='content')),
('template_name', models.CharField(help_text="Example: 'staticpages/contact_page.html'. If this isn't provided, the system will use 'staticpages/default.html'.", verbose_name='template name', blank=True, max_length=70)),
],
options={
'verbose_name_plural': 'static pages',
'ordering': ('url',),
'verbose_name': 'static page',
},
bases=(models.Model,),
),
]
|
null-none/OpenGain
|
default_set/staticpages/migrations/0001_initial.py
|
Python
|
gpl-2.0
| 1,523
|
<?php
/**
* Homepage Shop Panel
*/
/**
* The Variables
*
* Setup default variables, overriding them if the "Theme Options" have been saved.
*/
global $woocommerce, $post;
$settings = array(
'homepage_hero_product_id' => 0,
'homepage_hero_product_heading' => ''
);
$settings = woo_get_dynamic_values( $settings );
if ( 0 < intval( $settings['homepage_hero_product_id'] ) ) {
?>
<section id="home-hero" class="widget_woo_component">
<div class="col-full">
<?php
$args = array(
'post_type' => 'product',
'post__in' => array( intval( $settings['homepage_hero_product_id'] ) )
);
$first_or_last = 'first';
$loop = new WP_Query( $args );
$query_count = $loop->post_count;
$count = 0;
while ( $loop->have_posts() ) : $loop->the_post(); $count++;
if ( function_exists( 'get_product' ) ) {
$_product = get_product( $loop->post->ID );
} else {
$_product = new WC_Product( $loop->post->ID );
}
?>
<?php if ( '' != $settings['homepage_hero_product_heading'] ) { ?><span class="heading"><?php echo $settings['homepage_hero_product_heading']; ?></span><?php } ?>
<h2 class="widget-title"><a href="<?php echo esc_url( get_permalink( $loop->post->ID ) ); ?>" title="<?php echo esc_attr($loop->post->post_title ? $loop->post->post_title : $loop->post->ID); ?>"><?php echo get_the_title(); ?></a></h2>
<div class="hero-product">
<div class="hero-image">
<?php woocommerce_show_product_sale_flash( $post, $_product ); ?>
<?php if (has_post_thumbnail( $loop->post->ID )) { ?>
<a href="<?php echo esc_url( get_permalink( $loop->post->ID ) ); ?>" title="<?php echo esc_attr($loop->post->post_title ? $loop->post->post_title : $loop->post->ID); ?>">
<?php echo get_the_post_thumbnail( $loop->post->ID, 'shop_single' ); ?>
</a>
<?php }
else {
echo '<img src="'.woocommerce_placeholder_img_src().'" alt="Placeholder" />';
}
?>
<span class="price-wrap"><a href="<?php echo esc_url( get_permalink( $loop->post->ID ) ); ?>" title="<?php echo esc_attr($loop->post->post_title ? $loop->post->post_title : $loop->post->ID); ?>"><span class="price"><strong><?php echo $_product->get_price_html(); ?></strong></span></a></span>
</div>
<div class="hero-excerpt">
<?php woocommerce_template_single_excerpt(); ?>
<a class="button details" href="<?php echo esc_url( get_permalink( $loop->post->ID ) ); ?>" title="<?php echo esc_attr($loop->post->post_title ? $loop->post->post_title : $loop->post->ID); ?>"><?php _e('View Details' ,'woothemes'); ?></a>
<?php woocommerce_template_loop_add_to_cart( $loop->post, $_product ); ?>
</div>
<?php endwhile; ?>
</div><!--/ul.recent-->
</div><!-- /.col-full -->
</section>
<?php wp_reset_postdata(); ?>
<?php } ?>
|
pixbit/wp-dpkt
|
wp-content/themes/theonepager/includes/hero-product.php
|
PHP
|
gpl-2.0
| 3,003
|
<?php global $data; ?>
<h2 class="sub-header video"><span class="title-wrap"><a href="<?php echo $data['featured_video_header_link'] ?>"><?php echo $data['featured_video_header'] ?> ››</a></span></h2>
<div id="sections-video" class="carousel module">
	<ul>
	<?php
	// Featured-video carousel (full-width / no-sidebar variant).
	// Pulls the configured number of posts from the selected category;
	// theme options arrive via the global $data array.
	$args=array( 'showposts' => $data['featured_video_num'],'cat' => $data['featured_videos'] ); $my_query = new WP_Query($args);$count=0;
	if ( $my_query->have_posts() ) { while ($my_query->have_posts()) : $my_query->the_post();
	$count++;

	//SMALL IMAGES: 150x150 cropped thumbnail shown in the carousel cell
	$thumb = get_post_thumbnail_id();
	$img_url = wp_get_attachment_url( $thumb,'index-blog' );
	$image = aq_resize( $img_url, 150, 150, true );
	// custom field holding the (external) video URL, if any
	$video = get_post_meta($post->ID, 'siiimple_video', TRUE);

	//LARGE IMAGES: lightbox version for posts without a video
	$thumb2 = get_post_thumbnail_id();
	$img_url2 = wp_get_attachment_url( $thumb2,'index-blog' );
	// NOTE(review): aq_resize() is usually (url, width, height, crop);
	// here 'true' occupies the height slot -- confirm this is intended.
	$image2 = aq_resize( $img_url2, 500, true );
	?>
	<?php if ($video) { ?>
	<li>
		<?php if ($image) { ?>
		<a class="various fancybox.iframe" href="<?php echo $video; ?>"><img src="<?php echo $image ?>" alt="<?php the_title(); ?>"/></a>
		<span class="play-icon"></span>
		<?php } else if (!$image) {?>
		<a class="various fancybox.iframe" href="<?php echo $video; ?>"><img src="<?php echo get_template_directory_uri(); ?>/framework/images/no-img-small-no-sidebar.png" alt="<?php the_title(); ?>"/></a>
		<span class="play-icon"></span>
		<?php } ?>
	</li>
	<?php } else { ?>
	<li>
		<?php if ($image) { ?>
		<img src="<?php echo $image ?>" alt="<?php the_title(); ?>"/>
		<a class="photo-gallery extend" href="<?php echo $image2; ?>"><span class="icon-eye-2"></span></a>
		<a class="page-link" href="<?php the_permalink(); ?>"><span class="icon-link-2"></span></a>
		<?php } else if (!$image) {?>
		<img src="<?php echo get_template_directory_uri(); ?>/framework/images/no-img-small-no-sidebar.png" alt="<?php the_title(); ?>"/>
		<a class="photo-gallery extend" href="<?php echo $image2; ?>"><span class="icon-eye-2"></span></a>
		<a class="page-link" href="<?php the_permalink(); ?>"><span class="icon-link-2"></span></a>
		<?php } ?>
	</li>
	<?php } ?>
	<?php endwhile; } ?>
	<?php wp_reset_query(); ?>
	</ul>
</div><!-- END SECTION VIDEO -->
|
achyutdahal/1234
|
wp-content/themes/urbannews32/section-video-no-sidebar.php
|
PHP
|
gpl-2.0
| 2,368
|
/*
* An easily extendable chat bot for any chat service.
* Copyright (C) 2015 bogeymanEST
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
package org.superfuntime.chatty.arguments;
import java.util.Scanner;
/**
 * A string value. Reads until the next whitespace (space, new line, etc.)
 * unless it is surrounded by double-quotes, in which case everything up to
 * the closing quote is consumed and the quotes are stripped.
 * <p/>
 * To get everything to the end of the line, see {@link AllArgument}.
 */
public class StringArgument implements ArgumentParser<String> {
    @Override
    public String parse(Scanner scanner) {
        String str = scanner.next();
        if (str.startsWith("\"")) {
            // A quoted string may span several whitespace-separated tokens;
            // keep reading until the closing quote unless this token already
            // ends with one (e.g. "foo"). A lone '"' token is still open.
            boolean closed = str.length() > 1 && str.endsWith("\"");
            if (!closed) {
                String rest = scanner.findInLine("[^\"]*\"");
                // findInLine() returns null when the quote is never closed;
                // fall back to what we already have instead of appending "null".
                if (rest != null) {
                    str += rest;
                }
            }
            str = str.replace("\"", "");
        }
        return str;
    }

    @Override
    public String getUsage() {
        return "a string";
    }
}
|
rasmussaks/chatty
|
src/main/java/org/superfuntime/chatty/arguments/StringArgument.java
|
Java
|
gpl-2.0
| 1,516
|
//=============================================================================
// MuseScore
// Music Composition & Notation
//
// Copyright (C) 2012 Werner Schweer
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License version 2
// as published by the Free Software Foundation and appearing in
// the file LICENCE.GPL
//=============================================================================
#include "range.h"
#include "measure.h"
#include "segment.h"
#include "rest.h"
#include "chord.h"
#include "score.h"
#include "slur.h"
#include "tie.h"
#include "note.h"
#include "tuplet.h"
#include "barline.h"
#include "utils.h"
namespace Ms {
//---------------------------------------------------------
// cleanupTuplet
//---------------------------------------------------------
static void cleanupTuplet(Tuplet* t)
{
    // Recursively free every element owned by the tuplet, descending
    // into nested tuplets before deleting them.
    for (DurationElement* de : t->elements()) {
        if (de->type() == Element::TUPLET)
            cleanupTuplet(static_cast<Tuplet*>(de));
        delete de;
    }
}
//---------------------------------------------------------
// TrackList
//---------------------------------------------------------
//  Free all elements owned by the list.  For tuplets, the child
//  elements are released recursively via cleanupTuplet().
//  NOTE(review): the top-level Tuplet object itself is not deleted here
//  (cleanupTuplet() frees children only) — looks like a leak; confirm
//  Tuplet ownership semantics before changing.
TrackList::~TrackList()
{
    int n = size();
    for (int i = 0; i < n; ++i) {
        Element* e = at(i);
        if (e->type() == Element::TUPLET) {
            Tuplet* t = static_cast<Tuplet*>(e);
            cleanupTuplet(t);
        }
        else
            delete e;
    }
}
//---------------------------------------------------------
// append
//---------------------------------------------------------
//  Append a (cloned) element to the track list and accumulate its
//  duration.  Consecutive rests are merged into one rest when no
//  spanner starts/ends at that tick and the source segment carries no
//  annotations.  Tuplet children are deep-cloned; for chords/rests the
//  source segment's annotations are recorded in the owning ScoreRange.
void TrackList::append(Element* e)
{
    if (e->isDurationElement()) {
        Fraction d = static_cast<DurationElement*>(e)->duration();
        _duration += d;
        // merge candidate: a rest directly following a trailing rest
        bool accumulateRest = e->type() == Element::REST && !isEmpty()
           && back()->type() == Element::REST;
        Segment* s = accumulateRest ? static_cast<Rest*>(e)->segment() : 0;
        if (s && !s->score()->isSpannerStartEnd(s->tick(), e->track()) && !s->annotations().size()) {
            // accumulate rests: grow the trailing rest instead of cloning
            Rest* rest = static_cast<Rest*>(back());
            Fraction d = rest->duration();
            d += static_cast<Rest*>(e)->duration();
            rest->setDuration(d);
        }
        else {
            Element* element = e->clone();
            QList<Element*>::append(element);
            if (e->type() == Element::TUPLET) {
                // deep-copy the tuplet's children into the clone
                Tuplet* srcTuplet = static_cast<Tuplet*>(e);
                Tuplet* dstTuplet = static_cast<Tuplet*>(element);
                foreach(const DurationElement* de, srcTuplet->elements())
                    dstTuplet->add(de->clone());
            }
            else {
                // remember segment annotations belonging to this track
                ChordRest* src = static_cast<ChordRest*>(e);
                Segment* s = src->segment();
                foreach(Element* ee, s->annotations()) {
                    if (ee->track() == e->track())
                        _range->annotations.push_back({ s->tick(), ee->clone() });
                }
            }
        }
    }
    else
        QList<Element*>::append(e->clone());
}
//---------------------------------------------------------
// appendGap
//---------------------------------------------------------
void TrackList::appendGap(const Fraction& d)
{
Element* e = isEmpty() ? 0 : back();
if (e && (e->type() == Element::REST)) {
Rest* rest = static_cast<Rest*>(back());
Fraction dd = rest->duration();
dd += d;
_duration += d;
rest->setDuration(dd);
}
else {
Rest* rest = new Rest(0);
rest->setDuration(d);
QList<Element*>::append(rest);
_duration += d;
}
}
//---------------------------------------------------------
// read
//---------------------------------------------------------
void TrackList::read(int track, const Segment* fs, const Segment* es)
{
int tick = fs->tick();
int gap = 0;
const Segment* s;
for (s = fs; s && (s != es); s = s->next1()) {
Element* e = s->element(track);
if (!e || e->generated()) {
foreach(Element* ee, s->annotations()) {
if (ee->track() == track)
_range->annotations.push_back({ s->tick(), ee->clone() });
}
continue;
}
if (e->isChordRest()) {
DurationElement* de = static_cast<DurationElement*>(e);
gap = s->tick() - tick;
if (de->tuplet()) {
Tuplet* tuplet = de->tuplet();
if (tuplet->elements().front() != de) {
qFatal("TrackList::read: cannot start in middle of tuplet");
}
de = tuplet;
// find last chord/rest in (possibly nested) tuplet:
DurationElement* nde = tuplet;
while (nde) {
nde = tuplet->elements().back();
if (nde->type() != Element::TUPLET)
break;
}
s = static_cast<ChordRest*>(nde)->segment();
// continue with first chord/rest after tuplet
}
if (gap) {
appendGap(Fraction::fromTicks(gap));
tick += gap;
}
append(de);
tick += de->duration().ticks();;
}
else if (e->type() == Element::BAR_LINE) {
BarLine* bl = static_cast<BarLine*>(e);
if (bl->barLineType() != BarLineType::NORMAL_BAR)
append(e);
}
// else if (e->type() == Element::REPEAT_MEASURE) {
// // TODO: copy previous measure contents?
// }
else
append(e);
}
gap = es->tick() - tick;
if (gap)
appendGap(Fraction::fromTicks(gap));
//
// connect ties
//
int n = size();
for (int i = 0; i < n; ++i) {
Element* e = at(i);
if (e->type() != Element::CHORD)
continue;
Chord* chord = static_cast<Chord*>(e);
foreach(Note* n1, chord->notes()) {
Tie* tie = n1->tieFor();
if (!tie)
continue;
for (int k = i+1; k < n; ++k) {
Element* ee = at(k);
if (ee->type() != Element::CHORD)
continue;
Chord* c2 = static_cast<Chord*>(ee);
bool found = false;
foreach(Note* n2, c2->notes()) {
if (n1->pitch() == n2->pitch()) {
tie->setEndNote(n2);
n2->setTieBack(tie);
found = true;
break;
}
}
if (!found)
qDebug("Tied note not found");
break;
}
}
}
}
//---------------------------------------------------------
// writeTuplet
//---------------------------------------------------------
//  Clone `tuplet` (recursively) into `measure`, creating chord/rest
//  segments starting at `tick`.  Nested tuplets are written via
//  recursion.  Returns the cloned tuplet.
Tuplet* TrackList::writeTuplet(Tuplet* tuplet, Measure* measure, int tick) const
{
    Tuplet* dt = tuplet->clone();
    dt->setParent(measure);
    foreach (DurationElement* e, tuplet->elements()) {
        if (e->isChordRest()) {
            Element* ne = e->clone();
            Segment::SegmentType st = Segment::SegChordRest;
            Segment* segment = measure->getSegment(st, tick);
            segment->add(ne);
            dt->add(ne);
        }
        else {
            // nested tuplet
            Tuplet* nt = writeTuplet(static_cast<Tuplet*>(e), measure, tick);
            dt->add(nt);
        }
        // advance by the element's actual (tuplet-scaled) length
        tick += e->globalDuration().ticks();
    }
    return dt;
}
//---------------------------------------------------------
// canWrite
// check if list can be written to measure list m
// check for tuplets crossing barlines
//---------------------------------------------------------
//  Check whether the list can be rewritten into measures of length
//  `measureLen` without splitting a tuplet at a barline.  Chords and
//  rests may always be split; a rest that starts an empty measure and
//  fills it entirely consumes the whole measure.
bool TrackList::canWrite(const Fraction& measureLen) const
{
    Fraction pos;
    Fraction rest = measureLen;   // remaining space in the current measure
    int n = size();
    for (int i = 0; i < n; ++i) {
        Element* e = at(i);
        if (e->isDurationElement()) {
            Fraction duration = static_cast<DurationElement*>(e)->duration();
            // a tuplet cannot be split across a barline
            if (duration > rest && e->type() == Element::TUPLET)
                return false;
            while (!duration.isZero()) {
                if (e->type() == Element::REST && duration >= rest && rest == measureLen) {
                    // rest filling a complete (empty) measure
                    duration -= rest;
                    pos = measureLen;
                }
                else {
                    // consume as much as fits into the current measure
                    Fraction d = qMin(rest, duration);
                    duration -= d;
                    rest -= d;
                    pos += d;
                }
                if (pos == measureLen) {
                    // measure full: advance to the next one
                    pos = Fraction();
                    rest = measureLen;
                }
            }
        }
    }
    return true;
}
//---------------------------------------------------------
// dump
//---------------------------------------------------------
void TrackList::dump() const
{
printf("TrackList: elements %d, duration %d/%d\n", size(), _duration.numerator(), _duration.denominator());
for (int i = 0; i < size(); ++i) {
Element* e = at(i);
printf(" %s\n", e->name());
if (e->isDurationElement()) {
Fraction d = static_cast<DurationElement*>(e)->duration();
printf(" duration %d/%d\n", d.numerator(), d.denominator());
}
}
}
//---------------------------------------------------------
// write
// rewrite notes into measure list m
//---------------------------------------------------------
//  Rewrite the track list into the measure list starting at `measure`,
//  splitting chords/rests at barlines (connecting split chords with
//  ties) and writing full-measure rests where possible.  Returns false
//  if a tuplet would have to be split across a barline.
bool TrackList::write(int track, Measure* measure) const
{
    Fraction pos;
    Measure* m = measure;
    Score* score = m->score();
    Fraction rest = m->len();        // remaining space in current measure
    Segment* segment = 0;

    int n = size();
    for (int i = 0; i < n; ++i) {
        Element* e = at(i);
        if (e->isDurationElement()) {
            Fraction duration = static_cast<DurationElement*>(e)->duration();
            if (duration > rest && e->type() == Element::TUPLET) {
                // cannot split tuplet
                qDebug("TrackList::write: cannot split tuplet");
                return false;
            }
            //
            // split note/rest
            //
            while (duration.numerator() > 0) {
                if ((e->type() == Element::REST || e->type() == Element::REPEAT_MEASURE)
                    && (duration >= rest || e == back())
                    && (rest == m->len()))
                {
                    //
                    // handle full measure rest
                    //
                    segment = m->getSegment(e, m->tick() + pos.ticks());
                    if ((track % VOICES) == 0) {
                        // write only for voice 1
                        Rest* r = new Rest(score, TDuration::V_MEASURE);
                        r->setDuration(m->len());
                        r->setTrack(track);
                        segment->add(r);
                    }
                    duration -= m->len();
                    pos += m->len();
                    rest.set(0, 1);
                }
                else {
                    // write as much as fits into the current measure
                    Fraction d = qMin(rest, duration);
                    if (e->type() == Element::REST || e->type() == Element::REPEAT_MEASURE) {
                        segment = m->getSegment(Segment::SegChordRest, m->tick() + pos.ticks());
                        Rest* r = new Rest(score, TDuration(d));
                        r->setTrack(track);
                        segment->add(r);
                        duration -= d;
                        rest -= d;
                        pos += d;
                    }
                    else if (e->type() == Element::CHORD) {
                        segment = m->getSegment(e, m->tick() + pos.ticks());
                        Chord* c = static_cast<Chord*>(e)->clone();
                        c->setScore(score);
                        c->setTrack(track);
                        c->setDuration(d);
                        c->setDurationType(TDuration(d));
                        segment->add(c);
                        duration -= d;
                        rest -= d;
                        pos += d;
                        // a chord split at the barline is tied to its
                        // continuation in the next measure
                        foreach(Note* note, c->notes()) {
                            if (!duration.isZero() || note->tieFor()) {
                                Tie* tie = new Tie(score);
                                note->add(tie);
                            }
                            else
                                note->setTieFor(0);
                            note->setTieBack(0);
                        }
                    }
                    else if (e->type() == Element::TUPLET) {
                        writeTuplet(static_cast<Tuplet*>(e), m, m->tick() + pos.ticks());
                        duration -= d;
                        rest -= d;
                        pos += d;
                    }
                }
                if (pos == m->len()) {
                    // measure is full: advance to the next one
                    if (m->nextMeasure()) {
                        m = m->nextMeasure();
                        rest = m->len();
                        pos = Fraction();
                    }
                    else {
                        if (!duration.isZero()) {
                            qDebug("Tracklist::write: premature end of measure list in track %d, rest %d/%d",
                                track, duration.numerator(), duration.denominator());
                            ++i;
                            qDebug("%d elements missing", n - i);
                            for (; i < n; ++i) {
                                Element* e = at(i);
                                qDebug("    <%s>", e->name());
                                if (e->isChordRest()) {
                                    ChordRest* cr = static_cast<ChordRest*>(e);
                                    qDebug("       %d/%d",
                                        cr->duration().numerator(),
                                        cr->duration().denominator());
                                }
                            }
                            rest = Fraction();
                            duration = Fraction();
                            Q_ASSERT(false);
                        }
                    }
                }
            }
        }
//      else if (e->type() == Element::KEYSIG) {
//            // keysig has to be at start of measure
//            }
        else if (e->type() == Element::BAR_LINE) {
            if (pos.numerator() == 0 && m) {
                // barline at a measure boundary: becomes the end barline
                // of the previous measure
                BarLineType t = static_cast<BarLine*>(e)->barLineType();
                Measure* pm = m->prevMeasure();
                if (pm)
                    pm->setEndBarLineType(t, 0);
            }
        }
        else {
            if (m == nullptr)
                break;
            // add the element in its own segment;
            // but KeySig has to be at start of (current) measure.
            // NB: the conditional must be parenthesized — '+' binds tighter
            // than '==', so the unparenthesized original evaluated
            // ((tick + type) == KEYSIG) ? 0 : ticks instead.
            segment = m->getSegment(e, m->tick() + (e->type() == Element::KEYSIG ? 0 : pos.ticks()));
            Element* ne = e->clone();
            ne->setScore(score);
            ne->setTrack(track);
            segment->add(ne);
        }
    }

    //
    // connect ties
    //
    for (Segment* s = measure->first(); s; s = s->next1()) {
        Element* el = s->element(track);
        if (el == 0 || el->type() != Element::CHORD)
            continue;
        foreach(Note* n, static_cast<Chord*>(el)->notes()) {
            Tie* tie = n->tieFor();
            if (!tie)
                continue;
            Note* nn = searchTieNote(n);
            if (nn) {
                tie->setEndNote(nn);
                nn->setTieBack(tie);
            }
        }
        if (s == segment)
            break;
    }
    return true;
}
//---------------------------------------------------------
// ScoreRange
//---------------------------------------------------------
ScoreRange::~ScoreRange()
{
    // The range owns its per-track lists; release them all.
    for (TrackList* tl : tracks)
        delete tl;
}
//---------------------------------------------------------
// canWrite
//---------------------------------------------------------
bool ScoreRange::canWrite(const Fraction& f) const
{
int n = tracks.size();
for (int i = 0; i < n; ++i) {
TrackList* dl = tracks[i];
if (!dl->canWrite(f))
return false;
}
return true;
}
//---------------------------------------------------------
// read
//---------------------------------------------------------
//  Capture the segment range [first, last) for tracks
//  [startTrack, endTrack): matching spanners are cloned with ticks
//  rebased to the start of the range, then one TrackList is read per
//  track.
void ScoreRange::read(Segment* first, Segment* last, int startTrack, int endTrack)
{
    _first = first;
    _last = last;
    spanner.clear();
    for (auto i : first->score()->spanner()) {
        Spanner* s = i.second;
        if (s->tick() >= first->tick() && s->tick() < last->tick() &&
            s->track() >= startTrack && s->track() < endTrack) {
            // clone with ticks relative to the range start; fixup()
            // rebases them to absolute ticks on write-back
            Spanner* ns = static_cast<Spanner*>(s->clone());
            ns->setTick(ns->tick() - first->tick());
            ns->setTick2(ns->tick2() - first->tick());
            spanner.push_back(ns);
        }
    }
    for (int track = startTrack; track < endTrack; ++track) {
        TrackList* dl = new TrackList(this);
        dl->read(track, first, last);
        tracks.append(dl);
    }
}
//---------------------------------------------------------
// write
//---------------------------------------------------------
bool ScoreRange::write(int track, Measure* m) const
{
int n = tracks.size();
for (int i = 0; i < n; ++i) {
const TrackList* dl = tracks[i];
if (!dl->write(track + i, m))
return false;
}
return true;
}
//---------------------------------------------------------
// fixup
//---------------------------------------------------------
//  Re-attach range-level data after write(): rebase the cloned spanners
//  to absolute ticks and re-insert the captured annotations into their
//  (new) segments, all via undoable commands.
void ScoreRange::fixup(Measure* m) const
{
    Score* score = m->score();
    for (Spanner* s : spanner) {
        // read() stored ticks relative to the range start
        s->setTick(s->tick() + first()->tick());
        s->setTick2(s->tick2() + first()->tick());
        score->undoAddElement(s);
    }
    for (const Annotation& a : annotations) {
        Measure* tm = score->tick2measure(a.tick);
        // reuse the original parent's segment type at the recorded tick
        Segment *op = static_cast<Segment*>(a.e->parent());
        Segment* s = tm->undoGetSegment(op->segmentType(), a.tick);
        if (s) {
            a.e->setParent(s);
            score->undoAddElement(a.e);
        }
    }
}
//---------------------------------------------------------
// fill
//---------------------------------------------------------
void ScoreRange::fill(const Fraction& f)
{
int n = tracks.size();
for (int i = 0; i < n; ++i)
tracks[i]->appendGap(f);
}
//---------------------------------------------------------
// duration
//---------------------------------------------------------
Fraction ScoreRange::duration() const
{
    // All track lists cover the same time span, so the first one is
    // representative; an empty range has zero duration.
    if (tracks.isEmpty())
        return Fraction();
    return tracks[0]->duration();
}
}
|
jasonbcox/MuseScore
|
libmscore/range.cpp
|
C++
|
gpl-2.0
| 22,358
|
/*
* Copyright (C) 1999 Lars Knoll (knoll@kde.org)
* (C) 1999 Antti Koivisto (koivisto@kde.org)
* (C) 2000 Simon Hausmann (hausmann@kde.org)
* (C) 2001 Dirk Mueller (mueller@kde.org)
* Copyright (C) 2004-2017 Apple Inc. All rights reserved.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Library General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Library General Public License for more details.
*
* You should have received a copy of the GNU Library General Public License
* along with this library; see the file COPYING.LIB. If not, write to
* the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
* Boston, MA 02110-1301, USA.
*/
#include "config.h"
#include "HTMLBodyElement.h"
#include "CSSImageValue.h"
#include "CSSParser.h"
#include "CSSValueKeywords.h"
#include "DOMWindow.h"
#include "DOMWrapperWorld.h"
#include "EventNames.h"
#include "Frame.h"
#include "FrameView.h"
#include "HTMLFrameElement.h"
#include "HTMLIFrameElement.h"
#include "HTMLNames.h"
#include "HTMLParserIdioms.h"
#include "StyleProperties.h"
#include <wtf/NeverDestroyed.h>
namespace WebCore {
using namespace HTMLNames;
HTMLBodyElement::HTMLBodyElement(const QualifiedName& tagName, Document& document)
    : HTMLElement(tagName, document)
{
    ASSERT(hasTagName(bodyTag));
}

bool HTMLBodyElement::isFirstBodyElementOfDocument() const
{
    // By spec http://dev.w3.org/csswg/cssom-view/#the-html-body-element
    // "The HTML body element is the first body HTML element child of the root HTML element html."
    return document().body() == this;
}

// Factory for plain <body> elements.
Ref<HTMLBodyElement> HTMLBodyElement::create(Document& document)
{
    return adoptRef(*new HTMLBodyElement(bodyTag, document));
}

// Factory used when the tag name is supplied by the parser.
Ref<HTMLBodyElement> HTMLBodyElement::create(const QualifiedName& tagName, Document& document)
{
    return adoptRef(*new HTMLBodyElement(tagName, document));
}

HTMLBodyElement::~HTMLBodyElement() = default;

// Legacy attributes listed here are reflected into presentational style
// by collectStyleForPresentationAttribute() below.
bool HTMLBodyElement::isPresentationAttribute(const QualifiedName& name) const
{
    if (name == backgroundAttr || name == marginwidthAttr || name == leftmarginAttr || name == marginheightAttr || name == topmarginAttr || name == bgcolorAttr || name == textAttr || name == bgpropertiesAttr)
        return true;
    return HTMLElement::isPresentationAttribute(name);
}

// Map the legacy presentational attributes to CSS: background= ->
// background-image, marginwidth=/leftmargin= -> horizontal margins,
// marginheight=/topmargin= -> vertical margins, bgcolor=/text= ->
// colors, bgproperties=fixed -> background-attachment: fixed.
void HTMLBodyElement::collectStyleForPresentationAttribute(const QualifiedName& name, const AtomicString& value, MutableStyleProperties& style)
{
    if (name == backgroundAttr) {
        String url = stripLeadingAndTrailingHTMLSpaces(value);
        if (!url.isEmpty()) {
            auto imageValue = CSSImageValue::create(document().completeURL(url));
            imageValue.get().setInitiator(localName());
            style.setProperty(CSSProperty(CSSPropertyBackgroundImage, WTFMove(imageValue)));
        }
    } else if (name == marginwidthAttr || name == leftmarginAttr) {
        addHTMLLengthToStyle(style, CSSPropertyMarginRight, value);
        addHTMLLengthToStyle(style, CSSPropertyMarginLeft, value);
    } else if (name == marginheightAttr || name == topmarginAttr) {
        addHTMLLengthToStyle(style, CSSPropertyMarginBottom, value);
        addHTMLLengthToStyle(style, CSSPropertyMarginTop, value);
    } else if (name == bgcolorAttr) {
        addHTMLColorToStyle(style, CSSPropertyBackgroundColor, value);
    } else if (name == textAttr) {
        addHTMLColorToStyle(style, CSSPropertyColor, value);
    } else if (name == bgpropertiesAttr) {
        if (equalLettersIgnoringASCIICase(value, "fixed"))
            addPropertyToPresentationAttributeStyle(style, CSSPropertyBackgroundAttachment, CSSValueFixed);
    } else
        HTMLElement::collectStyleForPresentationAttribute(name, value, style);
}

// Event handler content attributes on <body> that target the Window
// object (the HTML spec's window-reflecting body element event handler
// set), plus WebKit extensions.
HTMLElement::EventHandlerNameMap HTMLBodyElement::createWindowEventHandlerNameMap()
{
    static const QualifiedName* const table[] = {
        &onbeforeunloadAttr.get(),
        &onblurAttr.get(),
        &onerrorAttr.get(),
        &onfocusAttr.get(),
        &onfocusinAttr.get(),
        &onfocusoutAttr.get(),
        &onhashchangeAttr.get(),
        &onlanguagechangeAttr.get(),
        &onloadAttr.get(),
        &onmessageAttr.get(),
        &onofflineAttr.get(),
        &ononlineAttr.get(),
        &onorientationchangeAttr.get(),
        &onpagehideAttr.get(),
        &onpageshowAttr.get(),
        &onpopstateAttr.get(),
        &onresizeAttr.get(),
        &onscrollAttr.get(),
        &onstorageAttr.get(),
        &onunloadAttr.get(),
        &onwebkitmouseforcechangedAttr.get(),
        &onwebkitmouseforcedownAttr.get(),
        &onwebkitmouseforceupAttr.get(),
        &onwebkitmouseforcewillbeginAttr.get(),
        &onwebkitwillrevealbottomAttr.get(),
        &onwebkitwillrevealleftAttr.get(),
        &onwebkitwillrevealrightAttr.get(),
        &onwebkitwillrevealtopAttr.get(),
    };

    EventHandlerNameMap map;
    populateEventHandlerNameMap(map, table);
    return map;
}

// Returns the event name for a window-forwarded handler attribute, or
// the null atom when the attribute is not in the table above.
const AtomicString& HTMLBodyElement::eventNameForWindowEventHandlerAttribute(const QualifiedName& attributeName)
{
    static NeverDestroyed<EventHandlerNameMap> map = createWindowEventHandlerNameMap();
    return eventNameForEventHandlerAttribute(attributeName, map.get());
}
// Handle <body>-specific attributes: legacy link color attributes
// (link=/vlink=/alink=), the selectionchange handler, and handler
// attributes that are forwarded to the Window object.
void HTMLBodyElement::parseAttribute(const QualifiedName& name, const AtomicString& value)
{
    if (name == vlinkAttr || name == alinkAttr || name == linkAttr) {
        if (value.isNull()) {
            // attribute removed: fall back to the default colors
            if (name == linkAttr)
                document().resetLinkColor();
            else if (name == vlinkAttr)
                document().resetVisitedLinkColor();
            else
                document().resetActiveLinkColor();
        } else {
            Color color = CSSParser::parseColor(value, !document().inQuirksMode());
            if (color.isValid()) {
                if (name == linkAttr)
                    document().setLinkColor(color);
                else if (name == vlinkAttr)
                    document().setVisitedLinkColor(color);
                else
                    document().setActiveLinkColor(color);
            }
        }

        invalidateStyleForSubtree();
        return;
    }

    if (name == onselectionchangeAttr) {
        document().setAttributeEventListener(eventNames().selectionchangeEvent, name, value, mainThreadNormalWorld());
        return;
    }

    auto& eventName = eventNameForWindowEventHandlerAttribute(name);
    if (!eventName.isNull()) {
        document().setWindowAttributeEventListener(eventName, name, value, mainThreadNormalWorld());
        return;
    }

    HTMLElement::parseAttribute(name, value);
}

Node::InsertedIntoAncestorResult HTMLBodyElement::insertedIntoAncestor(InsertionType insertionType, ContainerNode& parentOfInsertedTree)
{
    HTMLElement::insertedIntoAncestor(insertionType, parentOfInsertedTree);
    if (!insertionType.connectedToDocument)
        return InsertedIntoAncestorResult::Done;

    // FIXME: It's surprising this is web compatible since it means a marginwidth and marginheight attribute can
    // magically appear on the <body> of all documents embedded through <iframe> or <frame>.
    // FIXME: Perhaps this code should be in attach() instead of here.
    auto ownerElement = makeRefPtr(document().ownerElement());
    if (!is<HTMLFrameElementBase>(ownerElement))
        return InsertedIntoAncestorResult::Done;

    // defer the margin copy to didFinishInsertingNode(), which may run script
    return InsertedIntoAncestorResult::NeedsPostInsertionCallback;
}

// Post-insertion step for frame-embedded documents: copy the frame
// owner's marginwidth/marginheight onto this <body>.
void HTMLBodyElement::didFinishInsertingNode()
{
    auto ownerElement = makeRefPtr(document().ownerElement());
    RELEASE_ASSERT(is<HTMLFrameElementBase>(ownerElement));
    auto& ownerFrameElement = downcast<HTMLFrameElementBase>(*ownerElement);

    // Read values from the owner before setting any attributes, since setting an attribute can run arbitrary
    // JavaScript, which might delete the owner element.
    int marginWidth = ownerFrameElement.marginWidth();
    int marginHeight = ownerFrameElement.marginHeight();

    if (marginWidth != -1)
        setIntegralAttribute(marginwidthAttr, marginWidth);
    if (marginHeight != -1)
        setIntegralAttribute(marginheightAttr, marginHeight);
}
bool HTMLBodyElement::isURLAttribute(const Attribute& attribute) const
{
    // The legacy background="" attribute holds a URL; defer to the base
    // class for everything else.
    if (attribute.name() == backgroundAttr)
        return true;
    return HTMLElement::isURLAttribute(attribute);
}

bool HTMLBodyElement::supportsFocus() const
{
    // An editable body is focusable even without a tabindex.
    if (hasEditableStyle())
        return true;
    return HTMLElement::supportsFocus();
}
// Convert a scrolled length from zoomed page coordinates back to CSS
// pixels for the given frame.
static int adjustForZoom(int value, const Frame& frame)
{
    double zoomFactor = frame.pageZoomFactor() * frame.frameScaleFactor();
    if (zoomFactor == 1)
        return value;
    // Needed because of truncation (rather than rounding) when scaling up.
    int adjusted = value;
    if (zoomFactor > 1)
        ++adjusted;
    return static_cast<int>(adjusted / zoomFactor);
}
// For the first <body> of the document the scroll* accessors operate on
// the frame's view (quirks-mode document scrolling); otherwise they
// fall back to the regular element behavior.
int HTMLBodyElement::scrollLeft()
{
    if (isFirstBodyElementOfDocument()) {
        document().updateLayoutIgnorePendingStylesheets();
        RefPtr<Frame> frame = document().frame();
        if (!frame)
            return 0;
        RefPtr<FrameView> view = frame->view();
        if (!view)
            return 0;
        return adjustForZoom(view->contentsScrollPosition().x(), *frame);
    }
    return HTMLElement::scrollLeft();
}

void HTMLBodyElement::setScrollLeft(int scrollLeft)
{
    if (isFirstBodyElementOfDocument()) {
        document().updateLayoutIgnorePendingStylesheets();
        RefPtr<Frame> frame = document().frame();
        if (!frame)
            return;
        RefPtr<FrameView> view = frame->view();
        if (!view)
            return;
        view->setScrollPosition(IntPoint(static_cast<int>(scrollLeft * frame->pageZoomFactor() * frame->frameScaleFactor()), view->scrollY()));
    }
    // NOTE(review): unlike scrollTo(), this also invokes the base-class
    // behavior on the first-body path (no early return) — confirm
    // intentional.
    HTMLElement::setScrollLeft(scrollLeft);
}

int HTMLBodyElement::scrollTop()
{
    if (isFirstBodyElementOfDocument()) {
        document().updateLayoutIgnorePendingStylesheets();
        RefPtr<Frame> frame = document().frame();
        if (!frame)
            return 0;
        RefPtr<FrameView> view = frame->view();
        if (!view)
            return 0;
        return adjustForZoom(view->contentsScrollPosition().y(), *frame);
    }
    return HTMLElement::scrollTop();
}

void HTMLBodyElement::setScrollTop(int scrollTop)
{
    if (isFirstBodyElementOfDocument()) {
        document().updateLayoutIgnorePendingStylesheets();
        RefPtr<Frame> frame = document().frame();
        if (!frame)
            return;
        RefPtr<FrameView> view = frame->view();
        if (!view)
            return;
        view->setScrollPosition(IntPoint(view->scrollX(), static_cast<int>(scrollTop * frame->pageZoomFactor() * frame->frameScaleFactor())));
    }
    // NOTE(review): see setScrollLeft() — base-class call is reached on
    // both paths here as well.
    return HTMLElement::setScrollTop(scrollTop);
}

void HTMLBodyElement::scrollTo(const ScrollToOptions& options, ScrollClamping clamping)
{
    if (isFirstBodyElementOfDocument()) {
        // If the element is the HTML body element, document is in quirks mode, and the element is not potentially scrollable,
        // invoke scroll() on window with options as the only argument, and terminate these steps.
        // Note that WebKit always uses quirks mode document scrolling behavior. See Document::scrollingElement().
        // FIXME: Scrolling an independently scrollable body is broken: webkit.org/b/161612.
        auto window = makeRefPtr(document().domWindow());
        if (!window)
            return;

        window->scrollTo(options);
        return;
    }
    return HTMLElement::scrollTo(options, clamping);
}

int HTMLBodyElement::scrollHeight()
{
    if (isFirstBodyElementOfDocument()) {
        // Update the document's layout.
        document().updateLayoutIgnorePendingStylesheets();
        RefPtr<Frame> frame = document().frame();
        if (!frame)
            return 0;
        RefPtr<FrameView> view = frame->view();
        if (!view)
            return 0;
        return adjustForZoom(view->contentsHeight(), *frame);
    }
    return HTMLElement::scrollHeight();
}

int HTMLBodyElement::scrollWidth()
{
    if (isFirstBodyElementOfDocument()) {
        // Update the document's layout.
        document().updateLayoutIgnorePendingStylesheets();
        RefPtr<Frame> frame = document().frame();
        if (!frame)
            return 0;
        RefPtr<FrameView> view = frame->view();
        if (!view)
            return 0;
        return adjustForZoom(view->contentsWidth(), *frame);
    }
    return HTMLElement::scrollWidth();
}

// Collect URLs referenced by attributes (here: background=) for
// subresource tracking.
void HTMLBodyElement::addSubresourceAttributeURLs(ListHashSet<URL>& urls) const
{
    HTMLElement::addSubresourceAttributeURLs(urls);

    addSubresourceURL(urls, document().completeURL(attributeWithoutSynchronization(backgroundAttr)));
}
} // namespace WebCore
|
teamfx/openjfx-8u-dev-rt
|
modules/web/src/main/native/Source/WebCore/html/HTMLBodyElement.cpp
|
C++
|
gpl-2.0
| 13,035
|
<?php

/**
 * Skeleton subclass for performing query and update operations on the
 * 'trip_logs_to_features' table.
 *
 * You should add additional methods to this class to meet the
 * application requirements. This class will only be generated as
 * long as it does not already exist in the output directory.
 *
 * @see     BaseTripLogsToFeaturesQuery for the generated query API
 * @package propel.generator.speogis
 */
class TripLogsToFeaturesQuery extends BaseTripLogsToFeaturesQuery
{
    // Custom query methods (filters, joins, scopes) go here; the
    // generated behavior lives in BaseTripLogsToFeaturesQuery.
}
|
apgeo/silexgis
|
db/propel_1_6_pr/build/classes/speogis/TripLogsToFeaturesQuery.php
|
PHP
|
gpl-2.0
| 433
|
#!/bin/sh
test_description='git grep with a binary pattern files'
. ./lib-gettext.sh
# Run a single "git grep -f <patternfile>" test where the pattern file
# contains NUL bytes ('Q' in $pattern is translated to NUL via q_to_nul).
#
#   $1 matches      1 = must match, 0 = must not match,
#                   P = must fail with the "PCRE v2 only" error
#   $2 prereqs      extra test prerequisites (e.g. LIBPCRE2)
#   $3 lc_all       locale to run the command under
#   $4 extra_flags  flags used only on the non-error paths (e.g. -P)
#   $5 flags        grep flags under test (-F, -i, ...)
#   $6 pattern      pattern with Q placeholders for NUL
nul_match_internal () {
	matches=$1
	prereqs=$2
	lc_all=$3
	extra_flags=$4
	flags=$5
	pattern=$6
	pattern_human=$(echo "$pattern" | sed 's/Q/<NUL>/g')

	if test "$matches" = 1
	then
		test_expect_success $prereqs "LC_ALL='$lc_all' git grep $extra_flags -f f $flags '$pattern_human' a" "
			printf '$pattern' | q_to_nul >f &&
			LC_ALL='$lc_all' git grep $extra_flags -f f $flags a
		"
	elif test "$matches" = 0
	then
		test_expect_success $prereqs "LC_ALL='$lc_all' git grep $extra_flags -f f $flags '$pattern_human' a" "
			>stderr &&
			printf '$pattern' | q_to_nul >f &&
			test_must_fail env LC_ALL=\"$lc_all\" git grep $extra_flags -f f $flags a 2>stderr &&
			test_i18ngrep ! 'This is only supported with -P under PCRE v2' stderr
		"
	elif test "$matches" = P
	then
		test_expect_success $prereqs "error, PCRE v2 only: LC_ALL='$lc_all' git grep -f f $flags '$pattern_human' a" "
			>stderr &&
			printf '$pattern' | q_to_nul >f &&
			test_must_fail env LC_ALL=\"$lc_all\" git grep -f f $flags a 2>stderr &&
			test_i18ngrep 'This is only supported with -P under PCRE v2' stderr
		"
	else
		test_expect_success "PANIC: Test framework error. Unknown matches value $matches" 'false'
	fi
}
# Run one pattern through all three matcher configurations:
#   $1 matches              expected result, POSIX matcher (C locale)
#   $2 matches_pcre2        expected result, PCRE v2 (C locale)
#   $3 matches_pcre2_locale expected result, PCRE v2 (UTF-8 locale)
#   $4 flags                grep flags under test
#   $5 pattern              pattern with Q placeholders for NUL
nul_match () {
	matches=$1
	matches_pcre2=$2
	matches_pcre2_locale=$3
	flags=$4
	pattern=$5
	pattern_human=$(echo "$pattern" | sed 's/Q/<NUL>/g')

	nul_match_internal "$matches" "" "C" "" "$flags" "$pattern"
	nul_match_internal "$matches_pcre2" "LIBPCRE2" "C" "-P" "$flags" "$pattern"
	nul_match_internal "$matches_pcre2_locale" "LIBPCRE2,GETTEXT_LOCALE" "$is_IS_locale" "-P" "$flags" "$pattern"
}
# The subject file 'a' contains NUL bytes, regex metacharacters and
# non-ASCII (æ, ð) so that all matcher/locale combinations are covered.
test_expect_success 'setup' "
	echo 'binaryQfileQm[*]cQ*æQð' | q_to_nul >a &&
	git add a &&
	git commit -m.
"

# Test matrix: each line is
#   nul_match <POSIX> <PCRE2/C locale> <PCRE2/UTF-8 locale> <flags> <pattern>

# Simple fixed-string matching
nul_match P P P '-F' 'yQf'
nul_match P P P '-F' 'yQx'
nul_match P P P '-Fi' 'YQf'
nul_match P P P '-Fi' 'YQx'
nul_match P P 1 '' 'yQf'
nul_match P P 0 '' 'yQx'
nul_match P P 1 '' 'æQð'
nul_match P P P '-F' 'eQm[*]c'
nul_match P P P '-Fi' 'EQM[*]C'

# Regex patterns that would match but shouldn't with -F
nul_match P P P '-F' 'yQ[f]'
nul_match P P P '-F' '[y]Qf'
nul_match P P P '-Fi' 'YQ[F]'
nul_match P P P '-Fi' '[Y]QF'
nul_match P P P '-F' 'æQ[ð]'
nul_match P P P '-F' '[æ]Qð'

# Matching pattern and subject case with -i
nul_match P 1 1 '-i' '[æ]Qð'

# ...PCRE v2 only matches non-ASCII with -i casefolding under UTF-8
# semantics
nul_match P P P '-Fi' 'ÆQ[Ð]'
nul_match P 0 1 '-i' 'ÆQ[Ð]'
nul_match P 0 1 '-i' '[Æ]QÐ'
nul_match P 0 1 '-i' '[Æ]Qð'
nul_match P 0 1 '-i' 'ÆQÐ'

# \0 in regexes can only work with -P & PCRE v2
nul_match P P 1 '' 'yQ[f]'
nul_match P P 1 '' '[y]Qf'
nul_match P P 1 '-i' 'YQ[F]'
nul_match P P 1 '-i' '[Y]Qf'
nul_match P P 1 '' 'æQ[ð]'
nul_match P P 1 '' '[æ]Qð'
nul_match P P 1 '-i' 'ÆQ[Ð]'
nul_match P P 1 '' 'eQm.*cQ'
nul_match P P 1 '-i' 'EQM.*cQ'
nul_match P P 0 '' 'eQm[*]c'
nul_match P P 0 '-i' 'EQM[*]C'

# Assert that we're using REG_STARTEND and the pattern doesn't match
# just because it's cut off at the first \0.
nul_match P P 0 '-i' 'NOMATCHQð'
nul_match P P 0 '-i' '[Æ]QNOMATCH'
nul_match P P 0 '-i' '[æ]QNOMATCH'

# Ensure that the matcher doesn't regress to something that stops at
# \0
nul_match P P P '-F' 'yQ[f]'
nul_match P P P '-Fi' 'YQ[F]'
nul_match P P 0 '' 'yQNOMATCH'
nul_match P P 0 '' 'QNOMATCH'
nul_match P P 0 '-i' 'YQNOMATCH'
nul_match P P 0 '-i' 'QNOMATCH'
nul_match P P P '-F' 'æQ[ð]'
nul_match P P P '-Fi' 'ÆQ[Ð]'
nul_match P P 1 '-i' 'ÆQ[Ð]'
nul_match P P 0 '' 'yQNÓMATCH'
nul_match P P 0 '' 'QNÓMATCH'
nul_match P P 0 '-i' 'YQNÓMATCH'
nul_match P P 0 '-i' 'QNÓMATCH'

test_done
|
Osse/git
|
t/t7816-grep-binary-pattern.sh
|
Shell
|
gpl-2.0
| 3,710
|
/***************************************************************************
qgstilescalewidget.cpp - slider to choose wms-c resolutions
-------------------
begin : 28 Mar 2010
copyright: (C) 2010 Juergen E. Fischer < jef at norbit dot de >
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
#include "qgstilescalewidget.h"
#include "qgsmapcanvas.h"
#include "qgsrasterlayer.h"
#include "qgsrasterdataprovider.h"
#include "qgsmessagelog.h"
#include "qgslogger.h"
#include "qgsdockwidget.h"
#include "qgssettings.h"
#include "layertree/qgslayertreeview.h"
#include <QMainWindow>
#include <QMenu>
#include <QGraphicsView>
#include <QToolTip>
// Slider widget that lets the user pick one of a WMS-C layer's discrete tile
// resolutions.  Tracks the map canvas so the slider follows zoom changes.
QgsTileScaleWidget::QgsTileScaleWidget( QgsMapCanvas *mapCanvas, QWidget *parent, Qt::WindowFlags f )
  : QWidget( parent, f )
  , mMapCanvas( mapCanvas )
{
  setupUi( this );

  // Slider moves re-zoom the canvas; canvas zooms reposition the slider.
  connect( mSlider, &QSlider::valueChanged, this, &QgsTileScaleWidget::mSlider_valueChanged );
  connect( mMapCanvas, &QgsMapCanvas::scaleChanged, this, &QgsTileScaleWidget::scaleChanged );

  // Initialise the slider from whatever layer is currently active.
  layerChanged( mMapCanvas->currentLayer() );
}
/**
 * Reconfigure the slider for the newly selected layer.
 *
 * The slider is only enabled for WMS raster layers whose data provider
 * exposes a "resolutions" property; for anything else it stays disabled.
 * \param layer the layer that became current (may be nullptr)
 */
void QgsTileScaleWidget::layerChanged( QgsMapLayer *layer )
{
  mSlider->setDisabled( true );

  QgsRasterLayer *rl = qobject_cast<QgsRasterLayer *>( layer );

  if ( !rl || rl->providerType() != QLatin1String( "wms" ) || !rl->dataProvider() )
    return;

  QVariant res = rl->dataProvider()->property( "resolutions" );

  mResolutions.clear();
  // Iterate over a named const copy with a range-for instead of the
  // deprecated Q_FOREACH; the named copy also avoids iterating directly over
  // the temporary returned by toList().
  const QList<QVariant> resList = res.toList();
  for ( const QVariant &r : resList )
  {
    QgsDebugMsg( QString( "found resolution: %1" ).arg( r.toDouble() ) );
    mResolutions << r.toDouble();
  }

  if ( mResolutions.isEmpty() )
    return;

  // One slider tick per discrete resolution; inverted so "up" means zoom in.
  mSlider->setRange( 0, mResolutions.size() - 1 );
  mSlider->setTickInterval( 1 );
  mSlider->setInvertedAppearance( true );
  mSlider->setPageStep( 1 );
  // Only emit valueChanged when the user releases the handle.
  mSlider->setTracking( false );

  // Sync the slider position to the current canvas scale.
  scaleChanged( mMapCanvas->scale() );

  mSlider->setEnabled( true );
  show();
}
/**
 * Reposition the slider after the canvas scale changed, without re-zooming.
 * \param scale new canvas scale (unused; the canvas' map-units-per-pixel is
 *              read directly instead)
 */
void QgsTileScaleWidget::scaleChanged( double scale )
{
  Q_UNUSED( scale );

  if ( mResolutions.isEmpty() )
    return;

  double mupp = mMapCanvas->mapUnitsPerPixel();
  QgsDebugMsg( QString( "resolution changed to %1" ).arg( mupp ) );

  // Scan for the first stored resolution >= mupp.  NOTE(review): this assumes
  // mResolutions is sorted ascending -- confirm the provider guarantees that.
  int i;
  for ( i = 0; i < mResolutions.size() && mResolutions.at( i ) < mupp; i++ )
    QgsDebugMsg( QString( "test resolution %1: %2 d:%3" ).arg( i ).arg( mResolutions.at( i ) ).arg( mupp - mResolutions.at( i ) ) );

  // Step back one entry if we ran off the end, or if the previous resolution
  // is actually closer to mupp than the one the scan stopped at.
  if ( i == mResolutions.size() ||
       ( i > 0 && mResolutions.at( i ) - mupp > mupp - mResolutions.at( i - 1 ) ) )
  {
    QgsDebugMsg( "previous resolution" );
    i--;
  }

  QgsDebugMsg( QString( "selected resolution %1: %2" ).arg( i ).arg( mResolutions.at( i ) ) );

  // Move the slider without re-triggering mSlider_valueChanged().
  mSlider->blockSignals( true );
  mSlider->setValue( i );
  mSlider->blockSignals( false );
}
void QgsTileScaleWidget::mSlider_valueChanged( int value )
{
QgsDebugMsg( QString( "slider released at %1: %2" ).arg( mSlider->value() ).arg( mResolutions.at( mSlider->value() ) ) );
// Invert value in tooltip to match expectation (i.e. 0 = zoomed out, maximum = zoomed in)
QToolTip::showText( QCursor::pos(), tr( "Zoom level: %1" ).arg( mSlider->maximum() - value ) + "\n" + tr( "Resolution: %1" ).arg( mResolutions.at( value ) ), this );
mMapCanvas->zoomByFactor( mResolutions.at( mSlider->value() ) / mMapCanvas->mapUnitsPerPixel() );
}
/**
 * Match the slider orientation to the dock edge the widget was moved to:
 * horizontal for top/bottom docks, vertical for left/right docks.
 * \param area the dock area the containing dock widget now occupies
 */
void QgsTileScaleWidget::locationChanged( Qt::DockWidgetArea area )
{
  if ( area == Qt::TopDockWidgetArea || area == Qt::BottomDockWidgetArea )
    mSlider->setOrientation( Qt::Horizontal );
  else
    mSlider->setOrientation( Qt::Vertical );
}
/**
 * Create (or toggle) the "Tile Scale" dock on \a mainWindow.
 *
 * If the dock already exists its visibility is simply toggled.  Otherwise a
 * new QgsTileScaleWidget is built, wired to the map canvas and layer tree
 * found by objectName lookup on the main window, wrapped in a QgsDockWidget,
 * added to the Panel menu, and shown according to the persisted setting.
 * \param mainWindow the application main window to attach the dock to
 */
void QgsTileScaleWidget::showTileScale( QMainWindow *mainWindow )
{
  // Toggle an existing dock instead of creating a second one.
  QgsDockWidget *dock = mainWindow->findChild<QgsDockWidget *>( QStringLiteral( "theTileScaleDock" ) );
  if ( dock )
  {
    dock->setVisible( dock->isHidden() );
    return;
  }

  QgsMapCanvas *canvas = mainWindow->findChild<QgsMapCanvas *>( QStringLiteral( "theMapCanvas" ) );
  QgsDebugMsg( QString( "canvas:%1 [%2]" ).arg( ( quint64 ) canvas, 0, 16 ).arg( canvas ? canvas->objectName() : "" ) );
  if ( !canvas )
  {
    QgsDebugMsg( "map canvas mapCanvas not found" );
    return;
  }

  QgsTileScaleWidget *tws = new QgsTileScaleWidget( canvas );
  tws->setObjectName( QStringLiteral( "theTileScaleWidget" ) );

  // Follow the active layer so the slider reconfigures on layer switches.
  QgsLayerTreeView *legend = mainWindow->findChild<QgsLayerTreeView *>( QStringLiteral( "theLayerTreeView" ) );
  if ( legend )
  {
    connect( legend, &QgsLayerTreeView::currentLayerChanged,
             tws, &QgsTileScaleWidget::layerChanged );
  }
  else
  {
    QgsDebugMsg( "legend not found" );
  }

  //create the dock widget
  dock = new QgsDockWidget( tr( "Tile Scale" ), mainWindow );
  dock->setObjectName( QStringLiteral( "theTileScaleDock" ) );
  // Re-orient the slider when the dock is moved to another edge.
  connect( dock, &QDockWidget::dockLocationChanged, tws, &QgsTileScaleWidget::locationChanged );
  mainWindow->addDockWidget( Qt::RightDockWidgetArea, dock );

  // add to the Panel submenu
  QMenu *panelMenu = mainWindow->findChild<QMenu *>( QStringLiteral( "mPanelMenu" ) );
  if ( panelMenu )
  {
    // add to the Panel submenu
    panelMenu->addAction( dock->toggleViewAction() );
  }
  else
  {
    QgsDebugMsg( "panel menu not found" );
  }

  dock->setWidget( tws );
  // Persist visibility changes and restore the last saved state.
  connect( dock, &QDockWidget::visibilityChanged, tws, &QgsTileScaleWidget::scaleEnabled );
  QgsSettings settings;
  dock->setVisible( settings.value( QStringLiteral( "UI/tileScaleEnabled" ), false ).toBool() );
}
/**
 * Persist the dock's visibility so it can be restored on the next start.
 * \param enabled whether the tile scale dock is currently visible
 */
void QgsTileScaleWidget::scaleEnabled( bool enabled )
{
  QgsSettings().setValue( QStringLiteral( "UI/tileScaleEnabled" ), enabled );
}
|
raymondnijssen/QGIS
|
src/providers/wms/qgstilescalewidget.cpp
|
C++
|
gpl-2.0
| 6,157
|
<!-- INCLUDE mcp_header.html -->
<form method="post" name="mcp" action="{U_POST_ACTION}">
<table width="100%" cellpadding="3" cellspacing="1" border="0" class="modcp">
<tr>
<th colspan="2"><h2>{USERNAME}</h2></th>
</tr>
<tr>
<td class="row1 pad4" align="center">
<table cellspacing="1" cellpadding="2" border="0">
<tr>
<td align="center"><b>{USERNAME_FULL}</b></td>
</tr>
<!-- IF RANK_TITLE -->
<tr>
<td align="center">{RANK_TITLE}</td>
</tr>
<!-- ENDIF -->
<!-- IF RANK_IMG -->
<tr>
<td align="center">{RANK_IMG}</td>
</tr>
<!-- ENDIF -->
<tr>
<td align="center"><!-- IF AVATAR_IMG -->{AVATAR_IMG}<!-- ELSE --><img src="{T_THEME_PATH}/images/no_avatar.gif" alt="" /><!-- ENDIF --></td>
</tr>
</table>
</td>
<td class="row1 pad4">
<table width="100%" cellspacing="1" cellpadding="2" border="0">
<tr>
<td align="{S_CONTENT_FLOW_END}" nowrap="nowrap">{L_JOINED}: </td>
<td width="100%"><b>{JOINED}</b></td>
</tr>
<tr>
<td align="{S_CONTENT_FLOW_END}" valign="top" nowrap="nowrap">{L_TOTAL_POSTS}: </td>
<td><b>{POSTS}</b></td>
</tr>
<tr>
<td align="{S_CONTENT_FLOW_END}" valign="top" nowrap="nowrap">{L_WARNINGS}: </td>
<td><b>{WARNINGS}</b></td>
</tr>
</table>
</td>
</tr>
</table>
<br />
<table width="100%" cellpadding="3" cellspacing="1" border="0" class="modcp">
<tr>
<th colspan="5"><h2>{L_FEEDBACK}</h2></th>
</tr>
<!-- IF S_USER_NOTES -->
<tr align="center">
<td colspan="5" class="row3 pad4">{L_DISPLAY_LOG}: {S_SELECT_SORT_DAYS} {L_SORT_BY}: {S_SELECT_SORT_KEY} {S_SELECT_SORT_DIR} <input class="button" type="submit" value="{L_GO}" name="sort" /></td>
</tr>
<tr class="toplines">
<th>{L_REPORT_BY}</th>
<th>{L_IP}</th>
<th>{L_TIME}</th>
<th>{L_ACTION}</th>
<th><!-- IF S_CLEAR_ALLOWED -->{L_MARK}<!-- ENDIF --></th>
</tr>
<!-- BEGIN usernotes -->
<!-- IF usernotes.S_ROW_COUNT is even --><tr class="row1 pad4"><!-- ELSE --><tr class="row2 pad4"><!-- ENDIF -->
<td>{usernotes.REPORT_BY}</td>
<td style="text-align: center;">{usernotes.IP}</td>
<td style="text-align: center;">{usernotes.REPORT_AT}</td>
<td>
{usernotes.ACTION}
<!-- IF usernotes.DATA --><br />» [ {usernotes.DATA} ]<!-- ENDIF -->
</td>
<td style="text-align: center;"><!-- IF S_CLEAR_ALLOWED --><input type="checkbox" class="radio" name="marknote[]" value="{usernotes.ID}" /><!-- ENDIF --></td>
</tr>
<!-- END usernotes -->
<!-- IF S_CLEAR_ALLOWED -->
<tr>
<td class="row3 pad4" colspan="5" align="center"><input class="button" type="submit" name="action[del_all]" value="{L_DELETE_ALL}" /> <input class="button" type="submit" name="action[del_marked]" value="{L_DELETE_MARKED}" /></td>
</tr>
<!-- ENDIF -->
<!-- ELSE -->
<tr>
<td class="row1 pad4" colspan="5" align="center">{L_NO_FEEDBACK}</td>
</tr>
<!-- ENDIF -->
</table>
<div class="spacing"></div>
<table width="100%" cellpadding="3" cellspacing="1" border="0" class="modcp">
<tr>
<th colspan="2"><h2>{L_ADD_FEEDBACK}</h2></th>
</tr>
<tr>
<td class="row3 pad4" align="center" colspan="2">{L_ADD_FEEDBACK_EXPLAIN}</td>
</tr>
<tr>
<td colspan="2" class="row1 pad4" align="center"><textarea name="usernote" rows="10" cols="76"></textarea></td>
</tr>
<tr>
<td class="row3 pad4" colspan="2" align="center"><input class="button" type="submit" name="action[add_feedback]" value="{L_SUBMIT}" /> <input class="button" type="reset" value="{L_RESET}" /></td>
</tr>
</table>
<table width="100%" cellspacing="0" cellpadding="0">
<tr>
<td class="pagination">{PAGE_NUMBER} [ {TOTAL_REPORTS} ]</td>
<td align="{S_CONTENT_FLOW_END}"><span class="pagination"><!-- INCLUDE pagination.html --></span></td>
</tr>
</table>
{S_FORM_TOKEN}
</form>
<div class="spacing"></div>
<!-- INCLUDE mcp_footer.html -->
|
AlexBrown-appnovation/tcrg
|
forum/styles/nosebleed_3/template/mcp_notes_user.html
|
HTML
|
gpl-2.0
| 3,812
|
from django.db import models
from django.utils import timezone
class Hcmeta(models.Model):
    """Read-only mirror of the ``_hcmeta`` table.

    NOTE(review): the ``_hc*``/``sfid`` naming suggests a Heroku Connect
    Salesforce sync schema -- confirm.
    """
    hcver = models.IntegerField(blank=True, null=True)  # sync schema version -- TODO confirm
    org_id = models.CharField(max_length=50, blank=True, null=True)
    details = models.TextField(blank=True, null=True)

    class Meta:
        managed = False  # table owned by the sync process, not Django migrations
        db_table = '_hcmeta'
class SfEventLog(models.Model):
    """Read-only mirror of the ``_sf_event_log`` table: one row per synced
    Salesforce change event."""
    table_name = models.CharField(max_length=128, blank=True, null=True)
    action = models.CharField(max_length=7, blank=True, null=True)
    synced_at = models.DateTimeField(blank=True, null=True)
    sf_timestamp = models.DateTimeField(blank=True, null=True)
    sfid = models.CharField(max_length=20, blank=True, null=True)  # Salesforce record id
    record = models.TextField(blank=True, null=True)  # raw record payload
    processed = models.BooleanField(null=True)

    class Meta:
        managed = False  # table owned by the sync process
        db_table = '_sf_event_log'
class TriggerLog(models.Model):
    """Read-only mirror of the ``_trigger_log`` table: pending/processed
    outbound changes queued for Salesforce."""
    txid = models.BigIntegerField(blank=True, null=True)  # originating DB transaction id
    created_at = models.DateTimeField(blank=True, null=True)
    updated_at = models.DateTimeField(blank=True, null=True)
    processed_at = models.DateTimeField(blank=True, null=True)
    processed_tx = models.BigIntegerField(blank=True, null=True)
    state = models.CharField(max_length=8, blank=True, null=True)
    action = models.CharField(max_length=7, blank=True, null=True)
    table_name = models.CharField(max_length=128, blank=True, null=True)
    record_id = models.IntegerField(blank=True, null=True)
    sfid = models.CharField(max_length=18, blank=True, null=True)
    old = models.TextField(blank=True, null=True)     # previous values
    values = models.TextField(blank=True, null=True)  # new values
    sf_result = models.IntegerField(blank=True, null=True)
    sf_message = models.TextField(blank=True, null=True)

    class Meta:
        managed = False
        db_table = '_trigger_log'
class TriggerLogArchive(models.Model):
    """Archived rows from the trigger log; same columns as TriggerLog but
    with an explicit primary key (the archive preserves the original id)."""
    id = models.IntegerField(primary_key=True)
    txid = models.BigIntegerField(blank=True, null=True)
    created_at = models.DateTimeField(blank=True, null=True)
    updated_at = models.DateTimeField(blank=True, null=True)
    processed_at = models.DateTimeField(blank=True, null=True)
    processed_tx = models.BigIntegerField(blank=True, null=True)
    state = models.CharField(max_length=8, blank=True, null=True)
    action = models.CharField(max_length=7, blank=True, null=True)
    table_name = models.CharField(max_length=128, blank=True, null=True)
    record_id = models.IntegerField(blank=True, null=True)
    sfid = models.CharField(max_length=18, blank=True, null=True)
    old = models.TextField(blank=True, null=True)
    values = models.TextField(blank=True, null=True)
    sf_result = models.IntegerField(blank=True, null=True)
    sf_message = models.TextField(blank=True, null=True)

    class Meta:
        managed = False
        db_table = '_trigger_log_archive'
class SiteAccount(models.Model):
    """Read-only mirror of the Salesforce ``account`` table for a site.

    Field names mirror Salesforce column names (``*__c`` custom fields are
    exposed via ``db_column``).  Rows are written by the sync process, so the
    model is unmanaged.
    """
    jigsaw = models.CharField(max_length=20, blank=True, null=True)
    shippinglongitude = models.FloatField(blank=True, null=True)
    shippingstate = models.CharField(max_length=80, blank=True, null=True)
    youtubeid = models.CharField(db_column='youtubeid__c', max_length=80, blank=True, null=True)
    numberofemployees = models.IntegerField(blank=True, null=True)
    # Self-referential parent account, keyed on the Salesforce id.
    parent = models.ForeignKey('SiteAccount', to_field='sfid', db_column='parentid',
                               on_delete=models.CASCADE,
                               max_length=18, blank=True, null=True)
    recordtypeid = models.CharField(max_length=18, blank=True, null=True)
    shippingpostalcode = models.CharField(max_length=20, blank=True, null=True)
    billingcity = models.CharField(max_length=40, blank=True, null=True)
    billinglatitude = models.FloatField(blank=True, null=True)
    accountsource = models.CharField(max_length=40, blank=True, null=True)
    shippingcountry = models.CharField(max_length=80, blank=True, null=True)
    lastvieweddate = models.DateTimeField(blank=True, null=True)
    shippinggeocodeaccuracy = models.CharField(max_length=40, blank=True, null=True)
    last_el_update = models.DateTimeField(db_column='last_el_update__c', blank=True, null=True)
    name = models.CharField(max_length=255, blank=True, null=True)
    site_el_raised = models.FloatField(db_column='site_el_raised__c', blank=True, null=True)
    lastmodifieddate = models.DateTimeField(blank=True, null=True)
    phone = models.CharField(max_length=40, blank=True, null=True)
    masterrecordid = models.CharField(max_length=18, blank=True, null=True)
    ownerid = models.CharField(max_length=18, blank=True, null=True)
    isdeleted = models.BooleanField(null=True)
    site_el_goal = models.FloatField(db_column='site_el_goal__c', blank=True, null=True)
    systemmodstamp = models.DateTimeField(blank=True, null=True)
    el_id = models.CharField(db_column='el_id__c', max_length=80, blank=True, null=True)
    lastmodifiedbyid = models.CharField(max_length=18, blank=True, null=True)
    shippingstreet = models.CharField(max_length=255, blank=True, null=True)
    lastactivitydate = models.DateField(blank=True, null=True)
    billingpostalcode = models.CharField(max_length=20, blank=True, null=True)
    billinglongitude = models.FloatField(blank=True, null=True)
    twitchid = models.CharField(db_column='twitchid__c', max_length=80, blank=True, null=True)
    twitterid = models.CharField(db_column='twitterid__c', max_length=80, blank=True, null=True)
    createddate = models.DateTimeField(blank=True, null=True)
    billingstate = models.CharField(max_length=80, blank=True, null=True)
    supplies = models.TextField(db_column='supplies__c', blank=True, null=True)
    jigsawcompanyid = models.CharField(max_length=20, blank=True, null=True)
    shippingcity = models.CharField(max_length=40, blank=True, null=True)
    shippinglatitude = models.FloatField(blank=True, null=True)
    createdbyid = models.CharField(max_length=18, blank=True, null=True)
    type = models.CharField(max_length=40, blank=True, null=True)
    website = models.CharField(max_length=255, blank=True, null=True)
    billingcountry = models.CharField(max_length=80, blank=True, null=True)
    description = models.TextField(blank=True, null=True)
    billinggeocodeaccuracy = models.CharField(max_length=40, blank=True, null=True)
    photourl = models.CharField(max_length=255, blank=True, null=True)
    lastreferenceddate = models.DateTimeField(blank=True, null=True)
    sicdesc = models.CharField(max_length=80, blank=True, null=True)
    industry = models.CharField(max_length=40, blank=True, null=True)
    billingstreet = models.CharField(max_length=255, blank=True, null=True)
    site_email = models.CharField(db_column='site_email__c', max_length=80, blank=True, null=True)
    sfid = models.CharField(unique=True, max_length=18, blank=True, null=True)  # Salesforce record id
    field_hc_lastop = models.CharField(db_column='_hc_lastop', max_length=32, blank=True, null=True)
    field_hc_err = models.TextField(db_column='_hc_err', blank=True, null=True)
    site_info = models.TextField(db_column='site_info__c', blank=True, null=True)
    nerd_in_chief = models.CharField(db_column='nerd_in_chief__c', max_length=18, blank=True, null=True)
    mayedit = models.BooleanField(null=True)
    # contacturl = models.CharField(db_column='contacturl__c', max_length=1300, blank=True, null=True)
    islocked = models.BooleanField(null=True)
    loot_guard = models.CharField(db_column='loot_guard__c', max_length=18, blank=True, null=True)

    class Meta:
        managed = False
        db_table = 'account'

    def has_events(self):
        """ Return True if this account has upcoming events """
        # NOTE(review): .exists() would avoid a full COUNT here -- same result.
        return Event.objects.filter(event_start_date__gte=timezone.now(), site=self).count() > 0

    def upcoming(self):
        # Events starting now or later, soonest first.
        return self.events.filter(event_start_date__gte=timezone.now()).order_by('event_start_date').all()

    def past(self):
        # Events that already started, most recent first.
        return self.events.filter(event_start_date__lt=timezone.now()).order_by('-event_start_date').all()
class Contact(models.Model):
    """Read-only mirror of the Salesforce ``contact`` table."""
    lastname = models.CharField(max_length=80, blank=True, null=True)
    account = models.ForeignKey(SiteAccount, to_field='sfid', db_column='accountid', on_delete=models.CASCADE,
                                max_length=18, blank=True, null=True)
    name = models.CharField(max_length=121, blank=True, null=True)
    ownerid = models.CharField(max_length=18, blank=True, null=True)
    department = models.CharField(max_length=80, blank=True, null=True)
    # Extra Life participant id; numeric string -- see donate_link().
    extra_life_id = models.CharField(db_column='extra_life_id__c', unique=True, max_length=20, blank=True, null=True)
    fragforce_org_user = models.CharField(db_column='fragforce_org_user__c', max_length=18, blank=True, null=True)
    title = models.CharField(max_length=128, blank=True, null=True)
    firstname = models.CharField(max_length=40, blank=True, null=True)
    sfid = models.CharField(unique=True, max_length=18, blank=True, null=True)
    field_hc_lastop = models.CharField(db_column='_hc_lastop', max_length=32, blank=True, null=True)
    field_hc_err = models.TextField(db_column='_hc_err', blank=True, null=True)

    def donate_link(self):
        """Return this contact's Extra Life donation URL.

        Raises ValueError if no extra_life_id is set (and ValueError from
        int() if the stored id is not numeric).
        """
        if self.extra_life_id:
            return "https://www.extra-life.org/index.cfm?fuseaction=donate.participant&participantID=%d" % (
                int(self.extra_life_id),
            )
        raise ValueError("No extra life id set for %r" % self)

    class Meta:
        managed = False
        db_table = 'contact'
class ELHistory(models.Model):
    """Read-only mirror of ``el_history__c``: per-year Extra Life
    fundraising totals (raised vs. goal) for a contact and site."""
    currencyisocode = models.CharField(max_length=3, blank=True, null=True)
    contact = models.ForeignKey(Contact, to_field='sfid', db_column='contact__c', on_delete=models.CASCADE,
                                max_length=18, blank=True, null=True)
    year = models.CharField(db_column='year__c', max_length=255, blank=True, null=True)
    name = models.CharField(max_length=80, blank=True, null=True)
    raised = models.FloatField(db_column='raised__c', blank=True, null=True)
    lastmodifieddate = models.DateTimeField(blank=True, null=True)
    ownerid = models.CharField(max_length=18, blank=True, null=True)
    mayedit = models.BooleanField(null=True)
    isdeleted = models.BooleanField(null=True)
    goal = models.FloatField(db_column='goal__c', blank=True, null=True)
    systemmodstamp = models.DateTimeField(blank=True, null=True)
    el_id = models.CharField(db_column='el_id__c', max_length=7, blank=True, null=True)
    lastmodifiedbyid = models.CharField(max_length=18, blank=True, null=True)
    islocked = models.BooleanField(null=True)
    createddate = models.DateTimeField(blank=True, null=True)
    createdbyid = models.CharField(max_length=18, blank=True, null=True)
    site = models.ForeignKey(SiteAccount, to_field='sfid', db_column='site__c', on_delete=models.CASCADE, max_length=18,
                             blank=True, null=True)
    sfid = models.CharField(unique=True, max_length=18, blank=True, null=True)
    field_hc_lastop = models.CharField(db_column='_hc_lastop', max_length=32, blank=True, null=True)
    field_hc_err = models.TextField(db_column='_hc_err', blank=True, null=True)

    class Meta:
        managed = False
        db_table = 'el_history__c'
class Event(models.Model):
    """Read-only mirror of ``fragforce_event__c``: a scheduled event at a
    site.  Accessible from SiteAccount via the ``events`` related name."""
    lastvieweddate = models.DateTimeField(blank=True, null=True)
    volunteerforce_link = models.CharField(db_column='volunteerforce_link__c', max_length=255, blank=True, null=True)
    name = models.CharField(max_length=80, blank=True, null=True)
    event_end_date = models.DateTimeField(db_column='event_end_date__c', blank=True, null=True)
    lastmodifieddate = models.DateTimeField(blank=True, null=True)
    isdeleted = models.BooleanField(null=True)
    systemmodstamp = models.DateTimeField(blank=True, null=True)
    lastmodifiedbyid = models.CharField(max_length=18, blank=True, null=True)
    lastactivitydate = models.DateField(blank=True, null=True)
    event_start_date = models.DateTimeField(db_column='event_start_date__c', blank=True, null=True)
    createddate = models.DateTimeField(blank=True, null=True)
    createdbyid = models.CharField(max_length=18, blank=True, null=True)
    site = models.ForeignKey(SiteAccount, to_field='sfid', db_column='site__c', on_delete=models.CASCADE, max_length=18,
                             blank=True, null=True, related_name='events')
    lastreferenceddate = models.DateTimeField(blank=True, null=True)
    sfid = models.CharField(unique=True, max_length=18, blank=True, null=True)
    field_hc_lastop = models.CharField(db_column='_hc_lastop', max_length=32, blank=True, null=True)
    field_hc_err = models.TextField(db_column='_hc_err', blank=True, null=True)
    use_secondary_address = models.BooleanField(db_column='use_secondary_address__c', null=True)
    stream_recording_link = models.CharField(db_column='stream_recording_link__c', max_length=255, blank=True,
                                             null=True)
    # Columns below are intentionally left unmapped (kept for reference):
    # participant_count = models.FloatField(db_column='participant_count__c', blank=True, null=True)
    # prereg_url = models.CharField(db_column='prereg_url__c', max_length=1300, blank=True, null=True)
    mayedit = models.BooleanField(null=True)
    # open_for_preregistration = models.BooleanField(db_column='open_for_preregistration__c', null=True)
    islocked = models.BooleanField(null=True)
    # signinurl = models.CharField(db_column='signinurl__c', max_length=1300, blank=True, null=True)
    # event_address_lookup = models.CharField(db_column='event_address_lookup__c', max_length=1300, blank=True, null=True)
    event_information = models.TextField(db_column='event_information__c', blank=True, null=True)
    # open_for_registration = models.BooleanField(db_column='open_for_registration__c', null=True)
    # Short description of the event
    description = models.TextField(db_column='description__c', blank=True, null=True)

    class Meta:
        managed = False
        db_table = 'fragforce_event__c'
class EventParticipant(models.Model):
    """Read-only mirror of ``event_participant__c``: links a Contact to an
    Event.

    NOTE(review): ``name`` is assigned twice below; the second assignment
    (db_column='name__c', max_length=120) silently replaces the first, so the
    plain ``name`` column (max_length=80) is never mapped -- confirm which
    column is intended.
    """
    contact = models.ForeignKey(Contact, to_field='sfid', db_column='contact__c', on_delete=models.CASCADE,
                                max_length=18, blank=True, null=True)
    lastvieweddate = models.DateTimeField(blank=True, null=True)
    name = models.CharField(max_length=80, blank=True, null=True)
    lastmodifieddate = models.DateTimeField(blank=True, null=True)
    ownerid = models.CharField(max_length=18, blank=True, null=True)
    mayedit = models.BooleanField(null=True)
    event = models.ForeignKey(Event, to_field='sfid', db_column='fragforce_event__c', on_delete=models.CASCADE,
                              max_length=18, blank=True,
                              null=True)
    isdeleted = models.BooleanField(null=True)
    participant = models.BooleanField(db_column='participant__c', null=True)
    systemmodstamp = models.DateTimeField(blank=True, null=True)
    lastmodifiedbyid = models.CharField(max_length=18, blank=True, null=True)
    lastactivitydate = models.DateField(blank=True, null=True)
    islocked = models.BooleanField(null=True)
    createddate = models.DateTimeField(blank=True, null=True)
    # This rebinding overrides the earlier ``name`` field (see class note).
    name = models.CharField(db_column='name__c', max_length=120, blank=True, null=True)
    createdbyid = models.CharField(max_length=18, blank=True, null=True)
    lastreferenceddate = models.DateTimeField(blank=True, null=True)
    sfid = models.CharField(unique=True, max_length=18, blank=True, null=True)
    field_hc_lastop = models.CharField(db_column='_hc_lastop', max_length=32, blank=True, null=True)
    field_hc_err = models.TextField(db_column='_hc_err', blank=True, null=True)

    class Meta:
        managed = False
        db_table = 'event_participant__c'
|
AevumDecessus/fragforce.org
|
ffsfdc/models.py
|
Python
|
gpl-2.0
| 15,743
|
/* SPDX-License-Identifier: GPL-2.0+ */
/*
* emac definitions for keystone2 devices
*
* (C) Copyright 2012-2014
* Texas Instruments Incorporated, <www.ti.com>
*/
#ifndef _KEYSTONE_NET_H_
#define _KEYSTONE_NET_H_
#include <asm/io.h>
#include <phy.h>
/* EMAC */
#ifdef CONFIG_KSNET_NETCP_V1_0
#define GBETH_BASE (CONFIG_KSNET_NETCP_BASE + 0x00090000)
#define EMAC_EMACSL_BASE_ADDR (GBETH_BASE + 0x900)
#define EMAC_MDIO_BASE_ADDR (GBETH_BASE + 0x300)
#define EMAC_SGMII_BASE_ADDR (GBETH_BASE + 0x100)
#define DEVICE_EMACSL_BASE(x) (EMAC_EMACSL_BASE_ADDR + (x) * 0x040)
/* Register offsets */
#define CPGMACSL_REG_CTL 0x04
#define CPGMACSL_REG_STATUS 0x08
#define CPGMACSL_REG_RESET 0x0c
#define CPGMACSL_REG_MAXLEN 0x10
#elif defined CONFIG_KSNET_NETCP_V1_5
#define GBETH_BASE (CONFIG_KSNET_NETCP_BASE + 0x00200000)
#define CPGMACSL_REG_RX_PRI_MAP 0x020
#define EMAC_EMACSL_BASE_ADDR (GBETH_BASE + 0x22000)
#define EMAC_MDIO_BASE_ADDR (GBETH_BASE + 0x00f00)
#define EMAC_SGMII_BASE_ADDR (GBETH_BASE + 0x00100)
#define DEVICE_EMACSL_BASE(x) (EMAC_EMACSL_BASE_ADDR + (x) * 0x1000)
/* Register offsets */
#define CPGMACSL_REG_CTL 0x330
#define CPGMACSL_REG_STATUS 0x334
#define CPGMACSL_REG_RESET 0x338
#define CPGMACSL_REG_MAXLEN 0x024
#endif
#define KEYSTONE2_EMAC_GIG_ENABLE
#define MAC_ID_BASE_ADDR CONFIG_KSNET_MAC_ID_BASE
/* MDIO module input frequency */
#ifdef CONFIG_SOC_K2G
#define EMAC_MDIO_BUS_FREQ (ks_clk_get_rate(sys_clk0_3_clk))
#else
#define EMAC_MDIO_BUS_FREQ (ks_clk_get_rate(pass_pll_clk))
#endif
/* MDIO clock output frequency */
#define EMAC_MDIO_CLOCK_FREQ 2500000 /* 2.5 MHz */
/* MII Status Register */
#define MII_STATUS_REG 1
#define MII_STATUS_LINK_MASK 0x4
#define MDIO_CONTROL_IDLE 0x80000000
#define MDIO_CONTROL_ENABLE 0x40000000
#define MDIO_CONTROL_FAULT_ENABLE 0x40000
#define MDIO_CONTROL_FAULT 0x80000
#define MDIO_USERACCESS0_GO 0x80000000
#define MDIO_USERACCESS0_WRITE_READ 0x0
#define MDIO_USERACCESS0_WRITE_WRITE 0x40000000
#define MDIO_USERACCESS0_ACK 0x20000000
#define EMAC_MACCONTROL_MIIEN_ENABLE 0x20
#define EMAC_MACCONTROL_FULLDUPLEX_ENABLE 0x1
#define EMAC_MACCONTROL_GIGABIT_ENABLE BIT(7)
#define EMAC_MACCONTROL_GIGFORCE BIT(17)
#define EMAC_MACCONTROL_RMIISPEED_100 BIT(15)
#define EMAC_MIN_ETHERNET_PKT_SIZE 60
/**
 * struct mac_sl_cfg - EMAC sliver (MAC SL) configuration.
 * @max_rx_len: Maximum receive packet length.
 * @ctl:        Control bitfield (combination of the GMACSL_* flags below).
 */
struct mac_sl_cfg {
	u_int32_t max_rx_len;	/* Maximum receive packet length. */
	u_int32_t ctl;		/* Control bitfield */
};
/**
* Definition: Control bitfields used in the ctl field of mac_sl_cfg
*/
#define GMACSL_RX_ENABLE_RCV_CONTROL_FRAMES BIT(24)
#define GMACSL_RX_ENABLE_RCV_SHORT_FRAMES BIT(23)
#define GMACSL_RX_ENABLE_RCV_ERROR_FRAMES BIT(22)
#define GMACSL_RX_ENABLE_EXT_CTL BIT(18)
#define GMACSL_RX_ENABLE_GIG_FORCE BIT(17)
#define GMACSL_RX_ENABLE_IFCTL_B BIT(16)
#define GMACSL_RX_ENABLE_IFCTL_A BIT(15)
#define GMACSL_RX_ENABLE_CMD_IDLE BIT(11)
#define GMACSL_TX_ENABLE_SHORT_GAP BIT(10)
#define GMACSL_ENABLE_GIG_MODE BIT(7)
#define GMACSL_TX_ENABLE_PACE BIT(6)
#define GMACSL_ENABLE BIT(5)
#define GMACSL_TX_ENABLE_FLOW_CTL BIT(4)
#define GMACSL_RX_ENABLE_FLOW_CTL BIT(3)
#define GMACSL_ENABLE_LOOPBACK BIT(1)
#define GMACSL_ENABLE_FULL_DUPLEX BIT(0)
/* EMAC SL function return values */
#define GMACSL_RET_OK 0
#define GMACSL_RET_INVALID_PORT -1
#define GMACSL_RET_WARN_RESET_INCOMPLETE -2
#define GMACSL_RET_WARN_MAXLEN_TOO_BIG -3
#define GMACSL_RET_CONFIG_FAIL_RESET_ACTIVE -4
/* EMAC SL register definitions */
#define DEVICE_EMACSL_RESET_POLL_COUNT 100
/* Soft reset register values */
#define CPGMAC_REG_RESET_VAL_RESET_MASK BIT(0)
#define CPGMAC_REG_RESET_VAL_RESET BIT(0)
#define CPGMAC_REG_MAXLEN_LEN 0x3fff
/* CPSW */
/* Control bitfields */
#define CPSW_CTL_P2_PASS_PRI_TAGGED BIT(5)
#define CPSW_CTL_P1_PASS_PRI_TAGGED BIT(4)
#define CPSW_CTL_P0_PASS_PRI_TAGGED BIT(3)
#define CPSW_CTL_P0_ENABLE BIT(2)
#define CPSW_CTL_VLAN_AWARE BIT(1)
#define CPSW_CTL_FIFO_LOOPBACK BIT(0)
#define DEVICE_CPSW_NUM_PORTS CONFIG_KSNET_CPSW_NUM_PORTS
#define DEVICE_N_GMACSL_PORTS (DEVICE_CPSW_NUM_PORTS - 1)
#ifdef CONFIG_KSNET_NETCP_V1_0
#define DEVICE_CPSW_BASE (GBETH_BASE + 0x800)
#define CPSW_REG_CTL 0x004
#define CPSW_REG_STAT_PORT_EN 0x00c
#define CPSW_REG_MAXLEN 0x040
#define CPSW_REG_ALE_CONTROL 0x608
#define CPSW_REG_ALE_PORTCTL(x) (0x640 + (x) * 4)
#define CPSW_REG_VAL_STAT_ENABLE_ALL 0xf
#elif defined CONFIG_KSNET_NETCP_V1_5
#define DEVICE_CPSW_BASE (GBETH_BASE + 0x20000)
#define CPSW_REG_CTL 0x00004
#define CPSW_REG_STAT_PORT_EN 0x00014
#define CPSW_REG_MAXLEN 0x01024
#define CPSW_REG_ALE_CONTROL 0x1e008
#define CPSW_REG_ALE_PORTCTL(x) (0x1e040 + (x) * 4)
#define CPSW_REG_VAL_STAT_ENABLE_ALL 0x1ff
#endif
#define CPSW_REG_VAL_ALE_CTL_RESET_AND_ENABLE ((u_int32_t)0xc0000000)
#define CPSW_REG_VAL_ALE_CTL_BYPASS ((u_int32_t)0x00000010)
#define CPSW_REG_VAL_PORTCTL_FORWARD_MODE 0x3
#define target_get_switch_ctl() CPSW_CTL_P0_ENABLE
#define SWITCH_MAX_PKT_SIZE 9000
/* SGMII */
#define SGMII_REG_STATUS_LOCK BIT(4)
#define SGMII_REG_STATUS_LINK BIT(0)
#define SGMII_REG_STATUS_AUTONEG BIT(2)
#define SGMII_REG_CONTROL_AUTONEG BIT(0)
#define SGMII_REG_CONTROL_MASTER BIT(5)
#define SGMII_REG_MR_ADV_ENABLE BIT(0)
#define SGMII_REG_MR_ADV_LINK BIT(15)
#define SGMII_REG_MR_ADV_FULL_DUPLEX BIT(12)
#define SGMII_REG_MR_ADV_GIG_MODE BIT(11)
#define SGMII_LINK_MAC_MAC_AUTONEG 0
#define SGMII_LINK_MAC_PHY 1
#define SGMII_LINK_MAC_MAC_FORCED 2
#define SGMII_LINK_MAC_FIBER 3
#define SGMII_LINK_MAC_PHY_FORCED 4
#ifdef CONFIG_KSNET_NETCP_V1_0
#define SGMII_OFFSET(x) ((x <= 1) ? (x * 0x100) : ((x * 0x100) + 0x100))
#elif defined CONFIG_KSNET_NETCP_V1_5
#define SGMII_OFFSET(x) ((x) * 0x100)
#endif
#define SGMII_IDVER_REG(x) (EMAC_SGMII_BASE_ADDR + SGMII_OFFSET(x) + 0x000)
#define SGMII_SRESET_REG(x) (EMAC_SGMII_BASE_ADDR + SGMII_OFFSET(x) + 0x004)
#define SGMII_CTL_REG(x) (EMAC_SGMII_BASE_ADDR + SGMII_OFFSET(x) + 0x010)
#define SGMII_STATUS_REG(x) (EMAC_SGMII_BASE_ADDR + SGMII_OFFSET(x) + 0x014)
#define SGMII_MRADV_REG(x) (EMAC_SGMII_BASE_ADDR + SGMII_OFFSET(x) + 0x018)
#define SGMII_LPADV_REG(x) (EMAC_SGMII_BASE_ADDR + SGMII_OFFSET(x) + 0x020)
#define SGMII_TXCFG_REG(x) (EMAC_SGMII_BASE_ADDR + SGMII_OFFSET(x) + 0x030)
#define SGMII_RXCFG_REG(x) (EMAC_SGMII_BASE_ADDR + SGMII_OFFSET(x) + 0x034)
#define SGMII_AUXCFG_REG(x) (EMAC_SGMII_BASE_ADDR + SGMII_OFFSET(x) + 0x038)
/* RGMII */
#define RGMII_REG_STATUS_LINK BIT(0)
#define RGMII_STATUS_REG (GBETH_BASE + 0x18)
/* PSS */
#ifdef CONFIG_KSNET_NETCP_V1_0
#define DEVICE_PSTREAM_CFG_REG_ADDR (CONFIG_KSNET_NETCP_BASE + 0x604)
#define DEVICE_PSTREAM_CFG_VAL_ROUTE_CPPI 0x06060606
#define hw_config_streaming_switch()\
writel(DEVICE_PSTREAM_CFG_VAL_ROUTE_CPPI, DEVICE_PSTREAM_CFG_REG_ADDR);
#elif defined CONFIG_KSNET_NETCP_V1_5
#define DEVICE_PSTREAM_CFG_REG_ADDR (CONFIG_KSNET_NETCP_BASE + 0x500)
#define DEVICE_PSTREAM_CFG_VAL_ROUTE_CPPI 0x0
#define hw_config_streaming_switch()\
writel(DEVICE_PSTREAM_CFG_VAL_ROUTE_CPPI,\
DEVICE_PSTREAM_CFG_REG_ADDR);\
writel(DEVICE_PSTREAM_CFG_VAL_ROUTE_CPPI,\
DEVICE_PSTREAM_CFG_REG_ADDR+4);\
writel(DEVICE_PSTREAM_CFG_VAL_ROUTE_CPPI,\
DEVICE_PSTREAM_CFG_REG_ADDR+8);\
writel(DEVICE_PSTREAM_CFG_VAL_ROUTE_CPPI,\
DEVICE_PSTREAM_CFG_REG_ADDR+12);
#endif
/* EMAC MDIO Registers Structure */
/*
 * EMAC MDIO register block. Field order mirrors the hardware register
 * layout (rsvd0/rsvd1 pad over unused offsets), so this struct is overlaid
 * directly on the memory-mapped MDIO base -- do not reorder or resize
 * members.
 */
struct mdio_regs {
	u32 version;
	u32 control;
	u32 alive;
	u32 link;
	u32 linkintraw;
	u32 linkintmasked;
	u32 rsvd0[2];		/* reserved/unused offsets */
	u32 userintraw;
	u32 userintmasked;
	u32 userintmaskset;
	u32 userintmaskclear;
	u32 rsvd1[20];		/* reserved/unused offsets */
	u32 useraccess0;
	u32 userphysel0;
	u32 useraccess1;
	u32 userphysel1;
};
/* Per-interface private data passed to keystone2_emac_initialize(). */
struct eth_priv_t {
	char int_name[32];	/* interface name */
	int rx_flow;		/* receive DMA flow id -- TODO confirm */
	int phy_addr;		/* MDIO address of the attached PHY */
	int slave_port;		/* CPSW slave port number */
	int sgmii_link_type;	/* one of the SGMII_LINK_* values above */
	phy_interface_t phy_if;
	struct phy_device *phy_dev;
};
int keystone2_emac_initialize(struct eth_priv_t *eth_priv);
void sgmii_serdes_setup_156p25mhz(void);
void sgmii_serdes_shutdown(void);
#endif /* _KEYSTONE_NET_H_ */
|
ev3dev/u-boot
|
arch/arm/include/asm/ti-common/keystone_net.h
|
C
|
gpl-2.0
| 8,034
|
--*******
--GLOBALS
--*******
D_SHORT = 1
D_LONG = 2
D_STRING = 3
D_FLOAT = 4
D_BYTE = 5
D_VECTOR = 6
local strings = {}
strings[D_SHORT] = "Short"
strings[D_LONG] = "Long"
strings[D_STRING] = "String"
strings[D_FLOAT] = "Float"
strings[D_BYTE] = "Byte"
strings[D_VECTOR] = "Vector"
local types = {}
types[D_SHORT] = "number"
types[D_LONG] = "number"
types[D_STRING] = "string"
types[D_FLOAT] = "number"
types[D_BYTE] = "number"
types[D_VECTOR] = "userdata"
local defaults = {}
defaults[D_SHORT] = 0
defaults[D_LONG] = 0
defaults[D_STRING] = ""
defaults[D_FLOAT] = 0
defaults[D_BYTE] = 0
defaults[D_VECTOR] = Vector(0,0,0)
local MessageT = {}
local Prototypes = {}
local connections = {}
local standIns = {}
local funcs = {}
local QueueMessage = nil
local sendPrototype = nil
local d_Message = _Message
local d_Send = _SendDataMessage
if(SERVER) then
funcs[D_SHORT] = _message.WriteShort
funcs[D_LONG] = _message.WriteLong
funcs[D_STRING] = _message.WriteString
funcs[D_FLOAT] = _message.WriteFloat
funcs[D_BYTE] = _message.WriteByte
funcs[D_VECTOR] = function(m,v)
_message.WriteFloat(m,v.x)
_message.WriteFloat(m,v.y)
_message.WriteFloat(m,v.z)
end
else
funcs[D_BYTE] = _message.ReadByte
funcs[D_SHORT] = _message.ReadShort
funcs[D_LONG] = _message.ReadLong
funcs[D_STRING] = _message.ReadString
funcs[D_FLOAT] = _message.ReadFloat
funcs[D_VECTOR] = function(m,v)
local x = _message.ReadFloat()
local y = _message.ReadFloat()
local z = _message.ReadFloat()
return Vector(x,y,z)
end
end
-- Look up a prototype's key in the Prototypes table by its registered name.
-- Returns nil when no prototype with that name exists.
local function protoForName(name)
	for key, proto in pairs(Prototypes) do
		if proto.name == name then
			return key
		end
	end
	return nil
end
-- Look up a prototype's key in the Prototypes table by its numeric _id.
-- Returns nil when no prototype with that id exists.
local function protoForId(id)
	for key, proto in pairs(Prototypes) do
		if proto._id == id then
			return key
		end
	end
	return nil
end
--*********
--METATABLE
--*********
-- Schema builders: each appends one field tag to the prototype's stack and
-- returns self so declarations can be chained, e.g. proto:Byte():String().
function MessageT:Byte() table.insert(self.stack,D_BYTE) return self end
function MessageT:Short() table.insert(self.stack,D_SHORT) return self end
function MessageT:Long() table.insert(self.stack,D_LONG) return self end
function MessageT:String() table.insert(self.stack,D_STRING) return self end
function MessageT:Float() table.insert(self.stack,D_FLOAT) return self end
function MessageT:Vector() table.insert(self.stack,D_VECTOR) return self end
-- Callback stub: overridden by users; invoked on the client with the decoded
-- field values after Read() finishes.
function MessageT:Recv(data) end
-- Broadcast: re-announce this prototype to every currently connected player.
function MessageT:E()
for k,v in pairs(GetAllPlayers()) do
if(connections[v:EntIndex()] == true) then
sendPrototype(self,v)
end
end
return self
end
-- Queue one instance of this message for a player.  Extra arguments are
-- matched positionally against the declared field stack; wrong-typed
-- arguments are replaced by the per-tag default value.
-- NOTE(review): reads `arg[i]` — the implicit Lua 5.0-style vararg table
-- built from `...`; this relies on the engine's Lua supporting that form.
function MessageT:Send(pl,...)
if(type(pl) == "userdata") then pl = pl:EntIndex() end
if(type(pl) ~= "number") then error("Not a player\n") return end
local data = {}
--local msg = d_Message(pl,LUA_PROTOMESSAGE_MSG)
-- First byte of every payload is the prototype id (see handle() client side).
table.insert(data,{D_BYTE,self._id})
for i=1, #self.stack do
local v = arg[i]
local t = self.stack[i]
local b,e = nil,"Nan"
if(types[t] == type(v)) then
table.insert(data,{t,v})
--b,e = pcall(funcs[t],v)
else
table.insert(data,{t,defaults[t]})
--b,e = pcall(funcs[t],defaults[t])
end
end
QueueMessage(LUA_PROTOMESSAGE_MSG,pl,data)
end
-- Client side: decode the incoming message according to the field stack and
-- hand the resulting value list to the Recv() callback.
function MessageT:Read()
if not (CLIENT) then return end
self.data = {}
for i=1, #self.stack do
local v = self.stack[i]
local b,e = pcall(funcs[v])
if not (b) then
print("Error reading message data: " .. strings[v] .. " : " .. e .. "\n")
else
--print("ProtoRead[" .. i .. "]: " .. strings[v] .. " - " .. tostring(e) .. "\n")
table.insert(self.data, e)
end
end
self:Recv(self.data)
end
-- Encode the field stack as a single number by concatenating the tag digits
-- (all tags are 1..6).  "9" is the sentinel for an empty stack so the result
-- is always a valid number.
function MessageT:Pack()
local contents = ""
for i=1, #self.stack do
local v = self.stack[i]
contents = contents .. tostring(v)
end
if(contents == "") then contents = "9" end
return tonumber(contents)
end
-- Inverse of Pack(): rebuild the field stack from the digit string.
-- NOTE: `!=` is non-standard Lua; this codebase's patched interpreter
-- accepts it (see also `!` used elsewhere in this file).
function MessageT:SetStack(stack)
stack = tostring(stack)
self.stack = {}
if(stack != "9") then
stack = string.ToTable(stack)
for k,v in pairs(stack) do
v = tonumber(v)
print("Loaded Into Stack: " .. v .. "\n")
table.insert(self.stack,v)
end
end
end
-- Internal factory for prototype objects.
-- name:  unique prototype name (required).
-- stack: when nil, this is a normal registration — a sequential id is
--        assigned and the prototype is stored in Prototypes.  When non-nil
--        it is actually the server-assigned id for a client-side stand-in
--        (see handle() below), and the object is NOT added to Prototypes.
-- If a prototype with the same name already exists it is reset (stack and
-- data cleared) and reused instead of creating a duplicate.
local function __MessagePrototype(name,stack)
local o = {}
if(type(name) ~= "string") then error("Invalid Prototype Name\n") end
--if(protoForName(name) ~= nil) then error("Prototype Already Exists with that name: " .. name .. "\n") end
local exist = protoForName(name)
if(exist ~= nil) then
Prototypes[exist].stack = {}
Prototypes[exist].data = {}
return Prototypes[exist]
end
setmetatable(o,MessageT)
MessageT.__index = MessageT
o.stack = {}
o.name = name
o.data = {}
if(stack == nil) then
o._id = #Prototypes + 1
table.insert(Prototypes,o)
else
o._id = stack
end
return o;
end
-- Public entry point: register (or reset) a named message prototype.
function MessagePrototype(name)
return __MessagePrototype(name,nil)
end
--*********
--SERVER IO
--*********
if(SERVER) then
-- Outgoing messages waiting for delivery; each entry is
-- { msgid, player, data, expiryTime }.
local messageQueue = {}
-- Per-frame pump: deliver up to 3 queued messages to players whose
-- connection is marked ready; undeliverable messages are dropped once
-- their expiry time passes.
local function Think()
local ltime = LevelTime()
for i=1, 3 do --try and do 3 messages
if(#messageQueue == 0) then return end
--print("Queue: " .. #MessageQueue .. "\n")
local focus = messageQueue[1]
local msgid = focus[1]
local player = focus[2]
local data = focus[3]
local expires = focus[4]
if(player == nil) then
table.remove(messageQueue,1)
else
if(connections[player]) then
table.remove(messageQueue,1)
local msg = d_Message(player,msgid)
-- Serialize each {tag, value} pair with the matching writer.
for i=1, #data do
local t = data[i][1]
local v = data[i][2]
local b,e = pcall(funcs[t],msg,v)
--print("Type: " .. t .. "\n")
if not (b) then
print("^1Error sending message data: " .. strings[t] .. " : " .. e .. "\n")
else
--print(" " .. v .. "\n")
end
end
d_Send(msg)
else
if(expires < ltime) then
table.remove(messageQueue,1)
print("^6ProtoMessage Expired: " .. msgid .. "\n")
end
end
end
end
end
hook.add("Think","messageproto",Think)
-- Mark the client ready and announce every registered prototype to it.
-- NOTE(review): this indexes connections by the raw `pl` value while
-- PlayerStop and MessageT:E() index by pl:EntIndex(); confirm whether the
-- ClientReady hook passes an entity or an entity index — if it passes an
-- entity, these keys never match.
local function PlayerJoined(pl)
if(pl == nil) then return end
connections[pl] = true
for k,v in pairs(Prototypes) do
sendPrototype(v,pl)
end
end
hook.add("ClientReady","messageproto",PlayerJoined,9998)
-- Mark a disconnecting human client as no longer reachable.
local function PlayerStop(pl)
if(pl == nil) then return end
if(!pl:IsBot()) then connections[pl:EntIndex()] = false end
end
hook.add("ClientShutdownLua","messageproto",PlayerStop,9999)
-- Re-announce all prototypes when a demo recording starts.
local function DemoSend(pl)
if(pl == nil) then return end
for k,v in pairs(Prototypes) do
sendPrototype(v,pl:EntIndex())
end
end
hook.add("DemoStarted","messageproto",DemoSend,9999)
-- Enqueue a message; it expires 100 time units after queuing if the
-- target never becomes ready.
QueueMessage = function(msg,pl,data)
table.insert(messageQueue,{msg,pl,data,LevelTime() + 100})
end
-- Announce one prototype (id, packed field stack, name) to one player.
sendPrototype = function(proto,pl)
local data = {}
--local msg = d_Message(pl,LUA_PROTOMESSAGE_INDEX)
--_message.WriteByte(msg,proto._id)
--_message.WriteLong(msg,proto:Pack())
print("Send Proto: " .. proto.name .. "\n")
--_message.WriteString(msg,proto.name)
table.insert(data,{D_BYTE,proto._id})
table.insert(data,{D_LONG,proto:Pack()})
table.insert(data,{D_STRING,proto.name})
QueueMessage(LUA_PROTOMESSAGE_INDEX,pl,data)
end
else
--*********
--CLIENT IO
--*********
-- Map a server id to a local prototype, preferring a locally registered
-- prototype of the same name over the temporary stand-in.
local function resolveProto(id)
if(standIns[id] ~= nil) then
local id2 = protoForName(standIns[id].name)
return Prototypes[id2] or standIns[id]
else
local id2 = protoForId(id)
return Prototypes[id2]
end
return nil
end
-- Dispatch incoming engine messages by message id:
--  INDEX -> prototype announcement (id, packed stack, name),
--  MSG   -> payload for a previously announced prototype.
local function handle(msgid)
if(msgid == LUA_PROTOMESSAGE_INDEX) then
local id = _message.ReadByte()
local contents = _message.ReadLong()
local str = _message.ReadString()
print("Got messageID: " .. id .. "->" .. str .. "->" .. tostring(contents) .. "\n")
local proto = protoForName(str)
if(proto == nil) then
-- Not registered locally yet: keep a stand-in keyed by server id.
standIns[id] = __MessagePrototype(str,id)
standIns[id]:SetStack(contents)
else
-- Sync the local prototype's id and field stack with the server's.
proto = Prototypes[proto]
proto._id = id
proto:SetStack(contents)
end
elseif(msgid == LUA_PROTOMESSAGE_CACHE) then
elseif(msgid == LUA_PROTOMESSAGE_MSG) then
local id = _message.ReadByte()
local proto = resolveProto(id)
if(proto ~= nil) then
proto:Read()
--print("Read Prototype\n")
else
error("GOT NO PROTOTYPE, MESSAGE LOST, GAME OVER [" .. id .. "]\n")
end
end
end
hook.add("_HandleMessage","messageproto",handle)
end
|
redrumrobot/quakeconstruct
|
code/debug/lua/includes/messageproto.lua
|
Lua
|
gpl-2.0
| 8,460
|
/*
* MekWars - Copyright (C) 2008
*
* Original author - jtighe (torren@users.sourceforge.net)
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*/
package server.campaign.commands.mod;
import java.io.File;
import java.util.StringTokenizer;
import megamek.common.MechSummaryCache;
import server.MWChatServer.auth.IAuthenticator;
import server.campaign.CampaignMain;
import server.campaign.commands.Command;
/**
 * Moderator command that forces a rebuild of the server's MegaMek unit
 * summary cache: the on-disk cache file is deleted and
 * {@link MechSummaryCache#getInstance()} is invoked to regenerate it.
 * A note is sent to mod mail on success.
 */
public class UpdateServerUnitsCacheCommand implements Command {

    int accessLevel = IAuthenticator.MODERATOR;
    String syntax = "Player Name";

    public int getExecutionLevel() {
        return accessLevel;
    }

    public void setExecutionLevel(int i) {
        accessLevel = i;
    }

    public String getSyntax() {
        return syntax;
    }

    /**
     * Deletes the cached unit file and triggers a cache rebuild.
     *
     * @param command unused; this command takes no arguments.
     * @param Username name of the invoking user, used for the access check
     *        and for the mod-mail notification.
     */
    public void process(StringTokenizer command, String Username) {
        // Standard access-level gate shared by all mod commands.
        if (accessLevel != 0) {
            int userLevel = CampaignMain.cm.getServer().getUserLevel(Username);
            if (userLevel < getExecutionLevel()) {
                CampaignMain.cm.toUser("AM:Insufficient access level for command. Level: " + userLevel + ". Required: " + accessLevel + ".", Username, true);
                return;
            }
        }

        /*
         * Clear any cached unit file. It will be rebuilt below; clearing
         * first ensures that data-file updates actually take hold.
         */
        File cache = new File("./data/mechfiles/units.cache");
        // File.delete() returns false on failure; ignoring it would silently
        // leave a stale cache in place while claiming success.
        if (cache.exists() && !cache.delete()) {
            CampaignMain.cm.toUser("AM:Could not delete ./data/mechfiles/units.cache. Unit cache was NOT updated.", Username, true);
            return;
        }

        MechSummaryCache.getInstance();
        CampaignMain.cm.doSendModMail("NOTE", Username + " has updated the servers unit cache.");
    }
}
|
FireSight/GoonWars
|
src/server/campaign/commands/mod/UpdateServerUnitsCacheCommand.java
|
Java
|
gpl-2.0
| 2,249
|
/* modules/m_cap.c
*
* Copyright (C) 2005 Lee Hardy <lee@leeh.co.uk>
* Copyright (C) 2005 ircd-ratbox development team
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* 1.Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2.Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3.The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
* INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
*/
#include "stdinc.h"
#include "class.h"
#include "client.h"
#include "match.h"
#include "ircd.h"
#include "numeric.h"
#include "msg.h"
#include "parse.h"
#include "modules.h"
#include "s_serv.h"
#include "s_user.h"
#include "hash.h"
/* Comparator signature accepted by qsort()/bsearch(); the typed helpers
 * below are cast to this. */
typedef int (*bqcmp)(const void *, const void *);
static int m_cap(struct Client *, struct Client *, int, const char **);
static int modinit(void);
/* CAP must be usable before registration completes, so the same handler
 * is installed for unregistered and registered clients. */
struct Message cap_msgtab = {
"CAP", 0, 0, 0, MFLG_SLOW,
{{m_cap, 2}, {m_cap, 2}, mg_ignore, mg_ignore, mg_ignore, {m_cap, 2}}
};
mapi_clist_av1 cap_clist[] = { &cap_msgtab, NULL };
DECLARE_MODULE_AV1(cap, modinit, NULL, cap_clist, NULL, NULL, "$Revision$");
/* Build one clicap entry; namelen is computed at compile time from the
 * string literal (sizeof includes the NUL, hence the -1). */
#define _CLICAP(name, capserv, capclient, flags) \
{ (name), (capserv), (capclient), (flags), sizeof(name) - 1 }
/* Sticky caps survive CAP CLEAR and cannot be ACKed off (see cap_ack/
 * cap_clear below). */
#define CLICAP_FLAGS_STICKY 0x001
/* Table of client capabilities this server offers.  cap_serv is the bit
 * toggled on the server side; cap_cli, when non-zero, is a bit that the
 * client must additionally acknowledge.  Sorted by name at module init so
 * clicap_find() can bsearch() it. */
static struct clicap {
const char *name;
int cap_serv; /* for altering s->c */
int cap_cli; /* for altering c->s */
int flags;
int namelen;
} clicap_list[] = {
_CLICAP("multi-prefix", CLICAP_MULTI_PREFIX, 0, 0),
_CLICAP("sasl", CLICAP_SASL, 0, 0),
_CLICAP("account-notify", CLICAP_ACCOUNT_NOTIFY, 0, 0),
_CLICAP("extended-join", CLICAP_EXTENDED_JOIN, 0, 0),
_CLICAP("away-notify", CLICAP_AWAY_NOTIFY, 0, 0),
_CLICAP("chghost", CLICAP_CHGHOST, 0, 0),
};
#define CLICAP_LIST_LEN (sizeof(clicap_list) / sizeof(struct clicap))
static int clicap_sort(struct clicap *, struct clicap *);
/* Module init: sort the capability table once so later lookups can use
 * binary search. */
static int
modinit(void)
{
qsort(clicap_list, CLICAP_LIST_LEN, sizeof(struct clicap),
(bqcmp) clicap_sort);
return 0;
}
/* qsort comparator: order two capability entries by name. */
static int
clicap_sort(struct clicap *one, struct clicap *two)
{
return irccmp(one->name, two->name);
}
/* bsearch comparator: compare a bare name against a capability entry. */
static int
clicap_compare(const char *name, struct clicap *cap)
{
return irccmp(name, cap->name);
}
/* clicap_find()
 * Used iteratively over a buffer, extracts individual cap tokens.
 *
 * Inputs: buffer to start iterating over (NULL to iterate over existing buf)
 * int pointer to whether the cap token is negated
 * int pointer to whether we finish with success
 * Ouputs: Cap entry if found, NULL otherwise.
 *
 * Note: buf/p are static so successive calls with data == NULL continue
 * scanning the buffer supplied on the first call — this function is not
 * reentrant.  *finished is set only when the input is fully consumed;
 * a NULL return with *finished == 0 means an unknown token was hit. */
static struct clicap *
clicap_find(const char *data, int *negate, int *finished)
{
static char buf[BUFSIZE];
static char *p;
struct clicap *cap;
char *s;
*negate = 0;
if(data) {
rb_strlcpy(buf, data, sizeof(buf));
p = buf;
}
if(*finished)
return NULL;
/* skip any whitespace */
while(*p && IsSpace(*p))
p++;
if(EmptyString(p)) {
*finished = 1;
return NULL;
}
/* a leading '-' marks the token as a removal request */
if(*p == '-') {
*negate = 1;
p++;
/* someone sent a '-' without a parameter.. */
if(*p == '\0')
return NULL;
}
/* terminate this token at the next space, remember where the rest starts */
if((s = strchr(p, ' ')))
*s++ = '\0';
if((cap = bsearch(p, clicap_list, CLICAP_LIST_LEN,
sizeof(struct clicap), (bqcmp) clicap_compare))) {
if(s)
p = s;
else
*finished = 1;
}
return cap;
}
/* clicap_generate()
 * Generates a list of capabilities.
 *
 * Inputs: client to send to, subcmd to send,
 * flags to match against: 0 to do none, -1 if client has no flags,
 * int to whether we are doing CAP CLEAR
 * Outputs: None
 *
 * Builds ":<me> CAP <nick> <subcmd>" followed by the space-separated cap
 * names in capbuf.  When a line would overflow, it is flushed with a
 * trailing " *" continuation marker and the cap buffer restarts.
 * Prefixes per cap: '-' removal (CLEAR), '=' sticky, '~' needs client ack. */
static void
clicap_generate(struct Client *source_p, const char *subcmd, int flags, int clear)
{
char buf[BUFSIZE];
char capbuf[BUFSIZE];
char *p;
int buflen = 0;
int curlen, mlen;
size_t i;
mlen = sprintf(buf, ":%s CAP %s %s",
me.name,
EmptyString(source_p->name) ? "*" : source_p->name,
subcmd);
p = capbuf;
buflen = mlen;
/* shortcut, nothing to do */
if(flags == -1) {
sendto_one(source_p, "%s :", buf);
return;
}
for(i = 0; i < CLICAP_LIST_LEN; i++) {
if(flags) {
if(!IsCapable(source_p, clicap_list[i].cap_serv))
continue;
/* they are capable of this, check sticky */
else if(clear && clicap_list[i].flags & CLICAP_FLAGS_STICKY)
continue;
}
/* \r\n\0, possible "-~=", space, " *" */
if(buflen + clicap_list[i].namelen >= BUFSIZE - 10) {
/* remove our trailing space -- if buflen == mlen
 * here, we didnt even succeed in adding one.
 */
if(buflen != mlen)
*(p - 1) = '\0';
else
*p = '\0';
sendto_one(source_p, "%s * :%s", buf, capbuf);
p = capbuf;
buflen = mlen;
}
if(clear) {
*p++ = '-';
buflen++;
/* needs a client ack */
if(clicap_list[i].cap_cli &&
IsCapable(source_p, clicap_list[i].cap_cli)) {
*p++ = '~';
buflen++;
}
} else {
if(clicap_list[i].flags & CLICAP_FLAGS_STICKY) {
*p++ = '=';
buflen++;
}
/* if we're doing an LS, then we only send this if
 * they havent ack'd
 */
if(clicap_list[i].cap_cli &&
(!flags || !IsCapable(source_p, clicap_list[i].cap_cli))) {
*p++ = '~';
buflen++;
}
}
curlen = sprintf(p, "%s ", clicap_list[i].name);
p += curlen;
buflen += curlen;
}
/* remove trailing space */
if(buflen != mlen)
*(p - 1) = '\0';
else
*p = '\0';
sendto_one(source_p, "%s :%s", buf, capbuf);
}
/* CAP ACK: apply the client's acknowledgements.  Only caps the client had
 * already REQ'd server-side are honoured; a '-' prefix removes the client
 * bit unless the cap is sticky. */
static void
cap_ack(struct Client *source_p, const char *arg)
{
struct clicap *cap;
int capadd = 0, capdel = 0;
int finished = 0, negate;
if(EmptyString(arg))
return;
for(cap = clicap_find(arg, &negate, &finished); cap;
cap = clicap_find(NULL, &negate, &finished)) {
/* sent an ACK for something they havent REQd */
if(!IsCapable(source_p, cap->cap_serv))
continue;
if(negate) {
/* dont let them ack something sticky off */
if(cap->flags & CLICAP_FLAGS_STICKY)
continue;
capdel |= cap->cap_cli;
} else
capadd |= cap->cap_cli;
}
source_p->localClient->caps |= capadd;
source_p->localClient->caps &= ~capdel;
}
/* CAP CLEAR: ACK the removal of the client's caps, then drop them all.
 * (The CLICAP_STICKY branch is dead unless that macro is defined somewhere;
 * only CLICAP_FLAGS_STICKY is defined in this file.) */
static void
cap_clear(struct Client *source_p, const char *arg)
{
clicap_generate(source_p, "ACK",
source_p->localClient->caps ? source_p->localClient->caps : -1, 1);
/* XXX - sticky capabs */
#ifdef CLICAP_STICKY
source_p->localClient->caps = source_p->localClient->caps & CLICAP_STICKY;
#else
source_p->localClient->caps = 0;
#endif
}
/* CAP END: negotiation finished.  If NICK and USER were already received
 * while registration was held, complete the registration now. */
static void
cap_end(struct Client *source_p, const char *arg)
{
if(IsRegistered(source_p))
return;
source_p->flags &= ~FLAGS_CLICAP;
if(source_p->name[0] && source_p->flags & FLAGS_SENTUSER) {
char buf[USERLEN+1];
rb_strlcpy(buf, source_p->username, sizeof(buf));
register_local_user(source_p, source_p, buf);
}
}
/* CAP LIST: report the caps the client currently has enabled. */
static void
cap_list(struct Client *source_p, const char *arg)
{
/* list of what theyre currently using */
clicap_generate(source_p, "LIST",
source_p->localClient->caps ? source_p->localClient->caps : -1, 0);
}
/* CAP LS: report all caps the server supports; holds registration for
 * unregistered clients until CAP END. */
static void
cap_ls(struct Client *source_p, const char *arg)
{
if(!IsRegistered(source_p))
source_p->flags |= FLAGS_CLICAP;
/* list of what we support */
clicap_generate(source_p, "LS", 0, 0);
}
/* CAP REQ: the request is atomic — if any token is unknown, or tries to
 * remove a sticky cap, the whole request is NAKed and nothing changes.
 * Otherwise an ACK is sent (split over at most two reply buffers) and the
 * server-side cap bits are updated. */
static void
cap_req(struct Client *source_p, const char *arg)
{
char buf[BUFSIZE];
char pbuf[2][BUFSIZE];
struct clicap *cap;
int buflen, plen;
int i = 0;
int capadd = 0, capdel = 0;
int finished = 0, negate;
if(!IsRegistered(source_p))
source_p->flags |= FLAGS_CLICAP;
if(EmptyString(arg))
return;
buflen = snprintf(buf, sizeof(buf), ":%s CAP %s ACK",
me.name, EmptyString(source_p->name) ? "*" : source_p->name);
pbuf[0][0] = '\0';
plen = 0;
for(cap = clicap_find(arg, &negate, &finished); cap;
cap = clicap_find(NULL, &negate, &finished)) {
/* filled the first array, but cant send it in case the
 * request fails. one REQ should never fill more than two
 * buffers --fl
 */
if(buflen + plen + cap->namelen + 6 >= BUFSIZE) {
pbuf[1][0] = '\0';
plen = 0;
i = 1;
}
if(negate) {
if(cap->flags & CLICAP_FLAGS_STICKY) {
/* force the NAK path below */
finished = 0;
break;
}
strcat(pbuf[i], "-");
plen++;
capdel |= cap->cap_serv;
} else {
if(cap->flags & CLICAP_FLAGS_STICKY) {
strcat(pbuf[i], "=");
plen++;
}
capadd |= cap->cap_serv;
}
/* '~' marks caps that still need a client-side ACK */
if(cap->cap_cli) {
strcat(pbuf[i], "~");
plen++;
}
strcat(pbuf[i], cap->name);
strcat(pbuf[i], " ");
plen += (cap->namelen + 1);
}
/* !finished here means clicap_find() stopped early: bad token or a
 * sticky removal — reject the entire request. */
if(!finished) {
sendto_one(source_p, ":%s CAP %s NAK :%s",
me.name, EmptyString(source_p->name) ? "*" : source_p->name, arg);
return;
}
if(i) {
sendto_one(source_p, "%s * :%s", buf, pbuf[0]);
sendto_one(source_p, "%s :%s", buf, pbuf[1]);
} else
sendto_one(source_p, "%s :%s", buf, pbuf[0]);
source_p->localClient->caps |= capadd;
source_p->localClient->caps &= ~capdel;
}
/* Dispatch table mapping CAP subcommand names to handlers. */
static struct clicap_cmd {
const char *cmd;
void (*func)(struct Client *source_p, const char *arg);
} clicap_cmdlist[] = {
/* This list *MUST* be in alphabetical order */
{ "ACK", cap_ack },
{ "CLEAR", cap_clear },
{ "END", cap_end },
{ "LIST", cap_list },
{ "LS", cap_ls },
{ "REQ", cap_req },
};
/* bsearch comparator: subcommand name vs dispatch-table entry. */
static int
clicap_cmd_search(const char *command, struct clicap_cmd *entry)
{
return irccmp(command, entry->cmd);
}
/* CAP entry point: look up parv[1] in the dispatch table (binary search,
 * hence the alphabetical-order requirement above) and invoke the handler
 * with the optional argument parv[2].  Unknown subcommands get
 * ERR_INVALIDCAPCMD. */
static int
m_cap(struct Client *client_p, struct Client *source_p, int parc, const char *parv[])
{
struct clicap_cmd *cmd;
if(!(cmd = bsearch(parv[1], clicap_cmdlist,
sizeof(clicap_cmdlist) / sizeof(struct clicap_cmd),
sizeof(struct clicap_cmd), (bqcmp) clicap_cmd_search))) {
sendto_one(source_p, form_str(ERR_INVALIDCAPCMD),
me.name, EmptyString(source_p->name) ? "*" : source_p->name,
parv[1]);
return 0;
}
(cmd->func)(source_p, parv[2]);
return 0;
}
|
Elemental-IRCd/elemental-ircd
|
modules/m_cap.c
|
C
|
gpl-2.0
| 12,207
|
<?php
/**
 * ActiveRecord model backed by the 'sportscatagory' lookup table.
 *
 * Table columns:
 * @property integer $sportscatagory_id
 * @property string $name
 */
class Sportscatagory extends CActiveRecord
{
	/**
	 * @return string name of the database table this model maps to
	 */
	public function tableName()
	{
		return 'sportscatagory';
	}

	/**
	 * Declares validation rules for massively-assigned attributes.
	 *
	 * @return array validation rules for model attributes.
	 */
	public function rules()
	{
		// Only attributes receiving user input need rules; the last rule
		// whitelists attributes usable by search().
		$rules = array(
			array('name', 'required'),
			array('name', 'length', 'max'=>255),
			array('sportscatagory_id, name', 'safe', 'on'=>'search'),
		);
		return $rules;
	}

	/**
	 * Declares relations to other ActiveRecord models.
	 *
	 * @return array relational rules (none for this table).
	 */
	public function relations()
	{
		return array();
	}

	/**
	 * Maps attribute names to the labels shown in forms and grids.
	 *
	 * @return array customized attribute labels (name=>label)
	 */
	public function attributeLabels()
	{
		$labels = array(
			'sportscatagory_id' => 'Sportscatagory',
			'name' => 'Name',
		);
		return $labels;
	}

	/**
	 * Builds a data provider filtered by the current attribute values,
	 * suitable for CGridView/CListView.
	 *
	 * @return CActiveDataProvider the data provider that can return the
	 * models based on the search/filter conditions.
	 */
	public function search()
	{
		$filter = new CDbCriteria;
		$filter->compare('sportscatagory_id', $this->sportscatagory_id);
		$filter->compare('name', $this->name, true);

		$provider = new CActiveDataProvider($this, array(
			'criteria' => $filter,
		));
		return $provider;
	}

	/**
	 * Returns the static model of the specified AR class.
	 * Please note that you should have this exact method in all your CActiveRecord descendants!
	 *
	 * @param string $className active record class name.
	 * @return Sportscatagory the static model class
	 */
	public static function model($className=__CLASS__)
	{
		return parent::model($className);
	}
}
|
livingdreams/gamechanger
|
gamechanger_admin/protected/models/Sportscatagory.php
|
PHP
|
gpl-2.0
| 2,555
|
# OeQ autogenerated correlation for 'Window/Wall Ratio South in Correlation to the Building Age'
import math
import numpy as np
from . import oeqCorrelation as oeq
def get(*xin):
    """OeQ autogenerated correlation: 'Window to Wall Ratio in Southern
    Direction' as a function of building age.

    ``*xin`` is forwarded unchanged to the correlation's ``lookup``.
    Returns a dict with the single key ``A_WIN_S_BY_AW``.
    """
    # Fitted polynomial coefficients for the linear-mode correlation.
    coefficients = dict(
        const=20818.6194135,
        a=-42.6513518642,
        b=0.0327511835635,
        c=-1.11718058834e-05,
        d=1.42836626434e-09,
        mode="lin",
    )
    window_wall_ratio_south = oeq.correlation(**coefficients)
    return {"A_WIN_S_BY_AW": window_wall_ratio_south.lookup(*xin)}
|
UdK-VPT/Open_eQuarter
|
mole3/stat_corr/window_wall_ratio_south_MFH_by_building_age_correlation.py
|
Python
|
gpl-2.0
| 521
|
@CHARSET "ISO-8859-1";
/* Page heading. */
h2 {
	text-align: center;
	font-family: "Times New Roman";
	font-size: 30px;
}

/* Field labels (height / azimuth / downtilt) — identical styling, so the
 * three previously duplicated rules are grouped into one.
 * "\3A " is the CSS escape for ':' in the JSF-generated ids. */
#mainform\3A heightlabel,
#mainform\3A azimuthlabel,
#mainform\3A downtiltlabel {
	margin-left: 30%;
	margin-top: 20%;
	font-family: "Times New Roman";
	font-size: 25px;
}

/* Inline validation messages shown next to each field. */
#mainform\3A heightvalidatormessage,
#mainform\3A azimuthvalidatormessage,
#mainform\3A downtiltvalidatormessage {
	margin-left: 1%;
	font-family: "Times New Roman";
	font-size: 15px;
	color: red;
}

/* Text inputs for the three antenna parameters. */
#mainform\3A heighttextbox,
#mainform\3A azimuthtextbox,
#mainform\3A downtilttextbox {
	margin-left: 30%;
	margin-top: 1%;
	width: 15%;
	height: 10%;
	font-family: "Times New Roman";
	font-size: 20px;
}
|
peterzhu2118/base-station-database-project
|
target/war/resources/css/editantenna.css
|
CSS
|
gpl-2.0
| 1,272
|
/* Target-dependent code for NetBSD/i386.
Copyright (C) 1988, 1989, 1991, 1992, 1994, 1996, 2000, 2001, 2002,
2003, 2004
Free Software Foundation, Inc.
This file is part of GDB.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor,
Boston, MA 02110-1301, USA. */
#include "defs.h"
#include "arch-utils.h"
#include "frame.h"
#include "gdbcore.h"
#include "regcache.h"
#include "regset.h"
#include "osabi.h"
#include "symtab.h"
#include "gdb_assert.h"
#include "gdb_string.h"
#include "i386-tdep.h"
#include "i387-tdep.h"
#include "nbsd-tdep.h"
#include "solib-svr4.h"
/* From <machine/reg.h>. */
/* Byte offsets of each GDB i386 register (in GDB's register numbering
 * order) within NetBSD's `struct reg'; installed as gregset_reg_offset
 * in i386nbsd_init_abi below. */
static int i386nbsd_r_reg_offset[] =
{
0 * 4, /* %eax */
1 * 4, /* %ecx */
2 * 4, /* %edx */
3 * 4, /* %ebx */
4 * 4, /* %esp */
5 * 4, /* %ebp */
6 * 4, /* %esi */
7 * 4, /* %edi */
8 * 4, /* %eip */
9 * 4, /* %eflags */
10 * 4, /* %cs */
11 * 4, /* %ss */
12 * 4, /* %ds */
13 * 4, /* %es */
14 * 4, /* %fs */
15 * 4 /* %gs */
};
/* Supply registers from a NetBSD a.out core: the single blob holds the
 * general-purpose registers followed immediately by an i387 FSAVE area. */
static void
i386nbsd_aout_supply_regset (const struct regset *regset,
struct regcache *regcache, int regnum,
const void *regs, size_t len)
{
const struct gdbarch_tdep *tdep = gdbarch_tdep (regset->arch);
gdb_assert (len >= tdep->sizeof_gregset + I387_SIZEOF_FSAVE);
i386_supply_gregset (regset, regcache, regnum, regs, tdep->sizeof_gregset);
i387_supply_fsave (regcache, regnum, (char *) regs + tdep->sizeof_gregset);
}
/* Return the (lazily allocated, cached in tdep) regset for a core-file
 * section, or NULL if the section is not the combined ".reg" blob. */
static const struct regset *
i386nbsd_aout_regset_from_core_section (struct gdbarch *gdbarch,
const char *sect_name,
size_t sect_size)
{
struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
/* NetBSD a.out core dumps don't use separate register sets for the
general-purpose and floating-point registers. */
if (strcmp (sect_name, ".reg") == 0
&& sect_size >= tdep->sizeof_gregset + I387_SIZEOF_FSAVE)
{
if (tdep->gregset == NULL)
tdep->gregset =
regset_alloc (gdbarch, i386nbsd_aout_supply_regset, NULL);
return tdep->gregset;
}
return NULL;
}
/* Under NetBSD/i386, signal handler invocations can be identified by the
designated code sequence that is used to return from a signal handler.
In particular, the return address of a signal handler points to the
following code sequence:
leal 0x10(%esp), %eax
pushl %eax
pushl %eax
movl $0x127, %eax # __sigreturn14
int $0x80
Each instruction has a unique encoding, so we simply attempt to match
the instruction the PC is pointing to with any of the above instructions.
If there is a hit, we know the offset to the start of the designated
sequence and can then check whether we really are executing in the
signal trampoline. If not, -1 is returned, otherwise the offset from the
start of the return sequence is returned. */
/* First opcode byte of each of the five instructions above... */
#define RETCODE_INSN1 0x8d
#define RETCODE_INSN2 0x50
#define RETCODE_INSN3 0x50
#define RETCODE_INSN4 0xb8
#define RETCODE_INSN5 0xcd
/* ...and each instruction's byte offset from the start of the sequence
 * (INSN1 starts at offset 0). */
#define RETCODE_INSN2_OFF 4
#define RETCODE_INSN3_OFF 5
#define RETCODE_INSN4_OFF 6
#define RETCODE_INSN5_OFF 11
/* The full 13-byte return sequence, used for the final memcmp check. */
static const unsigned char sigtramp_retcode[] =
{
RETCODE_INSN1, 0x44, 0x24, 0x10,
RETCODE_INSN2,
RETCODE_INSN3,
RETCODE_INSN4, 0x27, 0x01, 0x00, 0x00,
RETCODE_INSN5, 0x80,
};
/* If PC (of the frame before NEXT_FRAME) lies inside the sigreturn code
 * sequence, return its offset from the sequence start; otherwise -1. */
static LONGEST
i386nbsd_sigtramp_offset (struct frame_info *next_frame)
{
CORE_ADDR pc = frame_pc_unwind (next_frame);
unsigned char ret[sizeof(sigtramp_retcode)], insn;
LONGEST off;
int i;
if (!safe_frame_unwind_memory (next_frame, pc, &insn, 1))
return -1;
switch (insn)
{
case RETCODE_INSN1:
off = 0;
break;
case RETCODE_INSN2:
/* INSN2 and INSN3 are the same. Read at the location of PC+1
to determine if we're actually looking at INSN2 or INSN3. */
if (!safe_frame_unwind_memory (next_frame, pc + 1, &insn, 1))
return -1;
if (insn == RETCODE_INSN3)
off = RETCODE_INSN2_OFF;
else
off = RETCODE_INSN3_OFF;
break;
case RETCODE_INSN4:
off = RETCODE_INSN4_OFF;
break;
case RETCODE_INSN5:
off = RETCODE_INSN5_OFF;
break;
default:
return -1;
}
/* Back up to where the sequence should start and verify all 13 bytes. */
pc -= off;
if (!safe_frame_unwind_memory (next_frame, pc, ret, sizeof (ret)))
return -1;
if (memcmp (ret, sigtramp_retcode, sizeof (ret)) == 0)
return off;
return -1;
}
/* Return whether the frame preceding NEXT_FRAME corresponds to a
NetBSD sigtramp routine.  Checks both the symbol name (nbsd_pc_in_sigtramp)
and the on-stack return-code match above. */
static int
i386nbsd_sigtramp_p (struct frame_info *next_frame)
{
CORE_ADDR pc = frame_pc_unwind (next_frame);
char *name;
find_pc_partial_function (pc, &name, NULL, NULL);
return (nbsd_pc_in_sigtramp (pc, name)
|| i386nbsd_sigtramp_offset (next_frame) >= 0);
}
/* From <machine/signal.h>. */
/* Byte offsets of each GDB i386 register within NetBSD's
 * `struct sigcontext' (note: a different layout than `struct reg' above). */
int i386nbsd_sc_reg_offset[] =
{
10 * 4, /* %eax */
9 * 4, /* %ecx */
8 * 4, /* %edx */
7 * 4, /* %ebx */
14 * 4, /* %esp */
6 * 4, /* %ebp */
5 * 4, /* %esi */
4 * 4, /* %edi */
11 * 4, /* %eip */
13 * 4, /* %eflags */
12 * 4, /* %cs */
15 * 4, /* %ss */
3 * 4, /* %ds */
2 * 4, /* %es */
1 * 4, /* %fs */
0 * 4 /* %gs */
};
/* Common NetBSD/i386 ABI setup, shared by the a.out and ELF variants:
 * starts from the generic i386 BSD ABI and overrides the register layout,
 * sigtramp detection, struct-return convention and sigcontext offsets. */
static void
i386nbsd_init_abi (struct gdbarch_info info, struct gdbarch *gdbarch)
{
struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
/* Obviously NetBSD is BSD-based. */
i386bsd_init_abi (info, gdbarch);
/* NetBSD has a different `struct reg'. */
tdep->gregset_reg_offset = i386nbsd_r_reg_offset;
tdep->gregset_num_regs = ARRAY_SIZE (i386nbsd_r_reg_offset);
tdep->sizeof_gregset = 16 * 4;
/* NetBSD has different signal trampoline conventions. */
tdep->sigtramp_start = 0;
tdep->sigtramp_end = 0;
tdep->sigtramp_p = i386nbsd_sigtramp_p;
/* NetBSD uses -freg-struct-return by default. */
tdep->struct_return = reg_struct_return;
/* NetBSD has a `struct sigcontext' that's different from the
original 4.3 BSD. */
tdep->sc_reg_offset = i386nbsd_sc_reg_offset;
tdep->sc_num_regs = ARRAY_SIZE (i386nbsd_sc_reg_offset);
}
/* NetBSD a.out. */
/* a.out variant: common NetBSD setup plus the combined ".reg" core
 * section handler (GPRs + FPU in one blob). */
static void
i386nbsdaout_init_abi (struct gdbarch_info info, struct gdbarch *gdbarch)
{
i386nbsd_init_abi (info, gdbarch);
/* NetBSD a.out has a single register set. */
set_gdbarch_regset_from_core_section
(gdbarch, i386nbsd_aout_regset_from_core_section);
}
/* NetBSD ELF. */
/* ELF variant: common NetBSD setup plus generic i386 ELF conventions,
 * SVR4 shared-library support and the pcc struct-return convention. */
static void
i386nbsdelf_init_abi (struct gdbarch_info info, struct gdbarch *gdbarch)
{
struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
/* It's still NetBSD. */
i386nbsd_init_abi (info, gdbarch);
/* But ELF-based. */
i386_elf_init_abi (info, gdbarch);
/* NetBSD ELF uses SVR4-style shared libraries. */
set_solib_svr4_fetch_link_map_offsets
(gdbarch, svr4_ilp32_fetch_link_map_offsets);
/* NetBSD ELF uses -fpcc-struct-return by default. */
tdep->struct_return = pcc_struct_return;
}
/* Module entry point: register both OS/ABI handlers with GDB. */
void
_initialize_i386nbsd_tdep (void)
{
gdbarch_register_osabi (bfd_arch_i386, 0, GDB_OSABI_NETBSD_AOUT,
i386nbsdaout_init_abi);
gdbarch_register_osabi (bfd_arch_i386, 0, GDB_OSABI_NETBSD_ELF,
i386nbsdelf_init_abi);
}
|
ipwndev/DSLinux-Mirror
|
user/gdb/gdb/i386nbsd-tdep.c
|
C
|
gpl-2.0
| 7,697
|
<?php
namespace Drupal\FunctionalTests\Update;
use Drupal\Component\Utility\Crypt;
use Drupal\Core\Test\TestRunnerKernel;
use Drupal\Tests\BrowserTestBase;
use Drupal\Tests\SchemaCheckTestTrait;
use Drupal\Core\Database\Database;
use Drupal\Core\DependencyInjection\ContainerBuilder;
use Drupal\Core\Language\Language;
use Drupal\Core\Url;
use Drupal\user\Entity\User;
use Symfony\Component\DependencyInjection\Reference;
use Symfony\Component\HttpFoundation\Request;
/**
* Provides a base class for writing an update test.
*
* To write an update test:
* - Write the hook_update_N() implementations that you are testing.
* - Create one or more database dump files, which will set the database to the
* "before updates" state. Normally, these will add some configuration data to
* the database, set up some tables/fields, etc.
* - Create a class that extends this class.
* - Ensure that the test is in the legacy group. Tests can have multiple
* groups.
* - In your setUp() method, point the $this->databaseDumpFiles variable to the
* database dump files, and then call parent::setUp() to run the base setUp()
* method in this class.
* - In your test method, call $this->runUpdates() to run the necessary updates,
* and then use test assertions to verify that the result is what you expect.
* - In order to test both with a "bare" database dump as well as with a
* database dump filled with content, extend your update path test class with
* a new test class that overrides the bare database dump. Refer to
* UpdatePathTestBaseFilledTest for an example.
*
* @ingroup update_api
*
* @see hook_update_N()
*/
abstract class UpdatePathTestBase extends BrowserTestBase {
  // Provides assertConfigSchema(), used to validate all configuration
  // objects once the updates have completed.
  use SchemaCheckTestTrait;
  /**
   * Modules to enable after the database is loaded.
   */
  protected static $modules = [];
  /**
   * The file path(s) to the dumped database(s) to load into the child site.
   *
   * The file system/tests/fixtures/update/drupal-8.bare.standard.php.gz is
   * normally included first -- this sets up the base database from a bare
   * standard Drupal installation.
   *
   * The file system/tests/fixtures/update/drupal-8.filled.standard.php.gz
   * can also be used in case we want to test with a database filled with
   * content, and with all core modules enabled.
   *
   * @var array
   */
  protected $databaseDumpFiles = [];
  /**
   * The install profile used in the database dump file.
   *
   * @var string
   */
  protected $installProfile = 'standard';
  /**
   * Flag that indicates whether the child site has been updated.
   *
   * @var bool
   */
  protected $upgradedSite = FALSE;
  /**
   * Array of errors triggered during the update process.
   *
   * @var array
   */
  protected $upgradeErrors = [];
  /**
   * Array of modules loaded when the test starts.
   *
   * @var array
   */
  protected $loadedModules = [];
  /**
   * Flag to indicate whether zlib is installed or not.
   *
   * @var bool
   */
  protected $zlibInstalled = TRUE;
  /**
   * Flag to indicate whether there are pending updates or not.
   *
   * @var bool
   */
  protected $pendingUpdates = TRUE;
  /**
   * The update URL.
   *
   * @var string
   */
  protected $updateUrl;
  /**
   * Disable strict config schema checking.
   *
   * The schema is verified at the end of running the update.
   *
   * @var bool
   */
  protected $strictConfigSchema = FALSE;
  /**
   * Fail the test if there are failed updates.
   *
   * @var bool
   */
  protected $checkFailedUpdates = TRUE;
  /**
   * Constructs an UpdatePathTestCase object.
   *
   * @param $test_id
   *   (optional) The ID of the test. Tests with the same id are reported
   *   together.
   */
  public function __construct($test_id = NULL) {
    parent::__construct($test_id);
    // Database dump fixtures may be gzipped (.gz); remember whether zlib is
    // available so setUp()/runUpdates() can bail out gracefully if not.
    $this->zlibInstalled = function_exists('gzopen');
  }
  /**
   * Overrides WebTestBase::setUp() for update testing.
   *
   * The main difference in this method is that rather than performing the
   * installation via the installer, a database is loaded. Additional work is
   * then needed to set various things such as the config directories and the
   * container that would normally be done via the installer.
   */
  protected function setUp() {
    $request = Request::createFromGlobals();
    // Boot up Drupal into a state where calling the database API is possible.
    // This is used to initialize the database system, so we can load the dump
    // files.
    $autoloader = require $this->root . '/autoload.php';
    $kernel = TestRunnerKernel::createFromRequest($request, $autoloader);
    $kernel->loadLegacyIncludes();
    // Set the update url. This must be set here rather than in
    // self::__construct() or the old URL generator will leak additional test
    // sites.
    $this->updateUrl = Url::fromRoute('system.db_update');
    $this->setupBaseUrl();
    // Install Drupal test site.
    $this->prepareEnvironment();
    $this->runDbTasks();
    // Allow classes to set database dump files.
    $this->setDatabaseDumpFiles();
    // We are going to set a missing zlib requirement property for usage
    // during the performUpgrade() and tearDown() methods. Also set that the
    // tests failed.
    if (!$this->zlibInstalled) {
      parent::setUp();
      return;
    }
    $this->installDrupal();
    // Add the config directories to settings.php.
    drupal_install_config_directories();
    // Set the container. parent::rebuildAll() would normally do this, but this
    // not safe to do here, because the database has not been updated yet.
    $this->container = \Drupal::getContainer();
    $this->replaceUser1();
    require_once $this->root . '/core/includes/update.inc';
    // Setup Mink.
    $this->initMink();
    // Set up the browser test output file.
    $this->initBrowserOutputFile();
  }
  /**
   * {@inheritdoc}
   */
  public function installDrupal() {
    // Unlike the parent, the actual installer is never run: doInstall() below
    // loads the database dump(s) instead.
    $this->initUserSession();
    $this->prepareSettings();
    $this->doInstall();
    $this->initSettings();
    $request = Request::createFromGlobals();
    $container = $this->initKernel($request);
    $this->initConfig($container);
  }
  /**
   * {@inheritdoc}
   */
  protected function doInstall() {
    $this->runDbTasks();
    // Allow classes to set database dump files.
    $this->setDatabaseDumpFiles();
    // Load the database(s).
    foreach ($this->databaseDumpFiles as $file) {
      if (substr($file, -3) == '.gz') {
        // Let PHP's stream wrapper transparently decompress gzipped dumps.
        $file = "compress.zlib://$file";
      }
      require $file;
    }
  }
  /**
   * {@inheritdoc}
   */
  protected function initFrontPage() {
    // Do nothing as Drupal is not installed yet.
  }
  /**
   * Set database dump files to be used.
   */
  abstract protected function setDatabaseDumpFiles();
  /**
   * Add settings that are missed since the installer isn't run.
   */
  protected function prepareSettings() {
    parent::prepareSettings();
    // Remember the profile which was used.
    $settings['settings']['install_profile'] = (object) [
      'value' => $this->installProfile,
      'required' => TRUE,
    ];
    // Generate a hash salt.
    $settings['settings']['hash_salt'] = (object) [
      'value'    => Crypt::randomBytesBase64(55),
      'required' => TRUE,
    ];
    // Since the installer isn't run, add the database settings here too.
    $settings['databases']['default'] = (object) [
      'value' => Database::getConnectionInfo(),
      'required' => TRUE,
    ];
    $this->writeSettings($settings);
  }
  /**
   * Helper function to run pending database updates.
   */
  protected function runUpdates() {
    if (!$this->zlibInstalled) {
      $this->fail('Missing zlib requirement for update tests.');
      return FALSE;
    }
    // The site might be broken at the time so logging in using the UI might
    // not work, so we use the API itself.
    drupal_rewrite_settings([
      'settings' => [
        'update_free_access' => (object) [
          'value' => TRUE,
          'required' => TRUE,
        ],
      ],
    ]);
    // Load update.php and step through the confirmation form.
    $this->drupalGet($this->updateUrl);
    $this->clickLink(t('Continue'));
    $this->doSelectionTest();
    // Run the update hooks.
    $this->clickLink(t('Apply pending updates'));
    $this->checkForMetaRefresh();
    // Ensure there are no failed updates.
    if ($this->checkFailedUpdates) {
      $failure = $this->cssSelect('.failure');
      if ($failure) {
        $this->fail('The update failed with the following message: "' . reset($failure)->getText() . '"');
      }
      // Ensure that there are no pending updates.
      foreach (['update', 'post_update'] as $update_type) {
        switch ($update_type) {
          case 'update':
            $all_updates = update_get_update_list();
            break;
          case 'post_update':
            $all_updates = \Drupal::service('update.post_update_registry')->getPendingUpdateInformation();
            break;
        }
        foreach ($all_updates as $module => $updates) {
          if (!empty($updates['pending'])) {
            foreach (array_keys($updates['pending']) as $update_name) {
              $this->fail("The $update_name() update function from the $module module did not run.");
            }
          }
        }
      }
    }
    // Ensure that the container is updated if any modules are installed or
    // uninstalled during the update.
    /** @var \Drupal\Core\Extension\ModuleHandlerInterface $module_handler */
    $module_handler = $this->container->get('module_handler');
    $config_module_list = $this->config('core.extension')->get('module');
    $module_handler_list = $module_handler->getModuleList();
    $modules_installed = FALSE;
    // Modules that are in configuration but not the module handler have been
    // installed.
    foreach (array_keys(array_diff_key($config_module_list, $module_handler_list)) as $module) {
      $module_handler->addModule($module, drupal_get_path('module', $module));
      $modules_installed = TRUE;
    }
    $modules_uninstalled = FALSE;
    $module_handler_list = $module_handler->getModuleList();
    // Modules that are in the module handler but not configuration have been
    // uninstalled.
    foreach (array_keys(array_diff_key($module_handler_list, $config_module_list)) as $module) {
      $modules_uninstalled = TRUE;
      unset($module_handler_list[$module]);
    }
    if ($modules_installed || $modules_uninstalled) {
      // Note that resetAll() does not reset the kernel module list so we
      // have to do that manually.
      $this->kernel->updateModules($module_handler_list, $module_handler_list);
    }
    // If we have successfully clicked 'Apply pending updates' then we need to
    // clear the caches in the update test runner as this has occurred as part
    // of the updates.
    $this->resetAll();
    // The config schema can be incorrect while the update functions are being
    // executed. But once the update has been completed, it needs to be valid
    // again. Assert the schema of all configuration objects now.
    $names = $this->container->get('config.storage')->listAll();
    /** @var \Drupal\Core\Config\TypedConfigManagerInterface $typed_config */
    $typed_config = $this->container->get('config.typed');
    foreach ($names as $name) {
      $config = $this->config($name);
      $this->assertConfigSchema($typed_config, $name, $config->get());
    }
    // Ensure that the update hooks updated all entity schema.
    $needs_updates = \Drupal::entityDefinitionUpdateManager()->needsUpdates();
    if ($needs_updates) {
      foreach (\Drupal::entityDefinitionUpdateManager()->getChangeSummary() as $entity_type_id => $summary) {
        $entity_type_label = \Drupal::entityTypeManager()->getDefinition($entity_type_id)->getLabel();
        foreach ($summary as $message) {
          $this->fail("$entity_type_label: $message");
        }
      }
      // The above calls to `fail()` should prevent this from ever being
      // called, but it is here in case something goes really wrong.
      $this->assertFalse($needs_updates, 'After all updates ran, entity schema is up to date.');
    }
  }
  /**
   * Runs the install database tasks for the driver used by the test runner.
   */
  protected function runDbTasks() {
    // Create a minimal container so that t() works.
    // @see install_begin_request()
    $container = new ContainerBuilder();
    $container->setParameter('language.default_values', Language::$defaultValues);
    $container
      ->register('language.default', 'Drupal\Core\Language\LanguageDefault')
      ->addArgument('%language.default_values%');
    $container
      ->register('string_translation', 'Drupal\Core\StringTranslation\TranslationManager')
      ->addArgument(new Reference('language.default'));
    \Drupal::setContainer($container);
    require_once __DIR__ . '/../../../../includes/install.inc';
    $connection = Database::getConnection();
    $errors = db_installer_object($connection->driver())->runTasks();
    if (!empty($errors)) {
      $this->fail('Failed to run installer database tasks: ' . implode(', ', $errors));
    }
  }
  /**
   * Replace User 1 with the user created here.
   */
  protected function replaceUser1() {
    /** @var \Drupal\user\UserInterface $account */
    // @todo: Saving the account before the update is problematic.
    //   https://www.drupal.org/node/2560237
    $account = User::load(1);
    $account->setPassword($this->rootUser->pass_raw);
    $account->setEmail($this->rootUser->getEmail());
    $account->setUsername($this->rootUser->getUsername());
    $account->save();
  }
  /**
   * Tests the selection page.
   */
  protected function doSelectionTest() {
    // No-op. Tests wishing to do test the selection page or the general
    // update.php environment before running update.php can override this method
    // and implement their required tests.
  }
}
|
dekisha/dartlamp
|
core/tests/Drupal/FunctionalTests/Update/UpdatePathTestBase.php
|
PHP
|
gpl-2.0
| 13,997
|
// rdadd_cart.h
//
// Add a Rivendell Cart
//
// (C) Copyright 2002-2004 Fred Gleason <fredg@paravelsystems.com>
//
// $Id: rdadd_cart.h,v 1.3 2007/02/14 21:48:41 fredg Exp $
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License version 2 as
// published by the Free Software Foundation.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public
// License along with this program; if not, write to the Free Software
// Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
//
#ifndef RDADD_CART_H
#define RDADD_CART_H
#include <qdialog.h>
#include <qcombobox.h>
#include <qlineedit.h>
#include <rdcart.h>
#include <rduser.h>
// Modal dialog for creating a new Rivendell cart: prompts the user for a
// cart number, group, type and title.
class RDAddCart : public QDialog
{
 Q_OBJECT
 public:
  // 'group', 'type' and 'title' are pointers retained by the dialog
  // (stored in cart_group/cart_type/cart_title below); presumably they are
  // written back with the user's choices on OK -- confirm in rdadd_cart.cpp.
  RDAddCart(QString *group,RDCart::Type *type,QString *title,
	    const QString &username,QWidget *parent=0,const char *name=0);
  QSize sizeHint() const;
  QSizePolicy sizePolicy() const;
 private slots:
  void groupActivatedData(const QString &);
  void okData();
  void cancelData();
 protected:
  void closeEvent(QCloseEvent *e);
 private:
  QLineEdit *cart_number_edit;   // cart number entry field
  QComboBox *cart_group_box;     // group selector
  QComboBox *cart_type_box;      // cart type selector
  QLineEdit *cart_title_edit;    // cart title entry field
  QString *cart_group;           // caller-owned result pointers (see ctor)
  RDCart::Type *cart_type;
  QString *cart_title;
};
#endif // RDADD_CART_H
|
stgabmp/Rivendell
|
lib/rdadd_cart.h
|
C
|
gpl-2.0
| 1,635
|
<?php
namespace GuzzleHttp\Psr7;
use Psr\Http\Message\UriInterface;
/**
* Basic PSR-7 URI implementation.
*
* @link https://github.com/phly/http This class is based upon
* Matthew Weier O'Phinney's URI implementation in phly/http.
*/
class Uri implements UriInterface
{
    // Default ports for known schemes; used to suppress redundant ports in
    // the authority (see isNonStandardPort()).
    private static $schemes = [
        'http' => 80,
        'https' => 443,
    ];
    private static $charUnreserved = 'a-zA-Z0-9_\-\.~';
    private static $charSubDelims = '!\$&\'\(\)\*\+,;=';
    // Characters that must be percent-encoded when embedded in a query
    // key/value by withQueryValue()/withoutQueryValue().
    private static $replaceQuery = ['=' => '%3D', '&' => '%26'];
    /** @var string Uri scheme. */
    private $scheme = '';
    /** @var string Uri user info. */
    private $userInfo = '';
    /** @var string Uri host. */
    private $host = '';
    /** @var int|null Uri port. */
    private $port;
    /** @var string Uri path. */
    private $path = '';
    /** @var string Uri query string. */
    private $query = '';
    /** @var string Uri fragment. */
    private $fragment = '';
    /**
     * @param string $uri URI to parse and wrap.
     *
     * @throws \InvalidArgumentException If parse_url() cannot parse the URI.
     */
    public function __construct($uri = '')
    {
        if ($uri != null) {
            $parts = parse_url($uri);
            if ($parts === false) {
                throw new \InvalidArgumentException("Unable to parse URI: $uri");
            }
            $this->applyParts($parts);
        }
    }
    public function __toString()
    {
        return self::createUriString(
            $this->scheme,
            $this->getAuthority(),
            $this->getPath(),
            $this->query,
            $this->fragment
        );
    }
    /**
     * Removes dot segments from a path and returns the new path.
     *
     * @param string $path
     *
     * @return string
     * @link http://tools.ietf.org/html/rfc3986#section-5.2.4
     */
    public static function removeDotSegments($path)
    {
        static $noopPaths = ['' => true, '/' => true, '*' => true];
        static $ignoreSegments = ['.' => true, '..' => true];
        if (isset($noopPaths[$path])) {
            return $path;
        }
        $results = [];
        $segments = explode('/', $path);
        foreach ($segments as $segment) {
            if ($segment == '..') {
                // '..' removes the previous segment (no-op at the start).
                array_pop($results);
            } elseif (!isset($ignoreSegments[$segment])) {
                $results[] = $segment;
            }
        }
        $newPath = implode('/', $results);
        // Add the leading slash if necessary
        if (substr($path, 0, 1) === '/' &&
            substr($newPath, 0, 1) !== '/'
        ) {
            $newPath = '/' . $newPath;
        }
        // Add the trailing slash if necessary
        if ($newPath != '/' && isset($ignoreSegments[end($segments)])) {
            $newPath .= '/';
        }
        return $newPath;
    }
    /**
     * Resolve a base URI with a relative URI and return a new URI.
     *
     * Follows the component-selection rules of RFC 3986 section 5.2.2.
     *
     * @param UriInterface $base Base URI
     * @param string $rel Relative URI
     *
     * @return UriInterface
     */
    public static function resolve(UriInterface $base, $rel)
    {
        if ($rel === null || $rel === '') {
            return $base;
        }
        if (!($rel instanceof UriInterface)) {
            $rel = new self($rel);
        }
        // Return the relative uri as-is if it has a scheme.
        if ($rel->getScheme()) {
            return $rel->withPath(static::removeDotSegments($rel->getPath()));
        }
        $relParts = [
            'scheme' => $rel->getScheme(),
            'authority' => $rel->getAuthority(),
            'path' => $rel->getPath(),
            'query' => $rel->getQuery(),
            'fragment' => $rel->getFragment()
        ];
        $parts = [
            'scheme' => $base->getScheme(),
            'authority' => $base->getAuthority(),
            'path' => $base->getPath(),
            'query' => $base->getQuery(),
            'fragment' => $base->getFragment()
        ];
        if (!empty($relParts['authority'])) {
            $parts['authority'] = $relParts['authority'];
            $parts['path'] = self::removeDotSegments($relParts['path']);
            $parts['query'] = $relParts['query'];
            $parts['fragment'] = $relParts['fragment'];
        } elseif (!empty($relParts['path'])) {
            if (substr($relParts['path'], 0, 1) == '/') {
                // Absolute path: replaces the base path entirely.
                $parts['path'] = self::removeDotSegments($relParts['path']);
                $parts['query'] = $relParts['query'];
                $parts['fragment'] = $relParts['fragment'];
            } else {
                // Relative path: merge with the base path (RFC 3986 5.3 "merge").
                if (!empty($parts['authority']) && empty($parts['path'])) {
                    $mergedPath = '/';
                } else {
                    $mergedPath = substr($parts['path'], 0, strrpos($parts['path'], '/') + 1);
                }
                $parts['path'] = self::removeDotSegments($mergedPath . $relParts['path']);
                $parts['query'] = $relParts['query'];
                $parts['fragment'] = $relParts['fragment'];
            }
        } elseif (!empty($relParts['query'])) {
            $parts['query'] = $relParts['query'];
        } elseif ($relParts['fragment'] != null) {
            $parts['fragment'] = $relParts['fragment'];
        }
        return new self(self::createUriString(
            $parts['scheme'],
            $parts['authority'],
            $parts['path'],
            $parts['query'],
            $parts['fragment']
        ));
    }
    /**
     * Create a new URI with a specific query string value removed.
     *
     * Any existing query string values that exactly match the provided key are
     * removed.
     *
     * Note: this function will convert "=" to "%3D" and "&" to "%26".
     *
     * @param UriInterface $uri URI to use as a base.
     * @param string $key Query string key value pair to remove.
     *
     * @return UriInterface
     */
    public static function withoutQueryValue(UriInterface $uri, $key)
    {
        $current = $uri->getQuery();
        if (!$current) {
            return $uri;
        }
        $result = [];
        foreach (explode('&', $current) as $part) {
            // Keep every pair whose key (text before '=') differs from $key.
            if (explode('=', $part)[0] !== $key) {
                $result[] = $part;
            };
        }
        return $uri->withQuery(implode('&', $result));
    }
    /**
     * Create a new URI with a specific query string value.
     *
     * Any existing query string values that exactly match the provided key are
     * removed and replaced with the given key value pair.
     *
     * Note: this function will convert "=" to "%3D" and "&" to "%26".
     *
     * @param UriInterface $uri URI to use as a base.
     * @param string $key Key to set.
     * @param string $value Value to set.
     *
     * @return UriInterface
     */
    public static function withQueryValue(UriInterface $uri, $key, $value)
    {
        $current = $uri->getQuery();
        $key = strtr($key, self::$replaceQuery);
        if (!$current) {
            $result = [];
        } else {
            $result = [];
            foreach (explode('&', $current) as $part) {
                if (explode('=', $part)[0] !== $key) {
                    $result[] = $part;
                };
            }
        }
        // A null value produces a bare key with no '=value' suffix.
        if ($value !== null) {
            $result[] = $key . '=' . strtr($value, self::$replaceQuery);
        } else {
            $result[] = $key;
        }
        return $uri->withQuery(implode('&', $result));
    }
    /**
     * Create a URI from a hash of parse_url parts.
     *
     * @param array $parts
     *
     * @return self
     */
    public static function fromParts(array $parts)
    {
        $uri = new self();
        $uri->applyParts($parts);
        return $uri;
    }
    public function getScheme()
    {
        return $this->scheme;
    }
    public function getAuthority()
    {
        if (empty($this->host)) {
            return '';
        }
        $authority = $this->host;
        if (!empty($this->userInfo)) {
            $authority = $this->userInfo . '@' . $authority;
        }
        // Only expose the port when it is not the scheme's default.
        if ($this->isNonStandardPort($this->scheme, $this->host, $this->port)) {
            $authority .= ':' . $this->port;
        }
        return $authority;
    }
    public function getUserInfo()
    {
        return $this->userInfo;
    }
    public function getHost()
    {
        return $this->host;
    }
    public function getPort()
    {
        return $this->port;
    }
    public function getPath()
    {
        return $this->path == null ? '' : $this->path;
    }
    public function getQuery()
    {
        return $this->query;
    }
    public function getFragment()
    {
        return $this->fragment;
    }
    public function withScheme($scheme)
    {
        $scheme = $this->filterScheme($scheme);
        if ($this->scheme === $scheme) {
            return $this;
        }
        $new = clone $this;
        $new->scheme = $scheme;
        // Re-filter the port: it may now be the new scheme's default.
        $new->port = $new->filterPort($new->scheme, $new->host, $new->port);
        return $new;
    }
    public function withUserInfo($user, $password = null)
    {
        $info = $user;
        if ($password) {
            $info .= ':' . $password;
        }
        if ($this->userInfo === $info) {
            return $this;
        }
        $new = clone $this;
        $new->userInfo = $info;
        return $new;
    }
    public function withHost($host)
    {
        if ($this->host === $host) {
            return $this;
        }
        $new = clone $this;
        $new->host = $host;
        return $new;
    }
    public function withPort($port)
    {
        $port = $this->filterPort($this->scheme, $this->host, $port);
        if ($this->port === $port) {
            return $this;
        }
        $new = clone $this;
        $new->port = $port;
        return $new;
    }
    public function withPath($path)
    {
        if (!is_string($path)) {
            throw new \InvalidArgumentException(
                'Invalid path provided; must be a string'
            );
        }
        $path = $this->filterPath($path);
        if ($this->path === $path) {
            return $this;
        }
        $new = clone $this;
        $new->path = $path;
        return $new;
    }
    public function withQuery($query)
    {
        if (!is_string($query) && !method_exists($query, '__toString')) {
            throw new \InvalidArgumentException(
                'Query string must be a string'
            );
        }
        $query = (string) $query;
        if (substr($query, 0, 1) === '?') {
            $query = substr($query, 1);
        }
        $query = $this->filterQueryAndFragment($query);
        if ($this->query === $query) {
            return $this;
        }
        $new = clone $this;
        $new->query = $query;
        return $new;
    }
    public function withFragment($fragment)
    {
        if (substr($fragment, 0, 1) === '#') {
            $fragment = substr($fragment, 1);
        }
        $fragment = $this->filterQueryAndFragment($fragment);
        if ($this->fragment === $fragment) {
            return $this;
        }
        $new = clone $this;
        $new->fragment = $fragment;
        return $new;
    }
    /**
     * Apply parse_url parts to a URI.
     *
     * @param array $parts Array of parse_url parts to apply.
     */
    private function applyParts(array $parts)
    {
        $this->scheme = isset($parts['scheme'])
            ? $this->filterScheme($parts['scheme'])
            : '';
        $this->userInfo = isset($parts['user']) ? $parts['user'] : '';
        $this->host = isset($parts['host']) ? $parts['host'] : '';
        $this->port = !empty($parts['port'])
            ? $this->filterPort($this->scheme, $this->host, $parts['port'])
            : null;
        $this->path = isset($parts['path'])
            ? $this->filterPath($parts['path'])
            : '';
        $this->query = isset($parts['query'])
            ? $this->filterQueryAndFragment($parts['query'])
            : '';
        $this->fragment = isset($parts['fragment'])
            ? $this->filterQueryAndFragment($parts['fragment'])
            : '';
        if (isset($parts['pass'])) {
            $this->userInfo .= ':' . $parts['pass'];
        }
    }
    /**
     * Create a URI string from its various parts
     *
     * @param string $scheme
     * @param string $authority
     * @param string $path
     * @param string $query
     * @param string $fragment
     * @return string
     */
    private static function createUriString($scheme, $authority, $path, $query, $fragment)
    {
        $uri = '';
        if (!empty($scheme)) {
            $uri .= $scheme . ':';
        }
        $hierPart = '';
        if (!empty($authority)) {
            if (!empty($scheme)) {
                $hierPart .= '//';
            }
            $hierPart .= $authority;
        }
        if ($path != null) {
            // Add a leading slash if necessary.
            if ($hierPart && substr($path, 0, 1) !== '/') {
                $hierPart .= '/';
            }
            $hierPart .= $path;
        }
        $uri .= $hierPart;
        if ($query != null) {
            $uri .= '?' . $query;
        }
        if ($fragment != null) {
            $uri .= '#' . $fragment;
        }
        return $uri;
    }
    /**
     * Is a given port non-standard for the current scheme?
     *
     * @param string $scheme
     * @param string $host
     * @param int $port
     * @return bool
     */
    private static function isNonStandardPort($scheme, $host, $port)
    {
        if (!$scheme && $port) {
            return true;
        }
        if (!$host || !$port) {
            return false;
        }
        return !isset(self::$schemes[$scheme]) || $port !== self::$schemes[$scheme];
    }
    /**
     * @param string $scheme
     *
     * @return string
     */
    private function filterScheme($scheme)
    {
        $scheme = strtolower($scheme);
        // Tolerate inputs like "http://" or "http:" by stripping trailing
        // ':' and '/' characters.
        $scheme = rtrim($scheme, ':/');
        return $scheme;
    }
    /**
     * @param string $scheme
     * @param string $host
     * @param int $port
     *
     * @return int|null Null when the port is the scheme's default.
     *
     * @throws \InvalidArgumentException If the port is invalid.
     */
    private function filterPort($scheme, $host, $port)
    {
        if (null !== $port) {
            $port = (int) $port;
            if (1 > $port || 0xffff < $port) {
                throw new \InvalidArgumentException(
                    sprintf('Invalid port: %d. Must be between 1 and 65535', $port)
                );
            }
        }
        return $this->isNonStandardPort($scheme, $host, $port) ? $port : null;
    }
    /**
     * Filters the path of a URI
     *
     * Percent-encodes every character that is not unreserved, a sub-delim,
     * ':', '@', '/', '%', or part of an existing percent-encoded triplet.
     *
     * @param $path
     *
     * @return string
     */
    private function filterPath($path)
    {
        return preg_replace_callback(
            '/(?:[^' . self::$charUnreserved . self::$charSubDelims . ':@\/%]+|%(?![A-Fa-f0-9]{2}))/',
            [$this, 'rawurlencodeMatchZero'],
            $path
        );
    }
    /**
     * Filters the query string or fragment of a URI.
     *
     * Like filterPath(), but additionally allows '?' unencoded.
     *
     * @param $str
     *
     * @return string
     */
    private function filterQueryAndFragment($str)
    {
        return preg_replace_callback(
            '/(?:[^' . self::$charUnreserved . self::$charSubDelims . '%:@\/\?]+|%(?![A-Fa-f0-9]{2}))/',
            [$this, 'rawurlencodeMatchZero'],
            $str
        );
    }
    // preg_replace_callback() helper: percent-encode the whole match.
    private function rawurlencodeMatchZero(array $match)
    {
        return rawurlencode($match[0]);
    }
}
|
rignaneseleo/Strings.xml-builder-and-translator-PHP
|
vendor/guzzlehttp/psr7/src/Uri.php
|
PHP
|
gpl-2.0
| 16,217
|
/**
* @file
*
* @brief RTEMS Message Queue Name to Id
* @ingroup ClassicMessageQueue
*/
/*
* COPYRIGHT (c) 1989-1999.
* On-Line Applications Research Corporation (OAR).
*
* The license and distribution terms for this file may be
* found in the file LICENSE in this distribution or at
* http://www.rtems.com/license/LICENSE.
*/
#if HAVE_CONFIG_H
#include "config.h"
#endif
#include <rtems/system.h>
#include <rtems/score/sysstate.h>
#include <rtems/score/chain.h>
#include <rtems/score/isr.h>
#include <rtems/score/coremsg.h>
#include <rtems/score/object.h>
#include <rtems/score/states.h>
#include <rtems/score/thread.h>
#include <rtems/score/wkspace.h>
#if defined(RTEMS_MULTIPROCESSING)
#include <rtems/score/mpci.h>
#endif
#include <rtems/rtems/status.h>
#include <rtems/rtems/attr.h>
#include <rtems/rtems/message.h>
#include <rtems/rtems/options.h>
#include <rtems/rtems/support.h>
/*
 * Look up a message queue by name on the given node and return its object
 * id through *id, mapping the SuperCore lookup result onto a Classic API
 * status code.
 */
rtems_status_code rtems_message_queue_ident(
  rtems_name  name,
  uint32_t    node,
  rtems_id   *id
)
{
  Objects_Name_or_id_lookup_errors lookup_result;

  lookup_result =
    _Objects_Name_to_id_u32( &_Message_queue_Information, name, node, id );

  return _Status_Object_name_errors_to_status[ lookup_result ];
}
|
eugmes/rtems
|
cpukit/rtems/src/msgqident.c
|
C
|
gpl-2.0
| 1,223
|
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(c) 2017 Intel Corporation
*/
#include "gso_common.h"
#include "gso_tunnel_tcp4.h"
/*
 * Rewrite the headers of every GSO segment of a tunneled TCP/IPv4 packet:
 * the outer IPv4 ID, the outer UDP header (VxLAN only), the inner IPv4 ID
 * and the inner TCP sequence number are taken from the original packet
 * @pkt and advanced per output segment in @segs.
 */
static void
update_tunnel_ipv4_tcp_headers(struct rte_mbuf *pkt, uint8_t ipid_delta,
		struct rte_mbuf **segs, uint16_t nb_segs)
{
	struct rte_ipv4_hdr *ipv4_hdr;
	struct rte_tcp_hdr *tcp_hdr;
	uint32_t sent_seq;
	uint16_t outer_id, inner_id, tail_idx, i;
	uint16_t outer_ipv4_offset, inner_ipv4_offset;
	uint16_t udp_gre_offset, tcp_offset;
	uint8_t update_udp_hdr;
	/* Byte offsets from the start of the mbuf, following the layout:
	 * outer L2 | outer IPv4 | UDP(VxLAN)/GRE | inner L2 | inner IPv4 | TCP. */
	outer_ipv4_offset = pkt->outer_l2_len;
	udp_gre_offset = outer_ipv4_offset + pkt->outer_l3_len;
	inner_ipv4_offset = udp_gre_offset + pkt->l2_len;
	tcp_offset = inner_ipv4_offset + pkt->l3_len;
	/* Outer IPv4 header. */
	ipv4_hdr = (struct rte_ipv4_hdr *)(rte_pktmbuf_mtod(pkt, char *) +
			outer_ipv4_offset);
	outer_id = rte_be_to_cpu_16(ipv4_hdr->packet_id);
	/* Inner IPv4 header. */
	ipv4_hdr = (struct rte_ipv4_hdr *)(rte_pktmbuf_mtod(pkt, char *) +
			inner_ipv4_offset);
	inner_id = rte_be_to_cpu_16(ipv4_hdr->packet_id);
	tcp_hdr = (struct rte_tcp_hdr *)((char *)ipv4_hdr + pkt->l3_len);
	sent_seq = rte_be_to_cpu_32(tcp_hdr->sent_seq);
	tail_idx = nb_segs - 1;
	/* Only update UDP header for VxLAN packets. */
	update_udp_hdr = (pkt->ol_flags & PKT_TX_TUNNEL_VXLAN) ? 1 : 0;
	for (i = 0; i < nb_segs; i++) {
		update_ipv4_header(segs[i], outer_ipv4_offset, outer_id);
		if (update_udp_hdr)
			update_udp_header(segs[i], udp_gre_offset);
		update_ipv4_header(segs[i], inner_ipv4_offset, inner_id);
		/* (i < tail_idx) tells update_tcp_header() whether this is a
		 * non-final segment -- presumably to adjust FIN/PSH-style
		 * flags; confirm against gso_common. */
		update_tcp_header(segs[i], tcp_offset, sent_seq, i < tail_idx);
		/* Outer ID always advances by 1; inner ID by the caller's
		 * delta; the sequence number by this segment's payload size
		 * (total length minus the header bytes in the first seg). */
		outer_id++;
		inner_id += ipid_delta;
		sent_seq += (segs[i]->pkt_len - segs[i]->data_len);
	}
}
/*
 * Segment a tunneled (VxLAN/GRE over IPv4) TCP/IPv4 packet.
 *
 * Returns the number of segments written to pkts_out; returns 1 and passes
 * the packet through unchanged when the inner IPv4 header is fragmented or
 * the packet has no payload; otherwise propagates gso_do_segment()'s
 * result (<= 1 means no segmentation was performed).
 */
int
gso_tunnel_tcp4_segment(struct rte_mbuf *pkt,
		uint16_t gso_size,
		uint8_t ipid_delta,
		struct rte_mempool *direct_pool,
		struct rte_mempool *indirect_pool,
		struct rte_mbuf **pkts_out,
		uint16_t nb_pkts_out)
{
	struct rte_ipv4_hdr *inner_ipv4_hdr;
	uint16_t pyld_unit_size, hdr_offset, frag_off;
	int ret = 1;
	/* Offset of the inner IPv4 header from the start of the mbuf. */
	hdr_offset = pkt->outer_l2_len + pkt->outer_l3_len + pkt->l2_len;
	inner_ipv4_hdr = (struct rte_ipv4_hdr *)(rte_pktmbuf_mtod(pkt, char *) +
			hdr_offset);
	/*
	 * Don't process the packet whose MF bit or offset in the inner
	 * IPv4 header are non-zero.
	 */
	frag_off = rte_be_to_cpu_16(inner_ipv4_hdr->fragment_offset);
	if (unlikely(IS_FRAGMENTED(frag_off))) {
		pkts_out[0] = pkt;
		return 1;
	}
	/* hdr_offset now covers all headers up to and including inner TCP. */
	hdr_offset += pkt->l3_len + pkt->l4_len;
	/* Don't process the packet without data */
	if (hdr_offset >= pkt->pkt_len) {
		pkts_out[0] = pkt;
		return 1;
	}
	/* Each segment carries at most gso_size bytes including headers, so
	 * the per-segment payload budget is gso_size minus the header size. */
	pyld_unit_size = gso_size - hdr_offset;
	/* Segment the payload */
	ret = gso_do_segment(pkt, hdr_offset, pyld_unit_size, direct_pool,
			indirect_pool, pkts_out, nb_pkts_out);
	if (ret <= 1)
		return ret;
	update_tunnel_ipv4_tcp_headers(pkt, ipid_delta, pkts_out, ret);
	return ret;
}
|
grivet/dpdk
|
lib/librte_gso/gso_tunnel_tcp4.c
|
C
|
gpl-2.0
| 2,859
|
# Copyright (C) 2008 Canonical Ltd
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
from __future__ import absolute_import
import collections
from cStringIO import StringIO
from bzrlib import (
debug,
errors,
)
from bzrlib.trace import mutter
class MessageHandler(object):
    """Base class for handling messages received via the smart protocol.

    As parts of a message are received, the corresponding PART_received
    method will be called.
    """

    def __init__(self):
        # Populated by headers_received(); None until headers arrive.
        self.headers = None

    def headers_received(self, headers):
        """Called when message headers are received.

        This default implementation just stores them in self.headers.
        """
        self.headers = headers

    def byte_part_received(self, byte):
        """Called when a 'byte' part is received.

        Note that a 'byte' part is a message part consisting of exactly one
        byte.
        """
        # Bug fix: previously raised NotImplementedError(self.byte_received),
        # but no such attribute exists, so subclasses missing this override
        # got an AttributeError instead of NotImplementedError.
        raise NotImplementedError(self.byte_part_received)

    def bytes_part_received(self, bytes):
        """Called when a 'bytes' part is received.

        A 'bytes' message part can contain any number of bytes. It should not
        be confused with a 'byte' part, which is always a single byte.
        """
        # Bug fix: referenced the non-existent self.bytes_received.
        raise NotImplementedError(self.bytes_part_received)

    def structure_part_received(self, structure):
        """Called when a 'structure' part is received.

        :param structure: some structured data, which will be some combination
            of list, dict, int, and str objects.
        """
        # Bug fix: copy-paste error referenced self.bytes_received rather
        # than this method.
        raise NotImplementedError(self.structure_part_received)

    def protocol_error(self, exception):
        """Called when there is a protocol decoding error.

        The default implementation just re-raises the exception (a bare
        'raise' re-raises the exception currently being handled).
        """
        raise

    def end_received(self):
        """Called when the end of the message is received."""
        # No-op by default.
        pass
class ConventionalRequestHandler(MessageHandler):
"""A message handler for "conventional" requests.
"Conventional" is used in the sense described in
doc/developers/network-protocol.txt: a simple message with arguments and an
optional body.
Possible states:
* args: expecting args
* body: expecting body (terminated by receiving a post-body status)
* error: expecting post-body error
* end: expecting end of message
* nothing: finished
"""
def __init__(self, request_handler, responder):
MessageHandler.__init__(self)
self.request_handler = request_handler
self.responder = responder
self.expecting = 'args'
self._should_finish_body = False
self._response_sent = False
def protocol_error(self, exception):
if self.responder.response_sent:
# We can only send one response to a request, no matter how many
# errors happen while processing it.
return
self.responder.send_error(exception)
def byte_part_received(self, byte):
if self.expecting == 'body':
if byte == 'S':
# Success. Nothing more to come except the end of message.
self.expecting = 'end'
elif byte == 'E':
# Error. Expect an error structure.
self.expecting = 'error'
else:
raise errors.SmartProtocolError(
'Non-success status byte in request body: %r' % (byte,))
else:
raise errors.SmartProtocolError(
'Unexpected message part: byte(%r)' % (byte,))
def structure_part_received(self, structure):
if self.expecting == 'args':
self._args_received(structure)
elif self.expecting == 'error':
self._error_received(structure)
else:
raise errors.SmartProtocolError(
'Unexpected message part: structure(%r)' % (structure,))
def _args_received(self, args):
self.expecting = 'body'
self.request_handler.args_received(args)
if self.request_handler.finished_reading:
self._response_sent = True
self.responder.send_response(self.request_handler.response)
self.expecting = 'end'
def _error_received(self, error_args):
self.expecting = 'end'
self.request_handler.post_body_error_received(error_args)
def bytes_part_received(self, bytes):
if self.expecting == 'body':
self._should_finish_body = True
self.request_handler.accept_body(bytes)
else:
raise errors.SmartProtocolError(
'Unexpected message part: bytes(%r)' % (bytes,))
    def end_received(self):
        # End-of-message: legal from 'body' (empty/streamed body) or 'end'.
        if self.expecting not in ['body', 'end']:
            raise errors.SmartProtocolError(
                'End of message received prematurely (while expecting %s)'
                % (self.expecting,))
        self.expecting = 'nothing'
        self.request_handler.end_received()
        if not self.request_handler.finished_reading:
            raise errors.SmartProtocolError(
                "Complete conventional request was received, but request "
                "handler has not finished reading.")
        # Send the response now unless _args_received() already sent it.
        if not self._response_sent:
            self.responder.send_response(self.request_handler.response)
class ResponseHandler(object):
    """Abstract base class for an object that handles a smart response.

    Concrete implementations decode one response from the medium and expose
    its argument tuple and (optional) body to the caller.
    """

    def read_response_tuple(self, expect_body=False):
        """Reads and returns the response tuple for the current request.

        :keyword expect_body: a boolean indicating if a body is expected in the
            response.  Some protocol versions needs this information to know
            when a response is finished.  If False, read_body_bytes should
            *not* be called afterwards.  Defaults to False.
        :returns: tuple of response arguments.
        """
        raise NotImplementedError(self.read_response_tuple)

    def read_body_bytes(self, count=-1):
        """Read and return some bytes from the body.

        :param count: if specified, read up to this many bytes.  By default,
            reads the entire body.
        :returns: str of bytes from the response body.
        """
        raise NotImplementedError(self.read_body_bytes)

    def read_streamed_body(self):
        """Returns an iterable that reads and returns a series of body chunks.
        """
        raise NotImplementedError(self.read_streamed_body)

    def cancel_read_body(self):
        """Stop expecting a body for this response.

        If expect_body was passed to read_response_tuple, this cancels that
        expectation (and thus finishes reading the response, allowing a new
        request to be issued).  This is useful if a response turns out to be an
        error rather than a normal result with a body.
        """
        raise NotImplementedError(self.cancel_read_body)
class ConventionalResponseHandler(MessageHandler, ResponseHandler):
    """Accumulates decoded response message parts and presents them.

    Parts are fed in by the protocol decoder (byte / bytes / structure parts)
    and exposed to callers via the ResponseHandler interface:
    read_response_tuple, read_body_bytes, read_streamed_body.
    """
    def __init__(self):
        MessageHandler.__init__(self)
        # 'S' (success) or 'E' (error) status byte for the response args.
        self.status = None
        self.args = None
        # Body chunks received so far, in arrival order.
        self._bytes_parts = collections.deque()
        self._body_started = False
        # Status byte seen after body chunks started ('S' or 'E'), if any.
        self._body_stream_status = None
        # Lazily-built buffer over the full body; see read_body_bytes.
        self._body = None
        self._body_error_args = None
        self.finished_reading = False
    def setProtoAndMediumRequest(self, protocol_decoder, medium_request):
        # Wire up the decoder that parses raw bytes and the medium request
        # the bytes are pulled from.  Must be called before reading.
        self._protocol_decoder = protocol_decoder
        self._medium_request = medium_request
    def byte_part_received(self, byte):
        # A lone status byte: before the body it is the response status;
        # after body chunks started it terminates a streamed body.
        if byte not in ['E', 'S']:
            raise errors.SmartProtocolError(
                'Unknown response status: %r' % (byte,))
        if self._body_started:
            if self._body_stream_status is not None:
                raise errors.SmartProtocolError(
                    'Unexpected byte part received: %r' % (byte,))
            self._body_stream_status = byte
        else:
            if self.status is not None:
                raise errors.SmartProtocolError(
                    'Unexpected byte part received: %r' % (byte,))
            self.status = byte
    def bytes_part_received(self, bytes):
        # Queue a body chunk; consumed by read_body_bytes/read_streamed_body.
        self._body_started = True
        self._bytes_parts.append(bytes)
    def structure_part_received(self, structure):
        # Before the body: the response argument tuple.  After the body: only
        # legal as the error args following an 'E' body-stream status.
        if type(structure) is not tuple:
            raise errors.SmartProtocolError(
                'Args structure is not a sequence: %r' % (structure,))
        if not self._body_started:
            if self.args is not None:
                raise errors.SmartProtocolError(
                    'Unexpected structure received: %r (already got %r)'
                    % (structure, self.args))
            self.args = structure
        else:
            if self._body_stream_status != 'E':
                raise errors.SmartProtocolError(
                    'Unexpected structure received after body: %r'
                    % (structure,))
            self._body_error_args = structure
    def _wait_for_response_args(self):
        # Pump the decoder until the argument tuple arrives (or EOF).
        while self.args is None and not self.finished_reading:
            self._read_more()
    def _wait_for_response_end(self):
        # Pump the decoder until the whole response has been consumed.
        while not self.finished_reading:
            self._read_more()
    def _read_more(self):
        # Read exactly as many bytes as the decoder asks for next.
        next_read_size = self._protocol_decoder.next_read_size()
        if next_read_size == 0:
            # a complete request has been read.
            self.finished_reading = True
            self._medium_request.finished_reading()
            return
        bytes = self._medium_request.read_bytes(next_read_size)
        if bytes == '':
            # end of file encountered reading from server
            if 'hpss' in debug.debug_flags:
                mutter(
                    'decoder state: buf[:10]=%r, state_accept=%s',
                    self._protocol_decoder._get_in_buffer()[:10],
                    self._protocol_decoder.state_accept.__name__)
            raise errors.ConnectionReset(
                "Unexpected end of message. "
                "Please check connectivity and permissions, and report a bug "
                "if problems persist.")
        self._protocol_decoder.accept_bytes(bytes)
    def protocol_error(self, exception):
        # Whatever the error is, we're done with this request.
        self.finished_reading = True
        self._medium_request.finished_reading()
        raise
    def read_response_tuple(self, expect_body=False):
        """Read a response tuple from the wire.

        Raises the translated server-side error if the response status is
        'E'; otherwise returns the argument tuple.
        """
        self._wait_for_response_args()
        if not expect_body:
            self._wait_for_response_end()
        if 'hpss' in debug.debug_flags:
            mutter('   result:   %r', self.args)
        if self.status == 'E':
            self._wait_for_response_end()
            _raise_smart_server_error(self.args)
        return tuple(self.args)
    def read_body_bytes(self, count=-1):
        """Read bytes from the body, decoding into a byte stream.

        We read all bytes at once to ensure we've checked the trailer for
        errors, and then feed the buffer back as read_body_bytes is called.

        Like the builtin file.read in Python, a count of -1 (the default)
        means read the entire body.
        """
        # TODO: we don't necessarily need to buffer the full request if count
        # != -1.  (2008/04/30, Andrew Bennetts)
        if self._body is None:
            self._wait_for_response_end()
            body_bytes = ''.join(self._bytes_parts)
            if 'hpss' in debug.debug_flags:
                mutter('              %d body bytes read', len(body_bytes))
            self._body = StringIO(body_bytes)
            self._bytes_parts = None
        return self._body.read(count)
    def read_streamed_body(self):
        # Yield body chunks as they arrive; raises the server error (if any)
        # only after the whole stream has been consumed.
        while not self.finished_reading:
            while self._bytes_parts:
                bytes_part = self._bytes_parts.popleft()
                if 'hpssdetail' in debug.debug_flags:
                    mutter('              %d byte part read', len(bytes_part))
                yield bytes_part
            self._read_more()
        if self._body_stream_status == 'E':
            _raise_smart_server_error(self._body_error_args)
    def cancel_read_body(self):
        # Drain the rest of the response so a new request can be issued.
        self._wait_for_response_end()
def _raise_smart_server_error(error_tuple):
    """Raise exception based on tuple received from smart server

    Specific error translation is handled by bzrlib.remote._translate_error
    """
    error_name = error_tuple[0]
    if error_name == 'UnknownMethod':
        raise errors.UnknownSmartMethod(error_tuple[1])
    else:
        raise errors.ErrorFromSmartServer(error_tuple)
|
Distrotech/bzr
|
bzrlib/smart/message.py
|
Python
|
gpl-2.0
| 13,276
|
/*
* Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
* Licensed under the GPL
*/
#ifndef __TLB_H__
#define __TLB_H__
#include "um_mmu.h"
/*
 * One queued host address-space operation: an mmap of backing store, an
 * munmap, or an mprotect.  'type' selects the active union member.
 */
struct host_vm_op {
	enum { MMAP, MUNMAP, MPROTECT } type;
	union {
		struct {
			unsigned long addr;	/* start of region */
			unsigned long len;	/* length in bytes */
			unsigned int r:1;	/* read permission */
			unsigned int w:1;	/* write permission */
			unsigned int x:1;	/* execute permission */
			int fd;			/* file to map */
			__u64 offset;		/* offset within fd */
		} mmap;
		struct {
			unsigned long addr;
			unsigned long len;
		} munmap;
		struct {
			unsigned long addr;
			unsigned long len;
			unsigned int r:1;
			unsigned int w:1;
			unsigned int x:1;
		} mprotect;
	} u;
};
extern void mprotect_kernel_vm(int w);
extern void force_flush_all(void);
extern void fix_range_common(struct mm_struct *mm, unsigned long start_addr,
unsigned long end_addr, int force,
void (*do_ops)(union mm_context *,
struct host_vm_op *, int));
extern int flush_tlb_kernel_range_common(unsigned long start,
unsigned long end);
extern int add_mmap(unsigned long virt, unsigned long phys, unsigned long len,
int r, int w, int x, struct host_vm_op *ops, int index,
int last_filled, union mm_context *mmu,
void (*do_ops)(union mm_context *, struct host_vm_op *,
int));
extern int add_munmap(unsigned long addr, unsigned long len,
struct host_vm_op *ops, int index, int last_filled,
union mm_context *mmu,
void (*do_ops)(union mm_context *, struct host_vm_op *,
int));
extern int add_mprotect(unsigned long addr, unsigned long len, int r, int w,
int x, struct host_vm_op *ops, int index,
int last_filled, union mm_context *mmu,
void (*do_ops)(union mm_context *, struct host_vm_op *,
int));
#endif
|
kzlin129/tt-gpl
|
go9/linux-s3c24xx/arch/um/include/tlb.h
|
C
|
gpl-2.0
| 1,946
|
#ifndef MODINITTOOLS_TABLES_H
#define MODINITTOOLS_TABLES_H
#include <stddef.h>
/* Taken from the 2.5.49 kernel, with the kernel specific fields removed */
/* One PCI device match entry from a module's device table. */
struct pci_device_id {
	unsigned int vendor, device;		/* Vendor and device ID or PCI_ANY_ID */
	unsigned int subvendor, subdevice;	/* Subsystem ID's or PCI_ANY_ID */
	unsigned int class, class_mask;		/* (class,subclass,prog-if) triplet */
};
#define PCI_DEVICE_SIZE32 (6 * 4 + 4)
#define PCI_DEVICE_SIZE64 (6 * 4 + 8)
/* One USB device/interface match entry from a module's device table. */
struct usb_device_id {
	/* which fields to match against? */
	unsigned short	match_flags;

	/* Used for product specific matches; range is inclusive */
	unsigned short	idVendor;
	unsigned short	idProduct;
	unsigned short	bcdDevice_lo;
	unsigned short	bcdDevice_hi;

	/* Used for device class matches */
	unsigned char	bDeviceClass;
	unsigned char	bDeviceSubClass;
	unsigned char	bDeviceProtocol;

	/* Used for interface class matches */
	unsigned char	bInterfaceClass;
	unsigned char	bInterfaceSubClass;
	unsigned char	bInterfaceProtocol;
};
#define USB_DEVICE_SIZE32 (5 * 2 + 6 * 1 + 4)
#define USB_DEVICE_SIZE64 (5 * 2 + 6 * 1 + 8)
/* One IEEE 1394 (FireWire) device match entry. */
struct ieee1394_device_id {
	unsigned int match_flags;	/* which fields to match against */
	unsigned int vendor_id;
	unsigned int model_id;
	unsigned int specifier_id;
	unsigned int version;
};
#define IEEE1394_DEVICE_SIZE32 (5 * 4 + 4)
#define IEEE1394_DEVICE_SIZE64 (5 * 4 + 4 /*padding*/ + 8)
/* One s390 channel-attached (CCW) device match entry. */
struct ccw_device_id {
	unsigned short match_flags;	/* which fields to match against */

	unsigned short cu_type;		/* control unit type */
	unsigned short dev_type;	/* device type */
	unsigned char  cu_model;	/* control unit model */
	unsigned char  dev_model;	/* device model */
};
#define CCW_DEVICE_SIZE32 (3 * 2 + 2 * 1 + 4)
#define CCW_DEVICE_SIZE64 (3 * 2 + 2 * 1 + 8)
/* One ISAPnP device id: fixed 8-byte id string. */
struct pnp_device_id {
	char id[8];
};
#define PNP_DEVICE_SIZE32 (8 + 4)
#define PNP_DEVICE_SIZE64 (8 + 8)
/* Up to 8 device id strings belonging to one PnP card entry. */
struct pnp_card_devid
{
	char devid[8][8];
};
/* PnP card entry as laid out in a 32-bit module (4-byte driver_data). */
struct pnp_card_device_id_32 {
	char id[8];
	char driver_data[4];	/* placeholder for a 32-bit pointer */
	char devid[8][8];
};
/* PnP card entry as laid out in a 64-bit module (8-byte driver_data). */
struct pnp_card_device_id_64 {
	char id[8];
	char driver_data[8];	/* placeholder for a 64-bit pointer */
	char devid[8][8];
};
#define PNP_CARD_DEVICE_SIZE32 (sizeof(struct pnp_card_device_id_32))
#define PNP_CARD_DEVICE_SIZE64 (sizeof(struct pnp_card_device_id_64))
#define PNP_CARD_DEVICE_OFFSET32 (offsetof(struct pnp_card_device_id_32, devid))
#define PNP_CARD_DEVICE_OFFSET64 (offsetof(struct pnp_card_device_id_64, devid))
/*
 * struct input_device_id as laid out in a 64-bit module: capability
 * bitmasks are stored as unsigned long long words.
 */
struct input_device_id_64 {
	unsigned long long match_flags;	/* which fields to match against */

	unsigned short bustype;
	unsigned short vendor;
	unsigned short product;
	unsigned short version;

	unsigned long long evbit[1];
	unsigned long long keybit[8];	/* 512 bits */
	unsigned long long relbit[1];
	unsigned long long absbit[1];	/* 64 bits */
	unsigned long long mscbit[1];
	unsigned long long ledbit[1];
	unsigned long long sndbit[1];
	unsigned long long ffbit[2];	/* 128 bits */

	unsigned long long driver_info;
};
/*
 * struct input_device_id as laid out in a 32-bit module: the same bitmask
 * capacity as the 64-bit layout, stored as unsigned int words.
 */
struct input_device_id_32 {
	unsigned int match_flags;	/* which fields to match against */

	unsigned short bustype;
	unsigned short vendor;
	unsigned short product;
	unsigned short version;

	unsigned int evbit[1];
	unsigned int keybit[16];	/* 512 bits */
	unsigned int relbit[1];
	unsigned int absbit[2];		/* 64 bits */
	unsigned int mscbit[1];
	unsigned int ledbit[1];
	unsigned int sndbit[1];
	unsigned int ffbit[4];		/* 128 bits */

	unsigned int driver_info;
};
#define INPUT_DEVICE_SIZE32 (4 + 4 * 2 + 4 + 16 * 4 + 4 + 2 * 4 + 4 + 4 + 4 + 4 * 4 + 4)
#define INPUT_DEVICE_SIZE64 (8 + 4 * 2 + 8 + 8 * 8 + 8 + 8 + 8 + 8 + 8 + 2 * 8 + 8)
/* Functions provided by tables.c */
struct module;
void output_usb_table(struct module *modules, FILE *out);
void output_ieee1394_table(struct module *modules, FILE *out);
void output_pci_table(struct module *modules, FILE *out);
void output_ccw_table(struct module *modules, FILE *out);
void output_isapnp_table(struct module *modules, FILE *out);
void output_input_table(struct module *modules, FILE *out);
#endif /* MODINITTOOLS_TABLES_H */
|
carlobar/uclinux_leon3_UD
|
user/module-init-tools-3.1-pre6/tables.h
|
C
|
gpl-2.0
| 3,941
|
// Doxygen-generated search index entry: maps the 'quick_sort' symbol to its
// documentation anchor in input__output_8cc.html.  Auto-generated; do not
// edit by hand.
var searchData=
[
  ['quick_5fsort',['quick_sort',['../input__output_8cc.html#af4e9acb1bb4f1aea6ef17d8344679c48',1,'input_output.cc']]]
];
|
cecco4/zombie-kaze
|
doc/html/search/functions_71.js
|
JavaScript
|
gpl-2.0
| 139
|
<?php
namespace Google\AdsApi\AdWords\v201705\cm;
/**
* This file was generated from WSDL. DO NOT EDIT.
*/
/**
 * Possible reasons for a CampaignSharedSetError, mirroring the enum of the
 * same name in the AdWords API v201705 WSDL.
 */
class CampaignSharedSetErrorReason
{

    const CAMPAIGN_SHARED_SET_DOES_NOT_EXIST = 'CAMPAIGN_SHARED_SET_DOES_NOT_EXIST';
    const UNKNOWN = 'UNKNOWN';

}
|
renshuki/dfp-manager
|
vendor/googleads/googleads-php-lib/src/Google/AdsApi/AdWords/v201705/cm/CampaignSharedSetErrorReason.php
|
PHP
|
gpl-2.0
| 269
|
<?php if ( ! defined( 'FW' ) ) {
	die( 'Forbidden' );
}
/**
 * Shortcode view: renders a "zoom" article (optional title/subtitle heading
 * plus an optional image) and, when header/pattern/text colour attributes
 * are supplied, emits an inline <style> block overriding the theme styles.
 *
 * @var array $atts Shortcode attributes provided by the framework.
 */
$style_shortcodes = '';
$title = $atts['title'];
$subtitle = $atts['subtitle'];
$image = $atts['image'];
$header_image = $atts['header_image'];
$header_color = $atts['header_color'];
$header_pattern = $atts['header_pattern'];
$text_color = $atts['text_color'];
$uniq_id = rand(1,1000);
?>
<article id="zoom" class="va">
	<header class="heading-a">
		<?php if(!empty($title) || !empty($subtitle)):?>
		<h3>
			<span class="small"><?php echo esc_html($title);?></span>
			<?php echo $subtitle; ?>
		</h3>
		<?php endif; ?>
	</header>
	<?php if(!empty($image)):?>
	<figure class="zoomin"><img src="<?php echo esc_url($image['url']);?>" alt="" width="1100" height="550"></figure>
	<?php endif;?>
</article>
<?php // Build the CSS overrides into $style_shortcodes via output buffering. ?>
<?php if(!empty($header_image) || (!empty($header_color) && $header_color['primary'] != ' ' && $header_color['secondary'] != ' ') || !empty($header_pattern) || !empty($text_color)): ?>
	<?php
	ob_start();
	?>
	<?php if(!empty($header_image) || !empty($header_color) ):?>
	#content.a #zoom.va{
		<?php if(!empty($header_color)) : ?>
		background: -moz-linear-gradient(-45deg, <?php echo $header_color['primary']; ?> 0%, <?php echo $header_color['secondary']; ?> 100%);
		background: -webkit-gradient(linear, left top, right bottom, color-stop(0%,<?php echo $header_color['primary']; ?>), color-stop(100%,<?php echo $header_color['secondary']; ?>));
		background: -webkit-linear-gradient(-45deg, <?php echo $header_color['primary']; ?> 0%,<?php echo $header_color['secondary']; ?> 100%);
		background: -o-linear-gradient(-45deg, <?php echo $header_color['primary']; ?> 0%,<?php echo $header_color['secondary']; ?> 100%);
		background: -ms-linear-gradient(-45deg, <?php echo $header_color['primary']; ?> 0%,<?php echo $header_color['secondary']; ?> 100%);
		background: linear-gradient(135deg, <?php echo $header_color['primary']; ?> 0%,<?php echo $header_color['secondary']; ?> 100%);
		<?php endif;?>
		<?php echo !empty($header_image) ? 'background-image: url('.esc_url($header_image['url']).');' : ''; ?>
	}
	<?php endif;?>
	<?php if(!empty($header_pattern)):?>
	#content.a #zoom.va:before{
		content: "";
		display: block;
		position: absolute;
		left: 0;
		top: 0;
		z-index: 1;
		width: 100%;
		height: 100%;
		background: url(<?php echo esc_url($header_pattern['url']); ?>);
	}
	<?php endif;?>
	<?php if(!empty($text_color)):?>
	#content.a #zoom.va .heading-a,
	#content.a #zoom.va .heading-a h3,
	#content.a #zoom.va .heading-a h3 .small
	{
		color: <?php echo $text_color; ?>;
	}
	#content.a #zoom.va .heading-a h3:before
	{
		background: <?php echo $text_color; ?>;
	}
	<?php endif?>
	<?php $style_shortcodes .= ob_get_clean();?>
<?php endif;?>
<?php if(trim($style_shortcodes) != ''):?>
	<style>
		<?php echo $style_shortcodes; ?>
	</style>
<?php endif;?>
|
annegrundhoefer/sayfe
|
wp-content/themes/retouch-parent/framework-customizations/extensions/shortcodes/shortcodes/zoom/views/view.php
|
PHP
|
gpl-2.0
| 3,441
|
/*
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
Product name: redemption, a FLOSS RDP proxy
Copyright (C) Wallix 2010-2013
Author(s): Christophe Grosjean
Based on unit tests imported from FreeRDP (test_mppc*)
from Laxmikant Rashinkar.
Unit test for MPPC compression
*/
#define BOOST_AUTO_TEST_MAIN
#define BOOST_TEST_DYN_LINK
#define BOOST_TEST_MODULE TestMPPC
#include <boost/test/auto_unit_test.hpp>
#define LOGNULL
#include <sys/time.h>
#include "RDP/mppc_unified_dec.hpp"
BOOST_AUTO_TEST_CASE(TestMPPC)
{
    // Decompresses the fixture data 1000 times with the unified decoder and
    // checks every round against the known plaintext, logging the wall time.

    // Load compressed_rd5 and decompressed_rd5
    #include "../../fixtures/test_mppc_TestMPPC.hpp"

    const uint8_t * rdata;
    uint32_t        rlen;
    long int        dur;
    timeval         start_time;
    timeval         end_time;

    /* save starting time */
    gettimeofday(&start_time, nullptr);

    for (int x = 0; x < 1000 ; x++){
        // A fresh decoder per iteration so state never carries over.
        rdp_mppc_unified_dec rmppc_d;
        rdp_mppc_dec & rmppc = rmppc_d;

        /* uncompress data */
        BOOST_CHECK_EQUAL(true, rmppc.decompress(compressed_rd5, sizeof(compressed_rd5), PACKET_COMPRESSED | PACKET_COMPR_TYPE_64K, rdata, rlen));

        BOOST_CHECK_EQUAL(0, memcmp(decompressed_rd5, rdata, sizeof(decompressed_rd5)));
    }

    /* get end time */
    gettimeofday(&end_time, nullptr);

    /* print time taken */
    dur = ((end_time.tv_sec - start_time.tv_sec) * 1000000) + (end_time.tv_usec - start_time.tv_usec);
    LOG(LOG_INFO, "test_mppc: decompressed data in %ld micro seconds", dur);
}
BOOST_AUTO_TEST_CASE(TestMPPC_enc)
{
    // Round-trip test: compress the fixture with the RDP 5.0 encoder, then
    // verify the unified decoder restores the original bytes exactly.

    // Load decompressed_rd5_data
    #include "../../fixtures/test_mppc_TestMPPC_enc.hpp"

    const uint8_t * rdata;
    uint32_t        rlen;

    /* required for timing the test */
    timeval start_time;
    timeval end_time;

    /* setup decoder */
    rdp_mppc_unified_dec rmppc_d;
    rdp_mppc_dec & rmppc = rmppc_d;

    /* setup encoder for RDP 5.0 */
    rdp_mppc_50_enc enc;

    int data_len = sizeof(decompressed_rd5_data);
    LOG(LOG_INFO, "test_mppc_enc: testing with embedded data of %d bytes", data_len);

    /* save starting time */
    gettimeofday(&start_time, nullptr);

    uint8_t  compressionFlags;
    uint16_t datalen;

    enc.compress(decompressed_rd5_data, data_len, compressionFlags, datalen,
        rdp_mppc_enc::MAX_COMPRESSED_DATA_SIZE_UNUSED);
    // The fixture is expected to be compressible.
    BOOST_CHECK(0 != (compressionFlags & PACKET_COMPRESSED));

    BOOST_CHECK_EQUAL(true, rmppc.decompress(enc.outputBuffer, enc.bytes_in_opb, enc.flags, rdata, rlen));
    BOOST_CHECK_EQUAL(data_len, rlen);
    BOOST_CHECK_EQUAL(0, memcmp(decompressed_rd5_data, rdata, rlen));

    /* get end time */
    gettimeofday(&end_time, nullptr);

    /* print time taken */
    long int dur = ((end_time.tv_sec - start_time.tv_sec) * 1000000) + (end_time.tv_usec - start_time.tv_usec);
    LOG(LOG_INFO, "test_mppc_enc: compressed %d bytes in %f seconds\n", data_len, dur / 1000000.0);
}
BOOST_AUTO_TEST_CASE(TestBitsSerializer)
{
    // Inserts the 2-bit value 3 four times and checks the bit cursor
    // (bits_left/opb_index) and the packed output byte after each insert:
    // 11000000 -> 11110000 -> 11111100 -> 11111111, then advance to byte 1.
    uint8_t  outputBuffer[256] ={};
    uint8_t  bits_left = 8;
    uint16_t opb_index = 0;

    insert_n_bits_40_50(2, 3, outputBuffer, bits_left, opb_index, sizeof(outputBuffer));
    BOOST_CHECK_EQUAL(6, bits_left);
    BOOST_CHECK_EQUAL(0, opb_index);
    BOOST_CHECK_EQUAL(192, outputBuffer[0] & 0xFF);

    insert_n_bits_40_50(2, 3, outputBuffer, bits_left, opb_index, sizeof(outputBuffer));
    BOOST_CHECK_EQUAL(4, bits_left);
    BOOST_CHECK_EQUAL(0, opb_index);
    BOOST_CHECK_EQUAL(0xF0, outputBuffer[0] & 0xFF);

    insert_n_bits_40_50(2, 3, outputBuffer, bits_left, opb_index, sizeof(outputBuffer));
    BOOST_CHECK_EQUAL(2, bits_left);
    BOOST_CHECK_EQUAL(0, opb_index);
    BOOST_CHECK_EQUAL(0xFc, outputBuffer[0] & 0xFF);

    insert_n_bits_40_50(2, 3, outputBuffer, bits_left, opb_index, sizeof(outputBuffer));
    BOOST_CHECK_EQUAL(8, bits_left);
    BOOST_CHECK_EQUAL(1, opb_index);
    BOOST_CHECK_EQUAL(0xFF, outputBuffer[0] & 0xFF);
}
BOOST_AUTO_TEST_CASE(TestHashTableManager)
{
    // Exercises the encoder's hash-table manager: direct and indirect
    // insertion, undoing the last change set, and undo-buffer exhaustion.
    const unsigned int length_of_data_to_sign = 3;
    const unsigned int max_undo_element       = 8;

    typedef uint16_t                                     offset_type;
    typedef rdp_mppc_enc_hash_table_manager<offset_type> hash_table_manager;
    typedef hash_table_manager::hash_type                hash_type;

    hash_table_manager hash_tab_mgr(
        length_of_data_to_sign, max_undo_element);

    uint8_t data[] = "0123456789ABCDEF";

    hash_type   hash;
    hash_type   hash_save;
    offset_type offset;
    offset_type offset_save;

    // Test of insertion (explicit hash value).

    hash_tab_mgr.reset();

    offset = 1;
    hash   = hash_tab_mgr.sign(data + offset);
    hash_tab_mgr.update(hash, offset);
    BOOST_CHECK_EQUAL(offset, hash_tab_mgr.get_offset(hash));

    // Test of insertion (implicit hash value).

    hash_tab_mgr.reset();

    offset = 1;
    hash   = hash_tab_mgr.sign(data + offset);
    hash_tab_mgr.update_indirect(data, offset);
    BOOST_CHECK_EQUAL(offset, hash_tab_mgr.get_offset(hash));

    // Test of undoing last changes.

    hash_tab_mgr.reset();

    offset = 1;
    hash   = hash_tab_mgr.sign(data + offset);
    hash_tab_mgr.update_indirect(data, offset);
    BOOST_CHECK_EQUAL(offset, hash_tab_mgr.get_offset(hash));

    hash_tab_mgr.clear_undo_history();

    hash_save   = hash;
    offset_save = offset;

    offset = 3;
    hash   = hash_tab_mgr.sign(data + offset);
    hash_tab_mgr.update_indirect(data, offset);
    BOOST_CHECK_EQUAL(offset, hash_tab_mgr.get_offset(hash));

    // Undo must remove the new entry and keep the one from before
    // clear_undo_history().
    BOOST_CHECK_EQUAL(true, hash_tab_mgr.undo_last_changes());
    BOOST_CHECK_EQUAL(0, hash_tab_mgr.get_offset(hash));
    BOOST_CHECK_EQUAL(offset_save, hash_tab_mgr.get_offset(hash_save));

    // Test of undoing last changes (out of undo buffer).

    hash_tab_mgr.reset();

    // More updates than max_undo_element: undo must report failure.
    for (int i = 0; i < 10; i++)
        hash_tab_mgr.update_indirect(data + i, offset);
    BOOST_CHECK_EQUAL(false, hash_tab_mgr.undo_last_changes());
}
|
speidy/redemption
|
tests/core/RDP/test_mppc.cpp
|
C++
|
gpl-2.0
| 6,577
|
/*
* *************************************************************************************
* Copyright (C) 2008 EsperTech, Inc. All rights reserved. *
* http://esper.codehaus.org *
* http://www.espertech.com *
* ---------------------------------------------------------------------------------- *
* The software in this package is published under the terms of the GPL license *
* a copy of which has been included with this distribution in the license.txt file. *
* *************************************************************************************
*/
package com.espertech.esper.event;
import com.espertech.esper.client.EventBean;
/**
* Interface for writing a set of event properties to an event.
*/
public interface EventBeanWriter
{
    /**
     * Write property values to the event.
     * @param values property values to write, in the order expected by this writer
     * @param theEvent the event to populate with the values
     */
    public void write(Object[] values, EventBean theEvent);
}
|
mobile-event-processing/Asper
|
source/src/com/espertech/esper/event/EventBeanWriter.java
|
Java
|
gpl-2.0
| 1,122
|
<?php
// Static page template: renders the page title and content inside the
// theme's boxed layout, followed by the sidebar.  wp_link_pages() emits
// pagination links for <!--nextpage--> breaks.
get_header(); ?>
	<div id="main">
		<div class="width-container">
			<div id="container-sidebar">
				<div class="content-boxed">
					<?php while(have_posts()): the_post(); ?>
						<h2 class="title-bg"><?php the_title(); ?></h2>
						<?php the_content(); ?>
						<div class="clearfix"></div>
					<?php endwhile; ?>
					<?php wp_link_pages(); ?>
					<div class="clearfix"></div>
				</div><!-- close .content-boxed -->
				<div class="clearfix"></div>
			</div><!-- close #container-sidebar -->
			<?php get_sidebar(); ?>
			<div class="clearfix"></div>
		</div><!-- close .width-container -->
	</div><!-- close #main -->
<?php get_footer(); ?>
|
sahilbabu/wp_property
|
wp-content/themes/Freehold/page.php
|
PHP
|
gpl-2.0
| 706
|
<?php
// Renders the detail tables for a conference proposal: basic info, schedule
// (when programmed), technical requirements, attached files, and admin audit
// data.  Expects $CFG and $proposal in scope (set by the including script).
// running from system?
if (empty($CFG) || empty($proposal)) {
    die;
}
// initialize var
$values = array();
if (Context == 'admin' || Action == 'newproposal' || Action == 'updateproposal' || Action == 'deleteproposal' || Action == 'scheduleevent' || Action == 'editevent' || Action == 'cancelevent' || Action == 'proposalfiles') {
    if (Context == 'admin' && Action != 'scheduleevent' && Action != 'editevent') {
        $values[__('Nombre de Usuario')] = $proposal->login;
    }
    $values[__('Nombre de ponencia')] = $proposal->nombre;
    if (Action == 'scheduleevent' || Action == 'editevent') {
        $values[__('Nombre de ponente')] = $proposal->nombrep . ' ' . $proposal->apellidos;
    }
}
// flag prop_noshow_resume
if (empty($prop_noshow_resume)) {
    $values[__('Resumen')] = nl2br(htmlspecialchars($proposal->resumen));
}
if (Action != 'proposalfiles') {
    $values = array_merge($values, array(
        __('Tipo de Propuesta') => $proposal->tipo,
        __('Orientación') => $proposal->orientacion,
        __('Duración') => sprintf(__('%d Hrs.'), $proposal->duracion),
        __('Nivel') => $proposal->nivel,
    ));
}
if (Action != 'newproposal' && Action != 'updateproposal' && Action != 'scheduleevent' && Action != 'editevent') {
    $values[__('Status')] = '<b>' . $proposal->status . '</b>';
}
if (Context == 'ponente' && !empty($proposal->adminmail) && Action != 'proposalfiles') {
    $contactmail = sprintf('<em>%s</em>', $proposal->adminmail);
    $values = array_merge($values, array(
        __('Correo de contacto') => $contactmail
    ));
}
do_table_values($values, 'narrow');
// if it's schedule merge date info
if ($proposal->id_status == 8) {
    $values = array(
        __('Fecha') => $proposal->human_date,
        __('Lugar') => $proposal->lugar,
        __('Hora') => $proposal->time
    );
    do_table_values($values, 'narrow');
}
if (empty($prop_noshow_resume)) {
    // reset values
    $values = array();
    if (!empty($proposal->reqtecnicos) && (Context == 'ponente' || Context == 'admin')) {
        $values[__('Requisitos técnicos del taller')] = $proposal->reqtecnicos;
    }
    if (!empty($proposal->reqasistente) && (Context == 'ponente' || Context == 'admin')) {
        $values[__('Prerequisitos del Asistente')] = $proposal->reqasistente;
    }
    if (!empty($values)) {
        // show proposal aditional info
        do_table_values($values, 'narrow');
    }
}
// show public files of proposals if it's programmed
if ((Action == 'viewproposal' || Action == 'viewdeletedproposal') && ($proposal->id_status == 8 || $proposal->id_ponente == $USER->id || Context == 'admin')) {
    //show files
    $files = get_records('prop_files', 'id_propuesta', $proposal->id);
    $filelist = '';
    if (!empty($files)) {
        foreach ($files as $f) {
            // Private files are only visible to their owner or to admins.
            if (!empty($f->public) || (!empty($USER) && $proposal->id_ponente == $USER->id) || Context == 'admin') {
                $size = human_filesize($f->size);
                $title = htmlspecialchars($f->title, ENT_COMPAT, 'utf-8');
                // Build the download URL appropriate for the current context.
                if (Context == 'main') {
                    $url = get_url('general/proposals/'.$proposal->id.'/files/'.$f->id.'/'.$f->name);
                }
                elseif (Context == 'asistente') {
                    $url = get_url('person/proposals/'.$proposal->id.'/files/'.$f->id.'/'.$f->name);
                }
                elseif (Context == 'ponente') {
                    $url = get_url('speaker/proposals/'.$proposal->id.'/files/'.$f->id.'/'.$f->name);
                }
                elseif (Context == 'admin') {
                    $url = get_url('admin/proposals/'.$proposal->id.'/files/'.$f->id.'/'.$f->name);
                }
                if (empty($f->public)) {
                    $private = '*';
                } else {
                    $private = '';
                }
                $filelist .= <<< END
<li><a href="{$url}" title="({$f->name}) {$f->descr}">{$title}</a>{$private} <small>({$size})</small></li>
END;
            }
        }
    }
    if (!empty($USER) && Context == 'speaker' && $proposal->id_ponente == $USER->id) {
        $url = get_url('speaker/proposals/'.$proposal->id.'/files');
        $filelist .= "<li><a class=\"verde\" href=\"{$url}\">" . __('Subir archivos') . "</a></li>";
    }
    if (!empty($filelist)) {
        $filelist = "<ul>{$filelist}</ul>";
        do_table_values(array(__('Archivos') => $filelist), 'narrow');
    }
}
if (Context == 'admin' && Action != 'newproposal' && Action != 'scheduleevent' && Action != 'editevent') {
    $adminlogin = (empty($proposal->adminlogin)) ? __('Usuario') : $proposal->adminlogin;
    $values = array(
        __('Fecha de registro') => $proposal->reg_time,
        __('Fecha de actualización') => $proposal->act_time,
        __('Actualizado por') => $adminlogin
    );
    do_table_values($values, 'narrow');
}
?>
|
BackupTheBerlios/yupana
|
includes/common/prop_display_info.php
|
PHP
|
gpl-2.0
| 5,413
|
MODULE ccprod_I
! Explicit interface block for subroutine CCPROD.  NOTE(review): the
! implementation is not visible here; presumably it combines coefficient
! vectors C1 and C2 (length LM2) into output C12 -- confirm against ccprod.f.
INTERFACE
!...Generated by Pacific-Sierra Research 77to90  4.4G  10:47:18  03/09/06
      SUBROUTINE ccprod (C1, C2, C12, LM2)
      USE vast_kind_param,ONLY: DOUBLE
      integer, INTENT(IN) :: LM2
      real(DOUBLE), DIMENSION(LM2), INTENT(IN) :: C1
      real(DOUBLE), DIMENSION(LM2), INTENT(IN) :: C2
      real(DOUBLE), DIMENSION(*), INTENT(OUT) :: C12
      END SUBROUTINE
END INTERFACE
END MODULE
|
trisyoungs/aten
|
src/plugins/method_mopac71/mopac7.1/ccprod_I.f90
|
FORTRAN
|
gpl-2.0
| 459
|
/* vim: ts=4 sw=4 expandtab smartindent cindent */
/*
* License: GPLv3
* Copyright (C) Dongguan Vali Network Technology Co., Ltd.
* Author: chen-qx@live.cn
* Date: 2012-05
* Description: A LightDM greeter for StartOS
*/
#include <glib/gi18n.h>
#include "backend.h"
#include "ui.h"
#include <gdk/gdkx.h>
#include <lightdm.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
typedef struct _backend_componet backend_componet;
typedef unsigned int u32;
typedef unsigned short u16;
typedef unsigned char u8;
backend_componet back;
gboolean has_prompted = FALSE;
static void backend_open_config (void);
static void backend_open_state (void);
static void show_prompt_cb (LightDMGreeter *greeter, const gchar *prompt, LightDMPromptType type);
static void show_message_cb (LightDMGreeter *greeter, const gchar *message, LightDMMessageType type);
static void authentication_complete_cb (LightDMGreeter *greeter);
static void start_session (void);
static cairo_surface_t * backend_create_root_surface (GdkScreen *screen);
static void * mapmem_to_mem (size_t base, size_t len);
/* LightDM "show-prompt" signal handler: re-enable the login box so the user
 * can type the requested credential. */
static void show_prompt_cb (LightDMGreeter *greeter, const gchar *prompt, LightDMPromptType type)
{
    /*
     * FIXME: It must to do so and must be here, or login box can't
     * re-SENSITIVE. Any suggestion ?
     */
    ui_set_login_box_sensitive (TRUE);
    /*
    if (!has_prompted)
    {
        ui_set_prompt_text (dgettext("Linux-PAM", prompt), type);
    }
    */
}
/* LightDM "show-message" signal handler: display the (PAM-translated)
 * message in the prompt area. */
static void show_message_cb (LightDMGreeter *greeter, const gchar *message, LightDMMessageType type)
{
    ui_set_prompt_text (dgettext("Linux-PAM", message), type);
}
/*
 * Start the user session selected in the UI.  Persists the chosen session
 * and language to the greeter state file first, then hands control to
 * LightDM.  NULL session/language fall back to LightDM defaults.
 */
static void start_session ()
{
    GError *error = NULL;
    const char * session  = NULL;
    const char * language = NULL;

    session  = ui_get_session ();
    language = ui_get_language ();

    backend_state_file_set_language (language);
    backend_state_file_set_session (session);

    language ? lightdm_greeter_set_language (back.greeter, language) : g_warning ("Get language NULL, use default");
    if (!session)
    {
        g_warning ("Get session NULL, use default");
    }
    g_warning ("lang = %s, session = %s", language, session);
    if (!lightdm_greeter_start_session_sync (back.greeter, session, &error))
    {
        g_warning ("Starting session: %s\n", error->message);
        g_clear_error (&error);
    }
}
/* Record the last-used session name in the greeter state file. */
void backend_state_file_set_session (const char * session)
{
    gsize length;
    char * data;

    g_return_if_fail (session);
    g_key_file_set_value (back.statekeyfile, "greeter", "last-session", session);
    /* g_key_file_to_data() returns a newly allocated buffer; freed below. */
    data = g_key_file_to_data (back.statekeyfile, &length, NULL);
    g_warning ("Put session to state file: %s\n", session);
    g_file_set_contents (back.statefile, data, length, NULL);
    g_free (data);
}
/* Record the last-used language in the greeter state file. */
void backend_state_file_set_language (const char * lang)
{
    gsize length;
    char * data;

    g_return_if_fail (lang);
    g_key_file_set_value (back.statekeyfile, "greeter", "last-language", lang);
    /* g_key_file_to_data() returns a newly allocated buffer; freed below. */
    data = g_key_file_to_data (back.statekeyfile, &length, NULL);
    g_warning ("Put language to state file: %s\n", lang);
    g_file_set_contents (back.statefile, data, length, NULL);
    g_free (data);
}
/* Record the last-used keyboard layout in the greeter state file. */
void backend_state_file_set_keyboard (const char * kb)
{
    char * data;
    gsize length;

    g_return_if_fail (kb);
    g_key_file_set_value (back.statekeyfile, "greeter", "last-keyboard", kb);
    /* g_key_file_to_data() returns a newly allocated buffer; freed below. */
    data = g_key_file_to_data (back.statekeyfile, &length, NULL);
    g_warning ("Put keyboard to state file: %s\n", kb);
    g_file_set_contents (back.statefile, data, length, NULL);
    g_free (data);
}
/* Last-used session from the state file (newly allocated; caller frees),
 * or NULL if unset. */
gchar * backend_state_file_get_session ()
{
    return g_key_file_get_value (back.statekeyfile, "greeter", "last-session", NULL);
}
/* Last-used language from the state file (newly allocated; caller frees),
 * or NULL if unset. */
gchar * backend_state_file_get_language (void)
{
    return g_key_file_get_value (back.statekeyfile, "greeter", "last-language", NULL);
}
/* Last-used keyboard layout from the state file (newly allocated; caller
 * frees), or NULL if unset. */
gchar * backend_state_file_get_keyboard (void)
{
    return g_key_file_get_value (back.statekeyfile, "greeter", "last-keyboard", NULL);
}
/* Last logged-in user name from the state file (newly allocated; caller
 * frees), or NULL if unset. */
gchar * backend_state_file_get_user (void)
{
    return g_key_file_get_value (back.statekeyfile, "greeter", "last-user", NULL);
}
/*
 * LightDM "authentication-complete" signal handler: on success, start the
 * selected session; on failure, show an error and restart authentication
 * for the same user.
 */
static void authentication_complete_cb (LightDMGreeter *greeter)
{
    //ui_set_prompt_text (NULL, 0);
    if (lightdm_greeter_get_is_authenticated (back.greeter))
    {
        start_session ();
    }
    else
    {
        ui_set_prompt_text (_("Authenticated Failed, Try Again"), 1);
        ui_set_prompt_show (TRUE);
        has_prompted = TRUE;
        /* Restart PAM for the user that just failed. */
        backend_authenticate_process (lightdm_greeter_get_authentication_user (back.greeter));
    }
}
/* Begin a LightDM authentication conversation for `username` and adjust
 * the prompt UI. A non-empty user name is also recorded as "last-user"
 * in the state file.
 * Fixes: the buffer returned by g_key_file_to_data() was never freed
 * (memory leak, inconsistent with the backend_state_file_set_* family),
 * and writing was attempted even when the state file is unavailable. */
void backend_authenticate_username_only (const gchar *username)
{
    char * data;
    gsize length;
    gboolean truename;
    lightdm_greeter_authenticate (back.greeter, username);
    truename = username && *username;
    if (truename)
    {
        if (!has_prompted)
        {
            ui_set_prompt_text (NULL, 1); /* password */
            ui_set_prompt_show (FALSE);
        }
    }
    else
    {
        /* Empty user name: fall back to asking for a login name. */
        has_prompted = FALSE;
        ui_set_prompt_text (_("Login"), 0);
        ui_set_prompt_show (TRUE);
        return ;
    }
    /* backend_open_state() may have left these NULL on mkdir failure */
    if (!back.statekeyfile || !back.statefile)
        return ;
    g_key_file_set_value (back.statekeyfile, "greeter", "last-user", username);
    data = g_key_file_to_data (back.statekeyfile, &length, NULL);
    g_debug ("Put username to state file: %s\n", username);
    g_file_set_contents (back.statefile, data, length, NULL);
    g_free (data); /* was leaked before */
}
/* Drive one step of the authentication state machine with `text`:
 * already authenticated -> start the session;
 * conversation in progress -> `text` answers the pending prompt (password);
 * otherwise -> `text` is a user name starting a fresh conversation. */
void backend_authenticate_process (const gchar *text)
{
    if (lightdm_greeter_get_is_authenticated (back.greeter))
    {
        start_session ();
        return;
    }
    if (lightdm_greeter_get_in_authentication (back.greeter))
    {
        ui_set_prompt_show (FALSE);
        ui_set_prompt_text ("", 1); /* entry invisible */
        lightdm_greeter_respond (back.greeter, text); /* password */
        return;
    }
    backend_authenticate_username_only (text); /* username */
}
gboolean backend_init_greeter ()
{
GError *error;
back.greeter = lightdm_greeter_new ();
if (!lightdm_greeter_connect_sync (back.greeter, &error))
{
g_critical ("Greeter connect Fail: %s\n", error->message);
g_clear_error (&error);
return FALSE;
}
g_signal_connect (back.greeter, "show-prompt", G_CALLBACK(show_prompt_cb), NULL);
g_signal_connect (back.greeter, "show-message", G_CALLBACK (show_message_cb), NULL);
g_signal_connect (back.greeter, "authentication-complete", G_CALLBACK (authentication_complete_cb), NULL);
backend_open_config ();
backend_open_state ();
return TRUE;
}
/* Load the greeter configuration (GREETER_CONF_FILE) into back.conffile.
 * open_key_file() logs a warning on load failure but still returns a
 * (possibly empty) GKeyFile, so back.conffile is non-NULL afterwards. */
static void backend_open_config ()
{
back.conffile = open_key_file (GREETER_CONF_FILE);
}
/* Create the per-user cache directory ($XDG_CACHE_HOME/APP_NAME) and load
 * the "state" key file from it into back.statefile / back.statekeyfile.
 * When the directory cannot be created, both members are left NULL. */
static void backend_open_state ()
{
    char *dir = g_build_filename (g_get_user_cache_dir (), APP_NAME, NULL);
    if (g_mkdir_with_parents (dir, 0755) >= 0)
    {
        back.statefile = g_build_filename (dir, "state", NULL);
        back.statekeyfile = open_key_file (back.statefile);
    }
    else
    {
        back.statefile = NULL;
        back.statekeyfile = NULL;
    }
    g_free (dir);
}
/* Load `filepath` into a newly allocated GKeyFile.
 * A load failure is only logged; the (then empty) key file is still
 * returned so callers never get NULL for a non-NULL path.
 * Caller releases the result with g_key_file_free(). */
GKeyFile * open_key_file (const char *filepath)
{
    GError *err = NULL;
    GKeyFile *kf;
    g_return_val_if_fail (filepath, NULL);
    kf = g_key_file_new ();
    if (!g_key_file_load_from_file (kf, filepath, G_KEY_FILE_NONE, &err))
    {
        g_warning ("Failed to Loading file \"%s\" : %s", filepath, err->message);
        g_clear_error (&err);
    }
    return kf;
}
/* Copy one string-valued key from the [greeter] section of the config
 * file into the given GtkSettings property, when the key is present. */
static void backend_apply_string_setting (GtkSettings *settings,
                                          const char *key,
                                          const char *prop)
{
    char *v = g_key_file_get_value (back.conffile, "greeter", key, NULL);
    if (v)
    {
        g_object_set (settings, prop, v, NULL);
        g_free (v);
    }
}

/* Apply the greeter's appearance settings (theme, font, Xft tuning) from
 * the config file onto `settings`. No-op when no config file is loaded. */
void backend_set_config (GtkSettings * settings)
{
    char * value = NULL;
    if (!back.conffile)
        return ;
    g_debug ("Set Configuration");
    backend_apply_string_setting (settings, "theme-name", "gtk-theme-name");
    backend_apply_string_setting (settings, "font-name", "gtk-font-name");
    if ((value = g_key_file_get_value (back.conffile, "greeter", "xft-antialias", NULL)))
    {
        /* boolean property: compare against the literal string "true" */
        g_object_set (settings, "gtk-xft-antialias", g_strcmp0 (value, "true") == 0, NULL);
        g_free (value);
    }
    if ((value = g_key_file_get_value (back.conffile, "greeter", "xft-dpi", NULL)))
    {
        /* gtk-xft-dpi is expressed in 1024ths of a DPI */
        g_object_set (settings, "gtk-xft-dpi", (int) (1024 * atof (value)), NULL);
        g_free (value);
    }
    backend_apply_string_setting (settings, "xft-hintstyle", "gtk-xft-hintstyle");
    backend_apply_string_setting (settings, "xft-rgba", "gtk-xft-rgba");
}
void backend_finalize ()
{
g_key_file_free (back.conffile);
g_key_file_free (back.statekeyfile);
g_free (back.statefile);
}
/* Resolve the configured background into either a parsed color (*bg_color)
 * or a loaded image (*bg_pixbuf). A value that parses as a color wins;
 * otherwise it is treated as an image path (absolute, or relative to
 * GREETER_DATA_DIR). On image-load failure we fall back to the default
 * color "#1F6492" via the BACKEND_RESET_BG label.
 * Fixes: `path` was g_free()d before the failure branch read it in the
 * g_warning() call — a use-after-free; frees are now ordered after use. */
void backend_get_conf_background (GdkPixbuf ** bg_pixbuf, GdkRGBA *bg_color)
{
    GError *error = NULL;
    gchar *value;
    value = g_key_file_get_value (back.conffile, "greeter", "background", NULL);
BACKEND_RESET_BG:
    if (!value)
    {
        value = g_strdup ("#1F6492"); /* default background color */
    }
    if (gdk_rgba_parse (bg_color, value))
    {
        g_debug ("Backgroud color %s\n", value);
        g_free (value);
    }
    else
    {
        gchar *path;
        if (g_path_is_absolute (value))
            path = g_strdup (value);
        else
            path = g_build_filename (GREETER_DATA_DIR, value, NULL);
        *bg_pixbuf = gdk_pixbuf_new_from_file (path, &error);
        g_debug ("Backgroud picture %s\n", path);
        g_free (value);
        value = NULL;
        if (!(*bg_pixbuf))
        {
            /* path/error must still be alive here (previous code freed
             * path before this message). */
            g_warning ("Failed to load background: %s -- %s\n", path,
                       error ? error->message : "unknown error");
            g_clear_error (&error);
            g_free (path);
            goto BACKEND_RESET_BG; /* retry with the default color */
        }
        g_free (path);
    }
}
/* Paint the configured background (image or solid color) onto the root
 * window of every screen/monitor of the default display.
 * Uses backend_create_root_surface() so the pixmap survives greeter exit
 * (RetainPermanent). NOTE(review): the gdk_display_get_n_screens /
 * gdk_screen_get_n_monitors APIs used here are the GTK3 multi-screen
 * interfaces; multi-screen setups beyond screen 0 are rare — verify on
 * target systems. */
void backend_set_screen_background ()
{
GdkRGBA bg_color;
GdkPixbuf * bg_pixbuf = NULL;
GdkRectangle monitor_geometry;
backend_get_conf_background (&bg_pixbuf, &bg_color);
/* Set the background */
int i;
for (i = 0; i < gdk_display_get_n_screens (gdk_display_get_default ()); i++)
{
GdkScreen *screen;
cairo_surface_t *surface;
cairo_t *c;
int monitor;
screen = gdk_display_get_screen (gdk_display_get_default (), i);
/* surface wraps a persistent root pixmap (see backend_create_root_surface) */
surface = backend_create_root_surface (screen);
c = cairo_create (surface);
for (monitor = 0; monitor < gdk_screen_get_n_monitors (screen); monitor++)
{
gdk_screen_get_monitor_geometry (screen, monitor, &monitor_geometry);
if (bg_pixbuf)
{
/* scale the image to fill this monitor, then draw it at the
 * monitor's offset within the whole screen */
GdkPixbuf *pixbuf = gdk_pixbuf_scale_simple (bg_pixbuf, monitor_geometry.width, monitor_geometry.height, GDK_INTERP_BILINEAR);
gdk_cairo_set_source_pixbuf (c, pixbuf, monitor_geometry.x, monitor_geometry.y);
g_object_unref (pixbuf);
}
else
gdk_cairo_set_source_rgba (c, &bg_color);
cairo_paint (c);
}
cairo_destroy (c);
/* Refresh background */
gdk_flush ();
/* clearing the root window makes the X server repaint it from the
 * background pixmap we just installed */
XClearWindow (GDK_SCREEN_XDISPLAY (screen), RootWindow (GDK_SCREEN_XDISPLAY (screen), i));
}
if (bg_pixbuf)
g_object_unref (bg_pixbuf);
}
/*
 * ----- copy from lightdm-gtk-greeter ----
 * Many Thanks to its author: Robert Ancell
 */
/* Create a Cairo surface backed by a root-window pixmap for `screen` and
 * install it as the root background pixmap.
 * The pixmap is created on a *separate* X connection with RetainPermanent
 * so it outlives the greeter process (the session's background stays up
 * until the session manager repaints). Returns NULL if the extra display
 * connection cannot be opened.
 * NOTE(review): the returned cairo surface is never destroyed by the
 * caller (backend_set_screen_background) — apparently intentional, since
 * the pixmap must persist; confirm against upstream lightdm-gtk-greeter. */
static cairo_surface_t *
backend_create_root_surface (GdkScreen *screen)
{
gint number, width, height;
Display *display;
Pixmap pixmap;
cairo_surface_t *surface;
number = gdk_screen_get_number (screen);
width = gdk_screen_get_width (screen);
height = gdk_screen_get_height (screen);
/* Open a new connection so with Retain Permanent so the pixmap remains when the greeter quits */
gdk_flush ();
display = XOpenDisplay (gdk_display_get_name (gdk_screen_get_display (screen)));
if (!display)
{
g_warning ("Failed to create root pixmap");
return NULL;
}
XSetCloseDownMode (display, RetainPermanent);
pixmap = XCreatePixmap (display, RootWindow (display, number), width, height, DefaultDepth (display, number));
/* closing the connection with RetainPermanent keeps `pixmap` alive */
XCloseDisplay (display);
/* Convert into a Cairo surface */
surface = cairo_xlib_surface_create (GDK_SCREEN_XDISPLAY (screen),
pixmap,
GDK_VISUAL_XVISUAL (gdk_screen_get_system_visual (screen)),
width, height);
/* Use this pixmap for the background */
XSetWindowBackgroundPixmap (GDK_SCREEN_XDISPLAY (screen),
RootWindow (GDK_SCREEN_XDISPLAY (screen), number),
cairo_xlib_surface_get_drawable (surface));
return surface;
}
/* Probe the SMBIOS tables (via /dev/mem) for the machine's chassis type.
 * Returns the SMBIOS "System Enclosure" type byte (e.g. desktop/laptop
 * codes), or 2 ("Unknown") when the tables cannot be found or read.
 * Requires root (reads /dev/mem through mapmem_to_mem()).
 * NOTE(review): arithmetic on `void *` (mem + i) relies on the GCC
 * extension treating sizeof(void) == 1 — non-portable but consistent
 * with mapmem_to_mem() below. */
unsigned char chk_machine_type ()
{
u16 i = 0;
u16 smstructlen = 0;
u32 smstructp = 0;
u8 * smentryp = NULL;
unsigned char ret_val = 2; /* 2 for "Unknow" */
void * mem = NULL;
/* Scan the BIOS area 0xF0000-0xFFFFF for the SMBIOS entry point. */
if (!(mem = mapmem_to_mem (0xF0000, 0x10000)))
return ret_val;
while (i < 0xFFF0)
{
/* entry point anchor "_SM_" is aligned on a 16-byte boundary */
if (!memcmp (mem + i, "_SM_", 4))
{
goto MATCH;
}
i += 16;
}
g_warning("Can't found the Anchor String");
goto NOT_MATCH;
MATCH:
/* Entry point offsets per the SMBIOS spec:
 * +0x18 = structure table physical address, +0x16 = table length.
 * NOTE(review): unaligned u32/u16 reads — fine on x86, not portable. */
smstructp = (u32)(*((u32 *)(mem + i + 0x18))); /* SMBIOS structure table start address */
smstructlen = (u16)(*((u16 *)(mem + i + 0x16))); /* SMBIOS structure table length */
free (mem);
if (!(mem = mapmem_to_mem (smstructp, smstructlen)))
{
/* mem is NULL here, so the free() after NOT_MATCH is a no-op */
g_warning("Error: Read mem for SMBIOS structure table");
goto NOT_MATCH;
}
/* Walk the structure table: each entry is a formatted area of
 * entry[1] bytes followed by a string-set terminated by two NULs. */
smentryp = mem;
while (smentryp <= (u8 *)(mem + smstructlen))
{
if (smentryp[0] == 3) /* type "3" for System Enclosure or chassis */
{
ret_val = smentryp[5]; /* chassis type byte */
break ;
}
smentryp = smentryp + smentryp[1]; /* base + length */
/* skip the unformatted string area up to its double-NUL terminator */
while (smentryp <= (u8 *)(mem + smstructlen) && (smentryp[0] != 0 || smentryp[1] != 0))
++smentryp;
smentryp += 2;
}
NOT_MATCH:
free (mem);
return ret_val;
}
/* Copy `len` bytes of physical memory starting at `base` into a freshly
 * malloc()ed buffer by mmap()ing /dev/mem (requires root).
 * Returns NULL on any failure; the caller frees the result with free().
 * Fixes: mmap() signals failure with MAP_FAILED ((void *) -1), not a
 * negative pointer — the old `mmap(...) < 0` test never detected errors
 * and ordering comparisons on unrelated pointers are UB anyway. */
static void * mapmem_to_mem (size_t base, size_t len)
{
    void * mapp = NULL;
    void * memp = NULL;
    int fd;
    unsigned long int offset;
    /* mmap offsets must be page-aligned: map from the page boundary at or
     * below `base` and skip `offset` bytes of the mapping afterwards. */
    offset = base % sysconf (_SC_PAGESIZE);
    if ((fd = open ("/dev/mem", O_RDONLY)) < 0)
    {
        g_warning("Error: Open /dev/mem");
        return NULL;
    }
    if (!(memp = malloc (len)))
    {
        close (fd);
        g_warning("Error: malloc ()");
        return NULL;
    }
    mapp = mmap (NULL, len + offset, PROT_READ, MAP_PRIVATE, fd, base - offset);
    if (mapp == MAP_FAILED)
    {
        close (fd);
        free (memp);
        g_warning("Error: mmap()");
        return NULL;
    }
    memcpy (memp, mapp + offset, len);
    munmap (mapp, len + offset);
    close (fd);
    return memp;
}
|
huangzx/lightdm-startos-greeter
|
src/backend.c
|
C
|
gpl-2.0
| 15,065
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.