code: stringlengths (1 to 1.05M)
repo_name: stringlengths (6 to 83)
path: stringlengths (3 to 242)
language: stringclasses (222 values)
license: stringclasses (20 values)
size: int64 (1 to 1.05M)
#!/usr/bin/perl

open(OUTPUT, "> fsdata.c");

chdir("fs");
open(FILES, "find . -type f |");

while($file = <FILES>) {

    # Do not include files in CVS directories nor backup files.
    if($file =~ /(CVS|~)/) {
        next;
    }

    chop($file);

    open(HEADER, "> /tmp/header") || die $!;
    if($file =~ /404/) {
        print(HEADER "HTTP/1.0 404 File not found\r\n");
    } else {
        print(HEADER "HTTP/1.0 200 OK\r\n");
    }
    print(HEADER "Server: lwIP/pre-0.6 (http://www.sics.se/~adam/lwip/)\r\n");
    if($file =~ /\.html$/) {
        print(HEADER "Content-type: text/html\r\n");
    } elsif($file =~ /\.gif$/) {
        print(HEADER "Content-type: image/gif\r\n");
    } elsif($file =~ /\.png$/) {
        print(HEADER "Content-type: image/png\r\n");
    } elsif($file =~ /\.jpg$/) {
        print(HEADER "Content-type: image/jpeg\r\n");
    } elsif($file =~ /\.class$/) {
        print(HEADER "Content-type: application/octet-stream\r\n");
    } elsif($file =~ /\.ram$/) {
        print(HEADER "Content-type: audio/x-pn-realaudio\r\n");
    } else {
        print(HEADER "Content-type: text/plain\r\n");
    }
    print(HEADER "\r\n");
    close(HEADER);

    unless($file =~ /\.plain$/ || $file =~ /cgi/) {
        system("cat /tmp/header $file > /tmp/file");
    } else {
        system("cp $file /tmp/file");
    }

    open(FILE, "/tmp/file");
    unlink("/tmp/file");
    unlink("/tmp/header");

    $file =~ s/\.//;
    $fvar = $file;
    $fvar =~ s-/-_-g;
    $fvar =~ s-\.-_-g;
    print(OUTPUT "static const unsigned char data".$fvar."[] = {\n");
    print(OUTPUT "\t/* $file */\n\t");
    for($j = 0; $j < length($file); $j++) {
        printf(OUTPUT "%#02x, ", unpack("C", substr($file, $j, 1)));
    }
    printf(OUTPUT "0,\n");

    $i = 0;
    while(read(FILE, $data, 1)) {
        if($i == 0) {
            print(OUTPUT "\t");
        }
        printf(OUTPUT "%#02x, ", unpack("C", $data));
        $i++;
        if($i == 10) {
            print(OUTPUT "\n");
            $i = 0;
        }
    }
    print(OUTPUT "};\n\n");
    close(FILE);
    push(@fvars, $fvar);
    push(@files, $file);
}

for($i = 0; $i < @fvars; $i++) {
    $file = $files[$i];
    $fvar = $fvars[$i];

    if($i == 0) {
        $prevfile = "NULL";
    } else {
        $prevfile = "file" . $fvars[$i - 1];
    }
    print(OUTPUT "const struct fsdata_file file".$fvar."[] = {{$prevfile, data$fvar, ");
    print(OUTPUT "data$fvar + ". (length($file) + 1) .", ");
    print(OUTPUT "sizeof(data$fvar) - ". (length($file) + 1) ."}};\n\n");
}

print(OUTPUT "#define FS_ROOT file$fvars[$i - 1]\n\n");
print(OUTPUT "#define FS_NUMFILES $i\n");
repo_name: 2301_81045437/classic-platform
path: communication/lwip-2.0.3/src/apps/httpd/makefsdata/makefsdata
language: Perl
license: unknown
size: 2,686
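The fsdata.c that this Perl script writes is a chain of byte arrays plus one struct fsdata_file descriptor per file: each array starts with the NUL-terminated file name, followed by the minimal HTTP header and the file contents, and each descriptor points back to the previously emitted file. A hypothetical excerpt for a single file /404.html might look like the sketch below (identifiers follow the script's naming rules; the byte values are illustrative, not taken from a real run):

/* hypothetical fsdata.c excerpt as produced by the Perl script (illustrative bytes) */
static const unsigned char data_404_html[] = {
	/* /404.html */
	0x2f, 0x34, 0x30, 0x34, 0x2e, 0x68, 0x74, 0x6d, 0x6c, 0,   /* file name, NUL-terminated */
	0x48, 0x54, 0x54, 0x50, /* ... "HTTP/1.0 404 ..." header bytes, then the file contents ... */
};

const struct fsdata_file file_404_html[] = {{NULL, data_404_html, data_404_html + 10, sizeof(data_404_html) - 10}};

#define FS_ROOT file_404_html

#define FS_NUMFILES 1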
/** * makefsdata: Converts a directory structure for use with the lwIP httpd. * * This file is part of the lwIP TCP/IP stack. * * Author: Jim Pettinato * Simon Goldschmidt * * @todo: * - take TCP_MSS, LWIP_TCP_TIMESTAMPS and * PAYLOAD_ALIGN_TYPE/PAYLOAD_ALIGNMENT as arguments */ #include <stdio.h> #include <stdlib.h> #ifdef WIN32 #define WIN32_LEAN_AND_MEAN #include "windows.h" #else #include <dir.h> #endif #include <dos.h> #include <string.h> #include <time.h> #include <sys/stat.h> /** Makefsdata can generate *all* files deflate-compressed (where file size shrinks). * Since nearly all browsers support this, this is a good way to reduce ROM size. * To compress the files, "miniz.c" must be downloaded seperately. */ #ifndef MAKEFS_SUPPORT_DEFLATE #define MAKEFS_SUPPORT_DEFLATE 0 #endif #define COPY_BUFSIZE (1024*1024) /* 1 MByte */ #if MAKEFS_SUPPORT_DEFLATE #include "../miniz.c" typedef unsigned char uint8; typedef unsigned short uint16; typedef unsigned int uint; #define my_max(a,b) (((a) > (b)) ? (a) : (b)) #define my_min(a,b) (((a) < (b)) ? (a) : (b)) /* COMP_OUT_BUF_SIZE is the size of the output buffer used during compression. COMP_OUT_BUF_SIZE must be >= 1 and <= OUT_BUF_SIZE */ #define COMP_OUT_BUF_SIZE COPY_BUFSIZE /* OUT_BUF_SIZE is the size of the output buffer used during decompression. OUT_BUF_SIZE must be a power of 2 >= TINFL_LZ_DICT_SIZE (because the low-level decompressor not only writes, but reads from the output buffer as it decompresses) */ #define OUT_BUF_SIZE COPY_BUFSIZE static uint8 s_outbuf[OUT_BUF_SIZE]; static uint8 s_checkbuf[OUT_BUF_SIZE]; /* tdefl_compressor contains all the state needed by the low-level compressor so it's a pretty big struct (~300k). This example makes it a global vs. putting it on the stack, of course in real-world usage you'll probably malloc() or new it. */ tdefl_compressor g_deflator; tinfl_decompressor g_inflator; int deflate_level = 10; /* default compression level, can be changed via command line */ #define USAGE_ARG_DEFLATE " [-defl<:compr_level>]" #else /* MAKEFS_SUPPORT_DEFLATE */ #define USAGE_ARG_DEFLATE "" #endif /* MAKEFS_SUPPORT_DEFLATE */ /* Compatibility defines Win32 vs. 
DOS */ #ifdef WIN32 #define FIND_T WIN32_FIND_DATAA #define FIND_T_FILENAME(fInfo) (fInfo.cFileName) #define FIND_T_IS_DIR(fInfo) ((fInfo.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY) != 0) #define FIND_T_IS_FILE(fInfo) ((fInfo.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY) == 0) #define FIND_RET_T HANDLE #define FINDFIRST_FILE(path, result) FindFirstFileA(path, result) #define FINDFIRST_DIR(path, result) FindFirstFileA(path, result) #define FINDNEXT(ff_res, result) FindNextFileA(ff_res, result) #define FINDFIRST_SUCCEEDED(ret) (ret != INVALID_HANDLE_VALUE) #define FINDNEXT_SUCCEEDED(ret) (ret == TRUE) #define GETCWD(path, len) GetCurrentDirectoryA(len, path) #define CHDIR(path) SetCurrentDirectoryA(path) #define CHDIR_SUCCEEDED(ret) (ret == TRUE) #else #define FIND_T struct ffblk #define FIND_T_FILENAME(fInfo) (fInfo.ff_name) #define FIND_T_IS_DIR(fInfo) ((fInfo.ff_attrib & FA_DIREC) == FA_DIREC) #define FIND_T_IS_FILE(fInfo) (1) #define FIND_RET_T int #define FINDFIRST_FILE(path, result) findfirst(path, result, FA_ARCH) #define FINDFIRST_DIR(path, result) findfirst(path, result, FA_DIREC) #define FINDNEXT(ff_res, result) FindNextFileA(ff_res, result) #define FINDFIRST_SUCCEEDED(ret) (ret == 0) #define FINDNEXT_SUCCEEDED(ret) (ret == 0) #define GETCWD(path, len) getcwd(path, len) #define CHDIR(path) chdir(path) #define CHDIR_SUCCEEDED(ret) (ret == 0) #endif #define NEWLINE "\r\n" #define NEWLINE_LEN 2 /* define this to get the header variables we use to build HTTP headers */ #define LWIP_HTTPD_DYNAMIC_HEADERS 1 #define LWIP_HTTPD_SSI 1 #include "lwip/init.h" #include "../httpd_structs.h" #include "lwip/apps/fs.h" #include "../core/inet_chksum.c" #include "../core/def.c" /** (Your server name here) */ const char *serverID = "Server: "HTTPD_SERVER_AGENT"\r\n"; char serverIDBuffer[1024]; /* change this to suit your MEM_ALIGNMENT */ #define PAYLOAD_ALIGNMENT 4 /* set this to 0 to prevent aligning payload */ #define ALIGN_PAYLOAD 1 /* define this to a type that has the required alignment */ #define PAYLOAD_ALIGN_TYPE "unsigned int" static int payload_alingment_dummy_counter = 0; #define HEX_BYTES_PER_LINE 16 #define MAX_PATH_LEN 256 struct file_entry { struct file_entry* next; const char* filename_c; }; int process_sub(FILE *data_file, FILE *struct_file); int process_file(FILE *data_file, FILE *struct_file, const char *filename); int file_write_http_header(FILE *data_file, const char *filename, int file_size, u16_t *http_hdr_len, u16_t *http_hdr_chksum, u8_t provide_content_len, int is_compressed); int file_put_ascii(FILE *file, const char *ascii_string, int len, int *i); int s_put_ascii(char *buf, const char *ascii_string, int len, int *i); void concat_files(const char *file1, const char *file2, const char *targetfile); int check_path(char* path, size_t size); /* 5 bytes per char + 3 bytes per line */ static char file_buffer_c[COPY_BUFSIZE * 5 + ((COPY_BUFSIZE / HEX_BYTES_PER_LINE) * 3)]; char curSubdir[MAX_PATH_LEN]; char lastFileVar[MAX_PATH_LEN]; char hdr_buf[4096]; unsigned char processSubs = 1; unsigned char includeHttpHeader = 1; unsigned char useHttp11 = 0; unsigned char supportSsi = 1; unsigned char precalcChksum = 0; unsigned char includeLastModified = 0; #if MAKEFS_SUPPORT_DEFLATE unsigned char deflateNonSsiFiles = 0; size_t deflatedBytesReduced = 0; size_t overallDataBytes = 0; #endif struct file_entry* first_file = NULL; struct file_entry* last_file = NULL; static void print_usage(void) { printf(" Usage: htmlgen [targetdir] [-s] [-e] [-i] [-11] [-nossi] [-c] [-f:<filename>] [-m] 
[-svr:<name>]" USAGE_ARG_DEFLATE NEWLINE NEWLINE); printf(" targetdir: relative or absolute path to files to convert" NEWLINE); printf(" switch -s: toggle processing of subdirectories (default is on)" NEWLINE); printf(" switch -e: exclude HTTP header from file (header is created at runtime, default is off)" NEWLINE); printf(" switch -11: include HTTP 1.1 header (1.0 is default)" NEWLINE); printf(" switch -nossi: no support for SSI (cannot calculate Content-Length for SSI)" NEWLINE); printf(" switch -c: precalculate checksums for all pages (default is off)" NEWLINE); printf(" switch -f: target filename (default is \"fsdata.c\")" NEWLINE); printf(" switch -m: include \"Last-Modified\" header based on file time" NEWLINE); printf(" switch -svr: server identifier sent in HTTP response header ('Server' field)" NEWLINE); #if MAKEFS_SUPPORT_DEFLATE printf(" switch -defl: deflate-compress all non-SSI files (with opt. compr.-level, default=10)" NEWLINE); printf(" ATTENTION: browser has to support \"Content-Encoding: deflate\"!" NEWLINE); #endif printf(" if targetdir not specified, htmlgen will attempt to" NEWLINE); printf(" process files in subdirectory 'fs'" NEWLINE); } int main(int argc, char *argv[]) { char path[MAX_PATH_LEN]; char appPath[MAX_PATH_LEN]; FILE *data_file; FILE *struct_file; int filesProcessed; int i; char targetfile[MAX_PATH_LEN]; strcpy(targetfile, "fsdata.c"); memset(path, 0, sizeof(path)); memset(appPath, 0, sizeof(appPath)); printf(NEWLINE " makefsdata - HTML to C source converter" NEWLINE); printf(" by Jim Pettinato - circa 2003 " NEWLINE); printf(" extended by Simon Goldschmidt - 2009 " NEWLINE NEWLINE); strcpy(path, "fs"); for (i = 1; i < argc; i++) { if (argv[i] == NULL) { continue; } if (argv[i][0] == '-') { if (strstr(argv[i], "-svr:") == argv[i]) { snprintf(serverIDBuffer, sizeof(serverIDBuffer), "Server: %s\r\n", &argv[i][5]); serverID = serverIDBuffer; printf("Using Server-ID: \"%s\"\n", serverID); } else if (strstr(argv[i], "-s") == argv[i]) { processSubs = 0; } else if (strstr(argv[i], "-e") == argv[i]) { includeHttpHeader = 0; } else if (strstr(argv[i], "-11") == argv[i]) { useHttp11 = 1; } else if (strstr(argv[i], "-nossi") == argv[i]) { supportSsi = 0; } else if (strstr(argv[i], "-c") == argv[i]) { precalcChksum = 1; } else if (strstr(argv[i], "-f:") == argv[i]) { strncpy(targetfile, &argv[i][3], sizeof(targetfile) - 1); targetfile[sizeof(targetfile) - 1] = 0; printf("Writing to file \"%s\"\n", targetfile); } else if (strstr(argv[i], "-m") == argv[i]) { includeLastModified = 1; } else if (strstr(argv[i], "-defl") == argv[i]) { #if MAKEFS_SUPPORT_DEFLATE char* colon = strstr(argv[i], ":"); if (colon) { if (colon[1] != 0) { int defl_level = atoi(&colon[1]); if ((defl_level >= 0) && (defl_level <= 10)) { deflate_level = defl_level; } else { printf("ERROR: deflate level must be [0..10]" NEWLINE); exit(0); } } } deflateNonSsiFiles = 1; printf("Deflating all non-SSI files with level %d (but only if size is reduced)" NEWLINE, deflate_level); #else printf("WARNING: Deflate support is disabled\n"); #endif } else if ((strstr(argv[i], "-?")) || (strstr(argv[i], "-h"))) { print_usage(); exit(0); } } else if ((argv[i][0] == '/') && (argv[i][1] == '?') && (argv[i][2] == 0)) { print_usage(); exit(0); } else { strncpy(path, argv[i], sizeof(path)-1); path[sizeof(path)-1] = 0; } } if (!check_path(path, sizeof(path))) { printf("Invalid path: \"%s\"." 
NEWLINE, path); exit(-1); } GETCWD(appPath, MAX_PATH_LEN); /* if command line param or subdir named 'fs' not found spout usage verbiage */ if (!CHDIR_SUCCEEDED(CHDIR(path))) { /* if no subdir named 'fs' (or the one which was given) exists, spout usage verbiage */ printf(" Failed to open directory \"%s\"." NEWLINE NEWLINE, path); print_usage(); exit(-1); } CHDIR(appPath); printf("HTTP %sheader will %s statically included." NEWLINE, (includeHttpHeader ? (useHttp11 ? "1.1 " : "1.0 ") : ""), (includeHttpHeader ? "be" : "not be")); sprintf(curSubdir, ""); /* start off in web page's root directory - relative paths */ printf(" Processing all files in directory %s", path); if (processSubs) { printf(" and subdirectories..." NEWLINE NEWLINE); } else { printf("..." NEWLINE NEWLINE); } data_file = fopen("fsdata.tmp", "wb"); if (data_file == NULL) { printf("Failed to create file \"fsdata.tmp\"\n"); exit(-1); } struct_file = fopen("fshdr.tmp", "wb"); if (struct_file == NULL) { printf("Failed to create file \"fshdr.tmp\"\n"); fclose(data_file); exit(-1); } CHDIR(path); fprintf(data_file, "#include \"lwip/apps/fs.h\"" NEWLINE); fprintf(data_file, "#include \"lwip/def.h\"" NEWLINE); fprintf(data_file, "#include \"fsdata.h\"" NEWLINE NEWLINE NEWLINE); fprintf(data_file, "#define file_NULL (struct fsdata_file *) NULL" NEWLINE NEWLINE NEWLINE); /* define FS_FILE_FLAGS_HEADER_INCLUDED to 1 if not defined (compatibility with older httpd/fs) */ fprintf(data_file, "#ifndef FS_FILE_FLAGS_HEADER_INCLUDED" NEWLINE "#define FS_FILE_FLAGS_HEADER_INCLUDED 1" NEWLINE "#endif" NEWLINE); /* define FS_FILE_FLAGS_HEADER_PERSISTENT to 0 if not defined (compatibility with older httpd/fs: wasn't supported back then) */ fprintf(data_file, "#ifndef FS_FILE_FLAGS_HEADER_PERSISTENT" NEWLINE "#define FS_FILE_FLAGS_HEADER_PERSISTENT 0" NEWLINE "#endif" NEWLINE); /* define alignment defines */ #if ALIGN_PAYLOAD fprintf(data_file, "/* FSDATA_FILE_ALIGNMENT: 0=off, 1=by variable, 2=by include */" NEWLINE "#ifndef FSDATA_FILE_ALIGNMENT" NEWLINE "#define FSDATA_FILE_ALIGNMENT 0" NEWLINE "#endif" NEWLINE); #endif fprintf(data_file, "#ifndef FSDATA_ALIGN_PRE" NEWLINE "#define FSDATA_ALIGN_PRE" NEWLINE "#endif" NEWLINE); fprintf(data_file, "#ifndef FSDATA_ALIGN_POST" NEWLINE "#define FSDATA_ALIGN_POST" NEWLINE "#endif" NEWLINE); #if ALIGN_PAYLOAD fprintf(data_file, "#if FSDATA_FILE_ALIGNMENT==2" NEWLINE "#include \"fsdata_alignment.h\"" NEWLINE "#endif" NEWLINE); #endif sprintf(lastFileVar, "NULL"); filesProcessed = process_sub(data_file, struct_file); /* data_file now contains all of the raw data.. now append linked list of * file header structs to allow embedded app to search for a file name */ fprintf(data_file, NEWLINE NEWLINE); fprintf(struct_file, "#define FS_ROOT file_%s" NEWLINE, lastFileVar); fprintf(struct_file, "#define FS_NUMFILES %d" NEWLINE NEWLINE, filesProcessed); fclose(data_file); fclose(struct_file); CHDIR(appPath); /* append struct_file to data_file */ printf(NEWLINE "Creating target file..." NEWLINE NEWLINE); concat_files("fsdata.tmp", "fshdr.tmp", targetfile); /* if succeeded, delete the temporary files */ if (remove("fsdata.tmp") != 0) { printf("Warning: failed to delete fsdata.tmp\n"); } if (remove("fshdr.tmp") != 0) { printf("Warning: failed to delete fshdr.tmp\n"); } printf(NEWLINE "Processed %d files - done." 
NEWLINE, filesProcessed); #if MAKEFS_SUPPORT_DEFLATE if (deflateNonSsiFiles) { printf("(Deflated total byte reduction: %d bytes -> %d bytes (%.02f%%)" NEWLINE, (int)overallDataBytes, (int)deflatedBytesReduced, (float)((deflatedBytesReduced*100.0)/overallDataBytes)); } #endif printf(NEWLINE); while (first_file != NULL) { struct file_entry* fe = first_file; first_file = fe->next; free(fe); } return 0; } int check_path(char* path, size_t size) { size_t slen; if (path[0] == 0) { /* empty */ return 0; } slen = strlen(path); if (slen >= size) { /* not NULL-terminated */ return 0; } while ((slen > 0) && ((path[slen] == '\\') || (path[slen] == '/'))) { /* path should not end with trailing backslash */ path[slen] = 0; slen--; } if (slen == 0) { return 0; } return 1; } static void copy_file(const char *filename_in, FILE *fout) { FILE *fin; size_t len; void* buf; fin = fopen(filename_in, "rb"); if (fin == NULL) { printf("Failed to open file \"%s\"\n", filename_in); exit(-1); } buf = malloc(COPY_BUFSIZE); while ((len = fread(buf, 1, COPY_BUFSIZE, fin)) > 0) { fwrite(buf, 1, len, fout); } free(buf); fclose(fin); } void concat_files(const char *file1, const char *file2, const char *targetfile) { FILE *fout; fout = fopen(targetfile, "wb"); if (fout == NULL) { printf("Failed to open file \"%s\"\n", targetfile); exit(-1); } copy_file(file1, fout); copy_file(file2, fout); fclose(fout); } int process_sub(FILE *data_file, FILE *struct_file) { FIND_T fInfo; FIND_RET_T fret; int filesProcessed = 0; if (processSubs) { /* process subs recursively */ size_t sublen = strlen(curSubdir); size_t freelen = sizeof(curSubdir) - sublen - 1; LWIP_ASSERT("sublen < sizeof(curSubdir)", sublen < sizeof(curSubdir)); fret = FINDFIRST_DIR("*", &fInfo); if (FINDFIRST_SUCCEEDED(fret)) { do { const char *curName = FIND_T_FILENAME(fInfo); if ((curName[0] == '.') || (strcmp(curName, "CVS") == 0)) { continue; } if (!FIND_T_IS_DIR(fInfo)) { continue; } if (freelen > 0) { CHDIR(curName); strncat(curSubdir, "/", freelen); strncat(curSubdir, curName, freelen - 1); curSubdir[sizeof(curSubdir) - 1] = 0; printf("processing subdirectory %s/..." NEWLINE, curSubdir); filesProcessed += process_sub(data_file, struct_file); CHDIR(".."); curSubdir[sublen] = 0; } else { printf("WARNING: cannot process sub due to path length restrictions: \"%s/%s\"\n", curSubdir, curName); } } while (FINDNEXT_SUCCEEDED(FINDNEXT(fret, &fInfo))); } } fret = FINDFIRST_FILE("*.*", &fInfo); if (FINDFIRST_SUCCEEDED(fret)) { /* at least one file in directory */ do { if (FIND_T_IS_FILE(fInfo)) { const char *curName = FIND_T_FILENAME(fInfo); printf("processing %s/%s..." NEWLINE, curSubdir, curName); if (process_file(data_file, struct_file, curName) < 0) { printf(NEWLINE "Error... 
aborting" NEWLINE); return -1; } filesProcessed++; } } while (FINDNEXT_SUCCEEDED(FINDNEXT(fret, &fInfo))); } return filesProcessed; } u8_t* get_file_data(const char* filename, int* file_size, int can_be_compressed, int* is_compressed) { FILE *inFile; size_t fsize = 0; u8_t* buf; size_t r; int rs; inFile = fopen(filename, "rb"); if (inFile == NULL) { printf("Failed to open file \"%s\"\n", filename); exit(-1); } fseek(inFile, 0, SEEK_END); rs = ftell(inFile); if (rs < 0) { printf("ftell failed with %d\n", errno); exit(-1); } fsize = (size_t)rs; fseek(inFile, 0, SEEK_SET); buf = (u8_t*)malloc(fsize); LWIP_ASSERT("buf != NULL", buf != NULL); r = fread(buf, 1, fsize, inFile); *file_size = fsize; *is_compressed = 0; #if MAKEFS_SUPPORT_DEFLATE overallDataBytes += fsize; if (deflateNonSsiFiles) { if (can_be_compressed) { if (fsize < OUT_BUF_SIZE) { u8_t* ret_buf; tdefl_status status; size_t in_bytes = fsize; size_t out_bytes = OUT_BUF_SIZE; const void *next_in = buf; void *next_out = s_outbuf; /* create tdefl() compatible flags (we have to compose the low-level flags ourselves, or use tdefl_create_comp_flags_from_zip_params() but that means MINIZ_NO_ZLIB_APIS can't be defined). */ mz_uint comp_flags = s_tdefl_num_probes[MZ_MIN(10, deflate_level)] | ((deflate_level <= 3) ? TDEFL_GREEDY_PARSING_FLAG : 0); if (!deflate_level) { comp_flags |= TDEFL_FORCE_ALL_RAW_BLOCKS; } status = tdefl_init(&g_deflator, NULL, NULL, comp_flags); if (status != TDEFL_STATUS_OKAY) { printf("tdefl_init() failed!\n"); exit(-1); } memset(s_outbuf, 0, sizeof(s_outbuf)); status = tdefl_compress(&g_deflator, next_in, &in_bytes, next_out, &out_bytes, TDEFL_FINISH); if (status != TDEFL_STATUS_DONE) { printf("deflate failed: %d\n", status); exit(-1); } LWIP_ASSERT("out_bytes <= COPY_BUFSIZE", out_bytes <= OUT_BUF_SIZE); if (out_bytes < fsize) { ret_buf = (u8_t*)malloc(out_bytes); LWIP_ASSERT("ret_buf != NULL", ret_buf != NULL); memcpy(ret_buf, s_outbuf, out_bytes); { /* sanity-check compression be inflating and comparing to the original */ tinfl_status dec_status; tinfl_decompressor inflator; size_t dec_in_bytes = out_bytes; size_t dec_out_bytes = OUT_BUF_SIZE; next_out = s_checkbuf; tinfl_init(&inflator); memset(s_checkbuf, 0, sizeof(s_checkbuf)); dec_status = tinfl_decompress(&inflator, (const mz_uint8 *)ret_buf, &dec_in_bytes, s_checkbuf, (mz_uint8 *)next_out, &dec_out_bytes, 0); LWIP_ASSERT("tinfl_decompress failed", dec_status == TINFL_STATUS_DONE); LWIP_ASSERT("tinfl_decompress size mismatch", fsize == dec_out_bytes); LWIP_ASSERT("decompressed memcmp failed", !memcmp(s_checkbuf, buf, fsize)); } /* free original buffer, use compressed data + size */ free(buf); buf = ret_buf; *file_size = out_bytes; printf(" - deflate: %d bytes -> %d bytes (%.02f%%)" NEWLINE, (int)fsize, (int)out_bytes, (float)((out_bytes*100.0)/fsize)); deflatedBytesReduced += (size_t)(fsize - out_bytes); *is_compressed = 1; } else { printf(" - uncompressed: (would be %d bytes larger using deflate)" NEWLINE, (int)(out_bytes - fsize)); } } else { printf(" - uncompressed: (file is larger than deflate bufer)" NEWLINE); } } else { printf(" - SSI file, cannot be compressed" NEWLINE); } } #else LWIP_UNUSED_ARG(can_be_compressed); #endif fclose(inFile); return buf; } void process_file_data(FILE* data_file, u8_t* file_data, size_t file_size) { size_t written, i, src_off=0; size_t off = 0; for (i = 0; i < file_size; i++) { LWIP_ASSERT("file_buffer_c overflow", off < sizeof(file_buffer_c) - 5); sprintf(&file_buffer_c[off], "0x%02.2x,", file_data[i]); off += 5; if 
((++src_off % HEX_BYTES_PER_LINE) == 0) { LWIP_ASSERT("file_buffer_c overflow", off < sizeof(file_buffer_c) - NEWLINE_LEN); memcpy(&file_buffer_c[off], NEWLINE, NEWLINE_LEN); off += NEWLINE_LEN; } if (off + 20 >= sizeof(file_buffer_c)) { written = fwrite(file_buffer_c, 1, off, data_file); LWIP_ASSERT("written == off", written == off); off = 0; } } written = fwrite(file_buffer_c, 1, off, data_file); LWIP_ASSERT("written == off", written == off); } int write_checksums(FILE *struct_file, const char *varname, u16_t hdr_len, u16_t hdr_chksum, const u8_t* file_data, size_t file_size) { int chunk_size = TCP_MSS; int offset, src_offset; size_t len; int i = 0; #if LWIP_TCP_TIMESTAMPS /* when timestamps are used, usable space is 12 bytes less per segment */ chunk_size -= 12; #endif fprintf(struct_file, "#if HTTPD_PRECALCULATED_CHECKSUM" NEWLINE); fprintf(struct_file, "const struct fsdata_chksum chksums_%s[] = {" NEWLINE, varname); if (hdr_len > 0) { /* add checksum for HTTP header */ fprintf(struct_file, "{%d, 0x%04x, %d}," NEWLINE, 0, hdr_chksum, hdr_len); i++; } src_offset = 0; for (offset = hdr_len; ; offset += len) { unsigned short chksum; void* data = (void*)&file_data[src_offset]; len = LWIP_MIN(chunk_size, (int)file_size - src_offset); if (len == 0) { break; } chksum = ~inet_chksum(data, (u16_t)len); /* add checksum for data */ fprintf(struct_file, "{%d, 0x%04x, %d}," NEWLINE, offset, chksum, len); i++; } fprintf(struct_file, "};" NEWLINE); fprintf(struct_file, "#endif /* HTTPD_PRECALCULATED_CHECKSUM */" NEWLINE); return i; } static int is_valid_char_for_c_var(char x) { if (((x >= 'A') && (x <= 'Z')) || ((x >= 'a') && (x <= 'z')) || ((x >= '0') && (x <= '9')) || (x == '_')) { return 1; } return 0; } static void fix_filename_for_c(char* qualifiedName, size_t max_len) { struct file_entry* f; size_t len = strlen(qualifiedName); char *new_name = (char*)malloc(len + 2); int filename_ok; int cnt = 0; size_t i; if (len + 3 == max_len) { printf("File name too long: \"%s\"\n", qualifiedName); exit(-1); } strcpy(new_name, qualifiedName); for (i = 0; i < len; i++) { if (!is_valid_char_for_c_var(new_name[i])) { new_name[i] = '_'; } } do { filename_ok = 1; for (f = first_file; f != NULL; f = f->next) { if (!strcmp(f->filename_c, new_name)) { filename_ok = 0; cnt++; /* try next unique file name */ sprintf(&new_name[len], "%d", cnt); break; } } } while (!filename_ok && (cnt < 999)); if (!filename_ok) { printf("Failed to get unique file name: \"%s\"\n", qualifiedName); exit(-1); } strcpy(qualifiedName, new_name); free(new_name); } static void register_filename(const char* qualifiedName) { struct file_entry* fe = (struct file_entry*)malloc(sizeof(struct file_entry)); fe->filename_c = strdup(qualifiedName); fe->next = NULL; if (first_file == NULL) { first_file = last_file = fe; } else { last_file->next = fe; last_file = fe; } } int is_ssi_file(const char* filename) { size_t loop; for (loop = 0; loop < NUM_SHTML_EXTENSIONS; loop++) { if (strstr(filename, g_pcSSIExtensions[loop])) { return 1; } } return 0; } int process_file(FILE *data_file, FILE *struct_file, const char *filename) { char varname[MAX_PATH_LEN]; int i = 0; char qualifiedName[MAX_PATH_LEN]; int file_size; u16_t http_hdr_chksum = 0; u16_t http_hdr_len = 0; int chksum_count = 0; u8_t flags = 0; const char* flags_str; u8_t has_content_len; u8_t* file_data; int is_compressed = 0; /* create qualified name (@todo: prepend slash or not?) 
*/ sprintf(qualifiedName,"%s/%s", curSubdir, filename); /* create C variable name */ strcpy(varname, qualifiedName); /* convert slashes & dots to underscores */ fix_filename_for_c(varname, MAX_PATH_LEN); register_filename(varname); #if ALIGN_PAYLOAD /* to force even alignment of array, type 1 */ fprintf(data_file, "#if FSDATA_FILE_ALIGNMENT==1" NEWLINE); fprintf(data_file, "static const " PAYLOAD_ALIGN_TYPE " dummy_align_%s = %d;" NEWLINE, varname, payload_alingment_dummy_counter++); fprintf(data_file, "#endif" NEWLINE); #endif /* ALIGN_PAYLOAD */ fprintf(data_file, "static const unsigned char FSDATA_ALIGN_PRE data_%s[] FSDATA_ALIGN_POST = {" NEWLINE, varname); /* encode source file name (used by file system, not returned to browser) */ fprintf(data_file, "/* %s (%d chars) */" NEWLINE, qualifiedName, strlen(qualifiedName)+1); file_put_ascii(data_file, qualifiedName, strlen(qualifiedName)+1, &i); #if ALIGN_PAYLOAD /* pad to even number of bytes to assure payload is on aligned boundary */ while(i % PAYLOAD_ALIGNMENT != 0) { fprintf(data_file, "0x%02.2x,", 0); i++; } #endif /* ALIGN_PAYLOAD */ fprintf(data_file, NEWLINE); has_content_len = !is_ssi_file(filename); file_data = get_file_data(filename, &file_size, includeHttpHeader && has_content_len, &is_compressed); if (includeHttpHeader) { file_write_http_header(data_file, filename, file_size, &http_hdr_len, &http_hdr_chksum, has_content_len, is_compressed); flags = FS_FILE_FLAGS_HEADER_INCLUDED; if (has_content_len) { flags |= FS_FILE_FLAGS_HEADER_PERSISTENT; } } if (precalcChksum) { chksum_count = write_checksums(struct_file, varname, http_hdr_len, http_hdr_chksum, file_data, file_size); } /* build declaration of struct fsdata_file in temp file */ fprintf(struct_file, "const struct fsdata_file file_%s[] = { {" NEWLINE, varname); fprintf(struct_file, "file_%s," NEWLINE, lastFileVar); fprintf(struct_file, "data_%s," NEWLINE, varname); fprintf(struct_file, "data_%s + %d," NEWLINE, varname, i); fprintf(struct_file, "sizeof(data_%s) - %d," NEWLINE, varname, i); switch(flags) { case(FS_FILE_FLAGS_HEADER_INCLUDED): flags_str = "FS_FILE_FLAGS_HEADER_INCLUDED"; break; case(FS_FILE_FLAGS_HEADER_PERSISTENT): flags_str = "FS_FILE_FLAGS_HEADER_PERSISTENT"; break; case(FS_FILE_FLAGS_HEADER_INCLUDED | FS_FILE_FLAGS_HEADER_PERSISTENT): flags_str = "FS_FILE_FLAGS_HEADER_INCLUDED | FS_FILE_FLAGS_HEADER_PERSISTENT"; break; default: flags_str = "0"; break; } fprintf(struct_file, "%s," NEWLINE, flags_str); if (precalcChksum) { fprintf(struct_file, "#if HTTPD_PRECALCULATED_CHECKSUM" NEWLINE); fprintf(struct_file, "%d, chksums_%s," NEWLINE, chksum_count, varname); fprintf(struct_file, "#endif /* HTTPD_PRECALCULATED_CHECKSUM */" NEWLINE); } fprintf(struct_file, "}};" NEWLINE NEWLINE); strcpy(lastFileVar, varname); /* write actual file contents */ i = 0; fprintf(data_file, NEWLINE "/* raw file data (%d bytes) */" NEWLINE, file_size); process_file_data(data_file, file_data, file_size); fprintf(data_file, "};" NEWLINE NEWLINE); free(file_data); return 0; } int file_write_http_header(FILE *data_file, const char *filename, int file_size, u16_t *http_hdr_len, u16_t *http_hdr_chksum, u8_t provide_content_len, int is_compressed) { int i = 0; int response_type = HTTP_HDR_OK; const char* file_type; const char *cur_string; size_t cur_len; int written = 0; size_t hdr_len = 0; u16_t acc; const char *file_ext; int j; u8_t provide_last_modified = includeLastModified; memset(hdr_buf, 0, sizeof(hdr_buf)); if (useHttp11) { response_type = HTTP_HDR_OK_11; } fprintf(data_file, NEWLINE 
"/* HTTP header */"); if (strstr(filename, "404") == filename) { response_type = HTTP_HDR_NOT_FOUND; if (useHttp11) { response_type = HTTP_HDR_NOT_FOUND_11; } } else if (strstr(filename, "400") == filename) { response_type = HTTP_HDR_BAD_REQUEST; if (useHttp11) { response_type = HTTP_HDR_BAD_REQUEST_11; } } else if (strstr(filename, "501") == filename) { response_type = HTTP_HDR_NOT_IMPL; if (useHttp11) { response_type = HTTP_HDR_NOT_IMPL_11; } } cur_string = g_psHTTPHeaderStrings[response_type]; cur_len = strlen(cur_string); fprintf(data_file, NEWLINE "/* \"%s\" (%d bytes) */" NEWLINE, cur_string, cur_len); written += file_put_ascii(data_file, cur_string, cur_len, &i); i = 0; if (precalcChksum) { memcpy(&hdr_buf[hdr_len], cur_string, cur_len); hdr_len += cur_len; } cur_string = serverID; cur_len = strlen(cur_string); fprintf(data_file, NEWLINE "/* \"%s\" (%d bytes) */" NEWLINE, cur_string, cur_len); written += file_put_ascii(data_file, cur_string, cur_len, &i); i = 0; if (precalcChksum) { memcpy(&hdr_buf[hdr_len], cur_string, cur_len); hdr_len += cur_len; } file_ext = filename; if (file_ext != NULL) { while(strstr(file_ext, ".") != NULL) { file_ext = strstr(file_ext, "."); file_ext++; } } if ((file_ext == NULL) || (*file_ext == 0)) { printf("failed to get extension for file \"%s\", using default.\n", filename); file_type = HTTP_HDR_DEFAULT_TYPE; } else { file_type = NULL; for (j = 0; j < NUM_HTTP_HEADERS; j++) { if (!strcmp(file_ext, g_psHTTPHeaders[j].extension)) { file_type = g_psHTTPHeaders[j].content_type; break; } } if (file_type == NULL) { printf("failed to get file type for extension \"%s\", using default.\n", file_ext); file_type = HTTP_HDR_DEFAULT_TYPE; } } /* Content-Length is used for persistent connections in HTTP/1.1 but also for download progress in older versions @todo: just use a big-enough buffer and let the HTTPD send spaces? 
*/ if (provide_content_len) { char intbuf[MAX_PATH_LEN]; int content_len = file_size; memset(intbuf, 0, sizeof(intbuf)); cur_string = g_psHTTPHeaderStrings[HTTP_HDR_CONTENT_LENGTH]; cur_len = strlen(cur_string); fprintf(data_file, NEWLINE "/* \"%s%d\r\n\" (%d+ bytes) */" NEWLINE, cur_string, content_len, cur_len+2); written += file_put_ascii(data_file, cur_string, cur_len, &i); if (precalcChksum) { memcpy(&hdr_buf[hdr_len], cur_string, cur_len); hdr_len += cur_len; } _itoa(content_len, intbuf, 10); strcat(intbuf, "\r\n"); cur_len = strlen(intbuf); written += file_put_ascii(data_file, intbuf, cur_len, &i); i = 0; if (precalcChksum) { memcpy(&hdr_buf[hdr_len], intbuf, cur_len); hdr_len += cur_len; } } if (provide_last_modified) { char modbuf[256]; struct stat stat_data; struct tm* t; memset(modbuf, 0, sizeof(modbuf)); memset(&stat_data, 0, sizeof(stat_data)); cur_string = modbuf; strcpy(modbuf, "Last-Modified: "); if (stat(filename, &stat_data) != 0) { printf("stat(%s) failed with error %d\n", filename, errno); exit(-1); } t = gmtime(&stat_data.st_mtime); if (t == NULL) { printf("gmtime() failed with error %d\n", errno); exit(-1); } strftime(&modbuf[15], sizeof(modbuf)-15, "%a, %d %b %Y %H:%M:%S GMT", t); cur_len = strlen(cur_string); fprintf(data_file, NEWLINE "/* \"%s\"\r\n\" (%d+ bytes) */" NEWLINE, cur_string, cur_len+2); written += file_put_ascii(data_file, cur_string, cur_len, &i); if (precalcChksum) { memcpy(&hdr_buf[hdr_len], cur_string, cur_len); hdr_len += cur_len; } modbuf[0] = 0; strcat(modbuf, "\r\n"); cur_len = strlen(modbuf); written += file_put_ascii(data_file, modbuf, cur_len, &i); i = 0; if (precalcChksum) { memcpy(&hdr_buf[hdr_len], modbuf, cur_len); hdr_len += cur_len; } } /* HTTP/1.1 implements persistent connections */ if (useHttp11) { if (provide_content_len) { cur_string = g_psHTTPHeaderStrings[HTTP_HDR_CONN_KEEPALIVE]; } else { /* no Content-Length available, so a persistent connection is no possible because the client does not know the data length */ cur_string = g_psHTTPHeaderStrings[HTTP_HDR_CONN_CLOSE]; } cur_len = strlen(cur_string); fprintf(data_file, NEWLINE "/* \"%s\" (%d bytes) */" NEWLINE, cur_string, cur_len); written += file_put_ascii(data_file, cur_string, cur_len, &i); i = 0; if (precalcChksum) { memcpy(&hdr_buf[hdr_len], cur_string, cur_len); hdr_len += cur_len; } } #if MAKEFS_SUPPORT_DEFLATE if (is_compressed) { /* tell the client about the deflate encoding */ LWIP_ASSERT("error", deflateNonSsiFiles); cur_string = "Content-Encoding: deflate\r\n"; cur_len = strlen(cur_string); fprintf(data_file, NEWLINE "/* \"%s\" (%d bytes) */" NEWLINE, cur_string, cur_len); written += file_put_ascii(data_file, cur_string, cur_len, &i); i = 0; } #else LWIP_UNUSED_ARG(is_compressed); #endif /* write content-type, ATTENTION: this includes the double-CRLF! */ cur_string = file_type; cur_len = strlen(cur_string); fprintf(data_file, NEWLINE "/* \"%s\" (%d bytes) */" NEWLINE, cur_string, cur_len); written += file_put_ascii(data_file, cur_string, cur_len, &i); i = 0; /* ATTENTION: headers are done now (double-CRLF has been written!) 
*/ if (precalcChksum) { memcpy(&hdr_buf[hdr_len], cur_string, cur_len); hdr_len += cur_len; LWIP_ASSERT("hdr_len <= 0xffff", hdr_len <= 0xffff); LWIP_ASSERT("strlen(hdr_buf) == hdr_len", strlen(hdr_buf) == hdr_len); acc = ~inet_chksum(hdr_buf, (u16_t)hdr_len); *http_hdr_len = (u16_t)hdr_len; *http_hdr_chksum = acc; } return written; } int file_put_ascii(FILE *file, const char* ascii_string, int len, int *i) { int x; for (x = 0; x < len; x++) { unsigned char cur = ascii_string[x]; fprintf(file, "0x%02.2x,", cur); if ((++(*i) % HEX_BYTES_PER_LINE) == 0) { fprintf(file, NEWLINE); } } return len; } int s_put_ascii(char *buf, const char *ascii_string, int len, int *i) { int x; int idx = 0; for (x = 0; x < len; x++) { unsigned char cur = ascii_string[x]; sprintf(&buf[idx], "0x%02.2x,", cur); idx += 5; if ((++(*i) % HEX_BYTES_PER_LINE) == 0) { sprintf(&buf[idx], NEWLINE); idx += NEWLINE_LEN; } } return len; }
repo_name: 2301_81045437/classic-platform
path: communication/lwip-2.0.3/src/apps/httpd/makefsdata/makefsdata.c
language: C
license: unknown
size: 36,332
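The C rewrite above (invoked against a web root with switches such as -11, -c or -m, per its usage text) emits the same kind of fsdata.c, but it can also embed a full precomputed HTTP header per file, records that in a flags field, and with -c appends precalculated checksums. The entry below is a hypothetical sketch of that output; names, offsets and bytes are illustrative only:

/* hypothetical entry emitted by makefsdata.c (illustrative values) */
static const unsigned char FSDATA_ALIGN_PRE data__404_html[] FSDATA_ALIGN_POST = {
	/* /404.html (10 chars) */
	0x2f, 0x34, 0x30, 0x34, 0x2e, 0x68, 0x74, 0x6d, 0x6c, 0x00, 0x00, 0x00, /* name, padded to PAYLOAD_ALIGNMENT */
	/* HTTP header (unless -e was given), then raw file data */
};

const struct fsdata_file file__404_html[] = { {
	file_NULL,                  /* previously emitted entry in the linked list */
	data__404_html,
	data__404_html + 12,        /* payload starts after the padded name */
	sizeof(data__404_html) - 12,
	FS_FILE_FLAGS_HEADER_INCLUDED | FS_FILE_FLAGS_HEADER_PERSISTENT,
#if HTTPD_PRECALCULATED_CHECKSUM
	1, chksums__404_html,       /* only emitted when -c was given */
#endif
} };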
/** * @file * lwIP iPerf server implementation */ /** * @defgroup iperf Iperf server * @ingroup apps * * This is a simple performance measuring server to check your bandwith using * iPerf2 on a PC as client. * It is currently a minimal implementation providing an IPv4 TCP server only. * * @todo: implement UDP mode and IPv6 */ /* * Copyright (c) 2014 Simon Goldschmidt * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY * OF SUCH DAMAGE. * * This file is part of the lwIP TCP/IP stack. * * Author: Simon Goldschmidt */ #include "lwip/apps/lwiperf.h" #include "lwip/tcp.h" #include "lwip/sys.h" #include <string.h> /* Currently, only TCP-over-IPv4 is implemented (does iperf support IPv6 anyway?) */ #if LWIP_IPV4 && LWIP_TCP && LWIP_CALLBACK_API /** Specify the idle timeout (in seconds) after that the test fails */ #ifndef LWIPERF_TCP_MAX_IDLE_SEC #define LWIPERF_TCP_MAX_IDLE_SEC 10U #endif #if LWIPERF_TCP_MAX_IDLE_SEC > 255 #error LWIPERF_TCP_MAX_IDLE_SEC must fit into an u8_t #endif /* File internal memory allocation (struct lwiperf_*): this defaults to the heap */ #ifndef LWIPERF_ALLOC #define LWIPERF_ALLOC(type) mem_malloc(sizeof(type)) #define LWIPERF_FREE(type, item) mem_free(item) #endif /** If this is 1, check that received data has the correct format */ #ifndef LWIPERF_CHECK_RX_DATA #define LWIPERF_CHECK_RX_DATA 0 #endif /** This is the Iperf settings struct sent from the client */ typedef struct _lwiperf_settings { #define LWIPERF_FLAGS_ANSWER_TEST 0x80000000 #define LWIPERF_FLAGS_ANSWER_NOW 0x00000001 u32_t flags; u32_t num_threads; /* unused for now */ u32_t remote_port; u32_t buffer_len; /* unused for now */ u32_t win_band; /* TCP window / UDP rate: unused for now */ u32_t amount; /* pos. value: bytes?; neg. 
values: time (unit is 10ms: 1/100 second) */ } lwiperf_settings_t; /** Basic connection handle */ struct _lwiperf_state_base; typedef struct _lwiperf_state_base lwiperf_state_base_t; struct _lwiperf_state_base { /* 1=tcp, 0=udp */ u8_t tcp; /* 1=server, 0=client */ u8_t server; lwiperf_state_base_t* next; lwiperf_state_base_t* related_server_state; }; /** Connection handle for a TCP iperf session */ typedef struct _lwiperf_state_tcp { lwiperf_state_base_t base; struct tcp_pcb* server_pcb; struct tcp_pcb* conn_pcb; u32_t time_started; lwiperf_report_fn report_fn; void* report_arg; u8_t poll_count; u8_t next_num; u32_t bytes_transferred; lwiperf_settings_t settings; u8_t have_settings_buf; } lwiperf_state_tcp_t; /** List of active iperf sessions */ static lwiperf_state_base_t* lwiperf_all_connections; /** A const buffer to send from: we want to measure sending, not copying! */ static const u8_t lwiperf_txbuf_const[1600] = { '0','1','2','3','4','5','6','7','8','9','0','1','2','3','4','5','6','7','8','9','0','1','2','3','4','5','6','7','8','9','0','1','2','3','4','5','6','7','8','9', '0','1','2','3','4','5','6','7','8','9','0','1','2','3','4','5','6','7','8','9','0','1','2','3','4','5','6','7','8','9','0','1','2','3','4','5','6','7','8','9', '0','1','2','3','4','5','6','7','8','9','0','1','2','3','4','5','6','7','8','9','0','1','2','3','4','5','6','7','8','9','0','1','2','3','4','5','6','7','8','9', '0','1','2','3','4','5','6','7','8','9','0','1','2','3','4','5','6','7','8','9','0','1','2','3','4','5','6','7','8','9','0','1','2','3','4','5','6','7','8','9', '0','1','2','3','4','5','6','7','8','9','0','1','2','3','4','5','6','7','8','9','0','1','2','3','4','5','6','7','8','9','0','1','2','3','4','5','6','7','8','9', '0','1','2','3','4','5','6','7','8','9','0','1','2','3','4','5','6','7','8','9','0','1','2','3','4','5','6','7','8','9','0','1','2','3','4','5','6','7','8','9', '0','1','2','3','4','5','6','7','8','9','0','1','2','3','4','5','6','7','8','9','0','1','2','3','4','5','6','7','8','9','0','1','2','3','4','5','6','7','8','9', '0','1','2','3','4','5','6','7','8','9','0','1','2','3','4','5','6','7','8','9','0','1','2','3','4','5','6','7','8','9','0','1','2','3','4','5','6','7','8','9', '0','1','2','3','4','5','6','7','8','9','0','1','2','3','4','5','6','7','8','9','0','1','2','3','4','5','6','7','8','9','0','1','2','3','4','5','6','7','8','9', '0','1','2','3','4','5','6','7','8','9','0','1','2','3','4','5','6','7','8','9','0','1','2','3','4','5','6','7','8','9','0','1','2','3','4','5','6','7','8','9', '0','1','2','3','4','5','6','7','8','9','0','1','2','3','4','5','6','7','8','9','0','1','2','3','4','5','6','7','8','9','0','1','2','3','4','5','6','7','8','9', '0','1','2','3','4','5','6','7','8','9','0','1','2','3','4','5','6','7','8','9','0','1','2','3','4','5','6','7','8','9','0','1','2','3','4','5','6','7','8','9', '0','1','2','3','4','5','6','7','8','9','0','1','2','3','4','5','6','7','8','9','0','1','2','3','4','5','6','7','8','9','0','1','2','3','4','5','6','7','8','9', '0','1','2','3','4','5','6','7','8','9','0','1','2','3','4','5','6','7','8','9','0','1','2','3','4','5','6','7','8','9','0','1','2','3','4','5','6','7','8','9', '0','1','2','3','4','5','6','7','8','9','0','1','2','3','4','5','6','7','8','9','0','1','2','3','4','5','6','7','8','9','0','1','2','3','4','5','6','7','8','9', '0','1','2','3','4','5','6','7','8','9','0','1','2','3','4','5','6','7','8','9','0','1','2','3','4','5','6','7','8','9','0','1','2','3','4','5','6','7','8','9', 
'0','1','2','3','4','5','6','7','8','9','0','1','2','3','4','5','6','7','8','9','0','1','2','3','4','5','6','7','8','9','0','1','2','3','4','5','6','7','8','9', '0','1','2','3','4','5','6','7','8','9','0','1','2','3','4','5','6','7','8','9','0','1','2','3','4','5','6','7','8','9','0','1','2','3','4','5','6','7','8','9', '0','1','2','3','4','5','6','7','8','9','0','1','2','3','4','5','6','7','8','9','0','1','2','3','4','5','6','7','8','9','0','1','2','3','4','5','6','7','8','9', '0','1','2','3','4','5','6','7','8','9','0','1','2','3','4','5','6','7','8','9','0','1','2','3','4','5','6','7','8','9','0','1','2','3','4','5','6','7','8','9', '0','1','2','3','4','5','6','7','8','9','0','1','2','3','4','5','6','7','8','9','0','1','2','3','4','5','6','7','8','9','0','1','2','3','4','5','6','7','8','9', '0','1','2','3','4','5','6','7','8','9','0','1','2','3','4','5','6','7','8','9','0','1','2','3','4','5','6','7','8','9','0','1','2','3','4','5','6','7','8','9', '0','1','2','3','4','5','6','7','8','9','0','1','2','3','4','5','6','7','8','9','0','1','2','3','4','5','6','7','8','9','0','1','2','3','4','5','6','7','8','9', '0','1','2','3','4','5','6','7','8','9','0','1','2','3','4','5','6','7','8','9','0','1','2','3','4','5','6','7','8','9','0','1','2','3','4','5','6','7','8','9', '0','1','2','3','4','5','6','7','8','9','0','1','2','3','4','5','6','7','8','9','0','1','2','3','4','5','6','7','8','9','0','1','2','3','4','5','6','7','8','9', '0','1','2','3','4','5','6','7','8','9','0','1','2','3','4','5','6','7','8','9','0','1','2','3','4','5','6','7','8','9','0','1','2','3','4','5','6','7','8','9', '0','1','2','3','4','5','6','7','8','9','0','1','2','3','4','5','6','7','8','9','0','1','2','3','4','5','6','7','8','9','0','1','2','3','4','5','6','7','8','9', '0','1','2','3','4','5','6','7','8','9','0','1','2','3','4','5','6','7','8','9','0','1','2','3','4','5','6','7','8','9','0','1','2','3','4','5','6','7','8','9', '0','1','2','3','4','5','6','7','8','9','0','1','2','3','4','5','6','7','8','9','0','1','2','3','4','5','6','7','8','9','0','1','2','3','4','5','6','7','8','9', '0','1','2','3','4','5','6','7','8','9','0','1','2','3','4','5','6','7','8','9','0','1','2','3','4','5','6','7','8','9','0','1','2','3','4','5','6','7','8','9', '0','1','2','3','4','5','6','7','8','9','0','1','2','3','4','5','6','7','8','9','0','1','2','3','4','5','6','7','8','9','0','1','2','3','4','5','6','7','8','9', '0','1','2','3','4','5','6','7','8','9','0','1','2','3','4','5','6','7','8','9','0','1','2','3','4','5','6','7','8','9','0','1','2','3','4','5','6','7','8','9', '0','1','2','3','4','5','6','7','8','9','0','1','2','3','4','5','6','7','8','9','0','1','2','3','4','5','6','7','8','9','0','1','2','3','4','5','6','7','8','9', '0','1','2','3','4','5','6','7','8','9','0','1','2','3','4','5','6','7','8','9','0','1','2','3','4','5','6','7','8','9','0','1','2','3','4','5','6','7','8','9', '0','1','2','3','4','5','6','7','8','9','0','1','2','3','4','5','6','7','8','9','0','1','2','3','4','5','6','7','8','9','0','1','2','3','4','5','6','7','8','9', '0','1','2','3','4','5','6','7','8','9','0','1','2','3','4','5','6','7','8','9','0','1','2','3','4','5','6','7','8','9','0','1','2','3','4','5','6','7','8','9', '0','1','2','3','4','5','6','7','8','9','0','1','2','3','4','5','6','7','8','9','0','1','2','3','4','5','6','7','8','9','0','1','2','3','4','5','6','7','8','9', '0','1','2','3','4','5','6','7','8','9','0','1','2','3','4','5','6','7','8','9','0','1','2','3','4','5','6','7','8','9','0','1','2','3','4','5','6','7','8','9', 
'0','1','2','3','4','5','6','7','8','9','0','1','2','3','4','5','6','7','8','9','0','1','2','3','4','5','6','7','8','9','0','1','2','3','4','5','6','7','8','9', '0','1','2','3','4','5','6','7','8','9','0','1','2','3','4','5','6','7','8','9','0','1','2','3','4','5','6','7','8','9','0','1','2','3','4','5','6','7','8','9', }; static err_t lwiperf_tcp_poll(void *arg, struct tcp_pcb *tpcb); static void lwiperf_tcp_err(void *arg, err_t err); /** Add an iperf session to the 'active' list */ static void lwiperf_list_add(lwiperf_state_base_t* item) { if (lwiperf_all_connections == NULL) { lwiperf_all_connections = item; } else { item = lwiperf_all_connections; } } /** Remove an iperf session from the 'active' list */ static void lwiperf_list_remove(lwiperf_state_base_t* item) { lwiperf_state_base_t* prev = NULL; lwiperf_state_base_t* iter; for (iter = lwiperf_all_connections; iter != NULL; prev = iter, iter = iter->next) { if (iter == item) { if (prev == NULL) { lwiperf_all_connections = iter->next; } else { prev->next = item; } /* @debug: ensure this item is listed only once */ for (iter = iter->next; iter != NULL; iter = iter->next) { LWIP_ASSERT("duplicate entry", iter != item); } break; } } } /** Call the report function of an iperf tcp session */ static void lwip_tcp_conn_report(lwiperf_state_tcp_t* conn, enum lwiperf_report_type report_type) { if ((conn != NULL) && (conn->report_fn != NULL)) { u32_t now, duration_ms, bandwidth_kbitpsec; now = sys_now(); duration_ms = now - conn->time_started; if (duration_ms == 0) { bandwidth_kbitpsec = 0; } else { bandwidth_kbitpsec = (conn->bytes_transferred / duration_ms) * 8U; } conn->report_fn(conn->report_arg, report_type, &conn->conn_pcb->local_ip, conn->conn_pcb->local_port, &conn->conn_pcb->remote_ip, conn->conn_pcb->remote_port, conn->bytes_transferred, duration_ms, bandwidth_kbitpsec); } } /** Close an iperf tcp session */ static void lwiperf_tcp_close(lwiperf_state_tcp_t* conn, enum lwiperf_report_type report_type) { err_t err; lwip_tcp_conn_report(conn, report_type); lwiperf_list_remove(&conn->base); if (conn->conn_pcb != NULL) { tcp_arg(conn->conn_pcb, NULL); tcp_poll(conn->conn_pcb, NULL, 0); tcp_sent(conn->conn_pcb, NULL); tcp_recv(conn->conn_pcb, NULL); tcp_err(conn->conn_pcb, NULL); err = tcp_close(conn->conn_pcb); if (err != ERR_OK) { /* don't want to wait for free memory here... */ tcp_abort(conn->conn_pcb); } } else { /* no conn pcb, this is the server pcb */ err = tcp_close(conn->server_pcb); LWIP_ASSERT("error", err != ERR_OK); } LWIPERF_FREE(lwiperf_state_tcp_t, conn); } /** Try to send more data on an iperf tcp session */ static err_t lwiperf_tcp_client_send_more(lwiperf_state_tcp_t* conn) { int send_more; err_t err; u16_t txlen; u16_t txlen_max; void* txptr; u8_t apiflags; LWIP_ASSERT("conn invalid", (conn != NULL) && conn->base.tcp && (conn->base.server == 0)); do { send_more = 0; if (conn->settings.amount & PP_HTONL(0x80000000)) { /* this session is time-limited */ u32_t now = sys_now(); u32_t diff_ms = now - conn->time_started; u32_t time = (u32_t)-(s32_t)lwip_htonl(conn->settings.amount); u32_t time_ms = time * 10; if (diff_ms >= time_ms) { /* time specified by the client is over -> close the connection */ lwiperf_tcp_close(conn, LWIPERF_TCP_DONE_CLIENT); return ERR_OK; } } else { /* this session is byte-limited */ u32_t amount_bytes = lwip_htonl(conn->settings.amount); /* @todo: this can send up to 1*MSS more than requested... 
*/ if (amount_bytes >= conn->bytes_transferred) { /* all requested bytes transferred -> close the connection */ lwiperf_tcp_close(conn, LWIPERF_TCP_DONE_CLIENT); return ERR_OK; } } if (conn->bytes_transferred < 24) { /* transmit the settings a first time */ txptr = &((u8_t*)&conn->settings)[conn->bytes_transferred]; txlen_max = (u16_t)(24 - conn->bytes_transferred); apiflags = TCP_WRITE_FLAG_COPY; } else if (conn->bytes_transferred < 48) { /* transmit the settings a second time */ txptr = &((u8_t*)&conn->settings)[conn->bytes_transferred - 24]; txlen_max = (u16_t)(48 - conn->bytes_transferred); apiflags = TCP_WRITE_FLAG_COPY | TCP_WRITE_FLAG_MORE; send_more = 1; } else { /* transmit data */ /* @todo: every x bytes, transmit the settings again */ txptr = LWIP_CONST_CAST(void*, &lwiperf_txbuf_const[conn->bytes_transferred % 10]); txlen_max = TCP_MSS; if (conn->bytes_transferred == 48) { /* @todo: fix this for intermediate settings, too */ txlen_max = TCP_MSS - 24; } apiflags = 0; /* no copying needed */ send_more = 1; } txlen = txlen_max; do { err = tcp_write(conn->conn_pcb, txptr, txlen, apiflags); if (err == ERR_MEM) { txlen /= 2; } } while ((err == ERR_MEM) && (txlen >= (TCP_MSS/2))); if (err == ERR_OK) { conn->bytes_transferred += txlen; } else { send_more = 0; } } while(send_more); tcp_output(conn->conn_pcb); return ERR_OK; } /** TCP sent callback, try to send more data */ static err_t lwiperf_tcp_client_sent(void *arg, struct tcp_pcb *tpcb, u16_t len) { lwiperf_state_tcp_t* conn = (lwiperf_state_tcp_t*)arg; /* @todo: check 'len' (e.g. to time ACK of all data)? for now, we just send more... */ LWIP_ASSERT("invalid conn", conn->conn_pcb == tpcb); LWIP_UNUSED_ARG(tpcb); LWIP_UNUSED_ARG(len); conn->poll_count = 0; return lwiperf_tcp_client_send_more(conn); } /** TCP connected callback (active connection), send data now */ static err_t lwiperf_tcp_client_connected(void *arg, struct tcp_pcb *tpcb, err_t err) { lwiperf_state_tcp_t* conn = (lwiperf_state_tcp_t*)arg; LWIP_ASSERT("invalid conn", conn->conn_pcb == tpcb); LWIP_UNUSED_ARG(tpcb); if (err != ERR_OK) { lwiperf_tcp_close(conn, LWIPERF_TCP_ABORTED_REMOTE); return ERR_OK; } conn->poll_count = 0; conn->time_started = sys_now(); return lwiperf_tcp_client_send_more(conn); } /** Start TCP connection back to the client (either parallel or after the * receive test has finished. 
*/ static err_t lwiperf_tx_start(lwiperf_state_tcp_t* conn) { err_t err; lwiperf_state_tcp_t* client_conn; struct tcp_pcb* newpcb; ip_addr_t remote_addr; u16_t remote_port; client_conn = (lwiperf_state_tcp_t*)LWIPERF_ALLOC(lwiperf_state_tcp_t); if (client_conn == NULL) { return ERR_MEM; } newpcb = tcp_new(); if (newpcb == NULL) { LWIPERF_FREE(lwiperf_state_tcp_t, client_conn); return ERR_MEM; } MEMCPY(client_conn, conn, sizeof(lwiperf_state_tcp_t)); client_conn->base.server = 0; client_conn->server_pcb = NULL; client_conn->conn_pcb = newpcb; client_conn->time_started = sys_now(); /* @todo: set this again on 'connected' */ client_conn->poll_count = 0; client_conn->next_num = 4; /* initial nr is '4' since the header has 24 byte */ client_conn->bytes_transferred = 0; client_conn->settings.flags = 0; /* prevent the remote side starting back as client again */ tcp_arg(newpcb, client_conn); tcp_sent(newpcb, lwiperf_tcp_client_sent); tcp_poll(newpcb, lwiperf_tcp_poll, 2U); tcp_err(newpcb, lwiperf_tcp_err); ip_addr_copy(remote_addr, conn->conn_pcb->remote_ip); remote_port = (u16_t)lwip_htonl(client_conn->settings.remote_port); err = tcp_connect(newpcb, &remote_addr, remote_port, lwiperf_tcp_client_connected); if (err != ERR_OK) { lwiperf_tcp_close(client_conn, LWIPERF_TCP_ABORTED_LOCAL); return err; } lwiperf_list_add(&client_conn->base); return ERR_OK; } /** Receive data on an iperf tcp session */ static err_t lwiperf_tcp_recv(void *arg, struct tcp_pcb *tpcb, struct pbuf *p, err_t err) { u8_t tmp; u16_t tot_len; u32_t packet_idx; struct pbuf* q; lwiperf_state_tcp_t* conn = (lwiperf_state_tcp_t*)arg; LWIP_ASSERT("pcb mismatch", conn->conn_pcb == tpcb); LWIP_UNUSED_ARG(tpcb); if (err != ERR_OK) { lwiperf_tcp_close(conn, LWIPERF_TCP_ABORTED_REMOTE); return ERR_OK; } if (p == NULL) { /* connection closed -> test done */ if ((conn->settings.flags & PP_HTONL(LWIPERF_FLAGS_ANSWER_TEST|LWIPERF_FLAGS_ANSWER_NOW)) == PP_HTONL(LWIPERF_FLAGS_ANSWER_TEST)) { /* client requested transmission after end of test */ lwiperf_tx_start(conn); } lwiperf_tcp_close(conn, LWIPERF_TCP_DONE_SERVER); return ERR_OK; } tot_len = p->tot_len; conn->poll_count = 0; if ((!conn->have_settings_buf) || ((conn->bytes_transferred -24) % (1024*128) == 0)) { /* wait for 24-byte header */ if (p->tot_len < sizeof(lwiperf_settings_t)) { lwiperf_tcp_close(conn, LWIPERF_TCP_ABORTED_LOCAL_DATAERROR); pbuf_free(p); return ERR_VAL; } if (!conn->have_settings_buf) { if (pbuf_copy_partial(p, &conn->settings, sizeof(lwiperf_settings_t), 0) != sizeof(lwiperf_settings_t)) { lwiperf_tcp_close(conn, LWIPERF_TCP_ABORTED_LOCAL); pbuf_free(p); return ERR_VAL; } conn->have_settings_buf = 1; if ((conn->settings.flags & PP_HTONL(LWIPERF_FLAGS_ANSWER_TEST|LWIPERF_FLAGS_ANSWER_NOW)) == PP_HTONL(LWIPERF_FLAGS_ANSWER_TEST|LWIPERF_FLAGS_ANSWER_NOW)) { /* client requested parallel transmission test */ err_t err2 = lwiperf_tx_start(conn); if (err2 != ERR_OK) { lwiperf_tcp_close(conn, LWIPERF_TCP_ABORTED_LOCAL_TXERROR); pbuf_free(p); return err2; } } } else { if (pbuf_memcmp(p, 0, &conn->settings, sizeof(lwiperf_settings_t)) != 0) { lwiperf_tcp_close(conn, LWIPERF_TCP_ABORTED_LOCAL_DATAERROR); pbuf_free(p); return ERR_VAL; } } conn->bytes_transferred += sizeof(lwiperf_settings_t); if (conn->bytes_transferred <= 24) { conn->time_started = sys_now(); tcp_recved(tpcb, p->tot_len); pbuf_free(p); return ERR_OK; } conn->next_num = 4; /* 24 bytes received... 
*/ tmp = pbuf_header(p, -24); LWIP_ASSERT("pbuf_header failed", tmp == 0); } packet_idx = 0; for (q = p; q != NULL; q = q->next) { #if LWIPERF_CHECK_RX_DATA const u8_t* payload = (const u8_t*)q->payload; u16_t i; for (i = 0; i < q->len; i++) { u8_t val = payload[i]; u8_t num = val - '0'; if (num == conn->next_num) { conn->next_num++; if (conn->next_num == 10) { conn->next_num = 0; } } else { lwiperf_tcp_close(conn, LWIPERF_TCP_ABORTED_LOCAL_DATAERROR); pbuf_free(p); return ERR_VAL; } } #endif packet_idx += q->len; } LWIP_ASSERT("count mismatch", packet_idx == p->tot_len); conn->bytes_transferred += packet_idx; tcp_recved(tpcb, tot_len); pbuf_free(p); return ERR_OK; } /** Error callback, iperf tcp session aborted */ static void lwiperf_tcp_err(void *arg, err_t err) { lwiperf_state_tcp_t* conn = (lwiperf_state_tcp_t*)arg; LWIP_UNUSED_ARG(err); lwiperf_tcp_close(conn, LWIPERF_TCP_ABORTED_REMOTE); } /** TCP poll callback, try to send more data */ static err_t lwiperf_tcp_poll(void *arg, struct tcp_pcb *tpcb) { lwiperf_state_tcp_t* conn = (lwiperf_state_tcp_t*)arg; LWIP_ASSERT("pcb mismatch", conn->conn_pcb == tpcb); LWIP_UNUSED_ARG(tpcb); if (++conn->poll_count >= LWIPERF_TCP_MAX_IDLE_SEC) { lwiperf_tcp_close(conn, LWIPERF_TCP_ABORTED_LOCAL); return ERR_OK; /* lwiperf_tcp_close frees conn */ } if (!conn->base.server) { lwiperf_tcp_client_send_more(conn); } return ERR_OK; } /** This is called when a new client connects for an iperf tcp session */ static err_t lwiperf_tcp_accept(void *arg, struct tcp_pcb *newpcb, err_t err) { lwiperf_state_tcp_t *s, *conn; if ((err != ERR_OK) || (newpcb == NULL) || (arg == NULL)) { return ERR_VAL; } s = (lwiperf_state_tcp_t*)arg; conn = (lwiperf_state_tcp_t*)LWIPERF_ALLOC(lwiperf_state_tcp_t); if (conn == NULL) { return ERR_MEM; } memset(conn, 0, sizeof(lwiperf_state_tcp_t)); conn->base.tcp = 1; conn->base.server = 1; conn->base.related_server_state = &s->base; conn->server_pcb = s->server_pcb; conn->conn_pcb = newpcb; conn->time_started = sys_now(); conn->report_fn = s->report_fn; conn->report_arg = s->report_arg; /* setup the tcp rx connection */ tcp_arg(newpcb, conn); tcp_recv(newpcb, lwiperf_tcp_recv); tcp_poll(newpcb, lwiperf_tcp_poll, 2U); tcp_err(conn->conn_pcb, lwiperf_tcp_err); lwiperf_list_add(&conn->base); return ERR_OK; } /** * @ingroup iperf * Start a TCP iperf server on the default TCP port (5001) and listen for * incoming connections from iperf clients. * * @returns a connection handle that can be used to abort the server * by calling @ref lwiperf_abort() */ void* lwiperf_start_tcp_server_default(lwiperf_report_fn report_fn, void* report_arg) { return lwiperf_start_tcp_server(IP_ADDR_ANY, LWIPERF_TCP_PORT_DEFAULT, report_fn, report_arg); } /** * @ingroup iperf * Start a TCP iperf server on a specific IP address and port and listen for * incoming connections from iperf clients. 
* * @returns a connection handle that can be used to abort the server * by calling @ref lwiperf_abort() */ void* lwiperf_start_tcp_server(const ip_addr_t* local_addr, u16_t local_port, lwiperf_report_fn report_fn, void* report_arg) { err_t err; struct tcp_pcb* pcb; lwiperf_state_tcp_t* s; if (local_addr == NULL) { return NULL; } s = (lwiperf_state_tcp_t*)LWIPERF_ALLOC(lwiperf_state_tcp_t); if (s == NULL) { return NULL; } memset(s, 0, sizeof(lwiperf_state_tcp_t)); s->base.tcp = 1; s->base.server = 1; s->report_fn = report_fn; s->report_arg = report_arg; pcb = tcp_new(); if (pcb != NULL) { err = tcp_bind(pcb, local_addr, local_port); if (err == ERR_OK) { s->server_pcb = tcp_listen_with_backlog(pcb, 1); } } if (s->server_pcb == NULL) { if (pcb != NULL) { tcp_close(pcb); } LWIPERF_FREE(lwiperf_state_tcp_t, s); return NULL; } pcb = NULL; tcp_arg(s->server_pcb, s); tcp_accept(s->server_pcb, lwiperf_tcp_accept); lwiperf_list_add(&s->base); return s; } /** * @ingroup iperf * Abort an iperf session (handle returned by lwiperf_start_tcp_server*()) */ void lwiperf_abort(void* lwiperf_session) { lwiperf_state_base_t* i, *dealloc, *last = NULL; for (i = lwiperf_all_connections; i != NULL; ) { if ((i == lwiperf_session) || (i->related_server_state == lwiperf_session)) { dealloc = i; i = i->next; if (last != NULL) { last->next = i; } LWIPERF_FREE(lwiperf_state_tcp_t, dealloc); /* @todo: type? */ } else { last = i; i = i->next; } } } #endif /* LWIP_IPV4 && LWIP_TCP && LWIP_CALLBACK_API */
2301_81045437/classic-platform
communication/lwip-2.0.3/src/apps/lwiperf/lwiperf.c
C
unknown
26,123
/** * @file * MDNS responder implementation * * @defgroup mdns MDNS * @ingroup apps * * RFC 6762 - Multicast DNS\n * RFC 6763 - DNS-Based Service Discovery\n * * @verbinclude mdns.txt * * Things left to implement: * ------------------------- * * - Probing/conflict resolution * - Sending goodbye messages (zero ttl) - shutdown, DHCP lease about to expire, DHCP turned off... * - Checking that source address of unicast requests are on the same network * - Limiting multicast responses to 1 per second per resource record * - Fragmenting replies if required * - Subscribe to netif address/link change events and act on them (currently needs to be done manually) * - Handling multi-packet known answers * - Individual known answer detection for all local IPv6 addresses * - Dynamic size of outgoing packet */ /* * Copyright (c) 2015 Verisure Innovation AB * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY * OF SUCH DAMAGE. * * This file is part of the lwIP TCP/IP stack. * * Author: Erik Ekman <erik@kryo.se> * */ #include "lwip/apps/mdns.h" #include "lwip/apps/mdns_priv.h" #include "lwip/netif.h" #include "lwip/udp.h" #include "lwip/ip_addr.h" #include "lwip/mem.h" #include "lwip/prot/dns.h" #include <string.h> #if LWIP_MDNS_RESPONDER #if (LWIP_IPV4 && !LWIP_IGMP) #error "If you want to use MDNS with IPv4, you have to define LWIP_IGMP=1 in your lwipopts.h" #endif #if (LWIP_IPV6 && !LWIP_IPV6_MLD) #error "If you want to use MDNS with IPv6, you have to define LWIP_IPV6_MLD=1 in your lwipopts.h" #endif #if (!LWIP_UDP) #error "If you want to use MDNS, you have to define LWIP_UDP=1 in your lwipopts.h" #endif #if LWIP_IPV4 #include "lwip/igmp.h" /* IPv4 multicast group 224.0.0.251 */ static const ip_addr_t v4group = DNS_MQUERY_IPV4_GROUP_INIT; #endif #if LWIP_IPV6 #include "lwip/mld6.h" /* IPv6 multicast group FF02::FB */ static const ip_addr_t v6group = DNS_MQUERY_IPV6_GROUP_INIT; #endif #define MDNS_PORT 5353 #define MDNS_TTL 255 /* Stored offsets to beginning of domain names * Used for compression. 
*/ #define NUM_DOMAIN_OFFSETS 10 #define DOMAIN_JUMP_SIZE 2 #define DOMAIN_JUMP 0xc000 static u8_t mdns_netif_client_id; static struct udp_pcb *mdns_pcb; #define NETIF_TO_HOST(netif) (struct mdns_host*)(netif_get_client_data(netif, mdns_netif_client_id)) #define TOPDOMAIN_LOCAL "local" #define REVERSE_PTR_TOPDOMAIN "arpa" #define REVERSE_PTR_V4_DOMAIN "in-addr" #define REVERSE_PTR_V6_DOMAIN "ip6" #define SRV_PRIORITY 0 #define SRV_WEIGHT 0 /* Payload size allocated for each outgoing UDP packet */ #define OUTPACKET_SIZE 500 /* Lookup from hostname -> IPv4 */ #define REPLY_HOST_A 0x01 /* Lookup from IPv4/v6 -> hostname */ #define REPLY_HOST_PTR_V4 0x02 /* Lookup from hostname -> IPv6 */ #define REPLY_HOST_AAAA 0x04 /* Lookup from hostname -> IPv6 */ #define REPLY_HOST_PTR_V6 0x08 /* Lookup for service types */ #define REPLY_SERVICE_TYPE_PTR 0x10 /* Lookup for instances of service */ #define REPLY_SERVICE_NAME_PTR 0x20 /* Lookup for location of service instance */ #define REPLY_SERVICE_SRV 0x40 /* Lookup for text info on service instance */ #define REPLY_SERVICE_TXT 0x80 static const char *dnssd_protos[] = { "_udp", /* DNSSD_PROTO_UDP */ "_tcp", /* DNSSD_PROTO_TCP */ }; /** Description of a service */ struct mdns_service { /** TXT record to answer with */ struct mdns_domain txtdata; /** Name of service, like 'myweb' */ char name[MDNS_LABEL_MAXLEN + 1]; /** Type of service, like '_http' */ char service[MDNS_LABEL_MAXLEN + 1]; /** Callback function and userdata * to update txtdata buffer */ service_get_txt_fn_t txt_fn; void *txt_userdata; /** TTL in seconds of SRV/TXT replies */ u32_t dns_ttl; /** Protocol, TCP or UDP */ u16_t proto; /** Port of the service */ u16_t port; }; /** Description of a host/netif */ struct mdns_host { /** Hostname */ char name[MDNS_LABEL_MAXLEN + 1]; /** Pointer to services */ struct mdns_service *services[MDNS_MAX_SERVICES]; /** TTL in seconds of A/AAAA/PTR replies */ u32_t dns_ttl; }; /** Information about received packet */ struct mdns_packet { /** Sender IP/port */ ip_addr_t source_addr; u16_t source_port; /** If packet was received unicast */ u16_t recv_unicast; /** Netif that received the packet */ struct netif *netif; /** Packet data */ struct pbuf *pbuf; /** Current parsing offset in packet */ u16_t parse_offset; /** Identifier. Used in legacy queries */ u16_t tx_id; /** Number of questions in packet, * read from packet header */ u16_t questions; /** Number of unparsed questions */ u16_t questions_left; /** Number of answers in packet, * (sum of normal, authorative and additional answers) * read from packet header */ u16_t answers; /** Number of unparsed answers */ u16_t answers_left; }; /** Information about outgoing packet */ struct mdns_outpacket { /** Netif to send the packet on */ struct netif *netif; /** Packet data */ struct pbuf *pbuf; /** Current write offset in packet */ u16_t write_offset; /** Identifier. Used in legacy queries */ u16_t tx_id; /** Destination IP/port if sent unicast */ ip_addr_t dest_addr; u16_t dest_port; /** Number of questions written */ u16_t questions; /** Number of normal answers written */ u16_t answers; /** Number of additional answers written */ u16_t additional; /** Offsets for written domain names in packet. * Used for compression */ u16_t domain_offsets[NUM_DOMAIN_OFFSETS]; /** If all answers in packet should set cache_flush bit */ u8_t cache_flush; /** If reply should be sent unicast */ u8_t unicast_reply; /** If legacy query. 
(tx_id needed, and write * question again in reply before answer) */ u8_t legacy_query; /* Reply bitmask for host information */ u8_t host_replies; /* Bitmask for which reverse IPv6 hosts to answer */ u8_t host_reverse_v6_replies; /* Reply bitmask per service */ u8_t serv_replies[MDNS_MAX_SERVICES]; }; /** Domain, type and class. * Shared between questions and answers */ struct mdns_rr_info { struct mdns_domain domain; u16_t type; u16_t klass; }; struct mdns_question { struct mdns_rr_info info; /** unicast reply requested */ u16_t unicast; }; struct mdns_answer { struct mdns_rr_info info; /** cache flush command bit */ u16_t cache_flush; /* Validity time in seconds */ u32_t ttl; /** Length of variable answer */ u16_t rd_length; /** Offset of start of variable answer in packet */ u16_t rd_offset; }; /** * Add a label part to a domain * @param domain The domain to add a label to * @param label The label to add, like &lt;hostname&gt;, 'local', 'com' or '' * @param len The length of the label * @return ERR_OK on success, an err_t otherwise if label too long */ err_t mdns_domain_add_label(struct mdns_domain *domain, const char *label, u8_t len) { if (len > MDNS_LABEL_MAXLEN) { return ERR_VAL; } if (len > 0 && (1 + len + domain->length >= MDNS_DOMAIN_MAXLEN)) { return ERR_VAL; } /* Allow only zero marker on last byte */ if (len == 0 && (1 + domain->length > MDNS_DOMAIN_MAXLEN)) { return ERR_VAL; } domain->name[domain->length] = len; domain->length++; if (len) { MEMCPY(&domain->name[domain->length], label, len); domain->length += len; } return ERR_OK; } /** * Internal readname function with max 6 levels of recursion following jumps * while decompressing name */ static u16_t mdns_readname_loop(struct pbuf *p, u16_t offset, struct mdns_domain *domain, unsigned depth) { u8_t c; do { if (depth > 5) { /* Too many jumps */ return MDNS_READNAME_ERROR; } c = pbuf_get_at(p, offset); offset++; /* is this a compressed label? 
*/ if((c & 0xc0) == 0xc0) { u16_t jumpaddr; if (offset >= p->tot_len) { /* Make sure both jump bytes fit in the packet */ return MDNS_READNAME_ERROR; } jumpaddr = (((c & 0x3f) << 8) | (pbuf_get_at(p, offset) & 0xff)); offset++; if (jumpaddr >= SIZEOF_DNS_HDR && jumpaddr < p->tot_len) { u16_t res; /* Recursive call, maximum depth will be checked */ res = mdns_readname_loop(p, jumpaddr, domain, depth + 1); /* Dont return offset since new bytes were not read (jumped to somewhere in packet) */ if (res == MDNS_READNAME_ERROR) { return res; } } else { return MDNS_READNAME_ERROR; } break; } /* normal label */ if (c <= MDNS_LABEL_MAXLEN) { u8_t label[MDNS_LABEL_MAXLEN]; err_t res; if (c + domain->length >= MDNS_DOMAIN_MAXLEN) { return MDNS_READNAME_ERROR; } if (c != 0) { if (pbuf_copy_partial(p, label, c, offset) != c) { return MDNS_READNAME_ERROR; } offset += c; } res = mdns_domain_add_label(domain, (char *) label, c); if (res != ERR_OK) { return MDNS_READNAME_ERROR; } } else { /* bad length byte */ return MDNS_READNAME_ERROR; } } while (c != 0); return offset; } /** * Read possibly compressed domain name from packet buffer * @param p The packet * @param offset start position of domain name in packet * @param domain The domain name destination * @return The new offset after the domain, or MDNS_READNAME_ERROR * if reading failed */ u16_t mdns_readname(struct pbuf *p, u16_t offset, struct mdns_domain *domain) { memset(domain, 0, sizeof(struct mdns_domain)); return mdns_readname_loop(p, offset, domain, 0); } /** * Print domain name to debug output * @param domain The domain name */ static void mdns_domain_debug_print(struct mdns_domain *domain) { u8_t *src = domain->name; u8_t i; while (*src) { u8_t label_len = *src; src++; for (i = 0; i < label_len; i++) { LWIP_DEBUGF(MDNS_DEBUG, ("%c", src[i])); } src += label_len; LWIP_DEBUGF(MDNS_DEBUG, (".")); } } /** * Return 1 if contents of domains match (case-insensitive) * @param a Domain name to compare 1 * @param b Domain name to compare 2 * @return 1 if domains are equal ignoring case, 0 otherwise */ int mdns_domain_eq(struct mdns_domain *a, struct mdns_domain *b) { u8_t *ptra, *ptrb; u8_t len; int res; if (a->length != b->length) { return 0; } ptra = a->name; ptrb = b->name; while (*ptra && *ptrb && ptra < &a->name[a->length]) { if (*ptra != *ptrb) { return 0; } len = *ptra; ptra++; ptrb++; res = lwip_strnicmp((char *) ptra, (char *) ptrb, len); if (res != 0) { return 0; } ptra += len; ptrb += len; } if (*ptra != *ptrb && ptra < &a->name[a->length]) { return 0; } return 1; } /** * Call user supplied function to setup TXT data * @param service The service to build TXT record for */ static void mdns_prepare_txtdata(struct mdns_service *service) { memset(&service->txtdata, 0, sizeof(struct mdns_domain)); if (service->txt_fn) { service->txt_fn(service, service->txt_userdata); } } #if LWIP_IPV4 /** * Build domain for reverse lookup of IPv4 address * like 12.0.168.192.in-addr.arpa. 
for 192.168.0.12 * @param domain Where to write the domain name * @param addr Pointer to an IPv4 address to encode * @return ERR_OK if domain was written, an err_t otherwise */ static err_t mdns_build_reverse_v4_domain(struct mdns_domain *domain, const ip4_addr_t *addr) { int i; err_t res; const u8_t *ptr; if (!domain || !addr) { return ERR_ARG; } memset(domain, 0, sizeof(struct mdns_domain)); ptr = (const u8_t *) addr; for (i = sizeof(ip4_addr_t) - 1; i >= 0; i--) { char buf[4]; u8_t val = ptr[i]; lwip_itoa(buf, sizeof(buf), val); res = mdns_domain_add_label(domain, buf, (u8_t)strlen(buf)); LWIP_ERROR("mdns_build_reverse_v4_domain: Failed to add label", (res == ERR_OK), return res); } res = mdns_domain_add_label(domain, REVERSE_PTR_V4_DOMAIN, (u8_t)(sizeof(REVERSE_PTR_V4_DOMAIN)-1)); LWIP_ERROR("mdns_build_reverse_v4_domain: Failed to add label", (res == ERR_OK), return res); res = mdns_domain_add_label(domain, REVERSE_PTR_TOPDOMAIN, (u8_t)(sizeof(REVERSE_PTR_TOPDOMAIN)-1)); LWIP_ERROR("mdns_build_reverse_v4_domain: Failed to add label", (res == ERR_OK), return res); res = mdns_domain_add_label(domain, NULL, 0); LWIP_ERROR("mdns_build_reverse_v4_domain: Failed to add label", (res == ERR_OK), return res); return ERR_OK; } #endif #if LWIP_IPV6 /** * Build domain for reverse lookup of IP address * like b.a.9.8.7.6.5.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa. for 2001:db8::567:89ab * @param domain Where to write the domain name * @param addr Pointer to an IPv6 address to encode * @return ERR_OK if domain was written, an err_t otherwise */ static err_t mdns_build_reverse_v6_domain(struct mdns_domain *domain, const ip6_addr_t *addr) { int i; err_t res; const u8_t *ptr; if (!domain || !addr) { return ERR_ARG; } memset(domain, 0, sizeof(struct mdns_domain)); ptr = (const u8_t *) addr; for (i = sizeof(ip6_addr_t) - 1; i >= 0; i--) { char buf; u8_t byte = ptr[i]; int j; for (j = 0; j < 2; j++) { if ((byte & 0x0F) < 0xA) { buf = '0' + (byte & 0x0F); } else { buf = 'a' + (byte & 0x0F) - 0xA; } res = mdns_domain_add_label(domain, &buf, sizeof(buf)); LWIP_ERROR("mdns_build_reverse_v6_domain: Failed to add label", (res == ERR_OK), return res); byte >>= 4; } } res = mdns_domain_add_label(domain, REVERSE_PTR_V6_DOMAIN, (u8_t)(sizeof(REVERSE_PTR_V6_DOMAIN)-1)); LWIP_ERROR("mdns_build_reverse_v6_domain: Failed to add label", (res == ERR_OK), return res); res = mdns_domain_add_label(domain, REVERSE_PTR_TOPDOMAIN, (u8_t)(sizeof(REVERSE_PTR_TOPDOMAIN)-1)); LWIP_ERROR("mdns_build_reverse_v6_domain: Failed to add label", (res == ERR_OK), return res); res = mdns_domain_add_label(domain, NULL, 0); LWIP_ERROR("mdns_build_reverse_v6_domain: Failed to add label", (res == ERR_OK), return res); return ERR_OK; } #endif /* Add .local. to domain */ static err_t mdns_add_dotlocal(struct mdns_domain *domain) { err_t res = mdns_domain_add_label(domain, TOPDOMAIN_LOCAL, (u8_t)(sizeof(TOPDOMAIN_LOCAL)-1)); LWIP_ERROR("mdns_add_dotlocal: Failed to add label", (res == ERR_OK), return res); return mdns_domain_add_label(domain, NULL, 0); } /** * Build the <hostname>.local. domain name * @param domain Where to write the domain name * @param mdns TMDNS netif descriptor. * @return ERR_OK if domain <hostname>.local. 
was written, an err_t otherwise */ static err_t mdns_build_host_domain(struct mdns_domain *domain, struct mdns_host *mdns) { err_t res; memset(domain, 0, sizeof(struct mdns_domain)); LWIP_ERROR("mdns_build_host_domain: mdns != NULL", (mdns != NULL), return ERR_VAL); res = mdns_domain_add_label(domain, mdns->name, (u8_t)strlen(mdns->name)); LWIP_ERROR("mdns_build_host_domain: Failed to add label", (res == ERR_OK), return res); return mdns_add_dotlocal(domain); } /** * Build the lookup-all-services special DNS-SD domain name * @param domain Where to write the domain name * @return ERR_OK if domain _services._dns-sd._udp.local. was written, an err_t otherwise */ static err_t mdns_build_dnssd_domain(struct mdns_domain *domain) { err_t res; memset(domain, 0, sizeof(struct mdns_domain)); res = mdns_domain_add_label(domain, "_services", (u8_t)(sizeof("_services")-1)); LWIP_ERROR("mdns_build_dnssd_domain: Failed to add label", (res == ERR_OK), return res); res = mdns_domain_add_label(domain, "_dns-sd", (u8_t)(sizeof("_dns-sd")-1)); LWIP_ERROR("mdns_build_dnssd_domain: Failed to add label", (res == ERR_OK), return res); res = mdns_domain_add_label(domain, dnssd_protos[DNSSD_PROTO_UDP], (u8_t)strlen(dnssd_protos[DNSSD_PROTO_UDP])); LWIP_ERROR("mdns_build_dnssd_domain: Failed to add label", (res == ERR_OK), return res); return mdns_add_dotlocal(domain); } /** * Build domain name for a service * @param domain Where to write the domain name * @param service The service struct, containing service name, type and protocol * @param include_name Whether to include the service name in the domain * @return ERR_OK if domain was written. If service name is included, * <name>.<type>.<proto>.local. will be written, otherwise <type>.<proto>.local. * An err_t is returned on error. 
*/ static err_t mdns_build_service_domain(struct mdns_domain *domain, struct mdns_service *service, int include_name) { err_t res; memset(domain, 0, sizeof(struct mdns_domain)); if (include_name) { res = mdns_domain_add_label(domain, service->name, (u8_t)strlen(service->name)); LWIP_ERROR("mdns_build_service_domain: Failed to add label", (res == ERR_OK), return res); } res = mdns_domain_add_label(domain, service->service, (u8_t)strlen(service->service)); LWIP_ERROR("mdns_build_service_domain: Failed to add label", (res == ERR_OK), return res); res = mdns_domain_add_label(domain, dnssd_protos[service->proto], (u8_t)strlen(dnssd_protos[service->proto])); LWIP_ERROR("mdns_build_service_domain: Failed to add label", (res == ERR_OK), return res); return mdns_add_dotlocal(domain); } /** * Check which replies we should send for a host/netif based on question * @param netif The network interface that received the question * @param rr Domain/type/class from a question * @param reverse_v6_reply Bitmask of which IPv6 addresses to send reverse PTRs for * if reply bit has REPLY_HOST_PTR_V6 set * @return Bitmask of which replies to send */ static int check_host(struct netif *netif, struct mdns_rr_info *rr, u8_t *reverse_v6_reply) { err_t res; int replies = 0; struct mdns_domain mydomain; LWIP_UNUSED_ARG(reverse_v6_reply); /* if ipv6 is disabled */ if (rr->klass != DNS_RRCLASS_IN && rr->klass != DNS_RRCLASS_ANY) { /* Invalid class */ return replies; } /* Handle PTR for our addresses */ if (rr->type == DNS_RRTYPE_PTR || rr->type == DNS_RRTYPE_ANY) { #if LWIP_IPV6 int i; for (i = 0; i < LWIP_IPV6_NUM_ADDRESSES; i++) { if (ip6_addr_isvalid(netif_ip6_addr_state(netif, i))) { res = mdns_build_reverse_v6_domain(&mydomain, netif_ip6_addr(netif, i)); if (res == ERR_OK && mdns_domain_eq(&rr->domain, &mydomain)) { replies |= REPLY_HOST_PTR_V6; /* Mark which addresses where requested */ if (reverse_v6_reply) { *reverse_v6_reply |= (1 << i); } } } } #endif #if LWIP_IPV4 if (!ip4_addr_isany_val(*netif_ip4_addr(netif))) { res = mdns_build_reverse_v4_domain(&mydomain, netif_ip4_addr(netif)); if (res == ERR_OK && mdns_domain_eq(&rr->domain, &mydomain)) { replies |= REPLY_HOST_PTR_V4; } } #endif } res = mdns_build_host_domain(&mydomain, NETIF_TO_HOST(netif)); /* Handle requests for our hostname */ if (res == ERR_OK && mdns_domain_eq(&rr->domain, &mydomain)) { /* TODO return NSEC if unsupported protocol requested */ #if LWIP_IPV4 if (!ip4_addr_isany_val(*netif_ip4_addr(netif)) && (rr->type == DNS_RRTYPE_A || rr->type == DNS_RRTYPE_ANY)) { replies |= REPLY_HOST_A; } #endif #if LWIP_IPV6 if (rr->type == DNS_RRTYPE_AAAA || rr->type == DNS_RRTYPE_ANY) { replies |= REPLY_HOST_AAAA; } #endif } return replies; } /** * Check which replies we should send for a service based on question * @param service A registered MDNS service * @param rr Domain/type/class from a question * @return Bitmask of which replies to send */ static int check_service(struct mdns_service *service, struct mdns_rr_info *rr) { err_t res; int replies = 0; struct mdns_domain mydomain; if (rr->klass != DNS_RRCLASS_IN && rr->klass != DNS_RRCLASS_ANY) { /* Invalid class */ return 0; } res = mdns_build_dnssd_domain(&mydomain); if (res == ERR_OK && mdns_domain_eq(&rr->domain, &mydomain) && (rr->type == DNS_RRTYPE_PTR || rr->type == DNS_RRTYPE_ANY)) { /* Request for all service types */ replies |= REPLY_SERVICE_TYPE_PTR; } res = mdns_build_service_domain(&mydomain, service, 0); if (res == ERR_OK && mdns_domain_eq(&rr->domain, &mydomain) && (rr->type == DNS_RRTYPE_PTR 
|| rr->type == DNS_RRTYPE_ANY)) { /* Request for the instance of my service */ replies |= REPLY_SERVICE_NAME_PTR; } res = mdns_build_service_domain(&mydomain, service, 1); if (res == ERR_OK && mdns_domain_eq(&rr->domain, &mydomain)) { /* Request for info about my service */ if (rr->type == DNS_RRTYPE_SRV || rr->type == DNS_RRTYPE_ANY) { replies |= REPLY_SERVICE_SRV; } if (rr->type == DNS_RRTYPE_TXT || rr->type == DNS_RRTYPE_ANY) { replies |= REPLY_SERVICE_TXT; } } return replies; } /** * Return bytes needed to write before jump for best result of compressing supplied domain * against domain in outpacket starting at specified offset. * If a match is found, offset is updated to where to jump to * @param pbuf Pointer to pbuf with the partially constructed DNS packet * @param offset Start position of a domain written earlier. If this location is suitable * for compression, the pointer is updated to where in the domain to jump to. * @param domain The domain to write * @return Number of bytes to write of the new domain before writing a jump to the offset. * If compression can not be done against this previous domain name, the full new * domain length is returned. */ u16_t mdns_compress_domain(struct pbuf *pbuf, u16_t *offset, struct mdns_domain *domain) { struct mdns_domain target; u16_t target_end; u8_t target_len; u8_t writelen = 0; u8_t *ptr; if (pbuf == NULL) { return domain->length; } target_end = mdns_readname(pbuf, *offset, &target); if (target_end == MDNS_READNAME_ERROR) { return domain->length; } target_len = (u8_t)(target_end - *offset); ptr = domain->name; while (writelen < domain->length) { u8_t domainlen = (u8_t)(domain->length - writelen); u8_t labellen; if (domainlen <= target.length && domainlen > DOMAIN_JUMP_SIZE) { /* Compare domains if target is long enough, and we have enough left of the domain */ u8_t targetpos = (u8_t)(target.length - domainlen); if ((targetpos + DOMAIN_JUMP_SIZE) >= target_len) { /* We are checking at or beyond a jump in the original, stop looking */ break; } if (target.length >= domainlen && memcmp(&domain->name[writelen], &target.name[targetpos], domainlen) == 0) { *offset += targetpos; return writelen; } } /* Skip to next label in domain */ labellen = *ptr; writelen += 1 + labellen; ptr += 1 + labellen; } /* Nothing found */ return domain->length; } /** * Write domain to outpacket. Compression will be attempted, * unless domain->skip_compression is set. 
* @param outpkt The outpacket to write to * @param domain The domain name to write * @return ERR_OK on success, an err_t otherwise */ static err_t mdns_write_domain(struct mdns_outpacket *outpkt, struct mdns_domain *domain) { int i; err_t res; u16_t writelen = domain->length; u16_t jump_offset = 0; u16_t jump; if (!domain->skip_compression) { for (i = 0; i < NUM_DOMAIN_OFFSETS; ++i) { u16_t offset = outpkt->domain_offsets[i]; if (offset) { u16_t len = mdns_compress_domain(outpkt->pbuf, &offset, domain); if (len < writelen) { writelen = len; jump_offset = offset; } } } } if (writelen) { /* Write uncompressed part of name */ res = pbuf_take_at(outpkt->pbuf, domain->name, writelen, outpkt->write_offset); if (res != ERR_OK) { return res; } /* Store offset of this new domain */ for (i = 0; i < NUM_DOMAIN_OFFSETS; ++i) { if (outpkt->domain_offsets[i] == 0) { outpkt->domain_offsets[i] = outpkt->write_offset; break; } } outpkt->write_offset += writelen; } if (jump_offset) { /* Write jump */ jump = lwip_htons(DOMAIN_JUMP | jump_offset); res = pbuf_take_at(outpkt->pbuf, &jump, DOMAIN_JUMP_SIZE, outpkt->write_offset); if (res != ERR_OK) { return res; } outpkt->write_offset += DOMAIN_JUMP_SIZE; } return ERR_OK; } /** * Write a question to an outpacket * A question contains domain, type and class. Since an answer also starts with these fields this function is also * called from mdns_add_answer(). * @param outpkt The outpacket to write to * @param domain The domain name the answer is for * @param type The DNS type of the answer (like 'AAAA', 'SRV') * @param klass The DNS type of the answer (like 'IN') * @param unicast If highest bit in class should be set, to instruct the responder to * reply with a unicast packet * @return ERR_OK on success, an err_t otherwise */ static err_t mdns_add_question(struct mdns_outpacket *outpkt, struct mdns_domain *domain, u16_t type, u16_t klass, u16_t unicast) { u16_t question_len; u16_t field16; err_t res; if (!outpkt->pbuf) { /* If no pbuf is active, allocate one */ outpkt->pbuf = pbuf_alloc(PBUF_TRANSPORT, OUTPACKET_SIZE, PBUF_RAM); if (!outpkt->pbuf) { return ERR_MEM; } outpkt->write_offset = SIZEOF_DNS_HDR; } /* Worst case calculation. Domain string might be compressed */ question_len = domain->length + sizeof(type) + sizeof(klass); if (outpkt->write_offset + question_len > outpkt->pbuf->tot_len) { /* No space */ return ERR_MEM; } /* Write name */ res = mdns_write_domain(outpkt, domain); if (res != ERR_OK) { return res; } /* Write type */ field16 = lwip_htons(type); res = pbuf_take_at(outpkt->pbuf, &field16, sizeof(field16), outpkt->write_offset); if (res != ERR_OK) { return res; } outpkt->write_offset += sizeof(field16); /* Write class */ if (unicast) { klass |= 0x8000; } field16 = lwip_htons(klass); res = pbuf_take_at(outpkt->pbuf, &field16, sizeof(field16), outpkt->write_offset); if (res != ERR_OK) { return res; } outpkt->write_offset += sizeof(field16); return ERR_OK; } /** * Write answer to reply packet. * buf or answer_domain can be null. The rd_length written will be buf_length + * size of (compressed) domain. Most uses will need either buf or answer_domain, * special case is SRV that starts with 3 u16 and then a domain name. 
* @param reply The outpacket to write to * @param domain The domain name the answer is for * @param type The DNS type of the answer (like 'AAAA', 'SRV') * @param klass The DNS type of the answer (like 'IN') * @param cache_flush If highest bit in class should be set, to instruct receiver that * this reply replaces any earlier answer for this domain/type/class * @param ttl Validity time in seconds to send out for IP address data in DNS replies * @param buf Pointer to buffer of answer data * @param buf_length Length of variable data * @param answer_domain A domain to write after any buffer data as answer * @return ERR_OK on success, an err_t otherwise */ static err_t mdns_add_answer(struct mdns_outpacket *reply, struct mdns_domain *domain, u16_t type, u16_t klass, u16_t cache_flush, u32_t ttl, const u8_t *buf, size_t buf_length, struct mdns_domain *answer_domain) { u16_t answer_len; u16_t field16; u16_t rdlen_offset; u16_t answer_offset; u32_t field32; err_t res; if (!reply->pbuf) { /* If no pbuf is active, allocate one */ reply->pbuf = pbuf_alloc(PBUF_TRANSPORT, OUTPACKET_SIZE, PBUF_RAM); if (!reply->pbuf) { return ERR_MEM; } reply->write_offset = SIZEOF_DNS_HDR; } /* Worst case calculation. Domain strings might be compressed */ answer_len = domain->length + sizeof(type) + sizeof(klass) + sizeof(ttl) + sizeof(field16)/*rd_length*/; if (buf) { answer_len += (u16_t)buf_length; } if (answer_domain) { answer_len += answer_domain->length; } if (reply->write_offset + answer_len > reply->pbuf->tot_len) { /* No space */ return ERR_MEM; } /* Answer starts with same data as question, then more fields */ mdns_add_question(reply, domain, type, klass, cache_flush); /* Write TTL */ field32 = lwip_htonl(ttl); res = pbuf_take_at(reply->pbuf, &field32, sizeof(field32), reply->write_offset); if (res != ERR_OK) { return res; } reply->write_offset += sizeof(field32); /* Store offsets and skip forward to the data */ rdlen_offset = reply->write_offset; reply->write_offset += sizeof(field16); answer_offset = reply->write_offset; if (buf) { /* Write static data */ res = pbuf_take_at(reply->pbuf, buf, (u16_t)buf_length, reply->write_offset); if (res != ERR_OK) { return res; } reply->write_offset += (u16_t)buf_length; } if (answer_domain) { /* Write name answer (compressed if possible) */ res = mdns_write_domain(reply, answer_domain); if (res != ERR_OK) { return res; } } /* Write rd_length after when we know the answer size */ field16 = lwip_htons(reply->write_offset - answer_offset); res = pbuf_take_at(reply->pbuf, &field16, sizeof(field16), rdlen_offset); return res; } /** * Helper function for mdns_read_question/mdns_read_answer * Reads a domain, type and class from the packet * @param pkt The MDNS packet to read from. The parse_offset field will be * incremented to point to the next unparsed byte. 
* @param info The struct to fill with domain, type and class * @return ERR_OK on success, an err_t otherwise */ static err_t mdns_read_rr_info(struct mdns_packet *pkt, struct mdns_rr_info *info) { u16_t field16, copied; pkt->parse_offset = mdns_readname(pkt->pbuf, pkt->parse_offset, &info->domain); if (pkt->parse_offset == MDNS_READNAME_ERROR) { return ERR_VAL; } copied = pbuf_copy_partial(pkt->pbuf, &field16, sizeof(field16), pkt->parse_offset); if (copied != sizeof(field16)) { return ERR_VAL; } pkt->parse_offset += copied; info->type = lwip_ntohs(field16); copied = pbuf_copy_partial(pkt->pbuf, &field16, sizeof(field16), pkt->parse_offset); if (copied != sizeof(field16)) { return ERR_VAL; } pkt->parse_offset += copied; info->klass = lwip_ntohs(field16); return ERR_OK; } /** * Read a question from the packet. * All questions have to be read before the answers. * @param pkt The MDNS packet to read from. The questions_left field will be decremented * and the parse_offset will be updated. * @param question The struct to fill with question data * @return ERR_OK on success, an err_t otherwise */ static err_t mdns_read_question(struct mdns_packet *pkt, struct mdns_question *question) { /* Safety check */ if (pkt->pbuf->tot_len < pkt->parse_offset) { return ERR_VAL; } if (pkt->questions_left) { err_t res; pkt->questions_left--; memset(question, 0, sizeof(struct mdns_question)); res = mdns_read_rr_info(pkt, &question->info); if (res != ERR_OK) { return res; } /* Extract unicast flag from class field */ question->unicast = question->info.klass & 0x8000; question->info.klass &= 0x7FFF; return ERR_OK; } return ERR_VAL; } /** * Read an answer from the packet * The variable length reply is not copied, its pbuf offset and length is stored instead. * @param pkt The MDNS packet to read. The answers_left field will be decremented and * the parse_offset will be updated. 
* @param answer The struct to fill with answer data * @return ERR_OK on success, an err_t otherwise */ static err_t mdns_read_answer(struct mdns_packet *pkt, struct mdns_answer *answer) { /* Read questions first */ if (pkt->questions_left) { return ERR_VAL; } /* Safety check */ if (pkt->pbuf->tot_len < pkt->parse_offset) { return ERR_VAL; } if (pkt->answers_left) { u16_t copied, field16; u32_t ttl; err_t res; pkt->answers_left--; memset(answer, 0, sizeof(struct mdns_answer)); res = mdns_read_rr_info(pkt, &answer->info); if (res != ERR_OK) { return res; } /* Extract cache_flush flag from class field */ answer->cache_flush = answer->info.klass & 0x8000; answer->info.klass &= 0x7FFF; copied = pbuf_copy_partial(pkt->pbuf, &ttl, sizeof(ttl), pkt->parse_offset); if (copied != sizeof(ttl)) { return ERR_VAL; } pkt->parse_offset += copied; answer->ttl = lwip_ntohl(ttl); copied = pbuf_copy_partial(pkt->pbuf, &field16, sizeof(field16), pkt->parse_offset); if (copied != sizeof(field16)) { return ERR_VAL; } pkt->parse_offset += copied; answer->rd_length = lwip_ntohs(field16); answer->rd_offset = pkt->parse_offset; pkt->parse_offset += answer->rd_length; return ERR_OK; } return ERR_VAL; } #if LWIP_IPV4 /** Write an IPv4 address (A) RR to outpacket */ static err_t mdns_add_a_answer(struct mdns_outpacket *reply, u16_t cache_flush, struct netif *netif) { struct mdns_domain host; mdns_build_host_domain(&host, NETIF_TO_HOST(netif)); LWIP_DEBUGF(MDNS_DEBUG, ("MDNS: Responding with A record\n")); return mdns_add_answer(reply, &host, DNS_RRTYPE_A, DNS_RRCLASS_IN, cache_flush, (NETIF_TO_HOST(netif))->dns_ttl, (const u8_t *) netif_ip4_addr(netif), sizeof(ip4_addr_t), NULL); } /** Write a 4.3.2.1.in-addr.arpa -> hostname.local PTR RR to outpacket */ static err_t mdns_add_hostv4_ptr_answer(struct mdns_outpacket *reply, u16_t cache_flush, struct netif *netif) { struct mdns_domain host, revhost; mdns_build_host_domain(&host, NETIF_TO_HOST(netif)); mdns_build_reverse_v4_domain(&revhost, netif_ip4_addr(netif)); LWIP_DEBUGF(MDNS_DEBUG, ("MDNS: Responding with v4 PTR record\n")); return mdns_add_answer(reply, &revhost, DNS_RRTYPE_PTR, DNS_RRCLASS_IN, cache_flush, (NETIF_TO_HOST(netif))->dns_ttl, NULL, 0, &host); } #endif #if LWIP_IPV6 /** Write an IPv6 address (AAAA) RR to outpacket */ static err_t mdns_add_aaaa_answer(struct mdns_outpacket *reply, u16_t cache_flush, struct netif *netif, int addrindex) { struct mdns_domain host; mdns_build_host_domain(&host, NETIF_TO_HOST(netif)); LWIP_DEBUGF(MDNS_DEBUG, ("MDNS: Responding with AAAA record\n")); return mdns_add_answer(reply, &host, DNS_RRTYPE_AAAA, DNS_RRCLASS_IN, cache_flush, (NETIF_TO_HOST(netif))->dns_ttl, (const u8_t *) netif_ip6_addr(netif, addrindex), sizeof(ip6_addr_t), NULL); } /** Write a x.y.z.ip6.arpa -> hostname.local PTR RR to outpacket */ static err_t mdns_add_hostv6_ptr_answer(struct mdns_outpacket *reply, u16_t cache_flush, struct netif *netif, int addrindex) { struct mdns_domain host, revhost; mdns_build_host_domain(&host, NETIF_TO_HOST(netif)); mdns_build_reverse_v6_domain(&revhost, netif_ip6_addr(netif, addrindex)); LWIP_DEBUGF(MDNS_DEBUG, ("MDNS: Responding with v6 PTR record\n")); return mdns_add_answer(reply, &revhost, DNS_RRTYPE_PTR, DNS_RRCLASS_IN, cache_flush, (NETIF_TO_HOST(netif))->dns_ttl, NULL, 0, &host); } #endif /** Write an all-services -> servicetype PTR RR to outpacket */ static err_t mdns_add_servicetype_ptr_answer(struct mdns_outpacket *reply, struct mdns_service *service) { struct mdns_domain service_type, service_dnssd; 
mdns_build_service_domain(&service_type, service, 0); mdns_build_dnssd_domain(&service_dnssd); LWIP_DEBUGF(MDNS_DEBUG, ("MDNS: Responding with service type PTR record\n")); return mdns_add_answer(reply, &service_dnssd, DNS_RRTYPE_PTR, DNS_RRCLASS_IN, 0, service->dns_ttl, NULL, 0, &service_type); } /** Write a servicetype -> servicename PTR RR to outpacket */ static err_t mdns_add_servicename_ptr_answer(struct mdns_outpacket *reply, struct mdns_service *service) { struct mdns_domain service_type, service_instance; mdns_build_service_domain(&service_type, service, 0); mdns_build_service_domain(&service_instance, service, 1); LWIP_DEBUGF(MDNS_DEBUG, ("MDNS: Responding with service name PTR record\n")); return mdns_add_answer(reply, &service_type, DNS_RRTYPE_PTR, DNS_RRCLASS_IN, 0, service->dns_ttl, NULL, 0, &service_instance); } /** Write a SRV RR to outpacket */ static err_t mdns_add_srv_answer(struct mdns_outpacket *reply, u16_t cache_flush, struct mdns_host *mdns, struct mdns_service *service) { struct mdns_domain service_instance, srvhost; u16_t srvdata[3]; mdns_build_service_domain(&service_instance, service, 1); mdns_build_host_domain(&srvhost, mdns); if (reply->legacy_query) { /* RFC 6762 section 18.14: * In legacy unicast responses generated to answer legacy queries, * name compression MUST NOT be performed on SRV records. */ srvhost.skip_compression = 1; } srvdata[0] = lwip_htons(SRV_PRIORITY); srvdata[1] = lwip_htons(SRV_WEIGHT); srvdata[2] = lwip_htons(service->port); LWIP_DEBUGF(MDNS_DEBUG, ("MDNS: Responding with SRV record\n")); return mdns_add_answer(reply, &service_instance, DNS_RRTYPE_SRV, DNS_RRCLASS_IN, cache_flush, service->dns_ttl, (const u8_t *) &srvdata, sizeof(srvdata), &srvhost); } /** Write a TXT RR to outpacket */ static err_t mdns_add_txt_answer(struct mdns_outpacket *reply, u16_t cache_flush, struct mdns_service *service) { struct mdns_domain service_instance; mdns_build_service_domain(&service_instance, service, 1); mdns_prepare_txtdata(service); LWIP_DEBUGF(MDNS_DEBUG, ("MDNS: Responding with TXT record\n")); return mdns_add_answer(reply, &service_instance, DNS_RRTYPE_TXT, DNS_RRCLASS_IN, cache_flush, service->dns_ttl, (u8_t *) &service->txtdata.name, service->txtdata.length, NULL); } /** * Setup outpacket as a reply to the incoming packet */ static void mdns_init_outpacket(struct mdns_outpacket *out, struct mdns_packet *in) { memset(out, 0, sizeof(struct mdns_outpacket)); out->cache_flush = 1; out->netif = in->netif; /* Copy source IP/port to use when responding unicast, or to choose * which pcb to use for multicast (IPv4/IPv6) */ SMEMCPY(&out->dest_addr, &in->source_addr, sizeof(ip_addr_t)); out->dest_port = in->source_port; if (in->source_port != MDNS_PORT) { out->unicast_reply = 1; out->cache_flush = 0; if (in->questions == 1) { out->legacy_query = 1; out->tx_id = in->tx_id; } } if (in->recv_unicast) { out->unicast_reply = 1; } } /** * Send chosen answers as a reply * * Add all selected answers (first write will allocate pbuf) * Add additional answers based on the selected answers * Send the packet */ static void mdns_send_outpacket(struct mdns_outpacket *outpkt) { struct mdns_service *service; err_t res; int i; struct mdns_host* mdns = NETIF_TO_HOST(outpkt->netif); /* Write answers to host questions */ #if LWIP_IPV4 if (outpkt->host_replies & REPLY_HOST_A) { res = mdns_add_a_answer(outpkt, outpkt->cache_flush, outpkt->netif); if (res != ERR_OK) { goto cleanup; } outpkt->answers++; } if (outpkt->host_replies & REPLY_HOST_PTR_V4) { res = 
mdns_add_hostv4_ptr_answer(outpkt, outpkt->cache_flush, outpkt->netif); if (res != ERR_OK) { goto cleanup; } outpkt->answers++; } #endif #if LWIP_IPV6 if (outpkt->host_replies & REPLY_HOST_AAAA) { int addrindex; for (addrindex = 0; addrindex < LWIP_IPV6_NUM_ADDRESSES; ++addrindex) { if (ip6_addr_isvalid(netif_ip6_addr_state(outpkt->netif, addrindex))) { res = mdns_add_aaaa_answer(outpkt, outpkt->cache_flush, outpkt->netif, addrindex); if (res != ERR_OK) { goto cleanup; } outpkt->answers++; } } } if (outpkt->host_replies & REPLY_HOST_PTR_V6) { u8_t rev_addrs = outpkt->host_reverse_v6_replies; int addrindex = 0; while (rev_addrs) { if (rev_addrs & 1) { res = mdns_add_hostv6_ptr_answer(outpkt, outpkt->cache_flush, outpkt->netif, addrindex); if (res != ERR_OK) { goto cleanup; } outpkt->answers++; } addrindex++; rev_addrs >>= 1; } } #endif /* Write answers to service questions */ for (i = 0; i < MDNS_MAX_SERVICES; ++i) { service = mdns->services[i]; if (!service) { continue; } if (outpkt->serv_replies[i] & REPLY_SERVICE_TYPE_PTR) { res = mdns_add_servicetype_ptr_answer(outpkt, service); if (res != ERR_OK) { goto cleanup; } outpkt->answers++; } if (outpkt->serv_replies[i] & REPLY_SERVICE_NAME_PTR) { res = mdns_add_servicename_ptr_answer(outpkt, service); if (res != ERR_OK) { goto cleanup; } outpkt->answers++; } if (outpkt->serv_replies[i] & REPLY_SERVICE_SRV) { res = mdns_add_srv_answer(outpkt, outpkt->cache_flush, mdns, service); if (res != ERR_OK) { goto cleanup; } outpkt->answers++; } if (outpkt->serv_replies[i] & REPLY_SERVICE_TXT) { res = mdns_add_txt_answer(outpkt, outpkt->cache_flush, service); if (res != ERR_OK) { goto cleanup; } outpkt->answers++; } } /* All answers written, add additional RRs */ for (i = 0; i < MDNS_MAX_SERVICES; ++i) { service = mdns->services[i]; if (!service) { continue; } if (outpkt->serv_replies[i] & REPLY_SERVICE_NAME_PTR) { /* Our service instance requested, include SRV & TXT * if they are already not requested. 
*/ if (!(outpkt->serv_replies[i] & REPLY_SERVICE_SRV)) { res = mdns_add_srv_answer(outpkt, outpkt->cache_flush, mdns, service); if (res != ERR_OK) { goto cleanup; } outpkt->additional++; } if (!(outpkt->serv_replies[i] & REPLY_SERVICE_TXT)) { res = mdns_add_txt_answer(outpkt, outpkt->cache_flush, service); if (res != ERR_OK) { goto cleanup; } outpkt->additional++; } } /* If service instance, SRV, record or an IP address is requested, * supply all addresses for the host */ if ((outpkt->serv_replies[i] & (REPLY_SERVICE_NAME_PTR | REPLY_SERVICE_SRV)) || (outpkt->host_replies & (REPLY_HOST_A | REPLY_HOST_AAAA))) { #if LWIP_IPV6 if (!(outpkt->host_replies & REPLY_HOST_AAAA)) { int addrindex; for (addrindex = 0; addrindex < LWIP_IPV6_NUM_ADDRESSES; ++addrindex) { if (ip6_addr_isvalid(netif_ip6_addr_state(outpkt->netif, addrindex))) { res = mdns_add_aaaa_answer(outpkt, outpkt->cache_flush, outpkt->netif, addrindex); if (res != ERR_OK) { goto cleanup; } outpkt->additional++; } } } #endif #if LWIP_IPV4 if (!(outpkt->host_replies & REPLY_HOST_A)) { res = mdns_add_a_answer(outpkt, outpkt->cache_flush, outpkt->netif); if (res != ERR_OK) { goto cleanup; } outpkt->additional++; } #endif } } if (outpkt->pbuf) { const ip_addr_t *mcast_destaddr; struct dns_hdr hdr; /* Write header */ memset(&hdr, 0, sizeof(hdr)); hdr.flags1 = DNS_FLAG1_RESPONSE | DNS_FLAG1_AUTHORATIVE; hdr.numanswers = lwip_htons(outpkt->answers); hdr.numextrarr = lwip_htons(outpkt->additional); if (outpkt->legacy_query) { hdr.numquestions = lwip_htons(1); hdr.id = lwip_htons(outpkt->tx_id); } pbuf_take(outpkt->pbuf, &hdr, sizeof(hdr)); /* Shrink packet */ pbuf_realloc(outpkt->pbuf, outpkt->write_offset); if (IP_IS_V6_VAL(outpkt->dest_addr)) { #if LWIP_IPV6 mcast_destaddr = &v6group; #endif } else { #if LWIP_IPV4 mcast_destaddr = &v4group; #endif } /* Send created packet */ LWIP_DEBUGF(MDNS_DEBUG, ("MDNS: Sending packet, len=%d, unicast=%d\n", outpkt->write_offset, outpkt->unicast_reply)); if (outpkt->unicast_reply) { udp_sendto_if(mdns_pcb, outpkt->pbuf, &outpkt->dest_addr, outpkt->dest_port, outpkt->netif); } else { udp_sendto_if(mdns_pcb, outpkt->pbuf, mcast_destaddr, MDNS_PORT, outpkt->netif); } } cleanup: if (outpkt->pbuf) { pbuf_free(outpkt->pbuf); outpkt->pbuf = NULL; } } /** * Send unsolicited answer containing all our known data * @param netif The network interface to send on * @param destination The target address to send to (usually multicast address) */ static void mdns_announce(struct netif *netif, const ip_addr_t *destination) { struct mdns_outpacket announce; int i; struct mdns_host* mdns = NETIF_TO_HOST(netif); memset(&announce, 0, sizeof(announce)); announce.netif = netif; announce.cache_flush = 1; #if LWIP_IPV4 if (!ip4_addr_isany_val(*netif_ip4_addr(netif))) announce.host_replies = REPLY_HOST_A | REPLY_HOST_PTR_V4; #endif #if LWIP_IPV6 for (i = 0; i < LWIP_IPV6_NUM_ADDRESSES; ++i) { if (ip6_addr_isvalid(netif_ip6_addr_state(netif, i))) { announce.host_replies |= REPLY_HOST_AAAA | REPLY_HOST_PTR_V6; announce.host_reverse_v6_replies |= (1 << i); } } #endif for (i = 0; i < MDNS_MAX_SERVICES; i++) { struct mdns_service *serv = mdns->services[i]; if (serv) { announce.serv_replies[i] = REPLY_SERVICE_TYPE_PTR | REPLY_SERVICE_NAME_PTR | REPLY_SERVICE_SRV | REPLY_SERVICE_TXT; } } announce.dest_port = MDNS_PORT; SMEMCPY(&announce.dest_addr, destination, sizeof(announce.dest_addr)); mdns_send_outpacket(&announce); } /** * Handle question MDNS packet * 1. Parse all questions and set bits what answers to send * 2. 
Clear pending answers if known answers are supplied * 3. Put chosen answers in new packet and send as reply */ static void mdns_handle_question(struct mdns_packet *pkt) { struct mdns_service *service; struct mdns_outpacket reply; int replies = 0; int i; err_t res; struct mdns_host* mdns = NETIF_TO_HOST(pkt->netif); mdns_init_outpacket(&reply, pkt); while (pkt->questions_left) { struct mdns_question q; res = mdns_read_question(pkt, &q); if (res != ERR_OK) { LWIP_DEBUGF(MDNS_DEBUG, ("MDNS: Failed to parse question, skipping query packet\n")); return; } LWIP_DEBUGF(MDNS_DEBUG, ("MDNS: Query for domain ")); mdns_domain_debug_print(&q.info.domain); LWIP_DEBUGF(MDNS_DEBUG, (" type %d class %d\n", q.info.type, q.info.klass)); if (q.unicast) { /* Reply unicast if any question is unicast */ reply.unicast_reply = 1; } reply.host_replies |= check_host(pkt->netif, &q.info, &reply.host_reverse_v6_replies); replies |= reply.host_replies; for (i = 0; i < MDNS_MAX_SERVICES; ++i) { service = mdns->services[i]; if (!service) { continue; } reply.serv_replies[i] |= check_service(service, &q.info); replies |= reply.serv_replies[i]; } if (replies && reply.legacy_query) { /* Add question to reply packet (legacy packet only has 1 question) */ res = mdns_add_question(&reply, &q.info.domain, q.info.type, q.info.klass, 0); if (res != ERR_OK) { goto cleanup; } } } /* Handle known answers */ while (pkt->answers_left) { struct mdns_answer ans; u8_t rev_v6; int match; res = mdns_read_answer(pkt, &ans); if (res != ERR_OK) { LWIP_DEBUGF(MDNS_DEBUG, ("MDNS: Failed to parse answer, skipping query packet\n")); goto cleanup; } LWIP_DEBUGF(MDNS_DEBUG, ("MDNS: Known answer for domain ")); mdns_domain_debug_print(&ans.info.domain); LWIP_DEBUGF(MDNS_DEBUG, (" type %d class %d\n", ans.info.type, ans.info.klass)); if (ans.info.type == DNS_RRTYPE_ANY || ans.info.klass == DNS_RRCLASS_ANY) { /* Skip known answers for ANY type & class */ continue; } rev_v6 = 0; match = reply.host_replies & check_host(pkt->netif, &ans.info, &rev_v6); if (match && (ans.ttl > (mdns->dns_ttl / 2))) { /* The RR in the known answer matches an RR we are planning to send, * and the TTL is less than half gone. * If the payload matches we should not send that answer. 
*/ if (ans.info.type == DNS_RRTYPE_PTR) { /* Read domain and compare */ struct mdns_domain known_ans, my_ans; u16_t len; len = mdns_readname(pkt->pbuf, ans.rd_offset, &known_ans); res = mdns_build_host_domain(&my_ans, mdns); if (len != MDNS_READNAME_ERROR && res == ERR_OK && mdns_domain_eq(&known_ans, &my_ans)) { #if LWIP_IPV4 if (match & REPLY_HOST_PTR_V4) { LWIP_DEBUGF(MDNS_DEBUG, ("MDNS: Skipping known answer: v4 PTR\n")); reply.host_replies &= ~REPLY_HOST_PTR_V4; } #endif #if LWIP_IPV6 if (match & REPLY_HOST_PTR_V6) { LWIP_DEBUGF(MDNS_DEBUG, ("MDNS: Skipping known answer: v6 PTR\n")); reply.host_reverse_v6_replies &= ~rev_v6; if (reply.host_reverse_v6_replies == 0) { reply.host_replies &= ~REPLY_HOST_PTR_V6; } } #endif } } else if (match & REPLY_HOST_A) { #if LWIP_IPV4 if (ans.rd_length == sizeof(ip4_addr_t) && pbuf_memcmp(pkt->pbuf, ans.rd_offset, netif_ip4_addr(pkt->netif), ans.rd_length) == 0) { LWIP_DEBUGF(MDNS_DEBUG, ("MDNS: Skipping known answer: A\n")); reply.host_replies &= ~REPLY_HOST_A; } #endif } else if (match & REPLY_HOST_AAAA) { #if LWIP_IPV6 if (ans.rd_length == sizeof(ip6_addr_t) && /* TODO this clears all AAAA responses if first addr is set as known */ pbuf_memcmp(pkt->pbuf, ans.rd_offset, netif_ip6_addr(pkt->netif, 0), ans.rd_length) == 0) { LWIP_DEBUGF(MDNS_DEBUG, ("MDNS: Skipping known answer: AAAA\n")); reply.host_replies &= ~REPLY_HOST_AAAA; } #endif } } for (i = 0; i < MDNS_MAX_SERVICES; ++i) { service = mdns->services[i]; if (!service) { continue; } match = reply.serv_replies[i] & check_service(service, &ans.info); if (match && (ans.ttl > (service->dns_ttl / 2))) { /* The RR in the known answer matches an RR we are planning to send, * and the TTL is less than half gone. * If the payload matches we should not send that answer. 
*/ if (ans.info.type == DNS_RRTYPE_PTR) { /* Read domain and compare */ struct mdns_domain known_ans, my_ans; u16_t len; len = mdns_readname(pkt->pbuf, ans.rd_offset, &known_ans); if (len != MDNS_READNAME_ERROR) { if (match & REPLY_SERVICE_TYPE_PTR) { res = mdns_build_service_domain(&my_ans, service, 0); if (res == ERR_OK && mdns_domain_eq(&known_ans, &my_ans)) { LWIP_DEBUGF(MDNS_DEBUG, ("MDNS: Skipping known answer: service type PTR\n")); reply.serv_replies[i] &= ~REPLY_SERVICE_TYPE_PTR; } } if (match & REPLY_SERVICE_NAME_PTR) { res = mdns_build_service_domain(&my_ans, service, 1); if (res == ERR_OK && mdns_domain_eq(&known_ans, &my_ans)) { LWIP_DEBUGF(MDNS_DEBUG, ("MDNS: Skipping known answer: service name PTR\n")); reply.serv_replies[i] &= ~REPLY_SERVICE_NAME_PTR; } } } } else if (match & REPLY_SERVICE_SRV) { /* Read and compare to my SRV record */ u16_t field16, len, read_pos; struct mdns_domain known_ans, my_ans; read_pos = ans.rd_offset; do { /* Check priority field */ len = pbuf_copy_partial(pkt->pbuf, &field16, sizeof(field16), read_pos); if (len != sizeof(field16) || lwip_ntohs(field16) != SRV_PRIORITY) { break; } read_pos += len; /* Check weight field */ len = pbuf_copy_partial(pkt->pbuf, &field16, sizeof(field16), read_pos); if (len != sizeof(field16) || lwip_ntohs(field16) != SRV_WEIGHT) { break; } read_pos += len; /* Check port field */ len = pbuf_copy_partial(pkt->pbuf, &field16, sizeof(field16), read_pos); if (len != sizeof(field16) || lwip_ntohs(field16) != service->port) { break; } read_pos += len; /* Check host field */ len = mdns_readname(pkt->pbuf, read_pos, &known_ans); mdns_build_host_domain(&my_ans, mdns); if (len == MDNS_READNAME_ERROR || !mdns_domain_eq(&known_ans, &my_ans)) { break; } LWIP_DEBUGF(MDNS_DEBUG, ("MDNS: Skipping known answer: SRV\n")); reply.serv_replies[i] &= ~REPLY_SERVICE_SRV; } while (0); } else if (match & REPLY_SERVICE_TXT) { mdns_prepare_txtdata(service); if (service->txtdata.length == ans.rd_length && pbuf_memcmp(pkt->pbuf, ans.rd_offset, service->txtdata.name, ans.rd_length) == 0) { LWIP_DEBUGF(MDNS_DEBUG, ("MDNS: Skipping known answer: TXT\n")); reply.serv_replies[i] &= ~REPLY_SERVICE_TXT; } } } } } mdns_send_outpacket(&reply); cleanup: if (reply.pbuf) { /* This should only happen if we fail to alloc/write question for legacy query */ pbuf_free(reply.pbuf); reply.pbuf = NULL; } } /** * Handle response MDNS packet * Only prints debug for now. Will need more code to do conflict resolution. */ static void mdns_handle_response(struct mdns_packet *pkt) { /* Ignore all questions */ while (pkt->questions_left) { struct mdns_question q; err_t res; res = mdns_read_question(pkt, &q); if (res != ERR_OK) { LWIP_DEBUGF(MDNS_DEBUG, ("MDNS: Failed to parse question, skipping response packet\n")); return; } } while (pkt->answers_left) { struct mdns_answer ans; err_t res; res = mdns_read_answer(pkt, &ans); if (res != ERR_OK) { LWIP_DEBUGF(MDNS_DEBUG, ("MDNS: Failed to parse answer, skipping response packet\n")); return; } LWIP_DEBUGF(MDNS_DEBUG, ("MDNS: Answer for domain ")); mdns_domain_debug_print(&ans.info.domain); LWIP_DEBUGF(MDNS_DEBUG, (" type %d class %d\n", ans.info.type, ans.info.klass)); } } /** * Receive input function for MDNS packets. * Handles both IPv4 and IPv6 UDP pcbs. 
*/ static void mdns_recv(void *arg, struct udp_pcb *pcb, struct pbuf *p, const ip_addr_t *addr, u16_t port) { struct dns_hdr hdr; struct mdns_packet packet; struct netif *recv_netif = ip_current_input_netif(); u16_t offset = 0; LWIP_UNUSED_ARG(arg); LWIP_UNUSED_ARG(pcb); LWIP_DEBUGF(MDNS_DEBUG, ("MDNS: Received IPv%d MDNS packet, len %d\n", IP_IS_V6(addr)? 6 : 4, p->tot_len)); if (NETIF_TO_HOST(recv_netif) == NULL) { /* From netif not configured for MDNS */ goto dealloc; } if (pbuf_copy_partial(p, &hdr, SIZEOF_DNS_HDR, offset) < SIZEOF_DNS_HDR) { /* Too small */ goto dealloc; } offset += SIZEOF_DNS_HDR; if (DNS_HDR_GET_OPCODE(&hdr)) { /* Ignore non-standard queries in multicast packets (RFC 6762, section 18.3) */ goto dealloc; } memset(&packet, 0, sizeof(packet)); SMEMCPY(&packet.source_addr, addr, sizeof(packet.source_addr)); packet.source_port = port; packet.netif = recv_netif; packet.pbuf = p; packet.parse_offset = offset; packet.tx_id = lwip_ntohs(hdr.id); packet.questions = packet.questions_left = lwip_ntohs(hdr.numquestions); packet.answers = packet.answers_left = lwip_ntohs(hdr.numanswers) + lwip_ntohs(hdr.numauthrr) + lwip_ntohs(hdr.numextrarr); #if LWIP_IPV6 if (IP_IS_V6(ip_current_dest_addr())) { if (!ip_addr_cmp(ip_current_dest_addr(), &v6group)) { packet.recv_unicast = 1; } } #endif #if LWIP_IPV4 if (!IP_IS_V6(ip_current_dest_addr())) { if (!ip_addr_cmp(ip_current_dest_addr(), &v4group)) { packet.recv_unicast = 1; } } #endif if (hdr.flags1 & DNS_FLAG1_RESPONSE) { mdns_handle_response(&packet); } else { mdns_handle_question(&packet); } dealloc: pbuf_free(p); } /** * @ingroup mdns * Initiate MDNS responder. Will open UDP sockets on port 5353 */ void mdns_resp_init(void) { err_t res; mdns_pcb = udp_new_ip_type(IPADDR_TYPE_ANY); LWIP_ASSERT("Failed to allocate pcb", mdns_pcb != NULL); #if LWIP_MULTICAST_TX_OPTIONS udp_set_multicast_ttl(mdns_pcb, MDNS_TTL); #else mdns_pcb->ttl = MDNS_TTL; #endif res = udp_bind(mdns_pcb, IP_ANY_TYPE, MDNS_PORT); LWIP_UNUSED_ARG(res); /* in case of LWIP_NOASSERT */ LWIP_ASSERT("Failed to bind pcb", res == ERR_OK); udp_recv(mdns_pcb, mdns_recv, NULL); mdns_netif_client_id = netif_alloc_client_data_id(); } /** * @ingroup mdns * Announce IP settings have changed on netif. * Call this in your callback registered by netif_set_status_callback(). * This function may go away in the future when netif supports registering * multiple callback functions. * @param netif The network interface where settings have changed. */ void mdns_resp_netif_settings_changed(struct netif *netif) { LWIP_ERROR("mdns_resp_netif_ip_changed: netif != NULL", (netif != NULL), return); if (NETIF_TO_HOST(netif) == NULL) { return; } /* Announce on IPv6 and IPv4 */ #if LWIP_IPV6 mdns_announce(netif, IP6_ADDR_ANY); #endif #if LWIP_IPV4 mdns_announce(netif, IP4_ADDR_ANY); #endif } /** * @ingroup mdns * Activate MDNS responder for a network interface and send announce packets. * @param netif The network interface to activate. * @param hostname Name to use. Queries for &lt;hostname&gt;.local will be answered * with the IP addresses of the netif. The hostname will be copied, the * given pointer can be on the stack. 
* @param dns_ttl Validity time in seconds to send out for IP address data in DNS replies * @return ERR_OK if netif was added, an err_t otherwise */ err_t mdns_resp_add_netif(struct netif *netif, const char *hostname, u32_t dns_ttl) { err_t res; struct mdns_host* mdns; LWIP_ERROR("mdns_resp_add_netif: netif != NULL", (netif != NULL), return ERR_VAL); LWIP_ERROR("mdns_resp_add_netif: Hostname too long", (strlen(hostname) <= MDNS_LABEL_MAXLEN), return ERR_VAL); LWIP_ASSERT("mdns_resp_add_netif: Double add", NETIF_TO_HOST(netif) == NULL); mdns = (struct mdns_host *) mem_malloc(sizeof(struct mdns_host)); LWIP_ERROR("mdns_resp_add_netif: Alloc failed", (mdns != NULL), return ERR_MEM); netif_set_client_data(netif, mdns_netif_client_id, mdns); memset(mdns, 0, sizeof(struct mdns_host)); MEMCPY(&mdns->name, hostname, LWIP_MIN(MDNS_LABEL_MAXLEN, strlen(hostname))); mdns->dns_ttl = dns_ttl; /* Join multicast groups */ #if LWIP_IPV4 res = igmp_joingroup_netif(netif, ip_2_ip4(&v4group)); if (res != ERR_OK) { goto cleanup; } #endif #if LWIP_IPV6 res = mld6_joingroup_netif(netif, ip_2_ip6(&v6group)); if (res != ERR_OK) { goto cleanup; } #endif mdns_resp_netif_settings_changed(netif); return ERR_OK; cleanup: mem_free(mdns); netif_set_client_data(netif, mdns_netif_client_id, NULL); return res; } /** * @ingroup mdns * Stop responding to MDNS queries on this interface, leave multicast groups, * and free the helper structure and any of its services. * @param netif The network interface to remove. * @return ERR_OK if netif was removed, an err_t otherwise */ err_t mdns_resp_remove_netif(struct netif *netif) { int i; struct mdns_host* mdns; LWIP_ASSERT("mdns_resp_remove_netif: Null pointer", netif); mdns = NETIF_TO_HOST(netif); LWIP_ERROR("mdns_resp_remove_netif: Not an active netif", (mdns != NULL), return ERR_VAL); for (i = 0; i < MDNS_MAX_SERVICES; i++) { struct mdns_service *service = mdns->services[i]; if (service) { mem_free(service); } } /* Leave multicast groups */ #if LWIP_IPV4 igmp_leavegroup_netif(netif, ip_2_ip4(&v4group)); #endif #if LWIP_IPV6 mld6_leavegroup_netif(netif, ip_2_ip6(&v6group)); #endif mem_free(mdns); netif_set_client_data(netif, mdns_netif_client_id, NULL); return ERR_OK; } /** * @ingroup mdns * Add a service to the selected network interface. * @param netif The network interface to publish this service on * @param name The name of the service * @param service The service type, like "_http" * @param proto The service protocol, DNSSD_PROTO_TCP for TCP ("_tcp") and DNSSD_PROTO_UDP * for others ("_udp") * @param port The port the service listens to * @param dns_ttl Validity time in seconds to send out for service data in DNS replies * @param txt_fn Callback to get TXT data. Will be called each time a TXT reply is created to * allow dynamic replies. 
* @param txt_data Userdata pointer for txt_fn * @return ERR_OK if the service was added to the netif, an err_t otherwise */ err_t mdns_resp_add_service(struct netif *netif, const char *name, const char *service, enum mdns_sd_proto proto, u16_t port, u32_t dns_ttl, service_get_txt_fn_t txt_fn, void *txt_data) { int i; int slot = -1; struct mdns_service *srv; struct mdns_host* mdns; LWIP_ASSERT("mdns_resp_add_service: netif != NULL", netif); mdns = NETIF_TO_HOST(netif); LWIP_ERROR("mdns_resp_add_service: Not an mdns netif", (mdns != NULL), return ERR_VAL); LWIP_ERROR("mdns_resp_add_service: Name too long", (strlen(name) <= MDNS_LABEL_MAXLEN), return ERR_VAL); LWIP_ERROR("mdns_resp_add_service: Service too long", (strlen(service) <= MDNS_LABEL_MAXLEN), return ERR_VAL); LWIP_ERROR("mdns_resp_add_service: Bad proto (need TCP or UDP)", (proto == DNSSD_PROTO_TCP || proto == DNSSD_PROTO_UDP), return ERR_VAL); for (i = 0; i < MDNS_MAX_SERVICES; i++) { if (mdns->services[i] == NULL) { slot = i; break; } } LWIP_ERROR("mdns_resp_add_service: Service list full (increase MDNS_MAX_SERVICES)", (slot >= 0), return ERR_MEM); srv = (struct mdns_service*)mem_malloc(sizeof(struct mdns_service)); LWIP_ERROR("mdns_resp_add_service: Alloc failed", (srv != NULL), return ERR_MEM); memset(srv, 0, sizeof(struct mdns_service)); MEMCPY(&srv->name, name, LWIP_MIN(MDNS_LABEL_MAXLEN, strlen(name))); MEMCPY(&srv->service, service, LWIP_MIN(MDNS_LABEL_MAXLEN, strlen(service))); srv->txt_fn = txt_fn; srv->txt_userdata = txt_data; srv->proto = (u16_t)proto; srv->port = port; srv->dns_ttl = dns_ttl; mdns->services[slot] = srv; /* Announce on IPv6 and IPv4 */ #if LWIP_IPV6 mdns_announce(netif, IP6_ADDR_ANY); #endif #if LWIP_IPV4 mdns_announce(netif, IP4_ADDR_ANY); #endif return ERR_OK; } /** * @ingroup mdns * Call this function from inside the service_get_txt_fn_t callback to add text data. * Buffer for TXT data is 256 bytes, and each field is prefixed with a length byte. * @param service The service provided to the get_txt callback * @param txt String to add to the TXT field. * @param txt_len Length of string * @return ERR_OK if the string was added to the reply, an err_t otherwise */ err_t mdns_resp_add_service_txtitem(struct mdns_service *service, const char *txt, u8_t txt_len) { LWIP_ASSERT("mdns_resp_add_service_txtitem: service != NULL", service); /* Use a mdns_domain struct to store txt chunks since it is the same encoding */ return mdns_domain_add_label(&service->txtdata, txt, txt_len); } #endif /* LWIP_MDNS_RESPONDER */
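/* ------------------------------------------------------------------------
 * Editor's sketch (not part of mdns.c): a minimal, hypothetical example of
 * the responder API documented above.  example_srv_txt(), example_mdns_setup()
 * and the hostname/service strings are invented for illustration; error
 * handling is omitted.  mdns_resp_init() must run once before any netif is
 * added, and LWIP_MDNS_RESPONDER plus LWIP_IGMP (and LWIP_IPV6_MLD for IPv6)
 * must be enabled in lwipopts.h, as checked at the top of this file.
 * ------------------------------------------------------------------------ */
#include "lwip/apps/mdns.h"
#include "lwip/netif.h"

/* TXT callback: invoked each time a TXT reply is built for the service. */
static void
example_srv_txt(struct mdns_service *service, void *txt_userdata)
{
  LWIP_UNUSED_ARG(txt_userdata);
  /* Adds one length-prefixed string ("path=/") to the 256-byte TXT buffer. */
  mdns_resp_add_service_txtitem(service, "path=/", 6);
}

/* Advertise <hostname>.local plus one HTTP service instance on a netif. */
static void
example_mdns_setup(struct netif *netif)
{
  mdns_resp_init();                                     /* bind UDP port 5353 */
  mdns_resp_add_netif(netif, "lwipdev", 3600);          /* answers lwipdev.local */
  mdns_resp_add_service(netif, "myweb", "_http", DNSSD_PROTO_TCP,
                        80, 3600, example_srv_txt, NULL);
}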
2301_81045437/classic-platform
communication/lwip-2.0.3/src/apps/mdns/mdns.c
C
unknown
66,970
/** * @file * MQTT client * * @defgroup mqtt MQTT client * @ingroup apps * @verbinclude mqtt_client.txt */ /* * Copyright (c) 2016 Erik Andersson <erian747@gmail.com> * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY * OF SUCH DAMAGE. * * This file is part of the lwIP TCP/IP stack * * Author: Erik Andersson <erian747@gmail.com> * * * @todo: * - Handle large outgoing payloads for PUBLISH messages * - Fix restriction of a single topic in each (UN)SUBSCRIBE message (protocol has support for multiple topics) * - Add support for legacy MQTT protocol version * * Please coordinate changes and requests with Erik Andersson * Erik Andersson <erian747@gmail.com> * */ #include "lwip/apps/mqtt.h" #include "lwip/timeouts.h" #include "lwip/ip_addr.h" #include "lwip/mem.h" #include "lwip/err.h" #include "lwip/pbuf.h" #include "lwip/tcp.h" #include <string.h> #if LWIP_TCP && LWIP_CALLBACK_API /** * MQTT_DEBUG: Default is off. 
*/ #if !defined MQTT_DEBUG || defined __DOXYGEN__ #define MQTT_DEBUG LWIP_DBG_OFF #endif #define MQTT_DEBUG_TRACE (MQTT_DEBUG | LWIP_DBG_TRACE) #define MQTT_DEBUG_STATE (MQTT_DEBUG | LWIP_DBG_STATE) #define MQTT_DEBUG_WARN (MQTT_DEBUG | LWIP_DBG_LEVEL_WARNING) #define MQTT_DEBUG_WARN_STATE (MQTT_DEBUG | LWIP_DBG_LEVEL_WARNING | LWIP_DBG_STATE) #define MQTT_DEBUG_SERIOUS (MQTT_DEBUG | LWIP_DBG_LEVEL_SERIOUS) static void mqtt_cyclic_timer(void *arg); /** * MQTT client connection states */ enum { TCP_DISCONNECTED, TCP_CONNECTING, MQTT_CONNECTING, MQTT_CONNECTED }; /** * MQTT control message types */ enum mqtt_message_type { MQTT_MSG_TYPE_CONNECT = 1, MQTT_MSG_TYPE_CONNACK = 2, MQTT_MSG_TYPE_PUBLISH = 3, MQTT_MSG_TYPE_PUBACK = 4, MQTT_MSG_TYPE_PUBREC = 5, MQTT_MSG_TYPE_PUBREL = 6, MQTT_MSG_TYPE_PUBCOMP = 7, MQTT_MSG_TYPE_SUBSCRIBE = 8, MQTT_MSG_TYPE_SUBACK = 9, MQTT_MSG_TYPE_UNSUBSCRIBE = 10, MQTT_MSG_TYPE_UNSUBACK = 11, MQTT_MSG_TYPE_PINGREQ = 12, MQTT_MSG_TYPE_PINGRESP = 13, MQTT_MSG_TYPE_DISCONNECT = 14 }; /** Helpers to extract control packet type and qos from first byte in fixed header */ #define MQTT_CTL_PACKET_TYPE(fixed_hdr_byte0) ((fixed_hdr_byte0 & 0xf0) >> 4) #define MQTT_CTL_PACKET_QOS(fixed_hdr_byte0) ((fixed_hdr_byte0 & 0x6) >> 1) /** * MQTT connect flags, only used in CONNECT message */ enum mqtt_connect_flag { MQTT_CONNECT_FLAG_USERNAME = 1 << 7, MQTT_CONNECT_FLAG_PASSWORD = 1 << 6, MQTT_CONNECT_FLAG_WILL_RETAIN = 1 << 5, MQTT_CONNECT_FLAG_WILL = 1 << 2, MQTT_CONNECT_FLAG_CLEAN_SESSION = 1 << 1 }; #if defined(LWIP_DEBUG) static const char * const mqtt_message_type_str[15] = { "UNDEFINED", "CONNECT", "CONNACK", "PUBLISH", "PUBACK", "PUBREC", "PUBREL", "PUBCOMP", "SUBSCRIBE", "SUBACK", "UNSUBSCRIBE", "UNSUBACK", "PINGREQ", "PINGRESP", "DISCONNECT" }; /** * Message type value to string * @param msg_type see enum mqtt_message_type * * @return Control message type text string */ static const char * mqtt_msg_type_to_str(u8_t msg_type) { if (msg_type >= LWIP_ARRAYSIZE(mqtt_message_type_str)) { msg_type = 0; } return mqtt_message_type_str[msg_type]; } #endif /** * Generate MQTT packet identifier * @param client MQTT client * @return New packet identifier, range 1 to 65535 */ static u16_t msg_generate_packet_id(mqtt_client_t *client) { client->pkt_id_seq++; if (client->pkt_id_seq == 0) { client->pkt_id_seq++; } return client->pkt_id_seq; } /*--------------------------------------------------------------------------------------------------------------------- */ /* Output ring buffer */ #define MQTT_RINGBUF_IDX_MASK ((MQTT_OUTPUT_RINGBUF_SIZE) - 1) /** Add single item to ring buffer */ #define mqtt_ringbuf_put(rb, item) ((rb)->buf)[(rb)->put++ & MQTT_RINGBUF_IDX_MASK] = (item) /** Return number of bytes in ring buffer */ #define mqtt_ringbuf_len(rb) ((u16_t)((rb)->put - (rb)->get)) /** Return number of bytes free in ring buffer */ #define mqtt_ringbuf_free(rb) (MQTT_OUTPUT_RINGBUF_SIZE - mqtt_ringbuf_len(rb)) /** Return number of bytes possible to read without wrapping around */ #define mqtt_ringbuf_linear_read_length(rb) LWIP_MIN(mqtt_ringbuf_len(rb), (MQTT_OUTPUT_RINGBUF_SIZE - ((rb)->get & MQTT_RINGBUF_IDX_MASK))) /** Return pointer to ring buffer get position */ #define mqtt_ringbuf_get_ptr(rb) (&(rb)->buf[(rb)->get & MQTT_RINGBUF_IDX_MASK]) #define mqtt_ringbuf_advance_get_idx(rb, len) ((rb)->get += (len)) /** * Try send as many bytes as possible from output ring buffer * @param rb Output ring buffer * @param tpcb TCP connection handle */ static void mqtt_output_send(struct 
mqtt_ringbuf_t *rb, struct tcp_pcb *tpcb) { err_t err; u8_t wrap = 0; u16_t ringbuf_lin_len = mqtt_ringbuf_linear_read_length(rb); u16_t send_len = tcp_sndbuf(tpcb); LWIP_ASSERT("mqtt_output_send: tpcb != NULL", tpcb != NULL); if (send_len == 0 || ringbuf_lin_len == 0) { return; } LWIP_DEBUGF(MQTT_DEBUG_TRACE,("mqtt_output_send: tcp_sndbuf: %d bytes, ringbuf_linear_available: %d, get %d, put %d\n", send_len, ringbuf_lin_len, ((rb)->get & MQTT_RINGBUF_IDX_MASK), ((rb)->put & MQTT_RINGBUF_IDX_MASK))); if (send_len > ringbuf_lin_len) { /* Space in TCP output buffer is larger than available in ring buffer linear portion */ send_len = ringbuf_lin_len; /* Wrap around if more data in ring buffer after linear portion */ wrap = (mqtt_ringbuf_len(rb) > ringbuf_lin_len); } err = tcp_write(tpcb, mqtt_ringbuf_get_ptr(rb), send_len, TCP_WRITE_FLAG_COPY | (wrap ? TCP_WRITE_FLAG_MORE : 0)); if ((err == ERR_OK) && wrap) { mqtt_ringbuf_advance_get_idx(rb, send_len); /* Use the lesser one of ring buffer linear length and TCP send buffer size */ send_len = LWIP_MIN(tcp_sndbuf(tpcb), mqtt_ringbuf_linear_read_length(rb)); err = tcp_write(tpcb, mqtt_ringbuf_get_ptr(rb), send_len, TCP_WRITE_FLAG_COPY); } if (err == ERR_OK) { mqtt_ringbuf_advance_get_idx(rb, send_len); /* Flush */ tcp_output(tpcb); } else { LWIP_DEBUGF(MQTT_DEBUG_WARN, ("mqtt_output_send: Send failed with err %d (\"%s\")\n", err, lwip_strerr(err))); } } /*--------------------------------------------------------------------------------------------------------------------- */ /* Request queue */ /** * Create request item * @param r_objs Pointer to request objects * @param pkt_id Packet identifier of request * @param cb Packet callback to call when requests lifetime ends * @param arg Parameter following callback * @return Request or NULL if failed to create */ static struct mqtt_request_t * mqtt_create_request(struct mqtt_request_t *r_objs, u16_t pkt_id, mqtt_request_cb_t cb, void *arg) { struct mqtt_request_t *r = NULL; u8_t n; LWIP_ASSERT("mqtt_create_request: r_objs != NULL", r_objs != NULL); for (n = 0; n < MQTT_REQ_MAX_IN_FLIGHT; n++) { /* Item point to itself if not in use */ if (r_objs[n].next == &r_objs[n]) { r = &r_objs[n]; r->next = NULL; r->cb = cb; r->arg = arg; r->pkt_id = pkt_id; break; } } return r; } /** * Append request to pending request queue * @param tail Pointer to request queue tail pointer * @param r Request to append */ static void mqtt_append_request(struct mqtt_request_t **tail, struct mqtt_request_t *r) { struct mqtt_request_t *head = NULL; s16_t time_before = 0; struct mqtt_request_t *iter; LWIP_ASSERT("mqtt_append_request: tail != NULL", tail != NULL); /* Iterate trough queue to find head, and count total timeout time */ for (iter = *tail; iter != NULL; iter = iter->next) { time_before += iter->timeout_diff; head = iter; } LWIP_ASSERT("mqtt_append_request: time_before <= MQTT_REQ_TIMEOUT", time_before <= MQTT_REQ_TIMEOUT); r->timeout_diff = MQTT_REQ_TIMEOUT - time_before; if (head == NULL) { *tail = r; } else { head->next = r; } } /** * Delete request item * @param r Request item to delete */ static void mqtt_delete_request(struct mqtt_request_t *r) { if (r != NULL) { r->next = r; } } /** * Remove a request item with a specific packet identifier from request queue * @param tail Pointer to request queue tail pointer * @param pkt_id Packet identifier of request to take * @return Request item if found, NULL if not */ static struct mqtt_request_t * mqtt_take_request(struct mqtt_request_t **tail, u16_t pkt_id) { struct 
mqtt_request_t *iter = NULL, *prev = NULL; LWIP_ASSERT("mqtt_take_request: tail != NULL", tail != NULL); /* Search all request for pkt_id */ for (iter = *tail; iter != NULL; iter = iter->next) { if (iter->pkt_id == pkt_id) { break; } prev = iter; } /* If request was found */ if (iter != NULL) { /* unchain */ if (prev == NULL) { *tail= iter->next; } else { prev->next = iter->next; } /* If exists, add remaining timeout time for the request to next */ if (iter->next != NULL) { iter->next->timeout_diff += iter->timeout_diff; } iter->next = NULL; } return iter; } /** * Handle requests timeout * @param tail Pointer to request queue tail pointer * @param t Time since last call in seconds */ static void mqtt_request_time_elapsed(struct mqtt_request_t **tail, u8_t t) { struct mqtt_request_t *r; LWIP_ASSERT("mqtt_request_time_elapsed: tail != NULL", tail != NULL); r = *tail; while (t > 0 && r != NULL) { if (t >= r->timeout_diff) { t -= (u8_t)r->timeout_diff; /* Unchain */ *tail = r->next; /* Notify upper layer about timeout */ if (r->cb != NULL) { r->cb(r->arg, ERR_TIMEOUT); } mqtt_delete_request(r); /* Tail might be be modified in callback, so re-read it in every iteration */ r = *(struct mqtt_request_t * const volatile *)tail; } else { r->timeout_diff -= t; t = 0; } } } /** * Free all request items * @param tail Pointer to request queue tail pointer */ static void mqtt_clear_requests(struct mqtt_request_t **tail) { struct mqtt_request_t *iter, *next; LWIP_ASSERT("mqtt_clear_requests: tail != NULL", tail != NULL); for (iter = *tail; iter != NULL; iter = next) { next = iter->next; mqtt_delete_request(iter); } *tail = NULL; } /** * Initialize all request items * @param r_objs Pointer to request objects */ static void mqtt_init_requests(struct mqtt_request_t *r_objs) { u8_t n; LWIP_ASSERT("mqtt_init_requests: r_objs != NULL", r_objs != NULL); for (n = 0; n < MQTT_REQ_MAX_IN_FLIGHT; n++) { /* Item pointing to itself indicates unused */ r_objs[n].next = &r_objs[n]; } } /*--------------------------------------------------------------------------------------------------------------------- */ /* Output message build helpers */ static void mqtt_output_append_u8(struct mqtt_ringbuf_t *rb, u8_t value) { mqtt_ringbuf_put(rb, value); } static void mqtt_output_append_u16(struct mqtt_ringbuf_t *rb, u16_t value) { mqtt_ringbuf_put(rb, value >> 8); mqtt_ringbuf_put(rb, value & 0xff); } static void mqtt_output_append_buf(struct mqtt_ringbuf_t *rb, const void *data, u16_t length) { u16_t n; for (n = 0; n < length; n++) { mqtt_ringbuf_put(rb, ((const u8_t *)data)[n]); } } static void mqtt_output_append_string(struct mqtt_ringbuf_t *rb, const char *str, u16_t length) { u16_t n; mqtt_ringbuf_put(rb, length >> 8); mqtt_ringbuf_put(rb, length & 0xff); for (n = 0; n < length; n++) { mqtt_ringbuf_put(rb, str[n]); } } /** * Append fixed header * @param rb Output ring buffer * @param msg_type see enum mqtt_message_type * @param dup MQTT DUP flag * @param qos MQTT QoS field * @param retain MQTT retain flag * @param r_length Remaining length after fixed header */ static void mqtt_output_append_fixed_header(struct mqtt_ringbuf_t *rb, u8_t msg_type, u8_t dup, u8_t qos, u8_t retain, u16_t r_length) { /* Start with control byte */ mqtt_output_append_u8(rb, (((msg_type & 0x0f) << 4) | ((dup & 1) << 3) | ((qos & 3) << 1) | (retain & 1))); /* Encode remaining length field */ do { mqtt_output_append_u8(rb, (r_length & 0x7f) | (r_length >= 128 ? 
0x80 : 0)); r_length >>= 7; } while (r_length > 0); } /** * Check output buffer space * @param rb Output ring buffer * @param r_length Remaining length after fixed header * @return 1 if message will fit, 0 if not enough buffer space */ static u8_t mqtt_output_check_space(struct mqtt_ringbuf_t *rb, u16_t r_length) { /* Start with length of type byte + remaining length */ u16_t total_len = 1 + r_length; LWIP_ASSERT("mqtt_output_check_space: rb != NULL", rb != NULL); /* Calculate number of required bytes to contain the remaining bytes field and add to total*/ do { total_len++; r_length >>= 7; } while (r_length > 0); return (total_len <= mqtt_ringbuf_free(rb)); } /** * Close connection to server * @param client MQTT client * @param reason Reason for disconnection */ static void mqtt_close(mqtt_client_t *client, mqtt_connection_status_t reason) { LWIP_ASSERT("mqtt_close: client != NULL", client != NULL); /* Bring down TCP connection if not already done */ if (client->conn != NULL) { err_t res; tcp_recv(client->conn, NULL); tcp_err(client->conn, NULL); tcp_sent(client->conn, NULL); res = tcp_close(client->conn); if (res != ERR_OK) { tcp_abort(client->conn); LWIP_DEBUGF(MQTT_DEBUG_TRACE,("mqtt_close: Close err=%s\n", lwip_strerr(res))); } client->conn = NULL; } /* Remove all pending requests */ mqtt_clear_requests(&client->pend_req_queue); /* Stop cyclic timer */ sys_untimeout(mqtt_cyclic_timer, client); /* Notify upper layer of disconnection if changed state */ if (client->conn_state != TCP_DISCONNECTED) { client->conn_state = TCP_DISCONNECTED; if (client->connect_cb != NULL) { client->connect_cb(client, client->connect_arg, reason); } } } /** * Interval timer, called every MQTT_CYCLIC_TIMER_INTERVAL seconds in MQTT_CONNECTING and MQTT_CONNECTED states * @param arg MQTT client */ static void mqtt_cyclic_timer(void *arg) { u8_t restart_timer = 1; mqtt_client_t *client = (mqtt_client_t *)arg; LWIP_ASSERT("mqtt_cyclic_timer: client != NULL", client != NULL); if (client->conn_state == MQTT_CONNECTING) { client->cyclic_tick++; if ((client->cyclic_tick * MQTT_CYCLIC_TIMER_INTERVAL) >= MQTT_CONNECT_TIMOUT) { LWIP_DEBUGF(MQTT_DEBUG_TRACE,("mqtt_cyclic_timer: CONNECT attempt to server timed out\n")); /* Disconnect TCP */ mqtt_close(client, MQTT_CONNECT_TIMEOUT); restart_timer = 0; } } else if (client->conn_state == MQTT_CONNECTED) { /* Handle timeout for pending requests */ mqtt_request_time_elapsed(&client->pend_req_queue, MQTT_CYCLIC_TIMER_INTERVAL); /* keep_alive > 0 means keep alive functionality shall be used */ if (client->keep_alive > 0) { client->server_watchdog++; /* If reception from server has been idle for 1.5*keep_alive time, server is considered unresponsive */ if ((client->server_watchdog * MQTT_CYCLIC_TIMER_INTERVAL) > (client->keep_alive + client->keep_alive/2)) { LWIP_DEBUGF(MQTT_DEBUG_WARN,("mqtt_cyclic_timer: Server incoming keep-alive timeout\n")); mqtt_close(client, MQTT_CONNECT_TIMEOUT); restart_timer = 0; } /* If time for a keep alive message to be sent, transmission has been idle for keep_alive time */ if ((client->cyclic_tick * MQTT_CYCLIC_TIMER_INTERVAL) >= client->keep_alive) { LWIP_DEBUGF(MQTT_DEBUG_TRACE,("mqtt_cyclic_timer: Sending keep-alive message to server\n")); if (mqtt_output_check_space(&client->output, 0) != 0) { mqtt_output_append_fixed_header(&client->output, MQTT_MSG_TYPE_PINGREQ, 0, 0, 0, 0); client->cyclic_tick = 0; } } else { client->cyclic_tick++; } } } else { LWIP_DEBUGF(MQTT_DEBUG_WARN,("mqtt_cyclic_timer: Timer should not be running in state %d\n", 
client->conn_state)); restart_timer = 0; } if (restart_timer) { sys_timeout(MQTT_CYCLIC_TIMER_INTERVAL*1000, mqtt_cyclic_timer, arg); } } /** * Send PUBACK, PUBREC or PUBREL response message * @param client MQTT client * @param msg PUBACK, PUBREC or PUBREL * @param pkt_id Packet identifier * @param qos QoS value * @return ERR_OK if successful, ERR_MEM if out of memory */ static err_t pub_ack_rec_rel_response(mqtt_client_t *client, u8_t msg, u16_t pkt_id, u8_t qos) { err_t err = ERR_OK; if (mqtt_output_check_space(&client->output, 2)) { mqtt_output_append_fixed_header(&client->output, msg, 0, qos, 0, 2); mqtt_output_append_u16(&client->output, pkt_id); mqtt_output_send(&client->output, client->conn); } else { LWIP_DEBUGF(MQTT_DEBUG_TRACE,("pub_ack_rec_rel_response: OOM creating response: %s with pkt_id: %d\n", mqtt_msg_type_to_str(msg), pkt_id)); err = ERR_MEM; } return err; } /** * Subscribe response from server * @param r Matching request * @param result Result code from server */ static void mqtt_incomming_suback(struct mqtt_request_t *r, u8_t result) { if (r->cb != NULL) { r->cb(r->arg, result < 3 ? ERR_OK : ERR_ABRT); } } /** * Complete MQTT message received or buffer full * @param client MQTT client * @param fixed_hdr_idx header index * @param length length received part * @param remaining_length Remaining length of complete message */ static mqtt_connection_status_t mqtt_message_received(mqtt_client_t *client, u8_t fixed_hdr_idx, u16_t length, u32_t remaining_length) { mqtt_connection_status_t res = MQTT_CONNECT_ACCEPTED; u8_t *var_hdr_payload = client->rx_buffer + fixed_hdr_idx; /* Control packet type */ u8_t pkt_type = MQTT_CTL_PACKET_TYPE(client->rx_buffer[0]); u16_t pkt_id = 0; if (pkt_type == MQTT_MSG_TYPE_CONNACK) { if (client->conn_state == MQTT_CONNECTING) { /* Get result code from CONNACK */ res = (mqtt_connection_status_t)var_hdr_payload[1]; LWIP_DEBUGF(MQTT_DEBUG_TRACE,("mqtt_message_received: Connect response code %d\n", res)); if (res == MQTT_CONNECT_ACCEPTED) { /* Reset cyclic_tick when changing to connected state */ client->cyclic_tick = 0; client->conn_state = MQTT_CONNECTED; /* Notify upper layer */ if (client->connect_cb != 0) { client->connect_cb(client, client->connect_arg, res); } } } else { LWIP_DEBUGF(MQTT_DEBUG_WARN,("mqtt_message_received: Received CONNACK in connected state\n")); } } else if (pkt_type == MQTT_MSG_TYPE_PINGRESP) { LWIP_DEBUGF(MQTT_DEBUG_TRACE,( "mqtt_message_received: Received PINGRESP from server\n")); } else if (pkt_type == MQTT_MSG_TYPE_PUBLISH) { u16_t payload_offset = 0; u16_t payload_length = length; u8_t qos = MQTT_CTL_PACKET_QOS(client->rx_buffer[0]); if (client->msg_idx <= MQTT_VAR_HEADER_BUFFER_LEN) { /* Should have topic and pkt id*/ uint8_t *topic; uint16_t after_topic; u8_t bkp; u16_t topic_len = var_hdr_payload[0]; topic_len = (topic_len << 8) + (u16_t)(var_hdr_payload[1]); topic = var_hdr_payload + 2; after_topic = 2 + topic_len; /* Check length, add one byte even for QoS 0 so that zero termination will fit */ if ((after_topic + (qos? 
2 : 1)) > length) { LWIP_DEBUGF(MQTT_DEBUG_WARN,("mqtt_message_received: Receive buffer can not fit topic + pkt_id\n")); goto out_disconnect; } /* id for QoS 1 and 2 */ if (qos > 0) { client->inpub_pkt_id = ((u16_t)var_hdr_payload[after_topic] << 8) + (u16_t)var_hdr_payload[after_topic + 1]; after_topic += 2; } else { client->inpub_pkt_id = 0; } /* Take backup of byte after topic */ bkp = topic[topic_len]; /* Zero terminate string */ topic[topic_len] = 0; /* Payload data remaining in receive buffer */ payload_length = length - after_topic; payload_offset = after_topic; LWIP_DEBUGF(MQTT_DEBUG_TRACE,("mqtt_incomming_publish: Received message with QoS %d at topic: %s, payload length %d\n", qos, topic, remaining_length + payload_length)); if (client->pub_cb != NULL) { client->pub_cb(client->inpub_arg, (const char *)topic, remaining_length + payload_length); } /* Restore byte after topic */ topic[topic_len] = bkp; } if (payload_length > 0 || remaining_length == 0) { client->data_cb(client->inpub_arg, var_hdr_payload + payload_offset, payload_length, remaining_length == 0 ? MQTT_DATA_FLAG_LAST : 0); /* Reply if QoS > 0 */ if (remaining_length == 0 && qos > 0) { /* Send PUBACK for QoS 1 or PUBREC for QoS 2 */ u8_t resp_msg = (qos == 1) ? MQTT_MSG_TYPE_PUBACK : MQTT_MSG_TYPE_PUBREC; LWIP_DEBUGF(MQTT_DEBUG_TRACE,("mqtt_incomming_publish: Sending publish response: %s with pkt_id: %d\n", mqtt_msg_type_to_str(resp_msg), client->inpub_pkt_id)); pub_ack_rec_rel_response(client, resp_msg, client->inpub_pkt_id, 0); } } } else { /* Get packet identifier */ pkt_id = (u16_t)var_hdr_payload[0] << 8; pkt_id |= (u16_t)var_hdr_payload[1]; if (pkt_id == 0) { LWIP_DEBUGF(MQTT_DEBUG_WARN,("mqtt_message_received: Got message with illegal packet identifier: 0\n")); goto out_disconnect; } if (pkt_type == MQTT_MSG_TYPE_PUBREC) { LWIP_DEBUGF(MQTT_DEBUG_TRACE,("mqtt_message_received: PUBREC, sending PUBREL with pkt_id: %d\n",pkt_id)); pub_ack_rec_rel_response(client, MQTT_MSG_TYPE_PUBREL, pkt_id, 1); } else if (pkt_type == MQTT_MSG_TYPE_PUBREL) { LWIP_DEBUGF(MQTT_DEBUG_TRACE,("mqtt_message_received: PUBREL, sending PUBCOMP response with pkt_id: %d\n",pkt_id)); pub_ack_rec_rel_response(client, MQTT_MSG_TYPE_PUBCOMP, pkt_id, 0); } else if (pkt_type == MQTT_MSG_TYPE_SUBACK || pkt_type == MQTT_MSG_TYPE_UNSUBACK || pkt_type == MQTT_MSG_TYPE_PUBCOMP || pkt_type == MQTT_MSG_TYPE_PUBACK) { struct mqtt_request_t *r = mqtt_take_request(&client->pend_req_queue, pkt_id); if (r != NULL) { LWIP_DEBUGF(MQTT_DEBUG_TRACE,("mqtt_message_received: %s response with id %d\n", mqtt_msg_type_to_str(pkt_type), pkt_id)); if (pkt_type == MQTT_MSG_TYPE_SUBACK) { if (length < 3) { LWIP_DEBUGF(MQTT_DEBUG_WARN,("mqtt_message_received: To small SUBACK packet\n")); goto out_disconnect; } else { mqtt_incomming_suback(r, var_hdr_payload[2]); } } else if (r->cb != NULL) { r->cb(r->arg, ERR_OK); } mqtt_delete_request(r); } else { LWIP_DEBUGF(MQTT_DEBUG_WARN,( "mqtt_message_received: Received %s reply, with wrong pkt_id: %d\n", mqtt_msg_type_to_str(pkt_type), pkt_id)); } } else { LWIP_DEBUGF(MQTT_DEBUG_WARN,( "mqtt_message_received: Received unknown message type: %d\n", pkt_type)); goto out_disconnect; } } return res; out_disconnect: return MQTT_CONNECT_DISCONNECTED; } /** * MQTT incoming message parser * @param client MQTT client * @param p PBUF chain of received data * @return Connection status */ static mqtt_connection_status_t mqtt_parse_incoming(mqtt_client_t *client, struct pbuf *p) { u16_t in_offset = 0; u32_t msg_rem_len = 0; u8_t fixed_hdr_idx = 0; 
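/* Editorial note, not from the original source: the first branch below reads
 * the fixed header byte plus the MQTT "remaining length" field, a base-128
 * varint sent least-significant group first, where bit 0x80 means another
 * length byte follows (e.g. a remaining length of 321 = 2*128 + 65 is
 * received as 0xC1 0x02). Only once that length is known does the parser
 * start copying the variable header and payload into rx_buffer. */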
u8_t b = 0; while (p->tot_len > in_offset) { if ((fixed_hdr_idx < 2) || ((b & 0x80) != 0)) { if (fixed_hdr_idx < client->msg_idx) { b = client->rx_buffer[fixed_hdr_idx]; } else { b = pbuf_get_at(p, in_offset++); client->rx_buffer[client->msg_idx++] = b; } fixed_hdr_idx++; if (fixed_hdr_idx >= 2) { msg_rem_len |= (u32_t)(b & 0x7f) << ((fixed_hdr_idx - 2) * 7); if ((b & 0x80) == 0) { LWIP_DEBUGF(MQTT_DEBUG_TRACE,("mqtt_parse_incoming: Remaining length after fixed header: %d\n", msg_rem_len)); if (msg_rem_len == 0) { /* Complete message with no extra headers of payload received */ mqtt_message_received(client, fixed_hdr_idx, 0, 0); client->msg_idx = 0; fixed_hdr_idx = 0; } else { /* Bytes remaining in message */ msg_rem_len = (msg_rem_len + fixed_hdr_idx) - client->msg_idx; } } } } else { u16_t cpy_len, cpy_start, buffer_space; cpy_start = (client->msg_idx - fixed_hdr_idx) % (MQTT_VAR_HEADER_BUFFER_LEN - fixed_hdr_idx) + fixed_hdr_idx; /* Allow to copy the lesser one of available length in input data or bytes remaining in message */ cpy_len = (u16_t)LWIP_MIN((u16_t)(p->tot_len - in_offset), msg_rem_len); /* Limit to available space in buffer */ buffer_space = MQTT_VAR_HEADER_BUFFER_LEN - cpy_start; if (cpy_len > buffer_space) { cpy_len = buffer_space; } pbuf_copy_partial(p, client->rx_buffer+cpy_start, cpy_len, in_offset); /* Advance get and put indexes */ client->msg_idx += cpy_len; in_offset += cpy_len; msg_rem_len -= cpy_len; LWIP_DEBUGF(MQTT_DEBUG_TRACE,("mqtt_parse_incoming: msg_idx: %d, cpy_len: %d, remaining %d\n", client->msg_idx, cpy_len, msg_rem_len)); if (msg_rem_len == 0 || cpy_len == buffer_space) { /* Whole message received or buffer is full */ mqtt_connection_status_t res = mqtt_message_received(client, fixed_hdr_idx, (cpy_start + cpy_len) - fixed_hdr_idx, msg_rem_len); if (res != MQTT_CONNECT_ACCEPTED) { return res; } if (msg_rem_len == 0) { /* Reset parser state */ client->msg_idx = 0; /* msg_tot_len = 0; */ fixed_hdr_idx = 0; } } } } return MQTT_CONNECT_ACCEPTED; } /** * TCP received callback function. @see tcp_recv_fn * @param arg MQTT client * @param p PBUF chain of received data * @param err Passed as return value if not ERR_OK * @return ERR_OK or err passed into callback */ static err_t mqtt_tcp_recv_cb(void *arg, struct tcp_pcb *pcb, struct pbuf *p, err_t err) { mqtt_client_t *client = (mqtt_client_t *)arg; LWIP_ASSERT("mqtt_tcp_recv_cb: client != NULL", client != NULL); LWIP_ASSERT("mqtt_tcp_recv_cb: client->conn == pcb", client->conn == pcb); if (p == NULL) { LWIP_DEBUGF(MQTT_DEBUG_TRACE,("mqtt_tcp_recv_cb: Recv pbuf=NULL, remote has closed connection\n")); mqtt_close(client, MQTT_CONNECT_DISCONNECTED); } else { mqtt_connection_status_t res; if (err != ERR_OK) { LWIP_DEBUGF(MQTT_DEBUG_WARN,("mqtt_tcp_recv_cb: Recv err=%d\n", err)); pbuf_free(p); return err; } /* Tell remote that data has been received */ tcp_recved(pcb, p->tot_len); res = mqtt_parse_incoming(client, p); pbuf_free(p); if (res != MQTT_CONNECT_ACCEPTED) { mqtt_close(client, res); } /* If keep alive functionality is used */ if (client->keep_alive != 0) { /* Reset server alive watchdog */ client->server_watchdog = 0; } } return ERR_OK; } /** * TCP data sent callback function. 
@see tcp_sent_fn * @param arg MQTT client * @param tpcb TCP connection handle * @param len Number of bytes sent * @return ERR_OK */ static err_t mqtt_tcp_sent_cb(void *arg, struct tcp_pcb *tpcb, u16_t len) { mqtt_client_t *client = (mqtt_client_t *)arg; LWIP_UNUSED_ARG(tpcb); LWIP_UNUSED_ARG(len); if (client->conn_state == MQTT_CONNECTED) { struct mqtt_request_t *r; /* Reset keep-alive send timer and server watchdog */ client->cyclic_tick = 0; client->server_watchdog = 0; /* QoS 0 publish has no response from server, so call its callbacks here */ while ((r = mqtt_take_request(&client->pend_req_queue, 0)) != NULL) { LWIP_DEBUGF(MQTT_DEBUG_TRACE,("mqtt_tcp_sent_cb: Calling QoS 0 publish complete callback\n")); if (r->cb != NULL) { r->cb(r->arg, ERR_OK); } mqtt_delete_request(r); } /* Try send any remaining buffers from output queue */ mqtt_output_send(&client->output, client->conn); } return ERR_OK; } /** * TCP error callback function. @see tcp_err_fn * @param arg MQTT client * @param err Error encountered */ static void mqtt_tcp_err_cb(void *arg, err_t err) { mqtt_client_t *client = (mqtt_client_t *)arg; LWIP_UNUSED_ARG(err); /* only used for debug output */ LWIP_DEBUGF(MQTT_DEBUG_TRACE,("mqtt_tcp_err_cb: TCP error callback: error %d, arg: %p\n", err, arg)); LWIP_ASSERT("mqtt_tcp_err_cb: client != NULL", client != NULL); /* Set conn to null before calling close as pcb is already deallocated*/ client->conn = 0; mqtt_close(client, MQTT_CONNECT_DISCONNECTED); } /** * TCP poll callback function. @see tcp_poll_fn * @param arg MQTT client * @param tpcb TCP connection handle * @return err ERR_OK */ static err_t mqtt_tcp_poll_cb(void *arg, struct tcp_pcb *tpcb) { mqtt_client_t *client = (mqtt_client_t *)arg; if (client->conn_state == MQTT_CONNECTED) { /* Try send any remaining buffers from output queue */ mqtt_output_send(&client->output, tpcb); } return ERR_OK; } /** * TCP connect callback function. @see tcp_connected_fn * @param arg MQTT client * @param err Always ERR_OK, mqtt_tcp_err_cb is called in case of error * @return ERR_OK */ static err_t mqtt_tcp_connect_cb(void *arg, struct tcp_pcb *tpcb, err_t err) { mqtt_client_t* client = (mqtt_client_t *)arg; if (err != ERR_OK) { LWIP_DEBUGF(MQTT_DEBUG_WARN,("mqtt_tcp_connect_cb: TCP connect error %d\n", err)); return err; } /* Initiate receiver state */ client->msg_idx = 0; /* Setup TCP callbacks */ tcp_recv(tpcb, mqtt_tcp_recv_cb); tcp_sent(tpcb, mqtt_tcp_sent_cb); tcp_poll(tpcb, mqtt_tcp_poll_cb, 2); LWIP_DEBUGF(MQTT_DEBUG_TRACE,("mqtt_tcp_connect_cb: TCP connection established to server\n")); /* Enter MQTT connect state */ client->conn_state = MQTT_CONNECTING; /* Start cyclic timer */ sys_timeout(MQTT_CYCLIC_TIMER_INTERVAL*1000, mqtt_cyclic_timer, client); client->cyclic_tick = 0; /* Start transmission from output queue, connect message is the first one out*/ mqtt_output_send(&client->output, client->conn); return ERR_OK; } /*---------------------------------------------------------------------------------------------------- */ /* Public API */ /** * @ingroup mqtt * MQTT publish function. 
* @param client MQTT client * @param topic Publish topic string * @param payload Data to publish (NULL is allowed) * @param payload_length: Length of payload (0 is allowed) * @param qos Quality of service, 0 1 or 2 * @param retain MQTT retain flag * @param cb Callback to call when publish is complete or has timed out * @param arg User supplied argument to publish callback * @return ERR_OK if successful * ERR_CONN if client is disconnected * ERR_MEM if short on memory */ err_t mqtt_publish(mqtt_client_t *client, const char *topic, const void *payload, u16_t payload_length, u8_t qos, u8_t retain, mqtt_request_cb_t cb, void *arg) { struct mqtt_request_t *r; u16_t pkt_id; size_t topic_strlen; size_t total_len; u16_t topic_len; u16_t remaining_length; LWIP_ASSERT("mqtt_publish: client != NULL", client); LWIP_ASSERT("mqtt_publish: topic != NULL", topic); LWIP_ERROR("mqtt_publish: TCP disconnected", (client->conn_state != TCP_DISCONNECTED), return ERR_CONN); topic_strlen = strlen(topic); LWIP_ERROR("mqtt_publish: topic length overflow", (topic_strlen <= (0xFFFF - 2)), return ERR_ARG); topic_len = (u16_t)topic_strlen; total_len = 2 + topic_len + payload_length; LWIP_ERROR("mqtt_publish: total length overflow", (total_len <= 0xFFFF), return ERR_ARG); remaining_length = (u16_t)total_len; LWIP_DEBUGF(MQTT_DEBUG_TRACE,("mqtt_publish: Publish with payload length %d to topic \"%s\"\n", payload_length, topic)); if (qos > 0) { remaining_length += 2; /* Generate pkt_id id for QoS1 and 2 */ pkt_id = msg_generate_packet_id(client); } else { /* Use reserved value pkt_id 0 for QoS 0 in request handle */ pkt_id = 0; } r = mqtt_create_request(client->req_list, pkt_id, cb, arg); if (r == NULL) { return ERR_MEM; } if (mqtt_output_check_space(&client->output, remaining_length) == 0) { mqtt_delete_request(r); return ERR_MEM; } /* Append fixed header */ mqtt_output_append_fixed_header(&client->output, MQTT_MSG_TYPE_PUBLISH, 0, qos, retain, remaining_length); /* Append Topic */ mqtt_output_append_string(&client->output, topic, topic_len); /* Append packet if for QoS 1 and 2*/ if (qos > 0) { mqtt_output_append_u16(&client->output, pkt_id); } /* Append optional publish payload */ if ((payload != NULL) && (payload_length > 0)) { mqtt_output_append_buf(&client->output, payload, payload_length); } mqtt_append_request(&client->pend_req_queue, r); mqtt_output_send(&client->output, client->conn); return ERR_OK; } /** * @ingroup mqtt * MQTT subscribe/unsubscribe function. 
* @param client MQTT client * @param topic topic to subscribe to * @param qos Quality of service, 0 1 or 2 (only used for subscribe) * @param cb Callback to call when subscribe/unsubscribe reponse is received * @param arg User supplied argument to publish callback * @param sub 1 for subscribe, 0 for unsubscribe * @return ERR_OK if successful, @see err_t enum for other results */ err_t mqtt_sub_unsub(mqtt_client_t *client, const char *topic, u8_t qos, mqtt_request_cb_t cb, void *arg, u8_t sub) { size_t topic_strlen; size_t total_len; u16_t topic_len; u16_t remaining_length; u16_t pkt_id; struct mqtt_request_t *r; LWIP_ASSERT("mqtt_sub_unsub: client != NULL", client); LWIP_ASSERT("mqtt_sub_unsub: topic != NULL", topic); topic_strlen = strlen(topic); LWIP_ERROR("mqtt_sub_unsub: topic length overflow", (topic_strlen <= (0xFFFF - 2)), return ERR_ARG); topic_len = (u16_t)topic_strlen; /* Topic string, pkt_id, qos for subscribe */ total_len = topic_len + 2 + 2 + (sub != 0); LWIP_ERROR("mqtt_sub_unsub: total length overflow", (total_len <= 0xFFFF), return ERR_ARG); remaining_length = (u16_t)total_len; LWIP_ASSERT("mqtt_sub_unsub: qos < 3", qos < 3); if (client->conn_state == TCP_DISCONNECTED) { LWIP_DEBUGF(MQTT_DEBUG_WARN,("mqtt_sub_unsub: Can not (un)subscribe in disconnected state\n")); return ERR_CONN; } pkt_id = msg_generate_packet_id(client); r = mqtt_create_request(client->req_list, pkt_id, cb, arg); if (r == NULL) { return ERR_MEM; } if (mqtt_output_check_space(&client->output, remaining_length) == 0) { mqtt_delete_request(r); return ERR_MEM; } LWIP_DEBUGF(MQTT_DEBUG_TRACE,("mqtt_sub_unsub: Client (un)subscribe to topic \"%s\", id: %d\n", topic, pkt_id)); mqtt_output_append_fixed_header(&client->output, sub ? MQTT_MSG_TYPE_SUBSCRIBE : MQTT_MSG_TYPE_UNSUBSCRIBE, 0, 1, 0, remaining_length); /* Packet id */ mqtt_output_append_u16(&client->output, pkt_id); /* Topic */ mqtt_output_append_string(&client->output, topic, topic_len); /* QoS */ if (sub != 0) { mqtt_output_append_u8(&client->output, LWIP_MIN(qos, 2)); } mqtt_append_request(&client->pend_req_queue, r); mqtt_output_send(&client->output, client->conn); return ERR_OK; } /** * @ingroup mqtt * Set callback to handle incoming publish requests from server * @param client MQTT client * @param pub_cb Callback invoked when publish starts, contain topic and total length of payload * @param data_cb Callback for each fragment of payload that arrives * @param arg User supplied argument to both callbacks */ void mqtt_set_inpub_callback(mqtt_client_t *client, mqtt_incoming_publish_cb_t pub_cb, mqtt_incoming_data_cb_t data_cb, void *arg) { LWIP_ASSERT("mqtt_set_inpub_callback: client != NULL", client != NULL); client->data_cb = data_cb; client->pub_cb = pub_cb; client->inpub_arg = arg; } /** * @ingroup mqtt * Create a new MQTT client instance * @return Pointer to instance on success, NULL otherwise */ mqtt_client_t * mqtt_client_new(void) { mqtt_client_t *client = (mqtt_client_t *)mem_malloc(sizeof(mqtt_client_t)); if (client != NULL) { memset(client, 0, sizeof(mqtt_client_t)); } return client; } /** * @ingroup mqtt * Connect to MQTT server * @param client MQTT client * @param ip_addr Server IP * @param port Server port * @param cb Connection state change callback * @param arg User supplied argument to connection callback * @param client_info Client identification and connection options * @return ERR_OK if successful, @see err_t enum for other results */ err_t mqtt_client_connect(mqtt_client_t *client, const ip_addr_t *ip_addr, u16_t port, 
mqtt_connection_cb_t cb, void *arg, const struct mqtt_connect_client_info_t *client_info) { err_t err; size_t len; u16_t client_id_length; /* Length is the sum of 2+"MQTT", protocol level, flags and keep alive */ u16_t remaining_length = 2 + 4 + 1 + 1 + 2; u8_t flags = 0, will_topic_len = 0, will_msg_len = 0; LWIP_ASSERT("mqtt_client_connect: client != NULL", client != NULL); LWIP_ASSERT("mqtt_client_connect: ip_addr != NULL", ip_addr != NULL); LWIP_ASSERT("mqtt_client_connect: client_info != NULL", client_info != NULL); LWIP_ASSERT("mqtt_client_connect: client_info->client_id != NULL", client_info->client_id != NULL); if (client->conn_state != TCP_DISCONNECTED) { LWIP_DEBUGF(MQTT_DEBUG_WARN,("mqtt_client_connect: Already connected\n")); return ERR_ISCONN; } /* Wipe clean */ memset(client, 0, sizeof(mqtt_client_t)); client->connect_arg = arg; client->connect_cb = cb; client->keep_alive = client_info->keep_alive; mqtt_init_requests(client->req_list); /* Build connect message */ if (client_info->will_topic != NULL && client_info->will_msg != NULL) { flags |= MQTT_CONNECT_FLAG_WILL; flags |= (client_info->will_qos & 3) << 3; if (client_info->will_retain) { flags |= MQTT_CONNECT_FLAG_WILL_RETAIN; } len = strlen(client_info->will_topic); LWIP_ERROR("mqtt_client_connect: client_info->will_topic length overflow", len <= 0xFF, return ERR_VAL); LWIP_ERROR("mqtt_client_connect: client_info->will_topic length must be > 0", len > 0, return ERR_VAL); will_topic_len = (u8_t)len; len = strlen(client_info->will_msg); LWIP_ERROR("mqtt_client_connect: client_info->will_msg length overflow", len <= 0xFF, return ERR_VAL); will_msg_len = (u8_t)len; len = remaining_length + 2 + will_topic_len + 2 + will_msg_len; LWIP_ERROR("mqtt_client_connect: remaining_length overflow", len <= 0xFFFF, return ERR_VAL); remaining_length = (u16_t)len; } /* Don't complicate things, always connect using clean session */ flags |= MQTT_CONNECT_FLAG_CLEAN_SESSION; len = strlen(client_info->client_id); LWIP_ERROR("mqtt_client_connect: client_info->client_id length overflow", len <= 0xFFFF, return ERR_VAL); client_id_length = (u16_t)len; len = remaining_length + 2 + client_id_length; LWIP_ERROR("mqtt_client_connect: remaining_length overflow", len <= 0xFFFF, return ERR_VAL); remaining_length = (u16_t)len; if (mqtt_output_check_space(&client->output, remaining_length) == 0) { return ERR_MEM; } client->conn = tcp_new(); if (client->conn == NULL) { return ERR_MEM; } /* Set arg pointer for callbacks */ tcp_arg(client->conn, client); /* Any local address, pick random local port number */ err = tcp_bind(client->conn, IP_ADDR_ANY, 0); if (err != ERR_OK) { LWIP_DEBUGF(MQTT_DEBUG_WARN,("mqtt_client_connect: Error binding to local ip/port, %d\n", err)); goto tcp_fail; } LWIP_DEBUGF(MQTT_DEBUG_TRACE,("mqtt_client_connect: Connecting to host: %s at port:%"U16_F"\n", ipaddr_ntoa(ip_addr), port)); /* Connect to server */ err = tcp_connect(client->conn, ip_addr, port, mqtt_tcp_connect_cb); if (err != ERR_OK) { LWIP_DEBUGF(MQTT_DEBUG_TRACE,("mqtt_client_connect: Error connecting to remote ip/port, %d\n", err)); goto tcp_fail; } /* Set error callback */ tcp_err(client->conn, mqtt_tcp_err_cb); client->conn_state = TCP_CONNECTING; /* Append fixed header */ mqtt_output_append_fixed_header(&client->output, MQTT_MSG_TYPE_CONNECT, 0, 0, 0, remaining_length); /* Append Protocol string */ mqtt_output_append_string(&client->output, "MQTT", 4); /* Append Protocol level */ mqtt_output_append_u8(&client->output, 4); /* Append connect flags */ 
mqtt_output_append_u8(&client->output, flags); /* Append keep-alive */ mqtt_output_append_u16(&client->output, client_info->keep_alive); /* Append client id */ mqtt_output_append_string(&client->output, client_info->client_id, client_id_length); /* Append will message if used */ if ((flags & MQTT_CONNECT_FLAG_WILL) != 0) { mqtt_output_append_string(&client->output, client_info->will_topic, will_topic_len); mqtt_output_append_string(&client->output, client_info->will_msg, will_msg_len); } return ERR_OK; tcp_fail: tcp_abort(client->conn); client->conn = NULL; return err; } /** * @ingroup mqtt * Disconnect from MQTT server * @param client MQTT client */ void mqtt_disconnect(mqtt_client_t *client) { LWIP_ASSERT("mqtt_disconnect: client != NULL", client); /* If connection in not already closed */ if (client->conn_state != TCP_DISCONNECTED) { /* Set conn_state before calling mqtt_close to prevent callback from being called */ client->conn_state = TCP_DISCONNECTED; mqtt_close(client, (mqtt_connection_status_t)0); } } /** * @ingroup mqtt * Check connection with server * @param client MQTT client * @return 1 if connected to server, 0 otherwise */ u8_t mqtt_client_is_connected(mqtt_client_t *client) { LWIP_ASSERT("mqtt_client_is_connected: client != NULL", client); return client->conn_state == MQTT_CONNECTED; } #endif /* LWIP_TCP && LWIP_CALLBACK_API */
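/* -------------------------------------------------------------------------
 * Illustrative usage sketch, not part of the original mqtt.c: a minimal
 * connect / subscribe / publish flow against the public API above. The
 * broker port 1883, client id "lwip-example" and topics are assumptions for
 * the example; the callback typedef shapes are inferred from the call sites
 * in this file. Wrapped in "#if 0" so it is never compiled. */
#if 0
#include "lwip/apps/mqtt.h"

static void example_incoming_publish(void *arg, const char *topic, u32_t tot_len)
{
  LWIP_UNUSED_ARG(arg);
  LWIP_UNUSED_ARG(tot_len);
  LWIP_DEBUGF(MQTT_DEBUG_TRACE, ("Incoming publish on topic \"%s\"\n", topic));
}

static void example_incoming_data(void *arg, const u8_t *data, u16_t len, u8_t flags)
{
  LWIP_UNUSED_ARG(arg);
  LWIP_UNUSED_ARG(data);
  LWIP_UNUSED_ARG(len);
  if (flags & MQTT_DATA_FLAG_LAST) {
    /* Last fragment of this publish payload has been delivered */
  }
}

static void example_request_done(void *arg, err_t err)
{
  LWIP_UNUSED_ARG(arg);
  LWIP_DEBUGF(MQTT_DEBUG_TRACE, ("Request finished with result %d\n", err));
}

static void example_connection_cb(mqtt_client_t *client, void *arg, mqtt_connection_status_t status)
{
  if (status == MQTT_CONNECT_ACCEPTED) {
    /* Subscribe with QoS 1, then publish a retained QoS 0 status message */
    mqtt_sub_unsub(client, "lwip/test", 1, example_request_done, arg, 1);
    mqtt_publish(client, "lwip/status", "up", 2, 0, 1, example_request_done, arg);
  }
}

static void example_mqtt_start(const ip_addr_t *broker_addr)
{
  struct mqtt_connect_client_info_t ci;
  mqtt_client_t *client = mqtt_client_new();
  if (client == NULL) {
    return;
  }
  memset(&ci, 0, sizeof(ci));
  ci.client_id = "lwip-example";
  ci.keep_alive = 60;  /* seconds; 0 disables the keep-alive mechanism */
  mqtt_set_inpub_callback(client, example_incoming_publish, example_incoming_data, NULL);
  /* 1883 is the conventional unencrypted MQTT port */
  mqtt_client_connect(client, broker_addr, 1883, example_connection_cb, NULL, &ci);
}
#endif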
2301_81045437/classic-platform
communication/lwip-2.0.3/src/apps/mqtt/mqtt.c
C
unknown
44,961
/** * @file * NetBIOS name service responder */ /** * @defgroup netbiosns NETBIOS responder * @ingroup apps * * This is an example implementation of a NetBIOS name server. * It responds to name queries for a configurable name. * Name resolving is not supported. * * Note that the device doesn't broadcast it's own name so can't * detect duplicate names! */ /* * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY * OF SUCH DAMAGE. * * This file is part of the lwIP TCP/IP stack. * */ #include "lwip/apps/netbiosns.h" #if LWIP_IPV4 && LWIP_UDP /* don't build if not configured for use in lwipopts.h */ #include "lwip/def.h" #include "lwip/udp.h" #include "lwip/netif.h" #include <string.h> /** default port number for "NetBIOS Name service */ #define NETBIOS_PORT 137 /** size of a NetBIOS name */ #define NETBIOS_NAME_LEN 16 /** The Time-To-Live for NetBIOS name responds (in seconds) * Default is 300000 seconds (3 days, 11 hours, 20 minutes) */ #define NETBIOS_NAME_TTL 300000u /** NetBIOS header flags */ #define NETB_HFLAG_RESPONSE 0x8000U #define NETB_HFLAG_OPCODE 0x7800U #define NETB_HFLAG_OPCODE_NAME_QUERY 0x0000U #define NETB_HFLAG_AUTHORATIVE 0x0400U #define NETB_HFLAG_TRUNCATED 0x0200U #define NETB_HFLAG_RECURS_DESIRED 0x0100U #define NETB_HFLAG_RECURS_AVAILABLE 0x0080U #define NETB_HFLAG_BROADCAST 0x0010U #define NETB_HFLAG_REPLYCODE 0x0008U #define NETB_HFLAG_REPLYCODE_NOERROR 0x0000U /** NetBIOS name flags */ #define NETB_NFLAG_UNIQUE 0x8000U #define NETB_NFLAG_NODETYPE 0x6000U #define NETB_NFLAG_NODETYPE_HNODE 0x6000U #define NETB_NFLAG_NODETYPE_MNODE 0x4000U #define NETB_NFLAG_NODETYPE_PNODE 0x2000U #define NETB_NFLAG_NODETYPE_BNODE 0x0000U /** NetBIOS message header */ #ifdef PACK_STRUCT_USE_INCLUDES # include "arch/bpstruct.h" #endif PACK_STRUCT_BEGIN struct netbios_hdr { PACK_STRUCT_FIELD(u16_t trans_id); PACK_STRUCT_FIELD(u16_t flags); PACK_STRUCT_FIELD(u16_t questions); PACK_STRUCT_FIELD(u16_t answerRRs); PACK_STRUCT_FIELD(u16_t authorityRRs); PACK_STRUCT_FIELD(u16_t additionalRRs); } PACK_STRUCT_STRUCT; PACK_STRUCT_END #ifdef PACK_STRUCT_USE_INCLUDES # include "arch/epstruct.h" #endif /** NetBIOS message name part */ #ifdef PACK_STRUCT_USE_INCLUDES # include "arch/bpstruct.h" #endif PACK_STRUCT_BEGIN struct 
netbios_name_hdr { PACK_STRUCT_FLD_8(u8_t nametype); PACK_STRUCT_FLD_8(u8_t encname[(NETBIOS_NAME_LEN*2)+1]); PACK_STRUCT_FIELD(u16_t type); PACK_STRUCT_FIELD(u16_t cls); PACK_STRUCT_FIELD(u32_t ttl); PACK_STRUCT_FIELD(u16_t datalen); PACK_STRUCT_FIELD(u16_t flags); PACK_STRUCT_FLD_S(ip4_addr_p_t addr); } PACK_STRUCT_STRUCT; PACK_STRUCT_END #ifdef PACK_STRUCT_USE_INCLUDES # include "arch/epstruct.h" #endif /** NetBIOS message */ #ifdef PACK_STRUCT_USE_INCLUDES # include "arch/bpstruct.h" #endif PACK_STRUCT_BEGIN struct netbios_resp { struct netbios_hdr resp_hdr; struct netbios_name_hdr resp_name; } PACK_STRUCT_STRUCT; PACK_STRUCT_END #ifdef PACK_STRUCT_USE_INCLUDES # include "arch/epstruct.h" #endif #ifdef NETBIOS_LWIP_NAME #define NETBIOS_LOCAL_NAME NETBIOS_LWIP_NAME #else static char netbiosns_local_name[NETBIOS_NAME_LEN]; #define NETBIOS_LOCAL_NAME netbiosns_local_name #endif struct udp_pcb *netbiosns_pcb; /** Decode a NetBIOS name (from packet to string) */ static int netbiosns_name_decode(char *name_enc, char *name_dec, int name_dec_len) { char *pname; char cname; char cnbname; int idx = 0; LWIP_UNUSED_ARG(name_dec_len); /* Start decoding netbios name. */ pname = name_enc; for (;;) { /* Every two characters of the first level-encoded name * turn into one character in the decoded name. */ cname = *pname; if (cname == '\0') break; /* no more characters */ if (cname == '.') break; /* scope ID follows */ if (cname < 'A' || cname > 'Z') { /* Not legal. */ return -1; } cname -= 'A'; cnbname = cname << 4; pname++; cname = *pname; if (cname == '\0' || cname == '.') { /* No more characters in the name - but we're in * the middle of a pair. Not legal. */ return -1; } if (cname < 'A' || cname > 'Z') { /* Not legal. */ return -1; } cname -= 'A'; cnbname |= cname; pname++; /* Do we have room to store the character? */ if (idx < NETBIOS_NAME_LEN) { /* Yes - store the character. */ name_dec[idx++] = (cnbname!=' '?cnbname:'\0'); } } return 0; } #if 0 /* function currently unused */ /** Encode a NetBIOS name (from string to packet) - currently unused because we don't ask for names. */ static int netbiosns_name_encode(char *name_enc, char *name_dec, int name_dec_len) { char *pname; char cname; unsigned char ucname; int idx = 0; /* Start encoding netbios name. */ pname = name_enc; for (;;) { /* Every two characters of the first level-encoded name * turn into one character in the decoded name. */ cname = *pname; if (cname == '\0') break; /* no more characters */ if (cname == '.') break; /* scope ID follows */ if ((cname < 'A' || cname > 'Z') && (cname < '0' || cname > '9')) { /* Not legal. */ return -1; } /* Do we have room to store the character? */ if (idx >= name_dec_len) { return -1; } /* Yes - store the character. 
*/ ucname = cname; name_dec[idx++] = ('A'+((ucname>>4) & 0x0F)); name_dec[idx++] = ('A'+( ucname & 0x0F)); pname++; } /* Fill with "space" coding */ for (;idx < name_dec_len - 1;) { name_dec[idx++] = 'C'; name_dec[idx++] = 'A'; } /* Terminate string */ name_dec[idx] = '\0'; return 0; } #endif /* 0 */ /** NetBIOS Name service recv callback */ static void netbiosns_recv(void *arg, struct udp_pcb *upcb, struct pbuf *p, const ip_addr_t *addr, u16_t port) { LWIP_UNUSED_ARG(arg); /* if packet is valid */ if (p != NULL) { char netbios_name[NETBIOS_NAME_LEN+1]; struct netbios_hdr* netbios_hdr = (struct netbios_hdr*)p->payload; struct netbios_name_hdr* netbios_name_hdr = (struct netbios_name_hdr*)(netbios_hdr+1); /* we only answer if we got a default interface */ if (netif_default != NULL) { /* @todo: do we need to check answerRRs/authorityRRs/additionalRRs? */ /* if the packet is a NetBIOS name query question */ if (((netbios_hdr->flags & PP_NTOHS(NETB_HFLAG_OPCODE)) == PP_NTOHS(NETB_HFLAG_OPCODE_NAME_QUERY)) && ((netbios_hdr->flags & PP_NTOHS(NETB_HFLAG_RESPONSE)) == 0) && (netbios_hdr->questions == PP_NTOHS(1))) { /* decode the NetBIOS name */ netbiosns_name_decode((char*)(netbios_name_hdr->encname), netbios_name, sizeof(netbios_name)); /* if the packet is for us */ if (lwip_strnicmp(netbios_name, NETBIOS_LOCAL_NAME, sizeof(NETBIOS_LOCAL_NAME)) == 0) { struct pbuf *q; struct netbios_resp *resp; q = pbuf_alloc(PBUF_TRANSPORT, sizeof(struct netbios_resp), PBUF_RAM); if (q != NULL) { resp = (struct netbios_resp*)q->payload; /* prepare NetBIOS header response */ resp->resp_hdr.trans_id = netbios_hdr->trans_id; resp->resp_hdr.flags = PP_HTONS(NETB_HFLAG_RESPONSE | NETB_HFLAG_OPCODE_NAME_QUERY | NETB_HFLAG_AUTHORATIVE | NETB_HFLAG_RECURS_DESIRED); resp->resp_hdr.questions = 0; resp->resp_hdr.answerRRs = PP_HTONS(1); resp->resp_hdr.authorityRRs = 0; resp->resp_hdr.additionalRRs = 0; /* prepare NetBIOS header datas */ MEMCPY( resp->resp_name.encname, netbios_name_hdr->encname, sizeof(netbios_name_hdr->encname)); resp->resp_name.nametype = netbios_name_hdr->nametype; resp->resp_name.type = netbios_name_hdr->type; resp->resp_name.cls = netbios_name_hdr->cls; resp->resp_name.ttl = PP_HTONL(NETBIOS_NAME_TTL); resp->resp_name.datalen = PP_HTONS(sizeof(resp->resp_name.flags)+sizeof(resp->resp_name.addr)); resp->resp_name.flags = PP_HTONS(NETB_NFLAG_NODETYPE_BNODE); ip4_addr_copy(resp->resp_name.addr, *netif_ip4_addr(netif_default)); /* send the NetBIOS response */ udp_sendto(upcb, q, addr, port); /* free the "reference" pbuf */ pbuf_free(q); } } } } /* free the pbuf */ pbuf_free(p); } } /** * @ingroup netbiosns * Init netbios responder */ void netbiosns_init(void) { #ifdef NETBIOS_LWIP_NAME LWIP_ASSERT("NetBIOS name is too long!", strlen(NETBIOS_LWIP_NAME) < NETBIOS_NAME_LEN); #endif netbiosns_pcb = udp_new_ip_type(IPADDR_TYPE_ANY); if (netbiosns_pcb != NULL) { /* we have to be allowed to send broadcast packets! */ ip_set_option(netbiosns_pcb, SOF_BROADCAST); udp_bind(netbiosns_pcb, IP_ANY_TYPE, NETBIOS_PORT); udp_recv(netbiosns_pcb, netbiosns_recv, netbiosns_pcb); } } #ifndef NETBIOS_LWIP_NAME /** * @ingroup netbiosns * Set netbios name. ATTENTION: the hostname must be less than 15 characters! 
*/ void netbiosns_set_name(const char* hostname) { size_t copy_len = strlen(hostname); LWIP_ASSERT("NetBIOS name is too long!", copy_len < NETBIOS_NAME_LEN); if (copy_len >= NETBIOS_NAME_LEN) { copy_len = NETBIOS_NAME_LEN - 1; } MEMCPY(netbiosns_local_name, hostname, copy_len + 1); } #endif /** * @ingroup netbiosns * Stop netbios responder */ void netbiosns_stop(void) { if (netbiosns_pcb != NULL) { udp_remove(netbiosns_pcb); netbiosns_pcb = NULL; } } #endif /* LWIP_IPV4 && LWIP_UDP */
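/* -------------------------------------------------------------------------
 * Illustrative usage sketch, not part of the original netbiosns.c: starting
 * the responder with a runtime-configured name. "MYBOARD" is an assumed
 * example name and must be at most 15 characters (NETBIOS_NAME_LEN - 1).
 * For reference, queries carry the name first-level encoded as in
 * netbiosns_name_encode() above: each byte is split into two nibbles and
 * every nibble is sent as 'A' + nibble, so 'M' (0x4D) arrives as "EN".
 * Wrapped in "#if 0" so it is never compiled. */
#if 0
#include "lwip/apps/netbiosns.h"

static void example_netbios_start(void)
{
  /* Only available when NETBIOS_LWIP_NAME is not defined at compile time */
  netbiosns_set_name("MYBOARD");
  /* Creates the UDP PCB and binds it to the NetBIOS name service port 137 */
  netbiosns_init();
}
#endif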
2301_81045437/classic-platform
communication/lwip-2.0.3/src/apps/netbiosns/netbiosns.c
C
unknown
11,702
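The next record is lwIP's SNMP ASN.1 encoder. As orientation for its length handling, snmp_ans1_enc_tlv() below writes BER definite lengths: values up to 127 go out as a single byte, larger values as 0x80|n followed by n big-endian length bytes. The following standalone sketch (helper name and buffer layout are assumptions, not lwIP API) shows the same rule:
#include <stdint.h>
#include <stddef.h>

/* Encode a BER definite length into out[]; returns the number of bytes used.
 * Mirrors the short-form/long-form split used by the encoder in the next file. */
static size_t ber_encode_length(uint16_t len, uint8_t out[3])
{
  if (len <= 127) {          /* short form: 0x00..0x7F */
    out[0] = (uint8_t)len;
    return 1;
  } else if (len <= 255) {   /* long form, one length byte: 0x81 LL */
    out[0] = 0x81;
    out[1] = (uint8_t)len;
    return 2;
  } else {                   /* long form, two length bytes: 0x82 HH LL */
    out[0] = 0x82;
    out[1] = (uint8_t)(len >> 8);
    out[2] = (uint8_t)(len & 0xFFu);
    return 3;
  }
}
/* Examples: length 80 encodes as 0x50; length 300 encodes as 0x82 0x01 0x2C */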
/** * @file * Abstract Syntax Notation One (ISO 8824, 8825) encoding * * @todo not optimised (yet), favor correctness over speed, favor speed over size */ /* * Copyright (c) 2006 Axon Digital Design B.V., The Netherlands. * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY * OF SUCH DAMAGE. * * Author: Christiaan Simons <christiaan.simons@axon.tv> * Martin Hentschel <info@cl-soft.de> */ #include "lwip/apps/snmp_opts.h" #if LWIP_SNMP /* don't build if not configured for use in lwipopts.h */ #include "snmp_asn1.h" #define PBUF_OP_EXEC(code) \ if ((code) != ERR_OK) { \ return ERR_BUF; \ } /** * Encodes a TLV into a pbuf stream. 
* * @param pbuf_stream points to a pbuf stream * @param tlv TLV to encode * @return ERR_OK if successful, ERR_ARG if we can't (or won't) encode */ err_t snmp_ans1_enc_tlv(struct snmp_pbuf_stream* pbuf_stream, struct snmp_asn1_tlv* tlv) { u8_t data; u8_t length_bytes_required; /* write type */ if ((tlv->type & SNMP_ASN1_DATATYPE_MASK) == SNMP_ASN1_DATATYPE_EXTENDED) { /* extended format is not used by SNMP so we do not accept those values */ return ERR_ARG; } if (tlv->type_len != 0) { /* any other value as auto is not accepted for type (we always use one byte because extended syntax is prohibited) */ return ERR_ARG; } PBUF_OP_EXEC(snmp_pbuf_stream_write(pbuf_stream, tlv->type)); tlv->type_len = 1; /* write length */ if (tlv->value_len <= 127) { length_bytes_required = 1; } else if (tlv->value_len <= 255) { length_bytes_required = 2; } else { length_bytes_required = 3; } /* check for forced min length */ if (tlv->length_len > 0) { if (tlv->length_len < length_bytes_required) { /* unable to code requested length in requested number of bytes */ return ERR_ARG; } length_bytes_required = tlv->length_len; } else { tlv->length_len = length_bytes_required; } if (length_bytes_required > 1) { /* multi byte representation required */ length_bytes_required--; data = 0x80 | length_bytes_required; /* extended length definition, 1 length byte follows */ PBUF_OP_EXEC(snmp_pbuf_stream_write(pbuf_stream, data)); while (length_bytes_required > 1) { if (length_bytes_required == 2) { /* append high byte */ data = (u8_t)(tlv->value_len >> 8); } else { /* append leading 0x00 */ data = 0x00; } PBUF_OP_EXEC(snmp_pbuf_stream_write(pbuf_stream, data)); length_bytes_required--; } } /* append low byte */ data = (u8_t)(tlv->value_len & 0xFF); PBUF_OP_EXEC(snmp_pbuf_stream_write(pbuf_stream, data)); return ERR_OK; } /** * Encodes raw data (octet string, opaque) into a pbuf chained ASN1 msg. * * @param pbuf_stream points to a pbuf stream * @param raw_len raw data length * @param raw points raw data * @return ERR_OK if successful, ERR_ARG if we can't (or won't) encode */ err_t snmp_asn1_enc_raw(struct snmp_pbuf_stream* pbuf_stream, const u8_t *raw, u16_t raw_len) { PBUF_OP_EXEC(snmp_pbuf_stream_writebuf(pbuf_stream, raw, raw_len)); return ERR_OK; } /** * Encodes u32_t (counter, gauge, timeticks) into a pbuf chained ASN1 msg. * * @param pbuf_stream points to a pbuf stream * @param octets_needed encoding length (from snmp_asn1_enc_u32t_cnt()) * @param value is the host order u32_t value to be encoded * @return ERR_OK if successful, ERR_ARG if we can't (or won't) encode * * @see snmp_asn1_enc_u32t_cnt() */ err_t snmp_asn1_enc_u32t(struct snmp_pbuf_stream* pbuf_stream, u16_t octets_needed, u32_t value) { if (octets_needed > 5) { return ERR_ARG; } if (octets_needed == 5) { /* not enough bits in 'value' add leading 0x00 */ PBUF_OP_EXEC(snmp_pbuf_stream_write(pbuf_stream, 0x00)); octets_needed--; } while (octets_needed > 1) { octets_needed--; PBUF_OP_EXEC(snmp_pbuf_stream_write(pbuf_stream, (u8_t)(value >> (octets_needed << 3)))); } /* (only) one least significant octet */ PBUF_OP_EXEC(snmp_pbuf_stream_write(pbuf_stream, (u8_t)value)); return ERR_OK; } /** * Encodes u64_t (counter64) into a pbuf chained ASN1 msg. 
* * @param pbuf_stream points to a pbuf stream * @param octets_needed encoding length (from snmp_asn1_enc_u32t_cnt()) * @param value is the host order u32_t value to be encoded * @return ERR_OK if successful, ERR_ARG if we can't (or won't) encode * * @see snmp_asn1_enc_u64t_cnt() */ err_t snmp_asn1_enc_u64t(struct snmp_pbuf_stream* pbuf_stream, u16_t octets_needed, const u32_t* value) { if (octets_needed > 9) { return ERR_ARG; } if (octets_needed == 9) { /* not enough bits in 'value' add leading 0x00 */ PBUF_OP_EXEC(snmp_pbuf_stream_write(pbuf_stream, 0x00)); octets_needed--; } while (octets_needed > 4) { octets_needed--; PBUF_OP_EXEC(snmp_pbuf_stream_write(pbuf_stream, (u8_t)(*value >> ((octets_needed-4) << 3)))); } /* skip to low u32 */ value++; while (octets_needed > 1) { octets_needed--; PBUF_OP_EXEC(snmp_pbuf_stream_write(pbuf_stream, (u8_t)(*value >> (octets_needed << 3)))); } /* always write at least one octet (also in case of value == 0) */ PBUF_OP_EXEC(snmp_pbuf_stream_write(pbuf_stream, (u8_t)(*value))); return ERR_OK; } /** * Encodes s32_t integer into a pbuf chained ASN1 msg. * * @param pbuf_stream points to a pbuf stream * @param octets_needed encoding length (from snmp_asn1_enc_s32t_cnt()) * @param value is the host order s32_t value to be encoded * @return ERR_OK if successful, ERR_ARG if we can't (or won't) encode * * @see snmp_asn1_enc_s32t_cnt() */ err_t snmp_asn1_enc_s32t(struct snmp_pbuf_stream* pbuf_stream, u16_t octets_needed, s32_t value) { while (octets_needed > 1) { octets_needed--; PBUF_OP_EXEC(snmp_pbuf_stream_write(pbuf_stream, (u8_t)(value >> (octets_needed << 3)))); } /* (only) one least significant octet */ PBUF_OP_EXEC(snmp_pbuf_stream_write(pbuf_stream, (u8_t)value)); return ERR_OK; } /** * Encodes object identifier into a pbuf chained ASN1 msg. * * @param pbuf_stream points to a pbuf stream * @param oid points to object identifier array * @param oid_len object identifier array length * @return ERR_OK if successful, ERR_ARG if we can't (or won't) encode */ err_t snmp_asn1_enc_oid(struct snmp_pbuf_stream* pbuf_stream, const u32_t *oid, u16_t oid_len) { if (oid_len > 1) { /* write compressed first two sub id's */ u32_t compressed_byte = ((oid[0] * 40) + oid[1]); PBUF_OP_EXEC(snmp_pbuf_stream_write(pbuf_stream, (u8_t)compressed_byte)); oid_len -= 2; oid += 2; } else { /* @bug: allow empty varbinds for symmetry (we must decode them for getnext), allow partial compression?? */ /* ident_len <= 1, at least we need zeroDotZero (0.0) (ident_len == 2) */ return ERR_ARG; } while (oid_len > 0) { u32_t sub_id; u8_t shift, tail; oid_len--; sub_id = *oid; tail = 0; shift = 28; while (shift > 0) { u8_t code; code = (u8_t)(sub_id >> shift); if ((code != 0) || (tail != 0)) { tail = 1; PBUF_OP_EXEC(snmp_pbuf_stream_write(pbuf_stream, code | 0x80)); } shift -= 7; } PBUF_OP_EXEC(snmp_pbuf_stream_write(pbuf_stream, (u8_t)sub_id & 0x7F)); /* proceed to next sub-identifier */ oid++; } return ERR_OK; } /** * Returns octet count for length. * * @param length parameter length * @param octets_needed points to the return value */ void snmp_asn1_enc_length_cnt(u16_t length, u8_t *octets_needed) { if (length < 0x80U) { *octets_needed = 1; } else if (length < 0x100U) { *octets_needed = 2; } else { *octets_needed = 3; } } /** * Returns octet count for an u32_t. * * @param value value to be encoded * @param octets_needed points to the return value * * @note ASN coded integers are _always_ signed. E.g. +0xFFFF is coded * as 0x00,0xFF,0xFF. Note the leading sign octet. 
A positive value * of 0xFFFFFFFF is preceded with 0x00 and the length is 5 octets!! */ void snmp_asn1_enc_u32t_cnt(u32_t value, u16_t *octets_needed) { if (value < 0x80UL) { *octets_needed = 1; } else if (value < 0x8000UL) { *octets_needed = 2; } else if (value < 0x800000UL) { *octets_needed = 3; } else if (value < 0x80000000UL) { *octets_needed = 4; } else { *octets_needed = 5; } } /** * Returns octet count for an u64_t. * * @param value value to be encoded * @param octets_needed points to the return value * * @note ASN coded integers are _always_ signed. E.g. +0xFFFF is coded * as 0x00,0xFF,0xFF. Note the leading sign octet. A positive value * of 0xFFFFFFFF is preceded with 0x00 and the length is 5 octets!! */ void snmp_asn1_enc_u64t_cnt(const u32_t *value, u16_t *octets_needed) { /* check if high u32 is 0 */ if (*value == 0x00) { /* only low u32 is important */ value++; snmp_asn1_enc_u32t_cnt(*value, octets_needed); } else { /* low u32 does not matter for length determination */ snmp_asn1_enc_u32t_cnt(*value, octets_needed); *octets_needed = *octets_needed + 4; /* add the 4 bytes of low u32 */ } } /** * Returns octet count for an s32_t. * * @param value value to be encoded * @param octets_needed points to the return value * * @note ASN coded integers are _always_ signed. */ void snmp_asn1_enc_s32t_cnt(s32_t value, u16_t *octets_needed) { if (value < 0) { value = ~value; } if (value < 0x80L) { *octets_needed = 1; } else if (value < 0x8000L) { *octets_needed = 2; } else if (value < 0x800000L) { *octets_needed = 3; } else { *octets_needed = 4; } } /** * Returns octet count for an object identifier. * * @param oid points to object identifier array * @param oid_len object identifier array length * @param octets_needed points to the return value */ void snmp_asn1_enc_oid_cnt(const u32_t *oid, u16_t oid_len, u16_t *octets_needed) { u32_t sub_id; *octets_needed = 0; if (oid_len > 1) { /* compressed prefix in one octet */ (*octets_needed)++; oid_len -= 2; oid += 2; } while (oid_len > 0) { oid_len--; sub_id = *oid; sub_id >>= 7; (*octets_needed)++; while (sub_id > 0) { sub_id >>= 7; (*octets_needed)++; } oid++; } } /** * Decodes a TLV from a pbuf stream. 
* * @param pbuf_stream points to a pbuf stream * @param tlv returns decoded TLV * @return ERR_OK if successful, ERR_VAL if we can't decode */ err_t snmp_asn1_dec_tlv(struct snmp_pbuf_stream* pbuf_stream, struct snmp_asn1_tlv* tlv) { u8_t data; /* decode type first */ PBUF_OP_EXEC(snmp_pbuf_stream_read(pbuf_stream, &data)); tlv->type = data; if ((tlv->type & SNMP_ASN1_DATATYPE_MASK) == SNMP_ASN1_DATATYPE_EXTENDED) { /* extended format is not used by SNMP so we do not accept those values */ return ERR_VAL; } tlv->type_len = 1; /* now, decode length */ PBUF_OP_EXEC(snmp_pbuf_stream_read(pbuf_stream, &data)); if (data < 0x80) { /* short form */ tlv->length_len = 1; tlv->value_len = data; } else if (data > 0x80) { /* long form */ u8_t length_bytes = data - 0x80; tlv->length_len = length_bytes + 1; /* this byte + defined number of length bytes following */ tlv->value_len = 0; while (length_bytes > 0) { /* we only support up to u16.maxvalue-1 (2 bytes) but have to accept leading zero bytes */ if (tlv->value_len > 0xFF) { return ERR_VAL; } PBUF_OP_EXEC(snmp_pbuf_stream_read(pbuf_stream, &data)); tlv->value_len <<= 8; tlv->value_len |= data; /* take care for special value used for indefinite length */ if (tlv->value_len == 0xFFFF) { return ERR_VAL; } length_bytes--; } } else { /* data == 0x80 indefinite length form */ /* (not allowed for SNMP; RFC 1157, 3.2.2) */ return ERR_VAL; } return ERR_OK; } /** * Decodes positive integer (counter, gauge, timeticks) into u32_t. * * @param pbuf_stream points to a pbuf stream * @param len length of the coded integer field * @param value return host order integer * @return ERR_OK if successful, ERR_ARG if we can't (or won't) decode * * @note ASN coded integers are _always_ signed. E.g. +0xFFFF is coded * as 0x00,0xFF,0xFF. Note the leading sign octet. A positive value * of 0xFFFFFFFF is preceded with 0x00 and the length is 5 octets!! */ err_t snmp_asn1_dec_u32t(struct snmp_pbuf_stream *pbuf_stream, u16_t len, u32_t *value) { u8_t data; if ((len > 0) && (len <= 5)) { PBUF_OP_EXEC(snmp_pbuf_stream_read(pbuf_stream, &data)); /* expecting sign bit to be zero, only unsigned please! */ if (((len == 5) && (data == 0x00)) || ((len < 5) && ((data & 0x80) == 0))) { *value = data; len--; while (len > 0) { PBUF_OP_EXEC(snmp_pbuf_stream_read(pbuf_stream, &data)); len--; *value <<= 8; *value |= data; } return ERR_OK; } } return ERR_VAL; } /** * Decodes large positive integer (counter64) into 2x u32_t. * * @param pbuf_stream points to a pbuf stream * @param len length of the coded integer field * @param value return host order integer * @return ERR_OK if successful, ERR_ARG if we can't (or won't) decode * * @note ASN coded integers are _always_ signed. E.g. +0xFFFF is coded * as 0x00,0xFF,0xFF. Note the leading sign octet. A positive value * of 0xFFFFFFFF is preceded with 0x00 and the length is 5 octets!! */ err_t snmp_asn1_dec_u64t(struct snmp_pbuf_stream *pbuf_stream, u16_t len, u32_t *value) { u8_t data; if (len <= 4) { /* high u32 is 0 */ *value = 0; /* directly skip to low u32 */ value++; } if ((len > 0) && (len <= 9)) { PBUF_OP_EXEC(snmp_pbuf_stream_read(pbuf_stream, &data)); /* expecting sign bit to be zero, only unsigned please! 
*/ if (((len == 9) && (data == 0x00)) || ((len < 9) && ((data & 0x80) == 0))) { *value = data; len--; while (len > 0) { PBUF_OP_EXEC(snmp_pbuf_stream_read(pbuf_stream, &data)); if (len == 4) { /* skip to low u32 */ value++; *value = 0; } else { *value <<= 8; } *value |= data; len--; } return ERR_OK; } } return ERR_VAL; } /** * Decodes integer into s32_t. * * @param pbuf_stream points to a pbuf stream * @param len length of the coded integer field * @param value return host order integer * @return ERR_OK if successful, ERR_ARG if we can't (or won't) decode * * @note ASN coded integers are _always_ signed! */ err_t snmp_asn1_dec_s32t(struct snmp_pbuf_stream *pbuf_stream, u16_t len, s32_t *value) { #if BYTE_ORDER == LITTLE_ENDIAN u8_t *lsb_ptr = (u8_t*)value; #endif #if BYTE_ORDER == BIG_ENDIAN u8_t *lsb_ptr = (u8_t*)value + sizeof(s32_t) - 1; #endif u8_t sign; u8_t data; if ((len > 0) && (len < 5)) { PBUF_OP_EXEC(snmp_pbuf_stream_read(pbuf_stream, &data)); len--; if (data & 0x80) { /* negative, start from -1 */ *value = -1; sign = 1; *lsb_ptr &= data; } else { /* positive, start from 0 */ *value = 0; sign = 0; *lsb_ptr |= data; } /* OR/AND octets with value */ while (len > 0) { PBUF_OP_EXEC(snmp_pbuf_stream_read(pbuf_stream, &data)); len--; #if BYTE_ORDER == LITTLE_ENDIAN *value <<= 8; #endif #if BYTE_ORDER == BIG_ENDIAN *value >>= 8; #endif if (sign) { *lsb_ptr |= 255; *lsb_ptr &= data; } else { *lsb_ptr |= data; } } return ERR_OK; } return ERR_VAL; } /** * Decodes object identifier from incoming message into array of u32_t. * * @param pbuf_stream points to a pbuf stream * @param len length of the coded object identifier * @param oid return decoded object identifier * @param oid_len return decoded object identifier length * @param oid_max_len size of oid buffer * @return ERR_OK if successful, ERR_ARG if we can't (or won't) decode */ err_t snmp_asn1_dec_oid(struct snmp_pbuf_stream *pbuf_stream, u16_t len, u32_t* oid, u8_t* oid_len, u8_t oid_max_len) { u32_t *oid_ptr; u8_t data; *oid_len = 0; oid_ptr = oid; if (len > 0) { if (oid_max_len < 2) { return ERR_MEM; } PBUF_OP_EXEC(snmp_pbuf_stream_read(pbuf_stream, &data)); len--; /* first compressed octet */ if (data == 0x2B) { /* (most) common case 1.3 (iso.org) */ *oid_ptr = 1; oid_ptr++; *oid_ptr = 3; oid_ptr++; } else if (data < 40) { *oid_ptr = 0; oid_ptr++; *oid_ptr = data; oid_ptr++; } else if (data < 80) { *oid_ptr = 1; oid_ptr++; *oid_ptr = data - 40; oid_ptr++; } else { *oid_ptr = 2; oid_ptr++; *oid_ptr = data - 80; oid_ptr++; } *oid_len = 2; } else { /* accepting zero length identifiers e.g. for getnext operation. uncommon but valid */ return ERR_OK; } while ((len > 0) && (*oid_len < oid_max_len)) { PBUF_OP_EXEC(snmp_pbuf_stream_read(pbuf_stream, &data)); len--; if ((data & 0x80) == 0x00) { /* sub-identifier uses single octet */ *oid_ptr = data; } else { /* sub-identifier uses multiple octets */ u32_t sub_id = (data & ~0x80); while ((len > 0) && ((data & 0x80) != 0)) { PBUF_OP_EXEC(snmp_pbuf_stream_read(pbuf_stream, &data)); len--; sub_id = (sub_id << 7) + (data & ~0x80); } if ((data & 0x80) != 0) { /* "more bytes following" bit still set at end of len */ return ERR_VAL; } *oid_ptr = sub_id; } oid_ptr++; (*oid_len)++; } if (len > 0) { /* OID to long to fit in our buffer */ return ERR_MEM; } return ERR_OK; } /** * Decodes (copies) raw data (ip-addresses, octet strings, opaque encoding) * from incoming message into array. * * @param pbuf_stream points to a pbuf stream * @param len length of the coded raw data (zero is valid, e.g. 
empty string!) * @param buf return raw bytes * @param buf_len returns length of the raw return value * @param buf_max_len buffer size * @return ERR_OK if successful, ERR_ARG if we can't (or won't) decode */ err_t snmp_asn1_dec_raw(struct snmp_pbuf_stream *pbuf_stream, u16_t len, u8_t *buf, u16_t* buf_len, u16_t buf_max_len) { if (len > buf_max_len) { /* not enough dst space */ return ERR_MEM; } *buf_len = len; while (len > 0) { PBUF_OP_EXEC(snmp_pbuf_stream_read(pbuf_stream, buf)); buf++; len--; } return ERR_OK; } #endif /* LWIP_SNMP */
2301_81045437/classic-platform
communication/lwip-2.0.3/src/apps/snmp/snmp_asn1.c
C
unknown
20,782
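The encoder above is normally driven by the agent's message layer, but the intended call order is easy to miss because the length bookkeeping is split across several functions. The following minimal sketch, assuming a pbuf stream that the caller has already initialized, encodes one Gauge32 value: count the value octets, fill the TLV (note that this lwIP version spells the TLV encoder snmp_ans1_enc_tlv), write the type and length octets, then write the value octets. The EXAMPLE_ prefixed names are illustrative only, "snmp_asn1.h" refers to the agent-internal header shown below, and 0x42 is simply the raw ASN.1 type octet for Gauge32.

/* Sketch: encode 'value' as a Gauge32 TLV into an already initialized
 * pbuf stream, mirroring how the agent's message encoder drives the
 * routines above. */
#include "lwip/err.h"
#include "snmp_asn1.h"   /* agent-internal header (src/apps/snmp) */

#define EXAMPLE_ASN1_TYPE_GAUGE32 0x42 /* class APPLICATION (0x40) | tag 2 */

static err_t example_encode_gauge32(struct snmp_pbuf_stream *stream, u32_t value)
{
  struct snmp_asn1_tlv tlv;
  u16_t value_len;
  err_t err;

  /* 1) ASN.1 integers are always signed, so values >= 0x80000000 need a
   *    leading 0x00 octet; snmp_asn1_enc_u32t_cnt() accounts for that. */
  snmp_asn1_enc_u32t_cnt(value, &value_len);

  /* 2) type = Gauge32, length_len = 0 (let the encoder size the length field) */
  SNMP_ASN1_SET_TLV_PARAMS(tlv, EXAMPLE_ASN1_TYPE_GAUGE32, 0, value_len);

  /* 3) write the T and L octets ... */
  err = snmp_ans1_enc_tlv(stream, &tlv);
  if (err != ERR_OK) {
    return err;
  }

  /* 4) ... followed by the V octets */
  return snmp_asn1_enc_u32t(stream, tlv.value_len, value);
}

The counting function has to match the value encoder that follows it (u32t_cnt for enc_u32t, u64t_cnt for enc_u64t, s32t_cnt for enc_s32t), otherwise the length octet written by the TLV encoder will not match the number of value octets actually emitted.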
/** * @file * Abstract Syntax Notation One (ISO 8824, 8825) codec. */ /* * Copyright (c) 2006 Axon Digital Design B.V., The Netherlands. * Copyright (c) 2016 Elias Oenal. * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY * OF SUCH DAMAGE. * * Author: Christiaan Simons <christiaan.simons@axon.tv> * Martin Hentschel <info@cl-soft.de> * Elias Oenal <lwip@eliasoenal.com> */ #ifndef LWIP_HDR_APPS_SNMP_ASN1_H #define LWIP_HDR_APPS_SNMP_ASN1_H #include "lwip/apps/snmp_opts.h" #if LWIP_SNMP #include "lwip/err.h" #include "lwip/apps/snmp_core.h" #include "snmp_pbuf_stream.h" #ifdef __cplusplus extern "C" { #endif #define SNMP_ASN1_TLV_INDEFINITE_LENGTH 0x80 #define SNMP_ASN1_CLASS_MASK 0xC0 #define SNMP_ASN1_CONTENTTYPE_MASK 0x20 #define SNMP_ASN1_DATATYPE_MASK 0x1F #define SNMP_ASN1_DATATYPE_EXTENDED 0x1F /* DataType indicating that datatype is encoded in following bytes */ /* context specific (SNMP) tags (from SNMP spec. 
RFC1157) */ #define SNMP_ASN1_CONTEXT_PDU_GET_REQ 0 #define SNMP_ASN1_CONTEXT_PDU_GET_NEXT_REQ 1 #define SNMP_ASN1_CONTEXT_PDU_GET_RESP 2 #define SNMP_ASN1_CONTEXT_PDU_SET_REQ 3 #define SNMP_ASN1_CONTEXT_PDU_TRAP 4 #define SNMP_ASN1_CONTEXT_PDU_GET_BULK_REQ 5 #define SNMP_ASN1_CONTEXT_VARBIND_NO_SUCH_OBJECT 0 #define SNMP_ASN1_CONTEXT_VARBIND_END_OF_MIB_VIEW 2 struct snmp_asn1_tlv { u8_t type; /* only U8 because extended types are not specified by SNMP */ u8_t type_len; /* encoded length of 'type' field (normally 1) */ u8_t length_len; /* indicates how many bytes are required to encode the 'value_len' field */ u16_t value_len; /* encoded length of the value */ }; #define SNMP_ASN1_TLV_HDR_LENGTH(tlv) ((tlv).type_len + (tlv).length_len) #define SNMP_ASN1_TLV_LENGTH(tlv) ((tlv).type_len + (tlv).length_len + (tlv).value_len) #define SNMP_ASN1_SET_TLV_PARAMS(tlv, type_, length_len_, value_len_) do { (tlv).type = (type_); (tlv).type_len = 0; (tlv).length_len = (length_len_); (tlv).value_len = (value_len_); } while (0); err_t snmp_asn1_dec_tlv(struct snmp_pbuf_stream* pbuf_stream, struct snmp_asn1_tlv* tlv); err_t snmp_asn1_dec_u32t(struct snmp_pbuf_stream *pbuf_stream, u16_t len, u32_t *value); err_t snmp_asn1_dec_u64t(struct snmp_pbuf_stream *pbuf_stream, u16_t len, u32_t *value); err_t snmp_asn1_dec_s32t(struct snmp_pbuf_stream *pbuf_stream, u16_t len, s32_t *value); err_t snmp_asn1_dec_oid(struct snmp_pbuf_stream *pbuf_stream, u16_t len, u32_t* oid, u8_t* oid_len, u8_t oid_max_len); err_t snmp_asn1_dec_raw(struct snmp_pbuf_stream *pbuf_stream, u16_t len, u8_t *buf, u16_t* buf_len, u16_t buf_max_len); err_t snmp_ans1_enc_tlv(struct snmp_pbuf_stream* pbuf_stream, struct snmp_asn1_tlv* tlv); void snmp_asn1_enc_length_cnt(u16_t length, u8_t *octets_needed); void snmp_asn1_enc_u32t_cnt(u32_t value, u16_t *octets_needed); void snmp_asn1_enc_u64t_cnt(const u32_t *value, u16_t *octets_needed); void snmp_asn1_enc_s32t_cnt(s32_t value, u16_t *octets_needed); void snmp_asn1_enc_oid_cnt(const u32_t *oid, u16_t oid_len, u16_t *octets_needed); err_t snmp_asn1_enc_oid(struct snmp_pbuf_stream* pbuf_stream, const u32_t *oid, u16_t oid_len); err_t snmp_asn1_enc_s32t(struct snmp_pbuf_stream* pbuf_stream, u16_t octets_needed, s32_t value); err_t snmp_asn1_enc_u32t(struct snmp_pbuf_stream* pbuf_stream, u16_t octets_needed, u32_t value); err_t snmp_asn1_enc_u64t(struct snmp_pbuf_stream* pbuf_stream, u16_t octets_needed, const u32_t* value); err_t snmp_asn1_enc_raw(struct snmp_pbuf_stream* pbuf_stream, const u8_t *raw, u16_t raw_len); #ifdef __cplusplus } #endif #endif /* LWIP_SNMP */ #endif /* LWIP_HDR_APPS_SNMP_ASN1_H */
2301_81045437/classic-platform
communication/lwip-2.0.3/src/apps/snmp/snmp_asn1.h
C
unknown
5,146
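On the decode side the same TLV structure is used in reverse: snmp_asn1_dec_tlv() fills type, length_len and value_len, and the value decoders then consume exactly value_len octets from the stream. A minimal sketch, assuming the caller has positioned the pbuf stream at the start of a TLV; 0x02 is the raw type octet of a universal INTEGER, and example_decode_integer is a hypothetical helper, not part of the lwIP API.

/* Sketch: pull one INTEGER TLV off a pbuf stream and return its value. */
#include "lwip/err.h"
#include "snmp_asn1.h"   /* agent-internal header (src/apps/snmp) */

static err_t example_decode_integer(struct snmp_pbuf_stream *stream, s32_t *out)
{
  struct snmp_asn1_tlv tlv;
  err_t err;

  err = snmp_asn1_dec_tlv(stream, &tlv);
  if (err != ERR_OK) {
    return err;
  }
  if (tlv.type != 0x02) { /* universal, primitive, tag 2: INTEGER */
    return ERR_VAL;
  }
  /* tlv.value_len tells the value decoder how many octets to consume;
   * SNMP_ASN1_TLV_LENGTH(tlv) would give the total T+L+V size if the
   * caller needs to account for the whole field. */
  return snmp_asn1_dec_s32t(stream, tlv.value_len, out);
}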
/** * @file * MIB tree access/construction functions. */ /* * Copyright (c) 2006 Axon Digital Design B.V., The Netherlands. * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY * OF SUCH DAMAGE. * * Author: Christiaan Simons <christiaan.simons@axon.tv> * Martin Hentschel <info@cl-soft.de> */ /** * @defgroup snmp SNMPv2c agent * @ingroup apps * SNMPv2c compatible agent\n * There is also a MIB compiler and a MIB viewer in lwIP contrib repository * (lwip-contrib/apps/LwipMibCompiler).\n * The agent implements the most important MIB2 MIBs including IPv6 support * (interfaces, UDP, TCP, SNMP, ICMP, SYSTEM). IP MIB is an older version * whithout IPv6 statistics (TODO).\n * Rewritten by Martin Hentschel <info@cl-soft.de> and * Dirk Ziegelmeier <dziegel@gmx.de>\n * Work on SNMPv3 has started, but is not finished.\n * * 0 Agent Capabilities * ==================== * * Features: * --------- * - SNMPv2c support. * - Low RAM usage - no memory pools, stack only. * - MIB2 implementation is separated from SNMP stack. * - Support for multiple MIBs (snmp_set_mibs() call) - e.g. for private MIB. * - Simple and generic API for MIB implementation. * - Comfortable node types and helper functions for scalar arrays and tables. * - Counter64, bit and truthvalue datatype support. * - Callbacks for SNMP writes e.g. to implement persistency. * - Runs on two APIs: RAW and netconn. * - Async API is gone - the stack now supports netconn API instead, * so blocking operations can be done in MIB calls. * SNMP runs in a worker thread when netconn API is used. * - Simplified thread sync support for MIBs - useful when MIBs * need to access variables shared with other threads where no locking is * possible. Used in MIB2 to access lwIP stats from lwIP thread. * * MIB compiler (code generator): * ------------------------------ * - Provided in lwIP contrib repository. * - Written in C#. MIB viewer used Windows Forms. * - Developed on Windows with Visual Studio 2010. * - Can be compiled and used on all platforms with http://www.monodevelop.com/. * - Based on a heavily modified version of of SharpSnmpLib (a4bd05c6afb4) * (https://sharpsnmplib.codeplex.com/SourceControl/network/forks/Nemo157/MIBParserUpdate). 
* - MIB parser, C file generation framework and LWIP code generation are cleanly * separated, which means the code may be useful as a base for code generation * of other SNMP agents. * * Notes: * ------ * - Stack and MIB compiler were used to implement a Profinet device. * Compiled/implemented MIBs: LLDP-MIB, LLDP-EXT-DOT3-MIB, LLDP-EXT-PNO-MIB. * * SNMPv1 per RFC1157 and SNMPv2c per RFC 3416 * ------------------------------------------- * Note the S in SNMP stands for "Simple". Note that "Simple" is * relative. SNMP is simple compared to the complex ISO network * management protocols CMIP (Common Management Information Protocol) * and CMOT (CMip Over Tcp). * * MIB II * ------ * The standard lwIP stack management information base. * This is a required MIB, so this is always enabled. * The groups EGP, CMOT and transmission are disabled by default. * * Most mib-2 objects are not writable except: * sysName, sysLocation, sysContact, snmpEnableAuthenTraps. * Writing to or changing the ARP and IP address and route * tables is not possible. * * Note lwIP has a very limited notion of IP routing. It currently * doen't have a route table and doesn't have a notion of the U,G,H flags. * Instead lwIP uses the interface list with only one default interface * acting as a single gateway interface (G) for the default route. * * The agent returns a "virtual table" with the default route 0.0.0.0 * for the default interface and network routes (no H) for each * network interface in the netif_list. * All routes are considered to be up (U). * * Loading additional MIBs * ----------------------- * MIBs can only be added in compile-time, not in run-time. * * * 1 Building the Agent * ==================== * First of all you'll need to add the following define * to your local lwipopts.h: * \#define LWIP_SNMP 1 * * and add the source files your makefile. * * Note you'll might need to adapt you network driver to update * the mib2 variables for your interface. * * 2 Running the Agent * =================== * The following function calls must be made in your program to * actually get the SNMP agent running. * * Before starting the agent you should supply pointers * for sysContact, sysLocation, and snmpEnableAuthenTraps. * You can do this by calling * * - snmp_mib2_set_syscontact() * - snmp_mib2_set_syslocation() * - snmp_set_auth_traps_enabled() * * You can register a callback which is called on successful write access: * snmp_set_write_callback(). * * Additionally you may want to set * * - snmp_mib2_set_sysdescr() * - snmp_set_device_enterprise_oid() * - snmp_mib2_set_sysname() * * Also before starting the agent you need to setup * one or more trap destinations using these calls: * * - snmp_trap_dst_enable() * - snmp_trap_dst_ip_set() * * If you need more than MIB2, set the MIBs you want to use * by snmp_set_mibs(). 
* * Finally, enable the agent by calling snmp_init() * * @defgroup snmp_core Core * @ingroup snmp * * @defgroup snmp_traps Traps * @ingroup snmp */ #include "lwip/apps/snmp_opts.h" #if LWIP_SNMP /* don't build if not configured for use in lwipopts.h */ #include "lwip/apps/snmp.h" #include "lwip/apps/snmp_core.h" #include "snmp_core_priv.h" #include "lwip/netif.h" #include <string.h> #if (LWIP_SNMP && (SNMP_TRAP_DESTINATIONS<=0)) #error "If you want to use SNMP, you have to define SNMP_TRAP_DESTINATIONS>=1 in your lwipopts.h" #endif #if (!LWIP_UDP && LWIP_SNMP) #error "If you want to use SNMP, you have to define LWIP_UDP=1 in your lwipopts.h" #endif struct snmp_statistics snmp_stats; static const struct snmp_obj_id snmp_device_enterprise_oid_default = {SNMP_DEVICE_ENTERPRISE_OID_LEN, SNMP_DEVICE_ENTERPRISE_OID}; static const struct snmp_obj_id* snmp_device_enterprise_oid = &snmp_device_enterprise_oid_default; const u32_t snmp_zero_dot_zero_values[] = { 0, 0 }; const struct snmp_obj_id_const_ref snmp_zero_dot_zero = { LWIP_ARRAYSIZE(snmp_zero_dot_zero_values), snmp_zero_dot_zero_values }; #if SNMP_LWIP_MIB2 #include "lwip/apps/snmp_mib2.h" static const struct snmp_mib* const default_mibs[] = { &mib2 }; static u8_t snmp_num_mibs = 1; #else static const struct snmp_mib* const default_mibs[] = { NULL }; static u8_t snmp_num_mibs = 0; #endif /* List of known mibs */ static struct snmp_mib const * const *snmp_mibs = default_mibs; /** * @ingroup snmp_core * Sets the MIBs to use. * Example: call snmp_set_mibs() as follows: * static const struct snmp_mib *my_snmp_mibs[] = { * &mib2, * &private_mib * }; * snmp_set_mibs(my_snmp_mibs, LWIP_ARRAYSIZE(my_snmp_mibs)); */ void snmp_set_mibs(const struct snmp_mib **mibs, u8_t num_mibs) { LWIP_ASSERT("mibs pointer must be != NULL", (mibs != NULL)); LWIP_ASSERT("num_mibs pointer must be != 0", (num_mibs != 0)); snmp_mibs = mibs; snmp_num_mibs = num_mibs; } /** * @ingroup snmp_core * 'device enterprise oid' is used for 'device OID' field in trap PDU's (for identification of generating device) * as well as for value returned by MIB-2 'sysObjectID' field (if internal MIB2 implementation is used). * The 'device enterprise oid' shall point to an OID located under 'private-enterprises' branch (1.3.6.1.4.1.XXX). If a vendor * wants to provide a custom object there, he has to get its own enterprise oid from IANA (http://www.iana.org). It * is not allowed to use LWIP enterprise ID! * In order to identify a specific device it is recommended to create a dedicated OID for each device type under its own * enterprise oid. * e.g. 
* device a > 1.3.6.1.4.1.XXX(ent-oid).1(devices).1(device a) * device b > 1.3.6.1.4.1.XXX(ent-oid).1(devices).2(device b) * for more details see description of 'sysObjectID' field in RFC1213-MIB */ void snmp_set_device_enterprise_oid(const struct snmp_obj_id* device_enterprise_oid) { if (device_enterprise_oid == NULL) { snmp_device_enterprise_oid = &snmp_device_enterprise_oid_default; } else { snmp_device_enterprise_oid = device_enterprise_oid; } } /** * @ingroup snmp_core * Get 'device enterprise oid' */ const struct snmp_obj_id* snmp_get_device_enterprise_oid(void) { return snmp_device_enterprise_oid; } #if LWIP_IPV4 /** * Conversion from InetAddressIPv4 oid to lwIP ip4_addr * @param oid points to u32_t ident[4] input * @param ip points to output struct */ u8_t snmp_oid_to_ip4(const u32_t *oid, ip4_addr_t *ip) { if ((oid[0] > 0xFF) || (oid[1] > 0xFF) || (oid[2] > 0xFF) || (oid[3] > 0xFF)) { ip4_addr_copy(*ip, *IP4_ADDR_ANY4); return 0; } IP4_ADDR(ip, oid[0], oid[1], oid[2], oid[3]); return 1; } /** * Convert ip4_addr to InetAddressIPv4 (no InetAddressType) * @param ip points to input struct * @param oid points to u32_t ident[4] output */ void snmp_ip4_to_oid(const ip4_addr_t *ip, u32_t *oid) { oid[0] = ip4_addr1(ip); oid[1] = ip4_addr2(ip); oid[2] = ip4_addr3(ip); oid[3] = ip4_addr4(ip); } #endif /* LWIP_IPV4 */ #if LWIP_IPV6 /** * Conversion from InetAddressIPv6 oid to lwIP ip6_addr * @param oid points to u32_t oid[16] input * @param ip points to output struct */ u8_t snmp_oid_to_ip6(const u32_t *oid, ip6_addr_t *ip) { if ((oid[0] > 0xFF) || (oid[1] > 0xFF) || (oid[2] > 0xFF) || (oid[3] > 0xFF) || (oid[4] > 0xFF) || (oid[5] > 0xFF) || (oid[6] > 0xFF) || (oid[7] > 0xFF) || (oid[8] > 0xFF) || (oid[9] > 0xFF) || (oid[10] > 0xFF) || (oid[11] > 0xFF) || (oid[12] > 0xFF) || (oid[13] > 0xFF) || (oid[14] > 0xFF) || (oid[15] > 0xFF)) { ip6_addr_set_any(ip); return 0; } ip->addr[0] = (oid[0] << 24) | (oid[1] << 16) | (oid[2] << 8) | (oid[3] << 0); ip->addr[1] = (oid[4] << 24) | (oid[5] << 16) | (oid[6] << 8) | (oid[7] << 0); ip->addr[2] = (oid[8] << 24) | (oid[9] << 16) | (oid[10] << 8) | (oid[11] << 0); ip->addr[3] = (oid[12] << 24) | (oid[13] << 16) | (oid[14] << 8) | (oid[15] << 0); return 1; } /** * Convert ip6_addr to InetAddressIPv6 (no InetAddressType) * @param ip points to input struct * @param oid points to u32_t ident[16] output */ void snmp_ip6_to_oid(const ip6_addr_t *ip, u32_t *oid) { oid[0] = (ip->addr[0] & 0xFF000000) >> 24; oid[1] = (ip->addr[0] & 0x00FF0000) >> 16; oid[2] = (ip->addr[0] & 0x0000FF00) >> 8; oid[3] = (ip->addr[0] & 0x000000FF) >> 0; oid[4] = (ip->addr[1] & 0xFF000000) >> 24; oid[5] = (ip->addr[1] & 0x00FF0000) >> 16; oid[6] = (ip->addr[1] & 0x0000FF00) >> 8; oid[7] = (ip->addr[1] & 0x000000FF) >> 0; oid[8] = (ip->addr[2] & 0xFF000000) >> 24; oid[9] = (ip->addr[2] & 0x00FF0000) >> 16; oid[10] = (ip->addr[2] & 0x0000FF00) >> 8; oid[11] = (ip->addr[2] & 0x000000FF) >> 0; oid[12] = (ip->addr[3] & 0xFF000000) >> 24; oid[13] = (ip->addr[3] & 0x00FF0000) >> 16; oid[14] = (ip->addr[3] & 0x0000FF00) >> 8; oid[15] = (ip->addr[3] & 0x000000FF) >> 0; } #endif /* LWIP_IPV6 */ #if LWIP_IPV4 || LWIP_IPV6 /** * Convert to InetAddressType+InetAddress+InetPortNumber * @param ip IP address * @param port Port * @param oid OID * @return OID length */ u8_t snmp_ip_port_to_oid(const ip_addr_t *ip, u16_t port, u32_t *oid) { u8_t idx; idx = snmp_ip_to_oid(ip, oid); oid[idx] = port; idx++; return idx; } /** * Convert to InetAddressType+InetAddress * @param ip IP address * @param oid OID * 
@return OID length */ u8_t snmp_ip_to_oid(const ip_addr_t *ip, u32_t *oid) { if (IP_IS_ANY_TYPE_VAL(*ip)) { oid[0] = 0; /* any */ oid[1] = 0; /* no IP OIDs follow */ return 2; } else if (IP_IS_V6(ip)) { #if LWIP_IPV6 oid[0] = 2; /* ipv6 */ oid[1] = 16; /* 16 InetAddressIPv6 OIDs follow */ snmp_ip6_to_oid(ip_2_ip6(ip), &oid[2]); return 18; #else /* LWIP_IPV6 */ return 0; #endif /* LWIP_IPV6 */ } else { #if LWIP_IPV4 oid[0] = 1; /* ipv4 */ oid[1] = 4; /* 4 InetAddressIPv4 OIDs follow */ snmp_ip4_to_oid(ip_2_ip4(ip), &oid[2]); return 6; #else /* LWIP_IPV4 */ return 0; #endif /* LWIP_IPV4 */ } } /** * Convert from InetAddressType+InetAddress to ip_addr_t * @param oid OID * @param oid_len OID length * @param ip IP address * @return Parsed OID length */ u8_t snmp_oid_to_ip(const u32_t *oid, u8_t oid_len, ip_addr_t *ip) { /* InetAddressType */ if (oid_len < 1) { return 0; } if (oid[0] == 0) { /* any */ /* 1x InetAddressType, 1x OID len */ if (oid_len < 2) { return 0; } if (oid[1] != 0) { return 0; } memset(ip, 0, sizeof(*ip)); IP_SET_TYPE(ip, IPADDR_TYPE_ANY); return 2; } else if (oid[0] == 1) { /* ipv4 */ #if LWIP_IPV4 /* 1x InetAddressType, 1x OID len, 4x InetAddressIPv4 */ if (oid_len < 6) { return 0; } /* 4x ipv4 OID */ if (oid[1] != 4) { return 0; } IP_SET_TYPE(ip, IPADDR_TYPE_V4); if (!snmp_oid_to_ip4(&oid[2], ip_2_ip4(ip))) { return 0; } return 6; #else /* LWIP_IPV4 */ return 0; #endif /* LWIP_IPV4 */ } else if (oid[0] == 2) { /* ipv6 */ #if LWIP_IPV6 /* 1x InetAddressType, 1x OID len, 16x InetAddressIPv6 */ if (oid_len < 18) { return 0; } /* 16x ipv6 OID */ if (oid[1] != 16) { return 0; } IP_SET_TYPE(ip, IPADDR_TYPE_V6); if (!snmp_oid_to_ip6(&oid[2], ip_2_ip6(ip))) { return 0; } return 18; #else /* LWIP_IPV6 */ return 0; #endif /* LWIP_IPV6 */ } else { /* unsupported InetAddressType */ return 0; } } /** * Convert from InetAddressType+InetAddress+InetPortNumber to ip_addr_t and u16_t * @param oid OID * @param oid_len OID length * @param ip IP address * @param port Port * @return Parsed OID length */ u8_t snmp_oid_to_ip_port(const u32_t *oid, u8_t oid_len, ip_addr_t *ip, u16_t *port) { u8_t idx = 0; /* InetAddressType + InetAddress */ idx += snmp_oid_to_ip(&oid[idx], oid_len-idx, ip); if (idx == 0) { return 0; } /* InetPortNumber */ if (oid_len < (idx+1)) { return 0; } if (oid[idx] > 0xffff) { return 0; } *port = (u16_t)oid[idx]; idx++; return idx; } #endif /* LWIP_IPV4 || LWIP_IPV6 */ /** * Assign an OID to struct snmp_obj_id * @param target Assignment target * @param oid OID * @param oid_len OID length */ void snmp_oid_assign(struct snmp_obj_id* target, const u32_t *oid, u8_t oid_len) { LWIP_ASSERT("oid_len <= LWIP_SNMP_OBJ_ID_LEN", oid_len <= SNMP_MAX_OBJ_ID_LEN); target->len = oid_len; if (oid_len > 0) { MEMCPY(target->id, oid, oid_len * sizeof(u32_t)); } } /** * Prefix an OID to OID in struct snmp_obj_id * @param target Assignment target to prefix * @param oid OID * @param oid_len OID length */ void snmp_oid_prefix(struct snmp_obj_id* target, const u32_t *oid, u8_t oid_len) { LWIP_ASSERT("target->len + oid_len <= LWIP_SNMP_OBJ_ID_LEN", (target->len + oid_len) <= SNMP_MAX_OBJ_ID_LEN); if (oid_len > 0) { /* move existing OID to make room at the beginning for OID to insert */ int i; for (i = target->len-1; i>=0; i--) { target->id[i + oid_len] = target->id[i]; } /* paste oid at the beginning */ MEMCPY(target->id, oid, oid_len * sizeof(u32_t)); } } /** * Combine two OIDs into struct snmp_obj_id * @param target Assignmet target * @param oid1 OID 1 * @param oid1_len OID 1 length * @param oid2 
OID 2 * @param oid2_len OID 2 length */ void snmp_oid_combine(struct snmp_obj_id* target, const u32_t *oid1, u8_t oid1_len, const u32_t *oid2, u8_t oid2_len) { snmp_oid_assign(target, oid1, oid1_len); snmp_oid_append(target, oid2, oid2_len); } /** * Append OIDs to struct snmp_obj_id * @param target Assignment target to append to * @param oid OID * @param oid_len OID length */ void snmp_oid_append(struct snmp_obj_id* target, const u32_t *oid, u8_t oid_len) { LWIP_ASSERT("offset + oid_len <= LWIP_SNMP_OBJ_ID_LEN", (target->len + oid_len) <= SNMP_MAX_OBJ_ID_LEN); if (oid_len > 0) { MEMCPY(&target->id[target->len], oid, oid_len * sizeof(u32_t)); target->len += oid_len; } } /** * Compare two OIDs * @param oid1 OID 1 * @param oid1_len OID 1 length * @param oid2 OID 2 * @param oid2_len OID 2 length * @return -1: OID1&lt;OID2 1: OID1 &gt;OID2 0: equal */ s8_t snmp_oid_compare(const u32_t *oid1, u8_t oid1_len, const u32_t *oid2, u8_t oid2_len) { u8_t level = 0; LWIP_ASSERT("'oid1' param must not be NULL or 'oid1_len' param be 0!", (oid1 != NULL) || (oid1_len == 0)); LWIP_ASSERT("'oid2' param must not be NULL or 'oid2_len' param be 0!", (oid2 != NULL) || (oid2_len == 0)); while ((level < oid1_len) && (level < oid2_len)) { if (*oid1 < *oid2) { return -1; } if (*oid1 > *oid2) { return 1; } level++; oid1++; oid2++; } /* common part of both OID's is equal, compare length */ if (oid1_len < oid2_len) { return -1; } if (oid1_len > oid2_len) { return 1; } /* they are equal */ return 0; } /** * Check of two OIDs are equal * @param oid1 OID 1 * @param oid1_len OID 1 length * @param oid2 OID 2 * @param oid2_len OID 2 length * @return 1: equal 0: non-equal */ u8_t snmp_oid_equal(const u32_t *oid1, u8_t oid1_len, const u32_t *oid2, u8_t oid2_len) { return (snmp_oid_compare(oid1, oid1_len, oid2, oid2_len) == 0)? 
1 : 0; } /** * Convert netif to interface index * @param netif netif * @return index */ u8_t netif_to_num(const struct netif *netif) { u8_t result = 0; struct netif *netif_iterator = netif_list; while (netif_iterator != NULL) { result++; if (netif_iterator == netif) { return result; } netif_iterator = netif_iterator->next; } LWIP_ASSERT("netif not found in netif_list", 0); return 0; } static const struct snmp_mib* snmp_get_mib_from_oid(const u32_t *oid, u8_t oid_len) { const u32_t* list_oid; const u32_t* searched_oid; u8_t i, l; u8_t max_match_len = 0; const struct snmp_mib* matched_mib = NULL; LWIP_ASSERT("'oid' param must not be NULL!", (oid != NULL)); if (oid_len == 0) { return NULL; } for (i = 0; i < snmp_num_mibs; i++) { LWIP_ASSERT("MIB array not initialized correctly", (snmp_mibs[i] != NULL)); LWIP_ASSERT("MIB array not initialized correctly - base OID is NULL", (snmp_mibs[i]->base_oid != NULL)); if (oid_len >= snmp_mibs[i]->base_oid_len) { l = snmp_mibs[i]->base_oid_len; list_oid = snmp_mibs[i]->base_oid; searched_oid = oid; while (l > 0) { if (*list_oid != *searched_oid) { break; } l--; list_oid++; searched_oid++; } if ((l == 0) && (snmp_mibs[i]->base_oid_len > max_match_len)) { max_match_len = snmp_mibs[i]->base_oid_len; matched_mib = snmp_mibs[i]; } } } return matched_mib; } static const struct snmp_mib* snmp_get_next_mib(const u32_t *oid, u8_t oid_len) { u8_t i; const struct snmp_mib* next_mib = NULL; LWIP_ASSERT("'oid' param must not be NULL!", (oid != NULL)); if (oid_len == 0) { return NULL; } for (i = 0; i < snmp_num_mibs; i++) { if (snmp_mibs[i]->base_oid != NULL) { /* check if mib is located behind starting point */ if (snmp_oid_compare(snmp_mibs[i]->base_oid, snmp_mibs[i]->base_oid_len, oid, oid_len) > 0) { if ((next_mib == NULL) || (snmp_oid_compare(snmp_mibs[i]->base_oid, snmp_mibs[i]->base_oid_len, next_mib->base_oid, next_mib->base_oid_len) < 0)) { next_mib = snmp_mibs[i]; } } } } return next_mib; } static const struct snmp_mib* snmp_get_mib_between(const u32_t *oid1, u8_t oid1_len, const u32_t *oid2, u8_t oid2_len) { const struct snmp_mib* next_mib = snmp_get_next_mib(oid1, oid1_len); LWIP_ASSERT("'oid2' param must not be NULL!", (oid2 != NULL)); LWIP_ASSERT("'oid2_len' param must be greater than 0!", (oid2_len > 0)); if (next_mib != NULL) { if (snmp_oid_compare(next_mib->base_oid, next_mib->base_oid_len, oid2, oid2_len) < 0) { return next_mib; } } return NULL; } u8_t snmp_get_node_instance_from_oid(const u32_t *oid, u8_t oid_len, struct snmp_node_instance* node_instance) { u8_t result = SNMP_ERR_NOSUCHOBJECT; const struct snmp_mib *mib; const struct snmp_node *mn = NULL; mib = snmp_get_mib_from_oid(oid, oid_len); if (mib != NULL) { u8_t oid_instance_len; mn = snmp_mib_tree_resolve_exact(mib, oid, oid_len, &oid_instance_len); if ((mn != NULL) && (mn->node_type != SNMP_NODE_TREE)) { /* get instance */ const struct snmp_leaf_node* leaf_node = (const struct snmp_leaf_node*)(const void*)mn; node_instance->node = mn; snmp_oid_assign(&node_instance->instance_oid, oid + (oid_len - oid_instance_len), oid_instance_len); result = leaf_node->get_instance( oid, oid_len - oid_instance_len, node_instance); #ifdef LWIP_DEBUG if (result == SNMP_ERR_NOERROR) { if (((node_instance->access & SNMP_NODE_INSTANCE_ACCESS_READ) != 0) && (node_instance->get_value == NULL)) { LWIP_DEBUGF(SNMP_DEBUG, ("SNMP inconsistent access: node is readable but no get_value function is specified\n")); } if (((node_instance->access & SNMP_NODE_INSTANCE_ACCESS_WRITE) != 0) && (node_instance->set_value == 
NULL)) { LWIP_DEBUGF(SNMP_DEBUG, ("SNMP inconsistent access: node is writable but no set_value and/or set_test function is specified\n")); } } #endif } } return result; } u8_t snmp_get_next_node_instance_from_oid(const u32_t *oid, u8_t oid_len, snmp_validate_node_instance_method validate_node_instance_method, void* validate_node_instance_arg, struct snmp_obj_id* node_oid, struct snmp_node_instance* node_instance) { const struct snmp_mib *mib; const struct snmp_node *mn = NULL; const u32_t* start_oid = NULL; u8_t start_oid_len = 0; /* resolve target MIB from passed OID */ mib = snmp_get_mib_from_oid(oid, oid_len); if (mib == NULL) { /* passed OID does not reference any known MIB, start at the next closest MIB */ mib = snmp_get_next_mib(oid, oid_len); if (mib != NULL) { start_oid = mib->base_oid; start_oid_len = mib->base_oid_len; } } else { start_oid = oid; start_oid_len = oid_len; } /* resolve target node from MIB, skip to next MIB if no suitable node is found in current MIB */ while ((mib != NULL) && (mn == NULL)) { u8_t oid_instance_len; /* check if OID directly references a node inside current MIB, in this case we have to ask this node for the next instance */ mn = snmp_mib_tree_resolve_exact(mib, start_oid, start_oid_len, &oid_instance_len); if (mn != NULL) { snmp_oid_assign(node_oid, start_oid, start_oid_len - oid_instance_len); /* set oid to node */ snmp_oid_assign(&node_instance->instance_oid, start_oid + (start_oid_len - oid_instance_len), oid_instance_len); /* set (relative) instance oid */ } else { /* OID does not reference a node, search for the next closest node inside MIB; set instance_oid.len to zero because we want the first instance of this node */ mn = snmp_mib_tree_resolve_next(mib, start_oid, start_oid_len, node_oid); node_instance->instance_oid.len = 0; } /* validate the node; if the node has no further instance or the returned instance is invalid, search for the next in MIB and validate again */ node_instance->node = mn; while (mn != NULL) { u8_t result; /* clear fields which may have values from previous loops */ node_instance->asn1_type = 0; node_instance->access = SNMP_NODE_INSTANCE_NOT_ACCESSIBLE; node_instance->get_value = NULL; node_instance->set_test = NULL; node_instance->set_value = NULL; node_instance->release_instance = NULL; node_instance->reference.ptr = NULL; node_instance->reference_len = 0; result = ((const struct snmp_leaf_node*)(const void*)mn)->get_next_instance( node_oid->id, node_oid->len, node_instance); if (result == SNMP_ERR_NOERROR) { #ifdef LWIP_DEBUG if (((node_instance->access & SNMP_NODE_INSTANCE_ACCESS_READ) != 0) && (node_instance->get_value == NULL)) { LWIP_DEBUGF(SNMP_DEBUG, ("SNMP inconsistent access: node is readable but no get_value function is specified\n")); } if (((node_instance->access & SNMP_NODE_INSTANCE_ACCESS_WRITE) != 0) && (node_instance->set_value == NULL)) { LWIP_DEBUGF(SNMP_DEBUG, ("SNMP inconsistent access: node is writable but no set_value function is specified\n")); } #endif /* validate node because the node may be not accessible for example (but let the caller decide what is valid */ if ((validate_node_instance_method == NULL) || (validate_node_instance_method(node_instance, validate_node_instance_arg) == SNMP_ERR_NOERROR)) { /* node_oid "returns" the full result OID (including the instance part) */ snmp_oid_append(node_oid, node_instance->instance_oid.id, node_instance->instance_oid.len); break; } if (node_instance->release_instance != NULL) { node_instance->release_instance(node_instance); } /* the instance itself 
is not valid, ask for next instance from same node. we don't have to change any variables because node_instance->instance_oid is used as input (starting point) as well as output (resulting next OID), so we have to simply call get_next_instance method again */ } else { if (node_instance->release_instance != NULL) { node_instance->release_instance(node_instance); } /* the node has no further instance, skip to next node */ mn = snmp_mib_tree_resolve_next(mib, node_oid->id, node_oid->len, &node_instance->instance_oid); /* misuse node_instance->instance_oid as tmp buffer */ if (mn != NULL) { /* prepare for next loop */ snmp_oid_assign(node_oid, node_instance->instance_oid.id, node_instance->instance_oid.len); node_instance->instance_oid.len = 0; node_instance->node = mn; } } } if (mn != NULL) { /* we found a suitable next node, now we have to check if a inner MIB is located between the searched OID and the resulting OID. this is possible because MIB's may be located anywhere in the global tree, that means also in the subtree of another MIB (e.g. if searched OID is .2 and resulting OID is .4, then another MIB having .3 as root node may exist) */ const struct snmp_mib *intermediate_mib; intermediate_mib = snmp_get_mib_between(start_oid, start_oid_len, node_oid->id, node_oid->len); if (intermediate_mib != NULL) { /* search for first node inside intermediate mib in next loop */ if (node_instance->release_instance != NULL) { node_instance->release_instance(node_instance); } mn = NULL; mib = intermediate_mib; start_oid = mib->base_oid; start_oid_len = mib->base_oid_len; } /* else { we found out target node } */ } else { /* there is no further (suitable) node inside this MIB, search for the next MIB with following priority 1. search for inner MIB's (whose root is located inside tree of current MIB) 2. search for surrouding MIB's (where the current MIB is the inner MIB) and continue there if any 3. take the next closest MIB (not being related to the current MIB) */ const struct snmp_mib *next_mib; next_mib = snmp_get_next_mib(start_oid, start_oid_len); /* returns MIB's related to point 1 and 3 */ /* is the found MIB an inner MIB? (point 1) */ if ((next_mib != NULL) && (next_mib->base_oid_len > mib->base_oid_len) && (snmp_oid_compare(next_mib->base_oid, mib->base_oid_len, mib->base_oid, mib->base_oid_len) == 0)) { /* yes it is -> continue at inner MIB */ mib = next_mib; start_oid = mib->base_oid; start_oid_len = mib->base_oid_len; } else { /* check if there is a surrounding mib where to continue (point 2) (only possible if OID length > 1) */ if (mib->base_oid_len > 1) { mib = snmp_get_mib_from_oid(mib->base_oid, mib->base_oid_len - 1); if (mib == NULL) { /* no surrounding mib, use next mib encountered above (point 3) */ mib = next_mib; if (mib != NULL) { start_oid = mib->base_oid; start_oid_len = mib->base_oid_len; } } /* else { start_oid stays the same because we want to continue from current offset in surrounding mib (point 2) } */ } } } } if (mib == NULL) { /* loop is only left when mib == null (error) or mib_node != NULL (success) */ return SNMP_ERR_ENDOFMIBVIEW; } return SNMP_ERR_NOERROR; } /** * Searches tree for the supplied object identifier. 
* */ const struct snmp_node * snmp_mib_tree_resolve_exact(const struct snmp_mib *mib, const u32_t *oid, u8_t oid_len, u8_t* oid_instance_len) { const struct snmp_node* const* node = &mib->root_node; u8_t oid_offset = mib->base_oid_len; while ((oid_offset < oid_len) && ((*node)->node_type == SNMP_NODE_TREE)) { /* search for matching sub node */ u32_t subnode_oid = *(oid + oid_offset); u32_t i = (*(const struct snmp_tree_node* const*)node)->subnode_count; node = (*(const struct snmp_tree_node* const*)node)->subnodes; while ((i > 0) && ((*node)->oid != subnode_oid)) { node++; i--; } if (i == 0) { /* no matching subnode found */ return NULL; } oid_offset++; } if ((*node)->node_type != SNMP_NODE_TREE) { /* we found a leaf node */ *oid_instance_len = oid_len - oid_offset; return (*node); } return NULL; } const struct snmp_node* snmp_mib_tree_resolve_next(const struct snmp_mib *mib, const u32_t *oid, u8_t oid_len, struct snmp_obj_id* oidret) { u8_t oid_offset = mib->base_oid_len; const struct snmp_node* const* node; const struct snmp_tree_node* node_stack[SNMP_MAX_OBJ_ID_LEN]; s32_t nsi = 0; /* NodeStackIndex */ u32_t subnode_oid; if (mib->root_node->node_type != SNMP_NODE_TREE) { /* a next operation on a mib with only a leaf node will always return NULL because there is no other node */ return NULL; } /* first build node stack related to passed oid (as far as possible), then go backwards to determine the next node */ node_stack[nsi] = (const struct snmp_tree_node*)(const void*)mib->root_node; while (oid_offset < oid_len) { /* search for matching sub node */ u32_t i = node_stack[nsi]->subnode_count; node = node_stack[nsi]->subnodes; subnode_oid = *(oid + oid_offset); while ((i > 0) && ((*node)->oid != subnode_oid)) { node++; i--; } if ((i == 0) || ((*node)->node_type != SNMP_NODE_TREE)) { /* no (matching) tree-subnode found */ break; } nsi++; node_stack[nsi] = (const struct snmp_tree_node*)(const void*)(*node); oid_offset++; } if (oid_offset >= oid_len) { /* passed oid references a tree node -> return first useable sub node of it */ subnode_oid = 0; } else { subnode_oid = *(oid + oid_offset) + 1; } while (nsi >= 0) { const struct snmp_node* subnode = NULL; /* find next node on current level */ s32_t i = node_stack[nsi]->subnode_count; node = node_stack[nsi]->subnodes; while (i > 0) { if ((*node)->oid == subnode_oid) { subnode = *node; break; } else if (((*node)->oid > subnode_oid) && ((subnode == NULL) || ((*node)->oid < subnode->oid))) { subnode = *node; } node++; i--; } if (subnode == NULL) { /* no further node found on this level, go one level up and start searching with index of current node*/ subnode_oid = node_stack[nsi]->node.oid + 1; nsi--; } else { if (subnode->node_type == SNMP_NODE_TREE) { /* next is a tree node, go into it and start searching */ nsi++; node_stack[nsi] = (const struct snmp_tree_node*)(const void*)subnode; subnode_oid = 0; } else { /* we found a leaf node -> fill oidret and return it */ snmp_oid_assign(oidret, mib->base_oid, mib->base_oid_len); i = 1; while (i <= nsi) { oidret->id[oidret->len] = node_stack[i]->node.oid; oidret->len++; i++; } oidret->id[oidret->len] = subnode->oid; oidret->len++; return subnode; } } } return NULL; } /** initialize struct next_oid_state using this function before passing it to next_oid_check */ void snmp_next_oid_init(struct snmp_next_oid_state *state, const u32_t *start_oid, u8_t start_oid_len, u32_t *next_oid_buf, u8_t next_oid_max_len) { state->start_oid = start_oid; state->start_oid_len = start_oid_len; state->next_oid = next_oid_buf; 
state->next_oid_len = 0; state->next_oid_max_len = next_oid_max_len; state->status = SNMP_NEXT_OID_STATUS_NO_MATCH; } /** checks if the passed incomplete OID may be a possible candidate for snmp_next_oid_check(); this methid is intended if the complete OID is not yet known but it is very expensive to build it up, so it is possible to test the starting part before building up the complete oid and pass it to snmp_next_oid_check()*/ u8_t snmp_next_oid_precheck(struct snmp_next_oid_state *state, const u32_t *oid, const u8_t oid_len) { if (state->status != SNMP_NEXT_OID_STATUS_BUF_TO_SMALL) { u8_t start_oid_len = (oid_len < state->start_oid_len) ? oid_len : state->start_oid_len; /* check passed OID is located behind start offset */ if (snmp_oid_compare(oid, oid_len, state->start_oid, start_oid_len) >= 0) { /* check if new oid is located closer to start oid than current closest oid */ if ((state->status == SNMP_NEXT_OID_STATUS_NO_MATCH) || (snmp_oid_compare(oid, oid_len, state->next_oid, state->next_oid_len) < 0)) { return 1; } } } return 0; } /** checks the passed OID if it is a candidate to be the next one (get_next); returns !=0 if passed oid is currently closest, otherwise 0 */ u8_t snmp_next_oid_check(struct snmp_next_oid_state *state, const u32_t *oid, const u8_t oid_len, void* reference) { /* do not overwrite a fail result */ if (state->status != SNMP_NEXT_OID_STATUS_BUF_TO_SMALL) { /* check passed OID is located behind start offset */ if (snmp_oid_compare(oid, oid_len, state->start_oid, state->start_oid_len) > 0) { /* check if new oid is located closer to start oid than current closest oid */ if ((state->status == SNMP_NEXT_OID_STATUS_NO_MATCH) || (snmp_oid_compare(oid, oid_len, state->next_oid, state->next_oid_len) < 0)) { if (oid_len <= state->next_oid_max_len) { MEMCPY(state->next_oid, oid, oid_len * sizeof(u32_t)); state->next_oid_len = oid_len; state->status = SNMP_NEXT_OID_STATUS_SUCCESS; state->reference = reference; return 1; } else { state->status = SNMP_NEXT_OID_STATUS_BUF_TO_SMALL; } } } } return 0; } u8_t snmp_oid_in_range(const u32_t *oid_in, u8_t oid_len, const struct snmp_oid_range *oid_ranges, u8_t oid_ranges_len) { u8_t i; if (oid_len != oid_ranges_len) { return 0; } for (i = 0; i < oid_ranges_len; i++) { if ((oid_in[i] < oid_ranges[i].min) || (oid_in[i] > oid_ranges[i].max)) { return 0; } } return 1; } snmp_err_t snmp_set_test_ok(struct snmp_node_instance* instance, u16_t value_len, void* value) { LWIP_UNUSED_ARG(instance); LWIP_UNUSED_ARG(value_len); LWIP_UNUSED_ARG(value); return SNMP_ERR_NOERROR; } /** * Decodes BITS pseudotype value from ASN.1 OctetString. * * @note Because BITS pseudo type is encoded as OCTET STRING, it cannot directly * be encoded/decoded by the agent. Instead call this function as required from * get/test/set methods. * * @param buf points to a buffer holding the ASN1 octet string * @param buf_len length of octet string * @param bit_value decoded Bit value with Bit0 == LSB * @return ERR_OK if successful, ERR_ARG if bit value contains more than 32 bit */ err_t snmp_decode_bits(const u8_t *buf, u32_t buf_len, u32_t *bit_value) { u8_t b; u8_t bits_processed = 0; *bit_value = 0; while (buf_len > 0) { /* any bit set in this byte? 
*/ if (*buf != 0x00) { if (bits_processed >= 32) { /* accept more than 4 bytes, but only when no bits are set */ return ERR_VAL; } b = *buf; do { if (b & 0x80) { *bit_value |= (1 << bits_processed); } bits_processed++; b <<= 1; } while ((bits_processed & 0x07) != 0); /* &0x07 -> % 8 */ } else { bits_processed += 8; } buf_len--; buf++; } return ERR_OK; } err_t snmp_decode_truthvalue(const s32_t *asn1_value, u8_t *bool_value) { /* defined by RFC1443: TruthValue ::= TEXTUAL-CONVENTION STATUS current DESCRIPTION "Represents a boolean value." SYNTAX INTEGER { true(1), false(2) } */ if ((asn1_value == NULL) || (bool_value == NULL)) { return ERR_ARG; } if (*asn1_value == 1) { *bool_value = 1; } else if (*asn1_value == 2) { *bool_value = 0; } else { return ERR_VAL; } return ERR_OK; } /** * Encodes BITS pseudotype value into ASN.1 OctetString. * * @note Because BITS pseudo type is encoded as OCTET STRING, it cannot directly * be encoded/decoded by the agent. Instead call this function as required from * get/test/set methods. * * @param buf points to a buffer where the resulting ASN1 octet string is stored to * @param buf_len max length of the bufffer * @param bit_value Bit value to encode with Bit0 == LSB * @param bit_count Number of possible bits for the bit value (according to rfc we have to send all bits independant from their truth value) * @return number of bytes used from buffer to store the resulting OctetString */ u8_t snmp_encode_bits(u8_t *buf, u32_t buf_len, u32_t bit_value, u8_t bit_count) { u8_t len = 0; u8_t min_bytes = (bit_count + 7) >> 3; /* >>3 -> / 8 */ while ((buf_len > 0) && (bit_value != 0x00)) { s8_t i = 7; *buf = 0x00; while (i >= 0) { if (bit_value & 0x01) { *buf |= 0x01; } if (i > 0) { *buf <<= 1; } bit_value >>= 1; i--; } buf++; buf_len--; len++; } if (len < min_bytes) { buf += len; buf_len -= len; while ((len < min_bytes) && (buf_len > 0)) { *buf = 0x00; buf++; buf_len--; len++; } } return len; } u8_t snmp_encode_truthvalue(s32_t *asn1_value, u32_t bool_value) { /* defined by RFC1443: TruthValue ::= TEXTUAL-CONVENTION STATUS current DESCRIPTION "Represents a boolean value." SYNTAX INTEGER { true(1), false(2) } */ if (asn1_value == NULL) { return 0; } if (bool_value) { *asn1_value = 1; /* defined by RFC1443 */ } else { *asn1_value = 2; /* defined by RFC1443 */ } return sizeof(s32_t); } #endif /* LWIP_SNMP */
2301_81045437/classic-platform
communication/lwip-2.0.3/src/apps/snmp/snmp_core.c
C
unknown
42,711
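A common use of the OID helpers defined above is assembling the instance OID of a table row whose index is InetAddressType + InetAddress + port, as the UDP/TCP endpoint tables do. A short sketch, assuming LWIP_IPV4 or LWIP_IPV6 is enabled; the column base OID and the enterprise number 99999 are placeholders, not real assignments.

/* Sketch: build a full row-instance OID from a column base OID plus an
 * InetAddressType + InetAddress + InetPortNumber index. */
#include "lwip/apps/snmp_core.h"
#include "lwip/ip_addr.h"

static void example_build_endpoint_oid(const ip_addr_t *ip, u16_t port,
                                        struct snmp_obj_id *result)
{
  /* hypothetical column under a private MIB; replace with the real OID */
  static const u32_t column_base[] = { 1, 3, 6, 1, 4, 1, 99999, 1, 1, 2 };
  u32_t index_part[2 + 16 + 1]; /* type + up to 16 address sub-ids + port */
  u8_t index_len;

  /* copy the column OID, then append the index derived from the address */
  snmp_oid_assign(result, column_base, LWIP_ARRAYSIZE(column_base));
  index_len = snmp_ip_port_to_oid(ip, port, index_part);
  snmp_oid_append(result, index_part, index_len);
}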
/* * Copyright (c) 2001-2004 Swedish Institute of Computer Science. * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY * OF SUCH DAMAGE. * * This file is part of the lwIP TCP/IP stack. * * Author: Martin Hentschel <info@cl-soft.de> * */ #ifndef LWIP_HDR_APPS_SNMP_CORE_PRIV_H #define LWIP_HDR_APPS_SNMP_CORE_PRIV_H #include "lwip/apps/snmp_opts.h" #if LWIP_SNMP /* don't build if not configured for use in lwipopts.h */ #include "lwip/apps/snmp_core.h" #include "snmp_asn1.h" #ifdef __cplusplus extern "C" { #endif /* (outdated) SNMPv1 error codes * shall not be used by MIBS anymore, nevertheless required from core for properly answering a v1 request */ #define SNMP_ERR_NOSUCHNAME 2 #define SNMP_ERR_BADVALUE 3 #define SNMP_ERR_READONLY 4 /* error codes which are internal and shall not be used by MIBS * shall not be used by MIBS anymore, nevertheless required from core for properly answering a v1 request */ #define SNMP_ERR_TOOBIG 1 #define SNMP_ERR_AUTHORIZATIONERROR 16 #define SNMP_ERR_NOSUCHOBJECT SNMP_VARBIND_EXCEPTION_OFFSET + SNMP_ASN1_CONTEXT_VARBIND_NO_SUCH_OBJECT #define SNMP_ERR_ENDOFMIBVIEW SNMP_VARBIND_EXCEPTION_OFFSET + SNMP_ASN1_CONTEXT_VARBIND_END_OF_MIB_VIEW const struct snmp_node* snmp_mib_tree_resolve_exact(const struct snmp_mib *mib, const u32_t *oid, u8_t oid_len, u8_t* oid_instance_len); const struct snmp_node* snmp_mib_tree_resolve_next(const struct snmp_mib *mib, const u32_t *oid, u8_t oid_len, struct snmp_obj_id* oidret); typedef u8_t (*snmp_validate_node_instance_method)(struct snmp_node_instance*, void*); u8_t snmp_get_node_instance_from_oid(const u32_t *oid, u8_t oid_len, struct snmp_node_instance* node_instance); u8_t snmp_get_next_node_instance_from_oid(const u32_t *oid, u8_t oid_len, snmp_validate_node_instance_method validate_node_instance_method, void* validate_node_instance_arg, struct snmp_obj_id* node_oid, struct snmp_node_instance* node_instance); #ifdef __cplusplus } #endif #endif /* LWIP_SNMP */ #endif /* LWIP_HDR_APPS_SNMP_CORE_PRIV_H */
2301_81045437/classic-platform
communication/lwip-2.0.3/src/apps/snmp/snmp_core_priv.h
C
unknown
3,497
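These resolver functions are agent-internal and are normally called only from the message-processing code, but a small sketch makes the contract clearer: resolve an OID to a node instance, inspect its access flags, and release the instance again if the node asked for that. The instance struct is cleared defensively before the call; example_is_readable is a hypothetical helper, not part of lwIP.

/* Sketch: check whether the object addressed by 'oid' exists and is readable. */
#include <string.h>
#include "lwip/apps/snmp_core.h"
#include "snmp_core_priv.h"   /* agent-internal header (src/apps/snmp) */

static u8_t example_is_readable(const u32_t *oid, u8_t oid_len)
{
  struct snmp_node_instance instance;
  u8_t result;
  u8_t readable = 0;

  memset(&instance, 0, sizeof(instance));
  result = snmp_get_node_instance_from_oid(oid, oid_len, &instance);
  if (result == SNMP_ERR_NOERROR) {
    readable = ((instance.access & SNMP_NODE_INSTANCE_ACCESS_READ) != 0) ? 1 : 0;
    /* nodes may allocate per-instance state; give it back */
    if (instance.release_instance != NULL) {
      instance.release_instance(&instance);
    }
  }
  return readable;
}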
/**
 * @file
 * Management Information Base II (RFC1213) objects and functions.
 */

/*
 * Copyright (c) 2006 Axon Digital Design B.V., The Netherlands.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without modification,
 * are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
 * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
 * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
 * OF SUCH DAMAGE.
 *
 * Author: Dirk Ziegelmeier <dziegel@gmx.de>
 *         Christiaan Simons <christiaan.simons@axon.tv>
 */

/**
 * @defgroup snmp_mib2 MIB2
 * @ingroup snmp
 */

#include "lwip/apps/snmp_opts.h"

#if LWIP_SNMP && SNMP_LWIP_MIB2 /* don't build if not configured for use in lwipopts.h */

#if !LWIP_STATS
#error LWIP_SNMP MIB2 needs LWIP_STATS (for MIB2)
#endif
#if !MIB2_STATS
#error LWIP_SNMP MIB2 needs MIB2_STATS (for MIB2)
#endif

#include "lwip/snmp.h"
#include "lwip/apps/snmp.h"
#include "lwip/apps/snmp_core.h"
#include "lwip/apps/snmp_mib2.h"
#include "lwip/apps/snmp_scalar.h"

#if SNMP_USE_NETCONN
#include "lwip/tcpip.h"
#include "lwip/priv/tcpip_priv.h"
void
snmp_mib2_lwip_synchronizer(snmp_threadsync_called_fn fn, void* arg)
{
#if LWIP_TCPIP_CORE_LOCKING
  LOCK_TCPIP_CORE();
  fn(arg);
  UNLOCK_TCPIP_CORE();
#else
  tcpip_callback(fn, arg);
#endif
}

struct snmp_threadsync_instance snmp_mib2_lwip_locks;
#endif

/* dot3 and EtherLike MIB not planned. (transmission .1.3.6.1.2.1.10) */
/* historical (some say hysterical). (cmot .1.3.6.1.2.1.9) */
/* lwIP has no EGP, thus may not implement it. (egp .1.3.6.1.2.1.8) */

/* --- mib-2 .1.3.6.1.2.1 ----------------------------------------------------- */
extern const struct snmp_scalar_array_node snmp_mib2_snmp_root;
extern const struct snmp_tree_node snmp_mib2_udp_root;
extern const struct snmp_tree_node snmp_mib2_tcp_root;
extern const struct snmp_scalar_array_node snmp_mib2_icmp_root;
extern const struct snmp_tree_node snmp_mib2_interface_root;
extern const struct snmp_scalar_array_node snmp_mib2_system_node;
extern const struct snmp_tree_node snmp_mib2_at_root;
extern const struct snmp_tree_node snmp_mib2_ip_root;

static const struct snmp_node* const mib2_nodes[] = {
  &snmp_mib2_system_node.node.node,
  &snmp_mib2_interface_root.node,
#if LWIP_ARP && LWIP_IPV4
  &snmp_mib2_at_root.node,
#endif /* LWIP_ARP && LWIP_IPV4 */
#if LWIP_IPV4
  &snmp_mib2_ip_root.node,
#endif /* LWIP_IPV4 */
#if LWIP_ICMP
  &snmp_mib2_icmp_root.node.node,
#endif /* LWIP_ICMP */
#if LWIP_TCP
  &snmp_mib2_tcp_root.node,
#endif /* LWIP_TCP */
#if LWIP_UDP
  &snmp_mib2_udp_root.node,
#endif /* LWIP_UDP */
  &snmp_mib2_snmp_root.node.node
};

static const struct snmp_tree_node mib2_root = SNMP_CREATE_TREE_NODE(1, mib2_nodes);

static const u32_t mib2_base_oid_arr[] = { 1,3,6,1,2,1 };
const struct snmp_mib mib2 = SNMP_MIB_CREATE(mib2_base_oid_arr, &mib2_root.node);

#endif /* LWIP_SNMP && SNMP_LWIP_MIB2 */
2301_81045437/classic-platform
communication/lwip-2.0.3/src/apps/snmp/snmp_mib2.c
C
unknown
4,170
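The file above both defines the lwIP-thread synchronizer and exports the assembled mib2 MIB object. For orientation, application start-up code along the following lines is what ties the two to the agent. This is a hedged sketch against the lwIP 2.x SNMP API as generally documented (snmp_threadsync_init, snmp_set_mibs, snmp_init); verify the exact prototypes in the headers of this tree before copying it.

#include "lwip/apps/snmp.h"
#include "lwip/apps/snmp_mib2.h"
#include "lwip/apps/snmp_threadsync.h"

/* Sketch of application init code (not part of this repository). */
static const struct snmp_mib *my_mibs[] = { &mib2 };

void my_snmp_agent_setup(void)
{
  /* route MIB accesses from the SNMP worker thread through the tcpip thread,
     using the synchronizer and lock instance defined in snmp_mib2.c above */
  snmp_threadsync_init(&snmp_mib2_lwip_locks, snmp_mib2_lwip_synchronizer);

  snmp_set_mibs(my_mibs, LWIP_ARRAYSIZE(my_mibs));
  snmp_init(); /* start the agent; the netconn backend listens on UDP port 161 by default */
}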
/** * @file * Management Information Base II (RFC1213) ICMP objects and functions. */ /* * Copyright (c) 2006 Axon Digital Design B.V., The Netherlands. * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY * OF SUCH DAMAGE. * * Author: Dirk Ziegelmeier <dziegel@gmx.de> * Christiaan Simons <christiaan.simons@axon.tv> */ #include "lwip/snmp.h" #include "lwip/apps/snmp.h" #include "lwip/apps/snmp_core.h" #include "lwip/apps/snmp_mib2.h" #include "lwip/apps/snmp_table.h" #include "lwip/apps/snmp_scalar.h" #include "lwip/icmp.h" #include "lwip/stats.h" #if LWIP_SNMP && SNMP_LWIP_MIB2 && LWIP_ICMP #if SNMP_USE_NETCONN #define SYNC_NODE_NAME(node_name) node_name ## _synced #define CREATE_LWIP_SYNC_NODE(oid, node_name) \ static const struct snmp_threadsync_node node_name ## _synced = SNMP_CREATE_THREAD_SYNC_NODE(oid, &node_name.node, &snmp_mib2_lwip_locks); #else #define SYNC_NODE_NAME(node_name) node_name #define CREATE_LWIP_SYNC_NODE(oid, node_name) #endif /* --- icmp .1.3.6.1.2.1.5 ----------------------------------------------------- */ static s16_t icmp_get_value(const struct snmp_scalar_array_node_def *node, void *value) { u32_t *uint_ptr = (u32_t*)value; switch (node->oid) { case 1: /* icmpInMsgs */ *uint_ptr = STATS_GET(mib2.icmpinmsgs); return sizeof(*uint_ptr); case 2: /* icmpInErrors */ *uint_ptr = STATS_GET(mib2.icmpinerrors); return sizeof(*uint_ptr); case 3: /* icmpInDestUnreachs */ *uint_ptr = STATS_GET(mib2.icmpindestunreachs); return sizeof(*uint_ptr); case 4: /* icmpInTimeExcds */ *uint_ptr = STATS_GET(mib2.icmpintimeexcds); return sizeof(*uint_ptr); case 5: /* icmpInParmProbs */ *uint_ptr = STATS_GET(mib2.icmpinparmprobs); return sizeof(*uint_ptr); case 6: /* icmpInSrcQuenchs */ *uint_ptr = STATS_GET(mib2.icmpinsrcquenchs); return sizeof(*uint_ptr); case 7: /* icmpInRedirects */ *uint_ptr = STATS_GET(mib2.icmpinredirects); return sizeof(*uint_ptr); case 8: /* icmpInEchos */ *uint_ptr = STATS_GET(mib2.icmpinechos); return sizeof(*uint_ptr); case 9: /* icmpInEchoReps */ *uint_ptr = STATS_GET(mib2.icmpinechoreps); return sizeof(*uint_ptr); case 10: /* icmpInTimestamps */ *uint_ptr = STATS_GET(mib2.icmpintimestamps); return sizeof(*uint_ptr); case 11: /* icmpInTimestampReps */ *uint_ptr = 
STATS_GET(mib2.icmpintimestampreps); return sizeof(*uint_ptr); case 12: /* icmpInAddrMasks */ *uint_ptr = STATS_GET(mib2.icmpinaddrmasks); return sizeof(*uint_ptr); case 13: /* icmpInAddrMaskReps */ *uint_ptr = STATS_GET(mib2.icmpinaddrmaskreps); return sizeof(*uint_ptr); case 14: /* icmpOutMsgs */ *uint_ptr = STATS_GET(mib2.icmpoutmsgs); return sizeof(*uint_ptr); case 15: /* icmpOutErrors */ *uint_ptr = STATS_GET(mib2.icmpouterrors); return sizeof(*uint_ptr); case 16: /* icmpOutDestUnreachs */ *uint_ptr = STATS_GET(mib2.icmpoutdestunreachs); return sizeof(*uint_ptr); case 17: /* icmpOutTimeExcds */ *uint_ptr = STATS_GET(mib2.icmpouttimeexcds); return sizeof(*uint_ptr); case 18: /* icmpOutParmProbs: not supported -> always 0 */ *uint_ptr = 0; return sizeof(*uint_ptr); case 19: /* icmpOutSrcQuenchs: not supported -> always 0 */ *uint_ptr = 0; return sizeof(*uint_ptr); case 20: /* icmpOutRedirects: not supported -> always 0 */ *uint_ptr = 0; return sizeof(*uint_ptr); case 21: /* icmpOutEchos */ *uint_ptr = STATS_GET(mib2.icmpoutechos); return sizeof(*uint_ptr); case 22: /* icmpOutEchoReps */ *uint_ptr = STATS_GET(mib2.icmpoutechoreps); return sizeof(*uint_ptr); case 23: /* icmpOutTimestamps: not supported -> always 0 */ *uint_ptr = 0; return sizeof(*uint_ptr); case 24: /* icmpOutTimestampReps: not supported -> always 0 */ *uint_ptr = 0; return sizeof(*uint_ptr); case 25: /* icmpOutAddrMasks: not supported -> always 0 */ *uint_ptr = 0; return sizeof(*uint_ptr); case 26: /* icmpOutAddrMaskReps: not supported -> always 0 */ *uint_ptr = 0; return sizeof(*uint_ptr); default: LWIP_DEBUGF(SNMP_MIB_DEBUG,("icmp_get_value(): unknown id: %"S32_F"\n", node->oid)); break; } return 0; } static const struct snmp_scalar_array_node_def icmp_nodes[] = { { 1, SNMP_ASN1_TYPE_COUNTER, SNMP_NODE_INSTANCE_READ_ONLY}, { 2, SNMP_ASN1_TYPE_COUNTER, SNMP_NODE_INSTANCE_READ_ONLY}, { 3, SNMP_ASN1_TYPE_COUNTER, SNMP_NODE_INSTANCE_READ_ONLY}, { 4, SNMP_ASN1_TYPE_COUNTER, SNMP_NODE_INSTANCE_READ_ONLY}, { 5, SNMP_ASN1_TYPE_COUNTER, SNMP_NODE_INSTANCE_READ_ONLY}, { 6, SNMP_ASN1_TYPE_COUNTER, SNMP_NODE_INSTANCE_READ_ONLY}, { 7, SNMP_ASN1_TYPE_COUNTER, SNMP_NODE_INSTANCE_READ_ONLY}, { 8, SNMP_ASN1_TYPE_COUNTER, SNMP_NODE_INSTANCE_READ_ONLY}, { 9, SNMP_ASN1_TYPE_COUNTER, SNMP_NODE_INSTANCE_READ_ONLY}, {10, SNMP_ASN1_TYPE_COUNTER, SNMP_NODE_INSTANCE_READ_ONLY}, {11, SNMP_ASN1_TYPE_COUNTER, SNMP_NODE_INSTANCE_READ_ONLY}, {12, SNMP_ASN1_TYPE_COUNTER, SNMP_NODE_INSTANCE_READ_ONLY}, {13, SNMP_ASN1_TYPE_COUNTER, SNMP_NODE_INSTANCE_READ_ONLY}, {14, SNMP_ASN1_TYPE_COUNTER, SNMP_NODE_INSTANCE_READ_ONLY}, {15, SNMP_ASN1_TYPE_COUNTER, SNMP_NODE_INSTANCE_READ_ONLY}, {16, SNMP_ASN1_TYPE_COUNTER, SNMP_NODE_INSTANCE_READ_ONLY}, {17, SNMP_ASN1_TYPE_COUNTER, SNMP_NODE_INSTANCE_READ_ONLY}, {18, SNMP_ASN1_TYPE_COUNTER, SNMP_NODE_INSTANCE_READ_ONLY}, {19, SNMP_ASN1_TYPE_COUNTER, SNMP_NODE_INSTANCE_READ_ONLY}, {20, SNMP_ASN1_TYPE_COUNTER, SNMP_NODE_INSTANCE_READ_ONLY}, {21, SNMP_ASN1_TYPE_COUNTER, SNMP_NODE_INSTANCE_READ_ONLY}, {22, SNMP_ASN1_TYPE_COUNTER, SNMP_NODE_INSTANCE_READ_ONLY}, {23, SNMP_ASN1_TYPE_COUNTER, SNMP_NODE_INSTANCE_READ_ONLY}, {24, SNMP_ASN1_TYPE_COUNTER, SNMP_NODE_INSTANCE_READ_ONLY}, {25, SNMP_ASN1_TYPE_COUNTER, SNMP_NODE_INSTANCE_READ_ONLY}, {26, SNMP_ASN1_TYPE_COUNTER, SNMP_NODE_INSTANCE_READ_ONLY} }; const struct snmp_scalar_array_node snmp_mib2_icmp_root = SNMP_SCALAR_CREATE_ARRAY_NODE(5, icmp_nodes, icmp_get_value, NULL, NULL); #endif /* LWIP_SNMP && SNMP_LWIP_MIB2 && LWIP_ICMP */
2301_81045437/classic-platform
communication/lwip-2.0.3/src/apps/snmp/snmp_mib2_icmp.c
C
unknown
7,646
/** * @file * Management Information Base II (RFC1213) INTERFACES objects and functions. */ /* * Copyright (c) 2006 Axon Digital Design B.V., The Netherlands. * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY * OF SUCH DAMAGE. * * Author: Dirk Ziegelmeier <dziegel@gmx.de> * Christiaan Simons <christiaan.simons@axon.tv> */ #include "lwip/snmp.h" #include "lwip/apps/snmp.h" #include "lwip/apps/snmp_core.h" #include "lwip/apps/snmp_mib2.h" #include "lwip/apps/snmp_table.h" #include "lwip/apps/snmp_scalar.h" #include "lwip/netif.h" #include "lwip/stats.h" #include <string.h> #if LWIP_SNMP && SNMP_LWIP_MIB2 #if SNMP_USE_NETCONN #define SYNC_NODE_NAME(node_name) node_name ## _synced #define CREATE_LWIP_SYNC_NODE(oid, node_name) \ static const struct snmp_threadsync_node node_name ## _synced = SNMP_CREATE_THREAD_SYNC_NODE(oid, &node_name.node, &snmp_mib2_lwip_locks); #else #define SYNC_NODE_NAME(node_name) node_name #define CREATE_LWIP_SYNC_NODE(oid, node_name) #endif /* --- interfaces .1.3.6.1.2.1.2 ----------------------------------------------------- */ static s16_t interfaces_get_value(struct snmp_node_instance* instance, void* value) { if (instance->node->oid == 1) { s32_t *sint_ptr = (s32_t*)value; s32_t num_netifs = 0; struct netif *netif = netif_list; while (netif != NULL) { num_netifs++; netif = netif->next; } *sint_ptr = num_netifs; return sizeof(*sint_ptr); } return 0; } /* list of allowed value ranges for incoming OID */ static const struct snmp_oid_range interfaces_Table_oid_ranges[] = { { 1, 0xff } /* netif->num is u8_t */ }; static const u8_t iftable_ifOutQLen = 0; static const u8_t iftable_ifOperStatus_up = 1; static const u8_t iftable_ifOperStatus_down = 2; static const u8_t iftable_ifAdminStatus_up = 1; static const u8_t iftable_ifAdminStatus_lowerLayerDown = 7; static const u8_t iftable_ifAdminStatus_down = 2; static snmp_err_t interfaces_Table_get_cell_instance(const u32_t* column, const u32_t* row_oid, u8_t row_oid_len, struct snmp_node_instance* cell_instance) { u32_t ifIndex; struct netif *netif; LWIP_UNUSED_ARG(column); /* check if incoming OID length and if values are in plausible range */ if (!snmp_oid_in_range(row_oid, row_oid_len, interfaces_Table_oid_ranges, 
LWIP_ARRAYSIZE(interfaces_Table_oid_ranges))) { return SNMP_ERR_NOSUCHINSTANCE; } /* get netif index from incoming OID */ ifIndex = row_oid[0]; /* find netif with index */ netif = netif_list; while (netif != NULL) { if (netif_to_num(netif) == ifIndex) { /* store netif pointer for subsequent operations (get/test/set) */ cell_instance->reference.ptr = netif; return SNMP_ERR_NOERROR; } netif = netif->next; } /* not found */ return SNMP_ERR_NOSUCHINSTANCE; } static snmp_err_t interfaces_Table_get_next_cell_instance(const u32_t* column, struct snmp_obj_id* row_oid, struct snmp_node_instance* cell_instance) { struct netif *netif; struct snmp_next_oid_state state; u32_t result_temp[LWIP_ARRAYSIZE(interfaces_Table_oid_ranges)]; LWIP_UNUSED_ARG(column); /* init struct to search next oid */ snmp_next_oid_init(&state, row_oid->id, row_oid->len, result_temp, LWIP_ARRAYSIZE(interfaces_Table_oid_ranges)); /* iterate over all possible OIDs to find the next one */ netif = netif_list; while (netif != NULL) { u32_t test_oid[LWIP_ARRAYSIZE(interfaces_Table_oid_ranges)]; test_oid[0] = netif_to_num(netif); /* check generated OID: is it a candidate for the next one? */ snmp_next_oid_check(&state, test_oid, LWIP_ARRAYSIZE(interfaces_Table_oid_ranges), netif); netif = netif->next; } /* did we find a next one? */ if (state.status == SNMP_NEXT_OID_STATUS_SUCCESS) { snmp_oid_assign(row_oid, state.next_oid, state.next_oid_len); /* store netif pointer for subsequent operations (get/test/set) */ cell_instance->reference.ptr = /* (struct netif*) */state.reference; return SNMP_ERR_NOERROR; } /* not found */ return SNMP_ERR_NOSUCHINSTANCE; } static s16_t interfaces_Table_get_value(struct snmp_node_instance* instance, void* value) { struct netif *netif = (struct netif*)instance->reference.ptr; u32_t* value_u32 = (u32_t*)value; s32_t* value_s32 = (s32_t*)value; u16_t value_len; switch (SNMP_TABLE_GET_COLUMN_FROM_OID(instance->instance_oid.id)) { case 1: /* ifIndex */ *value_s32 = netif_to_num(netif); value_len = sizeof(*value_s32); break; case 2: /* ifDescr */ value_len = sizeof(netif->name); MEMCPY(value, netif->name, value_len); break; case 3: /* ifType */ *value_s32 = netif->link_type; value_len = sizeof(*value_s32); break; case 4: /* ifMtu */ *value_s32 = netif->mtu; value_len = sizeof(*value_s32); break; case 5: /* ifSpeed */ *value_u32 = netif->link_speed; value_len = sizeof(*value_u32); break; case 6: /* ifPhysAddress */ value_len = sizeof(netif->hwaddr); MEMCPY(value, &netif->hwaddr, value_len); break; case 7: /* ifAdminStatus */ if (netif_is_up(netif)) { *value_s32 = iftable_ifOperStatus_up; } else { *value_s32 = iftable_ifOperStatus_down; } value_len = sizeof(*value_s32); break; case 8: /* ifOperStatus */ if (netif_is_up(netif)) { if (netif_is_link_up(netif)) { *value_s32 = iftable_ifAdminStatus_up; } else { *value_s32 = iftable_ifAdminStatus_lowerLayerDown; } } else { *value_s32 = iftable_ifAdminStatus_down; } value_len = sizeof(*value_s32); break; case 9: /* ifLastChange */ *value_u32 = netif->ts; value_len = sizeof(*value_u32); break; case 10: /* ifInOctets */ *value_u32 = netif->mib2_counters.ifinoctets; value_len = sizeof(*value_u32); break; case 11: /* ifInUcastPkts */ *value_u32 = netif->mib2_counters.ifinucastpkts; value_len = sizeof(*value_u32); break; case 12: /* ifInNUcastPkts */ *value_u32 = netif->mib2_counters.ifinnucastpkts; value_len = sizeof(*value_u32); break; case 13: /* ifInDiscards */ *value_u32 = netif->mib2_counters.ifindiscards; value_len = sizeof(*value_u32); break; case 14: /* ifInErrors 
*/ *value_u32 = netif->mib2_counters.ifinerrors; value_len = sizeof(*value_u32); break; case 15: /* ifInUnkownProtos */ *value_u32 = netif->mib2_counters.ifinunknownprotos; value_len = sizeof(*value_u32); break; case 16: /* ifOutOctets */ *value_u32 = netif->mib2_counters.ifoutoctets; value_len = sizeof(*value_u32); break; case 17: /* ifOutUcastPkts */ *value_u32 = netif->mib2_counters.ifoutucastpkts; value_len = sizeof(*value_u32); break; case 18: /* ifOutNUcastPkts */ *value_u32 = netif->mib2_counters.ifoutnucastpkts; value_len = sizeof(*value_u32); break; case 19: /* ifOutDiscarts */ *value_u32 = netif->mib2_counters.ifoutdiscards; value_len = sizeof(*value_u32); break; case 20: /* ifOutErrors */ *value_u32 = netif->mib2_counters.ifouterrors; value_len = sizeof(*value_u32); break; case 21: /* ifOutQLen */ *value_u32 = iftable_ifOutQLen; value_len = sizeof(*value_u32); break; /** @note returning zeroDotZero (0.0) no media specific MIB support */ case 22: /* ifSpecific */ value_len = snmp_zero_dot_zero.len * sizeof(u32_t); MEMCPY(value, snmp_zero_dot_zero.id, value_len); break; default: return 0; } return value_len; } #if !SNMP_SAFE_REQUESTS static snmp_err_t interfaces_Table_set_test(struct snmp_node_instance* instance, u16_t len, void *value) { s32_t *sint_ptr = (s32_t*)value; /* stack should never call this method for another column, because all other columns are set to readonly */ LWIP_ASSERT("Invalid column", (SNMP_TABLE_GET_COLUMN_FROM_OID(instance->instance_oid.id) == 7)); LWIP_UNUSED_ARG(len); if (*sint_ptr == 1 || *sint_ptr == 2) { return SNMP_ERR_NOERROR; } return SNMP_ERR_WRONGVALUE; } static snmp_err_t interfaces_Table_set_value(struct snmp_node_instance* instance, u16_t len, void *value) { struct netif *netif = (struct netif*)instance->reference.ptr; s32_t *sint_ptr = (s32_t*)value; /* stack should never call this method for another column, because all other columns are set to readonly */ LWIP_ASSERT("Invalid column", (SNMP_TABLE_GET_COLUMN_FROM_OID(instance->instance_oid.id) == 7)); LWIP_UNUSED_ARG(len); if (*sint_ptr == 1) { netif_set_up(netif); } else if (*sint_ptr == 2) { netif_set_down(netif); } return SNMP_ERR_NOERROR; } #endif /* SNMP_SAFE_REQUESTS */ static const struct snmp_scalar_node interfaces_Number = SNMP_SCALAR_CREATE_NODE_READONLY(1, SNMP_ASN1_TYPE_INTEGER, interfaces_get_value); static const struct snmp_table_col_def interfaces_Table_columns[] = { { 1, SNMP_ASN1_TYPE_INTEGER, SNMP_NODE_INSTANCE_READ_ONLY }, /* ifIndex */ { 2, SNMP_ASN1_TYPE_OCTET_STRING, SNMP_NODE_INSTANCE_READ_ONLY }, /* ifDescr */ { 3, SNMP_ASN1_TYPE_INTEGER, SNMP_NODE_INSTANCE_READ_ONLY }, /* ifType */ { 4, SNMP_ASN1_TYPE_INTEGER, SNMP_NODE_INSTANCE_READ_ONLY }, /* ifMtu */ { 5, SNMP_ASN1_TYPE_GAUGE, SNMP_NODE_INSTANCE_READ_ONLY }, /* ifSpeed */ { 6, SNMP_ASN1_TYPE_OCTET_STRING, SNMP_NODE_INSTANCE_READ_ONLY }, /* ifPhysAddress */ #if !SNMP_SAFE_REQUESTS { 7, SNMP_ASN1_TYPE_INTEGER, SNMP_NODE_INSTANCE_READ_WRITE }, /* ifAdminStatus */ #else { 7, SNMP_ASN1_TYPE_INTEGER, SNMP_NODE_INSTANCE_READ_ONLY }, /* ifAdminStatus */ #endif { 8, SNMP_ASN1_TYPE_INTEGER, SNMP_NODE_INSTANCE_READ_ONLY }, /* ifOperStatus */ { 9, SNMP_ASN1_TYPE_TIMETICKS, SNMP_NODE_INSTANCE_READ_ONLY }, /* ifLastChange */ { 10, SNMP_ASN1_TYPE_COUNTER, SNMP_NODE_INSTANCE_READ_ONLY }, /* ifInOctets */ { 11, SNMP_ASN1_TYPE_COUNTER, SNMP_NODE_INSTANCE_READ_ONLY }, /* ifInUcastPkts */ { 12, SNMP_ASN1_TYPE_COUNTER, SNMP_NODE_INSTANCE_READ_ONLY }, /* ifInNUcastPkts */ { 13, SNMP_ASN1_TYPE_COUNTER, SNMP_NODE_INSTANCE_READ_ONLY }, /* 
ifInDiscarts */ { 14, SNMP_ASN1_TYPE_COUNTER, SNMP_NODE_INSTANCE_READ_ONLY }, /* ifInErrors */ { 15, SNMP_ASN1_TYPE_COUNTER, SNMP_NODE_INSTANCE_READ_ONLY }, /* ifInUnkownProtos */ { 16, SNMP_ASN1_TYPE_COUNTER, SNMP_NODE_INSTANCE_READ_ONLY }, /* ifOutOctets */ { 17, SNMP_ASN1_TYPE_COUNTER, SNMP_NODE_INSTANCE_READ_ONLY }, /* ifOutUcastPkts */ { 18, SNMP_ASN1_TYPE_COUNTER, SNMP_NODE_INSTANCE_READ_ONLY }, /* ifOutNUcastPkts */ { 19, SNMP_ASN1_TYPE_COUNTER, SNMP_NODE_INSTANCE_READ_ONLY }, /* ifOutDiscarts */ { 20, SNMP_ASN1_TYPE_COUNTER, SNMP_NODE_INSTANCE_READ_ONLY }, /* ifOutErrors */ { 21, SNMP_ASN1_TYPE_GAUGE, SNMP_NODE_INSTANCE_READ_ONLY }, /* ifOutQLen */ { 22, SNMP_ASN1_TYPE_OBJECT_ID, SNMP_NODE_INSTANCE_READ_ONLY } /* ifSpecific */ }; #if !SNMP_SAFE_REQUESTS static const struct snmp_table_node interfaces_Table = SNMP_TABLE_CREATE( 2, interfaces_Table_columns, interfaces_Table_get_cell_instance, interfaces_Table_get_next_cell_instance, interfaces_Table_get_value, interfaces_Table_set_test, interfaces_Table_set_value); #else static const struct snmp_table_node interfaces_Table = SNMP_TABLE_CREATE( 2, interfaces_Table_columns, interfaces_Table_get_cell_instance, interfaces_Table_get_next_cell_instance, interfaces_Table_get_value, NULL, NULL); #endif /* the following nodes access variables in LWIP stack from SNMP worker thread and must therefore be synced to LWIP (TCPIP) thread */ CREATE_LWIP_SYNC_NODE(1, interfaces_Number) CREATE_LWIP_SYNC_NODE(2, interfaces_Table) static const struct snmp_node* const interface_nodes[] = { &SYNC_NODE_NAME(interfaces_Number).node.node, &SYNC_NODE_NAME(interfaces_Table).node.node }; const struct snmp_tree_node snmp_mib2_interface_root = SNMP_CREATE_TREE_NODE(2, interface_nodes); #endif /* LWIP_SNMP && SNMP_LWIP_MIB2 */
2301_81045437/classic-platform
communication/lwip-2.0.3/src/apps/snmp/snmp_mib2_interfaces.c
C
unknown
13,742
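The ifTable getters above read the per-interface counters straight out of netif->mib2_counters, so they only report meaningful numbers if the network driver keeps those counters up to date. Below is a hedged sketch of the driver side, using the MIB2_STATS_NETIF_* helpers from lwip/snmp.h as they are commonly used in lwIP port examples; the function name and its arguments are illustrative only, and the macros compile away when MIB2_STATS is disabled.

#include "lwip/snmp.h"
#include "lwip/pbuf.h"
#include "lwip/netif.h"

/* Illustrative receive-path bookkeeping for an Ethernet driver (sketch). */
static void my_driver_count_rx(struct netif *netif, struct pbuf *p, int is_unicast)
{
  MIB2_STATS_NETIF_ADD(netif, ifinoctets, p->tot_len); /* feeds ifInOctets (column 10) */
  if (is_unicast) {
    MIB2_STATS_NETIF_INC(netif, ifinucastpkts);        /* feeds ifInUcastPkts (column 11) */
  } else {
    MIB2_STATS_NETIF_INC(netif, ifinnucastpkts);       /* feeds ifInNUcastPkts (column 12) */
  }
}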
/** * @file * Management Information Base II (RFC1213) IP objects and functions. */ /* * Copyright (c) 2006 Axon Digital Design B.V., The Netherlands. * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY * OF SUCH DAMAGE. * * Author: Dirk Ziegelmeier <dziegel@gmx.de> * Christiaan Simons <christiaan.simons@axon.tv> */ #include "lwip/snmp.h" #include "lwip/apps/snmp.h" #include "lwip/apps/snmp_core.h" #include "lwip/apps/snmp_mib2.h" #include "lwip/apps/snmp_table.h" #include "lwip/apps/snmp_scalar.h" #include "lwip/stats.h" #include "lwip/netif.h" #include "lwip/ip.h" #include "lwip/etharp.h" #if LWIP_SNMP && SNMP_LWIP_MIB2 #if SNMP_USE_NETCONN #define SYNC_NODE_NAME(node_name) node_name ## _synced #define CREATE_LWIP_SYNC_NODE(oid, node_name) \ static const struct snmp_threadsync_node node_name ## _synced = SNMP_CREATE_THREAD_SYNC_NODE(oid, &node_name.node, &snmp_mib2_lwip_locks); #else #define SYNC_NODE_NAME(node_name) node_name #define CREATE_LWIP_SYNC_NODE(oid, node_name) #endif #if LWIP_IPV4 /* --- ip .1.3.6.1.2.1.4 ----------------------------------------------------- */ static s16_t ip_get_value(struct snmp_node_instance* instance, void* value) { s32_t* sint_ptr = (s32_t*)value; u32_t* uint_ptr = (u32_t*)value; switch (instance->node->oid) { case 1: /* ipForwarding */ #if IP_FORWARD /* forwarding */ *sint_ptr = 1; #else /* not-forwarding */ *sint_ptr = 2; #endif return sizeof(*sint_ptr); case 2: /* ipDefaultTTL */ *sint_ptr = IP_DEFAULT_TTL; return sizeof(*sint_ptr); case 3: /* ipInReceives */ *uint_ptr = STATS_GET(mib2.ipinreceives); return sizeof(*uint_ptr); case 4: /* ipInHdrErrors */ *uint_ptr = STATS_GET(mib2.ipinhdrerrors); return sizeof(*uint_ptr); case 5: /* ipInAddrErrors */ *uint_ptr = STATS_GET(mib2.ipinaddrerrors); return sizeof(*uint_ptr); case 6: /* ipForwDatagrams */ *uint_ptr = STATS_GET(mib2.ipforwdatagrams); return sizeof(*uint_ptr); case 7: /* ipInUnknownProtos */ *uint_ptr = STATS_GET(mib2.ipinunknownprotos); return sizeof(*uint_ptr); case 8: /* ipInDiscards */ *uint_ptr = STATS_GET(mib2.ipindiscards); return sizeof(*uint_ptr); case 9: /* ipInDelivers */ *uint_ptr = STATS_GET(mib2.ipindelivers); return sizeof(*uint_ptr); case 10: /* ipOutRequests */ *uint_ptr = 
STATS_GET(mib2.ipoutrequests); return sizeof(*uint_ptr); case 11: /* ipOutDiscards */ *uint_ptr = STATS_GET(mib2.ipoutdiscards); return sizeof(*uint_ptr); case 12: /* ipOutNoRoutes */ *uint_ptr = STATS_GET(mib2.ipoutnoroutes); return sizeof(*uint_ptr); case 13: /* ipReasmTimeout */ #if IP_REASSEMBLY *sint_ptr = IP_REASS_MAXAGE; #else *sint_ptr = 0; #endif return sizeof(*sint_ptr); case 14: /* ipReasmReqds */ *uint_ptr = STATS_GET(mib2.ipreasmreqds); return sizeof(*uint_ptr); case 15: /* ipReasmOKs */ *uint_ptr = STATS_GET(mib2.ipreasmoks); return sizeof(*uint_ptr); case 16: /* ipReasmFails */ *uint_ptr = STATS_GET(mib2.ipreasmfails); return sizeof(*uint_ptr); case 17: /* ipFragOKs */ *uint_ptr = STATS_GET(mib2.ipfragoks); return sizeof(*uint_ptr); case 18: /* ipFragFails */ *uint_ptr = STATS_GET(mib2.ipfragfails); return sizeof(*uint_ptr); case 19: /* ipFragCreates */ *uint_ptr = STATS_GET(mib2.ipfragcreates); return sizeof(*uint_ptr); case 23: /* ipRoutingDiscards: not supported -> always 0 */ *uint_ptr = 0; return sizeof(*uint_ptr); default: LWIP_DEBUGF(SNMP_MIB_DEBUG,("ip_get_value(): unknown id: %"S32_F"\n", instance->node->oid)); break; } return 0; } /** * Test ip object value before setting. * * @param instance node instance * @param len return value space (in bytes) * @param value points to (varbind) space to copy value from. * * @note we allow set if the value matches the hardwired value, * otherwise return badvalue. */ static snmp_err_t ip_set_test(struct snmp_node_instance* instance, u16_t len, void *value) { snmp_err_t ret = SNMP_ERR_WRONGVALUE; s32_t *sint_ptr = (s32_t*)value; LWIP_UNUSED_ARG(len); switch (instance->node->oid) { case 1: /* ipForwarding */ #if IP_FORWARD /* forwarding */ if (*sint_ptr == 1) #else /* not-forwarding */ if (*sint_ptr == 2) #endif { ret = SNMP_ERR_NOERROR; } break; case 2: /* ipDefaultTTL */ if (*sint_ptr == IP_DEFAULT_TTL) { ret = SNMP_ERR_NOERROR; } break; default: LWIP_DEBUGF(SNMP_MIB_DEBUG,("ip_set_test(): unknown id: %"S32_F"\n", instance->node->oid)); break; } return ret; } static snmp_err_t ip_set_value(struct snmp_node_instance* instance, u16_t len, void *value) { LWIP_UNUSED_ARG(instance); LWIP_UNUSED_ARG(len); LWIP_UNUSED_ARG(value); /* nothing to do here because in set_test we only accept values being the same as our own stored value -> no need to store anything */ return SNMP_ERR_NOERROR; } /* --- ipAddrTable --- */ /* list of allowed value ranges for incoming OID */ static const struct snmp_oid_range ip_AddrTable_oid_ranges[] = { { 0, 0xff }, /* IP A */ { 0, 0xff }, /* IP B */ { 0, 0xff }, /* IP C */ { 0, 0xff } /* IP D */ }; static snmp_err_t ip_AddrTable_get_cell_value_core(struct netif *netif, const u32_t* column, union snmp_variant_value* value, u32_t* value_len) { LWIP_UNUSED_ARG(value_len); switch (*column) { case 1: /* ipAdEntAddr */ value->u32 = netif_ip4_addr(netif)->addr; break; case 2: /* ipAdEntIfIndex */ value->u32 = netif_to_num(netif); break; case 3: /* ipAdEntNetMask */ value->u32 = netif_ip4_netmask(netif)->addr; break; case 4: /* ipAdEntBcastAddr */ /* lwIP oddity, there's no broadcast address in the netif we can rely on */ value->u32 = IPADDR_BROADCAST & 1; break; case 5: /* ipAdEntReasmMaxSize */ #if IP_REASSEMBLY /* @todo The theoretical maximum is IP_REASS_MAX_PBUFS * size of the pbufs, * but only if receiving one fragmented packet at a time. * The current solution is to calculate for 2 simultaneous packets... 
*/ value->u32 = (IP_HLEN + ((IP_REASS_MAX_PBUFS/2) * (PBUF_POOL_BUFSIZE - PBUF_LINK_ENCAPSULATION_HLEN - PBUF_LINK_HLEN - IP_HLEN))); #else /** @todo returning MTU would be a bad thing and returning a wild guess like '576' isn't good either */ value->u32 = 0; #endif break; default: return SNMP_ERR_NOSUCHINSTANCE; } return SNMP_ERR_NOERROR; } static snmp_err_t ip_AddrTable_get_cell_value(const u32_t* column, const u32_t* row_oid, u8_t row_oid_len, union snmp_variant_value* value, u32_t* value_len) { ip4_addr_t ip; struct netif *netif; /* check if incoming OID length and if values are in plausible range */ if (!snmp_oid_in_range(row_oid, row_oid_len, ip_AddrTable_oid_ranges, LWIP_ARRAYSIZE(ip_AddrTable_oid_ranges))) { return SNMP_ERR_NOSUCHINSTANCE; } /* get IP from incoming OID */ snmp_oid_to_ip4(&row_oid[0], &ip); /* we know it succeeds because of oid_in_range check above */ /* find netif with requested ip */ netif = netif_list; while (netif != NULL) { if (ip4_addr_cmp(&ip, netif_ip4_addr(netif))) { /* fill in object properties */ return ip_AddrTable_get_cell_value_core(netif, column, value, value_len); } netif = netif->next; } /* not found */ return SNMP_ERR_NOSUCHINSTANCE; } static snmp_err_t ip_AddrTable_get_next_cell_instance_and_value(const u32_t* column, struct snmp_obj_id* row_oid, union snmp_variant_value* value, u32_t* value_len) { struct netif *netif; struct snmp_next_oid_state state; u32_t result_temp[LWIP_ARRAYSIZE(ip_AddrTable_oid_ranges)]; /* init struct to search next oid */ snmp_next_oid_init(&state, row_oid->id, row_oid->len, result_temp, LWIP_ARRAYSIZE(ip_AddrTable_oid_ranges)); /* iterate over all possible OIDs to find the next one */ netif = netif_list; while (netif != NULL) { u32_t test_oid[LWIP_ARRAYSIZE(ip_AddrTable_oid_ranges)]; snmp_ip4_to_oid(netif_ip4_addr(netif), &test_oid[0]); /* check generated OID: is it a candidate for the next one? */ snmp_next_oid_check(&state, test_oid, LWIP_ARRAYSIZE(ip_AddrTable_oid_ranges), netif); netif = netif->next; } /* did we find a next one? 
*/ if (state.status == SNMP_NEXT_OID_STATUS_SUCCESS) { snmp_oid_assign(row_oid, state.next_oid, state.next_oid_len); /* fill in object properties */ return ip_AddrTable_get_cell_value_core((struct netif*)state.reference, column, value, value_len); } /* not found */ return SNMP_ERR_NOSUCHINSTANCE; } /* --- ipRouteTable --- */ /* list of allowed value ranges for incoming OID */ static const struct snmp_oid_range ip_RouteTable_oid_ranges[] = { { 0, 0xff }, /* IP A */ { 0, 0xff }, /* IP B */ { 0, 0xff }, /* IP C */ { 0, 0xff }, /* IP D */ }; static snmp_err_t ip_RouteTable_get_cell_value_core(struct netif *netif, u8_t default_route, const u32_t* column, union snmp_variant_value* value, u32_t* value_len) { switch (*column) { case 1: /* ipRouteDest */ if (default_route) { /* default rte has 0.0.0.0 dest */ value->u32 = IP4_ADDR_ANY4->addr; } else { /* netifs have netaddress dest */ ip4_addr_t tmp; ip4_addr_get_network(&tmp, netif_ip4_addr(netif), netif_ip4_netmask(netif)); value->u32 = tmp.addr; } break; case 2: /* ipRouteIfIndex */ value->u32 = netif_to_num(netif); break; case 3: /* ipRouteMetric1 */ if (default_route) { value->s32 = 1; /* default */ } else { value->s32 = 0; /* normal */ } break; case 4: /* ipRouteMetric2 */ case 5: /* ipRouteMetric3 */ case 6: /* ipRouteMetric4 */ value->s32 = -1; /* none */ break; case 7: /* ipRouteNextHop */ if (default_route) { /* default rte: gateway */ value->u32 = netif_ip4_gw(netif)->addr; } else { /* other rtes: netif ip_addr */ value->u32 = netif_ip4_addr(netif)->addr; } break; case 8: /* ipRouteType */ if (default_route) { /* default rte is indirect */ value->u32 = 4; /* indirect */ } else { /* other rtes are direct */ value->u32 = 3; /* direct */ } break; case 9: /* ipRouteProto */ /* locally defined routes */ value->u32 = 2; /* local */ break; case 10: /* ipRouteAge */ /* @todo (sysuptime - timestamp last change) / 100 */ value->u32 = 0; break; case 11: /* ipRouteMask */ if (default_route) { /* default rte use 0.0.0.0 mask */ value->u32 = IP4_ADDR_ANY4->addr; } else { /* other rtes use netmask */ value->u32 = netif_ip4_netmask(netif)->addr; } break; case 12: /* ipRouteMetric5 */ value->s32 = -1; /* none */ break; case 13: /* ipRouteInfo */ value->const_ptr = snmp_zero_dot_zero.id; *value_len = snmp_zero_dot_zero.len * sizeof(u32_t); break; default: return SNMP_ERR_NOSUCHINSTANCE; } return SNMP_ERR_NOERROR; } static snmp_err_t ip_RouteTable_get_cell_value(const u32_t* column, const u32_t* row_oid, u8_t row_oid_len, union snmp_variant_value* value, u32_t* value_len) { ip4_addr_t test_ip; struct netif *netif; /* check if incoming OID length and if values are in plausible range */ if (!snmp_oid_in_range(row_oid, row_oid_len, ip_RouteTable_oid_ranges, LWIP_ARRAYSIZE(ip_RouteTable_oid_ranges))) { return SNMP_ERR_NOSUCHINSTANCE; } /* get IP and port from incoming OID */ snmp_oid_to_ip4(&row_oid[0], &test_ip); /* we know it succeeds because of oid_in_range check above */ /* default route is on default netif */ if (ip4_addr_isany_val(test_ip) && (netif_default != NULL)) { /* fill in object properties */ return ip_RouteTable_get_cell_value_core(netif_default, 1, column, value, value_len); } /* find netif with requested route */ netif = netif_list; while (netif != NULL) { ip4_addr_t dst; ip4_addr_get_network(&dst, netif_ip4_addr(netif), netif_ip4_netmask(netif)); if (ip4_addr_cmp(&dst, &test_ip)) { /* fill in object properties */ return ip_RouteTable_get_cell_value_core(netif, 0, column, value, value_len); } netif = netif->next; } /* not found */ return 
SNMP_ERR_NOSUCHINSTANCE; } static snmp_err_t ip_RouteTable_get_next_cell_instance_and_value(const u32_t* column, struct snmp_obj_id* row_oid, union snmp_variant_value* value, u32_t* value_len) { struct netif *netif; struct snmp_next_oid_state state; u32_t result_temp[LWIP_ARRAYSIZE(ip_RouteTable_oid_ranges)]; u32_t test_oid[LWIP_ARRAYSIZE(ip_RouteTable_oid_ranges)]; /* init struct to search next oid */ snmp_next_oid_init(&state, row_oid->id, row_oid->len, result_temp, LWIP_ARRAYSIZE(ip_RouteTable_oid_ranges)); /* check default route */ if (netif_default != NULL) { snmp_ip4_to_oid(IP4_ADDR_ANY4, &test_oid[0]); snmp_next_oid_check(&state, test_oid, LWIP_ARRAYSIZE(ip_RouteTable_oid_ranges), netif_default); } /* iterate over all possible OIDs to find the next one */ netif = netif_list; while (netif != NULL) { ip4_addr_t dst; ip4_addr_get_network(&dst, netif_ip4_addr(netif), netif_ip4_netmask(netif)); /* check generated OID: is it a candidate for the next one? */ if (!ip4_addr_isany_val(dst)) { snmp_ip4_to_oid(&dst, &test_oid[0]); snmp_next_oid_check(&state, test_oid, LWIP_ARRAYSIZE(ip_RouteTable_oid_ranges), netif); } netif = netif->next; } /* did we find a next one? */ if (state.status == SNMP_NEXT_OID_STATUS_SUCCESS) { ip4_addr_t dst; snmp_oid_to_ip4(&result_temp[0], &dst); snmp_oid_assign(row_oid, state.next_oid, state.next_oid_len); /* fill in object properties */ return ip_RouteTable_get_cell_value_core((struct netif*)state.reference, ip4_addr_isany_val(dst), column, value, value_len); } else { /* not found */ return SNMP_ERR_NOSUCHINSTANCE; } } #if LWIP_ARP && LWIP_IPV4 /* --- ipNetToMediaTable --- */ /* list of allowed value ranges for incoming OID */ static const struct snmp_oid_range ip_NetToMediaTable_oid_ranges[] = { { 1, 0xff }, /* IfIndex */ { 0, 0xff }, /* IP A */ { 0, 0xff }, /* IP B */ { 0, 0xff }, /* IP C */ { 0, 0xff } /* IP D */ }; static snmp_err_t ip_NetToMediaTable_get_cell_value_core(u8_t arp_table_index, const u32_t* column, union snmp_variant_value* value, u32_t* value_len) { ip4_addr_t *ip; struct netif *netif; struct eth_addr *ethaddr; etharp_get_entry(arp_table_index, &ip, &netif, &ethaddr); /* value */ switch (*column) { case 1: /* atIfIndex / ipNetToMediaIfIndex */ value->u32 = netif_to_num(netif); break; case 2: /* atPhysAddress / ipNetToMediaPhysAddress */ value->ptr = ethaddr; *value_len = sizeof(*ethaddr); break; case 3: /* atNetAddress / ipNetToMediaNetAddress */ value->u32 = ip->addr; break; case 4: /* ipNetToMediaType */ value->u32 = 3; /* dynamic*/ break; default: return SNMP_ERR_NOSUCHINSTANCE; } return SNMP_ERR_NOERROR; } static snmp_err_t ip_NetToMediaTable_get_cell_value(const u32_t* column, const u32_t* row_oid, u8_t row_oid_len, union snmp_variant_value* value, u32_t* value_len) { ip4_addr_t ip_in; u8_t netif_index; u8_t i; /* check if incoming OID length and if values are in plausible range */ if (!snmp_oid_in_range(row_oid, row_oid_len, ip_NetToMediaTable_oid_ranges, LWIP_ARRAYSIZE(ip_NetToMediaTable_oid_ranges))) { return SNMP_ERR_NOSUCHINSTANCE; } /* get IP from incoming OID */ netif_index = (u8_t)row_oid[0]; snmp_oid_to_ip4(&row_oid[1], &ip_in); /* we know it succeeds because of oid_in_range check above */ /* find requested entry */ for (i=0; i<ARP_TABLE_SIZE; i++) { ip4_addr_t *ip; struct netif *netif; struct eth_addr *ethaddr; if (etharp_get_entry(i, &ip, &netif, &ethaddr)) { if ((netif_index == netif_to_num(netif)) && ip4_addr_cmp(&ip_in, ip)) { /* fill in object properties */ return ip_NetToMediaTable_get_cell_value_core(i, column, value, 
value_len); } } } /* not found */ return SNMP_ERR_NOSUCHINSTANCE; } static snmp_err_t ip_NetToMediaTable_get_next_cell_instance_and_value(const u32_t* column, struct snmp_obj_id* row_oid, union snmp_variant_value* value, u32_t* value_len) { u8_t i; struct snmp_next_oid_state state; u32_t result_temp[LWIP_ARRAYSIZE(ip_NetToMediaTable_oid_ranges)]; /* init struct to search next oid */ snmp_next_oid_init(&state, row_oid->id, row_oid->len, result_temp, LWIP_ARRAYSIZE(ip_NetToMediaTable_oid_ranges)); /* iterate over all possible OIDs to find the next one */ for (i=0; i<ARP_TABLE_SIZE; i++) { ip4_addr_t *ip; struct netif *netif; struct eth_addr *ethaddr; if (etharp_get_entry(i, &ip, &netif, &ethaddr)) { u32_t test_oid[LWIP_ARRAYSIZE(ip_NetToMediaTable_oid_ranges)]; test_oid[0] = netif_to_num(netif); snmp_ip4_to_oid(ip, &test_oid[1]); /* check generated OID: is it a candidate for the next one? */ snmp_next_oid_check(&state, test_oid, LWIP_ARRAYSIZE(ip_NetToMediaTable_oid_ranges), LWIP_PTR_NUMERIC_CAST(void*, i)); } } /* did we find a next one? */ if (state.status == SNMP_NEXT_OID_STATUS_SUCCESS) { snmp_oid_assign(row_oid, state.next_oid, state.next_oid_len); /* fill in object properties */ return ip_NetToMediaTable_get_cell_value_core(LWIP_PTR_NUMERIC_CAST(u8_t, state.reference), column, value, value_len); } /* not found */ return SNMP_ERR_NOSUCHINSTANCE; } #endif /* LWIP_ARP && LWIP_IPV4 */ static const struct snmp_scalar_node ip_Forwarding = SNMP_SCALAR_CREATE_NODE(1, SNMP_NODE_INSTANCE_READ_WRITE, SNMP_ASN1_TYPE_INTEGER, ip_get_value, ip_set_test, ip_set_value); static const struct snmp_scalar_node ip_DefaultTTL = SNMP_SCALAR_CREATE_NODE(2, SNMP_NODE_INSTANCE_READ_WRITE, SNMP_ASN1_TYPE_INTEGER, ip_get_value, ip_set_test, ip_set_value); static const struct snmp_scalar_node ip_InReceives = SNMP_SCALAR_CREATE_NODE_READONLY(3, SNMP_ASN1_TYPE_COUNTER, ip_get_value); static const struct snmp_scalar_node ip_InHdrErrors = SNMP_SCALAR_CREATE_NODE_READONLY(4, SNMP_ASN1_TYPE_COUNTER, ip_get_value); static const struct snmp_scalar_node ip_InAddrErrors = SNMP_SCALAR_CREATE_NODE_READONLY(5, SNMP_ASN1_TYPE_COUNTER, ip_get_value); static const struct snmp_scalar_node ip_ForwDatagrams = SNMP_SCALAR_CREATE_NODE_READONLY(6, SNMP_ASN1_TYPE_COUNTER, ip_get_value); static const struct snmp_scalar_node ip_InUnknownProtos = SNMP_SCALAR_CREATE_NODE_READONLY(7, SNMP_ASN1_TYPE_COUNTER, ip_get_value); static const struct snmp_scalar_node ip_InDiscards = SNMP_SCALAR_CREATE_NODE_READONLY(8, SNMP_ASN1_TYPE_COUNTER, ip_get_value); static const struct snmp_scalar_node ip_InDelivers = SNMP_SCALAR_CREATE_NODE_READONLY(9, SNMP_ASN1_TYPE_COUNTER, ip_get_value); static const struct snmp_scalar_node ip_OutRequests = SNMP_SCALAR_CREATE_NODE_READONLY(10, SNMP_ASN1_TYPE_COUNTER, ip_get_value); static const struct snmp_scalar_node ip_OutDiscards = SNMP_SCALAR_CREATE_NODE_READONLY(11, SNMP_ASN1_TYPE_COUNTER, ip_get_value); static const struct snmp_scalar_node ip_OutNoRoutes = SNMP_SCALAR_CREATE_NODE_READONLY(12, SNMP_ASN1_TYPE_COUNTER, ip_get_value); static const struct snmp_scalar_node ip_ReasmTimeout = SNMP_SCALAR_CREATE_NODE_READONLY(13, SNMP_ASN1_TYPE_INTEGER, ip_get_value); static const struct snmp_scalar_node ip_ReasmReqds = SNMP_SCALAR_CREATE_NODE_READONLY(14, SNMP_ASN1_TYPE_COUNTER, ip_get_value); static const struct snmp_scalar_node ip_ReasmOKs = SNMP_SCALAR_CREATE_NODE_READONLY(15, SNMP_ASN1_TYPE_COUNTER, ip_get_value); static const struct snmp_scalar_node ip_ReasmFails = SNMP_SCALAR_CREATE_NODE_READONLY(16, 
SNMP_ASN1_TYPE_COUNTER, ip_get_value); static const struct snmp_scalar_node ip_FragOKs = SNMP_SCALAR_CREATE_NODE_READONLY(17, SNMP_ASN1_TYPE_COUNTER, ip_get_value); static const struct snmp_scalar_node ip_FragFails = SNMP_SCALAR_CREATE_NODE_READONLY(18, SNMP_ASN1_TYPE_COUNTER, ip_get_value); static const struct snmp_scalar_node ip_FragCreates = SNMP_SCALAR_CREATE_NODE_READONLY(19, SNMP_ASN1_TYPE_COUNTER, ip_get_value); static const struct snmp_scalar_node ip_RoutingDiscards = SNMP_SCALAR_CREATE_NODE_READONLY(23, SNMP_ASN1_TYPE_COUNTER, ip_get_value); static const struct snmp_table_simple_col_def ip_AddrTable_columns[] = { { 1, SNMP_ASN1_TYPE_IPADDR, SNMP_VARIANT_VALUE_TYPE_U32 }, /* ipAdEntAddr */ { 2, SNMP_ASN1_TYPE_INTEGER, SNMP_VARIANT_VALUE_TYPE_U32 }, /* ipAdEntIfIndex */ { 3, SNMP_ASN1_TYPE_IPADDR, SNMP_VARIANT_VALUE_TYPE_U32 }, /* ipAdEntNetMask */ { 4, SNMP_ASN1_TYPE_INTEGER, SNMP_VARIANT_VALUE_TYPE_U32 }, /* ipAdEntBcastAddr */ { 5, SNMP_ASN1_TYPE_INTEGER, SNMP_VARIANT_VALUE_TYPE_U32 } /* ipAdEntReasmMaxSize */ }; static const struct snmp_table_simple_node ip_AddrTable = SNMP_TABLE_CREATE_SIMPLE(20, ip_AddrTable_columns, ip_AddrTable_get_cell_value, ip_AddrTable_get_next_cell_instance_and_value); static const struct snmp_table_simple_col_def ip_RouteTable_columns[] = { { 1, SNMP_ASN1_TYPE_IPADDR, SNMP_VARIANT_VALUE_TYPE_U32 }, /* ipRouteDest */ { 2, SNMP_ASN1_TYPE_INTEGER, SNMP_VARIANT_VALUE_TYPE_U32 }, /* ipRouteIfIndex */ { 3, SNMP_ASN1_TYPE_INTEGER, SNMP_VARIANT_VALUE_TYPE_S32 }, /* ipRouteMetric1 */ { 4, SNMP_ASN1_TYPE_INTEGER, SNMP_VARIANT_VALUE_TYPE_S32 }, /* ipRouteMetric2 */ { 5, SNMP_ASN1_TYPE_INTEGER, SNMP_VARIANT_VALUE_TYPE_S32 }, /* ipRouteMetric3 */ { 6, SNMP_ASN1_TYPE_INTEGER, SNMP_VARIANT_VALUE_TYPE_S32 }, /* ipRouteMetric4 */ { 7, SNMP_ASN1_TYPE_IPADDR, SNMP_VARIANT_VALUE_TYPE_U32 }, /* ipRouteNextHop */ { 8, SNMP_ASN1_TYPE_INTEGER, SNMP_VARIANT_VALUE_TYPE_U32 }, /* ipRouteType */ { 9, SNMP_ASN1_TYPE_INTEGER, SNMP_VARIANT_VALUE_TYPE_U32 }, /* ipRouteProto */ { 10, SNMP_ASN1_TYPE_INTEGER, SNMP_VARIANT_VALUE_TYPE_U32 }, /* ipRouteAge */ { 11, SNMP_ASN1_TYPE_IPADDR, SNMP_VARIANT_VALUE_TYPE_U32 }, /* ipRouteMask */ { 12, SNMP_ASN1_TYPE_INTEGER, SNMP_VARIANT_VALUE_TYPE_S32 }, /* ipRouteMetric5 */ { 13, SNMP_ASN1_TYPE_OBJECT_ID, SNMP_VARIANT_VALUE_TYPE_PTR } /* ipRouteInfo */ }; static const struct snmp_table_simple_node ip_RouteTable = SNMP_TABLE_CREATE_SIMPLE(21, ip_RouteTable_columns, ip_RouteTable_get_cell_value, ip_RouteTable_get_next_cell_instance_and_value); #endif /* LWIP_IPV4 */ #if LWIP_ARP && LWIP_IPV4 static const struct snmp_table_simple_col_def ip_NetToMediaTable_columns[] = { { 1, SNMP_ASN1_TYPE_INTEGER, SNMP_VARIANT_VALUE_TYPE_U32 }, /* ipNetToMediaIfIndex */ { 2, SNMP_ASN1_TYPE_OCTET_STRING, SNMP_VARIANT_VALUE_TYPE_PTR }, /* ipNetToMediaPhysAddress */ { 3, SNMP_ASN1_TYPE_IPADDR, SNMP_VARIANT_VALUE_TYPE_U32 }, /* ipNetToMediaNetAddress */ { 4, SNMP_ASN1_TYPE_INTEGER, SNMP_VARIANT_VALUE_TYPE_U32 } /* ipNetToMediaType */ }; static const struct snmp_table_simple_node ip_NetToMediaTable = SNMP_TABLE_CREATE_SIMPLE(22, ip_NetToMediaTable_columns, ip_NetToMediaTable_get_cell_value, ip_NetToMediaTable_get_next_cell_instance_and_value); #endif /* LWIP_ARP && LWIP_IPV4 */ #if LWIP_IPV4 /* the following nodes access variables in LWIP stack from SNMP worker thread and must therefore be synced to LWIP (TCPIP) thread */ CREATE_LWIP_SYNC_NODE( 1, ip_Forwarding) CREATE_LWIP_SYNC_NODE( 2, ip_DefaultTTL) CREATE_LWIP_SYNC_NODE( 3, ip_InReceives) CREATE_LWIP_SYNC_NODE( 4, 
ip_InHdrErrors) CREATE_LWIP_SYNC_NODE( 5, ip_InAddrErrors) CREATE_LWIP_SYNC_NODE( 6, ip_ForwDatagrams) CREATE_LWIP_SYNC_NODE( 7, ip_InUnknownProtos) CREATE_LWIP_SYNC_NODE( 8, ip_InDiscards) CREATE_LWIP_SYNC_NODE( 9, ip_InDelivers) CREATE_LWIP_SYNC_NODE(10, ip_OutRequests) CREATE_LWIP_SYNC_NODE(11, ip_OutDiscards) CREATE_LWIP_SYNC_NODE(12, ip_OutNoRoutes) CREATE_LWIP_SYNC_NODE(13, ip_ReasmTimeout) CREATE_LWIP_SYNC_NODE(14, ip_ReasmReqds) CREATE_LWIP_SYNC_NODE(15, ip_ReasmOKs) CREATE_LWIP_SYNC_NODE(15, ip_ReasmFails) CREATE_LWIP_SYNC_NODE(17, ip_FragOKs) CREATE_LWIP_SYNC_NODE(18, ip_FragFails) CREATE_LWIP_SYNC_NODE(19, ip_FragCreates) CREATE_LWIP_SYNC_NODE(20, ip_AddrTable) CREATE_LWIP_SYNC_NODE(21, ip_RouteTable) #if LWIP_ARP CREATE_LWIP_SYNC_NODE(22, ip_NetToMediaTable) #endif /* LWIP_ARP */ CREATE_LWIP_SYNC_NODE(23, ip_RoutingDiscards) static const struct snmp_node* const ip_nodes[] = { &SYNC_NODE_NAME(ip_Forwarding).node.node, &SYNC_NODE_NAME(ip_DefaultTTL).node.node, &SYNC_NODE_NAME(ip_InReceives).node.node, &SYNC_NODE_NAME(ip_InHdrErrors).node.node, &SYNC_NODE_NAME(ip_InAddrErrors).node.node, &SYNC_NODE_NAME(ip_ForwDatagrams).node.node, &SYNC_NODE_NAME(ip_InUnknownProtos).node.node, &SYNC_NODE_NAME(ip_InDiscards).node.node, &SYNC_NODE_NAME(ip_InDelivers).node.node, &SYNC_NODE_NAME(ip_OutRequests).node.node, &SYNC_NODE_NAME(ip_OutDiscards).node.node, &SYNC_NODE_NAME(ip_OutNoRoutes).node.node, &SYNC_NODE_NAME(ip_ReasmTimeout).node.node, &SYNC_NODE_NAME(ip_ReasmReqds).node.node, &SYNC_NODE_NAME(ip_ReasmOKs).node.node, &SYNC_NODE_NAME(ip_ReasmFails).node.node, &SYNC_NODE_NAME(ip_FragOKs).node.node, &SYNC_NODE_NAME(ip_FragFails).node.node, &SYNC_NODE_NAME(ip_FragCreates).node.node, &SYNC_NODE_NAME(ip_AddrTable).node.node, &SYNC_NODE_NAME(ip_RouteTable).node.node, #if LWIP_ARP &SYNC_NODE_NAME(ip_NetToMediaTable).node.node, #endif /* LWIP_ARP */ &SYNC_NODE_NAME(ip_RoutingDiscards).node.node }; const struct snmp_tree_node snmp_mib2_ip_root = SNMP_CREATE_TREE_NODE(4, ip_nodes); #endif /* LWIP_IPV4 */ /* --- at .1.3.6.1.2.1.3 ----------------------------------------------------- */ #if LWIP_ARP && LWIP_IPV4 /* at node table is a subset of ip_nettomedia table (same rows but less columns) */ static const struct snmp_table_simple_col_def at_Table_columns[] = { { 1, SNMP_ASN1_TYPE_INTEGER, SNMP_VARIANT_VALUE_TYPE_U32 }, /* atIfIndex */ { 2, SNMP_ASN1_TYPE_OCTET_STRING, SNMP_VARIANT_VALUE_TYPE_PTR }, /* atPhysAddress */ { 3, SNMP_ASN1_TYPE_IPADDR, SNMP_VARIANT_VALUE_TYPE_U32 } /* atNetAddress */ }; static const struct snmp_table_simple_node at_Table = SNMP_TABLE_CREATE_SIMPLE(1, at_Table_columns, ip_NetToMediaTable_get_cell_value, ip_NetToMediaTable_get_next_cell_instance_and_value); /* the following nodes access variables in LWIP stack from SNMP worker thread and must therefore be synced to LWIP (TCPIP) thread */ CREATE_LWIP_SYNC_NODE(1, at_Table) static const struct snmp_node* const at_nodes[] = { &SYNC_NODE_NAME(at_Table).node.node }; const struct snmp_tree_node snmp_mib2_at_root = SNMP_CREATE_TREE_NODE(3, at_nodes); #endif /* LWIP_ARP && LWIP_IPV4 */ #endif /* LWIP_SNMP && SNMP_LWIP_MIB2 */
2301_81045437/classic-platform
communication/lwip-2.0.3/src/apps/snmp/snmp_mib2_ip.c
C
unknown
28,823
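In ipAddrTable and ipRouteTable above, the row index is an IPv4 address spread over four OID sub-identifiers, which is why the handlers convert back and forth with snmp_oid_to_ip4() and snmp_ip4_to_oid(). A small hedged sketch of composing such a row index with the same helper follows; the example address and function name are illustrative only.

#include "lwip/apps/snmp_core.h"
#include "lwip/ip4_addr.h"

/* Sketch: build the instance part of e.g. ipAdEntIfIndex.192.168.1.10 */
static u8_t my_make_addr_row_oid(u32_t *row_oid /* at least 4 entries */)
{
  ip4_addr_t addr;
  IP4_ADDR(&addr, 192, 168, 1, 10);
  snmp_ip4_to_oid(&addr, row_oid); /* row_oid[0..3] = 192, 168, 1, 10 */
  return 4;                        /* length of the row index in sub-identifiers */
}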
/** * @file * Management Information Base II (RFC1213) SNMP objects and functions. */ /* * Copyright (c) 2006 Axon Digital Design B.V., The Netherlands. * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY * OF SUCH DAMAGE. * * Author: Dirk Ziegelmeier <dziegel@gmx.de> * Christiaan Simons <christiaan.simons@axon.tv> */ #include "lwip/snmp.h" #include "lwip/apps/snmp.h" #include "lwip/apps/snmp_core.h" #include "lwip/apps/snmp_mib2.h" #include "lwip/apps/snmp_scalar.h" #if LWIP_SNMP && SNMP_LWIP_MIB2 #define MIB2_AUTH_TRAPS_ENABLED 1 #define MIB2_AUTH_TRAPS_DISABLED 2 /* --- snmp .1.3.6.1.2.1.11 ----------------------------------------------------- */ static s16_t snmp_get_value(const struct snmp_scalar_array_node_def *node, void *value) { u32_t *uint_ptr = (u32_t*)value; switch (node->oid) { case 1: /* snmpInPkts */ *uint_ptr = snmp_stats.inpkts; break; case 2: /* snmpOutPkts */ *uint_ptr = snmp_stats.outpkts; break; case 3: /* snmpInBadVersions */ *uint_ptr = snmp_stats.inbadversions; break; case 4: /* snmpInBadCommunityNames */ *uint_ptr = snmp_stats.inbadcommunitynames; break; case 5: /* snmpInBadCommunityUses */ *uint_ptr = snmp_stats.inbadcommunityuses; break; case 6: /* snmpInASNParseErrs */ *uint_ptr = snmp_stats.inasnparseerrs; break; case 8: /* snmpInTooBigs */ *uint_ptr = snmp_stats.intoobigs; break; case 9: /* snmpInNoSuchNames */ *uint_ptr = snmp_stats.innosuchnames; break; case 10: /* snmpInBadValues */ *uint_ptr = snmp_stats.inbadvalues; break; case 11: /* snmpInReadOnlys */ *uint_ptr = snmp_stats.inreadonlys; break; case 12: /* snmpInGenErrs */ *uint_ptr = snmp_stats.ingenerrs; break; case 13: /* snmpInTotalReqVars */ *uint_ptr = snmp_stats.intotalreqvars; break; case 14: /* snmpInTotalSetVars */ *uint_ptr = snmp_stats.intotalsetvars; break; case 15: /* snmpInGetRequests */ *uint_ptr = snmp_stats.ingetrequests; break; case 16: /* snmpInGetNexts */ *uint_ptr = snmp_stats.ingetnexts; break; case 17: /* snmpInSetRequests */ *uint_ptr = snmp_stats.insetrequests; break; case 18: /* snmpInGetResponses */ *uint_ptr = snmp_stats.ingetresponses; break; case 19: /* snmpInTraps */ *uint_ptr = snmp_stats.intraps; break; case 20: /* snmpOutTooBigs */ *uint_ptr = snmp_stats.outtoobigs; break; 
case 21: /* snmpOutNoSuchNames */ *uint_ptr = snmp_stats.outnosuchnames; break; case 22: /* snmpOutBadValues */ *uint_ptr = snmp_stats.outbadvalues; break; case 24: /* snmpOutGenErrs */ *uint_ptr = snmp_stats.outgenerrs; break; case 25: /* snmpOutGetRequests */ *uint_ptr = snmp_stats.outgetrequests; break; case 26: /* snmpOutGetNexts */ *uint_ptr = snmp_stats.outgetnexts; break; case 27: /* snmpOutSetRequests */ *uint_ptr = snmp_stats.outsetrequests; break; case 28: /* snmpOutGetResponses */ *uint_ptr = snmp_stats.outgetresponses; break; case 29: /* snmpOutTraps */ *uint_ptr = snmp_stats.outtraps; break; case 30: /* snmpEnableAuthenTraps */ if (snmp_get_auth_traps_enabled() == SNMP_AUTH_TRAPS_DISABLED) { *uint_ptr = MIB2_AUTH_TRAPS_DISABLED; } else { *uint_ptr = MIB2_AUTH_TRAPS_ENABLED; } break; case 31: /* snmpSilentDrops */ *uint_ptr = 0; /* not supported */ break; case 32: /* snmpProxyDrops */ *uint_ptr = 0; /* not supported */ break; default: LWIP_DEBUGF(SNMP_MIB_DEBUG,("snmp_get_value(): unknown id: %"S32_F"\n", node->oid)); return 0; } return sizeof(*uint_ptr); } static snmp_err_t snmp_set_test(const struct snmp_scalar_array_node_def *node, u16_t len, void *value) { snmp_err_t ret = SNMP_ERR_WRONGVALUE; LWIP_UNUSED_ARG(len); if (node->oid == 30) { /* snmpEnableAuthenTraps */ s32_t *sint_ptr = (s32_t*)value; /* we should have writable non-volatile mem here */ if ((*sint_ptr == MIB2_AUTH_TRAPS_DISABLED) || (*sint_ptr == MIB2_AUTH_TRAPS_ENABLED)) { ret = SNMP_ERR_NOERROR; } } return ret; } static snmp_err_t snmp_set_value(const struct snmp_scalar_array_node_def *node, u16_t len, void *value) { LWIP_UNUSED_ARG(len); if (node->oid == 30) { /* snmpEnableAuthenTraps */ s32_t *sint_ptr = (s32_t*)value; if (*sint_ptr == MIB2_AUTH_TRAPS_DISABLED) { snmp_set_auth_traps_enabled(SNMP_AUTH_TRAPS_DISABLED); } else { snmp_set_auth_traps_enabled(SNMP_AUTH_TRAPS_ENABLED); } } return SNMP_ERR_NOERROR; } /* the following nodes access variables in SNMP stack (snmp_stats) from SNMP worker thread -> OK, no sync needed */ static const struct snmp_scalar_array_node_def snmp_nodes[] = { { 1, SNMP_ASN1_TYPE_COUNTER, SNMP_NODE_INSTANCE_READ_ONLY}, /* snmpInPkts */ { 2, SNMP_ASN1_TYPE_COUNTER, SNMP_NODE_INSTANCE_READ_ONLY}, /* snmpOutPkts */ { 3, SNMP_ASN1_TYPE_COUNTER, SNMP_NODE_INSTANCE_READ_ONLY}, /* snmpInBadVersions */ { 4, SNMP_ASN1_TYPE_COUNTER, SNMP_NODE_INSTANCE_READ_ONLY}, /* snmpInBadCommunityNames */ { 5, SNMP_ASN1_TYPE_COUNTER, SNMP_NODE_INSTANCE_READ_ONLY}, /* snmpInBadCommunityUses */ { 6, SNMP_ASN1_TYPE_COUNTER, SNMP_NODE_INSTANCE_READ_ONLY}, /* snmpInASNParseErrs */ { 8, SNMP_ASN1_TYPE_COUNTER, SNMP_NODE_INSTANCE_READ_ONLY}, /* snmpInTooBigs */ { 9, SNMP_ASN1_TYPE_COUNTER, SNMP_NODE_INSTANCE_READ_ONLY}, /* snmpInNoSuchNames */ {10, SNMP_ASN1_TYPE_COUNTER, SNMP_NODE_INSTANCE_READ_ONLY}, /* snmpInBadValues */ {11, SNMP_ASN1_TYPE_COUNTER, SNMP_NODE_INSTANCE_READ_ONLY}, /* snmpInReadOnlys */ {12, SNMP_ASN1_TYPE_COUNTER, SNMP_NODE_INSTANCE_READ_ONLY}, /* snmpInGenErrs */ {13, SNMP_ASN1_TYPE_COUNTER, SNMP_NODE_INSTANCE_READ_ONLY}, /* snmpInTotalReqVars */ {14, SNMP_ASN1_TYPE_COUNTER, SNMP_NODE_INSTANCE_READ_ONLY}, /* snmpInTotalSetVars */ {15, SNMP_ASN1_TYPE_COUNTER, SNMP_NODE_INSTANCE_READ_ONLY}, /* snmpInGetRequests */ {16, SNMP_ASN1_TYPE_COUNTER, SNMP_NODE_INSTANCE_READ_ONLY}, /* snmpInGetNexts */ {17, SNMP_ASN1_TYPE_COUNTER, SNMP_NODE_INSTANCE_READ_ONLY}, /* snmpInSetRequests */ {18, SNMP_ASN1_TYPE_COUNTER, SNMP_NODE_INSTANCE_READ_ONLY}, /* snmpInGetResponses */ {19, SNMP_ASN1_TYPE_COUNTER, 
SNMP_NODE_INSTANCE_READ_ONLY}, /* snmpInTraps */ {20, SNMP_ASN1_TYPE_COUNTER, SNMP_NODE_INSTANCE_READ_ONLY}, /* snmpOutTooBigs */ {21, SNMP_ASN1_TYPE_COUNTER, SNMP_NODE_INSTANCE_READ_ONLY}, /* snmpOutNoSuchNames */ {22, SNMP_ASN1_TYPE_COUNTER, SNMP_NODE_INSTANCE_READ_ONLY}, /* snmpOutBadValues */ {24, SNMP_ASN1_TYPE_COUNTER, SNMP_NODE_INSTANCE_READ_ONLY}, /* snmpOutGenErrs */ {25, SNMP_ASN1_TYPE_COUNTER, SNMP_NODE_INSTANCE_READ_ONLY}, /* snmpOutGetRequests */ {26, SNMP_ASN1_TYPE_COUNTER, SNMP_NODE_INSTANCE_READ_ONLY}, /* snmpOutGetNexts */ {27, SNMP_ASN1_TYPE_COUNTER, SNMP_NODE_INSTANCE_READ_ONLY}, /* snmpOutSetRequests */ {28, SNMP_ASN1_TYPE_COUNTER, SNMP_NODE_INSTANCE_READ_ONLY}, /* snmpOutGetResponses */ {29, SNMP_ASN1_TYPE_COUNTER, SNMP_NODE_INSTANCE_READ_ONLY}, /* snmpOutTraps */ {30, SNMP_ASN1_TYPE_INTEGER, SNMP_NODE_INSTANCE_READ_WRITE}, /* snmpEnableAuthenTraps */ {31, SNMP_ASN1_TYPE_COUNTER, SNMP_NODE_INSTANCE_READ_ONLY}, /* snmpSilentDrops */ {32, SNMP_ASN1_TYPE_COUNTER, SNMP_NODE_INSTANCE_READ_ONLY} /* snmpProxyDrops */ }; const struct snmp_scalar_array_node snmp_mib2_snmp_root = SNMP_SCALAR_CREATE_ARRAY_NODE(11, snmp_nodes, snmp_get_value, snmp_set_test, snmp_set_value); #endif /* LWIP_SNMP && SNMP_LWIP_MIB2 */
2301_81045437/classic-platform
communication/lwip-2.0.3/src/apps/snmp/snmp_mib2_snmp.c
C
unknown
9,142
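The snmpEnableAuthenTraps handler above simply maps the MIB values 1/2 onto snmp_set_auth_traps_enabled(), so the same switch can also be flipped locally by the application. A one-line hedged sketch using the setter and constant the code above already references:

#include "lwip/apps/snmp.h"

/* Illustrative only: turn authentication-failure traps on at start-up. */
void my_enable_auth_traps(void)
{
  snmp_set_auth_traps_enabled(SNMP_AUTH_TRAPS_ENABLED);
}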
/** * @file * Management Information Base II (RFC1213) SYSTEM objects and functions. */ /* * Copyright (c) 2006 Axon Digital Design B.V., The Netherlands. * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY * OF SUCH DAMAGE. * * Author: Dirk Ziegelmeier <dziegel@gmx.de> * Christiaan Simons <christiaan.simons@axon.tv> */ #include "lwip/snmp.h" #include "lwip/apps/snmp.h" #include "lwip/apps/snmp_core.h" #include "lwip/apps/snmp_mib2.h" #include "lwip/apps/snmp_table.h" #include "lwip/apps/snmp_scalar.h" #include "lwip/sys.h" #include <string.h> #if LWIP_SNMP && SNMP_LWIP_MIB2 #if SNMP_USE_NETCONN #define SYNC_NODE_NAME(node_name) node_name ## _synced #define CREATE_LWIP_SYNC_NODE(oid, node_name) \ static const struct snmp_threadsync_node node_name ## _synced = SNMP_CREATE_THREAD_SYNC_NODE(oid, &node_name.node, &snmp_mib2_lwip_locks); #else #define SYNC_NODE_NAME(node_name) node_name #define CREATE_LWIP_SYNC_NODE(oid, node_name) #endif /* --- system .1.3.6.1.2.1.1 ----------------------------------------------------- */ /** mib-2.system.sysDescr */ static const u8_t sysdescr_default[] = SNMP_LWIP_MIB2_SYSDESC; static const u8_t* sysdescr = sysdescr_default; static const u16_t* sysdescr_len = NULL; /* use strlen for determining len */ /** mib-2.system.sysContact */ static const u8_t syscontact_default[] = SNMP_LWIP_MIB2_SYSCONTACT; static const u8_t* syscontact = syscontact_default; static const u16_t* syscontact_len = NULL; /* use strlen for determining len */ static u8_t* syscontact_wr = NULL; /* if writable, points to the same buffer as syscontact (required for correct constness) */ static u16_t* syscontact_wr_len = NULL; /* if writable, points to the same buffer as syscontact_len (required for correct constness) */ static u16_t syscontact_bufsize = 0; /* 0=not writable */ /** mib-2.system.sysName */ static const u8_t sysname_default[] = SNMP_LWIP_MIB2_SYSNAME; static const u8_t* sysname = sysname_default; static const u16_t* sysname_len = NULL; /* use strlen for determining len */ static u8_t* sysname_wr = NULL; /* if writable, points to the same buffer as sysname (required for correct constness) */ static u16_t* sysname_wr_len = NULL; /* if writable, points to the same buffer as sysname_len 
(required for correct constness) */ static u16_t sysname_bufsize = 0; /* 0=not writable */ /** mib-2.system.sysLocation */ static const u8_t syslocation_default[] = SNMP_LWIP_MIB2_SYSLOCATION; static const u8_t* syslocation = syslocation_default; static const u16_t* syslocation_len = NULL; /* use strlen for determining len */ static u8_t* syslocation_wr = NULL; /* if writable, points to the same buffer as syslocation (required for correct constness) */ static u16_t* syslocation_wr_len = NULL; /* if writable, points to the same buffer as syslocation_len (required for correct constness) */ static u16_t syslocation_bufsize = 0; /* 0=not writable */ /** * @ingroup snmp_mib2 * Initializes sysDescr pointers. * * @param str if non-NULL then copy str pointer * @param len points to string length, excluding zero terminator */ void snmp_mib2_set_sysdescr(const u8_t *str, const u16_t *len) { if (str != NULL) { sysdescr = str; sysdescr_len = len; } } /** * @ingroup snmp_mib2 * Initializes sysContact pointers * * @param ocstr if non-NULL then copy str pointer * @param ocstrlen points to string length, excluding zero terminator. * if set to NULL it is assumed that ocstr is NULL-terminated. * @param bufsize size of the buffer in bytes. * (this is required because the buffer can be overwritten by snmp-set) * if ocstrlen is NULL buffer needs space for terminating 0 byte. * otherwise complete buffer is used for string. * if bufsize is set to 0, the value is regarded as read-only. */ void snmp_mib2_set_syscontact(u8_t *ocstr, u16_t *ocstrlen, u16_t bufsize) { if (ocstr != NULL) { syscontact = ocstr; syscontact_wr = ocstr; syscontact_len = ocstrlen; syscontact_wr_len = ocstrlen; syscontact_bufsize = bufsize; } } /** * @ingroup snmp_mib2 * see \ref snmp_mib2_set_syscontact but set pointer to readonly memory */ void snmp_mib2_set_syscontact_readonly(const u8_t *ocstr, const u16_t *ocstrlen) { if (ocstr != NULL) { syscontact = ocstr; syscontact_len = ocstrlen; syscontact_wr = NULL; syscontact_wr_len = NULL; syscontact_bufsize = 0; } } /** * @ingroup snmp_mib2 * Initializes sysName pointers * * @param ocstr if non-NULL then copy str pointer * @param ocstrlen points to string length, excluding zero terminator. * if set to NULL it is assumed that ocstr is NULL-terminated. * @param bufsize size of the buffer in bytes. * (this is required because the buffer can be overwritten by snmp-set) * if ocstrlen is NULL buffer needs space for terminating 0 byte. * otherwise complete buffer is used for string. * if bufsize is set to 0, the value is regarded as read-only. */ void snmp_mib2_set_sysname(u8_t *ocstr, u16_t *ocstrlen, u16_t bufsize) { if (ocstr != NULL) { sysname = ocstr; sysname_wr = ocstr; sysname_len = ocstrlen; sysname_wr_len = ocstrlen; sysname_bufsize = bufsize; } } /** * @ingroup snmp_mib2 * see \ref snmp_mib2_set_sysname but set pointer to readonly memory */ void snmp_mib2_set_sysname_readonly(const u8_t *ocstr, const u16_t *ocstrlen) { if (ocstr != NULL) { sysname = ocstr; sysname_len = ocstrlen; sysname_wr = NULL; sysname_wr_len = NULL; sysname_bufsize = 0; } } /** * @ingroup snmp_mib2 * Initializes sysLocation pointers * * @param ocstr if non-NULL then copy str pointer * @param ocstrlen points to string length, excluding zero terminator. * if set to NULL it is assumed that ocstr is NULL-terminated. * @param bufsize size of the buffer in bytes. * (this is required because the buffer can be overwritten by snmp-set) * if ocstrlen is NULL buffer needs space for terminating 0 byte. 
* otherwise complete buffer is used for string. * if bufsize is set to 0, the value is regarded as read-only. */ void snmp_mib2_set_syslocation(u8_t *ocstr, u16_t *ocstrlen, u16_t bufsize) { if (ocstr != NULL) { syslocation = ocstr; syslocation_wr = ocstr; syslocation_len = ocstrlen; syslocation_wr_len = ocstrlen; syslocation_bufsize = bufsize; } } /** * @ingroup snmp_mib2 * see \ref snmp_mib2_set_syslocation but set pointer to readonly memory */ void snmp_mib2_set_syslocation_readonly(const u8_t *ocstr, const u16_t *ocstrlen) { if (ocstr != NULL) { syslocation = ocstr; syslocation_len = ocstrlen; syslocation_wr = NULL; syslocation_wr_len = NULL; syslocation_bufsize = 0; } } static s16_t system_get_value(const struct snmp_scalar_array_node_def *node, void *value) { const u8_t* var = NULL; const s16_t* var_len; u16_t result; switch (node->oid) { case 1: /* sysDescr */ var = sysdescr; var_len = (const s16_t*)sysdescr_len; break; case 2: /* sysObjectID */ { const struct snmp_obj_id* dev_enterprise_oid = snmp_get_device_enterprise_oid(); MEMCPY(value, dev_enterprise_oid->id, dev_enterprise_oid->len * sizeof(u32_t)); return dev_enterprise_oid->len * sizeof(u32_t); } case 3: /* sysUpTime */ MIB2_COPY_SYSUPTIME_TO((u32_t*)value); return sizeof(u32_t); case 4: /* sysContact */ var = syscontact; var_len = (const s16_t*)syscontact_len; break; case 5: /* sysName */ var = sysname; var_len = (const s16_t*)sysname_len; break; case 6: /* sysLocation */ var = syslocation; var_len = (const s16_t*)syslocation_len; break; case 7: /* sysServices */ *(s32_t*)value = SNMP_SYSSERVICES; return sizeof(s32_t); default: LWIP_DEBUGF(SNMP_MIB_DEBUG,("system_get_value(): unknown id: %"S32_F"\n", node->oid)); return 0; } /* handle string values (OID 1,4,5 and 6) */ LWIP_ASSERT("", (value != NULL)); if (var_len == NULL) { result = (s16_t)strlen((const char*)var); } else { result = *var_len; } MEMCPY(value, var, result); return result; } static snmp_err_t system_set_test(const struct snmp_scalar_array_node_def *node, u16_t len, void *value) { snmp_err_t ret = SNMP_ERR_WRONGVALUE; const u16_t* var_bufsize = NULL; const u16_t* var_wr_len; LWIP_UNUSED_ARG(value); switch (node->oid) { case 4: /* sysContact */ var_bufsize = &syscontact_bufsize; var_wr_len = syscontact_wr_len; break; case 5: /* sysName */ var_bufsize = &sysname_bufsize; var_wr_len = sysname_wr_len; break; case 6: /* sysLocation */ var_bufsize = &syslocation_bufsize; var_wr_len = syslocation_wr_len; break; default: LWIP_DEBUGF(SNMP_MIB_DEBUG,("system_set_test(): unknown id: %"S32_F"\n", node->oid)); return ret; } /* check if value is writable at all */ if (*var_bufsize > 0) { if (var_wr_len == NULL) { /* we have to take the terminating 0 into account */ if (len < *var_bufsize) { ret = SNMP_ERR_NOERROR; } } else { if (len <= *var_bufsize) { ret = SNMP_ERR_NOERROR; } } } else { ret = SNMP_ERR_NOTWRITABLE; } return ret; } static snmp_err_t system_set_value(const struct snmp_scalar_array_node_def *node, u16_t len, void *value) { u8_t* var_wr = NULL; u16_t* var_wr_len; switch (node->oid) { case 4: /* sysContact */ var_wr = syscontact_wr; var_wr_len = syscontact_wr_len; break; case 5: /* sysName */ var_wr = sysname_wr; var_wr_len = sysname_wr_len; break; case 6: /* sysLocation */ var_wr = syslocation_wr; var_wr_len = syslocation_wr_len; break; default: LWIP_DEBUGF(SNMP_MIB_DEBUG,("system_set_value(): unknown id: %"S32_F"\n", node->oid)); return SNMP_ERR_GENERROR; } /* no need to check size of target buffer, this was already done in set_test method */ LWIP_ASSERT("", 
var_wr != NULL); MEMCPY(var_wr, value, len); if (var_wr_len == NULL) { /* add terminating 0 */ var_wr[len] = 0; } else { *var_wr_len = len; } return SNMP_ERR_NOERROR; } static const struct snmp_scalar_array_node_def system_nodes[] = { {1, SNMP_ASN1_TYPE_OCTET_STRING, SNMP_NODE_INSTANCE_READ_ONLY}, /* sysDescr */ {2, SNMP_ASN1_TYPE_OBJECT_ID, SNMP_NODE_INSTANCE_READ_ONLY}, /* sysObjectID */ {3, SNMP_ASN1_TYPE_TIMETICKS, SNMP_NODE_INSTANCE_READ_ONLY}, /* sysUpTime */ {4, SNMP_ASN1_TYPE_OCTET_STRING, SNMP_NODE_INSTANCE_READ_WRITE}, /* sysContact */ {5, SNMP_ASN1_TYPE_OCTET_STRING, SNMP_NODE_INSTANCE_READ_WRITE}, /* sysName */ {6, SNMP_ASN1_TYPE_OCTET_STRING, SNMP_NODE_INSTANCE_READ_WRITE}, /* sysLocation */ {7, SNMP_ASN1_TYPE_INTEGER, SNMP_NODE_INSTANCE_READ_ONLY} /* sysServices */ }; const struct snmp_scalar_array_node snmp_mib2_system_node = SNMP_SCALAR_CREATE_ARRAY_NODE(1, system_nodes, system_get_value, system_set_test, system_set_value); #endif /* LWIP_SNMP && SNMP_LWIP_MIB2 */
2301_81045437/classic-platform
communication/lwip-2.0.3/src/apps/snmp/snmp_mib2_system.c
C
unknown
13,225
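A minimal usage sketch for the writable system-group strings defined in snmp_mib2_system.c above: the application keeps buffers alive for the whole lifetime of the agent and registers them with snmp_mib2_set_sysname()/snmp_mib2_set_syscontact(); the buffer names, contents and SYS_STR_BUFSIZE below are illustrative only and not part of lwIP.

/* Hypothetical application-side setup for the MIB-2 system group.
 * Assumes the lwIP SNMP agent is built with SNMP_LWIP_MIB2 enabled. */
#include <string.h>
#include "lwip/apps/snmp_mib2.h"

#define SYS_STR_BUFSIZE 64  /* illustrative buffer size */

static u8_t  app_sysname[SYS_STR_BUFSIZE];
static u16_t app_sysname_len;
static u8_t  app_syscontact[SYS_STR_BUFSIZE];
static u16_t app_syscontact_len;

static void app_mib2_system_init(void)
{
  /* read-only sysDescr: constant memory, NULL length means strlen() is used */
  snmp_mib2_set_sysdescr((const u8_t *)"lwIP 2.0.3 demo device", NULL);

  /* writable sysName/sysContact: the agent may overwrite these buffers on
   * snmp-set, so the full buffer size is passed as the third argument */
  app_sysname_len = (u16_t)strlen("demo-node");
  memcpy(app_sysname, "demo-node", app_sysname_len);
  snmp_mib2_set_sysname(app_sysname, &app_sysname_len, SYS_STR_BUFSIZE);

  app_syscontact_len = (u16_t)strlen("admin@example.org");
  memcpy(app_syscontact, "admin@example.org", app_syscontact_len);
  snmp_mib2_set_syscontact(app_syscontact, &app_syscontact_len, SYS_STR_BUFSIZE);
}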
/** * @file * Management Information Base II (RFC1213) TCP objects and functions. */ /* * Copyright (c) 2006 Axon Digital Design B.V., The Netherlands. * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY * OF SUCH DAMAGE. * * Author: Dirk Ziegelmeier <dziegel@gmx.de> * Christiaan Simons <christiaan.simons@axon.tv> */ #include "lwip/snmp.h" #include "lwip/apps/snmp.h" #include "lwip/apps/snmp_core.h" #include "lwip/apps/snmp_mib2.h" #include "lwip/apps/snmp_table.h" #include "lwip/apps/snmp_scalar.h" #include "lwip/tcp.h" #include "lwip/priv/tcp_priv.h" #include "lwip/stats.h" #include <string.h> #if LWIP_SNMP && SNMP_LWIP_MIB2 && LWIP_TCP #if SNMP_USE_NETCONN #define SYNC_NODE_NAME(node_name) node_name ## _synced #define CREATE_LWIP_SYNC_NODE(oid, node_name) \ static const struct snmp_threadsync_node node_name ## _synced = SNMP_CREATE_THREAD_SYNC_NODE(oid, &node_name.node, &snmp_mib2_lwip_locks); #else #define SYNC_NODE_NAME(node_name) node_name #define CREATE_LWIP_SYNC_NODE(oid, node_name) #endif /* --- tcp .1.3.6.1.2.1.6 ----------------------------------------------------- */ static s16_t tcp_get_value(struct snmp_node_instance* instance, void* value) { u32_t *uint_ptr = (u32_t*)value; s32_t *sint_ptr = (s32_t*)value; switch (instance->node->oid) { case 1: /* tcpRtoAlgorithm, vanj(4) */ *sint_ptr = 4; return sizeof(*sint_ptr); case 2: /* tcpRtoMin */ /* @todo not the actual value, a guess, needs to be calculated */ *sint_ptr = 1000; return sizeof(*sint_ptr); case 3: /* tcpRtoMax */ /* @todo not the actual value, a guess, needs to be calculated */ *sint_ptr = 60000; return sizeof(*sint_ptr); case 4: /* tcpMaxConn */ *sint_ptr = MEMP_NUM_TCP_PCB; return sizeof(*sint_ptr); case 5: /* tcpActiveOpens */ *uint_ptr = STATS_GET(mib2.tcpactiveopens); return sizeof(*uint_ptr); case 6: /* tcpPassiveOpens */ *uint_ptr = STATS_GET(mib2.tcppassiveopens); return sizeof(*uint_ptr); case 7: /* tcpAttemptFails */ *uint_ptr = STATS_GET(mib2.tcpattemptfails); return sizeof(*uint_ptr); case 8: /* tcpEstabResets */ *uint_ptr = STATS_GET(mib2.tcpestabresets); return sizeof(*uint_ptr); case 9: /* tcpCurrEstab */ { u16_t tcpcurrestab = 0; struct tcp_pcb *pcb = tcp_active_pcbs; while (pcb != NULL) { if ((pcb->state 
== ESTABLISHED) || (pcb->state == CLOSE_WAIT)) { tcpcurrestab++; } pcb = pcb->next; } *uint_ptr = tcpcurrestab; } return sizeof(*uint_ptr); case 10: /* tcpInSegs */ *uint_ptr = STATS_GET(mib2.tcpinsegs); return sizeof(*uint_ptr); case 11: /* tcpOutSegs */ *uint_ptr = STATS_GET(mib2.tcpoutsegs); return sizeof(*uint_ptr); case 12: /* tcpRetransSegs */ *uint_ptr = STATS_GET(mib2.tcpretranssegs); return sizeof(*uint_ptr); case 14: /* tcpInErrs */ *uint_ptr = STATS_GET(mib2.tcpinerrs); return sizeof(*uint_ptr); case 15: /* tcpOutRsts */ *uint_ptr = STATS_GET(mib2.tcpoutrsts); return sizeof(*uint_ptr); case 17: /* tcpHCInSegs */ memset(value, 0, 2*sizeof(u32_t)); /* not supported */ return 2*sizeof(u32_t); case 18: /* tcpHCOutSegs */ memset(value, 0, 2*sizeof(u32_t)); /* not supported */ return 2*sizeof(u32_t); default: LWIP_DEBUGF(SNMP_MIB_DEBUG,("tcp_get_value(): unknown id: %"S32_F"\n", instance->node->oid)); break; } return 0; } /* --- tcpConnTable --- */ #if LWIP_IPV4 /* list of allowed value ranges for incoming OID */ static const struct snmp_oid_range tcp_ConnTable_oid_ranges[] = { { 0, 0xff }, /* IP A */ { 0, 0xff }, /* IP B */ { 0, 0xff }, /* IP C */ { 0, 0xff }, /* IP D */ { 0, 0xffff }, /* Port */ { 0, 0xff }, /* IP A */ { 0, 0xff }, /* IP B */ { 0, 0xff }, /* IP C */ { 0, 0xff }, /* IP D */ { 0, 0xffff } /* Port */ }; static snmp_err_t tcp_ConnTable_get_cell_value_core(struct tcp_pcb *pcb, const u32_t* column, union snmp_variant_value* value, u32_t* value_len) { LWIP_UNUSED_ARG(value_len); /* value */ switch (*column) { case 1: /* tcpConnState */ value->u32 = pcb->state + 1; break; case 2: /* tcpConnLocalAddress */ value->u32 = ip_2_ip4(&pcb->local_ip)->addr; break; case 3: /* tcpConnLocalPort */ value->u32 = pcb->local_port; break; case 4: /* tcpConnRemAddress */ if (pcb->state == LISTEN) { value->u32 = IP4_ADDR_ANY4->addr; } else { value->u32 = ip_2_ip4(&pcb->remote_ip)->addr; } break; case 5: /* tcpConnRemPort */ if (pcb->state == LISTEN) { value->u32 = 0; } else { value->u32 = pcb->remote_port; } break; default: LWIP_ASSERT("invalid id", 0); return SNMP_ERR_NOSUCHINSTANCE; } return SNMP_ERR_NOERROR; } static snmp_err_t tcp_ConnTable_get_cell_value(const u32_t* column, const u32_t* row_oid, u8_t row_oid_len, union snmp_variant_value* value, u32_t* value_len) { u8_t i; ip4_addr_t local_ip; ip4_addr_t remote_ip; u16_t local_port; u16_t remote_port; struct tcp_pcb *pcb; /* check if incoming OID length and if values are in plausible range */ if (!snmp_oid_in_range(row_oid, row_oid_len, tcp_ConnTable_oid_ranges, LWIP_ARRAYSIZE(tcp_ConnTable_oid_ranges))) { return SNMP_ERR_NOSUCHINSTANCE; } /* get IPs and ports from incoming OID */ snmp_oid_to_ip4(&row_oid[0], &local_ip); /* we know it succeeds because of oid_in_range check above */ local_port = (u16_t)row_oid[4]; snmp_oid_to_ip4(&row_oid[5], &remote_ip); /* we know it succeeds because of oid_in_range check above */ remote_port = (u16_t)row_oid[9]; /* find tcp_pcb with requested ips and ports */ for (i = 0; i < LWIP_ARRAYSIZE(tcp_pcb_lists); i++) { pcb = *tcp_pcb_lists[i]; while (pcb != NULL) { /* do local IP and local port match? 
*/ if (IP_IS_V4_VAL(pcb->local_ip) && ip4_addr_cmp(&local_ip, ip_2_ip4(&pcb->local_ip)) && (local_port == pcb->local_port)) { /* PCBs in state LISTEN are not connected and have no remote_ip or remote_port */ if (pcb->state == LISTEN) { if (ip4_addr_cmp(&remote_ip, IP4_ADDR_ANY4) && (remote_port == 0)) { /* fill in object properties */ return tcp_ConnTable_get_cell_value_core(pcb, column, value, value_len); } } else { if (IP_IS_V4_VAL(pcb->remote_ip) && ip4_addr_cmp(&remote_ip, ip_2_ip4(&pcb->remote_ip)) && (remote_port == pcb->remote_port)) { /* fill in object properties */ return tcp_ConnTable_get_cell_value_core(pcb, column, value, value_len); } } } pcb = pcb->next; } } /* not found */ return SNMP_ERR_NOSUCHINSTANCE; } static snmp_err_t tcp_ConnTable_get_next_cell_instance_and_value(const u32_t* column, struct snmp_obj_id* row_oid, union snmp_variant_value* value, u32_t* value_len) { u8_t i; struct tcp_pcb *pcb; struct snmp_next_oid_state state; u32_t result_temp[LWIP_ARRAYSIZE(tcp_ConnTable_oid_ranges)]; /* init struct to search next oid */ snmp_next_oid_init(&state, row_oid->id, row_oid->len, result_temp, LWIP_ARRAYSIZE(tcp_ConnTable_oid_ranges)); /* iterate over all possible OIDs to find the next one */ for (i = 0; i < LWIP_ARRAYSIZE(tcp_pcb_lists); i++) { pcb = *tcp_pcb_lists[i]; while (pcb != NULL) { u32_t test_oid[LWIP_ARRAYSIZE(tcp_ConnTable_oid_ranges)]; if (IP_IS_V4_VAL(pcb->local_ip)) { snmp_ip4_to_oid(ip_2_ip4(&pcb->local_ip), &test_oid[0]); test_oid[4] = pcb->local_port; /* PCBs in state LISTEN are not connected and have no remote_ip or remote_port */ if (pcb->state == LISTEN) { snmp_ip4_to_oid(IP4_ADDR_ANY4, &test_oid[5]); test_oid[9] = 0; } else { if (IP_IS_V6_VAL(pcb->remote_ip)) { /* should never happen */ continue; } snmp_ip4_to_oid(ip_2_ip4(&pcb->remote_ip), &test_oid[5]); test_oid[9] = pcb->remote_port; } /* check generated OID: is it a candidate for the next one? */ snmp_next_oid_check(&state, test_oid, LWIP_ARRAYSIZE(tcp_ConnTable_oid_ranges), pcb); } pcb = pcb->next; } } /* did we find a next one? 
*/ if (state.status == SNMP_NEXT_OID_STATUS_SUCCESS) { snmp_oid_assign(row_oid, state.next_oid, state.next_oid_len); /* fill in object properties */ return tcp_ConnTable_get_cell_value_core((struct tcp_pcb*)state.reference, column, value, value_len); } /* not found */ return SNMP_ERR_NOSUCHINSTANCE; } #endif /* LWIP_IPV4 */ /* --- tcpConnectionTable --- */ static snmp_err_t tcp_ConnectionTable_get_cell_value_core(const u32_t* column, struct tcp_pcb *pcb, union snmp_variant_value* value) { /* all items except tcpConnectionState and tcpConnectionProcess are declared as not-accessible */ switch (*column) { case 7: /* tcpConnectionState */ value->u32 = pcb->state + 1; break; case 8: /* tcpConnectionProcess */ value->u32 = 0; /* not supported */ break; default: return SNMP_ERR_NOSUCHINSTANCE; } return SNMP_ERR_NOERROR; } static snmp_err_t tcp_ConnectionTable_get_cell_value(const u32_t* column, const u32_t* row_oid, u8_t row_oid_len, union snmp_variant_value* value, u32_t* value_len) { ip_addr_t local_ip, remote_ip; u16_t local_port, remote_port; struct tcp_pcb *pcb; u8_t idx = 0; u8_t i; struct tcp_pcb ** const tcp_pcb_nonlisten_lists[] = {&tcp_bound_pcbs, &tcp_active_pcbs, &tcp_tw_pcbs}; LWIP_UNUSED_ARG(value_len); /* tcpConnectionLocalAddressType + tcpConnectionLocalAddress + tcpConnectionLocalPort */ idx += snmp_oid_to_ip_port(&row_oid[idx], row_oid_len-idx, &local_ip, &local_port); if (idx == 0) { return SNMP_ERR_NOSUCHINSTANCE; } /* tcpConnectionRemAddressType + tcpConnectionRemAddress + tcpConnectionRemPort */ idx += snmp_oid_to_ip_port(&row_oid[idx], row_oid_len-idx, &remote_ip, &remote_port); if (idx == 0) { return SNMP_ERR_NOSUCHINSTANCE; } /* find tcp_pcb with requested ip and port*/ for (i = 0; i < LWIP_ARRAYSIZE(tcp_pcb_nonlisten_lists); i++) { pcb = *tcp_pcb_nonlisten_lists[i]; while (pcb != NULL) { if (ip_addr_cmp(&local_ip, &pcb->local_ip) && (local_port == pcb->local_port) && ip_addr_cmp(&remote_ip, &pcb->remote_ip) && (remote_port == pcb->remote_port)) { /* fill in object properties */ return tcp_ConnectionTable_get_cell_value_core(column, pcb, value); } pcb = pcb->next; } } /* not found */ return SNMP_ERR_NOSUCHINSTANCE; } static snmp_err_t tcp_ConnectionTable_get_next_cell_instance_and_value(const u32_t* column, struct snmp_obj_id* row_oid, union snmp_variant_value* value, u32_t* value_len) { struct tcp_pcb *pcb; struct snmp_next_oid_state state; /* 1x tcpConnectionLocalAddressType + 1x OID len + 16x tcpConnectionLocalAddress + 1x tcpConnectionLocalPort * 1x tcpConnectionRemAddressType + 1x OID len + 16x tcpConnectionRemAddress + 1x tcpConnectionRemPort */ u32_t result_temp[38]; u8_t i; struct tcp_pcb ** const tcp_pcb_nonlisten_lists[] = {&tcp_bound_pcbs, &tcp_active_pcbs, &tcp_tw_pcbs}; LWIP_UNUSED_ARG(value_len); /* init struct to search next oid */ snmp_next_oid_init(&state, row_oid->id, row_oid->len, result_temp, LWIP_ARRAYSIZE(result_temp)); /* iterate over all possible OIDs to find the next one */ for (i = 0; i < LWIP_ARRAYSIZE(tcp_pcb_nonlisten_lists); i++) { pcb = *tcp_pcb_nonlisten_lists[i]; while (pcb != NULL) { u8_t idx = 0; u32_t test_oid[LWIP_ARRAYSIZE(result_temp)]; /* tcpConnectionLocalAddressType + tcpConnectionLocalAddress + tcpConnectionLocalPort */ idx += snmp_ip_port_to_oid(&pcb->local_ip, pcb->local_port, &test_oid[idx]); /* tcpConnectionRemAddressType + tcpConnectionRemAddress + tcpConnectionRemPort */ idx += snmp_ip_port_to_oid(&pcb->remote_ip, pcb->remote_port, &test_oid[idx]); /* check generated OID: is it a candidate for the next one? 
*/ snmp_next_oid_check(&state, test_oid, idx, pcb); pcb = pcb->next; } } /* did we find a next one? */ if (state.status == SNMP_NEXT_OID_STATUS_SUCCESS) { snmp_oid_assign(row_oid, state.next_oid, state.next_oid_len); /* fill in object properties */ return tcp_ConnectionTable_get_cell_value_core(column, (struct tcp_pcb*)state.reference, value); } else { /* not found */ return SNMP_ERR_NOSUCHINSTANCE; } } /* --- tcpListenerTable --- */ static snmp_err_t tcp_ListenerTable_get_cell_value_core(const u32_t* column, union snmp_variant_value* value) { /* all items except tcpListenerProcess are declared as not-accessible */ switch (*column) { case 4: /* tcpListenerProcess */ value->u32 = 0; /* not supported */ break; default: return SNMP_ERR_NOSUCHINSTANCE; } return SNMP_ERR_NOERROR; } static snmp_err_t tcp_ListenerTable_get_cell_value(const u32_t* column, const u32_t* row_oid, u8_t row_oid_len, union snmp_variant_value* value, u32_t* value_len) { ip_addr_t local_ip; u16_t local_port; struct tcp_pcb_listen *pcb; u8_t idx = 0; LWIP_UNUSED_ARG(value_len); /* tcpListenerLocalAddressType + tcpListenerLocalAddress + tcpListenerLocalPort */ idx += snmp_oid_to_ip_port(&row_oid[idx], row_oid_len-idx, &local_ip, &local_port); if (idx == 0) { return SNMP_ERR_NOSUCHINSTANCE; } /* find tcp_pcb with requested ip and port*/ pcb = tcp_listen_pcbs.listen_pcbs; while (pcb != NULL) { if (ip_addr_cmp(&local_ip, &pcb->local_ip) && (local_port == pcb->local_port)) { /* fill in object properties */ return tcp_ListenerTable_get_cell_value_core(column, value); } pcb = pcb->next; } /* not found */ return SNMP_ERR_NOSUCHINSTANCE; } static snmp_err_t tcp_ListenerTable_get_next_cell_instance_and_value(const u32_t* column, struct snmp_obj_id* row_oid, union snmp_variant_value* value, u32_t* value_len) { struct tcp_pcb_listen *pcb; struct snmp_next_oid_state state; /* 1x tcpListenerLocalAddressType + 1x OID len + 16x tcpListenerLocalAddress + 1x tcpListenerLocalPort */ u32_t result_temp[19]; LWIP_UNUSED_ARG(value_len); /* init struct to search next oid */ snmp_next_oid_init(&state, row_oid->id, row_oid->len, result_temp, LWIP_ARRAYSIZE(result_temp)); /* iterate over all possible OIDs to find the next one */ pcb = tcp_listen_pcbs.listen_pcbs; while (pcb != NULL) { u8_t idx = 0; u32_t test_oid[LWIP_ARRAYSIZE(result_temp)]; /* tcpListenerLocalAddressType + tcpListenerLocalAddress + tcpListenerLocalPort */ idx += snmp_ip_port_to_oid(&pcb->local_ip, pcb->local_port, &test_oid[idx]); /* check generated OID: is it a candidate for the next one? */ snmp_next_oid_check(&state, test_oid, idx, NULL); pcb = pcb->next; } /* did we find a next one? 
*/ if (state.status == SNMP_NEXT_OID_STATUS_SUCCESS) { snmp_oid_assign(row_oid, state.next_oid, state.next_oid_len); /* fill in object properties */ return tcp_ListenerTable_get_cell_value_core(column, value); } else { /* not found */ return SNMP_ERR_NOSUCHINSTANCE; } } static const struct snmp_scalar_node tcp_RtoAlgorithm = SNMP_SCALAR_CREATE_NODE_READONLY(1, SNMP_ASN1_TYPE_INTEGER, tcp_get_value); static const struct snmp_scalar_node tcp_RtoMin = SNMP_SCALAR_CREATE_NODE_READONLY(2, SNMP_ASN1_TYPE_INTEGER, tcp_get_value); static const struct snmp_scalar_node tcp_RtoMax = SNMP_SCALAR_CREATE_NODE_READONLY(3, SNMP_ASN1_TYPE_INTEGER, tcp_get_value); static const struct snmp_scalar_node tcp_MaxConn = SNMP_SCALAR_CREATE_NODE_READONLY(4, SNMP_ASN1_TYPE_INTEGER, tcp_get_value); static const struct snmp_scalar_node tcp_ActiveOpens = SNMP_SCALAR_CREATE_NODE_READONLY(5, SNMP_ASN1_TYPE_COUNTER, tcp_get_value); static const struct snmp_scalar_node tcp_PassiveOpens = SNMP_SCALAR_CREATE_NODE_READONLY(6, SNMP_ASN1_TYPE_COUNTER, tcp_get_value); static const struct snmp_scalar_node tcp_AttemptFails = SNMP_SCALAR_CREATE_NODE_READONLY(7, SNMP_ASN1_TYPE_COUNTER, tcp_get_value); static const struct snmp_scalar_node tcp_EstabResets = SNMP_SCALAR_CREATE_NODE_READONLY(8, SNMP_ASN1_TYPE_COUNTER, tcp_get_value); static const struct snmp_scalar_node tcp_CurrEstab = SNMP_SCALAR_CREATE_NODE_READONLY(9, SNMP_ASN1_TYPE_GAUGE, tcp_get_value); static const struct snmp_scalar_node tcp_InSegs = SNMP_SCALAR_CREATE_NODE_READONLY(10, SNMP_ASN1_TYPE_COUNTER, tcp_get_value); static const struct snmp_scalar_node tcp_OutSegs = SNMP_SCALAR_CREATE_NODE_READONLY(11, SNMP_ASN1_TYPE_COUNTER, tcp_get_value); static const struct snmp_scalar_node tcp_RetransSegs = SNMP_SCALAR_CREATE_NODE_READONLY(12, SNMP_ASN1_TYPE_COUNTER, tcp_get_value); static const struct snmp_scalar_node tcp_InErrs = SNMP_SCALAR_CREATE_NODE_READONLY(14, SNMP_ASN1_TYPE_COUNTER, tcp_get_value); static const struct snmp_scalar_node tcp_OutRsts = SNMP_SCALAR_CREATE_NODE_READONLY(15, SNMP_ASN1_TYPE_COUNTER, tcp_get_value); static const struct snmp_scalar_node tcp_HCInSegs = SNMP_SCALAR_CREATE_NODE_READONLY(17, SNMP_ASN1_TYPE_COUNTER64, tcp_get_value); static const struct snmp_scalar_node tcp_HCOutSegs = SNMP_SCALAR_CREATE_NODE_READONLY(18, SNMP_ASN1_TYPE_COUNTER64, tcp_get_value); #if LWIP_IPV4 static const struct snmp_table_simple_col_def tcp_ConnTable_columns[] = { { 1, SNMP_ASN1_TYPE_INTEGER, SNMP_VARIANT_VALUE_TYPE_U32 }, /* tcpConnState */ { 2, SNMP_ASN1_TYPE_IPADDR, SNMP_VARIANT_VALUE_TYPE_U32 }, /* tcpConnLocalAddress */ { 3, SNMP_ASN1_TYPE_INTEGER, SNMP_VARIANT_VALUE_TYPE_U32 }, /* tcpConnLocalPort */ { 4, SNMP_ASN1_TYPE_IPADDR, SNMP_VARIANT_VALUE_TYPE_U32 }, /* tcpConnRemAddress */ { 5, SNMP_ASN1_TYPE_INTEGER, SNMP_VARIANT_VALUE_TYPE_U32 } /* tcpConnRemPort */ }; static const struct snmp_table_simple_node tcp_ConnTable = SNMP_TABLE_CREATE_SIMPLE(13, tcp_ConnTable_columns, tcp_ConnTable_get_cell_value, tcp_ConnTable_get_next_cell_instance_and_value); #endif /* LWIP_IPV4 */ static const struct snmp_table_simple_col_def tcp_ConnectionTable_columns[] = { /* all items except tcpConnectionState and tcpConnectionProcess are declared as not-accessible */ { 7, SNMP_ASN1_TYPE_INTEGER, SNMP_VARIANT_VALUE_TYPE_U32 }, /* tcpConnectionState */ { 8, SNMP_ASN1_TYPE_UNSIGNED32, SNMP_VARIANT_VALUE_TYPE_U32 } /* tcpConnectionProcess */ }; static const struct snmp_table_simple_node tcp_ConnectionTable = SNMP_TABLE_CREATE_SIMPLE(19, tcp_ConnectionTable_columns, 
tcp_ConnectionTable_get_cell_value, tcp_ConnectionTable_get_next_cell_instance_and_value); static const struct snmp_table_simple_col_def tcp_ListenerTable_columns[] = { /* all items except tcpListenerProcess are declared as not-accessible */ { 4, SNMP_ASN1_TYPE_UNSIGNED32, SNMP_VARIANT_VALUE_TYPE_U32 } /* tcpListenerProcess */ }; static const struct snmp_table_simple_node tcp_ListenerTable = SNMP_TABLE_CREATE_SIMPLE(20, tcp_ListenerTable_columns, tcp_ListenerTable_get_cell_value, tcp_ListenerTable_get_next_cell_instance_and_value); /* the following nodes access variables in LWIP stack from SNMP worker thread and must therefore be synced to LWIP (TCPIP) thread */ CREATE_LWIP_SYNC_NODE( 1, tcp_RtoAlgorithm) CREATE_LWIP_SYNC_NODE( 2, tcp_RtoMin) CREATE_LWIP_SYNC_NODE( 3, tcp_RtoMax) CREATE_LWIP_SYNC_NODE( 4, tcp_MaxConn) CREATE_LWIP_SYNC_NODE( 5, tcp_ActiveOpens) CREATE_LWIP_SYNC_NODE( 6, tcp_PassiveOpens) CREATE_LWIP_SYNC_NODE( 7, tcp_AttemptFails) CREATE_LWIP_SYNC_NODE( 8, tcp_EstabResets) CREATE_LWIP_SYNC_NODE( 9, tcp_CurrEstab) CREATE_LWIP_SYNC_NODE(10, tcp_InSegs) CREATE_LWIP_SYNC_NODE(11, tcp_OutSegs) CREATE_LWIP_SYNC_NODE(12, tcp_RetransSegs) #if LWIP_IPV4 CREATE_LWIP_SYNC_NODE(13, tcp_ConnTable) #endif /* LWIP_IPV4 */ CREATE_LWIP_SYNC_NODE(14, tcp_InErrs) CREATE_LWIP_SYNC_NODE(15, tcp_OutRsts) CREATE_LWIP_SYNC_NODE(17, tcp_HCInSegs) CREATE_LWIP_SYNC_NODE(18, tcp_HCOutSegs) CREATE_LWIP_SYNC_NODE(19, tcp_ConnectionTable) CREATE_LWIP_SYNC_NODE(20, tcp_ListenerTable) static const struct snmp_node* const tcp_nodes[] = { &SYNC_NODE_NAME(tcp_RtoAlgorithm).node.node, &SYNC_NODE_NAME(tcp_RtoMin).node.node, &SYNC_NODE_NAME(tcp_RtoMax).node.node, &SYNC_NODE_NAME(tcp_MaxConn).node.node, &SYNC_NODE_NAME(tcp_ActiveOpens).node.node, &SYNC_NODE_NAME(tcp_PassiveOpens).node.node, &SYNC_NODE_NAME(tcp_AttemptFails).node.node, &SYNC_NODE_NAME(tcp_EstabResets).node.node, &SYNC_NODE_NAME(tcp_CurrEstab).node.node, &SYNC_NODE_NAME(tcp_InSegs).node.node, &SYNC_NODE_NAME(tcp_OutSegs).node.node, &SYNC_NODE_NAME(tcp_RetransSegs).node.node, #if LWIP_IPV4 &SYNC_NODE_NAME(tcp_ConnTable).node.node, #endif /* LWIP_IPV4 */ &SYNC_NODE_NAME(tcp_InErrs).node.node, &SYNC_NODE_NAME(tcp_OutRsts).node.node, &SYNC_NODE_NAME(tcp_HCInSegs).node.node, &SYNC_NODE_NAME(tcp_HCOutSegs).node.node, &SYNC_NODE_NAME(tcp_ConnectionTable).node.node, &SYNC_NODE_NAME(tcp_ListenerTable).node.node }; const struct snmp_tree_node snmp_mib2_tcp_root = SNMP_CREATE_TREE_NODE(6, tcp_nodes); #endif /* LWIP_SNMP && SNMP_LWIP_MIB2 && LWIP_TCP */
2301_81045437/classic-platform
communication/lwip-2.0.3/src/apps/snmp/snmp_mib2_tcp.c
C
unknown
23,175
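The tcp group in snmp_mib2_tcp.c above builds every leaf from a get_value callback plus the SNMP_SCALAR_CREATE_NODE_READONLY and SNMP_CREATE_TREE_NODE helpers. The sketch below reuses that pattern for a hypothetical private subtree; the node names, OIDs and returned value are placeholders, not part of lwIP.

/* Minimal sketch of a read-only scalar following the tcp_get_value pattern. */
#include "lwip/apps/snmp_core.h"
#include "lwip/apps/snmp_scalar.h"

static s16_t
example_scalar_get_value(struct snmp_node_instance *instance, void *value)
{
  s32_t *sint_ptr = (s32_t *)value;
  LWIP_UNUSED_ARG(instance);
  *sint_ptr = 42;             /* placeholder payload */
  return sizeof(*sint_ptr);   /* number of bytes written into 'value' */
}

/* scalar .1 below the tree node created at the end */
static const struct snmp_scalar_node example_scalar =
  SNMP_SCALAR_CREATE_NODE_READONLY(1, SNMP_ASN1_TYPE_INTEGER,
                                   example_scalar_get_value);

static const struct snmp_node *const example_nodes[] = {
  &example_scalar.node.node   /* same .node.node access as the tcp_nodes[] list */
};

/* tree node .1; it would then be placed under an application-defined MIB root */
static const struct snmp_tree_node example_root =
  SNMP_CREATE_TREE_NODE(1, example_nodes);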
/** * @file * Management Information Base II (RFC1213) UDP objects and functions. */ /* * Copyright (c) 2006 Axon Digital Design B.V., The Netherlands. * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY * OF SUCH DAMAGE. * * Author: Dirk Ziegelmeier <dziegel@gmx.de> * Christiaan Simons <christiaan.simons@axon.tv> */ #include "lwip/snmp.h" #include "lwip/apps/snmp.h" #include "lwip/apps/snmp_core.h" #include "lwip/apps/snmp_mib2.h" #include "lwip/apps/snmp_table.h" #include "lwip/apps/snmp_scalar.h" #include "lwip/udp.h" #include "lwip/stats.h" #include <string.h> #if LWIP_SNMP && SNMP_LWIP_MIB2 && LWIP_UDP #if SNMP_USE_NETCONN #define SYNC_NODE_NAME(node_name) node_name ## _synced #define CREATE_LWIP_SYNC_NODE(oid, node_name) \ static const struct snmp_threadsync_node node_name ## _synced = SNMP_CREATE_THREAD_SYNC_NODE(oid, &node_name.node, &snmp_mib2_lwip_locks); #else #define SYNC_NODE_NAME(node_name) node_name #define CREATE_LWIP_SYNC_NODE(oid, node_name) #endif /* --- udp .1.3.6.1.2.1.7 ----------------------------------------------------- */ static s16_t udp_get_value(struct snmp_node_instance* instance, void* value) { u32_t *uint_ptr = (u32_t*)value; switch (instance->node->oid) { case 1: /* udpInDatagrams */ *uint_ptr = STATS_GET(mib2.udpindatagrams); return sizeof(*uint_ptr); case 2: /* udpNoPorts */ *uint_ptr = STATS_GET(mib2.udpnoports); return sizeof(*uint_ptr); case 3: /* udpInErrors */ *uint_ptr = STATS_GET(mib2.udpinerrors); return sizeof(*uint_ptr); case 4: /* udpOutDatagrams */ *uint_ptr = STATS_GET(mib2.udpoutdatagrams); return sizeof(*uint_ptr); case 8: /* udpHCInDatagrams */ memset(value, 0, 2*sizeof(u32_t)); /* not supported */ return 2*sizeof(u32_t); case 9: /* udpHCOutDatagrams */ memset(value, 0, 2*sizeof(u32_t)); /* not supported */ return 2*sizeof(u32_t); default: LWIP_DEBUGF(SNMP_MIB_DEBUG,("udp_get_value(): unknown id: %"S32_F"\n", instance->node->oid)); break; } return 0; } /* --- udpEndpointTable --- */ static snmp_err_t udp_endpointTable_get_cell_value_core(const u32_t* column, union snmp_variant_value* value) { /* all items except udpEndpointProcess are declared as not-accessible */ switch (*column) { case 8: /* udpEndpointProcess */ value->u32 = 0; /* not 
supported */ break; default: return SNMP_ERR_NOSUCHINSTANCE; } return SNMP_ERR_NOERROR; } static snmp_err_t udp_endpointTable_get_cell_value(const u32_t* column, const u32_t* row_oid, u8_t row_oid_len, union snmp_variant_value* value, u32_t* value_len) { ip_addr_t local_ip, remote_ip; u16_t local_port, remote_port; struct udp_pcb *pcb; u8_t idx = 0; LWIP_UNUSED_ARG(value_len); /* udpEndpointLocalAddressType + udpEndpointLocalAddress + udpEndpointLocalPort */ idx += snmp_oid_to_ip_port(&row_oid[idx], row_oid_len-idx, &local_ip, &local_port); if (idx == 0) { return SNMP_ERR_NOSUCHINSTANCE; } /* udpEndpointRemoteAddressType + udpEndpointRemoteAddress + udpEndpointRemotePort */ idx += snmp_oid_to_ip_port(&row_oid[idx], row_oid_len-idx, &remote_ip, &remote_port); if (idx == 0) { return SNMP_ERR_NOSUCHINSTANCE; } /* udpEndpointInstance */ if (row_oid_len < (idx+1)) { return SNMP_ERR_NOSUCHINSTANCE; } if (row_oid[idx] != 0) { return SNMP_ERR_NOSUCHINSTANCE; } /* find udp_pcb with requested ip and port*/ pcb = udp_pcbs; while (pcb != NULL) { if (ip_addr_cmp(&local_ip, &pcb->local_ip) && (local_port == pcb->local_port) && ip_addr_cmp(&remote_ip, &pcb->remote_ip) && (remote_port == pcb->remote_port)) { /* fill in object properties */ return udp_endpointTable_get_cell_value_core(column, value); } pcb = pcb->next; } /* not found */ return SNMP_ERR_NOSUCHINSTANCE; } static snmp_err_t udp_endpointTable_get_next_cell_instance_and_value(const u32_t* column, struct snmp_obj_id* row_oid, union snmp_variant_value* value, u32_t* value_len) { struct udp_pcb *pcb; struct snmp_next_oid_state state; /* 1x udpEndpointLocalAddressType + 1x OID len + 16x udpEndpointLocalAddress + 1x udpEndpointLocalPort + * 1x udpEndpointRemoteAddressType + 1x OID len + 16x udpEndpointRemoteAddress + 1x udpEndpointRemotePort + * 1x udpEndpointInstance = 39 */ u32_t result_temp[39]; LWIP_UNUSED_ARG(value_len); /* init struct to search next oid */ snmp_next_oid_init(&state, row_oid->id, row_oid->len, result_temp, LWIP_ARRAYSIZE(result_temp)); /* iterate over all possible OIDs to find the next one */ pcb = udp_pcbs; while (pcb != NULL) { u32_t test_oid[LWIP_ARRAYSIZE(result_temp)]; u8_t idx = 0; /* udpEndpointLocalAddressType + udpEndpointLocalAddress + udpEndpointLocalPort */ idx += snmp_ip_port_to_oid(&pcb->local_ip, pcb->local_port, &test_oid[idx]); /* udpEndpointRemoteAddressType + udpEndpointRemoteAddress + udpEndpointRemotePort */ idx += snmp_ip_port_to_oid(&pcb->remote_ip, pcb->remote_port, &test_oid[idx]); test_oid[idx] = 0; /* udpEndpointInstance */ idx++; /* check generated OID: is it a candidate for the next one? */ snmp_next_oid_check(&state, test_oid, idx, NULL); pcb = pcb->next; } /* did we find a next one? 
*/ if (state.status == SNMP_NEXT_OID_STATUS_SUCCESS) { snmp_oid_assign(row_oid, state.next_oid, state.next_oid_len); /* fill in object properties */ return udp_endpointTable_get_cell_value_core(column, value); } else { /* not found */ return SNMP_ERR_NOSUCHINSTANCE; } } /* --- udpTable --- */ #if LWIP_IPV4 /* list of allowed value ranges for incoming OID */ static const struct snmp_oid_range udp_Table_oid_ranges[] = { { 0, 0xff }, /* IP A */ { 0, 0xff }, /* IP B */ { 0, 0xff }, /* IP C */ { 0, 0xff }, /* IP D */ { 1, 0xffff } /* Port */ }; static snmp_err_t udp_Table_get_cell_value_core(struct udp_pcb *pcb, const u32_t* column, union snmp_variant_value* value, u32_t* value_len) { LWIP_UNUSED_ARG(value_len); switch (*column) { case 1: /* udpLocalAddress */ /* set reference to PCB local IP and return a generic node that copies IP4 addresses */ value->u32 = ip_2_ip4(&pcb->local_ip)->addr; break; case 2: /* udpLocalPort */ /* set reference to PCB local port and return a generic node that copies u16_t values */ value->u32 = pcb->local_port; break; default: return SNMP_ERR_NOSUCHINSTANCE; } return SNMP_ERR_NOERROR; } static snmp_err_t udp_Table_get_cell_value(const u32_t* column, const u32_t* row_oid, u8_t row_oid_len, union snmp_variant_value* value, u32_t* value_len) { ip4_addr_t ip; u16_t port; struct udp_pcb *pcb; /* check if incoming OID length and if values are in plausible range */ if (!snmp_oid_in_range(row_oid, row_oid_len, udp_Table_oid_ranges, LWIP_ARRAYSIZE(udp_Table_oid_ranges))) { return SNMP_ERR_NOSUCHINSTANCE; } /* get IP and port from incoming OID */ snmp_oid_to_ip4(&row_oid[0], &ip); /* we know it succeeds because of oid_in_range check above */ port = (u16_t)row_oid[4]; /* find udp_pcb with requested ip and port*/ pcb = udp_pcbs; while (pcb != NULL) { if (IP_IS_V4_VAL(pcb->local_ip)) { if (ip4_addr_cmp(&ip, ip_2_ip4(&pcb->local_ip)) && (port == pcb->local_port)) { /* fill in object properties */ return udp_Table_get_cell_value_core(pcb, column, value, value_len); } } pcb = pcb->next; } /* not found */ return SNMP_ERR_NOSUCHINSTANCE; } static snmp_err_t udp_Table_get_next_cell_instance_and_value(const u32_t* column, struct snmp_obj_id* row_oid, union snmp_variant_value* value, u32_t* value_len) { struct udp_pcb *pcb; struct snmp_next_oid_state state; u32_t result_temp[LWIP_ARRAYSIZE(udp_Table_oid_ranges)]; /* init struct to search next oid */ snmp_next_oid_init(&state, row_oid->id, row_oid->len, result_temp, LWIP_ARRAYSIZE(udp_Table_oid_ranges)); /* iterate over all possible OIDs to find the next one */ pcb = udp_pcbs; while (pcb != NULL) { u32_t test_oid[LWIP_ARRAYSIZE(udp_Table_oid_ranges)]; if (IP_IS_V4_VAL(pcb->local_ip)) { snmp_ip4_to_oid(ip_2_ip4(&pcb->local_ip), &test_oid[0]); test_oid[4] = pcb->local_port; /* check generated OID: is it a candidate for the next one? */ snmp_next_oid_check(&state, test_oid, LWIP_ARRAYSIZE(udp_Table_oid_ranges), pcb); } pcb = pcb->next; } /* did we find a next one? 
*/ if (state.status == SNMP_NEXT_OID_STATUS_SUCCESS) { snmp_oid_assign(row_oid, state.next_oid, state.next_oid_len); /* fill in object properties */ return udp_Table_get_cell_value_core((struct udp_pcb*)state.reference, column, value, value_len); } else { /* not found */ return SNMP_ERR_NOSUCHINSTANCE; } } #endif /* LWIP_IPV4 */ static const struct snmp_scalar_node udp_inDatagrams = SNMP_SCALAR_CREATE_NODE_READONLY(1, SNMP_ASN1_TYPE_COUNTER, udp_get_value); static const struct snmp_scalar_node udp_noPorts = SNMP_SCALAR_CREATE_NODE_READONLY(2, SNMP_ASN1_TYPE_COUNTER, udp_get_value); static const struct snmp_scalar_node udp_inErrors = SNMP_SCALAR_CREATE_NODE_READONLY(3, SNMP_ASN1_TYPE_COUNTER, udp_get_value); static const struct snmp_scalar_node udp_outDatagrams = SNMP_SCALAR_CREATE_NODE_READONLY(4, SNMP_ASN1_TYPE_COUNTER, udp_get_value); static const struct snmp_scalar_node udp_HCInDatagrams = SNMP_SCALAR_CREATE_NODE_READONLY(8, SNMP_ASN1_TYPE_COUNTER64, udp_get_value); static const struct snmp_scalar_node udp_HCOutDatagrams = SNMP_SCALAR_CREATE_NODE_READONLY(9, SNMP_ASN1_TYPE_COUNTER64, udp_get_value); #if LWIP_IPV4 static const struct snmp_table_simple_col_def udp_Table_columns[] = { { 1, SNMP_ASN1_TYPE_IPADDR, SNMP_VARIANT_VALUE_TYPE_U32 }, /* udpLocalAddress */ { 2, SNMP_ASN1_TYPE_INTEGER, SNMP_VARIANT_VALUE_TYPE_U32 } /* udpLocalPort */ }; static const struct snmp_table_simple_node udp_Table = SNMP_TABLE_CREATE_SIMPLE(5, udp_Table_columns, udp_Table_get_cell_value, udp_Table_get_next_cell_instance_and_value); #endif /* LWIP_IPV4 */ static const struct snmp_table_simple_col_def udp_endpointTable_columns[] = { /* all items except udpEndpointProcess are declared as not-accessible */ { 8, SNMP_ASN1_TYPE_UNSIGNED32, SNMP_VARIANT_VALUE_TYPE_U32 } /* udpEndpointProcess */ }; static const struct snmp_table_simple_node udp_endpointTable = SNMP_TABLE_CREATE_SIMPLE(7, udp_endpointTable_columns, udp_endpointTable_get_cell_value, udp_endpointTable_get_next_cell_instance_and_value); /* the following nodes access variables in LWIP stack from SNMP worker thread and must therefore be synced to LWIP (TCPIP) thread */ CREATE_LWIP_SYNC_NODE(1, udp_inDatagrams) CREATE_LWIP_SYNC_NODE(2, udp_noPorts) CREATE_LWIP_SYNC_NODE(3, udp_inErrors) CREATE_LWIP_SYNC_NODE(4, udp_outDatagrams) #if LWIP_IPV4 CREATE_LWIP_SYNC_NODE(5, udp_Table) #endif /* LWIP_IPV4 */ CREATE_LWIP_SYNC_NODE(7, udp_endpointTable) CREATE_LWIP_SYNC_NODE(8, udp_HCInDatagrams) CREATE_LWIP_SYNC_NODE(9, udp_HCOutDatagrams) static const struct snmp_node* const udp_nodes[] = { &SYNC_NODE_NAME(udp_inDatagrams).node.node, &SYNC_NODE_NAME(udp_noPorts).node.node, &SYNC_NODE_NAME(udp_inErrors).node.node, &SYNC_NODE_NAME(udp_outDatagrams).node.node, #if LWIP_IPV4 &SYNC_NODE_NAME(udp_Table).node.node, #endif /* LWIP_IPV4 */ &SYNC_NODE_NAME(udp_endpointTable).node.node, &SYNC_NODE_NAME(udp_HCInDatagrams).node.node, &SYNC_NODE_NAME(udp_HCOutDatagrams).node.node }; const struct snmp_tree_node snmp_mib2_udp_root = SNMP_CREATE_TREE_NODE(7, udp_nodes); #endif /* LWIP_SNMP && SNMP_LWIP_MIB2 && LWIP_UDP */
2301_81045437/classic-platform
communication/lwip-2.0.3/src/apps/snmp/snmp_mib2_udp.c
C
unknown
13,448
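For completeness, a sketch of how the udp/tcp/system subtrees above typically reach a running agent in lwIP 2.0.x: the aggregate mib2 object exported by snmp_mib2.h is handed to snmp_set_mibs() before snmp_init() starts the agent. The function names are the standard lwIP SNMP app API as I understand it; the array and wrapper function below are illustrative, and a given port's init order may differ.

#include "lwip/apps/snmp.h"
#include "lwip/apps/snmp_mib2.h"

/* mib2 aggregates the MIB-2 subtrees (system, tcp, udp, ...) built above */
static const struct snmp_mib *app_mibs[] = { &mib2 };

void app_snmp_start(void)
{
  snmp_set_mibs(app_mibs, LWIP_ARRAYSIZE(app_mibs));
  snmp_init();  /* starts the SNMP agent (netconn or raw backend) */
}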
/** * @file * SNMP message processing (RFC1157). */ /* * Copyright (c) 2006 Axon Digital Design B.V., The Netherlands. * Copyright (c) 2016 Elias Oenal. * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY * OF SUCH DAMAGE. * * Author: Christiaan Simons <christiaan.simons@axon.tv> * Martin Hentschel <info@cl-soft.de> * Elias Oenal <lwip@eliasoenal.com> */ #include "lwip/apps/snmp_opts.h" #if LWIP_SNMP /* don't build if not configured for use in lwipopts.h */ #include "snmp_msg.h" #include "snmp_asn1.h" #include "snmp_core_priv.h" #include "lwip/ip_addr.h" #include "lwip/stats.h" #if LWIP_SNMP_V3 #include "lwip/apps/snmpv3.h" #include "snmpv3_priv.h" #ifdef LWIP_SNMPV3_INCLUDE_ENGINE #include LWIP_SNMPV3_INCLUDE_ENGINE #endif #endif #include <string.h> /* public (non-static) constants */ /** SNMP community string */ const char *snmp_community = SNMP_COMMUNITY; /** SNMP community string for write access */ const char *snmp_community_write = SNMP_COMMUNITY_WRITE; /** SNMP community string for sending traps */ const char *snmp_community_trap = SNMP_COMMUNITY_TRAP; snmp_write_callback_fct snmp_write_callback = NULL; void* snmp_write_callback_arg = NULL; /** * @ingroup snmp_core * Returns current SNMP community string. * @return current SNMP community string */ const char * snmp_get_community(void) { return snmp_community; } /** * @ingroup snmp_core * Sets SNMP community string. * The string itself (its storage) must be valid throughout the whole life of * program (or until it is changed to sth else). * * @param community is a pointer to new community string */ void snmp_set_community(const char * const community) { LWIP_ASSERT("community string is too long!", strlen(community) <= SNMP_MAX_COMMUNITY_STR_LEN); snmp_community = community; } /** * @ingroup snmp_core * Returns current SNMP write-access community string. * @return current SNMP write-access community string */ const char * snmp_get_community_write(void) { return snmp_community_write; } /** * @ingroup snmp_traps * Returns current SNMP community string used for sending traps. 
* @return current SNMP community string used for sending traps */ const char * snmp_get_community_trap(void) { return snmp_community_trap; } /** * @ingroup snmp_core * Sets SNMP community string for write-access. * The string itself (its storage) must be valid throughout the whole life of * program (or until it is changed to sth else). * * @param community is a pointer to new write-access community string */ void snmp_set_community_write(const char * const community) { LWIP_ASSERT("community string must not be NULL", community != NULL); LWIP_ASSERT("community string is too long!", strlen(community) <= SNMP_MAX_COMMUNITY_STR_LEN); snmp_community_write = community; } /** * @ingroup snmp_traps * Sets SNMP community string used for sending traps. * The string itself (its storage) must be valid throughout the whole life of * program (or until it is changed to sth else). * * @param community is a pointer to new trap community string */ void snmp_set_community_trap(const char * const community) { LWIP_ASSERT("community string is too long!", strlen(community) <= SNMP_MAX_COMMUNITY_STR_LEN); snmp_community_trap = community; } /** * @ingroup snmp_core * Callback fired on every successful write access */ void snmp_set_write_callback(snmp_write_callback_fct write_callback, void* callback_arg) { snmp_write_callback = write_callback; snmp_write_callback_arg = callback_arg; } /* ----------------------------------------------------------------------- */ /* forward declarations */ /* ----------------------------------------------------------------------- */ static err_t snmp_process_get_request(struct snmp_request *request); static err_t snmp_process_getnext_request(struct snmp_request *request); static err_t snmp_process_getbulk_request(struct snmp_request *request); static err_t snmp_process_set_request(struct snmp_request *request); static err_t snmp_parse_inbound_frame(struct snmp_request *request); static err_t snmp_prepare_outbound_frame(struct snmp_request *request); static err_t snmp_complete_outbound_frame(struct snmp_request *request); static void snmp_execute_write_callbacks(struct snmp_request *request); /* ----------------------------------------------------------------------- */ /* implementation */ /* ----------------------------------------------------------------------- */ void snmp_receive(void *handle, struct pbuf *p, const ip_addr_t *source_ip, u16_t port) { err_t err; struct snmp_request request; memset(&request, 0, sizeof(request)); request.handle = handle; request.source_ip = source_ip; request.source_port = port; request.inbound_pbuf = p; snmp_stats.inpkts++; err = snmp_parse_inbound_frame(&request); if (err == ERR_OK) { err = snmp_prepare_outbound_frame(&request); if (err == ERR_OK) { if (request.error_status == SNMP_ERR_NOERROR) { /* only process frame if we do not already have an error to return (e.g. 
all readonly) */ if (request.request_type == SNMP_ASN1_CONTEXT_PDU_GET_REQ) { err = snmp_process_get_request(&request); } else if (request.request_type == SNMP_ASN1_CONTEXT_PDU_GET_NEXT_REQ) { err = snmp_process_getnext_request(&request); } else if (request.request_type == SNMP_ASN1_CONTEXT_PDU_GET_BULK_REQ) { err = snmp_process_getbulk_request(&request); } else if (request.request_type == SNMP_ASN1_CONTEXT_PDU_SET_REQ) { err = snmp_process_set_request(&request); } } if (err == ERR_OK) { err = snmp_complete_outbound_frame(&request); if (err == ERR_OK) { err = snmp_sendto(request.handle, request.outbound_pbuf, request.source_ip, request.source_port); if ((request.request_type == SNMP_ASN1_CONTEXT_PDU_SET_REQ) && (request.error_status == SNMP_ERR_NOERROR) && (snmp_write_callback != NULL)) { /* raise write notification for all written objects */ snmp_execute_write_callbacks(&request); } } } } if (request.outbound_pbuf != NULL) { pbuf_free(request.outbound_pbuf); } } } static u8_t snmp_msg_getnext_validate_node_inst(struct snmp_node_instance* node_instance, void* validate_arg) { if (((node_instance->access & SNMP_NODE_INSTANCE_ACCESS_READ) != SNMP_NODE_INSTANCE_ACCESS_READ) || (node_instance->get_value == NULL)) { return SNMP_ERR_NOSUCHINSTANCE; } if ((node_instance->asn1_type == SNMP_ASN1_TYPE_COUNTER64) && (((struct snmp_request*)validate_arg)->version == SNMP_VERSION_1)) { /* according to RFC 2089 skip Counter64 objects in GetNext requests from v1 clients */ return SNMP_ERR_NOSUCHINSTANCE; } return SNMP_ERR_NOERROR; } static void snmp_process_varbind(struct snmp_request *request, struct snmp_varbind *vb, u8_t get_next) { err_t err; struct snmp_node_instance node_instance; memset(&node_instance, 0, sizeof(node_instance)); if (get_next) { struct snmp_obj_id result_oid; request->error_status = snmp_get_next_node_instance_from_oid(vb->oid.id, vb->oid.len, snmp_msg_getnext_validate_node_inst, request, &result_oid, &node_instance); if (request->error_status == SNMP_ERR_NOERROR) { snmp_oid_assign(&vb->oid, result_oid.id, result_oid.len); } } else { request->error_status = snmp_get_node_instance_from_oid(vb->oid.id, vb->oid.len, &node_instance); if (request->error_status == SNMP_ERR_NOERROR) { /* use 'getnext_validate' method for validation to avoid code duplication (some checks have to be executed here) */ request->error_status = snmp_msg_getnext_validate_node_inst(&node_instance, request); if (request->error_status != SNMP_ERR_NOERROR) { if (node_instance.release_instance != NULL) { node_instance.release_instance(&node_instance); } } } } if (request->error_status != SNMP_ERR_NOERROR) { if (request->error_status >= SNMP_VARBIND_EXCEPTION_OFFSET) { if ((request->version == SNMP_VERSION_2c) || request->version == SNMP_VERSION_3) { /* in SNMP v2c a varbind related exception is stored in varbind and not in frame header */ vb->type = (SNMP_ASN1_CONTENTTYPE_PRIMITIVE | SNMP_ASN1_CLASS_CONTEXT | (request->error_status & SNMP_VARBIND_EXCEPTION_MASK)); vb->value_len = 0; err = snmp_append_outbound_varbind(&(request->outbound_pbuf_stream), vb); if (err == ERR_OK) { /* we stored the exception in varbind -> go on */ request->error_status = SNMP_ERR_NOERROR; } else if (err == ERR_BUF) { request->error_status = SNMP_ERR_TOOBIG; } else { request->error_status = SNMP_ERR_GENERROR; } } } else { /* according to RFC 1157/1905, all other errors only return genError */ request->error_status = SNMP_ERR_GENERROR; } } else { s16_t len = node_instance.get_value(&node_instance, vb->value); vb->type = 
node_instance.asn1_type; if(len >= 0) { vb->value_len = (u16_t)len; /* cast is OK because we checked >= 0 above */ LWIP_ASSERT("SNMP_MAX_VALUE_SIZE is configured too low", (vb->value_len & ~SNMP_GET_VALUE_RAW_DATA) <= SNMP_MAX_VALUE_SIZE); err = snmp_append_outbound_varbind(&request->outbound_pbuf_stream, vb); if (err == ERR_BUF) { request->error_status = SNMP_ERR_TOOBIG; } else if (err != ERR_OK) { request->error_status = SNMP_ERR_GENERROR; } } else { request->error_status = SNMP_ERR_GENERROR; } if (node_instance.release_instance != NULL) { node_instance.release_instance(&node_instance); } } } /** * Service an internal or external event for SNMP GET. * * @param request points to the associated message process state */ static err_t snmp_process_get_request(struct snmp_request *request) { snmp_vb_enumerator_err_t err; struct snmp_varbind vb; vb.value = request->value_buffer; LWIP_DEBUGF(SNMP_DEBUG, ("SNMP get request\n")); while (request->error_status == SNMP_ERR_NOERROR) { err = snmp_vb_enumerator_get_next(&request->inbound_varbind_enumerator, &vb); if (err == SNMP_VB_ENUMERATOR_ERR_OK) { if ((vb.type == SNMP_ASN1_TYPE_NULL) && (vb.value_len == 0)) { snmp_process_varbind(request, &vb, 0); } else { request->error_status = SNMP_ERR_GENERROR; } } else if (err == SNMP_VB_ENUMERATOR_ERR_EOVB) { /* no more varbinds in request */ break; } else if (err == SNMP_VB_ENUMERATOR_ERR_ASN1ERROR) { /* malformed ASN.1, don't answer */ return ERR_ARG; } else { request->error_status = SNMP_ERR_GENERROR; } } return ERR_OK; } /** * Service an internal or external event for SNMP GET. * * @param request points to the associated message process state */ static err_t snmp_process_getnext_request(struct snmp_request *request) { snmp_vb_enumerator_err_t err; struct snmp_varbind vb; vb.value = request->value_buffer; LWIP_DEBUGF(SNMP_DEBUG, ("SNMP get-next request\n")); while (request->error_status == SNMP_ERR_NOERROR) { err = snmp_vb_enumerator_get_next(&request->inbound_varbind_enumerator, &vb); if (err == SNMP_VB_ENUMERATOR_ERR_OK) { if ((vb.type == SNMP_ASN1_TYPE_NULL) && (vb.value_len == 0)) { snmp_process_varbind(request, &vb, 1); } else { request->error_status = SNMP_ERR_GENERROR; } } else if (err == SNMP_VB_ENUMERATOR_ERR_EOVB) { /* no more varbinds in request */ break; } else if (err == SNMP_VB_ENUMERATOR_ERR_ASN1ERROR) { /* malformed ASN.1, don't answer */ return ERR_ARG; } else { request->error_status = SNMP_ERR_GENERROR; } } return ERR_OK; } /** * Service an internal or external event for SNMP GETBULKT. 
* * @param request points to the associated message process state */ static err_t snmp_process_getbulk_request(struct snmp_request *request) { snmp_vb_enumerator_err_t err; s32_t non_repeaters = request->non_repeaters; s32_t repetitions; u16_t repetition_offset = 0; struct snmp_varbind_enumerator repetition_varbind_enumerator; struct snmp_varbind vb; vb.value = request->value_buffer; if (SNMP_LWIP_GETBULK_MAX_REPETITIONS > 0) { repetitions = LWIP_MIN(request->max_repetitions, SNMP_LWIP_GETBULK_MAX_REPETITIONS); } else { repetitions = request->max_repetitions; } LWIP_DEBUGF(SNMP_DEBUG, ("SNMP get-bulk request\n")); /* process non repeaters and first repetition */ while (request->error_status == SNMP_ERR_NOERROR) { if (non_repeaters == 0) { repetition_offset = request->outbound_pbuf_stream.offset; if (repetitions == 0) { /* do not resolve repeaters when repetitions is set to 0 */ break; } repetitions--; } err = snmp_vb_enumerator_get_next(&request->inbound_varbind_enumerator, &vb); if (err == SNMP_VB_ENUMERATOR_ERR_EOVB) { /* no more varbinds in request */ break; } else if (err == SNMP_VB_ENUMERATOR_ERR_ASN1ERROR) { /* malformed ASN.1, don't answer */ return ERR_ARG; } else if ((err != SNMP_VB_ENUMERATOR_ERR_OK) || (vb.type != SNMP_ASN1_TYPE_NULL) || (vb.value_len != 0)) { request->error_status = SNMP_ERR_GENERROR; } else { snmp_process_varbind(request, &vb, 1); non_repeaters--; } } /* process repetitions > 1 */ while ((request->error_status == SNMP_ERR_NOERROR) && (repetitions > 0) && (request->outbound_pbuf_stream.offset != repetition_offset)) { u8_t all_endofmibview = 1; snmp_vb_enumerator_init(&repetition_varbind_enumerator, request->outbound_pbuf, repetition_offset, request->outbound_pbuf_stream.offset - repetition_offset); repetition_offset = request->outbound_pbuf_stream.offset; /* for next loop */ while (request->error_status == SNMP_ERR_NOERROR) { vb.value = NULL; /* do NOT decode value (we enumerate outbound buffer here, so all varbinds have values assigned) */ err = snmp_vb_enumerator_get_next(&repetition_varbind_enumerator, &vb); if (err == SNMP_VB_ENUMERATOR_ERR_OK) { vb.value = request->value_buffer; snmp_process_varbind(request, &vb, 1); if (request->error_status != SNMP_ERR_NOERROR) { /* already set correct error-index (here it cannot be taken from inbound varbind enumerator) */ request->error_index = request->non_repeaters + repetition_varbind_enumerator.varbind_count; } else if (vb.type != (SNMP_ASN1_CONTENTTYPE_PRIMITIVE | SNMP_ASN1_CLASS_CONTEXT | SNMP_ASN1_CONTEXT_VARBIND_END_OF_MIB_VIEW)) { all_endofmibview = 0; } } else if (err == SNMP_VB_ENUMERATOR_ERR_EOVB) { /* no more varbinds in request */ break; } else { LWIP_DEBUGF(SNMP_DEBUG, ("Very strange, we cannot parse the varbind output that we created just before!")); request->error_status = SNMP_ERR_GENERROR; request->error_index = request->non_repeaters + repetition_varbind_enumerator.varbind_count; } } if ((request->error_status == SNMP_ERR_NOERROR) && all_endofmibview) { /* stop when all varbinds in a loop return EndOfMibView */ break; } repetitions--; } if (request->error_status == SNMP_ERR_TOOBIG) { /* for GetBulk it is ok, if not all requested variables fit into the response -> just return the varbinds added so far */ request->error_status = SNMP_ERR_NOERROR; } return ERR_OK; } /** * Service an internal or external event for SNMP SET. 
* * @param request points to the associated message process state */ static err_t snmp_process_set_request(struct snmp_request *request) { snmp_vb_enumerator_err_t err; struct snmp_varbind vb; vb.value = request->value_buffer; LWIP_DEBUGF(SNMP_DEBUG, ("SNMP set request\n")); /* perform set test on all objects */ while (request->error_status == SNMP_ERR_NOERROR) { err = snmp_vb_enumerator_get_next(&request->inbound_varbind_enumerator, &vb); if (err == SNMP_VB_ENUMERATOR_ERR_OK) { struct snmp_node_instance node_instance; memset(&node_instance, 0, sizeof(node_instance)); request->error_status = snmp_get_node_instance_from_oid(vb.oid.id, vb.oid.len, &node_instance); if (request->error_status == SNMP_ERR_NOERROR) { if (node_instance.asn1_type != vb.type) { request->error_status = SNMP_ERR_WRONGTYPE; } else if (((node_instance.access & SNMP_NODE_INSTANCE_ACCESS_WRITE) != SNMP_NODE_INSTANCE_ACCESS_WRITE) || (node_instance.set_value == NULL)) { request->error_status = SNMP_ERR_NOTWRITABLE; } else { if (node_instance.set_test != NULL) { request->error_status = node_instance.set_test(&node_instance, vb.value_len, vb.value); } } if (node_instance.release_instance != NULL) { node_instance.release_instance(&node_instance); } } } else if (err == SNMP_VB_ENUMERATOR_ERR_EOVB) { /* no more varbinds in request */ break; } else if (err == SNMP_VB_ENUMERATOR_ERR_INVALIDLENGTH) { request->error_status = SNMP_ERR_WRONGLENGTH; } else if (err == SNMP_VB_ENUMERATOR_ERR_ASN1ERROR) { /* malformed ASN.1, don't answer */ return ERR_ARG; } else { request->error_status = SNMP_ERR_GENERROR; } } /* perform real set operation on all objects */ if (request->error_status == SNMP_ERR_NOERROR) { snmp_vb_enumerator_init(&request->inbound_varbind_enumerator, request->inbound_pbuf, request->inbound_varbind_offset, request->inbound_varbind_len); while (request->error_status == SNMP_ERR_NOERROR) { err = snmp_vb_enumerator_get_next(&request->inbound_varbind_enumerator, &vb); if (err == SNMP_VB_ENUMERATOR_ERR_OK) { struct snmp_node_instance node_instance; memset(&node_instance, 0, sizeof(node_instance)); request->error_status = snmp_get_node_instance_from_oid(vb.oid.id, vb.oid.len, &node_instance); if (request->error_status == SNMP_ERR_NOERROR) { if (node_instance.set_value(&node_instance, vb.value_len, vb.value) != SNMP_ERR_NOERROR) { if (request->inbound_varbind_enumerator.varbind_count == 1) { request->error_status = SNMP_ERR_COMMITFAILED; } else { /* we cannot undo the set operations done so far */ request->error_status = SNMP_ERR_UNDOFAILED; } } if (node_instance.release_instance != NULL) { node_instance.release_instance(&node_instance); } } } else if (err == SNMP_VB_ENUMERATOR_ERR_EOVB) { /* no more varbinds in request */ break; } else { /* first time enumerating varbinds work but second time not, although nothing should have changed in between ??? 
*/ request->error_status = SNMP_ERR_GENERROR; } } } return ERR_OK; } #define PARSE_EXEC(code, retValue) \ if ((code) != ERR_OK) { \ LWIP_DEBUGF(SNMP_DEBUG, ("Malformed ASN.1 detected.\n")); \ snmp_stats.inasnparseerrs++; \ return retValue; \ } #define PARSE_ASSERT(cond, retValue) \ if (!(cond)) { \ LWIP_DEBUGF(SNMP_DEBUG, ("SNMP parse assertion failed!: " # cond)); \ snmp_stats.inasnparseerrs++; \ return retValue; \ } #define BUILD_EXEC(code, retValue) \ if ((code) != ERR_OK) { \ LWIP_DEBUGF(SNMP_DEBUG, ("SNMP error during creation of outbound frame!: " # code)); \ return retValue; \ } #define IF_PARSE_EXEC(code) PARSE_EXEC(code, ERR_ARG) #define IF_PARSE_ASSERT(code) PARSE_ASSERT(code, ERR_ARG) /** * Checks and decodes incoming SNMP message header, logs header errors. * * @param request points to the current message request state return * @return * - ERR_OK SNMP header is sane and accepted * - ERR_VAL SNMP header is either malformed or rejected */ static err_t snmp_parse_inbound_frame(struct snmp_request *request) { struct snmp_pbuf_stream pbuf_stream; struct snmp_asn1_tlv tlv; s32_t parent_tlv_value_len; s32_t s32_value; err_t err; IF_PARSE_EXEC(snmp_pbuf_stream_init(&pbuf_stream, request->inbound_pbuf, 0, request->inbound_pbuf->tot_len)); /* decode main container consisting of version, community and PDU */ IF_PARSE_EXEC(snmp_asn1_dec_tlv(&pbuf_stream, &tlv)); IF_PARSE_ASSERT((tlv.type == SNMP_ASN1_TYPE_SEQUENCE) && (tlv.value_len == pbuf_stream.length)); parent_tlv_value_len = tlv.value_len; /* decode version */ IF_PARSE_EXEC(snmp_asn1_dec_tlv(&pbuf_stream, &tlv)); IF_PARSE_ASSERT(tlv.type == SNMP_ASN1_TYPE_INTEGER); parent_tlv_value_len -= SNMP_ASN1_TLV_LENGTH(tlv); IF_PARSE_ASSERT(parent_tlv_value_len > 0); IF_PARSE_EXEC(snmp_asn1_dec_s32t(&pbuf_stream, tlv.value_len, &s32_value)); if ((s32_value != SNMP_VERSION_1) && (s32_value != SNMP_VERSION_2c) #if LWIP_SNMP_V3 && (s32_value != SNMP_VERSION_3) #endif ) { /* unsupported SNMP version */ snmp_stats.inbadversions++; return ERR_ARG; } request->version = (u8_t)s32_value; #if LWIP_SNMP_V3 if (request->version == SNMP_VERSION_3) { u16_t u16_value; u16_t inbound_msgAuthenticationParameters_offset; /* SNMPv3 doesn't use communities */ /* @todo: Differentiate read/write access */ strcpy((char*)request->community, snmp_community); request->community_strlen = (u16_t)strlen(snmp_community); /* RFC3414 globalData */ IF_PARSE_EXEC(snmp_asn1_dec_tlv(&pbuf_stream, &tlv)); IF_PARSE_ASSERT(tlv.type == SNMP_ASN1_TYPE_SEQUENCE); parent_tlv_value_len -= SNMP_ASN1_TLV_HDR_LENGTH(tlv); IF_PARSE_ASSERT(parent_tlv_value_len > 0); /* decode msgID */ IF_PARSE_EXEC(snmp_asn1_dec_tlv(&pbuf_stream, &tlv)); IF_PARSE_ASSERT(tlv.type == SNMP_ASN1_TYPE_INTEGER); parent_tlv_value_len -= SNMP_ASN1_TLV_LENGTH(tlv); IF_PARSE_ASSERT(parent_tlv_value_len > 0); IF_PARSE_EXEC(snmp_asn1_dec_s32t(&pbuf_stream, tlv.value_len, &s32_value)); request->msg_id = s32_value; /* decode msgMaxSize */ IF_PARSE_EXEC(snmp_asn1_dec_tlv(&pbuf_stream, &tlv)); IF_PARSE_ASSERT(tlv.type == SNMP_ASN1_TYPE_INTEGER); parent_tlv_value_len -= SNMP_ASN1_TLV_LENGTH(tlv); IF_PARSE_ASSERT(parent_tlv_value_len > 0); IF_PARSE_EXEC(snmp_asn1_dec_s32t(&pbuf_stream, tlv.value_len, &s32_value)); request->msg_max_size = s32_value; /* decode msgFlags */ IF_PARSE_EXEC(snmp_asn1_dec_tlv(&pbuf_stream, &tlv)); IF_PARSE_ASSERT(tlv.type == SNMP_ASN1_TYPE_OCTET_STRING); parent_tlv_value_len -= SNMP_ASN1_TLV_LENGTH(tlv); IF_PARSE_ASSERT(parent_tlv_value_len > 0); IF_PARSE_EXEC(snmp_asn1_dec_s32t(&pbuf_stream, 
tlv.value_len, &s32_value)); request->msg_flags = (u8_t)s32_value; /* decode msgSecurityModel */ IF_PARSE_EXEC(snmp_asn1_dec_tlv(&pbuf_stream, &tlv)); IF_PARSE_ASSERT(tlv.type == SNMP_ASN1_TYPE_INTEGER); parent_tlv_value_len -= SNMP_ASN1_TLV_LENGTH(tlv); IF_PARSE_ASSERT(parent_tlv_value_len > 0); IF_PARSE_EXEC(snmp_asn1_dec_s32t(&pbuf_stream, tlv.value_len, &s32_value)); request->msg_security_model = s32_value; /* RFC3414 msgSecurityParameters * The User-based Security Model defines the contents of the OCTET * STRING as a SEQUENCE. * * We skip the protective dummy OCTET STRING header * to access the SEQUENCE header. */ IF_PARSE_EXEC(snmp_asn1_dec_tlv(&pbuf_stream, &tlv)); IF_PARSE_ASSERT(tlv.type == SNMP_ASN1_TYPE_OCTET_STRING); parent_tlv_value_len -= SNMP_ASN1_TLV_HDR_LENGTH(tlv); IF_PARSE_ASSERT(parent_tlv_value_len > 0); /* msgSecurityParameters SEQUENCE header */ IF_PARSE_EXEC(snmp_asn1_dec_tlv(&pbuf_stream, &tlv)); IF_PARSE_ASSERT(tlv.type == SNMP_ASN1_TYPE_SEQUENCE); parent_tlv_value_len -= SNMP_ASN1_TLV_HDR_LENGTH(tlv); IF_PARSE_ASSERT(parent_tlv_value_len > 0); /* decode msgAuthoritativeEngineID */ IF_PARSE_EXEC(snmp_asn1_dec_tlv(&pbuf_stream, &tlv)); IF_PARSE_ASSERT(tlv.type == SNMP_ASN1_TYPE_OCTET_STRING); parent_tlv_value_len -= SNMP_ASN1_TLV_LENGTH(tlv); IF_PARSE_ASSERT(parent_tlv_value_len > 0); IF_PARSE_EXEC(snmp_asn1_dec_raw(&pbuf_stream, tlv.value_len, request->msg_authoritative_engine_id, &u16_value, SNMP_V3_MAX_ENGINE_ID_LENGTH)); request->msg_authoritative_engine_id_len = (u8_t)u16_value; /* msgAuthoritativeEngineBoots */ IF_PARSE_EXEC(snmp_asn1_dec_tlv(&pbuf_stream, &tlv)); IF_PARSE_ASSERT(tlv.type == SNMP_ASN1_TYPE_INTEGER); parent_tlv_value_len -= SNMP_ASN1_TLV_LENGTH(tlv); IF_PARSE_ASSERT(parent_tlv_value_len > 0); IF_PARSE_EXEC(snmp_asn1_dec_s32t(&pbuf_stream, tlv.value_len, &request->msg_authoritative_engine_boots)); /* msgAuthoritativeEngineTime */ IF_PARSE_EXEC(snmp_asn1_dec_tlv(&pbuf_stream, &tlv)); IF_PARSE_ASSERT(tlv.type == SNMP_ASN1_TYPE_INTEGER); parent_tlv_value_len -= SNMP_ASN1_TLV_LENGTH(tlv); IF_PARSE_ASSERT(parent_tlv_value_len > 0); IF_PARSE_EXEC(snmp_asn1_dec_s32t(&pbuf_stream, tlv.value_len, &request->msg_authoritative_engine_time)); /* @todo: Implement time window checking */ /* msgUserName */ IF_PARSE_EXEC(snmp_asn1_dec_tlv(&pbuf_stream, &tlv)); IF_PARSE_ASSERT(tlv.type == SNMP_ASN1_TYPE_OCTET_STRING); parent_tlv_value_len -= SNMP_ASN1_TLV_LENGTH(tlv); IF_PARSE_ASSERT(parent_tlv_value_len > 0); IF_PARSE_EXEC(snmp_asn1_dec_raw(&pbuf_stream, tlv.value_len, request->msg_user_name, &u16_value, SNMP_V3_MAX_USER_LENGTH)); request->msg_user_name_len = (u8_t)u16_value; /* @todo: Implement unknown user error response */ IF_PARSE_EXEC(snmpv3_get_user((char*)request->msg_user_name, NULL, NULL, NULL, NULL)); /* msgAuthenticationParameters */ memset(request->msg_authentication_parameters, 0, SNMP_V3_MAX_AUTH_PARAM_LENGTH); IF_PARSE_EXEC(snmp_asn1_dec_tlv(&pbuf_stream, &tlv)); IF_PARSE_ASSERT(tlv.type == SNMP_ASN1_TYPE_OCTET_STRING); parent_tlv_value_len -= SNMP_ASN1_TLV_LENGTH(tlv); IF_PARSE_ASSERT(parent_tlv_value_len > 0); /* Remember position */ inbound_msgAuthenticationParameters_offset = pbuf_stream.offset; LWIP_UNUSED_ARG(inbound_msgAuthenticationParameters_offset); /* Read auth parameters */ IF_PARSE_ASSERT(tlv.value_len <= SNMP_V3_MAX_AUTH_PARAM_LENGTH); IF_PARSE_EXEC(snmp_asn1_dec_raw(&pbuf_stream, tlv.value_len, request->msg_authentication_parameters, &u16_value, tlv.value_len)); #if LWIP_SNMP_V3_CRYPTO if (request->msg_flags & SNMP_V3_AUTH_FLAG) { 
const u8_t zero_arr[SNMP_V3_MAX_AUTH_PARAM_LENGTH] = { 0 }; u8_t key[20]; u8_t algo; u8_t hmac[LWIP_MAX(SNMP_V3_SHA_LEN, SNMP_V3_MD5_LEN)]; struct snmp_pbuf_stream auth_stream; /* Rewind stream */ IF_PARSE_EXEC(snmp_pbuf_stream_init(&pbuf_stream, request->inbound_pbuf, 0, request->inbound_pbuf->tot_len)); IF_PARSE_EXEC(snmp_pbuf_stream_seek_abs(&pbuf_stream, inbound_msgAuthenticationParameters_offset)); /* Set auth parameters to zero for verification */ IF_PARSE_EXEC(snmp_asn1_enc_raw(&pbuf_stream, zero_arr, tlv.value_len)); /* Verify authentication */ IF_PARSE_EXEC(snmp_pbuf_stream_init(&auth_stream, request->inbound_pbuf, 0, request->inbound_pbuf->tot_len)); IF_PARSE_EXEC(snmpv3_get_user((char*)request->msg_user_name, &algo, key, NULL, NULL)); IF_PARSE_EXEC(snmpv3_auth(&auth_stream, request->inbound_pbuf->tot_len, key, algo, hmac)); /* @todo: Implement error response */ IF_PARSE_EXEC(memcmp(request->msg_authentication_parameters, hmac, SNMP_V3_MAX_AUTH_PARAM_LENGTH)); } #else /* Ungraceful exit if we encounter cryptography and don't support it. * @todo: Implement error response */ IF_PARSE_ASSERT(!(request->msg_flags & (SNMP_V3_AUTH_FLAG | SNMP_V3_PRIV_FLAG))); #endif /* msgPrivacyParameters */ memset(request->msg_privacy_parameters, 0, SNMP_V3_MAX_PRIV_PARAM_LENGTH); IF_PARSE_EXEC(snmp_asn1_dec_tlv(&pbuf_stream, &tlv)); IF_PARSE_ASSERT(tlv.type == SNMP_ASN1_TYPE_OCTET_STRING); parent_tlv_value_len -= SNMP_ASN1_TLV_LENGTH(tlv); IF_PARSE_ASSERT(parent_tlv_value_len > 0); IF_PARSE_EXEC(snmp_asn1_dec_raw(&pbuf_stream, tlv.value_len, request->msg_privacy_parameters, &u16_value, SNMP_V3_MAX_PRIV_PARAM_LENGTH)); #if LWIP_SNMP_V3_CRYPTO /* Decrypt message */ if (request->msg_flags & SNMP_V3_PRIV_FLAG) { u8_t key[20]; u8_t algo; IF_PARSE_EXEC(snmp_asn1_dec_tlv(&pbuf_stream, &tlv)); IF_PARSE_ASSERT(tlv.type == SNMP_ASN1_TYPE_OCTET_STRING); parent_tlv_value_len -= SNMP_ASN1_TLV_HDR_LENGTH(tlv); IF_PARSE_ASSERT(parent_tlv_value_len > 0); IF_PARSE_EXEC(snmpv3_get_user((char*)request->msg_user_name, NULL, NULL, &algo, key)); IF_PARSE_EXEC(snmpv3_crypt(&pbuf_stream, tlv.value_len, key, request->msg_privacy_parameters, request->msg_authoritative_engine_boots, request->msg_authoritative_engine_time, algo, SNMP_V3_PRIV_MODE_DECRYPT)); } #endif /* Scoped PDU * Encryption context */ IF_PARSE_EXEC(snmp_asn1_dec_tlv(&pbuf_stream, &tlv)); IF_PARSE_ASSERT(tlv.type == SNMP_ASN1_TYPE_SEQUENCE); parent_tlv_value_len -= SNMP_ASN1_TLV_HDR_LENGTH(tlv); IF_PARSE_ASSERT(parent_tlv_value_len > 0); /* contextEngineID */ IF_PARSE_EXEC(snmp_asn1_dec_tlv(&pbuf_stream, &tlv)); IF_PARSE_ASSERT(tlv.type == SNMP_ASN1_TYPE_OCTET_STRING); parent_tlv_value_len -= SNMP_ASN1_TLV_LENGTH(tlv); IF_PARSE_ASSERT(parent_tlv_value_len > 0); IF_PARSE_EXEC(snmp_asn1_dec_raw(&pbuf_stream, tlv.value_len, request->context_engine_id, &u16_value, SNMP_V3_MAX_ENGINE_ID_LENGTH)); request->context_engine_id_len = (u8_t)u16_value; /* contextName */ IF_PARSE_EXEC(snmp_asn1_dec_tlv(&pbuf_stream, &tlv)); IF_PARSE_ASSERT(tlv.type == SNMP_ASN1_TYPE_OCTET_STRING); parent_tlv_value_len -= SNMP_ASN1_TLV_LENGTH(tlv); IF_PARSE_ASSERT(parent_tlv_value_len > 0); IF_PARSE_EXEC(snmp_asn1_dec_raw(&pbuf_stream, tlv.value_len, request->context_name, &u16_value, SNMP_V3_MAX_ENGINE_ID_LENGTH)); request->context_name_len = (u8_t)u16_value; } else #endif { /* decode community */ IF_PARSE_EXEC(snmp_asn1_dec_tlv(&pbuf_stream, &tlv)); IF_PARSE_ASSERT(tlv.type == SNMP_ASN1_TYPE_OCTET_STRING); parent_tlv_value_len -= SNMP_ASN1_TLV_LENGTH(tlv); 
IF_PARSE_ASSERT(parent_tlv_value_len > 0); err = snmp_asn1_dec_raw(&pbuf_stream, tlv.value_len, request->community, &request->community_strlen, SNMP_MAX_COMMUNITY_STR_LEN); if (err == ERR_MEM) { /* community string does not fit in our buffer -> its too long -> its invalid */ request->community_strlen = 0; snmp_pbuf_stream_seek(&pbuf_stream, tlv.value_len); } else { IF_PARSE_ASSERT(err == ERR_OK); } /* add zero terminator */ request->community[request->community_strlen] = 0; } /* decode PDU type (next container level) */ IF_PARSE_EXEC(snmp_asn1_dec_tlv(&pbuf_stream, &tlv)); IF_PARSE_ASSERT(tlv.value_len <= pbuf_stream.length); request->inbound_padding_len = pbuf_stream.length - tlv.value_len; parent_tlv_value_len = tlv.value_len; /* validate PDU type */ switch(tlv.type) { case (SNMP_ASN1_CLASS_CONTEXT | SNMP_ASN1_CONTENTTYPE_CONSTRUCTED | SNMP_ASN1_CONTEXT_PDU_GET_REQ): /* GetRequest PDU */ snmp_stats.ingetrequests++; break; case (SNMP_ASN1_CLASS_CONTEXT | SNMP_ASN1_CONTENTTYPE_CONSTRUCTED | SNMP_ASN1_CONTEXT_PDU_GET_NEXT_REQ): /* GetNextRequest PDU */ snmp_stats.ingetnexts++; break; case (SNMP_ASN1_CLASS_CONTEXT | SNMP_ASN1_CONTENTTYPE_CONSTRUCTED | SNMP_ASN1_CONTEXT_PDU_GET_BULK_REQ): /* GetBulkRequest PDU */ if (request->version < SNMP_VERSION_2c) { /* RFC2089: invalid, drop packet */ return ERR_ARG; } break; case (SNMP_ASN1_CLASS_CONTEXT | SNMP_ASN1_CONTENTTYPE_CONSTRUCTED | SNMP_ASN1_CONTEXT_PDU_SET_REQ): /* SetRequest PDU */ snmp_stats.insetrequests++; break; default: /* unsupported input PDU for this agent (no parse error) */ LWIP_DEBUGF(SNMP_DEBUG, ("Unknown/Invalid SNMP PDU type received: %d", tlv.type)); \ return ERR_ARG; break; } request->request_type = tlv.type & SNMP_ASN1_DATATYPE_MASK; /* validate community (do this after decoding PDU type because we don't want to increase 'inbadcommunitynames' for wrong frame types */ if (request->community_strlen == 0) { /* community string was too long or really empty*/ snmp_stats.inbadcommunitynames++; snmp_authfail_trap(); return ERR_ARG; } else if (request->request_type == SNMP_ASN1_CONTEXT_PDU_SET_REQ) { if (snmp_community_write[0] == 0) { /* our write community is empty, that means all our objects are readonly */ request->error_status = SNMP_ERR_NOTWRITABLE; request->error_index = 1; } else if (strncmp(snmp_community_write, (const char*)request->community, SNMP_MAX_COMMUNITY_STR_LEN) != 0) { /* community name does not match */ snmp_stats.inbadcommunitynames++; snmp_authfail_trap(); return ERR_ARG; } } else { if (strncmp(snmp_community, (const char*)request->community, SNMP_MAX_COMMUNITY_STR_LEN) != 0) { /* community name does not match */ snmp_stats.inbadcommunitynames++; snmp_authfail_trap(); return ERR_ARG; } } /* decode request ID */ IF_PARSE_EXEC(snmp_asn1_dec_tlv(&pbuf_stream, &tlv)); IF_PARSE_ASSERT(tlv.type == SNMP_ASN1_TYPE_INTEGER); parent_tlv_value_len -= SNMP_ASN1_TLV_LENGTH(tlv); IF_PARSE_ASSERT(parent_tlv_value_len > 0); IF_PARSE_EXEC(snmp_asn1_dec_s32t(&pbuf_stream, tlv.value_len, &request->request_id)); /* decode error status / non-repeaters */ IF_PARSE_EXEC(snmp_asn1_dec_tlv(&pbuf_stream, &tlv)); IF_PARSE_ASSERT(tlv.type == SNMP_ASN1_TYPE_INTEGER); parent_tlv_value_len -= SNMP_ASN1_TLV_LENGTH(tlv); IF_PARSE_ASSERT(parent_tlv_value_len > 0); if (request->request_type == SNMP_ASN1_CONTEXT_PDU_GET_BULK_REQ) { IF_PARSE_EXEC(snmp_asn1_dec_s32t(&pbuf_stream, tlv.value_len, &request->non_repeaters)); if (request->non_repeaters < 0) { /* RFC 1905, 4.2.3 */ request->non_repeaters = 0; } } else { /* only check valid value, don't 
touch 'request->error_status', maybe a response error status was already set to above; */ IF_PARSE_EXEC(snmp_asn1_dec_s32t(&pbuf_stream, tlv.value_len, &s32_value)); IF_PARSE_ASSERT(s32_value == SNMP_ERR_NOERROR); } /* decode error index / max-repetitions */ IF_PARSE_EXEC(snmp_asn1_dec_tlv(&pbuf_stream, &tlv)); IF_PARSE_ASSERT(tlv.type == SNMP_ASN1_TYPE_INTEGER); parent_tlv_value_len -= SNMP_ASN1_TLV_LENGTH(tlv); IF_PARSE_ASSERT(parent_tlv_value_len > 0); if (request->request_type == SNMP_ASN1_CONTEXT_PDU_GET_BULK_REQ) { IF_PARSE_EXEC(snmp_asn1_dec_s32t(&pbuf_stream, tlv.value_len, &request->max_repetitions)); if (request->max_repetitions < 0) { /* RFC 1905, 4.2.3 */ request->max_repetitions = 0; } } else { IF_PARSE_EXEC(snmp_asn1_dec_s32t(&pbuf_stream, tlv.value_len, &request->error_index)); IF_PARSE_ASSERT(s32_value == 0); } /* decode varbind-list type (next container level) */ IF_PARSE_EXEC(snmp_asn1_dec_tlv(&pbuf_stream, &tlv)); IF_PARSE_ASSERT((tlv.type == SNMP_ASN1_TYPE_SEQUENCE) && (tlv.value_len <= pbuf_stream.length)); request->inbound_varbind_offset = pbuf_stream.offset; request->inbound_varbind_len = pbuf_stream.length - request->inbound_padding_len; snmp_vb_enumerator_init(&(request->inbound_varbind_enumerator), request->inbound_pbuf, request->inbound_varbind_offset, request->inbound_varbind_len); return ERR_OK; } #define OF_BUILD_EXEC(code) BUILD_EXEC(code, ERR_ARG) static err_t snmp_prepare_outbound_frame(struct snmp_request *request) { struct snmp_asn1_tlv tlv; struct snmp_pbuf_stream* pbuf_stream = &(request->outbound_pbuf_stream); /* try allocating pbuf(s) for maximum response size */ request->outbound_pbuf = pbuf_alloc(PBUF_TRANSPORT, 1472, PBUF_RAM); if (request->outbound_pbuf == NULL) { return ERR_MEM; } snmp_pbuf_stream_init(pbuf_stream, request->outbound_pbuf, 0, request->outbound_pbuf->tot_len); /* 'Message' sequence */ SNMP_ASN1_SET_TLV_PARAMS(tlv, SNMP_ASN1_TYPE_SEQUENCE, 3, 0); OF_BUILD_EXEC( snmp_ans1_enc_tlv(pbuf_stream, &tlv) ); /* version */ SNMP_ASN1_SET_TLV_PARAMS(tlv, SNMP_ASN1_TYPE_INTEGER, 0, 0); snmp_asn1_enc_s32t_cnt(request->version, &tlv.value_len); OF_BUILD_EXEC( snmp_ans1_enc_tlv(pbuf_stream, &tlv) ); OF_BUILD_EXEC( snmp_asn1_enc_s32t(pbuf_stream, tlv.value_len, request->version) ); #if LWIP_SNMP_V3 if (request->version < SNMP_VERSION_3) { #endif /* community */ SNMP_ASN1_SET_TLV_PARAMS(tlv, SNMP_ASN1_TYPE_OCTET_STRING, 0, request->community_strlen); OF_BUILD_EXEC( snmp_ans1_enc_tlv(pbuf_stream, &tlv) ); OF_BUILD_EXEC( snmp_asn1_enc_raw(pbuf_stream, request->community, request->community_strlen) ); #if LWIP_SNMP_V3 } else { const char* id; /* globalData */ request->outbound_msg_global_data_offset = pbuf_stream->offset; SNMP_ASN1_SET_TLV_PARAMS(tlv, SNMP_ASN1_TYPE_SEQUENCE, 1, 0); OF_BUILD_EXEC(snmp_ans1_enc_tlv(pbuf_stream, &tlv)); /* msgID */ SNMP_ASN1_SET_TLV_PARAMS(tlv, SNMP_ASN1_TYPE_INTEGER, 0, 1); snmp_asn1_enc_s32t_cnt(request->msg_id, &tlv.value_len); OF_BUILD_EXEC(snmp_ans1_enc_tlv(pbuf_stream, &tlv)); OF_BUILD_EXEC(snmp_asn1_enc_s32t(pbuf_stream, tlv.value_len, request->msg_id)); /* msgMaxSize */ SNMP_ASN1_SET_TLV_PARAMS(tlv, SNMP_ASN1_TYPE_INTEGER, 0, 1); snmp_asn1_enc_s32t_cnt(request->msg_max_size, &tlv.value_len); OF_BUILD_EXEC(snmp_ans1_enc_tlv(pbuf_stream, &tlv)); OF_BUILD_EXEC(snmp_asn1_enc_s32t(pbuf_stream, tlv.value_len, request->msg_max_size)); /* msgFlags */ SNMP_ASN1_SET_TLV_PARAMS(tlv, SNMP_ASN1_TYPE_OCTET_STRING, 0, 1); OF_BUILD_EXEC(snmp_ans1_enc_tlv(pbuf_stream, &tlv)); OF_BUILD_EXEC(snmp_asn1_enc_raw(pbuf_stream, 
&request->msg_flags, 1)); /* msgSecurityModel */ SNMP_ASN1_SET_TLV_PARAMS(tlv, SNMP_ASN1_TYPE_INTEGER, 0, 1); snmp_asn1_enc_s32t_cnt(request->msg_security_model, &tlv.value_len); OF_BUILD_EXEC(snmp_ans1_enc_tlv(pbuf_stream, &tlv)); OF_BUILD_EXEC(snmp_asn1_enc_s32t(pbuf_stream, tlv.value_len, request->msg_security_model)); /* end of msgGlobalData */ request->outbound_msg_global_data_end = pbuf_stream->offset; /* msgSecurityParameters */ request->outbound_msg_security_parameters_str_offset = pbuf_stream->offset; SNMP_ASN1_SET_TLV_PARAMS(tlv, SNMP_ASN1_TYPE_OCTET_STRING, 1, 0); OF_BUILD_EXEC(snmp_ans1_enc_tlv(pbuf_stream, &tlv)); request->outbound_msg_security_parameters_seq_offset = pbuf_stream->offset; SNMP_ASN1_SET_TLV_PARAMS(tlv, SNMP_ASN1_TYPE_SEQUENCE, 1, 0); OF_BUILD_EXEC(snmp_ans1_enc_tlv(pbuf_stream, &tlv)); /* msgAuthoritativeEngineID */ snmpv3_get_engine_id(&id, &request->msg_authoritative_engine_id_len); MEMCPY(request->msg_authoritative_engine_id, id, request->msg_authoritative_engine_id_len); SNMP_ASN1_SET_TLV_PARAMS(tlv, SNMP_ASN1_TYPE_OCTET_STRING, 0, request->msg_authoritative_engine_id_len); OF_BUILD_EXEC(snmp_ans1_enc_tlv(pbuf_stream, &tlv)); OF_BUILD_EXEC(snmp_asn1_enc_raw(pbuf_stream, request->msg_authoritative_engine_id, request->msg_authoritative_engine_id_len)); request->msg_authoritative_engine_time = snmpv3_get_engine_time(); request->msg_authoritative_engine_boots = snmpv3_get_engine_boots(); /* msgAuthoritativeEngineBoots */ SNMP_ASN1_SET_TLV_PARAMS(tlv, SNMP_ASN1_TYPE_INTEGER, 0, 0); snmp_asn1_enc_s32t_cnt(request->msg_authoritative_engine_boots, &tlv.value_len); OF_BUILD_EXEC(snmp_ans1_enc_tlv(pbuf_stream, &tlv)); OF_BUILD_EXEC(snmp_asn1_enc_s32t(pbuf_stream, tlv.value_len, request->msg_authoritative_engine_boots)); /* msgAuthoritativeEngineTime */ SNMP_ASN1_SET_TLV_PARAMS(tlv, SNMP_ASN1_TYPE_INTEGER, 0, 0); snmp_asn1_enc_s32t_cnt(request->msg_authoritative_engine_time, &tlv.value_len); OF_BUILD_EXEC(snmp_ans1_enc_tlv(pbuf_stream, &tlv)); OF_BUILD_EXEC(snmp_asn1_enc_s32t(pbuf_stream, tlv.value_len, request->msg_authoritative_engine_time)); /* msgUserName */ SNMP_ASN1_SET_TLV_PARAMS(tlv, SNMP_ASN1_TYPE_OCTET_STRING, 0, request->msg_user_name_len); OF_BUILD_EXEC(snmp_ans1_enc_tlv(pbuf_stream, &tlv)); OF_BUILD_EXEC(snmp_asn1_enc_raw(pbuf_stream, request->msg_user_name, request->msg_user_name_len)); #if LWIP_SNMP_V3_CRYPTO /* msgAuthenticationParameters */ if (request->msg_flags & SNMP_V3_AUTH_FLAG) { memset(request->msg_authentication_parameters, 0, SNMP_V3_MAX_AUTH_PARAM_LENGTH); request->outbound_msg_authentication_parameters_offset = pbuf_stream->offset; SNMP_ASN1_SET_TLV_PARAMS(tlv, SNMP_ASN1_TYPE_OCTET_STRING, 1, SNMP_V3_MAX_AUTH_PARAM_LENGTH); OF_BUILD_EXEC(snmp_ans1_enc_tlv(pbuf_stream, &tlv)); OF_BUILD_EXEC(snmp_asn1_enc_raw(pbuf_stream, request->msg_authentication_parameters, SNMP_V3_MAX_AUTH_PARAM_LENGTH)); } else #endif { SNMP_ASN1_SET_TLV_PARAMS(tlv, SNMP_ASN1_TYPE_OCTET_STRING, 0, 0); OF_BUILD_EXEC(snmp_ans1_enc_tlv(pbuf_stream, &tlv)); } #if LWIP_SNMP_V3_CRYPTO /* msgPrivacyParameters */ if (request->msg_flags & SNMP_V3_PRIV_FLAG) { snmpv3_build_priv_param(request->msg_privacy_parameters); SNMP_ASN1_SET_TLV_PARAMS(tlv, SNMP_ASN1_TYPE_OCTET_STRING, 0, SNMP_V3_MAX_PRIV_PARAM_LENGTH); OF_BUILD_EXEC(snmp_ans1_enc_tlv(pbuf_stream, &tlv)); OF_BUILD_EXEC(snmp_asn1_enc_raw(pbuf_stream, request->msg_privacy_parameters, SNMP_V3_MAX_PRIV_PARAM_LENGTH)); } else #endif { SNMP_ASN1_SET_TLV_PARAMS(tlv, SNMP_ASN1_TYPE_OCTET_STRING, 0, 0); 
OF_BUILD_EXEC(snmp_ans1_enc_tlv(pbuf_stream, &tlv) ); } /* End of msgSecurityParameters, so we can calculate the length of this sequence later */ request->outbound_msg_security_parameters_end = pbuf_stream->offset; #if LWIP_SNMP_V3_CRYPTO /* For encryption we have to encapsulate the payload in an octet string */ if (request->msg_flags & SNMP_V3_PRIV_FLAG) { request->outbound_scoped_pdu_string_offset = pbuf_stream->offset; SNMP_ASN1_SET_TLV_PARAMS(tlv, SNMP_ASN1_TYPE_OCTET_STRING, 3, 0); OF_BUILD_EXEC(snmp_ans1_enc_tlv(pbuf_stream, &tlv)); } #endif /* Scoped PDU * Encryption context */ request->outbound_scoped_pdu_seq_offset = pbuf_stream->offset; SNMP_ASN1_SET_TLV_PARAMS(tlv, SNMP_ASN1_TYPE_SEQUENCE, 3, 0); OF_BUILD_EXEC(snmp_ans1_enc_tlv(pbuf_stream, &tlv)); /* contextEngineID */ snmpv3_get_engine_id(&id, &request->context_engine_id_len); MEMCPY(request->context_engine_id, id, request->context_engine_id_len); SNMP_ASN1_SET_TLV_PARAMS(tlv, SNMP_ASN1_TYPE_OCTET_STRING, 0, request->context_engine_id_len); OF_BUILD_EXEC(snmp_ans1_enc_tlv(pbuf_stream, &tlv)); OF_BUILD_EXEC(snmp_asn1_enc_raw(pbuf_stream, request->context_engine_id, request->context_engine_id_len)); /* contextName */ SNMP_ASN1_SET_TLV_PARAMS(tlv, SNMP_ASN1_TYPE_OCTET_STRING, 0, request->context_name_len); OF_BUILD_EXEC(snmp_ans1_enc_tlv(pbuf_stream, &tlv)); OF_BUILD_EXEC(snmp_asn1_enc_raw(pbuf_stream, request->context_name, request->context_name_len)); } #endif /* 'PDU' sequence */ request->outbound_pdu_offset = pbuf_stream->offset; SNMP_ASN1_SET_TLV_PARAMS(tlv, (SNMP_ASN1_CLASS_CONTEXT | SNMP_ASN1_CONTENTTYPE_CONSTRUCTED | SNMP_ASN1_CONTEXT_PDU_GET_RESP), 3, 0); OF_BUILD_EXEC( snmp_ans1_enc_tlv(pbuf_stream, &tlv) ); /* request ID */ SNMP_ASN1_SET_TLV_PARAMS(tlv, SNMP_ASN1_TYPE_INTEGER, 0, 0); snmp_asn1_enc_s32t_cnt(request->request_id, &tlv.value_len); OF_BUILD_EXEC( snmp_ans1_enc_tlv(pbuf_stream, &tlv) ); OF_BUILD_EXEC( snmp_asn1_enc_s32t(pbuf_stream, tlv.value_len, request->request_id) ); /* error status */ SNMP_ASN1_SET_TLV_PARAMS(tlv, SNMP_ASN1_TYPE_INTEGER, 0, 1); OF_BUILD_EXEC( snmp_ans1_enc_tlv(pbuf_stream, &tlv) ); request->outbound_error_status_offset = pbuf_stream->offset; OF_BUILD_EXEC( snmp_pbuf_stream_write(pbuf_stream, 0) ); /* error index */ SNMP_ASN1_SET_TLV_PARAMS(tlv, SNMP_ASN1_TYPE_INTEGER, 0, 1); OF_BUILD_EXEC( snmp_ans1_enc_tlv(pbuf_stream, &tlv) ); request->outbound_error_index_offset = pbuf_stream->offset; OF_BUILD_EXEC( snmp_pbuf_stream_write(pbuf_stream, 0) ); /* 'VarBindList' sequence */ SNMP_ASN1_SET_TLV_PARAMS(tlv, SNMP_ASN1_TYPE_SEQUENCE, 3, 0); OF_BUILD_EXEC( snmp_ans1_enc_tlv(pbuf_stream, &tlv) ); request->outbound_varbind_offset = pbuf_stream->offset; return ERR_OK; } /** Calculate the length of a varbind list */ err_t snmp_varbind_length(struct snmp_varbind *varbind, struct snmp_varbind_len *len) { /* calculate required lengths */ snmp_asn1_enc_oid_cnt(varbind->oid.id, varbind->oid.len, &len->oid_value_len); snmp_asn1_enc_length_cnt(len->oid_value_len, &len->oid_len_len); if (varbind->value_len == 0) { len->value_value_len = 0; } else if (varbind->value_len & SNMP_GET_VALUE_RAW_DATA) { len->value_value_len = varbind->value_len & (~SNMP_GET_VALUE_RAW_DATA); } else { switch (varbind->type) { case SNMP_ASN1_TYPE_INTEGER: if (varbind->value_len != sizeof (s32_t)) { return ERR_VAL; } snmp_asn1_enc_s32t_cnt(*((s32_t*) varbind->value), &len->value_value_len); break; case SNMP_ASN1_TYPE_COUNTER: case SNMP_ASN1_TYPE_GAUGE: case SNMP_ASN1_TYPE_TIMETICKS: if (varbind->value_len != sizeof (u32_t)) { return 
ERR_VAL; } snmp_asn1_enc_u32t_cnt(*((u32_t*) varbind->value), &len->value_value_len); break; case SNMP_ASN1_TYPE_OCTET_STRING: case SNMP_ASN1_TYPE_IPADDR: case SNMP_ASN1_TYPE_OPAQUE: len->value_value_len = varbind->value_len; break; case SNMP_ASN1_TYPE_NULL: if (varbind->value_len != 0) { return ERR_VAL; } len->value_value_len = 0; break; case SNMP_ASN1_TYPE_OBJECT_ID: if ((varbind->value_len & 0x03) != 0) { return ERR_VAL; } snmp_asn1_enc_oid_cnt((u32_t*) varbind->value, varbind->value_len >> 2, &len->value_value_len); break; case SNMP_ASN1_TYPE_COUNTER64: if (varbind->value_len != (2 * sizeof (u32_t))) { return ERR_VAL; } snmp_asn1_enc_u64t_cnt((u32_t*) varbind->value, &len->value_value_len); break; default: /* unsupported type */ return ERR_VAL; } } snmp_asn1_enc_length_cnt(len->value_value_len, &len->value_len_len); len->vb_value_len = 1 + len->oid_len_len + len->oid_value_len + 1 + len->value_len_len + len->value_value_len; snmp_asn1_enc_length_cnt(len->vb_value_len, &len->vb_len_len); return ERR_OK; } #define OVB_BUILD_EXEC(code) BUILD_EXEC(code, ERR_ARG) err_t snmp_append_outbound_varbind(struct snmp_pbuf_stream *pbuf_stream, struct snmp_varbind* varbind) { struct snmp_asn1_tlv tlv; struct snmp_varbind_len len; err_t err; err = snmp_varbind_length(varbind, &len); if (err != ERR_OK) { return err; } /* check length already before adding first data because in case of GetBulk, * data added so far is returned and therefore no partial data shall be added */ if ((1 + len.vb_len_len + len.vb_value_len) > pbuf_stream->length) { return ERR_BUF; } /* 'VarBind' sequence */ SNMP_ASN1_SET_TLV_PARAMS(tlv, SNMP_ASN1_TYPE_SEQUENCE, len.vb_len_len, len.vb_value_len); OVB_BUILD_EXEC(snmp_ans1_enc_tlv(pbuf_stream, &tlv)); /* VarBind OID */ SNMP_ASN1_SET_TLV_PARAMS(tlv, SNMP_ASN1_TYPE_OBJECT_ID, len.oid_len_len, len.oid_value_len); OVB_BUILD_EXEC(snmp_ans1_enc_tlv(pbuf_stream, &tlv)); OVB_BUILD_EXEC(snmp_asn1_enc_oid(pbuf_stream, varbind->oid.id, varbind->oid.len)); /* VarBind value */ SNMP_ASN1_SET_TLV_PARAMS(tlv, varbind->type, len.value_len_len, len.value_value_len); OVB_BUILD_EXEC(snmp_ans1_enc_tlv(pbuf_stream, &tlv)); if (len.value_value_len > 0) { if (varbind->value_len & SNMP_GET_VALUE_RAW_DATA) { OVB_BUILD_EXEC(snmp_asn1_enc_raw(pbuf_stream, (u8_t*) varbind->value, len.value_value_len)); } else { switch (varbind->type) { case SNMP_ASN1_TYPE_INTEGER: OVB_BUILD_EXEC(snmp_asn1_enc_s32t(pbuf_stream, len.value_value_len, *((s32_t*) varbind->value))); break; case SNMP_ASN1_TYPE_COUNTER: case SNMP_ASN1_TYPE_GAUGE: case SNMP_ASN1_TYPE_TIMETICKS: OVB_BUILD_EXEC(snmp_asn1_enc_u32t(pbuf_stream, len.value_value_len, *((u32_t*) varbind->value))); break; case SNMP_ASN1_TYPE_OCTET_STRING: case SNMP_ASN1_TYPE_IPADDR: case SNMP_ASN1_TYPE_OPAQUE: OVB_BUILD_EXEC(snmp_asn1_enc_raw(pbuf_stream, (u8_t*) varbind->value, len.value_value_len)); len.value_value_len = varbind->value_len; break; case SNMP_ASN1_TYPE_OBJECT_ID: OVB_BUILD_EXEC(snmp_asn1_enc_oid(pbuf_stream, (u32_t*) varbind->value, varbind->value_len / sizeof (u32_t))); break; case SNMP_ASN1_TYPE_COUNTER64: OVB_BUILD_EXEC(snmp_asn1_enc_u64t(pbuf_stream, len.value_value_len, (u32_t*) varbind->value)); break; default: LWIP_ASSERT("Unknown variable type", 0); break; } } } return ERR_OK; } static err_t snmp_complete_outbound_frame(struct snmp_request *request) { struct snmp_asn1_tlv tlv; u16_t frame_size; u8_t outbound_padding = 0; if (request->version == SNMP_VERSION_1) { if (request->error_status != SNMP_ERR_NOERROR) { /* map v2c error codes to v1 compliant 
error code (according to RFC 2089) */ switch (request->error_status) { /* mapping of implementation specific "virtual" error codes * (during processing of frame we already stored them in error_status field, * so no need to check all varbinds here for those exceptions as suggested by RFC) */ case SNMP_ERR_NOSUCHINSTANCE: case SNMP_ERR_NOSUCHOBJECT: case SNMP_ERR_ENDOFMIBVIEW: request->error_status = SNMP_ERR_NOSUCHNAME; break; /* mapping according to RFC */ case SNMP_ERR_WRONGVALUE: case SNMP_ERR_WRONGENCODING: case SNMP_ERR_WRONGTYPE: case SNMP_ERR_WRONGLENGTH: case SNMP_ERR_INCONSISTENTVALUE: request->error_status = SNMP_ERR_BADVALUE; break; case SNMP_ERR_NOACCESS: case SNMP_ERR_NOTWRITABLE: case SNMP_ERR_NOCREATION: case SNMP_ERR_INCONSISTENTNAME: case SNMP_ERR_AUTHORIZATIONERROR: request->error_status = SNMP_ERR_NOSUCHNAME; break; case SNMP_ERR_RESOURCEUNAVAILABLE: case SNMP_ERR_COMMITFAILED: case SNMP_ERR_UNDOFAILED: default: request->error_status = SNMP_ERR_GENERROR; break; } } } else { if (request->request_type == SNMP_ASN1_CONTEXT_PDU_SET_REQ) { /* map error codes to according to RFC 1905 (4.2.5. The SetRequest-PDU) return 'NotWritable' for unknown OIDs) */ switch (request->error_status) { case SNMP_ERR_NOSUCHINSTANCE: case SNMP_ERR_NOSUCHOBJECT: case SNMP_ERR_ENDOFMIBVIEW: request->error_status = SNMP_ERR_NOTWRITABLE; break; default: break; } } if (request->error_status >= SNMP_VARBIND_EXCEPTION_OFFSET) { /* should never occur because v2 frames store exceptions directly inside varbinds and not as frame error_status */ LWIP_DEBUGF(SNMP_DEBUG, ("snmp_complete_outbound_frame() > Found v2 request with varbind exception code stored as error status!\n")); return ERR_ARG; } } if ((request->error_status != SNMP_ERR_NOERROR) || (request->request_type == SNMP_ASN1_CONTEXT_PDU_SET_REQ)) { /* all inbound vars are returned in response without any modification for error responses and successful set requests*/ struct snmp_pbuf_stream inbound_stream; OF_BUILD_EXEC( snmp_pbuf_stream_init(&inbound_stream, request->inbound_pbuf, request->inbound_varbind_offset, request->inbound_varbind_len) ); OF_BUILD_EXEC( snmp_pbuf_stream_init(&(request->outbound_pbuf_stream), request->outbound_pbuf, request->outbound_varbind_offset, request->outbound_pbuf->tot_len - request->outbound_varbind_offset) ); snmp_pbuf_stream_writeto(&inbound_stream, &(request->outbound_pbuf_stream), 0); } frame_size = request->outbound_pbuf_stream.offset; #if LWIP_SNMP_V3 && LWIP_SNMP_V3_CRYPTO /* Calculate padding for encryption */ if (request->version == SNMP_VERSION_3 && (request->msg_flags & SNMP_V3_PRIV_FLAG)) { u8_t i; outbound_padding = (8 - (u8_t)((frame_size - request->outbound_scoped_pdu_seq_offset) & 0x07)) & 0x07; for (i = 0; i < outbound_padding; i++) { snmp_pbuf_stream_write(&request->outbound_pbuf_stream, 0); } } #endif /* complete missing length in 'Message' sequence ; 'Message' tlv is located at the beginning (offset 0) */ SNMP_ASN1_SET_TLV_PARAMS(tlv, SNMP_ASN1_TYPE_SEQUENCE, 3, frame_size + outbound_padding - 1 - 3); /* - type - length_len(fixed, see snmp_prepare_outbound_frame()) */ OF_BUILD_EXEC( snmp_pbuf_stream_init(&(request->outbound_pbuf_stream), request->outbound_pbuf, 0, request->outbound_pbuf->tot_len) ); OF_BUILD_EXEC( snmp_ans1_enc_tlv(&(request->outbound_pbuf_stream), &tlv) ); #if LWIP_SNMP_V3 if (request->version == SNMP_VERSION_3) { /* complete missing length in 'globalData' sequence */ /* - type - length_len(fixed, see snmp_prepare_outbound_frame()) */ SNMP_ASN1_SET_TLV_PARAMS(tlv, 
SNMP_ASN1_TYPE_SEQUENCE, 1, request->outbound_msg_global_data_end - request->outbound_msg_global_data_offset - 1 - 1); OF_BUILD_EXEC(snmp_pbuf_stream_seek_abs(&(request->outbound_pbuf_stream), request->outbound_msg_global_data_offset)); OF_BUILD_EXEC(snmp_ans1_enc_tlv(&(request->outbound_pbuf_stream), &tlv)); /* complete missing length in 'msgSecurityParameters' sequence */ SNMP_ASN1_SET_TLV_PARAMS(tlv, SNMP_ASN1_TYPE_OCTET_STRING, 1, request->outbound_msg_security_parameters_end - request->outbound_msg_security_parameters_str_offset - 1 - 1); OF_BUILD_EXEC(snmp_pbuf_stream_seek_abs(&(request->outbound_pbuf_stream), request->outbound_msg_security_parameters_str_offset)); OF_BUILD_EXEC(snmp_ans1_enc_tlv(&(request->outbound_pbuf_stream), &tlv)); SNMP_ASN1_SET_TLV_PARAMS(tlv, SNMP_ASN1_TYPE_SEQUENCE, 1, request->outbound_msg_security_parameters_end - request->outbound_msg_security_parameters_seq_offset - 1 - 1); OF_BUILD_EXEC(snmp_pbuf_stream_seek_abs(&(request->outbound_pbuf_stream), request->outbound_msg_security_parameters_seq_offset)); OF_BUILD_EXEC(snmp_ans1_enc_tlv(&(request->outbound_pbuf_stream), &tlv)); /* complete missing length in scoped PDU sequence */ SNMP_ASN1_SET_TLV_PARAMS(tlv, SNMP_ASN1_TYPE_SEQUENCE, 3, frame_size - request->outbound_scoped_pdu_seq_offset - 1 - 3); OF_BUILD_EXEC(snmp_pbuf_stream_seek_abs(&(request->outbound_pbuf_stream), request->outbound_scoped_pdu_seq_offset)); OF_BUILD_EXEC(snmp_ans1_enc_tlv(&(request->outbound_pbuf_stream), &tlv)); } #endif /* complete missing length in 'PDU' sequence */ SNMP_ASN1_SET_TLV_PARAMS(tlv, (SNMP_ASN1_CLASS_CONTEXT | SNMP_ASN1_CONTENTTYPE_CONSTRUCTED | SNMP_ASN1_CONTEXT_PDU_GET_RESP), 3, frame_size - request->outbound_pdu_offset - 1 - 3); /* - type - length_len(fixed, see snmp_prepare_outbound_frame()) */ OF_BUILD_EXEC( snmp_pbuf_stream_seek_abs(&(request->outbound_pbuf_stream), request->outbound_pdu_offset) ); OF_BUILD_EXEC( snmp_ans1_enc_tlv(&(request->outbound_pbuf_stream), &tlv) ); /* process and encode final error status */ if (request->error_status != 0) { u16_t len; snmp_asn1_enc_s32t_cnt(request->error_status, &len); if (len != 1) { /* error, we only reserved one byte for it */ return ERR_ARG; } OF_BUILD_EXEC( snmp_pbuf_stream_seek_abs(&(request->outbound_pbuf_stream), request->outbound_error_status_offset) ); OF_BUILD_EXEC( snmp_asn1_enc_s32t(&(request->outbound_pbuf_stream), len, request->error_status) ); /* for compatibility to v1, log statistics; in v2 (RFC 1907) these statistics are obsoleted */ switch (request->error_status) { case SNMP_ERR_TOOBIG: snmp_stats.outtoobigs++; break; case SNMP_ERR_NOSUCHNAME: snmp_stats.outnosuchnames++; break; case SNMP_ERR_BADVALUE: snmp_stats.outbadvalues++; break; case SNMP_ERR_GENERROR: default: snmp_stats.outgenerrs++; break; } if (request->error_status == SNMP_ERR_TOOBIG) { request->error_index = 0; /* defined by RFC 1157 */ } else if (request->error_index == 0) { /* set index to varbind where error occured (if not already set before, e.g. 
during GetBulk processing) */ request->error_index = request->inbound_varbind_enumerator.varbind_count; } } else { if (request->request_type == SNMP_ASN1_CONTEXT_PDU_SET_REQ) { snmp_stats.intotalsetvars += request->inbound_varbind_enumerator.varbind_count; } else { snmp_stats.intotalreqvars += request->inbound_varbind_enumerator.varbind_count; } } /* encode final error index*/ if (request->error_index != 0) { u16_t len; snmp_asn1_enc_s32t_cnt(request->error_index, &len); if (len != 1) { /* error, we only reserved one byte for it */ return ERR_VAL; } OF_BUILD_EXEC( snmp_pbuf_stream_seek_abs(&(request->outbound_pbuf_stream), request->outbound_error_index_offset) ); OF_BUILD_EXEC( snmp_asn1_enc_s32t(&(request->outbound_pbuf_stream), len, request->error_index) ); } /* complete missing length in 'VarBindList' sequence ; 'VarBindList' tlv is located directly before varbind offset */ SNMP_ASN1_SET_TLV_PARAMS(tlv, SNMP_ASN1_TYPE_SEQUENCE, 3, frame_size - request->outbound_varbind_offset); OF_BUILD_EXEC( snmp_pbuf_stream_seek_abs(&(request->outbound_pbuf_stream), request->outbound_varbind_offset - 1 - 3) ); /* - type - length_len(fixed, see snmp_prepare_outbound_frame()) */ OF_BUILD_EXEC( snmp_ans1_enc_tlv(&(request->outbound_pbuf_stream), &tlv) ); /* Authenticate response */ #if LWIP_SNMP_V3 && LWIP_SNMP_V3_CRYPTO /* Encrypt response */ if (request->version == SNMP_VERSION_3 && (request->msg_flags & SNMP_V3_PRIV_FLAG)) { u8_t key[20]; u8_t algo; /* complete missing length in PDU sequence */ OF_BUILD_EXEC(snmp_pbuf_stream_init(&request->outbound_pbuf_stream, request->outbound_pbuf, 0, request->outbound_pbuf->tot_len)); OF_BUILD_EXEC(snmp_pbuf_stream_seek_abs(&(request->outbound_pbuf_stream), request->outbound_scoped_pdu_string_offset)); SNMP_ASN1_SET_TLV_PARAMS(tlv, SNMP_ASN1_TYPE_OCTET_STRING, 3, frame_size + outbound_padding - request->outbound_scoped_pdu_string_offset - 1 - 3); OF_BUILD_EXEC(snmp_ans1_enc_tlv(&(request->outbound_pbuf_stream), &tlv)); OF_BUILD_EXEC(snmpv3_get_user((char*)request->msg_user_name, NULL, NULL, &algo, key)); OF_BUILD_EXEC(snmpv3_crypt(&request->outbound_pbuf_stream, tlv.value_len, key, request->msg_privacy_parameters, request->msg_authoritative_engine_boots, request->msg_authoritative_engine_time, algo, SNMP_V3_PRIV_MODE_ENCRYPT)); } if (request->version == SNMP_VERSION_3 && (request->msg_flags & SNMP_V3_AUTH_FLAG)) { u8_t key[20]; u8_t algo; u8_t hmac[20]; OF_BUILD_EXEC(snmpv3_get_user((char*)request->msg_user_name, &algo, key, NULL, NULL)); OF_BUILD_EXEC(snmp_pbuf_stream_init(&(request->outbound_pbuf_stream), request->outbound_pbuf, 0, request->outbound_pbuf->tot_len)); OF_BUILD_EXEC(snmpv3_auth(&request->outbound_pbuf_stream, frame_size + outbound_padding, key, algo, hmac)); MEMCPY(request->msg_authentication_parameters, hmac, SNMP_V3_MAX_AUTH_PARAM_LENGTH); OF_BUILD_EXEC(snmp_pbuf_stream_init(&request->outbound_pbuf_stream, request->outbound_pbuf, 0, request->outbound_pbuf->tot_len)); OF_BUILD_EXEC(snmp_pbuf_stream_seek_abs(&request->outbound_pbuf_stream, request->outbound_msg_authentication_parameters_offset)); SNMP_ASN1_SET_TLV_PARAMS(tlv, SNMP_ASN1_TYPE_OCTET_STRING, 1, SNMP_V3_MAX_AUTH_PARAM_LENGTH); OF_BUILD_EXEC(snmp_ans1_enc_tlv(&request->outbound_pbuf_stream, &tlv)); OF_BUILD_EXEC(snmp_asn1_enc_raw(&request->outbound_pbuf_stream, request->msg_authentication_parameters, SNMP_V3_MAX_AUTH_PARAM_LENGTH)); } #endif pbuf_realloc(request->outbound_pbuf, frame_size + outbound_padding); snmp_stats.outgetresponses++; snmp_stats.outpkts++; return ERR_OK; } static 
void snmp_execute_write_callbacks(struct snmp_request *request) { struct snmp_varbind_enumerator inbound_varbind_enumerator; struct snmp_varbind vb; snmp_vb_enumerator_init(&inbound_varbind_enumerator, request->inbound_pbuf, request->inbound_varbind_offset, request->inbound_varbind_len); vb.value = NULL; /* do NOT decode value (we enumerate outbound buffer here, so all varbinds have values assigned, which we don't need here) */ while (snmp_vb_enumerator_get_next(&inbound_varbind_enumerator, &vb) == SNMP_VB_ENUMERATOR_ERR_OK) { snmp_write_callback(vb.oid.id, vb.oid.len, snmp_write_callback_arg); } } /* ----------------------------------------------------------------------- */ /* VarBind enumerator methods */ /* ----------------------------------------------------------------------- */ void snmp_vb_enumerator_init(struct snmp_varbind_enumerator* enumerator, struct pbuf* p, u16_t offset, u16_t length) { snmp_pbuf_stream_init(&(enumerator->pbuf_stream), p, offset, length); enumerator->varbind_count = 0; } #define VB_PARSE_EXEC(code) PARSE_EXEC(code, SNMP_VB_ENUMERATOR_ERR_ASN1ERROR) #define VB_PARSE_ASSERT(code) PARSE_ASSERT(code, SNMP_VB_ENUMERATOR_ERR_ASN1ERROR) snmp_vb_enumerator_err_t snmp_vb_enumerator_get_next(struct snmp_varbind_enumerator* enumerator, struct snmp_varbind* varbind) { struct snmp_asn1_tlv tlv; u16_t varbind_len; err_t err; if (enumerator->pbuf_stream.length == 0) { return SNMP_VB_ENUMERATOR_ERR_EOVB; } enumerator->varbind_count++; /* decode varbind itself (parent container of a varbind) */ VB_PARSE_EXEC(snmp_asn1_dec_tlv(&(enumerator->pbuf_stream), &tlv)); VB_PARSE_ASSERT((tlv.type == SNMP_ASN1_TYPE_SEQUENCE) && (tlv.value_len <= enumerator->pbuf_stream.length)); varbind_len = tlv.value_len; /* decode varbind name (object id) */ VB_PARSE_EXEC(snmp_asn1_dec_tlv(&(enumerator->pbuf_stream), &tlv)); VB_PARSE_ASSERT((tlv.type == SNMP_ASN1_TYPE_OBJECT_ID) && (SNMP_ASN1_TLV_LENGTH(tlv) < varbind_len) && (tlv.value_len < enumerator->pbuf_stream.length)); VB_PARSE_EXEC(snmp_asn1_dec_oid(&(enumerator->pbuf_stream), tlv.value_len, varbind->oid.id, &(varbind->oid.len), SNMP_MAX_OBJ_ID_LEN)); varbind_len -= SNMP_ASN1_TLV_LENGTH(tlv); /* decode varbind value (object id) */ VB_PARSE_EXEC(snmp_asn1_dec_tlv(&(enumerator->pbuf_stream), &tlv)); VB_PARSE_ASSERT((SNMP_ASN1_TLV_LENGTH(tlv) == varbind_len) && (tlv.value_len <= enumerator->pbuf_stream.length)); varbind->type = tlv.type; /* shall the value be decoded ? 
*/ if (varbind->value != NULL) { switch (varbind->type) { case SNMP_ASN1_TYPE_INTEGER: VB_PARSE_EXEC(snmp_asn1_dec_s32t(&(enumerator->pbuf_stream), tlv.value_len, (s32_t*)varbind->value)); varbind->value_len = sizeof(s32_t*); break; case SNMP_ASN1_TYPE_COUNTER: case SNMP_ASN1_TYPE_GAUGE: case SNMP_ASN1_TYPE_TIMETICKS: VB_PARSE_EXEC(snmp_asn1_dec_u32t(&(enumerator->pbuf_stream), tlv.value_len, (u32_t*)varbind->value)); varbind->value_len = sizeof(u32_t*); break; case SNMP_ASN1_TYPE_OCTET_STRING: case SNMP_ASN1_TYPE_OPAQUE: err = snmp_asn1_dec_raw(&(enumerator->pbuf_stream), tlv.value_len, (u8_t*)varbind->value, &varbind->value_len, SNMP_MAX_VALUE_SIZE); if (err == ERR_MEM) { return SNMP_VB_ENUMERATOR_ERR_INVALIDLENGTH; } VB_PARSE_ASSERT(err == ERR_OK); break; case SNMP_ASN1_TYPE_NULL: varbind->value_len = 0; break; case SNMP_ASN1_TYPE_OBJECT_ID: /* misuse tlv.length_len as OID_length transporter */ err = snmp_asn1_dec_oid(&(enumerator->pbuf_stream), tlv.value_len, (u32_t*)varbind->value, &tlv.length_len, SNMP_MAX_OBJ_ID_LEN); if (err == ERR_MEM) { return SNMP_VB_ENUMERATOR_ERR_INVALIDLENGTH; } VB_PARSE_ASSERT(err == ERR_OK); varbind->value_len = tlv.length_len * sizeof(u32_t); break; case SNMP_ASN1_TYPE_IPADDR: if (tlv.value_len == 4) { /* must be exactly 4 octets! */ VB_PARSE_EXEC(snmp_asn1_dec_raw(&(enumerator->pbuf_stream), tlv.value_len, (u8_t*)varbind->value, &varbind->value_len, SNMP_MAX_VALUE_SIZE)); } else { VB_PARSE_ASSERT(0); } break; case SNMP_ASN1_TYPE_COUNTER64: VB_PARSE_EXEC(snmp_asn1_dec_u64t(&(enumerator->pbuf_stream), tlv.value_len, (u32_t*)varbind->value)); varbind->value_len = 2 * sizeof(u32_t*); break; default: VB_PARSE_ASSERT(0); break; } } else { snmp_pbuf_stream_seek(&(enumerator->pbuf_stream), tlv.value_len); varbind->value_len = tlv.value_len; } return SNMP_VB_ENUMERATOR_ERR_OK; } #endif /* LWIP_SNMP */
2301_81045437/classic-platform
communication/lwip-2.0.3/src/apps/snmp/snmp_msg.c
C
unknown
69,039
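The outbound path in snmp_msg.c above writes TLV headers with reserved three-octet length fields (length_len of 3, in BER terms a long-form length of the shape 0x82 xx xx) and back-fills the real lengths in snmp_complete_outbound_frame(). For reference, here is a minimal standalone sketch of definite-form BER length encoding; ber_encode_length and its buffer handling are illustrative names only, not the lwIP snmp_asn1 API.

#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

/* Encode a definite-form BER length into 'out' (illustrative sketch only,
 * not the lwIP implementation). Short form: one octet for lengths < 128.
 * Long form: 0x80 | n, followed by n big-endian length octets. Returns the
 * number of octets written, or 0 if 'out' is too small. */
static size_t ber_encode_length(uint32_t len, uint8_t *out, size_t out_size)
{
  if (len < 0x80) {
    if (out_size < 1) return 0;
    out[0] = (uint8_t)len;
    return 1;
  } else {
    size_t n = 0;                 /* count octets needed for the value */
    uint32_t tmp = len;
    while (tmp != 0) { n++; tmp >>= 8; }
    if (out_size < n + 1) return 0;
    out[0] = (uint8_t)(0x80 | n);
    for (size_t i = 0; i < n; i++) {
      out[1 + i] = (uint8_t)(len >> (8 * (n - 1 - i)));
    }
    return n + 1;
  }
}

int main(void)
{
  uint8_t buf[8];
  size_t n = ber_encode_length(1453, buf, sizeof(buf)); /* e.g. a response frame length */
  for (size_t i = 0; i < n; i++) printf("%02X ", buf[i]);
  printf("\n");                   /* prints "82 05 AD" for 1453 */
  return 0;
}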
/** * @file * SNMP Agent message handling structures (internal API, do not use in client code). */ /* * Copyright (c) 2006 Axon Digital Design B.V., The Netherlands. * Copyright (c) 2016 Elias Oenal. * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY * OF SUCH DAMAGE. * * Author: Christiaan Simons <christiaan.simons@axon.tv> * Martin Hentschel <info@cl-soft.de> * Elias Oenal <lwip@eliasoenal.com> */ #ifndef LWIP_HDR_APPS_SNMP_MSG_H #define LWIP_HDR_APPS_SNMP_MSG_H #include "lwip/apps/snmp_opts.h" #if LWIP_SNMP #include "lwip/apps/snmp.h" #include "lwip/apps/snmp_core.h" #include "snmp_pbuf_stream.h" #include "lwip/ip_addr.h" #include "lwip/err.h" #if LWIP_SNMP_V3 #include "snmpv3_priv.h" #endif #ifdef __cplusplus extern "C" { #endif /* The listen port of the SNMP agent. Clients have to make their requests to this port. Most standard clients won't work if you change this! */ #ifndef SNMP_IN_PORT #define SNMP_IN_PORT 161 #endif /* The remote port the SNMP agent sends traps to. Most standard trap sinks won't work if you change this! 
*/ #ifndef SNMP_TRAP_PORT #define SNMP_TRAP_PORT 162 #endif /* version defines used in PDU */ #define SNMP_VERSION_1 0 #define SNMP_VERSION_2c 1 #define SNMP_VERSION_3 3 struct snmp_varbind_enumerator { struct snmp_pbuf_stream pbuf_stream; u16_t varbind_count; }; typedef enum { SNMP_VB_ENUMERATOR_ERR_OK = 0, SNMP_VB_ENUMERATOR_ERR_EOVB = 1, SNMP_VB_ENUMERATOR_ERR_ASN1ERROR = 2, SNMP_VB_ENUMERATOR_ERR_INVALIDLENGTH = 3 } snmp_vb_enumerator_err_t; void snmp_vb_enumerator_init(struct snmp_varbind_enumerator* enumerator, struct pbuf* p, u16_t offset, u16_t length); snmp_vb_enumerator_err_t snmp_vb_enumerator_get_next(struct snmp_varbind_enumerator* enumerator, struct snmp_varbind* varbind); struct snmp_request { /* Communication handle */ void *handle; /* source IP address */ const ip_addr_t *source_ip; /* source UDP port */ u16_t source_port; /* incoming snmp version */ u8_t version; /* community name (zero terminated) */ u8_t community[SNMP_MAX_COMMUNITY_STR_LEN + 1]; /* community string length (exclusive zero term) */ u16_t community_strlen; /* request type */ u8_t request_type; /* request ID */ s32_t request_id; /* error status */ s32_t error_status; /* error index */ s32_t error_index; /* non-repeaters (getBulkRequest (SNMPv2c)) */ s32_t non_repeaters; /* max-repetitions (getBulkRequest (SNMPv2c)) */ s32_t max_repetitions; #if LWIP_SNMP_V3 s32_t msg_id; s32_t msg_max_size; u8_t msg_flags; s32_t msg_security_model; u8_t msg_authoritative_engine_id[SNMP_V3_MAX_ENGINE_ID_LENGTH]; u8_t msg_authoritative_engine_id_len; s32_t msg_authoritative_engine_boots; s32_t msg_authoritative_engine_time; u8_t msg_user_name[SNMP_V3_MAX_USER_LENGTH]; u8_t msg_user_name_len; u8_t msg_authentication_parameters[SNMP_V3_MAX_AUTH_PARAM_LENGTH]; u8_t msg_privacy_parameters[SNMP_V3_MAX_PRIV_PARAM_LENGTH]; u8_t context_engine_id[SNMP_V3_MAX_ENGINE_ID_LENGTH]; u8_t context_engine_id_len; u8_t context_name[SNMP_V3_MAX_ENGINE_ID_LENGTH]; u8_t context_name_len; #endif struct pbuf *inbound_pbuf; struct snmp_varbind_enumerator inbound_varbind_enumerator; u16_t inbound_varbind_offset; u16_t inbound_varbind_len; u16_t inbound_padding_len; struct pbuf *outbound_pbuf; struct snmp_pbuf_stream outbound_pbuf_stream; u16_t outbound_pdu_offset; u16_t outbound_error_status_offset; u16_t outbound_error_index_offset; u16_t outbound_varbind_offset; #if LWIP_SNMP_V3 u16_t outbound_msg_global_data_offset; u16_t outbound_msg_global_data_end; u16_t outbound_msg_security_parameters_str_offset; u16_t outbound_msg_security_parameters_seq_offset; u16_t outbound_msg_security_parameters_end; u16_t outbound_msg_authentication_parameters_offset; u16_t outbound_scoped_pdu_seq_offset; u16_t outbound_scoped_pdu_string_offset; #endif u8_t value_buffer[SNMP_MAX_VALUE_SIZE]; }; /** A helper struct keeping length information about varbinds */ struct snmp_varbind_len { u8_t vb_len_len; u16_t vb_value_len; u8_t oid_len_len; u16_t oid_value_len; u8_t value_len_len; u16_t value_value_len; }; /** Agent community string */ extern const char *snmp_community; /** Agent community string for write access */ extern const char *snmp_community_write; /** handle for sending traps */ extern void* snmp_traps_handle; void snmp_receive(void *handle, struct pbuf *p, const ip_addr_t *source_ip, u16_t port); err_t snmp_sendto(void *handle, struct pbuf *p, const ip_addr_t *dst, u16_t port); u8_t snmp_get_local_ip_for_dst(void* handle, const ip_addr_t *dst, ip_addr_t *result); err_t snmp_varbind_length(struct snmp_varbind *varbind, struct snmp_varbind_len *len); err_t 
snmp_append_outbound_varbind(struct snmp_pbuf_stream *pbuf_stream, struct snmp_varbind* varbind); #ifdef __cplusplus } #endif #endif /* LWIP_SNMP */ #endif /* LWIP_HDR_APPS_SNMP_MSG_H */
2301_81045437/classic-platform
communication/lwip-2.0.3/src/apps/snmp/snmp_msg.h
C
unknown
6,574
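Both the frame parser in snmp_msg.c and the varbind enumerator declared in this header walk the message by repeatedly decoding a BER type/length header and then dispatching on the type, always checking the decoded length against the remaining stream budget. A self-contained sketch of that decode step over a flat byte array follows; tlv_decode and struct tlv_hdr are made-up names for illustration, whereas the real code uses snmp_asn1_dec_tlv over a snmp_pbuf_stream.

#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

struct tlv_hdr {
  uint8_t  type;       /* identifier octet (low-tag-number form assumed) */
  uint32_t value_len;  /* decoded definite length of the value part */
  size_t   hdr_len;    /* octets consumed by type + length fields */
};

/* Decode one BER TLV header from buf (illustrative sketch, not lwIP code).
 * Returns 0 on success, -1 on truncation or unsupported forms
 * (indefinite length, lengths wider than 32 bits). */
static int tlv_decode(const uint8_t *buf, size_t len, struct tlv_hdr *out)
{
  size_t pos = 0;
  if (len < 2) return -1;
  out->type = buf[pos++];
  uint8_t first = buf[pos++];
  if (first < 0x80) {              /* short form */
    out->value_len = first;
  } else {                         /* long form: 0x80 | n, then n octets */
    size_t n = first & 0x7F;
    if (n == 0 || n > 4 || pos + n > len) return -1;
    uint32_t v = 0;
    for (size_t i = 0; i < n; i++) v = (v << 8) | buf[pos++];
    out->value_len = v;
  }
  out->hdr_len = pos;
  if (out->value_len > len - pos) return -1;  /* value must fit in the buffer */
  return 0;
}

int main(void)
{
  /* OCTET STRING "public", as a community string appears on the wire */
  const uint8_t frame[] = { 0x04, 0x06, 'p', 'u', 'b', 'l', 'i', 'c' };
  struct tlv_hdr tlv;
  if (tlv_decode(frame, sizeof(frame), &tlv) == 0) {
    printf("type=0x%02X value_len=%u hdr_len=%zu\n",
           tlv.type, (unsigned)tlv.value_len, tlv.hdr_len);
  }
  return 0;
}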
/** * @file * SNMP netconn frontend. */ /* * Copyright (c) 2001-2004 Swedish Institute of Computer Science. * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY * OF SUCH DAMAGE. * * Author: Dirk Ziegelmeier <dziegel@gmx.de> */ #include "lwip/apps/snmp_opts.h" #if LWIP_SNMP && SNMP_USE_NETCONN #include <string.h> #include "lwip/api.h" #include "lwip/ip.h" #include "lwip/udp.h" #include "snmp_msg.h" #include "lwip/sys.h" /** SNMP netconn API worker thread */ static void snmp_netconn_thread(void *arg) { struct netconn *conn; struct netbuf *buf; err_t err; LWIP_UNUSED_ARG(arg); /* Bind to SNMP port with default IP address */ #if LWIP_IPV6 conn = netconn_new(NETCONN_UDP_IPV6); netconn_bind(conn, IP6_ADDR_ANY, SNMP_IN_PORT); #else /* LWIP_IPV6 */ conn = netconn_new(NETCONN_UDP); netconn_bind(conn, IP4_ADDR_ANY, SNMP_IN_PORT); #endif /* LWIP_IPV6 */ LWIP_ERROR("snmp_netconn: invalid conn", (conn != NULL), return;); snmp_traps_handle = conn; do { err = netconn_recv(conn, &buf); if (err == ERR_OK) { snmp_receive(conn, buf->p, &buf->addr, buf->port); } if (buf != NULL) { netbuf_delete(buf); } } while(1); } err_t snmp_sendto(void *handle, struct pbuf *p, const ip_addr_t *dst, u16_t port) { err_t result; struct netbuf buf; memset(&buf, 0, sizeof(buf)); buf.p = p; result = netconn_sendto((struct netconn*)handle, &buf, dst, port); return result; } u8_t snmp_get_local_ip_for_dst(void* handle, const ip_addr_t *dst, ip_addr_t *result) { struct netconn* conn = (struct netconn*)handle; struct netif *dst_if; const ip_addr_t* dst_ip; LWIP_UNUSED_ARG(conn); /* unused in case of IPV4 only configuration */ ip_route_get_local_ip(&conn->pcb.udp->local_ip, dst, dst_if, dst_ip); if ((dst_if != NULL) && (dst_ip != NULL)) { ip_addr_copy(*result, *dst_ip); return 1; } else { return 0; } } /** * Starts SNMP Agent. */ void snmp_init(void) { sys_thread_new("snmp_netconn", snmp_netconn_thread, NULL, SNMP_STACK_SIZE, SNMP_THREAD_PRIO); } #endif /* LWIP_SNMP && SNMP_USE_NETCONN */
2301_81045437/classic-platform
communication/lwip-2.0.3/src/apps/snmp/snmp_netconn.c
C
unknown
3,593
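The netconn frontend above is a single worker thread: bind a UDP connection to SNMP_IN_PORT, block in netconn_recv(), hand the pbuf to snmp_receive(), free the netbuf, repeat. The sketch below expresses the same loop shape with POSIX UDP sockets so it can be compiled and run outside lwIP; handle_datagram is a placeholder for snmp_receive(), and nothing here is lwIP API.

#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

/* Placeholder for the per-datagram handler (snmp_receive() in the lwIP agent). */
static void handle_datagram(const unsigned char *buf, ssize_t len,
                            const struct sockaddr_in *src)
{
  char ip[INET_ADDRSTRLEN];
  (void)buf;
  inet_ntop(AF_INET, &src->sin_addr, ip, sizeof(ip));
  printf("got %zd byte request from %s:%u\n", len, ip, (unsigned)ntohs(src->sin_port));
}

int main(void)
{
  int s = socket(AF_INET, SOCK_DGRAM, 0);
  if (s < 0) { perror("socket"); return 1; }

  struct sockaddr_in addr;
  memset(&addr, 0, sizeof(addr));
  addr.sin_family = AF_INET;
  addr.sin_addr.s_addr = htonl(INADDR_ANY);
  addr.sin_port = htons(161);            /* SNMP_IN_PORT in the lwIP sources */
  if (bind(s, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
    perror("bind");                      /* port 161 usually needs privileges */
    close(s);
    return 1;
  }

  /* Same shape as the netconn worker thread: block, hand off, release, repeat. */
  for (;;) {
    unsigned char buf[1472];             /* 1472 matches the agent's outbound pbuf size */
    struct sockaddr_in src;
    socklen_t srclen = sizeof(src);
    ssize_t n = recvfrom(s, buf, sizeof(buf), 0, (struct sockaddr *)&src, &srclen);
    if (n < 0) { perror("recvfrom"); break; }
    handle_datagram(buf, n, &src);
  }
  close(s);
  return 0;
}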
/** * @file * SNMP pbuf stream wrapper implementation (internal API, do not use in client code). */ /* * Copyright (c) 2001-2004 Swedish Institute of Computer Science. * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY * OF SUCH DAMAGE. * * This file is part of the lwIP TCP/IP stack. * * Author: Martin Hentschel <info@cl-soft.de> * */ #include "lwip/apps/snmp_opts.h" #if LWIP_SNMP /* don't build if not configured for use in lwipopts.h */ #include "snmp_pbuf_stream.h" #include "lwip/def.h" #include <string.h> err_t snmp_pbuf_stream_init(struct snmp_pbuf_stream* pbuf_stream, struct pbuf* p, u16_t offset, u16_t length) { pbuf_stream->offset = offset; pbuf_stream->length = length; pbuf_stream->pbuf = p; return ERR_OK; } err_t snmp_pbuf_stream_read(struct snmp_pbuf_stream* pbuf_stream, u8_t* data) { if (pbuf_stream->length == 0) { return ERR_BUF; } if (pbuf_copy_partial(pbuf_stream->pbuf, data, 1, pbuf_stream->offset) == 0) { return ERR_BUF; } pbuf_stream->offset++; pbuf_stream->length--; return ERR_OK; } err_t snmp_pbuf_stream_write(struct snmp_pbuf_stream* pbuf_stream, u8_t data) { return snmp_pbuf_stream_writebuf(pbuf_stream, &data, 1); } err_t snmp_pbuf_stream_writebuf(struct snmp_pbuf_stream* pbuf_stream, const void* buf, u16_t buf_len) { if (pbuf_stream->length < buf_len) { return ERR_BUF; } if (pbuf_take_at(pbuf_stream->pbuf, buf, buf_len, pbuf_stream->offset) != ERR_OK) { return ERR_BUF; } pbuf_stream->offset += buf_len; pbuf_stream->length -= buf_len; return ERR_OK; } err_t snmp_pbuf_stream_writeto(struct snmp_pbuf_stream* pbuf_stream, struct snmp_pbuf_stream* target_pbuf_stream, u16_t len) { if ((pbuf_stream == NULL) || (target_pbuf_stream == NULL)) { return ERR_ARG; } if ((len > pbuf_stream->length) || (len > target_pbuf_stream->length)) { return ERR_ARG; } if (len == 0) { len = LWIP_MIN(pbuf_stream->length, target_pbuf_stream->length); } while (len > 0) { u16_t chunk_len; err_t err; u16_t target_offset; struct pbuf* pbuf = pbuf_skip(pbuf_stream->pbuf, pbuf_stream->offset, &target_offset); if ((pbuf == NULL) || (pbuf->len == 0)) { return ERR_BUF; } chunk_len = LWIP_MIN(len, pbuf->len); err = snmp_pbuf_stream_writebuf(target_pbuf_stream, &((u8_t*)pbuf->payload)[target_offset], chunk_len); 
if (err != ERR_OK) { return err; } pbuf_stream->offset += chunk_len; pbuf_stream->length -= chunk_len; len -= chunk_len; } return ERR_OK; } err_t snmp_pbuf_stream_seek(struct snmp_pbuf_stream* pbuf_stream, s32_t offset) { if ((offset < 0) || (offset > pbuf_stream->length)) { /* we cannot seek backwards or forward behind stream end */ return ERR_ARG; } pbuf_stream->offset += (u16_t)offset; pbuf_stream->length -= (u16_t)offset; return ERR_OK; } err_t snmp_pbuf_stream_seek_abs(struct snmp_pbuf_stream* pbuf_stream, u32_t offset) { s32_t rel_offset = offset - pbuf_stream->offset; return snmp_pbuf_stream_seek(pbuf_stream, rel_offset); } #endif /* LWIP_SNMP */
2301_81045437/classic-platform
communication/lwip-2.0.3/src/apps/snmp/snmp_pbuf_stream.c
C
unknown
4,622
/** * @file * SNMP pbuf stream wrapper (internal API, do not use in client code). */ /* * Copyright (c) 2001-2004 Swedish Institute of Computer Science. * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY * OF SUCH DAMAGE. * * This file is part of the lwIP TCP/IP stack. * * Author: Martin Hentschel <info@cl-soft.de> * */ #ifndef LWIP_HDR_APPS_SNMP_PBUF_STREAM_H #define LWIP_HDR_APPS_SNMP_PBUF_STREAM_H #include "lwip/apps/snmp_opts.h" #if LWIP_SNMP #include "lwip/err.h" #include "lwip/pbuf.h" #ifdef __cplusplus extern "C" { #endif struct snmp_pbuf_stream { struct pbuf* pbuf; u16_t offset; u16_t length; }; err_t snmp_pbuf_stream_init(struct snmp_pbuf_stream* pbuf_stream, struct pbuf* p, u16_t offset, u16_t length); err_t snmp_pbuf_stream_read(struct snmp_pbuf_stream* pbuf_stream, u8_t* data); err_t snmp_pbuf_stream_write(struct snmp_pbuf_stream* pbuf_stream, u8_t data); err_t snmp_pbuf_stream_writebuf(struct snmp_pbuf_stream* pbuf_stream, const void* buf, u16_t buf_len); err_t snmp_pbuf_stream_writeto(struct snmp_pbuf_stream* pbuf_stream, struct snmp_pbuf_stream* target_pbuf_stream, u16_t len); err_t snmp_pbuf_stream_seek(struct snmp_pbuf_stream* pbuf_stream, s32_t offset); err_t snmp_pbuf_stream_seek_abs(struct snmp_pbuf_stream* pbuf_stream, u32_t offset); #ifdef __cplusplus } #endif #endif /* LWIP_SNMP */ #endif /* LWIP_HDR_APPS_SNMP_PBUF_STREAM_H */
2301_81045437/classic-platform
communication/lwip-2.0.3/src/apps/snmp/snmp_pbuf_stream.h
C
unknown
2,829
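The stream pair above is marked as internal API, so client code should not call it; still, a short sketch of how the agent's own encoder/decoder code drives the read/write cursor makes the behaviour easier to follow. This is only an illustration: the function name pbuf_stream_demo and the byte values are made up, and return values of the stream calls are ignored for brevity.

#include "lwip/pbuf.h"
#include "lwip/err.h"
#include "snmp_pbuf_stream.h"

static err_t pbuf_stream_demo(void)
{
  struct pbuf *p = pbuf_alloc(PBUF_TRANSPORT, 8, PBUF_RAM);
  struct snmp_pbuf_stream ws;
  struct snmp_pbuf_stream rs;
  u8_t byte;

  if (p == NULL) {
    return ERR_MEM;
  }

  /* write stream covering the whole pbuf */
  snmp_pbuf_stream_init(&ws, p, 0, p->tot_len);
  snmp_pbuf_stream_write(&ws, 0x30);                  /* one byte, cursor advances by 1 */
  snmp_pbuf_stream_writebuf(&ws, "\x02\x01\x00", 3);  /* buffer write, cursor advances by 3 */

  /* independent read stream over the same pbuf */
  snmp_pbuf_stream_init(&rs, p, 0, p->tot_len);
  snmp_pbuf_stream_read(&rs, &byte);                  /* byte == 0x30 */
  snmp_pbuf_stream_seek(&rs, 2);                      /* skip two bytes forward */

  pbuf_free(p);
  return ERR_OK;
}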
/** * @file * SNMP RAW API frontend. */ /* * Copyright (c) 2001-2004 Swedish Institute of Computer Science. * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY * OF SUCH DAMAGE. * * Author: Dirk Ziegelmeier <dziegel@gmx.de> */ #include "lwip/apps/snmp_opts.h" #include "lwip/ip_addr.h" #if LWIP_SNMP && SNMP_USE_RAW #include "lwip/udp.h" #include "lwip/ip.h" #include "snmp_msg.h" /* lwIP UDP receive callback function */ static void snmp_recv(void *arg, struct udp_pcb *pcb, struct pbuf *p, const ip_addr_t *addr, u16_t port) { LWIP_UNUSED_ARG(arg); snmp_receive(pcb, p, addr, port); pbuf_free(p); } err_t snmp_sendto(void *handle, struct pbuf *p, const ip_addr_t *dst, u16_t port) { return udp_sendto((struct udp_pcb*)handle, p, dst, port); } u8_t snmp_get_local_ip_for_dst(void* handle, const ip_addr_t *dst, ip_addr_t *result) { struct udp_pcb* udp_pcb = (struct udp_pcb*)handle; struct netif *dst_if; const ip_addr_t* dst_ip; LWIP_UNUSED_ARG(udp_pcb); /* unused in case of IPV4 only configuration */ ip_route_get_local_ip(&udp_pcb->local_ip, dst, dst_if, dst_ip); if ((dst_if != NULL) && (dst_ip != NULL)) { ip_addr_copy(*result, *dst_ip); return 1; } else { return 0; } } /** * @ingroup snmp_core * Starts SNMP Agent. * Allocates UDP pcb and binds it to IP_ANY_TYPE port 161. */ void snmp_init(void) { err_t err; struct udp_pcb *snmp_pcb = udp_new_ip_type(IPADDR_TYPE_ANY); LWIP_ERROR("snmp_raw: no PCB", (snmp_pcb != NULL), return;); snmp_traps_handle = snmp_pcb; udp_recv(snmp_pcb, snmp_recv, (void *)SNMP_IN_PORT); err = udp_bind(snmp_pcb, IP_ANY_TYPE, SNMP_IN_PORT); LWIP_ERROR("snmp_raw: Unable to bind PCB", (err == ERR_OK), return;); } #endif /* LWIP_SNMP && SNMP_USE_RAW */
2301_81045437/classic-platform
communication/lwip-2.0.3/src/apps/snmp/snmp_raw.c
C
unknown
3,225
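snmp_raw.c is only built when the stack is configured for it, so a minimal sketch of that configuration plus the single call that starts the agent may help. The option names are the ones tested by the #if guard above; the header chosen for the snmp_init() prototype is an assumption, as is the function name my_start_snmp_agent.

/* lwipopts.h (assumed excerpt) */
#define LWIP_SNMP     1
#define SNMP_USE_RAW  1

/* application code, executed once the stack and netifs are up */
#include "lwip/apps/snmp_core.h"   /* assumed location of the snmp_init() prototype */

void my_start_snmp_agent(void)
{
  /* allocates the UDP pcb and binds it to port 161, as documented above */
  snmp_init();
}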
/** * @file * SNMP scalar node support implementation. */ /* * Copyright (c) 2001-2004 Swedish Institute of Computer Science. * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY * OF SUCH DAMAGE. * * This file is part of the lwIP TCP/IP stack. * * Author: Martin Hentschel <info@cl-soft.de> * */ #include "lwip/apps/snmp_opts.h" #if LWIP_SNMP /* don't build if not configured for use in lwipopts.h */ #include "lwip/apps/snmp_scalar.h" #include "lwip/apps/snmp_core.h" static s16_t snmp_scalar_array_get_value(struct snmp_node_instance* instance, void* value); static snmp_err_t snmp_scalar_array_set_test(struct snmp_node_instance* instance, u16_t value_len, void* value); static snmp_err_t snmp_scalar_array_set_value(struct snmp_node_instance* instance, u16_t value_len, void* value); snmp_err_t snmp_scalar_get_instance(const u32_t *root_oid, u8_t root_oid_len, struct snmp_node_instance* instance) { const struct snmp_scalar_node* scalar_node = (const struct snmp_scalar_node*)(const void*)instance->node; LWIP_UNUSED_ARG(root_oid); LWIP_UNUSED_ARG(root_oid_len); /* scalar only has one dedicated instance: .0 */ if ((instance->instance_oid.len != 1) || (instance->instance_oid.id[0] != 0)) { return SNMP_ERR_NOSUCHINSTANCE; } instance->access = scalar_node->access; instance->asn1_type = scalar_node->asn1_type; instance->get_value = scalar_node->get_value; instance->set_test = scalar_node->set_test; instance->set_value = scalar_node->set_value; return SNMP_ERR_NOERROR; } snmp_err_t snmp_scalar_get_next_instance(const u32_t *root_oid, u8_t root_oid_len, struct snmp_node_instance* instance) { /* because our only instance is .0 we can only return a next instance if no instance oid is passed */ if (instance->instance_oid.len == 0) { instance->instance_oid.len = 1; instance->instance_oid.id[0] = 0; return snmp_scalar_get_instance(root_oid, root_oid_len, instance); } return SNMP_ERR_NOSUCHINSTANCE; } snmp_err_t snmp_scalar_array_get_instance(const u32_t *root_oid, u8_t root_oid_len, struct snmp_node_instance* instance) { LWIP_UNUSED_ARG(root_oid); LWIP_UNUSED_ARG(root_oid_len); if ((instance->instance_oid.len == 2) && (instance->instance_oid.id[1] == 0)) { const struct snmp_scalar_array_node* array_node = (const 
struct snmp_scalar_array_node*)(const void*)instance->node; const struct snmp_scalar_array_node_def* array_node_def = array_node->array_nodes; u32_t i = 0; while (i < array_node->array_node_count) { if (array_node_def->oid == instance->instance_oid.id[0]) { break; } array_node_def++; i++; } if (i < array_node->array_node_count) { instance->access = array_node_def->access; instance->asn1_type = array_node_def->asn1_type; instance->get_value = snmp_scalar_array_get_value; instance->set_test = snmp_scalar_array_set_test; instance->set_value = snmp_scalar_array_set_value; instance->reference.const_ptr = array_node_def; return SNMP_ERR_NOERROR; } } return SNMP_ERR_NOSUCHINSTANCE; } snmp_err_t snmp_scalar_array_get_next_instance(const u32_t *root_oid, u8_t root_oid_len, struct snmp_node_instance* instance) { const struct snmp_scalar_array_node* array_node = (const struct snmp_scalar_array_node*)(const void*)instance->node; const struct snmp_scalar_array_node_def* array_node_def = array_node->array_nodes; const struct snmp_scalar_array_node_def* result = NULL; LWIP_UNUSED_ARG(root_oid); LWIP_UNUSED_ARG(root_oid_len); if ((instance->instance_oid.len == 0) && (array_node->array_node_count > 0)) { /* return node with lowest OID */ u16_t i = 0; result = array_node_def; array_node_def++; for (i = 1; i < array_node->array_node_count; i++) { if (array_node_def->oid < result->oid) { result = array_node_def; } array_node_def++; } } else if (instance->instance_oid.len >= 1) { if (instance->instance_oid.len == 1) { /* if we have the requested OID we return its instance, otherwise we search for the next available */ u16_t i = 0; while (i < array_node->array_node_count) { if (array_node_def->oid == instance->instance_oid.id[0]) { result = array_node_def; break; } array_node_def++; i++; } } if (result == NULL) { u32_t oid_dist = 0xFFFFFFFFUL; u16_t i = 0; array_node_def = array_node->array_nodes; /* may be already at the end when if case before was executed without result -> reinitialize to start */ while (i < array_node->array_node_count) { if ((array_node_def->oid > instance->instance_oid.id[0]) && ((u32_t)(array_node_def->oid - instance->instance_oid.id[0]) < oid_dist)) { result = array_node_def; oid_dist = array_node_def->oid - instance->instance_oid.id[0]; } array_node_def++; i++; } } } if (result == NULL) { /* nothing to return */ return SNMP_ERR_NOSUCHINSTANCE; } instance->instance_oid.len = 2; instance->instance_oid.id[0] = result->oid; instance->instance_oid.id[1] = 0; instance->access = result->access; instance->asn1_type = result->asn1_type; instance->get_value = snmp_scalar_array_get_value; instance->set_test = snmp_scalar_array_set_test; instance->set_value = snmp_scalar_array_set_value; instance->reference.const_ptr = result; return SNMP_ERR_NOERROR; } static s16_t snmp_scalar_array_get_value(struct snmp_node_instance* instance, void* value) { const struct snmp_scalar_array_node* array_node = (const struct snmp_scalar_array_node*)(const void*)instance->node; const struct snmp_scalar_array_node_def* array_node_def = (const struct snmp_scalar_array_node_def*)instance->reference.const_ptr; return array_node->get_value(array_node_def, value); } static snmp_err_t snmp_scalar_array_set_test(struct snmp_node_instance* instance, u16_t value_len, void* value) { const struct snmp_scalar_array_node* array_node = (const struct snmp_scalar_array_node*)(const void*)instance->node; const struct snmp_scalar_array_node_def* array_node_def = (const struct snmp_scalar_array_node_def*)instance->reference.const_ptr; 
return array_node->set_test(array_node_def, value_len, value); } static snmp_err_t snmp_scalar_array_set_value(struct snmp_node_instance* instance, u16_t value_len, void* value) { const struct snmp_scalar_array_node* array_node = (const struct snmp_scalar_array_node*)(const void*)instance->node; const struct snmp_scalar_array_node_def* array_node_def = (const struct snmp_scalar_array_node_def*)instance->reference.const_ptr; return array_node->set_value(array_node_def, value_len, value); } #endif /* LWIP_SNMP */
2301_81045437/classic-platform
communication/lwip-2.0.3/src/apps/snmp/snmp_scalar.c
C
unknown
8,527
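Scalar nodes delegate to a get_value callback with the signature used throughout this file: a node instance pointer plus an output buffer, returning the value length in bytes. Below is a hedged sketch of such a callback; the name, the sys_now()-based value and the timeticks scaling are illustrative assumptions, and wiring the callback into a node is done with the node-definition helpers in lwip/apps/snmp_scalar.h.

#include "lwip/apps/snmp_core.h"
#include "lwip/sys.h"

/* returns the encoded value length in bytes, like the get_value
 * callbacks dispatched by snmp_scalar_get_instance() above */
static s16_t my_uptime_get_value(struct snmp_node_instance* instance, void* value)
{
  u32_t *uptime = (u32_t*)value;

  LWIP_UNUSED_ARG(instance);
  *uptime = sys_now() / 10;   /* milliseconds -> timeticks (assumption) */
  return sizeof(*uptime);
}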
/** * @file * SNMP table support implementation. */ /* * Copyright (c) 2001-2004 Swedish Institute of Computer Science. * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY * OF SUCH DAMAGE. * * This file is part of the lwIP TCP/IP stack. * * Author: Martin Hentschel <info@cl-soft.de> * */ #include "lwip/apps/snmp_opts.h" #if LWIP_SNMP /* don't build if not configured for use in lwipopts.h */ #include "lwip/apps/snmp_core.h" #include "lwip/apps/snmp_table.h" #include <string.h> snmp_err_t snmp_table_get_instance(const u32_t *root_oid, u8_t root_oid_len, struct snmp_node_instance* instance) { snmp_err_t ret = SNMP_ERR_NOSUCHINSTANCE; const struct snmp_table_node* table_node = (const struct snmp_table_node*)(const void*)instance->node; LWIP_UNUSED_ARG(root_oid); LWIP_UNUSED_ARG(root_oid_len); /* check min. 
length (fixed row entry definition, column, row instance oid with at least one entry */ /* fixed row entry always has oid 1 */ if ((instance->instance_oid.len >= 3) && (instance->instance_oid.id[0] == 1)) { /* search column */ const struct snmp_table_col_def* col_def = table_node->columns; u16_t i = table_node->column_count; while (i > 0) { if (col_def->index == instance->instance_oid.id[1]) { break; } col_def++; i--; } if (i > 0) { /* everything may be overwritten by get_cell_instance_method() in order to implement special handling for single columns/cells */ instance->asn1_type = col_def->asn1_type; instance->access = col_def->access; instance->get_value = table_node->get_value; instance->set_test = table_node->set_test; instance->set_value = table_node->set_value; ret = table_node->get_cell_instance( &(instance->instance_oid.id[1]), &(instance->instance_oid.id[2]), instance->instance_oid.len-2, instance); } } return ret; } snmp_err_t snmp_table_get_next_instance(const u32_t *root_oid, u8_t root_oid_len, struct snmp_node_instance* instance) { const struct snmp_table_node* table_node = (const struct snmp_table_node*)(const void*)instance->node; const struct snmp_table_col_def* col_def; struct snmp_obj_id row_oid; u32_t column = 0; snmp_err_t result; LWIP_UNUSED_ARG(root_oid); LWIP_UNUSED_ARG(root_oid_len); /* check that first part of id is 0 or 1, referencing fixed row entry */ if ((instance->instance_oid.len > 0) && (instance->instance_oid.id[0] > 1)) { return SNMP_ERR_NOSUCHINSTANCE; } if (instance->instance_oid.len > 1) { column = instance->instance_oid.id[1]; } if (instance->instance_oid.len > 2) { snmp_oid_assign(&row_oid, &(instance->instance_oid.id[2]), instance->instance_oid.len - 2); } else { row_oid.len = 0; } instance->get_value = table_node->get_value; instance->set_test = table_node->set_test; instance->set_value = table_node->set_value; /* resolve column and value */ do { u16_t i; const struct snmp_table_col_def* next_col_def = NULL; col_def = table_node->columns; for (i = 0; i < table_node->column_count; i++) { if (col_def->index == column) { next_col_def = col_def; break; } else if ((col_def->index > column) && ((next_col_def == NULL) || (col_def->index < next_col_def->index))) { next_col_def = col_def; } col_def++; } if (next_col_def == NULL) { /* no further column found */ return SNMP_ERR_NOSUCHINSTANCE; } instance->asn1_type = next_col_def->asn1_type; instance->access = next_col_def->access; result = table_node->get_next_cell_instance( &next_col_def->index, &row_oid, instance); if (result == SNMP_ERR_NOERROR) { col_def = next_col_def; break; } row_oid.len = 0; /* reset row_oid because we switch to next column and start with the first entry there */ column = next_col_def->index + 1; } while (1); /* build resulting oid */ instance->instance_oid.len = 2; instance->instance_oid.id[0] = 1; instance->instance_oid.id[1] = col_def->index; snmp_oid_append(&instance->instance_oid, row_oid.id, row_oid.len); return SNMP_ERR_NOERROR; } snmp_err_t snmp_table_simple_get_instance(const u32_t *root_oid, u8_t root_oid_len, struct snmp_node_instance* instance) { snmp_err_t ret = SNMP_ERR_NOSUCHINSTANCE; const struct snmp_table_simple_node* table_node = (const struct snmp_table_simple_node*)(const void*)instance->node; LWIP_UNUSED_ARG(root_oid); LWIP_UNUSED_ARG(root_oid_len); /* check min. 
length (fixed row entry definition, column, row instance oid with at least one entry */ /* fixed row entry always has oid 1 */ if ((instance->instance_oid.len >= 3) && (instance->instance_oid.id[0] == 1)) { ret = table_node->get_cell_value( &(instance->instance_oid.id[1]), &(instance->instance_oid.id[2]), instance->instance_oid.len-2, &instance->reference, &instance->reference_len); if (ret == SNMP_ERR_NOERROR) { /* search column */ const struct snmp_table_simple_col_def* col_def = table_node->columns; u32_t i = table_node->column_count; while (i > 0) { if (col_def->index == instance->instance_oid.id[1]) { break; } col_def++; i--; } if (i > 0) { instance->asn1_type = col_def->asn1_type; instance->access = SNMP_NODE_INSTANCE_READ_ONLY; instance->set_test = NULL; instance->set_value = NULL; switch (col_def->data_type) { case SNMP_VARIANT_VALUE_TYPE_U32: instance->get_value = snmp_table_extract_value_from_u32ref; break; case SNMP_VARIANT_VALUE_TYPE_S32: instance->get_value = snmp_table_extract_value_from_s32ref; break; case SNMP_VARIANT_VALUE_TYPE_PTR: /* fall through */ case SNMP_VARIANT_VALUE_TYPE_CONST_PTR: instance->get_value = snmp_table_extract_value_from_refconstptr; break; default: LWIP_DEBUGF(SNMP_DEBUG, ("snmp_table_simple_get_instance(): unknown column data_type: %d\n", col_def->data_type)); return SNMP_ERR_GENERROR; } ret = SNMP_ERR_NOERROR; } else { ret = SNMP_ERR_NOSUCHINSTANCE; } } } return ret; } snmp_err_t snmp_table_simple_get_next_instance(const u32_t *root_oid, u8_t root_oid_len, struct snmp_node_instance* instance) { const struct snmp_table_simple_node* table_node = (const struct snmp_table_simple_node*)(const void*)instance->node; const struct snmp_table_simple_col_def* col_def; struct snmp_obj_id row_oid; u32_t column = 0; snmp_err_t result; LWIP_UNUSED_ARG(root_oid); LWIP_UNUSED_ARG(root_oid_len); /* check that first part of id is 0 or 1, referencing fixed row entry */ if ((instance->instance_oid.len > 0) && (instance->instance_oid.id[0] > 1)) { return SNMP_ERR_NOSUCHINSTANCE; } if (instance->instance_oid.len > 1) { column = instance->instance_oid.id[1]; } if (instance->instance_oid.len > 2) { snmp_oid_assign(&row_oid, &(instance->instance_oid.id[2]), instance->instance_oid.len - 2); } else { row_oid.len = 0; } /* resolve column and value */ do { u32_t i; const struct snmp_table_simple_col_def* next_col_def = NULL; col_def = table_node->columns; for (i = 0; i < table_node->column_count; i++) { if (col_def->index == column) { next_col_def = col_def; break; } else if ((col_def->index > column) && ((next_col_def == NULL) || (col_def->index < next_col_def->index))) { next_col_def = col_def; } col_def++; } if (next_col_def == NULL) { /* no further column found */ return SNMP_ERR_NOSUCHINSTANCE; } result = table_node->get_next_cell_instance_and_value( &next_col_def->index, &row_oid, &instance->reference, &instance->reference_len); if (result == SNMP_ERR_NOERROR) { col_def = next_col_def; break; } row_oid.len = 0; /* reset row_oid because we switch to next column and start with the first entry there */ column = next_col_def->index + 1; } while (1); instance->asn1_type = col_def->asn1_type; instance->access = SNMP_NODE_INSTANCE_READ_ONLY; instance->set_test = NULL; instance->set_value = NULL; switch (col_def->data_type) { case SNMP_VARIANT_VALUE_TYPE_U32: instance->get_value = snmp_table_extract_value_from_u32ref; break; case SNMP_VARIANT_VALUE_TYPE_S32: instance->get_value = snmp_table_extract_value_from_s32ref; break; case SNMP_VARIANT_VALUE_TYPE_PTR: /* fall through */ case 
SNMP_VARIANT_VALUE_TYPE_CONST_PTR: instance->get_value = snmp_table_extract_value_from_refconstptr; break; default: LWIP_DEBUGF(SNMP_DEBUG, ("snmp_table_simple_get_instance(): unknown column data_type: %d\n", col_def->data_type)); return SNMP_ERR_GENERROR; } /* build resulting oid */ instance->instance_oid.len = 2; instance->instance_oid.id[0] = 1; instance->instance_oid.id[1] = col_def->index; snmp_oid_append(&instance->instance_oid, row_oid.id, row_oid.len); return SNMP_ERR_NOERROR; } s16_t snmp_table_extract_value_from_s32ref(struct snmp_node_instance* instance, void* value) { s32_t *dst = (s32_t*)value; *dst = instance->reference.s32; return sizeof(*dst); } s16_t snmp_table_extract_value_from_u32ref(struct snmp_node_instance* instance, void* value) { u32_t *dst = (u32_t*)value; *dst = instance->reference.u32; return sizeof(*dst); } s16_t snmp_table_extract_value_from_refconstptr(struct snmp_node_instance* instance, void* value) { MEMCPY(value, instance->reference.const_ptr, instance->reference_len); return (u16_t)instance->reference_len; } #endif /* LWIP_SNMP */
2301_81045437/classic-platform
communication/lwip-2.0.3/src/apps/snmp/snmp_table.c
C
unknown
11,678
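The "simple" table flavour above pulls cell values through a get_cell_value callback. The parameter list in the sketch below mirrors the call made from snmp_table_simple_get_instance() (column id, row oid, row oid length, variant value, value length), but the exact typedef lives in lwip/apps/snmp_table.h; the single-row layout and the constant 42 are assumptions for illustration.

#include "lwip/apps/snmp_core.h"
#include "lwip/apps/snmp_table.h"

static snmp_err_t
my_get_cell_value(const u32_t* column, const u32_t* row_oid, u8_t row_oid_len,
                  union snmp_variant_value* value, u32_t* value_len)
{
  LWIP_UNUSED_ARG(value_len);   /* only needed for PTR/CONST_PTR columns */

  /* assumed single-row table: only row index .1 exists */
  if ((row_oid_len != 1) || (row_oid[0] != 1)) {
    return SNMP_ERR_NOSUCHINSTANCE;
  }

  switch (*column) {
    case 1:
      value->u32 = 42;          /* column 1 declared as SNMP_VARIANT_VALUE_TYPE_U32 */
      return SNMP_ERR_NOERROR;
    default:
      return SNMP_ERR_NOSUCHINSTANCE;
  }
}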
/** * @file * SNMP thread synchronization implementation. */ /* * Copyright (c) 2001-2004 Swedish Institute of Computer Science. * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY * OF SUCH DAMAGE. * * Author: Dirk Ziegelmeier <dziegel@gmx.de> */ #include "lwip/apps/snmp_opts.h" #if LWIP_SNMP && (NO_SYS == 0) /* don't build if not configured for use in lwipopts.h */ #include "lwip/apps/snmp_threadsync.h" #include "lwip/apps/snmp_core.h" #include "lwip/sys.h" #include <string.h> static void call_synced_function(struct threadsync_data *call_data, snmp_threadsync_called_fn fn) { sys_mutex_lock(&call_data->threadsync_node->instance->sem_usage_mutex); call_data->threadsync_node->instance->sync_fn(fn, call_data); sys_sem_wait(&call_data->threadsync_node->instance->sem); sys_mutex_unlock(&call_data->threadsync_node->instance->sem_usage_mutex); } static void threadsync_get_value_synced(void *ctx) { struct threadsync_data *call_data = (struct threadsync_data*)ctx; call_data->retval.s16 = call_data->proxy_instance.get_value(&call_data->proxy_instance, call_data->arg1.value); sys_sem_signal(&call_data->threadsync_node->instance->sem); } static s16_t threadsync_get_value(struct snmp_node_instance* instance, void* value) { struct threadsync_data *call_data = (struct threadsync_data*)instance->reference.ptr; call_data->arg1.value = value; call_synced_function(call_data, threadsync_get_value_synced); return call_data->retval.s16; } static void threadsync_set_test_synced(void *ctx) { struct threadsync_data *call_data = (struct threadsync_data*)ctx; call_data->retval.err = call_data->proxy_instance.set_test(&call_data->proxy_instance, call_data->arg2.len, call_data->arg1.value); sys_sem_signal(&call_data->threadsync_node->instance->sem); } static snmp_err_t threadsync_set_test(struct snmp_node_instance* instance, u16_t len, void *value) { struct threadsync_data *call_data = (struct threadsync_data*)instance->reference.ptr; call_data->arg1.value = value; call_data->arg2.len = len; call_synced_function(call_data, threadsync_set_test_synced); return call_data->retval.err; } static void threadsync_set_value_synced(void *ctx) { struct threadsync_data *call_data = (struct threadsync_data*)ctx; call_data->retval.err 
= call_data->proxy_instance.set_value(&call_data->proxy_instance, call_data->arg2.len, call_data->arg1.value); sys_sem_signal(&call_data->threadsync_node->instance->sem); } static snmp_err_t threadsync_set_value(struct snmp_node_instance* instance, u16_t len, void *value) { struct threadsync_data *call_data = (struct threadsync_data*)instance->reference.ptr; call_data->arg1.value = value; call_data->arg2.len = len; call_synced_function(call_data, threadsync_set_value_synced); return call_data->retval.err; } static void threadsync_release_instance_synced(void* ctx) { struct threadsync_data *call_data = (struct threadsync_data*)ctx; call_data->proxy_instance.release_instance(&call_data->proxy_instance); sys_sem_signal(&call_data->threadsync_node->instance->sem); } static void threadsync_release_instance(struct snmp_node_instance *instance) { struct threadsync_data *call_data = (struct threadsync_data*)instance->reference.ptr; if (call_data->proxy_instance.release_instance != NULL) { call_synced_function(call_data, threadsync_release_instance_synced); } } static void get_instance_synced(void* ctx) { struct threadsync_data *call_data = (struct threadsync_data*)ctx; const struct snmp_leaf_node *leaf = (const struct snmp_leaf_node*)(const void*)call_data->proxy_instance.node; call_data->retval.err = leaf->get_instance(call_data->arg1.root_oid, call_data->arg2.root_oid_len, &call_data->proxy_instance); sys_sem_signal(&call_data->threadsync_node->instance->sem); } static void get_next_instance_synced(void* ctx) { struct threadsync_data *call_data = (struct threadsync_data*)ctx; const struct snmp_leaf_node *leaf = (const struct snmp_leaf_node*)(const void*)call_data->proxy_instance.node; call_data->retval.err = leaf->get_next_instance(call_data->arg1.root_oid, call_data->arg2.root_oid_len, &call_data->proxy_instance); sys_sem_signal(&call_data->threadsync_node->instance->sem); } static snmp_err_t do_sync(const u32_t *root_oid, u8_t root_oid_len, struct snmp_node_instance* instance, snmp_threadsync_called_fn fn) { const struct snmp_threadsync_node *threadsync_node = (const struct snmp_threadsync_node*)(const void*)instance->node; struct threadsync_data *call_data = &threadsync_node->instance->data; if (threadsync_node->node.node.oid != threadsync_node->target->node.oid) { LWIP_DEBUGF(SNMP_DEBUG, ("Sync node OID does not match target node OID")); return SNMP_ERR_NOSUCHINSTANCE; } memset(&call_data->proxy_instance, 0, sizeof(call_data->proxy_instance)); instance->reference.ptr = call_data; snmp_oid_assign(&call_data->proxy_instance.instance_oid, instance->instance_oid.id, instance->instance_oid.len); call_data->proxy_instance.node = &threadsync_node->target->node; call_data->threadsync_node = threadsync_node; call_data->arg1.root_oid = root_oid; call_data->arg2.root_oid_len = root_oid_len; call_synced_function(call_data, fn); if (call_data->retval.err == SNMP_ERR_NOERROR) { instance->access = call_data->proxy_instance.access; instance->asn1_type = call_data->proxy_instance.asn1_type; instance->release_instance = threadsync_release_instance; instance->get_value = (call_data->proxy_instance.get_value != NULL)? threadsync_get_value : NULL; instance->set_value = (call_data->proxy_instance.set_value != NULL)? threadsync_set_value : NULL; instance->set_test = (call_data->proxy_instance.set_test != NULL)? 
threadsync_set_test : NULL; snmp_oid_assign(&instance->instance_oid, call_data->proxy_instance.instance_oid.id, call_data->proxy_instance.instance_oid.len); } return call_data->retval.err; } snmp_err_t snmp_threadsync_get_instance(const u32_t *root_oid, u8_t root_oid_len, struct snmp_node_instance* instance) { return do_sync(root_oid, root_oid_len, instance, get_instance_synced); } snmp_err_t snmp_threadsync_get_next_instance(const u32_t *root_oid, u8_t root_oid_len, struct snmp_node_instance* instance) { return do_sync(root_oid, root_oid_len, instance, get_next_instance_synced); } /** Initializes thread synchronization instance */ void snmp_threadsync_init(struct snmp_threadsync_instance *instance, snmp_threadsync_synchronizer_fn sync_fn) { err_t err = sys_mutex_new(&instance->sem_usage_mutex); LWIP_ASSERT("Failed to set up mutex", err == ERR_OK); err = sys_sem_new(&instance->sem, 0); LWIP_UNUSED_ARG(err); /* in case of LWIP_NOASSERT */ LWIP_ASSERT("Failed to set up semaphore", err == ERR_OK); instance->sync_fn = sync_fn; } #endif /* LWIP_SNMP */
2301_81045437/classic-platform
communication/lwip-2.0.3/src/apps/snmp/snmp_threadsync.c
C
unknown
8,516
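snmp_threadsync_init() needs a synchronizer that runs the supplied worker function in the thread owning the wrapped MIB; a minimal sketch using tcpip_callback() follows. The exact synchronizer typedef comes from lwip/apps/snmp_threadsync.h, the names my_threadsync/my_sync_fn are invented, and ignoring the tcpip_callback() return value is a simplification (if posting fails, the semaphore in call_synced_function() is never signalled).

#include "lwip/tcpip.h"
#include "lwip/apps/snmp_threadsync.h"

static struct snmp_threadsync_instance my_threadsync;

/* run fn(arg) in the tcpip thread, which is assumed to own the wrapped MIB */
static void my_sync_fn(snmp_threadsync_called_fn fn, void* arg)
{
  tcpip_callback(fn, arg);   /* the worker signals the semaphore itself */
}

void my_threadsync_setup(void)
{
  snmp_threadsync_init(&my_threadsync, my_sync_fn);
}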
/** * @file * SNMPv1 traps implementation. */ /* * Copyright (c) 2001-2004 Swedish Institute of Computer Science. * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY * OF SUCH DAMAGE. * * This file is part of the lwIP TCP/IP stack. * * Author: Martin Hentschel * Christiaan Simons <christiaan.simons@axon.tv> * */ #include "lwip/apps/snmp_opts.h" #if LWIP_SNMP /* don't build if not configured for use in lwipopts.h */ #include <string.h> #include "lwip/snmp.h" #include "lwip/sys.h" #include "lwip/apps/snmp.h" #include "lwip/apps/snmp_core.h" #include "snmp_msg.h" #include "snmp_asn1.h" #include "snmp_core_priv.h" struct snmp_msg_trap { /* source enterprise ID (sysObjectID) */ const struct snmp_obj_id *enterprise; /* source IP address, raw network order format */ ip_addr_t sip; /* generic trap code */ u32_t gen_trap; /* specific trap code */ u32_t spc_trap; /* timestamp */ u32_t ts; /* snmp_version */ u32_t snmp_version; /* output trap lengths used in ASN encoding */ /* encoding pdu length */ u16_t pdulen; /* encoding community length */ u16_t comlen; /* encoding sequence length */ u16_t seqlen; /* encoding varbinds sequence length */ u16_t vbseqlen; }; static u16_t snmp_trap_varbind_sum(struct snmp_msg_trap *trap, struct snmp_varbind *varbinds); static u16_t snmp_trap_header_sum(struct snmp_msg_trap *trap, u16_t vb_len); static void snmp_trap_header_enc(struct snmp_msg_trap *trap, struct snmp_pbuf_stream *pbuf_stream); static void snmp_trap_varbind_enc(struct snmp_msg_trap *trap, struct snmp_pbuf_stream *pbuf_stream, struct snmp_varbind *varbinds); /** Agent community string for sending traps */ extern const char *snmp_community_trap; void* snmp_traps_handle; struct snmp_trap_dst { /* destination IP address in network order */ ip_addr_t dip; /* set to 0 when disabled, >0 when enabled */ u8_t enable; }; static struct snmp_trap_dst trap_dst[SNMP_TRAP_DESTINATIONS]; static u8_t snmp_auth_traps_enabled = 0; /** * @ingroup snmp_traps * Sets enable switch for this trap destination. * @param dst_idx index in 0 .. SNMP_TRAP_DESTINATIONS-1 * @param enable switch if 0 destination is disabled >0 enabled. 
*/ void snmp_trap_dst_enable(u8_t dst_idx, u8_t enable) { if (dst_idx < SNMP_TRAP_DESTINATIONS) { trap_dst[dst_idx].enable = enable; } } /** * @ingroup snmp_traps * Sets IPv4 address for this trap destination. * @param dst_idx index in 0 .. SNMP_TRAP_DESTINATIONS-1 * @param dst IPv4 address in host order. */ void snmp_trap_dst_ip_set(u8_t dst_idx, const ip_addr_t *dst) { if (dst_idx < SNMP_TRAP_DESTINATIONS) { ip_addr_set(&trap_dst[dst_idx].dip, dst); } } /** * @ingroup snmp_traps * Enable/disable authentication traps */ void snmp_set_auth_traps_enabled(u8_t enable) { snmp_auth_traps_enabled = enable; } /** * @ingroup snmp_traps * Get authentication traps enabled state */ u8_t snmp_get_auth_traps_enabled(void) { return snmp_auth_traps_enabled; } /** * @ingroup snmp_traps * Sends a generic or enterprise specific trap message. * * @param eoid points to enterprise object identifier * @param generic_trap is the trap code * @param specific_trap used for enterprise traps when generic_trap == 6 * @param varbinds linked list of varbinds to be sent * @return ERR_OK when success, ERR_MEM if we're out of memory * * @note the use of the enterprise identifier field * is per RFC1215. * Use .iso.org.dod.internet.mgmt.mib-2.snmp for generic traps * and .iso.org.dod.internet.private.enterprises.yourenterprise * (sysObjectID) for specific traps. */ err_t snmp_send_trap(const struct snmp_obj_id* eoid, s32_t generic_trap, s32_t specific_trap, struct snmp_varbind *varbinds) { struct snmp_msg_trap trap_msg; struct snmp_trap_dst *td; struct pbuf *p; u16_t i, tot_len; err_t err = ERR_OK; trap_msg.snmp_version = 0; for (i = 0, td = &trap_dst[0]; i < SNMP_TRAP_DESTINATIONS; i++, td++) { if ((td->enable != 0) && !ip_addr_isany(&td->dip)) { /* lookup current source address for this dst */ if (snmp_get_local_ip_for_dst(snmp_traps_handle, &td->dip, &trap_msg.sip)) { if (eoid == NULL) { trap_msg.enterprise = snmp_get_device_enterprise_oid(); } else { trap_msg.enterprise = eoid; } trap_msg.gen_trap = generic_trap; if (generic_trap == SNMP_GENTRAP_ENTERPRISE_SPECIFIC) { trap_msg.spc_trap = specific_trap; } else { trap_msg.spc_trap = 0; } MIB2_COPY_SYSUPTIME_TO(&trap_msg.ts); /* pass 0, calculate length fields */ tot_len = snmp_trap_varbind_sum(&trap_msg, varbinds); tot_len = snmp_trap_header_sum(&trap_msg, tot_len); /* allocate pbuf(s) */ p = pbuf_alloc(PBUF_TRANSPORT, tot_len, PBUF_RAM); if (p != NULL) { struct snmp_pbuf_stream pbuf_stream; snmp_pbuf_stream_init(&pbuf_stream, p, 0, tot_len); /* pass 1, encode packet ino the pbuf(s) */ snmp_trap_header_enc(&trap_msg, &pbuf_stream); snmp_trap_varbind_enc(&trap_msg, &pbuf_stream, varbinds); snmp_stats.outtraps++; snmp_stats.outpkts++; /** send to the TRAP destination */ snmp_sendto(snmp_traps_handle, p, &td->dip, SNMP_TRAP_PORT); pbuf_free(p); } else { err = ERR_MEM; } } else { /* routing error */ err = ERR_RTE; } } } return err; } /** * @ingroup snmp_traps * Send generic SNMP trap */ err_t snmp_send_trap_generic(s32_t generic_trap) { static const struct snmp_obj_id oid = { 7, { 1, 3, 6, 1, 2, 1, 11 } }; return snmp_send_trap(&oid, generic_trap, 0, NULL); } /** * @ingroup snmp_traps * Send specific SNMP trap with variable bindings */ err_t snmp_send_trap_specific(s32_t specific_trap, struct snmp_varbind *varbinds) { return snmp_send_trap(NULL, SNMP_GENTRAP_ENTERPRISE_SPECIFIC, specific_trap, varbinds); } /** * @ingroup snmp_traps * Send coldstart trap */ void snmp_coldstart_trap(void) { snmp_send_trap_generic(SNMP_GENTRAP_COLDSTART); } /** * @ingroup snmp_traps * Send 
authentication failure trap (used internally by agent) */ void snmp_authfail_trap(void) { if (snmp_auth_traps_enabled != 0) { snmp_send_trap_generic(SNMP_GENTRAP_AUTH_FAILURE); } } static u16_t snmp_trap_varbind_sum(struct snmp_msg_trap *trap, struct snmp_varbind *varbinds) { struct snmp_varbind *varbind; u16_t tot_len; u8_t tot_len_len; tot_len = 0; varbind = varbinds; while (varbind != NULL) { struct snmp_varbind_len len; if (snmp_varbind_length(varbind, &len) == ERR_OK) { tot_len += 1 + len.vb_len_len + len.vb_value_len; } varbind = varbind->next; } trap->vbseqlen = tot_len; snmp_asn1_enc_length_cnt(trap->vbseqlen, &tot_len_len); tot_len += 1 + tot_len_len; return tot_len; } /** * Sums trap header field lengths from tail to head and * returns trap_header_lengths for second encoding pass. * * @param trap Trap message * @param vb_len varbind-list length * @return the required length for encoding the trap header */ static u16_t snmp_trap_header_sum(struct snmp_msg_trap *trap, u16_t vb_len) { u16_t tot_len; u16_t len; u8_t lenlen; tot_len = vb_len; snmp_asn1_enc_u32t_cnt(trap->ts, &len); snmp_asn1_enc_length_cnt(len, &lenlen); tot_len += 1 + len + lenlen; snmp_asn1_enc_s32t_cnt(trap->spc_trap, &len); snmp_asn1_enc_length_cnt(len, &lenlen); tot_len += 1 + len + lenlen; snmp_asn1_enc_s32t_cnt(trap->gen_trap, &len); snmp_asn1_enc_length_cnt(len, &lenlen); tot_len += 1 + len + lenlen; if (IP_IS_V6_VAL(trap->sip)) { #if LWIP_IPV6 len = sizeof(ip_2_ip6(&trap->sip)->addr); #endif } else { #if LWIP_IPV4 len = sizeof(ip_2_ip4(&trap->sip)->addr); #endif } snmp_asn1_enc_length_cnt(len, &lenlen); tot_len += 1 + len + lenlen; snmp_asn1_enc_oid_cnt(trap->enterprise->id, trap->enterprise->len, &len); snmp_asn1_enc_length_cnt(len, &lenlen); tot_len += 1 + len + lenlen; trap->pdulen = tot_len; snmp_asn1_enc_length_cnt(trap->pdulen, &lenlen); tot_len += 1 + lenlen; trap->comlen = (u16_t)LWIP_MIN(strlen(snmp_community_trap), 0xFFFF); snmp_asn1_enc_length_cnt(trap->comlen, &lenlen); tot_len += 1 + lenlen + trap->comlen; snmp_asn1_enc_s32t_cnt(trap->snmp_version, &len); snmp_asn1_enc_length_cnt(len, &lenlen); tot_len += 1 + len + lenlen; trap->seqlen = tot_len; snmp_asn1_enc_length_cnt(trap->seqlen, &lenlen); tot_len += 1 + lenlen; return tot_len; } static void snmp_trap_varbind_enc(struct snmp_msg_trap *trap, struct snmp_pbuf_stream *pbuf_stream, struct snmp_varbind *varbinds) { struct snmp_asn1_tlv tlv; struct snmp_varbind *varbind; varbind = varbinds; SNMP_ASN1_SET_TLV_PARAMS(tlv, SNMP_ASN1_TYPE_SEQUENCE, 0, trap->vbseqlen); snmp_ans1_enc_tlv(pbuf_stream, &tlv); while (varbind != NULL) { snmp_append_outbound_varbind(pbuf_stream, varbind); varbind = varbind->next; } } /** * Encodes trap header from head to tail. 
*/ static void snmp_trap_header_enc(struct snmp_msg_trap *trap, struct snmp_pbuf_stream *pbuf_stream) { struct snmp_asn1_tlv tlv; /* 'Message' sequence */ SNMP_ASN1_SET_TLV_PARAMS(tlv, SNMP_ASN1_TYPE_SEQUENCE, 0, trap->seqlen); snmp_ans1_enc_tlv(pbuf_stream, &tlv); /* version */ SNMP_ASN1_SET_TLV_PARAMS(tlv, SNMP_ASN1_TYPE_INTEGER, 0, 0); snmp_asn1_enc_s32t_cnt(trap->snmp_version, &tlv.value_len); snmp_ans1_enc_tlv(pbuf_stream, &tlv); snmp_asn1_enc_s32t(pbuf_stream, tlv.value_len, trap->snmp_version); /* community */ SNMP_ASN1_SET_TLV_PARAMS(tlv, SNMP_ASN1_TYPE_OCTET_STRING, 0, trap->comlen); snmp_ans1_enc_tlv(pbuf_stream, &tlv); snmp_asn1_enc_raw(pbuf_stream, (const u8_t *)snmp_community_trap, trap->comlen); /* 'PDU' sequence */ SNMP_ASN1_SET_TLV_PARAMS(tlv, (SNMP_ASN1_CLASS_CONTEXT | SNMP_ASN1_CONTENTTYPE_CONSTRUCTED | SNMP_ASN1_CONTEXT_PDU_TRAP), 0, trap->pdulen); snmp_ans1_enc_tlv(pbuf_stream, &tlv); /* object ID */ SNMP_ASN1_SET_TLV_PARAMS(tlv, SNMP_ASN1_TYPE_OBJECT_ID, 0, 0); snmp_asn1_enc_oid_cnt(trap->enterprise->id, trap->enterprise->len, &tlv.value_len); snmp_ans1_enc_tlv(pbuf_stream, &tlv); snmp_asn1_enc_oid(pbuf_stream, trap->enterprise->id, trap->enterprise->len); /* IP addr */ if (IP_IS_V6_VAL(trap->sip)) { #if LWIP_IPV6 SNMP_ASN1_SET_TLV_PARAMS(tlv, SNMP_ASN1_TYPE_IPADDR, 0, sizeof(ip_2_ip6(&trap->sip)->addr)); snmp_ans1_enc_tlv(pbuf_stream, &tlv); snmp_asn1_enc_raw(pbuf_stream, (const u8_t *)&ip_2_ip6(&trap->sip)->addr, sizeof(ip_2_ip6(&trap->sip)->addr)); #endif } else { #if LWIP_IPV4 SNMP_ASN1_SET_TLV_PARAMS(tlv, SNMP_ASN1_TYPE_IPADDR, 0, sizeof(ip_2_ip4(&trap->sip)->addr)); snmp_ans1_enc_tlv(pbuf_stream, &tlv); snmp_asn1_enc_raw(pbuf_stream, (const u8_t *)&ip_2_ip4(&trap->sip)->addr, sizeof(ip_2_ip4(&trap->sip)->addr)); #endif } /* trap length */ SNMP_ASN1_SET_TLV_PARAMS(tlv, SNMP_ASN1_TYPE_INTEGER, 0, 0); snmp_asn1_enc_s32t_cnt(trap->gen_trap, &tlv.value_len); snmp_ans1_enc_tlv(pbuf_stream, &tlv); snmp_asn1_enc_s32t(pbuf_stream, tlv.value_len, trap->gen_trap); /* specific trap */ SNMP_ASN1_SET_TLV_PARAMS(tlv, SNMP_ASN1_TYPE_INTEGER, 0, 0); snmp_asn1_enc_s32t_cnt(trap->spc_trap, &tlv.value_len); snmp_ans1_enc_tlv(pbuf_stream, &tlv); snmp_asn1_enc_s32t(pbuf_stream, tlv.value_len, trap->spc_trap); /* timestamp */ SNMP_ASN1_SET_TLV_PARAMS(tlv, SNMP_ASN1_TYPE_TIMETICKS, 0, 0); snmp_asn1_enc_s32t_cnt(trap->ts, &tlv.value_len); snmp_ans1_enc_tlv(pbuf_stream, &tlv); snmp_asn1_enc_s32t(pbuf_stream, tlv.value_len, trap->ts); } #endif /* LWIP_SNMP */
2301_81045437/classic-platform
communication/lwip-2.0.3/src/apps/snmp/snmp_traps.c
C
unknown
13,539
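Sending a trap requires at least one enabled destination; the sketch below wires one up and then emits both a generic and an enterprise-specific trap using the functions defined above. The destination index 0, the 192.168.0.10 management address and the specific-trap code 1 are arbitrary illustration values.

#include "lwip/apps/snmp.h"
#include "lwip/ip_addr.h"

void my_trap_setup(void)
{
  ip_addr_t mgr;

  IP_ADDR4(&mgr, 192, 168, 0, 10);   /* management station address (assumption) */
  snmp_trap_dst_ip_set(0, &mgr);     /* destination slot 0 (SNMP_TRAP_DESTINATIONS must be > 0) */
  snmp_trap_dst_enable(0, 1);

  snmp_coldstart_trap();             /* generic coldStart trap */
  snmp_send_trap_specific(1, NULL);  /* enterprise-specific trap 1, no varbinds */
}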
/** * @file * Additional SNMPv3 functionality RFC3414 and RFC3826. */ /* * Copyright (c) 2016 Elias Oenal. * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY * OF SUCH DAMAGE. * * Author: Elias Oenal <lwip@eliasoenal.com> */ #include "snmpv3_priv.h" #include "lwip/apps/snmpv3.h" #include "lwip/sys.h" #include <string.h> #if LWIP_SNMP && LWIP_SNMP_V3 #ifdef LWIP_SNMPV3_INCLUDE_ENGINE #include LWIP_SNMPV3_INCLUDE_ENGINE #endif #define SNMP_MAX_TIME_BOOT 2147483647UL /** Call this if engine has been changed. Has to reset boots, see below */ void snmpv3_engine_id_changed(void) { snmpv3_set_engine_boots(0); } /** According to RFC3414 2.2.2. * * The number of times that the SNMP engine has * (re-)initialized itself since snmpEngineID * was last configured. */ u32_t snmpv3_get_engine_boots_internal(void) { if (snmpv3_get_engine_boots() == 0 || snmpv3_get_engine_boots() < SNMP_MAX_TIME_BOOT) { return snmpv3_get_engine_boots(); } snmpv3_set_engine_boots(SNMP_MAX_TIME_BOOT); return snmpv3_get_engine_boots(); } /** RFC3414 2.2.2. * * Once the timer reaches 2147483647 it gets reset to zero and the * engine boot ups get incremented. */ u32_t snmpv3_get_engine_time_internal(void) { if (snmpv3_get_engine_time() >= SNMP_MAX_TIME_BOOT) { snmpv3_reset_engine_time(); if (snmpv3_get_engine_boots() < SNMP_MAX_TIME_BOOT - 1) { snmpv3_set_engine_boots(snmpv3_get_engine_boots() + 1); } else { snmpv3_set_engine_boots(SNMP_MAX_TIME_BOOT); } } return snmpv3_get_engine_time(); } #if LWIP_SNMP_V3_CRYPTO /* This function ignores the byte order suggestion in RFC3414 * since it simply doesn't influence the effectiveness of an IV. * * Implementing RFC3826 priv param algorithm if LWIP_RAND is available. * * @todo: This is a potential thread safety issue. 
*/ err_t snmpv3_build_priv_param(u8_t* priv_param) { #ifdef LWIP_RAND /* Based on RFC3826 */ static u8_t init; static u32_t priv1, priv2; /* Lazy initialisation */ if (init == 0) { init = 1; priv1 = LWIP_RAND(); priv2 = LWIP_RAND(); } SMEMCPY(&priv_param[0], &priv1, sizeof(priv1)); SMEMCPY(&priv_param[4], &priv2, sizeof(priv2)); /* Emulate 64bit increment */ priv1++; if (!priv1) { /* Overflow */ priv2++; } #else /* Based on RFC3414 */ static u32_t ctr; u32_t boots = LWIP_SNMPV3_GET_ENGINE_BOOTS(); SMEMCPY(&priv_param[0], &boots, 4); SMEMCPY(&priv_param[4], &ctr, 4); ctr++; #endif return ERR_OK; } #endif /* LWIP_SNMP_V3_CRYPTO */ #endif
2301_81045437/classic-platform
communication/lwip-2.0.3/src/apps/snmp/snmpv3.c
C
unknown
4,031
/** * @file * Dummy SNMPv3 functions. */ /* * Copyright (c) 2016 Elias Oenal. * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY * OF SUCH DAMAGE. * * Author: Elias Oenal <lwip@eliasoenal.com> * Dirk Ziegelmeier <dirk@ziegelmeier.net> */ #include "lwip/apps/snmpv3.h" #include "snmpv3_priv.h" #include <string.h> #include "lwip/err.h" #if LWIP_SNMP && LWIP_SNMP_V3 /** * @param username is a pointer to a string. * @param auth_algo is a pointer to u8_t. The implementation has to set this if user was found. * @param auth_key is a pointer to a pointer to a string. Implementation has to set this if user was found. * @param priv_algo is a pointer to u8_t. The implementation has to set this if user was found. * @param priv_key is a pointer to a pointer to a string. Implementation has to set this if user was found. */ err_t snmpv3_get_user(const char* username, u8_t *auth_algo, u8_t *auth_key, u8_t *priv_algo, u8_t *priv_key) { const char* engine_id; u8_t engine_id_len; if(strlen(username) == 0) { return ERR_OK; } if(memcmp(username, "lwip", 4) != 0) { return ERR_VAL; } snmpv3_get_engine_id(&engine_id, &engine_id_len); if(auth_key != NULL) { snmpv3_password_to_key_sha((const u8_t*)"maplesyrup", 10, (const u8_t*)engine_id, engine_id_len, auth_key); *auth_algo = SNMP_V3_AUTH_ALGO_SHA; } if(priv_key != NULL) { snmpv3_password_to_key_sha((const u8_t*)"maplesyrup", 10, (const u8_t*)engine_id, engine_id_len, priv_key); *priv_algo = SNMP_V3_PRIV_ALGO_DES; } return ERR_OK; } /** * Get engine ID from persistence * @param id * @param len */ void snmpv3_get_engine_id(const char **id, u8_t *len) { *id = "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02"; *len = 12; } /** * Store engine ID in persistence * @param id * @param len */ err_t snmpv3_set_engine_id(const char *id, u8_t len) { LWIP_UNUSED_ARG(id); LWIP_UNUSED_ARG(len); return ERR_OK; } /** * Get engine boots from persistence. Must be increased on each boot. * @return */ u32_t snmpv3_get_engine_boots(void) { return 0; } /** * Store engine boots in persistence * @param boots */ void snmpv3_set_engine_boots(u32_t boots) { LWIP_UNUSED_ARG(boots); } /** * RFC3414 2.2.2. 
* Once the timer reaches 2147483647 it gets reset to zero and the * engine boot ups get incremented. */ u32_t snmpv3_get_engine_time(void) { return 0; } /** * Reset current engine time to 0 */ void snmpv3_reset_engine_time(void) { } #endif /* LWIP_SNMP && LWIP_SNMP_V3 */
2301_81045437/classic-platform
communication/lwip-2.0.3/src/apps/snmp/snmpv3_dummy.c
C
unknown
4,069
/** * @file * SNMPv3 crypto/auth functions implemented for ARM mbedtls. */ /* * Copyright (c) 2016 Elias Oenal and Dirk Ziegelmeier. * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY * OF SUCH DAMAGE. * * Author: Elias Oenal <lwip@eliasoenal.com> * Dirk Ziegelmeier <dirk@ziegelmeier.net> */ #include "lwip/apps/snmpv3.h" #include "snmpv3_priv.h" #include "lwip/arch.h" #include "snmp_msg.h" #include "lwip/sys.h" #include <string.h> #if LWIP_SNMP && LWIP_SNMP_V3 && LWIP_SNMP_V3_MBEDTLS #include "mbedtls/md.h" #include "mbedtls/cipher.h" #include "mbedtls/md5.h" #include "mbedtls/sha1.h" err_t snmpv3_auth(struct snmp_pbuf_stream* stream, u16_t length, const u8_t* key, u8_t algo, u8_t* hmac_out) { u32_t i; u8_t key_len; const mbedtls_md_info_t *md_info; mbedtls_md_context_t ctx; struct snmp_pbuf_stream read_stream; snmp_pbuf_stream_init(&read_stream, stream->pbuf, stream->offset, stream->length); if (algo == SNMP_V3_AUTH_ALGO_MD5) { md_info = mbedtls_md_info_from_type(MBEDTLS_MD_MD5); key_len = SNMP_V3_MD5_LEN; } else if (algo == SNMP_V3_AUTH_ALGO_SHA) { md_info = mbedtls_md_info_from_type(MBEDTLS_MD_SHA1); key_len = SNMP_V3_SHA_LEN; } else { return ERR_ARG; } mbedtls_md_init(&ctx); if(mbedtls_md_setup(&ctx, md_info, 1) != 0) { return ERR_ARG; } if (mbedtls_md_hmac_starts(&ctx, key, key_len) != 0) { goto free_md; } for (i = 0; i < length; i++) { u8_t byte; if (snmp_pbuf_stream_read(&read_stream, &byte)) { goto free_md; } if (mbedtls_md_hmac_update(&ctx, &byte, 1) != 0) { goto free_md; } } if (mbedtls_md_hmac_finish(&ctx, hmac_out) != 0) { goto free_md; } mbedtls_md_free(&ctx); return ERR_OK; free_md: mbedtls_md_free(&ctx); return ERR_ARG; } #if LWIP_SNMP_V3_CRYPTO err_t snmpv3_crypt(struct snmp_pbuf_stream* stream, u16_t length, const u8_t* key, const u8_t* priv_param, const u32_t engine_boots, const u32_t engine_time, u8_t algo, u8_t mode) { size_t i; mbedtls_cipher_context_t ctx; const mbedtls_cipher_info_t *cipher_info; struct snmp_pbuf_stream read_stream; struct snmp_pbuf_stream write_stream; snmp_pbuf_stream_init(&read_stream, stream->pbuf, stream->offset, stream->length); snmp_pbuf_stream_init(&write_stream, stream->pbuf, stream->offset, stream->length); mbedtls_cipher_init(&ctx); if (algo 
== SNMP_V3_PRIV_ALGO_DES) { u8_t iv_local[8]; u8_t out_bytes[8]; size_t out_len; /* RFC 3414 mandates padding for DES */ if ((length & 0x07) != 0) { return ERR_ARG; } cipher_info = mbedtls_cipher_info_from_type(MBEDTLS_CIPHER_DES_CBC); if(mbedtls_cipher_setup(&ctx, cipher_info) != 0) { return ERR_ARG; } if(mbedtls_cipher_set_padding_mode(&ctx, MBEDTLS_PADDING_NONE) != 0) { return ERR_ARG; } if(mbedtls_cipher_setkey(&ctx, key, 8*8, (mode == SNMP_V3_PRIV_MODE_ENCRYPT)? MBEDTLS_ENCRYPT : MBEDTLS_DECRYPT) != 0) { goto error; } /* Prepare IV */ for (i = 0; i < LWIP_ARRAYSIZE(iv_local); i++) { iv_local[i] = priv_param[i] ^ key[i + 8]; } if(mbedtls_cipher_set_iv(&ctx, iv_local, LWIP_ARRAYSIZE(iv_local)) != 0) { goto error; } for (i = 0; i < length; i += 8) { size_t j; u8_t in_bytes[8]; out_len = LWIP_ARRAYSIZE(out_bytes) ; for (j = 0; j < LWIP_ARRAYSIZE(in_bytes); j++) { snmp_pbuf_stream_read(&read_stream, &in_bytes[j]); } if(mbedtls_cipher_update(&ctx, in_bytes, LWIP_ARRAYSIZE(in_bytes), out_bytes, &out_len) != 0) { goto error; } snmp_pbuf_stream_writebuf(&write_stream, out_bytes, out_len); } out_len = LWIP_ARRAYSIZE(out_bytes); if(mbedtls_cipher_finish(&ctx, out_bytes, &out_len) != 0) { goto error; } snmp_pbuf_stream_writebuf(&write_stream, out_bytes, out_len); } else if (algo == SNMP_V3_PRIV_ALGO_AES) { u8_t iv_local[16]; cipher_info = mbedtls_cipher_info_from_type(MBEDTLS_CIPHER_AES_128_CFB128); if(mbedtls_cipher_setup(&ctx, cipher_info) != 0) { return ERR_ARG; } if(mbedtls_cipher_setkey(&ctx, key, 16*8, (mode == SNMP_V3_PRIV_MODE_ENCRYPT)? MBEDTLS_ENCRYPT : MBEDTLS_DECRYPT) != 0) { goto error; } /* * IV is the big endian concatenation of boots, * uptime and priv param - see RFC3826. */ iv_local[0 + 0] = (engine_boots >> 24) & 0xFF; iv_local[0 + 1] = (engine_boots >> 16) & 0xFF; iv_local[0 + 2] = (engine_boots >> 8) & 0xFF; iv_local[0 + 3] = (engine_boots >> 0) & 0xFF; iv_local[4 + 0] = (engine_time >> 24) & 0xFF; iv_local[4 + 1] = (engine_time >> 16) & 0xFF; iv_local[4 + 2] = (engine_time >> 8) & 0xFF; iv_local[4 + 3] = (engine_time >> 0) & 0xFF; SMEMCPY(iv_local + 8, priv_param, 8); if(mbedtls_cipher_set_iv(&ctx, iv_local, LWIP_ARRAYSIZE(iv_local)) != 0) { goto error; } for (i = 0; i < length; i++) { u8_t in_byte; u8_t out_byte; size_t out_len = sizeof(out_byte); snmp_pbuf_stream_read(&read_stream, &in_byte); if(mbedtls_cipher_update(&ctx, &in_byte, sizeof(in_byte), &out_byte, &out_len) != 0) { goto error; } snmp_pbuf_stream_write(&write_stream, out_byte); } } else { return ERR_ARG; } mbedtls_cipher_free(&ctx); return ERR_OK; error: mbedtls_cipher_free(&ctx); return ERR_OK; } #endif /* LWIP_SNMP_V3_CRYPTO */ /* A.2.1. 
Password to Key Sample Code for MD5 */ void snmpv3_password_to_key_md5( const u8_t *password, /* IN */ u8_t passwordlen, /* IN */ const u8_t *engineID, /* IN - pointer to snmpEngineID */ u8_t engineLength,/* IN - length of snmpEngineID */ u8_t *key) /* OUT - pointer to caller 16-octet buffer */ { mbedtls_md5_context MD; u8_t *cp, password_buf[64]; u32_t password_index = 0; u8_t i; u32_t count = 0; mbedtls_md5_init(&MD); /* initialize MD5 */ mbedtls_md5_starts(&MD); /**********************************************/ /* Use while loop until we've done 1 Megabyte */ /**********************************************/ while (count < 1048576) { cp = password_buf; for (i = 0; i < 64; i++) { /*************************************************/ /* Take the next octet of the password, wrapping */ /* to the beginning of the password as necessary.*/ /*************************************************/ *cp++ = password[password_index++ % passwordlen]; } mbedtls_md5_update(&MD, password_buf, 64); count += 64; } mbedtls_md5_finish(&MD, key); /* tell MD5 we're done */ /*****************************************************/ /* Now localize the key with the engineID and pass */ /* through MD5 to produce final key */ /* May want to ensure that engineLength <= 32, */ /* otherwise need to use a buffer larger than 64 */ /*****************************************************/ SMEMCPY(password_buf, key, 16); MEMCPY(password_buf + 16, engineID, engineLength); SMEMCPY(password_buf + 16 + engineLength, key, 16); mbedtls_md5_starts(&MD); mbedtls_md5_update(&MD, password_buf, 32 + engineLength); mbedtls_md5_finish(&MD, key); mbedtls_md5_free(&MD); return; } /* A.2.2. Password to Key Sample Code for SHA */ void snmpv3_password_to_key_sha( const u8_t *password, /* IN */ u8_t passwordlen, /* IN */ const u8_t *engineID, /* IN - pointer to snmpEngineID */ u8_t engineLength,/* IN - length of snmpEngineID */ u8_t *key) /* OUT - pointer to caller 20-octet buffer */ { mbedtls_sha1_context SH; u8_t *cp, password_buf[72]; u32_t password_index = 0; u8_t i; u32_t count = 0; mbedtls_sha1_init(&SH); /* initialize SHA */ mbedtls_sha1_starts(&SH); /**********************************************/ /* Use while loop until we've done 1 Megabyte */ /**********************************************/ while (count < 1048576) { cp = password_buf; for (i = 0; i < 64; i++) { /*************************************************/ /* Take the next octet of the password, wrapping */ /* to the beginning of the password as necessary.*/ /*************************************************/ *cp++ = password[password_index++ % passwordlen]; } mbedtls_sha1_update(&SH, password_buf, 64); count += 64; } mbedtls_sha1_finish(&SH, key); /* tell SHA we're done */ /*****************************************************/ /* Now localize the key with the engineID and pass */ /* through SHA to produce final key */ /* May want to ensure that engineLength <= 32, */ /* otherwise need to use a buffer larger than 72 */ /*****************************************************/ SMEMCPY(password_buf, key, 20); MEMCPY(password_buf + 20, engineID, engineLength); SMEMCPY(password_buf + 20 + engineLength, key, 20); mbedtls_sha1_starts(&SH); mbedtls_sha1_update(&SH, password_buf, 40 + engineLength); mbedtls_sha1_finish(&SH, key); mbedtls_sha1_free(&SH); return; } #endif /* LWIP_SNMP && LWIP_SNMP_V3 && LWIP_SNMP_V3_MBEDTLS */
2301_81045437/classic-platform
communication/lwip-2.0.3/src/apps/snmp/snmpv3_mbedtls.c
C
unknown
10,978
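The snmpv3_mbedtls.c record above implements the RFC 3414 password-to-key routines used to derive localized SNMPv3 keys. As a quick illustration, the following standalone sketch (not part of lwIP; the example_* names and the static buffer are invented, and the password/engineID pair is the example input listed in RFC 3414 A.3.1) derives a localized MD5 authentication key with the function defined above:

#include "lwip/apps/snmpv3.h"   /* declares snmpv3_password_to_key_md5() */
#include "lwip/arch.h"
#include <string.h>

static u8_t example_auth_key[16];   /* MD5 keys are 16 octets (SNMP_V3_MD5_LEN) */

static void example_derive_localized_key(void)
{
  /* Example input from RFC 3414 A.3.1: password "maplesyrup",
   * snmpEngineID 00 00 00 00 00 00 00 00 00 00 00 02 (12 octets). */
  static const char password[] = "maplesyrup";
  static const u8_t engine_id[12] = {
    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02
  };

  snmpv3_password_to_key_md5((const u8_t *)password, (u8_t)(sizeof(password) - 1),
                             engine_id, (u8_t)sizeof(engine_id), example_auth_key);
  /* example_auth_key now holds the localized key that snmpv3_auth() and
   * snmpv3_crypt() above expect as their 'key' argument. */
}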
/** * @file * Additional SNMPv3 functionality RFC3414 and RFC3826 (internal API, do not use in client code). */ /* * Copyright (c) 2016 Elias Oenal. * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY * OF SUCH DAMAGE. * * Author: Elias Oenal <lwip@eliasoenal.com> */ #ifndef LWIP_HDR_APPS_SNMP_V3_PRIV_H #define LWIP_HDR_APPS_SNMP_V3_PRIV_H #include "lwip/apps/snmp_opts.h" #if LWIP_SNMP && LWIP_SNMP_V3 #include "snmp_pbuf_stream.h" /* According to RFC 3411 */ #define SNMP_V3_MAX_ENGINE_ID_LENGTH 32 #define SNMP_V3_MAX_USER_LENGTH 32 #define SNMP_V3_MAX_AUTH_PARAM_LENGTH 12 #define SNMP_V3_MAX_PRIV_PARAM_LENGTH 8 #define SNMP_V3_AUTH_FLAG 0x01 #define SNMP_V3_PRIV_FLAG 0x02 #define SNMP_V3_MD5_LEN 16 #define SNMP_V3_SHA_LEN 20 u32_t snmpv3_get_engine_boots_internal(void); u32_t snmpv3_get_engine_time_internal(void); err_t snmpv3_auth(struct snmp_pbuf_stream* stream, u16_t length, const u8_t* key, u8_t algo, u8_t* hmac_out); err_t snmpv3_crypt(struct snmp_pbuf_stream* stream, u16_t length, const u8_t* key, const u8_t* priv_param, const u32_t engine_boots, const u32_t engine_time, u8_t algo, u8_t mode); err_t snmpv3_build_priv_param(u8_t* priv_param); #endif #endif /* LWIP_HDR_APPS_SNMP_V3_PRIV_H */
2301_81045437/classic-platform
communication/lwip-2.0.3/src/apps/snmp/snmpv3_priv.h
C
unknown
2,693
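The internal header above defines, among other things, the msgFlags bits for authentication and privacy. As a minimal illustration (a hypothetical helper, not part of lwIP), this is how the two flag bits map onto the SNMPv3 security levels; privacy without authentication is not a legal combination:

#include "snmpv3_priv.h"   /* internal header shown above: SNMP_V3_AUTH_FLAG / SNMP_V3_PRIV_FLAG */

/* Returns 1 if the auth/priv combination in msgFlags is legal, 0 otherwise. */
static int example_flags_valid(u8_t msg_flags)
{
  u8_t auth = (u8_t)((msg_flags & SNMP_V3_AUTH_FLAG) != 0);
  u8_t priv = (u8_t)((msg_flags & SNMP_V3_PRIV_FLAG) != 0);
  /* noAuthNoPriv, authNoPriv and authPriv are valid; priv without auth is not. */
  return (priv && !auth) ? 0 : 1;
}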
/** * @file * SNTP client module */ /* * Copyright (c) 2007-2009 Frédéric Bernon, Simon Goldschmidt * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY * OF SUCH DAMAGE. * * This file is part of the lwIP TCP/IP stack. * * Author: Frédéric Bernon, Simon Goldschmidt */ /** * @defgroup sntp SNTP * @ingroup apps * * This is simple "SNTP" client for the lwIP raw API. * It is a minimal implementation of SNTPv4 as specified in RFC 4330. * * For a list of some public NTP servers, see this link : * http://support.ntp.org/bin/view/Servers/NTPPoolServers * * @todo: * - set/change servers at runtime * - complete SNTP_CHECK_RESPONSE checks 3 and 4 */ #include "lwip/apps/sntp.h" #include "lwip/opt.h" #include "lwip/timeouts.h" #include "lwip/udp.h" #include "lwip/dns.h" #include "lwip/ip_addr.h" #include "lwip/pbuf.h" #include "lwip/dhcp.h" #include <string.h> #include <time.h> #if LWIP_UDP /* Handle support for more than one server via SNTP_MAX_SERVERS */ #if SNTP_MAX_SERVERS > 1 #define SNTP_SUPPORT_MULTIPLE_SERVERS 1 #else /* NTP_MAX_SERVERS > 1 */ #define SNTP_SUPPORT_MULTIPLE_SERVERS 0 #endif /* NTP_MAX_SERVERS > 1 */ #if (SNTP_UPDATE_DELAY < 15000) && !defined(SNTP_SUPPRESS_DELAY_CHECK) #error "SNTPv4 RFC 4330 enforces a minimum update time of 15 seconds (define SNTP_SUPPRESS_DELAY_CHECK to disable this error)!" 
#endif /* Configure behaviour depending on microsecond or second precision */ #ifdef SNTP_SET_SYSTEM_TIME_US #define SNTP_CALC_TIME_US 1 #define SNTP_RECEIVE_TIME_SIZE 2 #else #define SNTP_SET_SYSTEM_TIME_US(sec, us) #define SNTP_CALC_TIME_US 0 #define SNTP_RECEIVE_TIME_SIZE 1 #endif /* the various debug levels for this file */ #define SNTP_DEBUG_TRACE (SNTP_DEBUG | LWIP_DBG_TRACE) #define SNTP_DEBUG_STATE (SNTP_DEBUG | LWIP_DBG_STATE) #define SNTP_DEBUG_WARN (SNTP_DEBUG | LWIP_DBG_LEVEL_WARNING) #define SNTP_DEBUG_WARN_STATE (SNTP_DEBUG | LWIP_DBG_LEVEL_WARNING | LWIP_DBG_STATE) #define SNTP_DEBUG_SERIOUS (SNTP_DEBUG | LWIP_DBG_LEVEL_SERIOUS) #define SNTP_ERR_KOD 1 /* SNTP protocol defines */ #define SNTP_MSG_LEN 48 #define SNTP_OFFSET_LI_VN_MODE 0 #define SNTP_LI_MASK 0xC0 #define SNTP_LI_NO_WARNING 0x00 #define SNTP_LI_LAST_MINUTE_61_SEC 0x01 #define SNTP_LI_LAST_MINUTE_59_SEC 0x02 #define SNTP_LI_ALARM_CONDITION 0x03 /* (clock not synchronized) */ #define SNTP_VERSION_MASK 0x38 #define SNTP_VERSION (4/* NTP Version 4*/<<3) #define SNTP_MODE_MASK 0x07 #define SNTP_MODE_CLIENT 0x03 #define SNTP_MODE_SERVER 0x04 #define SNTP_MODE_BROADCAST 0x05 #define SNTP_OFFSET_STRATUM 1 #define SNTP_STRATUM_KOD 0x00 #define SNTP_OFFSET_ORIGINATE_TIME 24 #define SNTP_OFFSET_RECEIVE_TIME 32 #define SNTP_OFFSET_TRANSMIT_TIME 40 /* number of seconds between 1900 and 1970 (MSB=1)*/ #define DIFF_SEC_1900_1970 (2208988800UL) /* number of seconds between 1970 and Feb 7, 2036 (6:28:16 UTC) (MSB=0) */ #define DIFF_SEC_1970_2036 (2085978496UL) /** * SNTP packet format (without optional fields) * Timestamps are coded as 64 bits: * - 32 bits seconds since Jan 01, 1970, 00:00 * - 32 bits seconds fraction (0-padded) * For future use, if the MSB in the seconds part is set, seconds are based * on Feb 07, 2036, 06:28:16. */ #ifdef PACK_STRUCT_USE_INCLUDES # include "arch/bpstruct.h" #endif PACK_STRUCT_BEGIN struct sntp_msg { PACK_STRUCT_FLD_8(u8_t li_vn_mode); PACK_STRUCT_FLD_8(u8_t stratum); PACK_STRUCT_FLD_8(u8_t poll); PACK_STRUCT_FLD_8(u8_t precision); PACK_STRUCT_FIELD(u32_t root_delay); PACK_STRUCT_FIELD(u32_t root_dispersion); PACK_STRUCT_FIELD(u32_t reference_identifier); PACK_STRUCT_FIELD(u32_t reference_timestamp[2]); PACK_STRUCT_FIELD(u32_t originate_timestamp[2]); PACK_STRUCT_FIELD(u32_t receive_timestamp[2]); PACK_STRUCT_FIELD(u32_t transmit_timestamp[2]); } PACK_STRUCT_STRUCT; PACK_STRUCT_END #ifdef PACK_STRUCT_USE_INCLUDES # include "arch/epstruct.h" #endif /* function prototypes */ static void sntp_request(void *arg); /** The operating mode */ static u8_t sntp_opmode; /** The UDP pcb used by the SNTP client */ static struct udp_pcb* sntp_pcb; /** Names/Addresses of servers */ struct sntp_server { #if SNTP_SERVER_DNS char* name; #endif /* SNTP_SERVER_DNS */ ip_addr_t addr; }; static struct sntp_server sntp_servers[SNTP_MAX_SERVERS]; #if SNTP_GET_SERVERS_FROM_DHCP static u8_t sntp_set_servers_from_dhcp; #endif /* SNTP_GET_SERVERS_FROM_DHCP */ #if SNTP_SUPPORT_MULTIPLE_SERVERS /** The currently used server (initialized to 0) */ static u8_t sntp_current_server; #else /* SNTP_SUPPORT_MULTIPLE_SERVERS */ #define sntp_current_server 0 #endif /* SNTP_SUPPORT_MULTIPLE_SERVERS */ #if SNTP_RETRY_TIMEOUT_EXP #define SNTP_RESET_RETRY_TIMEOUT() sntp_retry_timeout = SNTP_RETRY_TIMEOUT /** Retry time, initialized with SNTP_RETRY_TIMEOUT and doubled with each retry. 
*/ static u32_t sntp_retry_timeout; #else /* SNTP_RETRY_TIMEOUT_EXP */ #define SNTP_RESET_RETRY_TIMEOUT() #define sntp_retry_timeout SNTP_RETRY_TIMEOUT #endif /* SNTP_RETRY_TIMEOUT_EXP */ #if SNTP_CHECK_RESPONSE >= 1 /** Saves the last server address to compare with response */ static ip_addr_t sntp_last_server_address; #endif /* SNTP_CHECK_RESPONSE >= 1 */ #if SNTP_CHECK_RESPONSE >= 2 /** Saves the last timestamp sent (which is sent back by the server) * to compare against in response */ static u32_t sntp_last_timestamp_sent[2]; #endif /* SNTP_CHECK_RESPONSE >= 2 */ /** * SNTP processing of received timestamp */ static void sntp_process(u32_t *receive_timestamp) { /* convert SNTP time (1900-based) to unix GMT time (1970-based) * if MSB is 0, SNTP time is 2036-based! */ u32_t rx_secs = lwip_ntohl(receive_timestamp[0]); int is_1900_based = ((rx_secs & 0x80000000) != 0); u32_t t = is_1900_based ? (rx_secs - DIFF_SEC_1900_1970) : (rx_secs + DIFF_SEC_1970_2036); time_t tim = t; #if SNTP_CALC_TIME_US u32_t us = lwip_ntohl(receive_timestamp[1]) / 4295; SNTP_SET_SYSTEM_TIME_US(t, us); /* display local time from GMT time */ LWIP_DEBUGF(SNTP_DEBUG_TRACE, ("sntp_process: %s, %"U32_F" us", ctime(&tim), us)); #else /* SNTP_CALC_TIME_US */ /* change system time and/or the update the RTC clock */ SNTP_SET_SYSTEM_TIME(t); /* display local time from GMT time */ LWIP_DEBUGF(SNTP_DEBUG_TRACE, ("sntp_process: %s", ctime(&tim))); #endif /* SNTP_CALC_TIME_US */ LWIP_UNUSED_ARG(tim); } /** * Initialize request struct to be sent to server. */ static void sntp_initialize_request(struct sntp_msg *req) { memset(req, 0, SNTP_MSG_LEN); req->li_vn_mode = SNTP_LI_NO_WARNING | SNTP_VERSION | SNTP_MODE_CLIENT; #if SNTP_CHECK_RESPONSE >= 2 { u32_t sntp_time_sec, sntp_time_us; /* fill in transmit timestamp and save it in 'sntp_last_timestamp_sent' */ SNTP_GET_SYSTEM_TIME(sntp_time_sec, sntp_time_us); sntp_last_timestamp_sent[0] = lwip_htonl(sntp_time_sec + DIFF_SEC_1900_1970); req->transmit_timestamp[0] = sntp_last_timestamp_sent[0]; /* we send/save us instead of fraction to be faster... */ sntp_last_timestamp_sent[1] = lwip_htonl(sntp_time_us); req->transmit_timestamp[1] = sntp_last_timestamp_sent[1]; } #endif /* SNTP_CHECK_RESPONSE >= 2 */ } /** * Retry: send a new request (and increase retry timeout). * * @param arg is unused (only necessary to conform to sys_timeout) */ static void sntp_retry(void* arg) { LWIP_UNUSED_ARG(arg); LWIP_DEBUGF(SNTP_DEBUG_STATE, ("sntp_retry: Next request will be sent in %"U32_F" ms\n", sntp_retry_timeout)); /* set up a timer to send a retry and increase the retry delay */ sys_timeout(sntp_retry_timeout, sntp_request, NULL); #if SNTP_RETRY_TIMEOUT_EXP { u32_t new_retry_timeout; /* increase the timeout for next retry */ new_retry_timeout = sntp_retry_timeout << 1; /* limit to maximum timeout and prevent overflow */ if ((new_retry_timeout <= SNTP_RETRY_TIMEOUT_MAX) && (new_retry_timeout > sntp_retry_timeout)) { sntp_retry_timeout = new_retry_timeout; } } #endif /* SNTP_RETRY_TIMEOUT_EXP */ } #if SNTP_SUPPORT_MULTIPLE_SERVERS /** * If Kiss-of-Death is received (or another packet parsing error), * try the next server or retry the current server and increase the retry * timeout if only one server is available. 
* (implicitly, SNTP_MAX_SERVERS > 1) * * @param arg is unused (only necessary to conform to sys_timeout) */ static void sntp_try_next_server(void* arg) { u8_t old_server, i; LWIP_UNUSED_ARG(arg); old_server = sntp_current_server; for (i = 0; i < SNTP_MAX_SERVERS - 1; i++) { sntp_current_server++; if (sntp_current_server >= SNTP_MAX_SERVERS) { sntp_current_server = 0; } if (!ip_addr_isany(&sntp_servers[sntp_current_server].addr) #if SNTP_SERVER_DNS || (sntp_servers[sntp_current_server].name != NULL) #endif ) { LWIP_DEBUGF(SNTP_DEBUG_STATE, ("sntp_try_next_server: Sending request to server %"U16_F"\n", (u16_t)sntp_current_server)); /* new server: reset retry timeout */ SNTP_RESET_RETRY_TIMEOUT(); /* instantly send a request to the next server */ sntp_request(NULL); return; } } /* no other valid server found */ sntp_current_server = old_server; sntp_retry(NULL); } #else /* SNTP_SUPPORT_MULTIPLE_SERVERS */ /* Always retry on error if only one server is supported */ #define sntp_try_next_server sntp_retry #endif /* SNTP_SUPPORT_MULTIPLE_SERVERS */ /** UDP recv callback for the sntp pcb */ static void sntp_recv(void *arg, struct udp_pcb* pcb, struct pbuf *p, const ip_addr_t *addr, u16_t port) { u8_t mode; u8_t stratum; u32_t receive_timestamp[SNTP_RECEIVE_TIME_SIZE]; err_t err; LWIP_UNUSED_ARG(arg); LWIP_UNUSED_ARG(pcb); /* packet received: stop retry timeout */ sys_untimeout(sntp_try_next_server, NULL); sys_untimeout(sntp_request, NULL); err = ERR_ARG; #if SNTP_CHECK_RESPONSE >= 1 /* check server address and port */ if (((sntp_opmode != SNTP_OPMODE_POLL) || ip_addr_cmp(addr, &sntp_last_server_address)) && (port == SNTP_PORT)) #else /* SNTP_CHECK_RESPONSE >= 1 */ LWIP_UNUSED_ARG(addr); LWIP_UNUSED_ARG(port); #endif /* SNTP_CHECK_RESPONSE >= 1 */ { /* process the response */ if (p->tot_len == SNTP_MSG_LEN) { pbuf_copy_partial(p, &mode, 1, SNTP_OFFSET_LI_VN_MODE); mode &= SNTP_MODE_MASK; /* if this is a SNTP response... */ if (((sntp_opmode == SNTP_OPMODE_POLL) && (mode == SNTP_MODE_SERVER)) || ((sntp_opmode == SNTP_OPMODE_LISTENONLY) && (mode == SNTP_MODE_BROADCAST))) { pbuf_copy_partial(p, &stratum, 1, SNTP_OFFSET_STRATUM); if (stratum == SNTP_STRATUM_KOD) { /* Kiss-of-death packet. Use another server or increase UPDATE_DELAY. 
*/ err = SNTP_ERR_KOD; LWIP_DEBUGF(SNTP_DEBUG_STATE, ("sntp_recv: Received Kiss-of-Death\n")); } else { #if SNTP_CHECK_RESPONSE >= 2 /* check originate_timetamp against sntp_last_timestamp_sent */ u32_t originate_timestamp[2]; pbuf_copy_partial(p, &originate_timestamp, 8, SNTP_OFFSET_ORIGINATE_TIME); if ((originate_timestamp[0] != sntp_last_timestamp_sent[0]) || (originate_timestamp[1] != sntp_last_timestamp_sent[1])) { LWIP_DEBUGF(SNTP_DEBUG_WARN, ("sntp_recv: Invalid originate timestamp in response\n")); } else #endif /* SNTP_CHECK_RESPONSE >= 2 */ /* @todo: add code for SNTP_CHECK_RESPONSE >= 3 and >= 4 here */ { /* correct answer */ err = ERR_OK; pbuf_copy_partial(p, &receive_timestamp, SNTP_RECEIVE_TIME_SIZE * 4, SNTP_OFFSET_TRANSMIT_TIME); } } } else { LWIP_DEBUGF(SNTP_DEBUG_WARN, ("sntp_recv: Invalid mode in response: %"U16_F"\n", (u16_t)mode)); /* wait for correct response */ err = ERR_TIMEOUT; } } else { LWIP_DEBUGF(SNTP_DEBUG_WARN, ("sntp_recv: Invalid packet length: %"U16_F"\n", p->tot_len)); } } #if SNTP_CHECK_RESPONSE >= 1 else { /* packet from wrong remote address or port, wait for correct response */ err = ERR_TIMEOUT; } #endif /* SNTP_CHECK_RESPONSE >= 1 */ pbuf_free(p); if (err == ERR_OK) { sntp_process(receive_timestamp); /* Set up timeout for next request (only if poll response was received)*/ if (sntp_opmode == SNTP_OPMODE_POLL) { /* Correct response, reset retry timeout */ SNTP_RESET_RETRY_TIMEOUT(); sys_timeout((u32_t)SNTP_UPDATE_DELAY, sntp_request, NULL); LWIP_DEBUGF(SNTP_DEBUG_STATE, ("sntp_recv: Scheduled next time request: %"U32_F" ms\n", (u32_t)SNTP_UPDATE_DELAY)); } } else if (err != ERR_TIMEOUT) { /* Errors are only processed in case of an explicit poll response */ if (sntp_opmode == SNTP_OPMODE_POLL) { if (err == SNTP_ERR_KOD) { /* Kiss-of-death packet. Use another server or increase UPDATE_DELAY. */ sntp_try_next_server(NULL); } else { /* another error, try the same server again */ sntp_retry(NULL); } } } } /** Actually send an sntp request to a server. * * @param server_addr resolved IP address of the SNTP server */ static void sntp_send_request(const ip_addr_t *server_addr) { struct pbuf* p; p = pbuf_alloc(PBUF_TRANSPORT, SNTP_MSG_LEN, PBUF_RAM); if (p != NULL) { struct sntp_msg *sntpmsg = (struct sntp_msg *)p->payload; LWIP_DEBUGF(SNTP_DEBUG_STATE, ("sntp_send_request: Sending request to server\n")); /* initialize request message */ sntp_initialize_request(sntpmsg); /* send request */ udp_sendto(sntp_pcb, p, server_addr, SNTP_PORT); /* free the pbuf after sending it */ pbuf_free(p); /* set up receive timeout: try next server or retry on timeout */ sys_timeout((u32_t)SNTP_RECV_TIMEOUT, sntp_try_next_server, NULL); #if SNTP_CHECK_RESPONSE >= 1 /* save server address to verify it in sntp_recv */ ip_addr_set(&sntp_last_server_address, server_addr); #endif /* SNTP_CHECK_RESPONSE >= 1 */ } else { LWIP_DEBUGF(SNTP_DEBUG_SERIOUS, ("sntp_send_request: Out of memory, trying again in %"U32_F" ms\n", (u32_t)SNTP_RETRY_TIMEOUT)); /* out of memory: set up a timer to send a retry */ sys_timeout((u32_t)SNTP_RETRY_TIMEOUT, sntp_request, NULL); } } #if SNTP_SERVER_DNS /** * DNS found callback when using DNS names as server address. 
*/ static void sntp_dns_found(const char* hostname, const ip_addr_t *ipaddr, void *arg) { LWIP_UNUSED_ARG(hostname); LWIP_UNUSED_ARG(arg); if (ipaddr != NULL) { /* Address resolved, send request */ LWIP_DEBUGF(SNTP_DEBUG_STATE, ("sntp_dns_found: Server address resolved, sending request\n")); sntp_send_request(ipaddr); } else { /* DNS resolving failed -> try another server */ LWIP_DEBUGF(SNTP_DEBUG_WARN_STATE, ("sntp_dns_found: Failed to resolve server address resolved, trying next server\n")); sntp_try_next_server(NULL); } } #endif /* SNTP_SERVER_DNS */ /** * Send out an sntp request. * * @param arg is unused (only necessary to conform to sys_timeout) */ static void sntp_request(void *arg) { ip_addr_t sntp_server_address; err_t err; LWIP_UNUSED_ARG(arg); /* initialize SNTP server address */ #if SNTP_SERVER_DNS if (sntp_servers[sntp_current_server].name) { /* always resolve the name and rely on dns-internal caching & timeout */ ip_addr_set_zero(&sntp_servers[sntp_current_server].addr); err = dns_gethostbyname(sntp_servers[sntp_current_server].name, &sntp_server_address, sntp_dns_found, NULL); if (err == ERR_INPROGRESS) { /* DNS request sent, wait for sntp_dns_found being called */ LWIP_DEBUGF(SNTP_DEBUG_STATE, ("sntp_request: Waiting for server address to be resolved.\n")); return; } else if (err == ERR_OK) { sntp_servers[sntp_current_server].addr = sntp_server_address; } } else #endif /* SNTP_SERVER_DNS */ { sntp_server_address = sntp_servers[sntp_current_server].addr; err = (ip_addr_isany_val(sntp_server_address)) ? ERR_ARG : ERR_OK; } if (err == ERR_OK) { LWIP_DEBUGF(SNTP_DEBUG_TRACE, ("sntp_request: current server address is %s\n", ipaddr_ntoa(&sntp_server_address))); sntp_send_request(&sntp_server_address); } else { /* address conversion failed, try another server */ LWIP_DEBUGF(SNTP_DEBUG_WARN_STATE, ("sntp_request: Invalid server address, trying next server.\n")); sys_timeout((u32_t)SNTP_RETRY_TIMEOUT, sntp_try_next_server, NULL); } } /** * @ingroup sntp * Initialize this module. * Send out request instantly or after SNTP_STARTUP_DELAY(_FUNC). */ void sntp_init(void) { #ifdef SNTP_SERVER_ADDRESS #if SNTP_SERVER_DNS sntp_setservername(0, SNTP_SERVER_ADDRESS); #else #error SNTP_SERVER_ADDRESS string not supported SNTP_SERVER_DNS==0 #endif #endif /* SNTP_SERVER_ADDRESS */ if (sntp_pcb == NULL) { sntp_pcb = udp_new_ip_type(IPADDR_TYPE_ANY); LWIP_ASSERT("Failed to allocate udp pcb for sntp client", sntp_pcb != NULL); if (sntp_pcb != NULL) { udp_recv(sntp_pcb, sntp_recv, NULL); if (sntp_opmode == SNTP_OPMODE_POLL) { SNTP_RESET_RETRY_TIMEOUT(); #if SNTP_STARTUP_DELAY sys_timeout((u32_t)SNTP_STARTUP_DELAY_FUNC, sntp_request, NULL); #else sntp_request(NULL); #endif } else if (sntp_opmode == SNTP_OPMODE_LISTENONLY) { ip_set_option(sntp_pcb, SOF_BROADCAST); udp_bind(sntp_pcb, IP_ANY_TYPE, SNTP_PORT); } } } } /** * @ingroup sntp * Stop this module. */ void sntp_stop(void) { if (sntp_pcb != NULL) { sys_untimeout(sntp_request, NULL); sys_untimeout(sntp_try_next_server, NULL); udp_remove(sntp_pcb); sntp_pcb = NULL; } } /** * @ingroup sntp * Get enabled state. */ u8_t sntp_enabled(void) { return (sntp_pcb != NULL)? 1 : 0; } /** * @ingroup sntp * Sets the operating mode. 
* @param operating_mode one of the available operating modes */ void sntp_setoperatingmode(u8_t operating_mode) { LWIP_ASSERT("Invalid operating mode", operating_mode <= SNTP_OPMODE_LISTENONLY); LWIP_ASSERT("Operating mode must not be set while SNTP client is running", sntp_pcb == NULL); sntp_opmode = operating_mode; } /** * @ingroup sntp * Gets the operating mode. */ u8_t sntp_getoperatingmode(void) { return sntp_opmode; } #if SNTP_GET_SERVERS_FROM_DHCP /** * Config SNTP server handling by IP address, name, or DHCP; clear table * @param set_servers_from_dhcp enable or disable getting server addresses from dhcp */ void sntp_servermode_dhcp(int set_servers_from_dhcp) { u8_t new_mode = set_servers_from_dhcp ? 1 : 0; if (sntp_set_servers_from_dhcp != new_mode) { sntp_set_servers_from_dhcp = new_mode; } } #endif /* SNTP_GET_SERVERS_FROM_DHCP */ /** * @ingroup sntp * Initialize one of the NTP servers by IP address * * @param idx the index of the NTP server to set must be < SNTP_MAX_SERVERS * @param server IP address of the NTP server to set */ void sntp_setserver(u8_t idx, const ip_addr_t *server) { if (idx < SNTP_MAX_SERVERS) { if (server != NULL) { sntp_servers[idx].addr = (*server); } else { ip_addr_set_zero(&sntp_servers[idx].addr); } #if SNTP_SERVER_DNS sntp_servers[idx].name = NULL; #endif } } #if LWIP_DHCP && SNTP_GET_SERVERS_FROM_DHCP /** * Initialize one of the NTP servers by IP address, required by DHCP * * @param numdns the index of the NTP server to set must be < SNTP_MAX_SERVERS * @param dnsserver IP address of the NTP server to set */ void dhcp_set_ntp_servers(u8_t num, const ip4_addr_t *server) { LWIP_DEBUGF(SNTP_DEBUG_TRACE, ("sntp: %s %u.%u.%u.%u as NTP server #%u via DHCP\n", (sntp_set_servers_from_dhcp ? "Got" : "Rejected"), ip4_addr1(server), ip4_addr2(server), ip4_addr3(server), ip4_addr4(server), num)); if (sntp_set_servers_from_dhcp && num) { u8_t i; for (i = 0; (i < num) && (i < SNTP_MAX_SERVERS); i++) { ip_addr_t addr; ip_addr_copy_from_ip4(addr, server[i]); sntp_setserver(i, &addr); } for (i = num; i < SNTP_MAX_SERVERS; i++) { sntp_setserver(i, NULL); } } } #endif /* LWIP_DHCP && SNTP_GET_SERVERS_FROM_DHCP */ /** * @ingroup sntp * Obtain one of the currently configured by IP address (or DHCP) NTP servers * * @param idx the index of the NTP server * @return IP address of the indexed NTP server or "ip_addr_any" if the NTP * server has not been configured by address (or at all). */ const ip_addr_t* sntp_getserver(u8_t idx) { if (idx < SNTP_MAX_SERVERS) { return &sntp_servers[idx].addr; } return IP_ADDR_ANY; } #if SNTP_SERVER_DNS /** * Initialize one of the NTP servers by name * * @param numdns the index of the NTP server to set must be < SNTP_MAX_SERVERS * @param dnsserver DNS name of the NTP server to set, to be resolved at contact time */ void sntp_setservername(u8_t idx, char *server) { if (idx < SNTP_MAX_SERVERS) { sntp_servers[idx].name = server; } } /** * Obtain one of the currently configured by name NTP servers. * * @param numdns the index of the NTP server * @return IP address of the indexed NTP server or NULL if the NTP * server has not been configured by name (or at all) */ char * sntp_getservername(u8_t idx) { if (idx < SNTP_MAX_SERVERS) { return sntp_servers[idx].name; } return NULL; } #endif /* SNTP_SERVER_DNS */ #endif /* LWIP_UDP */
2301_81045437/classic-platform
communication/lwip-2.0.3/src/apps/sntp/sntp.c
C
unknown
23,709
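sntp_process() in the record above converts the received NTP timestamp to Unix time, including the era split at 2036 and the fraction-to-microseconds division by 4295. Below is a minimal standalone sketch of the same conversion (the example_* names are hypothetical and the inputs are assumed to be in host byte order already):

#include <stdint.h>

#define EX_DIFF_SEC_1900_1970 2208988800UL  /* seconds between 1900 and 1970 */
#define EX_DIFF_SEC_1970_2036 2085978496UL  /* seconds between 1970 and Feb 7, 2036 06:28:16 UTC */

/* Convert the 32-bit NTP seconds field to Unix seconds. */
static uint32_t example_ntp_to_unix_seconds(uint32_t ntp_seconds)
{
  if (ntp_seconds & 0x80000000UL) {
    /* MSB set: 1900-based era (covers 1968..2036) */
    return ntp_seconds - EX_DIFF_SEC_1900_1970;
  }
  /* MSB clear: era starting Feb 7, 2036 */
  return ntp_seconds + EX_DIFF_SEC_1970_2036;
}

/* Convert the 32-bit fraction field to microseconds: 2^32 / 10^6 is roughly 4295. */
static uint32_t example_ntp_fraction_to_us(uint32_t ntp_fraction)
{
  return ntp_fraction / 4295;
}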
/****************************************************************//** * * @file tftp_server.c * * @author Logan Gunthorpe <logang@deltatee.com> * Dirk Ziegelmeier <dziegel@gmx.de> * * @brief Trivial File Transfer Protocol (RFC 1350) * * Copyright (c) Deltatee Enterprises Ltd. 2013 * All rights reserved. * ********************************************************************/ /* * Redistribution and use in source and binary forms, with or without * modification,are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * Author: Logan Gunthorpe <logang@deltatee.com> * Dirk Ziegelmeier <dziegel@gmx.de> * */ /** * @defgroup tftp TFTP server * @ingroup apps * * This is simple TFTP server for the lwIP raw API. 
*/ #include "lwip/apps/tftp_server.h" #if LWIP_UDP #include "lwip/udp.h" #include "lwip/timeouts.h" #include "lwip/debug.h" #define TFTP_MAX_PAYLOAD_SIZE 512 #define TFTP_HEADER_LENGTH 4 #define TFTP_RRQ 1 #define TFTP_WRQ 2 #define TFTP_DATA 3 #define TFTP_ACK 4 #define TFTP_ERROR 5 enum tftp_error { TFTP_ERROR_FILE_NOT_FOUND = 1, TFTP_ERROR_ACCESS_VIOLATION = 2, TFTP_ERROR_DISK_FULL = 3, TFTP_ERROR_ILLEGAL_OPERATION = 4, TFTP_ERROR_UNKNOWN_TRFR_ID = 5, TFTP_ERROR_FILE_EXISTS = 6, TFTP_ERROR_NO_SUCH_USER = 7 }; #include <string.h> struct tftp_state { const struct tftp_context *ctx; void *handle; struct pbuf *last_data; struct udp_pcb *upcb; ip_addr_t addr; u16_t port; int timer; int last_pkt; u16_t blknum; u8_t retries; u8_t mode_write; }; static struct tftp_state tftp_state; static void tftp_tmr(void* arg); static void close_handle(void) { tftp_state.port = 0; ip_addr_set_any(0, &tftp_state.addr); if(tftp_state.last_data != NULL) { pbuf_free(tftp_state.last_data); tftp_state.last_data = NULL; } sys_untimeout(tftp_tmr, NULL); if (tftp_state.handle) { tftp_state.ctx->close(tftp_state.handle); tftp_state.handle = NULL; LWIP_DEBUGF(TFTP_DEBUG | LWIP_DBG_STATE, ("tftp: closing\n")); } } static void send_error(const ip_addr_t *addr, u16_t port, enum tftp_error code, const char *str) { int str_length = strlen(str); struct pbuf* p; u16_t* payload; p = pbuf_alloc(PBUF_TRANSPORT, (u16_t)(TFTP_HEADER_LENGTH + str_length + 1), PBUF_RAM); if(p == NULL) { return; } payload = (u16_t*) p->payload; payload[0] = PP_HTONS(TFTP_ERROR); payload[1] = lwip_htons(code); MEMCPY(&payload[2], str, str_length + 1); udp_sendto(tftp_state.upcb, p, addr, port); pbuf_free(p); } static void send_ack(u16_t blknum) { struct pbuf* p; u16_t* payload; p = pbuf_alloc(PBUF_TRANSPORT, TFTP_HEADER_LENGTH, PBUF_RAM); if(p == NULL) { return; } payload = (u16_t*) p->payload; payload[0] = PP_HTONS(TFTP_ACK); payload[1] = lwip_htons(blknum); udp_sendto(tftp_state.upcb, p, &tftp_state.addr, tftp_state.port); pbuf_free(p); } static void resend_data(void) { struct pbuf *p = pbuf_alloc(PBUF_TRANSPORT, tftp_state.last_data->len, PBUF_RAM); if(p == NULL) { return; } if(pbuf_copy(p, tftp_state.last_data) != ERR_OK) { pbuf_free(p); return; } udp_sendto(tftp_state.upcb, p, &tftp_state.addr, tftp_state.port); pbuf_free(p); } static void send_data(void) { u16_t *payload; int ret; if(tftp_state.last_data != NULL) { pbuf_free(tftp_state.last_data); } tftp_state.last_data = pbuf_alloc(PBUF_TRANSPORT, TFTP_HEADER_LENGTH + TFTP_MAX_PAYLOAD_SIZE, PBUF_RAM); if(tftp_state.last_data == NULL) { return; } payload = (u16_t *) tftp_state.last_data->payload; payload[0] = PP_HTONS(TFTP_DATA); payload[1] = lwip_htons(tftp_state.blknum); ret = tftp_state.ctx->read(tftp_state.handle, &payload[2], TFTP_MAX_PAYLOAD_SIZE); if (ret < 0) { send_error(&tftp_state.addr, tftp_state.port, TFTP_ERROR_ACCESS_VIOLATION, "Error occured while reading the file."); close_handle(); return; } pbuf_realloc(tftp_state.last_data, (u16_t)(TFTP_HEADER_LENGTH + ret)); resend_data(); } static void recv(void *arg, struct udp_pcb *upcb, struct pbuf *p, const ip_addr_t *addr, u16_t port) { u16_t *sbuf = (u16_t *) p->payload; int opcode; LWIP_UNUSED_ARG(arg); LWIP_UNUSED_ARG(upcb); if (((tftp_state.port != 0) && (port != tftp_state.port)) || (!ip_addr_isany_val(tftp_state.addr) && !ip_addr_cmp(&tftp_state.addr, addr))) { send_error(addr, port, TFTP_ERROR_ACCESS_VIOLATION, "Only one connection at a time is supported"); pbuf_free(p); return; } opcode = sbuf[0]; tftp_state.last_pkt = 
tftp_state.timer; tftp_state.retries = 0; switch (opcode) { case PP_HTONS(TFTP_RRQ): /* fall through */ case PP_HTONS(TFTP_WRQ): { const char tftp_null = 0; char filename[TFTP_MAX_FILENAME_LEN]; char mode[TFTP_MAX_MODE_LEN]; u16_t filename_end_offset; u16_t mode_end_offset; if(tftp_state.handle != NULL) { send_error(addr, port, TFTP_ERROR_ACCESS_VIOLATION, "Only one connection at a time is supported"); break; } sys_timeout(TFTP_TIMER_MSECS, tftp_tmr, NULL); /* find \0 in pbuf -> end of filename string */ filename_end_offset = pbuf_memfind(p, &tftp_null, sizeof(tftp_null), 2); if((u16_t)(filename_end_offset-2) > sizeof(filename)) { send_error(addr, port, TFTP_ERROR_ACCESS_VIOLATION, "Filename too long/not NULL terminated"); break; } pbuf_copy_partial(p, filename, filename_end_offset-2, 2); /* find \0 in pbuf -> end of mode string */ mode_end_offset = pbuf_memfind(p, &tftp_null, sizeof(tftp_null), filename_end_offset+1); if((u16_t)(mode_end_offset-filename_end_offset) > sizeof(mode)) { send_error(addr, port, TFTP_ERROR_ACCESS_VIOLATION, "Mode too long/not NULL terminated"); break; } pbuf_copy_partial(p, mode, mode_end_offset-filename_end_offset, filename_end_offset+1); tftp_state.handle = tftp_state.ctx->open(filename, mode, opcode == PP_HTONS(TFTP_WRQ)); tftp_state.blknum = 1; if (!tftp_state.handle) { send_error(addr, port, TFTP_ERROR_FILE_NOT_FOUND, "Unable to open requested file."); break; } LWIP_DEBUGF(TFTP_DEBUG | LWIP_DBG_STATE, ("tftp: %s request from ", (opcode == PP_HTONS(TFTP_WRQ)) ? "write" : "read")); ip_addr_debug_print(TFTP_DEBUG | LWIP_DBG_STATE, addr); LWIP_DEBUGF(TFTP_DEBUG | LWIP_DBG_STATE, (" for '%s' mode '%s'\n", filename, mode)); ip_addr_copy(tftp_state.addr, *addr); tftp_state.port = port; if (opcode == PP_HTONS(TFTP_WRQ)) { tftp_state.mode_write = 1; send_ack(0); } else { tftp_state.mode_write = 0; send_data(); } break; } case PP_HTONS(TFTP_DATA): { int ret; u16_t blknum; if (tftp_state.handle == NULL) { send_error(addr, port, TFTP_ERROR_ACCESS_VIOLATION, "No connection"); break; } if (tftp_state.mode_write != 1) { send_error(addr, port, TFTP_ERROR_ACCESS_VIOLATION, "Not a write connection"); break; } blknum = lwip_ntohs(sbuf[1]); pbuf_header(p, -TFTP_HEADER_LENGTH); ret = tftp_state.ctx->write(tftp_state.handle, p); if (ret < 0) { send_error(addr, port, TFTP_ERROR_ACCESS_VIOLATION, "error writing file"); close_handle(); } else { send_ack(blknum); } if (p->tot_len < TFTP_MAX_PAYLOAD_SIZE) { close_handle(); } break; } case PP_HTONS(TFTP_ACK): { u16_t blknum; int lastpkt; if (tftp_state.handle == NULL) { send_error(addr, port, TFTP_ERROR_ACCESS_VIOLATION, "No connection"); break; } if (tftp_state.mode_write != 0) { send_error(addr, port, TFTP_ERROR_ACCESS_VIOLATION, "Not a read connection"); break; } blknum = lwip_ntohs(sbuf[1]); if (blknum != tftp_state.blknum) { send_error(addr, port, TFTP_ERROR_UNKNOWN_TRFR_ID, "Wrong block number"); break; } lastpkt = 0; if (tftp_state.last_data != NULL) { lastpkt = tftp_state.last_data->tot_len != (TFTP_MAX_PAYLOAD_SIZE + TFTP_HEADER_LENGTH); } if (!lastpkt) { tftp_state.blknum++; send_data(); } else { close_handle(); } break; } default: send_error(addr, port, TFTP_ERROR_ILLEGAL_OPERATION, "Unknown operation"); break; } pbuf_free(p); } static void tftp_tmr(void* arg) { LWIP_UNUSED_ARG(arg); tftp_state.timer++; if (tftp_state.handle == NULL) { return; } sys_timeout(TFTP_TIMER_MSECS, tftp_tmr, NULL); if ((tftp_state.timer - tftp_state.last_pkt) > (TFTP_TIMEOUT_MSECS / TFTP_TIMER_MSECS)) { if ((tftp_state.last_data != NULL) && 
(tftp_state.retries < TFTP_MAX_RETRIES)) { LWIP_DEBUGF(TFTP_DEBUG | LWIP_DBG_STATE, ("tftp: timeout, retrying\n")); resend_data(); tftp_state.retries++; } else { LWIP_DEBUGF(TFTP_DEBUG | LWIP_DBG_STATE, ("tftp: timeout\n")); close_handle(); } } } /** @ingroup tftp * Initialize TFTP server. * @param ctx TFTP callback struct */ err_t tftp_init(const struct tftp_context *ctx) { err_t ret; struct udp_pcb *pcb = udp_new_ip_type(IPADDR_TYPE_ANY); if (pcb == NULL) { return ERR_MEM; } ret = udp_bind(pcb, IP_ANY_TYPE, TFTP_PORT); if (ret != ERR_OK) { udp_remove(pcb); return ret; } tftp_state.handle = NULL; tftp_state.port = 0; tftp_state.ctx = ctx; tftp_state.timer = 0; tftp_state.last_data = NULL; tftp_state.upcb = pcb; udp_recv(pcb, recv, NULL); return ERR_OK; } #endif /* LWIP_UDP */
2301_81045437/classic-platform
communication/lwip-2.0.3/src/apps/tftp/tftp_server.c
C
unknown
11,503
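tftp_init() in the record above takes a struct tftp_context of open/close/read/write callbacks. The sketch below wires those callbacks to a trivial in-memory buffer so the call flow is visible; the callback signatures and the assumed member order (open, close, read, write) mirror how tftp_server.c invokes them, while the example_/ex_ names and the backing buffer are invented for illustration only:

#include "lwip/apps/tftp_server.h"
#include "lwip/pbuf.h"
#include <string.h>

static char example_store[1024];
static u16_t example_pos;

static void *ex_open(const char *fname, const char *mode, u8_t is_write)
{
  LWIP_UNUSED_ARG(fname); LWIP_UNUSED_ARG(mode); LWIP_UNUSED_ARG(is_write);
  example_pos = 0;
  return example_store;                       /* any non-NULL handle will do */
}

static void ex_close(void *handle)
{
  LWIP_UNUSED_ARG(handle);
}

static int ex_read(void *handle, void *buf, int bytes)
{
  int left = (int)sizeof(example_store) - (int)example_pos;
  int n = (bytes < left) ? bytes : left;      /* a short read ends the transfer */
  LWIP_UNUSED_ARG(handle);
  memcpy(buf, &example_store[example_pos], (size_t)n);
  example_pos = (u16_t)(example_pos + n);
  return n;                                   /* negative would signal an error */
}

static int ex_write(void *handle, struct pbuf *p)
{
  LWIP_UNUSED_ARG(handle);
  if (((u32_t)example_pos + p->tot_len) > sizeof(example_store)) {
    return -1;                                /* "disk" full */
  }
  pbuf_copy_partial(p, &example_store[example_pos], p->tot_len, 0);
  example_pos = (u16_t)(example_pos + p->tot_len);
  return 0;
}

static const struct tftp_context example_tftp_ctx = {
  ex_open, ex_close, ex_read, ex_write
};

/* Somewhere after lwIP is up: tftp_init(&example_tftp_ctx); */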
/** * @file * Common functions used throughout the stack. * * These are reference implementations of the byte swapping functions. * Again with the aim of being simple, correct and fully portable. * Byte swapping is the second thing you would want to optimize. You will * need to port it to your architecture and in your cc.h: * * \#define lwip_htons(x) your_htons * \#define lwip_htonl(x) your_htonl * * Note lwip_ntohs() and lwip_ntohl() are merely references to the htonx counterparts. * * If you \#define them to htons() and htonl(), you should * \#define LWIP_DONT_PROVIDE_BYTEORDER_FUNCTIONS to prevent lwIP from * defining htonx/ntohx compatibility macros. * @defgroup sys_nonstandard Non-standard functions * @ingroup sys_layer * lwIP provides default implementations for non-standard functions. * These can be mapped to OS functions to reduce code footprint if desired. * All defines related to this section must not be placed in lwipopts.h, * but in arch/cc.h! * These options cannot be \#defined in lwipopts.h since they are not options * of lwIP itself, but options of the lwIP port to your system. */ /* * Copyright (c) 2001-2004 Swedish Institute of Computer Science. * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY * OF SUCH DAMAGE. * * This file is part of the lwIP TCP/IP stack. * * Author: Simon Goldschmidt * */ #include "lwip/opt.h" #include "lwip/def.h" #include <string.h> #if BYTE_ORDER == LITTLE_ENDIAN #if !defined(lwip_htons) /** * Convert an u16_t from host- to network byte order. * * @param n u16_t in host byte order * @return n in network byte order */ u16_t lwip_htons(u16_t n) { return (u16_t)PP_HTONS(n); } #endif /* lwip_htons */ #if !defined(lwip_htonl) /** * Convert an u32_t from host- to network byte order. * * @param n u32_t in host byte order * @return n in network byte order */ u32_t lwip_htonl(u32_t n) { return (u32_t)PP_HTONL(n); } #endif /* lwip_htonl */ #endif /* BYTE_ORDER == LITTLE_ENDIAN */ #ifndef lwip_strnstr /** * @ingroup sys_nonstandard * lwIP default implementation for strnstr() non-standard function. * This can be \#defined to strnstr() depending on your platform port. 
*/ char* lwip_strnstr(const char* buffer, const char* token, size_t n) { const char* p; size_t tokenlen = strlen(token); if (tokenlen == 0) { return LWIP_CONST_CAST(char *, buffer); } for (p = buffer; *p && (p + tokenlen <= buffer + n); p++) { if ((*p == *token) && (strncmp(p, token, tokenlen) == 0)) { return LWIP_CONST_CAST(char *, p); } } return NULL; } #endif #ifndef lwip_stricmp /** * @ingroup sys_nonstandard * lwIP default implementation for stricmp() non-standard function. * This can be \#defined to stricmp() depending on your platform port. */ int lwip_stricmp(const char* str1, const char* str2) { char c1, c2; do { c1 = *str1++; c2 = *str2++; if (c1 != c2) { char c1_upc = c1 | 0x20; if ((c1_upc >= 'a') && (c1_upc <= 'z')) { /* characters are not equal an one is in the alphabet range: downcase both chars and check again */ char c2_upc = c2 | 0x20; if (c1_upc != c2_upc) { /* still not equal */ /* don't care for < or > */ return 1; } } else { /* characters are not equal but none is in the alphabet range */ return 1; } } } while (c1 != 0); return 0; } #endif #ifndef lwip_strnicmp /** * @ingroup sys_nonstandard * lwIP default implementation for strnicmp() non-standard function. * This can be \#defined to strnicmp() depending on your platform port. */ int lwip_strnicmp(const char* str1, const char* str2, size_t len) { char c1, c2; do { c1 = *str1++; c2 = *str2++; if (c1 != c2) { char c1_upc = c1 | 0x20; if ((c1_upc >= 'a') && (c1_upc <= 'z')) { /* characters are not equal an one is in the alphabet range: downcase both chars and check again */ char c2_upc = c2 | 0x20; if (c1_upc != c2_upc) { /* still not equal */ /* don't care for < or > */ return 1; } } else { /* characters are not equal but none is in the alphabet range */ return 1; } } } while (len-- && c1 != 0); return 0; } #endif #ifndef lwip_itoa /** * @ingroup sys_nonstandard * lwIP default implementation for itoa() non-standard function. * This can be \#defined to itoa() or snprintf(result, bufsize, "%d", number) depending on your platform port. */ void lwip_itoa(char* result, size_t bufsize, int number) { const int base = 10; char* ptr = result, *ptr1 = result, tmp_char; int tmp_value; LWIP_UNUSED_ARG(bufsize); do { tmp_value = number; number /= base; *ptr++ = "zyxwvutsrqponmlkjihgfedcba9876543210123456789abcdefghijklmnopqrstuvwxyz"[35 + (tmp_value - number * base)]; } while(number); /* Apply negative sign */ if (tmp_value < 0) { *ptr++ = '-'; } *ptr-- = '\0'; while(ptr1 < ptr) { tmp_char = *ptr; *ptr--= *ptr1; *ptr1++ = tmp_char; } } #endif
2301_81045437/classic-platform
communication/lwip-2.0.3/src/core/def.c
C
unknown
6,693
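def.c above provides the reference byte-order helpers that ports usually replace with architecture-specific versions. A tiny standalone illustration (hypothetical example_ name, plain ISO C) of what lwip_htons() computes on a little-endian host:

#include <stdint.h>
#include <stdio.h>

/* Same swap that PP_HTONS/lwip_htons perform on a little-endian machine. */
static uint16_t example_htons(uint16_t n)
{
  return (uint16_t)(((n & 0x00ffU) << 8) | ((n & 0xff00U) >> 8));
}

int main(void)
{
  printf("0x%04x -> 0x%04x\n", (unsigned)0x1234, (unsigned)example_htons(0x1234));
  /* prints: 0x1234 -> 0x3412 */
  return 0;
}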
/** * @file * DNS - host name to IP address resolver. * * @defgroup dns DNS * @ingroup callbackstyle_api * * Implements a DNS host name to IP address resolver. * * The lwIP DNS resolver functions are used to lookup a host name and * map it to a numerical IP address. It maintains a list of resolved * hostnames that can be queried with the dns_lookup() function. * New hostnames can be resolved using the dns_query() function. * * The lwIP version of the resolver also adds a non-blocking version of * gethostbyname() that will work with a raw API application. This function * checks for an IP address string first and converts it if it is valid. * gethostbyname() then does a dns_lookup() to see if the name is * already in the table. If so, the IP is returned. If not, a query is * issued and the function returns with a ERR_INPROGRESS status. The app * using the dns client must then go into a waiting state. * * Once a hostname has been resolved (or found to be non-existent), * the resolver code calls a specified callback function (which * must be implemented by the module that uses the resolver). * * Multicast DNS queries are supported for names ending on ".local". * However, only "One-Shot Multicast DNS Queries" are supported (RFC 6762 * chapter 5.1), this is not a fully compliant implementation of continuous * mDNS querying! * * All functions must be called from TCPIP thread. * * @see @ref netconn_common for thread-safe access. */ /* * Port to lwIP from uIP * by Jim Pettinato April 2007 * * security fixes and more by Simon Goldschmidt * * uIP version Copyright (c) 2002-2003, Adam Dunkels. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote * products derived from this software without specific prior * written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /*----------------------------------------------------------------------------- * RFC 1035 - Domain names - implementation and specification * RFC 2181 - Clarifications to the DNS Specification *----------------------------------------------------------------------------*/ /** @todo: define good default values (rfc compliance) */ /** @todo: improve answer parsing, more checkings... */ /** @todo: check RFC1035 - 7.3. 
Processing responses */ /** @todo: one-shot mDNS: dual-stack fallback to another IP version */ /*----------------------------------------------------------------------------- * Includes *----------------------------------------------------------------------------*/ #include "lwip/opt.h" #if LWIP_DNS /* don't build if not configured for use in lwipopts.h */ #include "lwip/def.h" #include "lwip/udp.h" #include "lwip/mem.h" #include "lwip/memp.h" #include "lwip/dns.h" #include "lwip/prot/dns.h" #include <string.h> /** Random generator function to create random TXIDs and source ports for queries */ #ifndef DNS_RAND_TXID #if ((LWIP_DNS_SECURE & LWIP_DNS_SECURE_RAND_XID) != 0) #define DNS_RAND_TXID LWIP_RAND #else static u16_t dns_txid; #define DNS_RAND_TXID() (++dns_txid) #endif #endif /** Limits the source port to be >= 1024 by default */ #ifndef DNS_PORT_ALLOWED #define DNS_PORT_ALLOWED(port) ((port) >= 1024) #endif /** DNS maximum number of retries when asking for a name, before "timeout". */ #ifndef DNS_MAX_RETRIES #define DNS_MAX_RETRIES 4 #endif /** DNS resource record max. TTL (one week as default) */ #ifndef DNS_MAX_TTL #define DNS_MAX_TTL 604800 #elif DNS_MAX_TTL > 0x7FFFFFFF #error DNS_MAX_TTL must be a positive 32-bit value #endif #if DNS_TABLE_SIZE > 255 #error DNS_TABLE_SIZE must fit into an u8_t #endif #if DNS_MAX_SERVERS > 255 #error DNS_MAX_SERVERS must fit into an u8_t #endif /* The number of parallel requests (i.e. calls to dns_gethostbyname * that cannot be answered from the DNS table. * This is set to the table size by default. */ #if ((LWIP_DNS_SECURE & LWIP_DNS_SECURE_NO_MULTIPLE_OUTSTANDING) != 0) #ifndef DNS_MAX_REQUESTS #define DNS_MAX_REQUESTS DNS_TABLE_SIZE #else #if DNS_MAX_REQUESTS > 255 #error DNS_MAX_REQUESTS must fit into an u8_t #endif #endif #else /* In this configuration, both arrays have to have the same size and are used * like one entry (used/free) */ #define DNS_MAX_REQUESTS DNS_TABLE_SIZE #endif /* The number of UDP source ports used in parallel */ #if ((LWIP_DNS_SECURE & LWIP_DNS_SECURE_RAND_SRC_PORT) != 0) #ifndef DNS_MAX_SOURCE_PORTS #define DNS_MAX_SOURCE_PORTS DNS_MAX_REQUESTS #else #if DNS_MAX_SOURCE_PORTS > 255 #error DNS_MAX_SOURCE_PORTS must fit into an u8_t #endif #endif #else #ifdef DNS_MAX_SOURCE_PORTS #undef DNS_MAX_SOURCE_PORTS #endif #define DNS_MAX_SOURCE_PORTS 1 #endif #if LWIP_IPV4 && LWIP_IPV6 #define LWIP_DNS_ADDRTYPE_IS_IPV6(t) (((t) == LWIP_DNS_ADDRTYPE_IPV6_IPV4) || ((t) == LWIP_DNS_ADDRTYPE_IPV6)) #define LWIP_DNS_ADDRTYPE_MATCH_IP(t, ip) (IP_IS_V6_VAL(ip) ? LWIP_DNS_ADDRTYPE_IS_IPV6(t) : (!LWIP_DNS_ADDRTYPE_IS_IPV6(t))) #define LWIP_DNS_ADDRTYPE_ARG(x) , x #define LWIP_DNS_ADDRTYPE_ARG_OR_ZERO(x) x #define LWIP_DNS_SET_ADDRTYPE(x, y) do { x = y; } while(0) #else #if LWIP_IPV6 #define LWIP_DNS_ADDRTYPE_IS_IPV6(t) 1 #else #define LWIP_DNS_ADDRTYPE_IS_IPV6(t) 0 #endif #define LWIP_DNS_ADDRTYPE_MATCH_IP(t, ip) 1 #define LWIP_DNS_ADDRTYPE_ARG(x) #define LWIP_DNS_ADDRTYPE_ARG_OR_ZERO(x) 0 #define LWIP_DNS_SET_ADDRTYPE(x, y) #endif /* LWIP_IPV4 && LWIP_IPV6 */ #if LWIP_DNS_SUPPORT_MDNS_QUERIES #define LWIP_DNS_ISMDNS_ARG(x) , x #else #define LWIP_DNS_ISMDNS_ARG(x) #endif /** DNS query message structure. No packing needed: only used locally on the stack. */ struct dns_query { /* DNS query record starts with either a domain name or a pointer to a name already present somewhere in the packet. */ u16_t type; u16_t cls; }; #define SIZEOF_DNS_QUERY 4 /** DNS answer message structure. No packing needed: only used locally on the stack. 
*/ struct dns_answer { /* DNS answer record starts with either a domain name or a pointer to a name already present somewhere in the packet. */ u16_t type; u16_t cls; u32_t ttl; u16_t len; }; #define SIZEOF_DNS_ANSWER 10 /* maximum allowed size for the struct due to non-packed */ #define SIZEOF_DNS_ANSWER_ASSERT 12 /* DNS table entry states */ typedef enum { DNS_STATE_UNUSED = 0, DNS_STATE_NEW = 1, DNS_STATE_ASKING = 2, DNS_STATE_DONE = 3 } dns_state_enum_t; /** DNS table entry */ struct dns_table_entry { u32_t ttl; ip_addr_t ipaddr; u16_t txid; u8_t state; u8_t server_idx; u8_t tmr; u8_t retries; u8_t seqno; #if ((LWIP_DNS_SECURE & LWIP_DNS_SECURE_RAND_SRC_PORT) != 0) u8_t pcb_idx; #endif char name[DNS_MAX_NAME_LENGTH]; #if LWIP_IPV4 && LWIP_IPV6 u8_t reqaddrtype; #endif /* LWIP_IPV4 && LWIP_IPV6 */ #if LWIP_DNS_SUPPORT_MDNS_QUERIES u8_t is_mdns; #endif }; /** DNS request table entry: used when dns_gehostbyname cannot answer the * request from the DNS table */ struct dns_req_entry { /* pointer to callback on DNS query done */ dns_found_callback found; /* argument passed to the callback function */ void *arg; #if ((LWIP_DNS_SECURE & LWIP_DNS_SECURE_NO_MULTIPLE_OUTSTANDING) != 0) u8_t dns_table_idx; #endif #if LWIP_IPV4 && LWIP_IPV6 u8_t reqaddrtype; #endif /* LWIP_IPV4 && LWIP_IPV6 */ }; #if DNS_LOCAL_HOSTLIST #if DNS_LOCAL_HOSTLIST_IS_DYNAMIC /** Local host-list. For hostnames in this list, no * external name resolution is performed */ static struct local_hostlist_entry *local_hostlist_dynamic; #else /* DNS_LOCAL_HOSTLIST_IS_DYNAMIC */ /** Defining this allows the local_hostlist_static to be placed in a different * linker section (e.g. FLASH) */ #ifndef DNS_LOCAL_HOSTLIST_STORAGE_PRE #define DNS_LOCAL_HOSTLIST_STORAGE_PRE static #endif /* DNS_LOCAL_HOSTLIST_STORAGE_PRE */ /** Defining this allows the local_hostlist_static to be placed in a different * linker section (e.g. FLASH) */ #ifndef DNS_LOCAL_HOSTLIST_STORAGE_POST #define DNS_LOCAL_HOSTLIST_STORAGE_POST #endif /* DNS_LOCAL_HOSTLIST_STORAGE_POST */ DNS_LOCAL_HOSTLIST_STORAGE_PRE struct local_hostlist_entry local_hostlist_static[] DNS_LOCAL_HOSTLIST_STORAGE_POST = DNS_LOCAL_HOSTLIST_INIT; #endif /* DNS_LOCAL_HOSTLIST_IS_DYNAMIC */ static void dns_init_local(void); static err_t dns_lookup_local(const char *hostname, ip_addr_t *addr LWIP_DNS_ADDRTYPE_ARG(u8_t dns_addrtype)); #endif /* DNS_LOCAL_HOSTLIST */ /* forward declarations */ static void dns_recv(void *s, struct udp_pcb *pcb, struct pbuf *p, const ip_addr_t *addr, u16_t port); static void dns_check_entries(void); static void dns_call_found(u8_t idx, ip_addr_t* addr); /*----------------------------------------------------------------------------- * Globals *----------------------------------------------------------------------------*/ /* DNS variables */ static struct udp_pcb *dns_pcbs[DNS_MAX_SOURCE_PORTS]; #if ((LWIP_DNS_SECURE & LWIP_DNS_SECURE_RAND_SRC_PORT) != 0) static u8_t dns_last_pcb_idx; #endif static u8_t dns_seqno; static struct dns_table_entry dns_table[DNS_TABLE_SIZE]; static struct dns_req_entry dns_requests[DNS_MAX_REQUESTS]; static ip_addr_t dns_servers[DNS_MAX_SERVERS]; #if LWIP_IPV4 const ip_addr_t dns_mquery_v4group = DNS_MQUERY_IPV4_GROUP_INIT; #endif /* LWIP_IPV4 */ #if LWIP_IPV6 const ip_addr_t dns_mquery_v6group = DNS_MQUERY_IPV6_GROUP_INIT; #endif /* LWIP_IPV6 */ /** * Initialize the resolver: set up the UDP pcb and configure the default server * (if DNS_SERVER_ADDRESS is set). 
*/ void dns_init(void) { #ifdef DNS_SERVER_ADDRESS /* initialize default DNS server address */ ip_addr_t dnsserver; DNS_SERVER_ADDRESS(&dnsserver); dns_setserver(0, &dnsserver); #endif /* DNS_SERVER_ADDRESS */ LWIP_ASSERT("sanity check SIZEOF_DNS_QUERY", sizeof(struct dns_query) == SIZEOF_DNS_QUERY); LWIP_ASSERT("sanity check SIZEOF_DNS_ANSWER", sizeof(struct dns_answer) <= SIZEOF_DNS_ANSWER_ASSERT); LWIP_DEBUGF(DNS_DEBUG, ("dns_init: initializing\n")); /* if dns client not yet initialized... */ #if ((LWIP_DNS_SECURE & LWIP_DNS_SECURE_RAND_SRC_PORT) == 0) if (dns_pcbs[0] == NULL) { dns_pcbs[0] = udp_new_ip_type(IPADDR_TYPE_ANY); LWIP_ASSERT("dns_pcbs[0] != NULL", dns_pcbs[0] != NULL); /* initialize DNS table not needed (initialized to zero since it is a * global variable) */ LWIP_ASSERT("For implicit initialization to work, DNS_STATE_UNUSED needs to be 0", DNS_STATE_UNUSED == 0); /* initialize DNS client */ udp_bind(dns_pcbs[0], IP_ANY_TYPE, 0); udp_recv(dns_pcbs[0], dns_recv, NULL); } #endif #if DNS_LOCAL_HOSTLIST dns_init_local(); #endif } /** * @ingroup dns * Initialize one of the DNS servers. * * @param numdns the index of the DNS server to set must be < DNS_MAX_SERVERS * @param dnsserver IP address of the DNS server to set */ void dns_setserver(u8_t numdns, const ip_addr_t *dnsserver) { if (numdns < DNS_MAX_SERVERS) { if (dnsserver != NULL) { dns_servers[numdns] = (*dnsserver); } else { dns_servers[numdns] = *IP_ADDR_ANY; } } } /** * @ingroup dns * Obtain one of the currently configured DNS server. * * @param numdns the index of the DNS server * @return IP address of the indexed DNS server or "ip_addr_any" if the DNS * server has not been configured. */ const ip_addr_t* dns_getserver(u8_t numdns) { if (numdns < DNS_MAX_SERVERS) { return &dns_servers[numdns]; } else { return IP_ADDR_ANY; } } /** * The DNS resolver client timer - handle retries and timeouts and should * be called every DNS_TMR_INTERVAL milliseconds (every second by default). */ void dns_tmr(void) { LWIP_DEBUGF(DNS_DEBUG, ("dns_tmr: dns_check_entries\n")); dns_check_entries(); } #if DNS_LOCAL_HOSTLIST static void dns_init_local(void) { #if DNS_LOCAL_HOSTLIST_IS_DYNAMIC && defined(DNS_LOCAL_HOSTLIST_INIT) size_t i; struct local_hostlist_entry *entry; /* Dynamic: copy entries from DNS_LOCAL_HOSTLIST_INIT to list */ struct local_hostlist_entry local_hostlist_init[] = DNS_LOCAL_HOSTLIST_INIT; size_t namelen; for (i = 0; i < LWIP_ARRAYSIZE(local_hostlist_init); i++) { struct local_hostlist_entry *init_entry = &local_hostlist_init[i]; LWIP_ASSERT("invalid host name (NULL)", init_entry->name != NULL); namelen = strlen(init_entry->name); LWIP_ASSERT("namelen <= DNS_LOCAL_HOSTLIST_MAX_NAMELEN", namelen <= DNS_LOCAL_HOSTLIST_MAX_NAMELEN); entry = (struct local_hostlist_entry *)memp_malloc(MEMP_LOCALHOSTLIST); LWIP_ASSERT("mem-error in dns_init_local", entry != NULL); if (entry != NULL) { char* entry_name = (char*)entry + sizeof(struct local_hostlist_entry); MEMCPY(entry_name, init_entry->name, namelen); entry_name[namelen] = 0; entry->name = entry_name; entry->addr = init_entry->addr; entry->next = local_hostlist_dynamic; local_hostlist_dynamic = entry; } } #endif /* DNS_LOCAL_HOSTLIST_IS_DYNAMIC && defined(DNS_LOCAL_HOSTLIST_INIT) */ } /** * @ingroup dns * Iterate the local host-list for a hostname. 
* * @param iterator_fn a function that is called for every entry in the local host-list * @param iterator_arg 3rd argument passed to iterator_fn * @return the number of entries in the local host-list */ size_t dns_local_iterate(dns_found_callback iterator_fn, void *iterator_arg) { size_t i; #if DNS_LOCAL_HOSTLIST_IS_DYNAMIC struct local_hostlist_entry *entry = local_hostlist_dynamic; i = 0; while (entry != NULL) { if (iterator_fn != NULL) { iterator_fn(entry->name, &entry->addr, iterator_arg); } i++; entry = entry->next; } #else /* DNS_LOCAL_HOSTLIST_IS_DYNAMIC */ for (i = 0; i < LWIP_ARRAYSIZE(local_hostlist_static); i++) { if (iterator_fn != NULL) { iterator_fn(local_hostlist_static[i].name, &local_hostlist_static[i].addr, iterator_arg); } } #endif /* DNS_LOCAL_HOSTLIST_IS_DYNAMIC */ return i; } /** * @ingroup dns * Scans the local host-list for a hostname. * * @param hostname Hostname to look for in the local host-list * @param addr the first IP address for the hostname in the local host-list or * IPADDR_NONE if not found. * @param dns_addrtype - LWIP_DNS_ADDRTYPE_IPV4_IPV6: try to resolve IPv4 (ATTENTION: no fallback here!) * - LWIP_DNS_ADDRTYPE_IPV6_IPV4: try to resolve IPv6 (ATTENTION: no fallback here!) * - LWIP_DNS_ADDRTYPE_IPV4: try to resolve IPv4 only * - LWIP_DNS_ADDRTYPE_IPV6: try to resolve IPv6 only * @return ERR_OK if found, ERR_ARG if not found */ err_t dns_local_lookup(const char *hostname, ip_addr_t *addr, u8_t dns_addrtype) { LWIP_UNUSED_ARG(dns_addrtype); return dns_lookup_local(hostname, addr LWIP_DNS_ADDRTYPE_ARG(dns_addrtype)); } /* Internal implementation for dns_local_lookup and dns_lookup */ static err_t dns_lookup_local(const char *hostname, ip_addr_t *addr LWIP_DNS_ADDRTYPE_ARG(u8_t dns_addrtype)) { #if DNS_LOCAL_HOSTLIST_IS_DYNAMIC struct local_hostlist_entry *entry = local_hostlist_dynamic; while (entry != NULL) { if ((lwip_stricmp(entry->name, hostname) == 0) && LWIP_DNS_ADDRTYPE_MATCH_IP(dns_addrtype, entry->addr)) { if (addr) { ip_addr_copy(*addr, entry->addr); } return ERR_OK; } entry = entry->next; } #else /* DNS_LOCAL_HOSTLIST_IS_DYNAMIC */ size_t i; for (i = 0; i < LWIP_ARRAYSIZE(local_hostlist_static); i++) { if ((lwip_stricmp(local_hostlist_static[i].name, hostname) == 0) && LWIP_DNS_ADDRTYPE_MATCH_IP(dns_addrtype, local_hostlist_static[i].addr)) { if (addr) { ip_addr_copy(*addr, local_hostlist_static[i].addr); } return ERR_OK; } } #endif /* DNS_LOCAL_HOSTLIST_IS_DYNAMIC */ return ERR_ARG; } #if DNS_LOCAL_HOSTLIST_IS_DYNAMIC /** * @ingroup dns * Remove all entries from the local host-list for a specific hostname * and/or IP address * * @param hostname hostname for which entries shall be removed from the local * host-list * @param addr address for which entries shall be removed from the local host-list * @return the number of removed entries */ int dns_local_removehost(const char *hostname, const ip_addr_t *addr) { int removed = 0; struct local_hostlist_entry *entry = local_hostlist_dynamic; struct local_hostlist_entry *last_entry = NULL; while (entry != NULL) { if (((hostname == NULL) || !lwip_stricmp(entry->name, hostname)) && ((addr == NULL) || ip_addr_cmp(&entry->addr, addr))) { struct local_hostlist_entry *free_entry; if (last_entry != NULL) { last_entry->next = entry->next; } else { local_hostlist_dynamic = entry->next; } free_entry = entry; entry = entry->next; memp_free(MEMP_LOCALHOSTLIST, free_entry); removed++; } else { last_entry = entry; entry = entry->next; } } return removed; } /** * @ingroup dns * Add a hostname/IP address pair to the 
local host-list. * Duplicates are not checked. * * @param hostname hostname of the new entry * @param addr IP address of the new entry * @return ERR_OK if succeeded or ERR_MEM on memory error */ err_t dns_local_addhost(const char *hostname, const ip_addr_t *addr) { struct local_hostlist_entry *entry; size_t namelen; char* entry_name; LWIP_ASSERT("invalid host name (NULL)", hostname != NULL); namelen = strlen(hostname); LWIP_ASSERT("namelen <= DNS_LOCAL_HOSTLIST_MAX_NAMELEN", namelen <= DNS_LOCAL_HOSTLIST_MAX_NAMELEN); entry = (struct local_hostlist_entry *)memp_malloc(MEMP_LOCALHOSTLIST); if (entry == NULL) { return ERR_MEM; } entry_name = (char*)entry + sizeof(struct local_hostlist_entry); MEMCPY(entry_name, hostname, namelen); entry_name[namelen] = 0; entry->name = entry_name; ip_addr_copy(entry->addr, *addr); entry->next = local_hostlist_dynamic; local_hostlist_dynamic = entry; return ERR_OK; } #endif /* DNS_LOCAL_HOSTLIST_IS_DYNAMIC*/ #endif /* DNS_LOCAL_HOSTLIST */ /** * @ingroup dns * Look up a hostname in the array of known hostnames. * * @note This function only looks in the internal array of known * hostnames, it does not send out a query for the hostname if none * was found. The function dns_enqueue() can be used to send a query * for a hostname. * * @param name the hostname to look up * @param addr the hostname's IP address, as u32_t (instead of ip_addr_t to * better check for failure: != IPADDR_NONE) or IPADDR_NONE if the hostname * was not found in the cached dns_table. * @return ERR_OK if found, ERR_ARG if not found */ static err_t dns_lookup(const char *name, ip_addr_t *addr LWIP_DNS_ADDRTYPE_ARG(u8_t dns_addrtype)) { u8_t i; #if DNS_LOCAL_HOSTLIST || defined(DNS_LOOKUP_LOCAL_EXTERN) #endif /* DNS_LOCAL_HOSTLIST || defined(DNS_LOOKUP_LOCAL_EXTERN) */ #if DNS_LOCAL_HOSTLIST if (dns_lookup_local(name, addr LWIP_DNS_ADDRTYPE_ARG(dns_addrtype)) == ERR_OK) { return ERR_OK; } #endif /* DNS_LOCAL_HOSTLIST */ #ifdef DNS_LOOKUP_LOCAL_EXTERN if (DNS_LOOKUP_LOCAL_EXTERN(name, addr, LWIP_DNS_ADDRTYPE_ARG_OR_ZERO(dns_addrtype)) == ERR_OK) { return ERR_OK; } #endif /* DNS_LOOKUP_LOCAL_EXTERN */ /* Walk through name list, return entry if found. If not, return NULL. */ for (i = 0; i < DNS_TABLE_SIZE; ++i) { if ((dns_table[i].state == DNS_STATE_DONE) && (lwip_strnicmp(name, dns_table[i].name, sizeof(dns_table[i].name)) == 0) && LWIP_DNS_ADDRTYPE_MATCH_IP(dns_addrtype, dns_table[i].ipaddr)) { LWIP_DEBUGF(DNS_DEBUG, ("dns_lookup: \"%s\": found = ", name)); ip_addr_debug_print(DNS_DEBUG, &(dns_table[i].ipaddr)); LWIP_DEBUGF(DNS_DEBUG, ("\n")); if (addr) { ip_addr_copy(*addr, dns_table[i].ipaddr); } return ERR_OK; } } return ERR_ARG; } /** * Compare the "dotted" name "query" with the encoded name "response" * to make sure an answer from the DNS server matches the current dns_table * entry (otherwise, answers might arrive late for hostname not on the list * any more). * * @param query hostname (not encoded) from the dns_table * @param p pbuf containing the encoded hostname in the DNS response * @param start_offset offset into p where the name starts * @return 0xFFFF: names differ, other: names equal -> offset behind name */ static u16_t dns_compare_name(const char *query, struct pbuf* p, u16_t start_offset) { int n; u16_t response_offset = start_offset; do { n = pbuf_try_get_at(p, response_offset++); if (n < 0) { return 0xFFFF; } /** @see RFC 1035 - 4.1.4. 
Message compression */ if ((n & 0xc0) == 0xc0) { /* Compressed name: cannot be equal since we don't send them */ return 0xFFFF; } else { /* Not compressed name */ while (n > 0) { int c = pbuf_try_get_at(p, response_offset); if (c < 0) { return 0xFFFF; } if ((*query) != (u8_t)c) { return 0xFFFF; } ++response_offset; ++query; --n; } ++query; } n = pbuf_try_get_at(p, response_offset); if (n < 0) { return 0xFFFF; } } while (n != 0); return response_offset + 1; } /** * Walk through a compact encoded DNS name and return the end of the name. * * @param p pbuf containing the name * @param query_idx start index into p pointing to encoded DNS name in the DNS server response * @return index to end of the name */ static u16_t dns_skip_name(struct pbuf* p, u16_t query_idx) { int n; u16_t offset = query_idx; do { n = pbuf_try_get_at(p, offset++); if (n < 0) { return 0xFFFF; } /** @see RFC 1035 - 4.1.4. Message compression */ if ((n & 0xc0) == 0xc0) { /* Compressed name: since we only want to skip it (not check it), stop here */ break; } else { /* Not compressed name */ if (offset + n >= p->tot_len) { return 0xFFFF; } offset = (u16_t)(offset + n); } n = pbuf_try_get_at(p, offset); if (n < 0) { return 0xFFFF; } } while (n != 0); return offset + 1; } /** * Send a DNS query packet. * * @param idx the DNS table entry index for which to send a request * @return ERR_OK if packet is sent; an err_t indicating the problem otherwise */ static err_t dns_send(u8_t idx) { err_t err; struct dns_hdr hdr; struct dns_query qry; struct pbuf *p; u16_t query_idx, copy_len; const char *hostname, *hostname_part; u8_t n; u8_t pcb_idx; struct dns_table_entry* entry = &dns_table[idx]; LWIP_DEBUGF(DNS_DEBUG, ("dns_send: dns_servers[%"U16_F"] \"%s\": request\n", (u16_t)(entry->server_idx), entry->name)); LWIP_ASSERT("dns server out of array", entry->server_idx < DNS_MAX_SERVERS); if (ip_addr_isany_val(dns_servers[entry->server_idx]) #if LWIP_DNS_SUPPORT_MDNS_QUERIES && !entry->is_mdns #endif ) { /* DNS server not valid anymore, e.g. PPP netif has been shut down */ /* call specified callback function if provided */ dns_call_found(idx, NULL); /* flush this entry */ entry->state = DNS_STATE_UNUSED; return ERR_OK; } /* if here, we have either a new query or a retry on a previous query to process */ p = pbuf_alloc(PBUF_TRANSPORT, (u16_t)(SIZEOF_DNS_HDR + strlen(entry->name) + 2 + SIZEOF_DNS_QUERY), PBUF_RAM); if (p != NULL) { const ip_addr_t* dst; u16_t dst_port; /* fill dns header */ memset(&hdr, 0, SIZEOF_DNS_HDR); hdr.id = lwip_htons(entry->txid); hdr.flags1 = DNS_FLAG1_RD; hdr.numquestions = PP_HTONS(1); pbuf_take(p, &hdr, SIZEOF_DNS_HDR); hostname = entry->name; --hostname; /* convert hostname into suitable query format. */ query_idx = SIZEOF_DNS_HDR; do { ++hostname; hostname_part = hostname; for (n = 0; *hostname != '.' 
&& *hostname != 0; ++hostname) { ++n; } copy_len = (u16_t)(hostname - hostname_part); pbuf_put_at(p, query_idx, n); pbuf_take_at(p, hostname_part, copy_len, query_idx + 1); query_idx += n + 1; } while (*hostname != 0); pbuf_put_at(p, query_idx, 0); query_idx++; /* fill dns query */ if (LWIP_DNS_ADDRTYPE_IS_IPV6(entry->reqaddrtype)) { qry.type = PP_HTONS(DNS_RRTYPE_AAAA); } else { qry.type = PP_HTONS(DNS_RRTYPE_A); } qry.cls = PP_HTONS(DNS_RRCLASS_IN); pbuf_take_at(p, &qry, SIZEOF_DNS_QUERY, query_idx); #if ((LWIP_DNS_SECURE & LWIP_DNS_SECURE_RAND_SRC_PORT) != 0) pcb_idx = entry->pcb_idx; #else pcb_idx = 0; #endif /* send dns packet */ LWIP_DEBUGF(DNS_DEBUG, ("sending DNS request ID %d for name \"%s\" to server %d\r\n", entry->txid, entry->name, entry->server_idx)); #if LWIP_DNS_SUPPORT_MDNS_QUERIES if (entry->is_mdns) { dst_port = DNS_MQUERY_PORT; #if LWIP_IPV6 if (LWIP_DNS_ADDRTYPE_IS_IPV6(entry->reqaddrtype)) { dst = &dns_mquery_v6group; } #endif #if LWIP_IPV4 && LWIP_IPV6 else #endif #if LWIP_IPV4 { dst = &dns_mquery_v4group; } #endif } else #endif /* LWIP_DNS_SUPPORT_MDNS_QUERIES */ { dst_port = DNS_SERVER_PORT; dst = &dns_servers[entry->server_idx]; } err = udp_sendto(dns_pcbs[pcb_idx], p, dst, dst_port); /* free pbuf */ pbuf_free(p); } else { err = ERR_MEM; } return err; } #if ((LWIP_DNS_SECURE & LWIP_DNS_SECURE_RAND_SRC_PORT) != 0) static struct udp_pcb* dns_alloc_random_port(void) { err_t err; struct udp_pcb* ret; ret = udp_new_ip_type(IPADDR_TYPE_ANY); if (ret == NULL) { /* out of memory, have to reuse an existing pcb */ return NULL; } do { u16_t port = (u16_t)DNS_RAND_TXID(); if (!DNS_PORT_ALLOWED(port)) { /* this port is not allowed, try again */ err = ERR_USE; continue; } err = udp_bind(ret, IP_ANY_TYPE, port); } while (err == ERR_USE); if (err != ERR_OK) { udp_remove(ret); return NULL; } udp_recv(ret, dns_recv, NULL); return ret; } /** * dns_alloc_pcb() - allocates a new pcb (or reuses an existing one) to be used * for sending a request * * @return an index into dns_pcbs */ static u8_t dns_alloc_pcb(void) { u8_t i; u8_t idx; for (i = 0; i < DNS_MAX_SOURCE_PORTS; i++) { if (dns_pcbs[i] == NULL) { break; } } if (i < DNS_MAX_SOURCE_PORTS) { dns_pcbs[i] = dns_alloc_random_port(); if (dns_pcbs[i] != NULL) { /* succeeded */ dns_last_pcb_idx = i; return i; } } /* if we come here, creating a new UDP pcb failed, so we have to use an already existing one */ for (i = 0, idx = dns_last_pcb_idx + 1; i < DNS_MAX_SOURCE_PORTS; i++, idx++) { if (idx >= DNS_MAX_SOURCE_PORTS) { idx = 0; } if (dns_pcbs[idx] != NULL) { dns_last_pcb_idx = idx; return idx; } } return DNS_MAX_SOURCE_PORTS; } #endif /* ((LWIP_DNS_SECURE & LWIP_DNS_SECURE_RAND_SRC_PORT) != 0) */ /** * dns_call_found() - call the found callback and check if there are duplicate * entries for the given hostname. If there are any, their found callback will * be called and they will be removed. 
* * @param idx dns table index of the entry that is resolved or removed * @param addr IP address for the hostname (or NULL on error or memory shortage) */ static void dns_call_found(u8_t idx, ip_addr_t* addr) { #if ((LWIP_DNS_SECURE & (LWIP_DNS_SECURE_NO_MULTIPLE_OUTSTANDING | LWIP_DNS_SECURE_RAND_SRC_PORT)) != 0) u8_t i; #endif #if LWIP_IPV4 && LWIP_IPV6 if (addr != NULL) { /* check that address type matches the request and adapt the table entry */ if (IP_IS_V6_VAL(*addr)) { LWIP_ASSERT("invalid response", LWIP_DNS_ADDRTYPE_IS_IPV6(dns_table[idx].reqaddrtype)); dns_table[idx].reqaddrtype = LWIP_DNS_ADDRTYPE_IPV6; } else { LWIP_ASSERT("invalid response", !LWIP_DNS_ADDRTYPE_IS_IPV6(dns_table[idx].reqaddrtype)); dns_table[idx].reqaddrtype = LWIP_DNS_ADDRTYPE_IPV4; } } #endif /* LWIP_IPV4 && LWIP_IPV6 */ #if ((LWIP_DNS_SECURE & LWIP_DNS_SECURE_NO_MULTIPLE_OUTSTANDING) != 0) for (i = 0; i < DNS_MAX_REQUESTS; i++) { if (dns_requests[i].found && (dns_requests[i].dns_table_idx == idx)) { (*dns_requests[i].found)(dns_table[idx].name, addr, dns_requests[i].arg); /* flush this entry */ dns_requests[i].found = NULL; } } #else if (dns_requests[idx].found) { (*dns_requests[idx].found)(dns_table[idx].name, addr, dns_requests[idx].arg); } dns_requests[idx].found = NULL; #endif #if ((LWIP_DNS_SECURE & LWIP_DNS_SECURE_RAND_SRC_PORT) != 0) /* close the pcb used unless other request are using it */ for (i = 0; i < DNS_MAX_REQUESTS; i++) { if (i == idx) { continue; /* only check other requests */ } if (dns_table[i].state == DNS_STATE_ASKING) { if (dns_table[i].pcb_idx == dns_table[idx].pcb_idx) { /* another request is still using the same pcb */ dns_table[idx].pcb_idx = DNS_MAX_SOURCE_PORTS; break; } } } if (dns_table[idx].pcb_idx < DNS_MAX_SOURCE_PORTS) { /* if we come here, the pcb is not used any more and can be removed */ udp_remove(dns_pcbs[dns_table[idx].pcb_idx]); dns_pcbs[dns_table[idx].pcb_idx] = NULL; dns_table[idx].pcb_idx = DNS_MAX_SOURCE_PORTS; } #endif } /* Create a query transmission ID that is unique for all outstanding queries */ static u16_t dns_create_txid(void) { u16_t txid; u8_t i; again: txid = (u16_t)DNS_RAND_TXID(); /* check whether the ID is unique */ for (i = 0; i < DNS_TABLE_SIZE; i++) { if ((dns_table[i].state == DNS_STATE_ASKING) && (dns_table[i].txid == txid)) { /* ID already used by another pending query */ goto again; } } return txid; } /** * dns_check_entry() - see if entry has not yet been queried and, if so, sends out a query. 
* Check an entry in the dns_table: * - send out query for new entries * - retry old pending entries on timeout (also with different servers) * - remove completed entries from the table if their TTL has expired * * @param i index of the dns_table entry to check */ static void dns_check_entry(u8_t i) { err_t err; struct dns_table_entry *entry = &dns_table[i]; LWIP_ASSERT("array index out of bounds", i < DNS_TABLE_SIZE); switch (entry->state) { case DNS_STATE_NEW: /* initialize new entry */ entry->txid = dns_create_txid(); entry->state = DNS_STATE_ASKING; entry->server_idx = 0; entry->tmr = 1; entry->retries = 0; /* send DNS packet for this entry */ err = dns_send(i); if (err != ERR_OK) { LWIP_DEBUGF(DNS_DEBUG | LWIP_DBG_LEVEL_WARNING, ("dns_send returned error: %s\n", lwip_strerr(err))); } break; case DNS_STATE_ASKING: if (--entry->tmr == 0) { if (++entry->retries == DNS_MAX_RETRIES) { if ((entry->server_idx + 1 < DNS_MAX_SERVERS) && !ip_addr_isany_val(dns_servers[entry->server_idx + 1]) #if LWIP_DNS_SUPPORT_MDNS_QUERIES && !entry->is_mdns #endif /* LWIP_DNS_SUPPORT_MDNS_QUERIES */ ) { /* change of server */ entry->server_idx++; entry->tmr = 1; entry->retries = 0; } else { LWIP_DEBUGF(DNS_DEBUG, ("dns_check_entry: \"%s\": timeout\n", entry->name)); /* call specified callback function if provided */ dns_call_found(i, NULL); /* flush this entry */ entry->state = DNS_STATE_UNUSED; break; } } else { /* wait longer for the next retry */ entry->tmr = entry->retries; } /* send DNS packet for this entry */ err = dns_send(i); if (err != ERR_OK) { LWIP_DEBUGF(DNS_DEBUG | LWIP_DBG_LEVEL_WARNING, ("dns_send returned error: %s\n", lwip_strerr(err))); } } break; case DNS_STATE_DONE: /* if the time to live is nul */ if ((entry->ttl == 0) || (--entry->ttl == 0)) { LWIP_DEBUGF(DNS_DEBUG, ("dns_check_entry: \"%s\": flush\n", entry->name)); /* flush this entry, there cannot be any related pending entries in this state */ entry->state = DNS_STATE_UNUSED; } break; case DNS_STATE_UNUSED: /* nothing to do */ break; default: LWIP_ASSERT("unknown dns_table entry state:", 0); break; } } /** * Call dns_check_entry for each entry in dns_table - check all entries. */ static void dns_check_entries(void) { u8_t i; for (i = 0; i < DNS_TABLE_SIZE; ++i) { dns_check_entry(i); } } /** * Save TTL and call dns_call_found for correct response. */ static void dns_correct_response(u8_t idx, u32_t ttl) { struct dns_table_entry *entry = &dns_table[idx]; entry->state = DNS_STATE_DONE; LWIP_DEBUGF(DNS_DEBUG, ("dns_recv: \"%s\": response = ", entry->name)); ip_addr_debug_print(DNS_DEBUG, (&(entry->ipaddr))); LWIP_DEBUGF(DNS_DEBUG, ("\n")); /* read the answer resource record's TTL, and maximize it if needed */ entry->ttl = ttl; if (entry->ttl > DNS_MAX_TTL) { entry->ttl = DNS_MAX_TTL; } dns_call_found(idx, &entry->ipaddr); if (entry->ttl == 0) { /* RFC 883, page 29: "Zero values are interpreted to mean that the RR can only be used for the transaction in progress, and should not be cached." -> flush this entry now */ /* entry reused during callback? */ if (entry->state == DNS_STATE_DONE) { entry->state = DNS_STATE_UNUSED; } } } /** * Receive input function for DNS response packets arriving for the dns UDP pcb. 
*/ static void dns_recv(void *arg, struct udp_pcb *pcb, struct pbuf *p, const ip_addr_t *addr, u16_t port) { u8_t i; u16_t txid; u16_t res_idx; struct dns_hdr hdr; struct dns_answer ans; struct dns_query qry; u16_t nquestions, nanswers; LWIP_UNUSED_ARG(arg); LWIP_UNUSED_ARG(pcb); LWIP_UNUSED_ARG(port); /* is the dns message big enough ? */ if (p->tot_len < (SIZEOF_DNS_HDR + SIZEOF_DNS_QUERY)) { LWIP_DEBUGF(DNS_DEBUG, ("dns_recv: pbuf too small\n")); /* free pbuf and return */ goto memerr; } /* copy dns payload inside static buffer for processing */ if (pbuf_copy_partial(p, &hdr, SIZEOF_DNS_HDR, 0) == SIZEOF_DNS_HDR) { /* Match the ID in the DNS header with the name table. */ txid = lwip_htons(hdr.id); for (i = 0; i < DNS_TABLE_SIZE; i++) { const struct dns_table_entry *entry = &dns_table[i]; if ((entry->state == DNS_STATE_ASKING) && (entry->txid == txid)) { /* We only care about the question(s) and the answers. The authrr and the extrarr are simply discarded. */ nquestions = lwip_htons(hdr.numquestions); nanswers = lwip_htons(hdr.numanswers); /* Check for correct response. */ if ((hdr.flags1 & DNS_FLAG1_RESPONSE) == 0) { LWIP_DEBUGF(DNS_DEBUG, ("dns_recv: \"%s\": not a response\n", entry->name)); goto memerr; /* ignore this packet */ } if (nquestions != 1) { LWIP_DEBUGF(DNS_DEBUG, ("dns_recv: \"%s\": response not match to query\n", entry->name)); goto memerr; /* ignore this packet */ } #if LWIP_DNS_SUPPORT_MDNS_QUERIES if (!entry->is_mdns) #endif /* LWIP_DNS_SUPPORT_MDNS_QUERIES */ { /* Check whether response comes from the same network address to which the question was sent. (RFC 5452) */ if (!ip_addr_cmp(addr, &dns_servers[entry->server_idx])) { goto memerr; /* ignore this packet */ } } /* Check if the name in the "question" part match with the name in the entry and skip it if equal. */ res_idx = dns_compare_name(entry->name, p, SIZEOF_DNS_HDR); if (res_idx == 0xFFFF) { LWIP_DEBUGF(DNS_DEBUG, ("dns_recv: \"%s\": response not match to query\n", entry->name)); goto memerr; /* ignore this packet */ } /* check if "question" part matches the request */ if (pbuf_copy_partial(p, &qry, SIZEOF_DNS_QUERY, res_idx) != SIZEOF_DNS_QUERY) { goto memerr; /* ignore this packet */ } if ((qry.cls != PP_HTONS(DNS_RRCLASS_IN)) || (LWIP_DNS_ADDRTYPE_IS_IPV6(entry->reqaddrtype) && (qry.type != PP_HTONS(DNS_RRTYPE_AAAA))) || (!LWIP_DNS_ADDRTYPE_IS_IPV6(entry->reqaddrtype) && (qry.type != PP_HTONS(DNS_RRTYPE_A)))) { LWIP_DEBUGF(DNS_DEBUG, ("dns_recv: \"%s\": response not match to query\n", entry->name)); goto memerr; /* ignore this packet */ } /* skip the rest of the "question" part */ res_idx += SIZEOF_DNS_QUERY; /* Check for error. If so, call callback to inform. */ if (hdr.flags2 & DNS_FLAG2_ERR_MASK) { LWIP_DEBUGF(DNS_DEBUG, ("dns_recv: \"%s\": error in flags\n", entry->name)); } else { while ((nanswers > 0) && (res_idx < p->tot_len)) { /* skip answer resource record's host name */ res_idx = dns_skip_name(p, res_idx); if (res_idx == 0xFFFF) { goto memerr; /* ignore this packet */ } /* Check for IP address type and Internet class. Others are discarded. 
*/ if (pbuf_copy_partial(p, &ans, SIZEOF_DNS_ANSWER, res_idx) != SIZEOF_DNS_ANSWER) { goto memerr; /* ignore this packet */ } res_idx += SIZEOF_DNS_ANSWER; if (ans.cls == PP_HTONS(DNS_RRCLASS_IN)) { #if LWIP_IPV4 if ((ans.type == PP_HTONS(DNS_RRTYPE_A)) && (ans.len == PP_HTONS(sizeof(ip4_addr_t)))) { #if LWIP_IPV4 && LWIP_IPV6 if (!LWIP_DNS_ADDRTYPE_IS_IPV6(entry->reqaddrtype)) #endif /* LWIP_IPV4 && LWIP_IPV6 */ { ip4_addr_t ip4addr; /* read the IP address after answer resource record's header */ if (pbuf_copy_partial(p, &ip4addr, sizeof(ip4_addr_t), res_idx) != sizeof(ip4_addr_t)) { goto memerr; /* ignore this packet */ } ip_addr_copy_from_ip4(dns_table[i].ipaddr, ip4addr); pbuf_free(p); /* handle correct response */ dns_correct_response(i, lwip_ntohl(ans.ttl)); return; } } #endif /* LWIP_IPV4 */ #if LWIP_IPV6 if ((ans.type == PP_HTONS(DNS_RRTYPE_AAAA)) && (ans.len == PP_HTONS(sizeof(ip6_addr_t)))) { #if LWIP_IPV4 && LWIP_IPV6 if (LWIP_DNS_ADDRTYPE_IS_IPV6(entry->reqaddrtype)) #endif /* LWIP_IPV4 && LWIP_IPV6 */ { ip6_addr_t ip6addr; /* read the IP address after answer resource record's header */ if (pbuf_copy_partial(p, &ip6addr, sizeof(ip6_addr_t), res_idx) != sizeof(ip6_addr_t)) { goto memerr; /* ignore this packet */ } ip_addr_copy_from_ip6(dns_table[i].ipaddr, ip6addr); pbuf_free(p); /* handle correct response */ dns_correct_response(i, lwip_ntohl(ans.ttl)); return; } } #endif /* LWIP_IPV6 */ } /* skip this answer */ if ((int)(res_idx + lwip_htons(ans.len)) > 0xFFFF) { goto memerr; /* ignore this packet */ } res_idx += lwip_htons(ans.len); --nanswers; } #if LWIP_IPV4 && LWIP_IPV6 if ((entry->reqaddrtype == LWIP_DNS_ADDRTYPE_IPV4_IPV6) || (entry->reqaddrtype == LWIP_DNS_ADDRTYPE_IPV6_IPV4)) { if (entry->reqaddrtype == LWIP_DNS_ADDRTYPE_IPV4_IPV6) { /* IPv4 failed, try IPv6 */ dns_table[i].reqaddrtype = LWIP_DNS_ADDRTYPE_IPV6; } else { /* IPv6 failed, try IPv4 */ dns_table[i].reqaddrtype = LWIP_DNS_ADDRTYPE_IPV4; } pbuf_free(p); dns_table[i].state = DNS_STATE_NEW; dns_check_entry(i); return; } #endif /* LWIP_IPV4 && LWIP_IPV6 */ LWIP_DEBUGF(DNS_DEBUG, ("dns_recv: \"%s\": error in response\n", entry->name)); } /* call callback to indicate error, clean up memory and return */ pbuf_free(p); dns_call_found(i, NULL); dns_table[i].state = DNS_STATE_UNUSED; return; } } } memerr: /* deallocate memory and return */ pbuf_free(p); return; } /** * Queues a new hostname to resolve and sends out a DNS query for that hostname * * @param name the hostname that is to be queried * @param hostnamelen length of the hostname * @param found a callback function to be called on success, failure or timeout * @param callback_arg argument to pass to the callback function * @return err_t return code. 
*/ static err_t dns_enqueue(const char *name, size_t hostnamelen, dns_found_callback found, void *callback_arg LWIP_DNS_ADDRTYPE_ARG(u8_t dns_addrtype) LWIP_DNS_ISMDNS_ARG(u8_t is_mdns)) { u8_t i; u8_t lseq, lseqi; struct dns_table_entry *entry = NULL; size_t namelen; struct dns_req_entry* req; #if ((LWIP_DNS_SECURE & LWIP_DNS_SECURE_NO_MULTIPLE_OUTSTANDING) != 0) u8_t r; /* check for duplicate entries */ for (i = 0; i < DNS_TABLE_SIZE; i++) { if ((dns_table[i].state == DNS_STATE_ASKING) && (lwip_strnicmp(name, dns_table[i].name, sizeof(dns_table[i].name)) == 0)) { #if LWIP_IPV4 && LWIP_IPV6 if (dns_table[i].reqaddrtype != dns_addrtype) { /* requested address types don't match this can lead to 2 concurrent requests, but mixing the address types for the same host should not be that common */ continue; } #endif /* LWIP_IPV4 && LWIP_IPV6 */ /* this is a duplicate entry, find a free request entry */ for (r = 0; r < DNS_MAX_REQUESTS; r++) { if (dns_requests[r].found == 0) { dns_requests[r].found = found; dns_requests[r].arg = callback_arg; dns_requests[r].dns_table_idx = i; LWIP_DNS_SET_ADDRTYPE(dns_requests[r].reqaddrtype, dns_addrtype); LWIP_DEBUGF(DNS_DEBUG, ("dns_enqueue: \"%s\": duplicate request\n", name)); return ERR_INPROGRESS; } } } } /* no duplicate entries found */ #endif /* search an unused entry, or the oldest one */ lseq = 0; lseqi = DNS_TABLE_SIZE; for (i = 0; i < DNS_TABLE_SIZE; ++i) { entry = &dns_table[i]; /* is it an unused entry ? */ if (entry->state == DNS_STATE_UNUSED) { break; } /* check if this is the oldest completed entry */ if (entry->state == DNS_STATE_DONE) { u8_t age = dns_seqno - entry->seqno; if (age > lseq) { lseq = age; lseqi = i; } } } /* if we don't have found an unused entry, use the oldest completed one */ if (i == DNS_TABLE_SIZE) { if ((lseqi >= DNS_TABLE_SIZE) || (dns_table[lseqi].state != DNS_STATE_DONE)) { /* no entry can be used now, table is full */ LWIP_DEBUGF(DNS_DEBUG, ("dns_enqueue: \"%s\": DNS entries table is full\n", name)); return ERR_MEM; } else { /* use the oldest completed one */ i = lseqi; entry = &dns_table[i]; } } #if ((LWIP_DNS_SECURE & LWIP_DNS_SECURE_NO_MULTIPLE_OUTSTANDING) != 0) /* find a free request entry */ req = NULL; for (r = 0; r < DNS_MAX_REQUESTS; r++) { if (dns_requests[r].found == NULL) { req = &dns_requests[r]; break; } } if (req == NULL) { /* no request entry can be used now, table is full */ LWIP_DEBUGF(DNS_DEBUG, ("dns_enqueue: \"%s\": DNS request entries table is full\n", name)); return ERR_MEM; } req->dns_table_idx = i; #else /* in this configuration, the entry index is the same as the request index */ req = &dns_requests[i]; #endif /* use this entry */ LWIP_DEBUGF(DNS_DEBUG, ("dns_enqueue: \"%s\": use DNS entry %"U16_F"\n", name, (u16_t)(i))); /* fill the entry */ entry->state = DNS_STATE_NEW; entry->seqno = dns_seqno; LWIP_DNS_SET_ADDRTYPE(entry->reqaddrtype, dns_addrtype); LWIP_DNS_SET_ADDRTYPE(req->reqaddrtype, dns_addrtype); req->found = found; req->arg = callback_arg; namelen = LWIP_MIN(hostnamelen, DNS_MAX_NAME_LENGTH-1); MEMCPY(entry->name, name, namelen); entry->name[namelen] = 0; #if ((LWIP_DNS_SECURE & LWIP_DNS_SECURE_RAND_SRC_PORT) != 0) entry->pcb_idx = dns_alloc_pcb(); if (entry->pcb_idx >= DNS_MAX_SOURCE_PORTS) { /* failed to get a UDP pcb */ LWIP_DEBUGF(DNS_DEBUG, ("dns_enqueue: \"%s\": failed to allocate a pcb\n", name)); entry->state = DNS_STATE_UNUSED; req->found = NULL; return ERR_MEM; } LWIP_DEBUGF(DNS_DEBUG, ("dns_enqueue: \"%s\": use DNS pcb %"U16_F"\n", name, (u16_t)(entry->pcb_idx))); #endif 
#if LWIP_DNS_SUPPORT_MDNS_QUERIES entry->is_mdns = is_mdns; #endif dns_seqno++; /* force to send query without waiting timer */ dns_check_entry(i); /* dns query is enqueued */ return ERR_INPROGRESS; } /** * @ingroup dns * Resolve a hostname (string) into an IP address. * NON-BLOCKING callback version for use with raw API!!! * * Returns immediately with one of err_t return codes: * - ERR_OK if hostname is a valid IP address string or the host * name is already in the local names table. * - ERR_INPROGRESS enqueue a request to be sent to the DNS server * for resolution if no errors are present. * - ERR_ARG: dns client not initialized or invalid hostname * * @param hostname the hostname that is to be queried * @param addr pointer to a ip_addr_t where to store the address if it is already * cached in the dns_table (only valid if ERR_OK is returned!) * @param found a callback function to be called on success, failure or timeout (only if * ERR_INPROGRESS is returned!) * @param callback_arg argument to pass to the callback function * @return a err_t return code. */ err_t dns_gethostbyname(const char *hostname, ip_addr_t *addr, dns_found_callback found, void *callback_arg) { return dns_gethostbyname_addrtype(hostname, addr, found, callback_arg, LWIP_DNS_ADDRTYPE_DEFAULT); } /** * @ingroup dns * Like dns_gethostbyname, but returned address type can be controlled: * @param hostname the hostname that is to be queried * @param addr pointer to a ip_addr_t where to store the address if it is already * cached in the dns_table (only valid if ERR_OK is returned!) * @param found a callback function to be called on success, failure or timeout (only if * ERR_INPROGRESS is returned!) * @param callback_arg argument to pass to the callback function * @param dns_addrtype - LWIP_DNS_ADDRTYPE_IPV4_IPV6: try to resolve IPv4 first, try IPv6 if IPv4 fails only * - LWIP_DNS_ADDRTYPE_IPV6_IPV4: try to resolve IPv6 first, try IPv4 if IPv6 fails only * - LWIP_DNS_ADDRTYPE_IPV4: try to resolve IPv4 only * - LWIP_DNS_ADDRTYPE_IPV6: try to resolve IPv6 only */ err_t dns_gethostbyname_addrtype(const char *hostname, ip_addr_t *addr, dns_found_callback found, void *callback_arg, u8_t dns_addrtype) { size_t hostnamelen; #if LWIP_DNS_SUPPORT_MDNS_QUERIES u8_t is_mdns; #endif /* not initialized or no valid server yet, or invalid addr pointer * or invalid hostname or invalid hostname length */ if ((addr == NULL) || (!hostname) || (!hostname[0])) { return ERR_ARG; } #if ((LWIP_DNS_SECURE & LWIP_DNS_SECURE_RAND_SRC_PORT) == 0) if (dns_pcbs[0] == NULL) { return ERR_ARG; } #endif hostnamelen = strlen(hostname); if (hostnamelen >= DNS_MAX_NAME_LENGTH) { LWIP_DEBUGF(DNS_DEBUG, ("dns_gethostbyname: name too long to resolve")); return ERR_ARG; } #if LWIP_HAVE_LOOPIF if (strcmp(hostname, "localhost") == 0) { ip_addr_set_loopback(LWIP_DNS_ADDRTYPE_IS_IPV6(dns_addrtype), addr); return ERR_OK; } #endif /* LWIP_HAVE_LOOPIF */ /* host name already in octet notation? set ip addr and return ERR_OK */ if (ipaddr_aton(hostname, addr)) { #if LWIP_IPV4 && LWIP_IPV6 if ((IP_IS_V6(addr) && (dns_addrtype != LWIP_DNS_ADDRTYPE_IPV4)) || (IP_IS_V4(addr) && (dns_addrtype != LWIP_DNS_ADDRTYPE_IPV6))) #endif /* LWIP_IPV4 && LWIP_IPV6 */ { return ERR_OK; } } /* already have this address cached? 
*/ if (dns_lookup(hostname, addr LWIP_DNS_ADDRTYPE_ARG(dns_addrtype)) == ERR_OK) { return ERR_OK; } #if LWIP_IPV4 && LWIP_IPV6 if ((dns_addrtype == LWIP_DNS_ADDRTYPE_IPV4_IPV6) || (dns_addrtype == LWIP_DNS_ADDRTYPE_IPV6_IPV4)) { /* fallback to 2nd IP type and try again to lookup */ u8_t fallback; if (dns_addrtype == LWIP_DNS_ADDRTYPE_IPV4_IPV6) { fallback = LWIP_DNS_ADDRTYPE_IPV6; } else { fallback = LWIP_DNS_ADDRTYPE_IPV4; } if (dns_lookup(hostname, addr LWIP_DNS_ADDRTYPE_ARG(fallback)) == ERR_OK) { return ERR_OK; } } #else /* LWIP_IPV4 && LWIP_IPV6 */ LWIP_UNUSED_ARG(dns_addrtype); #endif /* LWIP_IPV4 && LWIP_IPV6 */ #if LWIP_DNS_SUPPORT_MDNS_QUERIES if (strstr(hostname, ".local") == &hostname[hostnamelen] - 6) { is_mdns = 1; } else { is_mdns = 0; } if (!is_mdns) #endif /* LWIP_DNS_SUPPORT_MDNS_QUERIES */ { /* prevent calling found callback if no server is set, return error instead */ if (ip_addr_isany_val(dns_servers[0])) { return ERR_VAL; } } /* queue query with specified callback */ return dns_enqueue(hostname, hostnamelen, found, callback_arg LWIP_DNS_ADDRTYPE_ARG(dns_addrtype) LWIP_DNS_ISMDNS_ARG(is_mdns)); } #endif /* LWIP_DNS */
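/* Configuration sketch, not part of dns.c above: point the resolver at a
 * server with dns_setserver() and pre-seed a name through the dynamic local
 * host-list. dns_local_addhost() only exists when DNS_LOCAL_HOSTLIST and
 * DNS_LOCAL_HOSTLIST_IS_DYNAMIC are enabled; the addresses, the host name and
 * the "my_" function name below are placeholders, not values taken from this
 * repository. */
#include "lwip/dns.h"
#include "lwip/ip_addr.h"

void my_dns_setup(void)
{
  ip_addr_t server;

  if (ipaddr_aton("192.168.0.1", &server)) {
    dns_setserver(0, &server);            /* index must be < DNS_MAX_SERVERS */
  }
#if DNS_LOCAL_HOSTLIST && DNS_LOCAL_HOSTLIST_IS_DYNAMIC
  {
    ip_addr_t device;
    if (ipaddr_aton("192.168.0.42", &device)) {
      /* answered by dns_lookup_local(); no query is ever sent for this name */
      (void)dns_local_addhost("printer.lan", &device);
    }
  }
#endif
}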
2301_81045437/classic-platform
communication/lwip-2.0.3/src/core/dns.c
C
unknown
52,793
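/* Usage sketch, not part of lwIP: how a raw-API application might call
 * dns_gethostbyname() from dns.c (the record above). The callback only runs
 * when ERR_INPROGRESS was returned; ERR_OK means the answer came from the
 * cache, the local host-list or a literal address string. The host name and
 * the "my_" function names are placeholders. */
#include <stdio.h>
#include "lwip/dns.h"
#include "lwip/ip_addr.h"

static void my_dns_found(const char *name, const ip_addr_t *ipaddr, void *arg)
{
  LWIP_UNUSED_ARG(arg);
  if (ipaddr != NULL) {
    printf("DNS: %s -> %s\n", name, ipaddr_ntoa(ipaddr));
  } else {
    printf("DNS: %s failed or timed out\n", name);
  }
}

void my_resolve(void)
{
  ip_addr_t addr;
  err_t err = dns_gethostbyname("example.org", &addr, my_dns_found, NULL);

  if (err == ERR_OK) {
    printf("cached: %s\n", ipaddr_ntoa(&addr));    /* answered immediately */
  } else if (err != ERR_INPROGRESS) {
    printf("DNS enqueue failed: %d\n", (int)err);  /* ERR_ARG or ERR_VAL */
  }
  /* In NO_SYS builds dns_tmr() must run every DNS_TMR_INTERVAL ms so that
   * dns_check_entries() can retry and time out pending queries. */
}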
/** * @file * Incluse internet checksum functions.\n * * These are some reference implementations of the checksum algorithm, with the * aim of being simple, correct and fully portable. Checksumming is the * first thing you would want to optimize for your platform. If you create * your own version, link it in and in your cc.h put: * * \#define LWIP_CHKSUM your_checksum_routine * * Or you can select from the implementations below by defining * LWIP_CHKSUM_ALGORITHM to 1, 2 or 3. */ /* * Copyright (c) 2001-2004 Swedish Institute of Computer Science. * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY * OF SUCH DAMAGE. * * This file is part of the lwIP TCP/IP stack. * * Author: Adam Dunkels <adam@sics.se> * */ #include "lwip/opt.h" #include "lwip/inet_chksum.h" #include "lwip/def.h" #include "lwip/ip_addr.h" #include <string.h> #ifndef LWIP_CHKSUM # define LWIP_CHKSUM lwip_standard_chksum # ifndef LWIP_CHKSUM_ALGORITHM # define LWIP_CHKSUM_ALGORITHM 2 # endif u16_t lwip_standard_chksum(const void *dataptr, int len); #endif /* If none set: */ #ifndef LWIP_CHKSUM_ALGORITHM # define LWIP_CHKSUM_ALGORITHM 0 #endif #if (LWIP_CHKSUM_ALGORITHM == 1) /* Version #1 */ /** * lwip checksum * * @param dataptr points to start of data to be summed at any boundary * @param len length of data to be summed * @return host order (!) 
lwip checksum (non-inverted Internet sum) * * @note accumulator size limits summable length to 64k * @note host endianess is irrelevant (p3 RFC1071) */ u16_t lwip_standard_chksum(const void *dataptr, int len) { u32_t acc; u16_t src; const u8_t *octetptr; acc = 0; /* dataptr may be at odd or even addresses */ octetptr = (const u8_t*)dataptr; while (len > 1) { /* declare first octet as most significant thus assume network order, ignoring host order */ src = (*octetptr) << 8; octetptr++; /* declare second octet as least significant */ src |= (*octetptr); octetptr++; acc += src; len -= 2; } if (len > 0) { /* accumulate remaining octet */ src = (*octetptr) << 8; acc += src; } /* add deferred carry bits */ acc = (acc >> 16) + (acc & 0x0000ffffUL); if ((acc & 0xffff0000UL) != 0) { acc = (acc >> 16) + (acc & 0x0000ffffUL); } /* This maybe a little confusing: reorder sum using lwip_htons() instead of lwip_ntohs() since it has a little less call overhead. The caller must invert bits for Internet sum ! */ return lwip_htons((u16_t)acc); } #endif #if (LWIP_CHKSUM_ALGORITHM == 2) /* Alternative version #2 */ /* * Curt McDowell * Broadcom Corp. * csm@broadcom.com * * IP checksum two bytes at a time with support for * unaligned buffer. * Works for len up to and including 0x20000. * by Curt McDowell, Broadcom Corp. 12/08/2005 * * @param dataptr points to start of data to be summed at any boundary * @param len length of data to be summed * @return host order (!) lwip checksum (non-inverted Internet sum) */ u16_t lwip_standard_chksum(const void *dataptr, int len) { const u8_t *pb = (const u8_t *)dataptr; const u16_t *ps; u16_t t = 0; u32_t sum = 0; int odd = ((mem_ptr_t)pb & 1); /* Get aligned to u16_t */ if (odd && len > 0) { ((u8_t *)&t)[1] = *pb++; len--; } /* Add the bulk of the data */ ps = (const u16_t *)(const void *)pb; while (len > 1) { sum += *ps++; len -= 2; } /* Consume left-over byte, if any */ if (len > 0) { ((u8_t *)&t)[0] = *(const u8_t *)ps; } /* Add end bytes */ sum += t; /* Fold 32-bit sum to 16 bits calling this twice is probably faster than if statements... */ sum = FOLD_U32T(sum); sum = FOLD_U32T(sum); /* Swap if alignment was odd */ if (odd) { sum = SWAP_BYTES_IN_WORD(sum); } return (u16_t)sum; } #endif #if (LWIP_CHKSUM_ALGORITHM == 3) /* Alternative version #3 */ /** * An optimized checksum routine. Basically, it uses loop-unrolling on * the checksum loop, treating the head and tail bytes specially, whereas * the inner loop acts on 8 bytes at a time. * * @arg start of buffer to be checksummed. May be an odd byte address. * @len number of bytes in the buffer to be checksummed. * @return host order (!) lwip checksum (non-inverted Internet sum) * * by Curt McDowell, Broadcom Corp. December 8th, 2005 */ u16_t lwip_standard_chksum(const void *dataptr, int len) { const u8_t *pb = (const u8_t *)dataptr; const u16_t *ps; u16_t t = 0; const u32_t *pl; u32_t sum = 0, tmp; /* starts at odd byte address? */ int odd = ((mem_ptr_t)pb & 1); if (odd && len > 0) { ((u8_t *)&t)[1] = *pb++; len--; } ps = (const u16_t *)(const void*)pb; if (((mem_ptr_t)ps & 3) && len > 1) { sum += *ps++; len -= 2; } pl = (const u32_t *)(const void*)ps; while (len > 7) { tmp = sum + *pl++; /* ping */ if (tmp < sum) { tmp++; /* add back carry */ } sum = tmp + *pl++; /* pong */ if (sum < tmp) { sum++; /* add back carry */ } len -= 8; } /* make room in upper bits */ sum = FOLD_U32T(sum); ps = (const u16_t *)pl; /* 16-bit aligned word remaining? */ while (len > 1) { sum += *ps++; len -= 2; } /* dangling tail byte remaining? 
*/ if (len > 0) { /* include odd byte */ ((u8_t *)&t)[0] = *(const u8_t *)ps; } sum += t; /* add end bytes */ /* Fold 32-bit sum to 16 bits calling this twice is probably faster than if statements... */ sum = FOLD_U32T(sum); sum = FOLD_U32T(sum); if (odd) { sum = SWAP_BYTES_IN_WORD(sum); } return (u16_t)sum; } #endif /** Parts of the pseudo checksum which are common to IPv4 and IPv6 */ static u16_t inet_cksum_pseudo_base(struct pbuf *p, u8_t proto, u16_t proto_len, u32_t acc) { struct pbuf *q; u8_t swapped = 0; /* iterate through all pbuf in chain */ for (q = p; q != NULL; q = q->next) { LWIP_DEBUGF(INET_DEBUG, ("inet_chksum_pseudo(): checksumming pbuf %p (has next %p) \n", (void *)q, (void *)q->next)); acc += LWIP_CHKSUM(q->payload, q->len); /*LWIP_DEBUGF(INET_DEBUG, ("inet_chksum_pseudo(): unwrapped lwip_chksum()=%"X32_F" \n", acc));*/ /* just executing this next line is probably faster that the if statement needed to check whether we really need to execute it, and does no harm */ acc = FOLD_U32T(acc); if (q->len % 2 != 0) { swapped = 1 - swapped; acc = SWAP_BYTES_IN_WORD(acc); } /*LWIP_DEBUGF(INET_DEBUG, ("inet_chksum_pseudo(): wrapped lwip_chksum()=%"X32_F" \n", acc));*/ } if (swapped) { acc = SWAP_BYTES_IN_WORD(acc); } acc += (u32_t)lwip_htons((u16_t)proto); acc += (u32_t)lwip_htons(proto_len); /* Fold 32-bit sum to 16 bits calling this twice is probably faster than if statements... */ acc = FOLD_U32T(acc); acc = FOLD_U32T(acc); LWIP_DEBUGF(INET_DEBUG, ("inet_chksum_pseudo(): pbuf chain lwip_chksum()=%"X32_F"\n", acc)); return (u16_t)~(acc & 0xffffUL); } #if LWIP_IPV4 /* inet_chksum_pseudo: * * Calculates the IPv4 pseudo Internet checksum used by TCP and UDP for a pbuf chain. * IP addresses are expected to be in network byte order. * * @param p chain of pbufs over that a checksum should be calculated (ip data part) * @param src source ip address (used for checksum of pseudo header) * @param dst destination ip address (used for checksum of pseudo header) * @param proto ip protocol (used for checksum of pseudo header) * @param proto_len length of the ip data part (used for checksum of pseudo header) * @return checksum (as u16_t) to be saved directly in the protocol header */ u16_t inet_chksum_pseudo(struct pbuf *p, u8_t proto, u16_t proto_len, const ip4_addr_t *src, const ip4_addr_t *dest) { u32_t acc; u32_t addr; addr = ip4_addr_get_u32(src); acc = (addr & 0xffffUL); acc += ((addr >> 16) & 0xffffUL); addr = ip4_addr_get_u32(dest); acc += (addr & 0xffffUL); acc += ((addr >> 16) & 0xffffUL); /* fold down to 16 bits */ acc = FOLD_U32T(acc); acc = FOLD_U32T(acc); return inet_cksum_pseudo_base(p, proto, proto_len, acc); } #endif /* LWIP_IPV4 */ #if LWIP_IPV6 /** * Calculates the checksum with IPv6 pseudo header used by TCP and UDP for a pbuf chain. * IPv6 addresses are expected to be in network byte order. 
* * @param p chain of pbufs over that a checksum should be calculated (ip data part) * @param proto ipv6 protocol/next header (used for checksum of pseudo header) * @param proto_len length of the ipv6 payload (used for checksum of pseudo header) * @param src source ipv6 address (used for checksum of pseudo header) * @param dest destination ipv6 address (used for checksum of pseudo header) * @return checksum (as u16_t) to be saved directly in the protocol header */ u16_t ip6_chksum_pseudo(struct pbuf *p, u8_t proto, u16_t proto_len, const ip6_addr_t *src, const ip6_addr_t *dest) { u32_t acc = 0; u32_t addr; u8_t addr_part; for (addr_part = 0; addr_part < 4; addr_part++) { addr = src->addr[addr_part]; acc += (addr & 0xffffUL); acc += ((addr >> 16) & 0xffffUL); addr = dest->addr[addr_part]; acc += (addr & 0xffffUL); acc += ((addr >> 16) & 0xffffUL); } /* fold down to 16 bits */ acc = FOLD_U32T(acc); acc = FOLD_U32T(acc); return inet_cksum_pseudo_base(p, proto, proto_len, acc); } #endif /* LWIP_IPV6 */ /* ip_chksum_pseudo: * * Calculates the IPv4 or IPv6 pseudo Internet checksum used by TCP and UDP for a pbuf chain. * IP addresses are expected to be in network byte order. * * @param p chain of pbufs over that a checksum should be calculated (ip data part) * @param src source ip address (used for checksum of pseudo header) * @param dst destination ip address (used for checksum of pseudo header) * @param proto ip protocol (used for checksum of pseudo header) * @param proto_len length of the ip data part (used for checksum of pseudo header) * @return checksum (as u16_t) to be saved directly in the protocol header */ u16_t ip_chksum_pseudo(struct pbuf *p, u8_t proto, u16_t proto_len, const ip_addr_t *src, const ip_addr_t *dest) { #if LWIP_IPV6 if (IP_IS_V6(dest)) { return ip6_chksum_pseudo(p, proto, proto_len, ip_2_ip6(src), ip_2_ip6(dest)); } #endif /* LWIP_IPV6 */ #if LWIP_IPV4 && LWIP_IPV6 else #endif /* LWIP_IPV4 && LWIP_IPV6 */ #if LWIP_IPV4 { return inet_chksum_pseudo(p, proto, proto_len, ip_2_ip4(src), ip_2_ip4(dest)); } #endif /* LWIP_IPV4 */ } /** Parts of the pseudo checksum which are common to IPv4 and IPv6 */ static u16_t inet_cksum_pseudo_partial_base(struct pbuf *p, u8_t proto, u16_t proto_len, u16_t chksum_len, u32_t acc) { struct pbuf *q; u8_t swapped = 0; u16_t chklen; /* iterate through all pbuf in chain */ for (q = p; (q != NULL) && (chksum_len > 0); q = q->next) { LWIP_DEBUGF(INET_DEBUG, ("inet_chksum_pseudo(): checksumming pbuf %p (has next %p) \n", (void *)q, (void *)q->next)); chklen = q->len; if (chklen > chksum_len) { chklen = chksum_len; } acc += LWIP_CHKSUM(q->payload, chklen); chksum_len -= chklen; LWIP_ASSERT("delete me", chksum_len < 0x7fff); /*LWIP_DEBUGF(INET_DEBUG, ("inet_chksum_pseudo(): unwrapped lwip_chksum()=%"X32_F" \n", acc));*/ /* fold the upper bit down */ acc = FOLD_U32T(acc); if (q->len % 2 != 0) { swapped = 1 - swapped; acc = SWAP_BYTES_IN_WORD(acc); } /*LWIP_DEBUGF(INET_DEBUG, ("inet_chksum_pseudo(): wrapped lwip_chksum()=%"X32_F" \n", acc));*/ } if (swapped) { acc = SWAP_BYTES_IN_WORD(acc); } acc += (u32_t)lwip_htons((u16_t)proto); acc += (u32_t)lwip_htons(proto_len); /* Fold 32-bit sum to 16 bits calling this twice is probably faster than if statements... 
*/ acc = FOLD_U32T(acc); acc = FOLD_U32T(acc); LWIP_DEBUGF(INET_DEBUG, ("inet_chksum_pseudo(): pbuf chain lwip_chksum()=%"X32_F"\n", acc)); return (u16_t)~(acc & 0xffffUL); } #if LWIP_IPV4 /* inet_chksum_pseudo_partial: * * Calculates the IPv4 pseudo Internet checksum used by TCP and UDP for a pbuf chain. * IP addresses are expected to be in network byte order. * * @param p chain of pbufs over that a checksum should be calculated (ip data part) * @param src source ip address (used for checksum of pseudo header) * @param dst destination ip address (used for checksum of pseudo header) * @param proto ip protocol (used for checksum of pseudo header) * @param proto_len length of the ip data part (used for checksum of pseudo header) * @return checksum (as u16_t) to be saved directly in the protocol header */ u16_t inet_chksum_pseudo_partial(struct pbuf *p, u8_t proto, u16_t proto_len, u16_t chksum_len, const ip4_addr_t *src, const ip4_addr_t *dest) { u32_t acc; u32_t addr; addr = ip4_addr_get_u32(src); acc = (addr & 0xffffUL); acc += ((addr >> 16) & 0xffffUL); addr = ip4_addr_get_u32(dest); acc += (addr & 0xffffUL); acc += ((addr >> 16) & 0xffffUL); /* fold down to 16 bits */ acc = FOLD_U32T(acc); acc = FOLD_U32T(acc); return inet_cksum_pseudo_partial_base(p, proto, proto_len, chksum_len, acc); } #endif /* LWIP_IPV4 */ #if LWIP_IPV6 /** * Calculates the checksum with IPv6 pseudo header used by TCP and UDP for a pbuf chain. * IPv6 addresses are expected to be in network byte order. Will only compute for a * portion of the payload. * * @param p chain of pbufs over that a checksum should be calculated (ip data part) * @param proto ipv6 protocol/next header (used for checksum of pseudo header) * @param proto_len length of the ipv6 payload (used for checksum of pseudo header) * @param chksum_len number of payload bytes used to compute chksum * @param src source ipv6 address (used for checksum of pseudo header) * @param dest destination ipv6 address (used for checksum of pseudo header) * @return checksum (as u16_t) to be saved directly in the protocol header */ u16_t ip6_chksum_pseudo_partial(struct pbuf *p, u8_t proto, u16_t proto_len, u16_t chksum_len, const ip6_addr_t *src, const ip6_addr_t *dest) { u32_t acc = 0; u32_t addr; u8_t addr_part; for (addr_part = 0; addr_part < 4; addr_part++) { addr = src->addr[addr_part]; acc += (addr & 0xffffUL); acc += ((addr >> 16) & 0xffffUL); addr = dest->addr[addr_part]; acc += (addr & 0xffffUL); acc += ((addr >> 16) & 0xffffUL); } /* fold down to 16 bits */ acc = FOLD_U32T(acc); acc = FOLD_U32T(acc); return inet_cksum_pseudo_partial_base(p, proto, proto_len, chksum_len, acc); } #endif /* LWIP_IPV6 */ /* ip_chksum_pseudo_partial: * * Calculates the IPv4 or IPv6 pseudo Internet checksum used by TCP and UDP for a pbuf chain. 
* * @param p chain of pbufs over that a checksum should be calculated (ip data part) * @param src source ip address (used for checksum of pseudo header) * @param dst destination ip address (used for checksum of pseudo header) * @param proto ip protocol (used for checksum of pseudo header) * @param proto_len length of the ip data part (used for checksum of pseudo header) * @return checksum (as u16_t) to be saved directly in the protocol header */ u16_t ip_chksum_pseudo_partial(struct pbuf *p, u8_t proto, u16_t proto_len, u16_t chksum_len, const ip_addr_t *src, const ip_addr_t *dest) { #if LWIP_IPV6 if (IP_IS_V6(dest)) { return ip6_chksum_pseudo_partial(p, proto, proto_len, chksum_len, ip_2_ip6(src), ip_2_ip6(dest)); } #endif /* LWIP_IPV6 */ #if LWIP_IPV4 && LWIP_IPV6 else #endif /* LWIP_IPV4 && LWIP_IPV6 */ #if LWIP_IPV4 { return inet_chksum_pseudo_partial(p, proto, proto_len, chksum_len, ip_2_ip4(src), ip_2_ip4(dest)); } #endif /* LWIP_IPV4 */ } /* inet_chksum: * * Calculates the Internet checksum over a portion of memory. Used primarily for IP * and ICMP. * * @param dataptr start of the buffer to calculate the checksum (no alignment needed) * @param len length of the buffer to calculate the checksum * @return checksum (as u16_t) to be saved directly in the protocol header */ u16_t inet_chksum(const void *dataptr, u16_t len) { return (u16_t)~(unsigned int)LWIP_CHKSUM(dataptr, len); } /** * Calculate a checksum over a chain of pbufs (without pseudo-header, much like * inet_chksum only pbufs are used). * * @param p pbuf chain over that the checksum should be calculated * @return checksum (as u16_t) to be saved directly in the protocol header */ u16_t inet_chksum_pbuf(struct pbuf *p) { u32_t acc; struct pbuf *q; u8_t swapped; acc = 0; swapped = 0; for (q = p; q != NULL; q = q->next) { acc += LWIP_CHKSUM(q->payload, q->len); acc = FOLD_U32T(acc); if (q->len % 2 != 0) { swapped = 1 - swapped; acc = SWAP_BYTES_IN_WORD(acc); } } if (swapped) { acc = SWAP_BYTES_IN_WORD(acc); } return (u16_t)~(acc & 0xffffUL); } /* These are some implementations for LWIP_CHKSUM_COPY, which copies data * like MEMCPY but generates a checksum at the same time. Since this is a * performance-sensitive function, you might want to create your own version * in assembly targeted at your hardware by defining it in lwipopts.h: * #define LWIP_CHKSUM_COPY(dst, src, len) your_chksum_copy(dst, src, len) */ #if (LWIP_CHKSUM_COPY_ALGORITHM == 1) /* Version #1 */ /** Safe but slow: first call MEMCPY, then call LWIP_CHKSUM. * For architectures with big caches, data might still be in cache when * generating the checksum after copying. */ u16_t lwip_chksum_copy(void *dst, const void *src, u16_t len) { MEMCPY(dst, src, len); return LWIP_CHKSUM(dst, len); } #endif /* (LWIP_CHKSUM_COPY_ALGORITHM == 1) */
2301_81045437/classic-platform
communication/lwip-2.0.3/src/core/inet_chksum.c
C
unknown
19,661
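/* Standalone sketch, not lwIP code: the deferred-carry scheme used by
 * lwip_standard_chksum() algorithm #1 in inet_chksum.c (the record above),
 * applied to the sample bytes from RFC 1071. The accumulator is folded back
 * into 16 bits and the caller inverts the result, as inet_chksum() does with
 * ~LWIP_CHKSUM(...). lwIP additionally applies lwip_htons() so the value can
 * be stored straight into a header; that step is omitted here. */
#include <stdint.h>
#include <stdio.h>

static uint16_t chksum_ref(const uint8_t *p, int len)
{
  uint32_t acc = 0;

  while (len > 1) {                      /* sum big-endian 16-bit words */
    acc += (uint32_t)((p[0] << 8) | p[1]);
    p += 2;
    len -= 2;
  }
  if (len > 0) {                         /* odd trailing byte, zero-padded */
    acc += (uint32_t)(p[0] << 8);
  }
  acc = (acc >> 16) + (acc & 0xffffu);   /* fold deferred carries */
  acc = (acc >> 16) + (acc & 0xffffu);   /* one more fold catches the last carry */
  return (uint16_t)acc;                  /* non-inverted sum */
}

int main(void)
{
  /* RFC 1071 sample data: 00 01 f2 03 f4 f5 f6 f7 -> checksum 0x220d */
  const uint8_t data[] = { 0x00, 0x01, 0xf2, 0x03, 0xf4, 0xf5, 0xf6, 0xf7 };
  uint16_t sum = chksum_ref(data, (int)sizeof(data));

  printf("sum = 0x%04x, checksum = 0x%04x\n", sum, (uint16_t)~sum);
  return 0;
}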
/** * @file * Modules initialization * */ /* * Copyright (c) 2001-2004 Swedish Institute of Computer Science. * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY * OF SUCH DAMAGE. * * This file is part of the lwIP TCP/IP stack. * * Author: Adam Dunkels <adam@sics.se> */ #include "lwip/opt.h" #include "lwip/init.h" #include "lwip/stats.h" #include "lwip/sys.h" #include "lwip/mem.h" #include "lwip/memp.h" #include "lwip/pbuf.h" #include "lwip/netif.h" #include "lwip/sockets.h" #include "lwip/ip.h" #include "lwip/raw.h" #include "lwip/udp.h" #include "lwip/priv/tcp_priv.h" #include "lwip/igmp.h" #include "lwip/dns.h" #include "lwip/timeouts.h" #include "lwip/etharp.h" #include "lwip/ip6.h" #include "lwip/nd6.h" #include "lwip/mld6.h" #include "lwip/api.h" #include "netif/ppp/ppp_opts.h" #include "netif/ppp/ppp_impl.h" #ifndef LWIP_SKIP_PACKING_CHECK #ifdef PACK_STRUCT_USE_INCLUDES # include "arch/bpstruct.h" #endif PACK_STRUCT_BEGIN struct packed_struct_test { PACK_STRUCT_FLD_8(u8_t dummy1); PACK_STRUCT_FIELD(u32_t dummy2); } PACK_STRUCT_STRUCT; PACK_STRUCT_END #ifdef PACK_STRUCT_USE_INCLUDES # include "arch/epstruct.h" #endif #define PACKED_STRUCT_TEST_EXPECTED_SIZE 5 #endif /* Compile-time sanity checks for configuration errors. * These can be done independently of LWIP_DEBUG, without penalty. 
*/ #ifndef BYTE_ORDER #error "BYTE_ORDER is not defined, you have to define it in your cc.h" #endif #if (!IP_SOF_BROADCAST && IP_SOF_BROADCAST_RECV) #error "If you want to use broadcast filter per pcb on recv operations, you have to define IP_SOF_BROADCAST=1 in your lwipopts.h" #endif #if (!LWIP_UDP && LWIP_UDPLITE) #error "If you want to use UDP Lite, you have to define LWIP_UDP=1 in your lwipopts.h" #endif #if (!LWIP_UDP && LWIP_DHCP) #error "If you want to use DHCP, you have to define LWIP_UDP=1 in your lwipopts.h" #endif #if (!LWIP_UDP && LWIP_MULTICAST_TX_OPTIONS) #error "If you want to use IGMP/LWIP_MULTICAST_TX_OPTIONS, you have to define LWIP_UDP=1 in your lwipopts.h" #endif #if (!LWIP_UDP && LWIP_DNS) #error "If you want to use DNS, you have to define LWIP_UDP=1 in your lwipopts.h" #endif #if !MEMP_MEM_MALLOC /* MEMP_NUM_* checks are disabled when not using the pool allocator */ #if (LWIP_ARP && ARP_QUEUEING && (MEMP_NUM_ARP_QUEUE<=0)) #error "If you want to use ARP Queueing, you have to define MEMP_NUM_ARP_QUEUE>=1 in your lwipopts.h" #endif #if (LWIP_RAW && (MEMP_NUM_RAW_PCB<=0)) #error "If you want to use RAW, you have to define MEMP_NUM_RAW_PCB>=1 in your lwipopts.h" #endif #if (LWIP_UDP && (MEMP_NUM_UDP_PCB<=0)) #error "If you want to use UDP, you have to define MEMP_NUM_UDP_PCB>=1 in your lwipopts.h" #endif #if (LWIP_TCP && (MEMP_NUM_TCP_PCB<=0)) #error "If you want to use TCP, you have to define MEMP_NUM_TCP_PCB>=1 in your lwipopts.h" #endif #if (LWIP_IGMP && (MEMP_NUM_IGMP_GROUP<=1)) #error "If you want to use IGMP, you have to define MEMP_NUM_IGMP_GROUP>1 in your lwipopts.h" #endif #if (LWIP_IGMP && !LWIP_MULTICAST_TX_OPTIONS) #error "If you want to use IGMP, you have to define LWIP_MULTICAST_TX_OPTIONS==1 in your lwipopts.h" #endif #if (LWIP_IGMP && !LWIP_IPV4) #error "IGMP needs LWIP_IPV4 enabled in your lwipopts.h" #endif #if (LWIP_MULTICAST_TX_OPTIONS && !LWIP_IPV4) #error "LWIP_MULTICAST_TX_OPTIONS needs LWIP_IPV4 enabled in your lwipopts.h" #endif #if ((LWIP_NETCONN || LWIP_SOCKET) && (MEMP_NUM_TCPIP_MSG_API<=0)) #error "If you want to use Sequential API, you have to define MEMP_NUM_TCPIP_MSG_API>=1 in your lwipopts.h" #endif /* There must be sufficient timeouts, taking into account requirements of the subsystems. */ #if LWIP_TIMERS && (MEMP_NUM_SYS_TIMEOUT < (LWIP_TCP + IP_REASSEMBLY + LWIP_ARP + (2*LWIP_DHCP) + LWIP_AUTOIP + LWIP_IGMP + LWIP_DNS + PPP_SUPPORT + (LWIP_IPV6 ? (1 + LWIP_IPV6_REASS + LWIP_IPV6_MLD) : 0))) #error "MEMP_NUM_SYS_TIMEOUT is too low to accomodate all required timeouts" #endif #if (IP_REASSEMBLY && (MEMP_NUM_REASSDATA > IP_REASS_MAX_PBUFS)) #error "MEMP_NUM_REASSDATA > IP_REASS_MAX_PBUFS doesn't make sense since each struct ip_reassdata must hold 2 pbufs at least!" #endif #endif /* !MEMP_MEM_MALLOC */ #if LWIP_WND_SCALE #if (LWIP_TCP && (TCP_WND > 0xffffffff)) #error "If you want to use TCP, TCP_WND must fit in an u32_t, so, you have to reduce it in your lwipopts.h" #endif #if (LWIP_TCP && (TCP_RCV_SCALE > 14)) #error "The maximum valid window scale value is 14!" #endif #if (LWIP_TCP && (TCP_WND > (0xFFFFU << TCP_RCV_SCALE))) #error "TCP_WND is bigger than the configured LWIP_WND_SCALE allows!" #endif #if (LWIP_TCP && ((TCP_WND >> TCP_RCV_SCALE) == 0)) #error "TCP_WND is too small for the configured LWIP_WND_SCALE (results in zero window)!" 
#endif #else /* LWIP_WND_SCALE */ #if (LWIP_TCP && (TCP_WND > 0xffff)) #error "If you want to use TCP, TCP_WND must fit in an u16_t, so, you have to reduce it in your lwipopts.h (or enable window scaling)" #endif #endif /* LWIP_WND_SCALE */ #if (LWIP_TCP && (TCP_SND_QUEUELEN > 0xffff)) #error "If you want to use TCP, TCP_SND_QUEUELEN must fit in an u16_t, so, you have to reduce it in your lwipopts.h" #endif #if (LWIP_TCP && (TCP_SND_QUEUELEN < 2)) #error "TCP_SND_QUEUELEN must be at least 2 for no-copy TCP writes to work" #endif #if (LWIP_TCP && ((TCP_MAXRTX > 12) || (TCP_SYNMAXRTX > 12))) #error "If you want to use TCP, TCP_MAXRTX and TCP_SYNMAXRTX must less or equal to 12 (due to tcp_backoff table), so, you have to reduce them in your lwipopts.h" #endif #if (LWIP_TCP && TCP_LISTEN_BACKLOG && ((TCP_DEFAULT_LISTEN_BACKLOG < 0) || (TCP_DEFAULT_LISTEN_BACKLOG > 0xff))) #error "If you want to use TCP backlog, TCP_DEFAULT_LISTEN_BACKLOG must fit into an u8_t" #endif #if (LWIP_NETIF_API && (NO_SYS==1)) #error "If you want to use NETIF API, you have to define NO_SYS=0 in your lwipopts.h" #endif #if ((LWIP_SOCKET || LWIP_NETCONN) && (NO_SYS==1)) #error "If you want to use Sequential API, you have to define NO_SYS=0 in your lwipopts.h" #endif #if (LWIP_PPP_API && (NO_SYS==1)) #error "If you want to use PPP API, you have to define NO_SYS=0 in your lwipopts.h" #endif #if (LWIP_PPP_API && (PPP_SUPPORT==0)) #error "If you want to use PPP API, you have to enable PPP_SUPPORT in your lwipopts.h" #endif #if (((!LWIP_DHCP) || (!LWIP_AUTOIP)) && LWIP_DHCP_AUTOIP_COOP) #error "If you want to use DHCP/AUTOIP cooperation mode, you have to define LWIP_DHCP=1 and LWIP_AUTOIP=1 in your lwipopts.h" #endif #if (((!LWIP_DHCP) || (!LWIP_ARP)) && DHCP_DOES_ARP_CHECK) #error "If you want to use DHCP ARP checking, you have to define LWIP_DHCP=1 and LWIP_ARP=1 in your lwipopts.h" #endif #if (!LWIP_ARP && LWIP_AUTOIP) #error "If you want to use AUTOIP, you have to define LWIP_ARP=1 in your lwipopts.h" #endif #if (LWIP_TCP && ((LWIP_EVENT_API && LWIP_CALLBACK_API) || (!LWIP_EVENT_API && !LWIP_CALLBACK_API))) #error "One and exactly one of LWIP_EVENT_API and LWIP_CALLBACK_API has to be enabled in your lwipopts.h" #endif #if (MEM_LIBC_MALLOC && MEM_USE_POOLS) #error "MEM_LIBC_MALLOC and MEM_USE_POOLS may not both be simultaneously enabled in your lwipopts.h" #endif #if (MEM_USE_POOLS && !MEMP_USE_CUSTOM_POOLS) #error "MEM_USE_POOLS requires custom pools (MEMP_USE_CUSTOM_POOLS) to be enabled in your lwipopts.h" #endif #if (PBUF_POOL_BUFSIZE <= MEM_ALIGNMENT) #error "PBUF_POOL_BUFSIZE must be greater than MEM_ALIGNMENT or the offset may take the full first pbuf" #endif #if (DNS_LOCAL_HOSTLIST && !DNS_LOCAL_HOSTLIST_IS_DYNAMIC && !(defined(DNS_LOCAL_HOSTLIST_INIT))) #error "you have to define define DNS_LOCAL_HOSTLIST_INIT {{'host1', 0x123}, {'host2', 0x234}} to initialize DNS_LOCAL_HOSTLIST" #endif #if PPP_SUPPORT && !PPPOS_SUPPORT && !PPPOE_SUPPORT && !PPPOL2TP_SUPPORT #error "PPP_SUPPORT needs at least one of PPPOS_SUPPORT, PPPOE_SUPPORT or PPPOL2TP_SUPPORT turned on" #endif #if PPP_SUPPORT && !PPP_IPV4_SUPPORT && !PPP_IPV6_SUPPORT #error "PPP_SUPPORT needs PPP_IPV4_SUPPORT and/or PPP_IPV6_SUPPORT turned on" #endif #if PPP_SUPPORT && PPP_IPV4_SUPPORT && !LWIP_IPV4 #error "PPP_IPV4_SUPPORT needs LWIP_IPV4 turned on" #endif #if PPP_SUPPORT && PPP_IPV6_SUPPORT && !LWIP_IPV6 #error "PPP_IPV6_SUPPORT needs LWIP_IPV6 turned on" #endif #if !LWIP_ETHERNET && (LWIP_ARP || PPPOE_SUPPORT) #error "LWIP_ETHERNET needs to be turned on for 
LWIP_ARP or PPPOE_SUPPORT" #endif #if LWIP_TCPIP_CORE_LOCKING_INPUT && !LWIP_TCPIP_CORE_LOCKING #error "When using LWIP_TCPIP_CORE_LOCKING_INPUT, LWIP_TCPIP_CORE_LOCKING must be enabled, too" #endif #if LWIP_TCP && LWIP_NETIF_TX_SINGLE_PBUF && !TCP_OVERSIZE #error "LWIP_NETIF_TX_SINGLE_PBUF needs TCP_OVERSIZE enabled to create single-pbuf TCP packets" #endif #if LWIP_NETCONN && LWIP_TCP #if NETCONN_COPY != TCP_WRITE_FLAG_COPY #error "NETCONN_COPY != TCP_WRITE_FLAG_COPY" #endif #if NETCONN_MORE != TCP_WRITE_FLAG_MORE #error "NETCONN_MORE != TCP_WRITE_FLAG_MORE" #endif #endif /* LWIP_NETCONN && LWIP_TCP */ #if LWIP_SOCKET /* Check that the SO_* socket options and SOF_* lwIP-internal flags match */ #if SO_REUSEADDR != SOF_REUSEADDR #error "WARNING: SO_REUSEADDR != SOF_REUSEADDR" #endif #if SO_KEEPALIVE != SOF_KEEPALIVE #error "WARNING: SO_KEEPALIVE != SOF_KEEPALIVE" #endif #if SO_BROADCAST != SOF_BROADCAST #error "WARNING: SO_BROADCAST != SOF_BROADCAST" #endif #endif /* LWIP_SOCKET */ /* Compile-time checks for deprecated options. */ #ifdef MEMP_NUM_TCPIP_MSG #error "MEMP_NUM_TCPIP_MSG option is deprecated. Remove it from your lwipopts.h." #endif #ifdef TCP_REXMIT_DEBUG #error "TCP_REXMIT_DEBUG option is deprecated. Remove it from your lwipopts.h." #endif #ifdef RAW_STATS #error "RAW_STATS option is deprecated. Remove it from your lwipopts.h." #endif #ifdef ETHARP_QUEUE_FIRST #error "ETHARP_QUEUE_FIRST option is deprecated. Remove it from your lwipopts.h." #endif #ifdef ETHARP_ALWAYS_INSERT #error "ETHARP_ALWAYS_INSERT option is deprecated. Remove it from your lwipopts.h." #endif #if !NO_SYS && LWIP_TCPIP_CORE_LOCKING && LWIP_COMPAT_MUTEX && !defined(LWIP_COMPAT_MUTEX_ALLOWED) #error "LWIP_COMPAT_MUTEX cannot prevent priority inversion. It is recommended to implement priority-aware mutexes. (Define LWIP_COMPAT_MUTEX_ALLOWED to disable this error.)" #endif #ifndef LWIP_DISABLE_TCP_SANITY_CHECKS #define LWIP_DISABLE_TCP_SANITY_CHECKS 0 #endif #ifndef LWIP_DISABLE_MEMP_SANITY_CHECKS #define LWIP_DISABLE_MEMP_SANITY_CHECKS 0 #endif /* MEMP sanity checks */ #if MEMP_MEM_MALLOC #if !LWIP_DISABLE_MEMP_SANITY_CHECKS #if LWIP_NETCONN || LWIP_SOCKET #if !MEMP_NUM_NETCONN && LWIP_SOCKET #error "lwip_sanity_check: WARNING: MEMP_NUM_NETCONN cannot be 0 when using sockets!" #endif #else /* MEMP_MEM_MALLOC */ #if MEMP_NUM_NETCONN > (MEMP_NUM_TCP_PCB+MEMP_NUM_TCP_PCB_LISTEN+MEMP_NUM_UDP_PCB+MEMP_NUM_RAW_PCB) #error "lwip_sanity_check: WARNING: MEMP_NUM_NETCONN should be less than the sum of MEMP_NUM_{TCP,RAW,UDP}_PCB+MEMP_NUM_TCP_PCB_LISTEN. If you know what you are doing, define LWIP_DISABLE_MEMP_SANITY_CHECKS to 1 to disable this error." #endif #endif /* LWIP_NETCONN || LWIP_SOCKET */ #endif /* !LWIP_DISABLE_MEMP_SANITY_CHECKS */ #if MEM_USE_POOLS #error "MEMP_MEM_MALLOC and MEM_USE_POOLS cannot be enabled at the same time" #endif #ifdef LWIP_HOOK_MEMP_AVAILABLE #error "LWIP_HOOK_MEMP_AVAILABLE doesn't make sense with MEMP_MEM_MALLOC" #endif #endif /* MEMP_MEM_MALLOC */ /* TCP sanity checks */ #if !LWIP_DISABLE_TCP_SANITY_CHECKS #if LWIP_TCP #if !MEMP_MEM_MALLOC && (MEMP_NUM_TCP_SEG < TCP_SND_QUEUELEN) #error "lwip_sanity_check: WARNING: MEMP_NUM_TCP_SEG should be at least as big as TCP_SND_QUEUELEN. If you know what you are doing, define LWIP_DISABLE_TCP_SANITY_CHECKS to 1 to disable this error." #endif #if TCP_SND_BUF < (2 * TCP_MSS) #error "lwip_sanity_check: WARNING: TCP_SND_BUF must be at least as much as (2 * TCP_MSS) for things to work smoothly. 
If you know what you are doing, define LWIP_DISABLE_TCP_SANITY_CHECKS to 1 to disable this error." #endif #if TCP_SND_QUEUELEN < (2 * (TCP_SND_BUF / TCP_MSS)) #error "lwip_sanity_check: WARNING: TCP_SND_QUEUELEN must be at least as much as (2 * TCP_SND_BUF/TCP_MSS) for things to work. If you know what you are doing, define LWIP_DISABLE_TCP_SANITY_CHECKS to 1 to disable this error." #endif #if TCP_SNDLOWAT >= TCP_SND_BUF #error "lwip_sanity_check: WARNING: TCP_SNDLOWAT must be less than TCP_SND_BUF. If you know what you are doing, define LWIP_DISABLE_TCP_SANITY_CHECKS to 1 to disable this error." #endif #if TCP_SNDLOWAT >= (0xFFFF - (4 * TCP_MSS)) #error "lwip_sanity_check: WARNING: TCP_SNDLOWAT must at least be 4*MSS below u16_t overflow!" #endif #if TCP_SNDQUEUELOWAT >= TCP_SND_QUEUELEN #error "lwip_sanity_check: WARNING: TCP_SNDQUEUELOWAT must be less than TCP_SND_QUEUELEN. If you know what you are doing, define LWIP_DISABLE_TCP_SANITY_CHECKS to 1 to disable this error." #endif #if !MEMP_MEM_MALLOC && PBUF_POOL_SIZE && (PBUF_POOL_BUFSIZE <= (PBUF_LINK_ENCAPSULATION_HLEN + PBUF_LINK_HLEN + PBUF_IP_HLEN + PBUF_TRANSPORT_HLEN)) #error "lwip_sanity_check: WARNING: PBUF_POOL_BUFSIZE does not provide enough space for protocol headers. If you know what you are doing, define LWIP_DISABLE_TCP_SANITY_CHECKS to 1 to disable this error." #endif #if !MEMP_MEM_MALLOC && PBUF_POOL_SIZE && (TCP_WND > (PBUF_POOL_SIZE * (PBUF_POOL_BUFSIZE - (PBUF_LINK_ENCAPSULATION_HLEN + PBUF_LINK_HLEN + PBUF_IP_HLEN + PBUF_TRANSPORT_HLEN)))) #error "lwip_sanity_check: WARNING: TCP_WND is larger than space provided by PBUF_POOL_SIZE * (PBUF_POOL_BUFSIZE - protocol headers). If you know what you are doing, define LWIP_DISABLE_TCP_SANITY_CHECKS to 1 to disable this error." #endif #if TCP_WND < TCP_MSS #error "lwip_sanity_check: WARNING: TCP_WND is smaller than MSS. If you know what you are doing, define LWIP_DISABLE_TCP_SANITY_CHECKS to 1 to disable this error." #endif #endif /* LWIP_TCP */ #endif /* !LWIP_DISABLE_TCP_SANITY_CHECKS */ /** * @ingroup lwip_nosys * Initialize all modules. * Use this in NO_SYS mode. Use tcpip_init() otherwise. */ void lwip_init(void) { #ifndef LWIP_SKIP_CONST_CHECK int a = 0; LWIP_UNUSED_ARG(a); LWIP_ASSERT("LWIP_CONST_CAST not implemented correctly. Check your lwIP port.", LWIP_CONST_CAST(void*, &a) == &a); #endif #ifndef LWIP_SKIP_PACKING_CHECK LWIP_ASSERT("Struct packing not implemented correctly. Check your lwIP port.", sizeof(struct packed_struct_test) == PACKED_STRUCT_TEST_EXPECTED_SIZE); #endif /* Modules initialization */ stats_init(); #if !NO_SYS sys_init(); #endif /* !NO_SYS */ mem_init(); memp_init(); pbuf_init(); netif_init(); #if LWIP_IPV4 ip_init(); #if LWIP_ARP etharp_init(); #endif /* LWIP_ARP */ #endif /* LWIP_IPV4 */ #if LWIP_RAW raw_init(); #endif /* LWIP_RAW */ #if LWIP_UDP udp_init(); #endif /* LWIP_UDP */ #if LWIP_TCP tcp_init(); #endif /* LWIP_TCP */ #if LWIP_IGMP igmp_init(); #endif /* LWIP_IGMP */ #if LWIP_DNS dns_init(); #endif /* LWIP_DNS */ #if PPP_SUPPORT ppp_init(); #endif #if LWIP_TIMERS sys_timeouts_init(); #endif /* LWIP_TIMERS */ }
2301_81045437/classic-platform
communication/lwip-2.0.3/src/core/init_lwip.c
C
unknown
16,791
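The lwip_init() function defined in the file above is the single entry point for NO_SYS builds ("Use this in NO_SYS mode. Use tcpip_init() otherwise."). A minimal sketch of how a bare-metal port might wire it together follows; my_ethernetif_init() and my_driver_poll() are hypothetical driver hooks, and the addresses are placeholders, not anything prescribed by lwIP.

/* Illustrative sketch only: minimal NO_SYS=1 bring-up around lwip_init().
 * my_ethernetif_init() and my_driver_poll() are assumed, port-specific hooks. */
#include "lwip/init.h"
#include "lwip/netif.h"
#include "lwip/timeouts.h"
#include "netif/ethernet.h"

extern err_t my_ethernetif_init(struct netif *netif); /* assumed: MAC/PHY setup, sets netif->output etc. */
extern void  my_driver_poll(struct netif *netif);     /* assumed: passes received frames to netif->input() */

static struct netif eth0;

void app_main(void)
{
  ip4_addr_t ip, mask, gw;
  IP4_ADDR(&ip,   192, 168, 1, 10);   /* placeholder static configuration */
  IP4_ADDR(&mask, 255, 255, 255, 0);
  IP4_ADDR(&gw,   192, 168, 1, 1);

  lwip_init();                        /* initialize all lwIP modules (NO_SYS mode) */
  netif_add(&eth0, &ip, &mask, &gw, NULL, my_ethernetif_init, ethernet_input);
  netif_set_default(&eth0);
  netif_set_up(&eth0);

  for (;;) {
    my_driver_poll(&eth0);            /* feed incoming frames into the stack */
    sys_check_timeouts();             /* drive ARP/TCP/DHCP/... timers */
  }
}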
/** * @file * Common IPv4 and IPv6 code * * @defgroup ip IP * @ingroup callbackstyle_api * * @defgroup ip4 IPv4 * @ingroup ip * * @defgroup ip6 IPv6 * @ingroup ip * * @defgroup ipaddr IP address handling * @ingroup infrastructure * * @defgroup ip4addr IPv4 only * @ingroup ipaddr * * @defgroup ip6addr IPv6 only * @ingroup ipaddr */ /* * Copyright (c) 2001-2004 Swedish Institute of Computer Science. * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY * OF SUCH DAMAGE. * * This file is part of the lwIP TCP/IP stack. * * Author: Adam Dunkels <adam@sics.se> * */ #include "lwip/opt.h" #if LWIP_IPV4 || LWIP_IPV6 #include "lwip/ip_addr.h" #include "lwip/ip.h" /** Global data for both IPv4 and IPv6 */ struct ip_globals ip_data; #if LWIP_IPV4 && LWIP_IPV6 const ip_addr_t ip_addr_any_type = IPADDR_ANY_TYPE_INIT; /** * @ingroup ipaddr * Convert IP address string (both versions) to numeric. * The version is auto-detected from the string. * * @param cp IP address string to convert * @param addr conversion result is stored here * @return 1 on success, 0 on error */ int ipaddr_aton(const char *cp, ip_addr_t *addr) { if (cp != NULL) { const char* c; for (c = cp; *c != 0; c++) { if (*c == ':') { /* contains a colon: IPv6 address */ if (addr) { IP_SET_TYPE_VAL(*addr, IPADDR_TYPE_V6); } return ip6addr_aton(cp, ip_2_ip6(addr)); } else if (*c == '.') { /* contains a dot: IPv4 address */ break; } } /* call ip4addr_aton as fallback or if IPv4 was found */ if (addr) { IP_SET_TYPE_VAL(*addr, IPADDR_TYPE_V4); } return ip4addr_aton(cp, ip_2_ip4(addr)); } return 0; } /** * @ingroup lwip_nosys * If both IP versions are enabled, this function can dispatch packets to the correct one. * Don't call directly, pass to netif_add() and call netif->input(). */ err_t ip_input(struct pbuf *p, struct netif *inp) { if (p != NULL) { if (IP_HDR_GET_VERSION(p->payload) == 6) { return ip6_input(p, inp); } return ip4_input(p, inp); } return ERR_VAL; } #endif /* LWIP_IPV4 && LWIP_IPV6 */ #endif /* LWIP_IPV4 || LWIP_IPV6 */
2301_81045437/classic-platform
communication/lwip-2.0.3/src/core/ip.c
C
unknown
3,685
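The ipaddr_aton() wrapper above auto-detects the address family from the string: a colon selects the IPv6 parser, otherwise the IPv4 parser is used and the result is tagged with the matching type. A small sketch, assuming a dual-stack build (LWIP_IPV4 and LWIP_IPV6 both enabled, since the type tag only exists then):

#include "lwip/ip_addr.h"
#include "lwip/debug.h"

void addr_parse_demo(void)
{
  ip_addr_t a4, a6;

  /* No ':' in the string, so this goes through ip4addr_aton() ... */
  if (ipaddr_aton("192.168.1.10", &a4)) {
    LWIP_ASSERT("expected an IPv4-tagged address", IP_IS_V4(&a4));
  }
  /* ... while a colon switches the call to ip6addr_aton(). */
  if (ipaddr_aton("fe80::1", &a6)) {
    LWIP_ASSERT("expected an IPv6-tagged address", IP_IS_V6(&a6));
  }
}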
/** * @file * AutoIP Automatic LinkLocal IP Configuration * * This is a AutoIP implementation for the lwIP TCP/IP stack. It aims to conform * with RFC 3927. * * @defgroup autoip AUTOIP * @ingroup ip4 * AUTOIP related functions * USAGE: * * define @ref LWIP_AUTOIP 1 in your lwipopts.h * Options: * AUTOIP_TMR_INTERVAL msecs, * I recommend a value of 100. The value must divide 1000 with a remainder almost 0. * Possible values are 1000, 500, 333, 250, 200, 166, 142, 125, 111, 100 .... * * Without DHCP: * - Call autoip_start() after netif_add(). * * With DHCP: * - define @ref LWIP_DHCP_AUTOIP_COOP 1 in your lwipopts.h. * - Configure your DHCP Client. * * @see netifapi_autoip */ /* * * Copyright (c) 2007 Dominik Spies <kontakt@dspies.de> * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY * OF SUCH DAMAGE. * * Author: Dominik Spies <kontakt@dspies.de> */ #include "lwip/opt.h" #if LWIP_IPV4 && LWIP_AUTOIP /* don't build if not configured for use in lwipopts.h */ #include "lwip/mem.h" /* #include "lwip/udp.h" */ #include "lwip/ip_addr.h" #include "lwip/netif.h" #include "lwip/autoip.h" #include "lwip/etharp.h" #include "lwip/prot/autoip.h" #include <string.h> /** Pseudo random macro based on netif informations. * You could use "rand()" from the C Library if you define LWIP_AUTOIP_RAND in lwipopts.h */ #ifndef LWIP_AUTOIP_RAND #define LWIP_AUTOIP_RAND(netif) ( (((u32_t)((netif->hwaddr[5]) & 0xff) << 24) | \ ((u32_t)((netif->hwaddr[3]) & 0xff) << 16) | \ ((u32_t)((netif->hwaddr[2]) & 0xff) << 8) | \ ((u32_t)((netif->hwaddr[4]) & 0xff))) + \ (netif_autoip_data(netif)? netif_autoip_data(netif)->tried_llipaddr : 0)) #endif /* LWIP_AUTOIP_RAND */ /** * Macro that generates the initial IP address to be tried by AUTOIP. * If you want to override this, define it to something else in lwipopts.h. 
*/ #ifndef LWIP_AUTOIP_CREATE_SEED_ADDR #define LWIP_AUTOIP_CREATE_SEED_ADDR(netif) \ lwip_htonl(AUTOIP_RANGE_START + ((u32_t)(((u8_t)(netif->hwaddr[4])) | \ ((u32_t)((u8_t)(netif->hwaddr[5]))) << 8))) #endif /* LWIP_AUTOIP_CREATE_SEED_ADDR */ /* static functions */ static err_t autoip_arp_announce(struct netif *netif); static void autoip_start_probing(struct netif *netif); /** * @ingroup autoip * Set a statically allocated struct autoip to work with. * Using this prevents autoip_start to allocate it using mem_malloc. * * @param netif the netif for which to set the struct autoip * @param autoip (uninitialised) autoip struct allocated by the application */ void autoip_set_struct(struct netif *netif, struct autoip *autoip) { LWIP_ASSERT("netif != NULL", netif != NULL); LWIP_ASSERT("autoip != NULL", autoip != NULL); LWIP_ASSERT("netif already has a struct autoip set", netif_autoip_data(netif) == NULL); /* clear data structure */ memset(autoip, 0, sizeof(struct autoip)); /* autoip->state = AUTOIP_STATE_OFF; */ netif_set_client_data(netif, LWIP_NETIF_CLIENT_DATA_INDEX_AUTOIP, autoip); } /** Restart AutoIP client and check the next address (conflict detected) * * @param netif The netif under AutoIP control */ static void autoip_restart(struct netif *netif) { struct autoip* autoip = netif_autoip_data(netif); autoip->tried_llipaddr++; autoip_start(netif); } /** * Handle a IP address conflict after an ARP conflict detection */ static void autoip_handle_arp_conflict(struct netif *netif) { struct autoip* autoip = netif_autoip_data(netif); /* RFC3927, 2.5 "Conflict Detection and Defense" allows two options where a) means retreat on the first conflict and b) allows to keep an already configured address when having only one conflict in 10 seconds We use option b) since it helps to improve the chance that one of the two conflicting hosts may be able to retain its address. 
*/ if (autoip->lastconflict > 0) { /* retreat, there was a conflicting ARP in the last DEFEND_INTERVAL seconds */ LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("autoip_handle_arp_conflict(): we are defending, but in DEFEND_INTERVAL, retreating\n")); /* Active TCP sessions are aborted when removing the ip addresss */ autoip_restart(netif); } else { LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("autoip_handle_arp_conflict(): we are defend, send ARP Announce\n")); autoip_arp_announce(netif); autoip->lastconflict = DEFEND_INTERVAL * AUTOIP_TICKS_PER_SECOND; } } /** * Create an IP-Address out of range 169.254.1.0 to 169.254.254.255 * * @param netif network interface on which create the IP-Address * @param ipaddr ip address to initialize */ static void autoip_create_addr(struct netif *netif, ip4_addr_t *ipaddr) { struct autoip* autoip = netif_autoip_data(netif); /* Here we create an IP-Address out of range 169.254.1.0 to 169.254.254.255 * compliant to RFC 3927 Section 2.1 * We have 254 * 256 possibilities */ u32_t addr = lwip_ntohl(LWIP_AUTOIP_CREATE_SEED_ADDR(netif)); addr += autoip->tried_llipaddr; addr = AUTOIP_NET | (addr & 0xffff); /* Now, 169.254.0.0 <= addr <= 169.254.255.255 */ if (addr < AUTOIP_RANGE_START) { addr += AUTOIP_RANGE_END - AUTOIP_RANGE_START + 1; } if (addr > AUTOIP_RANGE_END) { addr -= AUTOIP_RANGE_END - AUTOIP_RANGE_START + 1; } LWIP_ASSERT("AUTOIP address not in range", (addr >= AUTOIP_RANGE_START) && (addr <= AUTOIP_RANGE_END)); ip4_addr_set_u32(ipaddr, lwip_htonl(addr)); LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("autoip_create_addr(): tried_llipaddr=%"U16_F", %"U16_F".%"U16_F".%"U16_F".%"U16_F"\n", (u16_t)(autoip->tried_llipaddr), ip4_addr1_16(ipaddr), ip4_addr2_16(ipaddr), ip4_addr3_16(ipaddr), ip4_addr4_16(ipaddr))); } /** * Sends an ARP probe from a network interface * * @param netif network interface used to send the probe */ static err_t autoip_arp_probe(struct netif *netif) { struct autoip* autoip = netif_autoip_data(netif); /* this works because netif->ip_addr is ANY */ return etharp_request(netif, &autoip->llipaddr); } /** * Sends an ARP announce from a network interface * * @param netif network interface used to send the announce */ static err_t autoip_arp_announce(struct netif *netif) { return etharp_gratuitous(netif); } /** * Configure interface for use with current LL IP-Address * * @param netif network interface to configure with current LL IP-Address */ static err_t autoip_bind(struct netif *netif) { struct autoip* autoip = netif_autoip_data(netif); ip4_addr_t sn_mask, gw_addr; LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE, ("autoip_bind(netif=%p) %c%c%"U16_F" %"U16_F".%"U16_F".%"U16_F".%"U16_F"\n", (void*)netif, netif->name[0], netif->name[1], (u16_t)netif->num, ip4_addr1_16(&autoip->llipaddr), ip4_addr2_16(&autoip->llipaddr), ip4_addr3_16(&autoip->llipaddr), ip4_addr4_16(&autoip->llipaddr))); IP4_ADDR(&sn_mask, 255, 255, 0, 0); IP4_ADDR(&gw_addr, 0, 0, 0, 0); netif_set_addr(netif, &autoip->llipaddr, &sn_mask, &gw_addr); /* interface is used by routing now that an address is set */ return ERR_OK; } /** * @ingroup autoip * Start AutoIP client * * @param netif network interface on which start the AutoIP client */ err_t autoip_start(struct netif *netif) { struct autoip* autoip = netif_autoip_data(netif); err_t result = ERR_OK; LWIP_ERROR("netif is not up, old style port?", netif_is_up(netif), return ERR_ARG;); /* Set IP-Address, Netmask and Gateway to 0 to make sure that * ARP Packets are formed correctly */ 
netif_set_addr(netif, IP4_ADDR_ANY4, IP4_ADDR_ANY4, IP4_ADDR_ANY4); LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("autoip_start(netif=%p) %c%c%"U16_F"\n", (void*)netif, netif->name[0], netif->name[1], (u16_t)netif->num)); if (autoip == NULL) { /* no AutoIP client attached yet? */ LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE, ("autoip_start(): starting new AUTOIP client\n")); autoip = (struct autoip *)mem_malloc(sizeof(struct autoip)); if (autoip == NULL) { LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE, ("autoip_start(): could not allocate autoip\n")); return ERR_MEM; } memset(autoip, 0, sizeof(struct autoip)); /* store this AutoIP client in the netif */ netif_set_client_data(netif, LWIP_NETIF_CLIENT_DATA_INDEX_AUTOIP, autoip); LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE, ("autoip_start(): allocated autoip")); } else { autoip->state = AUTOIP_STATE_OFF; autoip->ttw = 0; autoip->sent_num = 0; ip4_addr_set_zero(&autoip->llipaddr); autoip->lastconflict = 0; } autoip_create_addr(netif, &(autoip->llipaddr)); autoip_start_probing(netif); return result; } static void autoip_start_probing(struct netif *netif) { struct autoip* autoip = netif_autoip_data(netif); autoip->state = AUTOIP_STATE_PROBING; autoip->sent_num = 0; LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("autoip_start_probing(): changing state to PROBING: %"U16_F".%"U16_F".%"U16_F".%"U16_F"\n", ip4_addr1_16(&autoip->llipaddr), ip4_addr2_16(&autoip->llipaddr), ip4_addr3_16(&autoip->llipaddr), ip4_addr4_16(&autoip->llipaddr))); /* time to wait to first probe, this is randomly * chosen out of 0 to PROBE_WAIT seconds. * compliant to RFC 3927 Section 2.2.1 */ autoip->ttw = (u16_t)(LWIP_AUTOIP_RAND(netif) % (PROBE_WAIT * AUTOIP_TICKS_PER_SECOND)); /* * if we tried more then MAX_CONFLICTS we must limit our rate for * acquiring and probing address * compliant to RFC 3927 Section 2.2.1 */ if (autoip->tried_llipaddr > MAX_CONFLICTS) { autoip->ttw = RATE_LIMIT_INTERVAL * AUTOIP_TICKS_PER_SECOND; } } /** * Handle a possible change in the network configuration. * * If there is an AutoIP address configured, take the interface down * and begin probing with the same address. 
*/ void autoip_network_changed(struct netif *netif) { struct autoip* autoip = netif_autoip_data(netif); if (autoip && (autoip->state != AUTOIP_STATE_OFF)) { autoip_start_probing(netif); } } /** * @ingroup autoip * Stop AutoIP client * * @param netif network interface on which stop the AutoIP client */ err_t autoip_stop(struct netif *netif) { struct autoip* autoip = netif_autoip_data(netif); if (autoip != NULL) { autoip->state = AUTOIP_STATE_OFF; if (ip4_addr_islinklocal(netif_ip4_addr(netif))) { netif_set_addr(netif, IP4_ADDR_ANY4, IP4_ADDR_ANY4, IP4_ADDR_ANY4); } } return ERR_OK; } /** * Has to be called in loop every AUTOIP_TMR_INTERVAL milliseconds */ void autoip_tmr(void) { struct netif *netif = netif_list; /* loop through netif's */ while (netif != NULL) { struct autoip* autoip = netif_autoip_data(netif); /* only act on AutoIP configured interfaces */ if (autoip != NULL) { if (autoip->lastconflict > 0) { autoip->lastconflict--; } LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE, ("autoip_tmr() AutoIP-State: %"U16_F", ttw=%"U16_F"\n", (u16_t)(autoip->state), autoip->ttw)); if (autoip->ttw > 0) { autoip->ttw--; } switch(autoip->state) { case AUTOIP_STATE_PROBING: if (autoip->ttw == 0) { if (autoip->sent_num >= PROBE_NUM) { /* Switch to ANNOUNCING: now we can bind to an IP address and use it */ autoip->state = AUTOIP_STATE_ANNOUNCING; autoip_bind(netif); /* autoip_bind() calls netif_set_addr(): this triggers a gratuitous ARP which counts as an announcement */ autoip->sent_num = 1; autoip->ttw = ANNOUNCE_WAIT * AUTOIP_TICKS_PER_SECOND; LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("autoip_tmr(): changing state to ANNOUNCING: %"U16_F".%"U16_F".%"U16_F".%"U16_F"\n", ip4_addr1_16(&autoip->llipaddr), ip4_addr2_16(&autoip->llipaddr), ip4_addr3_16(&autoip->llipaddr), ip4_addr4_16(&autoip->llipaddr))); } else { autoip_arp_probe(netif); LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE, ("autoip_tmr() PROBING Sent Probe\n")); autoip->sent_num++; if (autoip->sent_num == PROBE_NUM) { /* calculate time to wait to for announce */ autoip->ttw = ANNOUNCE_WAIT * AUTOIP_TICKS_PER_SECOND; } else { /* calculate time to wait to next probe */ autoip->ttw = (u16_t)((LWIP_AUTOIP_RAND(netif) % ((PROBE_MAX - PROBE_MIN) * AUTOIP_TICKS_PER_SECOND) ) + PROBE_MIN * AUTOIP_TICKS_PER_SECOND); } } } break; case AUTOIP_STATE_ANNOUNCING: if (autoip->ttw == 0) { autoip_arp_announce(netif); LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE, ("autoip_tmr() ANNOUNCING Sent Announce\n")); autoip->ttw = ANNOUNCE_INTERVAL * AUTOIP_TICKS_PER_SECOND; autoip->sent_num++; if (autoip->sent_num >= ANNOUNCE_NUM) { autoip->state = AUTOIP_STATE_BOUND; autoip->sent_num = 0; autoip->ttw = 0; LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("autoip_tmr(): changing state to BOUND: %"U16_F".%"U16_F".%"U16_F".%"U16_F"\n", ip4_addr1_16(&autoip->llipaddr), ip4_addr2_16(&autoip->llipaddr), ip4_addr3_16(&autoip->llipaddr), ip4_addr4_16(&autoip->llipaddr))); } } break; default: /* nothing to do in other states */ break; } } /* proceed to next network interface */ netif = netif->next; } } /** * Handles every incoming ARP Packet, called by etharp_input(). 
* * @param netif network interface to use for autoip processing * @param hdr Incoming ARP packet */ void autoip_arp_reply(struct netif *netif, struct etharp_hdr *hdr) { struct autoip* autoip = netif_autoip_data(netif); LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE, ("autoip_arp_reply()\n")); if ((autoip != NULL) && (autoip->state != AUTOIP_STATE_OFF)) { /* when ip.src == llipaddr && hw.src != netif->hwaddr * * when probing ip.dst == llipaddr && hw.src != netif->hwaddr * we have a conflict and must solve it */ ip4_addr_t sipaddr, dipaddr; struct eth_addr netifaddr; ETHADDR16_COPY(netifaddr.addr, netif->hwaddr); /* Copy struct ip4_addr2 to aligned ip4_addr, to support compilers without * structure packing (not using structure copy which breaks strict-aliasing rules). */ IPADDR2_COPY(&sipaddr, &hdr->sipaddr); IPADDR2_COPY(&dipaddr, &hdr->dipaddr); if (autoip->state == AUTOIP_STATE_PROBING) { /* RFC 3927 Section 2.2.1: * from beginning to after ANNOUNCE_WAIT * seconds we have a conflict if * ip.src == llipaddr OR * ip.dst == llipaddr && hw.src != own hwaddr */ if ((ip4_addr_cmp(&sipaddr, &autoip->llipaddr)) || (ip4_addr_isany_val(sipaddr) && ip4_addr_cmp(&dipaddr, &autoip->llipaddr) && !eth_addr_cmp(&netifaddr, &hdr->shwaddr))) { LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE | LWIP_DBG_LEVEL_WARNING, ("autoip_arp_reply(): Probe Conflict detected\n")); autoip_restart(netif); } } else { /* RFC 3927 Section 2.5: * in any state we have a conflict if * ip.src == llipaddr && hw.src != own hwaddr */ if (ip4_addr_cmp(&sipaddr, &autoip->llipaddr) && !eth_addr_cmp(&netifaddr, &hdr->shwaddr)) { LWIP_DEBUGF(AUTOIP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE | LWIP_DBG_LEVEL_WARNING, ("autoip_arp_reply(): Conflicting ARP-Packet detected\n")); autoip_handle_arp_conflict(netif); } } } } /** check if AutoIP supplied netif->ip_addr * * @param netif the netif to check * @return 1 if AutoIP supplied netif->ip_addr (state BOUND or ANNOUNCING), * 0 otherwise */ u8_t autoip_supplied_address(const struct netif *netif) { if ((netif != NULL) && (netif_autoip_data(netif) != NULL)) { struct autoip* autoip = netif_autoip_data(netif); return (autoip->state == AUTOIP_STATE_BOUND) || (autoip->state == AUTOIP_STATE_ANNOUNCING); } return 0; } u8_t autoip_accept_packet(struct netif *netif, const ip4_addr_t *addr) { struct autoip* autoip = netif_autoip_data(netif); return (autoip != NULL) && ip4_addr_cmp(addr, &(autoip->llipaddr)); } #endif /* LWIP_IPV4 && LWIP_AUTOIP */
2301_81045437/classic-platform
communication/lwip-2.0.3/src/core/ipv4/autoip.c
C
unknown
18,749
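Following the usage notes at the top of the file above, running AutoIP without DHCP only requires starting the client on an already-added, up interface and keeping autoip_tmr() ticking. A minimal sketch under the assumption of a bare-metal build in which the application drives the timer itself (if sys_check_timeouts() is used, lwIP's own timeout list already handles this); eth0 and millis() are hypothetical application-side names:

#include "lwip/opt.h"
#include "lwip/netif.h"
#include "lwip/autoip.h"

extern struct netif eth0;            /* assumed: already netif_add()ed and netif_set_up() */
extern u32_t millis(void);           /* hypothetical millisecond tick source */

static struct autoip eth0_autoip;    /* static storage so autoip_start() skips mem_malloc() */

void start_linklocal(void)
{
  autoip_set_struct(&eth0, &eth0_autoip);  /* optional, see autoip_set_struct() above */
  autoip_start(&eth0);                     /* begin probing for a 169.254.x.x address */
}

void linklocal_poll(void)
{
  static u32_t last;
  if ((u32_t)(millis() - last) >= AUTOIP_TMR_INTERVAL) {
    last = millis();
    autoip_tmr();                          /* must run every AUTOIP_TMR_INTERVAL ms */
  }
}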
/** * @file * Dynamic Host Configuration Protocol client * * @defgroup dhcp4 DHCPv4 * @ingroup ip4 * DHCP (IPv4) related functions * This is a DHCP client for the lwIP TCP/IP stack. It aims to conform * with RFC 2131 and RFC 2132. * * @todo: * - Support for interfaces other than Ethernet (SLIP, PPP, ...) * * Options: * @ref DHCP_COARSE_TIMER_SECS (recommended 60 which is a minute) * @ref DHCP_FINE_TIMER_MSECS (recommended 500 which equals TCP coarse timer) * * dhcp_start() starts a DHCP client instance which * configures the interface by obtaining an IP address lease and maintaining it. * * Use dhcp_release() to end the lease and use dhcp_stop() * to remove the DHCP client. * * @see netifapi_dhcp4 */ /* * Copyright (c) 2001-2004 Leon Woestenberg <leon.woestenberg@gmx.net> * Copyright (c) 2001-2004 Axon Digital Design B.V., The Netherlands. * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY * OF SUCH DAMAGE. * * This file is part of the lwIP TCP/IP stack. * The Swedish Institute of Computer Science and Adam Dunkels * are specifically granted permission to redistribute this * source code. * * Author: Leon Woestenberg <leon.woestenberg@gmx.net> * */ #include "lwip/opt.h" #if LWIP_IPV4 && LWIP_DHCP /* don't build if not configured for use in lwipopts.h */ #include "lwip/stats.h" #include "lwip/mem.h" #include "lwip/udp.h" #include "lwip/ip_addr.h" #include "lwip/netif.h" #include "lwip/def.h" #include "lwip/dhcp.h" #include "lwip/autoip.h" #include "lwip/dns.h" #include "lwip/etharp.h" #include "lwip/prot/dhcp.h" #include <string.h> /** DHCP_CREATE_RAND_XID: if this is set to 1, the xid is created using * LWIP_RAND() (this overrides DHCP_GLOBAL_XID) */ #ifndef DHCP_CREATE_RAND_XID #define DHCP_CREATE_RAND_XID 1 #endif /** Default for DHCP_GLOBAL_XID is 0xABCD0000 * This can be changed by defining DHCP_GLOBAL_XID and DHCP_GLOBAL_XID_HEADER, e.g. 
* \#define DHCP_GLOBAL_XID_HEADER "stdlib.h" * \#define DHCP_GLOBAL_XID rand() */ #ifdef DHCP_GLOBAL_XID_HEADER #include DHCP_GLOBAL_XID_HEADER /* include optional starting XID generation prototypes */ #endif /** DHCP_OPTION_MAX_MSG_SIZE is set to the MTU * MTU is checked to be big enough in dhcp_start */ #define DHCP_MAX_MSG_LEN(netif) (netif->mtu) #define DHCP_MAX_MSG_LEN_MIN_REQUIRED 576 /** Minimum length for reply before packet is parsed */ #define DHCP_MIN_REPLY_LEN 44 #define REBOOT_TRIES 2 #if LWIP_DNS && LWIP_DHCP_MAX_DNS_SERVERS #if DNS_MAX_SERVERS > LWIP_DHCP_MAX_DNS_SERVERS #define LWIP_DHCP_PROVIDE_DNS_SERVERS LWIP_DHCP_MAX_DNS_SERVERS #else #define LWIP_DHCP_PROVIDE_DNS_SERVERS DNS_MAX_SERVERS #endif #else #define LWIP_DHCP_PROVIDE_DNS_SERVERS 0 #endif /** Option handling: options are parsed in dhcp_parse_reply * and saved in an array where other functions can load them from. * This might be moved into the struct dhcp (not necessarily since * lwIP is single-threaded and the array is only used while in recv * callback). */ enum dhcp_option_idx { DHCP_OPTION_IDX_OVERLOAD = 0, DHCP_OPTION_IDX_MSG_TYPE, DHCP_OPTION_IDX_SERVER_ID, DHCP_OPTION_IDX_LEASE_TIME, DHCP_OPTION_IDX_T1, DHCP_OPTION_IDX_T2, DHCP_OPTION_IDX_SUBNET_MASK, DHCP_OPTION_IDX_ROUTER, #if LWIP_DHCP_PROVIDE_DNS_SERVERS DHCP_OPTION_IDX_DNS_SERVER, DHCP_OPTION_IDX_DNS_SERVER_LAST = DHCP_OPTION_IDX_DNS_SERVER + LWIP_DHCP_PROVIDE_DNS_SERVERS - 1, #endif /* LWIP_DHCP_PROVIDE_DNS_SERVERS */ #if LWIP_DHCP_GET_NTP_SRV DHCP_OPTION_IDX_NTP_SERVER, DHCP_OPTION_IDX_NTP_SERVER_LAST = DHCP_OPTION_IDX_NTP_SERVER + LWIP_DHCP_MAX_NTP_SERVERS - 1, #endif /* LWIP_DHCP_GET_NTP_SRV */ DHCP_OPTION_IDX_MAX }; /** Holds the decoded option values, only valid while in dhcp_recv. @todo: move this into struct dhcp? */ u32_t dhcp_rx_options_val[DHCP_OPTION_IDX_MAX]; /** Holds a flag which option was received and is contained in dhcp_rx_options_val, only valid while in dhcp_recv. @todo: move this into struct dhcp? 
*/ u8_t dhcp_rx_options_given[DHCP_OPTION_IDX_MAX]; static u8_t dhcp_discover_request_options[] = { DHCP_OPTION_SUBNET_MASK, DHCP_OPTION_ROUTER, DHCP_OPTION_BROADCAST #if LWIP_DHCP_PROVIDE_DNS_SERVERS , DHCP_OPTION_DNS_SERVER #endif /* LWIP_DHCP_PROVIDE_DNS_SERVERS */ #if LWIP_DHCP_GET_NTP_SRV , DHCP_OPTION_NTP #endif /* LWIP_DHCP_GET_NTP_SRV */ }; #ifdef DHCP_GLOBAL_XID static u32_t xid; static u8_t xid_initialised; #endif /* DHCP_GLOBAL_XID */ #define dhcp_option_given(dhcp, idx) (dhcp_rx_options_given[idx] != 0) #define dhcp_got_option(dhcp, idx) (dhcp_rx_options_given[idx] = 1) #define dhcp_clear_option(dhcp, idx) (dhcp_rx_options_given[idx] = 0) #define dhcp_clear_all_options(dhcp) (memset(dhcp_rx_options_given, 0, sizeof(dhcp_rx_options_given))) #define dhcp_get_option_value(dhcp, idx) (dhcp_rx_options_val[idx]) #define dhcp_set_option_value(dhcp, idx, val) (dhcp_rx_options_val[idx] = (val)) static struct udp_pcb *dhcp_pcb; static u8_t dhcp_pcb_refcount; /* DHCP client state machine functions */ static err_t dhcp_discover(struct netif *netif); static err_t dhcp_select(struct netif *netif); static void dhcp_bind(struct netif *netif); #if DHCP_DOES_ARP_CHECK static err_t dhcp_decline(struct netif *netif); #endif /* DHCP_DOES_ARP_CHECK */ static err_t dhcp_rebind(struct netif *netif); static err_t dhcp_reboot(struct netif *netif); static void dhcp_set_state(struct dhcp *dhcp, u8_t new_state); /* receive, unfold, parse and free incoming messages */ static void dhcp_recv(void *arg, struct udp_pcb *pcb, struct pbuf *p, const ip_addr_t *addr, u16_t port); /* set the DHCP timers */ static void dhcp_timeout(struct netif *netif); static void dhcp_t1_timeout(struct netif *netif); static void dhcp_t2_timeout(struct netif *netif); /* build outgoing messages */ /* create a DHCP message, fill in common headers */ static err_t dhcp_create_msg(struct netif *netif, struct dhcp *dhcp, u8_t message_type); /* free a DHCP request */ static void dhcp_delete_msg(struct dhcp *dhcp); /* add a DHCP option (type, then length in bytes) */ static void dhcp_option(struct dhcp *dhcp, u8_t option_type, u8_t option_len); /* add option values */ static void dhcp_option_byte(struct dhcp *dhcp, u8_t value); static void dhcp_option_short(struct dhcp *dhcp, u16_t value); static void dhcp_option_long(struct dhcp *dhcp, u32_t value); #if LWIP_NETIF_HOSTNAME static void dhcp_option_hostname(struct dhcp *dhcp, struct netif *netif); #endif /* LWIP_NETIF_HOSTNAME */ /* always add the DHCP options trailer to end and pad */ static void dhcp_option_trailer(struct dhcp *dhcp); /** Ensure DHCP PCB is allocated and bound */ static err_t dhcp_inc_pcb_refcount(void) { if (dhcp_pcb_refcount == 0) { LWIP_ASSERT("dhcp_inc_pcb_refcount(): memory leak", dhcp_pcb == NULL); /* allocate UDP PCB */ dhcp_pcb = udp_new(); if (dhcp_pcb == NULL) { return ERR_MEM; } ip_set_option(dhcp_pcb, SOF_BROADCAST); /* set up local and remote port for the pcb -> listen on all interfaces on all src/dest IPs */ udp_bind(dhcp_pcb, IP4_ADDR_ANY, DHCP_CLIENT_PORT); udp_connect(dhcp_pcb, IP4_ADDR_ANY, DHCP_SERVER_PORT); udp_recv(dhcp_pcb, dhcp_recv, NULL); } dhcp_pcb_refcount++; return ERR_OK; } /** Free DHCP PCB if the last netif stops using it */ static void dhcp_dec_pcb_refcount(void) { LWIP_ASSERT("dhcp_pcb_refcount(): refcount error", (dhcp_pcb_refcount > 0)); dhcp_pcb_refcount--; if (dhcp_pcb_refcount == 0) { udp_remove(dhcp_pcb); dhcp_pcb = NULL; } } /** * Back-off the DHCP client (because of a received NAK response). 
* * Back-off the DHCP client because of a received NAK. Receiving a * NAK means the client asked for something non-sensible, for * example when it tries to renew a lease obtained on another network. * * We clear any existing set IP address and restart DHCP negotiation * afresh (as per RFC2131 3.2.3). * * @param netif the netif under DHCP control */ static void dhcp_handle_nak(struct netif *netif) { struct dhcp *dhcp = netif_dhcp_data(netif); LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_handle_nak(netif=%p) %c%c%"U16_F"\n", (void*)netif, netif->name[0], netif->name[1], (u16_t)netif->num)); /* Change to a defined state - set this before assigning the address to ensure the callback can use dhcp_supplied_address() */ dhcp_set_state(dhcp, DHCP_STATE_BACKING_OFF); /* remove IP address from interface (must no longer be used, as per RFC2131) */ netif_set_addr(netif, IP4_ADDR_ANY4, IP4_ADDR_ANY4, IP4_ADDR_ANY4); /* We can immediately restart discovery */ dhcp_discover(netif); } #if DHCP_DOES_ARP_CHECK /** * Checks if the offered IP address is already in use. * * It does so by sending an ARP request for the offered address and * entering CHECKING state. If no ARP reply is received within a small * interval, the address is assumed to be free for use by us. * * @param netif the netif under DHCP control */ static void dhcp_check(struct netif *netif) { struct dhcp *dhcp = netif_dhcp_data(netif); err_t result; u16_t msecs; LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_check(netif=%p) %c%c\n", (void *)netif, (s16_t)netif->name[0], (s16_t)netif->name[1])); dhcp_set_state(dhcp, DHCP_STATE_CHECKING); /* create an ARP query for the offered IP address, expecting that no host responds, as the IP address should not be in use. */ result = etharp_query(netif, &dhcp->offered_ip_addr, NULL); if (result != ERR_OK) { LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_WARNING, ("dhcp_check: could not perform ARP query\n")); } if (dhcp->tries < 255) { dhcp->tries++; } msecs = 500; dhcp->request_timeout = (msecs + DHCP_FINE_TIMER_MSECS - 1) / DHCP_FINE_TIMER_MSECS; LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_check(): set request timeout %"U16_F" msecs\n", msecs)); } #endif /* DHCP_DOES_ARP_CHECK */ /** * Remember the configuration offered by a DHCP server. * * @param netif the netif under DHCP control */ static void dhcp_handle_offer(struct netif *netif) { struct dhcp *dhcp = netif_dhcp_data(netif); LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_handle_offer(netif=%p) %c%c%"U16_F"\n", (void*)netif, netif->name[0], netif->name[1], (u16_t)netif->num)); /* obtain the server address */ if (dhcp_option_given(dhcp, DHCP_OPTION_IDX_SERVER_ID)) { ip_addr_set_ip4_u32(&dhcp->server_ip_addr, lwip_htonl(dhcp_get_option_value(dhcp, DHCP_OPTION_IDX_SERVER_ID))); LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_STATE, ("dhcp_handle_offer(): server 0x%08"X32_F"\n", ip4_addr_get_u32(ip_2_ip4(&dhcp->server_ip_addr)))); /* remember offered address */ ip4_addr_copy(dhcp->offered_ip_addr, dhcp->msg_in->yiaddr); LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_STATE, ("dhcp_handle_offer(): offer for 0x%08"X32_F"\n", ip4_addr_get_u32(&dhcp->offered_ip_addr))); dhcp_select(netif); } else { LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, ("dhcp_handle_offer(netif=%p) did not get server ID!\n", (void*)netif)); } } /** * Select a DHCP server offer out of all offers. * * Simply select the first offer received. 
* * @param netif the netif under DHCP control * @return lwIP specific error (see error.h) */ static err_t dhcp_select(struct netif *netif) { struct dhcp *dhcp = netif_dhcp_data(netif); err_t result; u16_t msecs; u8_t i; LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_select(netif=%p) %c%c%"U16_F"\n", (void*)netif, netif->name[0], netif->name[1], (u16_t)netif->num)); dhcp_set_state(dhcp, DHCP_STATE_REQUESTING); /* create and initialize the DHCP message header */ result = dhcp_create_msg(netif, dhcp, DHCP_REQUEST); if (result == ERR_OK) { dhcp_option(dhcp, DHCP_OPTION_MAX_MSG_SIZE, DHCP_OPTION_MAX_MSG_SIZE_LEN); dhcp_option_short(dhcp, DHCP_MAX_MSG_LEN(netif)); /* MUST request the offered IP address */ dhcp_option(dhcp, DHCP_OPTION_REQUESTED_IP, 4); dhcp_option_long(dhcp, lwip_ntohl(ip4_addr_get_u32(&dhcp->offered_ip_addr))); dhcp_option(dhcp, DHCP_OPTION_SERVER_ID, 4); dhcp_option_long(dhcp, lwip_ntohl(ip4_addr_get_u32(ip_2_ip4(&dhcp->server_ip_addr)))); dhcp_option(dhcp, DHCP_OPTION_PARAMETER_REQUEST_LIST, LWIP_ARRAYSIZE(dhcp_discover_request_options)); for (i = 0; i < LWIP_ARRAYSIZE(dhcp_discover_request_options); i++) { dhcp_option_byte(dhcp, dhcp_discover_request_options[i]); } #if LWIP_NETIF_HOSTNAME dhcp_option_hostname(dhcp, netif); #endif /* LWIP_NETIF_HOSTNAME */ dhcp_option_trailer(dhcp); /* shrink the pbuf to the actual content length */ pbuf_realloc(dhcp->p_out, sizeof(struct dhcp_msg) - DHCP_OPTIONS_LEN + dhcp->options_out_len); /* send broadcast to any DHCP server */ udp_sendto_if_src(dhcp_pcb, dhcp->p_out, IP_ADDR_BROADCAST, DHCP_SERVER_PORT, netif, IP4_ADDR_ANY); dhcp_delete_msg(dhcp); LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_select: REQUESTING\n")); } else { LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_WARNING, ("dhcp_select: could not allocate DHCP request\n")); } if (dhcp->tries < 255) { dhcp->tries++; } msecs = (dhcp->tries < 6 ? 1 << dhcp->tries : 60) * 1000; dhcp->request_timeout = (msecs + DHCP_FINE_TIMER_MSECS - 1) / DHCP_FINE_TIMER_MSECS; LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_STATE, ("dhcp_select(): set request timeout %"U16_F" msecs\n", msecs)); return result; } /** * The DHCP timer that checks for lease renewal/rebind timeouts. * Must be called once a minute (see @ref DHCP_COARSE_TIMER_SECS). */ void dhcp_coarse_tmr(void) { struct netif *netif = netif_list; LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_coarse_tmr()\n")); /* iterate through all network interfaces */ while (netif != NULL) { /* only act on DHCP configured interfaces */ struct dhcp *dhcp = netif_dhcp_data(netif); if ((dhcp != NULL) && (dhcp->state != DHCP_STATE_OFF)) { /* compare lease time to expire timeout */ if (dhcp->t0_timeout && (++dhcp->lease_used == dhcp->t0_timeout)) { LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_coarse_tmr(): t0 timeout\n")); /* this clients' lease time has expired */ dhcp_release(netif); dhcp_discover(netif); /* timer is active (non zero), and triggers (zeroes) now? 
*/ } else if (dhcp->t2_rebind_time && (dhcp->t2_rebind_time-- == 1)) { LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_coarse_tmr(): t2 timeout\n")); /* this clients' rebind timeout triggered */ dhcp_t2_timeout(netif); /* timer is active (non zero), and triggers (zeroes) now */ } else if (dhcp->t1_renew_time && (dhcp->t1_renew_time-- == 1)) { LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_coarse_tmr(): t1 timeout\n")); /* this clients' renewal timeout triggered */ dhcp_t1_timeout(netif); } } /* proceed to next netif */ netif = netif->next; } } /** * DHCP transaction timeout handling (this function must be called every 500ms, * see @ref DHCP_FINE_TIMER_MSECS). * * A DHCP server is expected to respond within a short period of time. * This timer checks whether an outstanding DHCP request is timed out. */ void dhcp_fine_tmr(void) { struct netif *netif = netif_list; /* loop through netif's */ while (netif != NULL) { struct dhcp *dhcp = netif_dhcp_data(netif); /* only act on DHCP configured interfaces */ if (dhcp != NULL) { /* timer is active (non zero), and is about to trigger now */ if (dhcp->request_timeout > 1) { dhcp->request_timeout--; } else if (dhcp->request_timeout == 1) { dhcp->request_timeout--; /* { netif->dhcp->request_timeout == 0 } */ LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_fine_tmr(): request timeout\n")); /* this client's request timeout triggered */ dhcp_timeout(netif); } } /* proceed to next network interface */ netif = netif->next; } } /** * A DHCP negotiation transaction, or ARP request, has timed out. * * The timer that was started with the DHCP or ARP request has * timed out, indicating no response was received in time. * * @param netif the netif under DHCP control */ static void dhcp_timeout(struct netif *netif) { struct dhcp *dhcp = netif_dhcp_data(netif); LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_timeout()\n")); /* back-off period has passed, or server selection timed out */ if ((dhcp->state == DHCP_STATE_BACKING_OFF) || (dhcp->state == DHCP_STATE_SELECTING)) { LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_timeout(): restarting discovery\n")); dhcp_discover(netif); /* receiving the requested lease timed out */ } else if (dhcp->state == DHCP_STATE_REQUESTING) { LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_timeout(): REQUESTING, DHCP request timed out\n")); if (dhcp->tries <= 5) { dhcp_select(netif); } else { LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_timeout(): REQUESTING, releasing, restarting\n")); dhcp_release(netif); dhcp_discover(netif); } #if DHCP_DOES_ARP_CHECK /* received no ARP reply for the offered address (which is good) */ } else if (dhcp->state == DHCP_STATE_CHECKING) { LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_timeout(): CHECKING, ARP request timed out\n")); if (dhcp->tries <= 1) { dhcp_check(netif); /* no ARP replies on the offered address, looks like the IP address is indeed free */ } else { /* bind the interface to the offered address */ dhcp_bind(netif); } #endif /* DHCP_DOES_ARP_CHECK */ } else if (dhcp->state == DHCP_STATE_REBOOTING) { if (dhcp->tries < REBOOT_TRIES) { dhcp_reboot(netif); } else { dhcp_discover(netif); } } } /** * The renewal period has timed out. 
* * @param netif the netif under DHCP control */ static void dhcp_t1_timeout(struct netif *netif) { struct dhcp *dhcp = netif_dhcp_data(netif); LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_STATE, ("dhcp_t1_timeout()\n")); if ((dhcp->state == DHCP_STATE_REQUESTING) || (dhcp->state == DHCP_STATE_BOUND) || (dhcp->state == DHCP_STATE_RENEWING)) { /* just retry to renew - note that the rebind timer (t2) will * eventually time-out if renew tries fail. */ LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_t1_timeout(): must renew\n")); /* This slightly different to RFC2131: DHCPREQUEST will be sent from state DHCP_STATE_RENEWING, not DHCP_STATE_BOUND */ dhcp_renew(netif); /* Calculate next timeout */ if (((dhcp->t2_timeout - dhcp->lease_used) / 2) >= ((60 + DHCP_COARSE_TIMER_SECS / 2) / DHCP_COARSE_TIMER_SECS)) { dhcp->t1_renew_time = ((dhcp->t2_timeout - dhcp->lease_used) / 2); } } } /** * The rebind period has timed out. * * @param netif the netif under DHCP control */ static void dhcp_t2_timeout(struct netif *netif) { struct dhcp *dhcp = netif_dhcp_data(netif); LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_t2_timeout()\n")); if ((dhcp->state == DHCP_STATE_REQUESTING) || (dhcp->state == DHCP_STATE_BOUND) || (dhcp->state == DHCP_STATE_RENEWING) || (dhcp->state == DHCP_STATE_REBINDING)) { /* just retry to rebind */ LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_t2_timeout(): must rebind\n")); /* This slightly different to RFC2131: DHCPREQUEST will be sent from state DHCP_STATE_REBINDING, not DHCP_STATE_BOUND */ dhcp_rebind(netif); /* Calculate next timeout */ if (((dhcp->t0_timeout - dhcp->lease_used) / 2) >= ((60 + DHCP_COARSE_TIMER_SECS / 2) / DHCP_COARSE_TIMER_SECS)) { dhcp->t2_rebind_time = ((dhcp->t0_timeout - dhcp->lease_used) / 2); } } } /** * Handle a DHCP ACK packet * * @param netif the netif under DHCP control */ static void dhcp_handle_ack(struct netif *netif) { struct dhcp *dhcp = netif_dhcp_data(netif); #if LWIP_DHCP_PROVIDE_DNS_SERVERS || LWIP_DHCP_GET_NTP_SRV u8_t n; #endif /* LWIP_DHCP_PROVIDE_DNS_SERVERS || LWIP_DHCP_GET_NTP_SRV */ #if LWIP_DHCP_GET_NTP_SRV ip4_addr_t ntp_server_addrs[LWIP_DHCP_MAX_NTP_SERVERS]; #endif /* clear options we might not get from the ACK */ ip4_addr_set_zero(&dhcp->offered_sn_mask); ip4_addr_set_zero(&dhcp->offered_gw_addr); #if LWIP_DHCP_BOOTP_FILE ip4_addr_set_zero(&dhcp->offered_si_addr); #endif /* LWIP_DHCP_BOOTP_FILE */ /* lease time given? */ if (dhcp_option_given(dhcp, DHCP_OPTION_IDX_LEASE_TIME)) { /* remember offered lease time */ dhcp->offered_t0_lease = dhcp_get_option_value(dhcp, DHCP_OPTION_IDX_LEASE_TIME); } /* renewal period given? */ if (dhcp_option_given(dhcp, DHCP_OPTION_IDX_T1)) { /* remember given renewal period */ dhcp->offered_t1_renew = dhcp_get_option_value(dhcp, DHCP_OPTION_IDX_T1); } else { /* calculate safe periods for renewal */ dhcp->offered_t1_renew = dhcp->offered_t0_lease / 2; } /* renewal period given? 
*/ if (dhcp_option_given(dhcp, DHCP_OPTION_IDX_T2)) { /* remember given rebind period */ dhcp->offered_t2_rebind = dhcp_get_option_value(dhcp, DHCP_OPTION_IDX_T2); } else { /* calculate safe periods for rebinding (offered_t0_lease * 0.875 -> 87.5%)*/ dhcp->offered_t2_rebind = (dhcp->offered_t0_lease * 7U) / 8U; } /* (y)our internet address */ ip4_addr_copy(dhcp->offered_ip_addr, dhcp->msg_in->yiaddr); #if LWIP_DHCP_BOOTP_FILE /* copy boot server address, boot file name copied in dhcp_parse_reply if not overloaded */ ip4_addr_copy(dhcp->offered_si_addr, dhcp->msg_in->siaddr); #endif /* LWIP_DHCP_BOOTP_FILE */ /* subnet mask given? */ if (dhcp_option_given(dhcp, DHCP_OPTION_IDX_SUBNET_MASK)) { /* remember given subnet mask */ ip4_addr_set_u32(&dhcp->offered_sn_mask, lwip_htonl(dhcp_get_option_value(dhcp, DHCP_OPTION_IDX_SUBNET_MASK))); dhcp->subnet_mask_given = 1; } else { dhcp->subnet_mask_given = 0; } /* gateway router */ if (dhcp_option_given(dhcp, DHCP_OPTION_IDX_ROUTER)) { ip4_addr_set_u32(&dhcp->offered_gw_addr, lwip_htonl(dhcp_get_option_value(dhcp, DHCP_OPTION_IDX_ROUTER))); } #if LWIP_DHCP_GET_NTP_SRV /* NTP servers */ for (n = 0; (n < LWIP_DHCP_MAX_NTP_SERVERS) && dhcp_option_given(dhcp, DHCP_OPTION_IDX_NTP_SERVER + n); n++) { ip4_addr_set_u32(&ntp_server_addrs[n], lwip_htonl(dhcp_get_option_value(dhcp, DHCP_OPTION_IDX_NTP_SERVER + n))); } dhcp_set_ntp_servers(n, ntp_server_addrs); #endif /* LWIP_DHCP_GET_NTP_SRV */ #if LWIP_DHCP_PROVIDE_DNS_SERVERS /* DNS servers */ for (n = 0; (n < LWIP_DHCP_PROVIDE_DNS_SERVERS) && dhcp_option_given(dhcp, DHCP_OPTION_IDX_DNS_SERVER + n); n++) { ip_addr_t dns_addr; ip_addr_set_ip4_u32(&dns_addr, lwip_htonl(dhcp_get_option_value(dhcp, DHCP_OPTION_IDX_DNS_SERVER + n))); dns_setserver(n, &dns_addr); } #endif /* LWIP_DHCP_PROVIDE_DNS_SERVERS */ } /** * @ingroup dhcp4 * Set a statically allocated struct dhcp to work with. * Using this prevents dhcp_start to allocate it using mem_malloc. * * @param netif the netif for which to set the struct dhcp * @param dhcp (uninitialised) dhcp struct allocated by the application */ void dhcp_set_struct(struct netif *netif, struct dhcp *dhcp) { LWIP_ASSERT("netif != NULL", netif != NULL); LWIP_ASSERT("dhcp != NULL", dhcp != NULL); LWIP_ASSERT("netif already has a struct dhcp set", netif_dhcp_data(netif) == NULL); /* clear data structure */ memset(dhcp, 0, sizeof(struct dhcp)); /* dhcp_set_state(&dhcp, DHCP_STATE_OFF); */ netif_set_client_data(netif, LWIP_NETIF_CLIENT_DATA_INDEX_DHCP, dhcp); } /** * @ingroup dhcp4 * Removes a struct dhcp from a netif. * * ATTENTION: Only use this when not using dhcp_set_struct() to allocate the * struct dhcp since the memory is passed back to the heap. * * @param netif the netif from which to remove the struct dhcp */ void dhcp_cleanup(struct netif *netif) { LWIP_ASSERT("netif != NULL", netif != NULL); if (netif_dhcp_data(netif) != NULL) { mem_free(netif_dhcp_data(netif)); netif_set_client_data(netif, LWIP_NETIF_CLIENT_DATA_INDEX_DHCP, NULL); } } /** * @ingroup dhcp4 * Start DHCP negotiation for a network interface. * * If no DHCP client instance was attached to this interface, * a new client is created first. If a DHCP client instance * was already present, it restarts negotiation. 
* * @param netif The lwIP network interface * @return lwIP error code * - ERR_OK - No error * - ERR_MEM - Out of memory */ err_t dhcp_start(struct netif *netif) { struct dhcp *dhcp; err_t result; LWIP_ERROR("netif != NULL", (netif != NULL), return ERR_ARG;); LWIP_ERROR("netif is not up, old style port?", netif_is_up(netif), return ERR_ARG;); dhcp = netif_dhcp_data(netif); LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_start(netif=%p) %c%c%"U16_F"\n", (void*)netif, netif->name[0], netif->name[1], (u16_t)netif->num)); /* check MTU of the netif */ if (netif->mtu < DHCP_MAX_MSG_LEN_MIN_REQUIRED) { LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_start(): Cannot use this netif with DHCP: MTU is too small\n")); return ERR_MEM; } /* no DHCP client attached yet? */ if (dhcp == NULL) { LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_start(): mallocing new DHCP client\n")); dhcp = (struct dhcp *)mem_malloc(sizeof(struct dhcp)); if (dhcp == NULL) { LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_start(): could not allocate dhcp\n")); return ERR_MEM; } /* store this dhcp client in the netif */ netif_set_client_data(netif, LWIP_NETIF_CLIENT_DATA_INDEX_DHCP, dhcp); LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_start(): allocated dhcp")); /* already has DHCP client attached */ } else { LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_start(): restarting DHCP configuration\n")); LWIP_ASSERT("pbuf p_out wasn't freed", dhcp->p_out == NULL); LWIP_ASSERT("reply wasn't freed", dhcp->msg_in == NULL ); if (dhcp->pcb_allocated != 0) { dhcp_dec_pcb_refcount(); /* free DHCP PCB if not needed any more */ } /* dhcp is cleared below, no need to reset flag*/ } /* clear data structure */ memset(dhcp, 0, sizeof(struct dhcp)); /* dhcp_set_state(&dhcp, DHCP_STATE_OFF); */ LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_start(): starting DHCP configuration\n")); if (dhcp_inc_pcb_refcount() != ERR_OK) { /* ensure DHCP PCB is allocated */ return ERR_MEM; } dhcp->pcb_allocated = 1; #if LWIP_DHCP_CHECK_LINK_UP if (!netif_is_link_up(netif)) { /* set state INIT and wait for dhcp_network_changed() to call dhcp_discover() */ dhcp_set_state(dhcp, DHCP_STATE_INIT); return ERR_OK; } #endif /* LWIP_DHCP_CHECK_LINK_UP */ /* (re)start the DHCP negotiation */ result = dhcp_discover(netif); if (result != ERR_OK) { /* free resources allocated above */ dhcp_stop(netif); return ERR_MEM; } return result; } /** * @ingroup dhcp4 * Inform a DHCP server of our manual configuration. * * This informs DHCP servers of our fixed IP address configuration * by sending an INFORM message. It does not involve DHCP address * configuration, it is just here to be nice to the network. 
* * @param netif The lwIP network interface */ void dhcp_inform(struct netif *netif) { struct dhcp dhcp; err_t result = ERR_OK; LWIP_ERROR("netif != NULL", (netif != NULL), return;); if (dhcp_inc_pcb_refcount() != ERR_OK) { /* ensure DHCP PCB is allocated */ return; } memset(&dhcp, 0, sizeof(struct dhcp)); dhcp_set_state(&dhcp, DHCP_STATE_INFORMING); /* create and initialize the DHCP message header */ result = dhcp_create_msg(netif, &dhcp, DHCP_INFORM); if (result == ERR_OK) { dhcp_option(&dhcp, DHCP_OPTION_MAX_MSG_SIZE, DHCP_OPTION_MAX_MSG_SIZE_LEN); dhcp_option_short(&dhcp, DHCP_MAX_MSG_LEN(netif)); dhcp_option_trailer(&dhcp); pbuf_realloc(dhcp.p_out, sizeof(struct dhcp_msg) - DHCP_OPTIONS_LEN + dhcp.options_out_len); LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_inform: INFORMING\n")); udp_sendto_if(dhcp_pcb, dhcp.p_out, IP_ADDR_BROADCAST, DHCP_SERVER_PORT, netif); dhcp_delete_msg(&dhcp); } else { LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, ("dhcp_inform: could not allocate DHCP request\n")); } dhcp_dec_pcb_refcount(); /* delete DHCP PCB if not needed any more */ } /** Handle a possible change in the network configuration. * * This enters the REBOOTING state to verify that the currently bound * address is still valid. */ void dhcp_network_changed(struct netif *netif) { struct dhcp *dhcp = netif_dhcp_data(netif); if (!dhcp) return; switch (dhcp->state) { case DHCP_STATE_REBINDING: case DHCP_STATE_RENEWING: case DHCP_STATE_BOUND: case DHCP_STATE_REBOOTING: dhcp->tries = 0; dhcp_reboot(netif); break; case DHCP_STATE_OFF: /* stay off */ break; default: /* INIT/REQUESTING/CHECKING/BACKING_OFF restart with new 'rid' because the state changes, SELECTING: continue with current 'rid' as we stay in the same state */ #if LWIP_DHCP_AUTOIP_COOP if (dhcp->autoip_coop_state == DHCP_AUTOIP_COOP_STATE_ON) { autoip_stop(netif); dhcp->autoip_coop_state = DHCP_AUTOIP_COOP_STATE_OFF; } #endif /* LWIP_DHCP_AUTOIP_COOP */ /* ensure we start with short timeouts, even if already discovering */ dhcp->tries = 0; dhcp_discover(netif); break; } } #if DHCP_DOES_ARP_CHECK /** * Match an ARP reply with the offered IP address: * check whether the offered IP address is not in use using ARP * * @param netif the network interface on which the reply was received * @param addr The IP address we received a reply from */ void dhcp_arp_reply(struct netif *netif, const ip4_addr_t *addr) { struct dhcp *dhcp; LWIP_ERROR("netif != NULL", (netif != NULL), return;); dhcp = netif_dhcp_data(netif); LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_arp_reply()\n")); /* is a DHCP client doing an ARP check? */ if ((dhcp != NULL) && (dhcp->state == DHCP_STATE_CHECKING)) { LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_arp_reply(): CHECKING, arp reply for 0x%08"X32_F"\n", ip4_addr_get_u32(addr))); /* did a host respond with the address we were offered by the DHCP server? */ if (ip4_addr_cmp(addr, &dhcp->offered_ip_addr)) { /* we will not accept the offered address */ LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE | LWIP_DBG_LEVEL_WARNING, ("dhcp_arp_reply(): arp reply matched with offered address, declining\n")); dhcp_decline(netif); } } } /** * Decline an offered lease. * * Tell the DHCP server we do not accept the offered address. * One reason to decline the lease is when we find out the address * is already in use by another host (through ARP). 
* * @param netif the netif under DHCP control */ static err_t dhcp_decline(struct netif *netif) { struct dhcp *dhcp = netif_dhcp_data(netif); err_t result = ERR_OK; u16_t msecs; LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_decline()\n")); dhcp_set_state(dhcp, DHCP_STATE_BACKING_OFF); /* create and initialize the DHCP message header */ result = dhcp_create_msg(netif, dhcp, DHCP_DECLINE); if (result == ERR_OK) { dhcp_option(dhcp, DHCP_OPTION_REQUESTED_IP, 4); dhcp_option_long(dhcp, lwip_ntohl(ip4_addr_get_u32(&dhcp->offered_ip_addr))); dhcp_option_trailer(dhcp); /* resize pbuf to reflect true size of options */ pbuf_realloc(dhcp->p_out, sizeof(struct dhcp_msg) - DHCP_OPTIONS_LEN + dhcp->options_out_len); /* per section 4.4.4, broadcast DECLINE messages */ udp_sendto_if_src(dhcp_pcb, dhcp->p_out, IP_ADDR_BROADCAST, DHCP_SERVER_PORT, netif, IP4_ADDR_ANY); dhcp_delete_msg(dhcp); LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_decline: BACKING OFF\n")); } else { LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, ("dhcp_decline: could not allocate DHCP request\n")); } if (dhcp->tries < 255) { dhcp->tries++; } msecs = 10*1000; dhcp->request_timeout = (msecs + DHCP_FINE_TIMER_MSECS - 1) / DHCP_FINE_TIMER_MSECS; LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_decline(): set request timeout %"U16_F" msecs\n", msecs)); return result; } #endif /* DHCP_DOES_ARP_CHECK */ /** * Start the DHCP process, discover a DHCP server. * * @param netif the netif under DHCP control */ static err_t dhcp_discover(struct netif *netif) { struct dhcp *dhcp = netif_dhcp_data(netif); err_t result = ERR_OK; u16_t msecs; u8_t i; LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_discover()\n")); ip4_addr_set_any(&dhcp->offered_ip_addr); dhcp_set_state(dhcp, DHCP_STATE_SELECTING); /* create and initialize the DHCP message header */ result = dhcp_create_msg(netif, dhcp, DHCP_DISCOVER); if (result == ERR_OK) { LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_discover: making request\n")); dhcp_option(dhcp, DHCP_OPTION_MAX_MSG_SIZE, DHCP_OPTION_MAX_MSG_SIZE_LEN); dhcp_option_short(dhcp, DHCP_MAX_MSG_LEN(netif)); dhcp_option(dhcp, DHCP_OPTION_PARAMETER_REQUEST_LIST, LWIP_ARRAYSIZE(dhcp_discover_request_options)); for (i = 0; i < LWIP_ARRAYSIZE(dhcp_discover_request_options); i++) { dhcp_option_byte(dhcp, dhcp_discover_request_options[i]); } dhcp_option_trailer(dhcp); LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_discover: realloc()ing\n")); pbuf_realloc(dhcp->p_out, sizeof(struct dhcp_msg) - DHCP_OPTIONS_LEN + dhcp->options_out_len); LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_discover: sendto(DISCOVER, IP_ADDR_BROADCAST, DHCP_SERVER_PORT)\n")); udp_sendto_if_src(dhcp_pcb, dhcp->p_out, IP_ADDR_BROADCAST, DHCP_SERVER_PORT, netif, IP4_ADDR_ANY); LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_discover: deleting()ing\n")); dhcp_delete_msg(dhcp); LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_discover: SELECTING\n")); } else { LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, ("dhcp_discover: could not allocate DHCP request\n")); } if (dhcp->tries < 255) { dhcp->tries++; } #if LWIP_DHCP_AUTOIP_COOP if (dhcp->tries >= LWIP_DHCP_AUTOIP_COOP_TRIES && dhcp->autoip_coop_state == DHCP_AUTOIP_COOP_STATE_OFF) { dhcp->autoip_coop_state = DHCP_AUTOIP_COOP_STATE_ON; autoip_start(netif); } #endif /* LWIP_DHCP_AUTOIP_COOP */ msecs = (dhcp->tries < 6 ? 
1 << dhcp->tries : 60) * 1000; dhcp->request_timeout = (msecs + DHCP_FINE_TIMER_MSECS - 1) / DHCP_FINE_TIMER_MSECS; LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_discover(): set request timeout %"U16_F" msecs\n", msecs)); return result; } /** * Bind the interface to the offered IP address. * * @param netif network interface to bind to the offered address */ static void dhcp_bind(struct netif *netif) { u32_t timeout; struct dhcp *dhcp; ip4_addr_t sn_mask, gw_addr; LWIP_ERROR("dhcp_bind: netif != NULL", (netif != NULL), return;); dhcp = netif_dhcp_data(netif); LWIP_ERROR("dhcp_bind: dhcp != NULL", (dhcp != NULL), return;); LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_bind(netif=%p) %c%c%"U16_F"\n", (void*)netif, netif->name[0], netif->name[1], (u16_t)netif->num)); /* reset time used of lease */ dhcp->lease_used = 0; if (dhcp->offered_t0_lease != 0xffffffffUL) { /* set renewal period timer */ LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_bind(): t0 renewal timer %"U32_F" secs\n", dhcp->offered_t0_lease)); timeout = (dhcp->offered_t0_lease + DHCP_COARSE_TIMER_SECS / 2) / DHCP_COARSE_TIMER_SECS; if (timeout > 0xffff) { timeout = 0xffff; } dhcp->t0_timeout = (u16_t)timeout; if (dhcp->t0_timeout == 0) { dhcp->t0_timeout = 1; } LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_bind(): set request timeout %"U32_F" msecs\n", dhcp->offered_t0_lease*1000)); } /* temporary DHCP lease? */ if (dhcp->offered_t1_renew != 0xffffffffUL) { /* set renewal period timer */ LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_bind(): t1 renewal timer %"U32_F" secs\n", dhcp->offered_t1_renew)); timeout = (dhcp->offered_t1_renew + DHCP_COARSE_TIMER_SECS / 2) / DHCP_COARSE_TIMER_SECS; if (timeout > 0xffff) { timeout = 0xffff; } dhcp->t1_timeout = (u16_t)timeout; if (dhcp->t1_timeout == 0) { dhcp->t1_timeout = 1; } LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_bind(): set request timeout %"U32_F" msecs\n", dhcp->offered_t1_renew*1000)); dhcp->t1_renew_time = dhcp->t1_timeout; } /* set renewal period timer */ if (dhcp->offered_t2_rebind != 0xffffffffUL) { LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_bind(): t2 rebind timer %"U32_F" secs\n", dhcp->offered_t2_rebind)); timeout = (dhcp->offered_t2_rebind + DHCP_COARSE_TIMER_SECS / 2) / DHCP_COARSE_TIMER_SECS; if (timeout > 0xffff) { timeout = 0xffff; } dhcp->t2_timeout = (u16_t)timeout; if (dhcp->t2_timeout == 0) { dhcp->t2_timeout = 1; } LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_bind(): set request timeout %"U32_F" msecs\n", dhcp->offered_t2_rebind*1000)); dhcp->t2_rebind_time = dhcp->t2_timeout; } /* If we have sub 1 minute lease, t2 and t1 will kick in at the same time. */ if ((dhcp->t1_timeout >= dhcp->t2_timeout) && (dhcp->t2_timeout > 0)) { dhcp->t1_timeout = 0; } if (dhcp->subnet_mask_given) { /* copy offered network mask */ ip4_addr_copy(sn_mask, dhcp->offered_sn_mask); } else { /* subnet mask not given, choose a safe subnet mask given the network class */ u8_t first_octet = ip4_addr1(&dhcp->offered_ip_addr); if (first_octet <= 127) { ip4_addr_set_u32(&sn_mask, PP_HTONL(0xff000000UL)); } else if (first_octet >= 192) { ip4_addr_set_u32(&sn_mask, PP_HTONL(0xffffff00UL)); } else { ip4_addr_set_u32(&sn_mask, PP_HTONL(0xffff0000UL)); } } ip4_addr_copy(gw_addr, dhcp->offered_gw_addr); /* gateway address not given? 
*/ if (ip4_addr_isany_val(gw_addr)) { /* copy network address */ ip4_addr_get_network(&gw_addr, &dhcp->offered_ip_addr, &sn_mask); /* use first host address on network as gateway */ ip4_addr_set_u32(&gw_addr, ip4_addr_get_u32(&gw_addr) | PP_HTONL(0x00000001UL)); } #if LWIP_DHCP_AUTOIP_COOP if (dhcp->autoip_coop_state == DHCP_AUTOIP_COOP_STATE_ON) { autoip_stop(netif); dhcp->autoip_coop_state = DHCP_AUTOIP_COOP_STATE_OFF; } #endif /* LWIP_DHCP_AUTOIP_COOP */ LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_STATE, ("dhcp_bind(): IP: 0x%08"X32_F" SN: 0x%08"X32_F" GW: 0x%08"X32_F"\n", ip4_addr_get_u32(&dhcp->offered_ip_addr), ip4_addr_get_u32(&sn_mask), ip4_addr_get_u32(&gw_addr))); /* netif is now bound to DHCP leased address - set this before assigning the address to ensure the callback can use dhcp_supplied_address() */ dhcp_set_state(dhcp, DHCP_STATE_BOUND); netif_set_addr(netif, &dhcp->offered_ip_addr, &sn_mask, &gw_addr); /* interface is used by routing now that an address is set */ } /** * @ingroup dhcp4 * Renew an existing DHCP lease at the involved DHCP server. * * @param netif network interface which must renew its lease */ err_t dhcp_renew(struct netif *netif) { struct dhcp *dhcp = netif_dhcp_data(netif); err_t result; u16_t msecs; u8_t i; LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_renew()\n")); dhcp_set_state(dhcp, DHCP_STATE_RENEWING); /* create and initialize the DHCP message header */ result = dhcp_create_msg(netif, dhcp, DHCP_REQUEST); if (result == ERR_OK) { dhcp_option(dhcp, DHCP_OPTION_MAX_MSG_SIZE, DHCP_OPTION_MAX_MSG_SIZE_LEN); dhcp_option_short(dhcp, DHCP_MAX_MSG_LEN(netif)); dhcp_option(dhcp, DHCP_OPTION_PARAMETER_REQUEST_LIST, LWIP_ARRAYSIZE(dhcp_discover_request_options)); for (i = 0; i < LWIP_ARRAYSIZE(dhcp_discover_request_options); i++) { dhcp_option_byte(dhcp, dhcp_discover_request_options[i]); } #if LWIP_NETIF_HOSTNAME dhcp_option_hostname(dhcp, netif); #endif /* LWIP_NETIF_HOSTNAME */ /* append DHCP message trailer */ dhcp_option_trailer(dhcp); pbuf_realloc(dhcp->p_out, sizeof(struct dhcp_msg) - DHCP_OPTIONS_LEN + dhcp->options_out_len); udp_sendto_if(dhcp_pcb, dhcp->p_out, &dhcp->server_ip_addr, DHCP_SERVER_PORT, netif); dhcp_delete_msg(dhcp); LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_renew: RENEWING\n")); } else { LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, ("dhcp_renew: could not allocate DHCP request\n")); } if (dhcp->tries < 255) { dhcp->tries++; } /* back-off on retries, but to a maximum of 20 seconds */ msecs = dhcp->tries < 10 ? dhcp->tries * 2000 : 20 * 1000; dhcp->request_timeout = (msecs + DHCP_FINE_TIMER_MSECS - 1) / DHCP_FINE_TIMER_MSECS; LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_renew(): set request timeout %"U16_F" msecs\n", msecs)); return result; } /** * Rebind with a DHCP server for an existing DHCP lease. 
* * @param netif network interface which must rebind with a DHCP server */ static err_t dhcp_rebind(struct netif *netif) { struct dhcp *dhcp = netif_dhcp_data(netif); err_t result; u16_t msecs; u8_t i; LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_rebind()\n")); dhcp_set_state(dhcp, DHCP_STATE_REBINDING); /* create and initialize the DHCP message header */ result = dhcp_create_msg(netif, dhcp, DHCP_REQUEST); if (result == ERR_OK) { dhcp_option(dhcp, DHCP_OPTION_MAX_MSG_SIZE, DHCP_OPTION_MAX_MSG_SIZE_LEN); dhcp_option_short(dhcp, DHCP_MAX_MSG_LEN(netif)); dhcp_option(dhcp, DHCP_OPTION_PARAMETER_REQUEST_LIST, LWIP_ARRAYSIZE(dhcp_discover_request_options)); for (i = 0; i < LWIP_ARRAYSIZE(dhcp_discover_request_options); i++) { dhcp_option_byte(dhcp, dhcp_discover_request_options[i]); } #if LWIP_NETIF_HOSTNAME dhcp_option_hostname(dhcp, netif); #endif /* LWIP_NETIF_HOSTNAME */ dhcp_option_trailer(dhcp); pbuf_realloc(dhcp->p_out, sizeof(struct dhcp_msg) - DHCP_OPTIONS_LEN + dhcp->options_out_len); /* broadcast to server */ udp_sendto_if(dhcp_pcb, dhcp->p_out, IP_ADDR_BROADCAST, DHCP_SERVER_PORT, netif); dhcp_delete_msg(dhcp); LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_rebind: REBINDING\n")); } else { LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, ("dhcp_rebind: could not allocate DHCP request\n")); } if (dhcp->tries < 255) { dhcp->tries++; } msecs = dhcp->tries < 10 ? dhcp->tries * 1000 : 10 * 1000; dhcp->request_timeout = (msecs + DHCP_FINE_TIMER_MSECS - 1) / DHCP_FINE_TIMER_MSECS; LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_rebind(): set request timeout %"U16_F" msecs\n", msecs)); return result; } /** * Enter REBOOTING state to verify an existing lease * * @param netif network interface which must reboot */ static err_t dhcp_reboot(struct netif *netif) { struct dhcp *dhcp = netif_dhcp_data(netif); err_t result; u16_t msecs; u8_t i; LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_reboot()\n")); dhcp_set_state(dhcp, DHCP_STATE_REBOOTING); /* create and initialize the DHCP message header */ result = dhcp_create_msg(netif, dhcp, DHCP_REQUEST); if (result == ERR_OK) { dhcp_option(dhcp, DHCP_OPTION_MAX_MSG_SIZE, DHCP_OPTION_MAX_MSG_SIZE_LEN); dhcp_option_short(dhcp, DHCP_MAX_MSG_LEN_MIN_REQUIRED); dhcp_option(dhcp, DHCP_OPTION_REQUESTED_IP, 4); dhcp_option_long(dhcp, lwip_ntohl(ip4_addr_get_u32(&dhcp->offered_ip_addr))); dhcp_option(dhcp, DHCP_OPTION_PARAMETER_REQUEST_LIST, LWIP_ARRAYSIZE(dhcp_discover_request_options)); for (i = 0; i < LWIP_ARRAYSIZE(dhcp_discover_request_options); i++) { dhcp_option_byte(dhcp, dhcp_discover_request_options[i]); } dhcp_option_trailer(dhcp); pbuf_realloc(dhcp->p_out, sizeof(struct dhcp_msg) - DHCP_OPTIONS_LEN + dhcp->options_out_len); /* broadcast to server */ udp_sendto_if(dhcp_pcb, dhcp->p_out, IP_ADDR_BROADCAST, DHCP_SERVER_PORT, netif); dhcp_delete_msg(dhcp); LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_reboot: REBOOTING\n")); } else { LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, ("dhcp_reboot: could not allocate DHCP request\n")); } if (dhcp->tries < 255) { dhcp->tries++; } msecs = dhcp->tries < 10 ? 
dhcp->tries * 1000 : 10 * 1000; dhcp->request_timeout = (msecs + DHCP_FINE_TIMER_MSECS - 1) / DHCP_FINE_TIMER_MSECS; LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_reboot(): set request timeout %"U16_F" msecs\n", msecs)); return result; } /** * @ingroup dhcp4 * Release a DHCP lease (usually called before @ref dhcp_stop). * * @param netif network interface which must release its lease */ err_t dhcp_release(struct netif *netif) { struct dhcp *dhcp = netif_dhcp_data(netif); err_t result; ip_addr_t server_ip_addr; u8_t is_dhcp_supplied_address; LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_release()\n")); if (dhcp == NULL) { return ERR_ARG; } ip_addr_copy(server_ip_addr, dhcp->server_ip_addr); is_dhcp_supplied_address = dhcp_supplied_address(netif); /* idle DHCP client */ dhcp_set_state(dhcp, DHCP_STATE_OFF); /* clean old DHCP offer */ ip_addr_set_zero_ip4(&dhcp->server_ip_addr); ip4_addr_set_zero(&dhcp->offered_ip_addr); ip4_addr_set_zero(&dhcp->offered_sn_mask); ip4_addr_set_zero(&dhcp->offered_gw_addr); #if LWIP_DHCP_BOOTP_FILE ip4_addr_set_zero(&dhcp->offered_si_addr); #endif /* LWIP_DHCP_BOOTP_FILE */ dhcp->offered_t0_lease = dhcp->offered_t1_renew = dhcp->offered_t2_rebind = 0; dhcp->t1_renew_time = dhcp->t2_rebind_time = dhcp->lease_used = dhcp->t0_timeout = 0; if (!is_dhcp_supplied_address) { /* don't issue release message when address is not dhcp-assigned */ return ERR_OK; } /* create and initialize the DHCP message header */ result = dhcp_create_msg(netif, dhcp, DHCP_RELEASE); if (result == ERR_OK) { dhcp_option(dhcp, DHCP_OPTION_SERVER_ID, 4); dhcp_option_long(dhcp, lwip_ntohl(ip4_addr_get_u32(ip_2_ip4(&server_ip_addr)))); dhcp_option_trailer(dhcp); pbuf_realloc(dhcp->p_out, sizeof(struct dhcp_msg) - DHCP_OPTIONS_LEN + dhcp->options_out_len); udp_sendto_if(dhcp_pcb, dhcp->p_out, &server_ip_addr, DHCP_SERVER_PORT, netif); dhcp_delete_msg(dhcp); LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("dhcp_release: RELEASED, DHCP_STATE_OFF\n")); } else { /* sending release failed, but that's not a problem since the correct behaviour of dhcp does not rely on release */ LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, ("dhcp_release: could not allocate DHCP request\n")); } /* remove IP address from interface (prevents routing from selecting this interface) */ netif_set_addr(netif, IP4_ADDR_ANY4, IP4_ADDR_ANY4, IP4_ADDR_ANY4); return result; } /** * @ingroup dhcp4 * Remove the DHCP client from the interface. * * @param netif The network interface to stop DHCP on */ void dhcp_stop(struct netif *netif) { struct dhcp *dhcp; LWIP_ERROR("dhcp_stop: netif != NULL", (netif != NULL), return;); dhcp = netif_dhcp_data(netif); LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_stop()\n")); /* netif is DHCP configured? */ if (dhcp != NULL) { #if LWIP_DHCP_AUTOIP_COOP if (dhcp->autoip_coop_state == DHCP_AUTOIP_COOP_STATE_ON) { autoip_stop(netif); dhcp->autoip_coop_state = DHCP_AUTOIP_COOP_STATE_OFF; } #endif /* LWIP_DHCP_AUTOIP_COOP */ LWIP_ASSERT("reply wasn't freed", dhcp->msg_in == NULL); dhcp_set_state(dhcp, DHCP_STATE_OFF); if (dhcp->pcb_allocated != 0) { dhcp_dec_pcb_refcount(); /* free DHCP PCB if not needed any more */ dhcp->pcb_allocated = 0; } } } /* * Set the DHCP state of a DHCP client. * * If the state changed, reset the number of tries. 
*/ static void dhcp_set_state(struct dhcp *dhcp, u8_t new_state) { if (new_state != dhcp->state) { dhcp->state = new_state; dhcp->tries = 0; dhcp->request_timeout = 0; } } /* * Concatenate an option type and length field to the outgoing * DHCP message. * */ static void dhcp_option(struct dhcp *dhcp, u8_t option_type, u8_t option_len) { LWIP_ASSERT("dhcp_option: dhcp->options_out_len + 2 + option_len <= DHCP_OPTIONS_LEN", dhcp->options_out_len + 2U + option_len <= DHCP_OPTIONS_LEN); dhcp->msg_out->options[dhcp->options_out_len++] = option_type; dhcp->msg_out->options[dhcp->options_out_len++] = option_len; } /* * Concatenate a single byte to the outgoing DHCP message. * */ static void dhcp_option_byte(struct dhcp *dhcp, u8_t value) { LWIP_ASSERT("dhcp_option_byte: dhcp->options_out_len < DHCP_OPTIONS_LEN", dhcp->options_out_len < DHCP_OPTIONS_LEN); dhcp->msg_out->options[dhcp->options_out_len++] = value; } static void dhcp_option_short(struct dhcp *dhcp, u16_t value) { LWIP_ASSERT("dhcp_option_short: dhcp->options_out_len + 2 <= DHCP_OPTIONS_LEN", dhcp->options_out_len + 2U <= DHCP_OPTIONS_LEN); dhcp->msg_out->options[dhcp->options_out_len++] = (u8_t)((value & 0xff00U) >> 8); dhcp->msg_out->options[dhcp->options_out_len++] = (u8_t) (value & 0x00ffU); } static void dhcp_option_long(struct dhcp *dhcp, u32_t value) { LWIP_ASSERT("dhcp_option_long: dhcp->options_out_len + 4 <= DHCP_OPTIONS_LEN", dhcp->options_out_len + 4U <= DHCP_OPTIONS_LEN); dhcp->msg_out->options[dhcp->options_out_len++] = (u8_t)((value & 0xff000000UL) >> 24); dhcp->msg_out->options[dhcp->options_out_len++] = (u8_t)((value & 0x00ff0000UL) >> 16); dhcp->msg_out->options[dhcp->options_out_len++] = (u8_t)((value & 0x0000ff00UL) >> 8); dhcp->msg_out->options[dhcp->options_out_len++] = (u8_t)((value & 0x000000ffUL)); } #if LWIP_NETIF_HOSTNAME static void dhcp_option_hostname(struct dhcp *dhcp, struct netif *netif) { if (netif->hostname != NULL) { size_t namelen = strlen(netif->hostname); if (namelen > 0) { size_t len; const char *p = netif->hostname; /* Shrink len to available bytes (need 2 bytes for OPTION_HOSTNAME and 1 byte for trailer) */ size_t available = DHCP_OPTIONS_LEN - dhcp->options_out_len - 3; LWIP_ASSERT("DHCP: hostname is too long!", namelen <= available); len = LWIP_MIN(namelen, available); LWIP_ASSERT("DHCP: hostname is too long!", len <= 0xFF); dhcp_option(dhcp, DHCP_OPTION_HOSTNAME, (u8_t)len); while (len--) { dhcp_option_byte(dhcp, *p++); } } } } #endif /* LWIP_NETIF_HOSTNAME */ /** * Extract the DHCP message and the DHCP options. * * Extract the DHCP message and the DHCP options, each into a contiguous * piece of memory. As a DHCP message is variable sized by its options, * and also allows overriding some fields for options, the easy approach * is to first unfold the options into a contiguous piece of memory, and * use that further on. 
* */ static err_t dhcp_parse_reply(struct dhcp *dhcp, struct pbuf *p) { u8_t *options; u16_t offset; u16_t offset_max; u16_t options_idx; u16_t options_idx_max; struct pbuf *q; int parse_file_as_options = 0; int parse_sname_as_options = 0; /* clear received options */ dhcp_clear_all_options(dhcp); /* check that beginning of dhcp_msg (up to and including chaddr) is in first pbuf */ if (p->len < DHCP_SNAME_OFS) { return ERR_BUF; } dhcp->msg_in = (struct dhcp_msg *)p->payload; #if LWIP_DHCP_BOOTP_FILE /* clear boot file name */ dhcp->boot_file_name[0] = 0; #endif /* LWIP_DHCP_BOOTP_FILE */ /* parse options */ /* start with options field */ options_idx = DHCP_OPTIONS_OFS; /* parse options to the end of the received packet */ options_idx_max = p->tot_len; again: q = p; while ((q != NULL) && (options_idx >= q->len)) { options_idx -= q->len; options_idx_max -= q->len; q = q->next; } if (q == NULL) { return ERR_BUF; } offset = options_idx; offset_max = options_idx_max; options = (u8_t*)q->payload; /* at least 1 byte to read and no end marker, then at least 3 bytes to read? */ while ((q != NULL) && (offset < offset_max) && (options[offset] != DHCP_OPTION_END)) { u8_t op = options[offset]; u8_t len; u8_t decode_len = 0; int decode_idx = -1; u16_t val_offset = offset + 2; /* len byte might be in the next pbuf */ if ((offset + 1) < q->len) { len = options[offset + 1]; } else { len = (q->next != NULL ? ((u8_t*)q->next->payload)[0] : 0); } /* LWIP_DEBUGF(DHCP_DEBUG, ("msg_offset=%"U16_F", q->len=%"U16_F, msg_offset, q->len)); */ decode_len = len; switch(op) { /* case(DHCP_OPTION_END): handled above */ case(DHCP_OPTION_PAD): /* special option: no len encoded */ decode_len = len = 0; /* will be increased below */ offset--; break; case(DHCP_OPTION_SUBNET_MASK): LWIP_ERROR("len == 4", len == 4, return ERR_VAL;); decode_idx = DHCP_OPTION_IDX_SUBNET_MASK; break; case(DHCP_OPTION_ROUTER): decode_len = 4; /* only copy the first given router */ LWIP_ERROR("len >= decode_len", len >= decode_len, return ERR_VAL;); decode_idx = DHCP_OPTION_IDX_ROUTER; break; #if LWIP_DHCP_PROVIDE_DNS_SERVERS case(DHCP_OPTION_DNS_SERVER): /* special case: there might be more than one server */ LWIP_ERROR("len %% 4 == 0", len % 4 == 0, return ERR_VAL;); /* limit number of DNS servers */ decode_len = LWIP_MIN(len, 4 * DNS_MAX_SERVERS); LWIP_ERROR("len >= decode_len", len >= decode_len, return ERR_VAL;); decode_idx = DHCP_OPTION_IDX_DNS_SERVER; break; #endif /* LWIP_DHCP_PROVIDE_DNS_SERVERS */ case(DHCP_OPTION_LEASE_TIME): LWIP_ERROR("len == 4", len == 4, return ERR_VAL;); decode_idx = DHCP_OPTION_IDX_LEASE_TIME; break; #if LWIP_DHCP_GET_NTP_SRV case(DHCP_OPTION_NTP): /* special case: there might be more than one server */ LWIP_ERROR("len %% 4 == 0", len % 4 == 0, return ERR_VAL;); /* limit number of NTP servers */ decode_len = LWIP_MIN(len, 4 * LWIP_DHCP_MAX_NTP_SERVERS); LWIP_ERROR("len >= decode_len", len >= decode_len, return ERR_VAL;); decode_idx = DHCP_OPTION_IDX_NTP_SERVER; break; #endif /* LWIP_DHCP_GET_NTP_SRV*/ case(DHCP_OPTION_OVERLOAD): LWIP_ERROR("len == 1", len == 1, return ERR_VAL;); /* decode overload only in options, not in file/sname: invalid packet */ LWIP_ERROR("overload in file/sname", options_idx == DHCP_OPTIONS_OFS, return ERR_VAL;); decode_idx = DHCP_OPTION_IDX_OVERLOAD; break; case(DHCP_OPTION_MESSAGE_TYPE): LWIP_ERROR("len == 1", len == 1, return ERR_VAL;); decode_idx = DHCP_OPTION_IDX_MSG_TYPE; break; case(DHCP_OPTION_SERVER_ID): LWIP_ERROR("len == 4", len == 4, return ERR_VAL;); decode_idx = 
DHCP_OPTION_IDX_SERVER_ID; break; case(DHCP_OPTION_T1): LWIP_ERROR("len == 4", len == 4, return ERR_VAL;); decode_idx = DHCP_OPTION_IDX_T1; break; case(DHCP_OPTION_T2): LWIP_ERROR("len == 4", len == 4, return ERR_VAL;); decode_idx = DHCP_OPTION_IDX_T2; break; default: decode_len = 0; LWIP_DEBUGF(DHCP_DEBUG, ("skipping option %"U16_F" in options\n", (u16_t)op)); break; } offset += len + 2; if (decode_len > 0) { u32_t value = 0; u16_t copy_len; decode_next: LWIP_ASSERT("check decode_idx", decode_idx >= 0 && decode_idx < DHCP_OPTION_IDX_MAX); if (!dhcp_option_given(dhcp, decode_idx)) { copy_len = LWIP_MIN(decode_len, 4); if (pbuf_copy_partial(q, &value, copy_len, val_offset) != copy_len) { return ERR_BUF; } if (decode_len > 4) { /* decode more than one u32_t */ LWIP_ERROR("decode_len %% 4 == 0", decode_len % 4 == 0, return ERR_VAL;); dhcp_got_option(dhcp, decode_idx); dhcp_set_option_value(dhcp, decode_idx, lwip_htonl(value)); decode_len -= 4; val_offset += 4; decode_idx++; goto decode_next; } else if (decode_len == 4) { value = lwip_ntohl(value); } else { LWIP_ERROR("invalid decode_len", decode_len == 1, return ERR_VAL;); value = ((u8_t*)&value)[0]; } dhcp_got_option(dhcp, decode_idx); dhcp_set_option_value(dhcp, decode_idx, value); } } if (offset >= q->len) { offset -= q->len; offset_max -= q->len; if ((offset < offset_max) && offset_max) { q = q->next; LWIP_ERROR("next pbuf was null", q != NULL, return ERR_VAL;); options = (u8_t*)q->payload; } else { /* We've run out of bytes, probably no end marker. Don't proceed. */ break; } } } /* is this an overloaded message? */ if (dhcp_option_given(dhcp, DHCP_OPTION_IDX_OVERLOAD)) { u32_t overload = dhcp_get_option_value(dhcp, DHCP_OPTION_IDX_OVERLOAD); dhcp_clear_option(dhcp, DHCP_OPTION_IDX_OVERLOAD); if (overload == DHCP_OVERLOAD_FILE) { parse_file_as_options = 1; LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("overloaded file field\n")); } else if (overload == DHCP_OVERLOAD_SNAME) { parse_sname_as_options = 1; LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("overloaded sname field\n")); } else if (overload == DHCP_OVERLOAD_SNAME_FILE) { parse_sname_as_options = 1; parse_file_as_options = 1; LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("overloaded sname and file field\n")); } else { LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("invalid overload option: %d\n", (int)overload)); } #if LWIP_DHCP_BOOTP_FILE if (!parse_file_as_options) { /* only do this for ACK messages */ if (dhcp_option_given(dhcp, DHCP_OPTION_IDX_MSG_TYPE) && (dhcp_get_option_value(dhcp, DHCP_OPTION_IDX_MSG_TYPE) == DHCP_ACK)) /* copy bootp file name, don't care for sname (server hostname) */ if (pbuf_copy_partial(p, dhcp->boot_file_name, DHCP_FILE_LEN-1, DHCP_FILE_OFS) != (DHCP_FILE_LEN-1)) { return ERR_BUF; } /* make sure the string is really NULL-terminated */ dhcp->boot_file_name[DHCP_FILE_LEN-1] = 0; } #endif /* LWIP_DHCP_BOOTP_FILE */ } if (parse_file_as_options) { /* if both are overloaded, parse file first and then sname (RFC 2131 ch. 
4.1) */ parse_file_as_options = 0; options_idx = DHCP_FILE_OFS; options_idx_max = DHCP_FILE_OFS + DHCP_FILE_LEN; goto again; } else if (parse_sname_as_options) { parse_sname_as_options = 0; options_idx = DHCP_SNAME_OFS; options_idx_max = DHCP_SNAME_OFS + DHCP_SNAME_LEN; goto again; } return ERR_OK; } /** * If an incoming DHCP message is in response to us, then trigger the state machine */ static void dhcp_recv(void *arg, struct udp_pcb *pcb, struct pbuf *p, const ip_addr_t *addr, u16_t port) { struct netif *netif = ip_current_input_netif(); struct dhcp *dhcp = netif_dhcp_data(netif); struct dhcp_msg *reply_msg = (struct dhcp_msg *)p->payload; u8_t msg_type; u8_t i; LWIP_UNUSED_ARG(arg); /* Caught DHCP message from netif that does not have DHCP enabled? -> not interested */ if ((dhcp == NULL) || (dhcp->pcb_allocated == 0)) { goto free_pbuf_and_return; } LWIP_ASSERT("invalid server address type", IP_IS_V4(addr)); LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_recv(pbuf = %p) from DHCP server %"U16_F".%"U16_F".%"U16_F".%"U16_F" port %"U16_F"\n", (void*)p, ip4_addr1_16(ip_2_ip4(addr)), ip4_addr2_16(ip_2_ip4(addr)), ip4_addr3_16(ip_2_ip4(addr)), ip4_addr4_16(ip_2_ip4(addr)), port)); LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("pbuf->len = %"U16_F"\n", p->len)); LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("pbuf->tot_len = %"U16_F"\n", p->tot_len)); /* prevent warnings about unused arguments */ LWIP_UNUSED_ARG(pcb); LWIP_UNUSED_ARG(addr); LWIP_UNUSED_ARG(port); LWIP_ASSERT("reply wasn't freed", dhcp->msg_in == NULL); if (p->len < DHCP_MIN_REPLY_LEN) { LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_WARNING, ("DHCP reply message or pbuf too short\n")); goto free_pbuf_and_return; } if (reply_msg->op != DHCP_BOOTREPLY) { LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_WARNING, ("not a DHCP reply message, but type %"U16_F"\n", (u16_t)reply_msg->op)); goto free_pbuf_and_return; } /* iterate through hardware address and match against DHCP message */ for (i = 0; i < netif->hwaddr_len && i < NETIF_MAX_HWADDR_LEN && i < DHCP_CHADDR_LEN; i++) { if (netif->hwaddr[i] != reply_msg->chaddr[i]) { LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_WARNING, ("netif->hwaddr[%"U16_F"]==%02"X16_F" != reply_msg->chaddr[%"U16_F"]==%02"X16_F"\n", (u16_t)i, (u16_t)netif->hwaddr[i], (u16_t)i, (u16_t)reply_msg->chaddr[i])); goto free_pbuf_and_return; } } /* match transaction ID against what we expected */ if (lwip_ntohl(reply_msg->xid) != dhcp->xid) { LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_WARNING, ("transaction id mismatch reply_msg->xid(%"X32_F")!=dhcp->xid(%"X32_F")\n",lwip_ntohl(reply_msg->xid),dhcp->xid)); goto free_pbuf_and_return; } /* option fields could be unfold? */ if (dhcp_parse_reply(dhcp, p) != ERR_OK) { LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, ("problem unfolding DHCP message - too short on memory?\n")); goto free_pbuf_and_return; } LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("searching DHCP_OPTION_MESSAGE_TYPE\n")); /* obtain pointer to DHCP message type */ if (!dhcp_option_given(dhcp, DHCP_OPTION_IDX_MSG_TYPE)) { LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_WARNING, ("DHCP_OPTION_MESSAGE_TYPE option not found\n")); goto free_pbuf_and_return; } /* read DHCP message type */ msg_type = (u8_t)dhcp_get_option_value(dhcp, DHCP_OPTION_IDX_MSG_TYPE); /* message type is DHCP ACK? */ if (msg_type == DHCP_ACK) { LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("DHCP_ACK received\n")); /* in requesting state? 
*/ if (dhcp->state == DHCP_STATE_REQUESTING) { dhcp_handle_ack(netif); #if DHCP_DOES_ARP_CHECK if ((netif->flags & NETIF_FLAG_ETHARP) != 0) { /* check if the acknowledged lease address is already in use */ dhcp_check(netif); } else { /* bind interface to the acknowledged lease address */ dhcp_bind(netif); } #else /* bind interface to the acknowledged lease address */ dhcp_bind(netif); #endif } /* already bound to the given lease address? */ else if ((dhcp->state == DHCP_STATE_REBOOTING) || (dhcp->state == DHCP_STATE_REBINDING) || (dhcp->state == DHCP_STATE_RENEWING)) { dhcp_handle_ack(netif); dhcp_bind(netif); } } /* received a DHCP_NAK in appropriate state? */ else if ((msg_type == DHCP_NAK) && ((dhcp->state == DHCP_STATE_REBOOTING) || (dhcp->state == DHCP_STATE_REQUESTING) || (dhcp->state == DHCP_STATE_REBINDING) || (dhcp->state == DHCP_STATE_RENEWING ))) { LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("DHCP_NAK received\n")); dhcp_handle_nak(netif); } /* received a DHCP_OFFER in DHCP_STATE_SELECTING state? */ else if ((msg_type == DHCP_OFFER) && (dhcp->state == DHCP_STATE_SELECTING)) { LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("DHCP_OFFER received in DHCP_STATE_SELECTING state\n")); dhcp->request_timeout = 0; /* remember offered lease */ dhcp_handle_offer(netif); } free_pbuf_and_return: if (dhcp != NULL) { dhcp->msg_in = NULL; } pbuf_free(p); } /** * Create a DHCP request, fill in common headers * * @param netif the netif under DHCP control * @param dhcp dhcp control struct * @param message_type message type of the request */ static err_t dhcp_create_msg(struct netif *netif, struct dhcp *dhcp, u8_t message_type) { u16_t i; #ifndef DHCP_GLOBAL_XID /** default global transaction identifier starting value (easy to match * with a packet analyser). We simply increment for each new request. 
* Predefine DHCP_GLOBAL_XID to a better value or a function call to generate one * at runtime, any supporting function prototypes can be defined in DHCP_GLOBAL_XID_HEADER */ #if DHCP_CREATE_RAND_XID && defined(LWIP_RAND) static u32_t xid; #else /* DHCP_CREATE_RAND_XID && defined(LWIP_RAND) */ static u32_t xid = 0xABCD0000; #endif /* DHCP_CREATE_RAND_XID && defined(LWIP_RAND) */ #else if (!xid_initialised) { xid = DHCP_GLOBAL_XID; xid_initialised = !xid_initialised; } #endif LWIP_ERROR("dhcp_create_msg: netif != NULL", (netif != NULL), return ERR_ARG;); LWIP_ERROR("dhcp_create_msg: dhcp != NULL", (dhcp != NULL), return ERR_VAL;); LWIP_ASSERT("dhcp_create_msg: dhcp->p_out == NULL", dhcp->p_out == NULL); LWIP_ASSERT("dhcp_create_msg: dhcp->msg_out == NULL", dhcp->msg_out == NULL); dhcp->p_out = pbuf_alloc(PBUF_TRANSPORT, sizeof(struct dhcp_msg), PBUF_RAM); if (dhcp->p_out == NULL) { LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, ("dhcp_create_msg(): could not allocate pbuf\n")); return ERR_MEM; } LWIP_ASSERT("dhcp_create_msg: check that first pbuf can hold struct dhcp_msg", (dhcp->p_out->len >= sizeof(struct dhcp_msg))); /* DHCP_REQUEST should reuse 'xid' from DHCPOFFER */ if ((message_type != DHCP_REQUEST) || (dhcp->state == DHCP_STATE_REBOOTING)) { /* reuse transaction identifier in retransmissions */ if (dhcp->tries == 0) { #if DHCP_CREATE_RAND_XID && defined(LWIP_RAND) xid = LWIP_RAND(); #else /* DHCP_CREATE_RAND_XID && defined(LWIP_RAND) */ xid++; #endif /* DHCP_CREATE_RAND_XID && defined(LWIP_RAND) */ } dhcp->xid = xid; } LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("transaction id xid(%"X32_F")\n", xid)); dhcp->msg_out = (struct dhcp_msg *)dhcp->p_out->payload; dhcp->msg_out->op = DHCP_BOOTREQUEST; /* @todo: make link layer independent */ dhcp->msg_out->htype = DHCP_HTYPE_ETH; dhcp->msg_out->hlen = netif->hwaddr_len; dhcp->msg_out->hops = 0; dhcp->msg_out->xid = lwip_htonl(dhcp->xid); dhcp->msg_out->secs = 0; /* we don't need the broadcast flag since we can receive unicast traffic before being fully configured! */ dhcp->msg_out->flags = 0; ip4_addr_set_zero(&dhcp->msg_out->ciaddr); /* set ciaddr to netif->ip_addr based on message_type and state */ if ((message_type == DHCP_INFORM) || (message_type == DHCP_DECLINE) || (message_type == DHCP_RELEASE) || ((message_type == DHCP_REQUEST) && /* DHCP_STATE_BOUND not used for sending! */ ((dhcp->state== DHCP_STATE_RENEWING) || dhcp->state== DHCP_STATE_REBINDING))) { ip4_addr_copy(dhcp->msg_out->ciaddr, *netif_ip4_addr(netif)); } ip4_addr_set_zero(&dhcp->msg_out->yiaddr); ip4_addr_set_zero(&dhcp->msg_out->siaddr); ip4_addr_set_zero(&dhcp->msg_out->giaddr); for (i = 0; i < DHCP_CHADDR_LEN; i++) { /* copy netif hardware address, pad with zeroes */ dhcp->msg_out->chaddr[i] = (i < netif->hwaddr_len && i < NETIF_MAX_HWADDR_LEN) ? netif->hwaddr[i] : 0/* pad byte*/; } for (i = 0; i < DHCP_SNAME_LEN; i++) { dhcp->msg_out->sname[i] = 0; } for (i = 0; i < DHCP_FILE_LEN; i++) { dhcp->msg_out->file[i] = 0; } dhcp->msg_out->cookie = PP_HTONL(DHCP_MAGIC_COOKIE); dhcp->options_out_len = 0; /* fill options field with an incrementing array (for debugging purposes) */ for (i = 0; i < DHCP_OPTIONS_LEN; i++) { dhcp->msg_out->options[i] = (u8_t)i; /* for debugging only, no matter if truncated */ } /* Add option MESSAGE_TYPE */ dhcp_option(dhcp, DHCP_OPTION_MESSAGE_TYPE, DHCP_OPTION_MESSAGE_TYPE_LEN); dhcp_option_byte(dhcp, message_type); return ERR_OK; } /** * Free previously allocated memory used to send a DHCP request. 
* * @param dhcp the dhcp struct to free the request from */ static void dhcp_delete_msg(struct dhcp *dhcp) { LWIP_ERROR("dhcp_delete_msg: dhcp != NULL", (dhcp != NULL), return;); LWIP_ASSERT("dhcp_delete_msg: dhcp->p_out != NULL", dhcp->p_out != NULL); LWIP_ASSERT("dhcp_delete_msg: dhcp->msg_out != NULL", dhcp->msg_out != NULL); if (dhcp->p_out != NULL) { pbuf_free(dhcp->p_out); } dhcp->p_out = NULL; dhcp->msg_out = NULL; } /** * Add a DHCP message trailer * * Adds the END option to the DHCP message, and if * necessary, up to three padding bytes. * * @param dhcp DHCP state structure */ static void dhcp_option_trailer(struct dhcp *dhcp) { LWIP_ERROR("dhcp_option_trailer: dhcp != NULL", (dhcp != NULL), return;); LWIP_ASSERT("dhcp_option_trailer: dhcp->msg_out != NULL\n", dhcp->msg_out != NULL); LWIP_ASSERT("dhcp_option_trailer: dhcp->options_out_len < DHCP_OPTIONS_LEN\n", dhcp->options_out_len < DHCP_OPTIONS_LEN); dhcp->msg_out->options[dhcp->options_out_len++] = DHCP_OPTION_END; /* packet is too small, or not 4 byte aligned? */ while (((dhcp->options_out_len < DHCP_MIN_OPTIONS_LEN) || (dhcp->options_out_len & 3)) && (dhcp->options_out_len < DHCP_OPTIONS_LEN)) { /* add a fill/padding byte */ dhcp->msg_out->options[dhcp->options_out_len++] = 0; } } /** check if DHCP supplied netif->ip_addr * * @param netif the netif to check * @return 1 if DHCP supplied netif->ip_addr (states BOUND or RENEWING), * 0 otherwise */ u8_t dhcp_supplied_address(const struct netif *netif) { if ((netif != NULL) && (netif_dhcp_data(netif) != NULL)) { struct dhcp* dhcp = netif_dhcp_data(netif); return (dhcp->state == DHCP_STATE_BOUND) || (dhcp->state == DHCP_STATE_RENEWING) || (dhcp->state == DHCP_STATE_REBINDING); } return 0; } #endif /* LWIP_IPV4 && LWIP_DHCP */
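/* Added note (an illustration, not part of the upstream lwIP source): the
 * retransmission back-off used above by dhcp_discover(), dhcp_renew(),
 * dhcp_rebind() and dhcp_reboot() converts a millisecond delay into
 * fine-timer ticks with a rounding-up division:
 *
 *   dhcp->request_timeout = (msecs + DHCP_FINE_TIMER_MSECS - 1) / DHCP_FINE_TIMER_MSECS;
 *
 * Assuming lwIP's default DHCP_FINE_TIMER_MSECS of 500 ms, the third DISCOVER
 * retransmission (dhcp->tries == 3) waits msecs = (1 << 3) * 1000 = 8000 ms,
 * i.e. request_timeout = (8000 + 499) / 500 = 16 ticks of the fine timer,
 * while renew/rebind/reboot retries grow linearly and are capped (20 s / 10 s). */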
2301_81045437/classic-platform
communication/lwip-2.0.3/src/core/ipv4/dhcp.c
C
unknown
73,878
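A minimal usage sketch follows (added for illustration; it is not part of the dhcp.c file above or the etharp.c file below). It assumes a NO_SYS=1 raw-API style port in which the netif has already been added and brought up with netif_set_up(), and in which sys_check_timeouts() or the port's own timer handling keeps the DHCP coarse/fine timers running; the function name example_dhcp_poll is a placeholder.

#include "lwip/err.h"
#include "lwip/netif.h"
#include "lwip/dhcp.h"

/* Start DHCP on an already-up netif and report whether an address has been
 * leased yet. Intended to be polled from the application's main loop. */
static int example_dhcp_poll(struct netif *nif)
{
  if (netif_dhcp_data(nif) == NULL) {
    /* first call: allocate the DHCP client state and send the first DISCOVER */
    if (dhcp_start(nif) != ERR_OK) {
      return 0; /* out of memory, or netif not usable for DHCP (e.g. MTU too small) */
    }
  }
  if (dhcp_supplied_address(nif)) {
    /* BOUND/RENEWING/REBINDING: the leased address is in netif_ip4_addr(nif) */
    return 1;
  }
  return 0; /* still discovering/requesting */
}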
/** * @file * Address Resolution Protocol module for IP over Ethernet * * Functionally, ARP is divided into two parts. The first maps an IP address * to a physical address when sending a packet, and the second part answers * requests from other machines for our physical address. * * This implementation complies with RFC 826 (Ethernet ARP). It supports * Gratuitious ARP from RFC3220 (IP Mobility Support for IPv4) section 4.6 * if an interface calls etharp_gratuitous(our_netif) upon address change. */ /* * Copyright (c) 2001-2003 Swedish Institute of Computer Science. * Copyright (c) 2003-2004 Leon Woestenberg <leon.woestenberg@axon.tv> * Copyright (c) 2003-2004 Axon Digital Design B.V., The Netherlands. * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY * OF SUCH DAMAGE. * * This file is part of the lwIP TCP/IP stack. * */ #include "lwip/opt.h" #if LWIP_ARP || LWIP_ETHERNET #include "lwip/etharp.h" #include "lwip/stats.h" #include "lwip/snmp.h" #include "lwip/dhcp.h" #include "lwip/autoip.h" #include "netif/ethernet.h" #include <string.h> #ifdef LWIP_HOOK_FILENAME #include LWIP_HOOK_FILENAME #endif #if LWIP_IPV4 && LWIP_ARP /* don't build if not configured for use in lwipopts.h */ /** Re-request a used ARP entry 1 minute before it would expire to prevent * breaking a steadily used connection because the ARP entry timed out. */ #define ARP_AGE_REREQUEST_USED_UNICAST (ARP_MAXAGE - 30) #define ARP_AGE_REREQUEST_USED_BROADCAST (ARP_MAXAGE - 15) /** the time an ARP entry stays pending after first request, * for ARP_TMR_INTERVAL = 1000, this is * 10 seconds. * * @internal Keep this number at least 2, otherwise it might * run out instantly if the timeout occurs directly after a request. */ #define ARP_MAXPENDING 5 /** ARP states */ enum etharp_state { ETHARP_STATE_EMPTY = 0, ETHARP_STATE_PENDING, ETHARP_STATE_STABLE, ETHARP_STATE_STABLE_REREQUESTING_1, ETHARP_STATE_STABLE_REREQUESTING_2 #if ETHARP_SUPPORT_STATIC_ENTRIES ,ETHARP_STATE_STATIC #endif /* ETHARP_SUPPORT_STATIC_ENTRIES */ }; struct etharp_entry { #if ARP_QUEUEING /** Pointer to queue of pending outgoing packets on this ARP entry. 
*/ struct etharp_q_entry *q; #else /* ARP_QUEUEING */ /** Pointer to a single pending outgoing packet on this ARP entry. */ struct pbuf *q; #endif /* ARP_QUEUEING */ ip4_addr_t ipaddr; struct netif *netif; struct eth_addr ethaddr; u16_t ctime; u8_t state; }; static struct etharp_entry arp_table[ARP_TABLE_SIZE]; #if !LWIP_NETIF_HWADDRHINT static u8_t etharp_cached_entry; #endif /* !LWIP_NETIF_HWADDRHINT */ /** Try hard to create a new entry - we want the IP address to appear in the cache (even if this means removing an active entry or so). */ #define ETHARP_FLAG_TRY_HARD 1 #define ETHARP_FLAG_FIND_ONLY 2 #if ETHARP_SUPPORT_STATIC_ENTRIES #define ETHARP_FLAG_STATIC_ENTRY 4 #endif /* ETHARP_SUPPORT_STATIC_ENTRIES */ #if LWIP_NETIF_HWADDRHINT #define ETHARP_SET_HINT(netif, hint) if (((netif) != NULL) && ((netif)->addr_hint != NULL)) \ *((netif)->addr_hint) = (hint); #else /* LWIP_NETIF_HWADDRHINT */ #define ETHARP_SET_HINT(netif, hint) (etharp_cached_entry = (hint)) #endif /* LWIP_NETIF_HWADDRHINT */ /* Some checks, instead of etharp_init(): */ #if (LWIP_ARP && (ARP_TABLE_SIZE > 0x7f)) #error "ARP_TABLE_SIZE must fit in an s8_t, you have to reduce it in your lwipopts.h" #endif static err_t etharp_request_dst(struct netif *netif, const ip4_addr_t *ipaddr, const struct eth_addr* hw_dst_addr); static err_t etharp_raw(struct netif *netif, const struct eth_addr *ethsrc_addr, const struct eth_addr *ethdst_addr, const struct eth_addr *hwsrc_addr, const ip4_addr_t *ipsrc_addr, const struct eth_addr *hwdst_addr, const ip4_addr_t *ipdst_addr, const u16_t opcode); #if ARP_QUEUEING /** * Free a complete queue of etharp entries * * @param q a qeueue of etharp_q_entry's to free */ static void free_etharp_q(struct etharp_q_entry *q) { struct etharp_q_entry *r; LWIP_ASSERT("q != NULL", q != NULL); LWIP_ASSERT("q->p != NULL", q->p != NULL); while (q) { r = q; q = q->next; LWIP_ASSERT("r->p != NULL", (r->p != NULL)); pbuf_free(r->p); memp_free(MEMP_ARP_QUEUE, r); } } #else /* ARP_QUEUEING */ /** Compatibility define: free the queued pbuf */ #define free_etharp_q(q) pbuf_free(q) #endif /* ARP_QUEUEING */ /** Clean up ARP table entries */ static void etharp_free_entry(int i) { /* remove from SNMP ARP index tree */ mib2_remove_arp_entry(arp_table[i].netif, &arp_table[i].ipaddr); /* and empty packet queue */ if (arp_table[i].q != NULL) { /* remove all queued packets */ LWIP_DEBUGF(ETHARP_DEBUG, ("etharp_free_entry: freeing entry %"U16_F", packet queue %p.\n", (u16_t)i, (void *)(arp_table[i].q))); free_etharp_q(arp_table[i].q); arp_table[i].q = NULL; } /* recycle entry for re-use */ arp_table[i].state = ETHARP_STATE_EMPTY; #ifdef LWIP_DEBUG /* for debugging, clean out the complete entry */ arp_table[i].ctime = 0; arp_table[i].netif = NULL; ip4_addr_set_zero(&arp_table[i].ipaddr); arp_table[i].ethaddr = ethzero; #endif /* LWIP_DEBUG */ } /** * Clears expired entries in the ARP table. * * This function should be called every ARP_TMR_INTERVAL milliseconds (1 second), * in order to expire entries in the ARP table. 
*/ void etharp_tmr(void) { u8_t i; LWIP_DEBUGF(ETHARP_DEBUG, ("etharp_timer\n")); /* remove expired entries from the ARP table */ for (i = 0; i < ARP_TABLE_SIZE; ++i) { u8_t state = arp_table[i].state; if (state != ETHARP_STATE_EMPTY #if ETHARP_SUPPORT_STATIC_ENTRIES && (state != ETHARP_STATE_STATIC) #endif /* ETHARP_SUPPORT_STATIC_ENTRIES */ ) { arp_table[i].ctime++; if ((arp_table[i].ctime >= ARP_MAXAGE) || ((arp_table[i].state == ETHARP_STATE_PENDING) && (arp_table[i].ctime >= ARP_MAXPENDING))) { /* pending or stable entry has become old! */ LWIP_DEBUGF(ETHARP_DEBUG, ("etharp_timer: expired %s entry %"U16_F".\n", arp_table[i].state >= ETHARP_STATE_STABLE ? "stable" : "pending", (u16_t)i)); /* clean up entries that have just been expired */ etharp_free_entry(i); } else if (arp_table[i].state == ETHARP_STATE_STABLE_REREQUESTING_1) { /* Don't send more than one request every 2 seconds. */ arp_table[i].state = ETHARP_STATE_STABLE_REREQUESTING_2; } else if (arp_table[i].state == ETHARP_STATE_STABLE_REREQUESTING_2) { /* Reset state to stable, so that the next transmitted packet will re-send an ARP request. */ arp_table[i].state = ETHARP_STATE_STABLE; } else if (arp_table[i].state == ETHARP_STATE_PENDING) { /* still pending, resend an ARP query */ etharp_request(arp_table[i].netif, &arp_table[i].ipaddr); } } } } /** * Search the ARP table for a matching or new entry. * * If an IP address is given, return a pending or stable ARP entry that matches * the address. If no match is found, create a new entry with this address set, * but in state ETHARP_EMPTY. The caller must check and possibly change the * state of the returned entry. * * If ipaddr is NULL, return a initialized new entry in state ETHARP_EMPTY. * * In all cases, attempt to create new entries from an empty entry. If no * empty entries are available and ETHARP_FLAG_TRY_HARD flag is set, recycle * old entries. Heuristic choose the least important entry for recycling. * * @param ipaddr IP address to find in ARP cache, or to add if not found. * @param flags See @ref etharp_state * @param netif netif related to this address (used for NETIF_HWADDRHINT) * * @return The ARP entry index that matched or is created, ERR_MEM if no * entry is found or could be recycled. */ static s8_t etharp_find_entry(const ip4_addr_t *ipaddr, u8_t flags, struct netif* netif) { s8_t old_pending = ARP_TABLE_SIZE, old_stable = ARP_TABLE_SIZE; s8_t empty = ARP_TABLE_SIZE; u8_t i = 0; /* oldest entry with packets on queue */ s8_t old_queue = ARP_TABLE_SIZE; /* its age */ u16_t age_queue = 0, age_pending = 0, age_stable = 0; LWIP_UNUSED_ARG(netif); /** * a) do a search through the cache, remember candidates * b) select candidate entry * c) create new entry */ /* a) in a single search sweep, do all of this * 1) remember the first empty entry (if any) * 2) remember the oldest stable entry (if any) * 3) remember the oldest pending entry without queued packets (if any) * 4) remember the oldest pending entry with queued packets (if any) * 5) search for a matching IP entry, either pending or stable * until 5 matches, or all entries are searched for. */ for (i = 0; i < ARP_TABLE_SIZE; ++i) { u8_t state = arp_table[i].state; /* no empty entry found yet and now we do find one? 
*/ if ((empty == ARP_TABLE_SIZE) && (state == ETHARP_STATE_EMPTY)) { LWIP_DEBUGF(ETHARP_DEBUG, ("etharp_find_entry: found empty entry %"U16_F"\n", (u16_t)i)); /* remember first empty entry */ empty = i; } else if (state != ETHARP_STATE_EMPTY) { LWIP_ASSERT("state == ETHARP_STATE_PENDING || state >= ETHARP_STATE_STABLE", state == ETHARP_STATE_PENDING || state >= ETHARP_STATE_STABLE); /* if given, does IP address match IP address in ARP entry? */ if (ipaddr && ip4_addr_cmp(ipaddr, &arp_table[i].ipaddr) #if ETHARP_TABLE_MATCH_NETIF && ((netif == NULL) || (netif == arp_table[i].netif)) #endif /* ETHARP_TABLE_MATCH_NETIF */ ) { LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_find_entry: found matching entry %"U16_F"\n", (u16_t)i)); /* found exact IP address match, simply bail out */ return i; } /* pending entry? */ if (state == ETHARP_STATE_PENDING) { /* pending with queued packets? */ if (arp_table[i].q != NULL) { if (arp_table[i].ctime >= age_queue) { old_queue = i; age_queue = arp_table[i].ctime; } } else /* pending without queued packets? */ { if (arp_table[i].ctime >= age_pending) { old_pending = i; age_pending = arp_table[i].ctime; } } /* stable entry? */ } else if (state >= ETHARP_STATE_STABLE) { #if ETHARP_SUPPORT_STATIC_ENTRIES /* don't record old_stable for static entries since they never expire */ if (state < ETHARP_STATE_STATIC) #endif /* ETHARP_SUPPORT_STATIC_ENTRIES */ { /* remember entry with oldest stable entry in oldest, its age in maxtime */ if (arp_table[i].ctime >= age_stable) { old_stable = i; age_stable = arp_table[i].ctime; } } } } } /* { we have no match } => try to create a new entry */ /* don't create new entry, only search? */ if (((flags & ETHARP_FLAG_FIND_ONLY) != 0) || /* or no empty entry found and not allowed to recycle? */ ((empty == ARP_TABLE_SIZE) && ((flags & ETHARP_FLAG_TRY_HARD) == 0))) { LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_find_entry: no empty entry found and not allowed to recycle\n")); return (s8_t)ERR_MEM; } /* b) choose the least destructive entry to recycle: * 1) empty entry * 2) oldest stable entry * 3) oldest pending entry without queued packets * 4) oldest pending entry with queued packets * * { ETHARP_FLAG_TRY_HARD is set at this point } */ /* 1) empty entry available? */ if (empty < ARP_TABLE_SIZE) { i = empty; LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_find_entry: selecting empty entry %"U16_F"\n", (u16_t)i)); } else { /* 2) found recyclable stable entry? */ if (old_stable < ARP_TABLE_SIZE) { /* recycle oldest stable*/ i = old_stable; LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_find_entry: selecting oldest stable entry %"U16_F"\n", (u16_t)i)); /* no queued packets should exist on stable entries */ LWIP_ASSERT("arp_table[i].q == NULL", arp_table[i].q == NULL); /* 3) found recyclable pending entry without queued packets? */ } else if (old_pending < ARP_TABLE_SIZE) { /* recycle oldest pending */ i = old_pending; LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_find_entry: selecting oldest pending entry %"U16_F" (without queue)\n", (u16_t)i)); /* 4) found recyclable pending entry with queued packets? 
*/ } else if (old_queue < ARP_TABLE_SIZE) { /* recycle oldest pending (queued packets are free in etharp_free_entry) */ i = old_queue; LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_find_entry: selecting oldest pending entry %"U16_F", freeing packet queue %p\n", (u16_t)i, (void *)(arp_table[i].q))); /* no empty or recyclable entries found */ } else { LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_find_entry: no empty or recyclable entries found\n")); return (s8_t)ERR_MEM; } /* { empty or recyclable entry found } */ LWIP_ASSERT("i < ARP_TABLE_SIZE", i < ARP_TABLE_SIZE); etharp_free_entry(i); } LWIP_ASSERT("i < ARP_TABLE_SIZE", i < ARP_TABLE_SIZE); LWIP_ASSERT("arp_table[i].state == ETHARP_STATE_EMPTY", arp_table[i].state == ETHARP_STATE_EMPTY); /* IP address given? */ if (ipaddr != NULL) { /* set IP address */ ip4_addr_copy(arp_table[i].ipaddr, *ipaddr); } arp_table[i].ctime = 0; #if ETHARP_TABLE_MATCH_NETIF arp_table[i].netif = netif; #endif /* ETHARP_TABLE_MATCH_NETIF*/ return (err_t)i; } /** * Update (or insert) a IP/MAC address pair in the ARP cache. * * If a pending entry is resolved, any queued packets will be sent * at this point. * * @param netif netif related to this entry (used for NETIF_ADDRHINT) * @param ipaddr IP address of the inserted ARP entry. * @param ethaddr Ethernet address of the inserted ARP entry. * @param flags See @ref etharp_state * * @return * - ERR_OK Successfully updated ARP cache. * - ERR_MEM If we could not add a new ARP entry when ETHARP_FLAG_TRY_HARD was set. * - ERR_ARG Non-unicast address given, those will not appear in ARP cache. * * @see pbuf_free() */ static err_t etharp_update_arp_entry(struct netif *netif, const ip4_addr_t *ipaddr, struct eth_addr *ethaddr, u8_t flags) { s8_t i; LWIP_ASSERT("netif->hwaddr_len == ETH_HWADDR_LEN", netif->hwaddr_len == ETH_HWADDR_LEN); LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_update_arp_entry: %"U16_F".%"U16_F".%"U16_F".%"U16_F" - %02"X16_F":%02"X16_F":%02"X16_F":%02"X16_F":%02"X16_F":%02"X16_F"\n", ip4_addr1_16(ipaddr), ip4_addr2_16(ipaddr), ip4_addr3_16(ipaddr), ip4_addr4_16(ipaddr), (u16_t)ethaddr->addr[0], (u16_t)ethaddr->addr[1], (u16_t)ethaddr->addr[2], (u16_t)ethaddr->addr[3], (u16_t)ethaddr->addr[4], (u16_t)ethaddr->addr[5])); /* non-unicast address? */ if (ip4_addr_isany(ipaddr) || ip4_addr_isbroadcast(ipaddr, netif) || ip4_addr_ismulticast(ipaddr)) { LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_update_arp_entry: will not add non-unicast IP address to ARP cache\n")); return ERR_ARG; } /* find or create ARP entry */ i = etharp_find_entry(ipaddr, flags, netif); /* bail out if no entry could be found */ if (i < 0) { return (err_t)i; } #if ETHARP_SUPPORT_STATIC_ENTRIES if (flags & ETHARP_FLAG_STATIC_ENTRY) { /* record static type */ arp_table[i].state = ETHARP_STATE_STATIC; } else if (arp_table[i].state == ETHARP_STATE_STATIC) { /* found entry is a static type, don't overwrite it */ return ERR_VAL; } else #endif /* ETHARP_SUPPORT_STATIC_ENTRIES */ { /* mark it stable */ arp_table[i].state = ETHARP_STATE_STABLE; } /* record network interface */ arp_table[i].netif = netif; /* insert in SNMP ARP index tree */ mib2_add_arp_entry(netif, &arp_table[i].ipaddr); LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_update_arp_entry: updating stable entry %"S16_F"\n", (s16_t)i)); /* update address */ ETHADDR32_COPY(&arp_table[i].ethaddr, ethaddr); /* reset time stamp */ arp_table[i].ctime = 0; /* this is where we will send out queued packets! 
*/ #if ARP_QUEUEING while (arp_table[i].q != NULL) { struct pbuf *p; /* remember remainder of queue */ struct etharp_q_entry *q = arp_table[i].q; /* pop first item off the queue */ arp_table[i].q = q->next; /* get the packet pointer */ p = q->p; /* now queue entry can be freed */ memp_free(MEMP_ARP_QUEUE, q); #else /* ARP_QUEUEING */ if (arp_table[i].q != NULL) { struct pbuf *p = arp_table[i].q; arp_table[i].q = NULL; #endif /* ARP_QUEUEING */ /* send the queued IP packet */ ethernet_output(netif, p, (struct eth_addr*)(netif->hwaddr), ethaddr, ETHTYPE_IP); /* free the queued IP packet */ pbuf_free(p); } return ERR_OK; } #if ETHARP_SUPPORT_STATIC_ENTRIES /** Add a new static entry to the ARP table. If an entry exists for the * specified IP address, this entry is overwritten. * If packets are queued for the specified IP address, they are sent out. * * @param ipaddr IP address for the new static entry * @param ethaddr ethernet address for the new static entry * @return See return values of etharp_add_static_entry */ err_t etharp_add_static_entry(const ip4_addr_t *ipaddr, struct eth_addr *ethaddr) { struct netif *netif; LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_add_static_entry: %"U16_F".%"U16_F".%"U16_F".%"U16_F" - %02"X16_F":%02"X16_F":%02"X16_F":%02"X16_F":%02"X16_F":%02"X16_F"\n", ip4_addr1_16(ipaddr), ip4_addr2_16(ipaddr), ip4_addr3_16(ipaddr), ip4_addr4_16(ipaddr), (u16_t)ethaddr->addr[0], (u16_t)ethaddr->addr[1], (u16_t)ethaddr->addr[2], (u16_t)ethaddr->addr[3], (u16_t)ethaddr->addr[4], (u16_t)ethaddr->addr[5])); netif = ip4_route(ipaddr); if (netif == NULL) { return ERR_RTE; } return etharp_update_arp_entry(netif, ipaddr, ethaddr, ETHARP_FLAG_TRY_HARD | ETHARP_FLAG_STATIC_ENTRY); } /** Remove a static entry from the ARP table previously added with a call to * etharp_add_static_entry. * * @param ipaddr IP address of the static entry to remove * @return ERR_OK: entry removed * ERR_MEM: entry wasn't found * ERR_ARG: entry wasn't a static entry but a dynamic one */ err_t etharp_remove_static_entry(const ip4_addr_t *ipaddr) { s8_t i; LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_remove_static_entry: %"U16_F".%"U16_F".%"U16_F".%"U16_F"\n", ip4_addr1_16(ipaddr), ip4_addr2_16(ipaddr), ip4_addr3_16(ipaddr), ip4_addr4_16(ipaddr))); /* find or create ARP entry */ i = etharp_find_entry(ipaddr, ETHARP_FLAG_FIND_ONLY, NULL); /* bail out if no entry could be found */ if (i < 0) { return (err_t)i; } if (arp_table[i].state != ETHARP_STATE_STATIC) { /* entry wasn't a static entry, cannot remove it */ return ERR_ARG; } /* entry found, free it */ etharp_free_entry(i); return ERR_OK; } #endif /* ETHARP_SUPPORT_STATIC_ENTRIES */ /** * Remove all ARP table entries of the specified netif. * * @param netif points to a network interface */ void etharp_cleanup_netif(struct netif *netif) { u8_t i; for (i = 0; i < ARP_TABLE_SIZE; ++i) { u8_t state = arp_table[i].state; if ((state != ETHARP_STATE_EMPTY) && (arp_table[i].netif == netif)) { etharp_free_entry(i); } } } /** * Finds (stable) ethernet/IP address pair from ARP table * using interface and IP address index. * @note the addresses in the ARP table are in network order! 
* * @param netif points to interface index * @param ipaddr points to the (network order) IP address index * @param eth_ret points to return pointer * @param ip_ret points to return pointer * @return table index if found, -1 otherwise */ s8_t etharp_find_addr(struct netif *netif, const ip4_addr_t *ipaddr, struct eth_addr **eth_ret, const ip4_addr_t **ip_ret) { s8_t i; LWIP_ASSERT("eth_ret != NULL && ip_ret != NULL", eth_ret != NULL && ip_ret != NULL); LWIP_UNUSED_ARG(netif); i = etharp_find_entry(ipaddr, ETHARP_FLAG_FIND_ONLY, netif); if ((i >= 0) && (arp_table[i].state >= ETHARP_STATE_STABLE)) { *eth_ret = &arp_table[i].ethaddr; *ip_ret = &arp_table[i].ipaddr; return i; } return -1; } /** * Possibility to iterate over stable ARP table entries * * @param i entry number, 0 to ARP_TABLE_SIZE * @param ipaddr return value: IP address * @param netif return value: points to interface * @param eth_ret return value: ETH address * @return 1 on valid index, 0 otherwise */ u8_t etharp_get_entry(u8_t i, ip4_addr_t **ipaddr, struct netif **netif, struct eth_addr **eth_ret) { LWIP_ASSERT("ipaddr != NULL", ipaddr != NULL); LWIP_ASSERT("netif != NULL", netif != NULL); LWIP_ASSERT("eth_ret != NULL", eth_ret != NULL); if((i < ARP_TABLE_SIZE) && (arp_table[i].state >= ETHARP_STATE_STABLE)) { *ipaddr = &arp_table[i].ipaddr; *netif = arp_table[i].netif; *eth_ret = &arp_table[i].ethaddr; return 1; } else { return 0; } } /** * Responds to ARP requests to us. Upon ARP replies to us, add entry to cache * send out queued IP packets. Updates cache with snooped address pairs. * * Should be called for incoming ARP packets. The pbuf in the argument * is freed by this function. * * @param p The ARP packet that arrived on netif. Is freed by this function. * @param netif The lwIP network interface on which the ARP packet pbuf arrived. * * @see pbuf_free() */ void etharp_input(struct pbuf *p, struct netif *netif) { struct etharp_hdr *hdr; /* these are aligned properly, whereas the ARP header fields might not be */ ip4_addr_t sipaddr, dipaddr; u8_t for_us; LWIP_ERROR("netif != NULL", (netif != NULL), return;); hdr = (struct etharp_hdr *)p->payload; /* RFC 826 "Packet Reception": */ if ((hdr->hwtype != PP_HTONS(HWTYPE_ETHERNET)) || (hdr->hwlen != ETH_HWADDR_LEN) || (hdr->protolen != sizeof(ip4_addr_t)) || (hdr->proto != PP_HTONS(ETHTYPE_IP))) { LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_WARNING, ("etharp_input: packet dropped, wrong hw type, hwlen, proto, protolen or ethernet type (%"U16_F"/%"U16_F"/%"U16_F"/%"U16_F")\n", hdr->hwtype, (u16_t)hdr->hwlen, hdr->proto, (u16_t)hdr->protolen)); ETHARP_STATS_INC(etharp.proterr); ETHARP_STATS_INC(etharp.drop); pbuf_free(p); return; } ETHARP_STATS_INC(etharp.recv); #if LWIP_AUTOIP /* We have to check if a host already has configured our random * created link local address and continuously check if there is * a host with this IP-address so we can detect collisions */ autoip_arp_reply(netif, hdr); #endif /* LWIP_AUTOIP */ /* Copy struct ip4_addr2 to aligned ip4_addr, to support compilers without * structure packing (not using structure copy which breaks strict-aliasing rules). */ IPADDR2_COPY(&sipaddr, &hdr->sipaddr); IPADDR2_COPY(&dipaddr, &hdr->dipaddr); /* this interface is not configured? */ if (ip4_addr_isany_val(*netif_ip4_addr(netif))) { for_us = 0; } else { /* ARP packet directed to us? */ for_us = (u8_t)ip4_addr_cmp(&dipaddr, netif_ip4_addr(netif)); } /* ARP message directed to us? 
-> add IP address in ARP cache; assume requester wants to talk to us, can result in directly sending the queued packets for this host. ARP message not directed to us? -> update the source IP address in the cache, if present */ etharp_update_arp_entry(netif, &sipaddr, &(hdr->shwaddr), for_us ? ETHARP_FLAG_TRY_HARD : ETHARP_FLAG_FIND_ONLY); /* now act on the message itself */ switch (hdr->opcode) { /* ARP request? */ case PP_HTONS(ARP_REQUEST): /* ARP request. If it asked for our address, we send out a * reply. In any case, we time-stamp any existing ARP entry, * and possibly send out an IP packet that was queued on it. */ LWIP_DEBUGF (ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_input: incoming ARP request\n")); /* ARP request for our address? */ if (for_us) { /* send ARP response */ etharp_raw(netif, (struct eth_addr *)netif->hwaddr, &hdr->shwaddr, (struct eth_addr *)netif->hwaddr, netif_ip4_addr(netif), &hdr->shwaddr, &sipaddr, ARP_REPLY); /* we are not configured? */ } else if (ip4_addr_isany_val(*netif_ip4_addr(netif))) { /* { for_us == 0 and netif->ip_addr.addr == 0 } */ LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_input: we are unconfigured, ARP request ignored.\n")); /* request was not directed to us */ } else { /* { for_us == 0 and netif->ip_addr.addr != 0 } */ LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_input: ARP request was not for us.\n")); } break; case PP_HTONS(ARP_REPLY): /* ARP reply. We already updated the ARP cache earlier. */ LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_input: incoming ARP reply\n")); #if (LWIP_DHCP && DHCP_DOES_ARP_CHECK) /* DHCP wants to know about ARP replies from any host with an * IP address also offered to us by the DHCP server. We do not * want to take a duplicate IP address on a single network. * @todo How should we handle redundant (fail-over) interfaces? */ dhcp_arp_reply(netif, &sipaddr); #endif /* (LWIP_DHCP && DHCP_DOES_ARP_CHECK) */ break; default: LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_input: ARP unknown opcode type %"S16_F"\n", lwip_htons(hdr->opcode))); ETHARP_STATS_INC(etharp.err); break; } /* free ARP packet */ pbuf_free(p); } /** Just a small helper function that sends a pbuf to an ethernet address * in the arp_table specified by the index 'arp_idx'. */ static err_t etharp_output_to_arp_index(struct netif *netif, struct pbuf *q, u8_t arp_idx) { LWIP_ASSERT("arp_table[arp_idx].state >= ETHARP_STATE_STABLE", arp_table[arp_idx].state >= ETHARP_STATE_STABLE); /* if arp table entry is about to expire: re-request it, but only if its state is ETHARP_STATE_STABLE to prevent flooding the network with ARP requests if this address is used frequently. */ if (arp_table[arp_idx].state == ETHARP_STATE_STABLE) { if (arp_table[arp_idx].ctime >= ARP_AGE_REREQUEST_USED_BROADCAST) { /* issue a standard request using broadcast */ if (etharp_request(netif, &arp_table[arp_idx].ipaddr) == ERR_OK) { arp_table[arp_idx].state = ETHARP_STATE_STABLE_REREQUESTING_1; } } else if (arp_table[arp_idx].ctime >= ARP_AGE_REREQUEST_USED_UNICAST) { /* issue a unicast request (for 15 seconds) to prevent unnecessary broadcast */ if (etharp_request_dst(netif, &arp_table[arp_idx].ipaddr, &arp_table[arp_idx].ethaddr) == ERR_OK) { arp_table[arp_idx].state = ETHARP_STATE_STABLE_REREQUESTING_1; } } } return ethernet_output(netif, q, (struct eth_addr*)(netif->hwaddr), &arp_table[arp_idx].ethaddr, ETHTYPE_IP); } /** * Resolve and fill-in Ethernet address header for outgoing IP packet. 
* * For IP multicast and broadcast, corresponding Ethernet addresses * are selected and the packet is transmitted on the link. * * For unicast addresses, the packet is submitted to etharp_query(). In * case the IP address is outside the local network, the IP address of * the gateway is used. * * @param netif The lwIP network interface which the IP packet will be sent on. * @param q The pbuf(s) containing the IP packet to be sent. * @param ipaddr The IP address of the packet destination. * * @return * - ERR_RTE No route to destination (no gateway to external networks), * or the return type of either etharp_query() or ethernet_output(). */ err_t etharp_output(struct netif *netif, struct pbuf *q, const ip4_addr_t *ipaddr) { const struct eth_addr *dest; struct eth_addr mcastaddr; const ip4_addr_t *dst_addr = ipaddr; LWIP_ASSERT("netif != NULL", netif != NULL); LWIP_ASSERT("q != NULL", q != NULL); LWIP_ASSERT("ipaddr != NULL", ipaddr != NULL); /* Determine on destination hardware address. Broadcasts and multicasts * are special, other IP addresses are looked up in the ARP table. */ /* broadcast destination IP address? */ if (ip4_addr_isbroadcast(ipaddr, netif)) { /* broadcast on Ethernet also */ dest = (const struct eth_addr *)&ethbroadcast; /* multicast destination IP address? */ } else if (ip4_addr_ismulticast(ipaddr)) { /* Hash IP multicast address to MAC address.*/ mcastaddr.addr[0] = LL_IP4_MULTICAST_ADDR_0; mcastaddr.addr[1] = LL_IP4_MULTICAST_ADDR_1; mcastaddr.addr[2] = LL_IP4_MULTICAST_ADDR_2; mcastaddr.addr[3] = ip4_addr2(ipaddr) & 0x7f; mcastaddr.addr[4] = ip4_addr3(ipaddr); mcastaddr.addr[5] = ip4_addr4(ipaddr); /* destination Ethernet address is multicast */ dest = &mcastaddr; /* unicast destination IP address? */ } else { s8_t i; /* outside local network? if so, this can neither be a global broadcast nor a subnet broadcast. */ if (!ip4_addr_netcmp(ipaddr, netif_ip4_addr(netif), netif_ip4_netmask(netif)) && !ip4_addr_islinklocal(ipaddr)) { #if LWIP_AUTOIP struct ip_hdr *iphdr = LWIP_ALIGNMENT_CAST(struct ip_hdr*, q->payload); /* According to RFC 3297, chapter 2.6.2 (Forwarding Rules), a packet with a link-local source address must always be "directly to its destination on the same physical link. The host MUST NOT send the packet to any router for forwarding". */ if (!ip4_addr_islinklocal(&iphdr->src)) #endif /* LWIP_AUTOIP */ { #ifdef LWIP_HOOK_ETHARP_GET_GW /* For advanced routing, a single default gateway might not be enough, so get the IP address of the gateway to handle the current destination address. */ dst_addr = LWIP_HOOK_ETHARP_GET_GW(netif, ipaddr); if (dst_addr == NULL) #endif /* LWIP_HOOK_ETHARP_GET_GW */ { /* interface has default gateway? */ if (!ip4_addr_isany_val(*netif_ip4_gw(netif))) { /* send to hardware address of default gateway IP address */ dst_addr = netif_ip4_gw(netif); /* no default gateway available */ } else { /* no route to destination error (default gateway missing) */ return ERR_RTE; } } } } #if LWIP_NETIF_HWADDRHINT if (netif->addr_hint != NULL) { /* per-pcb cached entry was given */ u8_t etharp_cached_entry = *(netif->addr_hint); if (etharp_cached_entry < ARP_TABLE_SIZE) { #endif /* LWIP_NETIF_HWADDRHINT */ if ((arp_table[etharp_cached_entry].state >= ETHARP_STATE_STABLE) && #if ETHARP_TABLE_MATCH_NETIF (arp_table[etharp_cached_entry].netif == netif) && #endif (ip4_addr_cmp(dst_addr, &arp_table[etharp_cached_entry].ipaddr))) { /* the per-pcb-cached entry is stable and the right one! 
*/ ETHARP_STATS_INC(etharp.cachehit); return etharp_output_to_arp_index(netif, q, etharp_cached_entry); } #if LWIP_NETIF_HWADDRHINT } } #endif /* LWIP_NETIF_HWADDRHINT */ /* find stable entry: do this here since this is a critical path for throughput and etharp_find_entry() is kind of slow */ for (i = 0; i < ARP_TABLE_SIZE; i++) { if ((arp_table[i].state >= ETHARP_STATE_STABLE) && #if ETHARP_TABLE_MATCH_NETIF (arp_table[i].netif == netif) && #endif (ip4_addr_cmp(dst_addr, &arp_table[i].ipaddr))) { /* found an existing, stable entry */ ETHARP_SET_HINT(netif, i); return etharp_output_to_arp_index(netif, q, i); } } /* no stable entry found, use the (slower) query function: queue on destination Ethernet address belonging to ipaddr */ return etharp_query(netif, dst_addr, q); } /* continuation for multicast/broadcast destinations */ /* obtain source Ethernet address of the given interface */ /* send packet directly on the link */ return ethernet_output(netif, q, (struct eth_addr*)(netif->hwaddr), dest, ETHTYPE_IP); } /** * Send an ARP request for the given IP address and/or queue a packet. * * If the IP address was not yet in the cache, a pending ARP cache entry * is added and an ARP request is sent for the given address. The packet * is queued on this entry. * * If the IP address was already pending in the cache, a new ARP request * is sent for the given address. The packet is queued on this entry. * * If the IP address was already stable in the cache, and a packet is * given, it is directly sent and no ARP request is sent out. * * If the IP address was already stable in the cache, and no packet is * given, an ARP request is sent out. * * @param netif The lwIP network interface on which ipaddr * must be queried for. * @param ipaddr The IP address to be resolved. * @param q If non-NULL, a pbuf that must be delivered to the IP address. * q is not freed by this function. * * @note q must only be ONE packet, not a packet queue! * * @return * - ERR_BUF Could not make room for Ethernet header. * - ERR_MEM Hardware address unknown, and no more ARP entries available * to query for address or queue the packet. * - ERR_MEM Could not queue packet due to memory shortage. * - ERR_RTE No route to destination (no gateway to external networks). * - ERR_ARG Non-unicast address given, those will not appear in ARP cache. * */ err_t etharp_query(struct netif *netif, const ip4_addr_t *ipaddr, struct pbuf *q) { struct eth_addr * srcaddr = (struct eth_addr *)netif->hwaddr; err_t result = ERR_MEM; int is_new_entry = 0; s8_t i; /* ARP entry index */ /* non-unicast address? */ if (ip4_addr_isbroadcast(ipaddr, netif) || ip4_addr_ismulticast(ipaddr) || ip4_addr_isany(ipaddr)) { LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_query: will not add non-unicast IP address to ARP cache\n")); return ERR_ARG; } /* find entry in ARP cache, ask to create entry if queueing packet */ i = etharp_find_entry(ipaddr, ETHARP_FLAG_TRY_HARD, netif); /* could not find or create entry? 
*/ if (i < 0) { LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_query: could not create ARP entry\n")); if (q) { LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_query: packet dropped\n")); ETHARP_STATS_INC(etharp.memerr); } return (err_t)i; } /* mark a fresh entry as pending (we just sent a request) */ if (arp_table[i].state == ETHARP_STATE_EMPTY) { is_new_entry = 1; arp_table[i].state = ETHARP_STATE_PENDING; /* record network interface for re-sending arp request in etharp_tmr */ arp_table[i].netif = netif; } /* { i is either a STABLE or (new or existing) PENDING entry } */ LWIP_ASSERT("arp_table[i].state == PENDING or STABLE", ((arp_table[i].state == ETHARP_STATE_PENDING) || (arp_table[i].state >= ETHARP_STATE_STABLE))); /* do we have a new entry? or an implicit query request? */ if (is_new_entry || (q == NULL)) { /* try to resolve it; send out ARP request */ result = etharp_request(netif, ipaddr); if (result != ERR_OK) { /* ARP request couldn't be sent */ /* We don't re-send arp request in etharp_tmr, but we still queue packets, since this failure could be temporary, and the next packet calling etharp_query again could lead to sending the queued packets. */ } if (q == NULL) { return result; } } /* packet given? */ LWIP_ASSERT("q != NULL", q != NULL); /* stable entry? */ if (arp_table[i].state >= ETHARP_STATE_STABLE) { /* we have a valid IP->Ethernet address mapping */ ETHARP_SET_HINT(netif, i); /* send the packet */ result = ethernet_output(netif, q, srcaddr, &(arp_table[i].ethaddr), ETHTYPE_IP); /* pending entry? (either just created or already pending */ } else if (arp_table[i].state == ETHARP_STATE_PENDING) { /* entry is still pending, queue the given packet 'q' */ struct pbuf *p; int copy_needed = 0; /* IF q includes a PBUF_REF, PBUF_POOL or PBUF_RAM, we have no choice but * to copy the whole queue into a new PBUF_RAM (see bug #11400) * PBUF_ROMs can be left as they are, since ROM must not get changed. */ p = q; while (p) { LWIP_ASSERT("no packet queues allowed!", (p->len != p->tot_len) || (p->next == 0)); if (p->type != PBUF_ROM) { copy_needed = 1; break; } p = p->next; } if (copy_needed) { /* copy the whole packet into new pbufs */ p = pbuf_alloc(PBUF_LINK, p->tot_len, PBUF_RAM); if (p != NULL) { if (pbuf_copy(p, q) != ERR_OK) { pbuf_free(p); p = NULL; } } } else { /* referencing the old pbuf is enough */ p = q; pbuf_ref(p); } /* packet could be taken over? */ if (p != NULL) { /* queue packet ... 
*/ #if ARP_QUEUEING struct etharp_q_entry *new_entry; /* allocate a new arp queue entry */ new_entry = (struct etharp_q_entry *)memp_malloc(MEMP_ARP_QUEUE); if (new_entry != NULL) { unsigned int qlen = 0; new_entry->next = 0; new_entry->p = p; if (arp_table[i].q != NULL) { /* queue was already existent, append the new entry to the end */ struct etharp_q_entry *r; r = arp_table[i].q; qlen++; while (r->next != NULL) { r = r->next; qlen++; } r->next = new_entry; } else { /* queue did not exist, first item in queue */ arp_table[i].q = new_entry; } #if ARP_QUEUE_LEN if (qlen >= ARP_QUEUE_LEN) { struct etharp_q_entry *old; old = arp_table[i].q; arp_table[i].q = arp_table[i].q->next; pbuf_free(old->p); memp_free(MEMP_ARP_QUEUE, old); } #endif LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_query: queued packet %p on ARP entry %"S16_F"\n", (void *)q, (s16_t)i)); result = ERR_OK; } else { /* the pool MEMP_ARP_QUEUE is empty */ pbuf_free(p); LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_query: could not queue a copy of PBUF_REF packet %p (out of memory)\n", (void *)q)); result = ERR_MEM; } #else /* ARP_QUEUEING */ /* always queue one packet per ARP request only, freeing a previously queued packet */ if (arp_table[i].q != NULL) { LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_query: dropped previously queued packet %p for ARP entry %"S16_F"\n", (void *)q, (s16_t)i)); pbuf_free(arp_table[i].q); } arp_table[i].q = p; result = ERR_OK; LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_query: queued packet %p on ARP entry %"S16_F"\n", (void *)q, (s16_t)i)); #endif /* ARP_QUEUEING */ } else { ETHARP_STATS_INC(etharp.memerr); LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_query: could not queue a copy of PBUF_REF packet %p (out of memory)\n", (void *)q)); result = ERR_MEM; } } return result; } /** * Send a raw ARP packet (opcode and all addresses can be modified) * * @param netif the lwip network interface on which to send the ARP packet * @param ethsrc_addr the source MAC address for the ethernet header * @param ethdst_addr the destination MAC address for the ethernet header * @param hwsrc_addr the source MAC address for the ARP protocol header * @param ipsrc_addr the source IP address for the ARP protocol header * @param hwdst_addr the destination MAC address for the ARP protocol header * @param ipdst_addr the destination IP address for the ARP protocol header * @param opcode the type of the ARP packet * @return ERR_OK if the ARP packet has been sent * ERR_MEM if the ARP packet couldn't be allocated * any other err_t on failure */ static err_t etharp_raw(struct netif *netif, const struct eth_addr *ethsrc_addr, const struct eth_addr *ethdst_addr, const struct eth_addr *hwsrc_addr, const ip4_addr_t *ipsrc_addr, const struct eth_addr *hwdst_addr, const ip4_addr_t *ipdst_addr, const u16_t opcode) { struct pbuf *p; err_t result = ERR_OK; struct etharp_hdr *hdr; LWIP_ASSERT("netif != NULL", netif != NULL); /* allocate a pbuf for the outgoing ARP request packet */ p = pbuf_alloc(PBUF_LINK, SIZEOF_ETHARP_HDR, PBUF_RAM); /* could allocate a pbuf for an ARP request? 
*/ if (p == NULL) { LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, ("etharp_raw: could not allocate pbuf for ARP request.\n")); ETHARP_STATS_INC(etharp.memerr); return ERR_MEM; } LWIP_ASSERT("check that first pbuf can hold struct etharp_hdr", (p->len >= SIZEOF_ETHARP_HDR)); hdr = (struct etharp_hdr *)p->payload; LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_raw: sending raw ARP packet.\n")); hdr->opcode = lwip_htons(opcode); LWIP_ASSERT("netif->hwaddr_len must be the same as ETH_HWADDR_LEN for etharp!", (netif->hwaddr_len == ETH_HWADDR_LEN)); /* Write the ARP MAC-Addresses */ ETHADDR16_COPY(&hdr->shwaddr, hwsrc_addr); ETHADDR16_COPY(&hdr->dhwaddr, hwdst_addr); /* Copy struct ip4_addr2 to aligned ip4_addr, to support compilers without * structure packing. */ IPADDR2_COPY(&hdr->sipaddr, ipsrc_addr); IPADDR2_COPY(&hdr->dipaddr, ipdst_addr); hdr->hwtype = PP_HTONS(HWTYPE_ETHERNET); hdr->proto = PP_HTONS(ETHTYPE_IP); /* set hwlen and protolen */ hdr->hwlen = ETH_HWADDR_LEN; hdr->protolen = sizeof(ip4_addr_t); /* send ARP query */ #if LWIP_AUTOIP /* If we are using Link-Local, all ARP packets that contain a Link-Local * 'sender IP address' MUST be sent using link-layer broadcast instead of * link-layer unicast. (See RFC3927 Section 2.5, last paragraph) */ if(ip4_addr_islinklocal(ipsrc_addr)) { ethernet_output(netif, p, ethsrc_addr, &ethbroadcast, ETHTYPE_ARP); } else #endif /* LWIP_AUTOIP */ { ethernet_output(netif, p, ethsrc_addr, ethdst_addr, ETHTYPE_ARP); } ETHARP_STATS_INC(etharp.xmit); /* free ARP query packet */ pbuf_free(p); p = NULL; /* could not allocate pbuf for ARP request */ return result; } /** * Send an ARP request packet asking for ipaddr to a specific eth address. * Used to send unicast request to refresh the ARP table just before an entry * times out * * @param netif the lwip network interface on which to send the request * @param ipaddr the IP address for which to ask * @param hw_dst_addr the ethernet address to send this packet to * @return ERR_OK if the request has been sent * ERR_MEM if the ARP packet couldn't be allocated * any other err_t on failure */ static err_t etharp_request_dst(struct netif *netif, const ip4_addr_t *ipaddr, const struct eth_addr* hw_dst_addr) { return etharp_raw(netif, (struct eth_addr *)netif->hwaddr, hw_dst_addr, (struct eth_addr *)netif->hwaddr, netif_ip4_addr(netif), &ethzero, ipaddr, ARP_REQUEST); } /** * Send an ARP request packet asking for ipaddr. * * @param netif the lwip network interface on which to send the request * @param ipaddr the IP address for which to ask * @return ERR_OK if the request has been sent * ERR_MEM if the ARP packet couldn't be allocated * any other err_t on failure */ err_t etharp_request(struct netif *netif, const ip4_addr_t *ipaddr) { LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("etharp_request: sending ARP request.\n")); return etharp_request_dst(netif, ipaddr, &ethbroadcast); } #endif /* LWIP_IPV4 && LWIP_ARP */ #endif /* LWIP_ARP || LWIP_ETHERNET */
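#if 0 /* Illustrative sketch, not part of the lwIP sources; kept disabled so it cannot affect the build. */
/*
 * Hypothetical usage sketch for the static-entry API documented above:
 * pinning a gateway's MAC address and removing the pin again. Requires
 * ETHARP_SUPPORT_STATIC_ENTRIES. The function names and the address values
 * are assumptions made for this sketch only.
 */
static err_t
example_pin_gateway_mac(void)
{
  ip4_addr_t gw_ip;
  struct eth_addr gw_mac = {{0x00, 0x11, 0x22, 0x33, 0x44, 0x55}};

  IP4_ADDR(&gw_ip, 192, 168, 1, 1);
  /* Overwrites any existing entry for this IP, sends any queued packets,
     and returns ERR_RTE if no netif routes to the address. */
  return etharp_add_static_entry(&gw_ip, &gw_mac);
}

static err_t
example_unpin_gateway_mac(void)
{
  ip4_addr_t gw_ip;

  IP4_ADDR(&gw_ip, 192, 168, 1, 1);
  /* Returns ERR_MEM if no entry exists, ERR_ARG if the entry is dynamic. */
  return etharp_remove_static_entry(&gw_ip);
}
#endif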
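#if 0 /* Illustrative sketch, not part of the lwIP sources; kept disabled so it cannot affect the build. */
/*
 * Hypothetical usage sketch for etharp_query()/etharp_find_addr() as
 * documented above: pre-resolving a peer's MAC address without queueing a
 * packet. The function name and the suggestion to re-poll from a
 * sys_timeout() callback are assumptions; like the rest of this file, the
 * calls are meant to run in the tcpip_thread context.
 */
static err_t
example_prefetch_arp_entry(struct netif *netif, const ip4_addr_t *peer)
{
  struct eth_addr *eth_ret;
  const ip4_addr_t *ip_ret;

  /* Already stable in the cache? Nothing to do. */
  if (etharp_find_addr(netif, peer, &eth_ret, &ip_ret) >= 0) {
    return ERR_OK;
  }
  /* q == NULL: send an ARP request and create a PENDING entry; the entry
     becomes STABLE once etharp_input() processes the reply, after which
     etharp_find_addr() (polled again later) reports the mapping. */
  return etharp_query(netif, peer, NULL);
}
#endif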
2301_81045437/classic-platform
communication/lwip-2.0.3/src/core/ipv4/etharp.c
C
unknown
46,593
/** * @file * ICMP - Internet Control Message Protocol * */ /* * Copyright (c) 2001-2004 Swedish Institute of Computer Science. * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY * OF SUCH DAMAGE. * * This file is part of the lwIP TCP/IP stack. * * Author: Adam Dunkels <adam@sics.se> * */ /* Some ICMP messages should be passed to the transport protocols. This is not implemented. */ #include "lwip/opt.h" #if LWIP_IPV4 && LWIP_ICMP /* don't build if not configured for use in lwipopts.h */ #include "lwip/icmp.h" #include "lwip/inet_chksum.h" #include "lwip/ip.h" #include "lwip/def.h" #include "lwip/stats.h" #include <string.h> #ifdef LWIP_HOOK_FILENAME #include LWIP_HOOK_FILENAME #endif /** Small optimization: set to 0 if incoming PBUF_POOL pbuf always can be * used to modify and send a response packet (and to 1 if this is not the case, * e.g. when link header is stripped of when receiving) */ #ifndef LWIP_ICMP_ECHO_CHECK_INPUT_PBUF_LEN #define LWIP_ICMP_ECHO_CHECK_INPUT_PBUF_LEN 1 #endif /* LWIP_ICMP_ECHO_CHECK_INPUT_PBUF_LEN */ /* The amount of data from the original packet to return in a dest-unreachable */ #define ICMP_DEST_UNREACH_DATASIZE 8 static void icmp_send_response(struct pbuf *p, u8_t type, u8_t code); /** * Processes ICMP input packets, called from ip_input(). * * Currently only processes icmp echo requests and sends * out the echo response. 
* * @param p the icmp echo request packet, p->payload pointing to the icmp header * @param inp the netif on which this packet was received */ void icmp_input(struct pbuf *p, struct netif *inp) { u8_t type; #ifdef LWIP_DEBUG u8_t code; #endif /* LWIP_DEBUG */ struct icmp_echo_hdr *iecho; const struct ip_hdr *iphdr_in; u16_t hlen; const ip4_addr_t* src; ICMP_STATS_INC(icmp.recv); MIB2_STATS_INC(mib2.icmpinmsgs); iphdr_in = ip4_current_header(); hlen = IPH_HL(iphdr_in) * 4; if (hlen < IP_HLEN) { LWIP_DEBUGF(ICMP_DEBUG, ("icmp_input: short IP header (%"S16_F" bytes) received\n", hlen)); goto lenerr; } if (p->len < sizeof(u16_t)*2) { LWIP_DEBUGF(ICMP_DEBUG, ("icmp_input: short ICMP (%"U16_F" bytes) received\n", p->tot_len)); goto lenerr; } type = *((u8_t *)p->payload); #ifdef LWIP_DEBUG code = *(((u8_t *)p->payload)+1); #endif /* LWIP_DEBUG */ switch (type) { case ICMP_ER: /* This is OK, echo reply might have been parsed by a raw PCB (as obviously, an echo request has been sent, too). */ MIB2_STATS_INC(mib2.icmpinechoreps); break; case ICMP_ECHO: MIB2_STATS_INC(mib2.icmpinechos); src = ip4_current_dest_addr(); /* multicast destination address? */ if (ip4_addr_ismulticast(ip4_current_dest_addr())) { #if LWIP_MULTICAST_PING /* For multicast, use address of receiving interface as source address */ src = netif_ip4_addr(inp); #else /* LWIP_MULTICAST_PING */ LWIP_DEBUGF(ICMP_DEBUG, ("icmp_input: Not echoing to multicast pings\n")); goto icmperr; #endif /* LWIP_MULTICAST_PING */ } /* broadcast destination address? */ if (ip4_addr_isbroadcast(ip4_current_dest_addr(), ip_current_netif())) { #if LWIP_BROADCAST_PING /* For broadcast, use address of receiving interface as source address */ src = netif_ip4_addr(inp); #else /* LWIP_BROADCAST_PING */ LWIP_DEBUGF(ICMP_DEBUG, ("icmp_input: Not echoing to broadcast pings\n")); goto icmperr; #endif /* LWIP_BROADCAST_PING */ } LWIP_DEBUGF(ICMP_DEBUG, ("icmp_input: ping\n")); if (p->tot_len < sizeof(struct icmp_echo_hdr)) { LWIP_DEBUGF(ICMP_DEBUG, ("icmp_input: bad ICMP echo received\n")); goto lenerr; } #if CHECKSUM_CHECK_ICMP IF__NETIF_CHECKSUM_ENABLED(inp, NETIF_CHECKSUM_CHECK_ICMP) { if (inet_chksum_pbuf(p) != 0) { LWIP_DEBUGF(ICMP_DEBUG, ("icmp_input: checksum failed for received ICMP echo\n")); pbuf_free(p); ICMP_STATS_INC(icmp.chkerr); MIB2_STATS_INC(mib2.icmpinerrors); return; } } #endif #if LWIP_ICMP_ECHO_CHECK_INPUT_PBUF_LEN if (pbuf_header(p, (s16_t)(hlen + PBUF_LINK_HLEN + PBUF_LINK_ENCAPSULATION_HLEN))) { /* p is not big enough to contain link headers * allocate a new one and copy p into it */ struct pbuf *r; /* allocate new packet buffer with space for link headers */ r = pbuf_alloc(PBUF_LINK, p->tot_len + hlen, PBUF_RAM); if (r == NULL) { LWIP_DEBUGF(ICMP_DEBUG, ("icmp_input: allocating new pbuf failed\n")); goto icmperr; } if (r->len < hlen + sizeof(struct icmp_echo_hdr)) { LWIP_DEBUGF(ICMP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("first pbuf cannot hold the ICMP header")); pbuf_free(r); goto icmperr; } /* copy the ip header */ MEMCPY(r->payload, iphdr_in, hlen); /* switch r->payload back to icmp header (cannot fail) */ if (pbuf_header(r, (s16_t)-hlen)) { LWIP_ASSERT("icmp_input: moving r->payload to icmp header failed\n", 0); pbuf_free(r); goto icmperr; } /* copy the rest of the packet without ip header */ if (pbuf_copy(r, p) != ERR_OK) { LWIP_DEBUGF(ICMP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("icmp_input: copying to new pbuf failed")); pbuf_free(r); goto icmperr; } /* free the original p */ pbuf_free(p); /* we now have an identical copy of p that has room for 
link headers */ p = r; } else { /* restore p->payload to point to icmp header (cannot fail) */ if (pbuf_header(p, -(s16_t)(hlen + PBUF_LINK_HLEN + PBUF_LINK_ENCAPSULATION_HLEN))) { LWIP_ASSERT("icmp_input: restoring original p->payload failed\n", 0); goto icmperr; } } #endif /* LWIP_ICMP_ECHO_CHECK_INPUT_PBUF_LEN */ /* At this point, all checks are OK. */ /* We generate an answer by switching the dest and src ip addresses, * setting the icmp type to ECHO_RESPONSE and updating the checksum. */ iecho = (struct icmp_echo_hdr *)p->payload; if (pbuf_header(p, (s16_t)hlen)) { LWIP_DEBUGF(ICMP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("Can't move over header in packet")); } else { err_t ret; struct ip_hdr *iphdr = (struct ip_hdr*)p->payload; ip4_addr_copy(iphdr->src, *src); ip4_addr_copy(iphdr->dest, *ip4_current_src_addr()); ICMPH_TYPE_SET(iecho, ICMP_ER); #if CHECKSUM_GEN_ICMP IF__NETIF_CHECKSUM_ENABLED(inp, NETIF_CHECKSUM_GEN_ICMP) { /* adjust the checksum */ if (iecho->chksum > PP_HTONS(0xffffU - (ICMP_ECHO << 8))) { iecho->chksum += PP_HTONS(ICMP_ECHO << 8) + 1; } else { iecho->chksum += PP_HTONS(ICMP_ECHO << 8); } } #if LWIP_CHECKSUM_CTRL_PER_NETIF else { iecho->chksum = 0; } #endif /* LWIP_CHECKSUM_CTRL_PER_NETIF */ #else /* CHECKSUM_GEN_ICMP */ iecho->chksum = 0; #endif /* CHECKSUM_GEN_ICMP */ /* Set the correct TTL and recalculate the header checksum. */ IPH_TTL_SET(iphdr, ICMP_TTL); IPH_CHKSUM_SET(iphdr, 0); #if CHECKSUM_GEN_IP IF__NETIF_CHECKSUM_ENABLED(inp, NETIF_CHECKSUM_GEN_IP) { IPH_CHKSUM_SET(iphdr, inet_chksum(iphdr, hlen)); } #endif /* CHECKSUM_GEN_IP */ ICMP_STATS_INC(icmp.xmit); /* increase number of messages attempted to send */ MIB2_STATS_INC(mib2.icmpoutmsgs); /* increase number of echo replies attempted to send */ MIB2_STATS_INC(mib2.icmpoutechoreps); /* send an ICMP packet */ ret = ip4_output_if(p, src, LWIP_IP_HDRINCL, ICMP_TTL, 0, IP_PROTO_ICMP, inp); if (ret != ERR_OK) { LWIP_DEBUGF(ICMP_DEBUG, ("icmp_input: ip_output_if returned an error: %s\n", lwip_strerr(ret))); } } break; default: if (type == ICMP_DUR) { MIB2_STATS_INC(mib2.icmpindestunreachs); } else if (type == ICMP_TE) { MIB2_STATS_INC(mib2.icmpintimeexcds); } else if (type == ICMP_PP) { MIB2_STATS_INC(mib2.icmpinparmprobs); } else if (type == ICMP_SQ) { MIB2_STATS_INC(mib2.icmpinsrcquenchs); } else if (type == ICMP_RD) { MIB2_STATS_INC(mib2.icmpinredirects); } else if (type == ICMP_TS) { MIB2_STATS_INC(mib2.icmpintimestamps); } else if (type == ICMP_TSR) { MIB2_STATS_INC(mib2.icmpintimestampreps); } else if (type == ICMP_AM) { MIB2_STATS_INC(mib2.icmpinaddrmasks); } else if (type == ICMP_AMR) { MIB2_STATS_INC(mib2.icmpinaddrmaskreps); } LWIP_DEBUGF(ICMP_DEBUG, ("icmp_input: ICMP type %"S16_F" code %"S16_F" not supported.\n", (s16_t)type, (s16_t)code)); ICMP_STATS_INC(icmp.proterr); ICMP_STATS_INC(icmp.drop); } pbuf_free(p); return; lenerr: pbuf_free(p); ICMP_STATS_INC(icmp.lenerr); MIB2_STATS_INC(mib2.icmpinerrors); return; #if LWIP_ICMP_ECHO_CHECK_INPUT_PBUF_LEN || !LWIP_MULTICAST_PING || !LWIP_BROADCAST_PING icmperr: pbuf_free(p); ICMP_STATS_INC(icmp.err); MIB2_STATS_INC(mib2.icmpinerrors); return; #endif /* LWIP_ICMP_ECHO_CHECK_INPUT_PBUF_LEN || !LWIP_MULTICAST_PING || !LWIP_BROADCAST_PING */ } /** * Send an icmp 'destination unreachable' packet, called from ip_input() if * the transport layer protocol is unknown and from udp_input() if the local * port is not bound. 
* * @param p the input packet for which the 'unreachable' should be sent, * p->payload pointing to the IP header * @param t type of the 'unreachable' packet */ void icmp_dest_unreach(struct pbuf *p, enum icmp_dur_type t) { MIB2_STATS_INC(mib2.icmpoutdestunreachs); icmp_send_response(p, ICMP_DUR, t); } #if IP_FORWARD || IP_REASSEMBLY /** * Send a 'time exceeded' packet, called from ip_forward() if TTL is 0. * * @param p the input packet for which the 'time exceeded' should be sent, * p->payload pointing to the IP header * @param t type of the 'time exceeded' packet */ void icmp_time_exceeded(struct pbuf *p, enum icmp_te_type t) { MIB2_STATS_INC(mib2.icmpouttimeexcds); icmp_send_response(p, ICMP_TE, t); } #endif /* IP_FORWARD || IP_REASSEMBLY */ /** * Send an icmp packet in response to an incoming packet. * * @param p the input packet for which the 'unreachable' should be sent, * p->payload pointing to the IP header * @param type Type of the ICMP header * @param code Code of the ICMP header */ static void icmp_send_response(struct pbuf *p, u8_t type, u8_t code) { struct pbuf *q; struct ip_hdr *iphdr; /* we can use the echo header here */ struct icmp_echo_hdr *icmphdr; ip4_addr_t iphdr_src; struct netif *netif; /* increase number of messages attempted to send */ MIB2_STATS_INC(mib2.icmpoutmsgs); /* ICMP header + IP header + 8 bytes of data */ q = pbuf_alloc(PBUF_IP, sizeof(struct icmp_echo_hdr) + IP_HLEN + ICMP_DEST_UNREACH_DATASIZE, PBUF_RAM); if (q == NULL) { LWIP_DEBUGF(ICMP_DEBUG, ("icmp_time_exceeded: failed to allocate pbuf for ICMP packet.\n")); MIB2_STATS_INC(mib2.icmpouterrors); return; } LWIP_ASSERT("check that first pbuf can hold icmp message", (q->len >= (sizeof(struct icmp_echo_hdr) + IP_HLEN + ICMP_DEST_UNREACH_DATASIZE))); iphdr = (struct ip_hdr *)p->payload; LWIP_DEBUGF(ICMP_DEBUG, ("icmp_time_exceeded from ")); ip4_addr_debug_print_val(ICMP_DEBUG, iphdr->src); LWIP_DEBUGF(ICMP_DEBUG, (" to ")); ip4_addr_debug_print_val(ICMP_DEBUG, iphdr->dest); LWIP_DEBUGF(ICMP_DEBUG, ("\n")); icmphdr = (struct icmp_echo_hdr *)q->payload; icmphdr->type = type; icmphdr->code = code; icmphdr->id = 0; icmphdr->seqno = 0; /* copy fields from original packet */ SMEMCPY((u8_t *)q->payload + sizeof(struct icmp_echo_hdr), (u8_t *)p->payload, IP_HLEN + ICMP_DEST_UNREACH_DATASIZE); ip4_addr_copy(iphdr_src, iphdr->src); #ifdef LWIP_HOOK_IP4_ROUTE_SRC { ip4_addr_t iphdr_dst; ip4_addr_copy(iphdr_dst, iphdr->dest); netif = ip4_route_src(&iphdr_src, &iphdr_dst); } #else netif = ip4_route(&iphdr_src); #endif if (netif != NULL) { /* calculate checksum */ icmphdr->chksum = 0; #if CHECKSUM_GEN_ICMP IF__NETIF_CHECKSUM_ENABLED(netif, NETIF_CHECKSUM_GEN_ICMP) { icmphdr->chksum = inet_chksum(icmphdr, q->len); } #endif ICMP_STATS_INC(icmp.xmit); ip4_output_if(q, NULL, &iphdr_src, ICMP_TTL, 0, IP_PROTO_ICMP, netif); } pbuf_free(q); } #endif /* LWIP_IPV4 && LWIP_ICMP */
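#if 0 /* Illustrative sketch, not part of the lwIP sources; kept disabled so it cannot affect the build. */
/*
 * Sketch of the incremental checksum update used by the echo-reply path in
 * icmp_input() above, shown in isolation on a header that is assumed to
 * already carry a valid checksum. The helper name is an assumption made for
 * this sketch only.
 */
static void
example_patch_echo_checksum(struct icmp_echo_hdr *iecho)
{
  /* The type byte drops from ICMP_ECHO (8) to ICMP_ER (0), so the 16-bit
     word holding type/code decreases by 0x0800 and the one's-complement
     checksum must increase by the same amount, with end-around carry.
     This mirrors the CHECKSUM_GEN_ICMP branch in icmp_input() above. */
  if (iecho->chksum > PP_HTONS(0xffffU - (ICMP_ECHO << 8))) {
    iecho->chksum += PP_HTONS(ICMP_ECHO << 8) + 1;
  } else {
    iecho->chksum += PP_HTONS(ICMP_ECHO << 8);
  }
  ICMPH_TYPE_SET(iecho, ICMP_ER);
  /* Cross-check (not executed here): inet_chksum() over the patched message
     is expected to return 0, which is the same property the receive path
     verifies with inet_chksum_pbuf(). */
}
#endif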
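#if 0 /* Illustrative sketch, not part of the lwIP sources; kept disabled so it cannot affect the build. */
/*
 * Hypothetical sketch of how a transport-layer input routine might use
 * icmp_dest_unreach(), mirroring the udp_input() behaviour described above.
 * The function name and the port_is_bound flag are placeholders for this
 * sketch; ICMP_DUR_PORT comes from the icmp_dur_type enum, and p->payload
 * must point at the IP header when the call is made.
 */
static void
example_reject_unreachable_port(struct pbuf *p, u8_t port_is_bound)
{
  if (!port_is_bound) {
    /* icmp_send_response() copies the offending IP header plus
       ICMP_DEST_UNREACH_DATASIZE (8) data bytes into the reply. */
    icmp_dest_unreach(p, ICMP_DUR_PORT);
  }
  /* icmp_dest_unreach() does not free p; the caller still owns it. */
  pbuf_free(p);
}
#endif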
2301_81045437/classic-platform
communication/lwip-2.0.3/src/core/ipv4/icmp.c
C
unknown
14,176
/** * @file * IGMP - Internet Group Management Protocol * * @defgroup igmp IGMP * @ingroup ip4 * To be called from TCPIP thread */ /* * Copyright (c) 2002 CITEL Technologies Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of CITEL Technologies Ltd nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY CITEL TECHNOLOGIES AND CONTRIBUTORS ``AS IS'' * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL CITEL TECHNOLOGIES OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * This file is a contribution to the lwIP TCP/IP stack. * The Swedish Institute of Computer Science and Adam Dunkels * are specifically granted permission to redistribute this * source code. */ /*------------------------------------------------------------- Note 1) Although the rfc requires V1 AND V2 capability we will only support v2 since now V1 is very old (August 1989) V1 can be added if required a debug print and statistic have been implemented to show this up. ------------------------------------------------------------- ------------------------------------------------------------- Note 2) A query for a specific group address (as opposed to ALLHOSTS) has now been implemented as I am unsure if it is required a debug print and statistic have been implemented to show this up. ------------------------------------------------------------- ------------------------------------------------------------- Note 3) The router alert rfc 2113 is implemented in outgoing packets but not checked rigorously incoming ------------------------------------------------------------- Steve Reynolds ------------------------------------------------------------*/ /*----------------------------------------------------------------------------- * RFC 988 - Host extensions for IP multicasting - V0 * RFC 1054 - Host extensions for IP multicasting - * RFC 1112 - Host extensions for IP multicasting - V1 * RFC 2236 - Internet Group Management Protocol, Version 2 - V2 <- this code is based on this RFC (it's the "de facto" standard) * RFC 3376 - Internet Group Management Protocol, Version 3 - V3 * RFC 4604 - Using Internet Group Management Protocol Version 3... 
- V3+ * RFC 2113 - IP Router Alert Option - *----------------------------------------------------------------------------*/ /*----------------------------------------------------------------------------- * Includes *----------------------------------------------------------------------------*/ #include "lwip/opt.h" #if LWIP_IPV4 && LWIP_IGMP /* don't build if not configured for use in lwipopts.h */ #include "lwip/igmp.h" #include "lwip/debug.h" #include "lwip/def.h" #include "lwip/mem.h" #include "lwip/ip.h" #include "lwip/inet_chksum.h" #include "lwip/netif.h" #include "lwip/stats.h" #include "lwip/prot/igmp.h" #include "string.h" static struct igmp_group *igmp_lookup_group(struct netif *ifp, const ip4_addr_t *addr); static err_t igmp_remove_group(struct netif* netif, struct igmp_group *group); static void igmp_timeout(struct netif *netif, struct igmp_group *group); static void igmp_start_timer(struct igmp_group *group, u8_t max_time); static void igmp_delaying_member(struct igmp_group *group, u8_t maxresp); static err_t igmp_ip_output_if(struct pbuf *p, const ip4_addr_t *src, const ip4_addr_t *dest, struct netif *netif); static void igmp_send(struct netif *netif, struct igmp_group *group, u8_t type); static ip4_addr_t allsystems; static ip4_addr_t allrouters; /** * Initialize the IGMP module */ void igmp_init(void) { LWIP_DEBUGF(IGMP_DEBUG, ("igmp_init: initializing\n")); IP4_ADDR(&allsystems, 224, 0, 0, 1); IP4_ADDR(&allrouters, 224, 0, 0, 2); } /** * Start IGMP processing on interface * * @param netif network interface on which start IGMP processing */ err_t igmp_start(struct netif *netif) { struct igmp_group* group; LWIP_DEBUGF(IGMP_DEBUG, ("igmp_start: starting IGMP processing on if %p\n", (void*)netif)); group = igmp_lookup_group(netif, &allsystems); if (group != NULL) { group->group_state = IGMP_GROUP_IDLE_MEMBER; group->use++; /* Allow the igmp messages at the MAC level */ if (netif->igmp_mac_filter != NULL) { LWIP_DEBUGF(IGMP_DEBUG, ("igmp_start: igmp_mac_filter(ADD ")); ip4_addr_debug_print_val(IGMP_DEBUG, allsystems); LWIP_DEBUGF(IGMP_DEBUG, (") on if %p\n", (void*)netif)); netif->igmp_mac_filter(netif, &allsystems, NETIF_ADD_MAC_FILTER); } return ERR_OK; } return ERR_MEM; } /** * Stop IGMP processing on interface * * @param netif network interface on which stop IGMP processing */ err_t igmp_stop(struct netif *netif) { struct igmp_group *group = netif_igmp_data(netif); netif_set_client_data(netif, LWIP_NETIF_CLIENT_DATA_INDEX_IGMP, NULL); while (group != NULL) { struct igmp_group *next = group->next; /* avoid use-after-free below */ /* disable the group at the MAC level */ if (netif->igmp_mac_filter != NULL) { LWIP_DEBUGF(IGMP_DEBUG, ("igmp_stop: igmp_mac_filter(DEL ")); ip4_addr_debug_print(IGMP_DEBUG, &group->group_address); LWIP_DEBUGF(IGMP_DEBUG, (") on if %p\n", (void*)netif)); netif->igmp_mac_filter(netif, &(group->group_address), NETIF_DEL_MAC_FILTER); } /* free group */ memp_free(MEMP_IGMP_GROUP, group); /* move to "next" */ group = next; } return ERR_OK; } /** * Report IGMP memberships for this interface * * @param netif network interface on which report IGMP memberships */ void igmp_report_groups(struct netif *netif) { struct igmp_group *group = netif_igmp_data(netif); LWIP_DEBUGF(IGMP_DEBUG, ("igmp_report_groups: sending IGMP reports on if %p\n", (void*)netif)); /* Skip the first group in the list, it is always the allsystems group added in igmp_start() */ if(group != NULL) { group = group->next; } while (group != NULL) { igmp_delaying_member(group, 
IGMP_JOIN_DELAYING_MEMBER_TMR); group = group->next; } } /** * Search for a group in the global igmp_group_list * * @param ifp the network interface for which to look * @param addr the group ip address to search for * @return a struct igmp_group* if the group has been found, * NULL if the group wasn't found. */ struct igmp_group * igmp_lookfor_group(struct netif *ifp, const ip4_addr_t *addr) { struct igmp_group *group = netif_igmp_data(ifp); while (group != NULL) { if (ip4_addr_cmp(&(group->group_address), addr)) { return group; } group = group->next; } /* to be clearer, we return NULL here instead of * 'group' (which is also NULL at this point). */ return NULL; } /** * Search for a specific igmp group and create a new one if not found- * * @param ifp the network interface for which to look * @param addr the group ip address to search * @return a struct igmp_group*, * NULL on memory error. */ static struct igmp_group * igmp_lookup_group(struct netif *ifp, const ip4_addr_t *addr) { struct igmp_group *group; struct igmp_group *list_head = netif_igmp_data(ifp); /* Search if the group already exists */ group = igmp_lookfor_group(ifp, addr); if (group != NULL) { /* Group already exists. */ return group; } /* Group doesn't exist yet, create a new one */ group = (struct igmp_group *)memp_malloc(MEMP_IGMP_GROUP); if (group != NULL) { ip4_addr_set(&(group->group_address), addr); group->timer = 0; /* Not running */ group->group_state = IGMP_GROUP_NON_MEMBER; group->last_reporter_flag = 0; group->use = 0; /* Ensure allsystems group is always first in list */ if (list_head == NULL) { /* this is the first entry in linked list */ LWIP_ASSERT("igmp_lookup_group: first group must be allsystems", (ip4_addr_cmp(addr, &allsystems) != 0)); group->next = NULL; netif_set_client_data(ifp, LWIP_NETIF_CLIENT_DATA_INDEX_IGMP, group); } else { /* append _after_ first entry */ LWIP_ASSERT("igmp_lookup_group: all except first group must not be allsystems", (ip4_addr_cmp(addr, &allsystems) == 0)); group->next = list_head->next; list_head->next = group; } } LWIP_DEBUGF(IGMP_DEBUG, ("igmp_lookup_group: %sallocated a new group with address ", (group?"":"impossible to "))); ip4_addr_debug_print(IGMP_DEBUG, addr); LWIP_DEBUGF(IGMP_DEBUG, (" on if %p\n", (void*)ifp)); return group; } /** * Remove a group in the global igmp_group_list, but don't free it yet * * @param group the group to remove from the global igmp_group_list * @return ERR_OK if group was removed from the list, an err_t otherwise */ static err_t igmp_remove_group(struct netif* netif, struct igmp_group *group) { err_t err = ERR_OK; struct igmp_group *tmp_group; /* Skip the first group in the list, it is always the allsystems group added in igmp_start() */ for (tmp_group = netif_igmp_data(netif); tmp_group != NULL; tmp_group = tmp_group->next) { if (tmp_group->next == group) { tmp_group->next = group->next; break; } } /* Group not found in the global igmp_group_list */ if (tmp_group == NULL) { err = ERR_ARG; } return err; } /** * Called from ip_input() if a new IGMP packet is received. 
* * @param p received igmp packet, p->payload pointing to the igmp header * @param inp network interface on which the packet was received * @param dest destination ip address of the igmp packet */ void igmp_input(struct pbuf *p, struct netif *inp, const ip4_addr_t *dest) { struct igmp_msg* igmp; struct igmp_group* group; struct igmp_group* groupref; IGMP_STATS_INC(igmp.recv); /* Note that the length CAN be greater than 8 but only 8 are used - All are included in the checksum */ if (p->len < IGMP_MINLEN) { pbuf_free(p); IGMP_STATS_INC(igmp.lenerr); LWIP_DEBUGF(IGMP_DEBUG, ("igmp_input: length error\n")); return; } LWIP_DEBUGF(IGMP_DEBUG, ("igmp_input: message from ")); ip4_addr_debug_print(IGMP_DEBUG, &(ip4_current_header()->src)); LWIP_DEBUGF(IGMP_DEBUG, (" to address ")); ip4_addr_debug_print(IGMP_DEBUG, &(ip4_current_header()->dest)); LWIP_DEBUGF(IGMP_DEBUG, (" on if %p\n", (void*)inp)); /* Now calculate and check the checksum */ igmp = (struct igmp_msg *)p->payload; if (inet_chksum(igmp, p->len)) { pbuf_free(p); IGMP_STATS_INC(igmp.chkerr); LWIP_DEBUGF(IGMP_DEBUG, ("igmp_input: checksum error\n")); return; } /* Packet is ok so find an existing group */ group = igmp_lookfor_group(inp, dest); /* use the destination IP address of incoming packet */ /* If group can be found or create... */ if (!group) { pbuf_free(p); IGMP_STATS_INC(igmp.drop); LWIP_DEBUGF(IGMP_DEBUG, ("igmp_input: IGMP frame not for us\n")); return; } /* NOW ACT ON THE INCOMING MESSAGE TYPE... */ switch (igmp->igmp_msgtype) { case IGMP_MEMB_QUERY: /* IGMP_MEMB_QUERY to the "all systems" address ? */ if ((ip4_addr_cmp(dest, &allsystems)) && ip4_addr_isany(&igmp->igmp_group_address)) { /* THIS IS THE GENERAL QUERY */ LWIP_DEBUGF(IGMP_DEBUG, ("igmp_input: General IGMP_MEMB_QUERY on \"ALL SYSTEMS\" address (224.0.0.1) [igmp_maxresp=%i]\n", (int)(igmp->igmp_maxresp))); if (igmp->igmp_maxresp == 0) { IGMP_STATS_INC(igmp.rx_v1); LWIP_DEBUGF(IGMP_DEBUG, ("igmp_input: got an all hosts query with time== 0 - this is V1 and not implemented - treat as v2\n")); igmp->igmp_maxresp = IGMP_V1_DELAYING_MEMBER_TMR; } else { IGMP_STATS_INC(igmp.rx_general); } groupref = netif_igmp_data(inp); /* Do not send messages on the all systems group address! */ /* Skip the first group in the list, it is always the allsystems group added in igmp_start() */ if(groupref != NULL) { groupref = groupref->next; } while (groupref) { igmp_delaying_member(groupref, igmp->igmp_maxresp); groupref = groupref->next; } } else { /* IGMP_MEMB_QUERY to a specific group ? 
*/ if (!ip4_addr_isany(&igmp->igmp_group_address)) { LWIP_DEBUGF(IGMP_DEBUG, ("igmp_input: IGMP_MEMB_QUERY to a specific group ")); ip4_addr_debug_print(IGMP_DEBUG, &igmp->igmp_group_address); if (ip4_addr_cmp(dest, &allsystems)) { ip4_addr_t groupaddr; LWIP_DEBUGF(IGMP_DEBUG, (" using \"ALL SYSTEMS\" address (224.0.0.1) [igmp_maxresp=%i]\n", (int)(igmp->igmp_maxresp))); /* we first need to re-look for the group since we used dest last time */ ip4_addr_copy(groupaddr, igmp->igmp_group_address); group = igmp_lookfor_group(inp, &groupaddr); } else { LWIP_DEBUGF(IGMP_DEBUG, (" with the group address as destination [igmp_maxresp=%i]\n", (int)(igmp->igmp_maxresp))); } if (group != NULL) { IGMP_STATS_INC(igmp.rx_group); igmp_delaying_member(group, igmp->igmp_maxresp); } else { IGMP_STATS_INC(igmp.drop); } } else { IGMP_STATS_INC(igmp.proterr); } } break; case IGMP_V2_MEMB_REPORT: LWIP_DEBUGF(IGMP_DEBUG, ("igmp_input: IGMP_V2_MEMB_REPORT\n")); IGMP_STATS_INC(igmp.rx_report); if (group->group_state == IGMP_GROUP_DELAYING_MEMBER) { /* This is on a specific group we have already looked up */ group->timer = 0; /* stopped */ group->group_state = IGMP_GROUP_IDLE_MEMBER; group->last_reporter_flag = 0; } break; default: LWIP_DEBUGF(IGMP_DEBUG, ("igmp_input: unexpected msg %d in state %d on group %p on if %p\n", igmp->igmp_msgtype, group->group_state, (void*)&group, (void*)inp)); IGMP_STATS_INC(igmp.proterr); break; } pbuf_free(p); return; } /** * @ingroup igmp * Join a group on one network interface. * * @param ifaddr ip address of the network interface which should join a new group * @param groupaddr the ip address of the group which to join * @return ERR_OK if group was joined on the netif(s), an err_t otherwise */ err_t igmp_joingroup(const ip4_addr_t *ifaddr, const ip4_addr_t *groupaddr) { err_t err = ERR_VAL; /* no matching interface */ struct netif *netif; /* make sure it is multicast address */ LWIP_ERROR("igmp_joingroup: attempt to join non-multicast address", ip4_addr_ismulticast(groupaddr), return ERR_VAL;); LWIP_ERROR("igmp_joingroup: attempt to join allsystems address", (!ip4_addr_cmp(groupaddr, &allsystems)), return ERR_VAL;); /* loop through netif's */ netif = netif_list; while (netif != NULL) { /* Should we join this interface ? */ if ((netif->flags & NETIF_FLAG_IGMP) && ((ip4_addr_isany(ifaddr) || ip4_addr_cmp(netif_ip4_addr(netif), ifaddr)))) { err = igmp_joingroup_netif(netif, groupaddr); if (err != ERR_OK) { /* Return an error even if some network interfaces are joined */ /** @todo undo any other netif already joined */ return err; } } /* proceed to next network interface */ netif = netif->next; } return err; } /** * @ingroup igmp * Join a group on one network interface. 
* * @param netif the network interface which should join a new group * @param groupaddr the ip address of the group which to join * @return ERR_OK if group was joined on the netif, an err_t otherwise */ err_t igmp_joingroup_netif(struct netif *netif, const ip4_addr_t *groupaddr) { struct igmp_group *group; /* make sure it is multicast address */ LWIP_ERROR("igmp_joingroup_netif: attempt to join non-multicast address", ip4_addr_ismulticast(groupaddr), return ERR_VAL;); LWIP_ERROR("igmp_joingroup_netif: attempt to join allsystems address", (!ip4_addr_cmp(groupaddr, &allsystems)), return ERR_VAL;); /* make sure it is an igmp-enabled netif */ LWIP_ERROR("igmp_joingroup_netif: attempt to join on non-IGMP netif", netif->flags & NETIF_FLAG_IGMP, return ERR_VAL;); /* find group or create a new one if not found */ group = igmp_lookup_group(netif, groupaddr); if (group != NULL) { /* This should create a new group, check the state to make sure */ if (group->group_state != IGMP_GROUP_NON_MEMBER) { LWIP_DEBUGF(IGMP_DEBUG, ("igmp_joingroup_netif: join to group not in state IGMP_GROUP_NON_MEMBER\n")); } else { /* OK - it was new group */ LWIP_DEBUGF(IGMP_DEBUG, ("igmp_joingroup_netif: join to new group: ")); ip4_addr_debug_print(IGMP_DEBUG, groupaddr); LWIP_DEBUGF(IGMP_DEBUG, ("\n")); /* If first use of the group, allow the group at the MAC level */ if ((group->use==0) && (netif->igmp_mac_filter != NULL)) { LWIP_DEBUGF(IGMP_DEBUG, ("igmp_joingroup_netif: igmp_mac_filter(ADD ")); ip4_addr_debug_print(IGMP_DEBUG, groupaddr); LWIP_DEBUGF(IGMP_DEBUG, (") on if %p\n", (void*)netif)); netif->igmp_mac_filter(netif, groupaddr, NETIF_ADD_MAC_FILTER); } IGMP_STATS_INC(igmp.tx_join); igmp_send(netif, group, IGMP_V2_MEMB_REPORT); igmp_start_timer(group, IGMP_JOIN_DELAYING_MEMBER_TMR); /* Need to work out where this timer comes from */ group->group_state = IGMP_GROUP_DELAYING_MEMBER; } /* Increment group use */ group->use++; /* Join on this interface */ return ERR_OK; } else { LWIP_DEBUGF(IGMP_DEBUG, ("igmp_joingroup_netif: Not enough memory to join to group\n")); return ERR_MEM; } } /** * @ingroup igmp * Leave a group on one network interface. * * @param ifaddr ip address of the network interface which should leave a group * @param groupaddr the ip address of the group which to leave * @return ERR_OK if group was left on the netif(s), an err_t otherwise */ err_t igmp_leavegroup(const ip4_addr_t *ifaddr, const ip4_addr_t *groupaddr) { err_t err = ERR_VAL; /* no matching interface */ struct netif *netif; /* make sure it is multicast address */ LWIP_ERROR("igmp_leavegroup: attempt to leave non-multicast address", ip4_addr_ismulticast(groupaddr), return ERR_VAL;); LWIP_ERROR("igmp_leavegroup: attempt to leave allsystems address", (!ip4_addr_cmp(groupaddr, &allsystems)), return ERR_VAL;); /* loop through netif's */ netif = netif_list; while (netif != NULL) { /* Should we leave this interface ? */ if ((netif->flags & NETIF_FLAG_IGMP) && ((ip4_addr_isany(ifaddr) || ip4_addr_cmp(netif_ip4_addr(netif), ifaddr)))) { err_t res = igmp_leavegroup_netif(netif, groupaddr); if (err != ERR_OK) { /* Store this result if we have not yet gotten a success */ err = res; } } /* proceed to next network interface */ netif = netif->next; } return err; } /** * @ingroup igmp * Leave a group on one network interface. 
* * @param netif the network interface which should leave a group * @param groupaddr the ip address of the group which to leave * @return ERR_OK if group was left on the netif, an err_t otherwise */ err_t igmp_leavegroup_netif(struct netif *netif, const ip4_addr_t *groupaddr) { struct igmp_group *group; /* make sure it is multicast address */ LWIP_ERROR("igmp_leavegroup_netif: attempt to leave non-multicast address", ip4_addr_ismulticast(groupaddr), return ERR_VAL;); LWIP_ERROR("igmp_leavegroup_netif: attempt to leave allsystems address", (!ip4_addr_cmp(groupaddr, &allsystems)), return ERR_VAL;); /* make sure it is an igmp-enabled netif */ LWIP_ERROR("igmp_leavegroup_netif: attempt to leave on non-IGMP netif", netif->flags & NETIF_FLAG_IGMP, return ERR_VAL;); /* find group */ group = igmp_lookfor_group(netif, groupaddr); if (group != NULL) { /* Only send a leave if the flag is set according to the state diagram */ LWIP_DEBUGF(IGMP_DEBUG, ("igmp_leavegroup_netif: Leaving group: ")); ip4_addr_debug_print(IGMP_DEBUG, groupaddr); LWIP_DEBUGF(IGMP_DEBUG, ("\n")); /* If there is no other use of the group */ if (group->use <= 1) { /* Remove the group from the list */ igmp_remove_group(netif, group); /* If we are the last reporter for this group */ if (group->last_reporter_flag) { LWIP_DEBUGF(IGMP_DEBUG, ("igmp_leavegroup_netif: sending leaving group\n")); IGMP_STATS_INC(igmp.tx_leave); igmp_send(netif, group, IGMP_LEAVE_GROUP); } /* Disable the group at the MAC level */ if (netif->igmp_mac_filter != NULL) { LWIP_DEBUGF(IGMP_DEBUG, ("igmp_leavegroup_netif: igmp_mac_filter(DEL ")); ip4_addr_debug_print(IGMP_DEBUG, groupaddr); LWIP_DEBUGF(IGMP_DEBUG, (") on if %p\n", (void*)netif)); netif->igmp_mac_filter(netif, groupaddr, NETIF_DEL_MAC_FILTER); } /* Free group struct */ memp_free(MEMP_IGMP_GROUP, group); } else { /* Decrement group use */ group->use--; } return ERR_OK; } else { LWIP_DEBUGF(IGMP_DEBUG, ("igmp_leavegroup_netif: not member of group\n")); return ERR_VAL; } } /** * The igmp timer function (both for NO_SYS=1 and =0) * Should be called every IGMP_TMR_INTERVAL milliseconds (100 ms is default). */ void igmp_tmr(void) { struct netif *netif = netif_list; while (netif != NULL) { struct igmp_group *group = netif_igmp_data(netif); while (group != NULL) { if (group->timer > 0) { group->timer--; if (group->timer == 0) { igmp_timeout(netif, group); } } group = group->next; } netif = netif->next; } } /** * Called if a timeout for one group is reached. * Sends a report for this group. * * @param group an igmp_group for which a timeout is reached */ static void igmp_timeout(struct netif *netif, struct igmp_group *group) { /* If the state is IGMP_GROUP_DELAYING_MEMBER then we send a report for this group (unless it is the allsystems group) */ if ((group->group_state == IGMP_GROUP_DELAYING_MEMBER) && (!(ip4_addr_cmp(&(group->group_address), &allsystems)))) { LWIP_DEBUGF(IGMP_DEBUG, ("igmp_timeout: report membership for group with address ")); ip4_addr_debug_print(IGMP_DEBUG, &(group->group_address)); LWIP_DEBUGF(IGMP_DEBUG, (" on if %p\n", (void*)netif)); group->group_state = IGMP_GROUP_IDLE_MEMBER; IGMP_STATS_INC(igmp.tx_report); igmp_send(netif, group, IGMP_V2_MEMB_REPORT); } } /** * Start a timer for an igmp group * * @param group the igmp_group for which to start a timer * @param max_time the time in multiples of IGMP_TMR_INTERVAL (decrease with * every call to igmp_tmr()) */ static void igmp_start_timer(struct igmp_group *group, u8_t max_time) { #ifdef LWIP_RAND group->timer = max_time > 2 ? 
(LWIP_RAND() % max_time) : 1; #else /* LWIP_RAND */ /* ATTENTION: use this only if absolutely necessary! */ group->timer = max_time / 2; #endif /* LWIP_RAND */ if (group->timer == 0) { group->timer = 1; } } /** * Delaying membership report for a group if necessary * * @param group the igmp_group for which "delaying" membership report * @param maxresp query delay */ static void igmp_delaying_member(struct igmp_group *group, u8_t maxresp) { if ((group->group_state == IGMP_GROUP_IDLE_MEMBER) || ((group->group_state == IGMP_GROUP_DELAYING_MEMBER) && ((group->timer == 0) || (maxresp < group->timer)))) { igmp_start_timer(group, maxresp); group->group_state = IGMP_GROUP_DELAYING_MEMBER; } } /** * Sends an IP packet on a network interface. This function constructs the IP header * and calculates the IP header checksum. If the source IP address is NULL, * the IP address of the outgoing network interface is filled in as source address. * * @param p the packet to send (p->payload points to the data, e.g. next protocol header; if dest == LWIP_IP_HDRINCL, p already includes an IP header and p->payload points to that IP header) * @param src the source IP address to send from (if src == IP4_ADDR_ANY, the * IP address of the netif used to send is used as source address) * @param dest the destination IP address to send the packet to * @param netif the netif on which to send this packet * @return ERR_OK if the packet was sent OK * ERR_BUF if p doesn't have enough space for IP/LINK headers * returns errors returned by netif->output */ static err_t igmp_ip_output_if(struct pbuf *p, const ip4_addr_t *src, const ip4_addr_t *dest, struct netif *netif) { /* This is the "router alert" option */ u16_t ra[2]; ra[0] = PP_HTONS(ROUTER_ALERT); ra[1] = 0x0000; /* Router shall examine packet */ IGMP_STATS_INC(igmp.xmit); return ip4_output_if_opt(p, src, dest, IGMP_TTL, 0, IP_PROTO_IGMP, netif, ra, ROUTER_ALERTLEN); } /** * Send an igmp packet to a specific group. * * @param group the group to which to send the packet * @param type the type of igmp packet to send */ static void igmp_send(struct netif *netif, struct igmp_group *group, u8_t type) { struct pbuf* p = NULL; struct igmp_msg* igmp = NULL; ip4_addr_t src = *IP4_ADDR_ANY4; ip4_addr_t* dest = NULL; /* IP header + "router alert" option + IGMP header */ p = pbuf_alloc(PBUF_TRANSPORT, IGMP_MINLEN, PBUF_RAM); if (p) { igmp = (struct igmp_msg *)p->payload; LWIP_ASSERT("igmp_send: check that first pbuf can hold struct igmp_msg", (p->len >= sizeof(struct igmp_msg))); ip4_addr_copy(src, *netif_ip4_addr(netif)); if (type == IGMP_V2_MEMB_REPORT) { dest = &(group->group_address); ip4_addr_copy(igmp->igmp_group_address, group->group_address); group->last_reporter_flag = 1; /* Remember we were the last to report */ } else { if (type == IGMP_LEAVE_GROUP) { dest = &allrouters; ip4_addr_copy(igmp->igmp_group_address, group->group_address); } } if ((type == IGMP_V2_MEMB_REPORT) || (type == IGMP_LEAVE_GROUP)) { igmp->igmp_msgtype = type; igmp->igmp_maxresp = 0; igmp->igmp_checksum = 0; igmp->igmp_checksum = inet_chksum(igmp, IGMP_MINLEN); igmp_ip_output_if(p, &src, dest, netif); } pbuf_free(p); } else { LWIP_DEBUGF(IGMP_DEBUG, ("igmp_send: not enough memory for igmp_send\n")); IGMP_STATS_INC(igmp.memerr); } } #endif /* LWIP_IPV4 && LWIP_IGMP */
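#if 0 /* Illustrative sketch, not part of the lwIP sources; kept disabled so it cannot affect the build. */
/*
 * Hypothetical usage sketch for the per-netif join/leave functions
 * documented above, using 239.0.0.1 as an example group. The function names
 * and the group address are assumptions made for this sketch; the netif must
 * have NETIF_FLAG_IGMP set and, per the note at the top of this file, the
 * calls belong in the TCPIP thread.
 */
static err_t
example_join_demo_group(struct netif *netif)
{
  ip4_addr_t group;

  IP4_ADDR(&group, 239, 0, 0, 1);
  /* Sends an unsolicited IGMPv2 membership report and starts the
     delaying-member timer that igmp_tmr() decrements. */
  return igmp_joingroup_netif(netif, &group);
}

static err_t
example_leave_demo_group(struct netif *netif)
{
  ip4_addr_t group;

  IP4_ADDR(&group, 239, 0, 0, 1);
  /* Sends a leave-group message only if this host was the last reporter. */
  return igmp_leavegroup_netif(netif, &group);
}
#endif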
2301_81045437/classic-platform
communication/lwip-2.0.3/src/core/ipv4/igmp.c
C
unknown
28,463
/** * @file * This is the IPv4 layer implementation for incoming and outgoing IP traffic. * * @see ip_frag.c * */ /* * Copyright (c) 2001-2004 Swedish Institute of Computer Science. * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY * OF SUCH DAMAGE. * * This file is part of the lwIP TCP/IP stack. * * Author: Adam Dunkels <adam@sics.se> * */ #include "lwip/opt.h" #if LWIP_IPV4 #include "lwip/ip.h" #include "lwip/def.h" #include "lwip/mem.h" #include "lwip/ip4_frag.h" #include "lwip/inet_chksum.h" #include "lwip/netif.h" #include "lwip/icmp.h" #include "lwip/igmp.h" #include "lwip/raw.h" #include "lwip/udp.h" #include "lwip/priv/tcp_priv.h" #include "lwip/autoip.h" #include "lwip/stats.h" #include "lwip/prot/dhcp.h" #include <string.h> #ifdef LWIP_HOOK_FILENAME #include LWIP_HOOK_FILENAME #endif /** Set this to 0 in the rare case of wanting to call an extra function to * generate the IP checksum (in contrast to calculating it on-the-fly). */ #ifndef LWIP_INLINE_IP_CHKSUM #if LWIP_CHECKSUM_CTRL_PER_NETIF #define LWIP_INLINE_IP_CHKSUM 0 #else /* LWIP_CHECKSUM_CTRL_PER_NETIF */ #define LWIP_INLINE_IP_CHKSUM 1 #endif /* LWIP_CHECKSUM_CTRL_PER_NETIF */ #endif #if LWIP_INLINE_IP_CHKSUM && CHECKSUM_GEN_IP #define CHECKSUM_GEN_IP_INLINE 1 #else #define CHECKSUM_GEN_IP_INLINE 0 #endif #if LWIP_DHCP || defined(LWIP_IP_ACCEPT_UDP_PORT) #define IP_ACCEPT_LINK_LAYER_ADDRESSING 1 /** Some defines for DHCP to let link-layer-addressed packets through while the * netif is down. * To use this in your own application/protocol, define LWIP_IP_ACCEPT_UDP_PORT(port) * to return 1 if the port is accepted and 0 if the port is not accepted. 
*/ #if LWIP_DHCP && defined(LWIP_IP_ACCEPT_UDP_PORT) /* accept DHCP client port and custom port */ #define IP_ACCEPT_LINK_LAYER_ADDRESSED_PORT(port) (((port) == PP_NTOHS(DHCP_CLIENT_PORT)) \ || (LWIP_IP_ACCEPT_UDP_PORT(port))) #elif defined(LWIP_IP_ACCEPT_UDP_PORT) /* LWIP_DHCP && defined(LWIP_IP_ACCEPT_UDP_PORT) */ /* accept custom port only */ #define IP_ACCEPT_LINK_LAYER_ADDRESSED_PORT(port) (LWIP_IP_ACCEPT_UDP_PORT(port)) #else /* LWIP_DHCP && defined(LWIP_IP_ACCEPT_UDP_PORT) */ /* accept DHCP client port only */ #define IP_ACCEPT_LINK_LAYER_ADDRESSED_PORT(port) ((port) == PP_NTOHS(DHCP_CLIENT_PORT)) #endif /* LWIP_DHCP && defined(LWIP_IP_ACCEPT_UDP_PORT) */ #else /* LWIP_DHCP */ #define IP_ACCEPT_LINK_LAYER_ADDRESSING 0 #endif /* LWIP_DHCP */ /** The IP header ID of the next outgoing IP packet */ static u16_t ip_id; #if LWIP_MULTICAST_TX_OPTIONS /** The default netif used for multicast */ static struct netif* ip4_default_multicast_netif; /** * @ingroup ip4 * Set a default netif for IPv4 multicast. */ void ip4_set_default_multicast_netif(struct netif* default_multicast_netif) { ip4_default_multicast_netif = default_multicast_netif; } #endif /* LWIP_MULTICAST_TX_OPTIONS */ #ifdef LWIP_HOOK_IP4_ROUTE_SRC /** * Source based IPv4 routing must be fully implemented in * LWIP_HOOK_IP4_ROUTE_SRC(). This function only provides the parameters. */ struct netif * ip4_route_src(const ip4_addr_t *dest, const ip4_addr_t *src) { if (src != NULL) { /* when src==NULL, the hook is called from ip4_route(dest) */ struct netif *netif = LWIP_HOOK_IP4_ROUTE_SRC(dest, src); if (netif != NULL) { return netif; } } return ip4_route(dest); } #endif /* LWIP_HOOK_IP4_ROUTE_SRC */ /** * Finds the appropriate network interface for a given IP address. It * searches the list of network interfaces linearly. A match is found * if the masked IP address of the network interface equals the masked * IP address given to the function. * * @param dest the destination IP address for which to find the route * @return the netif on which to send to reach dest */ struct netif * ip4_route(const ip4_addr_t *dest) { struct netif *netif; #if LWIP_MULTICAST_TX_OPTIONS /* Use administratively selected interface for multicast by default */ if (ip4_addr_ismulticast(dest) && ip4_default_multicast_netif) { return ip4_default_multicast_netif; } #endif /* LWIP_MULTICAST_TX_OPTIONS */ /* iterate through netifs */ for (netif = netif_list; netif != NULL; netif = netif->next) { /* is the netif up, does it have a link and a valid address? */ if (netif_is_up(netif) && netif_is_link_up(netif) && !ip4_addr_isany_val(*netif_ip4_addr(netif))) { /* network mask matches? */ if (ip4_addr_netcmp(dest, netif_ip4_addr(netif), netif_ip4_netmask(netif))) { /* return netif on which to forward IP packet */ return netif; } /* gateway matches on a non broadcast interface? (i.e. 
peer in a point to point interface) */ if (((netif->flags & NETIF_FLAG_BROADCAST) == 0) && ip4_addr_cmp(dest, netif_ip4_gw(netif))) { /* return netif on which to forward IP packet */ return netif; } } } #if LWIP_NETIF_LOOPBACK && !LWIP_HAVE_LOOPIF /* loopif is disabled, loopback traffic is passed through any netif */ if (ip4_addr_isloopback(dest)) { /* don't check for link on loopback traffic */ if (netif_default != NULL && netif_is_up(netif_default)) { return netif_default; } /* default netif is not up, just use any netif for loopback traffic */ for (netif = netif_list; netif != NULL; netif = netif->next) { if (netif_is_up(netif)) { return netif; } } return NULL; } #endif /* LWIP_NETIF_LOOPBACK && !LWIP_HAVE_LOOPIF */ #ifdef LWIP_HOOK_IP4_ROUTE_SRC netif = LWIP_HOOK_IP4_ROUTE_SRC(dest, NULL); if (netif != NULL) { return netif; } #elif defined(LWIP_HOOK_IP4_ROUTE) netif = LWIP_HOOK_IP4_ROUTE(dest); if (netif != NULL) { return netif; } #endif if ((netif_default == NULL) || !netif_is_up(netif_default) || !netif_is_link_up(netif_default) || ip4_addr_isany_val(*netif_ip4_addr(netif_default))) { /* No matching netif found and default netif is not usable. If this is not good enough for you, use LWIP_HOOK_IP4_ROUTE() */ LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("ip4_route: No route to %"U16_F".%"U16_F".%"U16_F".%"U16_F"\n", ip4_addr1_16(dest), ip4_addr2_16(dest), ip4_addr3_16(dest), ip4_addr4_16(dest))); IP_STATS_INC(ip.rterr); MIB2_STATS_INC(mib2.ipoutnoroutes); return NULL; } return netif_default; } #if IP_FORWARD /** * Determine whether an IP address is in a reserved set of addresses * that may not be forwarded, or whether datagrams to that destination * may be forwarded. * @param p the packet to forward * @return 1: can forward 0: discard */ static int ip4_canforward(struct pbuf *p) { u32_t addr = lwip_htonl(ip4_addr_get_u32(ip4_current_dest_addr())); if (p->flags & PBUF_FLAG_LLBCAST) { /* don't route link-layer broadcasts */ return 0; } if ((p->flags & PBUF_FLAG_LLMCAST) && !IP_MULTICAST(addr)) { /* don't route link-layer multicasts unless the destination address is an IP multicast address */ return 0; } if (IP_EXPERIMENTAL(addr)) { return 0; } if (IP_CLASSA(addr)) { u32_t net = addr & IP_CLASSA_NET; if ((net == 0) || (net == ((u32_t)IP_LOOPBACKNET << IP_CLASSA_NSHIFT))) { /* don't route loopback packets */ return 0; } } return 1; } /** * Forwards an IP packet. It finds an appropriate route for the * packet, decrements the TTL value of the packet, adjusts the * checksum and outputs the packet on the appropriate interface. * * @param p the packet to forward (p->payload points to IP header) * @param iphdr the IP header of the input packet * @param inp the netif on which this packet was received */ static void ip4_forward(struct pbuf *p, struct ip_hdr *iphdr, struct netif *inp) { struct netif *netif; PERF_START; LWIP_UNUSED_ARG(inp); if (!ip4_canforward(p)) { goto return_noroute; } /* RFC3927 2.7: do not forward link-local addresses */ if (ip4_addr_islinklocal(ip4_current_dest_addr())) { LWIP_DEBUGF(IP_DEBUG, ("ip4_forward: not forwarding LLA %"U16_F".%"U16_F".%"U16_F".%"U16_F"\n", ip4_addr1_16(ip4_current_dest_addr()), ip4_addr2_16(ip4_current_dest_addr()), ip4_addr3_16(ip4_current_dest_addr()), ip4_addr4_16(ip4_current_dest_addr()))); goto return_noroute; } /* Find network interface where to forward this IP packet to. 
*/ netif = ip4_route_src(ip4_current_dest_addr(), ip4_current_src_addr()); if (netif == NULL) { LWIP_DEBUGF(IP_DEBUG, ("ip4_forward: no forwarding route for %"U16_F".%"U16_F".%"U16_F".%"U16_F" found\n", ip4_addr1_16(ip4_current_dest_addr()), ip4_addr2_16(ip4_current_dest_addr()), ip4_addr3_16(ip4_current_dest_addr()), ip4_addr4_16(ip4_current_dest_addr()))); /* @todo: send ICMP_DUR_NET? */ goto return_noroute; } #if !IP_FORWARD_ALLOW_TX_ON_RX_NETIF /* Do not forward packets onto the same network interface on which * they arrived. */ if (netif == inp) { LWIP_DEBUGF(IP_DEBUG, ("ip4_forward: not bouncing packets back on incoming interface.\n")); goto return_noroute; } #endif /* IP_FORWARD_ALLOW_TX_ON_RX_NETIF */ /* decrement TTL */ IPH_TTL_SET(iphdr, IPH_TTL(iphdr) - 1); /* send ICMP if TTL == 0 */ if (IPH_TTL(iphdr) == 0) { MIB2_STATS_INC(mib2.ipinhdrerrors); #if LWIP_ICMP /* Don't send ICMP messages in response to ICMP messages */ if (IPH_PROTO(iphdr) != IP_PROTO_ICMP) { icmp_time_exceeded(p, ICMP_TE_TTL); } #endif /* LWIP_ICMP */ return; } /* Incrementally update the IP checksum. */ if (IPH_CHKSUM(iphdr) >= PP_HTONS(0xffffU - 0x100)) { IPH_CHKSUM_SET(iphdr, IPH_CHKSUM(iphdr) + PP_HTONS(0x100) + 1); } else { IPH_CHKSUM_SET(iphdr, IPH_CHKSUM(iphdr) + PP_HTONS(0x100)); } LWIP_DEBUGF(IP_DEBUG, ("ip4_forward: forwarding packet to %"U16_F".%"U16_F".%"U16_F".%"U16_F"\n", ip4_addr1_16(ip4_current_dest_addr()), ip4_addr2_16(ip4_current_dest_addr()), ip4_addr3_16(ip4_current_dest_addr()), ip4_addr4_16(ip4_current_dest_addr()))); IP_STATS_INC(ip.fw); MIB2_STATS_INC(mib2.ipforwdatagrams); IP_STATS_INC(ip.xmit); PERF_STOP("ip4_forward"); /* don't fragment if interface has mtu set to 0 [loopif] */ if (netif->mtu && (p->tot_len > netif->mtu)) { if ((IPH_OFFSET(iphdr) & PP_NTOHS(IP_DF)) == 0) { #if IP_FRAG ip4_frag(p, netif, ip4_current_dest_addr()); #else /* IP_FRAG */ /* @todo: send ICMP Destination Unreachable code 13 "Communication administratively prohibited"? */ #endif /* IP_FRAG */ } else { #if LWIP_ICMP /* send ICMP Destination Unreachable code 4: "Fragmentation Needed and DF Set" */ icmp_dest_unreach(p, ICMP_DUR_FRAG); #endif /* LWIP_ICMP */ } return; } /* transmit pbuf on chosen interface */ netif->output(netif, p, ip4_current_dest_addr()); return; return_noroute: MIB2_STATS_INC(mib2.ipoutnoroutes); } #endif /* IP_FORWARD */ /** * This function is called by the network interface device driver when * an IP packet is received. The function does the basic checks of the * IP header such as packet size being at least larger than the header * size etc. If the packet was not destined for us, the packet is * forwarded (using ip_forward). The IP checksum is always checked. * * Finally, the packet is sent to the upper layer protocol input function. 
* * @param p the received IP packet (p->payload points to IP header) * @param inp the netif on which this packet was received * @return ERR_OK if the packet was processed (could return ERR_* if it wasn't * processed, but currently always returns ERR_OK) */ err_t ip4_input(struct pbuf *p, struct netif *inp) { struct ip_hdr *iphdr; struct netif *netif; u16_t iphdr_hlen; u16_t iphdr_len; #if IP_ACCEPT_LINK_LAYER_ADDRESSING || LWIP_IGMP int check_ip_src = 1; #endif /* IP_ACCEPT_LINK_LAYER_ADDRESSING || LWIP_IGMP */ IP_STATS_INC(ip.recv); MIB2_STATS_INC(mib2.ipinreceives); /* identify the IP header */ iphdr = (struct ip_hdr *)p->payload; if (IPH_V(iphdr) != 4) { LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_LEVEL_WARNING, ("IP packet dropped due to bad version number %"U16_F"\n", (u16_t)IPH_V(iphdr))); ip4_debug_print(p); pbuf_free(p); IP_STATS_INC(ip.err); IP_STATS_INC(ip.drop); MIB2_STATS_INC(mib2.ipinhdrerrors); return ERR_OK; } #ifdef LWIP_HOOK_IP4_INPUT if (LWIP_HOOK_IP4_INPUT(p, inp)) { /* the packet has been eaten */ return ERR_OK; } #endif /* obtain IP header length in number of 32-bit words */ iphdr_hlen = IPH_HL(iphdr); /* calculate IP header length in bytes */ iphdr_hlen *= 4; /* obtain ip length in bytes */ iphdr_len = lwip_ntohs(IPH_LEN(iphdr)); /* Trim pbuf. This is especially required for packets < 60 bytes. */ if (iphdr_len < p->tot_len) { pbuf_realloc(p, iphdr_len); } /* header length exceeds first pbuf length, or ip length exceeds total pbuf length? */ if ((iphdr_hlen > p->len) || (iphdr_len > p->tot_len) || (iphdr_hlen < IP_HLEN)) { if (iphdr_hlen < IP_HLEN) { LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("ip4_input: short IP header (%"U16_F" bytes) received, IP packet dropped\n", iphdr_hlen)); } if (iphdr_hlen > p->len) { LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("IP header (len %"U16_F") does not fit in first pbuf (len %"U16_F"), IP packet dropped.\n", iphdr_hlen, p->len)); } if (iphdr_len > p->tot_len) { LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("IP (len %"U16_F") is longer than pbuf (len %"U16_F"), IP packet dropped.\n", iphdr_len, p->tot_len)); } /* free (drop) packet pbufs */ pbuf_free(p); IP_STATS_INC(ip.lenerr); IP_STATS_INC(ip.drop); MIB2_STATS_INC(mib2.ipindiscards); return ERR_OK; } /* verify checksum */ #if CHECKSUM_CHECK_IP IF__NETIF_CHECKSUM_ENABLED(inp, NETIF_CHECKSUM_CHECK_IP) { if (inet_chksum(iphdr, iphdr_hlen) != 0) { LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("Checksum (0x%"X16_F") failed, IP packet dropped.\n", inet_chksum(iphdr, iphdr_hlen))); ip4_debug_print(p); pbuf_free(p); IP_STATS_INC(ip.chkerr); IP_STATS_INC(ip.drop); MIB2_STATS_INC(mib2.ipinhdrerrors); return ERR_OK; } } #endif /* copy IP addresses to aligned ip_addr_t */ ip_addr_copy_from_ip4(ip_data.current_iphdr_dest, iphdr->dest); ip_addr_copy_from_ip4(ip_data.current_iphdr_src, iphdr->src); /* match packet against an interface, i.e. is this packet for us? 
*/ if (ip4_addr_ismulticast(ip4_current_dest_addr())) { #if LWIP_IGMP if ((inp->flags & NETIF_FLAG_IGMP) && (igmp_lookfor_group(inp, ip4_current_dest_addr()))) { /* IGMP snooping switches need 0.0.0.0 to be allowed as source address (RFC 4541) */ ip4_addr_t allsystems; IP4_ADDR(&allsystems, 224, 0, 0, 1); if (ip4_addr_cmp(ip4_current_dest_addr(), &allsystems) && ip4_addr_isany(ip4_current_src_addr())) { check_ip_src = 0; } netif = inp; } else { netif = NULL; } #else /* LWIP_IGMP */ if ((netif_is_up(inp)) && (!ip4_addr_isany_val(*netif_ip4_addr(inp)))) { netif = inp; } else { netif = NULL; } #endif /* LWIP_IGMP */ } else { /* start trying with inp. if that's not acceptable, start walking the list of configured netifs. 'first' is used as a boolean to mark whether we started walking the list */ int first = 1; netif = inp; do { LWIP_DEBUGF(IP_DEBUG, ("ip_input: iphdr->dest 0x%"X32_F" netif->ip_addr 0x%"X32_F" (0x%"X32_F", 0x%"X32_F", 0x%"X32_F")\n", ip4_addr_get_u32(&iphdr->dest), ip4_addr_get_u32(netif_ip4_addr(netif)), ip4_addr_get_u32(&iphdr->dest) & ip4_addr_get_u32(netif_ip4_netmask(netif)), ip4_addr_get_u32(netif_ip4_addr(netif)) & ip4_addr_get_u32(netif_ip4_netmask(netif)), ip4_addr_get_u32(&iphdr->dest) & ~ip4_addr_get_u32(netif_ip4_netmask(netif)))); /* interface is up and configured? */ if ((netif_is_up(netif)) && (!ip4_addr_isany_val(*netif_ip4_addr(netif)))) { /* unicast to this interface address? */ if (ip4_addr_cmp(ip4_current_dest_addr(), netif_ip4_addr(netif)) || /* or broadcast on this interface network address? */ ip4_addr_isbroadcast(ip4_current_dest_addr(), netif) #if LWIP_NETIF_LOOPBACK && !LWIP_HAVE_LOOPIF || (ip4_addr_get_u32(ip4_current_dest_addr()) == PP_HTONL(IPADDR_LOOPBACK)) #endif /* LWIP_NETIF_LOOPBACK && !LWIP_HAVE_LOOPIF */ ) { LWIP_DEBUGF(IP_DEBUG, ("ip4_input: packet accepted on interface %c%c\n", netif->name[0], netif->name[1])); /* break out of for loop */ break; } #if LWIP_AUTOIP /* connections to link-local addresses must persist after changing the netif's address (RFC3927 ch. 1.9) */ if (autoip_accept_packet(netif, ip4_current_dest_addr())) { LWIP_DEBUGF(IP_DEBUG, ("ip4_input: LLA packet accepted on interface %c%c\n", netif->name[0], netif->name[1])); /* break out of for loop */ break; } #endif /* LWIP_AUTOIP */ } if (first) { #if !LWIP_NETIF_LOOPBACK || LWIP_HAVE_LOOPIF /* Packets sent to the loopback address must not be accepted on an * interface that does not have the loopback address assigned to it, * unless a non-loopback interface is used for loopback traffic. */ if (ip4_addr_isloopback(ip4_current_dest_addr())) { netif = NULL; break; } #endif /* !LWIP_NETIF_LOOPBACK || LWIP_HAVE_LOOPIF */ first = 0; netif = netif_list; } else { netif = netif->next; } if (netif == inp) { netif = netif->next; } } while (netif != NULL); } #if IP_ACCEPT_LINK_LAYER_ADDRESSING /* Pass DHCP messages regardless of destination address. DHCP traffic is addressed * using link layer addressing (such as Ethernet MAC) so we must not filter on IP. * According to RFC 1542 section 3.1.1, referred by RFC 2131). * * If you want to accept private broadcast communication while a netif is down, * define LWIP_IP_ACCEPT_UDP_PORT(dst_port), e.g.: * * #define LWIP_IP_ACCEPT_UDP_PORT(dst_port) ((dst_port) == PP_NTOHS(12345)) */ if (netif == NULL) { /* remote port is DHCP server? 
*/ if (IPH_PROTO(iphdr) == IP_PROTO_UDP) { struct udp_hdr *udphdr = (struct udp_hdr *)((u8_t *)iphdr + iphdr_hlen); LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_TRACE, ("ip4_input: UDP packet to DHCP client port %"U16_F"\n", lwip_ntohs(udphdr->dest))); if (IP_ACCEPT_LINK_LAYER_ADDRESSED_PORT(udphdr->dest)) { LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_TRACE, ("ip4_input: DHCP packet accepted.\n")); netif = inp; check_ip_src = 0; } } } #endif /* IP_ACCEPT_LINK_LAYER_ADDRESSING */ /* broadcast or multicast packet source address? Compliant with RFC 1122: 3.2.1.3 */ #if LWIP_IGMP || IP_ACCEPT_LINK_LAYER_ADDRESSING if (check_ip_src #if IP_ACCEPT_LINK_LAYER_ADDRESSING /* DHCP servers need 0.0.0.0 to be allowed as source address (RFC 1.1.2.2: 3.2.1.3/a) */ && !ip4_addr_isany_val(*ip4_current_src_addr()) #endif /* IP_ACCEPT_LINK_LAYER_ADDRESSING */ ) #endif /* LWIP_IGMP || IP_ACCEPT_LINK_LAYER_ADDRESSING */ { if ((ip4_addr_isbroadcast(ip4_current_src_addr(), inp)) || (ip4_addr_ismulticast(ip4_current_src_addr()))) { /* packet source is not valid */ LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_WARNING, ("ip4_input: packet source is not valid.\n")); /* free (drop) packet pbufs */ pbuf_free(p); IP_STATS_INC(ip.drop); MIB2_STATS_INC(mib2.ipinaddrerrors); MIB2_STATS_INC(mib2.ipindiscards); return ERR_OK; } } /* packet not for us? */ if (netif == NULL) { /* packet not for us, route or discard */ LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_TRACE, ("ip4_input: packet not for us.\n")); #if IP_FORWARD /* non-broadcast packet? */ if (!ip4_addr_isbroadcast(ip4_current_dest_addr(), inp)) { /* try to forward IP packet on (other) interfaces */ ip4_forward(p, iphdr, inp); } else #endif /* IP_FORWARD */ { IP_STATS_INC(ip.drop); MIB2_STATS_INC(mib2.ipinaddrerrors); MIB2_STATS_INC(mib2.ipindiscards); } pbuf_free(p); return ERR_OK; } /* packet consists of multiple fragments? */ if ((IPH_OFFSET(iphdr) & PP_HTONS(IP_OFFMASK | IP_MF)) != 0) { #if IP_REASSEMBLY /* packet fragment reassembly code present? */ LWIP_DEBUGF(IP_DEBUG, ("IP packet is a fragment (id=0x%04"X16_F" tot_len=%"U16_F" len=%"U16_F" MF=%"U16_F" offset=%"U16_F"), calling ip4_reass()\n", lwip_ntohs(IPH_ID(iphdr)), p->tot_len, lwip_ntohs(IPH_LEN(iphdr)), (u16_t)!!(IPH_OFFSET(iphdr) & PP_HTONS(IP_MF)), (u16_t)((lwip_ntohs(IPH_OFFSET(iphdr)) & IP_OFFMASK)*8))); /* reassemble the packet*/ p = ip4_reass(p); /* packet not fully reassembled yet? */ if (p == NULL) { return ERR_OK; } iphdr = (struct ip_hdr *)p->payload; #else /* IP_REASSEMBLY == 0, no packet fragment reassembly code present */ pbuf_free(p); LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("IP packet dropped since it was fragmented (0x%"X16_F") (while IP_REASSEMBLY == 0).\n", lwip_ntohs(IPH_OFFSET(iphdr)))); IP_STATS_INC(ip.opterr); IP_STATS_INC(ip.drop); /* unsupported protocol feature */ MIB2_STATS_INC(mib2.ipinunknownprotos); return ERR_OK; #endif /* IP_REASSEMBLY */ } #if IP_OPTIONS_ALLOWED == 0 /* no support for IP options in the IP header? 
*/ #if LWIP_IGMP /* there is an extra "router alert" option in IGMP messages which we allow for but do not police */ if ((iphdr_hlen > IP_HLEN) && (IPH_PROTO(iphdr) != IP_PROTO_IGMP)) { #else if (iphdr_hlen > IP_HLEN) { #endif /* LWIP_IGMP */ LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("IP packet dropped since there were IP options (while IP_OPTIONS_ALLOWED == 0).\n")); pbuf_free(p); IP_STATS_INC(ip.opterr); IP_STATS_INC(ip.drop); /* unsupported protocol feature */ MIB2_STATS_INC(mib2.ipinunknownprotos); return ERR_OK; } #endif /* IP_OPTIONS_ALLOWED == 0 */ /* send to upper layers */ LWIP_DEBUGF(IP_DEBUG, ("ip4_input: \n")); ip4_debug_print(p); LWIP_DEBUGF(IP_DEBUG, ("ip4_input: p->len %"U16_F" p->tot_len %"U16_F"\n", p->len, p->tot_len)); ip_data.current_netif = netif; ip_data.current_input_netif = inp; ip_data.current_ip4_header = iphdr; ip_data.current_ip_header_tot_len = IPH_HL(iphdr) * 4; #if LWIP_RAW /* raw input did not eat the packet? */ if (raw_input(p, inp) == 0) #endif /* LWIP_RAW */ { pbuf_header(p, -(s16_t)iphdr_hlen); /* Move to payload, no check necessary. */ switch (IPH_PROTO(iphdr)) { #if LWIP_UDP case IP_PROTO_UDP: #if LWIP_UDPLITE case IP_PROTO_UDPLITE: #endif /* LWIP_UDPLITE */ MIB2_STATS_INC(mib2.ipindelivers); udp_input(p, inp); break; #endif /* LWIP_UDP */ #if LWIP_TCP case IP_PROTO_TCP: MIB2_STATS_INC(mib2.ipindelivers); tcp_input(p, inp); break; #endif /* LWIP_TCP */ #if LWIP_ICMP case IP_PROTO_ICMP: MIB2_STATS_INC(mib2.ipindelivers); icmp_input(p, inp); break; #endif /* LWIP_ICMP */ #if LWIP_IGMP case IP_PROTO_IGMP: igmp_input(p, inp, ip4_current_dest_addr()); break; #endif /* LWIP_IGMP */ default: #if LWIP_ICMP /* send ICMP destination protocol unreachable unless it was a broadcast */ if (!ip4_addr_isbroadcast(ip4_current_dest_addr(), netif) && !ip4_addr_ismulticast(ip4_current_dest_addr())) { pbuf_header_force(p, iphdr_hlen); /* Move to ip header, no check necessary. */ p->payload = iphdr; icmp_dest_unreach(p, ICMP_DUR_PROTO); } #endif /* LWIP_ICMP */ pbuf_free(p); LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("Unsupported transport protocol %"U16_F"\n", (u16_t)IPH_PROTO(iphdr))); IP_STATS_INC(ip.proterr); IP_STATS_INC(ip.drop); MIB2_STATS_INC(mib2.ipinunknownprotos); } } /* @todo: this is not really necessary... */ ip_data.current_netif = NULL; ip_data.current_input_netif = NULL; ip_data.current_ip4_header = NULL; ip_data.current_ip_header_tot_len = 0; ip4_addr_set_any(ip4_current_src_addr()); ip4_addr_set_any(ip4_current_dest_addr()); return ERR_OK; } /** * Sends an IP packet on a network interface. This function constructs * the IP header and calculates the IP header checksum. If the source * IP address is NULL, the IP address of the outgoing network * interface is filled in as source address. * If the destination IP address is LWIP_IP_HDRINCL, p is assumed to already * include an IP header and p->payload points to it instead of the data. * * @param p the packet to send (p->payload points to the data, e.g. 
next protocol header; if dest == LWIP_IP_HDRINCL, p already includes an IP header and p->payload points to that IP header) * @param src the source IP address to send from (if src == IP4_ADDR_ANY, the * IP address of the netif used to send is used as source address) * @param dest the destination IP address to send the packet to * @param ttl the TTL value to be set in the IP header * @param tos the TOS value to be set in the IP header * @param proto the PROTOCOL to be set in the IP header * @param netif the netif on which to send this packet * @return ERR_OK if the packet was sent OK * ERR_BUF if p doesn't have enough space for IP/LINK headers * returns errors returned by netif->output * * @note ip_id: RFC791 "some host may be able to simply use * unique identifiers independent of destination" */ err_t ip4_output_if(struct pbuf *p, const ip4_addr_t *src, const ip4_addr_t *dest, u8_t ttl, u8_t tos, u8_t proto, struct netif *netif) { #if IP_OPTIONS_SEND return ip4_output_if_opt(p, src, dest, ttl, tos, proto, netif, NULL, 0); } /** * Same as ip_output_if() but with the possibility to include IP options: * * @ param ip_options pointer to the IP options, copied into the IP header * @ param optlen length of ip_options */ err_t ip4_output_if_opt(struct pbuf *p, const ip4_addr_t *src, const ip4_addr_t *dest, u8_t ttl, u8_t tos, u8_t proto, struct netif *netif, void *ip_options, u16_t optlen) { #endif /* IP_OPTIONS_SEND */ const ip4_addr_t *src_used = src; if (dest != LWIP_IP_HDRINCL) { if (ip4_addr_isany(src)) { src_used = netif_ip4_addr(netif); } } #if IP_OPTIONS_SEND return ip4_output_if_opt_src(p, src_used, dest, ttl, tos, proto, netif, ip_options, optlen); #else /* IP_OPTIONS_SEND */ return ip4_output_if_src(p, src_used, dest, ttl, tos, proto, netif); #endif /* IP_OPTIONS_SEND */ } /** * Same as ip_output_if() but 'src' address is not replaced by netif address * when it is 'any'. */ err_t ip4_output_if_src(struct pbuf *p, const ip4_addr_t *src, const ip4_addr_t *dest, u8_t ttl, u8_t tos, u8_t proto, struct netif *netif) { #if IP_OPTIONS_SEND return ip4_output_if_opt_src(p, src, dest, ttl, tos, proto, netif, NULL, 0); } /** * Same as ip_output_if_opt() but 'src' address is not replaced by netif address * when it is 'any'. */ err_t ip4_output_if_opt_src(struct pbuf *p, const ip4_addr_t *src, const ip4_addr_t *dest, u8_t ttl, u8_t tos, u8_t proto, struct netif *netif, void *ip_options, u16_t optlen) { #endif /* IP_OPTIONS_SEND */ struct ip_hdr *iphdr; ip4_addr_t dest_addr; #if CHECKSUM_GEN_IP_INLINE u32_t chk_sum = 0; #endif /* CHECKSUM_GEN_IP_INLINE */ LWIP_IP_CHECK_PBUF_REF_COUNT_FOR_TX(p); MIB2_STATS_INC(mib2.ipoutrequests); /* Should the IP header be generated or is it already included in p? 
*/ if (dest != LWIP_IP_HDRINCL) { u16_t ip_hlen = IP_HLEN; #if IP_OPTIONS_SEND u16_t optlen_aligned = 0; if (optlen != 0) { #if CHECKSUM_GEN_IP_INLINE int i; #endif /* CHECKSUM_GEN_IP_INLINE */ /* round up to a multiple of 4 */ optlen_aligned = ((optlen + 3) & ~3); ip_hlen += optlen_aligned; /* First write in the IP options */ if (pbuf_header(p, optlen_aligned)) { LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("ip4_output_if_opt: not enough room for IP options in pbuf\n")); IP_STATS_INC(ip.err); MIB2_STATS_INC(mib2.ipoutdiscards); return ERR_BUF; } MEMCPY(p->payload, ip_options, optlen); if (optlen < optlen_aligned) { /* zero the remaining bytes */ memset(((char*)p->payload) + optlen, 0, optlen_aligned - optlen); } #if CHECKSUM_GEN_IP_INLINE for (i = 0; i < optlen_aligned/2; i++) { chk_sum += ((u16_t*)p->payload)[i]; } #endif /* CHECKSUM_GEN_IP_INLINE */ } #endif /* IP_OPTIONS_SEND */ /* generate IP header */ if (pbuf_header(p, IP_HLEN)) { LWIP_DEBUGF(IP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("ip4_output: not enough room for IP header in pbuf\n")); IP_STATS_INC(ip.err); MIB2_STATS_INC(mib2.ipoutdiscards); return ERR_BUF; } iphdr = (struct ip_hdr *)p->payload; LWIP_ASSERT("check that first pbuf can hold struct ip_hdr", (p->len >= sizeof(struct ip_hdr))); IPH_TTL_SET(iphdr, ttl); IPH_PROTO_SET(iphdr, proto); #if CHECKSUM_GEN_IP_INLINE chk_sum += PP_NTOHS(proto | (ttl << 8)); #endif /* CHECKSUM_GEN_IP_INLINE */ /* dest cannot be NULL here */ ip4_addr_copy(iphdr->dest, *dest); #if CHECKSUM_GEN_IP_INLINE chk_sum += ip4_addr_get_u32(&iphdr->dest) & 0xFFFF; chk_sum += ip4_addr_get_u32(&iphdr->dest) >> 16; #endif /* CHECKSUM_GEN_IP_INLINE */ IPH_VHL_SET(iphdr, 4, ip_hlen / 4); IPH_TOS_SET(iphdr, tos); #if CHECKSUM_GEN_IP_INLINE chk_sum += PP_NTOHS(tos | (iphdr->_v_hl << 8)); #endif /* CHECKSUM_GEN_IP_INLINE */ IPH_LEN_SET(iphdr, lwip_htons(p->tot_len)); #if CHECKSUM_GEN_IP_INLINE chk_sum += iphdr->_len; #endif /* CHECKSUM_GEN_IP_INLINE */ IPH_OFFSET_SET(iphdr, 0); IPH_ID_SET(iphdr, lwip_htons(ip_id)); #if CHECKSUM_GEN_IP_INLINE chk_sum += iphdr->_id; #endif /* CHECKSUM_GEN_IP_INLINE */ ++ip_id; if (src == NULL) { ip4_addr_copy(iphdr->src, *IP4_ADDR_ANY4); } else { /* src cannot be NULL here */ ip4_addr_copy(iphdr->src, *src); } #if CHECKSUM_GEN_IP_INLINE chk_sum += ip4_addr_get_u32(&iphdr->src) & 0xFFFF; chk_sum += ip4_addr_get_u32(&iphdr->src) >> 16; chk_sum = (chk_sum >> 16) + (chk_sum & 0xFFFF); chk_sum = (chk_sum >> 16) + chk_sum; chk_sum = ~chk_sum; IF__NETIF_CHECKSUM_ENABLED(netif, NETIF_CHECKSUM_GEN_IP) { iphdr->_chksum = (u16_t)chk_sum; /* network order */ } #if LWIP_CHECKSUM_CTRL_PER_NETIF else { IPH_CHKSUM_SET(iphdr, 0); } #endif /* LWIP_CHECKSUM_CTRL_PER_NETIF*/ #else /* CHECKSUM_GEN_IP_INLINE */ IPH_CHKSUM_SET(iphdr, 0); #if CHECKSUM_GEN_IP IF__NETIF_CHECKSUM_ENABLED(netif, NETIF_CHECKSUM_GEN_IP) { IPH_CHKSUM_SET(iphdr, inet_chksum(iphdr, ip_hlen)); } #endif /* CHECKSUM_GEN_IP */ #endif /* CHECKSUM_GEN_IP_INLINE */ } else { /* IP header already included in p */ iphdr = (struct ip_hdr *)p->payload; ip4_addr_copy(dest_addr, iphdr->dest); dest = &dest_addr; } IP_STATS_INC(ip.xmit); LWIP_DEBUGF(IP_DEBUG, ("ip4_output_if: %c%c%"U16_F"\n", netif->name[0], netif->name[1], (u16_t)netif->num)); ip4_debug_print(p); #if ENABLE_LOOPBACK if (ip4_addr_cmp(dest, netif_ip4_addr(netif)) #if !LWIP_HAVE_LOOPIF || ip4_addr_isloopback(dest) #endif /* !LWIP_HAVE_LOOPIF */ ) { /* Packet to self, enqueue it for loopback */ LWIP_DEBUGF(IP_DEBUG, ("netif_loop_output()")); return netif_loop_output(netif, p); } #if 
LWIP_MULTICAST_TX_OPTIONS if ((p->flags & PBUF_FLAG_MCASTLOOP) != 0) { netif_loop_output(netif, p); } #endif /* LWIP_MULTICAST_TX_OPTIONS */ #endif /* ENABLE_LOOPBACK */ #if IP_FRAG /* don't fragment if interface has mtu set to 0 [loopif] */ if (netif->mtu && (p->tot_len > netif->mtu)) { return ip4_frag(p, netif, dest); } #endif /* IP_FRAG */ LWIP_DEBUGF(IP_DEBUG, ("ip4_output_if: call netif->output()\n")); return netif->output(netif, p, dest); } /** * Simple interface to ip_output_if. It finds the outgoing network * interface and calls upon ip_output_if to do the actual work. * * @param p the packet to send (p->payload points to the data, e.g. next protocol header; if dest == LWIP_IP_HDRINCL, p already includes an IP header and p->payload points to that IP header) * @param src the source IP address to send from (if src == IP4_ADDR_ANY, the * IP address of the netif used to send is used as source address) * @param dest the destination IP address to send the packet to * @param ttl the TTL value to be set in the IP header * @param tos the TOS value to be set in the IP header * @param proto the PROTOCOL to be set in the IP header * * @return ERR_RTE if no route is found * see ip_output_if() for more return values */ err_t ip4_output(struct pbuf *p, const ip4_addr_t *src, const ip4_addr_t *dest, u8_t ttl, u8_t tos, u8_t proto) { struct netif *netif; LWIP_IP_CHECK_PBUF_REF_COUNT_FOR_TX(p); if ((netif = ip4_route_src(dest, src)) == NULL) { LWIP_DEBUGF(IP_DEBUG, ("ip4_output: No route to %"U16_F".%"U16_F".%"U16_F".%"U16_F"\n", ip4_addr1_16(dest), ip4_addr2_16(dest), ip4_addr3_16(dest), ip4_addr4_16(dest))); IP_STATS_INC(ip.rterr); return ERR_RTE; } return ip4_output_if(p, src, dest, ttl, tos, proto, netif); } #if LWIP_NETIF_HWADDRHINT /** Like ip_output, but takes an addr_hint pointer that is passed on to netif->addr_hint * before calling ip_output_if. * * @param p the packet to send (p->payload points to the data, e.g. 
next protocol header; if dest == LWIP_IP_HDRINCL, p already includes an IP header and p->payload points to that IP header) * @param src the source IP address to send from (if src == IP4_ADDR_ANY, the * IP address of the netif used to send is used as source address) * @param dest the destination IP address to send the packet to * @param ttl the TTL value to be set in the IP header * @param tos the TOS value to be set in the IP header * @param proto the PROTOCOL to be set in the IP header * @param addr_hint address hint pointer set to netif->addr_hint before * calling ip_output_if() * * @return ERR_RTE if no route is found * see ip_output_if() for more return values */ err_t ip4_output_hinted(struct pbuf *p, const ip4_addr_t *src, const ip4_addr_t *dest, u8_t ttl, u8_t tos, u8_t proto, u8_t *addr_hint) { struct netif *netif; err_t err; LWIP_IP_CHECK_PBUF_REF_COUNT_FOR_TX(p); if ((netif = ip4_route_src(dest, src)) == NULL) { LWIP_DEBUGF(IP_DEBUG, ("ip4_output: No route to %"U16_F".%"U16_F".%"U16_F".%"U16_F"\n", ip4_addr1_16(dest), ip4_addr2_16(dest), ip4_addr3_16(dest), ip4_addr4_16(dest))); IP_STATS_INC(ip.rterr); return ERR_RTE; } NETIF_SET_HWADDRHINT(netif, addr_hint); err = ip4_output_if(p, src, dest, ttl, tos, proto, netif); NETIF_SET_HWADDRHINT(netif, NULL); return err; } #endif /* LWIP_NETIF_HWADDRHINT*/ #if IP_DEBUG /* Print an IP header by using LWIP_DEBUGF * @param p an IP packet, p->payload pointing to the IP header */ void ip4_debug_print(struct pbuf *p) { struct ip_hdr *iphdr = (struct ip_hdr *)p->payload; LWIP_DEBUGF(IP_DEBUG, ("IP header:\n")); LWIP_DEBUGF(IP_DEBUG, ("+-------------------------------+\n")); LWIP_DEBUGF(IP_DEBUG, ("|%2"S16_F" |%2"S16_F" | 0x%02"X16_F" | %5"U16_F" | (v, hl, tos, len)\n", (u16_t)IPH_V(iphdr), (u16_t)IPH_HL(iphdr), (u16_t)IPH_TOS(iphdr), lwip_ntohs(IPH_LEN(iphdr)))); LWIP_DEBUGF(IP_DEBUG, ("+-------------------------------+\n")); LWIP_DEBUGF(IP_DEBUG, ("| %5"U16_F" |%"U16_F"%"U16_F"%"U16_F"| %4"U16_F" | (id, flags, offset)\n", lwip_ntohs(IPH_ID(iphdr)), (u16_t)(lwip_ntohs(IPH_OFFSET(iphdr)) >> 15 & 1), (u16_t)(lwip_ntohs(IPH_OFFSET(iphdr)) >> 14 & 1), (u16_t)(lwip_ntohs(IPH_OFFSET(iphdr)) >> 13 & 1), (u16_t)(lwip_ntohs(IPH_OFFSET(iphdr)) & IP_OFFMASK))); LWIP_DEBUGF(IP_DEBUG, ("+-------------------------------+\n")); LWIP_DEBUGF(IP_DEBUG, ("| %3"U16_F" | %3"U16_F" | 0x%04"X16_F" | (ttl, proto, chksum)\n", (u16_t)IPH_TTL(iphdr), (u16_t)IPH_PROTO(iphdr), lwip_ntohs(IPH_CHKSUM(iphdr)))); LWIP_DEBUGF(IP_DEBUG, ("+-------------------------------+\n")); LWIP_DEBUGF(IP_DEBUG, ("| %3"U16_F" | %3"U16_F" | %3"U16_F" | %3"U16_F" | (src)\n", ip4_addr1_16(&iphdr->src), ip4_addr2_16(&iphdr->src), ip4_addr3_16(&iphdr->src), ip4_addr4_16(&iphdr->src))); LWIP_DEBUGF(IP_DEBUG, ("+-------------------------------+\n")); LWIP_DEBUGF(IP_DEBUG, ("| %3"U16_F" | %3"U16_F" | %3"U16_F" | %3"U16_F" | (dest)\n", ip4_addr1_16(&iphdr->dest), ip4_addr2_16(&iphdr->dest), ip4_addr3_16(&iphdr->dest), ip4_addr4_16(&iphdr->dest))); LWIP_DEBUGF(IP_DEBUG, ("+-------------------------------+\n")); } #endif /* IP_DEBUG */ #endif /* LWIP_IPV4 */
2301_81045437/classic-platform
communication/lwip-2.0.3/src/core/ipv4/ip4.c
C
unknown
40,069
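When ip4_forward() in ip4.c above decrements the TTL, it patches the existing header checksum instead of recomputing it: lowering the TTL by one lowers the 16-bit TTL/protocol word by 0x0100, so 0x0100 is added back into the ones'-complement checksum field, plus an extra 1 to reinsert the end-around carry when the 16-bit addition would wrap. Below is a minimal standalone sketch of that adjustment; adjust_chksum_after_ttl_decrement is a hypothetical name, and the value is treated as on a big-endian host so the PP_HTONS() conversions of the real code become identities.

#include <stdint.h>

/* Incrementally patch an IPv4 header checksum after the TTL (the high byte
 * of the TTL/protocol word) has been decremented by one, mirroring the
 * adjustment made in ip4_forward(). 'chksum' is the checksum field read as
 * a plain 16-bit value (big-endian host assumed for this sketch). */
static uint16_t adjust_chksum_after_ttl_decrement(uint16_t chksum)
{
  if (chksum >= (uint16_t)(0xFFFFu - 0x100u)) {
    /* adding 0x100 would reach or exceed 0xFFFF: also add the end-around
     * carry, exactly as the >= test in ip4_forward() does */
    return (uint16_t)(chksum + 0x100u + 1u);
  }
  return (uint16_t)(chksum + 0x100u);
}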
/** * @file * This is the IPv4 address tools implementation. * */ /* * Copyright (c) 2001-2004 Swedish Institute of Computer Science. * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY * OF SUCH DAMAGE. * * This file is part of the lwIP TCP/IP stack. * * Author: Adam Dunkels <adam@sics.se> * */ #include "lwip/opt.h" #if LWIP_IPV4 #include "lwip/ip_addr.h" #include "lwip/netif.h" /* used by IP4_ADDR_ANY and IP_ADDR_BROADCAST in ip_addr.h */ const ip_addr_t ip_addr_any = IPADDR4_INIT(IPADDR_ANY); const ip_addr_t ip_addr_broadcast = IPADDR4_INIT(IPADDR_BROADCAST); /** * Determine if an address is a broadcast address on a network interface * * @param addr address to be checked * @param netif the network interface against which the address is checked * @return returns non-zero if the address is a broadcast address */ u8_t ip4_addr_isbroadcast_u32(u32_t addr, const struct netif *netif) { ip4_addr_t ipaddr; ip4_addr_set_u32(&ipaddr, addr); /* all ones (broadcast) or all zeroes (old skool broadcast) */ if ((~addr == IPADDR_ANY) || (addr == IPADDR_ANY)) { return 1; /* no broadcast support on this network interface? */ } else if ((netif->flags & NETIF_FLAG_BROADCAST) == 0) { /* the given address cannot be a broadcast address * nor can we check against any broadcast addresses */ return 0; /* address matches network interface address exactly? => no broadcast */ } else if (addr == ip4_addr_get_u32(netif_ip4_addr(netif))) { return 0; /* on the same (sub) network... */ } else if (ip4_addr_netcmp(&ipaddr, netif_ip4_addr(netif), netif_ip4_netmask(netif)) /* ...and host identifier bits are all ones? =>... */ && ((addr & ~ip4_addr_get_u32(netif_ip4_netmask(netif))) == (IPADDR_BROADCAST & ~ip4_addr_get_u32(netif_ip4_netmask(netif))))) { /* => network broadcast address */ return 1; } else { return 0; } } /** Checks if a netmask is valid (starting with ones, then only zeros) * * @param netmask the IPv4 netmask to check (in network byte order!) 
* @return 1 if the netmask is valid, 0 if it is not */ u8_t ip4_addr_netmask_valid(u32_t netmask) { u32_t mask; u32_t nm_hostorder = lwip_htonl(netmask); /* first, check for the first zero */ for (mask = 1UL << 31 ; mask != 0; mask >>= 1) { if ((nm_hostorder & mask) == 0) { break; } } /* then check that there is no one */ for (; mask != 0; mask >>= 1) { if ((nm_hostorder & mask) != 0) { /* there is a one after the first zero -> invalid */ return 0; } } /* no one after the first zero -> valid */ return 1; } /* Here for now until needed in other places in lwIP */ #ifndef isprint #define in_range(c, lo, up) ((u8_t)c >= lo && (u8_t)c <= up) #define isprint(c) in_range(c, 0x20, 0x7f) #define isdigit(c) in_range(c, '0', '9') #define isxdigit(c) (isdigit(c) || in_range(c, 'a', 'f') || in_range(c, 'A', 'F')) #define islower(c) in_range(c, 'a', 'z') #define isspace(c) (c == ' ' || c == '\f' || c == '\n' || c == '\r' || c == '\t' || c == '\v') #endif /** * Ascii internet address interpretation routine. * The value returned is in network order. * * @param cp IP address in ascii representation (e.g. "127.0.0.1") * @return ip address in network order */ u32_t ipaddr_addr(const char *cp) { ip4_addr_t val; if (ip4addr_aton(cp, &val)) { return ip4_addr_get_u32(&val); } return (IPADDR_NONE); } /** * Check whether "cp" is a valid ascii representation * of an Internet address and convert to a binary address. * Returns 1 if the address is valid, 0 if not. * This replaces inet_addr, the return value from which * cannot distinguish between failure and a local broadcast address. * * @param cp IP address in ascii representation (e.g. "127.0.0.1") * @param addr pointer to which to save the ip address in network order * @return 1 if cp could be converted to addr, 0 on failure */ int ip4addr_aton(const char *cp, ip4_addr_t *addr) { u32_t val; u8_t base; char c; u32_t parts[4]; u32_t *pp = parts; c = *cp; for (;;) { /* * Collect number up to ``.''. * Values are specified as for C: * 0x=hex, 0=octal, 1-9=decimal. */ if (!isdigit(c)) { return 0; } val = 0; base = 10; if (c == '0') { c = *++cp; if (c == 'x' || c == 'X') { base = 16; c = *++cp; } else { base = 8; } } for (;;) { if (isdigit(c)) { val = (val * base) + (u32_t)(c - '0'); c = *++cp; } else if (base == 16 && isxdigit(c)) { val = (val << 4) | (u32_t)(c + 10 - (islower(c) ? 'a' : 'A')); c = *++cp; } else { break; } } if (c == '.') { /* * Internet format: * a.b.c.d * a.b.c (with c treated as 16 bits) * a.b (with b treated as 24 bits) */ if (pp >= parts + 3) { return 0; } *pp++ = val; c = *++cp; } else { break; } } /* * Check for trailing characters. */ if (c != '\0' && !isspace(c)) { return 0; } /* * Concoct the address according to * the number of parts specified. 
*/ switch (pp - parts + 1) { case 0: return 0; /* initial nondigit */ case 1: /* a -- 32 bits */ break; case 2: /* a.b -- 8.24 bits */ if (val > 0xffffffUL) { return 0; } if (parts[0] > 0xff) { return 0; } val |= parts[0] << 24; break; case 3: /* a.b.c -- 8.8.16 bits */ if (val > 0xffff) { return 0; } if ((parts[0] > 0xff) || (parts[1] > 0xff)) { return 0; } val |= (parts[0] << 24) | (parts[1] << 16); break; case 4: /* a.b.c.d -- 8.8.8.8 bits */ if (val > 0xff) { return 0; } if ((parts[0] > 0xff) || (parts[1] > 0xff) || (parts[2] > 0xff)) { return 0; } val |= (parts[0] << 24) | (parts[1] << 16) | (parts[2] << 8); break; default: LWIP_ASSERT("unhandled", 0); break; } if (addr) { ip4_addr_set_u32(addr, lwip_htonl(val)); } return 1; } /** * Convert numeric IP address into decimal dotted ASCII representation. * returns ptr to static buffer; not reentrant! * * @param addr ip address in network order to convert * @return pointer to a global static (!) buffer that holds the ASCII * representation of addr */ char* ip4addr_ntoa(const ip4_addr_t *addr) { static char str[IP4ADDR_STRLEN_MAX]; return ip4addr_ntoa_r(addr, str, IP4ADDR_STRLEN_MAX); } /** * Same as ipaddr_ntoa, but reentrant since a user-supplied buffer is used. * * @param addr ip address in network order to convert * @param buf target buffer where the string is stored * @param buflen length of buf * @return either pointer to buf which now holds the ASCII * representation of addr or NULL if buf was too small */ char* ip4addr_ntoa_r(const ip4_addr_t *addr, char *buf, int buflen) { u32_t s_addr; char inv[3]; char *rp; u8_t *ap; u8_t rem; u8_t n; u8_t i; int len = 0; s_addr = ip4_addr_get_u32(addr); rp = buf; ap = (u8_t *)&s_addr; for (n = 0; n < 4; n++) { i = 0; do { rem = *ap % (u8_t)10; *ap /= (u8_t)10; inv[i++] = (char)('0' + rem); } while (*ap); while (i--) { if (len++ >= buflen) { return NULL; } *rp++ = inv[i]; } if (len++ >= buflen) { return NULL; } *rp++ = '.'; ap++; } *--rp = 0; return buf; } #endif /* LWIP_IPV4 */
2301_81045437/classic-platform
communication/lwip-2.0.3/src/core/ipv4/ip4_addr.c
C
unknown
9,331
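ip4addr_aton() in ip4_addr.c above follows the classic BSD inet_aton conventions: one to four numeric parts, each accepted in decimal, octal (leading 0) or hex (leading 0x), with the final part filling all remaining low-order bytes. A small usage sketch follows; it assumes an lwIP build where "lwip/ip_addr.h" is on the include path and the library is linked, and example_parse and the sample strings are illustrative only.

#include <stdio.h>
#include "lwip/ip_addr.h"   /* the same header ip4_addr.c itself includes */

/* Parse a string with ip4addr_aton() and echo the canonical dotted-quad
 * form via ip4addr_ntoa() (both defined in ip4_addr.c above). */
static void example_parse(const char *s)
{
  ip4_addr_t a;
  if (ip4addr_aton(s, &a)) {
    printf("%-14s -> %s\n", s, ip4addr_ntoa(&a));
  } else {
    printf("%-14s -> invalid\n", s);
  }
}

int main(void)
{
  example_parse("192.168.1.10");  /* a.b.c.d form -> 192.168.1.10 */
  example_parse("127.1");         /* a.b form     -> 127.0.0.1 (b fills 24 bits) */
  example_parse("10.0.513");      /* a.b.c form   -> 10.0.2.1  (c fills 16 bits) */
  example_parse("0x7f000001");    /* single hex part -> 127.0.0.1 */
  example_parse("1.2.3.4.5");     /* too many parts  -> invalid */
  return 0;
}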
/** * @file * This is the IPv4 packet segmentation and reassembly implementation. * */ /* * Copyright (c) 2001-2004 Swedish Institute of Computer Science. * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY * OF SUCH DAMAGE. * * This file is part of the lwIP TCP/IP stack. * * Author: Jani Monoses <jani@iv.ro> * Simon Goldschmidt * original reassembly code by Adam Dunkels <adam@sics.se> * */ #include "lwip/opt.h" #if LWIP_IPV4 #include "lwip/ip4_frag.h" #include "lwip/def.h" #include "lwip/inet_chksum.h" #include "lwip/netif.h" #include "lwip/stats.h" #include "lwip/icmp.h" #include <string.h> #if IP_REASSEMBLY /** * The IP reassembly code currently has the following limitations: * - IP header options are not supported * - fragments must not overlap (e.g. due to different routes), * currently, overlapping or duplicate fragments are thrown away * if IP_REASS_CHECK_OVERLAP=1 (the default)! * * @todo: work with IP header options */ /** Setting this to 0, you can turn off checking the fragments for overlapping * regions. The code gets a little smaller. Only use this if you know that * overlapping won't occur on your network! */ #ifndef IP_REASS_CHECK_OVERLAP #define IP_REASS_CHECK_OVERLAP 1 #endif /* IP_REASS_CHECK_OVERLAP */ /** Set to 0 to prevent freeing the oldest datagram when the reassembly buffer is * full (IP_REASS_MAX_PBUFS pbufs are enqueued). The code gets a little smaller. * Datagrams will be freed by timeout only. Especially useful when MEMP_NUM_REASSDATA * is set to 1, so one datagram can be reassembled at a time, only. */ #ifndef IP_REASS_FREE_OLDEST #define IP_REASS_FREE_OLDEST 1 #endif /* IP_REASS_FREE_OLDEST */ #define IP_REASS_FLAG_LASTFRAG 0x01 #define IP_REASS_VALIDATE_TELEGRAM_FINISHED 1 #define IP_REASS_VALIDATE_PBUF_QUEUED 0 #define IP_REASS_VALIDATE_PBUF_DROPPED -1 /** This is a helper struct which holds the starting * offset and the ending offset of this fragment to * easily chain the fragments. * It has the same packing requirements as the IP header, since it replaces * the IP header in memory in incoming fragments (after copying it) to keep * track of the various fragments. (-> If the IP header doesn't need packing, * this struct doesn't need packing, too.) 
*/ #ifdef PACK_STRUCT_USE_INCLUDES # include "arch/bpstruct.h" #endif PACK_STRUCT_BEGIN struct ip_reass_helper { PACK_STRUCT_FIELD(struct pbuf *next_pbuf); PACK_STRUCT_FIELD(u16_t start); PACK_STRUCT_FIELD(u16_t end); } PACK_STRUCT_STRUCT; PACK_STRUCT_END #ifdef PACK_STRUCT_USE_INCLUDES # include "arch/epstruct.h" #endif #define IP_ADDRESSES_AND_ID_MATCH(iphdrA, iphdrB) \ (ip4_addr_cmp(&(iphdrA)->src, &(iphdrB)->src) && \ ip4_addr_cmp(&(iphdrA)->dest, &(iphdrB)->dest) && \ IPH_ID(iphdrA) == IPH_ID(iphdrB)) ? 1 : 0 /* global variables */ static struct ip_reassdata *reassdatagrams; static u16_t ip_reass_pbufcount; /* function prototypes */ static void ip_reass_dequeue_datagram(struct ip_reassdata *ipr, struct ip_reassdata *prev); static int ip_reass_free_complete_datagram(struct ip_reassdata *ipr, struct ip_reassdata *prev); /** * Reassembly timer base function * for both NO_SYS == 0 and 1 (!). * * Should be called every 1000 msec (defined by IP_TMR_INTERVAL). */ void ip_reass_tmr(void) { struct ip_reassdata *r, *prev = NULL; r = reassdatagrams; while (r != NULL) { /* Decrement the timer. Once it reaches 0, * clean up the incomplete fragment assembly */ if (r->timer > 0) { r->timer--; LWIP_DEBUGF(IP_REASS_DEBUG, ("ip_reass_tmr: timer dec %"U16_F"\n",(u16_t)r->timer)); prev = r; r = r->next; } else { /* reassembly timed out */ struct ip_reassdata *tmp; LWIP_DEBUGF(IP_REASS_DEBUG, ("ip_reass_tmr: timer timed out\n")); tmp = r; /* get the next pointer before freeing */ r = r->next; /* free the helper struct and all enqueued pbufs */ ip_reass_free_complete_datagram(tmp, prev); } } } /** * Free a datagram (struct ip_reassdata) and all its pbufs. * Updates the total count of enqueued pbufs (ip_reass_pbufcount), * SNMP counters and sends an ICMP time exceeded packet. * * @param ipr datagram to free * @param prev the previous datagram in the linked list * @return the number of pbufs freed */ static int ip_reass_free_complete_datagram(struct ip_reassdata *ipr, struct ip_reassdata *prev) { u16_t pbufs_freed = 0; u16_t clen; struct pbuf *p; struct ip_reass_helper *iprh; LWIP_ASSERT("prev != ipr", prev != ipr); if (prev != NULL) { LWIP_ASSERT("prev->next == ipr", prev->next == ipr); } MIB2_STATS_INC(mib2.ipreasmfails); #if LWIP_ICMP iprh = (struct ip_reass_helper *)ipr->p->payload; if (iprh->start == 0) { /* The first fragment was received, send ICMP time exceeded. */ /* First, de-queue the first pbuf from r->p. */ p = ipr->p; ipr->p = iprh->next_pbuf; /* Then, copy the original header into it. */ SMEMCPY(p->payload, &ipr->iphdr, IP_HLEN); icmp_time_exceeded(p, ICMP_TE_FRAG); clen = pbuf_clen(p); LWIP_ASSERT("pbufs_freed + clen <= 0xffff", pbufs_freed + clen <= 0xffff); pbufs_freed += clen; pbuf_free(p); } #endif /* LWIP_ICMP */ /* First, free all received pbufs. The individual pbufs need to be released separately as they have not yet been chained */ p = ipr->p; while (p != NULL) { struct pbuf *pcur; iprh = (struct ip_reass_helper *)p->payload; pcur = p; /* get the next pointer before freeing */ p = iprh->next_pbuf; clen = pbuf_clen(pcur); LWIP_ASSERT("pbufs_freed + clen <= 0xffff", pbufs_freed + clen <= 0xffff); pbufs_freed += clen; pbuf_free(pcur); } /* Then, unchain the struct ip_reassdata from the list and free it. */ ip_reass_dequeue_datagram(ipr, prev); LWIP_ASSERT("ip_reass_pbufcount >= clen", ip_reass_pbufcount >= pbufs_freed); ip_reass_pbufcount -= pbufs_freed; return pbufs_freed; } #if IP_REASS_FREE_OLDEST /** * Free the oldest datagram to make room for enqueueing new fragments. 
* The datagram 'fraghdr' belongs to is not freed! * * @param fraghdr IP header of the current fragment * @param pbufs_needed number of pbufs needed to enqueue * (used for freeing other datagrams if not enough space) * @return the number of pbufs freed */ static int ip_reass_remove_oldest_datagram(struct ip_hdr *fraghdr, int pbufs_needed) { /* @todo Can't we simply remove the last datagram in the * linked list behind reassdatagrams? */ struct ip_reassdata *r, *oldest, *prev, *oldest_prev; int pbufs_freed = 0, pbufs_freed_current; int other_datagrams; /* Free datagrams until being allowed to enqueue 'pbufs_needed' pbufs, * but don't free the datagram that 'fraghdr' belongs to! */ do { oldest = NULL; prev = NULL; oldest_prev = NULL; other_datagrams = 0; r = reassdatagrams; while (r != NULL) { if (!IP_ADDRESSES_AND_ID_MATCH(&r->iphdr, fraghdr)) { /* Not the same datagram as fraghdr */ other_datagrams++; if (oldest == NULL) { oldest = r; oldest_prev = prev; } else if (r->timer <= oldest->timer) { /* older than the previous oldest */ oldest = r; oldest_prev = prev; } } if (r->next != NULL) { prev = r; } r = r->next; } if (oldest != NULL) { pbufs_freed_current = ip_reass_free_complete_datagram(oldest, oldest_prev); pbufs_freed += pbufs_freed_current; } } while ((pbufs_freed < pbufs_needed) && (other_datagrams > 1)); return pbufs_freed; } #endif /* IP_REASS_FREE_OLDEST */ /** * Enqueues a new fragment into the fragment queue * @param fraghdr points to the new fragments IP hdr * @param clen number of pbufs needed to enqueue (used for freeing other datagrams if not enough space) * @return A pointer to the queue location into which the fragment was enqueued */ static struct ip_reassdata* ip_reass_enqueue_new_datagram(struct ip_hdr *fraghdr, int clen) { struct ip_reassdata* ipr; #if ! IP_REASS_FREE_OLDEST LWIP_UNUSED_ARG(clen); #endif /* No matching previous fragment found, allocate a new reassdata struct */ ipr = (struct ip_reassdata *)memp_malloc(MEMP_REASSDATA); if (ipr == NULL) { #if IP_REASS_FREE_OLDEST if (ip_reass_remove_oldest_datagram(fraghdr, clen) >= clen) { ipr = (struct ip_reassdata *)memp_malloc(MEMP_REASSDATA); } if (ipr == NULL) #endif /* IP_REASS_FREE_OLDEST */ { IPFRAG_STATS_INC(ip_frag.memerr); LWIP_DEBUGF(IP_REASS_DEBUG,("Failed to alloc reassdata struct\n")); return NULL; } } memset(ipr, 0, sizeof(struct ip_reassdata)); ipr->timer = IP_REASS_MAXAGE; /* enqueue the new structure to the front of the list */ ipr->next = reassdatagrams; reassdatagrams = ipr; /* copy the ip header for later tests and input */ /* @todo: no ip options supported? */ SMEMCPY(&(ipr->iphdr), fraghdr, IP_HLEN); return ipr; } /** * Dequeues a datagram from the datagram queue. Doesn't deallocate the pbufs. * @param ipr points to the queue entry to dequeue */ static void ip_reass_dequeue_datagram(struct ip_reassdata *ipr, struct ip_reassdata *prev) { /* dequeue the reass struct */ if (reassdatagrams == ipr) { /* it was the first in the list */ reassdatagrams = ipr->next; } else { /* it wasn't the first, so it must have a valid 'prev' */ LWIP_ASSERT("sanity check linked list", prev != NULL); prev->next = ipr->next; } /* now we can free the ip_reassdata struct */ memp_free(MEMP_REASSDATA, ipr); } /** * Chain a new pbuf into the pbuf list that composes the datagram. The pbuf list * will grow over time as new pbufs are rx. * Also checks that the datagram passes basic continuity checks (if the last * fragment was received at least once). 
* @param ipr points to the reassembly state * @param new_p points to the pbuf for the current fragment * @param is_last is 1 if this pbuf has MF==0 (ipr->flags not updated yet) * @return see IP_REASS_VALIDATE_* defines */ static int ip_reass_chain_frag_into_datagram_and_validate(struct ip_reassdata *ipr, struct pbuf *new_p, int is_last) { struct ip_reass_helper *iprh, *iprh_tmp, *iprh_prev=NULL; struct pbuf *q; u16_t offset, len; struct ip_hdr *fraghdr; int valid = 1; /* Extract length and fragment offset from current fragment */ fraghdr = (struct ip_hdr*)new_p->payload; len = lwip_ntohs(IPH_LEN(fraghdr)) - IPH_HL(fraghdr) * 4; offset = (lwip_ntohs(IPH_OFFSET(fraghdr)) & IP_OFFMASK) * 8; /* overwrite the fragment's ip header from the pbuf with our helper struct, * and setup the embedded helper structure. */ /* make sure the struct ip_reass_helper fits into the IP header */ LWIP_ASSERT("sizeof(struct ip_reass_helper) <= IP_HLEN", sizeof(struct ip_reass_helper) <= IP_HLEN); iprh = (struct ip_reass_helper*)new_p->payload; iprh->next_pbuf = NULL; iprh->start = offset; iprh->end = offset + len; /* Iterate through until we either get to the end of the list (append), * or we find one with a larger offset (insert). */ for (q = ipr->p; q != NULL;) { iprh_tmp = (struct ip_reass_helper*)q->payload; if (iprh->start < iprh_tmp->start) { /* the new pbuf should be inserted before this */ iprh->next_pbuf = q; if (iprh_prev != NULL) { /* not the fragment with the lowest offset */ #if IP_REASS_CHECK_OVERLAP if ((iprh->start < iprh_prev->end) || (iprh->end > iprh_tmp->start)) { /* fragment overlaps with previous or following, throw away */ goto freepbuf; } #endif /* IP_REASS_CHECK_OVERLAP */ iprh_prev->next_pbuf = new_p; if (iprh_prev->end != iprh->start) { /* There is a fragment missing between the current * and the previous fragment */ valid = 0; } } else { #if IP_REASS_CHECK_OVERLAP if (iprh->end > iprh_tmp->start) { /* fragment overlaps with following, throw away */ goto freepbuf; } #endif /* IP_REASS_CHECK_OVERLAP */ /* fragment with the lowest offset */ ipr->p = new_p; } break; } else if (iprh->start == iprh_tmp->start) { /* received the same datagram twice: no need to keep the datagram */ goto freepbuf; #if IP_REASS_CHECK_OVERLAP } else if (iprh->start < iprh_tmp->end) { /* overlap: no need to keep the new datagram */ goto freepbuf; #endif /* IP_REASS_CHECK_OVERLAP */ } else { /* Check if the fragments received so far have no holes. */ if (iprh_prev != NULL) { if (iprh_prev->end != iprh_tmp->start) { /* There is a fragment missing between the current * and the previous fragment */ valid = 0; } } } q = iprh_tmp->next_pbuf; iprh_prev = iprh_tmp; } /* If q is NULL, then we made it to the end of the list. 
Determine what to do now */ if (q == NULL) { if (iprh_prev != NULL) { /* this is (for now), the fragment with the highest offset: * chain it to the last fragment */ #if IP_REASS_CHECK_OVERLAP LWIP_ASSERT("check fragments don't overlap", iprh_prev->end <= iprh->start); #endif /* IP_REASS_CHECK_OVERLAP */ iprh_prev->next_pbuf = new_p; if (iprh_prev->end != iprh->start) { valid = 0; } } else { #if IP_REASS_CHECK_OVERLAP LWIP_ASSERT("no previous fragment, this must be the first fragment!", ipr->p == NULL); #endif /* IP_REASS_CHECK_OVERLAP */ /* this is the first fragment we ever received for this ip datagram */ ipr->p = new_p; } } /* At this point, the validation part begins: */ /* If we already received the last fragment */ if (is_last || ((ipr->flags & IP_REASS_FLAG_LASTFRAG) != 0)) { /* and had no holes so far */ if (valid) { /* then check if the rest of the fragments are here */ /* Check if the queue starts with the first datagram */ if ((ipr->p == NULL) || (((struct ip_reass_helper*)ipr->p->payload)->start != 0)) { valid = 0; } else { /* and check that there are no holes after this datagram */ iprh_prev = iprh; q = iprh->next_pbuf; while (q != NULL) { iprh = (struct ip_reass_helper*)q->payload; if (iprh_prev->end != iprh->start) { valid = 0; break; } iprh_prev = iprh; q = iprh->next_pbuf; } /* if still valid, all fragments are received * (because the MF==0 fragment has already arrived) */ if (valid) { LWIP_ASSERT("sanity check", ipr->p != NULL); LWIP_ASSERT("sanity check", ((struct ip_reass_helper*)ipr->p->payload) != iprh); LWIP_ASSERT("validate_datagram:next_pbuf!=NULL", iprh->next_pbuf == NULL); } } } /* If valid is 0 here, there are some fragments missing in the middle * (since MF == 0 has already arrived). Such datagrams simply time out if * no more fragments are received... */ return valid ? IP_REASS_VALIDATE_TELEGRAM_FINISHED : IP_REASS_VALIDATE_PBUF_QUEUED; } /* If we come here, not all fragments were received, yet! */ return IP_REASS_VALIDATE_PBUF_QUEUED; /* not yet valid! */ #if IP_REASS_CHECK_OVERLAP freepbuf: ip_reass_pbufcount -= pbuf_clen(new_p); pbuf_free(new_p); return IP_REASS_VALIDATE_PBUF_DROPPED; #endif /* IP_REASS_CHECK_OVERLAP */ } /** * Reassembles incoming IP fragments into an IP datagram. * * @param p points to a pbuf chain of the fragment * @return NULL if reassembly is incomplete, a pbuf pointing to the reassembled datagram otherwise */ struct pbuf * ip4_reass(struct pbuf *p) { struct pbuf *r; struct ip_hdr *fraghdr; struct ip_reassdata *ipr; struct ip_reass_helper *iprh; u16_t offset, len, clen; int valid; int is_last; IPFRAG_STATS_INC(ip_frag.recv); MIB2_STATS_INC(mib2.ipreasmreqds); fraghdr = (struct ip_hdr*)p->payload; if ((IPH_HL(fraghdr) * 4) != IP_HLEN) { LWIP_DEBUGF(IP_REASS_DEBUG,("ip4_reass: IP options currently not supported!\n")); IPFRAG_STATS_INC(ip_frag.err); goto nullreturn; } offset = (lwip_ntohs(IPH_OFFSET(fraghdr)) & IP_OFFMASK) * 8; len = lwip_ntohs(IPH_LEN(fraghdr)) - IPH_HL(fraghdr) * 4; /* Check if we are allowed to enqueue more datagrams. */ clen = pbuf_clen(p); if ((ip_reass_pbufcount + clen) > IP_REASS_MAX_PBUFS) { #if IP_REASS_FREE_OLDEST if (!ip_reass_remove_oldest_datagram(fraghdr, clen) || ((ip_reass_pbufcount + clen) > IP_REASS_MAX_PBUFS)) #endif /* IP_REASS_FREE_OLDEST */ { /* No datagram could be freed and still too many pbufs enqueued */ LWIP_DEBUGF(IP_REASS_DEBUG,("ip4_reass: Overflow condition: pbufct=%d, clen=%d, MAX=%d\n", ip_reass_pbufcount, clen, IP_REASS_MAX_PBUFS)); IPFRAG_STATS_INC(ip_frag.memerr); /* @todo: send ICMP time exceeded here? 
*/ /* drop this pbuf */ goto nullreturn; } } /* Look for the datagram the fragment belongs to in the current datagram queue, * remembering the previous in the queue for later dequeueing. */ for (ipr = reassdatagrams; ipr != NULL; ipr = ipr->next) { /* Check if the incoming fragment matches the one currently present in the reassembly buffer. If so, we proceed with copying the fragment into the buffer. */ if (IP_ADDRESSES_AND_ID_MATCH(&ipr->iphdr, fraghdr)) { LWIP_DEBUGF(IP_REASS_DEBUG, ("ip4_reass: matching previous fragment ID=%"X16_F"\n", lwip_ntohs(IPH_ID(fraghdr)))); IPFRAG_STATS_INC(ip_frag.cachehit); break; } } if (ipr == NULL) { /* Enqueue a new datagram into the datagram queue */ ipr = ip_reass_enqueue_new_datagram(fraghdr, clen); /* Bail if unable to enqueue */ if (ipr == NULL) { goto nullreturn; } } else { if (((lwip_ntohs(IPH_OFFSET(fraghdr)) & IP_OFFMASK) == 0) && ((lwip_ntohs(IPH_OFFSET(&ipr->iphdr)) & IP_OFFMASK) != 0)) { /* ipr->iphdr is not the header from the first fragment, but fraghdr is * -> copy fraghdr into ipr->iphdr since we want to have the header * of the first fragment (for ICMP time exceeded and later, for copying * all options, if supported)*/ SMEMCPY(&ipr->iphdr, fraghdr, IP_HLEN); } } /* At this point, we have either created a new entry or pointing * to an existing one */ /* check for 'no more fragments', and update queue entry*/ is_last = (IPH_OFFSET(fraghdr) & PP_NTOHS(IP_MF)) == 0; if (is_last) { u16_t datagram_len = (u16_t)(offset + len); if ((datagram_len < offset) || (datagram_len > (0xFFFF - IP_HLEN))) { /* u16_t overflow, cannot handle this */ goto nullreturn; } } /* find the right place to insert this pbuf */ /* @todo: trim pbufs if fragments are overlapping */ valid = ip_reass_chain_frag_into_datagram_and_validate(ipr, p, is_last); if (valid == IP_REASS_VALIDATE_PBUF_DROPPED) { goto nullreturn; } /* if we come here, the pbuf has been enqueued */ /* Track the current number of pbufs current 'in-flight', in order to limit the number of fragments that may be enqueued at any one time (overflow checked by testing against IP_REASS_MAX_PBUFS) */ ip_reass_pbufcount = (u16_t)(ip_reass_pbufcount + clen); if (is_last) { u16_t datagram_len = (u16_t)(offset + len); ipr->datagram_len = datagram_len; ipr->flags |= IP_REASS_FLAG_LASTFRAG; LWIP_DEBUGF(IP_REASS_DEBUG, ("ip4_reass: last fragment seen, total len %"S16_F"\n", ipr->datagram_len)); } if (valid == IP_REASS_VALIDATE_TELEGRAM_FINISHED) { struct ip_reassdata *ipr_prev; /* the totally last fragment (flag more fragments = 0) was received at least * once AND all fragments are received */ ipr->datagram_len += IP_HLEN; /* save the second pbuf before copying the header over the pointer */ r = ((struct ip_reass_helper*)ipr->p->payload)->next_pbuf; /* copy the original ip header back to the first pbuf */ fraghdr = (struct ip_hdr*)(ipr->p->payload); SMEMCPY(fraghdr, &ipr->iphdr, IP_HLEN); IPH_LEN_SET(fraghdr, lwip_htons(ipr->datagram_len)); IPH_OFFSET_SET(fraghdr, 0); IPH_CHKSUM_SET(fraghdr, 0); /* @todo: do we need to set/calculate the correct checksum? */ #if CHECKSUM_GEN_IP IF__NETIF_CHECKSUM_ENABLED(ip_current_input_netif(), NETIF_CHECKSUM_GEN_IP) { IPH_CHKSUM_SET(fraghdr, inet_chksum(fraghdr, IP_HLEN)); } #endif /* CHECKSUM_GEN_IP */ p = ipr->p; /* chain together the pbufs contained within the reass_data list. 
*/ while (r != NULL) { iprh = (struct ip_reass_helper*)r->payload; /* hide the ip header for every succeeding fragment */ pbuf_header(r, -IP_HLEN); pbuf_cat(p, r); r = iprh->next_pbuf; } /* find the previous entry in the linked list */ if (ipr == reassdatagrams) { ipr_prev = NULL; } else { for (ipr_prev = reassdatagrams; ipr_prev != NULL; ipr_prev = ipr_prev->next) { if (ipr_prev->next == ipr) { break; } } } /* release the resources allocated for the fragment queue entry */ ip_reass_dequeue_datagram(ipr, ipr_prev); /* and adjust the number of pbufs currently queued for reassembly. */ ip_reass_pbufcount -= pbuf_clen(p); MIB2_STATS_INC(mib2.ipreasmoks); /* Return the pbuf chain */ return p; } /* the datagram is not (yet?) reassembled completely */ LWIP_DEBUGF(IP_REASS_DEBUG,("ip_reass_pbufcount: %d out\n", ip_reass_pbufcount)); return NULL; nullreturn: LWIP_DEBUGF(IP_REASS_DEBUG,("ip4_reass: nullreturn\n")); IPFRAG_STATS_INC(ip_frag.drop); pbuf_free(p); return NULL; } #endif /* IP_REASSEMBLY */ #if IP_FRAG #if !LWIP_NETIF_TX_SINGLE_PBUF /** Allocate a new struct pbuf_custom_ref */ static struct pbuf_custom_ref* ip_frag_alloc_pbuf_custom_ref(void) { return (struct pbuf_custom_ref*)memp_malloc(MEMP_FRAG_PBUF); } /** Free a struct pbuf_custom_ref */ static void ip_frag_free_pbuf_custom_ref(struct pbuf_custom_ref* p) { LWIP_ASSERT("p != NULL", p != NULL); memp_free(MEMP_FRAG_PBUF, p); } /** Free-callback function to free a 'struct pbuf_custom_ref', called by * pbuf_free. */ static void ipfrag_free_pbuf_custom(struct pbuf *p) { struct pbuf_custom_ref *pcr = (struct pbuf_custom_ref*)p; LWIP_ASSERT("pcr != NULL", pcr != NULL); LWIP_ASSERT("pcr == p", (void*)pcr == (void*)p); if (pcr->original != NULL) { pbuf_free(pcr->original); } ip_frag_free_pbuf_custom_ref(pcr); } #endif /* !LWIP_NETIF_TX_SINGLE_PBUF */ /** * Fragment an IP datagram if too large for the netif. * * Chop the datagram into MTU-sized chunks and send them in order * by pointing PBUF_REFs into p. 
* * @param p ip packet to send * @param netif the netif on which to send * @param dest destination ip address to which to send * * @return ERR_OK if sent successfully, err_t otherwise */ err_t ip4_frag(struct pbuf *p, struct netif *netif, const ip4_addr_t *dest) { struct pbuf *rambuf; #if !LWIP_NETIF_TX_SINGLE_PBUF struct pbuf *newpbuf; u16_t newpbuflen = 0; u16_t left_to_copy; #endif struct ip_hdr *original_iphdr; struct ip_hdr *iphdr; const u16_t nfb = (netif->mtu - IP_HLEN) / 8; u16_t left, fragsize; u16_t ofo; int last; u16_t poff = IP_HLEN; u16_t tmp; original_iphdr = (struct ip_hdr *)p->payload; iphdr = original_iphdr; LWIP_ERROR("ip4_frag() does not support IP options", IPH_HL(iphdr) * 4 == IP_HLEN, return ERR_VAL); /* Save original offset */ tmp = lwip_ntohs(IPH_OFFSET(iphdr)); ofo = tmp & IP_OFFMASK; LWIP_ERROR("ip_frag(): MF already set", (tmp & IP_MF) == 0, return ERR_VAL); left = p->tot_len - IP_HLEN; while (left) { /* Fill this fragment */ fragsize = LWIP_MIN(left, nfb * 8); #if LWIP_NETIF_TX_SINGLE_PBUF rambuf = pbuf_alloc(PBUF_IP, fragsize, PBUF_RAM); if (rambuf == NULL) { goto memerr; } LWIP_ASSERT("this needs a pbuf in one piece!", (rambuf->len == rambuf->tot_len) && (rambuf->next == NULL)); poff += pbuf_copy_partial(p, rambuf->payload, fragsize, poff); /* make room for the IP header */ if (pbuf_header(rambuf, IP_HLEN)) { pbuf_free(rambuf); goto memerr; } /* fill in the IP header */ SMEMCPY(rambuf->payload, original_iphdr, IP_HLEN); iphdr = (struct ip_hdr*)rambuf->payload; #else /* LWIP_NETIF_TX_SINGLE_PBUF */ /* When not using a static buffer, create a chain of pbufs. * The first will be a PBUF_RAM holding the link and IP header. * The rest will be PBUF_REFs mirroring the pbuf chain to be fragged, * but limited to the size of an mtu. */ rambuf = pbuf_alloc(PBUF_LINK, IP_HLEN, PBUF_RAM); if (rambuf == NULL) { goto memerr; } LWIP_ASSERT("this needs a pbuf in one piece!", (p->len >= (IP_HLEN))); SMEMCPY(rambuf->payload, original_iphdr, IP_HLEN); iphdr = (struct ip_hdr *)rambuf->payload; left_to_copy = fragsize; while (left_to_copy) { struct pbuf_custom_ref *pcr; u16_t plen = p->len - poff; newpbuflen = LWIP_MIN(left_to_copy, plen); /* Is this pbuf already empty? */ if (!newpbuflen) { poff = 0; p = p->next; continue; } pcr = ip_frag_alloc_pbuf_custom_ref(); if (pcr == NULL) { pbuf_free(rambuf); goto memerr; } /* Mirror this pbuf, although we might not need all of it. */ newpbuf = pbuf_alloced_custom(PBUF_RAW, newpbuflen, PBUF_REF, &pcr->pc, (u8_t*)p->payload + poff, newpbuflen); if (newpbuf == NULL) { ip_frag_free_pbuf_custom_ref(pcr); pbuf_free(rambuf); goto memerr; } pbuf_ref(p); pcr->original = p; pcr->pc.custom_free_function = ipfrag_free_pbuf_custom; /* Add it to end of rambuf's chain, but using pbuf_cat, not pbuf_chain * so that it is removed when pbuf_dechain is later called on rambuf. */ pbuf_cat(rambuf, newpbuf); left_to_copy -= newpbuflen; if (left_to_copy) { poff = 0; p = p->next; } } poff += newpbuflen; #endif /* LWIP_NETIF_TX_SINGLE_PBUF */ /* Correct header */ last = (left <= netif->mtu - IP_HLEN); /* Set new offset and MF flag */ tmp = (IP_OFFMASK & (ofo)); if (!last) { tmp = tmp | IP_MF; } IPH_OFFSET_SET(iphdr, lwip_htons(tmp)); IPH_LEN_SET(iphdr, lwip_htons(fragsize + IP_HLEN)); IPH_CHKSUM_SET(iphdr, 0); #if CHECKSUM_GEN_IP IF__NETIF_CHECKSUM_ENABLED(netif, NETIF_CHECKSUM_GEN_IP) { IPH_CHKSUM_SET(iphdr, inet_chksum(iphdr, IP_HLEN)); } #endif /* CHECKSUM_GEN_IP */ /* No need for separate header pbuf - we allowed room for it in rambuf * when allocated. 
*/ netif->output(netif, rambuf, dest); IPFRAG_STATS_INC(ip_frag.xmit); /* Unfortunately we can't reuse rambuf - the hardware may still be * using the buffer. Instead we free it (and the ensuing chain) and * recreate it next time round the loop. If we're lucky the hardware * will have already sent the packet, the free will really free, and * there will be zero memory penalty. */ pbuf_free(rambuf); left -= fragsize; ofo += nfb; } MIB2_STATS_INC(mib2.ipfragoks); return ERR_OK; memerr: MIB2_STATS_INC(mib2.ipfragfails); return ERR_MEM; } #endif /* IP_FRAG */ #endif /* LWIP_IPV4 */
2301_81045437/classic-platform
communication/lwip-2.0.3/src/core/ipv4/ip4_frag.c
C
unknown
29,820
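The validation step in the ip4_frag.c entry above boils down to an interval check: the fragments, kept sorted by offset, must tile the datagram from offset 0 up to the total length recorded when the MF==0 fragment arrives. The following standalone sketch is not lwIP code; the names and example sizes are made up purely to illustrate that completeness test on plain arrays.

/* Standalone illustration of the hole check performed by
 * ip_reass_chain_frag_into_datagram_and_validate(), reduced to arrays. */
#include <stdio.h>

struct frag { unsigned start; unsigned end; };  /* byte offsets, end exclusive */

/* Return 1 if the fragments, already sorted by start offset, cover
 * 0..total_len with no hole (and no overlap-induced gap). */
static int frags_complete(const struct frag *f, unsigned n, unsigned total_len)
{
  unsigned expected = 0;
  unsigned i;
  for (i = 0; i < n; i++) {
    if (f[i].start != expected) {
      return 0;                 /* hole before (or overlap with) this fragment */
    }
    expected = f[i].end;
  }
  return expected == total_len; /* the last fragment must end the datagram */
}

int main(void)
{
  struct frag complete_set[] = { {0, 1480}, {1480, 2960}, {2960, 3000} };
  struct frag holey_set[]    = { {0, 1480}, {2960, 3000} };
  printf("%d %d\n", frags_complete(complete_set, 3, 3000),
                    frags_complete(holey_set, 2, 3000));   /* prints "1 0" */
  return 0;
}

In ip4_reass() itself the same start/end information lives in the struct ip_reass_helper that is overlaid on each queued fragment's IP header, so the check needs no extra allocation.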
/** * @file * * DHCPv6. */ /* * Copyright (c) 2010 Inico Technologies Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY * OF SUCH DAMAGE. * * This file is part of the lwIP TCP/IP stack. * * Author: Ivan Delamer <delamer@inicotech.com> * * * Please coordinate changes and requests with Ivan Delamer * <delamer@inicotech.com> */ #include "lwip/opt.h" #if LWIP_IPV6 && LWIP_IPV6_DHCP6 /* don't build if not configured for use in lwipopts.h */ #include "lwip/ip6_addr.h" #include "lwip/def.h" #endif /* LWIP_IPV6 && LWIP_IPV6_DHCP6 */
2301_81045437/classic-platform
communication/lwip-2.0.3/src/core/ipv6/dhcp6.c
C
unknown
1,964
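The dhcp6.c entry above is still a stub: everything sits behind the LWIP_IPV6 && LWIP_IPV6_DHCP6 guard and no protocol handling is implemented yet. A hypothetical lwipopts.h excerpt that would at least make that guard true is shown below; it is illustrative only, since real projects set many more options.

/* lwipopts.h (illustrative excerpt) */
#define LWIP_IPV6        1   /* build the IPv6 core */
#define LWIP_IPV6_DHCP6  1   /* compile the dhcp6.c translation unit */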
/** * @file * * Ethernet output for IPv6. Uses ND tables for link-layer addressing. */ /* * Copyright (c) 2010 Inico Technologies Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY * OF SUCH DAMAGE. * * This file is part of the lwIP TCP/IP stack. * * Author: Ivan Delamer <delamer@inicotech.com> * * * Please coordinate changes and requests with Ivan Delamer * <delamer@inicotech.com> */ #include "lwip/opt.h" #if LWIP_IPV6 && LWIP_ETHERNET #include "lwip/ethip6.h" #include "lwip/nd6.h" #include "lwip/pbuf.h" #include "lwip/ip6.h" #include "lwip/ip6_addr.h" #include "lwip/inet_chksum.h" #include "lwip/netif.h" #include "lwip/icmp6.h" #include "lwip/prot/ethernet.h" #include "netif/ethernet.h" #include <string.h> /** * Resolve and fill-in Ethernet address header for outgoing IPv6 packet. * * For IPv6 multicast, corresponding Ethernet addresses * are selected and the packet is transmitted on the link. * * For unicast addresses, ask the ND6 module what to do. It will either let us * send the the packet right away, or queue the packet for later itself, unless * an error occurs. * * @todo anycast addresses * * @param netif The lwIP network interface which the IP packet will be sent on. * @param q The pbuf(s) containing the IP packet to be sent. * @param ip6addr The IP address of the packet destination. * * @return * - ERR_OK or the return value of @ref nd6_get_next_hop_addr_or_queue. */ err_t ethip6_output(struct netif *netif, struct pbuf *q, const ip6_addr_t *ip6addr) { struct eth_addr dest; const u8_t *hwaddr; err_t result; /* multicast destination IP address? */ if (ip6_addr_ismulticast(ip6addr)) { /* Hash IP multicast address to MAC address.*/ dest.addr[0] = 0x33; dest.addr[1] = 0x33; dest.addr[2] = ((const u8_t *)(&(ip6addr->addr[3])))[0]; dest.addr[3] = ((const u8_t *)(&(ip6addr->addr[3])))[1]; dest.addr[4] = ((const u8_t *)(&(ip6addr->addr[3])))[2]; dest.addr[5] = ((const u8_t *)(&(ip6addr->addr[3])))[3]; /* Send out. */ return ethernet_output(netif, q, (const struct eth_addr*)(netif->hwaddr), &dest, ETHTYPE_IPV6); } /* We have a unicast destination IP address */ /* @todo anycast? */ /* Ask ND6 what to do with the packet. 
*/ result = nd6_get_next_hop_addr_or_queue(netif, q, ip6addr, &hwaddr); if (result != ERR_OK) { return result; } /* If no hardware address is returned, nd6 has queued the packet for later. */ if (hwaddr == NULL) { return ERR_OK; } /* Send out the packet using the returned hardware address. */ SMEMCPY(dest.addr, hwaddr, 6); return ethernet_output(netif, q, (const struct eth_addr*)(netif->hwaddr), &dest, ETHTYPE_IPV6); } #endif /* LWIP_IPV6 && LWIP_ETHERNET */
2301_81045437/classic-platform
communication/lwip-2.0.3/src/core/ipv6/ethip6.c
C
unknown
4,226
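The multicast branch of ethip6_output() above hashes the IPv6 group address into a MAC address by prefixing 33:33 to the last 32 bits of the group, per RFC 2464. The sketch below reproduces that mapping outside lwIP, with made-up names, assuming the address is handled as raw network-order bytes.

/* Standalone sketch (not lwIP code) of the IPv6-multicast-to-Ethernet
 * mapping used in ethip6_output(). */
#include <stdio.h>
#include <stdint.h>

static void ipv6_mcast_to_eth(const uint8_t ip6[16], uint8_t mac[6])
{
  mac[0] = 0x33;
  mac[1] = 0x33;
  mac[2] = ip6[12];  /* last four bytes of the 128-bit group address */
  mac[3] = ip6[13];
  mac[4] = ip6[14];
  mac[5] = ip6[15];
}

int main(void)
{
  /* ff02::1, the link-local all-nodes group */
  uint8_t allnodes[16] = { 0xff, 0x02, 0,0,0,0,0,0, 0,0,0,0, 0,0,0,0x01 };
  uint8_t mac[6];
  ipv6_mcast_to_eth(allnodes, mac);
  printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
         mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);  /* 33:33:00:00:00:01 */
  return 0;
}

Unicast destinations take the other branch and go through nd6_get_next_hop_addr_or_queue(), as shown in the file above.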
/** * @file * * IPv6 version of ICMP, as per RFC 4443. */ /* * Copyright (c) 2010 Inico Technologies Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY * OF SUCH DAMAGE. * * This file is part of the lwIP TCP/IP stack. * * Author: Ivan Delamer <delamer@inicotech.com> * * * Please coordinate changes and requests with Ivan Delamer * <delamer@inicotech.com> */ #include "lwip/opt.h" #if LWIP_ICMP6 && LWIP_IPV6 /* don't build if not configured for use in lwipopts.h */ #include "lwip/icmp6.h" #include "lwip/prot/icmp6.h" #include "lwip/ip6.h" #include "lwip/ip6_addr.h" #include "lwip/inet_chksum.h" #include "lwip/pbuf.h" #include "lwip/netif.h" #include "lwip/nd6.h" #include "lwip/mld6.h" #include "lwip/ip.h" #include "lwip/stats.h" #include <string.h> #ifndef LWIP_ICMP6_DATASIZE #define LWIP_ICMP6_DATASIZE 8 #endif #if LWIP_ICMP6_DATASIZE == 0 #define LWIP_ICMP6_DATASIZE 8 #endif /* Forward declarations */ static void icmp6_send_response(struct pbuf *p, u8_t code, u32_t data, u8_t type); /** * Process an input ICMPv6 message. Called by ip6_input. * * Will generate a reply for echo requests. Other messages are forwarded * to nd6_input, or mld6_input. 
* * @param p the mld packet, p->payload pointing to the icmpv6 header * @param inp the netif on which this packet was received */ void icmp6_input(struct pbuf *p, struct netif *inp) { struct icmp6_hdr *icmp6hdr; struct pbuf *r; const ip6_addr_t *reply_src; ICMP6_STATS_INC(icmp6.recv); /* Check that ICMPv6 header fits in payload */ if (p->len < sizeof(struct icmp6_hdr)) { /* drop short packets */ pbuf_free(p); ICMP6_STATS_INC(icmp6.lenerr); ICMP6_STATS_INC(icmp6.drop); return; } icmp6hdr = (struct icmp6_hdr *)p->payload; #if CHECKSUM_CHECK_ICMP6 IF__NETIF_CHECKSUM_ENABLED(inp, NETIF_CHECKSUM_CHECK_ICMP6) { if (ip6_chksum_pseudo(p, IP6_NEXTH_ICMP6, p->tot_len, ip6_current_src_addr(), ip6_current_dest_addr()) != 0) { /* Checksum failed */ pbuf_free(p); ICMP6_STATS_INC(icmp6.chkerr); ICMP6_STATS_INC(icmp6.drop); return; } } #endif /* CHECKSUM_CHECK_ICMP6 */ switch (icmp6hdr->type) { case ICMP6_TYPE_NA: /* Neighbor advertisement */ case ICMP6_TYPE_NS: /* Neighbor solicitation */ case ICMP6_TYPE_RA: /* Router advertisement */ case ICMP6_TYPE_RD: /* Redirect */ case ICMP6_TYPE_PTB: /* Packet too big */ nd6_input(p, inp); return; break; case ICMP6_TYPE_RS: #if LWIP_IPV6_FORWARD /* @todo implement router functionality */ #endif break; #if LWIP_IPV6_MLD case ICMP6_TYPE_MLQ: case ICMP6_TYPE_MLR: case ICMP6_TYPE_MLD: mld6_input(p, inp); return; break; #endif case ICMP6_TYPE_EREQ: #if !LWIP_MULTICAST_PING /* multicast destination address? */ if (ip6_addr_ismulticast(ip6_current_dest_addr())) { /* drop */ pbuf_free(p); ICMP6_STATS_INC(icmp6.drop); return; } #endif /* LWIP_MULTICAST_PING */ /* Allocate reply. */ r = pbuf_alloc(PBUF_IP, p->tot_len, PBUF_RAM); if (r == NULL) { /* drop */ pbuf_free(p); ICMP6_STATS_INC(icmp6.memerr); return; } /* Copy echo request. */ if (pbuf_copy(r, p) != ERR_OK) { /* drop */ pbuf_free(p); pbuf_free(r); ICMP6_STATS_INC(icmp6.err); return; } /* Determine reply source IPv6 address. */ #if LWIP_MULTICAST_PING if (ip6_addr_ismulticast(ip6_current_dest_addr())) { reply_src = ip_2_ip6(ip6_select_source_address(inp, ip6_current_src_addr())); if (reply_src == NULL) { /* drop */ pbuf_free(p); pbuf_free(r); ICMP6_STATS_INC(icmp6.rterr); return; } } else #endif /* LWIP_MULTICAST_PING */ { reply_src = ip6_current_dest_addr(); } /* Set fields in reply. */ ((struct icmp6_echo_hdr *)(r->payload))->type = ICMP6_TYPE_EREP; ((struct icmp6_echo_hdr *)(r->payload))->chksum = 0; #if CHECKSUM_GEN_ICMP6 IF__NETIF_CHECKSUM_ENABLED(inp, NETIF_CHECKSUM_GEN_ICMP6) { ((struct icmp6_echo_hdr *)(r->payload))->chksum = ip6_chksum_pseudo(r, IP6_NEXTH_ICMP6, r->tot_len, reply_src, ip6_current_src_addr()); } #endif /* CHECKSUM_GEN_ICMP6 */ /* Send reply. */ ICMP6_STATS_INC(icmp6.xmit); ip6_output_if(r, reply_src, ip6_current_src_addr(), LWIP_ICMP6_HL, 0, IP6_NEXTH_ICMP6, inp); pbuf_free(r); break; default: ICMP6_STATS_INC(icmp6.proterr); ICMP6_STATS_INC(icmp6.drop); break; } pbuf_free(p); } /** * Send an icmpv6 'destination unreachable' packet. * * @param p the input packet for which the 'unreachable' should be sent, * p->payload pointing to the IPv6 header * @param c ICMPv6 code for the unreachable type */ void icmp6_dest_unreach(struct pbuf *p, enum icmp6_dur_code c) { icmp6_send_response(p, c, 0, ICMP6_TYPE_DUR); } /** * Send an icmpv6 'packet too big' packet. 
* * @param p the input packet for which the 'packet too big' should be sent, * p->payload pointing to the IPv6 header * @param mtu the maximum mtu that we can accept */ void icmp6_packet_too_big(struct pbuf *p, u32_t mtu) { icmp6_send_response(p, 0, mtu, ICMP6_TYPE_PTB); } /** * Send an icmpv6 'time exceeded' packet. * * @param p the input packet for which the 'unreachable' should be sent, * p->payload pointing to the IPv6 header * @param c ICMPv6 code for the time exceeded type */ void icmp6_time_exceeded(struct pbuf *p, enum icmp6_te_code c) { icmp6_send_response(p, c, 0, ICMP6_TYPE_TE); } /** * Send an icmpv6 'parameter problem' packet. * * @param p the input packet for which the 'param problem' should be sent, * p->payload pointing to the IP header * @param c ICMPv6 code for the param problem type * @param pointer the pointer to the byte where the parameter is found */ void icmp6_param_problem(struct pbuf *p, enum icmp6_pp_code c, u32_t pointer) { icmp6_send_response(p, c, pointer, ICMP6_TYPE_PP); } /** * Send an ICMPv6 packet in response to an incoming packet. * * @param p the input packet for which the response should be sent, * p->payload pointing to the IPv6 header * @param code Code of the ICMPv6 header * @param data Additional 32-bit parameter in the ICMPv6 header * @param type Type of the ICMPv6 header */ static void icmp6_send_response(struct pbuf *p, u8_t code, u32_t data, u8_t type) { struct pbuf *q; struct icmp6_hdr *icmp6hdr; const ip6_addr_t *reply_src; ip6_addr_t *reply_dest; ip6_addr_t reply_src_local, reply_dest_local; struct ip6_hdr *ip6hdr; struct netif *netif; /* ICMPv6 header + IPv6 header + data */ q = pbuf_alloc(PBUF_IP, sizeof(struct icmp6_hdr) + IP6_HLEN + LWIP_ICMP6_DATASIZE, PBUF_RAM); if (q == NULL) { LWIP_DEBUGF(ICMP_DEBUG, ("icmp_time_exceeded: failed to allocate pbuf for ICMPv6 packet.\n")); ICMP6_STATS_INC(icmp6.memerr); return; } LWIP_ASSERT("check that first pbuf can hold icmp 6message", (q->len >= (sizeof(struct icmp6_hdr) + IP6_HLEN + LWIP_ICMP6_DATASIZE))); icmp6hdr = (struct icmp6_hdr *)q->payload; icmp6hdr->type = type; icmp6hdr->code = code; icmp6hdr->data = data; /* copy fields from original packet */ SMEMCPY((u8_t *)q->payload + sizeof(struct icmp6_hdr), (u8_t *)p->payload, IP6_HLEN + LWIP_ICMP6_DATASIZE); /* Get the destination address and netif for this ICMP message. */ if ((ip_current_netif() == NULL) || ((code == ICMP6_TE_FRAG) && (type == ICMP6_TYPE_TE))) { /* Special case, as ip6_current_xxx is either NULL, or points * to a different packet than the one that expired. * We must use the addresses that are stored in the expired packet. */ ip6hdr = (struct ip6_hdr *)p->payload; /* copy from packed address to aligned address */ ip6_addr_copy(reply_dest_local, ip6hdr->src); ip6_addr_copy(reply_src_local, ip6hdr->dest); reply_dest = &reply_dest_local; reply_src = &reply_src_local; netif = ip6_route(reply_src, reply_dest); if (netif == NULL) { /* drop */ pbuf_free(q); ICMP6_STATS_INC(icmp6.rterr); return; } } else { netif = ip_current_netif(); reply_dest = ip6_current_src_addr(); /* Select an address to use as source. 
*/ reply_src = ip_2_ip6(ip6_select_source_address(netif, reply_dest)); if (reply_src == NULL) { /* drop */ pbuf_free(q); ICMP6_STATS_INC(icmp6.rterr); return; } } /* calculate checksum */ icmp6hdr->chksum = 0; #if CHECKSUM_GEN_ICMP6 IF__NETIF_CHECKSUM_ENABLED(netif, NETIF_CHECKSUM_GEN_ICMP6) { icmp6hdr->chksum = ip6_chksum_pseudo(q, IP6_NEXTH_ICMP6, q->tot_len, reply_src, reply_dest); } #endif /* CHECKSUM_GEN_ICMP6 */ ICMP6_STATS_INC(icmp6.xmit); ip6_output_if(q, reply_src, reply_dest, LWIP_ICMP6_HL, 0, IP6_NEXTH_ICMP6, netif); pbuf_free(q); } #endif /* LWIP_ICMP6 && LWIP_IPV6 */
2301_81045437/classic-platform
communication/lwip-2.0.3/src/core/ipv6/icmp6.c
C
unknown
10,728
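icmp6_send_response() in the icmp6.c entry above allocates sizeof(struct icmp6_hdr) + IP6_HLEN + LWIP_ICMP6_DATASIZE bytes, which with the defaults shown is an 8-byte ICMPv6 error header followed by the offending packet's 40-byte IPv6 header plus 8 bytes of its payload. The mock-up below is a standalone sketch of that layout, not the real lwIP structs (those are packed and live in lwip/prot/icmp6.h).

#include <stdio.h>
#include <stdint.h>

#define SKETCH_IP6_HLEN        40  /* fixed IPv6 header */
#define SKETCH_ICMP6_DATASIZE   8  /* mirrors the LWIP_ICMP6_DATASIZE default */

struct icmp6_err_sketch {
  uint8_t  type;     /* DUR, PTB, TE or PP, as sent by the wrappers above */
  uint8_t  code;     /* per-type code, e.g. ICMP6_TE_HL */
  uint16_t chksum;   /* pseudo-header checksum, filled in last */
  uint32_t data;     /* unused / MTU / pointer, depending on the type */
  uint8_t  invoking_packet[SKETCH_IP6_HLEN + SKETCH_ICMP6_DATASIZE];
};

int main(void)
{
  /* 8 + 40 + 8 = 56 bytes, matching the pbuf_alloc() size
   * used in icmp6_send_response() */
  printf("%u\n", (unsigned)sizeof(struct icmp6_err_sketch));
  return 0;
}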
/** * @file * * INET v6 addresses. */ /* * Copyright (c) 2010 Inico Technologies Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY * OF SUCH DAMAGE. * * This file is part of the lwIP TCP/IP stack. * * Author: Ivan Delamer <delamer@inicotech.com> * * * Please coordinate changes and requests with Ivan Delamer * <delamer@inicotech.com> */ #include "lwip/opt.h" #if LWIP_IPV6 && LWIP_SOCKET /* don't build if not configured for use in lwipopts.h */ #include "lwip/def.h" #include "lwip/inet.h" /** This variable is initialized by the system to contain the wildcard IPv6 address. */ const struct in6_addr in6addr_any = IN6ADDR_ANY_INIT; #endif /* LWIP_IPV6 */
2301_81045437/classic-platform
communication/lwip-2.0.3/src/core/ipv6/inet6.c
C
unknown
2,094
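in6addr_any from the inet6.c entry above is the IPv6 counterpart of INADDR_ANY and is normally consumed through the socket layer. The sketch below is a hedged usage example, assuming LWIP_SOCKET and LWIP_IPV6 are enabled in the build; the function name, port number and error handling are illustrative only.

#include <string.h>
#include "lwip/sockets.h"
#include "lwip/inet.h"

/* Bind a UDP socket to the IPv6 wildcard address. */
int bind_any_v6(void)
{
  struct sockaddr_in6 sa;
  int s = lwip_socket(AF_INET6, SOCK_DGRAM, IPPROTO_UDP);
  if (s < 0) {
    return -1;
  }
  memset(&sa, 0, sizeof(sa));
  sa.sin6_family = AF_INET6;
  sa.sin6_port   = PP_HTONS(5683);   /* example port, network byte order */
  sa.sin6_addr   = in6addr_any;      /* the wildcard defined in inet6.c */
  if (lwip_bind(s, (struct sockaddr *)&sa, sizeof(sa)) < 0) {
    lwip_close(s);
    return -1;
  }
  return s;
}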
/** * @file * * IPv6 layer. */ /* * Copyright (c) 2010 Inico Technologies Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY * OF SUCH DAMAGE. * * This file is part of the lwIP TCP/IP stack. * * Author: Ivan Delamer <delamer@inicotech.com> * * * Please coordinate changes and requests with Ivan Delamer * <delamer@inicotech.com> */ #include "lwip/opt.h" #if LWIP_IPV6 /* don't build if not configured for use in lwipopts.h */ #include "lwip/def.h" #include "lwip/mem.h" #include "lwip/netif.h" #include "lwip/ip.h" #include "lwip/ip6.h" #include "lwip/ip6_addr.h" #include "lwip/ip6_frag.h" #include "lwip/icmp6.h" #include "lwip/raw.h" #include "lwip/udp.h" #include "lwip/priv/tcp_priv.h" #include "lwip/dhcp6.h" #include "lwip/nd6.h" #include "lwip/mld6.h" #include "lwip/debug.h" #include "lwip/stats.h" #ifdef LWIP_HOOK_FILENAME #include LWIP_HOOK_FILENAME #endif /** * Finds the appropriate network interface for a given IPv6 address. It tries to select * a netif following a sequence of heuristics: * 1) if there is only 1 netif, return it * 2) if the destination is a link-local address, try to match the src address to a netif. * this is a tricky case because with multiple netifs, link-local addresses only have * meaning within a particular subnet/link. * 3) tries to match the destination subnet to a configured address * 4) tries to find a router * 5) tries to match the source address to the netif * 6) returns the default netif, if configured * * @param src the source IPv6 address, if known * @param dest the destination IPv6 address for which to find the route * @return the netif on which to send to reach dest */ struct netif * ip6_route(const ip6_addr_t *src, const ip6_addr_t *dest) { struct netif *netif; s8_t i; /* If single netif configuration, fast return. */ if ((netif_list != NULL) && (netif_list->next == NULL)) { if (!netif_is_up(netif_list) || !netif_is_link_up(netif_list)) { return NULL; } return netif_list; } /* Special processing for link-local addresses. */ if (ip6_addr_islinklocal(dest)) { if (ip6_addr_isany(src)) { /* Use default netif, if Up. 
*/ if (netif_default == NULL || !netif_is_up(netif_default) || !netif_is_link_up(netif_default)) { return NULL; } return netif_default; } /* Try to find the netif for the source address, checking that link is up. */ for (netif = netif_list; netif != NULL; netif = netif->next) { if (!netif_is_up(netif) || !netif_is_link_up(netif)) { continue; } for (i = 0; i < LWIP_IPV6_NUM_ADDRESSES; i++) { if (ip6_addr_isvalid(netif_ip6_addr_state(netif, i)) && ip6_addr_cmp(src, netif_ip6_addr(netif, i))) { return netif; } } } /* netif not found, use default netif, if up */ if (netif_default == NULL || !netif_is_up(netif_default) || !netif_is_link_up(netif_default)) { return NULL; } return netif_default; } /* we come here for non-link-local addresses */ #ifdef LWIP_HOOK_IP6_ROUTE netif = LWIP_HOOK_IP6_ROUTE(src, dest); if (netif != NULL) { return netif; } #endif /* See if the destination subnet matches a configured address. */ for (netif = netif_list; netif != NULL; netif = netif->next) { if (!netif_is_up(netif) || !netif_is_link_up(netif)) { continue; } for (i = 0; i < LWIP_IPV6_NUM_ADDRESSES; i++) { if (ip6_addr_isvalid(netif_ip6_addr_state(netif, i)) && ip6_addr_netcmp(dest, netif_ip6_addr(netif, i))) { return netif; } } } /* Get the netif for a suitable router. */ netif = nd6_find_route(dest); if ((netif != NULL) && netif_is_up(netif) && netif_is_link_up(netif)) { return netif; } /* try with the netif that matches the source address. */ if (!ip6_addr_isany(src)) { for (netif = netif_list; netif != NULL; netif = netif->next) { if (!netif_is_up(netif) || !netif_is_link_up(netif)) { continue; } for (i = 0; i < LWIP_IPV6_NUM_ADDRESSES; i++) { if (ip6_addr_isvalid(netif_ip6_addr_state(netif, i)) && ip6_addr_cmp(src, netif_ip6_addr(netif, i))) { return netif; } } } } #if LWIP_NETIF_LOOPBACK && !LWIP_HAVE_LOOPIF /* loopif is disabled, loopback traffic is passed through any netif */ if (ip6_addr_isloopback(dest)) { /* don't check for link on loopback traffic */ if (netif_default != NULL && netif_is_up(netif_default)) { return netif_default; } /* default netif is not up, just use any netif for loopback traffic */ for (netif = netif_list; netif != NULL; netif = netif->next) { if (netif_is_up(netif)) { return netif; } } return NULL; } #endif /* LWIP_NETIF_LOOPBACK && !LWIP_HAVE_LOOPIF */ /* no matching netif found, use default netif, if up */ if ((netif_default == NULL) || !netif_is_up(netif_default) || !netif_is_link_up(netif_default)) { return NULL; } return netif_default; } /** * @ingroup ip6 * Select the best IPv6 source address for a given destination * IPv6 address. Loosely follows RFC 3484. "Strong host" behavior * is assumed. * * @param netif the netif on which to send a packet * @param dest the destination we are trying to reach * @return the most suitable source address to use, or NULL if no suitable * source address is found */ const ip_addr_t * ip6_select_source_address(struct netif *netif, const ip6_addr_t *dest) { const ip_addr_t *src = NULL; u8_t i; /* If dest is link-local, choose a link-local source. */ if (ip6_addr_islinklocal(dest) || ip6_addr_ismulticast_linklocal(dest) || ip6_addr_ismulticast_iflocal(dest)) { for (i = 0; i < LWIP_IPV6_NUM_ADDRESSES; i++) { if (ip6_addr_isvalid(netif_ip6_addr_state(netif, i)) && ip6_addr_islinklocal(netif_ip6_addr(netif, i))) { return netif_ip_addr6(netif, i); } } } /* Choose a site-local with matching prefix. 
*/ if (ip6_addr_issitelocal(dest) || ip6_addr_ismulticast_sitelocal(dest)) { for (i = 0; i < LWIP_IPV6_NUM_ADDRESSES; i++) { if (ip6_addr_isvalid(netif_ip6_addr_state(netif, i)) && ip6_addr_issitelocal(netif_ip6_addr(netif, i)) && ip6_addr_netcmp(dest, netif_ip6_addr(netif, i))) { return netif_ip_addr6(netif, i); } } } /* Choose a unique-local with matching prefix. */ if (ip6_addr_isuniquelocal(dest) || ip6_addr_ismulticast_orglocal(dest)) { for (i = 0; i < LWIP_IPV6_NUM_ADDRESSES; i++) { if (ip6_addr_isvalid(netif_ip6_addr_state(netif, i)) && ip6_addr_isuniquelocal(netif_ip6_addr(netif, i)) && ip6_addr_netcmp(dest, netif_ip6_addr(netif, i))) { return netif_ip_addr6(netif, i); } } } /* Choose a global with best matching prefix. */ if (ip6_addr_isglobal(dest) || ip6_addr_ismulticast_global(dest)) { for (i = 0; i < LWIP_IPV6_NUM_ADDRESSES; i++) { if (ip6_addr_isvalid(netif_ip6_addr_state(netif, i)) && ip6_addr_isglobal(netif_ip6_addr(netif, i))) { if (src == NULL) { src = netif_ip_addr6(netif, i); } else { /* Replace src only if we find a prefix match. */ /* @todo find longest matching prefix. */ if ((!(ip6_addr_netcmp(ip_2_ip6(src), dest))) && ip6_addr_netcmp(netif_ip6_addr(netif, i), dest)) { src = netif_ip_addr6(netif, i); } } } } if (src != NULL) { return src; } } /* Last resort: see if arbitrary prefix matches. */ for (i = 0; i < LWIP_IPV6_NUM_ADDRESSES; i++) { if (ip6_addr_isvalid(netif_ip6_addr_state(netif, i)) && ip6_addr_netcmp(dest, netif_ip6_addr(netif, i))) { return netif_ip_addr6(netif, i); } } return NULL; } #if LWIP_IPV6_FORWARD /** * Forwards an IPv6 packet. It finds an appropriate route for the * packet, decrements the HL value of the packet, and outputs * the packet on the appropriate interface. * * @param p the packet to forward (p->payload points to IP header) * @param iphdr the IPv6 header of the input packet * @param inp the netif on which this packet was received */ static void ip6_forward(struct pbuf *p, struct ip6_hdr *iphdr, struct netif *inp) { struct netif *netif; /* do not forward link-local or loopback addresses */ if (ip6_addr_islinklocal(ip6_current_dest_addr()) || ip6_addr_isloopback(ip6_current_dest_addr())) { LWIP_DEBUGF(IP6_DEBUG, ("ip6_forward: not forwarding link-local address.\n")); IP6_STATS_INC(ip6.rterr); IP6_STATS_INC(ip6.drop); return; } /* Find network interface where to forward this IP packet to. */ netif = ip6_route(IP6_ADDR_ANY6, ip6_current_dest_addr()); if (netif == NULL) { LWIP_DEBUGF(IP6_DEBUG, ("ip6_forward: no route for %"X16_F":%"X16_F":%"X16_F":%"X16_F":%"X16_F":%"X16_F":%"X16_F":%"X16_F"\n", IP6_ADDR_BLOCK1(ip6_current_dest_addr()), IP6_ADDR_BLOCK2(ip6_current_dest_addr()), IP6_ADDR_BLOCK3(ip6_current_dest_addr()), IP6_ADDR_BLOCK4(ip6_current_dest_addr()), IP6_ADDR_BLOCK5(ip6_current_dest_addr()), IP6_ADDR_BLOCK6(ip6_current_dest_addr()), IP6_ADDR_BLOCK7(ip6_current_dest_addr()), IP6_ADDR_BLOCK8(ip6_current_dest_addr()))); #if LWIP_ICMP6 /* Don't send ICMP messages in response to ICMP messages */ if (IP6H_NEXTH(iphdr) != IP6_NEXTH_ICMP6) { icmp6_dest_unreach(p, ICMP6_DUR_NO_ROUTE); } #endif /* LWIP_ICMP6 */ IP6_STATS_INC(ip6.rterr); IP6_STATS_INC(ip6.drop); return; } /* Do not forward packets onto the same network interface on which * they arrived. 
*/ if (netif == inp) { LWIP_DEBUGF(IP6_DEBUG, ("ip6_forward: not bouncing packets back on incoming interface.\n")); IP6_STATS_INC(ip6.rterr); IP6_STATS_INC(ip6.drop); return; } /* decrement HL */ IP6H_HOPLIM_SET(iphdr, IP6H_HOPLIM(iphdr) - 1); /* send ICMP6 if HL == 0 */ if (IP6H_HOPLIM(iphdr) == 0) { #if LWIP_ICMP6 /* Don't send ICMP messages in response to ICMP messages */ if (IP6H_NEXTH(iphdr) != IP6_NEXTH_ICMP6) { icmp6_time_exceeded(p, ICMP6_TE_HL); } #endif /* LWIP_ICMP6 */ IP6_STATS_INC(ip6.drop); return; } if (netif->mtu && (p->tot_len > netif->mtu)) { #if LWIP_ICMP6 /* Don't send ICMP messages in response to ICMP messages */ if (IP6H_NEXTH(iphdr) != IP6_NEXTH_ICMP6) { icmp6_packet_too_big(p, netif->mtu); } #endif /* LWIP_ICMP6 */ IP6_STATS_INC(ip6.drop); return; } LWIP_DEBUGF(IP6_DEBUG, ("ip6_forward: forwarding packet to %"X16_F":%"X16_F":%"X16_F":%"X16_F":%"X16_F":%"X16_F":%"X16_F":%"X16_F"\n", IP6_ADDR_BLOCK1(ip6_current_dest_addr()), IP6_ADDR_BLOCK2(ip6_current_dest_addr()), IP6_ADDR_BLOCK3(ip6_current_dest_addr()), IP6_ADDR_BLOCK4(ip6_current_dest_addr()), IP6_ADDR_BLOCK5(ip6_current_dest_addr()), IP6_ADDR_BLOCK6(ip6_current_dest_addr()), IP6_ADDR_BLOCK7(ip6_current_dest_addr()), IP6_ADDR_BLOCK8(ip6_current_dest_addr()))); /* transmit pbuf on chosen interface */ netif->output_ip6(netif, p, ip6_current_dest_addr()); IP6_STATS_INC(ip6.fw); IP6_STATS_INC(ip6.xmit); return; } #endif /* LWIP_IPV6_FORWARD */ /** * This function is called by the network interface device driver when * an IPv6 packet is received. The function does the basic checks of the * IP header such as packet size being at least larger than the header * size etc. If the packet was not destined for us, the packet is * forwarded (using ip6_forward). * * Finally, the packet is sent to the upper layer protocol input function. * * @param p the received IPv6 packet (p->payload points to IPv6 header) * @param inp the netif on which this packet was received * @return ERR_OK if the packet was processed (could return ERR_* if it wasn't * processed, but currently always returns ERR_OK) */ err_t ip6_input(struct pbuf *p, struct netif *inp) { struct ip6_hdr *ip6hdr; struct netif *netif; u8_t nexth; u16_t hlen; /* the current header length */ u8_t i; #if 0 /*IP_ACCEPT_LINK_LAYER_ADDRESSING*/ @todo int check_ip_src=1; #endif /* IP_ACCEPT_LINK_LAYER_ADDRESSING */ IP6_STATS_INC(ip6.recv); /* identify the IP header */ ip6hdr = (struct ip6_hdr *)p->payload; if (IP6H_V(ip6hdr) != 6) { LWIP_DEBUGF(IP6_DEBUG | LWIP_DBG_LEVEL_WARNING, ("IPv6 packet dropped due to bad version number %"U32_F"\n", IP6H_V(ip6hdr))); pbuf_free(p); IP6_STATS_INC(ip6.err); IP6_STATS_INC(ip6.drop); return ERR_OK; } #ifdef LWIP_HOOK_IP6_INPUT if (LWIP_HOOK_IP6_INPUT(p, inp)) { /* the packet has been eaten */ return ERR_OK; } #endif /* header length exceeds first pbuf length, or ip length exceeds total pbuf length? */ if ((IP6_HLEN > p->len) || ((IP6H_PLEN(ip6hdr) + IP6_HLEN) > p->tot_len)) { if (IP6_HLEN > p->len) { LWIP_DEBUGF(IP6_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("IPv6 header (len %"U16_F") does not fit in first pbuf (len %"U16_F"), IP packet dropped.\n", (u16_t)IP6_HLEN, p->len)); } if ((IP6H_PLEN(ip6hdr) + IP6_HLEN) > p->tot_len) { LWIP_DEBUGF(IP6_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("IPv6 (plen %"U16_F") is longer than pbuf (len %"U16_F"), IP packet dropped.\n", (u16_t)(IP6H_PLEN(ip6hdr) + IP6_HLEN), p->tot_len)); } /* free (drop) packet pbufs */ pbuf_free(p); IP6_STATS_INC(ip6.lenerr); IP6_STATS_INC(ip6.drop); return ERR_OK; } /* Trim pbuf. 
This should have been done at the netif layer, * but we'll do it anyway just to be sure that its done. */ pbuf_realloc(p, IP6_HLEN + IP6H_PLEN(ip6hdr)); /* copy IP addresses to aligned ip6_addr_t */ ip_addr_copy_from_ip6(ip_data.current_iphdr_dest, ip6hdr->dest); ip_addr_copy_from_ip6(ip_data.current_iphdr_src, ip6hdr->src); /* Don't accept virtual IPv4 mapped IPv6 addresses. * Don't accept multicast source addresses. */ if (ip6_addr_isipv4mappedipv6(ip_2_ip6(&ip_data.current_iphdr_dest)) || ip6_addr_isipv4mappedipv6(ip_2_ip6(&ip_data.current_iphdr_src)) || ip6_addr_ismulticast(ip_2_ip6(&ip_data.current_iphdr_src))) { IP6_STATS_INC(ip6.err); IP6_STATS_INC(ip6.drop); return ERR_OK; } /* current header pointer. */ ip_data.current_ip6_header = ip6hdr; /* In netif, used in case we need to send ICMPv6 packets back. */ ip_data.current_netif = inp; ip_data.current_input_netif = inp; /* match packet against an interface, i.e. is this packet for us? */ if (ip6_addr_ismulticast(ip6_current_dest_addr())) { /* Always joined to multicast if-local and link-local all-nodes group. */ if (ip6_addr_isallnodes_iflocal(ip6_current_dest_addr()) || ip6_addr_isallnodes_linklocal(ip6_current_dest_addr())) { netif = inp; } #if LWIP_IPV6_MLD else if (mld6_lookfor_group(inp, ip6_current_dest_addr())) { netif = inp; } #else /* LWIP_IPV6_MLD */ else if (ip6_addr_issolicitednode(ip6_current_dest_addr())) { /* Filter solicited node packets when MLD is not enabled * (for Neighbor discovery). */ netif = NULL; for (i = 0; i < LWIP_IPV6_NUM_ADDRESSES; i++) { if (ip6_addr_isvalid(netif_ip6_addr_state(inp, i)) && ip6_addr_cmp_solicitednode(ip6_current_dest_addr(), netif_ip6_addr(inp, i))) { netif = inp; LWIP_DEBUGF(IP6_DEBUG, ("ip6_input: solicited node packet accepted on interface %c%c\n", netif->name[0], netif->name[1])); break; } } } #endif /* LWIP_IPV6_MLD */ else { netif = NULL; } } else { /* start trying with inp. if that's not acceptable, start walking the list of configured netifs. 'first' is used as a boolean to mark whether we started walking the list */ int first = 1; netif = inp; do { /* interface is up? */ if (netif_is_up(netif)) { /* unicast to this interface address? address configured? */ for (i = 0; i < LWIP_IPV6_NUM_ADDRESSES; i++) { if (ip6_addr_isvalid(netif_ip6_addr_state(netif, i)) && ip6_addr_cmp(ip6_current_dest_addr(), netif_ip6_addr(netif, i))) { /* exit outer loop */ goto netif_found; } } } if (first) { if (ip6_addr_islinklocal(ip6_current_dest_addr()) #if !LWIP_NETIF_LOOPBACK || LWIP_HAVE_LOOPIF || ip6_addr_isloopback(ip6_current_dest_addr()) #endif /* !LWIP_NETIF_LOOPBACK || LWIP_HAVE_LOOPIF */ ) { /* Do not match link-local addresses to other netifs. The loopback * address is to be considered link-local and packets to it should be * dropped on other interfaces, as per RFC 4291 Sec. 2.5.3. This * requirement cannot be implemented in the case that loopback * traffic is sent across a non-loopback interface, however. */ netif = NULL; break; } first = 0; netif = netif_list; } else { netif = netif->next; } if (netif == inp) { netif = netif->next; } } while (netif != NULL); netif_found: LWIP_DEBUGF(IP6_DEBUG, ("ip6_input: packet accepted on interface %c%c\n", netif ? netif->name[0] : 'X', netif? netif->name[1] : 'X')); } /* "::" packet source address? 
(used in duplicate address detection) */ if (ip6_addr_isany(ip6_current_src_addr()) && (!ip6_addr_issolicitednode(ip6_current_dest_addr()))) { /* packet source is not valid */ /* free (drop) packet pbufs */ LWIP_DEBUGF(IP6_DEBUG, ("ip6_input: packet with src ANY_ADDRESS dropped\n")); pbuf_free(p); IP6_STATS_INC(ip6.drop); goto ip6_input_cleanup; } /* packet not for us? */ if (netif == NULL) { /* packet not for us, route or discard */ LWIP_DEBUGF(IP6_DEBUG | LWIP_DBG_TRACE, ("ip6_input: packet not for us.\n")); #if LWIP_IPV6_FORWARD /* non-multicast packet? */ if (!ip6_addr_ismulticast(ip6_current_dest_addr())) { /* try to forward IP packet on (other) interfaces */ ip6_forward(p, ip6hdr, inp); } #endif /* LWIP_IPV6_FORWARD */ pbuf_free(p); goto ip6_input_cleanup; } /* current netif pointer. */ ip_data.current_netif = netif; /* Save next header type. */ nexth = IP6H_NEXTH(ip6hdr); /* Init header length. */ hlen = ip_data.current_ip_header_tot_len = IP6_HLEN; /* Move to payload. */ pbuf_header(p, -IP6_HLEN); /* Process known option extension headers, if present. */ while (nexth != IP6_NEXTH_NONE) { switch (nexth) { case IP6_NEXTH_HOPBYHOP: LWIP_DEBUGF(IP6_DEBUG, ("ip6_input: packet with Hop-by-Hop options header\n")); /* Get next header type. */ nexth = *((u8_t *)p->payload); /* Get the header length. */ hlen = 8 * (1 + *((u8_t *)p->payload + 1)); ip_data.current_ip_header_tot_len += hlen; /* Skip over this header. */ if (hlen > p->len) { LWIP_DEBUGF(IP6_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("IPv6 options header (hlen %"U16_F") does not fit in first pbuf (len %"U16_F"), IPv6 packet dropped.\n", hlen, p->len)); /* free (drop) packet pbufs */ pbuf_free(p); IP6_STATS_INC(ip6.lenerr); IP6_STATS_INC(ip6.drop); goto ip6_input_cleanup; } pbuf_header(p, -(s16_t)hlen); break; case IP6_NEXTH_DESTOPTS: LWIP_DEBUGF(IP6_DEBUG, ("ip6_input: packet with Destination options header\n")); /* Get next header type. */ nexth = *((u8_t *)p->payload); /* Get the header length. */ hlen = 8 * (1 + *((u8_t *)p->payload + 1)); ip_data.current_ip_header_tot_len += hlen; /* Skip over this header. */ if (hlen > p->len) { LWIP_DEBUGF(IP6_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("IPv6 options header (hlen %"U16_F") does not fit in first pbuf (len %"U16_F"), IPv6 packet dropped.\n", hlen, p->len)); /* free (drop) packet pbufs */ pbuf_free(p); IP6_STATS_INC(ip6.lenerr); IP6_STATS_INC(ip6.drop); goto ip6_input_cleanup; } pbuf_header(p, -(s16_t)hlen); break; case IP6_NEXTH_ROUTING: LWIP_DEBUGF(IP6_DEBUG, ("ip6_input: packet with Routing header\n")); /* Get next header type. */ nexth = *((u8_t *)p->payload); /* Get the header length. */ hlen = 8 * (1 + *((u8_t *)p->payload + 1)); ip_data.current_ip_header_tot_len += hlen; /* Skip over this header. */ if (hlen > p->len) { LWIP_DEBUGF(IP6_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("IPv6 options header (hlen %"U16_F") does not fit in first pbuf (len %"U16_F"), IPv6 packet dropped.\n", hlen, p->len)); /* free (drop) packet pbufs */ pbuf_free(p); IP6_STATS_INC(ip6.lenerr); IP6_STATS_INC(ip6.drop); goto ip6_input_cleanup; } pbuf_header(p, -(s16_t)hlen); break; case IP6_NEXTH_FRAGMENT: { struct ip6_frag_hdr *frag_hdr; LWIP_DEBUGF(IP6_DEBUG, ("ip6_input: packet with Fragment header\n")); frag_hdr = (struct ip6_frag_hdr *)p->payload; /* Get next header type. */ nexth = frag_hdr->_nexth; /* Fragment Header length. */ hlen = 8; ip_data.current_ip_header_tot_len += hlen; /* Make sure this header fits in current pbuf. 
*/ if (hlen > p->len) { LWIP_DEBUGF(IP6_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("IPv6 options header (hlen %"U16_F") does not fit in first pbuf (len %"U16_F"), IPv6 packet dropped.\n", hlen, p->len)); /* free (drop) packet pbufs */ pbuf_free(p); IP6_FRAG_STATS_INC(ip6_frag.lenerr); IP6_FRAG_STATS_INC(ip6_frag.drop); goto ip6_input_cleanup; } /* Offset == 0 and more_fragments == 0? */ if ((frag_hdr->_fragment_offset & PP_HTONS(IP6_FRAG_OFFSET_MASK | IP6_FRAG_MORE_FLAG)) == 0) { /* This is a 1-fragment packet, usually a packet that we have * already reassembled. Skip this header anc continue. */ pbuf_header(p, -(s16_t)hlen); } else { #if LWIP_IPV6_REASS /* reassemble the packet */ p = ip6_reass(p); /* packet not fully reassembled yet? */ if (p == NULL) { goto ip6_input_cleanup; } /* Returned p point to IPv6 header. * Update all our variables and pointers and continue. */ ip6hdr = (struct ip6_hdr *)p->payload; nexth = IP6H_NEXTH(ip6hdr); hlen = ip_data.current_ip_header_tot_len = IP6_HLEN; pbuf_header(p, -IP6_HLEN); #else /* LWIP_IPV6_REASS */ /* free (drop) packet pbufs */ LWIP_DEBUGF(IP6_DEBUG, ("ip6_input: packet with Fragment header dropped (with LWIP_IPV6_REASS==0)\n")); pbuf_free(p); IP6_STATS_INC(ip6.opterr); IP6_STATS_INC(ip6.drop); goto ip6_input_cleanup; #endif /* LWIP_IPV6_REASS */ } break; } default: goto options_done; break; } } options_done: /* p points to IPv6 header again. */ pbuf_header_force(p, (s16_t)ip_data.current_ip_header_tot_len); /* send to upper layers */ LWIP_DEBUGF(IP6_DEBUG, ("ip6_input: \n")); ip6_debug_print(p); LWIP_DEBUGF(IP6_DEBUG, ("ip6_input: p->len %"U16_F" p->tot_len %"U16_F"\n", p->len, p->tot_len)); #if LWIP_RAW /* raw input did not eat the packet? */ if (raw_input(p, inp) == 0) #endif /* LWIP_RAW */ { switch (nexth) { case IP6_NEXTH_NONE: pbuf_free(p); break; #if LWIP_UDP case IP6_NEXTH_UDP: #if LWIP_UDPLITE case IP6_NEXTH_UDPLITE: #endif /* LWIP_UDPLITE */ /* Point to payload. */ pbuf_header(p, -(s16_t)ip_data.current_ip_header_tot_len); udp_input(p, inp); break; #endif /* LWIP_UDP */ #if LWIP_TCP case IP6_NEXTH_TCP: /* Point to payload. */ pbuf_header(p, -(s16_t)ip_data.current_ip_header_tot_len); tcp_input(p, inp); break; #endif /* LWIP_TCP */ #if LWIP_ICMP6 case IP6_NEXTH_ICMP6: /* Point to payload. */ pbuf_header(p, -(s16_t)ip_data.current_ip_header_tot_len); icmp6_input(p, inp); break; #endif /* LWIP_ICMP */ default: #if LWIP_ICMP6 /* send ICMP parameter problem unless it was a multicast or ICMPv6 */ if ((!ip6_addr_ismulticast(ip6_current_dest_addr())) && (IP6H_NEXTH(ip6hdr) != IP6_NEXTH_ICMP6)) { icmp6_param_problem(p, ICMP6_PP_HEADER, ip_data.current_ip_header_tot_len - hlen); } #endif /* LWIP_ICMP */ LWIP_DEBUGF(IP6_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("ip6_input: Unsupported transport protocol %"U16_F"\n", (u16_t)IP6H_NEXTH(ip6hdr))); pbuf_free(p); IP6_STATS_INC(ip6.proterr); IP6_STATS_INC(ip6.drop); break; } } ip6_input_cleanup: ip_data.current_netif = NULL; ip_data.current_input_netif = NULL; ip_data.current_ip6_header = NULL; ip_data.current_ip_header_tot_len = 0; ip6_addr_set_zero(ip6_current_src_addr()); ip6_addr_set_zero(ip6_current_dest_addr()); return ERR_OK; } /** * Sends an IPv6 packet on a network interface. This function constructs * the IPv6 header. If the source IPv6 address is NULL, the IPv6 "ANY" address is * used as source (usually during network startup). If the source IPv6 address it * IP6_ADDR_ANY, the most appropriate IPv6 address of the outgoing network * interface is filled in as source address. 
If the destination IPv6 address is * LWIP_IP_HDRINCL, p is assumed to already include an IPv6 header and * p->payload points to it instead of the data. * * @param p the packet to send (p->payload points to the data, e.g. next protocol header; if dest == LWIP_IP_HDRINCL, p already includes an IPv6 header and p->payload points to that IPv6 header) * @param src the source IPv6 address to send from (if src == IP6_ADDR_ANY, an * IP address of the netif is selected and used as source address. * if src == NULL, IP6_ADDR_ANY is used as source) * @param dest the destination IPv6 address to send the packet to * @param hl the Hop Limit value to be set in the IPv6 header * @param tc the Traffic Class value to be set in the IPv6 header * @param nexth the Next Header to be set in the IPv6 header * @param netif the netif on which to send this packet * @return ERR_OK if the packet was sent OK * ERR_BUF if p doesn't have enough space for IPv6/LINK headers * returns errors returned by netif->output */ err_t ip6_output_if(struct pbuf *p, const ip6_addr_t *src, const ip6_addr_t *dest, u8_t hl, u8_t tc, u8_t nexth, struct netif *netif) { const ip6_addr_t *src_used = src; if (dest != LWIP_IP_HDRINCL) { if (src != NULL && ip6_addr_isany(src)) { src_used = ip_2_ip6(ip6_select_source_address(netif, dest)); if ((src_used == NULL) || ip6_addr_isany(src_used)) { /* No appropriate source address was found for this packet. */ LWIP_DEBUGF(IP6_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("ip6_output: No suitable source address for packet.\n")); IP6_STATS_INC(ip6.rterr); return ERR_RTE; } } } return ip6_output_if_src(p, src_used, dest, hl, tc, nexth, netif); } /** * Same as ip6_output_if() but 'src' address is not replaced by netif address * when it is 'any'. */ err_t ip6_output_if_src(struct pbuf *p, const ip6_addr_t *src, const ip6_addr_t *dest, u8_t hl, u8_t tc, u8_t nexth, struct netif *netif) { struct ip6_hdr *ip6hdr; ip6_addr_t dest_addr; LWIP_IP_CHECK_PBUF_REF_COUNT_FOR_TX(p); /* Should the IPv6 header be generated or is it already included in p? 
*/ if (dest != LWIP_IP_HDRINCL) { /* generate IPv6 header */ if (pbuf_header(p, IP6_HLEN)) { LWIP_DEBUGF(IP6_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("ip6_output: not enough room for IPv6 header in pbuf\n")); IP6_STATS_INC(ip6.err); return ERR_BUF; } ip6hdr = (struct ip6_hdr *)p->payload; LWIP_ASSERT("check that first pbuf can hold struct ip6_hdr", (p->len >= sizeof(struct ip6_hdr))); IP6H_HOPLIM_SET(ip6hdr, hl); IP6H_NEXTH_SET(ip6hdr, nexth); /* dest cannot be NULL here */ ip6_addr_copy(ip6hdr->dest, *dest); IP6H_VTCFL_SET(ip6hdr, 6, tc, 0); IP6H_PLEN_SET(ip6hdr, p->tot_len - IP6_HLEN); if (src == NULL) { src = IP6_ADDR_ANY6; } /* src cannot be NULL here */ ip6_addr_copy(ip6hdr->src, *src); } else { /* IP header already included in p */ ip6hdr = (struct ip6_hdr *)p->payload; ip6_addr_copy(dest_addr, ip6hdr->dest); dest = &dest_addr; } IP6_STATS_INC(ip6.xmit); LWIP_DEBUGF(IP6_DEBUG, ("ip6_output_if: %c%c%"U16_F"\n", netif->name[0], netif->name[1], (u16_t)netif->num)); ip6_debug_print(p); #if ENABLE_LOOPBACK { int i; #if !LWIP_HAVE_LOOPIF if (ip6_addr_isloopback(dest)) { return netif_loop_output(netif, p); } #endif /* !LWIP_HAVE_LOOPIF */ for (i = 0; i < LWIP_IPV6_NUM_ADDRESSES; i++) { if (ip6_addr_isvalid(netif_ip6_addr_state(netif, i)) && ip6_addr_cmp(dest, netif_ip6_addr(netif, i))) { /* Packet to self, enqueue it for loopback */ LWIP_DEBUGF(IP6_DEBUG, ("netif_loop_output()\n")); return netif_loop_output(netif, p); } } } #endif /* ENABLE_LOOPBACK */ #if LWIP_IPV6_FRAG /* don't fragment if interface has mtu set to 0 [loopif] */ if (netif->mtu && (p->tot_len > nd6_get_destination_mtu(dest, netif))) { return ip6_frag(p, netif, dest); } #endif /* LWIP_IPV6_FRAG */ LWIP_DEBUGF(IP6_DEBUG, ("netif->output_ip6()\n")); return netif->output_ip6(netif, p, dest); } /** * Simple interface to ip6_output_if. It finds the outgoing network * interface and calls upon ip6_output_if to do the actual work. * * @param p the packet to send (p->payload points to the data, e.g. next protocol header; if dest == LWIP_IP_HDRINCL, p already includes an IPv6 header and p->payload points to that IPv6 header) * @param src the source IPv6 address to send from (if src == IP6_ADDR_ANY, an * IP address of the netif is selected and used as source address. * if src == NULL, IP6_ADDR_ANY is used as source) * @param dest the destination IPv6 address to send the packet to * @param hl the Hop Limit value to be set in the IPv6 header * @param tc the Traffic Class value to be set in the IPv6 header * @param nexth the Next Header to be set in the IPv6 header * * @return ERR_RTE if no route is found * see ip_output_if() for more return values */ err_t ip6_output(struct pbuf *p, const ip6_addr_t *src, const ip6_addr_t *dest, u8_t hl, u8_t tc, u8_t nexth) { struct netif *netif; struct ip6_hdr *ip6hdr; ip6_addr_t src_addr, dest_addr; LWIP_IP_CHECK_PBUF_REF_COUNT_FOR_TX(p); if (dest != LWIP_IP_HDRINCL) { netif = ip6_route(src, dest); } else { /* IP header included in p, read addresses. 
*/ ip6hdr = (struct ip6_hdr *)p->payload; ip6_addr_copy(src_addr, ip6hdr->src); ip6_addr_copy(dest_addr, ip6hdr->dest); netif = ip6_route(&src_addr, &dest_addr); } if (netif == NULL) { LWIP_DEBUGF(IP6_DEBUG, ("ip6_output: no route for %"X16_F":%"X16_F":%"X16_F":%"X16_F":%"X16_F":%"X16_F":%"X16_F":%"X16_F"\n", IP6_ADDR_BLOCK1(dest), IP6_ADDR_BLOCK2(dest), IP6_ADDR_BLOCK3(dest), IP6_ADDR_BLOCK4(dest), IP6_ADDR_BLOCK5(dest), IP6_ADDR_BLOCK6(dest), IP6_ADDR_BLOCK7(dest), IP6_ADDR_BLOCK8(dest))); IP6_STATS_INC(ip6.rterr); return ERR_RTE; } return ip6_output_if(p, src, dest, hl, tc, nexth, netif); } #if LWIP_NETIF_HWADDRHINT /** Like ip6_output, but takes and addr_hint pointer that is passed on to netif->addr_hint * before calling ip6_output_if. * * @param p the packet to send (p->payload points to the data, e.g. next protocol header; if dest == LWIP_IP_HDRINCL, p already includes an IPv6 header and p->payload points to that IPv6 header) * @param src the source IPv6 address to send from (if src == IP6_ADDR_ANY, an * IP address of the netif is selected and used as source address. * if src == NULL, IP6_ADDR_ANY is used as source) * @param dest the destination IPv6 address to send the packet to * @param hl the Hop Limit value to be set in the IPv6 header * @param tc the Traffic Class value to be set in the IPv6 header * @param nexth the Next Header to be set in the IPv6 header * @param addr_hint address hint pointer set to netif->addr_hint before * calling ip_output_if() * * @return ERR_RTE if no route is found * see ip_output_if() for more return values */ err_t ip6_output_hinted(struct pbuf *p, const ip6_addr_t *src, const ip6_addr_t *dest, u8_t hl, u8_t tc, u8_t nexth, u8_t *addr_hint) { struct netif *netif; struct ip6_hdr *ip6hdr; ip6_addr_t src_addr, dest_addr; err_t err; LWIP_IP_CHECK_PBUF_REF_COUNT_FOR_TX(p); if (dest != LWIP_IP_HDRINCL) { netif = ip6_route(src, dest); } else { /* IP header included in p, read addresses. */ ip6hdr = (struct ip6_hdr *)p->payload; ip6_addr_copy(src_addr, ip6hdr->src); ip6_addr_copy(dest_addr, ip6hdr->dest); netif = ip6_route(&src_addr, &dest_addr); } if (netif == NULL) { LWIP_DEBUGF(IP6_DEBUG, ("ip6_output: no route for %"X16_F":%"X16_F":%"X16_F":%"X16_F":%"X16_F":%"X16_F":%"X16_F":%"X16_F"\n", IP6_ADDR_BLOCK1(dest), IP6_ADDR_BLOCK2(dest), IP6_ADDR_BLOCK3(dest), IP6_ADDR_BLOCK4(dest), IP6_ADDR_BLOCK5(dest), IP6_ADDR_BLOCK6(dest), IP6_ADDR_BLOCK7(dest), IP6_ADDR_BLOCK8(dest))); IP6_STATS_INC(ip6.rterr); return ERR_RTE; } NETIF_SET_HWADDRHINT(netif, addr_hint); err = ip6_output_if(p, src, dest, hl, tc, nexth, netif); NETIF_SET_HWADDRHINT(netif, NULL); return err; } #endif /* LWIP_NETIF_HWADDRHINT*/ #if LWIP_IPV6_MLD /** * Add a hop-by-hop options header with a router alert option and padding. * * Used by MLD when sending a Multicast listener report/done message. * * @param p the packet to which we will prepend the options header * @param nexth the next header protocol number (e.g. IP6_NEXTH_ICMP6) * @param value the value of the router alert option data (e.g. IP6_ROUTER_ALERT_VALUE_MLD) * @return ERR_OK if hop-by-hop header was added, ERR_* otherwise */ err_t ip6_options_add_hbh_ra(struct pbuf *p, u8_t nexth, u8_t value) { struct ip6_hbh_hdr *hbh_hdr; /* Move pointer to make room for hop-by-hop options header. */ if (pbuf_header(p, sizeof(struct ip6_hbh_hdr))) { LWIP_DEBUGF(IP6_DEBUG, ("ip6_options: no space for options header\n")); IP6_STATS_INC(ip6.err); return ERR_BUF; } hbh_hdr = (struct ip6_hbh_hdr *)p->payload; /* Set fields. 
*/ hbh_hdr->_nexth = nexth; hbh_hdr->_hlen = 0; hbh_hdr->_ra_opt_type = IP6_ROUTER_ALERT_OPTION; hbh_hdr->_ra_opt_dlen = 2; hbh_hdr->_ra_opt_data = value; hbh_hdr->_padn_opt_type = IP6_PADN_ALERT_OPTION; hbh_hdr->_padn_opt_dlen = 0; return ERR_OK; } #endif /* LWIP_IPV6_MLD */ #if IP6_DEBUG /* Print an IPv6 header by using LWIP_DEBUGF * @param p an IPv6 packet, p->payload pointing to the IPv6 header */ void ip6_debug_print(struct pbuf *p) { struct ip6_hdr *ip6hdr = (struct ip6_hdr *)p->payload; LWIP_DEBUGF(IP6_DEBUG, ("IPv6 header:\n")); LWIP_DEBUGF(IP6_DEBUG, ("+-------------------------------+\n")); LWIP_DEBUGF(IP6_DEBUG, ("| %2"U16_F" | %3"U16_F" | %7"U32_F" | (ver, class, flow)\n", IP6H_V(ip6hdr), IP6H_TC(ip6hdr), IP6H_FL(ip6hdr))); LWIP_DEBUGF(IP6_DEBUG, ("+-------------------------------+\n")); LWIP_DEBUGF(IP6_DEBUG, ("| %5"U16_F" | %3"U16_F" | %3"U16_F" | (plen, nexth, hopl)\n", IP6H_PLEN(ip6hdr), IP6H_NEXTH(ip6hdr), IP6H_HOPLIM(ip6hdr))); LWIP_DEBUGF(IP6_DEBUG, ("+-------------------------------+\n")); LWIP_DEBUGF(IP6_DEBUG, ("| %4"X32_F" | %4"X32_F" | %4"X32_F" | %4"X32_F" | (src)\n", IP6_ADDR_BLOCK1(&(ip6hdr->src)), IP6_ADDR_BLOCK2(&(ip6hdr->src)), IP6_ADDR_BLOCK3(&(ip6hdr->src)), IP6_ADDR_BLOCK4(&(ip6hdr->src)))); LWIP_DEBUGF(IP6_DEBUG, ("| %4"X32_F" | %4"X32_F" | %4"X32_F" | %4"X32_F" |\n", IP6_ADDR_BLOCK5(&(ip6hdr->src)), IP6_ADDR_BLOCK6(&(ip6hdr->src)), IP6_ADDR_BLOCK7(&(ip6hdr->src)), IP6_ADDR_BLOCK8(&(ip6hdr->src)))); LWIP_DEBUGF(IP6_DEBUG, ("+-------------------------------+\n")); LWIP_DEBUGF(IP6_DEBUG, ("| %4"X32_F" | %4"X32_F" | %4"X32_F" | %4"X32_F" | (dest)\n", IP6_ADDR_BLOCK1(&(ip6hdr->dest)), IP6_ADDR_BLOCK2(&(ip6hdr->dest)), IP6_ADDR_BLOCK3(&(ip6hdr->dest)), IP6_ADDR_BLOCK4(&(ip6hdr->dest)))); LWIP_DEBUGF(IP6_DEBUG, ("| %4"X32_F" | %4"X32_F" | %4"X32_F" | %4"X32_F" |\n", IP6_ADDR_BLOCK5(&(ip6hdr->dest)), IP6_ADDR_BLOCK6(&(ip6hdr->dest)), IP6_ADDR_BLOCK7(&(ip6hdr->dest)), IP6_ADDR_BLOCK8(&(ip6hdr->dest)))); LWIP_DEBUGF(IP6_DEBUG, ("+-------------------------------+\n")); } #endif /* IP6_DEBUG */ #endif /* LWIP_IPV6 */
2301_81045437/classic-platform
communication/lwip-2.0.3/src/core/ipv6/ip6.c
C
unknown
40,158
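/*
 * Illustrative usage sketch, not part of the lwIP sources above: shows how a
 * caller might hand a pre-built payload directly to ip6_output() from ip6.c.
 * The helper name, the 2001:db8::1 destination, the payload and the
 * next-header value 59 ("no next header") are example assumptions; real
 * applications normally use the raw/udp/tcp APIs instead, and like the rest
 * of the core this would have to run in the tcpip thread.
 */
#include "lwip/opt.h"
#include "lwip/pbuf.h"
#include "lwip/ip6.h"
#include "lwip/ip6_addr.h"

static err_t example_send_raw_ip6(const void *data, u16_t len)
{
  ip6_addr_t dest;
  struct pbuf *p;
  err_t err;

  /* Destination parsed from text; ip6addr_aton() returns 0 on bad input. */
  if (!ip6addr_aton("2001:db8::1", &dest)) {
    return ERR_ARG;
  }

  /* PBUF_IP reserves headroom so ip6_output() can prepend the IPv6 header. */
  p = pbuf_alloc(PBUF_IP, len, PBUF_RAM);
  if (p == NULL) {
    return ERR_MEM;
  }
  err = pbuf_take(p, data, len);
  if (err != ERR_OK) {
    pbuf_free(p);
    return err;
  }

  /* src == NULL lets the stack pick a source address (IP6_ADDR_ANY behavior);
   * hop limit 64, traffic class 0, next header 59 = "no next header".
   * ip6_output() does not consume the pbuf, so the caller frees it. */
  err = ip6_output(p, NULL, &dest, 64, 0, 59);
  pbuf_free(p);
  return err;
}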
/** * @file * * IPv6 addresses. */ /* * Copyright (c) 2010 Inico Technologies Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY * OF SUCH DAMAGE. * * This file is part of the lwIP TCP/IP stack. * * Author: Ivan Delamer <delamer@inicotech.com> * * Functions for handling IPv6 addresses. * * Please coordinate changes and requests with Ivan Delamer * <delamer@inicotech.com> */ #include "lwip/opt.h" #if LWIP_IPV6 /* don't build if not configured for use in lwipopts.h */ #include "lwip/ip_addr.h" #include "lwip/def.h" /* used by IP6_ADDR_ANY(6) in ip6_addr.h */ const ip_addr_t ip6_addr_any = IPADDR6_INIT(0ul, 0ul, 0ul, 0ul); #ifndef isprint #define in_range(c, lo, up) ((u8_t)c >= lo && (u8_t)c <= up) #define isprint(c) in_range(c, 0x20, 0x7f) #define isdigit(c) in_range(c, '0', '9') #define isxdigit(c) (isdigit(c) || in_range(c, 'a', 'f') || in_range(c, 'A', 'F')) #define islower(c) in_range(c, 'a', 'z') #define isspace(c) (c == ' ' || c == '\f' || c == '\n' || c == '\r' || c == '\t' || c == '\v') #define xchar(i) ((i) < 10 ? '0' + (i) : 'A' + (i) - 10) #endif /** * Check whether "cp" is a valid ascii representation * of an IPv6 address and convert to a binary address. * Returns 1 if the address is valid, 0 if not. * * @param cp IPv6 address in ascii representation (e.g. "FF01::1") * @param addr pointer to which to save the ip address in network order * @return 1 if cp could be converted to addr, 0 on failure */ int ip6addr_aton(const char *cp, ip6_addr_t *addr) { u32_t addr_index, zero_blocks, current_block_index, current_block_value; const char *s; /* Count the number of colons, to count the number of blocks in a "::" sequence zero_blocks may be 1 even if there are no :: sequences */ zero_blocks = 8; for (s = cp; *s != 0; s++) { if (*s == ':') { zero_blocks--; } else if (!isxdigit(*s)) { break; } } /* parse each block */ addr_index = 0; current_block_index = 0; current_block_value = 0; for (s = cp; *s != 0; s++) { if (*s == ':') { if (addr) { if (current_block_index & 0x1) { addr->addr[addr_index++] |= current_block_value; } else { addr->addr[addr_index] = current_block_value << 16; } } current_block_index++; current_block_value = 0; if (current_block_index > 7) { /* address too long! 
*/ return 0; } if (s[1] == ':') { if (s[2] == ':') { /* invalid format: three successive colons */ return 0; } s++; /* "::" found, set zeros */ while (zero_blocks > 0) { zero_blocks--; if (current_block_index & 0x1) { addr_index++; } else { if (addr) { addr->addr[addr_index] = 0; } } current_block_index++; if (current_block_index > 7) { /* address too long! */ return 0; } } } } else if (isxdigit(*s)) { /* add current digit */ current_block_value = (current_block_value << 4) + (isdigit(*s) ? (u32_t)(*s - '0') : (u32_t)(10 + (islower(*s) ? *s - 'a' : *s - 'A'))); } else { /* unexpected digit, space? CRLF? */ break; } } if (addr) { if (current_block_index & 0x1) { addr->addr[addr_index++] |= current_block_value; } else { addr->addr[addr_index] = current_block_value << 16; } } /* convert to network byte order. */ if (addr) { for (addr_index = 0; addr_index < 4; addr_index++) { addr->addr[addr_index] = lwip_htonl(addr->addr[addr_index]); } } if (current_block_index != 7) { return 0; } return 1; } /** * Convert numeric IPv6 address into ASCII representation. * returns ptr to static buffer; not reentrant! * * @param addr ip6 address in network order to convert * @return pointer to a global static (!) buffer that holds the ASCII * representation of addr */ char * ip6addr_ntoa(const ip6_addr_t *addr) { static char str[40]; return ip6addr_ntoa_r(addr, str, 40); } /** * Same as ipaddr_ntoa, but reentrant since a user-supplied buffer is used. * * @param addr ip6 address in network order to convert * @param buf target buffer where the string is stored * @param buflen length of buf * @return either pointer to buf which now holds the ASCII * representation of addr or NULL if buf was too small */ char * ip6addr_ntoa_r(const ip6_addr_t *addr, char *buf, int buflen) { u32_t current_block_index, current_block_value, next_block_value; s32_t i; u8_t zero_flag, empty_block_flag; i = 0; empty_block_flag = 0; /* used to indicate a zero chain for "::' */ for (current_block_index = 0; current_block_index < 8; current_block_index++) { /* get the current 16-bit block */ current_block_value = lwip_htonl(addr->addr[current_block_index >> 1]); if ((current_block_index & 0x1) == 0) { current_block_value = current_block_value >> 16; } current_block_value &= 0xffff; /* Check for empty block. */ if (current_block_value == 0) { if (current_block_index == 7 && empty_block_flag == 1) { /* special case, we must render a ':' for the last block. */ buf[i++] = ':'; if (i >= buflen) { return NULL; } break; } if (empty_block_flag == 0) { /* generate empty block "::", but only if more than one contiguous zero block, * according to current formatting suggestions RFC 5952. */ next_block_value = lwip_htonl(addr->addr[(current_block_index + 1) >> 1]); if ((current_block_index & 0x1) == 0x01) { next_block_value = next_block_value >> 16; } next_block_value &= 0xffff; if (next_block_value == 0) { empty_block_flag = 1; buf[i++] = ':'; if (i >= buflen) { return NULL; } continue; /* move on to next block. */ } } else if (empty_block_flag == 1) { /* move on to next block. */ continue; } } else if (empty_block_flag == 1) { /* Set this flag value so we don't produce multiple empty blocks. 
*/ empty_block_flag = 2; } if (current_block_index > 0) { buf[i++] = ':'; if (i >= buflen) { return NULL; } } if ((current_block_value & 0xf000) == 0) { zero_flag = 1; } else { buf[i++] = xchar(((current_block_value & 0xf000) >> 12)); zero_flag = 0; if (i >= buflen) { return NULL; } } if (((current_block_value & 0xf00) == 0) && (zero_flag)) { /* do nothing */ } else { buf[i++] = xchar(((current_block_value & 0xf00) >> 8)); zero_flag = 0; if (i >= buflen) { return NULL; } } if (((current_block_value & 0xf0) == 0) && (zero_flag)) { /* do nothing */ } else { buf[i++] = xchar(((current_block_value & 0xf0) >> 4)); zero_flag = 0; if (i >= buflen) { return NULL; } } buf[i++] = xchar((current_block_value & 0xf)); if (i >= buflen) { return NULL; } } buf[i] = 0; return buf; } #endif /* LWIP_IPV6 */
2301_81045437/classic-platform
communication/lwip-2.0.3/src/core/ipv6/ip6_addr.c
C
unknown
9,028
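/*
 * Illustrative round-trip sketch, not part of the lwIP sources above: parses
 * a textual IPv6 address with ip6addr_aton() from ip6_addr.c and renders it
 * back with the reentrant ip6addr_ntoa_r(). The documentation-prefix address
 * and the helper name are example assumptions.
 */
#include <stdio.h>
#include "lwip/ip6_addr.h"

static void example_addr_roundtrip(void)
{
  ip6_addr_t addr;
  char buf[40]; /* worst case: 8 groups of 4 hex digits, 7 colons, NUL */

  if (ip6addr_aton("2001:db8::1", &addr) == 0) {
    return; /* not a valid textual IPv6 address */
  }
  /* ip6addr_ntoa_r() returns NULL if buf is too small, otherwise buf itself;
   * zero runs are compressed, so this prints the short form, e.g. "2001:DB8::1". */
  if (ip6addr_ntoa_r(&addr, buf, (int)sizeof(buf)) != NULL) {
    printf("parsed: %s\n", buf);
  }
}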
/** * @file * * IPv6 fragmentation and reassembly. */ /* * Copyright (c) 2010 Inico Technologies Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY * OF SUCH DAMAGE. * * This file is part of the lwIP TCP/IP stack. * * Author: Ivan Delamer <delamer@inicotech.com> * * * Please coordinate changes and requests with Ivan Delamer * <delamer@inicotech.com> */ #include "lwip/opt.h" #include "lwip/ip6_frag.h" #include "lwip/ip6.h" #include "lwip/icmp6.h" #include "lwip/nd6.h" #include "lwip/ip.h" #include "lwip/pbuf.h" #include "lwip/memp.h" #include "lwip/stats.h" #include <string.h> #if LWIP_IPV6 && LWIP_IPV6_REASS /* don't build if not configured for use in lwipopts.h */ /** Setting this to 0, you can turn off checking the fragments for overlapping * regions. The code gets a little smaller. Only use this if you know that * overlapping won't occur on your network! */ #ifndef IP_REASS_CHECK_OVERLAP #define IP_REASS_CHECK_OVERLAP 1 #endif /* IP_REASS_CHECK_OVERLAP */ /** Set to 0 to prevent freeing the oldest datagram when the reassembly buffer is * full (IP_REASS_MAX_PBUFS pbufs are enqueued). The code gets a little smaller. * Datagrams will be freed by timeout only. Especially useful when MEMP_NUM_REASSDATA * is set to 1, so one datagram can be reassembled at a time, only. */ #ifndef IP_REASS_FREE_OLDEST #define IP_REASS_FREE_OLDEST 1 #endif /* IP_REASS_FREE_OLDEST */ #if IPV6_FRAG_COPYHEADER #define IPV6_FRAG_REQROOM ((s16_t)(sizeof(struct ip6_reass_helper) - IP6_FRAG_HLEN)) #endif #define IP_REASS_FLAG_LASTFRAG 0x01 /** This is a helper struct which holds the starting * offset and the ending offset of this fragment to * easily chain the fragments. * It has the same packing requirements as the IPv6 header, since it replaces * the Fragment Header in memory in incoming fragments to keep * track of the various fragments. 
*/ #ifdef PACK_STRUCT_USE_INCLUDES # include "arch/bpstruct.h" #endif PACK_STRUCT_BEGIN struct ip6_reass_helper { PACK_STRUCT_FIELD(struct pbuf *next_pbuf); PACK_STRUCT_FIELD(u16_t start); PACK_STRUCT_FIELD(u16_t end); } PACK_STRUCT_STRUCT; PACK_STRUCT_END #ifdef PACK_STRUCT_USE_INCLUDES # include "arch/epstruct.h" #endif /* static variables */ static struct ip6_reassdata *reassdatagrams; static u16_t ip6_reass_pbufcount; /* Forward declarations. */ static void ip6_reass_free_complete_datagram(struct ip6_reassdata *ipr); #if IP_REASS_FREE_OLDEST static void ip6_reass_remove_oldest_datagram(struct ip6_reassdata *ipr, int pbufs_needed); #endif /* IP_REASS_FREE_OLDEST */ void ip6_reass_tmr(void) { struct ip6_reassdata *r, *tmp; #if !IPV6_FRAG_COPYHEADER LWIP_ASSERT("sizeof(struct ip6_reass_helper) <= IP6_FRAG_HLEN, set IPV6_FRAG_COPYHEADER to 1", sizeof(struct ip6_reass_helper) <= IP6_FRAG_HLEN); #endif /* !IPV6_FRAG_COPYHEADER */ r = reassdatagrams; while (r != NULL) { /* Decrement the timer. Once it reaches 0, * clean up the incomplete fragment assembly */ if (r->timer > 0) { r->timer--; r = r->next; } else { /* reassembly timed out */ tmp = r; /* get the next pointer before freeing */ r = r->next; /* free the helper struct and all enqueued pbufs */ ip6_reass_free_complete_datagram(tmp); } } } /** * Free a datagram (struct ip6_reassdata) and all its pbufs. * Updates the total count of enqueued pbufs (ip6_reass_pbufcount), * sends an ICMP time exceeded packet. * * @param ipr datagram to free */ static void ip6_reass_free_complete_datagram(struct ip6_reassdata *ipr) { struct ip6_reassdata *prev; u16_t pbufs_freed = 0; u16_t clen; struct pbuf *p; struct ip6_reass_helper *iprh; #if LWIP_ICMP6 iprh = (struct ip6_reass_helper *)ipr->p->payload; if (iprh->start == 0) { /* The first fragment was received, send ICMP time exceeded. */ /* First, de-queue the first pbuf from r->p. */ p = ipr->p; ipr->p = iprh->next_pbuf; /* Then, move back to the original ipv6 header (we are now pointing to Fragment header). This cannot fail since we already checked when receiving this fragment. */ if (pbuf_header_force(p, (s16_t)((u8_t*)p->payload - (u8_t*)IPV6_FRAG_HDRREF(ipr->iphdr)))) { LWIP_ASSERT("ip6_reass_free: moving p->payload to ip6 header failed\n", 0); } else { icmp6_time_exceeded(p, ICMP6_TE_FRAG); } clen = pbuf_clen(p); LWIP_ASSERT("pbufs_freed + clen <= 0xffff", pbufs_freed + clen <= 0xffff); pbufs_freed += clen; pbuf_free(p); } #endif /* LWIP_ICMP6 */ /* First, free all received pbufs. The individual pbufs need to be released separately as they have not yet been chained */ p = ipr->p; while (p != NULL) { struct pbuf *pcur; iprh = (struct ip6_reass_helper *)p->payload; pcur = p; /* get the next pointer before freeing */ p = iprh->next_pbuf; clen = pbuf_clen(pcur); LWIP_ASSERT("pbufs_freed + clen <= 0xffff", pbufs_freed + clen <= 0xffff); pbufs_freed += clen; pbuf_free(pcur); } /* Then, unchain the struct ip6_reassdata from the list and free it. */ if (ipr == reassdatagrams) { reassdatagrams = ipr->next; } else { prev = reassdatagrams; while (prev != NULL) { if (prev->next == ipr) { break; } prev = prev->next; } if (prev != NULL) { prev->next = ipr->next; } } memp_free(MEMP_IP6_REASSDATA, ipr); /* Finally, update number of pbufs in reassembly queue */ LWIP_ASSERT("ip_reass_pbufcount >= clen", ip6_reass_pbufcount >= pbufs_freed); ip6_reass_pbufcount -= pbufs_freed; } #if IP_REASS_FREE_OLDEST /** * Free the oldest datagram to make room for enqueueing new fragments. * The datagram ipr is not freed! 
* * @param ipr ip6_reassdata for the current fragment * @param pbufs_needed number of pbufs needed to enqueue * (used for freeing other datagrams if not enough space) */ static void ip6_reass_remove_oldest_datagram(struct ip6_reassdata *ipr, int pbufs_needed) { struct ip6_reassdata *r, *oldest; /* Free datagrams until being allowed to enqueue 'pbufs_needed' pbufs, * but don't free the current datagram! */ do { r = oldest = reassdatagrams; while (r != NULL) { if (r != ipr) { if (r->timer <= oldest->timer) { /* older than the previous oldest */ oldest = r; } } r = r->next; } if (oldest == ipr) { /* nothing to free, ipr is the only element on the list */ return; } if (oldest != NULL) { ip6_reass_free_complete_datagram(oldest); } } while (((ip6_reass_pbufcount + pbufs_needed) > IP_REASS_MAX_PBUFS) && (reassdatagrams != NULL)); } #endif /* IP_REASS_FREE_OLDEST */ /** * Reassembles incoming IPv6 fragments into an IPv6 datagram. * * @param p points to the IPv6 Fragment Header * @return NULL if reassembly is incomplete, pbuf pointing to * IPv6 Header if reassembly is complete */ struct pbuf * ip6_reass(struct pbuf *p) { struct ip6_reassdata *ipr, *ipr_prev; struct ip6_reass_helper *iprh, *iprh_tmp, *iprh_prev=NULL; struct ip6_frag_hdr *frag_hdr; u16_t offset, len; u16_t clen; u8_t valid = 1; struct pbuf *q; IP6_FRAG_STATS_INC(ip6_frag.recv); if ((const void*)ip6_current_header() != ((u8_t*)p->payload) - IP6_HLEN) { /* ip6_frag_hdr must be in the first pbuf, not chained */ IP6_FRAG_STATS_INC(ip6_frag.proterr); IP6_FRAG_STATS_INC(ip6_frag.drop); goto nullreturn; } frag_hdr = (struct ip6_frag_hdr *) p->payload; clen = pbuf_clen(p); offset = lwip_ntohs(frag_hdr->_fragment_offset); /* Calculate fragment length from IPv6 payload length. * Adjust for headers before Fragment Header. * And finally adjust by Fragment Header length. */ len = lwip_ntohs(ip6_current_header()->_plen); len -= (u16_t)(((u8_t*)p->payload - (const u8_t*)ip6_current_header()) - IP6_HLEN); len -= IP6_FRAG_HLEN; /* Look for the datagram the fragment belongs to in the current datagram queue, * remembering the previous in the queue for later dequeueing. */ for (ipr = reassdatagrams, ipr_prev = NULL; ipr != NULL; ipr = ipr->next) { /* Check if the incoming fragment matches the one currently present in the reassembly buffer. If so, we proceed with copying the fragment into the buffer. */ if ((frag_hdr->_identification == ipr->identification) && ip6_addr_cmp(ip6_current_src_addr(), &(IPV6_FRAG_HDRREF(ipr->iphdr)->src)) && ip6_addr_cmp(ip6_current_dest_addr(), &(IPV6_FRAG_HDRREF(ipr->iphdr)->dest))) { IP6_FRAG_STATS_INC(ip6_frag.cachehit); break; } ipr_prev = ipr; } if (ipr == NULL) { /* Enqueue a new datagram into the datagram queue */ ipr = (struct ip6_reassdata *)memp_malloc(MEMP_IP6_REASSDATA); if (ipr == NULL) { #if IP_REASS_FREE_OLDEST /* Make room and try again. 
*/ ip6_reass_remove_oldest_datagram(ipr, clen); ipr = (struct ip6_reassdata *)memp_malloc(MEMP_IP6_REASSDATA); if (ipr != NULL) { /* re-search ipr_prev since it might have been removed */ for (ipr_prev = reassdatagrams; ipr_prev != NULL; ipr_prev = ipr_prev->next) { if (ipr_prev->next == ipr) { break; } } } else #endif /* IP_REASS_FREE_OLDEST */ { IP6_FRAG_STATS_INC(ip6_frag.memerr); IP6_FRAG_STATS_INC(ip6_frag.drop); goto nullreturn; } } memset(ipr, 0, sizeof(struct ip6_reassdata)); ipr->timer = IP_REASS_MAXAGE; /* enqueue the new structure to the front of the list */ ipr->next = reassdatagrams; reassdatagrams = ipr; /* Use the current IPv6 header for src/dest address reference. * Eventually, we will replace it when we get the first fragment * (it might be this one, in any case, it is done later). */ #if IPV6_FRAG_COPYHEADER MEMCPY(&ipr->iphdr, ip6_current_header(), IP6_HLEN); #else /* IPV6_FRAG_COPYHEADER */ /* need to use the none-const pointer here: */ ipr->iphdr = ip_data.current_ip6_header; #endif /* IPV6_FRAG_COPYHEADER */ /* copy the fragmented packet id. */ ipr->identification = frag_hdr->_identification; /* copy the nexth field */ ipr->nexth = frag_hdr->_nexth; } /* Check if we are allowed to enqueue more datagrams. */ if ((ip6_reass_pbufcount + clen) > IP_REASS_MAX_PBUFS) { #if IP_REASS_FREE_OLDEST ip6_reass_remove_oldest_datagram(ipr, clen); if ((ip6_reass_pbufcount + clen) <= IP_REASS_MAX_PBUFS) { /* re-search ipr_prev since it might have been removed */ for (ipr_prev = reassdatagrams; ipr_prev != NULL; ipr_prev = ipr_prev->next) { if (ipr_prev->next == ipr) { break; } } } else #endif /* IP_REASS_FREE_OLDEST */ { /* @todo: send ICMPv6 time exceeded here? */ /* drop this pbuf */ IP6_FRAG_STATS_INC(ip6_frag.memerr); IP6_FRAG_STATS_INC(ip6_frag.drop); goto nullreturn; } } /* Overwrite Fragment Header with our own helper struct. */ #if IPV6_FRAG_COPYHEADER if (IPV6_FRAG_REQROOM > 0) { /* Make room for struct ip6_reass_helper (only required if sizeof(void*) > 4). This cannot fail since we already checked when receiving this fragment. */ u8_t hdrerr = pbuf_header_force(p, IPV6_FRAG_REQROOM); LWIP_UNUSED_ARG(hdrerr); /* in case of LWIP_NOASSERT */ LWIP_ASSERT("no room for struct ip6_reass_helper", hdrerr == 0); } #else /* IPV6_FRAG_COPYHEADER */ LWIP_ASSERT("sizeof(struct ip6_reass_helper) <= IP6_FRAG_HLEN, set IPV6_FRAG_COPYHEADER to 1", sizeof(struct ip6_reass_helper) <= IP6_FRAG_HLEN); #endif /* IPV6_FRAG_COPYHEADER */ iprh = (struct ip6_reass_helper *)p->payload; iprh->next_pbuf = NULL; iprh->start = (offset & IP6_FRAG_OFFSET_MASK); iprh->end = (offset & IP6_FRAG_OFFSET_MASK) + len; /* find the right place to insert this pbuf */ /* Iterate through until we either get to the end of the list (append), * or we find on with a larger offset (insert). 
*/ for (q = ipr->p; q != NULL;) { iprh_tmp = (struct ip6_reass_helper*)q->payload; if (iprh->start < iprh_tmp->start) { #if IP_REASS_CHECK_OVERLAP if (iprh->end > iprh_tmp->start) { /* fragment overlaps with following, throw away */ IP6_FRAG_STATS_INC(ip6_frag.proterr); IP6_FRAG_STATS_INC(ip6_frag.drop); goto nullreturn; } if (iprh_prev != NULL) { if (iprh->start < iprh_prev->end) { /* fragment overlaps with previous, throw away */ IP6_FRAG_STATS_INC(ip6_frag.proterr); IP6_FRAG_STATS_INC(ip6_frag.drop); goto nullreturn; } } #endif /* IP_REASS_CHECK_OVERLAP */ /* the new pbuf should be inserted before this */ iprh->next_pbuf = q; if (iprh_prev != NULL) { /* not the fragment with the lowest offset */ iprh_prev->next_pbuf = p; } else { /* fragment with the lowest offset */ ipr->p = p; } break; } else if (iprh->start == iprh_tmp->start) { /* received the same datagram twice: no need to keep the datagram */ IP6_FRAG_STATS_INC(ip6_frag.drop); goto nullreturn; #if IP_REASS_CHECK_OVERLAP } else if (iprh->start < iprh_tmp->end) { /* overlap: no need to keep the new datagram */ IP6_FRAG_STATS_INC(ip6_frag.proterr); IP6_FRAG_STATS_INC(ip6_frag.drop); goto nullreturn; #endif /* IP_REASS_CHECK_OVERLAP */ } else { /* Check if the fragments received so far have no gaps. */ if (iprh_prev != NULL) { if (iprh_prev->end != iprh_tmp->start) { /* There is a fragment missing between the current * and the previous fragment */ valid = 0; } } } q = iprh_tmp->next_pbuf; iprh_prev = iprh_tmp; } /* If q is NULL, then we made it to the end of the list. Determine what to do now */ if (q == NULL) { if (iprh_prev != NULL) { /* this is (for now), the fragment with the highest offset: * chain it to the last fragment */ #if IP_REASS_CHECK_OVERLAP LWIP_ASSERT("check fragments don't overlap", iprh_prev->end <= iprh->start); #endif /* IP_REASS_CHECK_OVERLAP */ iprh_prev->next_pbuf = p; if (iprh_prev->end != iprh->start) { valid = 0; } } else { #if IP_REASS_CHECK_OVERLAP LWIP_ASSERT("no previous fragment, this must be the first fragment!", ipr->p == NULL); #endif /* IP_REASS_CHECK_OVERLAP */ /* this is the first fragment we ever received for this ip datagram */ ipr->p = p; } } /* Track the current number of pbufs current 'in-flight', in order to limit the number of fragments that may be enqueued at any one time */ ip6_reass_pbufcount += clen; /* Remember IPv6 header if this is the first fragment. */ if (iprh->start == 0) { #if IPV6_FRAG_COPYHEADER if (iprh->next_pbuf != NULL) { MEMCPY(&ipr->iphdr, ip6_current_header(), IP6_HLEN); } #else /* IPV6_FRAG_COPYHEADER */ /* need to use the none-const pointer here: */ ipr->iphdr = ip_data.current_ip6_header; #endif /* IPV6_FRAG_COPYHEADER */ } /* If this is the last fragment, calculate total packet length. */ if ((offset & IP6_FRAG_MORE_FLAG) == 0) { ipr->datagram_len = iprh->end; } /* Additional validity tests: we have received first and last fragment. */ iprh_tmp = (struct ip6_reass_helper*)ipr->p->payload; if (iprh_tmp->start != 0) { valid = 0; } if (ipr->datagram_len == 0) { valid = 0; } /* Final validity test: no gaps between current and last fragment. */ iprh_prev = iprh; q = iprh->next_pbuf; while ((q != NULL) && valid) { iprh = (struct ip6_reass_helper*)q->payload; if (iprh_prev->end != iprh->start) { valid = 0; break; } iprh_prev = iprh; q = iprh->next_pbuf; } if (valid) { /* All fragments have been received */ struct ip6_hdr* iphdr_ptr; /* chain together the pbufs contained within the ip6_reassdata list. 
*/ iprh = (struct ip6_reass_helper*) ipr->p->payload; while (iprh != NULL) { struct pbuf* next_pbuf = iprh->next_pbuf; if (next_pbuf != NULL) { /* Save next helper struct (will be hidden in next step). */ iprh_tmp = (struct ip6_reass_helper*)next_pbuf->payload; /* hide the fragment header for every succeeding fragment */ pbuf_header(next_pbuf, -IP6_FRAG_HLEN); #if IPV6_FRAG_COPYHEADER if (IPV6_FRAG_REQROOM > 0) { /* hide the extra bytes borrowed from ip6_hdr for struct ip6_reass_helper */ u8_t hdrerr = pbuf_header(next_pbuf, -(s16_t)(IPV6_FRAG_REQROOM)); LWIP_UNUSED_ARG(hdrerr); /* in case of LWIP_NOASSERT */ LWIP_ASSERT("no room for struct ip6_reass_helper", hdrerr == 0); } #endif pbuf_cat(ipr->p, next_pbuf); } else { iprh_tmp = NULL; } iprh = iprh_tmp; } #if IPV6_FRAG_COPYHEADER if (IPV6_FRAG_REQROOM > 0) { /* get back room for struct ip6_reass_helper (only required if sizeof(void*) > 4) */ u8_t hdrerr = pbuf_header(ipr->p, -(s16_t)(IPV6_FRAG_REQROOM)); LWIP_UNUSED_ARG(hdrerr); /* in case of LWIP_NOASSERT */ LWIP_ASSERT("no room for struct ip6_reass_helper", hdrerr == 0); } iphdr_ptr = (struct ip6_hdr*)((u8_t*)ipr->p->payload - IP6_HLEN); MEMCPY(iphdr_ptr, &ipr->iphdr, IP6_HLEN); #else iphdr_ptr = ipr->iphdr; #endif /* Adjust datagram length by adding header lengths. */ ipr->datagram_len += (u16_t)(((u8_t*)ipr->p->payload - (u8_t*)iphdr_ptr) + IP6_FRAG_HLEN - IP6_HLEN); /* Set payload length in ip header. */ iphdr_ptr->_plen = lwip_htons(ipr->datagram_len); /* Get the first pbuf. */ p = ipr->p; /* Restore Fragment Header in first pbuf. Mark as "single fragment" * packet. Restore nexth. */ frag_hdr = (struct ip6_frag_hdr *) p->payload; frag_hdr->_nexth = ipr->nexth; frag_hdr->reserved = 0; frag_hdr->_fragment_offset = 0; frag_hdr->_identification = 0; /* release the sources allocate for the fragment queue entry */ if (reassdatagrams == ipr) { /* it was the first in the list */ reassdatagrams = ipr->next; } else { /* it wasn't the first, so it must have a valid 'prev' */ LWIP_ASSERT("sanity check linked list", ipr_prev != NULL); ipr_prev->next = ipr->next; } memp_free(MEMP_IP6_REASSDATA, ipr); /* adjust the number of pbufs currently queued for reassembly. */ ip6_reass_pbufcount -= pbuf_clen(p); /* Move pbuf back to IPv6 header. This cannot fail since we already checked when receiving this fragment. */ if (pbuf_header_force(p, (s16_t)((u8_t*)p->payload - (u8_t*)iphdr_ptr))) { LWIP_ASSERT("ip6_reass: moving p->payload to ip6 header failed\n", 0); pbuf_free(p); return NULL; } /* Return the pbuf chain */ return p; } /* the datagram is not (yet?) reassembled completely */ return NULL; nullreturn: pbuf_free(p); return NULL; } #endif /* LWIP_IPV6 && LWIP_IPV6_REASS */ #if LWIP_IPV6 && LWIP_IPV6_FRAG #if !LWIP_NETIF_TX_SINGLE_PBUF /** Allocate a new struct pbuf_custom_ref */ static struct pbuf_custom_ref* ip6_frag_alloc_pbuf_custom_ref(void) { return (struct pbuf_custom_ref*)memp_malloc(MEMP_FRAG_PBUF); } /** Free a struct pbuf_custom_ref */ static void ip6_frag_free_pbuf_custom_ref(struct pbuf_custom_ref* p) { LWIP_ASSERT("p != NULL", p != NULL); memp_free(MEMP_FRAG_PBUF, p); } /** Free-callback function to free a 'struct pbuf_custom_ref', called by * pbuf_free. 
*/ static void ip6_frag_free_pbuf_custom(struct pbuf *p) { struct pbuf_custom_ref *pcr = (struct pbuf_custom_ref*)p; LWIP_ASSERT("pcr != NULL", pcr != NULL); LWIP_ASSERT("pcr == p", (void*)pcr == (void*)p); if (pcr->original != NULL) { pbuf_free(pcr->original); } ip6_frag_free_pbuf_custom_ref(pcr); } #endif /* !LWIP_NETIF_TX_SINGLE_PBUF */ /** * Fragment an IPv6 datagram if too large for the netif or path MTU. * * Chop the datagram in MTU sized chunks and send them in order * by pointing PBUF_REFs into p * * @param p ipv6 packet to send * @param netif the netif on which to send * @param dest destination ipv6 address to which to send * * @return ERR_OK if sent successfully, err_t otherwise */ err_t ip6_frag(struct pbuf *p, struct netif *netif, const ip6_addr_t *dest) { struct ip6_hdr *original_ip6hdr; struct ip6_hdr *ip6hdr; struct ip6_frag_hdr *frag_hdr; struct pbuf *rambuf; #if !LWIP_NETIF_TX_SINGLE_PBUF struct pbuf *newpbuf; u16_t newpbuflen = 0; u16_t left_to_copy; #endif static u32_t identification; u16_t nfb; u16_t left, cop; u16_t mtu; u16_t fragment_offset = 0; u16_t last; u16_t poff = IP6_HLEN; identification++; original_ip6hdr = (struct ip6_hdr *)p->payload; mtu = nd6_get_destination_mtu(dest, netif); /* @todo we assume there are no options in the unfragmentable part (IPv6 header). */ left = p->tot_len - IP6_HLEN; nfb = (mtu - (IP6_HLEN + IP6_FRAG_HLEN)) & IP6_FRAG_OFFSET_MASK; while (left) { last = (left <= nfb); /* Fill this fragment */ cop = last ? left : nfb; #if LWIP_NETIF_TX_SINGLE_PBUF rambuf = pbuf_alloc(PBUF_IP, cop + IP6_FRAG_HLEN, PBUF_RAM); if (rambuf == NULL) { IP6_FRAG_STATS_INC(ip6_frag.memerr); return ERR_MEM; } LWIP_ASSERT("this needs a pbuf in one piece!", (rambuf->len == rambuf->tot_len) && (rambuf->next == NULL)); poff += pbuf_copy_partial(p, (u8_t*)rambuf->payload + IP6_FRAG_HLEN, cop, poff); /* make room for the IP header */ if (pbuf_header(rambuf, IP6_HLEN)) { pbuf_free(rambuf); IP6_FRAG_STATS_INC(ip6_frag.memerr); return ERR_MEM; } /* fill in the IP header */ SMEMCPY(rambuf->payload, original_ip6hdr, IP6_HLEN); ip6hdr = (struct ip6_hdr *)rambuf->payload; frag_hdr = (struct ip6_frag_hdr *)((u8_t*)rambuf->payload + IP6_HLEN); #else /* When not using a static buffer, create a chain of pbufs. * The first will be a PBUF_RAM holding the link, IPv6, and Fragment header. * The rest will be PBUF_REFs mirroring the pbuf chain to be fragged, * but limited to the size of an mtu. */ rambuf = pbuf_alloc(PBUF_LINK, IP6_HLEN + IP6_FRAG_HLEN, PBUF_RAM); if (rambuf == NULL) { IP6_FRAG_STATS_INC(ip6_frag.memerr); return ERR_MEM; } LWIP_ASSERT("this needs a pbuf in one piece!", (p->len >= (IP6_HLEN))); SMEMCPY(rambuf->payload, original_ip6hdr, IP6_HLEN); ip6hdr = (struct ip6_hdr *)rambuf->payload; frag_hdr = (struct ip6_frag_hdr *)((u8_t*)rambuf->payload + IP6_HLEN); /* Can just adjust p directly for needed offset. */ p->payload = (u8_t *)p->payload + poff; p->len -= poff; p->tot_len -= poff; left_to_copy = cop; while (left_to_copy) { struct pbuf_custom_ref *pcr; newpbuflen = (left_to_copy < p->len) ? left_to_copy : p->len; /* Is this pbuf already empty? */ if (!newpbuflen) { p = p->next; continue; } pcr = ip6_frag_alloc_pbuf_custom_ref(); if (pcr == NULL) { pbuf_free(rambuf); IP6_FRAG_STATS_INC(ip6_frag.memerr); return ERR_MEM; } /* Mirror this pbuf, although we might not need all of it. 
*/ newpbuf = pbuf_alloced_custom(PBUF_RAW, newpbuflen, PBUF_REF, &pcr->pc, p->payload, newpbuflen); if (newpbuf == NULL) { ip6_frag_free_pbuf_custom_ref(pcr); pbuf_free(rambuf); IP6_FRAG_STATS_INC(ip6_frag.memerr); return ERR_MEM; } pbuf_ref(p); pcr->original = p; pcr->pc.custom_free_function = ip6_frag_free_pbuf_custom; /* Add it to end of rambuf's chain, but using pbuf_cat, not pbuf_chain * so that it is removed when pbuf_dechain is later called on rambuf. */ pbuf_cat(rambuf, newpbuf); left_to_copy -= newpbuflen; if (left_to_copy) { p = p->next; } } poff = newpbuflen; #endif /* LWIP_NETIF_TX_SINGLE_PBUF */ /* Set headers */ frag_hdr->_nexth = original_ip6hdr->_nexth; frag_hdr->reserved = 0; frag_hdr->_fragment_offset = lwip_htons((fragment_offset & IP6_FRAG_OFFSET_MASK) | (last ? 0 : IP6_FRAG_MORE_FLAG)); frag_hdr->_identification = lwip_htonl(identification); IP6H_NEXTH_SET(ip6hdr, IP6_NEXTH_FRAGMENT); IP6H_PLEN_SET(ip6hdr, cop + IP6_FRAG_HLEN); /* No need for separate header pbuf - we allowed room for it in rambuf * when allocated. */ IP6_FRAG_STATS_INC(ip6_frag.xmit); netif->output_ip6(netif, rambuf, dest); /* Unfortunately we can't reuse rambuf - the hardware may still be * using the buffer. Instead we free it (and the ensuing chain) and * recreate it next time round the loop. If we're lucky the hardware * will have already sent the packet, the free will really free, and * there will be zero memory penalty. */ pbuf_free(rambuf); left -= cop; fragment_offset += cop; } return ERR_OK; } #endif /* LWIP_IPV6 && LWIP_IPV6_FRAG */
2301_81045437/classic-platform
communication/lwip-2.0.3/src/core/ipv6/ip6_frag.c
C
unknown
27,416
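/*
 * Illustrative fragment-size sketch, not part of the lwIP sources above:
 * mirrors the arithmetic used by ip6_frag() in ip6_frag.c to show how much
 * payload fits in each fragment and how the Fragment Header offset field is
 * encoded. The MTU of 1500 and the 3000-byte payload are arbitrary example
 * inputs; no packets are built or sent here.
 */
#include <stdio.h>
#include "lwip/opt.h"
#include "lwip/ip6.h"
#include "lwip/ip6_frag.h"

static void example_frag_math(void)
{
  u16_t mtu = 1500;           /* path MTU toward the destination */
  u16_t payload_left = 3000;  /* bytes following the IPv6 header */
  u16_t fragment_offset = 0;
  /* Per-fragment payload: MTU minus IPv6 and Fragment headers, rounded down
   * to a multiple of 8 by masking with IP6_FRAG_OFFSET_MASK. */
  u16_t nfb = (u16_t)((mtu - (IP6_HLEN + IP6_FRAG_HLEN)) & IP6_FRAG_OFFSET_MASK);

  while (payload_left) {
    u16_t last = (payload_left <= nfb);
    u16_t cop = last ? payload_left : nfb;
    /* The wire field combines the byte offset (already a multiple of 8) with
     * the "more fragments" flag in the low-order bits, as ip6_frag() does
     * before byte-swapping with lwip_htons(). */
    u16_t field = (u16_t)((fragment_offset & IP6_FRAG_OFFSET_MASK) |
                          (last ? 0 : IP6_FRAG_MORE_FLAG));
    printf("fragment: %u payload bytes, offset field 0x%04x\n",
           (unsigned)cop, (unsigned)field);
    payload_left -= cop;
    fragment_offset += cop;
  }
}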
/** * @file * Multicast listener discovery * * @defgroup mld6 MLD6 * @ingroup ip6 * Multicast listener discovery for IPv6. Aims to be compliant with RFC 2710. * No support for MLDv2.\n * To be called from TCPIP thread */ /* * Copyright (c) 2010 Inico Technologies Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY * OF SUCH DAMAGE. * * This file is part of the lwIP TCP/IP stack. * * Author: Ivan Delamer <delamer@inicotech.com> * * * Please coordinate changes and requests with Ivan Delamer * <delamer@inicotech.com> */ /* Based on igmp.c implementation of igmp v2 protocol */ #include "lwip/opt.h" #if LWIP_IPV6 && LWIP_IPV6_MLD /* don't build if not configured for use in lwipopts.h */ #include "lwip/mld6.h" #include "lwip/prot/mld6.h" #include "lwip/icmp6.h" #include "lwip/ip6.h" #include "lwip/ip6_addr.h" #include "lwip/ip.h" #include "lwip/inet_chksum.h" #include "lwip/pbuf.h" #include "lwip/netif.h" #include "lwip/memp.h" #include "lwip/stats.h" #include <string.h> /* * MLD constants */ #define MLD6_HL 1 #define MLD6_JOIN_DELAYING_MEMBER_TMR_MS (500) #define MLD6_GROUP_NON_MEMBER 0 #define MLD6_GROUP_DELAYING_MEMBER 1 #define MLD6_GROUP_IDLE_MEMBER 2 /* Forward declarations. 
*/ static struct mld_group *mld6_new_group(struct netif *ifp, const ip6_addr_t *addr); static err_t mld6_remove_group(struct netif *netif, struct mld_group *group); static void mld6_delayed_report(struct mld_group *group, u16_t maxresp); static void mld6_send(struct netif *netif, struct mld_group *group, u8_t type); /** * Stop MLD processing on interface * * @param netif network interface on which stop MLD processing */ err_t mld6_stop(struct netif *netif) { struct mld_group *group = netif_mld6_data(netif); netif_set_client_data(netif, LWIP_NETIF_CLIENT_DATA_INDEX_MLD6, NULL); while (group != NULL) { struct mld_group *next = group->next; /* avoid use-after-free below */ /* disable the group at the MAC level */ if (netif->mld_mac_filter != NULL) { netif->mld_mac_filter(netif, &(group->group_address), NETIF_DEL_MAC_FILTER); } /* free group */ memp_free(MEMP_MLD6_GROUP, group); /* move to "next" */ group = next; } return ERR_OK; } /** * Report MLD memberships for this interface * * @param netif network interface on which report MLD memberships */ void mld6_report_groups(struct netif *netif) { struct mld_group *group = netif_mld6_data(netif); while (group != NULL) { mld6_delayed_report(group, MLD6_JOIN_DELAYING_MEMBER_TMR_MS); group = group->next; } } /** * Search for a group that is joined on a netif * * @param ifp the network interface for which to look * @param addr the group ipv6 address to search for * @return a struct mld_group* if the group has been found, * NULL if the group wasn't found. */ struct mld_group * mld6_lookfor_group(struct netif *ifp, const ip6_addr_t *addr) { struct mld_group *group = netif_mld6_data(ifp); while (group != NULL) { if (ip6_addr_cmp(&(group->group_address), addr)) { return group; } group = group->next; } return NULL; } /** * create a new group * * @param ifp the network interface for which to create * @param addr the new group ipv6 * @return a struct mld_group*, * NULL on memory error. */ static struct mld_group * mld6_new_group(struct netif *ifp, const ip6_addr_t *addr) { struct mld_group *group; group = (struct mld_group *)memp_malloc(MEMP_MLD6_GROUP); if (group != NULL) { ip6_addr_set(&(group->group_address), addr); group->timer = 0; /* Not running */ group->group_state = MLD6_GROUP_IDLE_MEMBER; group->last_reporter_flag = 0; group->use = 0; group->next = netif_mld6_data(ifp); netif_set_client_data(ifp, LWIP_NETIF_CLIENT_DATA_INDEX_MLD6, group); } return group; } /** * Remove a group from the mld_group_list, but do not free it yet * * @param group the group to remove * @return ERR_OK if group was removed from the list, an err_t otherwise */ static err_t mld6_remove_group(struct netif *netif, struct mld_group *group) { err_t err = ERR_OK; /* Is it the first group? */ if (netif_mld6_data(netif) == group) { netif_set_client_data(netif, LWIP_NETIF_CLIENT_DATA_INDEX_MLD6, group->next); } else { /* look for group further down the list */ struct mld_group *tmpGroup; for (tmpGroup = netif_mld6_data(netif); tmpGroup != NULL; tmpGroup = tmpGroup->next) { if (tmpGroup->next == group) { tmpGroup->next = group->next; break; } } /* Group not find group */ if (tmpGroup == NULL) { err = ERR_ARG; } } return err; } /** * Process an input MLD message. Called by icmp6_input. * * @param p the mld packet, p->payload pointing to the icmpv6 header * @param inp the netif on which this packet was received */ void mld6_input(struct pbuf *p, struct netif *inp) { struct mld_header *mld_hdr; struct mld_group *group; MLD6_STATS_INC(mld6.recv); /* Check that mld header fits in packet. 
*/ if (p->len < sizeof(struct mld_header)) { /* @todo debug message */ pbuf_free(p); MLD6_STATS_INC(mld6.lenerr); MLD6_STATS_INC(mld6.drop); return; } mld_hdr = (struct mld_header *)p->payload; switch (mld_hdr->type) { case ICMP6_TYPE_MLQ: /* Multicast listener query. */ /* Is it a general query? */ if (ip6_addr_isallnodes_linklocal(ip6_current_dest_addr()) && ip6_addr_isany(&(mld_hdr->multicast_address))) { MLD6_STATS_INC(mld6.rx_general); /* Report all groups, except all nodes group, and if-local groups. */ group = netif_mld6_data(inp); while (group != NULL) { if ((!(ip6_addr_ismulticast_iflocal(&(group->group_address)))) && (!(ip6_addr_isallnodes_linklocal(&(group->group_address))))) { mld6_delayed_report(group, mld_hdr->max_resp_delay); } group = group->next; } } else { /* Have we joined this group? * We use IP6 destination address to have a memory aligned copy. * mld_hdr->multicast_address should be the same. */ MLD6_STATS_INC(mld6.rx_group); group = mld6_lookfor_group(inp, ip6_current_dest_addr()); if (group != NULL) { /* Schedule a report. */ mld6_delayed_report(group, mld_hdr->max_resp_delay); } } break; /* ICMP6_TYPE_MLQ */ case ICMP6_TYPE_MLR: /* Multicast listener report. */ /* Have we joined this group? * We use IP6 destination address to have a memory aligned copy. * mld_hdr->multicast_address should be the same. */ MLD6_STATS_INC(mld6.rx_report); group = mld6_lookfor_group(inp, ip6_current_dest_addr()); if (group != NULL) { /* If we are waiting to report, cancel it. */ if (group->group_state == MLD6_GROUP_DELAYING_MEMBER) { group->timer = 0; /* stopped */ group->group_state = MLD6_GROUP_IDLE_MEMBER; group->last_reporter_flag = 0; } } break; /* ICMP6_TYPE_MLR */ case ICMP6_TYPE_MLD: /* Multicast listener done. */ /* Do nothing, router will query us. */ break; /* ICMP6_TYPE_MLD */ default: MLD6_STATS_INC(mld6.proterr); MLD6_STATS_INC(mld6.drop); break; } pbuf_free(p); } /** * @ingroup mld6 * Join a group on a network interface. * * @param srcaddr ipv6 address of the network interface which should * join a new group. If IP6_ADDR_ANY, join on all netifs * @param groupaddr the ipv6 address of the group to join * @return ERR_OK if group was joined on the netif(s), an err_t otherwise */ err_t mld6_joingroup(const ip6_addr_t *srcaddr, const ip6_addr_t *groupaddr) { err_t err = ERR_VAL; /* no matching interface */ struct netif *netif; /* loop through netif's */ netif = netif_list; while (netif != NULL) { /* Should we join this interface ? */ if (ip6_addr_isany(srcaddr) || netif_get_ip6_addr_match(netif, srcaddr) >= 0) { err = mld6_joingroup_netif(netif, groupaddr); if (err != ERR_OK) { return err; } } /* proceed to next network interface */ netif = netif->next; } return err; } /** * @ingroup mld6 * Join a group on a network interface. * * @param netif the network interface which should join a new group. * @param groupaddr the ipv6 address of the group to join * @return ERR_OK if group was joined on the netif, an err_t otherwise */ err_t mld6_joingroup_netif(struct netif *netif, const ip6_addr_t *groupaddr) { struct mld_group *group; /* find group or create a new one if not found */ group = mld6_lookfor_group(netif, groupaddr); if (group == NULL) { /* Joining a new group. Create a new group entry. */ group = mld6_new_group(netif, groupaddr); if (group == NULL) { return ERR_MEM; } /* Activate this address on the MAC layer. */ if (netif->mld_mac_filter != NULL) { netif->mld_mac_filter(netif, groupaddr, NETIF_ADD_MAC_FILTER); } /* Report our membership. 
*/ MLD6_STATS_INC(mld6.tx_report); mld6_send(netif, group, ICMP6_TYPE_MLR); mld6_delayed_report(group, MLD6_JOIN_DELAYING_MEMBER_TMR_MS); } /* Increment group use */ group->use++; return ERR_OK; } /** * @ingroup mld6 * Leave a group on a network interface. * * @param srcaddr ipv6 address of the network interface which should * leave the group. If IP6_ISANY, leave on all netifs * @param groupaddr the ipv6 address of the group to leave * @return ERR_OK if group was left on the netif(s), an err_t otherwise */ err_t mld6_leavegroup(const ip6_addr_t *srcaddr, const ip6_addr_t *groupaddr) { err_t err = ERR_VAL; /* no matching interface */ struct netif *netif; /* loop through netif's */ netif = netif_list; while (netif != NULL) { /* Should we leave this interface ? */ if (ip6_addr_isany(srcaddr) || netif_get_ip6_addr_match(netif, srcaddr) >= 0) { err_t res = mld6_leavegroup_netif(netif, groupaddr); if (err != ERR_OK) { /* Store this result if we have not yet gotten a success */ err = res; } } /* proceed to next network interface */ netif = netif->next; } return err; } /** * @ingroup mld6 * Leave a group on a network interface. * * @param netif the network interface which should leave the group. * @param groupaddr the ipv6 address of the group to leave * @return ERR_OK if group was left on the netif, an err_t otherwise */ err_t mld6_leavegroup_netif(struct netif *netif, const ip6_addr_t *groupaddr) { struct mld_group *group; /* find group */ group = mld6_lookfor_group(netif, groupaddr); if (group != NULL) { /* Leave if there is no other use of the group */ if (group->use <= 1) { /* Remove the group from the list */ mld6_remove_group(netif, group); /* If we are the last reporter for this group */ if (group->last_reporter_flag) { MLD6_STATS_INC(mld6.tx_leave); mld6_send(netif, group, ICMP6_TYPE_MLD); } /* Disable the group at the MAC level */ if (netif->mld_mac_filter != NULL) { netif->mld_mac_filter(netif, groupaddr, NETIF_DEL_MAC_FILTER); } /* free group struct */ memp_free(MEMP_MLD6_GROUP, group); } else { /* Decrement group use */ group->use--; } /* Left group */ return ERR_OK; } /* Group not found */ return ERR_VAL; } /** * Periodic timer for mld processing. Must be called every * MLD6_TMR_INTERVAL milliseconds (100). * * When a delaying member expires, a membership report is sent. */ void mld6_tmr(void) { struct netif *netif = netif_list; while (netif != NULL) { struct mld_group *group = netif_mld6_data(netif); while (group != NULL) { if (group->timer > 0) { group->timer--; if (group->timer == 0) { /* If the state is MLD6_GROUP_DELAYING_MEMBER then we send a report for this group */ if (group->group_state == MLD6_GROUP_DELAYING_MEMBER) { MLD6_STATS_INC(mld6.tx_report); mld6_send(netif, group, ICMP6_TYPE_MLR); group->group_state = MLD6_GROUP_IDLE_MEMBER; } } } group = group->next; } netif = netif->next; } } /** * Schedule a delayed membership report for a group * * @param group the mld_group for which "delaying" membership report * should be sent * @param maxresp the max resp delay provided in the query */ static void mld6_delayed_report(struct mld_group *group, u16_t maxresp) { /* Convert maxresp from milliseconds to tmr ticks */ maxresp = maxresp / MLD6_TMR_INTERVAL; if (maxresp == 0) { maxresp = 1; } #ifdef LWIP_RAND /* Randomize maxresp. (if LWIP_RAND is supported) */ maxresp = LWIP_RAND() % maxresp; if (maxresp == 0) { maxresp = 1; } #endif /* LWIP_RAND */ /* Apply timer value if no report has been scheduled already. 
*/ if ((group->group_state == MLD6_GROUP_IDLE_MEMBER) || ((group->group_state == MLD6_GROUP_DELAYING_MEMBER) && ((group->timer == 0) || (maxresp < group->timer)))) { group->timer = maxresp; group->group_state = MLD6_GROUP_DELAYING_MEMBER; } } /** * Send a MLD message (report or done). * * An IPv6 hop-by-hop options header with a router alert option * is prepended. * * @param group the group to report or quit * @param type ICMP6_TYPE_MLR (report) or ICMP6_TYPE_MLD (done) */ static void mld6_send(struct netif *netif, struct mld_group *group, u8_t type) { struct mld_header *mld_hdr; struct pbuf *p; const ip6_addr_t *src_addr; /* Allocate a packet. Size is MLD header + IPv6 Hop-by-hop options header. */ p = pbuf_alloc(PBUF_IP, sizeof(struct mld_header) + sizeof(struct ip6_hbh_hdr), PBUF_RAM); if (p == NULL) { MLD6_STATS_INC(mld6.memerr); return; } /* Move to make room for Hop-by-hop options header. */ if (pbuf_header(p, -IP6_HBH_HLEN)) { pbuf_free(p); MLD6_STATS_INC(mld6.lenerr); return; } /* Select our source address. */ if (!ip6_addr_isvalid(netif_ip6_addr_state(netif, 0))) { /* This is a special case, when we are performing duplicate address detection. * We must join the multicast group, but we don't have a valid address yet. */ src_addr = IP6_ADDR_ANY6; } else { /* Use link-local address as source address. */ src_addr = netif_ip6_addr(netif, 0); } /* MLD message header pointer. */ mld_hdr = (struct mld_header *)p->payload; /* Set fields. */ mld_hdr->type = type; mld_hdr->code = 0; mld_hdr->chksum = 0; mld_hdr->max_resp_delay = 0; mld_hdr->reserved = 0; ip6_addr_set(&(mld_hdr->multicast_address), &(group->group_address)); #if CHECKSUM_GEN_ICMP6 IF__NETIF_CHECKSUM_ENABLED(netif, NETIF_CHECKSUM_GEN_ICMP6) { mld_hdr->chksum = ip6_chksum_pseudo(p, IP6_NEXTH_ICMP6, p->len, src_addr, &(group->group_address)); } #endif /* CHECKSUM_GEN_ICMP6 */ /* Add hop-by-hop headers options: router alert with MLD value. */ ip6_options_add_hbh_ra(p, IP6_NEXTH_ICMP6, IP6_ROUTER_ALERT_VALUE_MLD); if (type == ICMP6_TYPE_MLR) { /* Remember we were the last to report */ group->last_reporter_flag = 1; } /* Send the packet out. */ MLD6_STATS_INC(mld6.xmit); ip6_output_if(p, (ip6_addr_isany(src_addr)) ? NULL : src_addr, &(group->group_address), MLD6_HL, 0, IP6_NEXTH_HOPBYHOP, netif); pbuf_free(p); } #endif /* LWIP_IPV6 */
2301_81045437/classic-platform
communication/lwip-2.0.3/src/core/ipv6/mld6.c
C
unknown
17,561
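/*
 * Illustrative MLD usage sketch, not part of the lwIP sources above: joins
 * and later leaves an IPv6 multicast group on one interface using
 * mld6_joingroup_netif()/mld6_leavegroup_netif() from mld6.c. The group
 * address ff02::1:2 and the helper name are example assumptions; the netif
 * pointer is assumed to come from elsewhere in the application, and as noted
 * in mld6.c these calls belong in the tcpip thread.
 */
#include "lwip/opt.h"
#include "lwip/netif.h"
#include "lwip/ip6_addr.h"
#include "lwip/mld6.h"

static err_t example_mld_join_leave(struct netif *netif)
{
  ip6_addr_t group;
  err_t err;

  if (!ip6addr_aton("ff02::1:2", &group)) {
    return ERR_ARG;
  }

  /* Creates the group entry, programs the MAC filter if available, sends an
   * unsolicited listener report and schedules a delayed repeat. */
  err = mld6_joingroup_netif(netif, &group);
  if (err != ERR_OK) {
    return err;
  }

  /* ... receive traffic addressed to the group here ... */

  /* Sends a listener-done message if we were the last reporter and frees the
   * group, or only decrements the use count if it was joined more than once. */
  return mld6_leavegroup_netif(netif, &group);
}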
/** * @file * * Neighbor discovery and stateless address autoconfiguration for IPv6. * Aims to be compliant with RFC 4861 (Neighbor discovery) and RFC 4862 * (Address autoconfiguration). */ /* * Copyright (c) 2010 Inico Technologies Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY * OF SUCH DAMAGE. * * This file is part of the lwIP TCP/IP stack. * * Author: Ivan Delamer <delamer@inicotech.com> * * * Please coordinate changes and requests with Ivan Delamer * <delamer@inicotech.com> */ #include "lwip/opt.h" #if LWIP_IPV6 /* don't build if not configured for use in lwipopts.h */ #include "lwip/nd6.h" #include "lwip/priv/nd6_priv.h" #include "lwip/prot/nd6.h" #include "lwip/prot/icmp6.h" #include "lwip/pbuf.h" #include "lwip/mem.h" #include "lwip/memp.h" #include "lwip/ip6.h" #include "lwip/ip6_addr.h" #include "lwip/inet_chksum.h" #include "lwip/netif.h" #include "lwip/icmp6.h" #include "lwip/mld6.h" #include "lwip/ip.h" #include "lwip/stats.h" #include "lwip/dns.h" #include <string.h> #ifdef LWIP_HOOK_FILENAME #include LWIP_HOOK_FILENAME #endif #if LWIP_IPV6_DUP_DETECT_ATTEMPTS > IP6_ADDR_TENTATIVE_COUNT_MASK #error LWIP_IPV6_DUP_DETECT_ATTEMPTS > IP6_ADDR_TENTATIVE_COUNT_MASK #endif /* Router tables. */ struct nd6_neighbor_cache_entry neighbor_cache[LWIP_ND6_NUM_NEIGHBORS]; struct nd6_destination_cache_entry destination_cache[LWIP_ND6_NUM_DESTINATIONS]; struct nd6_prefix_list_entry prefix_list[LWIP_ND6_NUM_PREFIXES]; struct nd6_router_list_entry default_router_list[LWIP_ND6_NUM_ROUTERS]; /* Default values, can be updated by a RA message. */ u32_t reachable_time = LWIP_ND6_REACHABLE_TIME; u32_t retrans_timer = LWIP_ND6_RETRANS_TIMER; /* @todo implement this value in timer */ /* Index for cache entries. */ static u8_t nd6_cached_neighbor_index; static u8_t nd6_cached_destination_index; /* Multicast address holder. */ static ip6_addr_t multicast_address; /* Static buffer to parse RA packet options (size of a prefix option, biggest option) */ static u8_t nd6_ra_buffer[sizeof(struct prefix_option)]; /* Forward declarations. 
*/ static s8_t nd6_find_neighbor_cache_entry(const ip6_addr_t *ip6addr); static s8_t nd6_new_neighbor_cache_entry(void); static void nd6_free_neighbor_cache_entry(s8_t i); static s8_t nd6_find_destination_cache_entry(const ip6_addr_t *ip6addr); static s8_t nd6_new_destination_cache_entry(void); static s8_t nd6_is_prefix_in_netif(const ip6_addr_t *ip6addr, struct netif *netif); static s8_t nd6_select_router(const ip6_addr_t *ip6addr, struct netif *netif); static s8_t nd6_get_router(const ip6_addr_t *router_addr, struct netif *netif); static s8_t nd6_new_router(const ip6_addr_t *router_addr, struct netif *netif); static s8_t nd6_get_onlink_prefix(ip6_addr_t *prefix, struct netif *netif); static s8_t nd6_new_onlink_prefix(ip6_addr_t *prefix, struct netif *netif); static s8_t nd6_get_next_hop_entry(const ip6_addr_t *ip6addr, struct netif *netif); static err_t nd6_queue_packet(s8_t neighbor_index, struct pbuf *q); #define ND6_SEND_FLAG_MULTICAST_DEST 0x01 #define ND6_SEND_FLAG_ALLNODES_DEST 0x02 static void nd6_send_ns(struct netif *netif, const ip6_addr_t *target_addr, u8_t flags); static void nd6_send_na(struct netif *netif, const ip6_addr_t *target_addr, u8_t flags); static void nd6_send_neighbor_cache_probe(struct nd6_neighbor_cache_entry *entry, u8_t flags); #if LWIP_IPV6_SEND_ROUTER_SOLICIT static err_t nd6_send_rs(struct netif *netif); #endif /* LWIP_IPV6_SEND_ROUTER_SOLICIT */ #if LWIP_ND6_QUEUEING static void nd6_free_q(struct nd6_q_entry *q); #else /* LWIP_ND6_QUEUEING */ #define nd6_free_q(q) pbuf_free(q) #endif /* LWIP_ND6_QUEUEING */ static void nd6_send_q(s8_t i); /** * Process an incoming neighbor discovery message * * @param p the nd packet, p->payload pointing to the icmpv6 header * @param inp the netif on which this packet was received */ void nd6_input(struct pbuf *p, struct netif *inp) { u8_t msg_type; s8_t i; ND6_STATS_INC(nd6.recv); msg_type = *((u8_t *)p->payload); switch (msg_type) { case ICMP6_TYPE_NA: /* Neighbor Advertisement. */ { struct na_header *na_hdr; struct lladdr_option *lladdr_opt; /* Check that na header fits in packet. */ if (p->len < (sizeof(struct na_header))) { /* @todo debug message */ pbuf_free(p); ND6_STATS_INC(nd6.lenerr); ND6_STATS_INC(nd6.drop); return; } na_hdr = (struct na_header *)p->payload; /* Unsolicited NA?*/ if (ip6_addr_ismulticast(ip6_current_dest_addr())) { ip6_addr_t target_address; /* This is an unsolicited NA. * link-layer changed? * part of DAD mechanism? */ /* Create an aligned copy. */ ip6_addr_set(&target_address, &(na_hdr->target_address)); #if LWIP_IPV6_DUP_DETECT_ATTEMPTS /* If the target address matches this netif, it is a DAD response. */ for (i = 0; i < LWIP_IPV6_NUM_ADDRESSES; i++) { if (!ip6_addr_isinvalid(netif_ip6_addr_state(inp, i)) && ip6_addr_cmp(&target_address, netif_ip6_addr(inp, i))) { /* We are using a duplicate address. */ netif_ip6_addr_set_state(inp, i, IP6_ADDR_INVALID); #if LWIP_IPV6_AUTOCONFIG /* Check to see if this address was autoconfigured. */ if (!ip6_addr_islinklocal(&target_address)) { i = nd6_get_onlink_prefix(&target_address, inp); if (i >= 0) { /* Mark this prefix as duplicate, so that we don't use it * to generate this address again. */ prefix_list[i].flags |= ND6_PREFIX_AUTOCONFIG_ADDRESS_DUPLICATE; } } #endif /* LWIP_IPV6_AUTOCONFIG */ pbuf_free(p); return; } } #endif /* LWIP_IPV6_DUP_DETECT_ATTEMPTS */ /* Check that link-layer address option also fits in packet. 
*/ if (p->len < (sizeof(struct na_header) + 2)) { /* @todo debug message */ pbuf_free(p); ND6_STATS_INC(nd6.lenerr); ND6_STATS_INC(nd6.drop); return; } lladdr_opt = (struct lladdr_option *)((u8_t*)p->payload + sizeof(struct na_header)); if (p->len < (sizeof(struct na_header) + (lladdr_opt->length << 3))) { /* @todo debug message */ pbuf_free(p); ND6_STATS_INC(nd6.lenerr); ND6_STATS_INC(nd6.drop); return; } /* This is an unsolicited NA, most likely there was a LLADDR change. */ i = nd6_find_neighbor_cache_entry(&target_address); if (i >= 0) { if (na_hdr->flags & ND6_FLAG_OVERRIDE) { MEMCPY(neighbor_cache[i].lladdr, lladdr_opt->addr, inp->hwaddr_len); } } } else { ip6_addr_t target_address; /* This is a solicited NA. * neighbor address resolution response? * neighbor unreachability detection response? */ /* Create an aligned copy. */ ip6_addr_set(&target_address, &(na_hdr->target_address)); /* Find the cache entry corresponding to this na. */ i = nd6_find_neighbor_cache_entry(&target_address); if (i < 0) { /* We no longer care about this target address. drop it. */ pbuf_free(p); return; } /* Update cache entry. */ if ((na_hdr->flags & ND6_FLAG_OVERRIDE) || (neighbor_cache[i].state == ND6_INCOMPLETE)) { /* Check that link-layer address option also fits in packet. */ if (p->len < (sizeof(struct na_header) + 2)) { /* @todo debug message */ pbuf_free(p); ND6_STATS_INC(nd6.lenerr); ND6_STATS_INC(nd6.drop); return; } lladdr_opt = (struct lladdr_option *)((u8_t*)p->payload + sizeof(struct na_header)); if (p->len < (sizeof(struct na_header) + (lladdr_opt->length << 3))) { /* @todo debug message */ pbuf_free(p); ND6_STATS_INC(nd6.lenerr); ND6_STATS_INC(nd6.drop); return; } MEMCPY(neighbor_cache[i].lladdr, lladdr_opt->addr, inp->hwaddr_len); } neighbor_cache[i].netif = inp; neighbor_cache[i].state = ND6_REACHABLE; neighbor_cache[i].counter.reachable_time = reachable_time; /* Send queued packets, if any. */ if (neighbor_cache[i].q != NULL) { nd6_send_q(i); } } break; /* ICMP6_TYPE_NA */ } case ICMP6_TYPE_NS: /* Neighbor solicitation. */ { struct ns_header *ns_hdr; struct lladdr_option *lladdr_opt; u8_t accepted; /* Check that ns header fits in packet. */ if (p->len < sizeof(struct ns_header)) { /* @todo debug message */ pbuf_free(p); ND6_STATS_INC(nd6.lenerr); ND6_STATS_INC(nd6.drop); return; } ns_hdr = (struct ns_header *)p->payload; /* Check if there is a link-layer address provided. Only point to it if in this buffer. */ if (p->len >= (sizeof(struct ns_header) + 2)) { lladdr_opt = (struct lladdr_option *)((u8_t*)p->payload + sizeof(struct ns_header)); if (p->len < (sizeof(struct ns_header) + (lladdr_opt->length << 3))) { lladdr_opt = NULL; } } else { lladdr_opt = NULL; } /* Check if the target address is configured on the receiving netif. */ accepted = 0; for (i = 0; i < LWIP_IPV6_NUM_ADDRESSES; ++i) { if ((ip6_addr_isvalid(netif_ip6_addr_state(inp, i)) || (ip6_addr_istentative(netif_ip6_addr_state(inp, i)) && ip6_addr_isany(ip6_current_src_addr()))) && ip6_addr_cmp(&(ns_hdr->target_address), netif_ip6_addr(inp, i))) { accepted = 1; break; } } /* NS not for us? */ if (!accepted) { pbuf_free(p); return; } /* Check for ANY address in src (DAD algorithm). */ if (ip6_addr_isany(ip6_current_src_addr())) { /* Sender is validating this address. */ for (i = 0; i < LWIP_IPV6_NUM_ADDRESSES; ++i) { if (!ip6_addr_isinvalid(netif_ip6_addr_state(inp, i)) && ip6_addr_cmp(&(ns_hdr->target_address), netif_ip6_addr(inp, i))) { /* Send a NA back so that the sender does not use this address. 
*/ nd6_send_na(inp, netif_ip6_addr(inp, i), ND6_FLAG_OVERRIDE | ND6_SEND_FLAG_ALLNODES_DEST); if (ip6_addr_istentative(netif_ip6_addr_state(inp, i))) { /* We shouldn't use this address either. */ netif_ip6_addr_set_state(inp, i, IP6_ADDR_INVALID); } } } } else { ip6_addr_t target_address; /* Sender is trying to resolve our address. */ /* Verify that they included their own link-layer address. */ if (lladdr_opt == NULL) { /* Not a valid message. */ pbuf_free(p); ND6_STATS_INC(nd6.proterr); ND6_STATS_INC(nd6.drop); return; } i = nd6_find_neighbor_cache_entry(ip6_current_src_addr()); if (i>= 0) { /* We already have a record for the solicitor. */ if (neighbor_cache[i].state == ND6_INCOMPLETE) { neighbor_cache[i].netif = inp; MEMCPY(neighbor_cache[i].lladdr, lladdr_opt->addr, inp->hwaddr_len); /* Delay probe in case we get confirmation of reachability from upper layer (TCP). */ neighbor_cache[i].state = ND6_DELAY; neighbor_cache[i].counter.delay_time = LWIP_ND6_DELAY_FIRST_PROBE_TIME / ND6_TMR_INTERVAL; } } else { /* Add their IPv6 address and link-layer address to neighbor cache. * We will need it at least to send a unicast NA message, but most * likely we will also be communicating with this node soon. */ i = nd6_new_neighbor_cache_entry(); if (i < 0) { /* We couldn't assign a cache entry for this neighbor. * we won't be able to reply. drop it. */ pbuf_free(p); ND6_STATS_INC(nd6.memerr); return; } neighbor_cache[i].netif = inp; MEMCPY(neighbor_cache[i].lladdr, lladdr_opt->addr, inp->hwaddr_len); ip6_addr_set(&(neighbor_cache[i].next_hop_address), ip6_current_src_addr()); /* Receiving a message does not prove reachability: only in one direction. * Delay probe in case we get confirmation of reachability from upper layer (TCP). */ neighbor_cache[i].state = ND6_DELAY; neighbor_cache[i].counter.delay_time = LWIP_ND6_DELAY_FIRST_PROBE_TIME / ND6_TMR_INTERVAL; } /* Create an aligned copy. */ ip6_addr_set(&target_address, &(ns_hdr->target_address)); /* Send back a NA for us. Allocate the reply pbuf. */ nd6_send_na(inp, &target_address, ND6_FLAG_SOLICITED | ND6_FLAG_OVERRIDE); } break; /* ICMP6_TYPE_NS */ } case ICMP6_TYPE_RA: /* Router Advertisement. */ { struct ra_header *ra_hdr; u8_t *buffer; /* Used to copy options. */ u16_t offset; #if LWIP_ND6_RDNSS_MAX_DNS_SERVERS /* There can by multiple RDNSS options per RA */ u8_t rdnss_server_idx = 0; #endif /* LWIP_ND6_RDNSS_MAX_DNS_SERVERS */ /* Check that RA header fits in packet. */ if (p->len < sizeof(struct ra_header)) { /* @todo debug message */ pbuf_free(p); ND6_STATS_INC(nd6.lenerr); ND6_STATS_INC(nd6.drop); return; } ra_hdr = (struct ra_header *)p->payload; /* If we are sending RS messages, stop. */ #if LWIP_IPV6_SEND_ROUTER_SOLICIT /* ensure at least one solicitation is sent */ if ((inp->rs_count < LWIP_ND6_MAX_MULTICAST_SOLICIT) || (nd6_send_rs(inp) == ERR_OK)) { inp->rs_count = 0; } #endif /* LWIP_IPV6_SEND_ROUTER_SOLICIT */ /* Get the matching default router entry. */ i = nd6_get_router(ip6_current_src_addr(), inp); if (i < 0) { /* Create a new router entry. */ i = nd6_new_router(ip6_current_src_addr(), inp); } if (i < 0) { /* Could not create a new router entry. */ pbuf_free(p); ND6_STATS_INC(nd6.memerr); return; } /* Re-set invalidation timer. */ default_router_list[i].invalidation_timer = lwip_htons(ra_hdr->router_lifetime); /* Re-set default timer values. 
*/ #if LWIP_ND6_ALLOW_RA_UPDATES if (ra_hdr->retrans_timer > 0) { retrans_timer = lwip_htonl(ra_hdr->retrans_timer); } if (ra_hdr->reachable_time > 0) { reachable_time = lwip_htonl(ra_hdr->reachable_time); } #endif /* LWIP_ND6_ALLOW_RA_UPDATES */ /* @todo set default hop limit... */ /* ra_hdr->current_hop_limit;*/ /* Update flags in local entry (incl. preference). */ default_router_list[i].flags = ra_hdr->flags; /* Offset to options. */ offset = sizeof(struct ra_header); /* Process each option. */ while ((p->tot_len - offset) > 0) { if (p->len == p->tot_len) { /* no need to copy from contiguous pbuf */ buffer = &((u8_t*)p->payload)[offset]; } else { buffer = nd6_ra_buffer; if (pbuf_copy_partial(p, buffer, sizeof(struct prefix_option), offset) != sizeof(struct prefix_option)) { pbuf_free(p); ND6_STATS_INC(nd6.lenerr); ND6_STATS_INC(nd6.drop); return; } } if (buffer[1] == 0) { /* zero-length extension. drop packet */ pbuf_free(p); ND6_STATS_INC(nd6.lenerr); ND6_STATS_INC(nd6.drop); return; } switch (buffer[0]) { case ND6_OPTION_TYPE_SOURCE_LLADDR: { struct lladdr_option *lladdr_opt; lladdr_opt = (struct lladdr_option *)buffer; if ((default_router_list[i].neighbor_entry != NULL) && (default_router_list[i].neighbor_entry->state == ND6_INCOMPLETE)) { SMEMCPY(default_router_list[i].neighbor_entry->lladdr, lladdr_opt->addr, inp->hwaddr_len); default_router_list[i].neighbor_entry->state = ND6_REACHABLE; default_router_list[i].neighbor_entry->counter.reachable_time = reachable_time; } break; } case ND6_OPTION_TYPE_MTU: { struct mtu_option *mtu_opt; mtu_opt = (struct mtu_option *)buffer; if (lwip_htonl(mtu_opt->mtu) >= 1280) { #if LWIP_ND6_ALLOW_RA_UPDATES inp->mtu = (u16_t)lwip_htonl(mtu_opt->mtu); #endif /* LWIP_ND6_ALLOW_RA_UPDATES */ } break; } case ND6_OPTION_TYPE_PREFIX_INFO: { struct prefix_option *prefix_opt; prefix_opt = (struct prefix_option *)buffer; if ((prefix_opt->flags & ND6_PREFIX_FLAG_ON_LINK) && (prefix_opt->prefix_length == 64) && !ip6_addr_islinklocal(&(prefix_opt->prefix))) { /* Add to on-link prefix list. */ s8_t prefix; ip6_addr_t prefix_addr; /* Get a memory-aligned copy of the prefix. */ ip6_addr_set(&prefix_addr, &(prefix_opt->prefix)); /* find cache entry for this prefix. */ prefix = nd6_get_onlink_prefix(&prefix_addr, inp); if (prefix < 0) { /* Create a new cache entry. */ prefix = nd6_new_onlink_prefix(&prefix_addr, inp); } if (prefix >= 0) { prefix_list[prefix].invalidation_timer = lwip_htonl(prefix_opt->valid_lifetime); #if LWIP_IPV6_AUTOCONFIG if (prefix_opt->flags & ND6_PREFIX_FLAG_AUTONOMOUS) { /* Mark prefix as autonomous, so that address autoconfiguration can take place. * Only OR flag, so that we don't over-write other flags (such as ADDRESS_DUPLICATE)*/ prefix_list[prefix].flags |= ND6_PREFIX_AUTOCONFIG_AUTONOMOUS; } #endif /* LWIP_IPV6_AUTOCONFIG */ } } break; } case ND6_OPTION_TYPE_ROUTE_INFO: /* @todo implement preferred routes. struct route_option * route_opt; route_opt = (struct route_option *)buffer;*/ break; #if LWIP_ND6_RDNSS_MAX_DNS_SERVERS case ND6_OPTION_TYPE_RDNSS: { u8_t num, n; struct rdnss_option * rdnss_opt; rdnss_opt = (struct rdnss_option *)buffer; num = (rdnss_opt->length - 1) / 2; for (n = 0; (rdnss_server_idx < DNS_MAX_SERVERS) && (n < num); n++) { ip_addr_t rdnss_address; /* Get a memory-aligned copy of the prefix. 
*/ ip_addr_copy_from_ip6(rdnss_address, rdnss_opt->rdnss_address[n]); if (htonl(rdnss_opt->lifetime) > 0) { /* TODO implement Lifetime > 0 */ dns_setserver(rdnss_server_idx++, &rdnss_address); } else { /* TODO implement DNS removal in dns.c */ u8_t s; for (s = 0; s < DNS_MAX_SERVERS; s++) { const ip_addr_t *addr = dns_getserver(s); if(ip_addr_cmp(addr, &rdnss_address)) { dns_setserver(s, NULL); } } } } break; } #endif /* LWIP_ND6_RDNSS_MAX_DNS_SERVERS */ default: /* Unrecognized option, abort. */ ND6_STATS_INC(nd6.proterr); break; } /* option length is checked earlier to be non-zero to make sure loop ends */ offset += 8 * ((u16_t)buffer[1]); } break; /* ICMP6_TYPE_RA */ } case ICMP6_TYPE_RD: /* Redirect */ { struct redirect_header *redir_hdr; struct lladdr_option *lladdr_opt; ip6_addr_t tmp; /* Check that Redir header fits in packet. */ if (p->len < sizeof(struct redirect_header)) { /* @todo debug message */ pbuf_free(p); ND6_STATS_INC(nd6.lenerr); ND6_STATS_INC(nd6.drop); return; } redir_hdr = (struct redirect_header *)p->payload; if (p->len >= (sizeof(struct redirect_header) + 2)) { lladdr_opt = (struct lladdr_option *)((u8_t*)p->payload + sizeof(struct redirect_header)); if (p->len < (sizeof(struct redirect_header) + (lladdr_opt->length << 3))) { lladdr_opt = NULL; } } else { lladdr_opt = NULL; } /* Copy original destination address to current source address, to have an aligned copy. */ ip6_addr_set(&tmp, &(redir_hdr->destination_address)); /* Find dest address in cache */ i = nd6_find_destination_cache_entry(&tmp); if (i < 0) { /* Destination not in cache, drop packet. */ pbuf_free(p); return; } /* Set the new target address. */ ip6_addr_set(&(destination_cache[i].next_hop_addr), &(redir_hdr->target_address)); /* If Link-layer address of other router is given, try to add to neighbor cache. */ if (lladdr_opt != NULL) { if (lladdr_opt->type == ND6_OPTION_TYPE_TARGET_LLADDR) { /* Copy target address to current source address, to have an aligned copy. */ ip6_addr_set(&tmp, &(redir_hdr->target_address)); i = nd6_find_neighbor_cache_entry(&tmp); if (i < 0) { i = nd6_new_neighbor_cache_entry(); if (i >= 0) { neighbor_cache[i].netif = inp; MEMCPY(neighbor_cache[i].lladdr, lladdr_opt->addr, inp->hwaddr_len); ip6_addr_set(&(neighbor_cache[i].next_hop_address), &tmp); /* Receiving a message does not prove reachability: only in one direction. * Delay probe in case we get confirmation of reachability from upper layer (TCP). */ neighbor_cache[i].state = ND6_DELAY; neighbor_cache[i].counter.delay_time = LWIP_ND6_DELAY_FIRST_PROBE_TIME / ND6_TMR_INTERVAL; } } if (i >= 0) { if (neighbor_cache[i].state == ND6_INCOMPLETE) { MEMCPY(neighbor_cache[i].lladdr, lladdr_opt->addr, inp->hwaddr_len); /* Receiving a message does not prove reachability: only in one direction. * Delay probe in case we get confirmation of reachability from upper layer (TCP). 
*/ neighbor_cache[i].state = ND6_DELAY; neighbor_cache[i].counter.delay_time = LWIP_ND6_DELAY_FIRST_PROBE_TIME / ND6_TMR_INTERVAL; } } } } break; /* ICMP6_TYPE_RD */ } case ICMP6_TYPE_PTB: /* Packet too big */ { struct icmp6_hdr *icmp6hdr; /* Packet too big message */ struct ip6_hdr *ip6hdr; /* IPv6 header of the packet which caused the error */ u32_t pmtu; ip6_addr_t tmp; /* Check that ICMPv6 header + IPv6 header fit in payload */ if (p->len < (sizeof(struct icmp6_hdr) + IP6_HLEN)) { /* drop short packets */ pbuf_free(p); ND6_STATS_INC(nd6.lenerr); ND6_STATS_INC(nd6.drop); return; } icmp6hdr = (struct icmp6_hdr *)p->payload; ip6hdr = (struct ip6_hdr *)((u8_t*)p->payload + sizeof(struct icmp6_hdr)); /* Copy original destination address to current source address, to have an aligned copy. */ ip6_addr_set(&tmp, &(ip6hdr->dest)); /* Look for entry in destination cache. */ i = nd6_find_destination_cache_entry(&tmp); if (i < 0) { /* Destination not in cache, drop packet. */ pbuf_free(p); return; } /* Change the Path MTU. */ pmtu = lwip_htonl(icmp6hdr->data); destination_cache[i].pmtu = (u16_t)LWIP_MIN(pmtu, 0xFFFF); break; /* ICMP6_TYPE_PTB */ } default: ND6_STATS_INC(nd6.proterr); ND6_STATS_INC(nd6.drop); break; /* default */ } pbuf_free(p); } /** * Periodic timer for Neighbor discovery functions: * * - Update neighbor reachability states * - Update destination cache entries age * - Update invalidation timers of default routers and on-link prefixes * - Perform duplicate address detection (DAD) for our addresses * - Send router solicitations */ void nd6_tmr(void) { s8_t i; struct netif *netif; /* Process neighbor entries. */ for (i = 0; i < LWIP_ND6_NUM_NEIGHBORS; i++) { switch (neighbor_cache[i].state) { case ND6_INCOMPLETE: if ((neighbor_cache[i].counter.probes_sent >= LWIP_ND6_MAX_MULTICAST_SOLICIT) && (!neighbor_cache[i].isrouter)) { /* Retries exceeded. */ nd6_free_neighbor_cache_entry(i); } else { /* Send a NS for this entry. */ neighbor_cache[i].counter.probes_sent++; nd6_send_neighbor_cache_probe(&neighbor_cache[i], ND6_SEND_FLAG_MULTICAST_DEST); } break; case ND6_REACHABLE: /* Send queued packets, if any are left. Should have been sent already. */ if (neighbor_cache[i].q != NULL) { nd6_send_q(i); } if (neighbor_cache[i].counter.reachable_time <= ND6_TMR_INTERVAL) { /* Change to stale state. */ neighbor_cache[i].state = ND6_STALE; neighbor_cache[i].counter.stale_time = 0; } else { neighbor_cache[i].counter.reachable_time -= ND6_TMR_INTERVAL; } break; case ND6_STALE: neighbor_cache[i].counter.stale_time++; break; case ND6_DELAY: if (neighbor_cache[i].counter.delay_time <= 1) { /* Change to PROBE state. */ neighbor_cache[i].state = ND6_PROBE; neighbor_cache[i].counter.probes_sent = 0; } else { neighbor_cache[i].counter.delay_time--; } break; case ND6_PROBE: if ((neighbor_cache[i].counter.probes_sent >= LWIP_ND6_MAX_MULTICAST_SOLICIT) && (!neighbor_cache[i].isrouter)) { /* Retries exceeded. */ nd6_free_neighbor_cache_entry(i); } else { /* Send a NS for this entry. */ neighbor_cache[i].counter.probes_sent++; nd6_send_neighbor_cache_probe(&neighbor_cache[i], 0); } break; case ND6_NO_ENTRY: default: /* Do nothing. */ break; } } /* Process destination entries. */ for (i = 0; i < LWIP_ND6_NUM_DESTINATIONS; i++) { destination_cache[i].age++; } /* Process router entries. */ for (i = 0; i < LWIP_ND6_NUM_ROUTERS; i++) { if (default_router_list[i].neighbor_entry != NULL) { /* Active entry. 
*/ if (default_router_list[i].invalidation_timer > 0) { default_router_list[i].invalidation_timer -= ND6_TMR_INTERVAL / 1000; } if (default_router_list[i].invalidation_timer < ND6_TMR_INTERVAL / 1000) { /* Less than 1 second remaining. Clear this entry. */ default_router_list[i].neighbor_entry->isrouter = 0; default_router_list[i].neighbor_entry = NULL; default_router_list[i].invalidation_timer = 0; default_router_list[i].flags = 0; } } } /* Process prefix entries. */ for (i = 0; i < LWIP_ND6_NUM_PREFIXES; i++) { if (prefix_list[i].netif != NULL) { if (prefix_list[i].invalidation_timer < ND6_TMR_INTERVAL / 1000) { /* Entry timed out, remove it */ prefix_list[i].invalidation_timer = 0; #if LWIP_IPV6_AUTOCONFIG /* If any addresses were configured with this prefix, remove them */ if (prefix_list[i].flags & ND6_PREFIX_AUTOCONFIG_ADDRESS_GENERATED) { s8_t j; for (j = 1; j < LWIP_IPV6_NUM_ADDRESSES; j++) { if ((netif_ip6_addr_state(prefix_list[i].netif, j) != IP6_ADDR_INVALID) && ip6_addr_netcmp(&prefix_list[i].prefix, netif_ip6_addr(prefix_list[i].netif, j))) { netif_ip6_addr_set_state(prefix_list[i].netif, j, IP6_ADDR_INVALID); prefix_list[i].flags = 0; /* Exit loop. */ break; } } } #endif /* LWIP_IPV6_AUTOCONFIG */ prefix_list[i].netif = NULL; prefix_list[i].flags = 0; } else { prefix_list[i].invalidation_timer -= ND6_TMR_INTERVAL / 1000; #if LWIP_IPV6_AUTOCONFIG /* Initiate address autoconfiguration for this prefix, if conditions are met. */ if (prefix_list[i].netif->ip6_autoconfig_enabled && (prefix_list[i].flags & ND6_PREFIX_AUTOCONFIG_AUTONOMOUS) && !(prefix_list[i].flags & ND6_PREFIX_AUTOCONFIG_ADDRESS_GENERATED)) { s8_t j; /* Try to get an address on this netif that is invalid. * Skip 0 index (link-local address) */ for (j = 1; j < LWIP_IPV6_NUM_ADDRESSES; j++) { if (netif_ip6_addr_state(prefix_list[i].netif, j) == IP6_ADDR_INVALID) { /* Generate an address using this prefix and interface ID from link-local address. */ netif_ip6_addr_set_parts(prefix_list[i].netif, j, prefix_list[i].prefix.addr[0], prefix_list[i].prefix.addr[1], netif_ip6_addr(prefix_list[i].netif, 0)->addr[2], netif_ip6_addr(prefix_list[i].netif, 0)->addr[3]); /* Mark it as tentative (DAD will be performed if configured). */ netif_ip6_addr_set_state(prefix_list[i].netif, j, IP6_ADDR_TENTATIVE); /* Mark this prefix with ADDRESS_GENERATED, so that we don't try again. */ prefix_list[i].flags |= ND6_PREFIX_AUTOCONFIG_ADDRESS_GENERATED; /* Exit loop. */ break; } } } #endif /* LWIP_IPV6_AUTOCONFIG */ } } } /* Process our own addresses, if DAD configured. */ for (netif = netif_list; netif != NULL; netif = netif->next) { for (i = 0; i < LWIP_IPV6_NUM_ADDRESSES; ++i) { u8_t addr_state = netif_ip6_addr_state(netif, i); if (ip6_addr_istentative(addr_state)) { if ((addr_state & IP6_ADDR_TENTATIVE_COUNT_MASK) >= LWIP_IPV6_DUP_DETECT_ATTEMPTS) { /* No NA received in response. Mark address as valid. */ netif_ip6_addr_set_state(netif, i, IP6_ADDR_PREFERRED); /* @todo implement preferred and valid lifetimes. */ } else if (netif->flags & NETIF_FLAG_UP) { /* Send a NS for this address. */ nd6_send_ns(netif, netif_ip6_addr(netif, i), ND6_SEND_FLAG_MULTICAST_DEST); /* tentative: set next state by increasing by one */ netif_ip6_addr_set_state(netif, i, addr_state + 1); /* @todo send max 1 NS per tmr call? enable return*/ /*return;*/ } } } } #if LWIP_IPV6_SEND_ROUTER_SOLICIT /* Send router solicitation messages, if necessary. 
*/ for (netif = netif_list; netif != NULL; netif = netif->next) { if ((netif->rs_count > 0) && (netif->flags & NETIF_FLAG_UP) && (!ip6_addr_isinvalid(netif_ip6_addr_state(netif, 0)))) { if (nd6_send_rs(netif) == ERR_OK) { netif->rs_count--; } } } #endif /* LWIP_IPV6_SEND_ROUTER_SOLICIT */ } /** Send a neighbor solicitation message for a specific neighbor cache entry * * @param entry the neightbor cache entry for wich to send the message * @param flags one of ND6_SEND_FLAG_* */ static void nd6_send_neighbor_cache_probe(struct nd6_neighbor_cache_entry *entry, u8_t flags) { nd6_send_ns(entry->netif, &entry->next_hop_address, flags); } /** * Send a neighbor solicitation message * * @param netif the netif on which to send the message * @param target_addr the IPv6 target address for the ND message * @param flags one of ND6_SEND_FLAG_* */ static void nd6_send_ns(struct netif *netif, const ip6_addr_t *target_addr, u8_t flags) { struct ns_header *ns_hdr; struct pbuf *p; const ip6_addr_t *src_addr; u16_t lladdr_opt_len; if (ip6_addr_isvalid(netif_ip6_addr_state(netif,0))) { /* Use link-local address as source address. */ src_addr = netif_ip6_addr(netif, 0); /* calculate option length (in 8-byte-blocks) */ lladdr_opt_len = ((netif->hwaddr_len + 2) + 7) >> 3; } else { src_addr = IP6_ADDR_ANY6; /* Option "MUST NOT be included when the source IP address is the unspecified address." */ lladdr_opt_len = 0; } /* Allocate a packet. */ p = pbuf_alloc(PBUF_IP, sizeof(struct ns_header) + (lladdr_opt_len << 3), PBUF_RAM); if (p == NULL) { ND6_STATS_INC(nd6.memerr); return; } /* Set fields. */ ns_hdr = (struct ns_header *)p->payload; ns_hdr->type = ICMP6_TYPE_NS; ns_hdr->code = 0; ns_hdr->chksum = 0; ns_hdr->reserved = 0; ip6_addr_set(&(ns_hdr->target_address), target_addr); if (lladdr_opt_len != 0) { struct lladdr_option *lladdr_opt = (struct lladdr_option *)((u8_t*)p->payload + sizeof(struct ns_header)); lladdr_opt->type = ND6_OPTION_TYPE_SOURCE_LLADDR; lladdr_opt->length = (u8_t)lladdr_opt_len; SMEMCPY(lladdr_opt->addr, netif->hwaddr, netif->hwaddr_len); } /* Generate the solicited node address for the target address. */ if (flags & ND6_SEND_FLAG_MULTICAST_DEST) { ip6_addr_set_solicitednode(&multicast_address, target_addr->addr[3]); target_addr = &multicast_address; } #if CHECKSUM_GEN_ICMP6 IF__NETIF_CHECKSUM_ENABLED(netif, NETIF_CHECKSUM_GEN_ICMP6) { ns_hdr->chksum = ip6_chksum_pseudo(p, IP6_NEXTH_ICMP6, p->len, src_addr, target_addr); } #endif /* CHECKSUM_GEN_ICMP6 */ /* Send the packet out. */ ND6_STATS_INC(nd6.xmit); ip6_output_if(p, (src_addr == IP6_ADDR_ANY6) ? NULL : src_addr, target_addr, LWIP_ICMP6_HL, 0, IP6_NEXTH_ICMP6, netif); pbuf_free(p); } /** * Send a neighbor advertisement message * * @param netif the netif on which to send the message * @param target_addr the IPv6 target address for the ND message * @param flags one of ND6_SEND_FLAG_* */ static void nd6_send_na(struct netif *netif, const ip6_addr_t *target_addr, u8_t flags) { struct na_header *na_hdr; struct lladdr_option *lladdr_opt; struct pbuf *p; const ip6_addr_t *src_addr; const ip6_addr_t *dest_addr; u16_t lladdr_opt_len; /* Use link-local address as source address. */ /* src_addr = netif_ip6_addr(netif, 0); */ /* Use target address as source address. */ src_addr = target_addr; /* Allocate a packet. */ lladdr_opt_len = ((netif->hwaddr_len + 2) >> 3) + (((netif->hwaddr_len + 2) & 0x07) ? 
1 : 0); p = pbuf_alloc(PBUF_IP, sizeof(struct na_header) + (lladdr_opt_len << 3), PBUF_RAM); if (p == NULL) { ND6_STATS_INC(nd6.memerr); return; } /* Set fields. */ na_hdr = (struct na_header *)p->payload; lladdr_opt = (struct lladdr_option *)((u8_t*)p->payload + sizeof(struct na_header)); na_hdr->type = ICMP6_TYPE_NA; na_hdr->code = 0; na_hdr->chksum = 0; na_hdr->flags = flags & 0xf0; na_hdr->reserved[0] = 0; na_hdr->reserved[1] = 0; na_hdr->reserved[2] = 0; ip6_addr_set(&(na_hdr->target_address), target_addr); lladdr_opt->type = ND6_OPTION_TYPE_TARGET_LLADDR; lladdr_opt->length = (u8_t)lladdr_opt_len; SMEMCPY(lladdr_opt->addr, netif->hwaddr, netif->hwaddr_len); /* Generate the solicited node address for the target address. */ if (flags & ND6_SEND_FLAG_MULTICAST_DEST) { ip6_addr_set_solicitednode(&multicast_address, target_addr->addr[3]); dest_addr = &multicast_address; } else if (flags & ND6_SEND_FLAG_ALLNODES_DEST) { ip6_addr_set_allnodes_linklocal(&multicast_address); dest_addr = &multicast_address; } else { dest_addr = ip6_current_src_addr(); } #if CHECKSUM_GEN_ICMP6 IF__NETIF_CHECKSUM_ENABLED(netif, NETIF_CHECKSUM_GEN_ICMP6) { na_hdr->chksum = ip6_chksum_pseudo(p, IP6_NEXTH_ICMP6, p->len, src_addr, dest_addr); } #endif /* CHECKSUM_GEN_ICMP6 */ /* Send the packet out. */ ND6_STATS_INC(nd6.xmit); ip6_output_if(p, src_addr, dest_addr, LWIP_ICMP6_HL, 0, IP6_NEXTH_ICMP6, netif); pbuf_free(p); } #if LWIP_IPV6_SEND_ROUTER_SOLICIT /** * Send a router solicitation message * * @param netif the netif on which to send the message */ static err_t nd6_send_rs(struct netif *netif) { struct rs_header *rs_hdr; struct lladdr_option *lladdr_opt; struct pbuf *p; const ip6_addr_t *src_addr; err_t err; u16_t lladdr_opt_len = 0; /* Link-local source address, or unspecified address? */ if (ip6_addr_isvalid(netif_ip6_addr_state(netif, 0))) { src_addr = netif_ip6_addr(netif, 0); } else { src_addr = IP6_ADDR_ANY6; } /* Generate the all routers target address. */ ip6_addr_set_allrouters_linklocal(&multicast_address); /* Allocate a packet. */ if (src_addr != IP6_ADDR_ANY6) { lladdr_opt_len = ((netif->hwaddr_len + 2) >> 3) + (((netif->hwaddr_len + 2) & 0x07) ? 1 : 0); } p = pbuf_alloc(PBUF_IP, sizeof(struct rs_header) + (lladdr_opt_len << 3), PBUF_RAM); if (p == NULL) { ND6_STATS_INC(nd6.memerr); return ERR_BUF; } /* Set fields. */ rs_hdr = (struct rs_header *)p->payload; rs_hdr->type = ICMP6_TYPE_RS; rs_hdr->code = 0; rs_hdr->chksum = 0; rs_hdr->reserved = 0; if (src_addr != IP6_ADDR_ANY6) { /* Include our hw address. */ lladdr_opt = (struct lladdr_option *)((u8_t*)p->payload + sizeof(struct rs_header)); lladdr_opt->type = ND6_OPTION_TYPE_SOURCE_LLADDR; lladdr_opt->length = (u8_t)lladdr_opt_len; SMEMCPY(lladdr_opt->addr, netif->hwaddr, netif->hwaddr_len); } #if CHECKSUM_GEN_ICMP6 IF__NETIF_CHECKSUM_ENABLED(netif, NETIF_CHECKSUM_GEN_ICMP6) { rs_hdr->chksum = ip6_chksum_pseudo(p, IP6_NEXTH_ICMP6, p->len, src_addr, &multicast_address); } #endif /* CHECKSUM_GEN_ICMP6 */ /* Send the packet out. */ ND6_STATS_INC(nd6.xmit); err = ip6_output_if(p, (src_addr == IP6_ADDR_ANY6) ? 
NULL : src_addr, &multicast_address, LWIP_ICMP6_HL, 0, IP6_NEXTH_ICMP6, netif); pbuf_free(p); return err; } #endif /* LWIP_IPV6_SEND_ROUTER_SOLICIT */ /** * Search for a neighbor cache entry * * @param ip6addr the IPv6 address of the neighbor * @return The neighbor cache entry index that matched, -1 if no * entry is found */ static s8_t nd6_find_neighbor_cache_entry(const ip6_addr_t *ip6addr) { s8_t i; for (i = 0; i < LWIP_ND6_NUM_NEIGHBORS; i++) { if (ip6_addr_cmp(ip6addr, &(neighbor_cache[i].next_hop_address))) { return i; } } return -1; } /** * Create a new neighbor cache entry. * * If no unused entry is found, will try to recycle an old entry * according to ad-hoc "age" heuristic. * * @return The neighbor cache entry index that was created, -1 if no * entry could be created */ static s8_t nd6_new_neighbor_cache_entry(void) { s8_t i; s8_t j; u32_t time; /* First, try to find an empty entry. */ for (i = 0; i < LWIP_ND6_NUM_NEIGHBORS; i++) { if (neighbor_cache[i].state == ND6_NO_ENTRY) { return i; } } /* We need to recycle an entry. in general, do not recycle if it is a router. */ /* Next, try to find a Stale entry. */ for (i = 0; i < LWIP_ND6_NUM_NEIGHBORS; i++) { if ((neighbor_cache[i].state == ND6_STALE) && (!neighbor_cache[i].isrouter)) { nd6_free_neighbor_cache_entry(i); return i; } } /* Next, try to find a Probe entry. */ for (i = 0; i < LWIP_ND6_NUM_NEIGHBORS; i++) { if ((neighbor_cache[i].state == ND6_PROBE) && (!neighbor_cache[i].isrouter)) { nd6_free_neighbor_cache_entry(i); return i; } } /* Next, try to find a Delayed entry. */ for (i = 0; i < LWIP_ND6_NUM_NEIGHBORS; i++) { if ((neighbor_cache[i].state == ND6_DELAY) && (!neighbor_cache[i].isrouter)) { nd6_free_neighbor_cache_entry(i); return i; } } /* Next, try to find the oldest reachable entry. */ time = 0xfffffffful; j = -1; for (i = 0; i < LWIP_ND6_NUM_NEIGHBORS; i++) { if ((neighbor_cache[i].state == ND6_REACHABLE) && (!neighbor_cache[i].isrouter)) { if (neighbor_cache[i].counter.reachable_time < time) { j = i; time = neighbor_cache[i].counter.reachable_time; } } } if (j >= 0) { nd6_free_neighbor_cache_entry(j); return j; } /* Next, find oldest incomplete entry without queued packets. */ time = 0; j = -1; for (i = 0; i < LWIP_ND6_NUM_NEIGHBORS; i++) { if ( (neighbor_cache[i].q == NULL) && (neighbor_cache[i].state == ND6_INCOMPLETE) && (!neighbor_cache[i].isrouter)) { if (neighbor_cache[i].counter.probes_sent >= time) { j = i; time = neighbor_cache[i].counter.probes_sent; } } } if (j >= 0) { nd6_free_neighbor_cache_entry(j); return j; } /* Next, find oldest incomplete entry with queued packets. */ time = 0; j = -1; for (i = 0; i < LWIP_ND6_NUM_NEIGHBORS; i++) { if ((neighbor_cache[i].state == ND6_INCOMPLETE) && (!neighbor_cache[i].isrouter)) { if (neighbor_cache[i].counter.probes_sent >= time) { j = i; time = neighbor_cache[i].counter.probes_sent; } } } if (j >= 0) { nd6_free_neighbor_cache_entry(j); return j; } /* No more entries to try. */ return -1; } /** * Will free any resources associated with a neighbor cache * entry, and will mark it as unused. * * @param i the neighbor cache entry index to free */ static void nd6_free_neighbor_cache_entry(s8_t i) { if ((i < 0) || (i >= LWIP_ND6_NUM_NEIGHBORS)) { return; } if (neighbor_cache[i].isrouter) { /* isrouter needs to be cleared before deleting a neighbor cache entry */ return; } /* Free any queued packets. 
*/ if (neighbor_cache[i].q != NULL) { nd6_free_q(neighbor_cache[i].q); neighbor_cache[i].q = NULL; } neighbor_cache[i].state = ND6_NO_ENTRY; neighbor_cache[i].isrouter = 0; neighbor_cache[i].netif = NULL; neighbor_cache[i].counter.reachable_time = 0; ip6_addr_set_zero(&(neighbor_cache[i].next_hop_address)); } /** * Search for a destination cache entry * * @param ip6addr the IPv6 address of the destination * @return The destination cache entry index that matched, -1 if no * entry is found */ static s8_t nd6_find_destination_cache_entry(const ip6_addr_t *ip6addr) { s8_t i; for (i = 0; i < LWIP_ND6_NUM_DESTINATIONS; i++) { if (ip6_addr_cmp(ip6addr, &(destination_cache[i].destination_addr))) { return i; } } return -1; } /** * Create a new destination cache entry. If no unused entry is found, * will recycle oldest entry. * * @return The destination cache entry index that was created, -1 if no * entry was created */ static s8_t nd6_new_destination_cache_entry(void) { s8_t i, j; u32_t age; /* Find an empty entry. */ for (i = 0; i < LWIP_ND6_NUM_DESTINATIONS; i++) { if (ip6_addr_isany(&(destination_cache[i].destination_addr))) { return i; } } /* Find oldest entry. */ age = 0; j = LWIP_ND6_NUM_DESTINATIONS - 1; for (i = 0; i < LWIP_ND6_NUM_DESTINATIONS; i++) { if (destination_cache[i].age > age) { j = i; } } return j; } /** * Clear the destination cache. * * This operation may be necessary for consistency in the light of changing * local addresses and/or use of the gateway hook. */ void nd6_clear_destination_cache(void) { int i; for (i = 0; i < LWIP_ND6_NUM_DESTINATIONS; i++) { ip6_addr_set_any(&destination_cache[i].destination_addr); } } /** * Determine whether an address matches an on-link prefix. * * @param ip6addr the IPv6 address to match * @return 1 if the address is on-link, 0 otherwise */ static s8_t nd6_is_prefix_in_netif(const ip6_addr_t *ip6addr, struct netif *netif) { s8_t i; for (i = 0; i < LWIP_ND6_NUM_PREFIXES; i++) { if ((prefix_list[i].netif == netif) && (prefix_list[i].invalidation_timer > 0) && ip6_addr_netcmp(ip6addr, &(prefix_list[i].prefix))) { return 1; } } /* Check to see if address prefix matches a (manually?) configured address. */ for (i = 0; i < LWIP_IPV6_NUM_ADDRESSES; i++) { if (ip6_addr_isvalid(netif_ip6_addr_state(netif, i)) && ip6_addr_netcmp(ip6addr, netif_ip6_addr(netif, i))) { return 1; } } return 0; } /** * Select a default router for a destination. * * @param ip6addr the destination address * @param netif the netif for the outgoing packet, if known * @return the default router entry index, or -1 if no suitable * router is found */ static s8_t nd6_select_router(const ip6_addr_t *ip6addr, struct netif *netif) { s8_t i; /* last_router is used for round-robin router selection (as recommended * in RFC). This is more robust in case one router is not reachable, * we are not stuck trying to resolve it. */ static s8_t last_router; (void)ip6addr; /* @todo match preferred routes!! (must implement ND6_OPTION_TYPE_ROUTE_INFO) */ /* @todo: implement default router preference */ /* Look for reachable routers. */ for (i = 0; i < LWIP_ND6_NUM_ROUTERS; i++) { if (++last_router >= LWIP_ND6_NUM_ROUTERS) { last_router = 0; } if ((default_router_list[i].neighbor_entry != NULL) && (netif != NULL ? netif == default_router_list[i].neighbor_entry->netif : 1) && (default_router_list[i].invalidation_timer > 0) && (default_router_list[i].neighbor_entry->state == ND6_REACHABLE)) { return i; } } /* Look for router in other reachability states, but still valid according to timer. 
*/ for (i = 0; i < LWIP_ND6_NUM_ROUTERS; i++) { if (++last_router >= LWIP_ND6_NUM_ROUTERS) { last_router = 0; } if ((default_router_list[i].neighbor_entry != NULL) && (netif != NULL ? netif == default_router_list[i].neighbor_entry->netif : 1) && (default_router_list[i].invalidation_timer > 0)) { return i; } } /* Look for any router for which we have any information at all. */ for (i = 0; i < LWIP_ND6_NUM_ROUTERS; i++) { if (++last_router >= LWIP_ND6_NUM_ROUTERS) { last_router = 0; } if (default_router_list[i].neighbor_entry != NULL && (netif != NULL ? netif == default_router_list[i].neighbor_entry->netif : 1)) { return i; } } /* no suitable router found. */ return -1; } /** * Find a router-announced route to the given destination. * * The caller is responsible for checking whether the returned netif, if any, * is in a suitable state (up, link up) to be used for packet transmission. * * @param ip6addr the destination IPv6 address * @return the netif to use for the destination, or NULL if none found */ struct netif * nd6_find_route(const ip6_addr_t *ip6addr) { s8_t i; i = nd6_select_router(ip6addr, NULL); if (i >= 0) { if (default_router_list[i].neighbor_entry != NULL) { return default_router_list[i].neighbor_entry->netif; /* may be NULL */ } } return NULL; } /** * Find an entry for a default router. * * @param router_addr the IPv6 address of the router * @param netif the netif on which the router is found, if known * @return the index of the router entry, or -1 if not found */ static s8_t nd6_get_router(const ip6_addr_t *router_addr, struct netif *netif) { s8_t i; /* Look for router. */ for (i = 0; i < LWIP_ND6_NUM_ROUTERS; i++) { if ((default_router_list[i].neighbor_entry != NULL) && ((netif != NULL) ? netif == default_router_list[i].neighbor_entry->netif : 1) && ip6_addr_cmp(router_addr, &(default_router_list[i].neighbor_entry->next_hop_address))) { return i; } } /* router not found. */ return -1; } /** * Create a new entry for a default router. * * @param router_addr the IPv6 address of the router * @param netif the netif on which the router is connected, if known * @return the index on the router table, or -1 if could not be created */ static s8_t nd6_new_router(const ip6_addr_t *router_addr, struct netif *netif) { s8_t router_index; s8_t free_router_index; s8_t neighbor_index; /* Do we have a neighbor entry for this router? */ neighbor_index = nd6_find_neighbor_cache_entry(router_addr); if (neighbor_index < 0) { /* Create a neighbor entry for this router. */ neighbor_index = nd6_new_neighbor_cache_entry(); if (neighbor_index < 0) { /* Could not create neighbor entry for this router. */ return -1; } ip6_addr_set(&(neighbor_cache[neighbor_index].next_hop_address), router_addr); neighbor_cache[neighbor_index].netif = netif; neighbor_cache[neighbor_index].q = NULL; neighbor_cache[neighbor_index].state = ND6_INCOMPLETE; neighbor_cache[neighbor_index].counter.probes_sent = 1; nd6_send_neighbor_cache_probe(&neighbor_cache[neighbor_index], ND6_SEND_FLAG_MULTICAST_DEST); } /* Mark neighbor as router. */ neighbor_cache[neighbor_index].isrouter = 1; /* Look for empty entry. */ free_router_index = LWIP_ND6_NUM_ROUTERS; for (router_index = LWIP_ND6_NUM_ROUTERS - 1; router_index >= 0; router_index--) { /* check if router already exists (this is a special case for 2 netifs on the same subnet - e.g. 
wifi and cable) */ if(default_router_list[router_index].neighbor_entry == &(neighbor_cache[neighbor_index])){ return router_index; } if (default_router_list[router_index].neighbor_entry == NULL) { /* remember lowest free index to create a new entry */ free_router_index = router_index; } } if (free_router_index < LWIP_ND6_NUM_ROUTERS) { default_router_list[free_router_index].neighbor_entry = &(neighbor_cache[neighbor_index]); return free_router_index; } /* Could not create a router entry. */ /* Mark neighbor entry as not-router. Entry might be useful as neighbor still. */ neighbor_cache[neighbor_index].isrouter = 0; /* router not found. */ return -1; } /** * Find the cached entry for an on-link prefix. * * @param prefix the IPv6 prefix that is on-link * @param netif the netif on which the prefix is on-link * @return the index on the prefix table, or -1 if not found */ static s8_t nd6_get_onlink_prefix(ip6_addr_t *prefix, struct netif *netif) { s8_t i; /* Look for prefix in list. */ for (i = 0; i < LWIP_ND6_NUM_PREFIXES; ++i) { if ((ip6_addr_netcmp(&(prefix_list[i].prefix), prefix)) && (prefix_list[i].netif == netif)) { return i; } } /* Entry not available. */ return -1; } /** * Creates a new entry for an on-link prefix. * * @param prefix the IPv6 prefix that is on-link * @param netif the netif on which the prefix is on-link * @return the index on the prefix table, or -1 if not created */ static s8_t nd6_new_onlink_prefix(ip6_addr_t *prefix, struct netif *netif) { s8_t i; /* Create new entry. */ for (i = 0; i < LWIP_ND6_NUM_PREFIXES; ++i) { if ((prefix_list[i].netif == NULL) || (prefix_list[i].invalidation_timer == 0)) { /* Found empty prefix entry. */ prefix_list[i].netif = netif; ip6_addr_set(&(prefix_list[i].prefix), prefix); #if LWIP_IPV6_AUTOCONFIG prefix_list[i].flags = 0; #endif /* LWIP_IPV6_AUTOCONFIG */ return i; } } /* Entry not available. */ return -1; } /** * Determine the next hop for a destination. Will determine if the * destination is on-link, else a suitable on-link router is selected. * * The last entry index is cached for fast entry search. * * @param ip6addr the destination address * @param netif the netif on which the packet will be sent * @return the neighbor cache entry for the next hop, ERR_RTE if no * suitable next hop was found, ERR_MEM if no cache entry * could be created */ static s8_t nd6_get_next_hop_entry(const ip6_addr_t *ip6addr, struct netif *netif) { #ifdef LWIP_HOOK_ND6_GET_GW const ip6_addr_t *next_hop_addr; #endif /* LWIP_HOOK_ND6_GET_GW */ s8_t i; #if LWIP_NETIF_HWADDRHINT if (netif->addr_hint != NULL) { /* per-pcb cached entry was given */ u8_t addr_hint = *(netif->addr_hint); if (addr_hint < LWIP_ND6_NUM_DESTINATIONS) { nd6_cached_destination_index = addr_hint; } } #endif /* LWIP_NETIF_HWADDRHINT */ /* Look for ip6addr in destination cache. */ if (ip6_addr_cmp(ip6addr, &(destination_cache[nd6_cached_destination_index].destination_addr))) { /* the cached entry index is the right one! */ /* do nothing. */ ND6_STATS_INC(nd6.cachehit); } else { /* Search destination cache. */ i = nd6_find_destination_cache_entry(ip6addr); if (i >= 0) { /* found destination entry. make it our new cached index. */ nd6_cached_destination_index = i; } else { /* Not found. Create a new destination entry. */ i = nd6_new_destination_cache_entry(); if (i >= 0) { /* got new destination entry. make it our new cached index. */ nd6_cached_destination_index = i; } else { /* Could not create a destination cache entry. */ return ERR_MEM; } /* Copy dest address to destination cache. 
*/ ip6_addr_set(&(destination_cache[nd6_cached_destination_index].destination_addr), ip6addr); /* Now find the next hop. is it a neighbor? */ if (ip6_addr_islinklocal(ip6addr) || nd6_is_prefix_in_netif(ip6addr, netif)) { /* Destination in local link. */ destination_cache[nd6_cached_destination_index].pmtu = netif->mtu; ip6_addr_copy(destination_cache[nd6_cached_destination_index].next_hop_addr, destination_cache[nd6_cached_destination_index].destination_addr); #ifdef LWIP_HOOK_ND6_GET_GW } else if ((next_hop_addr = LWIP_HOOK_ND6_GET_GW(netif, ip6addr)) != NULL) { /* Next hop for destination provided by hook function. */ destination_cache[nd6_cached_destination_index].pmtu = netif->mtu; ip6_addr_set(&destination_cache[nd6_cached_destination_index].next_hop_addr, next_hop_addr); #endif /* LWIP_HOOK_ND6_GET_GW */ } else { /* We need to select a router. */ i = nd6_select_router(ip6addr, netif); if (i < 0) { /* No router found. */ ip6_addr_set_any(&(destination_cache[nd6_cached_destination_index].destination_addr)); return ERR_RTE; } destination_cache[nd6_cached_destination_index].pmtu = netif->mtu; /* Start with netif mtu, correct through ICMPv6 if necessary */ ip6_addr_copy(destination_cache[nd6_cached_destination_index].next_hop_addr, default_router_list[i].neighbor_entry->next_hop_address); } } } #if LWIP_NETIF_HWADDRHINT if (netif->addr_hint != NULL) { /* per-pcb cached entry was given */ *(netif->addr_hint) = nd6_cached_destination_index; } #endif /* LWIP_NETIF_HWADDRHINT */ /* Look in neighbor cache for the next-hop address. */ if (ip6_addr_cmp(&(destination_cache[nd6_cached_destination_index].next_hop_addr), &(neighbor_cache[nd6_cached_neighbor_index].next_hop_address))) { /* Cache hit. */ /* Do nothing. */ ND6_STATS_INC(nd6.cachehit); } else { i = nd6_find_neighbor_cache_entry(&(destination_cache[nd6_cached_destination_index].next_hop_addr)); if (i >= 0) { /* Found a matching record, make it new cached entry. */ nd6_cached_neighbor_index = i; } else { /* Neighbor not in cache. Make a new entry. */ i = nd6_new_neighbor_cache_entry(); if (i >= 0) { /* got new neighbor entry. make it our new cached index. */ nd6_cached_neighbor_index = i; } else { /* Could not create a neighbor cache entry. */ return ERR_MEM; } /* Initialize fields. */ ip6_addr_copy(neighbor_cache[i].next_hop_address, destination_cache[nd6_cached_destination_index].next_hop_addr); neighbor_cache[i].isrouter = 0; neighbor_cache[i].netif = netif; neighbor_cache[i].state = ND6_INCOMPLETE; neighbor_cache[i].counter.probes_sent = 1; nd6_send_neighbor_cache_probe(&neighbor_cache[i], ND6_SEND_FLAG_MULTICAST_DEST); } } /* Reset this destination's age. */ destination_cache[nd6_cached_destination_index].age = 0; return nd6_cached_neighbor_index; } /** * Queue a packet for a neighbor. * * @param neighbor_index the index in the neighbor cache table * @param q packet to be queued * @return ERR_OK if succeeded, ERR_MEM if out of memory */ static err_t nd6_queue_packet(s8_t neighbor_index, struct pbuf *q) { err_t result = ERR_MEM; struct pbuf *p; int copy_needed = 0; #if LWIP_ND6_QUEUEING struct nd6_q_entry *new_entry, *r; #endif /* LWIP_ND6_QUEUEING */ if ((neighbor_index < 0) || (neighbor_index >= LWIP_ND6_NUM_NEIGHBORS)) { return ERR_ARG; } /* IF q includes a PBUF_REF, PBUF_POOL or PBUF_RAM, we have no choice but * to copy the whole queue into a new PBUF_RAM (see bug #11400) * PBUF_ROMs can be left as they are, since ROM must not get changed. 
*/ p = q; while (p) { if (p->type != PBUF_ROM) { copy_needed = 1; break; } p = p->next; } if (copy_needed) { /* copy the whole packet into new pbufs */ p = pbuf_alloc(PBUF_LINK, q->tot_len, PBUF_RAM); while ((p == NULL) && (neighbor_cache[neighbor_index].q != NULL)) { /* Free oldest packet (as per RFC recommendation) */ #if LWIP_ND6_QUEUEING r = neighbor_cache[neighbor_index].q; neighbor_cache[neighbor_index].q = r->next; r->next = NULL; nd6_free_q(r); #else /* LWIP_ND6_QUEUEING */ pbuf_free(neighbor_cache[neighbor_index].q); neighbor_cache[neighbor_index].q = NULL; #endif /* LWIP_ND6_QUEUEING */ p = pbuf_alloc(PBUF_LINK, q->tot_len, PBUF_RAM); } if (p != NULL) { if (pbuf_copy(p, q) != ERR_OK) { pbuf_free(p); p = NULL; } } } else { /* referencing the old pbuf is enough */ p = q; pbuf_ref(p); } /* packet was copied/ref'd? */ if (p != NULL) { /* queue packet ... */ #if LWIP_ND6_QUEUEING /* allocate a new nd6 queue entry */ new_entry = (struct nd6_q_entry *)memp_malloc(MEMP_ND6_QUEUE); if ((new_entry == NULL) && (neighbor_cache[neighbor_index].q != NULL)) { /* Free oldest packet (as per RFC recommendation) */ r = neighbor_cache[neighbor_index].q; neighbor_cache[neighbor_index].q = r->next; r->next = NULL; nd6_free_q(r); new_entry = (struct nd6_q_entry *)memp_malloc(MEMP_ND6_QUEUE); } if (new_entry != NULL) { new_entry->next = NULL; new_entry->p = p; if (neighbor_cache[neighbor_index].q != NULL) { /* queue was already existent, append the new entry to the end */ r = neighbor_cache[neighbor_index].q; while (r->next != NULL) { r = r->next; } r->next = new_entry; } else { /* queue did not exist, first item in queue */ neighbor_cache[neighbor_index].q = new_entry; } LWIP_DEBUGF(LWIP_DBG_TRACE, ("ipv6: queued packet %p on neighbor entry %"S16_F"\n", (void *)p, (s16_t)neighbor_index)); result = ERR_OK; } else { /* the pool MEMP_ND6_QUEUE is empty */ pbuf_free(p); LWIP_DEBUGF(LWIP_DBG_TRACE, ("ipv6: could not queue a copy of packet %p (out of memory)\n", (void *)p)); /* { result == ERR_MEM } through initialization */ } #else /* LWIP_ND6_QUEUEING */ /* Queue a single packet. If an older packet is already queued, free it as per RFC. 
*/ if (neighbor_cache[neighbor_index].q != NULL) { pbuf_free(neighbor_cache[neighbor_index].q); } neighbor_cache[neighbor_index].q = p; LWIP_DEBUGF(LWIP_DBG_TRACE, ("ipv6: queued packet %p on neighbor entry %"S16_F"\n", (void *)p, (s16_t)neighbor_index)); result = ERR_OK; #endif /* LWIP_ND6_QUEUEING */ } else { LWIP_DEBUGF(LWIP_DBG_TRACE, ("ipv6: could not queue a copy of packet %p (out of memory)\n", (void *)q)); /* { result == ERR_MEM } through initialization */ } return result; } #if LWIP_ND6_QUEUEING /** * Free a complete queue of nd6 q entries * * @param q a queue of nd6_q_entry to free */ static void nd6_free_q(struct nd6_q_entry *q) { struct nd6_q_entry *r; LWIP_ASSERT("q != NULL", q != NULL); LWIP_ASSERT("q->p != NULL", q->p != NULL); while (q) { r = q; q = q->next; LWIP_ASSERT("r->p != NULL", (r->p != NULL)); pbuf_free(r->p); memp_free(MEMP_ND6_QUEUE, r); } } #endif /* LWIP_ND6_QUEUEING */ /** * Send queued packets for a neighbor * * @param i the neighbor to send packets to */ static void nd6_send_q(s8_t i) { struct ip6_hdr *ip6hdr; ip6_addr_t dest; #if LWIP_ND6_QUEUEING struct nd6_q_entry *q; #endif /* LWIP_ND6_QUEUEING */ if ((i < 0) || (i >= LWIP_ND6_NUM_NEIGHBORS)) { return; } #if LWIP_ND6_QUEUEING while (neighbor_cache[i].q != NULL) { /* remember first in queue */ q = neighbor_cache[i].q; /* pop first item off the queue */ neighbor_cache[i].q = q->next; /* Get ipv6 header. */ ip6hdr = (struct ip6_hdr *)(q->p->payload); /* Create an aligned copy. */ ip6_addr_set(&dest, &(ip6hdr->dest)); /* send the queued IPv6 packet */ (neighbor_cache[i].netif)->output_ip6(neighbor_cache[i].netif, q->p, &dest); /* free the queued IP packet */ pbuf_free(q->p); /* now queue entry can be freed */ memp_free(MEMP_ND6_QUEUE, q); } #else /* LWIP_ND6_QUEUEING */ if (neighbor_cache[i].q != NULL) { /* Get ipv6 header. */ ip6hdr = (struct ip6_hdr *)(neighbor_cache[i].q->payload); /* Create an aligned copy. */ ip6_addr_set(&dest, &(ip6hdr->dest)); /* send the queued IPv6 packet */ (neighbor_cache[i].netif)->output_ip6(neighbor_cache[i].netif, neighbor_cache[i].q, &dest); /* free the queued IP packet */ pbuf_free(neighbor_cache[i].q); neighbor_cache[i].q = NULL; } #endif /* LWIP_ND6_QUEUEING */ } /** * A packet is to be transmitted to a specific IPv6 destination on a specific * interface. Check if we can find the hardware address of the next hop to use * for the packet. If so, give the hardware address to the caller, which should * use it to send the packet right away. Otherwise, enqueue the packet for * later transmission while looking up the hardware address, if possible. * * As such, this function returns one of three different possible results: * * - ERR_OK with a non-NULL 'hwaddrp': the caller should send the packet now. * - ERR_OK with a NULL 'hwaddrp': the packet has been enqueued for later. * - not ERR_OK: something went wrong; forward the error upward in the stack. * * @param netif The lwIP network interface on which the IP packet will be sent. * @param q The pbuf(s) containing the IP packet to be sent. * @param ip6addr The destination IPv6 address of the packet. * @param hwaddrp On success, filled with a pointer to a HW address or NULL (meaning * the packet has been queued). * @return * - ERR_OK on success, ERR_RTE if no route was found for the packet, * or ERR_MEM if low memory conditions prohibit sending the packet at all. */ err_t nd6_get_next_hop_addr_or_queue(struct netif *netif, struct pbuf *q, const ip6_addr_t *ip6addr, const u8_t **hwaddrp) { s8_t i; /* Get next hop record. 
*/ i = nd6_get_next_hop_entry(ip6addr, netif); if (i < 0) { /* failed to get a next hop neighbor record. */ return i; } /* Now that we have a destination record, send or queue the packet. */ if (neighbor_cache[i].state == ND6_STALE) { /* Switch to delay state. */ neighbor_cache[i].state = ND6_DELAY; neighbor_cache[i].counter.delay_time = LWIP_ND6_DELAY_FIRST_PROBE_TIME / ND6_TMR_INTERVAL; } /* @todo should we send or queue if PROBE? send for now, to let unicast NS pass. */ if ((neighbor_cache[i].state == ND6_REACHABLE) || (neighbor_cache[i].state == ND6_DELAY) || (neighbor_cache[i].state == ND6_PROBE)) { /* Tell the caller to send out the packet now. */ *hwaddrp = neighbor_cache[i].lladdr; return ERR_OK; } /* We should queue packet on this interface. */ *hwaddrp = NULL; return nd6_queue_packet(i, q); } /** * Get the Path MTU for a destination. * * @param ip6addr the destination address * @param netif the netif on which the packet will be sent * @return the Path MTU, if known, or the netif default MTU */ u16_t nd6_get_destination_mtu(const ip6_addr_t *ip6addr, struct netif *netif) { s8_t i; i = nd6_find_destination_cache_entry(ip6addr); if (i >= 0) { if (destination_cache[i].pmtu > 0) { return destination_cache[i].pmtu; } } if (netif != NULL) { return netif->mtu; } return 1280; /* Minimum MTU */ } #if LWIP_ND6_TCP_REACHABILITY_HINTS /** * Provide the Neighbor discovery process with a hint that a * destination is reachable. Called by tcp_receive when ACKs are * received or sent (as per RFC). This is useful to avoid sending * NS messages every 30 seconds. * * @param ip6addr the destination address which is know to be reachable * by an upper layer protocol (TCP) */ void nd6_reachability_hint(const ip6_addr_t *ip6addr) { s8_t i; /* Find destination in cache. */ if (ip6_addr_cmp(ip6addr, &(destination_cache[nd6_cached_destination_index].destination_addr))) { i = nd6_cached_destination_index; ND6_STATS_INC(nd6.cachehit); } else { i = nd6_find_destination_cache_entry(ip6addr); } if (i < 0) { return; } /* Find next hop neighbor in cache. */ if (ip6_addr_cmp(&(destination_cache[i].next_hop_addr), &(neighbor_cache[nd6_cached_neighbor_index].next_hop_address))) { i = nd6_cached_neighbor_index; ND6_STATS_INC(nd6.cachehit); } else { i = nd6_find_neighbor_cache_entry(&(destination_cache[i].next_hop_addr)); } if (i < 0) { return; } /* For safety: don't set as reachable if we don't have a LL address yet. Misuse protection. */ if (neighbor_cache[i].state == ND6_INCOMPLETE || neighbor_cache[i].state == ND6_NO_ENTRY) { return; } /* Set reachability state. */ neighbor_cache[i].state = ND6_REACHABLE; neighbor_cache[i].counter.reachable_time = reachable_time; } #endif /* LWIP_ND6_TCP_REACHABILITY_HINTS */ /** * Remove all prefix, neighbor_cache and router entries of the specified netif. 
* * @param netif points to a network interface */ void nd6_cleanup_netif(struct netif *netif) { u8_t i; s8_t router_index; for (i = 0; i < LWIP_ND6_NUM_PREFIXES; i++) { if (prefix_list[i].netif == netif) { prefix_list[i].netif = NULL; prefix_list[i].flags = 0; } } for (i = 0; i < LWIP_ND6_NUM_NEIGHBORS; i++) { if (neighbor_cache[i].netif == netif) { for (router_index = 0; router_index < LWIP_ND6_NUM_ROUTERS; router_index++) { if (default_router_list[router_index].neighbor_entry == &neighbor_cache[i]) { default_router_list[router_index].neighbor_entry = NULL; default_router_list[router_index].flags = 0; } } neighbor_cache[i].isrouter = 0; nd6_free_neighbor_cache_entry(i); } } } #if LWIP_IPV6_MLD /** * The state of a local IPv6 address entry is about to change. If needed, join * or leave the solicited-node multicast group for the address. * * @param netif The netif that owns the address. * @param addr_idx The index of the address. * @param new_state The new (IP6_ADDR_) state for the address. */ void nd6_adjust_mld_membership(struct netif *netif, s8_t addr_idx, u8_t new_state) { u8_t old_state, old_member, new_member; old_state = netif_ip6_addr_state(netif, addr_idx); /* Determine whether we were, and should be, a member of the solicited-node * multicast group for this address. For tentative addresses, the group is * not joined until the address enters the TENTATIVE_1 (or VALID) state. */ old_member = (old_state != IP6_ADDR_INVALID && old_state != IP6_ADDR_TENTATIVE); new_member = (new_state != IP6_ADDR_INVALID && new_state != IP6_ADDR_TENTATIVE); if (old_member != new_member) { ip6_addr_set_solicitednode(&multicast_address, netif_ip6_addr(netif, addr_idx)->addr[3]); if (new_member) { mld6_joingroup_netif(netif, &multicast_address); } else { mld6_leavegroup_netif(netif, &multicast_address); } } } #endif /* LWIP_IPV6_MLD */ #endif /* LWIP_IPV6 */
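The contract documented above for nd6_get_next_hop_addr_or_queue() (send now, queued, or error) is easiest to see from the caller's side. The sketch below is not part of nd6.c nor of lwIP's ethernet glue; it is only a minimal illustration, assuming the usual lwIP headers (lwip/nd6.h, lwip/netif.h, lwip/pbuf.h) are available, of how an output path might branch on the two ERR_OK cases. The function name is illustrative.

/*
 * Minimal usage sketch (not lwIP code): how a netif output_ip6 handler
 * might use nd6_get_next_hop_addr_or_queue() and act on the three
 * outcomes documented for it above.
 */
static err_t
example_output_ip6(struct netif *netif, struct pbuf *q, const ip6_addr_t *ip6addr)
{
  const u8_t *hwaddr;
  err_t err = nd6_get_next_hop_addr_or_queue(netif, q, ip6addr, &hwaddr);

  if (err != ERR_OK) {
    /* ERR_RTE (no route) or ERR_MEM: forward the error up the stack. */
    return err;
  }
  if (hwaddr == NULL) {
    /* Resolution still in progress: nd6 queued the pbuf and will transmit
     * it from nd6_send_q() once the Neighbor Advertisement arrives. */
    return ERR_OK;
  }
  /* hwaddr points to the resolved link-layer address of the next hop;
   * a real driver would now prepend its link-layer header and send q. */
  (void)hwaddr;
  return ERR_OK;
}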
2301_81045437/classic-platform
communication/lwip-2.0.3/src/core/ipv6/nd6.c
C
unknown
70,862
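One detail of the nd6.c listing above that is easy to misread is the sizing of the source/target link-layer address option: nd6_send_ns() rounds with ((hwaddr_len + 2) + 7) >> 3, while nd6_send_na() and nd6_send_rs() use a shift plus a remainder test. The small standalone program below is not taken from lwIP; it only checks the assumption that both idioms compute ceil((hwaddr_len + 2) / 8), the option length in 8-byte blocks, so a 6-byte Ethernet address needs exactly one block.

/* Standalone check (not from the lwIP sources): both option-length idioms
 * used in nd6.c above yield ceil((hwaddr_len + 2) / 8), i.e. the number of
 * 8-byte blocks covering the 2-byte option header plus the address. */
#include <assert.h>
#include <stdio.h>

int main(void)
{
  unsigned hwaddr_len;
  for (hwaddr_len = 1; hwaddr_len <= 16; hwaddr_len++) {
    unsigned ns_form    = ((hwaddr_len + 2) + 7) >> 3;             /* as in nd6_send_ns() */
    unsigned na_rs_form = ((hwaddr_len + 2) >> 3) +
                          (((hwaddr_len + 2) & 0x07) ? 1 : 0);     /* as in nd6_send_na()/nd6_send_rs() */
    assert(ns_form == na_rs_form);
    printf("hwaddr_len=%2u -> %u option block(s) (%u bytes)\n",
           hwaddr_len, ns_form, ns_form * 8);
  }
  return 0;
}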
/** * @file * Dynamic memory manager * * This is a lightweight replacement for the standard C library malloc(). * * If you want to use the standard C library malloc() instead, define * MEM_LIBC_MALLOC to 1 in your lwipopts.h * * To let mem_malloc() use pools (prevents fragmentation and is much faster than * a heap but might waste some memory), define MEM_USE_POOLS to 1, define * MEMP_USE_CUSTOM_POOLS to 1 and create a file "lwippools.h" that includes a list * of pools like this (more pools can be added between _START and _END): * * Define three pools with sizes 256, 512, and 1512 bytes * LWIP_MALLOC_MEMPOOL_START * LWIP_MALLOC_MEMPOOL(20, 256) * LWIP_MALLOC_MEMPOOL(10, 512) * LWIP_MALLOC_MEMPOOL(5, 1512) * LWIP_MALLOC_MEMPOOL_END */ /* * Copyright (c) 2001-2004 Swedish Institute of Computer Science. * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY * OF SUCH DAMAGE. * * This file is part of the lwIP TCP/IP stack. * * Author: Adam Dunkels <adam@sics.se> * Simon Goldschmidt * */ #include "lwip/opt.h" #include "lwip/mem.h" #include "lwip/def.h" #include "lwip/sys.h" #include "lwip/stats.h" #include "lwip/err.h" #include <string.h> #if MEM_LIBC_MALLOC #include <stdlib.h> /* for malloc()/free() */ #endif #if MEM_LIBC_MALLOC || MEM_USE_POOLS /** mem_init is not used when using pools instead of a heap or using * C library malloc(). */ void mem_init(void) { } /** mem_trim is not used when using pools instead of a heap or using * C library malloc(): we can't free part of a pool element and the stack * support mem_trim() to return a different pointer */ void* mem_trim(void *mem, mem_size_t size) { LWIP_UNUSED_ARG(size); return mem; } #endif /* MEM_LIBC_MALLOC || MEM_USE_POOLS */ #if MEM_LIBC_MALLOC /* lwIP heap implemented using C library malloc() */ /* in case C library malloc() needs extra protection, * allow these defines to be overridden. 
*/ #ifndef mem_clib_free #define mem_clib_free free #endif #ifndef mem_clib_malloc #define mem_clib_malloc malloc #endif #ifndef mem_clib_calloc #define mem_clib_calloc calloc #endif #if LWIP_STATS && MEM_STATS #define MEM_LIBC_STATSHELPER_SIZE LWIP_MEM_ALIGN_SIZE(sizeof(mem_size_t)) #else #define MEM_LIBC_STATSHELPER_SIZE 0 #endif /** * Allocate a block of memory with a minimum of 'size' bytes. * * @param size is the minimum size of the requested block in bytes. * @return pointer to allocated memory or NULL if no free memory was found. * * Note that the returned value must always be aligned (as defined by MEM_ALIGNMENT). */ void * mem_malloc(mem_size_t size) { void* ret = mem_clib_malloc(size + MEM_LIBC_STATSHELPER_SIZE); if (ret == NULL) { MEM_STATS_INC(err); } else { LWIP_ASSERT("malloc() must return aligned memory", LWIP_MEM_ALIGN(ret) == ret); #if LWIP_STATS && MEM_STATS *(mem_size_t*)ret = size; ret = (u8_t*)ret + MEM_LIBC_STATSHELPER_SIZE; MEM_STATS_INC_USED(used, size); #endif } return ret; } /** Put memory back on the heap * * @param rmem is the pointer as returned by a previous call to mem_malloc() */ void mem_free(void *rmem) { LWIP_ASSERT("rmem != NULL", (rmem != NULL)); LWIP_ASSERT("rmem == MEM_ALIGN(rmem)", (rmem == LWIP_MEM_ALIGN(rmem))); #if LWIP_STATS && MEM_STATS rmem = (u8_t*)rmem - MEM_LIBC_STATSHELPER_SIZE; MEM_STATS_DEC_USED(used, *(mem_size_t*)rmem); #endif mem_clib_free(rmem); } #elif MEM_USE_POOLS /* lwIP heap implemented with different sized pools */ /** * Allocate memory: determine the smallest pool that is big enough * to contain an element of 'size' and get an element from that pool. * * @param size the size in bytes of the memory needed * @return a pointer to the allocated memory or NULL if the pool is empty */ void * mem_malloc(mem_size_t size) { void *ret; struct memp_malloc_helper *element = NULL; memp_t poolnr; mem_size_t required_size = size + LWIP_MEM_ALIGN_SIZE(sizeof(struct memp_malloc_helper)); for (poolnr = MEMP_POOL_FIRST; poolnr <= MEMP_POOL_LAST; poolnr = (memp_t)(poolnr + 1)) { /* is this pool big enough to hold an element of the required size plus a struct memp_malloc_helper that saves the pool this element came from? */ if (required_size <= memp_pools[poolnr]->size) { element = (struct memp_malloc_helper*)memp_malloc(poolnr); if (element == NULL) { /* No need to DEBUGF or ASSERT: This error is already taken care of in memp.c */ #if MEM_USE_POOLS_TRY_BIGGER_POOL /** Try a bigger pool if this one is empty! */ if (poolnr < MEMP_POOL_LAST) { continue; } #endif /* MEM_USE_POOLS_TRY_BIGGER_POOL */ MEM_STATS_INC(err); return NULL; } break; } } if (poolnr > MEMP_POOL_LAST) { LWIP_ASSERT("mem_malloc(): no pool is that big!", 0); MEM_STATS_INC(err); return NULL; } /* save the pool number this element came from */ element->poolnr = poolnr; /* and return a pointer to the memory directly after the struct memp_malloc_helper */ ret = (u8_t*)element + LWIP_MEM_ALIGN_SIZE(sizeof(struct memp_malloc_helper)); #if MEMP_OVERFLOW_CHECK || (LWIP_STATS && MEM_STATS) /* truncating to u16_t is safe because struct memp_desc::size is u16_t */ element->size = (u16_t)size; MEM_STATS_INC_USED(used, element->size); #endif /* MEMP_OVERFLOW_CHECK || (LWIP_STATS && MEM_STATS) */ #if MEMP_OVERFLOW_CHECK /* initialize unused memory (diff between requested size and selected pool's size) */ memset((u8_t*)ret + size, 0xcd, memp_pools[poolnr]->size - size); #endif /* MEMP_OVERFLOW_CHECK */ return ret; } /** * Free memory previously allocated by mem_malloc. 
Loads the pool number * and calls memp_free with that pool number to put the element back into * its pool * * @param rmem the memory element to free */ void mem_free(void *rmem) { struct memp_malloc_helper *hmem; LWIP_ASSERT("rmem != NULL", (rmem != NULL)); LWIP_ASSERT("rmem == MEM_ALIGN(rmem)", (rmem == LWIP_MEM_ALIGN(rmem))); /* get the original struct memp_malloc_helper */ /* cast through void* to get rid of alignment warnings */ hmem = (struct memp_malloc_helper*)(void*)((u8_t*)rmem - LWIP_MEM_ALIGN_SIZE(sizeof(struct memp_malloc_helper))); LWIP_ASSERT("hmem != NULL", (hmem != NULL)); LWIP_ASSERT("hmem == MEM_ALIGN(hmem)", (hmem == LWIP_MEM_ALIGN(hmem))); LWIP_ASSERT("hmem->poolnr < MEMP_MAX", (hmem->poolnr < MEMP_MAX)); MEM_STATS_DEC_USED(used, hmem->size); #if MEMP_OVERFLOW_CHECK { u16_t i; LWIP_ASSERT("MEM_USE_POOLS: invalid chunk size", hmem->size <= memp_pools[hmem->poolnr]->size); /* check that unused memory remained untouched (diff between requested size and selected pool's size) */ for (i = hmem->size; i < memp_pools[hmem->poolnr]->size; i++) { u8_t data = *((u8_t*)rmem + i); LWIP_ASSERT("MEM_USE_POOLS: mem overflow detected", data == 0xcd); } } #endif /* MEMP_OVERFLOW_CHECK */ /* and put it in the pool we saved earlier */ memp_free(hmem->poolnr, hmem); } #else /* MEM_USE_POOLS */ /* lwIP replacement for your libc malloc() */ /** * The heap is made up as a list of structs of this type. * This does not have to be aligned since for getting its size, * we only use the macro SIZEOF_STRUCT_MEM, which automatically aligns. */ struct mem { /** index (-> ram[next]) of the next struct */ mem_size_t next; /** index (-> ram[prev]) of the previous struct */ mem_size_t prev; /** 1: this area is used; 0: this area is unused */ u8_t used; }; /** All allocated blocks will be MIN_SIZE bytes big, at least! * MIN_SIZE can be overridden to suit your needs. Smaller values save space, * larger values could prevent too small blocks to fragment the RAM too much. */ #ifndef MIN_SIZE #define MIN_SIZE 12 #endif /* MIN_SIZE */ /* some alignment macros: we define them here for better source code layout */ #define MIN_SIZE_ALIGNED LWIP_MEM_ALIGN_SIZE(MIN_SIZE) #define SIZEOF_STRUCT_MEM LWIP_MEM_ALIGN_SIZE(sizeof(struct mem)) #define MEM_SIZE_ALIGNED LWIP_MEM_ALIGN_SIZE(MEM_SIZE) /** If you want to relocate the heap to external memory, simply define * LWIP_RAM_HEAP_POINTER as a void-pointer to that location. * If so, make sure the memory at that location is big enough (see below on * how that space is calculated). */ #ifndef LWIP_RAM_HEAP_POINTER /** the heap. we need one struct mem at the end and some room for alignment */ LWIP_DECLARE_MEMORY_ALIGNED(ram_heap, MEM_SIZE_ALIGNED + (2U*SIZEOF_STRUCT_MEM)); #define LWIP_RAM_HEAP_POINTER ram_heap #endif /* LWIP_RAM_HEAP_POINTER */ /** pointer to the heap (ram_heap): for alignment, ram is now a pointer instead of an array */ static u8_t *ram; /** the last entry, always unused! */ static struct mem *ram_end; /** pointer to the lowest free block, this is used for faster search */ static struct mem *lfree; /** concurrent access protection */ #if !NO_SYS static sys_mutex_t mem_mutex; #endif #if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT static volatile u8_t mem_free_count; /* Allow mem_free from other (e.g. 
interrupt) context */ #define LWIP_MEM_FREE_DECL_PROTECT() SYS_ARCH_DECL_PROTECT(lev_free) #define LWIP_MEM_FREE_PROTECT() SYS_ARCH_PROTECT(lev_free) #define LWIP_MEM_FREE_UNPROTECT() SYS_ARCH_UNPROTECT(lev_free) #define LWIP_MEM_ALLOC_DECL_PROTECT() SYS_ARCH_DECL_PROTECT(lev_alloc) #define LWIP_MEM_ALLOC_PROTECT() SYS_ARCH_PROTECT(lev_alloc) #define LWIP_MEM_ALLOC_UNPROTECT() SYS_ARCH_UNPROTECT(lev_alloc) #else /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */ /* Protect the heap only by using a semaphore */ #define LWIP_MEM_FREE_DECL_PROTECT() #define LWIP_MEM_FREE_PROTECT() sys_mutex_lock(&mem_mutex) #define LWIP_MEM_FREE_UNPROTECT() sys_mutex_unlock(&mem_mutex) /* mem_malloc is protected using semaphore AND LWIP_MEM_ALLOC_PROTECT */ #define LWIP_MEM_ALLOC_DECL_PROTECT() #define LWIP_MEM_ALLOC_PROTECT() #define LWIP_MEM_ALLOC_UNPROTECT() #endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */ /** * "Plug holes" by combining adjacent empty struct mems. * After this function is through, there should not exist * one empty struct mem pointing to another empty struct mem. * * @param mem this points to a struct mem which just has been freed * @internal this function is only called by mem_free() and mem_trim() * * This assumes access to the heap is protected by the calling function * already. */ static void plug_holes(struct mem *mem) { struct mem *nmem; struct mem *pmem; LWIP_ASSERT("plug_holes: mem >= ram", (u8_t *)mem >= ram); LWIP_ASSERT("plug_holes: mem < ram_end", (u8_t *)mem < (u8_t *)ram_end); LWIP_ASSERT("plug_holes: mem->used == 0", mem->used == 0); /* plug hole forward */ LWIP_ASSERT("plug_holes: mem->next <= MEM_SIZE_ALIGNED", mem->next <= MEM_SIZE_ALIGNED); nmem = (struct mem *)(void *)&ram[mem->next]; if (mem != nmem && nmem->used == 0 && (u8_t *)nmem != (u8_t *)ram_end) { /* if mem->next is unused and not end of ram, combine mem and mem->next */ if (lfree == nmem) { lfree = mem; } mem->next = nmem->next; ((struct mem *)(void *)&ram[nmem->next])->prev = (mem_size_t)((u8_t *)mem - ram); } /* plug hole backward */ pmem = (struct mem *)(void *)&ram[mem->prev]; if (pmem != mem && pmem->used == 0) { /* if mem->prev is unused, combine mem and mem->prev */ if (lfree == mem) { lfree = pmem; } pmem->next = mem->next; ((struct mem *)(void *)&ram[mem->next])->prev = (mem_size_t)((u8_t *)pmem - ram); } } /** * Zero the heap and initialize start, end and lowest-free */ void mem_init(void) { struct mem *mem; LWIP_ASSERT("Sanity check alignment", (SIZEOF_STRUCT_MEM & (MEM_ALIGNMENT-1)) == 0); /* align the heap */ ram = (u8_t *)LWIP_MEM_ALIGN(LWIP_RAM_HEAP_POINTER); /* initialize the start of the heap */ mem = (struct mem *)(void *)ram; mem->next = MEM_SIZE_ALIGNED; mem->prev = 0; mem->used = 0; /* initialize the end of the heap */ ram_end = (struct mem *)(void *)&ram[MEM_SIZE_ALIGNED]; ram_end->used = 1; ram_end->next = MEM_SIZE_ALIGNED; ram_end->prev = MEM_SIZE_ALIGNED; /* initialize the lowest-free pointer to the start of the heap */ lfree = (struct mem *)(void *)ram; MEM_STATS_AVAIL(avail, MEM_SIZE_ALIGNED); if (sys_mutex_new(&mem_mutex) != ERR_OK) { LWIP_ASSERT("failed to create mem_mutex", 0); } } /** * Put a struct mem back on the heap * * @param rmem is the data portion of a struct mem as returned by a previous * call to mem_malloc() */ void mem_free(void *rmem) { struct mem *mem; LWIP_MEM_FREE_DECL_PROTECT(); if (rmem == NULL) { LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, ("mem_free(p == NULL) was called.\n")); return; } LWIP_ASSERT("mem_free: sanity check alignment", 
(((mem_ptr_t)rmem) & (MEM_ALIGNMENT-1)) == 0); LWIP_ASSERT("mem_free: legal memory", (u8_t *)rmem >= (u8_t *)ram && (u8_t *)rmem < (u8_t *)ram_end); if ((u8_t *)rmem < (u8_t *)ram || (u8_t *)rmem >= (u8_t *)ram_end) { SYS_ARCH_DECL_PROTECT(lev); LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_LEVEL_SEVERE, ("mem_free: illegal memory\n")); /* protect mem stats from concurrent access */ SYS_ARCH_PROTECT(lev); MEM_STATS_INC(illegal); SYS_ARCH_UNPROTECT(lev); return; } /* protect the heap from concurrent access */ LWIP_MEM_FREE_PROTECT(); /* Get the corresponding struct mem ... */ /* cast through void* to get rid of alignment warnings */ mem = (struct mem *)(void *)((u8_t *)rmem - SIZEOF_STRUCT_MEM); /* ... which has to be in a used state ... */ LWIP_ASSERT("mem_free: mem->used", mem->used); /* ... and is now unused. */ mem->used = 0; if (mem < lfree) { /* the newly freed struct is now the lowest */ lfree = mem; } MEM_STATS_DEC_USED(used, mem->next - (mem_size_t)(((u8_t *)mem - ram))); /* finally, see if prev or next are free also */ plug_holes(mem); #if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT mem_free_count = 1; #endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */ LWIP_MEM_FREE_UNPROTECT(); } /** * Shrink memory returned by mem_malloc(). * * @param rmem pointer to memory allocated by mem_malloc the is to be shrinked * @param newsize required size after shrinking (needs to be smaller than or * equal to the previous size) * @return for compatibility reasons: is always == rmem, at the moment * or NULL if newsize is > old size, in which case rmem is NOT touched * or freed! */ void * mem_trim(void *rmem, mem_size_t newsize) { mem_size_t size; mem_size_t ptr, ptr2; struct mem *mem, *mem2; /* use the FREE_PROTECT here: it protects with sem OR SYS_ARCH_PROTECT */ LWIP_MEM_FREE_DECL_PROTECT(); /* Expand the size of the allocated memory region so that we can adjust for alignment. */ newsize = LWIP_MEM_ALIGN_SIZE(newsize); if (newsize < MIN_SIZE_ALIGNED) { /* every data block must be at least MIN_SIZE_ALIGNED long */ newsize = MIN_SIZE_ALIGNED; } if (newsize > MEM_SIZE_ALIGNED) { return NULL; } LWIP_ASSERT("mem_trim: legal memory", (u8_t *)rmem >= (u8_t *)ram && (u8_t *)rmem < (u8_t *)ram_end); if ((u8_t *)rmem < (u8_t *)ram || (u8_t *)rmem >= (u8_t *)ram_end) { SYS_ARCH_DECL_PROTECT(lev); LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_LEVEL_SEVERE, ("mem_trim: illegal memory\n")); /* protect mem stats from concurrent access */ SYS_ARCH_PROTECT(lev); MEM_STATS_INC(illegal); SYS_ARCH_UNPROTECT(lev); return rmem; } /* Get the corresponding struct mem ... */ /* cast through void* to get rid of alignment warnings */ mem = (struct mem *)(void *)((u8_t *)rmem - SIZEOF_STRUCT_MEM); /* ... 
and its offset pointer */ ptr = (mem_size_t)((u8_t *)mem - ram); size = mem->next - ptr - SIZEOF_STRUCT_MEM; LWIP_ASSERT("mem_trim can only shrink memory", newsize <= size); if (newsize > size) { /* not supported */ return NULL; } if (newsize == size) { /* No change in size, simply return */ return rmem; } /* protect the heap from concurrent access */ LWIP_MEM_FREE_PROTECT(); mem2 = (struct mem *)(void *)&ram[mem->next]; if (mem2->used == 0) { /* The next struct is unused, we can simply move it at little */ mem_size_t next; /* remember the old next pointer */ next = mem2->next; /* create new struct mem which is moved directly after the shrinked mem */ ptr2 = ptr + SIZEOF_STRUCT_MEM + newsize; if (lfree == mem2) { lfree = (struct mem *)(void *)&ram[ptr2]; } mem2 = (struct mem *)(void *)&ram[ptr2]; mem2->used = 0; /* restore the next pointer */ mem2->next = next; /* link it back to mem */ mem2->prev = ptr; /* link mem to it */ mem->next = ptr2; /* last thing to restore linked list: as we have moved mem2, * let 'mem2->next->prev' point to mem2 again. but only if mem2->next is not * the end of the heap */ if (mem2->next != MEM_SIZE_ALIGNED) { ((struct mem *)(void *)&ram[mem2->next])->prev = ptr2; } MEM_STATS_DEC_USED(used, (size - newsize)); /* no need to plug holes, we've already done that */ } else if (newsize + SIZEOF_STRUCT_MEM + MIN_SIZE_ALIGNED <= size) { /* Next struct is used but there's room for another struct mem with * at least MIN_SIZE_ALIGNED of data. * Old size ('size') must be big enough to contain at least 'newsize' plus a struct mem * ('SIZEOF_STRUCT_MEM') with some data ('MIN_SIZE_ALIGNED'). * @todo we could leave out MIN_SIZE_ALIGNED. We would create an empty * region that couldn't hold data, but when mem->next gets freed, * the 2 regions would be combined, resulting in more free memory */ ptr2 = ptr + SIZEOF_STRUCT_MEM + newsize; mem2 = (struct mem *)(void *)&ram[ptr2]; if (mem2 < lfree) { lfree = mem2; } mem2->used = 0; mem2->next = mem->next; mem2->prev = ptr; mem->next = ptr2; if (mem2->next != MEM_SIZE_ALIGNED) { ((struct mem *)(void *)&ram[mem2->next])->prev = ptr2; } MEM_STATS_DEC_USED(used, (size - newsize)); /* the original mem->next is used, so no need to plug holes! */ } /* else { next struct mem is used but size between mem and mem2 is not big enough to create another struct mem -> don't do anyhting. -> the remaining space stays unused since it is too small } */ #if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT mem_free_count = 1; #endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */ LWIP_MEM_FREE_UNPROTECT(); return rmem; } /** * Allocate a block of memory with a minimum of 'size' bytes. * * @param size is the minimum size of the requested block in bytes. * @return pointer to allocated memory or NULL if no free memory was found. * * Note that the returned value will always be aligned (as defined by MEM_ALIGNMENT). */ void * mem_malloc(mem_size_t size) { mem_size_t ptr, ptr2; struct mem *mem, *mem2; #if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT u8_t local_mem_free_count = 0; #endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */ LWIP_MEM_ALLOC_DECL_PROTECT(); if (size == 0) { return NULL; } /* Expand the size of the allocated memory region so that we can adjust for alignment. 
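 * For example, assuming MEM_ALIGNMENT == 4 (the real value comes from
 * lwipopts.h), LWIP_MEM_ALIGN_SIZE(13) rounds a 13-byte request up to 16
 * bytes; SIZEOF_STRUCT_MEM of bookkeeping overhead is added on top of that.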
*/ size = LWIP_MEM_ALIGN_SIZE(size); if (size < MIN_SIZE_ALIGNED) { /* every data block must be at least MIN_SIZE_ALIGNED long */ size = MIN_SIZE_ALIGNED; } if (size > MEM_SIZE_ALIGNED) { return NULL; } /* protect the heap from concurrent access */ sys_mutex_lock(&mem_mutex); LWIP_MEM_ALLOC_PROTECT(); #if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT /* run as long as a mem_free disturbed mem_malloc or mem_trim */ do { local_mem_free_count = 0; #endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */ /* Scan through the heap searching for a free block that is big enough, * beginning with the lowest free block. */ for (ptr = (mem_size_t)((u8_t *)lfree - ram); ptr < MEM_SIZE_ALIGNED - size; ptr = ((struct mem *)(void *)&ram[ptr])->next) { mem = (struct mem *)(void *)&ram[ptr]; #if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT mem_free_count = 0; LWIP_MEM_ALLOC_UNPROTECT(); /* allow mem_free or mem_trim to run */ LWIP_MEM_ALLOC_PROTECT(); if (mem_free_count != 0) { /* If mem_free or mem_trim have run, we have to restart since they could have altered our current struct mem. */ local_mem_free_count = 1; break; } #endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */ if ((!mem->used) && (mem->next - (ptr + SIZEOF_STRUCT_MEM)) >= size) { /* mem is not used and at least perfect fit is possible: * mem->next - (ptr + SIZEOF_STRUCT_MEM) gives us the 'user data size' of mem */ if (mem->next - (ptr + SIZEOF_STRUCT_MEM) >= (size + SIZEOF_STRUCT_MEM + MIN_SIZE_ALIGNED)) { /* (in addition to the above, we test if another struct mem (SIZEOF_STRUCT_MEM) containing * at least MIN_SIZE_ALIGNED of data also fits in the 'user data space' of 'mem') * -> split large block, create empty remainder, * remainder must be large enough to contain MIN_SIZE_ALIGNED data: if * mem->next - (ptr + (2*SIZEOF_STRUCT_MEM)) == size, * struct mem would fit in but no data between mem2 and mem2->next * @todo we could leave out MIN_SIZE_ALIGNED. We would create an empty * region that couldn't hold data, but when mem->next gets freed, * the 2 regions would be combined, resulting in more free memory */ ptr2 = ptr + SIZEOF_STRUCT_MEM + size; /* create mem2 struct */ mem2 = (struct mem *)(void *)&ram[ptr2]; mem2->used = 0; mem2->next = mem->next; mem2->prev = ptr; /* and insert it between mem and mem->next */ mem->next = ptr2; mem->used = 1; if (mem2->next != MEM_SIZE_ALIGNED) { ((struct mem *)(void *)&ram[mem2->next])->prev = ptr2; } MEM_STATS_INC_USED(used, (size + SIZEOF_STRUCT_MEM)); } else { /* (a mem2 struct does no fit into the user data space of mem and mem->next will always * be used at this point: if not we have 2 unused structs in a row, plug_holes should have * take care of this). * -> near fit or exact fit: do not split, no mem2 creation * also can't move mem->next directly behind mem, since mem->next * will always be used at this point! */ mem->used = 1; MEM_STATS_INC_USED(used, mem->next - (mem_size_t)((u8_t *)mem - ram)); } #if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT mem_malloc_adjust_lfree: #endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */ if (mem == lfree) { struct mem *cur = lfree; /* Find next free block after mem and update lowest free pointer */ while (cur->used && cur != ram_end) { #if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT mem_free_count = 0; LWIP_MEM_ALLOC_UNPROTECT(); /* prevent high interrupt latency... */ LWIP_MEM_ALLOC_PROTECT(); if (mem_free_count != 0) { /* If mem_free or mem_trim have run, we have to restart since they could have altered our current struct mem or lfree. 
*/ goto mem_malloc_adjust_lfree; } #endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */ cur = (struct mem *)(void *)&ram[cur->next]; } lfree = cur; LWIP_ASSERT("mem_malloc: !lfree->used", ((lfree == ram_end) || (!lfree->used))); } LWIP_MEM_ALLOC_UNPROTECT(); sys_mutex_unlock(&mem_mutex); LWIP_ASSERT("mem_malloc: allocated memory not above ram_end.", (mem_ptr_t)mem + SIZEOF_STRUCT_MEM + size <= (mem_ptr_t)ram_end); LWIP_ASSERT("mem_malloc: allocated memory properly aligned.", ((mem_ptr_t)mem + SIZEOF_STRUCT_MEM) % MEM_ALIGNMENT == 0); LWIP_ASSERT("mem_malloc: sanity check alignment", (((mem_ptr_t)mem) & (MEM_ALIGNMENT-1)) == 0); return (u8_t *)mem + SIZEOF_STRUCT_MEM; } } #if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT /* if we got interrupted by a mem_free, try again */ } while (local_mem_free_count != 0); #endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */ LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("mem_malloc: could not allocate %"S16_F" bytes\n", (s16_t)size)); MEM_STATS_INC(err); LWIP_MEM_ALLOC_UNPROTECT(); sys_mutex_unlock(&mem_mutex); return NULL; } #endif /* MEM_USE_POOLS */ #if MEM_LIBC_MALLOC && (!LWIP_STATS || !MEM_STATS) void * mem_calloc(mem_size_t count, mem_size_t size) { return mem_clib_calloc(count, size); } #else /* MEM_LIBC_MALLOC && (!LWIP_STATS || !MEM_STATS) */ /** * Contiguously allocates enough space for count objects that are size bytes * of memory each and returns a pointer to the allocated memory. * * The allocated memory is filled with bytes of value zero. * * @param count number of objects to allocate * @param size size of the objects to allocate * @return pointer to allocated memory / NULL pointer if there is an error */ void * mem_calloc(mem_size_t count, mem_size_t size) { void *p; /* allocate 'count' objects of size 'size' */ p = mem_malloc(count * size); if (p) { /* zero the memory */ memset(p, 0, (size_t)count * (size_t)size); } return p; } #endif /* MEM_LIBC_MALLOC && (!LWIP_STATS || !MEM_STATS) */
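/* Illustrative sketch (not part of lwIP): the intended call pattern for the
 * heap API above. It assumes mem_init() has already been run (normally via
 * lwip_init()) and that the 128-byte request fits in MEM_SIZE; the function
 * name is invented for the example. */
#include "lwip/mem.h"

static void example_mem_usage(void)
{
  /* mem_malloc() returns MEM_ALIGNMENT-aligned memory, or NULL when the heap
     (or the selected pool) is exhausted */
  u8_t *buf = (u8_t *)mem_malloc(128);
  if (buf != NULL) {
    /* mem_trim() only shrinks in place; the returned pointer equals buf here */
    buf = (u8_t *)mem_trim(buf, 64);
    /* ... fill and use buf ... */
    mem_free(buf);
  }
}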
2301_81045437/classic-platform
communication/lwip-2.0.3/src/core/mem.c
C
unknown
27,808
/** * @file * Dynamic pool memory manager * * lwIP has dedicated pools for many structures (netconn, protocol control blocks, * packet buffers, ...). All these pools are managed here. * * @defgroup mempool Memory pools * @ingroup infrastructure * Custom memory pools */ /* * Copyright (c) 2001-2004 Swedish Institute of Computer Science. * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY * OF SUCH DAMAGE. * * This file is part of the lwIP TCP/IP stack. * * Author: Adam Dunkels <adam@sics.se> * */ #include "lwip/opt.h" #include "lwip/memp.h" #include "lwip/sys.h" #include "lwip/stats.h" #include <string.h> /* Make sure we include everything we need for size calculation required by memp_std.h */ #include "lwip/pbuf.h" #include "lwip/raw.h" #include "lwip/udp.h" #include "lwip/tcp.h" #include "lwip/priv/tcp_priv.h" #include "lwip/ip4_frag.h" #include "lwip/netbuf.h" #include "lwip/api.h" #include "lwip/priv/tcpip_priv.h" #include "lwip/priv/api_msg.h" #include "lwip/sockets.h" #include "lwip/netifapi.h" #include "lwip/etharp.h" #include "lwip/igmp.h" #include "lwip/timeouts.h" /* needed by default MEMP_NUM_SYS_TIMEOUT */ #include "netif/ppp/ppp_opts.h" #include "lwip/netdb.h" #include "lwip/dns.h" #include "lwip/priv/nd6_priv.h" #include "lwip/ip6_frag.h" #include "lwip/mld6.h" #define LWIP_MEMPOOL(name,num,size,desc) LWIP_MEMPOOL_DECLARE(name,num,size,desc) #include "lwip/priv/memp_std.h" const struct memp_desc* const memp_pools[MEMP_MAX] = { #define LWIP_MEMPOOL(name,num,size,desc) &memp_ ## name, #include "lwip/priv/memp_std.h" }; #ifdef LWIP_HOOK_FILENAME #include LWIP_HOOK_FILENAME #endif #if MEMP_MEM_MALLOC && MEMP_OVERFLOW_CHECK >= 2 #undef MEMP_OVERFLOW_CHECK /* MEMP_OVERFLOW_CHECK >= 2 does not work with MEMP_MEM_MALLOC, use 1 instead */ #define MEMP_OVERFLOW_CHECK 1 #endif #if MEMP_SANITY_CHECK && !MEMP_MEM_MALLOC /** * Check that memp-lists don't form a circle, using "Floyd's cycle-finding algorithm". */ static int memp_sanity(const struct memp_desc *desc) { struct memp *t, *h; t = *desc->tab; if (t != NULL) { for (h = t->next; (t != NULL) && (h != NULL); t = t->next, h = ((h->next != NULL) ? 
h->next->next : NULL)) { if (t == h) { return 0; } } } return 1; } #endif /* MEMP_SANITY_CHECK && !MEMP_MEM_MALLOC */ #if MEMP_OVERFLOW_CHECK /** * Check if a memp element was victim of an overflow * (e.g. the restricted area after it has been altered) * * @param p the memp element to check * @param desc the pool p comes from */ static void memp_overflow_check_element_overflow(struct memp *p, const struct memp_desc *desc) { #if MEMP_SANITY_REGION_AFTER_ALIGNED > 0 u16_t k; u8_t *m; m = (u8_t*)p + MEMP_SIZE + desc->size; for (k = 0; k < MEMP_SANITY_REGION_AFTER_ALIGNED; k++) { if (m[k] != 0xcd) { char errstr[128] = "detected memp overflow in pool "; strcat(errstr, desc->desc); LWIP_ASSERT(errstr, 0); } } #else /* MEMP_SANITY_REGION_AFTER_ALIGNED > 0 */ LWIP_UNUSED_ARG(p); LWIP_UNUSED_ARG(desc); #endif /* MEMP_SANITY_REGION_AFTER_ALIGNED > 0 */ } /** * Check if a memp element was victim of an underflow * (e.g. the restricted area before it has been altered) * * @param p the memp element to check * @param desc the pool p comes from */ static void memp_overflow_check_element_underflow(struct memp *p, const struct memp_desc *desc) { #if MEMP_SANITY_REGION_BEFORE_ALIGNED > 0 u16_t k; u8_t *m; m = (u8_t*)p + MEMP_SIZE - MEMP_SANITY_REGION_BEFORE_ALIGNED; for (k = 0; k < MEMP_SANITY_REGION_BEFORE_ALIGNED; k++) { if (m[k] != 0xcd) { char errstr[128] = "detected memp underflow in pool "; strcat(errstr, desc->desc); LWIP_ASSERT(errstr, 0); } } #else /* MEMP_SANITY_REGION_BEFORE_ALIGNED > 0 */ LWIP_UNUSED_ARG(p); LWIP_UNUSED_ARG(desc); #endif /* MEMP_SANITY_REGION_BEFORE_ALIGNED > 0 */ } /** * Initialize the restricted area of on memp element. */ static void memp_overflow_init_element(struct memp *p, const struct memp_desc *desc) { #if MEMP_SANITY_REGION_BEFORE_ALIGNED > 0 || MEMP_SANITY_REGION_AFTER_ALIGNED > 0 u8_t *m; #if MEMP_SANITY_REGION_BEFORE_ALIGNED > 0 m = (u8_t*)p + MEMP_SIZE - MEMP_SANITY_REGION_BEFORE_ALIGNED; memset(m, 0xcd, MEMP_SANITY_REGION_BEFORE_ALIGNED); #endif #if MEMP_SANITY_REGION_AFTER_ALIGNED > 0 m = (u8_t*)p + MEMP_SIZE + desc->size; memset(m, 0xcd, MEMP_SANITY_REGION_AFTER_ALIGNED); #endif #else /* MEMP_SANITY_REGION_BEFORE_ALIGNED > 0 || MEMP_SANITY_REGION_AFTER_ALIGNED > 0 */ LWIP_UNUSED_ARG(p); LWIP_UNUSED_ARG(desc); #endif /* MEMP_SANITY_REGION_BEFORE_ALIGNED > 0 || MEMP_SANITY_REGION_AFTER_ALIGNED > 0 */ } #if MEMP_OVERFLOW_CHECK >= 2 /** * Do an overflow check for all elements in every pool. * * @see memp_overflow_check_element for a description of the check */ static void memp_overflow_check_all(void) { u16_t i, j; struct memp *p; SYS_ARCH_DECL_PROTECT(old_level); SYS_ARCH_PROTECT(old_level); for (i = 0; i < MEMP_MAX; ++i) { p = (struct memp*)LWIP_MEM_ALIGN(memp_pools[i]->base); for (j = 0; j < memp_pools[i]->num; ++j) { memp_overflow_check_element_overflow(p, memp_pools[i]); memp_overflow_check_element_underflow(p, memp_pools[i]); p = LWIP_ALIGNMENT_CAST(struct memp*, ((u8_t*)p + MEMP_SIZE + memp_pools[i]->size + MEMP_SANITY_REGION_AFTER_ALIGNED)); } } SYS_ARCH_UNPROTECT(old_level); } #endif /* MEMP_OVERFLOW_CHECK >= 2 */ #endif /* MEMP_OVERFLOW_CHECK */ /** * Initialize custom memory pool. 
* Related functions: memp_malloc_pool, memp_free_pool * * @param desc pool to initialize */ void memp_init_pool(const struct memp_desc *desc) { #if MEMP_MEM_MALLOC LWIP_UNUSED_ARG(desc); #else int i; struct memp *memp; *desc->tab = NULL; memp = (struct memp*)LWIP_MEM_ALIGN(desc->base); /* create a linked list of memp elements */ for (i = 0; i < desc->num; ++i) { memp->next = *desc->tab; *desc->tab = memp; #if MEMP_OVERFLOW_CHECK memp_overflow_init_element(memp, desc); #endif /* MEMP_OVERFLOW_CHECK */ /* cast through void* to get rid of alignment warnings */ memp = (struct memp *)(void *)((u8_t *)memp + MEMP_SIZE + desc->size #if MEMP_OVERFLOW_CHECK + MEMP_SANITY_REGION_AFTER_ALIGNED #endif ); } #if MEMP_STATS desc->stats->avail = desc->num; #endif /* MEMP_STATS */ #endif /* !MEMP_MEM_MALLOC */ #if MEMP_STATS && (defined(LWIP_DEBUG) || LWIP_STATS_DISPLAY) desc->stats->name = desc->desc; #endif /* MEMP_STATS && (defined(LWIP_DEBUG) || LWIP_STATS_DISPLAY) */ } /** * Initializes lwIP built-in pools. * Related functions: memp_malloc, memp_free * * Carves out memp_memory into linked lists for each pool-type. */ void memp_init(void) { u16_t i; /* for every pool: */ for (i = 0; i < LWIP_ARRAYSIZE(memp_pools); i++) { memp_init_pool(memp_pools[i]); #if LWIP_STATS && MEMP_STATS lwip_stats.memp[i] = memp_pools[i]->stats; #endif } #if MEMP_OVERFLOW_CHECK >= 2 /* check everything a first time to see if it worked */ memp_overflow_check_all(); #endif /* MEMP_OVERFLOW_CHECK >= 2 */ } static void* #if !MEMP_OVERFLOW_CHECK do_memp_malloc_pool(const struct memp_desc *desc) #else do_memp_malloc_pool_fn(const struct memp_desc *desc, const char* file, const int line) #endif { struct memp *memp; SYS_ARCH_DECL_PROTECT(old_level); #if MEMP_MEM_MALLOC memp = (struct memp *)mem_malloc(MEMP_SIZE + MEMP_ALIGN_SIZE(desc->size)); SYS_ARCH_PROTECT(old_level); #else /* MEMP_MEM_MALLOC */ SYS_ARCH_PROTECT(old_level); memp = *desc->tab; #endif /* MEMP_MEM_MALLOC */ if (memp != NULL) { #if !MEMP_MEM_MALLOC #if MEMP_OVERFLOW_CHECK == 1 memp_overflow_check_element_overflow(memp, desc); memp_overflow_check_element_underflow(memp, desc); #endif /* MEMP_OVERFLOW_CHECK */ *desc->tab = memp->next; #if MEMP_OVERFLOW_CHECK memp->next = NULL; #endif /* MEMP_OVERFLOW_CHECK */ #endif /* !MEMP_MEM_MALLOC */ #if MEMP_OVERFLOW_CHECK memp->file = file; memp->line = line; #if MEMP_MEM_MALLOC memp_overflow_init_element(memp, desc); #endif /* MEMP_MEM_MALLOC */ #endif /* MEMP_OVERFLOW_CHECK */ LWIP_ASSERT("memp_malloc: memp properly aligned", ((mem_ptr_t)memp % MEM_ALIGNMENT) == 0); #if MEMP_STATS desc->stats->used++; if (desc->stats->used > desc->stats->max) { desc->stats->max = desc->stats->used; } #endif SYS_ARCH_UNPROTECT(old_level); /* cast through u8_t* to get rid of alignment warnings */ return ((u8_t*)memp + MEMP_SIZE); } else { LWIP_DEBUGF(MEMP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("memp_malloc: out of memory in pool %s\n", desc->desc)); #if MEMP_STATS desc->stats->err++; #endif } SYS_ARCH_UNPROTECT(old_level); return NULL; } /** * Get an element from a custom pool. 
* * @param desc the pool to get an element from * * @return a pointer to the allocated memory or a NULL pointer on error */ void * #if !MEMP_OVERFLOW_CHECK memp_malloc_pool(const struct memp_desc *desc) #else memp_malloc_pool_fn(const struct memp_desc *desc, const char* file, const int line) #endif { LWIP_ASSERT("invalid pool desc", desc != NULL); if (desc == NULL) { return NULL; } #if !MEMP_OVERFLOW_CHECK return do_memp_malloc_pool(desc); #else return do_memp_malloc_pool_fn(desc, file, line); #endif } /** * Get an element from a specific pool. * * @param type the pool to get an element from * * @return a pointer to the allocated memory or a NULL pointer on error */ void * #if !MEMP_OVERFLOW_CHECK memp_malloc(memp_t type) #else memp_malloc_fn(memp_t type, const char* file, const int line) #endif { void *memp; LWIP_ERROR("memp_malloc: type < MEMP_MAX", (type < MEMP_MAX), return NULL;); #if MEMP_OVERFLOW_CHECK >= 2 memp_overflow_check_all(); #endif /* MEMP_OVERFLOW_CHECK >= 2 */ #if !MEMP_OVERFLOW_CHECK memp = do_memp_malloc_pool(memp_pools[type]); #else memp = do_memp_malloc_pool_fn(memp_pools[type], file, line); #endif return memp; } static void do_memp_free_pool(const struct memp_desc* desc, void *mem) { struct memp *memp; SYS_ARCH_DECL_PROTECT(old_level); LWIP_ASSERT("memp_free: mem properly aligned", ((mem_ptr_t)mem % MEM_ALIGNMENT) == 0); /* cast through void* to get rid of alignment warnings */ memp = (struct memp *)(void *)((u8_t*)mem - MEMP_SIZE); SYS_ARCH_PROTECT(old_level); #if MEMP_OVERFLOW_CHECK == 1 memp_overflow_check_element_overflow(memp, desc); memp_overflow_check_element_underflow(memp, desc); #endif /* MEMP_OVERFLOW_CHECK */ #if MEMP_STATS desc->stats->used--; #endif #if MEMP_MEM_MALLOC LWIP_UNUSED_ARG(desc); SYS_ARCH_UNPROTECT(old_level); mem_free(memp); #else /* MEMP_MEM_MALLOC */ memp->next = *desc->tab; *desc->tab = memp; #if MEMP_SANITY_CHECK LWIP_ASSERT("memp sanity", memp_sanity(desc)); #endif /* MEMP_SANITY_CHECK */ SYS_ARCH_UNPROTECT(old_level); #endif /* !MEMP_MEM_MALLOC */ } /** * Put a custom pool element back into its pool. * * @param desc the pool where to put mem * @param mem the memp element to free */ void memp_free_pool(const struct memp_desc* desc, void *mem) { LWIP_ASSERT("invalid pool desc", desc != NULL); if ((desc == NULL) || (mem == NULL)) { return; } do_memp_free_pool(desc, mem); } /** * Put an element back into its pool. * * @param type the pool where to put mem * @param mem the memp element to free */ void memp_free(memp_t type, void *mem) { #ifdef LWIP_HOOK_MEMP_AVAILABLE struct memp *old_first; #endif LWIP_ERROR("memp_free: type < MEMP_MAX", (type < MEMP_MAX), return;); if (mem == NULL) { return; } #if MEMP_OVERFLOW_CHECK >= 2 memp_overflow_check_all(); #endif /* MEMP_OVERFLOW_CHECK >= 2 */ #ifdef LWIP_HOOK_MEMP_AVAILABLE old_first = *memp_pools[type]->tab; #endif do_memp_free_pool(memp_pools[type], mem); #ifdef LWIP_HOOK_MEMP_AVAILABLE if (old_first == NULL) { LWIP_HOOK_MEMP_AVAILABLE(type); } #endif }
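/* Illustrative sketch (not part of lwIP): declaring and using a private pool
 * with the custom-pool API above. The pool name, element count and payload
 * struct are invented for the example; see lwip/memp.h for the exact macro. */
#include "lwip/memp.h"

struct example_msg {
  u8_t kind;
  u16_t len;
};

LWIP_MEMPOOL_DECLARE(EXAMPLE_MSG_POOL, 8, sizeof(struct example_msg), "example msg pool")

static void example_pool_usage(void)
{
  struct example_msg *m;

  memp_init_pool(&memp_EXAMPLE_MSG_POOL);               /* once, at startup */
  m = (struct example_msg *)memp_malloc_pool(&memp_EXAMPLE_MSG_POOL);
  if (m != NULL) {
    m->kind = 1;
    m->len = 0;
    memp_free_pool(&memp_EXAMPLE_MSG_POOL, m);          /* return the element */
  }
}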
2301_81045437/classic-platform
communication/lwip-2.0.3/src/core/memp.c
C
unknown
13,830
/** * @file * lwIP network interface abstraction * * @defgroup netif Network interface (NETIF) * @ingroup callbackstyle_api * * @defgroup netif_ip4 IPv4 address handling * @ingroup netif * * @defgroup netif_ip6 IPv6 address handling * @ingroup netif * * @defgroup netif_cd Client data handling * Store data (void*) on a netif for application usage. * @see @ref LWIP_NUM_NETIF_CLIENT_DATA * @ingroup netif */ /* * Copyright (c) 2001-2004 Swedish Institute of Computer Science. * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY * OF SUCH DAMAGE. * * This file is part of the lwIP TCP/IP stack. 
* * Author: Adam Dunkels <adam@sics.se> */ #include "lwip/opt.h" #include <string.h> #include "lwip/def.h" #include "lwip/ip_addr.h" #include "lwip/ip6_addr.h" #include "lwip/netif.h" #include "lwip/priv/tcp_priv.h" #include "lwip/udp.h" #include "lwip/raw.h" #include "lwip/snmp.h" #include "lwip/igmp.h" #include "lwip/etharp.h" #include "lwip/stats.h" #include "lwip/sys.h" #include "lwip/ip.h" #if ENABLE_LOOPBACK #if LWIP_NETIF_LOOPBACK_MULTITHREADING #include "lwip/tcpip.h" #endif /* LWIP_NETIF_LOOPBACK_MULTITHREADING */ #endif /* ENABLE_LOOPBACK */ #include "netif/ethernet.h" #if LWIP_AUTOIP #include "lwip/autoip.h" #endif /* LWIP_AUTOIP */ #if LWIP_DHCP #include "lwip/dhcp.h" #endif /* LWIP_DHCP */ #if LWIP_IPV6_DHCP6 #include "lwip/dhcp6.h" #endif /* LWIP_IPV6_DHCP6 */ #if LWIP_IPV6_MLD #include "lwip/mld6.h" #endif /* LWIP_IPV6_MLD */ #if LWIP_IPV6 #include "lwip/nd6.h" #endif #if LWIP_NETIF_STATUS_CALLBACK #define NETIF_STATUS_CALLBACK(n) do{ if (n->status_callback) { (n->status_callback)(n); }}while(0) #else #define NETIF_STATUS_CALLBACK(n) #endif /* LWIP_NETIF_STATUS_CALLBACK */ #if LWIP_NETIF_LINK_CALLBACK #define NETIF_LINK_CALLBACK(n) do{ if (n->link_callback) { (n->link_callback)(n); }}while(0) #else #define NETIF_LINK_CALLBACK(n) #endif /* LWIP_NETIF_LINK_CALLBACK */ struct netif *netif_list; struct netif *netif_default; static u8_t netif_num; #if LWIP_NUM_NETIF_CLIENT_DATA > 0 static u8_t netif_client_id; #endif #define NETIF_REPORT_TYPE_IPV4 0x01 #define NETIF_REPORT_TYPE_IPV6 0x02 static void netif_issue_reports(struct netif* netif, u8_t report_type); #if LWIP_IPV6 static err_t netif_null_output_ip6(struct netif *netif, struct pbuf *p, const ip6_addr_t *ipaddr); #endif /* LWIP_IPV6 */ #if LWIP_HAVE_LOOPIF #if LWIP_IPV4 static err_t netif_loop_output_ipv4(struct netif *netif, struct pbuf *p, const ip4_addr_t* addr); #endif #if LWIP_IPV6 static err_t netif_loop_output_ipv6(struct netif *netif, struct pbuf *p, const ip6_addr_t* addr); #endif static struct netif loop_netif; /** * Initialize a lwip network interface structure for a loopback interface * * @param netif the lwip network interface structure for this loopif * @return ERR_OK if the loopif is initialized * ERR_MEM if private data couldn't be allocated */ static err_t netif_loopif_init(struct netif *netif) { /* initialize the snmp variables and counters inside the struct netif * ifSpeed: no assumption can be made! 
*/ MIB2_INIT_NETIF(netif, snmp_ifType_softwareLoopback, 0); netif->name[0] = 'l'; netif->name[1] = 'o'; #if LWIP_IPV4 netif->output = netif_loop_output_ipv4; #endif #if LWIP_IPV6 netif->output_ip6 = netif_loop_output_ipv6; #endif #if LWIP_LOOPIF_MULTICAST netif->flags |= NETIF_FLAG_IGMP; #endif return ERR_OK; } #endif /* LWIP_HAVE_LOOPIF */ void netif_init(void) { #if LWIP_HAVE_LOOPIF #if LWIP_IPV4 #define LOOPIF_ADDRINIT &loop_ipaddr, &loop_netmask, &loop_gw, ip4_addr_t loop_ipaddr, loop_netmask, loop_gw; IP4_ADDR(&loop_gw, 127,0,0,1); IP4_ADDR(&loop_ipaddr, 127,0,0,1); IP4_ADDR(&loop_netmask, 255,0,0,0); #else /* LWIP_IPV4 */ #define LOOPIF_ADDRINIT #endif /* LWIP_IPV4 */ #if NO_SYS netif_add(&loop_netif, LOOPIF_ADDRINIT NULL, netif_loopif_init, ip_input); #else /* NO_SYS */ netif_add(&loop_netif, LOOPIF_ADDRINIT NULL, netif_loopif_init, tcpip_input); #endif /* NO_SYS */ #if LWIP_IPV6 IP_ADDR6_HOST(loop_netif.ip6_addr, 0, 0, 0, 0x00000001UL); loop_netif.ip6_addr_state[0] = IP6_ADDR_VALID; #endif /* LWIP_IPV6 */ netif_set_link_up(&loop_netif); netif_set_up(&loop_netif); #endif /* LWIP_HAVE_LOOPIF */ } /** * @ingroup lwip_nosys * Forwards a received packet for input processing with * ethernet_input() or ip_input() depending on netif flags. * Don't call directly, pass to netif_add() and call * netif->input(). * Only works if the netif driver correctly sets * NETIF_FLAG_ETHARP and/or NETIF_FLAG_ETHERNET flag! */ err_t netif_input(struct pbuf *p, struct netif *inp) { #if LWIP_ETHERNET if (inp->flags & (NETIF_FLAG_ETHARP | NETIF_FLAG_ETHERNET)) { return ethernet_input(p, inp); } else #endif /* LWIP_ETHERNET */ return ip_input(p, inp); } /** * @ingroup netif * Add a network interface to the list of lwIP netifs. * * @param netif a pre-allocated netif structure * @param ipaddr IP address for the new netif * @param netmask network mask for the new netif * @param gw default gateway IP address for the new netif * @param state opaque data passed to the new netif * @param init callback function that initializes the interface * @param input callback function that is called to pass * ingress packets up in the protocol layer stack.\n * It is recommended to use a function that passes the input directly * to the stack (netif_input(), NO_SYS=1 mode) or via sending a * message to TCPIP thread (tcpip_input(), NO_SYS=0 mode).\n * These functions use netif flags NETIF_FLAG_ETHARP and NETIF_FLAG_ETHERNET * to decide whether to forward to ethernet_input() or ip_input(). * In other words, the functions only work when the netif * driver is implemented correctly!\n * Most members of struct netif should be be initialized by the * netif init function = netif driver (init parameter of this function).\n * IPv6: Don't forget to call netif_create_ip6_linklocal_address() after * setting the MAC address in struct netif.hwaddr * (IPv6 requires a link-local address). * * @return netif, or NULL if failed. 
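 *
 * A typical call for an Ethernet-like driver might look as follows (sketch
 * only; "ethernetif_init" stands for the port's own driver init function and
 * the addresses are placeholders):
 *   struct netif my_netif;
 *   ip4_addr_t ip, mask, gw;
 *   IP4_ADDR(&ip, 192,168,1,2);
 *   IP4_ADDR(&mask, 255,255,255,0);
 *   IP4_ADDR(&gw, 192,168,1,1);
 *   netif_add(&my_netif, &ip, &mask, &gw, NULL, ethernetif_init, tcpip_input);
 *   netif_set_default(&my_netif);
 *   netif_set_up(&my_netif);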
*/ struct netif * netif_add(struct netif *netif, #if LWIP_IPV4 const ip4_addr_t *ipaddr, const ip4_addr_t *netmask, const ip4_addr_t *gw, #endif /* LWIP_IPV4 */ void *state, netif_init_fn init, netif_input_fn input) { #if LWIP_IPV6 s8_t i; #endif LWIP_ASSERT("No init function given", init != NULL); /* reset new interface configuration state */ #if LWIP_IPV4 ip_addr_set_zero_ip4(&netif->ip_addr); ip_addr_set_zero_ip4(&netif->netmask); ip_addr_set_zero_ip4(&netif->gw); #endif /* LWIP_IPV4 */ #if LWIP_IPV6 for (i = 0; i < LWIP_IPV6_NUM_ADDRESSES; i++) { ip_addr_set_zero_ip6(&netif->ip6_addr[i]); netif->ip6_addr_state[i] = IP6_ADDR_INVALID; } netif->output_ip6 = netif_null_output_ip6; #endif /* LWIP_IPV6 */ NETIF_SET_CHECKSUM_CTRL(netif, NETIF_CHECKSUM_ENABLE_ALL); netif->flags = 0; #ifdef netif_get_client_data memset(netif->client_data, 0, sizeof(netif->client_data)); #endif /* LWIP_NUM_NETIF_CLIENT_DATA */ #if LWIP_IPV6_AUTOCONFIG /* IPv6 address autoconfiguration not enabled by default */ netif->ip6_autoconfig_enabled = 0; #endif /* LWIP_IPV6_AUTOCONFIG */ #if LWIP_IPV6_SEND_ROUTER_SOLICIT netif->rs_count = LWIP_ND6_MAX_MULTICAST_SOLICIT; #endif /* LWIP_IPV6_SEND_ROUTER_SOLICIT */ #if LWIP_NETIF_STATUS_CALLBACK netif->status_callback = NULL; #endif /* LWIP_NETIF_STATUS_CALLBACK */ #if LWIP_NETIF_LINK_CALLBACK netif->link_callback = NULL; #endif /* LWIP_NETIF_LINK_CALLBACK */ #if LWIP_IGMP netif->igmp_mac_filter = NULL; #endif /* LWIP_IGMP */ #if LWIP_IPV6 && LWIP_IPV6_MLD netif->mld_mac_filter = NULL; #endif /* LWIP_IPV6 && LWIP_IPV6_MLD */ #if ENABLE_LOOPBACK netif->loop_first = NULL; netif->loop_last = NULL; #endif /* ENABLE_LOOPBACK */ /* remember netif specific state information data */ netif->state = state; netif->num = netif_num++; netif->input = input; NETIF_SET_HWADDRHINT(netif, NULL); #if ENABLE_LOOPBACK && LWIP_LOOPBACK_MAX_PBUFS netif->loop_cnt_current = 0; #endif /* ENABLE_LOOPBACK && LWIP_LOOPBACK_MAX_PBUFS */ #if LWIP_IPV4 netif_set_addr(netif, ipaddr, netmask, gw); #endif /* LWIP_IPV4 */ /* call user specified initialization function for netif */ if (init(netif) != ERR_OK) { return NULL; } /* add this netif to the list */ netif->next = netif_list; netif_list = netif; mib2_netif_added(netif); #if LWIP_IGMP /* start IGMP processing */ if (netif->flags & NETIF_FLAG_IGMP) { igmp_start(netif); } #endif /* LWIP_IGMP */ LWIP_DEBUGF(NETIF_DEBUG, ("netif: added interface %c%c IP", netif->name[0], netif->name[1])); #if LWIP_IPV4 LWIP_DEBUGF(NETIF_DEBUG, (" addr ")); ip4_addr_debug_print(NETIF_DEBUG, ipaddr); LWIP_DEBUGF(NETIF_DEBUG, (" netmask ")); ip4_addr_debug_print(NETIF_DEBUG, netmask); LWIP_DEBUGF(NETIF_DEBUG, (" gw ")); ip4_addr_debug_print(NETIF_DEBUG, gw); #endif /* LWIP_IPV4 */ LWIP_DEBUGF(NETIF_DEBUG, ("\n")); return netif; } #if LWIP_IPV4 /** * @ingroup netif_ip4 * Change IP address configuration for a network interface (including netmask * and default gateway). 
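 *
 * Example (sketch; "my_netif" and the addresses are placeholders):
 *   ip4_addr_t ip, mask, gw;
 *   IP4_ADDR(&ip, 10,0,0,5);
 *   IP4_ADDR(&mask, 255,255,255,0);
 *   IP4_ADDR(&gw, 10,0,0,1);
 *   netif_set_addr(&my_netif, &ip, &mask, &gw);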
* * @param netif the network interface to change * @param ipaddr the new IP address * @param netmask the new netmask * @param gw the new default gateway */ void netif_set_addr(struct netif *netif, const ip4_addr_t *ipaddr, const ip4_addr_t *netmask, const ip4_addr_t *gw) { if (ip4_addr_isany(ipaddr)) { /* when removing an address, we have to remove it *before* changing netmask/gw to ensure that tcp RST segment can be sent correctly */ netif_set_ipaddr(netif, ipaddr); netif_set_netmask(netif, netmask); netif_set_gw(netif, gw); } else { netif_set_netmask(netif, netmask); netif_set_gw(netif, gw); /* set ipaddr last to ensure netmask/gw have been set when status callback is called */ netif_set_ipaddr(netif, ipaddr); } } #endif /* LWIP_IPV4*/ /** * @ingroup netif * Remove a network interface from the list of lwIP netifs. * * @param netif the network interface to remove */ void netif_remove(struct netif *netif) { #if LWIP_IPV6 int i; #endif if (netif == NULL) { return; } #if LWIP_IPV4 if (!ip4_addr_isany_val(*netif_ip4_addr(netif))) { #if LWIP_TCP tcp_netif_ip_addr_changed(netif_ip_addr4(netif), NULL); #endif /* LWIP_TCP */ #if LWIP_UDP udp_netif_ip_addr_changed(netif_ip_addr4(netif), NULL); #endif /* LWIP_UDP */ #if LWIP_RAW raw_netif_ip_addr_changed(netif_ip_addr4(netif), NULL); #endif /* LWIP_RAW */ } #if LWIP_IGMP /* stop IGMP processing */ if (netif->flags & NETIF_FLAG_IGMP) { igmp_stop(netif); } #endif /* LWIP_IGMP */ #endif /* LWIP_IPV4*/ #if LWIP_IPV6 for (i = 0; i < LWIP_IPV6_NUM_ADDRESSES; i++) { if (ip6_addr_isvalid(netif_ip6_addr_state(netif, i))) { #if LWIP_TCP tcp_netif_ip_addr_changed(netif_ip_addr6(netif, i), NULL); #endif /* LWIP_TCP */ #if LWIP_UDP udp_netif_ip_addr_changed(netif_ip_addr6(netif, i), NULL); #endif /* LWIP_UDP */ #if LWIP_RAW raw_netif_ip_addr_changed(netif_ip_addr6(netif, i), NULL); #endif /* LWIP_RAW */ } } #if LWIP_IPV6_MLD /* stop MLD processing */ mld6_stop(netif); #endif /* LWIP_IPV6_MLD */ #endif /* LWIP_IPV6 */ if (netif_is_up(netif)) { /* set netif down before removing (call callback function) */ netif_set_down(netif); } mib2_remove_ip4(netif); /* this netif is default? */ if (netif_default == netif) { /* reset default netif */ netif_set_default(NULL); } /* is it the first netif? */ if (netif_list == netif) { netif_list = netif->next; } else { /* look for netif further down the list */ struct netif * tmp_netif; for (tmp_netif = netif_list; tmp_netif != NULL; tmp_netif = tmp_netif->next) { if (tmp_netif->next == netif) { tmp_netif->next = netif->next; break; } } if (tmp_netif == NULL) { return; /* netif is not on the list */ } } mib2_netif_removed(netif); #if LWIP_NETIF_REMOVE_CALLBACK if (netif->remove_callback) { netif->remove_callback(netif); } #endif /* LWIP_NETIF_REMOVE_CALLBACK */ LWIP_DEBUGF( NETIF_DEBUG, ("netif_remove: removed netif\n") ); } /** * @ingroup netif * Find a network interface by searching for its name * * @param name the name of the netif (like netif->name) plus concatenated number * in ascii representation (e.g. 
'en0') */ struct netif * netif_find(const char *name) { struct netif *netif; u8_t num; if (name == NULL) { return NULL; } num = (u8_t)(name[2] - '0'); for (netif = netif_list; netif != NULL; netif = netif->next) { if (num == netif->num && name[0] == netif->name[0] && name[1] == netif->name[1]) { LWIP_DEBUGF(NETIF_DEBUG, ("netif_find: found %c%c\n", name[0], name[1])); return netif; } } LWIP_DEBUGF(NETIF_DEBUG, ("netif_find: didn't find %c%c\n", name[0], name[1])); return NULL; } #if LWIP_IPV4 /** * @ingroup netif_ip4 * Change the IP address of a network interface * * @param netif the network interface to change * @param ipaddr the new IP address * * @note call netif_set_addr() if you also want to change netmask and * default gateway */ void netif_set_ipaddr(struct netif *netif, const ip4_addr_t *ipaddr) { ip_addr_t new_addr; *ip_2_ip4(&new_addr) = (ipaddr ? *ipaddr : *IP4_ADDR_ANY4); IP_SET_TYPE_VAL(new_addr, IPADDR_TYPE_V4); /* address is actually being changed? */ if (ip4_addr_cmp(ip_2_ip4(&new_addr), netif_ip4_addr(netif)) == 0) { LWIP_DEBUGF(NETIF_DEBUG | LWIP_DBG_STATE, ("netif_set_ipaddr: netif address being changed\n")); #if LWIP_TCP tcp_netif_ip_addr_changed(netif_ip_addr4(netif), &new_addr); #endif /* LWIP_TCP */ #if LWIP_UDP udp_netif_ip_addr_changed(netif_ip_addr4(netif), &new_addr); #endif /* LWIP_UDP */ #if LWIP_RAW raw_netif_ip_addr_changed(netif_ip_addr4(netif), &new_addr); #endif /* LWIP_RAW */ mib2_remove_ip4(netif); mib2_remove_route_ip4(0, netif); /* set new IP address to netif */ ip4_addr_set(ip_2_ip4(&netif->ip_addr), ipaddr); IP_SET_TYPE_VAL(netif->ip_addr, IPADDR_TYPE_V4); mib2_add_ip4(netif); mib2_add_route_ip4(0, netif); netif_issue_reports(netif, NETIF_REPORT_TYPE_IPV4); NETIF_STATUS_CALLBACK(netif); } LWIP_DEBUGF(NETIF_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("netif: IP address of interface %c%c set to %"U16_F".%"U16_F".%"U16_F".%"U16_F"\n", netif->name[0], netif->name[1], ip4_addr1_16(netif_ip4_addr(netif)), ip4_addr2_16(netif_ip4_addr(netif)), ip4_addr3_16(netif_ip4_addr(netif)), ip4_addr4_16(netif_ip4_addr(netif)))); } /** * @ingroup netif_ip4 * Change the default gateway for a network interface * * @param netif the network interface to change * @param gw the new default gateway * * @note call netif_set_addr() if you also want to change ip address and netmask */ void netif_set_gw(struct netif *netif, const ip4_addr_t *gw) { ip4_addr_set(ip_2_ip4(&netif->gw), gw); IP_SET_TYPE_VAL(netif->gw, IPADDR_TYPE_V4); LWIP_DEBUGF(NETIF_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("netif: GW address of interface %c%c set to %"U16_F".%"U16_F".%"U16_F".%"U16_F"\n", netif->name[0], netif->name[1], ip4_addr1_16(netif_ip4_gw(netif)), ip4_addr2_16(netif_ip4_gw(netif)), ip4_addr3_16(netif_ip4_gw(netif)), ip4_addr4_16(netif_ip4_gw(netif)))); } /** * @ingroup netif_ip4 * Change the netmask of a network interface * * @param netif the network interface to change * @param netmask the new netmask * * @note call netif_set_addr() if you also want to change ip address and * default gateway */ void netif_set_netmask(struct netif *netif, const ip4_addr_t *netmask) { mib2_remove_route_ip4(0, netif); /* set new netmask to netif */ ip4_addr_set(ip_2_ip4(&netif->netmask), netmask); IP_SET_TYPE_VAL(netif->netmask, IPADDR_TYPE_V4); mib2_add_route_ip4(0, netif); LWIP_DEBUGF(NETIF_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("netif: netmask of interface %c%c set to %"U16_F".%"U16_F".%"U16_F".%"U16_F"\n", netif->name[0], netif->name[1], ip4_addr1_16(netif_ip4_netmask(netif)), 
ip4_addr2_16(netif_ip4_netmask(netif)), ip4_addr3_16(netif_ip4_netmask(netif)), ip4_addr4_16(netif_ip4_netmask(netif)))); } #endif /* LWIP_IPV4 */ /** * @ingroup netif * Set a network interface as the default network interface * (used to output all packets for which no specific route is found) * * @param netif the default network interface */ void netif_set_default(struct netif *netif) { if (netif == NULL) { /* remove default route */ mib2_remove_route_ip4(1, netif); } else { /* install default route */ mib2_add_route_ip4(1, netif); } netif_default = netif; LWIP_DEBUGF(NETIF_DEBUG, ("netif: setting default interface %c%c\n", netif ? netif->name[0] : '\'', netif ? netif->name[1] : '\'')); } /** * @ingroup netif * Bring an interface up, available for processing * traffic. */ void netif_set_up(struct netif *netif) { if (!(netif->flags & NETIF_FLAG_UP)) { netif->flags |= NETIF_FLAG_UP; MIB2_COPY_SYSUPTIME_TO(&netif->ts); NETIF_STATUS_CALLBACK(netif); if (netif->flags & NETIF_FLAG_LINK_UP) { netif_issue_reports(netif, NETIF_REPORT_TYPE_IPV4|NETIF_REPORT_TYPE_IPV6); } } } /** Send ARP/IGMP/MLD/RS events, e.g. on link-up/netif-up or addr-change */ static void netif_issue_reports(struct netif* netif, u8_t report_type) { #if LWIP_IPV4 if ((report_type & NETIF_REPORT_TYPE_IPV4) && !ip4_addr_isany_val(*netif_ip4_addr(netif))) { #if LWIP_ARP /* For Ethernet network interfaces, we would like to send a "gratuitous ARP" */ if (netif->flags & (NETIF_FLAG_ETHARP)) { etharp_gratuitous(netif); } #endif /* LWIP_ARP */ #if LWIP_IGMP /* resend IGMP memberships */ if (netif->flags & NETIF_FLAG_IGMP) { igmp_report_groups(netif); } #endif /* LWIP_IGMP */ } #endif /* LWIP_IPV4 */ #if LWIP_IPV6 if (report_type & NETIF_REPORT_TYPE_IPV6) { #if LWIP_IPV6_MLD /* send mld memberships */ mld6_report_groups(netif); #endif /* LWIP_IPV6_MLD */ #if LWIP_IPV6_SEND_ROUTER_SOLICIT /* Send Router Solicitation messages. */ netif->rs_count = LWIP_ND6_MAX_MULTICAST_SOLICIT; #endif /* LWIP_IPV6_SEND_ROUTER_SOLICIT */ } #endif /* LWIP_IPV6 */ } /** * @ingroup netif * Bring an interface down, disabling any traffic processing. 
*/ void netif_set_down(struct netif *netif) { if (netif->flags & NETIF_FLAG_UP) { netif->flags &= ~NETIF_FLAG_UP; MIB2_COPY_SYSUPTIME_TO(&netif->ts); #if LWIP_IPV4 && LWIP_ARP if (netif->flags & NETIF_FLAG_ETHARP) { etharp_cleanup_netif(netif); } #endif /* LWIP_IPV4 && LWIP_ARP */ #if LWIP_IPV6 nd6_cleanup_netif(netif); #endif /* LWIP_IPV6 */ NETIF_STATUS_CALLBACK(netif); } } #if LWIP_NETIF_STATUS_CALLBACK /** * @ingroup netif * Set callback to be called when interface is brought up/down or address is changed while up */ void netif_set_status_callback(struct netif *netif, netif_status_callback_fn status_callback) { if (netif) { netif->status_callback = status_callback; } } #endif /* LWIP_NETIF_STATUS_CALLBACK */ #if LWIP_NETIF_REMOVE_CALLBACK /** * @ingroup netif * Set callback to be called when the interface has been removed */ void netif_set_remove_callback(struct netif *netif, netif_status_callback_fn remove_callback) { if (netif) { netif->remove_callback = remove_callback; } } #endif /* LWIP_NETIF_REMOVE_CALLBACK */ /** * @ingroup netif * Called by a driver when its link goes up */ void netif_set_link_up(struct netif *netif) { if (!(netif->flags & NETIF_FLAG_LINK_UP)) { netif->flags |= NETIF_FLAG_LINK_UP; #if LWIP_DHCP dhcp_network_changed(netif); #endif /* LWIP_DHCP */ #if LWIP_AUTOIP autoip_network_changed(netif); #endif /* LWIP_AUTOIP */ if (netif->flags & NETIF_FLAG_UP) { netif_issue_reports(netif, NETIF_REPORT_TYPE_IPV4|NETIF_REPORT_TYPE_IPV6); } NETIF_LINK_CALLBACK(netif); } } /** * @ingroup netif * Called by a driver when its link goes down */ void netif_set_link_down(struct netif *netif ) { if (netif->flags & NETIF_FLAG_LINK_UP) { netif->flags &= ~NETIF_FLAG_LINK_UP; NETIF_LINK_CALLBACK(netif); } } #if LWIP_NETIF_LINK_CALLBACK /** * @ingroup netif * Set callback to be called when link is brought up/down */ void netif_set_link_callback(struct netif *netif, netif_status_callback_fn link_callback) { if (netif) { netif->link_callback = link_callback; } } #endif /* LWIP_NETIF_LINK_CALLBACK */ #if ENABLE_LOOPBACK /** * @ingroup netif * Send an IP packet to be received on the same netif (loopif-like). * The pbuf is simply copied and handed back to netif->input. * In multithreaded mode, this is done directly since netif->input must put * the packet on a queue. * In callback mode, the packet is put on an internal queue and is fed to * netif->input by netif_poll(). * * @param netif the lwip network interface structure * @param p the (IP) packet to 'send' * @return ERR_OK if the packet has been sent * ERR_MEM if the pbuf used to copy the packet couldn't be allocated */ err_t netif_loop_output(struct netif *netif, struct pbuf *p) { struct pbuf *r; err_t err; struct pbuf *last; #if LWIP_LOOPBACK_MAX_PBUFS u16_t clen = 0; #endif /* LWIP_LOOPBACK_MAX_PBUFS */ /* If we have a loopif, SNMP counters are adjusted for it, * if not they are adjusted for 'netif'. 
*/ #if MIB2_STATS #if LWIP_HAVE_LOOPIF struct netif *stats_if = &loop_netif; #else /* LWIP_HAVE_LOOPIF */ struct netif *stats_if = netif; #endif /* LWIP_HAVE_LOOPIF */ #endif /* MIB2_STATS */ SYS_ARCH_DECL_PROTECT(lev); /* Allocate a new pbuf */ r = pbuf_alloc(PBUF_LINK, p->tot_len, PBUF_RAM); if (r == NULL) { LINK_STATS_INC(link.memerr); LINK_STATS_INC(link.drop); MIB2_STATS_NETIF_INC(stats_if, ifoutdiscards); return ERR_MEM; } #if LWIP_LOOPBACK_MAX_PBUFS clen = pbuf_clen(r); /* check for overflow or too many pbuf on queue */ if (((netif->loop_cnt_current + clen) < netif->loop_cnt_current) || ((netif->loop_cnt_current + clen) > LWIP_LOOPBACK_MAX_PBUFS)) { pbuf_free(r); LINK_STATS_INC(link.memerr); LINK_STATS_INC(link.drop); MIB2_STATS_NETIF_INC(stats_if, ifoutdiscards); return ERR_MEM; } netif->loop_cnt_current += clen; #endif /* LWIP_LOOPBACK_MAX_PBUFS */ /* Copy the whole pbuf queue p into the single pbuf r */ if ((err = pbuf_copy(r, p)) != ERR_OK) { pbuf_free(r); LINK_STATS_INC(link.memerr); LINK_STATS_INC(link.drop); MIB2_STATS_NETIF_INC(stats_if, ifoutdiscards); return err; } /* Put the packet on a linked list which gets emptied through calling netif_poll(). */ /* let last point to the last pbuf in chain r */ for (last = r; last->next != NULL; last = last->next) { /* nothing to do here, just get to the last pbuf */ } SYS_ARCH_PROTECT(lev); if (netif->loop_first != NULL) { LWIP_ASSERT("if first != NULL, last must also be != NULL", netif->loop_last != NULL); netif->loop_last->next = r; netif->loop_last = last; } else { netif->loop_first = r; netif->loop_last = last; } SYS_ARCH_UNPROTECT(lev); LINK_STATS_INC(link.xmit); MIB2_STATS_NETIF_ADD(stats_if, ifoutoctets, p->tot_len); MIB2_STATS_NETIF_INC(stats_if, ifoutucastpkts); #if LWIP_NETIF_LOOPBACK_MULTITHREADING /* For multithreading environment, schedule a call to netif_poll */ tcpip_callback_with_block((tcpip_callback_fn)netif_poll, netif, 0); #endif /* LWIP_NETIF_LOOPBACK_MULTITHREADING */ return ERR_OK; } #if LWIP_HAVE_LOOPIF #if LWIP_IPV4 static err_t netif_loop_output_ipv4(struct netif *netif, struct pbuf *p, const ip4_addr_t* addr) { LWIP_UNUSED_ARG(addr); return netif_loop_output(netif, p); } #endif /* LWIP_IPV4 */ #if LWIP_IPV6 static err_t netif_loop_output_ipv6(struct netif *netif, struct pbuf *p, const ip6_addr_t* addr) { LWIP_UNUSED_ARG(addr); return netif_loop_output(netif, p); } #endif /* LWIP_IPV6 */ #endif /* LWIP_HAVE_LOOPIF */ /** * Call netif_poll() in the main loop of your application. This is to prevent * reentering non-reentrant functions like tcp_input(). Packets passed to * netif_loop_output() are put on a list that is passed to netif->input() by * netif_poll(). */ void netif_poll(struct netif *netif) { /* If we have a loopif, SNMP counters are adjusted for it, * if not they are adjusted for 'netif'. */ #if MIB2_STATS #if LWIP_HAVE_LOOPIF struct netif *stats_if = &loop_netif; #else /* LWIP_HAVE_LOOPIF */ struct netif *stats_if = netif; #endif /* LWIP_HAVE_LOOPIF */ #endif /* MIB2_STATS */ SYS_ARCH_DECL_PROTECT(lev); /* Get a packet from the list. 
With SYS_LIGHTWEIGHT_PROT=1, this is protected */ SYS_ARCH_PROTECT(lev); while (netif->loop_first != NULL) { struct pbuf *in, *in_end; #if LWIP_LOOPBACK_MAX_PBUFS u8_t clen = 1; #endif /* LWIP_LOOPBACK_MAX_PBUFS */ in = in_end = netif->loop_first; while (in_end->len != in_end->tot_len) { LWIP_ASSERT("bogus pbuf: len != tot_len but next == NULL!", in_end->next != NULL); in_end = in_end->next; #if LWIP_LOOPBACK_MAX_PBUFS clen++; #endif /* LWIP_LOOPBACK_MAX_PBUFS */ } #if LWIP_LOOPBACK_MAX_PBUFS /* adjust the number of pbufs on queue */ LWIP_ASSERT("netif->loop_cnt_current underflow", ((netif->loop_cnt_current - clen) < netif->loop_cnt_current)); netif->loop_cnt_current -= clen; #endif /* LWIP_LOOPBACK_MAX_PBUFS */ /* 'in_end' now points to the last pbuf from 'in' */ if (in_end == netif->loop_last) { /* this was the last pbuf in the list */ netif->loop_first = netif->loop_last = NULL; } else { /* pop the pbuf off the list */ netif->loop_first = in_end->next; LWIP_ASSERT("should not be null since first != last!", netif->loop_first != NULL); } /* De-queue the pbuf from its successors on the 'loop_' list. */ in_end->next = NULL; SYS_ARCH_UNPROTECT(lev); LINK_STATS_INC(link.recv); MIB2_STATS_NETIF_ADD(stats_if, ifinoctets, in->tot_len); MIB2_STATS_NETIF_INC(stats_if, ifinucastpkts); /* loopback packets are always IP packets! */ if (ip_input(in, netif) != ERR_OK) { pbuf_free(in); } SYS_ARCH_PROTECT(lev); } SYS_ARCH_UNPROTECT(lev); } #if !LWIP_NETIF_LOOPBACK_MULTITHREADING /** * Calls netif_poll() for every netif on the netif_list. */ void netif_poll_all(void) { struct netif *netif = netif_list; /* loop through netifs */ while (netif != NULL) { netif_poll(netif); /* proceed to next network interface */ netif = netif->next; } } #endif /* !LWIP_NETIF_LOOPBACK_MULTITHREADING */ #endif /* ENABLE_LOOPBACK */ #if LWIP_NUM_NETIF_CLIENT_DATA > 0 /** * @ingroup netif_cd * Allocate an index to store data in client_data member of struct netif. * Returned value is an index in mentioned array. * @see LWIP_NUM_NETIF_CLIENT_DATA */ u8_t netif_alloc_client_data_id(void) { u8_t result = netif_client_id; netif_client_id++; LWIP_ASSERT("Increase LWIP_NUM_NETIF_CLIENT_DATA in lwipopts.h", result < LWIP_NUM_NETIF_CLIENT_DATA); return result + LWIP_NETIF_CLIENT_DATA_INDEX_MAX; } #endif #if LWIP_IPV6 /** * @ingroup netif_ip6 * Change an IPv6 address of a network interface * * @param netif the network interface to change * @param addr_idx index of the IPv6 address * @param addr6 the new IPv6 address * * @note call netif_ip6_addr_set_state() to set the address valid/temptative */ void netif_ip6_addr_set(struct netif *netif, s8_t addr_idx, const ip6_addr_t *addr6) { LWIP_ASSERT("addr6 != NULL", addr6 != NULL); netif_ip6_addr_set_parts(netif, addr_idx, addr6->addr[0], addr6->addr[1], addr6->addr[2], addr6->addr[3]); } /* * Change an IPv6 address of a network interface (internal version taking 4 * u32_t) * * @param netif the network interface to change * @param addr_idx index of the IPv6 address * @param i0 word0 of the new IPv6 address * @param i1 word1 of the new IPv6 address * @param i2 word2 of the new IPv6 address * @param i3 word3 of the new IPv6 address */ void netif_ip6_addr_set_parts(struct netif *netif, s8_t addr_idx, u32_t i0, u32_t i1, u32_t i2, u32_t i3) { const ip6_addr_t *old_addr; LWIP_ASSERT("netif != NULL", netif != NULL); LWIP_ASSERT("invalid index", addr_idx < LWIP_IPV6_NUM_ADDRESSES); old_addr = netif_ip6_addr(netif, addr_idx); /* address is actually being changed? 
*/ if ((old_addr->addr[0] != i0) || (old_addr->addr[1] != i1) || (old_addr->addr[2] != i2) || (old_addr->addr[3] != i3)) { LWIP_DEBUGF(NETIF_DEBUG | LWIP_DBG_STATE, ("netif_ip6_addr_set: netif address being changed\n")); if (netif_ip6_addr_state(netif, addr_idx) & IP6_ADDR_VALID) { #if LWIP_TCP || LWIP_UDP ip_addr_t new_ipaddr; IP_ADDR6(&new_ipaddr, i0, i1, i2, i3); #endif /* LWIP_TCP || LWIP_UDP */ #if LWIP_TCP tcp_netif_ip_addr_changed(netif_ip_addr6(netif, addr_idx), &new_ipaddr); #endif /* LWIP_TCP */ #if LWIP_UDP udp_netif_ip_addr_changed(netif_ip_addr6(netif, addr_idx), &new_ipaddr); #endif /* LWIP_UDP */ #if LWIP_RAW raw_netif_ip_addr_changed(netif_ip_addr6(netif, addr_idx), &new_ipaddr); #endif /* LWIP_RAW */ } /* @todo: remove/readd mib2 ip6 entries? */ IP6_ADDR(ip_2_ip6(&(netif->ip6_addr[addr_idx])), i0, i1, i2, i3); IP_SET_TYPE_VAL(netif->ip6_addr[addr_idx], IPADDR_TYPE_V6); if (netif_ip6_addr_state(netif, addr_idx) & IP6_ADDR_VALID) { netif_issue_reports(netif, NETIF_REPORT_TYPE_IPV6); NETIF_STATUS_CALLBACK(netif); } } LWIP_DEBUGF(NETIF_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("netif: IPv6 address %d of interface %c%c set to %s/0x%"X8_F"\n", addr_idx, netif->name[0], netif->name[1], ip6addr_ntoa(netif_ip6_addr(netif, addr_idx)), netif_ip6_addr_state(netif, addr_idx))); } /** * @ingroup netif_ip6 * Change the state of an IPv6 address of a network interface * (INVALID, TEMPTATIVE, PREFERRED, DEPRECATED, where TEMPTATIVE * includes the number of checks done, see ip6_addr.h) * * @param netif the network interface to change * @param addr_idx index of the IPv6 address * @param state the new IPv6 address state */ void netif_ip6_addr_set_state(struct netif* netif, s8_t addr_idx, u8_t state) { u8_t old_state; LWIP_ASSERT("netif != NULL", netif != NULL); LWIP_ASSERT("invalid index", addr_idx < LWIP_IPV6_NUM_ADDRESSES); old_state = netif_ip6_addr_state(netif, addr_idx); /* state is actually being changed? */ if (old_state != state) { u8_t old_valid = old_state & IP6_ADDR_VALID; u8_t new_valid = state & IP6_ADDR_VALID; LWIP_DEBUGF(NETIF_DEBUG | LWIP_DBG_STATE, ("netif_ip6_addr_set_state: netif address state being changed\n")); #if LWIP_IPV6_MLD /* Reevaluate solicited-node multicast group membership. */ if (netif->flags & NETIF_FLAG_MLD6) { nd6_adjust_mld_membership(netif, addr_idx, state); } #endif /* LWIP_IPV6_MLD */ if (old_valid && !new_valid) { /* address about to be removed by setting invalid */ #if LWIP_TCP tcp_netif_ip_addr_changed(netif_ip_addr6(netif, addr_idx), NULL); #endif /* LWIP_TCP */ #if LWIP_UDP udp_netif_ip_addr_changed(netif_ip_addr6(netif, addr_idx), NULL); #endif /* LWIP_UDP */ #if LWIP_RAW raw_netif_ip_addr_changed(netif_ip_addr6(netif, addr_idx), NULL); #endif /* LWIP_RAW */ /* @todo: remove mib2 ip6 entries? */ } netif->ip6_addr_state[addr_idx] = state; if (!old_valid && new_valid) { /* address added by setting valid */ /* @todo: add mib2 ip6 entries? 
*/ netif_issue_reports(netif, NETIF_REPORT_TYPE_IPV6); } if ((old_state & IP6_ADDR_PREFERRED) != (state & IP6_ADDR_PREFERRED)) { /* address state has changed (valid flag changed or switched between preferred and deprecated) -> call the callback function */ NETIF_STATUS_CALLBACK(netif); } } LWIP_DEBUGF(NETIF_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("netif: IPv6 address %d of interface %c%c set to %s/0x%"X8_F"\n", addr_idx, netif->name[0], netif->name[1], ip6addr_ntoa(netif_ip6_addr(netif, addr_idx)), netif_ip6_addr_state(netif, addr_idx))); } /** * Checks if a specific address is assigned to the netif and returns its * index. * * @param netif the netif to check * @param ip6addr the IPv6 address to find * @return >= 0: address found, this is its index * -1: address not found on this netif */ s8_t netif_get_ip6_addr_match(struct netif *netif, const ip6_addr_t *ip6addr) { s8_t i; for (i = 0; i < LWIP_IPV6_NUM_ADDRESSES; i++) { if (!ip6_addr_isinvalid(netif_ip6_addr_state(netif, i)) && ip6_addr_cmp(netif_ip6_addr(netif, i), ip6addr)) { return i; } } return -1; } /** * @ingroup netif_ip6 * Create a link-local IPv6 address on a netif (stored in slot 0) * * @param netif the netif to create the address on * @param from_mac_48bit if != 0, assume hwadr is a 48-bit MAC address (std conversion) * if == 0, use hwaddr directly as interface ID */ void netif_create_ip6_linklocal_address(struct netif *netif, u8_t from_mac_48bit) { u8_t i, addr_index; /* Link-local prefix. */ ip_2_ip6(&netif->ip6_addr[0])->addr[0] = PP_HTONL(0xfe800000ul); ip_2_ip6(&netif->ip6_addr[0])->addr[1] = 0; /* Generate interface ID. */ if (from_mac_48bit) { /* Assume hwaddr is a 48-bit IEEE 802 MAC. Convert to EUI-64 address. Complement Group bit. */ ip_2_ip6(&netif->ip6_addr[0])->addr[2] = lwip_htonl((((u32_t)(netif->hwaddr[0] ^ 0x02)) << 24) | ((u32_t)(netif->hwaddr[1]) << 16) | ((u32_t)(netif->hwaddr[2]) << 8) | (0xff)); ip_2_ip6(&netif->ip6_addr[0])->addr[3] = lwip_htonl((0xfeul << 24) | ((u32_t)(netif->hwaddr[3]) << 16) | ((u32_t)(netif->hwaddr[4]) << 8) | (netif->hwaddr[5])); } else { /* Use hwaddr directly as interface ID. */ ip_2_ip6(&netif->ip6_addr[0])->addr[2] = 0; ip_2_ip6(&netif->ip6_addr[0])->addr[3] = 0; addr_index = 3; for (i = 0; (i < 8) && (i < netif->hwaddr_len); i++) { if (i == 4) { addr_index--; } ip_2_ip6(&netif->ip6_addr[0])->addr[addr_index] |= ((u32_t)(netif->hwaddr[netif->hwaddr_len - i - 1])) << (8 * (i & 0x03)); } } /* Set address state. */ #if LWIP_IPV6_DUP_DETECT_ATTEMPTS /* Will perform duplicate address detection (DAD). */ netif_ip6_addr_set_state(netif, 0, IP6_ADDR_TENTATIVE); #else /* Consider address valid. */ netif_ip6_addr_set_state(netif, 0, IP6_ADDR_PREFERRED); #endif /* LWIP_IPV6_AUTOCONFIG */ } /** * @ingroup netif_ip6 * This function allows for the easy addition of a new IPv6 address to an interface. * It takes care of finding an empty slot and then sets the address tentative * (to make sure that all the subsequent processing happens). 
 *
 * @param netif netif to add the address on
 * @param ip6addr address to add
 * @param chosen_idx if != NULL, the chosen IPv6 address index will be stored here
 */
err_t
netif_add_ip6_address(struct netif *netif, const ip6_addr_t *ip6addr, s8_t *chosen_idx)
{
  s8_t i;

  i = netif_get_ip6_addr_match(netif, ip6addr);
  if (i >= 0) {
    /* Address already added */
    if (chosen_idx != NULL) {
      *chosen_idx = i;
    }
    return ERR_OK;
  }

  /* Find a free slot -- mustn't be the first one (reserved for link local) */
  for (i = 1; i < LWIP_IPV6_NUM_ADDRESSES; i++) {
    if (ip6_addr_isinvalid(netif_ip6_addr_state(netif, i))) {
      ip_addr_copy_from_ip6(netif->ip6_addr[i], *ip6addr);
      netif_ip6_addr_set_state(netif, i, IP6_ADDR_TENTATIVE);
      if (chosen_idx != NULL) {
        *chosen_idx = i;
      }
      return ERR_OK;
    }
  }

  if (chosen_idx != NULL) {
    *chosen_idx = -1;
  }
  return ERR_VAL;
}

/** Dummy IPv6 output function for netifs not supporting IPv6 */
static err_t
netif_null_output_ip6(struct netif *netif, struct pbuf *p, const ip6_addr_t *ipaddr)
{
  LWIP_UNUSED_ARG(netif);
  LWIP_UNUSED_ARG(p);
  LWIP_UNUSED_ARG(ipaddr);

  return ERR_IF;
}
#endif /* LWIP_IPV6 */
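/*
 * Usage sketch (illustrative only, not lwIP reference code): how the IPv6
 * address management functions above are typically combined. It assumes a
 * netif that has already been added and brought up, with a 48-bit MAC in
 * hwaddr, and the usual includes (lwip/netif.h, lwip/ip6_addr.h). The
 * function name 'my_netif_ip6_setup' and the address 2001:db8::1 are
 * made-up examples.
 *
  @code{.c}
void my_netif_ip6_setup(struct netif *my_netif)
{
  ip6_addr_t my_addr6;
  s8_t chosen_idx;

  // Slot 0: derive a link-local address (fe80::/64) from the MAC address.
  // With LWIP_IPV6_DUP_DETECT_ATTEMPTS > 0 it starts out tentative until
  // duplicate address detection has completed.
  netif_create_ip6_linklocal_address(my_netif, 1);

  // Add a further (hypothetical) address; netif_add_ip6_address() picks a
  // free slot (never slot 0) and marks the address tentative.
  if (ip6addr_aton("2001:db8::1", &my_addr6)) {
    if (netif_add_ip6_address(my_netif, &my_addr6, &chosen_idx) == ERR_OK) {
      // chosen_idx now holds the slot index the address was stored in.
    }
  }
}
  @endcode
 */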
2301_81045437/classic-platform
communication/lwip-2.0.3/src/core/netif.c
C
unknown
38,832
/**
 * @file
 * Packet buffer management
 */

/**
 * @defgroup pbuf Packet buffers (PBUF)
 * @ingroup infrastructure
 *
 * Packets are built from the pbuf data structure. It supports dynamic
 * memory allocation for packet contents or can reference externally
 * managed packet contents both in RAM and ROM. Quick allocation for
 * incoming packets is provided through pools with fixed sized pbufs.
 *
 * A packet may span over multiple pbufs, chained as a singly linked
 * list. This is called a "pbuf chain".
 *
 * Multiple packets may be queued, also using this singly linked list.
 * This is called a "packet queue".
 *
 * So, a packet queue consists of one or more pbuf chains, each of
 * which consists of one or more pbufs. CURRENTLY, PACKET QUEUES ARE
 * NOT SUPPORTED!!! Use helper structs to queue multiple packets.
 *
 * The differences between a pbuf chain and a packet queue are very
 * precise but subtle.
 *
 * The last pbuf of a packet has a ->tot_len field that equals the
 * ->len field. It can be found by traversing the list. If the last
 * pbuf of a packet has a ->next field other than NULL, more packets
 * are on the queue.
 *
 * Therefore, looping through a pbuf of a single packet has a
 * loop end condition of (tot_len == p->len), NOT (next == NULL).
 *
 * Example of custom pbuf usage for zero-copy RX:
  @code{.c}
typedef struct my_custom_pbuf
{
   struct pbuf_custom p;
   void* dma_descriptor;
} my_custom_pbuf_t;

LWIP_MEMPOOL_DECLARE(RX_POOL, 10, sizeof(my_custom_pbuf_t), "Zero-copy RX PBUF pool");

void my_pbuf_free_custom(void* p)
{
  my_custom_pbuf_t* my_pbuf = (my_custom_pbuf_t*)p;

  LOCK_INTERRUPTS();
  free_rx_dma_descriptor(my_pbuf->dma_descriptor);
  LWIP_MEMPOOL_FREE(RX_POOL, my_pbuf);
  UNLOCK_INTERRUPTS();
}

void eth_rx_irq()
{
  dma_descriptor*   dma_desc = get_RX_DMA_descriptor_from_ethernet();
  my_custom_pbuf_t* my_pbuf  = (my_custom_pbuf_t*)LWIP_MEMPOOL_ALLOC(RX_POOL);

  my_pbuf->p.custom_free_function = my_pbuf_free_custom;
  my_pbuf->dma_descriptor         = dma_desc;

  invalidate_cpu_cache(dma_desc->rx_data, dma_desc->rx_length);

  struct pbuf* p = pbuf_alloced_custom(PBUF_RAW,
     dma_desc->rx_length,
     PBUF_REF,
     &my_pbuf->p,
     dma_desc->rx_data,
     dma_desc->max_buffer_size);

  if(netif->input(p, netif) != ERR_OK) {
    pbuf_free(p);
  }
}
  @endcode
 */

/*
 * Copyright (c) 2001-2004 Swedish Institute of Computer Science.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without modification,
 * are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY * OF SUCH DAMAGE. * * This file is part of the lwIP TCP/IP stack. * * Author: Adam Dunkels <adam@sics.se> * */ #include "lwip/opt.h" #include "lwip/stats.h" #include "lwip/def.h" #include "lwip/mem.h" #include "lwip/memp.h" #include "lwip/pbuf.h" #include "lwip/sys.h" #if LWIP_TCP && TCP_QUEUE_OOSEQ #include "lwip/priv/tcp_priv.h" #endif #if LWIP_CHECKSUM_ON_COPY #include "lwip/inet_chksum.h" #endif #include <string.h> #define SIZEOF_STRUCT_PBUF LWIP_MEM_ALIGN_SIZE(sizeof(struct pbuf)) /* Since the pool is created in memp, PBUF_POOL_BUFSIZE will be automatically aligned there. Therefore, PBUF_POOL_BUFSIZE_ALIGNED can be used here. */ #define PBUF_POOL_BUFSIZE_ALIGNED LWIP_MEM_ALIGN_SIZE(PBUF_POOL_BUFSIZE) #if !LWIP_TCP || !TCP_QUEUE_OOSEQ || !PBUF_POOL_FREE_OOSEQ #define PBUF_POOL_IS_EMPTY() #else /* !LWIP_TCP || !TCP_QUEUE_OOSEQ || !PBUF_POOL_FREE_OOSEQ */ #if !NO_SYS #ifndef PBUF_POOL_FREE_OOSEQ_QUEUE_CALL #include "lwip/tcpip.h" #define PBUF_POOL_FREE_OOSEQ_QUEUE_CALL() do { \ if (tcpip_callback_with_block(pbuf_free_ooseq_callback, NULL, 0) != ERR_OK) { \ SYS_ARCH_PROTECT(old_level); \ pbuf_free_ooseq_pending = 0; \ SYS_ARCH_UNPROTECT(old_level); \ } } while(0) #endif /* PBUF_POOL_FREE_OOSEQ_QUEUE_CALL */ #endif /* !NO_SYS */ volatile u8_t pbuf_free_ooseq_pending; #define PBUF_POOL_IS_EMPTY() pbuf_pool_is_empty() /** * Attempt to reclaim some memory from queued out-of-sequence TCP segments * if we run out of pool pbufs. It's better to give priority to new packets * if we're running out. * * This must be done in the correct thread context therefore this function * can only be used with NO_SYS=0 and through tcpip_callback. */ #if !NO_SYS static #endif /* !NO_SYS */ void pbuf_free_ooseq(void) { struct tcp_pcb* pcb; SYS_ARCH_SET(pbuf_free_ooseq_pending, 0); for (pcb = tcp_active_pcbs; NULL != pcb; pcb = pcb->next) { if (NULL != pcb->ooseq) { /** Free the ooseq pbufs of one PCB only */ LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_free_ooseq: freeing out-of-sequence pbufs\n")); tcp_segs_free(pcb->ooseq); pcb->ooseq = NULL; return; } } } #if !NO_SYS /** * Just a callback function for tcpip_callback() that calls pbuf_free_ooseq(). */ static void pbuf_free_ooseq_callback(void *arg) { LWIP_UNUSED_ARG(arg); pbuf_free_ooseq(); } #endif /* !NO_SYS */ /** Queue a call to pbuf_free_ooseq if not already queued. */ static void pbuf_pool_is_empty(void) { #ifndef PBUF_POOL_FREE_OOSEQ_QUEUE_CALL SYS_ARCH_SET(pbuf_free_ooseq_pending, 1); #else /* PBUF_POOL_FREE_OOSEQ_QUEUE_CALL */ u8_t queued; SYS_ARCH_DECL_PROTECT(old_level); SYS_ARCH_PROTECT(old_level); queued = pbuf_free_ooseq_pending; pbuf_free_ooseq_pending = 1; SYS_ARCH_UNPROTECT(old_level); if (!queued) { /* queue a call to pbuf_free_ooseq if not already queued */ PBUF_POOL_FREE_OOSEQ_QUEUE_CALL(); } #endif /* PBUF_POOL_FREE_OOSEQ_QUEUE_CALL */ } #endif /* !LWIP_TCP || !TCP_QUEUE_OOSEQ || !PBUF_POOL_FREE_OOSEQ */ /** * @ingroup pbuf * Allocates a pbuf of the given type (possibly a chain for PBUF_POOL type). 
* * The actual memory allocated for the pbuf is determined by the * layer at which the pbuf is allocated and the requested size * (from the size parameter). * * @param layer flag to define header size * @param length size of the pbuf's payload * @param type this parameter decides how and where the pbuf * should be allocated as follows: * * - PBUF_RAM: buffer memory for pbuf is allocated as one large * chunk. This includes protocol headers as well. * - PBUF_ROM: no buffer memory is allocated for the pbuf, even for * protocol headers. Additional headers must be prepended * by allocating another pbuf and chain in to the front of * the ROM pbuf. It is assumed that the memory used is really * similar to ROM in that it is immutable and will not be * changed. Memory which is dynamic should generally not * be attached to PBUF_ROM pbufs. Use PBUF_REF instead. * - PBUF_REF: no buffer memory is allocated for the pbuf, even for * protocol headers. It is assumed that the pbuf is only * being used in a single thread. If the pbuf gets queued, * then pbuf_take should be called to copy the buffer. * - PBUF_POOL: the pbuf is allocated as a pbuf chain, with pbufs from * the pbuf pool that is allocated during pbuf_init(). * * @return the allocated pbuf. If multiple pbufs where allocated, this * is the first pbuf of a pbuf chain. */ struct pbuf * pbuf_alloc(pbuf_layer layer, u16_t length, pbuf_type type) { struct pbuf *p, *q, *r; u16_t offset; s32_t rem_len; /* remaining length */ LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_alloc(length=%"U16_F")\n", length)); /* determine header offset */ switch (layer) { case PBUF_TRANSPORT: /* add room for transport (often TCP) layer header */ offset = PBUF_LINK_ENCAPSULATION_HLEN + PBUF_LINK_HLEN + PBUF_IP_HLEN + PBUF_TRANSPORT_HLEN; break; case PBUF_IP: /* add room for IP layer header */ offset = PBUF_LINK_ENCAPSULATION_HLEN + PBUF_LINK_HLEN + PBUF_IP_HLEN; break; case PBUF_LINK: /* add room for link layer header */ offset = PBUF_LINK_ENCAPSULATION_HLEN + PBUF_LINK_HLEN; break; case PBUF_RAW_TX: /* add room for encapsulating link layer headers (e.g. 802.11) */ offset = PBUF_LINK_ENCAPSULATION_HLEN; break; case PBUF_RAW: /* no offset (e.g. 
RX buffers or chain successors) */ offset = 0; break; default: LWIP_ASSERT("pbuf_alloc: bad pbuf layer", 0); return NULL; } switch (type) { case PBUF_POOL: /* allocate head of pbuf chain into p */ p = (struct pbuf *)memp_malloc(MEMP_PBUF_POOL); LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_alloc: allocated pbuf %p\n", (void *)p)); if (p == NULL) { PBUF_POOL_IS_EMPTY(); return NULL; } p->type = type; p->next = NULL; /* make the payload pointer point 'offset' bytes into pbuf data memory */ p->payload = LWIP_MEM_ALIGN((void *)((u8_t *)p + (SIZEOF_STRUCT_PBUF + offset))); LWIP_ASSERT("pbuf_alloc: pbuf p->payload properly aligned", ((mem_ptr_t)p->payload % MEM_ALIGNMENT) == 0); /* the total length of the pbuf chain is the requested size */ p->tot_len = length; /* set the length of the first pbuf in the chain */ p->len = LWIP_MIN(length, PBUF_POOL_BUFSIZE_ALIGNED - LWIP_MEM_ALIGN_SIZE(offset)); LWIP_ASSERT("check p->payload + p->len does not overflow pbuf", ((u8_t*)p->payload + p->len <= (u8_t*)p + SIZEOF_STRUCT_PBUF + PBUF_POOL_BUFSIZE_ALIGNED)); LWIP_ASSERT("PBUF_POOL_BUFSIZE must be bigger than MEM_ALIGNMENT", (PBUF_POOL_BUFSIZE_ALIGNED - LWIP_MEM_ALIGN_SIZE(offset)) > 0 ); /* set reference count (needed here in case we fail) */ p->ref = 1; /* now allocate the tail of the pbuf chain */ /* remember first pbuf for linkage in next iteration */ r = p; /* remaining length to be allocated */ rem_len = length - p->len; /* any remaining pbufs to be allocated? */ while (rem_len > 0) { q = (struct pbuf *)memp_malloc(MEMP_PBUF_POOL); if (q == NULL) { PBUF_POOL_IS_EMPTY(); /* free chain so far allocated */ pbuf_free(p); /* bail out unsuccessfully */ return NULL; } q->type = type; q->flags = 0; q->next = NULL; /* make previous pbuf point to this pbuf */ r->next = q; /* set total length of this pbuf and next in chain */ LWIP_ASSERT("rem_len < max_u16_t", rem_len < 0xffff); q->tot_len = (u16_t)rem_len; /* this pbuf length is pool size, unless smaller sized tail */ q->len = LWIP_MIN((u16_t)rem_len, PBUF_POOL_BUFSIZE_ALIGNED); q->payload = (void *)((u8_t *)q + SIZEOF_STRUCT_PBUF); LWIP_ASSERT("pbuf_alloc: pbuf q->payload properly aligned", ((mem_ptr_t)q->payload % MEM_ALIGNMENT) == 0); LWIP_ASSERT("check p->payload + p->len does not overflow pbuf", ((u8_t*)p->payload + p->len <= (u8_t*)p + SIZEOF_STRUCT_PBUF + PBUF_POOL_BUFSIZE_ALIGNED)); q->ref = 1; /* calculate remaining length to be allocated */ rem_len -= q->len; /* remember this pbuf for linkage in next iteration */ r = q; } /* end of chain */ /*r->next = NULL;*/ break; case PBUF_RAM: { mem_size_t alloc_len = LWIP_MEM_ALIGN_SIZE(SIZEOF_STRUCT_PBUF + offset) + LWIP_MEM_ALIGN_SIZE(length); /* bug #50040: Check for integer overflow when calculating alloc_len */ if (alloc_len < LWIP_MEM_ALIGN_SIZE(length)) { return NULL; } /* If pbuf is to be allocated in RAM, allocate memory for it. */ p = (struct pbuf*)mem_malloc(alloc_len); } if (p == NULL) { return NULL; } /* Set up internal structure of the pbuf. */ p->payload = LWIP_MEM_ALIGN((void *)((u8_t *)p + SIZEOF_STRUCT_PBUF + offset)); p->len = p->tot_len = length; p->next = NULL; p->type = type; LWIP_ASSERT("pbuf_alloc: pbuf->payload properly aligned", ((mem_ptr_t)p->payload % MEM_ALIGNMENT) == 0); break; /* pbuf references existing (non-volatile static constant) ROM payload? */ case PBUF_ROM: /* pbuf references existing (externally allocated) RAM payload? 
*/ case PBUF_REF: /* only allocate memory for the pbuf structure */ p = (struct pbuf *)memp_malloc(MEMP_PBUF); if (p == NULL) { LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("pbuf_alloc: Could not allocate MEMP_PBUF for PBUF_%s.\n", (type == PBUF_ROM) ? "ROM" : "REF")); return NULL; } /* caller must set this field properly, afterwards */ p->payload = NULL; p->len = p->tot_len = length; p->next = NULL; p->type = type; break; default: LWIP_ASSERT("pbuf_alloc: erroneous type", 0); return NULL; } /* set reference count */ p->ref = 1; /* set flags */ p->flags = 0; LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_alloc(length=%"U16_F") == %p\n", length, (void *)p)); return p; } #if LWIP_SUPPORT_CUSTOM_PBUF /** * @ingroup pbuf * Initialize a custom pbuf (already allocated). * * @param l flag to define header size * @param length size of the pbuf's payload * @param type type of the pbuf (only used to treat the pbuf accordingly, as * this function allocates no memory) * @param p pointer to the custom pbuf to initialize (already allocated) * @param payload_mem pointer to the buffer that is used for payload and headers, * must be at least big enough to hold 'length' plus the header size, * may be NULL if set later. * ATTENTION: The caller is responsible for correct alignment of this buffer!! * @param payload_mem_len the size of the 'payload_mem' buffer, must be at least * big enough to hold 'length' plus the header size */ struct pbuf* pbuf_alloced_custom(pbuf_layer l, u16_t length, pbuf_type type, struct pbuf_custom *p, void *payload_mem, u16_t payload_mem_len) { u16_t offset; LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_alloced_custom(length=%"U16_F")\n", length)); /* determine header offset */ switch (l) { case PBUF_TRANSPORT: /* add room for transport (often TCP) layer header */ offset = PBUF_LINK_ENCAPSULATION_HLEN + PBUF_LINK_HLEN + PBUF_IP_HLEN + PBUF_TRANSPORT_HLEN; break; case PBUF_IP: /* add room for IP layer header */ offset = PBUF_LINK_ENCAPSULATION_HLEN + PBUF_LINK_HLEN + PBUF_IP_HLEN; break; case PBUF_LINK: /* add room for link layer header */ offset = PBUF_LINK_ENCAPSULATION_HLEN + PBUF_LINK_HLEN; break; case PBUF_RAW_TX: /* add room for encapsulating link layer headers (e.g. 802.11) */ offset = PBUF_LINK_ENCAPSULATION_HLEN; break; case PBUF_RAW: offset = 0; break; default: LWIP_ASSERT("pbuf_alloced_custom: bad pbuf layer", 0); return NULL; } if (LWIP_MEM_ALIGN_SIZE(offset) + length > payload_mem_len) { LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_LEVEL_WARNING, ("pbuf_alloced_custom(length=%"U16_F") buffer too short\n", length)); return NULL; } p->pbuf.next = NULL; if (payload_mem != NULL) { p->pbuf.payload = (u8_t *)payload_mem + LWIP_MEM_ALIGN_SIZE(offset); } else { p->pbuf.payload = NULL; } p->pbuf.flags = PBUF_FLAG_IS_CUSTOM; p->pbuf.len = p->pbuf.tot_len = length; p->pbuf.type = type; p->pbuf.ref = 1; return &p->pbuf; } #endif /* LWIP_SUPPORT_CUSTOM_PBUF */ /** * @ingroup pbuf * Shrink a pbuf chain to a desired length. * * @param p pbuf to shrink. * @param new_len desired new length of pbuf chain * * Depending on the desired length, the first few pbufs in a chain might * be skipped and left unchanged. The new last pbuf in the chain will be * resized, and any remaining pbufs will be freed. * * @note If the pbuf is ROM/REF, only the ->tot_len and ->len fields are adjusted. * @note May not be called on a packet queue. * * @note Despite its name, pbuf_realloc cannot grow the size of a pbuf (chain). 
*/ void pbuf_realloc(struct pbuf *p, u16_t new_len) { struct pbuf *q; u16_t rem_len; /* remaining length */ s32_t grow; LWIP_ASSERT("pbuf_realloc: p != NULL", p != NULL); LWIP_ASSERT("pbuf_realloc: sane p->type", p->type == PBUF_POOL || p->type == PBUF_ROM || p->type == PBUF_RAM || p->type == PBUF_REF); /* desired length larger than current length? */ if (new_len >= p->tot_len) { /* enlarging not yet supported */ return; } /* the pbuf chain grows by (new_len - p->tot_len) bytes * (which may be negative in case of shrinking) */ grow = new_len - p->tot_len; /* first, step over any pbufs that should remain in the chain */ rem_len = new_len; q = p; /* should this pbuf be kept? */ while (rem_len > q->len) { /* decrease remaining length by pbuf length */ rem_len -= q->len; /* decrease total length indicator */ LWIP_ASSERT("grow < max_u16_t", grow < 0xffff); q->tot_len += (u16_t)grow; /* proceed to next pbuf in chain */ q = q->next; LWIP_ASSERT("pbuf_realloc: q != NULL", q != NULL); } /* we have now reached the new last pbuf (in q) */ /* rem_len == desired length for pbuf q */ /* shrink allocated memory for PBUF_RAM */ /* (other types merely adjust their length fields */ if ((q->type == PBUF_RAM) && (rem_len != q->len) #if LWIP_SUPPORT_CUSTOM_PBUF && ((q->flags & PBUF_FLAG_IS_CUSTOM) == 0) #endif /* LWIP_SUPPORT_CUSTOM_PBUF */ ) { /* reallocate and adjust the length of the pbuf that will be split */ q = (struct pbuf *)mem_trim(q, (u16_t)((u8_t *)q->payload - (u8_t *)q) + rem_len); LWIP_ASSERT("mem_trim returned q == NULL", q != NULL); } /* adjust length fields for new last pbuf */ q->len = rem_len; q->tot_len = q->len; /* any remaining pbufs in chain? */ if (q->next != NULL) { /* free remaining pbufs in chain */ pbuf_free(q->next); } /* q is last packet in chain */ q->next = NULL; } /** * Adjusts the payload pointer to hide or reveal headers in the payload. * @see pbuf_header. * * @param p pbuf to change the header size. * @param header_size_increment Number of bytes to increment header size. * @param force Allow 'header_size_increment > 0' for PBUF_REF/PBUF_ROM types * * @return non-zero on failure, zero on success. * */ static u8_t pbuf_header_impl(struct pbuf *p, s16_t header_size_increment, u8_t force) { u16_t type; void *payload; u16_t increment_magnitude; LWIP_ASSERT("p != NULL", p != NULL); if ((header_size_increment == 0) || (p == NULL)) { return 0; } if (header_size_increment < 0) { increment_magnitude = (u16_t)-header_size_increment; /* Check that we aren't going to move off the end of the pbuf */ LWIP_ERROR("increment_magnitude <= p->len", (increment_magnitude <= p->len), return 1;); } else { increment_magnitude = (u16_t)header_size_increment; #if 0 /* Can't assert these as some callers speculatively call pbuf_header() to see if it's OK. Will return 1 below instead. */ /* Check that we've got the correct type of pbuf to work with */ LWIP_ASSERT("p->type == PBUF_RAM || p->type == PBUF_POOL", p->type == PBUF_RAM || p->type == PBUF_POOL); /* Check that we aren't going to move off the beginning of the pbuf */ LWIP_ASSERT("p->payload - increment_magnitude >= p + SIZEOF_STRUCT_PBUF", (u8_t *)p->payload - increment_magnitude >= (u8_t *)p + SIZEOF_STRUCT_PBUF); #endif } type = p->type; /* remember current payload pointer */ payload = p->payload; /* pbuf types containing payloads? */ if (type == PBUF_RAM || type == PBUF_POOL) { /* set new payload pointer */ p->payload = (u8_t *)p->payload - header_size_increment; /* boundary check fails? 
*/ if ((u8_t *)p->payload < (u8_t *)p + SIZEOF_STRUCT_PBUF) { LWIP_DEBUGF( PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_header: failed as %p < %p (not enough space for new header size)\n", (void *)p->payload, (void *)((u8_t *)p + SIZEOF_STRUCT_PBUF))); /* restore old payload pointer */ p->payload = payload; /* bail out unsuccessfully */ return 1; } /* pbuf types referring to external payloads? */ } else if (type == PBUF_REF || type == PBUF_ROM) { /* hide a header in the payload? */ if ((header_size_increment < 0) && (increment_magnitude <= p->len)) { /* increase payload pointer */ p->payload = (u8_t *)p->payload - header_size_increment; } else if ((header_size_increment > 0) && force) { p->payload = (u8_t *)p->payload - header_size_increment; } else { /* cannot expand payload to front (yet!) * bail out unsuccessfully */ return 1; } } else { /* Unknown type */ LWIP_ASSERT("bad pbuf type", 0); return 1; } /* modify pbuf length fields */ p->len += header_size_increment; p->tot_len += header_size_increment; LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_header: old %p new %p (%"S16_F")\n", (void *)payload, (void *)p->payload, header_size_increment)); return 0; } /** * Adjusts the payload pointer to hide or reveal headers in the payload. * * Adjusts the ->payload pointer so that space for a header * (dis)appears in the pbuf payload. * * The ->payload, ->tot_len and ->len fields are adjusted. * * @param p pbuf to change the header size. * @param header_size_increment Number of bytes to increment header size which * increases the size of the pbuf. New space is on the front. * (Using a negative value decreases the header size.) * If hdr_size_inc is 0, this function does nothing and returns successful. * * PBUF_ROM and PBUF_REF type buffers cannot have their sizes increased, so * the call will fail. A check is made that the increase in header size does * not move the payload pointer in front of the start of the buffer. * @return non-zero on failure, zero on success. * */ u8_t pbuf_header(struct pbuf *p, s16_t header_size_increment) { return pbuf_header_impl(p, header_size_increment, 0); } /** * Same as pbuf_header but does not check if 'header_size > 0' is allowed. * This is used internally only, to allow PBUF_REF for RX. */ u8_t pbuf_header_force(struct pbuf *p, s16_t header_size_increment) { return pbuf_header_impl(p, header_size_increment, 1); } /** * @ingroup pbuf * Dereference a pbuf chain or queue and deallocate any no-longer-used * pbufs at the head of this chain or queue. * * Decrements the pbuf reference count. If it reaches zero, the pbuf is * deallocated. * * For a pbuf chain, this is repeated for each pbuf in the chain, * up to the first pbuf which has a non-zero reference count after * decrementing. So, when all reference counts are one, the whole * chain is free'd. * * @param p The pbuf (chain) to be dereferenced. * * @return the number of pbufs that were de-allocated * from the head of the chain. * * @note MUST NOT be called on a packet queue (Not verified to work yet). * @note the reference counter of a pbuf equals the number of pointers * that refer to the pbuf (or into the pbuf). * * @internal examples: * * Assuming existing chains a->b->c with the following reference * counts, calling pbuf_free(a) results in: * * 1->2->3 becomes ...1->3 * 3->3->3 becomes 2->3->3 * 1->1->2 becomes ......1 * 2->1->1 becomes 1->1->1 * 1->1->1 becomes ....... 
* */ u8_t pbuf_free(struct pbuf *p) { u16_t type; struct pbuf *q; u8_t count; if (p == NULL) { LWIP_ASSERT("p != NULL", p != NULL); /* if assertions are disabled, proceed with debug output */ LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("pbuf_free(p == NULL) was called.\n")); return 0; } LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_free(%p)\n", (void *)p)); PERF_START; LWIP_ASSERT("pbuf_free: sane type", p->type == PBUF_RAM || p->type == PBUF_ROM || p->type == PBUF_REF || p->type == PBUF_POOL); count = 0; /* de-allocate all consecutive pbufs from the head of the chain that * obtain a zero reference count after decrementing*/ while (p != NULL) { u16_t ref; SYS_ARCH_DECL_PROTECT(old_level); /* Since decrementing ref cannot be guaranteed to be a single machine operation * we must protect it. We put the new ref into a local variable to prevent * further protection. */ SYS_ARCH_PROTECT(old_level); /* all pbufs in a chain are referenced at least once */ LWIP_ASSERT("pbuf_free: p->ref > 0", p->ref > 0); /* decrease reference count (number of pointers to pbuf) */ ref = --(p->ref); SYS_ARCH_UNPROTECT(old_level); /* this pbuf is no longer referenced to? */ if (ref == 0) { /* remember next pbuf in chain for next iteration */ q = p->next; LWIP_DEBUGF( PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_free: deallocating %p\n", (void *)p)); type = p->type; #if LWIP_SUPPORT_CUSTOM_PBUF /* is this a custom pbuf? */ if ((p->flags & PBUF_FLAG_IS_CUSTOM) != 0) { struct pbuf_custom *pc = (struct pbuf_custom*)p; LWIP_ASSERT("pc->custom_free_function != NULL", pc->custom_free_function != NULL); pc->custom_free_function(p); } else #endif /* LWIP_SUPPORT_CUSTOM_PBUF */ { /* is this a pbuf from the pool? */ if (type == PBUF_POOL) { memp_free(MEMP_PBUF_POOL, p); /* is this a ROM or RAM referencing pbuf? */ } else if (type == PBUF_ROM || type == PBUF_REF) { memp_free(MEMP_PBUF, p); /* type == PBUF_RAM */ } else { mem_free(p); } } count++; /* proceed to next pbuf */ p = q; /* p->ref > 0, this pbuf is still referenced to */ /* (and so the remaining pbufs in chain as well) */ } else { LWIP_DEBUGF( PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_free: %p has ref %"U16_F", ending here.\n", (void *)p, ref)); /* stop walking through the chain */ p = NULL; } } PERF_STOP("pbuf_free"); /* return number of de-allocated pbufs */ return count; } /** * Count number of pbufs in a chain * * @param p first pbuf of chain * @return the number of pbufs in a chain */ u16_t pbuf_clen(const struct pbuf *p) { u16_t len; len = 0; while (p != NULL) { ++len; p = p->next; } return len; } /** * @ingroup pbuf * Increment the reference count of the pbuf. * * @param p pbuf to increase reference counter of * */ void pbuf_ref(struct pbuf *p) { /* pbuf given? */ if (p != NULL) { SYS_ARCH_INC(p->ref, 1); LWIP_ASSERT("pbuf ref overflow", p->ref > 0); } } /** * @ingroup pbuf * Concatenate two pbufs (each may be a pbuf chain) and take over * the caller's reference of the tail pbuf. * * @note The caller MAY NOT reference the tail pbuf afterwards. * Use pbuf_chain() for that purpose. 
* * @see pbuf_chain() */ void pbuf_cat(struct pbuf *h, struct pbuf *t) { struct pbuf *p; LWIP_ERROR("(h != NULL) && (t != NULL) (programmer violates API)", ((h != NULL) && (t != NULL)), return;); /* proceed to last pbuf of chain */ for (p = h; p->next != NULL; p = p->next) { /* add total length of second chain to all totals of first chain */ p->tot_len += t->tot_len; } /* { p is last pbuf of first h chain, p->next == NULL } */ LWIP_ASSERT("p->tot_len == p->len (of last pbuf in chain)", p->tot_len == p->len); LWIP_ASSERT("p->next == NULL", p->next == NULL); /* add total length of second chain to last pbuf total of first chain */ p->tot_len += t->tot_len; /* chain last pbuf of head (p) with first of tail (t) */ p->next = t; /* p->next now references t, but the caller will drop its reference to t, * so netto there is no change to the reference count of t. */ } /** * @ingroup pbuf * Chain two pbufs (or pbuf chains) together. * * The caller MUST call pbuf_free(t) once it has stopped * using it. Use pbuf_cat() instead if you no longer use t. * * @param h head pbuf (chain) * @param t tail pbuf (chain) * @note The pbufs MUST belong to the same packet. * @note MAY NOT be called on a packet queue. * * The ->tot_len fields of all pbufs of the head chain are adjusted. * The ->next field of the last pbuf of the head chain is adjusted. * The ->ref field of the first pbuf of the tail chain is adjusted. * */ void pbuf_chain(struct pbuf *h, struct pbuf *t) { pbuf_cat(h, t); /* t is now referenced by h */ pbuf_ref(t); LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_chain: %p references %p\n", (void *)h, (void *)t)); } /** * Dechains the first pbuf from its succeeding pbufs in the chain. * * Makes p->tot_len field equal to p->len. * @param p pbuf to dechain * @return remainder of the pbuf chain, or NULL if it was de-allocated. * @note May not be called on a packet queue. */ struct pbuf * pbuf_dechain(struct pbuf *p) { struct pbuf *q; u8_t tail_gone = 1; /* tail */ q = p->next; /* pbuf has successor in chain? */ if (q != NULL) { /* assert tot_len invariant: (p->tot_len == p->len + (p->next? p->next->tot_len: 0) */ LWIP_ASSERT("p->tot_len == p->len + q->tot_len", q->tot_len == p->tot_len - p->len); /* enforce invariant if assertion is disabled */ q->tot_len = p->tot_len - p->len; /* decouple pbuf from remainder */ p->next = NULL; /* total length of pbuf p is its own length only */ p->tot_len = p->len; /* q is no longer referenced by p, free it */ LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_dechain: unreferencing %p\n", (void *)q)); tail_gone = pbuf_free(q); if (tail_gone > 0) { LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_dechain: deallocated %p (as it is no longer referenced)\n", (void *)q)); } /* return remaining tail or NULL if deallocated */ } /* assert tot_len invariant: (p->tot_len == p->len + (p->next? p->next->tot_len: 0) */ LWIP_ASSERT("p->tot_len == p->len", p->tot_len == p->len); return ((tail_gone > 0) ? NULL : q); } /** * @ingroup pbuf * Create PBUF_RAM copies of pbufs. * * Used to queue packets on behalf of the lwIP stack, such as * ARP based queueing. * * @note You MUST explicitly use p = pbuf_take(p); * * @note Only one packet is copied, no packet queue! 
* * @param p_to pbuf destination of the copy * @param p_from pbuf source of the copy * * @return ERR_OK if pbuf was copied * ERR_ARG if one of the pbufs is NULL or p_to is not big * enough to hold p_from */ err_t pbuf_copy(struct pbuf *p_to, const struct pbuf *p_from) { u16_t offset_to=0, offset_from=0, len; LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_copy(%p, %p)\n", (const void*)p_to, (const void*)p_from)); /* is the target big enough to hold the source? */ LWIP_ERROR("pbuf_copy: target not big enough to hold source", ((p_to != NULL) && (p_from != NULL) && (p_to->tot_len >= p_from->tot_len)), return ERR_ARG;); /* iterate through pbuf chain */ do { /* copy one part of the original chain */ if ((p_to->len - offset_to) >= (p_from->len - offset_from)) { /* complete current p_from fits into current p_to */ len = p_from->len - offset_from; } else { /* current p_from does not fit into current p_to */ len = p_to->len - offset_to; } MEMCPY((u8_t*)p_to->payload + offset_to, (u8_t*)p_from->payload + offset_from, len); offset_to += len; offset_from += len; LWIP_ASSERT("offset_to <= p_to->len", offset_to <= p_to->len); LWIP_ASSERT("offset_from <= p_from->len", offset_from <= p_from->len); if (offset_from >= p_from->len) { /* on to next p_from (if any) */ offset_from = 0; p_from = p_from->next; } if (offset_to == p_to->len) { /* on to next p_to (if any) */ offset_to = 0; p_to = p_to->next; LWIP_ERROR("p_to != NULL", (p_to != NULL) || (p_from == NULL) , return ERR_ARG;); } if ((p_from != NULL) && (p_from->len == p_from->tot_len)) { /* don't copy more than one packet! */ LWIP_ERROR("pbuf_copy() does not allow packet queues!", (p_from->next == NULL), return ERR_VAL;); } if ((p_to != NULL) && (p_to->len == p_to->tot_len)) { /* don't copy more than one packet! */ LWIP_ERROR("pbuf_copy() does not allow packet queues!", (p_to->next == NULL), return ERR_VAL;); } } while (p_from); LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_copy: end of chain reached.\n")); return ERR_OK; } /** * @ingroup pbuf * Copy (part of) the contents of a packet buffer * to an application supplied buffer. * * @param buf the pbuf from which to copy data * @param dataptr the application supplied buffer * @param len length of data to copy (dataptr must be big enough). No more * than buf->tot_len will be copied, irrespective of len * @param offset offset into the packet buffer from where to begin copying len bytes * @return the number of bytes copied, or 0 on failure */ u16_t pbuf_copy_partial(const struct pbuf *buf, void *dataptr, u16_t len, u16_t offset) { const struct pbuf *p; u16_t left; u16_t buf_copy_len; u16_t copied_total = 0; LWIP_ERROR("pbuf_copy_partial: invalid buf", (buf != NULL), return 0;); LWIP_ERROR("pbuf_copy_partial: invalid dataptr", (dataptr != NULL), return 0;); left = 0; if ((buf == NULL) || (dataptr == NULL)) { return 0; } /* Note some systems use byte copy if dataptr or one of the pbuf payload pointers are unaligned. */ for (p = buf; len != 0 && p != NULL; p = p->next) { if ((offset != 0) && (offset >= p->len)) { /* don't copy from this buffer -> on to the next */ offset -= p->len; } else { /* copy from this buffer. maybe only partially. 
*/ buf_copy_len = p->len - offset; if (buf_copy_len > len) { buf_copy_len = len; } /* copy the necessary parts of the buffer */ MEMCPY(&((char*)dataptr)[left], &((char*)p->payload)[offset], buf_copy_len); copied_total += buf_copy_len; left += buf_copy_len; len -= buf_copy_len; offset = 0; } } return copied_total; } #if LWIP_TCP && TCP_QUEUE_OOSEQ && LWIP_WND_SCALE /** * This method modifies a 'pbuf chain', so that its total length is * smaller than 64K. The remainder of the original pbuf chain is stored * in *rest. * This function never creates new pbufs, but splits an existing chain * in two parts. The tot_len of the modified packet queue will likely be * smaller than 64K. * 'packet queues' are not supported by this function. * * @param p the pbuf queue to be split * @param rest pointer to store the remainder (after the first 64K) */ void pbuf_split_64k(struct pbuf *p, struct pbuf **rest) { *rest = NULL; if ((p != NULL) && (p->next != NULL)) { u16_t tot_len_front = p->len; struct pbuf *i = p; struct pbuf *r = p->next; /* continue until the total length (summed up as u16_t) overflows */ while ((r != NULL) && ((u16_t)(tot_len_front + r->len) > tot_len_front)) { tot_len_front += r->len; i = r; r = r->next; } /* i now points to last packet of the first segment. Set next pointer to NULL */ i->next = NULL; if (r != NULL) { /* Update the tot_len field in the first part */ for (i = p; i != NULL; i = i->next) { i->tot_len -= r->tot_len; LWIP_ASSERT("tot_len/len mismatch in last pbuf", (i->next != NULL) || (i->tot_len == i->len)); } if (p->flags & PBUF_FLAG_TCP_FIN) { r->flags |= PBUF_FLAG_TCP_FIN; } /* tot_len field in rest does not need modifications */ /* reference counters do not need modifications */ *rest = r; } } } #endif /* LWIP_TCP && TCP_QUEUE_OOSEQ && LWIP_WND_SCALE */ /* Actual implementation of pbuf_skip() but returning const pointer... */ static const struct pbuf* pbuf_skip_const(const struct pbuf* in, u16_t in_offset, u16_t* out_offset) { u16_t offset_left = in_offset; const struct pbuf* q = in; /* get the correct pbuf */ while ((q != NULL) && (q->len <= offset_left)) { offset_left -= q->len; q = q->next; } if (out_offset != NULL) { *out_offset = offset_left; } return q; } /** * @ingroup pbuf * Skip a number of bytes at the start of a pbuf * * @param in input pbuf * @param in_offset offset to skip * @param out_offset resulting offset in the returned pbuf * @return the pbuf in the queue where the offset is */ struct pbuf* pbuf_skip(struct pbuf* in, u16_t in_offset, u16_t* out_offset) { const struct pbuf* out = pbuf_skip_const(in, in_offset, out_offset); return LWIP_CONST_CAST(struct pbuf*, out); } /** * @ingroup pbuf * Copy application supplied data into a pbuf. * This function can only be used to copy the equivalent of buf->tot_len data. 
* * @param buf pbuf to fill with data * @param dataptr application supplied data buffer * @param len length of the application supplied data buffer * * @return ERR_OK if successful, ERR_MEM if the pbuf is not big enough */ err_t pbuf_take(struct pbuf *buf, const void *dataptr, u16_t len) { struct pbuf *p; u16_t buf_copy_len; u16_t total_copy_len = len; u16_t copied_total = 0; LWIP_ERROR("pbuf_take: invalid buf", (buf != NULL), return ERR_ARG;); LWIP_ERROR("pbuf_take: invalid dataptr", (dataptr != NULL), return ERR_ARG;); LWIP_ERROR("pbuf_take: buf not large enough", (buf->tot_len >= len), return ERR_MEM;); if ((buf == NULL) || (dataptr == NULL) || (buf->tot_len < len)) { return ERR_ARG; } /* Note some systems use byte copy if dataptr or one of the pbuf payload pointers are unaligned. */ for (p = buf; total_copy_len != 0; p = p->next) { LWIP_ASSERT("pbuf_take: invalid pbuf", p != NULL); buf_copy_len = total_copy_len; if (buf_copy_len > p->len) { /* this pbuf cannot hold all remaining data */ buf_copy_len = p->len; } /* copy the necessary parts of the buffer */ MEMCPY(p->payload, &((const char*)dataptr)[copied_total], buf_copy_len); total_copy_len -= buf_copy_len; copied_total += buf_copy_len; } LWIP_ASSERT("did not copy all data", total_copy_len == 0 && copied_total == len); return ERR_OK; } /** * @ingroup pbuf * Same as pbuf_take() but puts data at an offset * * @param buf pbuf to fill with data * @param dataptr application supplied data buffer * @param len length of the application supplied data buffer * @param offset offset in pbuf where to copy dataptr to * * @return ERR_OK if successful, ERR_MEM if the pbuf is not big enough */ err_t pbuf_take_at(struct pbuf *buf, const void *dataptr, u16_t len, u16_t offset) { u16_t target_offset; struct pbuf* q = pbuf_skip(buf, offset, &target_offset); /* return requested data if pbuf is OK */ if ((q != NULL) && (q->tot_len >= target_offset + len)) { u16_t remaining_len = len; const u8_t* src_ptr = (const u8_t*)dataptr; /* copy the part that goes into the first pbuf */ u16_t first_copy_len = LWIP_MIN(q->len - target_offset, len); MEMCPY(((u8_t*)q->payload) + target_offset, dataptr, first_copy_len); remaining_len -= first_copy_len; src_ptr += first_copy_len; if (remaining_len > 0) { return pbuf_take(q->next, src_ptr, remaining_len); } return ERR_OK; } return ERR_MEM; } /** * @ingroup pbuf * Creates a single pbuf out of a queue of pbufs. * * @remark: Either the source pbuf 'p' is freed by this function or the original * pbuf 'p' is returned, therefore the caller has to check the result! * * @param p the source pbuf * @param layer pbuf_layer of the new pbuf * * @return a new, single pbuf (p->next is NULL) * or the old pbuf if allocation fails */ struct pbuf* pbuf_coalesce(struct pbuf *p, pbuf_layer layer) { struct pbuf *q; err_t err; if (p->next == NULL) { return p; } q = pbuf_alloc(layer, p->tot_len, PBUF_RAM); if (q == NULL) { /* @todo: what do we do now? */ return p; } err = pbuf_copy(q, p); LWIP_UNUSED_ARG(err); /* in case of LWIP_NOASSERT */ LWIP_ASSERT("pbuf_copy failed", err == ERR_OK); pbuf_free(p); return q; } #if LWIP_CHECKSUM_ON_COPY /** * Copies data into a single pbuf (*not* into a pbuf queue!) 
and updates * the checksum while copying * * @param p the pbuf to copy data into * @param start_offset offset of p->payload where to copy the data to * @param dataptr data to copy into the pbuf * @param len length of data to copy into the pbuf * @param chksum pointer to the checksum which is updated * @return ERR_OK if successful, another error if the data does not fit * within the (first) pbuf (no pbuf queues!) */ err_t pbuf_fill_chksum(struct pbuf *p, u16_t start_offset, const void *dataptr, u16_t len, u16_t *chksum) { u32_t acc; u16_t copy_chksum; char *dst_ptr; LWIP_ASSERT("p != NULL", p != NULL); LWIP_ASSERT("dataptr != NULL", dataptr != NULL); LWIP_ASSERT("chksum != NULL", chksum != NULL); LWIP_ASSERT("len != 0", len != 0); if ((start_offset >= p->len) || (start_offset + len > p->len)) { return ERR_ARG; } dst_ptr = ((char*)p->payload) + start_offset; copy_chksum = LWIP_CHKSUM_COPY(dst_ptr, dataptr, len); if ((start_offset & 1) != 0) { copy_chksum = SWAP_BYTES_IN_WORD(copy_chksum); } acc = *chksum; acc += copy_chksum; *chksum = FOLD_U32T(acc); return ERR_OK; } #endif /* LWIP_CHECKSUM_ON_COPY */ /** * @ingroup pbuf * Get one byte from the specified position in a pbuf * WARNING: returns zero for offset >= p->tot_len * * @param p pbuf to parse * @param offset offset into p of the byte to return * @return byte at an offset into p OR ZERO IF 'offset' >= p->tot_len */ u8_t pbuf_get_at(const struct pbuf* p, u16_t offset) { int ret = pbuf_try_get_at(p, offset); if (ret >= 0) { return (u8_t)ret; } return 0; } /** * @ingroup pbuf * Get one byte from the specified position in a pbuf * * @param p pbuf to parse * @param offset offset into p of the byte to return * @return byte at an offset into p [0..0xFF] OR negative if 'offset' >= p->tot_len */ int pbuf_try_get_at(const struct pbuf* p, u16_t offset) { u16_t q_idx; const struct pbuf* q = pbuf_skip_const(p, offset, &q_idx); /* return requested data if pbuf is OK */ if ((q != NULL) && (q->len > q_idx)) { return ((u8_t*)q->payload)[q_idx]; } return -1; } /** * @ingroup pbuf * Put one byte to the specified position in a pbuf * WARNING: silently ignores offset >= p->tot_len * * @param p pbuf to fill * @param offset offset into p of the byte to write * @param data byte to write at an offset into p */ void pbuf_put_at(struct pbuf* p, u16_t offset, u8_t data) { u16_t q_idx; struct pbuf* q = pbuf_skip(p, offset, &q_idx); /* write requested data if pbuf is OK */ if ((q != NULL) && (q->len > q_idx)) { ((u8_t*)q->payload)[q_idx] = data; } } /** * @ingroup pbuf * Compare pbuf contents at specified offset with memory s2, both of length n * * @param p pbuf to compare * @param offset offset into p at which to start comparing * @param s2 buffer to compare * @param n length of buffer to compare * @return zero if equal, nonzero otherwise * (0xffff if p is too short, diffoffset+1 otherwise) */ u16_t pbuf_memcmp(const struct pbuf* p, u16_t offset, const void* s2, u16_t n) { u16_t start = offset; const struct pbuf* q = p; u16_t i; /* pbuf long enough to perform check? */ if(p->tot_len < (offset + n)) { return 0xffff; } /* get the correct pbuf from chain. We know it succeeds because of p->tot_len check above. */ while ((q != NULL) && (q->len <= start)) { start -= q->len; q = q->next; } /* return requested data if pbuf is OK */ for (i = 0; i < n; i++) { /* We know pbuf_get_at() succeeds because of p->tot_len check above. 
*/ u8_t a = pbuf_get_at(q, start + i); u8_t b = ((const u8_t*)s2)[i]; if (a != b) { return i+1; } } return 0; } /** * @ingroup pbuf * Find occurrence of mem (with length mem_len) in pbuf p, starting at offset * start_offset. * * @param p pbuf to search, maximum length is 0xFFFE since 0xFFFF is used as * return value 'not found' * @param mem search for the contents of this buffer * @param mem_len length of 'mem' * @param start_offset offset into p at which to start searching * @return 0xFFFF if substr was not found in p or the index where it was found */ u16_t pbuf_memfind(const struct pbuf* p, const void* mem, u16_t mem_len, u16_t start_offset) { u16_t i; u16_t max = p->tot_len - mem_len; if (p->tot_len >= mem_len + start_offset) { for (i = start_offset; i <= max; i++) { u16_t plus = pbuf_memcmp(p, i, mem, mem_len); if (plus == 0) { return i; } } } return 0xFFFF; } /** * Find occurrence of substr with length substr_len in pbuf p, start at offset * start_offset * WARNING: in contrast to strstr(), this one does not stop at the first \0 in * the pbuf/source string! * * @param p pbuf to search, maximum length is 0xFFFE since 0xFFFF is used as * return value 'not found' * @param substr string to search for in p, maximum length is 0xFFFE * @return 0xFFFF if substr was not found in p or the index where it was found */ u16_t pbuf_strstr(const struct pbuf* p, const char* substr) { size_t substr_len; if ((substr == NULL) || (substr[0] == 0) || (p->tot_len == 0xFFFF)) { return 0xFFFF; } substr_len = strlen(substr); if (substr_len >= 0xFFFF) { return 0xFFFF; } return pbuf_memfind(p, substr, (u16_t)substr_len, 0); }
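/*
 * Usage sketch (illustrative only, not lwIP reference code): the typical
 * life cycle of a dynamically allocated pbuf using the functions defined
 * above. The buffer contents and the function name 'pbuf_roundtrip_example'
 * are made up; the usual lwip/pbuf.h include is assumed.
 *
  @code{.c}
void pbuf_roundtrip_example(void)
{
  const char msg[] = "hello";
  char readback[sizeof(msg)];
  u16_t copied;

  // One contiguous PBUF_RAM pbuf with room for transport/IP/link headers
  // in front of a sizeof(msg)-byte payload.
  struct pbuf *p = pbuf_alloc(PBUF_TRANSPORT, sizeof(msg), PBUF_RAM);
  if (p == NULL) {
    return; // out of memory
  }

  // Copy application data into the pbuf (returns ERR_MEM if tot_len is too small).
  if (pbuf_take(p, msg, sizeof(msg)) == ERR_OK) {
    // Read it back into an application buffer, starting at offset 0.
    copied = pbuf_copy_partial(p, readback, sizeof(readback), 0);
    LWIP_ASSERT("pbuf_copy_partial copied everything", copied == sizeof(msg));
  }

  // Drop our reference; the pbuf is deallocated once its ref count hits 0.
  pbuf_free(p);
}
  @endcode
 */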
2301_81045437/classic-platform
communication/lwip-2.0.3/src/core/pbuf.c
C
unknown
48,279
/** * @file * Implementation of raw protocol PCBs for low-level handling of * different types of protocols besides (or overriding) those * already available in lwIP.\n * See also @ref raw_raw * * @defgroup raw_raw RAW * @ingroup callbackstyle_api * Implementation of raw protocol PCBs for low-level handling of * different types of protocols besides (or overriding) those * already available in lwIP.\n * @see @ref raw_api */ /* * Copyright (c) 2001-2004 Swedish Institute of Computer Science. * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY * OF SUCH DAMAGE. * * This file is part of the lwIP TCP/IP stack. 
* * Author: Adam Dunkels <adam@sics.se> * */ #include "lwip/opt.h" #if LWIP_RAW /* don't build if not configured for use in lwipopts.h */ #include "lwip/def.h" #include "lwip/memp.h" #include "lwip/ip_addr.h" #include "lwip/netif.h" #include "lwip/raw.h" #include "lwip/stats.h" #include "lwip/ip6.h" #include "lwip/ip6_addr.h" #include "lwip/inet_chksum.h" #include <string.h> /** The list of RAW PCBs */ static struct raw_pcb *raw_pcbs; static u8_t raw_input_match(struct raw_pcb *pcb, u8_t broadcast) { LWIP_UNUSED_ARG(broadcast); /* in IPv6 only case */ #if LWIP_IPV4 && LWIP_IPV6 /* Dual-stack: PCBs listening to any IP type also listen to any IP address */ if (IP_IS_ANY_TYPE_VAL(pcb->local_ip)) { #if IP_SOF_BROADCAST_RECV if ((broadcast != 0) && !ip_get_option(pcb, SOF_BROADCAST)) { return 0; } #endif /* IP_SOF_BROADCAST_RECV */ return 1; } #endif /* LWIP_IPV4 && LWIP_IPV6 */ /* Only need to check PCB if incoming IP version matches PCB IP version */ if (IP_ADDR_PCB_VERSION_MATCH_EXACT(pcb, ip_current_dest_addr())) { #if LWIP_IPV4 /* Special case: IPv4 broadcast: receive all broadcasts * Note: broadcast variable can only be 1 if it is an IPv4 broadcast */ if (broadcast != 0) { #if IP_SOF_BROADCAST_RECV if (ip_get_option(pcb, SOF_BROADCAST)) #endif /* IP_SOF_BROADCAST_RECV */ { if (ip4_addr_isany(ip_2_ip4(&pcb->local_ip))) { return 1; } } } else #endif /* LWIP_IPV4 */ /* Handle IPv4 and IPv6: catch all or exact match */ if (ip_addr_isany(&pcb->local_ip) || ip_addr_cmp(&pcb->local_ip, ip_current_dest_addr())) { return 1; } } return 0; } /** * Determine if in incoming IP packet is covered by a RAW PCB * and if so, pass it to a user-provided receive callback function. * * Given an incoming IP datagram (as a chain of pbufs) this function * finds a corresponding RAW PCB and calls the corresponding receive * callback function. * * @param p pbuf to be demultiplexed to a RAW PCB. * @param inp network interface on which the datagram was received. * @return - 1 if the packet has been eaten by a RAW PCB receive * callback function. The caller MAY NOT not reference the * packet any longer, and MAY NOT call pbuf_free(). * @return - 0 if packet is not eaten (pbuf is still referenced by the * caller). * */ u8_t raw_input(struct pbuf *p, struct netif *inp) { struct raw_pcb *pcb, *prev; s16_t proto; u8_t eaten = 0; u8_t broadcast = ip_addr_isbroadcast(ip_current_dest_addr(), ip_current_netif()); LWIP_UNUSED_ARG(inp); #if LWIP_IPV6 #if LWIP_IPV4 if (IP_HDR_GET_VERSION(p->payload) == 6) #endif /* LWIP_IPV4 */ { struct ip6_hdr *ip6hdr = (struct ip6_hdr *)p->payload; proto = IP6H_NEXTH(ip6hdr); } #if LWIP_IPV4 else #endif /* LWIP_IPV4 */ #endif /* LWIP_IPV6 */ #if LWIP_IPV4 { proto = IPH_PROTO((struct ip_hdr *)p->payload); } #endif /* LWIP_IPV4 */ prev = NULL; pcb = raw_pcbs; /* loop through all raw pcbs until the packet is eaten by one */ /* this allows multiple pcbs to match against the packet by design */ while ((eaten == 0) && (pcb != NULL)) { if ((pcb->protocol == proto) && raw_input_match(pcb, broadcast)) { /* receive callback function available? */ if (pcb->recv != NULL) { #ifndef LWIP_NOASSERT void* old_payload = p->payload; #endif /* the receive callback function did not eat the packet? 
*/ eaten = pcb->recv(pcb->recv_arg, pcb, p, ip_current_src_addr()); if (eaten != 0) { /* receive function ate the packet */ p = NULL; eaten = 1; if (prev != NULL) { /* move the pcb to the front of raw_pcbs so that is found faster next time */ prev->next = pcb->next; pcb->next = raw_pcbs; raw_pcbs = pcb; } } else { /* sanity-check that the receive callback did not alter the pbuf */ LWIP_ASSERT("raw pcb recv callback altered pbuf payload pointer without eating packet", p->payload == old_payload); } } /* no receive callback function was set for this raw PCB */ } /* drop the packet */ prev = pcb; pcb = pcb->next; } return eaten; } /** * @ingroup raw_raw * Bind a RAW PCB. * * @param pcb RAW PCB to be bound with a local address ipaddr. * @param ipaddr local IP address to bind with. Use IP4_ADDR_ANY to * bind to all local interfaces. * * @return lwIP error code. * - ERR_OK. Successful. No error occurred. * - ERR_USE. The specified IP address is already bound to by * another RAW PCB. * * @see raw_disconnect() */ err_t raw_bind(struct raw_pcb *pcb, const ip_addr_t *ipaddr) { if ((pcb == NULL) || (ipaddr == NULL)) { return ERR_VAL; } ip_addr_set_ipaddr(&pcb->local_ip, ipaddr); return ERR_OK; } /** * @ingroup raw_raw * Connect an RAW PCB. This function is required by upper layers * of lwip. Using the raw api you could use raw_sendto() instead * * This will associate the RAW PCB with the remote address. * * @param pcb RAW PCB to be connected with remote address ipaddr and port. * @param ipaddr remote IP address to connect with. * * @return lwIP error code * * @see raw_disconnect() and raw_sendto() */ err_t raw_connect(struct raw_pcb *pcb, const ip_addr_t *ipaddr) { if ((pcb == NULL) || (ipaddr == NULL)) { return ERR_VAL; } ip_addr_set_ipaddr(&pcb->remote_ip, ipaddr); return ERR_OK; } /** * @ingroup raw_raw * Set the callback function for received packets that match the * raw PCB's protocol and binding. * * The callback function MUST either * - eat the packet by calling pbuf_free() and returning non-zero. The * packet will not be passed to other raw PCBs or other protocol layers. * - not free the packet, and return zero. The packet will be matched * against further PCBs and/or forwarded to another protocol layers. */ void raw_recv(struct raw_pcb *pcb, raw_recv_fn recv, void *recv_arg) { /* remember recv() callback and user data */ pcb->recv = recv; pcb->recv_arg = recv_arg; } /** * @ingroup raw_raw * Send the raw IP packet to the given address. Note that actually you cannot * modify the IP headers (this is inconsistent with the receive callback where * you actually get the IP headers), you can only specify the IP payload here. * It requires some more changes in lwIP. (there will be a raw_send() function * then.) * * @param pcb the raw pcb which to send * @param p the IP payload to send * @param ipaddr the destination address of the IP packet * */ err_t raw_sendto(struct raw_pcb *pcb, struct pbuf *p, const ip_addr_t *ipaddr) { err_t err; struct netif *netif; const ip_addr_t *src_ip; struct pbuf *q; /* q will be sent down the stack */ s16_t header_size; if ((pcb == NULL) || (ipaddr == NULL) || !IP_ADDR_PCB_VERSION_MATCH(pcb, ipaddr)) { return ERR_VAL; } LWIP_DEBUGF(RAW_DEBUG | LWIP_DBG_TRACE, ("raw_sendto\n")); header_size = ( #if LWIP_IPV4 && LWIP_IPV6 IP_IS_V6(ipaddr) ? IP6_HLEN : IP_HLEN); #elif LWIP_IPV4 IP_HLEN); #else IP6_HLEN); #endif /* not enough space to add an IP header to first pbuf in given p chain? 
*/ if (pbuf_header(p, header_size)) { /* allocate header in new pbuf */ q = pbuf_alloc(PBUF_IP, 0, PBUF_RAM); /* new header pbuf could not be allocated? */ if (q == NULL) { LWIP_DEBUGF(RAW_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, ("raw_sendto: could not allocate header\n")); return ERR_MEM; } if (p->tot_len != 0) { /* chain header q in front of given pbuf p */ pbuf_chain(q, p); } /* { first pbuf q points to header pbuf } */ LWIP_DEBUGF(RAW_DEBUG, ("raw_sendto: added header pbuf %p before given pbuf %p\n", (void *)q, (void *)p)); } else { /* first pbuf q equals given pbuf */ q = p; if (pbuf_header(q, -header_size)) { LWIP_ASSERT("Can't restore header we just removed!", 0); return ERR_MEM; } } if(IP_IS_ANY_TYPE_VAL(pcb->local_ip)) { /* Don't call ip_route() with IP_ANY_TYPE */ netif = ip_route(IP46_ADDR_ANY(IP_GET_TYPE(ipaddr)), ipaddr); } else { netif = ip_route(&pcb->local_ip, ipaddr); } if (netif == NULL) { LWIP_DEBUGF(RAW_DEBUG | LWIP_DBG_LEVEL_WARNING, ("raw_sendto: No route to ")); ip_addr_debug_print(RAW_DEBUG | LWIP_DBG_LEVEL_WARNING, ipaddr); /* free any temporary header pbuf allocated by pbuf_header() */ if (q != p) { pbuf_free(q); } return ERR_RTE; } #if IP_SOF_BROADCAST if (IP_IS_V4(ipaddr)) { /* broadcast filter? */ if (!ip_get_option(pcb, SOF_BROADCAST) && ip_addr_isbroadcast(ipaddr, netif)) { LWIP_DEBUGF(RAW_DEBUG | LWIP_DBG_LEVEL_WARNING, ("raw_sendto: SOF_BROADCAST not enabled on pcb %p\n", (void *)pcb)); /* free any temporary header pbuf allocated by pbuf_header() */ if (q != p) { pbuf_free(q); } return ERR_VAL; } } #endif /* IP_SOF_BROADCAST */ if (ip_addr_isany(&pcb->local_ip)) { /* use outgoing network interface IP address as source address */ src_ip = ip_netif_get_local_ip(netif, ipaddr); #if LWIP_IPV6 if (src_ip == NULL) { if (q != p) { pbuf_free(q); } return ERR_RTE; } #endif /* LWIP_IPV6 */ } else { /* use RAW PCB local IP address as source address */ src_ip = &pcb->local_ip; } #if LWIP_IPV6 /* If requested, based on the IPV6_CHECKSUM socket option per RFC3542, compute the checksum and update the checksum in the payload. */ if (IP_IS_V6(ipaddr) && pcb->chksum_reqd) { u16_t chksum = ip6_chksum_pseudo(p, pcb->protocol, p->tot_len, ip_2_ip6(src_ip), ip_2_ip6(ipaddr)); LWIP_ASSERT("Checksum must fit into first pbuf", p->len >= (pcb->chksum_offset + 2)); SMEMCPY(((u8_t *)p->payload) + pcb->chksum_offset, &chksum, sizeof(u16_t)); } #endif NETIF_SET_HWADDRHINT(netif, &pcb->addr_hint); err = ip_output_if(q, src_ip, ipaddr, pcb->ttl, pcb->tos, pcb->protocol, netif); NETIF_SET_HWADDRHINT(netif, NULL); /* did we chain a header earlier? */ if (q != p) { /* free the header */ pbuf_free(q); } return err; } /** * @ingroup raw_raw * Send the raw IP packet to the address given by raw_connect() * * @param pcb the raw pcb which to send * @param p the IP payload to send * */ err_t raw_send(struct raw_pcb *pcb, struct pbuf *p) { return raw_sendto(pcb, p, &pcb->remote_ip); } /** * @ingroup raw_raw * Remove an RAW PCB. * * @param pcb RAW PCB to be removed. The PCB is removed from the list of * RAW PCB's and the data structure is freed from memory. * * @see raw_new() */ void raw_remove(struct raw_pcb *pcb) { struct raw_pcb *pcb2; /* pcb to be removed is first in list? 
*/ if (raw_pcbs == pcb) { /* make list start at 2nd pcb */ raw_pcbs = raw_pcbs->next; /* pcb not 1st in list */ } else { for (pcb2 = raw_pcbs; pcb2 != NULL; pcb2 = pcb2->next) { /* find pcb in raw_pcbs list */ if (pcb2->next != NULL && pcb2->next == pcb) { /* remove pcb from list */ pcb2->next = pcb->next; break; } } } memp_free(MEMP_RAW_PCB, pcb); } /** * @ingroup raw_raw * Create a RAW PCB. * * @return The RAW PCB which was created. NULL if the PCB data structure * could not be allocated. * * @param proto the protocol number of the IPs payload (e.g. IP_PROTO_ICMP) * * @see raw_remove() */ struct raw_pcb * raw_new(u8_t proto) { struct raw_pcb *pcb; LWIP_DEBUGF(RAW_DEBUG | LWIP_DBG_TRACE, ("raw_new\n")); pcb = (struct raw_pcb *)memp_malloc(MEMP_RAW_PCB); /* could allocate RAW PCB? */ if (pcb != NULL) { /* initialize PCB to all zeroes */ memset(pcb, 0, sizeof(struct raw_pcb)); pcb->protocol = proto; pcb->ttl = RAW_TTL; pcb->next = raw_pcbs; raw_pcbs = pcb; } return pcb; } /** * @ingroup raw_raw * Create a RAW PCB for specific IP type. * * @return The RAW PCB which was created. NULL if the PCB data structure * could not be allocated. * * @param type IP address type, see @ref lwip_ip_addr_type definitions. * If you want to listen to IPv4 and IPv6 (dual-stack) packets, * supply @ref IPADDR_TYPE_ANY as argument and bind to @ref IP_ANY_TYPE. * @param proto the protocol number (next header) of the IPv6 packet payload * (e.g. IP6_NEXTH_ICMP6) * * @see raw_remove() */ struct raw_pcb * raw_new_ip_type(u8_t type, u8_t proto) { struct raw_pcb *pcb; pcb = raw_new(proto); #if LWIP_IPV4 && LWIP_IPV6 if (pcb != NULL) { IP_SET_TYPE_VAL(pcb->local_ip, type); IP_SET_TYPE_VAL(pcb->remote_ip, type); } #else /* LWIP_IPV4 && LWIP_IPV6 */ LWIP_UNUSED_ARG(type); #endif /* LWIP_IPV4 && LWIP_IPV6 */ return pcb; } /** This function is called from netif.c when address is changed * * @param old_addr IP address of the netif before change * @param new_addr IP address of the netif after change */ void raw_netif_ip_addr_changed(const ip_addr_t* old_addr, const ip_addr_t* new_addr) { struct raw_pcb* rpcb; if (!ip_addr_isany(old_addr) && !ip_addr_isany(new_addr)) { for (rpcb = raw_pcbs; rpcb != NULL; rpcb = rpcb->next) { /* PCB bound to current local interface address? */ if (ip_addr_cmp(&rpcb->local_ip, old_addr)) { /* The PCB is bound to the old ipaddr and * is set to bound to the new one instead */ ip_addr_copy(rpcb->local_ip, *new_addr); } } } } #endif /* LWIP_RAW */
2301_81045437/classic-platform
communication/lwip-2.0.3/src/core/raw.c
C
unknown
16,070
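The raw.c source above documents the callback-style raw API (raw_new(), raw_recv(), raw_bind(), raw_sendto()). A minimal usage sketch follows, assuming an IPv4 lwIP 2.0.3 build; the callback name, the setup function and the use of protocol number 1 (ICMP) are illustrative and not part of the file above.

#include "lwip/raw.h"
#include "lwip/pbuf.h"
#include "lwip/ip_addr.h"

/* Hypothetical receive callback: per the raw_recv() contract documented above,
   it must either eat the packet (pbuf_free() + return non-zero) or leave the
   pbuf untouched and return zero so other PCBs/layers can see it. */
static u8_t
my_raw_recv(void *arg, struct raw_pcb *pcb, struct pbuf *p, const ip_addr_t *addr)
{
  LWIP_UNUSED_ARG(arg);
  LWIP_UNUSED_ARG(pcb);
  LWIP_UNUSED_ARG(addr);
  pbuf_free(p);   /* consume the packet */
  return 1;       /* non-zero: packet eaten, not passed on */
}

/* Hypothetical setup: create a raw PCB for IP protocol 1 (ICMP), register the
   callback and bind to all local interfaces, as described for raw_bind() above. */
static void
my_raw_setup(void)
{
  struct raw_pcb *pcb = raw_new(1);   /* 1 = ICMP protocol number */
  if (pcb != NULL) {
    raw_recv(pcb, my_raw_recv, NULL);
    raw_bind(pcb, IP4_ADDR_ANY);      /* bind to all local interfaces */
  }
}

As with the rest of the callback-style API, such calls are expected to run in the lwIP core context (main loop or tcpip thread); that constraint comes from general lwIP usage rather than from raw.c itself.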
/** * @file * Statistics module * */ /* * Copyright (c) 2001-2004 Swedish Institute of Computer Science. * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY * OF SUCH DAMAGE. * * This file is part of the lwIP TCP/IP stack. * * Author: Adam Dunkels <adam@sics.se> * */ #include "lwip/opt.h" #if LWIP_STATS /* don't build if not configured for use in lwipopts.h */ #include "lwip/def.h" #include "lwip/stats.h" #include "lwip/mem.h" #include "lwip/debug.h" #include <string.h> struct stats_ lwip_stats; void stats_init(void) { #ifdef LWIP_DEBUG #if MEM_STATS lwip_stats.mem.name = "MEM"; #endif /* MEM_STATS */ #endif /* LWIP_DEBUG */ } #if LWIP_STATS_DISPLAY void stats_display_proto(struct stats_proto *proto, const char *name) { LWIP_PLATFORM_DIAG(("\n%s\n\t", name)); LWIP_PLATFORM_DIAG(("xmit: %"STAT_COUNTER_F"\n\t", proto->xmit)); LWIP_PLATFORM_DIAG(("recv: %"STAT_COUNTER_F"\n\t", proto->recv)); LWIP_PLATFORM_DIAG(("fw: %"STAT_COUNTER_F"\n\t", proto->fw)); LWIP_PLATFORM_DIAG(("drop: %"STAT_COUNTER_F"\n\t", proto->drop)); LWIP_PLATFORM_DIAG(("chkerr: %"STAT_COUNTER_F"\n\t", proto->chkerr)); LWIP_PLATFORM_DIAG(("lenerr: %"STAT_COUNTER_F"\n\t", proto->lenerr)); LWIP_PLATFORM_DIAG(("memerr: %"STAT_COUNTER_F"\n\t", proto->memerr)); LWIP_PLATFORM_DIAG(("rterr: %"STAT_COUNTER_F"\n\t", proto->rterr)); LWIP_PLATFORM_DIAG(("proterr: %"STAT_COUNTER_F"\n\t", proto->proterr)); LWIP_PLATFORM_DIAG(("opterr: %"STAT_COUNTER_F"\n\t", proto->opterr)); LWIP_PLATFORM_DIAG(("err: %"STAT_COUNTER_F"\n\t", proto->err)); LWIP_PLATFORM_DIAG(("cachehit: %"STAT_COUNTER_F"\n", proto->cachehit)); } #if IGMP_STATS || MLD6_STATS void stats_display_igmp(struct stats_igmp *igmp, const char *name) { LWIP_PLATFORM_DIAG(("\n%s\n\t", name)); LWIP_PLATFORM_DIAG(("xmit: %"STAT_COUNTER_F"\n\t", igmp->xmit)); LWIP_PLATFORM_DIAG(("recv: %"STAT_COUNTER_F"\n\t", igmp->recv)); LWIP_PLATFORM_DIAG(("drop: %"STAT_COUNTER_F"\n\t", igmp->drop)); LWIP_PLATFORM_DIAG(("chkerr: %"STAT_COUNTER_F"\n\t", igmp->chkerr)); LWIP_PLATFORM_DIAG(("lenerr: %"STAT_COUNTER_F"\n\t", igmp->lenerr)); LWIP_PLATFORM_DIAG(("memerr: %"STAT_COUNTER_F"\n\t", igmp->memerr)); LWIP_PLATFORM_DIAG(("proterr: %"STAT_COUNTER_F"\n\t", igmp->proterr)); LWIP_PLATFORM_DIAG(("rx_v1: 
%"STAT_COUNTER_F"\n\t", igmp->rx_v1)); LWIP_PLATFORM_DIAG(("rx_group: %"STAT_COUNTER_F"\n\t", igmp->rx_group)); LWIP_PLATFORM_DIAG(("rx_general: %"STAT_COUNTER_F"\n\t", igmp->rx_general)); LWIP_PLATFORM_DIAG(("rx_report: %"STAT_COUNTER_F"\n\t", igmp->rx_report)); LWIP_PLATFORM_DIAG(("tx_join: %"STAT_COUNTER_F"\n\t", igmp->tx_join)); LWIP_PLATFORM_DIAG(("tx_leave: %"STAT_COUNTER_F"\n\t", igmp->tx_leave)); LWIP_PLATFORM_DIAG(("tx_report: %"STAT_COUNTER_F"\n", igmp->tx_report)); } #endif /* IGMP_STATS || MLD6_STATS */ #if MEM_STATS || MEMP_STATS void stats_display_mem(struct stats_mem *mem, const char *name) { LWIP_PLATFORM_DIAG(("\nMEM %s\n\t", name)); LWIP_PLATFORM_DIAG(("avail: %"U32_F"\n\t", (u32_t)mem->avail)); LWIP_PLATFORM_DIAG(("used: %"U32_F"\n\t", (u32_t)mem->used)); LWIP_PLATFORM_DIAG(("max: %"U32_F"\n\t", (u32_t)mem->max)); LWIP_PLATFORM_DIAG(("err: %"U32_F"\n", (u32_t)mem->err)); } #if MEMP_STATS void stats_display_memp(struct stats_mem *mem, int index) { if (index < MEMP_MAX) { stats_display_mem(mem, mem->name); } } #endif /* MEMP_STATS */ #endif /* MEM_STATS || MEMP_STATS */ #if SYS_STATS void stats_display_sys(struct stats_sys *sys) { LWIP_PLATFORM_DIAG(("\nSYS\n\t")); LWIP_PLATFORM_DIAG(("sem.used: %"U32_F"\n\t", (u32_t)sys->sem.used)); LWIP_PLATFORM_DIAG(("sem.max: %"U32_F"\n\t", (u32_t)sys->sem.max)); LWIP_PLATFORM_DIAG(("sem.err: %"U32_F"\n\t", (u32_t)sys->sem.err)); LWIP_PLATFORM_DIAG(("mutex.used: %"U32_F"\n\t", (u32_t)sys->mutex.used)); LWIP_PLATFORM_DIAG(("mutex.max: %"U32_F"\n\t", (u32_t)sys->mutex.max)); LWIP_PLATFORM_DIAG(("mutex.err: %"U32_F"\n\t", (u32_t)sys->mutex.err)); LWIP_PLATFORM_DIAG(("mbox.used: %"U32_F"\n\t", (u32_t)sys->mbox.used)); LWIP_PLATFORM_DIAG(("mbox.max: %"U32_F"\n\t", (u32_t)sys->mbox.max)); LWIP_PLATFORM_DIAG(("mbox.err: %"U32_F"\n", (u32_t)sys->mbox.err)); } #endif /* SYS_STATS */ void stats_display(void) { s16_t i; LINK_STATS_DISPLAY(); ETHARP_STATS_DISPLAY(); IPFRAG_STATS_DISPLAY(); IP6_FRAG_STATS_DISPLAY(); IP_STATS_DISPLAY(); ND6_STATS_DISPLAY(); IP6_STATS_DISPLAY(); IGMP_STATS_DISPLAY(); MLD6_STATS_DISPLAY(); ICMP_STATS_DISPLAY(); ICMP6_STATS_DISPLAY(); UDP_STATS_DISPLAY(); TCP_STATS_DISPLAY(); MEM_STATS_DISPLAY(); for (i = 0; i < MEMP_MAX; i++) { MEMP_STATS_DISPLAY(i); } SYS_STATS_DISPLAY(); } #endif /* LWIP_STATS_DISPLAY */ #endif /* LWIP_STATS */
2301_81045437/classic-platform
communication/lwip-2.0.3/src/core/stats.c
C
unknown
6,270
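stats.c above compiles to nothing unless LWIP_STATS is enabled, and stats_display() only produces output when LWIP_STATS_DISPLAY is also set. A small sketch of the corresponding lwipopts.h settings and a call site; the wrapper function name is illustrative, and where the options live depends on the port.

/* lwipopts.h (sketch): enable statistics collection and the display helpers */
#define LWIP_STATS          1
#define LWIP_STATS_DISPLAY  1

/* application/debug code, e.g. a periodic task or CLI command */
#include "lwip/stats.h"

/* Dump all enabled counters (per-protocol, memory pools, sys objects)
   through LWIP_PLATFORM_DIAG, as implemented by stats_display() above. */
void my_dump_lwip_stats(void)
{
  stats_display();
}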
/** * @file * lwIP Operating System abstraction * */ /* * Copyright (c) 2001-2004 Swedish Institute of Computer Science. * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY * OF SUCH DAMAGE. * * This file is part of the lwIP TCP/IP stack. * * Author: Adam Dunkels <adam@sics.se> * */ /** * @defgroup sys_layer Porting (system abstraction layer) * @ingroup lwip * @verbinclude "sys_arch.txt" * * @defgroup sys_os OS abstraction layer * @ingroup sys_layer * No need to implement functions in this section in NO_SYS mode. * * @defgroup sys_sem Semaphores * @ingroup sys_os * * @defgroup sys_mutex Mutexes * @ingroup sys_os * Mutexes are recommended to correctly handle priority inversion, * especially if you use LWIP_CORE_LOCKING . * * @defgroup sys_mbox Mailboxes * @ingroup sys_os * * @defgroup sys_time Time * @ingroup sys_layer * * @defgroup sys_prot Critical sections * @ingroup sys_layer * Used to protect short regions of code against concurrent access. * - Your system is a bare-metal system (probably with an RTOS) * and interrupts are under your control: * Implement this as LockInterrupts() / UnlockInterrupts() * - Your system uses an RTOS with deferred interrupt handling from a * worker thread: Implement as a global mutex or lock/unlock scheduler * - Your system uses a high-level OS with e.g. POSIX signals: * Implement as a global mutex * * @defgroup sys_misc Misc * @ingroup sys_os */ #include "lwip/opt.h" #include "lwip/sys.h" /* Most of the functions defined in sys.h must be implemented in the * architecture-dependent file sys_arch.c */ #if !NO_SYS #ifndef sys_msleep /** * Sleep for some ms. Timeouts are NOT processed while sleeping. * * @param ms number of milliseconds to sleep */ void sys_msleep(u32_t ms) { if (ms > 0) { sys_sem_t delaysem; err_t err = sys_sem_new(&delaysem, 0); if (err == ERR_OK) { sys_arch_sem_wait(&delaysem, ms); sys_sem_free(&delaysem); } } } #endif /* sys_msleep */ #endif /* !NO_SYS */
2301_81045437/classic-platform
communication/lwip-2.0.3/src/core/sys.c
C
unknown
3,500
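sys.c above implements the default sys_msleep() on top of a throw-away semaphore and notes that lwIP timeouts are not serviced while sleeping. A usage sketch for an OS-mode (!NO_SYS) port follows; the thread name, stack size and priority are placeholders, and sys_thread_new() is assumed to be provided by the port's sys_arch layer as usual.

#include "lwip/sys.h"

/* Hypothetical worker thread that paces itself with sys_msleep(). */
static void
my_worker_thread(void *arg)
{
  LWIP_UNUSED_ARG(arg);
  for (;;) {
    /* do periodic application work here */
    sys_msleep(500);   /* block ~500 ms; lwIP timeouts are NOT processed here */
  }
}

void
my_start_worker(void)
{
  /* name, entry function, argument, stack size, priority - placeholder values */
  sys_thread_new("worker", my_worker_thread, NULL, 1024, 1);
}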
/** * @file * Transmission Control Protocol for IP * See also @ref tcp_raw * * @defgroup tcp_raw TCP * @ingroup callbackstyle_api * Transmission Control Protocol for IP\n * @see @ref raw_api and @ref netconn * * Common functions for the TCP implementation, such as functinos * for manipulating the data structures and the TCP timer functions. TCP functions * related to input and output is found in tcp_in.c and tcp_out.c respectively.\n */ /* * Copyright (c) 2001-2004 Swedish Institute of Computer Science. * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY * OF SUCH DAMAGE. * * This file is part of the lwIP TCP/IP stack. * * Author: Adam Dunkels <adam@sics.se> * */ #include "lwip/opt.h" #if LWIP_TCP /* don't build if not configured for use in lwipopts.h */ #include "lwip/def.h" #include "lwip/mem.h" #include "lwip/memp.h" #include "lwip/tcp.h" #include "lwip/priv/tcp_priv.h" #include "lwip/debug.h" #include "lwip/stats.h" #include "lwip/ip6.h" #include "lwip/ip6_addr.h" #include "lwip/nd6.h" #include <string.h> #ifdef LWIP_HOOK_FILENAME #include LWIP_HOOK_FILENAME #endif #ifndef TCP_LOCAL_PORT_RANGE_START /* From http://www.iana.org/assignments/port-numbers: "The Dynamic and/or Private Ports are those from 49152 through 65535" */ #define TCP_LOCAL_PORT_RANGE_START 0xc000 #define TCP_LOCAL_PORT_RANGE_END 0xffff #define TCP_ENSURE_LOCAL_PORT_RANGE(port) ((u16_t)(((port) & ~TCP_LOCAL_PORT_RANGE_START) + TCP_LOCAL_PORT_RANGE_START)) #endif #if LWIP_TCP_KEEPALIVE #define TCP_KEEP_DUR(pcb) ((pcb)->keep_cnt * (pcb)->keep_intvl) #define TCP_KEEP_INTVL(pcb) ((pcb)->keep_intvl) #else /* LWIP_TCP_KEEPALIVE */ #define TCP_KEEP_DUR(pcb) TCP_MAXIDLE #define TCP_KEEP_INTVL(pcb) TCP_KEEPINTVL_DEFAULT #endif /* LWIP_TCP_KEEPALIVE */ /* As initial send MSS, we use TCP_MSS but limit it to 536. */ #if TCP_MSS > 536 #define INITIAL_MSS 536 #else #define INITIAL_MSS TCP_MSS #endif static const char * const tcp_state_str[] = { "CLOSED", "LISTEN", "SYN_SENT", "SYN_RCVD", "ESTABLISHED", "FIN_WAIT_1", "FIN_WAIT_2", "CLOSE_WAIT", "CLOSING", "LAST_ACK", "TIME_WAIT" }; /* last local TCP port */ static u16_t tcp_port = TCP_LOCAL_PORT_RANGE_START; /* Incremented every coarse grained timer shot (typically every 500 ms). 
*/ u32_t tcp_ticks; static const u8_t tcp_backoff[13] = { 1, 2, 3, 4, 5, 6, 7, 7, 7, 7, 7, 7, 7}; /* Times per slowtmr hits */ static const u8_t tcp_persist_backoff[7] = { 3, 6, 12, 24, 48, 96, 120 }; /* The TCP PCB lists. */ /** List of all TCP PCBs bound but not yet (connected || listening) */ struct tcp_pcb *tcp_bound_pcbs; /** List of all TCP PCBs in LISTEN state */ union tcp_listen_pcbs_t tcp_listen_pcbs; /** List of all TCP PCBs that are in a state in which * they accept or send data. */ struct tcp_pcb *tcp_active_pcbs; /** List of all TCP PCBs in TIME-WAIT state */ struct tcp_pcb *tcp_tw_pcbs; /** An array with all (non-temporary) PCB lists, mainly used for smaller code size */ struct tcp_pcb ** const tcp_pcb_lists[] = {&tcp_listen_pcbs.pcbs, &tcp_bound_pcbs, &tcp_active_pcbs, &tcp_tw_pcbs}; u8_t tcp_active_pcbs_changed; /** Timer counter to handle calling slow-timer from tcp_tmr() */ static u8_t tcp_timer; static u8_t tcp_timer_ctr; static u16_t tcp_new_port(void); static err_t tcp_close_shutdown_fin(struct tcp_pcb *pcb); /** * Initialize this module. */ void tcp_init(void) { #if LWIP_RANDOMIZE_INITIAL_LOCAL_PORTS && defined(LWIP_RAND) tcp_port = TCP_ENSURE_LOCAL_PORT_RANGE(LWIP_RAND()); #endif /* LWIP_RANDOMIZE_INITIAL_LOCAL_PORTS && defined(LWIP_RAND) */ } /** * Called periodically to dispatch TCP timers. */ void tcp_tmr(void) { /* Call tcp_fasttmr() every 250 ms */ tcp_fasttmr(); if (++tcp_timer & 1) { /* Call tcp_slowtmr() every 500 ms, i.e., every other timer tcp_tmr() is called. */ tcp_slowtmr(); } } #if LWIP_CALLBACK_API || TCP_LISTEN_BACKLOG /** Called when a listen pcb is closed. Iterates one pcb list and removes the * closed listener pcb from pcb->listener if matching. */ static void tcp_remove_listener(struct tcp_pcb *list, struct tcp_pcb_listen *lpcb) { struct tcp_pcb *pcb; for (pcb = list; pcb != NULL; pcb = pcb->next) { if (pcb->listener == lpcb) { pcb->listener = NULL; } } } #endif /** Called when a listen pcb is closed. Iterates all pcb lists and removes the * closed listener pcb from pcb->listener if matching. */ static void tcp_listen_closed(struct tcp_pcb *pcb) { #if LWIP_CALLBACK_API || TCP_LISTEN_BACKLOG size_t i; LWIP_ASSERT("pcb != NULL", pcb != NULL); LWIP_ASSERT("pcb->state == LISTEN", pcb->state == LISTEN); for (i = 1; i < LWIP_ARRAYSIZE(tcp_pcb_lists); i++) { tcp_remove_listener(*tcp_pcb_lists[i], (struct tcp_pcb_listen*)pcb); } #endif LWIP_UNUSED_ARG(pcb); } #if TCP_LISTEN_BACKLOG /** @ingroup tcp_raw * Delay accepting a connection in respect to the listen backlog: * the number of outstanding connections is increased until * tcp_backlog_accepted() is called. * * ATTENTION: the caller is responsible for calling tcp_backlog_accepted() * or else the backlog feature will get out of sync! * * @param pcb the connection pcb which is not fully accepted yet */ void tcp_backlog_delayed(struct tcp_pcb* pcb) { LWIP_ASSERT("pcb != NULL", pcb != NULL); if ((pcb->flags & TF_BACKLOGPEND) == 0) { if (pcb->listener != NULL) { pcb->listener->accepts_pending++; LWIP_ASSERT("accepts_pending != 0", pcb->listener->accepts_pending != 0); pcb->flags |= TF_BACKLOGPEND; } } } /** @ingroup tcp_raw * A delayed-accept a connection is accepted (or closed/aborted): decreases * the number of outstanding connections after calling tcp_backlog_delayed(). * * ATTENTION: the caller is responsible for calling tcp_backlog_accepted() * or else the backlog feature will get out of sync! 
* * @param pcb the connection pcb which is now fully accepted (or closed/aborted) */ void tcp_backlog_accepted(struct tcp_pcb* pcb) { LWIP_ASSERT("pcb != NULL", pcb != NULL); if ((pcb->flags & TF_BACKLOGPEND) != 0) { if (pcb->listener != NULL) { LWIP_ASSERT("accepts_pending != 0", pcb->listener->accepts_pending != 0); pcb->listener->accepts_pending--; pcb->flags &= ~TF_BACKLOGPEND; } } } #endif /* TCP_LISTEN_BACKLOG */ /** * Closes the TX side of a connection held by the PCB. * For tcp_close(), a RST is sent if the application didn't receive all data * (tcp_recved() not called for all data passed to recv callback). * * Listening pcbs are freed and may not be referenced any more. * Connection pcbs are freed if not yet connected and may not be referenced * any more. If a connection is established (at least SYN received or in * a closing state), the connection is closed, and put in a closing state. * The pcb is then automatically freed in tcp_slowtmr(). It is therefore * unsafe to reference it. * * @param pcb the tcp_pcb to close * @return ERR_OK if connection has been closed * another err_t if closing failed and pcb is not freed */ static err_t tcp_close_shutdown(struct tcp_pcb *pcb, u8_t rst_on_unacked_data) { if (rst_on_unacked_data && ((pcb->state == ESTABLISHED) || (pcb->state == CLOSE_WAIT))) { if ((pcb->refused_data != NULL) || (pcb->rcv_wnd != TCP_WND_MAX(pcb))) { /* Not all data received by application, send RST to tell the remote side about this. */ LWIP_ASSERT("pcb->flags & TF_RXCLOSED", pcb->flags & TF_RXCLOSED); /* don't call tcp_abort here: we must not deallocate the pcb since that might not be expected when calling tcp_close */ tcp_rst(pcb->snd_nxt, pcb->rcv_nxt, &pcb->local_ip, &pcb->remote_ip, pcb->local_port, pcb->remote_port); tcp_pcb_purge(pcb); TCP_RMV_ACTIVE(pcb); if (pcb->state == ESTABLISHED) { /* move to TIME_WAIT since we close actively */ pcb->state = TIME_WAIT; TCP_REG(&tcp_tw_pcbs, pcb); } else { /* CLOSE_WAIT: deallocate the pcb since we already sent a RST for it */ if (tcp_input_pcb == pcb) { /* prevent using a deallocated pcb: free it from tcp_input later */ tcp_trigger_input_pcb_close(); } else { memp_free(MEMP_TCP_PCB, pcb); } } return ERR_OK; } } /* - states which free the pcb are handled here, - states which send FIN and change state are handled in tcp_close_shutdown_fin() */ switch (pcb->state) { case CLOSED: /* Closing a pcb in the CLOSED state might seem erroneous, * however, it is in this state once allocated and as yet unused * and the user needs some way to free it should the need arise. * Calling tcp_close() with a pcb that has already been closed, (i.e. twice) * or for a pcb that has been used and then entered the CLOSED state * is erroneous, but this should never happen as the pcb has in those cases * been freed, and so any remaining handles are bogus. 
*/ if (pcb->local_port != 0) { TCP_RMV(&tcp_bound_pcbs, pcb); } memp_free(MEMP_TCP_PCB, pcb); break; case LISTEN: tcp_listen_closed(pcb); tcp_pcb_remove(&tcp_listen_pcbs.pcbs, pcb); memp_free(MEMP_TCP_PCB_LISTEN, pcb); break; case SYN_SENT: TCP_PCB_REMOVE_ACTIVE(pcb); memp_free(MEMP_TCP_PCB, pcb); MIB2_STATS_INC(mib2.tcpattemptfails); break; default: return tcp_close_shutdown_fin(pcb); } return ERR_OK; } static err_t tcp_close_shutdown_fin(struct tcp_pcb *pcb) { err_t err; LWIP_ASSERT("pcb != NULL", pcb != NULL); switch (pcb->state) { case SYN_RCVD: err = tcp_send_fin(pcb); if (err == ERR_OK) { tcp_backlog_accepted(pcb); MIB2_STATS_INC(mib2.tcpattemptfails); pcb->state = FIN_WAIT_1; } break; case ESTABLISHED: err = tcp_send_fin(pcb); if (err == ERR_OK) { MIB2_STATS_INC(mib2.tcpestabresets); pcb->state = FIN_WAIT_1; } break; case CLOSE_WAIT: err = tcp_send_fin(pcb); if (err == ERR_OK) { MIB2_STATS_INC(mib2.tcpestabresets); pcb->state = LAST_ACK; } break; default: /* Has already been closed, do nothing. */ return ERR_OK; } if (err == ERR_OK) { /* To ensure all data has been sent when tcp_close returns, we have to make sure tcp_output doesn't fail. Since we don't really have to ensure all data has been sent when tcp_close returns (unsent data is sent from tcp timer functions, also), we don't care for the return value of tcp_output for now. */ tcp_output(pcb); } else if (err == ERR_MEM) { /* Mark this pcb for closing. Closing is retried from tcp_tmr. */ pcb->flags |= TF_CLOSEPEND; /* We have to return ERR_OK from here to indicate to the callers that this pcb should not be used any more as it will be freed soon via tcp_tmr. This is OK here since sending FIN does not guarantee a time frime for actually freeing the pcb, either (it is left in closure states for remote ACK or timeout) */ return ERR_OK; } return err; } /** * @ingroup tcp_raw * Closes the connection held by the PCB. * * Listening pcbs are freed and may not be referenced any more. * Connection pcbs are freed if not yet connected and may not be referenced * any more. If a connection is established (at least SYN received or in * a closing state), the connection is closed, and put in a closing state. * The pcb is then automatically freed in tcp_slowtmr(). It is therefore * unsafe to reference it (unless an error is returned). * * @param pcb the tcp_pcb to close * @return ERR_OK if connection has been closed * another err_t if closing failed and pcb is not freed */ err_t tcp_close(struct tcp_pcb *pcb) { LWIP_DEBUGF(TCP_DEBUG, ("tcp_close: closing in ")); tcp_debug_print_state(pcb->state); if (pcb->state != LISTEN) { /* Set a flag not to receive any more data... */ pcb->flags |= TF_RXCLOSED; } /* ... and close */ return tcp_close_shutdown(pcb, 1); } /** * @ingroup tcp_raw * Causes all or part of a full-duplex connection of this PCB to be shut down. * This doesn't deallocate the PCB unless shutting down both sides! * Shutting down both sides is the same as calling tcp_close, so if it succeds * (i.e. returns ER_OK), the PCB must not be referenced any more! * * @param pcb PCB to shutdown * @param shut_rx shut down receive side if this is != 0 * @param shut_tx shut down send side if this is != 0 * @return ERR_OK if shutdown succeeded (or the PCB has already been shut down) * another err_t on error. */ err_t tcp_shutdown(struct tcp_pcb *pcb, int shut_rx, int shut_tx) { if (pcb->state == LISTEN) { return ERR_CONN; } if (shut_rx) { /* shut down the receive side: set a flag not to receive any more data... 
*/ pcb->flags |= TF_RXCLOSED; if (shut_tx) { /* shutting down the tx AND rx side is the same as closing for the raw API */ return tcp_close_shutdown(pcb, 1); } /* ... and free buffered data */ if (pcb->refused_data != NULL) { pbuf_free(pcb->refused_data); pcb->refused_data = NULL; } } if (shut_tx) { /* This can't happen twice since if it succeeds, the pcb's state is changed. Only close in these states as the others directly deallocate the PCB */ switch (pcb->state) { case SYN_RCVD: case ESTABLISHED: case CLOSE_WAIT: return tcp_close_shutdown(pcb, (u8_t)shut_rx); default: /* Not (yet?) connected, cannot shutdown the TX side as that would bring us into CLOSED state, where the PCB is deallocated. */ return ERR_CONN; } } return ERR_OK; } /** * Abandons a connection and optionally sends a RST to the remote * host. Deletes the local protocol control block. This is done when * a connection is killed because of shortage of memory. * * @param pcb the tcp_pcb to abort * @param reset boolean to indicate whether a reset should be sent */ void tcp_abandon(struct tcp_pcb *pcb, int reset) { u32_t seqno, ackno; #if LWIP_CALLBACK_API tcp_err_fn errf; #endif /* LWIP_CALLBACK_API */ void *errf_arg; /* pcb->state LISTEN not allowed here */ LWIP_ASSERT("don't call tcp_abort/tcp_abandon for listen-pcbs", pcb->state != LISTEN); /* Figure out on which TCP PCB list we are, and remove us. If we are in an active state, call the receive function associated with the PCB with a NULL argument, and send an RST to the remote end. */ if (pcb->state == TIME_WAIT) { tcp_pcb_remove(&tcp_tw_pcbs, pcb); memp_free(MEMP_TCP_PCB, pcb); } else { int send_rst = 0; u16_t local_port = 0; enum tcp_state last_state; seqno = pcb->snd_nxt; ackno = pcb->rcv_nxt; #if LWIP_CALLBACK_API errf = pcb->errf; #endif /* LWIP_CALLBACK_API */ errf_arg = pcb->callback_arg; if (pcb->state == CLOSED) { if (pcb->local_port != 0) { /* bound, not yet opened */ TCP_RMV(&tcp_bound_pcbs, pcb); } } else { send_rst = reset; local_port = pcb->local_port; TCP_PCB_REMOVE_ACTIVE(pcb); } if (pcb->unacked != NULL) { tcp_segs_free(pcb->unacked); } if (pcb->unsent != NULL) { tcp_segs_free(pcb->unsent); } #if TCP_QUEUE_OOSEQ if (pcb->ooseq != NULL) { tcp_segs_free(pcb->ooseq); } #endif /* TCP_QUEUE_OOSEQ */ tcp_backlog_accepted(pcb); if (send_rst) { LWIP_DEBUGF(TCP_RST_DEBUG, ("tcp_abandon: sending RST\n")); tcp_rst(seqno, ackno, &pcb->local_ip, &pcb->remote_ip, local_port, pcb->remote_port); } last_state = pcb->state; memp_free(MEMP_TCP_PCB, pcb); TCP_EVENT_ERR(last_state, errf, errf_arg, ERR_ABRT); } } /** * @ingroup tcp_raw * Aborts the connection by sending a RST (reset) segment to the remote * host. The pcb is deallocated. This function never fails. * * ATTENTION: When calling this from one of the TCP callbacks, make * sure you always return ERR_ABRT (and never return ERR_ABRT otherwise * or you will risk accessing deallocated memory or memory leaks! * * @param pcb the tcp pcb to abort */ void tcp_abort(struct tcp_pcb *pcb) { tcp_abandon(pcb, 1); } /** * @ingroup tcp_raw * Binds the connection to a local port number and IP address. If the * IP address is not given (i.e., ipaddr == NULL), the IP address of * the outgoing network interface is used instead. * * @param pcb the tcp_pcb to bind (no check is done whether this pcb is * already bound!) 
* @param ipaddr the local ip address to bind to (use IP4_ADDR_ANY to bind * to any local address * @param port the local port to bind to * @return ERR_USE if the port is already in use * ERR_VAL if bind failed because the PCB is not in a valid state * ERR_OK if bound */ err_t tcp_bind(struct tcp_pcb *pcb, const ip_addr_t *ipaddr, u16_t port) { int i; int max_pcb_list = NUM_TCP_PCB_LISTS; struct tcp_pcb *cpcb; #if LWIP_IPV4 /* Don't propagate NULL pointer (IPv4 ANY) to subsequent functions */ if (ipaddr == NULL) { ipaddr = IP4_ADDR_ANY; } #endif /* LWIP_IPV4 */ /* still need to check for ipaddr == NULL in IPv6 only case */ if ((pcb == NULL) || (ipaddr == NULL)) { return ERR_VAL; } LWIP_ERROR("tcp_bind: can only bind in state CLOSED", pcb->state == CLOSED, return ERR_VAL); #if SO_REUSE /* Unless the REUSEADDR flag is set, we have to check the pcbs in TIME-WAIT state, also. We do not dump TIME_WAIT pcb's; they can still be matched by incoming packets using both local and remote IP addresses and ports to distinguish. */ if (ip_get_option(pcb, SOF_REUSEADDR)) { max_pcb_list = NUM_TCP_PCB_LISTS_NO_TIME_WAIT; } #endif /* SO_REUSE */ if (port == 0) { port = tcp_new_port(); if (port == 0) { return ERR_BUF; } } else { /* Check if the address already is in use (on all lists) */ for (i = 0; i < max_pcb_list; i++) { for (cpcb = *tcp_pcb_lists[i]; cpcb != NULL; cpcb = cpcb->next) { if (cpcb->local_port == port) { #if SO_REUSE /* Omit checking for the same port if both pcbs have REUSEADDR set. For SO_REUSEADDR, the duplicate-check for a 5-tuple is done in tcp_connect. */ if (!ip_get_option(pcb, SOF_REUSEADDR) || !ip_get_option(cpcb, SOF_REUSEADDR)) #endif /* SO_REUSE */ { /* @todo: check accept_any_ip_version */ if ((IP_IS_V6(ipaddr) == IP_IS_V6_VAL(cpcb->local_ip)) && (ip_addr_isany(&cpcb->local_ip) || ip_addr_isany(ipaddr) || ip_addr_cmp(&cpcb->local_ip, ipaddr))) { return ERR_USE; } } } } } } if (!ip_addr_isany(ipaddr)) { ip_addr_set(&pcb->local_ip, ipaddr); } pcb->local_port = port; TCP_REG(&tcp_bound_pcbs, pcb); LWIP_DEBUGF(TCP_DEBUG, ("tcp_bind: bind to port %"U16_F"\n", port)); return ERR_OK; } #if LWIP_CALLBACK_API /** * Default accept callback if no accept callback is specified by the user. */ static err_t tcp_accept_null(void *arg, struct tcp_pcb *pcb, err_t err) { LWIP_UNUSED_ARG(arg); LWIP_UNUSED_ARG(err); tcp_abort(pcb); return ERR_ABRT; } #endif /* LWIP_CALLBACK_API */ /** * @ingroup tcp_raw * Set the state of the connection to be LISTEN, which means that it * is able to accept incoming connections. The protocol control block * is reallocated in order to consume less memory. Setting the * connection to LISTEN is an irreversible process. * * @param pcb the original tcp_pcb * @param backlog the incoming connections queue limit * @return tcp_pcb used for listening, consumes less memory. * * @note The original tcp_pcb is freed. This function therefore has to be * called like this: * tpcb = tcp_listen_with_backlog(tpcb, backlog); */ struct tcp_pcb * tcp_listen_with_backlog(struct tcp_pcb *pcb, u8_t backlog) { return tcp_listen_with_backlog_and_err(pcb, backlog, NULL); } /** * @ingroup tcp_raw * Set the state of the connection to be LISTEN, which means that it * is able to accept incoming connections. The protocol control block * is reallocated in order to consume less memory. Setting the * connection to LISTEN is an irreversible process. 
* * @param pcb the original tcp_pcb * @param backlog the incoming connections queue limit * @param err when NULL is returned, this contains the error reason * @return tcp_pcb used for listening, consumes less memory. * * @note The original tcp_pcb is freed. This function therefore has to be * called like this: * tpcb = tcp_listen_with_backlog_and_err(tpcb, backlog, &err); */ struct tcp_pcb * tcp_listen_with_backlog_and_err(struct tcp_pcb *pcb, u8_t backlog, err_t *err) { struct tcp_pcb_listen *lpcb = NULL; err_t res; LWIP_UNUSED_ARG(backlog); LWIP_ERROR("tcp_listen: pcb already connected", pcb->state == CLOSED, res = ERR_CLSD; goto done); /* already listening? */ if (pcb->state == LISTEN) { lpcb = (struct tcp_pcb_listen*)pcb; res = ERR_ALREADY; goto done; } #if SO_REUSE if (ip_get_option(pcb, SOF_REUSEADDR)) { /* Since SOF_REUSEADDR allows reusing a local address before the pcb's usage is declared (listen-/connection-pcb), we have to make sure now that this port is only used once for every local IP. */ for (lpcb = tcp_listen_pcbs.listen_pcbs; lpcb != NULL; lpcb = lpcb->next) { if ((lpcb->local_port == pcb->local_port) && ip_addr_cmp(&lpcb->local_ip, &pcb->local_ip)) { /* this address/port is already used */ lpcb = NULL; res = ERR_USE; goto done; } } } #endif /* SO_REUSE */ lpcb = (struct tcp_pcb_listen *)memp_malloc(MEMP_TCP_PCB_LISTEN); if (lpcb == NULL) { res = ERR_MEM; goto done; } lpcb->callback_arg = pcb->callback_arg; lpcb->local_port = pcb->local_port; lpcb->state = LISTEN; lpcb->prio = pcb->prio; lpcb->so_options = pcb->so_options; lpcb->ttl = pcb->ttl; lpcb->tos = pcb->tos; #if LWIP_IPV4 && LWIP_IPV6 IP_SET_TYPE_VAL(lpcb->remote_ip, pcb->local_ip.type); #endif /* LWIP_IPV4 && LWIP_IPV6 */ ip_addr_copy(lpcb->local_ip, pcb->local_ip); if (pcb->local_port != 0) { TCP_RMV(&tcp_bound_pcbs, pcb); } memp_free(MEMP_TCP_PCB, pcb); #if LWIP_CALLBACK_API lpcb->accept = tcp_accept_null; #endif /* LWIP_CALLBACK_API */ #if TCP_LISTEN_BACKLOG lpcb->accepts_pending = 0; tcp_backlog_set(lpcb, backlog); #endif /* TCP_LISTEN_BACKLOG */ TCP_REG(&tcp_listen_pcbs.pcbs, (struct tcp_pcb *)lpcb); res = ERR_OK; done: if (err != NULL) { *err = res; } return (struct tcp_pcb *)lpcb; } /** * Update the state that tracks the available window space to advertise. * * Returns how much extra window would be advertised if we sent an * update now. */ u32_t tcp_update_rcv_ann_wnd(struct tcp_pcb *pcb) { u32_t new_right_edge = pcb->rcv_nxt + pcb->rcv_wnd; if (TCP_SEQ_GEQ(new_right_edge, pcb->rcv_ann_right_edge + LWIP_MIN((TCP_WND / 2), pcb->mss))) { /* we can advertise more window */ pcb->rcv_ann_wnd = pcb->rcv_wnd; return new_right_edge - pcb->rcv_ann_right_edge; } else { if (TCP_SEQ_GT(pcb->rcv_nxt, pcb->rcv_ann_right_edge)) { /* Can happen due to other end sending out of advertised window, * but within actual available (but not yet advertised) window */ pcb->rcv_ann_wnd = 0; } else { /* keep the right edge of window constant */ u32_t new_rcv_ann_wnd = pcb->rcv_ann_right_edge - pcb->rcv_nxt; #if !LWIP_WND_SCALE LWIP_ASSERT("new_rcv_ann_wnd <= 0xffff", new_rcv_ann_wnd <= 0xffff); #endif pcb->rcv_ann_wnd = (tcpwnd_size_t)new_rcv_ann_wnd; } return 0; } } /** * @ingroup tcp_raw * This function should be called by the application when it has * processed the data. The purpose is to advertise a larger window * when the data has been processed. 
* * @param pcb the tcp_pcb for which data is read * @param len the amount of bytes that have been read by the application */ void tcp_recved(struct tcp_pcb *pcb, u16_t len) { int wnd_inflation; /* pcb->state LISTEN not allowed here */ LWIP_ASSERT("don't call tcp_recved for listen-pcbs", pcb->state != LISTEN); pcb->rcv_wnd += len; if (pcb->rcv_wnd > TCP_WND_MAX(pcb)) { pcb->rcv_wnd = TCP_WND_MAX(pcb); } else if (pcb->rcv_wnd == 0) { /* rcv_wnd overflowed */ if ((pcb->state == CLOSE_WAIT) || (pcb->state == LAST_ACK)) { /* In passive close, we allow this, since the FIN bit is added to rcv_wnd by the stack itself, since it is not mandatory for an application to call tcp_recved() for the FIN bit, but e.g. the netconn API does so. */ pcb->rcv_wnd = TCP_WND_MAX(pcb); } else { LWIP_ASSERT("tcp_recved: len wrapped rcv_wnd\n", 0); } } wnd_inflation = tcp_update_rcv_ann_wnd(pcb); /* If the change in the right edge of window is significant (default * watermark is TCP_WND/4), then send an explicit update now. * Otherwise wait for a packet to be sent in the normal course of * events (or more window to be available later) */ if (wnd_inflation >= TCP_WND_UPDATE_THRESHOLD) { tcp_ack_now(pcb); tcp_output(pcb); } LWIP_DEBUGF(TCP_DEBUG, ("tcp_recved: received %"U16_F" bytes, wnd %"TCPWNDSIZE_F" (%"TCPWNDSIZE_F").\n", len, pcb->rcv_wnd, (u16_t)(TCP_WND_MAX(pcb) - pcb->rcv_wnd))); } /** * Allocate a new local TCP port. * * @return a new (free) local TCP port number */ static u16_t tcp_new_port(void) { u8_t i; u16_t n = 0; struct tcp_pcb *pcb; again: if (tcp_port++ == TCP_LOCAL_PORT_RANGE_END) { tcp_port = TCP_LOCAL_PORT_RANGE_START; } /* Check all PCB lists. */ for (i = 0; i < NUM_TCP_PCB_LISTS; i++) { for (pcb = *tcp_pcb_lists[i]; pcb != NULL; pcb = pcb->next) { if (pcb->local_port == tcp_port) { if (++n > (TCP_LOCAL_PORT_RANGE_END - TCP_LOCAL_PORT_RANGE_START)) { return 0; } goto again; } } } return tcp_port; } /** * @ingroup tcp_raw * Connects to another host. The function given as the "connected" * argument will be called when the connection has been established. * * @param pcb the tcp_pcb used to establish the connection * @param ipaddr the remote ip address to connect to * @param port the remote tcp port to connect to * @param connected callback function to call when connected (on error, the err calback will be called) * @return ERR_VAL if invalid arguments are given * ERR_OK if connect request has been sent * other err_t values if connect request couldn't be sent */ err_t tcp_connect(struct tcp_pcb *pcb, const ip_addr_t *ipaddr, u16_t port, tcp_connected_fn connected) { err_t ret; u32_t iss; u16_t old_local_port; if ((pcb == NULL) || (ipaddr == NULL)) { return ERR_VAL; } LWIP_ERROR("tcp_connect: can only connect from state CLOSED", pcb->state == CLOSED, return ERR_ISCONN); LWIP_DEBUGF(TCP_DEBUG, ("tcp_connect to port %"U16_F"\n", port)); ip_addr_set(&pcb->remote_ip, ipaddr); pcb->remote_port = port; /* check if we have a route to the remote host */ if (ip_addr_isany(&pcb->local_ip)) { /* no local IP address set, yet. */ struct netif *netif; const ip_addr_t *local_ip; ip_route_get_local_ip(&pcb->local_ip, &pcb->remote_ip, netif, local_ip); if ((netif == NULL) || (local_ip == NULL)) { /* Don't even try to send a SYN packet if we have no route since that will fail. */ return ERR_RTE; } /* Use the address as local address of the pcb. 
*/ ip_addr_copy(pcb->local_ip, *local_ip); } old_local_port = pcb->local_port; if (pcb->local_port == 0) { pcb->local_port = tcp_new_port(); if (pcb->local_port == 0) { return ERR_BUF; } } else { #if SO_REUSE if (ip_get_option(pcb, SOF_REUSEADDR)) { /* Since SOF_REUSEADDR allows reusing a local address, we have to make sure now that the 5-tuple is unique. */ struct tcp_pcb *cpcb; int i; /* Don't check listen- and bound-PCBs, check active- and TIME-WAIT PCBs. */ for (i = 2; i < NUM_TCP_PCB_LISTS; i++) { for (cpcb = *tcp_pcb_lists[i]; cpcb != NULL; cpcb = cpcb->next) { if ((cpcb->local_port == pcb->local_port) && (cpcb->remote_port == port) && ip_addr_cmp(&cpcb->local_ip, &pcb->local_ip) && ip_addr_cmp(&cpcb->remote_ip, ipaddr)) { /* linux returns EISCONN here, but ERR_USE should be OK for us */ return ERR_USE; } } } } #endif /* SO_REUSE */ } iss = tcp_next_iss(pcb); pcb->rcv_nxt = 0; pcb->snd_nxt = iss; pcb->lastack = iss - 1; pcb->snd_wl2 = iss - 1; pcb->snd_lbb = iss - 1; /* Start with a window that does not need scaling. When window scaling is enabled and used, the window is enlarged when both sides agree on scaling. */ pcb->rcv_wnd = pcb->rcv_ann_wnd = TCPWND_MIN16(TCP_WND); pcb->rcv_ann_right_edge = pcb->rcv_nxt; pcb->snd_wnd = TCP_WND; /* As initial send MSS, we use TCP_MSS but limit it to 536. The send MSS is updated when an MSS option is received. */ pcb->mss = INITIAL_MSS; #if TCP_CALCULATE_EFF_SEND_MSS pcb->mss = tcp_eff_send_mss(pcb->mss, &pcb->local_ip, &pcb->remote_ip); #endif /* TCP_CALCULATE_EFF_SEND_MSS */ pcb->cwnd = 1; #if LWIP_CALLBACK_API pcb->connected = connected; #else /* LWIP_CALLBACK_API */ LWIP_UNUSED_ARG(connected); #endif /* LWIP_CALLBACK_API */ /* Send a SYN together with the MSS option. */ ret = tcp_enqueue_flags(pcb, TCP_SYN); if (ret == ERR_OK) { /* SYN segment was enqueued, changed the pcbs state now */ pcb->state = SYN_SENT; if (old_local_port != 0) { TCP_RMV(&tcp_bound_pcbs, pcb); } TCP_REG_ACTIVE(pcb); MIB2_STATS_INC(mib2.tcpactiveopens); tcp_output(pcb); } return ret; } /** * Called every 500 ms and implements the retransmission timer and the timer that * removes PCBs that have been in TIME-WAIT for enough time. It also increments * various timers such as the inactivity timer in each PCB. * * Automatically called from tcp_tmr(). */ void tcp_slowtmr(void) { struct tcp_pcb *pcb, *prev; tcpwnd_size_t eff_wnd; u8_t pcb_remove; /* flag if a PCB should be removed */ u8_t pcb_reset; /* flag if a RST should be sent when removing */ err_t err; err = ERR_OK; ++tcp_ticks; ++tcp_timer_ctr; tcp_slowtmr_start: /* Steps through all of the active PCBs. 
*/ prev = NULL; pcb = tcp_active_pcbs; if (pcb == NULL) { LWIP_DEBUGF(TCP_DEBUG, ("tcp_slowtmr: no active pcbs\n")); } while (pcb != NULL) { LWIP_DEBUGF(TCP_DEBUG, ("tcp_slowtmr: processing active pcb\n")); LWIP_ASSERT("tcp_slowtmr: active pcb->state != CLOSED\n", pcb->state != CLOSED); LWIP_ASSERT("tcp_slowtmr: active pcb->state != LISTEN\n", pcb->state != LISTEN); LWIP_ASSERT("tcp_slowtmr: active pcb->state != TIME-WAIT\n", pcb->state != TIME_WAIT); if (pcb->last_timer == tcp_timer_ctr) { /* skip this pcb, we have already processed it */ pcb = pcb->next; continue; } pcb->last_timer = tcp_timer_ctr; pcb_remove = 0; pcb_reset = 0; if (pcb->state == SYN_SENT && pcb->nrtx >= TCP_SYNMAXRTX) { ++pcb_remove; LWIP_DEBUGF(TCP_DEBUG, ("tcp_slowtmr: max SYN retries reached\n")); } else if (pcb->nrtx >= TCP_MAXRTX) { ++pcb_remove; LWIP_DEBUGF(TCP_DEBUG, ("tcp_slowtmr: max DATA retries reached\n")); } else { if (pcb->persist_backoff > 0) { /* If snd_wnd is zero, use persist timer to send 1 byte probes * instead of using the standard retransmission mechanism. */ u8_t backoff_cnt = tcp_persist_backoff[pcb->persist_backoff-1]; if (pcb->persist_cnt < backoff_cnt) { pcb->persist_cnt++; } if (pcb->persist_cnt >= backoff_cnt) { if (tcp_zero_window_probe(pcb) == ERR_OK) { pcb->persist_cnt = 0; if (pcb->persist_backoff < sizeof(tcp_persist_backoff)) { pcb->persist_backoff++; } } } } else { /* Increase the retransmission timer if it is running */ if (pcb->rtime >= 0) { ++pcb->rtime; } if (pcb->unacked != NULL && pcb->rtime >= pcb->rto) { /* Time for a retransmission. */ LWIP_DEBUGF(TCP_RTO_DEBUG, ("tcp_slowtmr: rtime %"S16_F " pcb->rto %"S16_F"\n", pcb->rtime, pcb->rto)); /* Double retransmission time-out unless we are trying to * connect to somebody (i.e., we are in SYN_SENT). */ if (pcb->state != SYN_SENT) { u8_t backoff_idx = LWIP_MIN(pcb->nrtx, sizeof(tcp_backoff)-1); pcb->rto = ((pcb->sa >> 3) + pcb->sv) << tcp_backoff[backoff_idx]; } /* Reset the retransmission timer. */ pcb->rtime = 0; /* Reduce congestion window and ssthresh. */ eff_wnd = LWIP_MIN(pcb->cwnd, pcb->snd_wnd); pcb->ssthresh = eff_wnd >> 1; if (pcb->ssthresh < (tcpwnd_size_t)(pcb->mss << 1)) { pcb->ssthresh = (pcb->mss << 1); } pcb->cwnd = pcb->mss; LWIP_DEBUGF(TCP_CWND_DEBUG, ("tcp_slowtmr: cwnd %"TCPWNDSIZE_F " ssthresh %"TCPWNDSIZE_F"\n", pcb->cwnd, pcb->ssthresh)); /* The following needs to be called AFTER cwnd is set to one mss - STJ */ tcp_rexmit_rto(pcb); } } } /* Check if this PCB has stayed too long in FIN-WAIT-2 */ if (pcb->state == FIN_WAIT_2) { /* If this PCB is in FIN_WAIT_2 because of SHUT_WR don't let it time out. */ if (pcb->flags & TF_RXCLOSED) { /* PCB was fully closed (either through close() or SHUT_RDWR): normal FIN-WAIT timeout handling. */ if ((u32_t)(tcp_ticks - pcb->tmr) > TCP_FIN_WAIT_TIMEOUT / TCP_SLOW_INTERVAL) { ++pcb_remove; LWIP_DEBUGF(TCP_DEBUG, ("tcp_slowtmr: removing pcb stuck in FIN-WAIT-2\n")); } } } /* Check if KEEPALIVE should be sent */ if (ip_get_option(pcb, SOF_KEEPALIVE) && ((pcb->state == ESTABLISHED) || (pcb->state == CLOSE_WAIT))) { if ((u32_t)(tcp_ticks - pcb->tmr) > (pcb->keep_idle + TCP_KEEP_DUR(pcb)) / TCP_SLOW_INTERVAL) { LWIP_DEBUGF(TCP_DEBUG, ("tcp_slowtmr: KEEPALIVE timeout. 
Aborting connection to ")); ip_addr_debug_print(TCP_DEBUG, &pcb->remote_ip); LWIP_DEBUGF(TCP_DEBUG, ("\n")); ++pcb_remove; ++pcb_reset; } else if ((u32_t)(tcp_ticks - pcb->tmr) > (pcb->keep_idle + pcb->keep_cnt_sent * TCP_KEEP_INTVL(pcb)) / TCP_SLOW_INTERVAL) { err = tcp_keepalive(pcb); if (err == ERR_OK) { pcb->keep_cnt_sent++; } } } /* If this PCB has queued out of sequence data, but has been inactive for too long, will drop the data (it will eventually be retransmitted). */ #if TCP_QUEUE_OOSEQ if (pcb->ooseq != NULL && (u32_t)tcp_ticks - pcb->tmr >= pcb->rto * TCP_OOSEQ_TIMEOUT) { tcp_segs_free(pcb->ooseq); pcb->ooseq = NULL; LWIP_DEBUGF(TCP_CWND_DEBUG, ("tcp_slowtmr: dropping OOSEQ queued data\n")); } #endif /* TCP_QUEUE_OOSEQ */ /* Check if this PCB has stayed too long in SYN-RCVD */ if (pcb->state == SYN_RCVD) { if ((u32_t)(tcp_ticks - pcb->tmr) > TCP_SYN_RCVD_TIMEOUT / TCP_SLOW_INTERVAL) { ++pcb_remove; LWIP_DEBUGF(TCP_DEBUG, ("tcp_slowtmr: removing pcb stuck in SYN-RCVD\n")); } } /* Check if this PCB has stayed too long in LAST-ACK */ if (pcb->state == LAST_ACK) { if ((u32_t)(tcp_ticks - pcb->tmr) > 2 * TCP_MSL / TCP_SLOW_INTERVAL) { ++pcb_remove; LWIP_DEBUGF(TCP_DEBUG, ("tcp_slowtmr: removing pcb stuck in LAST-ACK\n")); } } /* If the PCB should be removed, do it. */ if (pcb_remove) { struct tcp_pcb *pcb2; #if LWIP_CALLBACK_API tcp_err_fn err_fn = pcb->errf; #endif /* LWIP_CALLBACK_API */ void *err_arg; enum tcp_state last_state; tcp_pcb_purge(pcb); /* Remove PCB from tcp_active_pcbs list. */ if (prev != NULL) { LWIP_ASSERT("tcp_slowtmr: middle tcp != tcp_active_pcbs", pcb != tcp_active_pcbs); prev->next = pcb->next; } else { /* This PCB was the first. */ LWIP_ASSERT("tcp_slowtmr: first pcb == tcp_active_pcbs", tcp_active_pcbs == pcb); tcp_active_pcbs = pcb->next; } if (pcb_reset) { tcp_rst(pcb->snd_nxt, pcb->rcv_nxt, &pcb->local_ip, &pcb->remote_ip, pcb->local_port, pcb->remote_port); } err_arg = pcb->callback_arg; last_state = pcb->state; pcb2 = pcb; pcb = pcb->next; memp_free(MEMP_TCP_PCB, pcb2); tcp_active_pcbs_changed = 0; TCP_EVENT_ERR(last_state, err_fn, err_arg, ERR_ABRT); if (tcp_active_pcbs_changed) { goto tcp_slowtmr_start; } } else { /* get the 'next' element now and work with 'prev' below (in case of abort) */ prev = pcb; pcb = pcb->next; /* We check if we should poll the connection. */ ++prev->polltmr; if (prev->polltmr >= prev->pollinterval) { prev->polltmr = 0; LWIP_DEBUGF(TCP_DEBUG, ("tcp_slowtmr: polling application\n")); tcp_active_pcbs_changed = 0; TCP_EVENT_POLL(prev, err); if (tcp_active_pcbs_changed) { goto tcp_slowtmr_start; } /* if err == ERR_ABRT, 'prev' is already deallocated */ if (err == ERR_OK) { tcp_output(prev); } } } } /* Steps through all of the TIME-WAIT PCBs. */ prev = NULL; pcb = tcp_tw_pcbs; while (pcb != NULL) { LWIP_ASSERT("tcp_slowtmr: TIME-WAIT pcb->state == TIME-WAIT", pcb->state == TIME_WAIT); pcb_remove = 0; /* Check if this PCB has stayed long enough in TIME-WAIT */ if ((u32_t)(tcp_ticks - pcb->tmr) > 2 * TCP_MSL / TCP_SLOW_INTERVAL) { ++pcb_remove; } /* If the PCB should be removed, do it. */ if (pcb_remove) { struct tcp_pcb *pcb2; tcp_pcb_purge(pcb); /* Remove PCB from tcp_tw_pcbs list. */ if (prev != NULL) { LWIP_ASSERT("tcp_slowtmr: middle tcp != tcp_tw_pcbs", pcb != tcp_tw_pcbs); prev->next = pcb->next; } else { /* This PCB was the first. 
*/ LWIP_ASSERT("tcp_slowtmr: first pcb == tcp_tw_pcbs", tcp_tw_pcbs == pcb); tcp_tw_pcbs = pcb->next; } pcb2 = pcb; pcb = pcb->next; memp_free(MEMP_TCP_PCB, pcb2); } else { prev = pcb; pcb = pcb->next; } } } /** * Is called every TCP_FAST_INTERVAL (250 ms) and process data previously * "refused" by upper layer (application) and sends delayed ACKs. * * Automatically called from tcp_tmr(). */ void tcp_fasttmr(void) { struct tcp_pcb *pcb; ++tcp_timer_ctr; tcp_fasttmr_start: pcb = tcp_active_pcbs; while (pcb != NULL) { if (pcb->last_timer != tcp_timer_ctr) { struct tcp_pcb *next; pcb->last_timer = tcp_timer_ctr; /* send delayed ACKs */ if (pcb->flags & TF_ACK_DELAY) { LWIP_DEBUGF(TCP_DEBUG, ("tcp_fasttmr: delayed ACK\n")); tcp_ack_now(pcb); tcp_output(pcb); pcb->flags &= ~(TF_ACK_DELAY | TF_ACK_NOW); } /* send pending FIN */ if (pcb->flags & TF_CLOSEPEND) { LWIP_DEBUGF(TCP_DEBUG, ("tcp_fasttmr: pending FIN\n")); pcb->flags &= ~(TF_CLOSEPEND); tcp_close_shutdown_fin(pcb); } next = pcb->next; /* If there is data which was previously "refused" by upper layer */ if (pcb->refused_data != NULL) { tcp_active_pcbs_changed = 0; tcp_process_refused_data(pcb); if (tcp_active_pcbs_changed) { /* application callback has changed the pcb list: restart the loop */ goto tcp_fasttmr_start; } } pcb = next; } else { pcb = pcb->next; } } } /** Call tcp_output for all active pcbs that have TF_NAGLEMEMERR set */ void tcp_txnow(void) { struct tcp_pcb *pcb; for (pcb = tcp_active_pcbs; pcb != NULL; pcb = pcb->next) { if (pcb->flags & TF_NAGLEMEMERR) { tcp_output(pcb); } } } /** Pass pcb->refused_data to the recv callback */ err_t tcp_process_refused_data(struct tcp_pcb *pcb) { #if TCP_QUEUE_OOSEQ && LWIP_WND_SCALE struct pbuf *rest; while (pcb->refused_data != NULL) #endif /* TCP_QUEUE_OOSEQ && LWIP_WND_SCALE */ { err_t err; u8_t refused_flags = pcb->refused_data->flags; /* set pcb->refused_data to NULL in case the callback frees it and then closes the pcb */ struct pbuf *refused_data = pcb->refused_data; #if TCP_QUEUE_OOSEQ && LWIP_WND_SCALE pbuf_split_64k(refused_data, &rest); pcb->refused_data = rest; #else /* TCP_QUEUE_OOSEQ && LWIP_WND_SCALE */ pcb->refused_data = NULL; #endif /* TCP_QUEUE_OOSEQ && LWIP_WND_SCALE */ /* Notify again application with data previously received. */ LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_input: notify kept packet\n")); TCP_EVENT_RECV(pcb, refused_data, ERR_OK, err); if (err == ERR_OK) { /* did refused_data include a FIN? */ if (refused_flags & PBUF_FLAG_TCP_FIN #if TCP_QUEUE_OOSEQ && LWIP_WND_SCALE && (rest == NULL) #endif /* TCP_QUEUE_OOSEQ && LWIP_WND_SCALE */ ) { /* correct rcv_wnd as the application won't call tcp_recved() for the FIN's seqno */ if (pcb->rcv_wnd != TCP_WND_MAX(pcb)) { pcb->rcv_wnd++; } TCP_EVENT_CLOSED(pcb, err); if (err == ERR_ABRT) { return ERR_ABRT; } } } else if (err == ERR_ABRT) { /* if err == ERR_ABRT, 'pcb' is already deallocated */ /* Drop incoming packets because pcb is "full" (only if the incoming segment contains data). */ LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_input: drop incoming packets, because pcb is \"full\"\n")); return ERR_ABRT; } else { /* data is still refused, pbuf is still valid (go on for ACK-only packets) */ #if TCP_QUEUE_OOSEQ && LWIP_WND_SCALE if (rest != NULL) { pbuf_cat(refused_data, rest); } #endif /* TCP_QUEUE_OOSEQ && LWIP_WND_SCALE */ pcb->refused_data = refused_data; return ERR_INPROGRESS; } } return ERR_OK; } /** * Deallocates a list of TCP segments (tcp_seg structures). 
* * @param seg tcp_seg list of TCP segments to free */ void tcp_segs_free(struct tcp_seg *seg) { while (seg != NULL) { struct tcp_seg *next = seg->next; tcp_seg_free(seg); seg = next; } } /** * Frees a TCP segment (tcp_seg structure). * * @param seg single tcp_seg to free */ void tcp_seg_free(struct tcp_seg *seg) { if (seg != NULL) { if (seg->p != NULL) { pbuf_free(seg->p); #if TCP_DEBUG seg->p = NULL; #endif /* TCP_DEBUG */ } memp_free(MEMP_TCP_SEG, seg); } } /** * Sets the priority of a connection. * * @param pcb the tcp_pcb to manipulate * @param prio new priority */ void tcp_setprio(struct tcp_pcb *pcb, u8_t prio) { pcb->prio = prio; } #if TCP_QUEUE_OOSEQ /** * Returns a copy of the given TCP segment. * The pbuf and data are not copied, only the pointers * * @param seg the old tcp_seg * @return a copy of seg */ struct tcp_seg * tcp_seg_copy(struct tcp_seg *seg) { struct tcp_seg *cseg; cseg = (struct tcp_seg *)memp_malloc(MEMP_TCP_SEG); if (cseg == NULL) { return NULL; } SMEMCPY((u8_t *)cseg, (const u8_t *)seg, sizeof(struct tcp_seg)); pbuf_ref(cseg->p); return cseg; } #endif /* TCP_QUEUE_OOSEQ */ #if LWIP_CALLBACK_API /** * Default receive callback that is called if the user didn't register * a recv callback for the pcb. */ err_t tcp_recv_null(void *arg, struct tcp_pcb *pcb, struct pbuf *p, err_t err) { LWIP_UNUSED_ARG(arg); if (p != NULL) { tcp_recved(pcb, p->tot_len); pbuf_free(p); } else if (err == ERR_OK) { return tcp_close(pcb); } return ERR_OK; } #endif /* LWIP_CALLBACK_API */ /** * Kills the oldest active connection that has the same or lower priority than * 'prio'. * * @param prio minimum priority */ static void tcp_kill_prio(u8_t prio) { struct tcp_pcb *pcb, *inactive; u32_t inactivity; u8_t mprio; mprio = LWIP_MIN(TCP_PRIO_MAX, prio); /* We kill the oldest active connection that has lower priority than prio. */ inactivity = 0; inactive = NULL; for (pcb = tcp_active_pcbs; pcb != NULL; pcb = pcb->next) { if (pcb->prio <= mprio && (u32_t)(tcp_ticks - pcb->tmr) >= inactivity) { inactivity = tcp_ticks - pcb->tmr; inactive = pcb; mprio = pcb->prio; } } if (inactive != NULL) { LWIP_DEBUGF(TCP_DEBUG, ("tcp_kill_prio: killing oldest PCB %p (%"S32_F")\n", (void *)inactive, inactivity)); tcp_abort(inactive); } } /** * Kills the oldest connection that is in specific state. * Called from tcp_alloc() for LAST_ACK and CLOSING if no more connections are available. */ static void tcp_kill_state(enum tcp_state state) { struct tcp_pcb *pcb, *inactive; u32_t inactivity; LWIP_ASSERT("invalid state", (state == CLOSING) || (state == LAST_ACK)); inactivity = 0; inactive = NULL; /* Go through the list of active pcbs and get the oldest pcb that is in state CLOSING/LAST_ACK. */ for (pcb = tcp_active_pcbs; pcb != NULL; pcb = pcb->next) { if (pcb->state == state) { if ((u32_t)(tcp_ticks - pcb->tmr) >= inactivity) { inactivity = tcp_ticks - pcb->tmr; inactive = pcb; } } } if (inactive != NULL) { LWIP_DEBUGF(TCP_DEBUG, ("tcp_kill_closing: killing oldest %s PCB %p (%"S32_F")\n", tcp_state_str[state], (void *)inactive, inactivity)); /* Don't send a RST, since no data is lost. */ tcp_abandon(inactive, 0); } } /** * Kills the oldest connection that is in TIME_WAIT state. * Called from tcp_alloc() if no more connections are available. */ static void tcp_kill_timewait(void) { struct tcp_pcb *pcb, *inactive; u32_t inactivity; inactivity = 0; inactive = NULL; /* Go through the list of TIME_WAIT pcbs and get the oldest pcb. 
*/ for (pcb = tcp_tw_pcbs; pcb != NULL; pcb = pcb->next) { if ((u32_t)(tcp_ticks - pcb->tmr) >= inactivity) { inactivity = tcp_ticks - pcb->tmr; inactive = pcb; } } if (inactive != NULL) { LWIP_DEBUGF(TCP_DEBUG, ("tcp_kill_timewait: killing oldest TIME-WAIT PCB %p (%"S32_F")\n", (void *)inactive, inactivity)); tcp_abort(inactive); } } /** * Allocate a new tcp_pcb structure. * * @param prio priority for the new pcb * @return a new tcp_pcb that initially is in state CLOSED */ struct tcp_pcb * tcp_alloc(u8_t prio) { struct tcp_pcb *pcb; pcb = (struct tcp_pcb *)memp_malloc(MEMP_TCP_PCB); if (pcb == NULL) { /* Try killing oldest connection in TIME-WAIT. */ LWIP_DEBUGF(TCP_DEBUG, ("tcp_alloc: killing off oldest TIME-WAIT connection\n")); tcp_kill_timewait(); /* Try to allocate a tcp_pcb again. */ pcb = (struct tcp_pcb *)memp_malloc(MEMP_TCP_PCB); if (pcb == NULL) { /* Try killing oldest connection in LAST-ACK (these wouldn't go to TIME-WAIT). */ LWIP_DEBUGF(TCP_DEBUG, ("tcp_alloc: killing off oldest LAST-ACK connection\n")); tcp_kill_state(LAST_ACK); /* Try to allocate a tcp_pcb again. */ pcb = (struct tcp_pcb *)memp_malloc(MEMP_TCP_PCB); if (pcb == NULL) { /* Try killing oldest connection in CLOSING. */ LWIP_DEBUGF(TCP_DEBUG, ("tcp_alloc: killing off oldest CLOSING connection\n")); tcp_kill_state(CLOSING); /* Try to allocate a tcp_pcb again. */ pcb = (struct tcp_pcb *)memp_malloc(MEMP_TCP_PCB); if (pcb == NULL) { /* Try killing active connections with lower priority than the new one. */ LWIP_DEBUGF(TCP_DEBUG, ("tcp_alloc: killing connection with prio lower than %d\n", prio)); tcp_kill_prio(prio); /* Try to allocate a tcp_pcb again. */ pcb = (struct tcp_pcb *)memp_malloc(MEMP_TCP_PCB); if (pcb != NULL) { /* adjust err stats: memp_malloc failed multiple times before */ MEMP_STATS_DEC(err, MEMP_TCP_PCB); } } if (pcb != NULL) { /* adjust err stats: memp_malloc failed multiple times before */ MEMP_STATS_DEC(err, MEMP_TCP_PCB); } } if (pcb != NULL) { /* adjust err stats: memp_malloc failed multiple times before */ MEMP_STATS_DEC(err, MEMP_TCP_PCB); } } if (pcb != NULL) { /* adjust err stats: memp_malloc failed above */ MEMP_STATS_DEC(err, MEMP_TCP_PCB); } } if (pcb != NULL) { /* zero out the whole pcb, so there is no need to initialize members to zero */ memset(pcb, 0, sizeof(struct tcp_pcb)); pcb->prio = prio; pcb->snd_buf = TCP_SND_BUF; /* Start with a window that does not need scaling. When window scaling is enabled and used, the window is enlarged when both sides agree on scaling. */ pcb->rcv_wnd = pcb->rcv_ann_wnd = TCPWND_MIN16(TCP_WND); pcb->ttl = TCP_TTL; /* As initial send MSS, we use TCP_MSS but limit it to 536. The send MSS is updated when an MSS option is received. */ pcb->mss = INITIAL_MSS; pcb->rto = 3000 / TCP_SLOW_INTERVAL; pcb->sv = 3000 / TCP_SLOW_INTERVAL; pcb->rtime = -1; pcb->cwnd = 1; pcb->tmr = tcp_ticks; pcb->last_timer = tcp_timer_ctr; /* RFC 5681 recommends setting ssthresh abritrarily high and gives an example of using the largest advertised receive window. We've seen complications with receiving TCPs that use window scaling and/or window auto-tuning where the initial advertised window is very small and then grows rapidly once the connection is established. To avoid these complications, we set ssthresh to the largest effective cwnd (amount of in-flight data) that the sender can have. 
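     With ssthresh initialised to TCP_SND_BUF below, a new connection therefore stays
     in slow start until cwnd has grown to the size of the whole send buffer, after
     which the congestion-avoidance branch in tcp_receive() takes over.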
*/ pcb->ssthresh = TCP_SND_BUF; #if LWIP_CALLBACK_API pcb->recv = tcp_recv_null; #endif /* LWIP_CALLBACK_API */ /* Init KEEPALIVE timer */ pcb->keep_idle = TCP_KEEPIDLE_DEFAULT; #if LWIP_TCP_KEEPALIVE pcb->keep_intvl = TCP_KEEPINTVL_DEFAULT; pcb->keep_cnt = TCP_KEEPCNT_DEFAULT; #endif /* LWIP_TCP_KEEPALIVE */ } return pcb; } /** * @ingroup tcp_raw * Creates a new TCP protocol control block but doesn't place it on * any of the TCP PCB lists. * The pcb is not put on any list until binding using tcp_bind(). * * @internal: Maybe there should be a idle TCP PCB list where these * PCBs are put on. Port reservation using tcp_bind() is implemented but * allocated pcbs that are not bound can't be killed automatically if wanting * to allocate a pcb with higher prio (@see tcp_kill_prio()) * * @return a new tcp_pcb that initially is in state CLOSED */ struct tcp_pcb * tcp_new(void) { return tcp_alloc(TCP_PRIO_NORMAL); } /** * @ingroup tcp_raw * Creates a new TCP protocol control block but doesn't * place it on any of the TCP PCB lists. * The pcb is not put on any list until binding using tcp_bind(). * * @param type IP address type, see @ref lwip_ip_addr_type definitions. * If you want to listen to IPv4 and IPv6 (dual-stack) connections, * supply @ref IPADDR_TYPE_ANY as argument and bind to @ref IP_ANY_TYPE. * @return a new tcp_pcb that initially is in state CLOSED */ struct tcp_pcb * tcp_new_ip_type(u8_t type) { struct tcp_pcb * pcb; pcb = tcp_alloc(TCP_PRIO_NORMAL); #if LWIP_IPV4 && LWIP_IPV6 if (pcb != NULL) { IP_SET_TYPE_VAL(pcb->local_ip, type); IP_SET_TYPE_VAL(pcb->remote_ip, type); } #else LWIP_UNUSED_ARG(type); #endif /* LWIP_IPV4 && LWIP_IPV6 */ return pcb; } /** * @ingroup tcp_raw * Used to specify the argument that should be passed callback * functions. * * @param pcb tcp_pcb to set the callback argument * @param arg void pointer argument to pass to callback functions */ void tcp_arg(struct tcp_pcb *pcb, void *arg) { /* This function is allowed to be called for both listen pcbs and connection pcbs. */ if (pcb != NULL) { pcb->callback_arg = arg; } } #if LWIP_CALLBACK_API /** * @ingroup tcp_raw * Used to specify the function that should be called when a TCP * connection receives data. * * @param pcb tcp_pcb to set the recv callback * @param recv callback function to call for this pcb when data is received */ void tcp_recv(struct tcp_pcb *pcb, tcp_recv_fn recv) { if (pcb != NULL) { LWIP_ASSERT("invalid socket state for recv callback", pcb->state != LISTEN); pcb->recv = recv; } } /** * @ingroup tcp_raw * Used to specify the function that should be called when TCP data * has been successfully delivered to the remote host. * * @param pcb tcp_pcb to set the sent callback * @param sent callback function to call for this pcb when data is successfully sent */ void tcp_sent(struct tcp_pcb *pcb, tcp_sent_fn sent) { if (pcb != NULL) { LWIP_ASSERT("invalid socket state for sent callback", pcb->state != LISTEN); pcb->sent = sent; } } /** * @ingroup tcp_raw * Used to specify the function that should be called when a fatal error * has occurred on the connection. * * @note The corresponding pcb is already freed when this callback is called! 
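 *
 * A minimal error callback therefore works only on its callback argument and the
 * error code; it never sees (and must never touch) the already-freed pcb. The
 * names below are hypothetical:
 *
 *   static void example_err_cb(void *arg, err_t err)
 *   {
 *     LWIP_UNUSED_ARG(err);
 *     release_app_state(arg);
 *   }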
* * @param pcb tcp_pcb to set the err callback * @param err callback function to call for this pcb when a fatal error * has occurred on the connection */ void tcp_err(struct tcp_pcb *pcb, tcp_err_fn err) { if (pcb != NULL) { LWIP_ASSERT("invalid socket state for err callback", pcb->state != LISTEN); pcb->errf = err; } } /** * @ingroup tcp_raw * Used for specifying the function that should be called when a * LISTENing connection has been connected to another host. * * @param pcb tcp_pcb to set the accept callback * @param accept callback function to call for this pcb when LISTENing * connection has been connected to another host */ void tcp_accept(struct tcp_pcb *pcb, tcp_accept_fn accept) { if ((pcb != NULL) && (pcb->state == LISTEN)) { struct tcp_pcb_listen *lpcb = (struct tcp_pcb_listen*)pcb; lpcb->accept = accept; } } #endif /* LWIP_CALLBACK_API */ /** * @ingroup tcp_raw * Used to specify the function that should be called periodically * from TCP. The interval is specified in terms of the TCP coarse * timer interval, which is called twice a second. * */ void tcp_poll(struct tcp_pcb *pcb, tcp_poll_fn poll, u8_t interval) { LWIP_ASSERT("invalid socket state for poll", pcb->state != LISTEN); #if LWIP_CALLBACK_API pcb->poll = poll; #else /* LWIP_CALLBACK_API */ LWIP_UNUSED_ARG(poll); #endif /* LWIP_CALLBACK_API */ pcb->pollinterval = interval; } /** * Purges a TCP PCB. Removes any buffered data and frees the buffer memory * (pcb->ooseq, pcb->unsent and pcb->unacked are freed). * * @param pcb tcp_pcb to purge. The pcb itself is not deallocated! */ void tcp_pcb_purge(struct tcp_pcb *pcb) { if (pcb->state != CLOSED && pcb->state != TIME_WAIT && pcb->state != LISTEN) { LWIP_DEBUGF(TCP_DEBUG, ("tcp_pcb_purge\n")); tcp_backlog_accepted(pcb); if (pcb->refused_data != NULL) { LWIP_DEBUGF(TCP_DEBUG, ("tcp_pcb_purge: data left on ->refused_data\n")); pbuf_free(pcb->refused_data); pcb->refused_data = NULL; } if (pcb->unsent != NULL) { LWIP_DEBUGF(TCP_DEBUG, ("tcp_pcb_purge: not all data sent\n")); } if (pcb->unacked != NULL) { LWIP_DEBUGF(TCP_DEBUG, ("tcp_pcb_purge: data left on ->unacked\n")); } #if TCP_QUEUE_OOSEQ if (pcb->ooseq != NULL) { LWIP_DEBUGF(TCP_DEBUG, ("tcp_pcb_purge: data left on ->ooseq\n")); } tcp_segs_free(pcb->ooseq); pcb->ooseq = NULL; #endif /* TCP_QUEUE_OOSEQ */ /* Stop the retransmission timer as it will expect data on unacked queue if it fires */ pcb->rtime = -1; tcp_segs_free(pcb->unsent); tcp_segs_free(pcb->unacked); pcb->unacked = pcb->unsent = NULL; #if TCP_OVERSIZE pcb->unsent_oversize = 0; #endif /* TCP_OVERSIZE */ } } /** * Purges the PCB and removes it from a PCB list. Any delayed ACKs are sent first. * * @param pcblist PCB list to purge. * @param pcb tcp_pcb to purge. The pcb itself is NOT deallocated! 
*/ void tcp_pcb_remove(struct tcp_pcb **pcblist, struct tcp_pcb *pcb) { TCP_RMV(pcblist, pcb); tcp_pcb_purge(pcb); /* if there is an outstanding delayed ACKs, send it */ if (pcb->state != TIME_WAIT && pcb->state != LISTEN && pcb->flags & TF_ACK_DELAY) { pcb->flags |= TF_ACK_NOW; tcp_output(pcb); } if (pcb->state != LISTEN) { LWIP_ASSERT("unsent segments leaking", pcb->unsent == NULL); LWIP_ASSERT("unacked segments leaking", pcb->unacked == NULL); #if TCP_QUEUE_OOSEQ LWIP_ASSERT("ooseq segments leaking", pcb->ooseq == NULL); #endif /* TCP_QUEUE_OOSEQ */ } pcb->state = CLOSED; /* reset the local port to prevent the pcb from being 'bound' */ pcb->local_port = 0; LWIP_ASSERT("tcp_pcb_remove: tcp_pcbs_sane()", tcp_pcbs_sane()); } /** * Calculates a new initial sequence number for new connections. * * @return u32_t pseudo random sequence number */ u32_t tcp_next_iss(struct tcp_pcb *pcb) { #ifdef LWIP_HOOK_TCP_ISN return LWIP_HOOK_TCP_ISN(&pcb->local_ip, pcb->local_port, &pcb->remote_ip, pcb->remote_port); #else /* LWIP_HOOK_TCP_ISN */ static u32_t iss = 6510; LWIP_UNUSED_ARG(pcb); iss += tcp_ticks; /* XXX */ return iss; #endif /* LWIP_HOOK_TCP_ISN */ } #if TCP_CALCULATE_EFF_SEND_MSS /** * Calculates the effective send mss that can be used for a specific IP address * by using ip_route to determine the netif used to send to the address and * calculating the minimum of TCP_MSS and that netif's mtu (if set). */ u16_t tcp_eff_send_mss_impl(u16_t sendmss, const ip_addr_t *dest #if LWIP_IPV6 || LWIP_IPV4_SRC_ROUTING , const ip_addr_t *src #endif /* LWIP_IPV6 || LWIP_IPV4_SRC_ROUTING */ ) { u16_t mss_s; struct netif *outif; s16_t mtu; outif = ip_route(src, dest); #if LWIP_IPV6 #if LWIP_IPV4 if (IP_IS_V6(dest)) #endif /* LWIP_IPV4 */ { /* First look in destination cache, to see if there is a Path MTU. */ mtu = nd6_get_destination_mtu(ip_2_ip6(dest), outif); } #if LWIP_IPV4 else #endif /* LWIP_IPV4 */ #endif /* LWIP_IPV6 */ #if LWIP_IPV4 { if (outif == NULL) { return sendmss; } mtu = outif->mtu; } #endif /* LWIP_IPV4 */ if (mtu != 0) { #if LWIP_IPV6 #if LWIP_IPV4 if (IP_IS_V6(dest)) #endif /* LWIP_IPV4 */ { mss_s = mtu - IP6_HLEN - TCP_HLEN; } #if LWIP_IPV4 else #endif /* LWIP_IPV4 */ #endif /* LWIP_IPV6 */ #if LWIP_IPV4 { mss_s = mtu - IP_HLEN - TCP_HLEN; } #endif /* LWIP_IPV4 */ /* RFC 1122, chap 4.2.2.6: * Eff.snd.MSS = min(SendMSS+20, MMS_S) - TCPhdrsize - IPoptionsize * We correct for TCP options in tcp_write(), and don't support IP options. */ sendmss = LWIP_MIN(sendmss, mss_s); } return sendmss; } #endif /* TCP_CALCULATE_EFF_SEND_MSS */ /** Helper function for tcp_netif_ip_addr_changed() that iterates a pcb list */ static void tcp_netif_ip_addr_changed_pcblist(const ip_addr_t* old_addr, struct tcp_pcb* pcb_list) { struct tcp_pcb *pcb; pcb = pcb_list; while (pcb != NULL) { /* PCB bound to current local interface address? */ if (ip_addr_cmp(&pcb->local_ip, old_addr) #if LWIP_AUTOIP /* connections to link-local addresses must persist (RFC3927 ch. 
1.9) */ && (!IP_IS_V4_VAL(pcb->local_ip) || !ip4_addr_islinklocal(ip_2_ip4(&pcb->local_ip))) #endif /* LWIP_AUTOIP */ ) { /* this connection must be aborted */ struct tcp_pcb *next = pcb->next; LWIP_DEBUGF(NETIF_DEBUG | LWIP_DBG_STATE, ("netif_set_ipaddr: aborting TCP pcb %p\n", (void *)pcb)); tcp_abort(pcb); pcb = next; } else { pcb = pcb->next; } } } /** This function is called from netif.c when address is changed or netif is removed * * @param old_addr IP address of the netif before change * @param new_addr IP address of the netif after change or NULL if netif has been removed */ void tcp_netif_ip_addr_changed(const ip_addr_t* old_addr, const ip_addr_t* new_addr) { struct tcp_pcb_listen *lpcb, *next; if (!ip_addr_isany(old_addr)) { tcp_netif_ip_addr_changed_pcblist(old_addr, tcp_active_pcbs); tcp_netif_ip_addr_changed_pcblist(old_addr, tcp_bound_pcbs); if (!ip_addr_isany(new_addr)) { /* PCB bound to current local interface address? */ for (lpcb = tcp_listen_pcbs.listen_pcbs; lpcb != NULL; lpcb = next) { next = lpcb->next; /* PCB bound to current local interface address? */ if (ip_addr_cmp(&lpcb->local_ip, old_addr)) { /* The PCB is listening to the old ipaddr and * is set to listen to the new one instead */ ip_addr_copy(lpcb->local_ip, *new_addr); } } } } } const char* tcp_debug_state_str(enum tcp_state s) { return tcp_state_str[s]; } #if TCP_DEBUG || TCP_INPUT_DEBUG || TCP_OUTPUT_DEBUG /** * Print a tcp header for debugging purposes. * * @param tcphdr pointer to a struct tcp_hdr */ void tcp_debug_print(struct tcp_hdr *tcphdr) { LWIP_DEBUGF(TCP_DEBUG, ("TCP header:\n")); LWIP_DEBUGF(TCP_DEBUG, ("+-------------------------------+\n")); LWIP_DEBUGF(TCP_DEBUG, ("| %5"U16_F" | %5"U16_F" | (src port, dest port)\n", lwip_ntohs(tcphdr->src), lwip_ntohs(tcphdr->dest))); LWIP_DEBUGF(TCP_DEBUG, ("+-------------------------------+\n")); LWIP_DEBUGF(TCP_DEBUG, ("| %010"U32_F" | (seq no)\n", lwip_ntohl(tcphdr->seqno))); LWIP_DEBUGF(TCP_DEBUG, ("+-------------------------------+\n")); LWIP_DEBUGF(TCP_DEBUG, ("| %010"U32_F" | (ack no)\n", lwip_ntohl(tcphdr->ackno))); LWIP_DEBUGF(TCP_DEBUG, ("+-------------------------------+\n")); LWIP_DEBUGF(TCP_DEBUG, ("| %2"U16_F" | |%"U16_F"%"U16_F"%"U16_F"%"U16_F"%"U16_F"%"U16_F"| %5"U16_F" | (hdrlen, flags (", TCPH_HDRLEN(tcphdr), (u16_t)(TCPH_FLAGS(tcphdr) >> 5 & 1), (u16_t)(TCPH_FLAGS(tcphdr) >> 4 & 1), (u16_t)(TCPH_FLAGS(tcphdr) >> 3 & 1), (u16_t)(TCPH_FLAGS(tcphdr) >> 2 & 1), (u16_t)(TCPH_FLAGS(tcphdr) >> 1 & 1), (u16_t)(TCPH_FLAGS(tcphdr) & 1), lwip_ntohs(tcphdr->wnd))); tcp_debug_print_flags(TCPH_FLAGS(tcphdr)); LWIP_DEBUGF(TCP_DEBUG, ("), win)\n")); LWIP_DEBUGF(TCP_DEBUG, ("+-------------------------------+\n")); LWIP_DEBUGF(TCP_DEBUG, ("| 0x%04"X16_F" | %5"U16_F" | (chksum, urgp)\n", lwip_ntohs(tcphdr->chksum), lwip_ntohs(tcphdr->urgp))); LWIP_DEBUGF(TCP_DEBUG, ("+-------------------------------+\n")); } /** * Print a tcp state for debugging purposes. * * @param s enum tcp_state to print */ void tcp_debug_print_state(enum tcp_state s) { LWIP_DEBUGF(TCP_DEBUG, ("State: %s\n", tcp_state_str[s])); } /** * Print tcp flags for debugging purposes. 
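 * For a SYN|ACK segment, for example, the output is "SYN ACK " followed by a newline.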
* * @param flags tcp flags, all active flags are printed */ void tcp_debug_print_flags(u8_t flags) { if (flags & TCP_FIN) { LWIP_DEBUGF(TCP_DEBUG, ("FIN ")); } if (flags & TCP_SYN) { LWIP_DEBUGF(TCP_DEBUG, ("SYN ")); } if (flags & TCP_RST) { LWIP_DEBUGF(TCP_DEBUG, ("RST ")); } if (flags & TCP_PSH) { LWIP_DEBUGF(TCP_DEBUG, ("PSH ")); } if (flags & TCP_ACK) { LWIP_DEBUGF(TCP_DEBUG, ("ACK ")); } if (flags & TCP_URG) { LWIP_DEBUGF(TCP_DEBUG, ("URG ")); } if (flags & TCP_ECE) { LWIP_DEBUGF(TCP_DEBUG, ("ECE ")); } if (flags & TCP_CWR) { LWIP_DEBUGF(TCP_DEBUG, ("CWR ")); } LWIP_DEBUGF(TCP_DEBUG, ("\n")); } /** * Print all tcp_pcbs in every list for debugging purposes. */ void tcp_debug_print_pcbs(void) { struct tcp_pcb *pcb; struct tcp_pcb_listen *pcbl; LWIP_DEBUGF(TCP_DEBUG, ("Active PCB states:\n")); for (pcb = tcp_active_pcbs; pcb != NULL; pcb = pcb->next) { LWIP_DEBUGF(TCP_DEBUG, ("Local port %"U16_F", foreign port %"U16_F" snd_nxt %"U32_F" rcv_nxt %"U32_F" ", pcb->local_port, pcb->remote_port, pcb->snd_nxt, pcb->rcv_nxt)); tcp_debug_print_state(pcb->state); } LWIP_DEBUGF(TCP_DEBUG, ("Listen PCB states:\n")); for (pcbl = tcp_listen_pcbs.listen_pcbs; pcbl != NULL; pcbl = pcbl->next) { LWIP_DEBUGF(TCP_DEBUG, ("Local port %"U16_F" ", pcbl->local_port)); tcp_debug_print_state(pcbl->state); } LWIP_DEBUGF(TCP_DEBUG, ("TIME-WAIT PCB states:\n")); for (pcb = tcp_tw_pcbs; pcb != NULL; pcb = pcb->next) { LWIP_DEBUGF(TCP_DEBUG, ("Local port %"U16_F", foreign port %"U16_F" snd_nxt %"U32_F" rcv_nxt %"U32_F" ", pcb->local_port, pcb->remote_port, pcb->snd_nxt, pcb->rcv_nxt)); tcp_debug_print_state(pcb->state); } } /** * Check state consistency of the tcp_pcb lists. */ s16_t tcp_pcbs_sane(void) { struct tcp_pcb *pcb; for (pcb = tcp_active_pcbs; pcb != NULL; pcb = pcb->next) { LWIP_ASSERT("tcp_pcbs_sane: active pcb->state != CLOSED", pcb->state != CLOSED); LWIP_ASSERT("tcp_pcbs_sane: active pcb->state != LISTEN", pcb->state != LISTEN); LWIP_ASSERT("tcp_pcbs_sane: active pcb->state != TIME-WAIT", pcb->state != TIME_WAIT); } for (pcb = tcp_tw_pcbs; pcb != NULL; pcb = pcb->next) { LWIP_ASSERT("tcp_pcbs_sane: tw pcb->state == TIME-WAIT", pcb->state == TIME_WAIT); } return 1; } #endif /* TCP_DEBUG */ #endif /* LWIP_TCP */
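/*
 * Illustrative sketch of how an application might use the raw-API callback setters
 * defined in this file (tcp_arg(), tcp_accept(), tcp_recv()). All names prefixed
 * with "example_" are hypothetical, the port number is arbitrary, and the block is
 * guarded by #if 0 so it is never compiled as part of lwIP.
 */
#if 0
static err_t
example_recv_cb(void *arg, struct tcp_pcb *tpcb, struct pbuf *p, err_t err)
{
  LWIP_UNUSED_ARG(arg);
  if (p == NULL) {
    /* remote host closed the connection */
    return tcp_close(tpcb);
  }
  if (err != ERR_OK) {
    pbuf_free(p);
    return err;
  }
  /* consume the data and re-open the receive window */
  tcp_recved(tpcb, p->tot_len);
  pbuf_free(p);
  return ERR_OK;
}

static err_t
example_accept_cb(void *arg, struct tcp_pcb *newpcb, err_t err)
{
  LWIP_UNUSED_ARG(arg);
  if ((err != ERR_OK) || (newpcb == NULL)) {
    return ERR_VAL;
  }
  tcp_arg(newpcb, NULL);
  tcp_recv(newpcb, example_recv_cb);
  return ERR_OK;
}

static void
example_start_listening(void)
{
  struct tcp_pcb *pcb = tcp_new();
  if (pcb != NULL) {
    if (tcp_bind(pcb, IP_ADDR_ANY, 7) == ERR_OK) {
      pcb = tcp_listen(pcb);
      tcp_accept(pcb, example_accept_cb);
    }
  }
}
#endif /* 0 */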
2301_81045437/classic-platform
communication/lwip-2.0.3/src/core/tcp.c
C
unknown
69608
/** * @file * Transmission Control Protocol, incoming traffic * * The input processing functions of the TCP layer. * * These functions are generally called in the order (ip_input() ->) * tcp_input() -> * tcp_process() -> tcp_receive() (-> application). * */ /* * Copyright (c) 2001-2004 Swedish Institute of Computer Science. * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY * OF SUCH DAMAGE. * * This file is part of the lwIP TCP/IP stack. * * Author: Adam Dunkels <adam@sics.se> * */ #include "lwip/opt.h" #if LWIP_TCP /* don't build if not configured for use in lwipopts.h */ #include "lwip/priv/tcp_priv.h" #include "lwip/def.h" #include "lwip/ip_addr.h" #include "lwip/netif.h" #include "lwip/mem.h" #include "lwip/memp.h" #include "lwip/inet_chksum.h" #include "lwip/stats.h" #include "lwip/ip6.h" #include "lwip/ip6_addr.h" #if LWIP_ND6_TCP_REACHABILITY_HINTS #include "lwip/nd6.h" #endif /* LWIP_ND6_TCP_REACHABILITY_HINTS */ /** Initial CWND calculation as defined RFC 2581 */ #define LWIP_TCP_CALC_INITIAL_CWND(mss) LWIP_MIN((4U * (mss)), LWIP_MAX((2U * (mss)), 4380U)); /* These variables are global to all functions involved in the input processing of TCP segments. They are set by the tcp_input() function. */ static struct tcp_seg inseg; static struct tcp_hdr *tcphdr; static u16_t tcphdr_optlen; static u16_t tcphdr_opt1len; static u8_t* tcphdr_opt2; static u16_t tcp_optidx; static u32_t seqno, ackno; static tcpwnd_size_t recv_acked; static u16_t tcplen; static u8_t flags; static u8_t recv_flags; static struct pbuf *recv_data; struct tcp_pcb *tcp_input_pcb; /* Forward declarations. */ static err_t tcp_process(struct tcp_pcb *pcb); static void tcp_receive(struct tcp_pcb *pcb); static void tcp_parseopt(struct tcp_pcb *pcb); static void tcp_listen_input(struct tcp_pcb_listen *pcb); static void tcp_timewait_input(struct tcp_pcb *pcb); static int tcp_input_delayed_close(struct tcp_pcb *pcb); /** * The initial input processing of TCP. It verifies the TCP header, demultiplexes * the segment between the PCBs and passes it on to tcp_process(), which implements * the TCP finite state machine. This function is called by the IP layer (in * ip_input()). 
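 * The segment pbuf is consumed on every path through this function: it is either
 * freed here (short or malformed segments, unmatched segments that trigger a RST,
 * TIME-WAIT and LISTEN handling), queued internally as refused data, or passed to
 * the application via the recv callback, so the caller must not touch 'p' afterwards.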
* * @param p received TCP segment to process (p->payload pointing to the TCP header) * @param inp network interface on which this segment was received */ void tcp_input(struct pbuf *p, struct netif *inp) { struct tcp_pcb *pcb, *prev; struct tcp_pcb_listen *lpcb; #if SO_REUSE struct tcp_pcb *lpcb_prev = NULL; struct tcp_pcb_listen *lpcb_any = NULL; #endif /* SO_REUSE */ u8_t hdrlen_bytes; err_t err; LWIP_UNUSED_ARG(inp); PERF_START; TCP_STATS_INC(tcp.recv); MIB2_STATS_INC(mib2.tcpinsegs); tcphdr = (struct tcp_hdr *)p->payload; #if TCP_INPUT_DEBUG tcp_debug_print(tcphdr); #endif /* Check that TCP header fits in payload */ if (p->len < TCP_HLEN) { /* drop short packets */ LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_input: short packet (%"U16_F" bytes) discarded\n", p->tot_len)); TCP_STATS_INC(tcp.lenerr); goto dropped; } /* Don't even process incoming broadcasts/multicasts. */ if (ip_addr_isbroadcast(ip_current_dest_addr(), ip_current_netif()) || ip_addr_ismulticast(ip_current_dest_addr())) { TCP_STATS_INC(tcp.proterr); goto dropped; } #if CHECKSUM_CHECK_TCP IF__NETIF_CHECKSUM_ENABLED(inp, NETIF_CHECKSUM_CHECK_TCP) { /* Verify TCP checksum. */ u16_t chksum = ip_chksum_pseudo(p, IP_PROTO_TCP, p->tot_len, ip_current_src_addr(), ip_current_dest_addr()); if (chksum != 0) { LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_input: packet discarded due to failing checksum 0x%04"X16_F"\n", chksum)); tcp_debug_print(tcphdr); TCP_STATS_INC(tcp.chkerr); goto dropped; } } #endif /* CHECKSUM_CHECK_TCP */ /* sanity-check header length */ hdrlen_bytes = TCPH_HDRLEN(tcphdr) * 4; if ((hdrlen_bytes < TCP_HLEN) || (hdrlen_bytes > p->tot_len)) { LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_input: invalid header length (%"U16_F")\n", (u16_t)hdrlen_bytes)); TCP_STATS_INC(tcp.lenerr); goto dropped; } /* Move the payload pointer in the pbuf so that it points to the TCP data instead of the TCP header. */ tcphdr_optlen = hdrlen_bytes - TCP_HLEN; tcphdr_opt2 = NULL; if (p->len >= hdrlen_bytes) { /* all options are in the first pbuf */ tcphdr_opt1len = tcphdr_optlen; pbuf_header(p, -(s16_t)hdrlen_bytes); /* cannot fail */ } else { u16_t opt2len; /* TCP header fits into first pbuf, options don't - data is in the next pbuf */ /* there must be a next pbuf, due to hdrlen_bytes sanity check above */ LWIP_ASSERT("p->next != NULL", p->next != NULL); /* advance over the TCP header (cannot fail) */ pbuf_header(p, -TCP_HLEN); /* determine how long the first and second parts of the options are */ tcphdr_opt1len = p->len; opt2len = tcphdr_optlen - tcphdr_opt1len; /* options continue in the next pbuf: set p to zero length and hide the options in the next pbuf (adjusting p->tot_len) */ pbuf_header(p, -(s16_t)tcphdr_opt1len); /* check that the options fit in the second pbuf */ if (opt2len > p->next->len) { /* drop short packets */ LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_input: options overflow second pbuf (%"U16_F" bytes)\n", p->next->len)); TCP_STATS_INC(tcp.lenerr); goto dropped; } /* remember the pointer to the second part of the options */ tcphdr_opt2 = (u8_t*)p->next->payload; /* advance p->next to point after the options, and manually adjust p->tot_len to keep it consistent with the changed p->next */ pbuf_header(p->next, -(s16_t)opt2len); p->tot_len -= opt2len; LWIP_ASSERT("p->len == 0", p->len == 0); LWIP_ASSERT("p->tot_len == p->next->tot_len", p->tot_len == p->next->tot_len); } /* Convert fields in TCP header to host byte order. 
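     Note that tcplen computed below counts a SYN or FIN flag as one extra byte of
     sequence space, so it can exceed p->tot_len by one.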
*/ tcphdr->src = lwip_ntohs(tcphdr->src); tcphdr->dest = lwip_ntohs(tcphdr->dest); seqno = tcphdr->seqno = lwip_ntohl(tcphdr->seqno); ackno = tcphdr->ackno = lwip_ntohl(tcphdr->ackno); tcphdr->wnd = lwip_ntohs(tcphdr->wnd); flags = TCPH_FLAGS(tcphdr); tcplen = p->tot_len + ((flags & (TCP_FIN | TCP_SYN)) ? 1 : 0); /* Demultiplex an incoming segment. First, we check if it is destined for an active connection. */ prev = NULL; for (pcb = tcp_active_pcbs; pcb != NULL; pcb = pcb->next) { LWIP_ASSERT("tcp_input: active pcb->state != CLOSED", pcb->state != CLOSED); LWIP_ASSERT("tcp_input: active pcb->state != TIME-WAIT", pcb->state != TIME_WAIT); LWIP_ASSERT("tcp_input: active pcb->state != LISTEN", pcb->state != LISTEN); if (pcb->remote_port == tcphdr->src && pcb->local_port == tcphdr->dest && ip_addr_cmp(&pcb->remote_ip, ip_current_src_addr()) && ip_addr_cmp(&pcb->local_ip, ip_current_dest_addr())) { /* Move this PCB to the front of the list so that subsequent lookups will be faster (we exploit locality in TCP segment arrivals). */ LWIP_ASSERT("tcp_input: pcb->next != pcb (before cache)", pcb->next != pcb); if (prev != NULL) { prev->next = pcb->next; pcb->next = tcp_active_pcbs; tcp_active_pcbs = pcb; } else { TCP_STATS_INC(tcp.cachehit); } LWIP_ASSERT("tcp_input: pcb->next != pcb (after cache)", pcb->next != pcb); break; } prev = pcb; } if (pcb == NULL) { /* If it did not go to an active connection, we check the connections in the TIME-WAIT state. */ for (pcb = tcp_tw_pcbs; pcb != NULL; pcb = pcb->next) { LWIP_ASSERT("tcp_input: TIME-WAIT pcb->state == TIME-WAIT", pcb->state == TIME_WAIT); if (pcb->remote_port == tcphdr->src && pcb->local_port == tcphdr->dest && ip_addr_cmp(&pcb->remote_ip, ip_current_src_addr()) && ip_addr_cmp(&pcb->local_ip, ip_current_dest_addr())) { /* We don't really care enough to move this PCB to the front of the list since we are not very likely to receive that many segments for connections in TIME-WAIT. */ LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_input: packed for TIME_WAITing connection.\n")); tcp_timewait_input(pcb); pbuf_free(p); return; } } /* Finally, if we still did not get a match, we check all PCBs that are LISTENing for incoming connections. */ prev = NULL; for (lpcb = tcp_listen_pcbs.listen_pcbs; lpcb != NULL; lpcb = lpcb->next) { if (lpcb->local_port == tcphdr->dest) { if (IP_IS_ANY_TYPE_VAL(lpcb->local_ip)) { /* found an ANY TYPE (IPv4/IPv6) match */ #if SO_REUSE lpcb_any = lpcb; lpcb_prev = prev; #else /* SO_REUSE */ break; #endif /* SO_REUSE */ } else if (IP_ADDR_PCB_VERSION_MATCH_EXACT(lpcb, ip_current_dest_addr())) { if (ip_addr_cmp(&lpcb->local_ip, ip_current_dest_addr())) { /* found an exact match */ break; } else if (ip_addr_isany(&lpcb->local_ip)) { /* found an ANY-match */ #if SO_REUSE lpcb_any = lpcb; lpcb_prev = prev; #else /* SO_REUSE */ break; #endif /* SO_REUSE */ } } } prev = (struct tcp_pcb *)lpcb; } #if SO_REUSE /* first try specific local IP */ if (lpcb == NULL) { /* only pass to ANY if no specific local IP has been found */ lpcb = lpcb_any; prev = lpcb_prev; } #endif /* SO_REUSE */ if (lpcb != NULL) { /* Move this PCB to the front of the list so that subsequent lookups will be faster (we exploit locality in TCP segment arrivals). 
*/ if (prev != NULL) { ((struct tcp_pcb_listen *)prev)->next = lpcb->next; /* our successor is the remainder of the listening list */ lpcb->next = tcp_listen_pcbs.listen_pcbs; /* put this listening pcb at the head of the listening list */ tcp_listen_pcbs.listen_pcbs = lpcb; } else { TCP_STATS_INC(tcp.cachehit); } LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_input: packed for LISTENing connection.\n")); tcp_listen_input(lpcb); pbuf_free(p); return; } } #if TCP_INPUT_DEBUG LWIP_DEBUGF(TCP_INPUT_DEBUG, ("+-+-+-+-+-+-+-+-+-+-+-+-+-+- tcp_input: flags ")); tcp_debug_print_flags(TCPH_FLAGS(tcphdr)); LWIP_DEBUGF(TCP_INPUT_DEBUG, ("-+-+-+-+-+-+-+-+-+-+-+-+-+-+\n")); #endif /* TCP_INPUT_DEBUG */ if (pcb != NULL) { /* The incoming segment belongs to a connection. */ #if TCP_INPUT_DEBUG tcp_debug_print_state(pcb->state); #endif /* TCP_INPUT_DEBUG */ /* Set up a tcp_seg structure. */ inseg.next = NULL; inseg.len = p->tot_len; inseg.p = p; inseg.tcphdr = tcphdr; recv_data = NULL; recv_flags = 0; recv_acked = 0; if (flags & TCP_PSH) { p->flags |= PBUF_FLAG_PUSH; } /* If there is data which was previously "refused" by upper layer */ if (pcb->refused_data != NULL) { if ((tcp_process_refused_data(pcb) == ERR_ABRT) || ((pcb->refused_data != NULL) && (tcplen > 0))) { /* pcb has been aborted or refused data is still refused and the new segment contains data */ if (pcb->rcv_ann_wnd == 0) { /* this is a zero-window probe, we respond to it with current RCV.NXT and drop the data segment */ tcp_send_empty_ack(pcb); } TCP_STATS_INC(tcp.drop); MIB2_STATS_INC(mib2.tcpinerrs); goto aborted; } } tcp_input_pcb = pcb; err = tcp_process(pcb); /* A return value of ERR_ABRT means that tcp_abort() was called and that the pcb has been freed. If so, we don't do anything. */ if (err != ERR_ABRT) { if (recv_flags & TF_RESET) { /* TF_RESET means that the connection was reset by the other end. We then call the error callback to inform the application that the connection is dead before we deallocate the PCB. */ TCP_EVENT_ERR(pcb->state, pcb->errf, pcb->callback_arg, ERR_RST); tcp_pcb_remove(&tcp_active_pcbs, pcb); memp_free(MEMP_TCP_PCB, pcb); } else { err = ERR_OK; /* If the application has registered a "sent" function to be called when new send buffer space is available, we call it now. */ if (recv_acked > 0) { u16_t acked16; #if LWIP_WND_SCALE /* recv_acked is u32_t but the sent callback only takes a u16_t, so we might have to call it multiple times. */ u32_t acked = recv_acked; while (acked > 0) { acked16 = (u16_t)LWIP_MIN(acked, 0xffffu); acked -= acked16; #else { acked16 = recv_acked; #endif TCP_EVENT_SENT(pcb, (u16_t)acked16, err); if (err == ERR_ABRT) { goto aborted; } } recv_acked = 0; } if (tcp_input_delayed_close(pcb)) { goto aborted; } #if TCP_QUEUE_OOSEQ && LWIP_WND_SCALE while (recv_data != NULL) { struct pbuf *rest = NULL; pbuf_split_64k(recv_data, &rest); #else /* TCP_QUEUE_OOSEQ && LWIP_WND_SCALE */ if (recv_data != NULL) { #endif /* TCP_QUEUE_OOSEQ && LWIP_WND_SCALE */ LWIP_ASSERT("pcb->refused_data == NULL", pcb->refused_data == NULL); if (pcb->flags & TF_RXCLOSED) { /* received data although already closed -> abort (send RST) to notify the remote host that not all data has been processed */ pbuf_free(recv_data); #if TCP_QUEUE_OOSEQ && LWIP_WND_SCALE if (rest != NULL) { pbuf_free(rest); } #endif /* TCP_QUEUE_OOSEQ && LWIP_WND_SCALE */ tcp_abort(pcb); goto aborted; } /* Notify application that data has been received. 
*/ TCP_EVENT_RECV(pcb, recv_data, ERR_OK, err); if (err == ERR_ABRT) { #if TCP_QUEUE_OOSEQ && LWIP_WND_SCALE if (rest != NULL) { pbuf_free(rest); } #endif /* TCP_QUEUE_OOSEQ && LWIP_WND_SCALE */ goto aborted; } /* If the upper layer can't receive this data, store it */ if (err != ERR_OK) { #if TCP_QUEUE_OOSEQ && LWIP_WND_SCALE if (rest != NULL) { pbuf_cat(recv_data, rest); } #endif /* TCP_QUEUE_OOSEQ && LWIP_WND_SCALE */ pcb->refused_data = recv_data; LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_input: keep incoming packet, because pcb is \"full\"\n")); #if TCP_QUEUE_OOSEQ && LWIP_WND_SCALE break; } else { /* Upper layer received the data, go on with the rest if > 64K */ recv_data = rest; #endif /* TCP_QUEUE_OOSEQ && LWIP_WND_SCALE */ } } /* If a FIN segment was received, we call the callback function with a NULL buffer to indicate EOF. */ if (recv_flags & TF_GOT_FIN) { if (pcb->refused_data != NULL) { /* Delay this if we have refused data. */ pcb->refused_data->flags |= PBUF_FLAG_TCP_FIN; } else { /* correct rcv_wnd as the application won't call tcp_recved() for the FIN's seqno */ if (pcb->rcv_wnd != TCP_WND_MAX(pcb)) { pcb->rcv_wnd++; } TCP_EVENT_CLOSED(pcb, err); if (err == ERR_ABRT) { goto aborted; } } } tcp_input_pcb = NULL; if (tcp_input_delayed_close(pcb)) { goto aborted; } /* Try to send something out. */ tcp_output(pcb); #if TCP_INPUT_DEBUG #if TCP_DEBUG tcp_debug_print_state(pcb->state); #endif /* TCP_DEBUG */ #endif /* TCP_INPUT_DEBUG */ } } /* Jump target if pcb has been aborted in a callback (by calling tcp_abort()). Below this line, 'pcb' may not be dereferenced! */ aborted: tcp_input_pcb = NULL; recv_data = NULL; /* give up our reference to inseg.p */ if (inseg.p != NULL) { pbuf_free(inseg.p); inseg.p = NULL; } } else { /* If no matching PCB was found, send a TCP RST (reset) to the sender. */ LWIP_DEBUGF(TCP_RST_DEBUG, ("tcp_input: no PCB match found, resetting.\n")); if (!(TCPH_FLAGS(tcphdr) & TCP_RST)) { TCP_STATS_INC(tcp.proterr); TCP_STATS_INC(tcp.drop); tcp_rst(ackno, seqno + tcplen, ip_current_dest_addr(), ip_current_src_addr(), tcphdr->dest, tcphdr->src); } pbuf_free(p); } LWIP_ASSERT("tcp_input: tcp_pcbs_sane()", tcp_pcbs_sane()); PERF_STOP("tcp_input"); return; dropped: TCP_STATS_INC(tcp.drop); MIB2_STATS_INC(mib2.tcpinerrs); pbuf_free(p); } /** Called from tcp_input to check for TF_CLOSED flag. This results in closing * and deallocating a pcb at the correct place to ensure noone references it * any more. * @returns 1 if the pcb has been closed and deallocated, 0 otherwise */ static int tcp_input_delayed_close(struct tcp_pcb *pcb) { if (recv_flags & TF_CLOSED) { /* The connection has been closed and we will deallocate the PCB. */ if (!(pcb->flags & TF_RXCLOSED)) { /* Connection closed although the application has only shut down the tx side: call the PCB's err callback and indicate the closure to ensure the application doesn't continue using the PCB. */ TCP_EVENT_ERR(pcb->state, pcb->errf, pcb->callback_arg, ERR_CLSD); } tcp_pcb_remove(&tcp_active_pcbs, pcb); memp_free(MEMP_TCP_PCB, pcb); return 1; } return 0; } /** * Called by tcp_input() when a segment arrives for a listening * connection (from tcp_input()). * * @param pcb the tcp_pcb_listen for which a segment arrived * * @note the segment which arrived is saved in global variables, therefore only the pcb * involved is passed as a parameter to this function */ static void tcp_listen_input(struct tcp_pcb_listen *pcb) { struct tcp_pcb *npcb; u32_t iss; err_t rc; if (flags & TCP_RST) { /* An incoming RST should be ignored. 
Return. */ return; } /* In the LISTEN state, we check for incoming SYN segments, creates a new PCB, and responds with a SYN|ACK. */ if (flags & TCP_ACK) { /* For incoming segments with the ACK flag set, respond with a RST. */ LWIP_DEBUGF(TCP_RST_DEBUG, ("tcp_listen_input: ACK in LISTEN, sending reset\n")); tcp_rst(ackno, seqno + tcplen, ip_current_dest_addr(), ip_current_src_addr(), tcphdr->dest, tcphdr->src); } else if (flags & TCP_SYN) { LWIP_DEBUGF(TCP_DEBUG, ("TCP connection request %"U16_F" -> %"U16_F".\n", tcphdr->src, tcphdr->dest)); #if TCP_LISTEN_BACKLOG if (pcb->accepts_pending >= pcb->backlog) { LWIP_DEBUGF(TCP_DEBUG, ("tcp_listen_input: listen backlog exceeded for port %"U16_F"\n", tcphdr->dest)); return; } #endif /* TCP_LISTEN_BACKLOG */ npcb = tcp_alloc(pcb->prio); /* If a new PCB could not be created (probably due to lack of memory), we don't do anything, but rely on the sender will retransmit the SYN at a time when we have more memory available. */ if (npcb == NULL) { err_t err; LWIP_DEBUGF(TCP_DEBUG, ("tcp_listen_input: could not allocate PCB\n")); TCP_STATS_INC(tcp.memerr); TCP_EVENT_ACCEPT(pcb, NULL, pcb->callback_arg, ERR_MEM, err); LWIP_UNUSED_ARG(err); /* err not useful here */ return; } #if TCP_LISTEN_BACKLOG pcb->accepts_pending++; npcb->flags |= TF_BACKLOGPEND; #endif /* TCP_LISTEN_BACKLOG */ /* Set up the new PCB. */ ip_addr_copy(npcb->local_ip, *ip_current_dest_addr()); ip_addr_copy(npcb->remote_ip, *ip_current_src_addr()); npcb->local_port = pcb->local_port; npcb->remote_port = tcphdr->src; npcb->state = SYN_RCVD; npcb->rcv_nxt = seqno + 1; npcb->rcv_ann_right_edge = npcb->rcv_nxt; iss = tcp_next_iss(npcb); npcb->snd_wl2 = iss; npcb->snd_nxt = iss; npcb->lastack = iss; npcb->snd_lbb = iss; npcb->snd_wl1 = seqno - 1;/* initialise to seqno-1 to force window update */ npcb->callback_arg = pcb->callback_arg; #if LWIP_CALLBACK_API || TCP_LISTEN_BACKLOG npcb->listener = pcb; #endif /* LWIP_CALLBACK_API || TCP_LISTEN_BACKLOG */ /* inherit socket options */ npcb->so_options = pcb->so_options & SOF_INHERITED; /* Register the new PCB so that we can begin receiving segments for it. */ TCP_REG_ACTIVE(npcb); /* Parse any options in the SYN. */ tcp_parseopt(npcb); npcb->snd_wnd = tcphdr->wnd; npcb->snd_wnd_max = npcb->snd_wnd; #if TCP_CALCULATE_EFF_SEND_MSS npcb->mss = tcp_eff_send_mss(npcb->mss, &npcb->local_ip, &npcb->remote_ip); #endif /* TCP_CALCULATE_EFF_SEND_MSS */ MIB2_STATS_INC(mib2.tcppassiveopens); /* Send a SYN|ACK together with the MSS option. */ rc = tcp_enqueue_flags(npcb, TCP_SYN | TCP_ACK); if (rc != ERR_OK) { tcp_abandon(npcb, 0); return; } tcp_output(npcb); } return; } /** * Called by tcp_input() when a segment arrives for a connection in * TIME_WAIT. * * @param pcb the tcp_pcb for which a segment arrived * * @note the segment which arrived is saved in global variables, therefore only the pcb * involved is passed as a parameter to this function */ static void tcp_timewait_input(struct tcp_pcb *pcb) { /* RFC 1337: in TIME_WAIT, ignore RST and ACK FINs + any 'acceptable' segments */ /* RFC 793 3.9 Event Processing - Segment Arrives: * - first check sequence number - we skip that one in TIME_WAIT (always * acceptable since we only send ACKs) * - second check the RST bit (... 
return) */ if (flags & TCP_RST) { return; } /* - fourth, check the SYN bit, */ if (flags & TCP_SYN) { /* If an incoming segment is not acceptable, an acknowledgment should be sent in reply */ if (TCP_SEQ_BETWEEN(seqno, pcb->rcv_nxt, pcb->rcv_nxt + pcb->rcv_wnd)) { /* If the SYN is in the window it is an error, send a reset */ tcp_rst(ackno, seqno + tcplen, ip_current_dest_addr(), ip_current_src_addr(), tcphdr->dest, tcphdr->src); return; } } else if (flags & TCP_FIN) { /* - eighth, check the FIN bit: Remain in the TIME-WAIT state. Restart the 2 MSL time-wait timeout.*/ pcb->tmr = tcp_ticks; } if ((tcplen > 0)) { /* Acknowledge data, FIN or out-of-window SYN */ pcb->flags |= TF_ACK_NOW; tcp_output(pcb); } return; } /** * Implements the TCP state machine. Called by tcp_input. In some * states tcp_receive() is called to receive data. The tcp_seg * argument will be freed by the caller (tcp_input()) unless the * recv_data pointer in the pcb is set. * * @param pcb the tcp_pcb for which a segment arrived * * @note the segment which arrived is saved in global variables, therefore only the pcb * involved is passed as a parameter to this function */ static err_t tcp_process(struct tcp_pcb *pcb) { struct tcp_seg *rseg; u8_t acceptable = 0; err_t err; err = ERR_OK; /* Process incoming RST segments. */ if (flags & TCP_RST) { /* First, determine if the reset is acceptable. */ if (pcb->state == SYN_SENT) { /* "In the SYN-SENT state (a RST received in response to an initial SYN), the RST is acceptable if the ACK field acknowledges the SYN." */ if (ackno == pcb->snd_nxt) { acceptable = 1; } } else { /* "In all states except SYN-SENT, all reset (RST) segments are validated by checking their SEQ-fields." */ if (seqno == pcb->rcv_nxt) { acceptable = 1; } else if (TCP_SEQ_BETWEEN(seqno, pcb->rcv_nxt, pcb->rcv_nxt + pcb->rcv_wnd)) { /* If the sequence number is inside the window, we only send an ACK and wait for a re-send with matching sequence number. This violates RFC 793, but is required to protection against CVE-2004-0230 (RST spoofing attack). */ tcp_ack_now(pcb); } } if (acceptable) { LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_process: Connection RESET\n")); LWIP_ASSERT("tcp_input: pcb->state != CLOSED", pcb->state != CLOSED); recv_flags |= TF_RESET; pcb->flags &= ~TF_ACK_DELAY; return ERR_RST; } else { LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_process: unacceptable reset seqno %"U32_F" rcv_nxt %"U32_F"\n", seqno, pcb->rcv_nxt)); LWIP_DEBUGF(TCP_DEBUG, ("tcp_process: unacceptable reset seqno %"U32_F" rcv_nxt %"U32_F"\n", seqno, pcb->rcv_nxt)); return ERR_OK; } } if ((flags & TCP_SYN) && (pcb->state != SYN_SENT && pcb->state != SYN_RCVD)) { /* Cope with new connection attempt after remote end crashed */ tcp_ack_now(pcb); return ERR_OK; } if ((pcb->flags & TF_RXCLOSED) == 0) { /* Update the PCB (in)activity timer unless rx is closed (see tcp_shutdown) */ pcb->tmr = tcp_ticks; } pcb->keep_cnt_sent = 0; tcp_parseopt(pcb); /* Do different things depending on the TCP state. */ switch (pcb->state) { case SYN_SENT: LWIP_DEBUGF(TCP_INPUT_DEBUG, ("SYN-SENT: ackno %"U32_F" pcb->snd_nxt %"U32_F" unacked %"U32_F"\n", ackno, pcb->snd_nxt, lwip_ntohl(pcb->unacked->tcphdr->seqno))); /* received SYN ACK with expected sequence number? 
*/ if ((flags & TCP_ACK) && (flags & TCP_SYN) && (ackno == pcb->lastack + 1)) { pcb->rcv_nxt = seqno + 1; pcb->rcv_ann_right_edge = pcb->rcv_nxt; pcb->lastack = ackno; pcb->snd_wnd = tcphdr->wnd; pcb->snd_wnd_max = pcb->snd_wnd; pcb->snd_wl1 = seqno - 1; /* initialise to seqno - 1 to force window update */ pcb->state = ESTABLISHED; #if TCP_CALCULATE_EFF_SEND_MSS pcb->mss = tcp_eff_send_mss(pcb->mss, &pcb->local_ip, &pcb->remote_ip); #endif /* TCP_CALCULATE_EFF_SEND_MSS */ pcb->cwnd = LWIP_TCP_CALC_INITIAL_CWND(pcb->mss); LWIP_DEBUGF(TCP_CWND_DEBUG, ("tcp_process (SENT): cwnd %"TCPWNDSIZE_F " ssthresh %"TCPWNDSIZE_F"\n", pcb->cwnd, pcb->ssthresh)); LWIP_ASSERT("pcb->snd_queuelen > 0", (pcb->snd_queuelen > 0)); --pcb->snd_queuelen; LWIP_DEBUGF(TCP_QLEN_DEBUG, ("tcp_process: SYN-SENT --queuelen %"TCPWNDSIZE_F"\n", (tcpwnd_size_t)pcb->snd_queuelen)); rseg = pcb->unacked; if (rseg == NULL) { /* might happen if tcp_output fails in tcp_rexmit_rto() in which case the segment is on the unsent list */ rseg = pcb->unsent; LWIP_ASSERT("no segment to free", rseg != NULL); pcb->unsent = rseg->next; } else { pcb->unacked = rseg->next; } tcp_seg_free(rseg); /* If there's nothing left to acknowledge, stop the retransmit timer, otherwise reset it to start again */ if (pcb->unacked == NULL) { pcb->rtime = -1; } else { pcb->rtime = 0; pcb->nrtx = 0; } /* Call the user specified function to call when successfully * connected. */ TCP_EVENT_CONNECTED(pcb, ERR_OK, err); if (err == ERR_ABRT) { return ERR_ABRT; } tcp_ack_now(pcb); } /* received ACK? possibly a half-open connection */ else if (flags & TCP_ACK) { /* send a RST to bring the other side in a non-synchronized state. */ tcp_rst(ackno, seqno + tcplen, ip_current_dest_addr(), ip_current_src_addr(), tcphdr->dest, tcphdr->src); /* Resend SYN immediately (don't wait for rto timeout) to establish connection faster, but do not send more SYNs than we otherwise would have, or we might get caught in a loop on loopback interfaces. */ if (pcb->nrtx < TCP_SYNMAXRTX) { pcb->rtime = 0; tcp_rexmit_rto(pcb); } } break; case SYN_RCVD: if (flags & TCP_ACK) { /* expected ACK number? */ if (TCP_SEQ_BETWEEN(ackno, pcb->lastack+1, pcb->snd_nxt)) { pcb->state = ESTABLISHED; LWIP_DEBUGF(TCP_DEBUG, ("TCP connection established %"U16_F" -> %"U16_F".\n", inseg.tcphdr->src, inseg.tcphdr->dest)); #if LWIP_CALLBACK_API || TCP_LISTEN_BACKLOG #if LWIP_CALLBACK_API LWIP_ASSERT("pcb->listener->accept != NULL", (pcb->listener == NULL) || (pcb->listener->accept != NULL)); #endif if (pcb->listener == NULL) { /* listen pcb might be closed by now */ err = ERR_VAL; } else #endif /* LWIP_CALLBACK_API || TCP_LISTEN_BACKLOG */ { tcp_backlog_accepted(pcb); /* Call the accept function. */ TCP_EVENT_ACCEPT(pcb->listener, pcb, pcb->callback_arg, ERR_OK, err); } if (err != ERR_OK) { /* If the accept function returns with an error, we abort * the connection. */ /* Already aborted? */ if (err != ERR_ABRT) { tcp_abort(pcb); } return ERR_ABRT; } /* If there was any data contained within this ACK, * we'd better pass it on to the application as well. 
*/ tcp_receive(pcb); /* Prevent ACK for SYN to generate a sent event */ if (recv_acked != 0) { recv_acked--; } pcb->cwnd = LWIP_TCP_CALC_INITIAL_CWND(pcb->mss); LWIP_DEBUGF(TCP_CWND_DEBUG, ("tcp_process (SYN_RCVD): cwnd %"TCPWNDSIZE_F " ssthresh %"TCPWNDSIZE_F"\n", pcb->cwnd, pcb->ssthresh)); if (recv_flags & TF_GOT_FIN) { tcp_ack_now(pcb); pcb->state = CLOSE_WAIT; } } else { /* incorrect ACK number, send RST */ tcp_rst(ackno, seqno + tcplen, ip_current_dest_addr(), ip_current_src_addr(), tcphdr->dest, tcphdr->src); } } else if ((flags & TCP_SYN) && (seqno == pcb->rcv_nxt - 1)) { /* Looks like another copy of the SYN - retransmit our SYN-ACK */ tcp_rexmit(pcb); } break; case CLOSE_WAIT: /* FALLTHROUGH */ case ESTABLISHED: tcp_receive(pcb); if (recv_flags & TF_GOT_FIN) { /* passive close */ tcp_ack_now(pcb); pcb->state = CLOSE_WAIT; } break; case FIN_WAIT_1: tcp_receive(pcb); if (recv_flags & TF_GOT_FIN) { if ((flags & TCP_ACK) && (ackno == pcb->snd_nxt) && pcb->unsent == NULL) { LWIP_DEBUGF(TCP_DEBUG, ("TCP connection closed: FIN_WAIT_1 %"U16_F" -> %"U16_F".\n", inseg.tcphdr->src, inseg.tcphdr->dest)); tcp_ack_now(pcb); tcp_pcb_purge(pcb); TCP_RMV_ACTIVE(pcb); pcb->state = TIME_WAIT; TCP_REG(&tcp_tw_pcbs, pcb); } else { tcp_ack_now(pcb); pcb->state = CLOSING; } } else if ((flags & TCP_ACK) && (ackno == pcb->snd_nxt) && pcb->unsent == NULL) { pcb->state = FIN_WAIT_2; } break; case FIN_WAIT_2: tcp_receive(pcb); if (recv_flags & TF_GOT_FIN) { LWIP_DEBUGF(TCP_DEBUG, ("TCP connection closed: FIN_WAIT_2 %"U16_F" -> %"U16_F".\n", inseg.tcphdr->src, inseg.tcphdr->dest)); tcp_ack_now(pcb); tcp_pcb_purge(pcb); TCP_RMV_ACTIVE(pcb); pcb->state = TIME_WAIT; TCP_REG(&tcp_tw_pcbs, pcb); } break; case CLOSING: tcp_receive(pcb); if ((flags & TCP_ACK) && ackno == pcb->snd_nxt && pcb->unsent == NULL) { LWIP_DEBUGF(TCP_DEBUG, ("TCP connection closed: CLOSING %"U16_F" -> %"U16_F".\n", inseg.tcphdr->src, inseg.tcphdr->dest)); tcp_pcb_purge(pcb); TCP_RMV_ACTIVE(pcb); pcb->state = TIME_WAIT; TCP_REG(&tcp_tw_pcbs, pcb); } break; case LAST_ACK: tcp_receive(pcb); if ((flags & TCP_ACK) && ackno == pcb->snd_nxt && pcb->unsent == NULL) { LWIP_DEBUGF(TCP_DEBUG, ("TCP connection closed: LAST_ACK %"U16_F" -> %"U16_F".\n", inseg.tcphdr->src, inseg.tcphdr->dest)); /* bugfix #21699: don't set pcb->state to CLOSED here or we risk leaking segments */ recv_flags |= TF_CLOSED; } break; default: break; } return ERR_OK; } #if TCP_QUEUE_OOSEQ /** * Insert segment into the list (segments covered with new one will be deleted) * * Called from tcp_receive() */ static void tcp_oos_insert_segment(struct tcp_seg *cseg, struct tcp_seg *next) { struct tcp_seg *old_seg; if (TCPH_FLAGS(cseg->tcphdr) & TCP_FIN) { /* received segment overlaps all following segments */ tcp_segs_free(next); next = NULL; } else { /* delete some following segments oos queue may have segments with FIN flag */ while (next && TCP_SEQ_GEQ((seqno + cseg->len), (next->tcphdr->seqno + next->len))) { /* cseg with FIN already processed */ if (TCPH_FLAGS(next->tcphdr) & TCP_FIN) { TCPH_SET_FLAG(cseg->tcphdr, TCP_FIN); } old_seg = next; next = next->next; tcp_seg_free(old_seg); } if (next && TCP_SEQ_GT(seqno + cseg->len, next->tcphdr->seqno)) { /* We need to trim the incoming segment. */ cseg->len = (u16_t)(next->tcphdr->seqno - seqno); pbuf_realloc(cseg->p, cseg->len); } } cseg->next = next; } #endif /* TCP_QUEUE_OOSEQ */ /** * Called by tcp_process. Checks if the given segment is an ACK for outstanding * data, and if so frees the memory of the buffered data. 
Next, it places the * segment on any of the receive queues (pcb->recved or pcb->ooseq). If the segment * is buffered, the pbuf is referenced by pbuf_ref so that it will not be freed until * it has been removed from the buffer. * * If the incoming segment constitutes an ACK for a segment that was used for RTT * estimation, the RTT is estimated here as well. * * Called from tcp_process(). */ static void tcp_receive(struct tcp_pcb *pcb) { struct tcp_seg *next; #if TCP_QUEUE_OOSEQ struct tcp_seg *prev, *cseg; #endif /* TCP_QUEUE_OOSEQ */ s32_t off; s16_t m; u32_t right_wnd_edge; u16_t new_tot_len; int found_dupack = 0; #if TCP_OOSEQ_MAX_BYTES || TCP_OOSEQ_MAX_PBUFS u32_t ooseq_blen; u16_t ooseq_qlen; #endif /* TCP_OOSEQ_MAX_BYTES || TCP_OOSEQ_MAX_PBUFS */ LWIP_ASSERT("tcp_receive: wrong state", pcb->state >= ESTABLISHED); if (flags & TCP_ACK) { right_wnd_edge = pcb->snd_wnd + pcb->snd_wl2; /* Update window. */ if (TCP_SEQ_LT(pcb->snd_wl1, seqno) || (pcb->snd_wl1 == seqno && TCP_SEQ_LT(pcb->snd_wl2, ackno)) || (pcb->snd_wl2 == ackno && (u32_t)SND_WND_SCALE(pcb, tcphdr->wnd) > pcb->snd_wnd)) { pcb->snd_wnd = SND_WND_SCALE(pcb, tcphdr->wnd); /* keep track of the biggest window announced by the remote host to calculate the maximum segment size */ if (pcb->snd_wnd_max < pcb->snd_wnd) { pcb->snd_wnd_max = pcb->snd_wnd; } pcb->snd_wl1 = seqno; pcb->snd_wl2 = ackno; if (pcb->snd_wnd == 0) { if (pcb->persist_backoff == 0) { /* start persist timer */ pcb->persist_cnt = 0; pcb->persist_backoff = 1; } } else if (pcb->persist_backoff > 0) { /* stop persist timer */ pcb->persist_backoff = 0; } LWIP_DEBUGF(TCP_WND_DEBUG, ("tcp_receive: window update %"TCPWNDSIZE_F"\n", pcb->snd_wnd)); #if TCP_WND_DEBUG } else { if (pcb->snd_wnd != (tcpwnd_size_t)SND_WND_SCALE(pcb, tcphdr->wnd)) { LWIP_DEBUGF(TCP_WND_DEBUG, ("tcp_receive: no window update lastack %"U32_F" ackno %" U32_F" wl1 %"U32_F" seqno %"U32_F" wl2 %"U32_F"\n", pcb->lastack, ackno, pcb->snd_wl1, seqno, pcb->snd_wl2)); } #endif /* TCP_WND_DEBUG */ } /* (From Stevens TCP/IP Illustrated Vol II, p970.) Its only a * duplicate ack if: * 1) It doesn't ACK new data * 2) length of received packet is zero (i.e. no payload) * 3) the advertised window hasn't changed * 4) There is outstanding unacknowledged data (retransmission timer running) * 5) The ACK is == biggest ACK sequence number so far seen (snd_una) * * If it passes all five, should process as a dupack: * a) dupacks < 3: do nothing * b) dupacks == 3: fast retransmit * c) dupacks > 3: increase cwnd * * If it only passes 1-3, should reset dupack counter (and add to * stats, which we don't do in lwIP) * * If it only passes 1, should reset dupack counter * */ /* Clause 1 */ if (TCP_SEQ_LEQ(ackno, pcb->lastack)) { /* Clause 2 */ if (tcplen == 0) { /* Clause 3 */ if (pcb->snd_wl2 + pcb->snd_wnd == right_wnd_edge) { /* Clause 4 */ if (pcb->rtime >= 0) { /* Clause 5 */ if (pcb->lastack == ackno) { found_dupack = 1; if ((u8_t)(pcb->dupacks + 1) > pcb->dupacks) { ++pcb->dupacks; } if (pcb->dupacks > 3) { /* Inflate the congestion window, but not if it means that the value overflows. */ if ((tcpwnd_size_t)(pcb->cwnd + pcb->mss) > pcb->cwnd) { pcb->cwnd += pcb->mss; } } else if (pcb->dupacks == 3) { /* Do fast retransmit */ tcp_rexmit_fast(pcb); } } } } } /* If Clause (1) or more is true, but not a duplicate ack, reset * count of consecutive duplicate acks */ if (!found_dupack) { pcb->dupacks = 0; } } else if (TCP_SEQ_BETWEEN(ackno, pcb->lastack+1, pcb->snd_nxt)) { /* We come here when the ACK acknowledges new data. 
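     The code below resets the retransmission state, recomputes the RTO from the
     smoothed estimator and grows cwnd: by one MSS per ACK while in slow start
     (cwnd < ssthresh), and by mss*mss/cwnd in congestion avoidance. For example,
     with mss = 536 and cwnd = 4288, each new ACK adds 536*536/4288 = 67 bytes.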
*/ /* Reset the "IN Fast Retransmit" flag, since we are no longer in fast retransmit. Also reset the congestion window to the slow start threshold. */ if (pcb->flags & TF_INFR) { pcb->flags &= ~TF_INFR; pcb->cwnd = pcb->ssthresh; } /* Reset the number of retransmissions. */ pcb->nrtx = 0; /* Reset the retransmission time-out. */ pcb->rto = (pcb->sa >> 3) + pcb->sv; /* Reset the fast retransmit variables. */ pcb->dupacks = 0; pcb->lastack = ackno; /* Update the congestion control variables (cwnd and ssthresh). */ if (pcb->state >= ESTABLISHED) { if (pcb->cwnd < pcb->ssthresh) { if ((tcpwnd_size_t)(pcb->cwnd + pcb->mss) > pcb->cwnd) { pcb->cwnd += pcb->mss; } LWIP_DEBUGF(TCP_CWND_DEBUG, ("tcp_receive: slow start cwnd %"TCPWNDSIZE_F"\n", pcb->cwnd)); } else { tcpwnd_size_t new_cwnd = (pcb->cwnd + pcb->mss * pcb->mss / pcb->cwnd); if (new_cwnd > pcb->cwnd) { pcb->cwnd = new_cwnd; } LWIP_DEBUGF(TCP_CWND_DEBUG, ("tcp_receive: congestion avoidance cwnd %"TCPWNDSIZE_F"\n", pcb->cwnd)); } } LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_receive: ACK for %"U32_F", unacked->seqno %"U32_F":%"U32_F"\n", ackno, pcb->unacked != NULL? lwip_ntohl(pcb->unacked->tcphdr->seqno): 0, pcb->unacked != NULL? lwip_ntohl(pcb->unacked->tcphdr->seqno) + TCP_TCPLEN(pcb->unacked): 0)); /* Remove segment from the unacknowledged list if the incoming ACK acknowledges them. */ while (pcb->unacked != NULL && TCP_SEQ_LEQ(lwip_ntohl(pcb->unacked->tcphdr->seqno) + TCP_TCPLEN(pcb->unacked), ackno)) { LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_receive: removing %"U32_F":%"U32_F" from pcb->unacked\n", lwip_ntohl(pcb->unacked->tcphdr->seqno), lwip_ntohl(pcb->unacked->tcphdr->seqno) + TCP_TCPLEN(pcb->unacked))); next = pcb->unacked; pcb->unacked = pcb->unacked->next; LWIP_DEBUGF(TCP_QLEN_DEBUG, ("tcp_receive: queuelen %"TCPWNDSIZE_F" ... ", (tcpwnd_size_t)pcb->snd_queuelen)); LWIP_ASSERT("pcb->snd_queuelen >= pbuf_clen(next->p)", (pcb->snd_queuelen >= pbuf_clen(next->p))); pcb->snd_queuelen -= pbuf_clen(next->p); recv_acked += next->len; tcp_seg_free(next); LWIP_DEBUGF(TCP_QLEN_DEBUG, ("%"TCPWNDSIZE_F" (after freeing unacked)\n", (tcpwnd_size_t)pcb->snd_queuelen)); if (pcb->snd_queuelen != 0) { LWIP_ASSERT("tcp_receive: valid queue length", pcb->unacked != NULL || pcb->unsent != NULL); } } /* If there's nothing left to acknowledge, stop the retransmit timer, otherwise reset it to start again */ if (pcb->unacked == NULL) { pcb->rtime = -1; } else { pcb->rtime = 0; } pcb->polltmr = 0; #if LWIP_IPV6 && LWIP_ND6_TCP_REACHABILITY_HINTS if (ip_current_is_v6()) { /* Inform neighbor reachability of forward progress. */ nd6_reachability_hint(ip6_current_src_addr()); } #endif /* LWIP_IPV6 && LWIP_ND6_TCP_REACHABILITY_HINTS*/ } else { /* Out of sequence ACK, didn't really ack anything */ tcp_send_empty_ack(pcb); } /* We go through the ->unsent list to see if any of the segments on the list are acknowledged by the ACK. This may seem strange since an "unsent" segment shouldn't be acked. The rationale is that lwIP puts all outstanding segments on the ->unsent list after a retransmission, so these segments may in fact have been sent once. 
*/ while (pcb->unsent != NULL && TCP_SEQ_BETWEEN(ackno, lwip_ntohl(pcb->unsent->tcphdr->seqno) + TCP_TCPLEN(pcb->unsent), pcb->snd_nxt)) { LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_receive: removing %"U32_F":%"U32_F" from pcb->unsent\n", lwip_ntohl(pcb->unsent->tcphdr->seqno), lwip_ntohl(pcb->unsent->tcphdr->seqno) + TCP_TCPLEN(pcb->unsent))); next = pcb->unsent; pcb->unsent = pcb->unsent->next; #if TCP_OVERSIZE if (pcb->unsent == NULL) { pcb->unsent_oversize = 0; } #endif /* TCP_OVERSIZE */ LWIP_DEBUGF(TCP_QLEN_DEBUG, ("tcp_receive: queuelen %"TCPWNDSIZE_F" ... ", (tcpwnd_size_t)pcb->snd_queuelen)); LWIP_ASSERT("pcb->snd_queuelen >= pbuf_clen(next->p)", (pcb->snd_queuelen >= pbuf_clen(next->p))); /* Prevent ACK for FIN to generate a sent event */ pcb->snd_queuelen -= pbuf_clen(next->p); recv_acked += next->len; tcp_seg_free(next); LWIP_DEBUGF(TCP_QLEN_DEBUG, ("%"TCPWNDSIZE_F" (after freeing unsent)\n", (tcpwnd_size_t)pcb->snd_queuelen)); if (pcb->snd_queuelen != 0) { LWIP_ASSERT("tcp_receive: valid queue length", pcb->unacked != NULL || pcb->unsent != NULL); } } pcb->snd_buf += recv_acked; /* End of ACK for new data processing. */ LWIP_DEBUGF(TCP_RTO_DEBUG, ("tcp_receive: pcb->rttest %"U32_F" rtseq %"U32_F" ackno %"U32_F"\n", pcb->rttest, pcb->rtseq, ackno)); /* RTT estimation calculations. This is done by checking if the incoming segment acknowledges the segment we use to take a round-trip time measurement. */ if (pcb->rttest && TCP_SEQ_LT(pcb->rtseq, ackno)) { /* diff between this shouldn't exceed 32K since this are tcp timer ticks and a round-trip shouldn't be that long... */ m = (s16_t)(tcp_ticks - pcb->rttest); LWIP_DEBUGF(TCP_RTO_DEBUG, ("tcp_receive: experienced rtt %"U16_F" ticks (%"U16_F" msec).\n", m, (u16_t)(m * TCP_SLOW_INTERVAL))); /* This is taken directly from VJs original code in his paper */ m = m - (pcb->sa >> 3); pcb->sa += m; if (m < 0) { m = -m; } m = m - (pcb->sv >> 2); pcb->sv += m; pcb->rto = (pcb->sa >> 3) + pcb->sv; LWIP_DEBUGF(TCP_RTO_DEBUG, ("tcp_receive: RTO %"U16_F" (%"U16_F" milliseconds)\n", pcb->rto, (u16_t)(pcb->rto * TCP_SLOW_INTERVAL))); pcb->rttest = 0; } } /* If the incoming segment contains data, we must process it further unless the pcb already received a FIN. (RFC 793, chapter 3.9, "SEGMENT ARRIVES" in states CLOSE-WAIT, CLOSING, LAST-ACK and TIME-WAIT: "Ignore the segment text.") */ if ((tcplen > 0) && (pcb->state < CLOSE_WAIT)) { /* This code basically does three things: +) If the incoming segment contains data that is the next in-sequence data, this data is passed to the application. This might involve trimming the first edge of the data. The rcv_nxt variable and the advertised window are adjusted. +) If the incoming segment has data that is above the next sequence number expected (->rcv_nxt), the segment is placed on the ->ooseq queue. This is done by finding the appropriate place in the ->ooseq queue (which is ordered by sequence number) and trim the segment in both ends if needed. An immediate ACK is sent to indicate that we received an out-of-sequence segment. +) Finally, we check if the first segment on the ->ooseq queue now is in sequence (i.e., if rcv_nxt >= ooseq->seqno). If rcv_nxt > ooseq->seqno, we must trim the first edge of the segment on ->ooseq before we adjust rcv_nxt. The data in the segments that are now on sequence are chained onto the incoming segment so that we only need to call the application once. */ /* First, we check if we must trim the first edge. 
We have to do this if the sequence number of the incoming segment is less than rcv_nxt, and the sequence number plus the length of the segment is larger than rcv_nxt. */ /* if (TCP_SEQ_LT(seqno, pcb->rcv_nxt)) { if (TCP_SEQ_LT(pcb->rcv_nxt, seqno + tcplen)) {*/ if (TCP_SEQ_BETWEEN(pcb->rcv_nxt, seqno + 1, seqno + tcplen - 1)) { /* Trimming the first edge is done by pushing the payload pointer in the pbuf downwards. This is somewhat tricky since we do not want to discard the full contents of the pbuf up to the new starting point of the data since we have to keep the TCP header which is present in the first pbuf in the chain. What is done is really quite a nasty hack: the first pbuf in the pbuf chain is pointed to by inseg.p. Since we need to be able to deallocate the whole pbuf, we cannot change this inseg.p pointer to point to any of the later pbufs in the chain. Instead, we point the ->payload pointer in the first pbuf to data in one of the later pbufs. We also set the inseg.data pointer to point to the right place. This way, the ->p pointer will still point to the first pbuf, but the ->p->payload pointer will point to data in another pbuf. After we are done with adjusting the pbuf pointers we must adjust the ->data pointer in the seg and the segment length.*/ struct pbuf *p = inseg.p; off = pcb->rcv_nxt - seqno; LWIP_ASSERT("inseg.p != NULL", inseg.p); LWIP_ASSERT("insane offset!", (off < 0x7fff)); if (inseg.p->len < off) { LWIP_ASSERT("pbuf too short!", (((s32_t)inseg.p->tot_len) >= off)); new_tot_len = (u16_t)(inseg.p->tot_len - off); while (p->len < off) { off -= p->len; /* KJM following line changed (with addition of new_tot_len var) to fix bug #9076 inseg.p->tot_len -= p->len; */ p->tot_len = new_tot_len; p->len = 0; p = p->next; } if (pbuf_header(p, (s16_t)-off)) { /* Do we need to cope with this failing? Assert for now */ LWIP_ASSERT("pbuf_header failed", 0); } } else { if (pbuf_header(inseg.p, (s16_t)-off)) { /* Do we need to cope with this failing? Assert for now */ LWIP_ASSERT("pbuf_header failed", 0); } } inseg.len -= (u16_t)(pcb->rcv_nxt - seqno); inseg.tcphdr->seqno = seqno = pcb->rcv_nxt; } else { if (TCP_SEQ_LT(seqno, pcb->rcv_nxt)) { /* the whole segment is < rcv_nxt */ /* must be a duplicate of a packet that has already been correctly handled */ LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_receive: duplicate seqno %"U32_F"\n", seqno)); tcp_ack_now(pcb); } } /* The sequence number must be within the window (above rcv_nxt and below rcv_nxt + rcv_wnd) in order to be further processed. */ if (TCP_SEQ_BETWEEN(seqno, pcb->rcv_nxt, pcb->rcv_nxt + pcb->rcv_wnd - 1)) { if (pcb->rcv_nxt == seqno) { /* The incoming segment is the next in sequence. We check if we have to trim the end of the segment and update rcv_nxt and pass the data to the application. */ tcplen = TCP_TCPLEN(&inseg); if (tcplen > pcb->rcv_wnd) { LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_receive: other end overran receive window" "seqno %"U32_F" len %"U16_F" right edge %"U32_F"\n", seqno, tcplen, pcb->rcv_nxt + pcb->rcv_wnd)); if (TCPH_FLAGS(inseg.tcphdr) & TCP_FIN) { /* Must remove the FIN from the header as we're trimming * that byte of sequence-space from the packet */ TCPH_FLAGS_SET(inseg.tcphdr, TCPH_FLAGS(inseg.tcphdr) & ~(unsigned int)TCP_FIN); } /* Adjust length of segment to fit in the window. 
*/ TCPWND_CHECK16(pcb->rcv_wnd); inseg.len = (u16_t)pcb->rcv_wnd; if (TCPH_FLAGS(inseg.tcphdr) & TCP_SYN) { inseg.len -= 1; } pbuf_realloc(inseg.p, inseg.len); tcplen = TCP_TCPLEN(&inseg); LWIP_ASSERT("tcp_receive: segment not trimmed correctly to rcv_wnd\n", (seqno + tcplen) == (pcb->rcv_nxt + pcb->rcv_wnd)); } #if TCP_QUEUE_OOSEQ /* Received in-sequence data, adjust ooseq data if: - FIN has been received or - inseq overlaps with ooseq */ if (pcb->ooseq != NULL) { if (TCPH_FLAGS(inseg.tcphdr) & TCP_FIN) { LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_receive: received in-order FIN, binning ooseq queue\n")); /* Received in-order FIN means anything that was received * out of order must now have been received in-order, so * bin the ooseq queue */ while (pcb->ooseq != NULL) { struct tcp_seg *old_ooseq = pcb->ooseq; pcb->ooseq = pcb->ooseq->next; tcp_seg_free(old_ooseq); } } else { next = pcb->ooseq; /* Remove all segments on ooseq that are covered by inseg already. * FIN is copied from ooseq to inseg if present. */ while (next && TCP_SEQ_GEQ(seqno + tcplen, next->tcphdr->seqno + next->len)) { /* inseg cannot have FIN here (already processed above) */ if ((TCPH_FLAGS(next->tcphdr) & TCP_FIN) != 0 && (TCPH_FLAGS(inseg.tcphdr) & TCP_SYN) == 0) { TCPH_SET_FLAG(inseg.tcphdr, TCP_FIN); tcplen = TCP_TCPLEN(&inseg); } prev = next; next = next->next; tcp_seg_free(prev); } /* Now trim right side of inseg if it overlaps with the first * segment on ooseq */ if (next && TCP_SEQ_GT(seqno + tcplen, next->tcphdr->seqno)) { /* inseg cannot have FIN here (already processed above) */ inseg.len = (u16_t)(next->tcphdr->seqno - seqno); if (TCPH_FLAGS(inseg.tcphdr) & TCP_SYN) { inseg.len -= 1; } pbuf_realloc(inseg.p, inseg.len); tcplen = TCP_TCPLEN(&inseg); LWIP_ASSERT("tcp_receive: segment not trimmed correctly to ooseq queue\n", (seqno + tcplen) == next->tcphdr->seqno); } pcb->ooseq = next; } } #endif /* TCP_QUEUE_OOSEQ */ pcb->rcv_nxt = seqno + tcplen; /* Update the receiver's (our) window. */ LWIP_ASSERT("tcp_receive: tcplen > rcv_wnd\n", pcb->rcv_wnd >= tcplen); pcb->rcv_wnd -= tcplen; tcp_update_rcv_ann_wnd(pcb); /* If there is data in the segment, we make preparations to pass this up to the application. The ->recv_data variable is used for holding the pbuf that goes to the application. The code for reassembling out-of-sequence data chains its data on this pbuf as well. If the segment was a FIN, we set the TF_GOT_FIN flag that will be used to indicate to the application that the remote side has closed its end of the connection. */ if (inseg.p->tot_len > 0) { recv_data = inseg.p; /* Since this pbuf now is the responsibility of the application, we delete our reference to it so that we won't (mistakingly) deallocate it. */ inseg.p = NULL; } if (TCPH_FLAGS(inseg.tcphdr) & TCP_FIN) { LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_receive: received FIN.\n")); recv_flags |= TF_GOT_FIN; } #if TCP_QUEUE_OOSEQ /* We now check if we have segments on the ->ooseq queue that are now in sequence. */ while (pcb->ooseq != NULL && pcb->ooseq->tcphdr->seqno == pcb->rcv_nxt) { cseg = pcb->ooseq; seqno = pcb->ooseq->tcphdr->seqno; pcb->rcv_nxt += TCP_TCPLEN(cseg); LWIP_ASSERT("tcp_receive: ooseq tcplen > rcv_wnd\n", pcb->rcv_wnd >= TCP_TCPLEN(cseg)); pcb->rcv_wnd -= TCP_TCPLEN(cseg); tcp_update_rcv_ann_wnd(pcb); if (cseg->p->tot_len > 0) { /* Chain this pbuf onto the pbuf that we will pass to the application. 
*/ /* With window scaling, this can overflow recv_data->tot_len, but that's not a problem since we explicitly fix that before passing recv_data to the application. */ if (recv_data) { pbuf_cat(recv_data, cseg->p); } else { recv_data = cseg->p; } cseg->p = NULL; } if (TCPH_FLAGS(cseg->tcphdr) & TCP_FIN) { LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_receive: dequeued FIN.\n")); recv_flags |= TF_GOT_FIN; if (pcb->state == ESTABLISHED) { /* force passive close or we can move to active close */ pcb->state = CLOSE_WAIT; } } pcb->ooseq = cseg->next; tcp_seg_free(cseg); } #endif /* TCP_QUEUE_OOSEQ */ /* Acknowledge the segment(s). */ tcp_ack(pcb); #if LWIP_IPV6 && LWIP_ND6_TCP_REACHABILITY_HINTS if (ip_current_is_v6()) { /* Inform neighbor reachability of forward progress. */ nd6_reachability_hint(ip6_current_src_addr()); } #endif /* LWIP_IPV6 && LWIP_ND6_TCP_REACHABILITY_HINTS*/ } else { /* We get here if the incoming segment is out-of-sequence. */ tcp_send_empty_ack(pcb); #if TCP_QUEUE_OOSEQ /* We queue the segment on the ->ooseq queue. */ if (pcb->ooseq == NULL) { pcb->ooseq = tcp_seg_copy(&inseg); } else { /* If the queue is not empty, we walk through the queue and try to find a place where the sequence number of the incoming segment is between the sequence numbers of the previous and the next segment on the ->ooseq queue. That is the place where we put the incoming segment. If needed, we trim the second edges of the previous and the incoming segment so that it will fit into the sequence. If the incoming segment has the same sequence number as a segment on the ->ooseq queue, we discard the segment that contains less data. */ prev = NULL; for (next = pcb->ooseq; next != NULL; next = next->next) { if (seqno == next->tcphdr->seqno) { /* The sequence number of the incoming segment is the same as the sequence number of the segment on ->ooseq. We check the lengths to see which one to discard. */ if (inseg.len > next->len) { /* The incoming segment is larger than the old segment. We replace some segments with the new one. */ cseg = tcp_seg_copy(&inseg); if (cseg != NULL) { if (prev != NULL) { prev->next = cseg; } else { pcb->ooseq = cseg; } tcp_oos_insert_segment(cseg, next); } break; } else { /* Either the lengths are the same or the incoming segment was smaller than the old one; in either case, we ditch the incoming segment. */ break; } } else { if (prev == NULL) { if (TCP_SEQ_LT(seqno, next->tcphdr->seqno)) { /* The sequence number of the incoming segment is lower than the sequence number of the first segment on the queue. We put the incoming segment first on the queue. */ cseg = tcp_seg_copy(&inseg); if (cseg != NULL) { pcb->ooseq = cseg; tcp_oos_insert_segment(cseg, next); } break; } } else { /*if (TCP_SEQ_LT(prev->tcphdr->seqno, seqno) && TCP_SEQ_LT(seqno, next->tcphdr->seqno)) {*/ if (TCP_SEQ_BETWEEN(seqno, prev->tcphdr->seqno+1, next->tcphdr->seqno-1)) { /* The sequence number of the incoming segment is in between the sequence numbers of the previous and the next segment on ->ooseq. We trim trim the previous segment, delete next segments that included in received segment and trim received, if needed. */ cseg = tcp_seg_copy(&inseg); if (cseg != NULL) { if (TCP_SEQ_GT(prev->tcphdr->seqno + prev->len, seqno)) { /* We need to trim the prev segment. 
*/ prev->len = (u16_t)(seqno - prev->tcphdr->seqno); pbuf_realloc(prev->p, prev->len); } prev->next = cseg; tcp_oos_insert_segment(cseg, next); } break; } } /* If the "next" segment is the last segment on the ooseq queue, we add the incoming segment to the end of the list. */ if (next->next == NULL && TCP_SEQ_GT(seqno, next->tcphdr->seqno)) { if (TCPH_FLAGS(next->tcphdr) & TCP_FIN) { /* segment "next" already contains all data */ break; } next->next = tcp_seg_copy(&inseg); if (next->next != NULL) { if (TCP_SEQ_GT(next->tcphdr->seqno + next->len, seqno)) { /* We need to trim the last segment. */ next->len = (u16_t)(seqno - next->tcphdr->seqno); pbuf_realloc(next->p, next->len); } /* check if the remote side overruns our receive window */ if (TCP_SEQ_GT((u32_t)tcplen + seqno, pcb->rcv_nxt + (u32_t)pcb->rcv_wnd)) { LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_receive: other end overran receive window" "seqno %"U32_F" len %"U16_F" right edge %"U32_F"\n", seqno, tcplen, pcb->rcv_nxt + pcb->rcv_wnd)); if (TCPH_FLAGS(next->next->tcphdr) & TCP_FIN) { /* Must remove the FIN from the header as we're trimming * that byte of sequence-space from the packet */ TCPH_FLAGS_SET(next->next->tcphdr, TCPH_FLAGS(next->next->tcphdr) & ~TCP_FIN); } /* Adjust length of segment to fit in the window. */ next->next->len = (u16_t)(pcb->rcv_nxt + pcb->rcv_wnd - seqno); pbuf_realloc(next->next->p, next->next->len); tcplen = TCP_TCPLEN(next->next); LWIP_ASSERT("tcp_receive: segment not trimmed correctly to rcv_wnd\n", (seqno + tcplen) == (pcb->rcv_nxt + pcb->rcv_wnd)); } } break; } } prev = next; } } #if TCP_OOSEQ_MAX_BYTES || TCP_OOSEQ_MAX_PBUFS /* Check that the data on ooseq doesn't exceed one of the limits and throw away everything above that limit. */ ooseq_blen = 0; ooseq_qlen = 0; prev = NULL; for (next = pcb->ooseq; next != NULL; prev = next, next = next->next) { struct pbuf *p = next->p; ooseq_blen += p->tot_len; ooseq_qlen += pbuf_clen(p); if ((ooseq_blen > TCP_OOSEQ_MAX_BYTES) || (ooseq_qlen > TCP_OOSEQ_MAX_PBUFS)) { /* too much ooseq data, dump this and everything after it */ tcp_segs_free(next); if (prev == NULL) { /* first ooseq segment is too much, dump the whole queue */ pcb->ooseq = NULL; } else { /* just dump 'next' and everything after it */ prev->next = NULL; } break; } } #endif /* TCP_OOSEQ_MAX_BYTES || TCP_OOSEQ_MAX_PBUFS */ #endif /* TCP_QUEUE_OOSEQ */ } } else { /* The incoming segment is not within the window. */ tcp_send_empty_ack(pcb); } } else { /* Segments with length 0 is taken care of here. Segments that fall out of the window are ACKed. */ if (!TCP_SEQ_BETWEEN(seqno, pcb->rcv_nxt, pcb->rcv_nxt + pcb->rcv_wnd - 1)) { tcp_ack_now(pcb); } } } static u8_t tcp_getoptbyte(void) { if ((tcphdr_opt2 == NULL) || (tcp_optidx < tcphdr_opt1len)) { u8_t* opts = (u8_t *)tcphdr + TCP_HLEN; return opts[tcp_optidx++]; } else { u8_t idx = (u8_t)(tcp_optidx++ - tcphdr_opt1len); return tcphdr_opt2[idx]; } } /** * Parses the options contained in the incoming segment. * * Called from tcp_listen_input() and tcp_process(). * Currently, only the MSS option is supported! * * @param pcb the tcp_pcb for which a segment arrived */ static void tcp_parseopt(struct tcp_pcb *pcb) { u8_t data; u16_t mss; #if LWIP_TCP_TIMESTAMPS u32_t tsval; #endif /* Parse the TCP MSS option, if present. */ if (tcphdr_optlen != 0) { for (tcp_optidx = 0; tcp_optidx < tcphdr_optlen; ) { u8_t opt = tcp_getoptbyte(); switch (opt) { case LWIP_TCP_OPT_EOL: /* End of options. 
*/ LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_parseopt: EOL\n")); return; case LWIP_TCP_OPT_NOP: /* NOP option. */ LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_parseopt: NOP\n")); break; case LWIP_TCP_OPT_MSS: LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_parseopt: MSS\n")); if (tcp_getoptbyte() != LWIP_TCP_OPT_LEN_MSS || (tcp_optidx - 2 + LWIP_TCP_OPT_LEN_MSS) > tcphdr_optlen) { /* Bad length */ LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_parseopt: bad length\n")); return; } /* An MSS option with the right option length. */ mss = (tcp_getoptbyte() << 8); mss |= tcp_getoptbyte(); /* Limit the mss to the configured TCP_MSS and prevent division by zero */ pcb->mss = ((mss > TCP_MSS) || (mss == 0)) ? TCP_MSS : mss; break; #if LWIP_WND_SCALE case LWIP_TCP_OPT_WS: LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_parseopt: WND_SCALE\n")); if (tcp_getoptbyte() != LWIP_TCP_OPT_LEN_WS || (tcp_optidx - 2 + LWIP_TCP_OPT_LEN_WS) > tcphdr_optlen) { /* Bad length */ LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_parseopt: bad length\n")); return; } /* An WND_SCALE option with the right option length. */ data = tcp_getoptbyte(); /* If syn was received with wnd scale option, activate wnd scale opt, but only if this is not a retransmission */ if ((flags & TCP_SYN) && !(pcb->flags & TF_WND_SCALE)) { pcb->snd_scale = data; if (pcb->snd_scale > 14U) { pcb->snd_scale = 14U; } pcb->rcv_scale = TCP_RCV_SCALE; pcb->flags |= TF_WND_SCALE; /* window scaling is enabled, we can use the full receive window */ LWIP_ASSERT("window not at default value", pcb->rcv_wnd == TCPWND_MIN16(TCP_WND)); LWIP_ASSERT("window not at default value", pcb->rcv_ann_wnd == TCPWND_MIN16(TCP_WND)); pcb->rcv_wnd = pcb->rcv_ann_wnd = TCP_WND; } break; #endif #if LWIP_TCP_TIMESTAMPS case LWIP_TCP_OPT_TS: LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_parseopt: TS\n")); if (tcp_getoptbyte() != LWIP_TCP_OPT_LEN_TS || (tcp_optidx - 2 + LWIP_TCP_OPT_LEN_TS) > tcphdr_optlen) { /* Bad length */ LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_parseopt: bad length\n")); return; } /* TCP timestamp option with valid length */ tsval = tcp_getoptbyte(); tsval |= (tcp_getoptbyte() << 8); tsval |= (tcp_getoptbyte() << 16); tsval |= (tcp_getoptbyte() << 24); if (flags & TCP_SYN) { pcb->ts_recent = lwip_ntohl(tsval); /* Enable sending timestamps in every segment now that we know the remote host supports it. */ pcb->flags |= TF_TIMESTAMP; } else if (TCP_SEQ_BETWEEN(pcb->ts_lastacksent, seqno, seqno+tcplen)) { pcb->ts_recent = lwip_ntohl(tsval); } /* Advance to next option (6 bytes already read) */ tcp_optidx += LWIP_TCP_OPT_LEN_TS - 6; break; #endif default: LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_parseopt: other\n")); data = tcp_getoptbyte(); if (data < 2) { LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_parseopt: bad length\n")); /* If the length field is zero, the options are malformed and we don't process them further. */ return; } /* All other options have a length field, so that we easily can skip past them. */ tcp_optidx += data - 2; } } } } void tcp_trigger_input_pcb_close(void) { recv_flags |= TF_CLOSED; } #endif /* LWIP_TCP */
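/* Editor's note: an illustrative sketch, not part of the original lwIP
 * sources. tcp_parseopt() above walks the option bytes of a received segment,
 * skips NOP padding, stops at EOL, and steps over every other option by its
 * length byte; a malformed length aborts parsing. The standalone helper below
 * repeats that walk over a raw options buffer and extracts the MSS value; the
 * name example_parse_mss and the EX_OPT_* constants are hypothetical and
 * exist only for this sketch. */
#include <stdint.h>
#include <stddef.h>

#define EX_OPT_EOL 0u  /* end of option list */
#define EX_OPT_NOP 1u  /* single-byte padding */
#define EX_OPT_MSS 2u  /* maximum segment size option, total length 4 */

/* Return the MSS carried in 'opts' (optlen bytes), or 0 if absent/malformed. */
static uint16_t example_parse_mss(const uint8_t *opts, size_t optlen)
{
  size_t i = 0;
  while (i < optlen) {
    uint8_t kind = opts[i];
    if (kind == EX_OPT_EOL) {
      break;                          /* end of options */
    } else if (kind == EX_OPT_NOP) {
      i++;                            /* padding byte, has no length field */
    } else {
      uint8_t len;
      if (i + 1 >= optlen) {
        break;                        /* truncated option header */
      }
      len = opts[i + 1];
      if ((len < 2) || (i + len > optlen)) {
        break;                        /* bad length: stop, as tcp_parseopt does */
      }
      if ((kind == EX_OPT_MSS) && (len == 4)) {
        return (uint16_t)((opts[i + 2] << 8) | opts[i + 3]);
      }
      i += len;                       /* skip any other option by its length */
    }
  }
  return 0;                           /* no usable MSS option found */
}
/* lwIP additionally clamps the result: a peer MSS of 0 or anything above the
 * configured TCP_MSS is replaced by TCP_MSS before being stored in pcb->mss. */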
2301_81045437/classic-platform
communication/lwip-2.0.3/src/core/tcp_in.c
C
unknown
71,099
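/* Editor's note: an illustrative sketch, not part of the original lwIP
 * sources. The RTT handling in tcp_receive() in tcp_in.c above uses Van
 * Jacobson's scaled integer estimator: 'sa' holds the smoothed RTT scaled by
 * 8, 'sv' holds the smoothed mean deviation scaled by 4, both in slow-timer
 * ticks, and the RTO is recomputed as (sa >> 3) + sv. The helper below repeats
 * that arithmetic on a small struct; example_rtt_update and struct example_rtt
 * are hypothetical names used only here. */
#include <stdint.h>

struct example_rtt {
  int16_t sa;    /* smoothed RTT estimate, scaled by 8 */
  int16_t sv;    /* smoothed mean deviation, scaled by 4 */
  int16_t rto;   /* retransmission timeout, in slow-timer ticks */
};

/* Feed one round-trip sample 'measured' (in slow-timer ticks) into the estimator. */
static void example_rtt_update(struct example_rtt *r, int16_t measured)
{
  int16_t m = measured;
  m = m - (r->sa >> 3);             /* error relative to the smoothed RTT */
  r->sa += m;                       /* srtt += (sample - srtt) / 8, in scaled form */
  if (m < 0) {
    m = -m;                         /* use the magnitude for the deviation term */
  }
  m = m - (r->sv >> 2);             /* error relative to the smoothed deviation */
  r->sv += m;                       /* mdev += (|sample - srtt| - mdev) / 4, scaled */
  r->rto = (r->sa >> 3) + r->sv;    /* RTO = srtt + 4 * mdev, as in tcp_receive() */
}
/* lwIP stops sampling after a retransmission (tcp_rexmit_rto()/tcp_rexmit()
 * clear pcb->rttest), so ambiguous, retransmitted segments never feed this
 * estimator. */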
/** * @file * Transmission Control Protocol, outgoing traffic * * The output functions of TCP. * */ /* * Copyright (c) 2001-2004 Swedish Institute of Computer Science. * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY * OF SUCH DAMAGE. * * This file is part of the lwIP TCP/IP stack. * * Author: Adam Dunkels <adam@sics.se> * */ #include "lwip/opt.h" #if LWIP_TCP /* don't build if not configured for use in lwipopts.h */ #include "lwip/priv/tcp_priv.h" #include "lwip/def.h" #include "lwip/mem.h" #include "lwip/memp.h" #include "lwip/ip_addr.h" #include "lwip/netif.h" #include "lwip/inet_chksum.h" #include "lwip/stats.h" #include "lwip/ip6.h" #include "lwip/ip6_addr.h" #if LWIP_TCP_TIMESTAMPS #include "lwip/sys.h" #endif #include <string.h> /* Define some copy-macros for checksum-on-copy so that the code looks nicer by preventing too many ifdef's. */ #if TCP_CHECKSUM_ON_COPY #define TCP_DATA_COPY(dst, src, len, seg) do { \ tcp_seg_add_chksum(LWIP_CHKSUM_COPY(dst, src, len), \ len, &seg->chksum, &seg->chksum_swapped); \ seg->flags |= TF_SEG_DATA_CHECKSUMMED; } while(0) #define TCP_DATA_COPY2(dst, src, len, chksum, chksum_swapped) \ tcp_seg_add_chksum(LWIP_CHKSUM_COPY(dst, src, len), len, chksum, chksum_swapped); #else /* TCP_CHECKSUM_ON_COPY*/ #define TCP_DATA_COPY(dst, src, len, seg) MEMCPY(dst, src, len) #define TCP_DATA_COPY2(dst, src, len, chksum, chksum_swapped) MEMCPY(dst, src, len) #endif /* TCP_CHECKSUM_ON_COPY*/ /** Define this to 1 for an extra check that the output checksum is valid * (usefule when the checksum is generated by the application, not the stack) */ #ifndef TCP_CHECKSUM_ON_COPY_SANITY_CHECK #define TCP_CHECKSUM_ON_COPY_SANITY_CHECK 0 #endif /* Allow to override the failure of sanity check from warning to e.g. 
hard failure */ #if TCP_CHECKSUM_ON_COPY_SANITY_CHECK #ifndef TCP_CHECKSUM_ON_COPY_SANITY_CHECK_FAIL #define TCP_CHECKSUM_ON_COPY_SANITY_CHECK_FAIL(msg) LWIP_DEBUGF(TCP_DEBUG | LWIP_DBG_LEVEL_WARNING, msg) #endif #endif #if TCP_OVERSIZE /** The size of segment pbufs created when TCP_OVERSIZE is enabled */ #ifndef TCP_OVERSIZE_CALC_LENGTH #define TCP_OVERSIZE_CALC_LENGTH(length) ((length) + TCP_OVERSIZE) #endif #endif /* Forward declarations.*/ static err_t tcp_output_segment(struct tcp_seg *seg, struct tcp_pcb *pcb, struct netif *netif); /** Allocate a pbuf and create a tcphdr at p->payload, used for output * functions other than the default tcp_output -> tcp_output_segment * (e.g. tcp_send_empty_ack, etc.) * * @param pcb tcp pcb for which to send a packet (used to initialize tcp_hdr) * @param optlen length of header-options * @param datalen length of tcp data to reserve in pbuf * @param seqno_be seqno in network byte order (big-endian) * @return pbuf with p->payload being the tcp_hdr */ static struct pbuf * tcp_output_alloc_header(struct tcp_pcb *pcb, u16_t optlen, u16_t datalen, u32_t seqno_be /* already in network byte order */) { struct tcp_hdr *tcphdr; struct pbuf *p = pbuf_alloc(PBUF_IP, TCP_HLEN + optlen + datalen, PBUF_RAM); if (p != NULL) { LWIP_ASSERT("check that first pbuf can hold struct tcp_hdr", (p->len >= TCP_HLEN + optlen)); tcphdr = (struct tcp_hdr *)p->payload; tcphdr->src = lwip_htons(pcb->local_port); tcphdr->dest = lwip_htons(pcb->remote_port); tcphdr->seqno = seqno_be; tcphdr->ackno = lwip_htonl(pcb->rcv_nxt); TCPH_HDRLEN_FLAGS_SET(tcphdr, (5 + optlen / 4), TCP_ACK); tcphdr->wnd = lwip_htons(TCPWND_MIN16(RCV_WND_SCALE(pcb, pcb->rcv_ann_wnd))); tcphdr->chksum = 0; tcphdr->urgp = 0; /* If we're sending a packet, update the announced right window edge */ pcb->rcv_ann_right_edge = pcb->rcv_nxt + pcb->rcv_ann_wnd; } return p; } /** * Called by tcp_close() to send a segment including FIN flag but not data. * * @param pcb the tcp_pcb over which to send a segment * @return ERR_OK if sent, another err_t otherwise */ err_t tcp_send_fin(struct tcp_pcb *pcb) { /* first, try to add the fin to the last unsent segment */ if (pcb->unsent != NULL) { struct tcp_seg *last_unsent; for (last_unsent = pcb->unsent; last_unsent->next != NULL; last_unsent = last_unsent->next); if ((TCPH_FLAGS(last_unsent->tcphdr) & (TCP_SYN | TCP_FIN | TCP_RST)) == 0) { /* no SYN/FIN/RST flag in the header, we can add the FIN flag */ TCPH_SET_FLAG(last_unsent->tcphdr, TCP_FIN); pcb->flags |= TF_FIN; return ERR_OK; } } /* no data, no length, flags, copy=1, no optdata */ return tcp_enqueue_flags(pcb, TCP_FIN); } /** * Create a TCP segment with prefilled header. * * Called by tcp_write and tcp_enqueue_flags. * * @param pcb Protocol control block for the TCP connection. * @param p pbuf that is used to hold the TCP header. * @param flags TCP flags for header. * @param seqno TCP sequence number of this packet * @param optflags options to include in TCP header * @return a new tcp_seg pointing to p, or NULL. * The TCP header is filled in except ackno and wnd. * p is freed on failure. 
*/ static struct tcp_seg * tcp_create_segment(struct tcp_pcb *pcb, struct pbuf *p, u8_t flags, u32_t seqno, u8_t optflags) { struct tcp_seg *seg; u8_t optlen = LWIP_TCP_OPT_LENGTH(optflags); if ((seg = (struct tcp_seg *)memp_malloc(MEMP_TCP_SEG)) == NULL) { LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("tcp_create_segment: no memory.\n")); pbuf_free(p); return NULL; } seg->flags = optflags; seg->next = NULL; seg->p = p; LWIP_ASSERT("p->tot_len >= optlen", p->tot_len >= optlen); seg->len = p->tot_len - optlen; #if TCP_OVERSIZE_DBGCHECK seg->oversize_left = 0; #endif /* TCP_OVERSIZE_DBGCHECK */ #if TCP_CHECKSUM_ON_COPY seg->chksum = 0; seg->chksum_swapped = 0; /* check optflags */ LWIP_ASSERT("invalid optflags passed: TF_SEG_DATA_CHECKSUMMED", (optflags & TF_SEG_DATA_CHECKSUMMED) == 0); #endif /* TCP_CHECKSUM_ON_COPY */ /* build TCP header */ if (pbuf_header(p, TCP_HLEN)) { LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("tcp_create_segment: no room for TCP header in pbuf.\n")); TCP_STATS_INC(tcp.err); tcp_seg_free(seg); return NULL; } seg->tcphdr = (struct tcp_hdr *)seg->p->payload; seg->tcphdr->src = lwip_htons(pcb->local_port); seg->tcphdr->dest = lwip_htons(pcb->remote_port); seg->tcphdr->seqno = lwip_htonl(seqno); /* ackno is set in tcp_output */ TCPH_HDRLEN_FLAGS_SET(seg->tcphdr, (5 + optlen / 4), flags); /* wnd and chksum are set in tcp_output */ seg->tcphdr->urgp = 0; return seg; } /** * Allocate a PBUF_RAM pbuf, perhaps with extra space at the end. * * This function is like pbuf_alloc(layer, length, PBUF_RAM) except * there may be extra bytes available at the end. * * @param layer flag to define header size. * @param length size of the pbuf's payload. * @param max_length maximum usable size of payload+oversize. * @param oversize pointer to a u16_t that will receive the number of usable tail bytes. * @param pcb The TCP connection that will enqueue the pbuf. * @param apiflags API flags given to tcp_write. * @param first_seg true when this pbuf will be used in the first enqueued segment. */ #if TCP_OVERSIZE static struct pbuf * tcp_pbuf_prealloc(pbuf_layer layer, u16_t length, u16_t max_length, u16_t *oversize, struct tcp_pcb *pcb, u8_t apiflags, u8_t first_seg) { struct pbuf *p; u16_t alloc = length; #if LWIP_NETIF_TX_SINGLE_PBUF LWIP_UNUSED_ARG(max_length); LWIP_UNUSED_ARG(pcb); LWIP_UNUSED_ARG(apiflags); LWIP_UNUSED_ARG(first_seg); alloc = max_length; #else /* LWIP_NETIF_TX_SINGLE_PBUF */ if (length < max_length) { /* Should we allocate an oversized pbuf, or just the minimum * length required? If tcp_write is going to be called again * before this segment is transmitted, we want the oversized * buffer. If the segment will be transmitted immediately, we can * save memory by allocating only length. We use a simple * heuristic based on the following information: * * Did the user set TCP_WRITE_FLAG_MORE? * * Will the Nagle algorithm defer transmission of this segment? 
*/ if ((apiflags & TCP_WRITE_FLAG_MORE) || (!(pcb->flags & TF_NODELAY) && (!first_seg || pcb->unsent != NULL || pcb->unacked != NULL))) { alloc = LWIP_MIN(max_length, LWIP_MEM_ALIGN_SIZE(TCP_OVERSIZE_CALC_LENGTH(length))); } } #endif /* LWIP_NETIF_TX_SINGLE_PBUF */ p = pbuf_alloc(layer, alloc, PBUF_RAM); if (p == NULL) { return NULL; } LWIP_ASSERT("need unchained pbuf", p->next == NULL); *oversize = p->len - length; /* trim p->len to the currently used size */ p->len = p->tot_len = length; return p; } #else /* TCP_OVERSIZE */ #define tcp_pbuf_prealloc(layer, length, mx, os, pcb, api, fst) pbuf_alloc((layer), (length), PBUF_RAM) #endif /* TCP_OVERSIZE */ #if TCP_CHECKSUM_ON_COPY /** Add a checksum of newly added data to the segment */ static void tcp_seg_add_chksum(u16_t chksum, u16_t len, u16_t *seg_chksum, u8_t *seg_chksum_swapped) { u32_t helper; /* add chksum to old chksum and fold to u16_t */ helper = chksum + *seg_chksum; chksum = FOLD_U32T(helper); if ((len & 1) != 0) { *seg_chksum_swapped = 1 - *seg_chksum_swapped; chksum = SWAP_BYTES_IN_WORD(chksum); } *seg_chksum = chksum; } #endif /* TCP_CHECKSUM_ON_COPY */ /** Checks if tcp_write is allowed or not (checks state, snd_buf and snd_queuelen). * * @param pcb the tcp pcb to check for * @param len length of data to send (checked agains snd_buf) * @return ERR_OK if tcp_write is allowed to proceed, another err_t otherwise */ static err_t tcp_write_checks(struct tcp_pcb *pcb, u16_t len) { /* connection is in invalid state for data transmission? */ if ((pcb->state != ESTABLISHED) && (pcb->state != CLOSE_WAIT) && (pcb->state != SYN_SENT) && (pcb->state != SYN_RCVD)) { LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_STATE | LWIP_DBG_LEVEL_SEVERE, ("tcp_write() called in invalid state\n")); return ERR_CONN; } else if (len == 0) { return ERR_OK; } /* fail on too much data */ if (len > pcb->snd_buf) { LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_LEVEL_SEVERE, ("tcp_write: too much data (len=%"U16_F" > snd_buf=%"TCPWNDSIZE_F")\n", len, pcb->snd_buf)); pcb->flags |= TF_NAGLEMEMERR; return ERR_MEM; } LWIP_DEBUGF(TCP_QLEN_DEBUG, ("tcp_write: queuelen: %"TCPWNDSIZE_F"\n", (tcpwnd_size_t)pcb->snd_queuelen)); /* If total number of pbufs on the unsent/unacked queues exceeds the * configured maximum, return an error */ /* check for configured max queuelen and possible overflow */ if ((pcb->snd_queuelen >= TCP_SND_QUEUELEN) || (pcb->snd_queuelen > TCP_SNDQUEUELEN_OVERFLOW)) { LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_LEVEL_SEVERE, ("tcp_write: too long queue %"U16_F" (max %"U16_F")\n", pcb->snd_queuelen, (u16_t)TCP_SND_QUEUELEN)); TCP_STATS_INC(tcp.memerr); pcb->flags |= TF_NAGLEMEMERR; return ERR_MEM; } if (pcb->snd_queuelen != 0) { LWIP_ASSERT("tcp_write: pbufs on queue => at least one queue non-empty", pcb->unacked != NULL || pcb->unsent != NULL); } else { LWIP_ASSERT("tcp_write: no pbufs on queue => both queues empty", pcb->unacked == NULL && pcb->unsent == NULL); } return ERR_OK; } /** * @ingroup tcp_raw * Write data for sending (but does not send it immediately). * * It waits in the expectation of more data being sent soon (as * it can send them more efficiently by combining them together). * To prompt the system to send data now, call tcp_output() after * calling tcp_write(). * * @param pcb Protocol control block for the TCP connection to enqueue data for. * @param arg Pointer to the data to be enqueued for sending. 
* @param len Data length in bytes * @param apiflags combination of following flags : * - TCP_WRITE_FLAG_COPY (0x01) data will be copied into memory belonging to the stack * - TCP_WRITE_FLAG_MORE (0x02) for TCP connection, PSH flag will not be set on last segment sent, * @return ERR_OK if enqueued, another err_t on error */ err_t tcp_write(struct tcp_pcb *pcb, const void *arg, u16_t len, u8_t apiflags) { struct pbuf *concat_p = NULL; struct tcp_seg *last_unsent = NULL, *seg = NULL, *prev_seg = NULL, *queue = NULL; u16_t pos = 0; /* position in 'arg' data */ u16_t queuelen; u8_t optlen = 0; u8_t optflags = 0; #if TCP_OVERSIZE u16_t oversize = 0; u16_t oversize_used = 0; #if TCP_OVERSIZE_DBGCHECK u16_t oversize_add = 0; #endif /* TCP_OVERSIZE_DBGCHECK*/ #endif /* TCP_OVERSIZE */ u16_t extendlen = 0; #if TCP_CHECKSUM_ON_COPY u16_t concat_chksum = 0; u8_t concat_chksum_swapped = 0; u16_t concat_chksummed = 0; #endif /* TCP_CHECKSUM_ON_COPY */ err_t err; /* don't allocate segments bigger than half the maximum window we ever received */ u16_t mss_local = LWIP_MIN(pcb->mss, TCPWND_MIN16(pcb->snd_wnd_max/2)); mss_local = mss_local ? mss_local : pcb->mss; #if LWIP_NETIF_TX_SINGLE_PBUF /* Always copy to try to create single pbufs for TX */ apiflags |= TCP_WRITE_FLAG_COPY; #endif /* LWIP_NETIF_TX_SINGLE_PBUF */ LWIP_DEBUGF(TCP_OUTPUT_DEBUG, ("tcp_write(pcb=%p, data=%p, len=%"U16_F", apiflags=%"U16_F")\n", (void *)pcb, arg, len, (u16_t)apiflags)); LWIP_ERROR("tcp_write: arg == NULL (programmer violates API)", arg != NULL, return ERR_ARG;); err = tcp_write_checks(pcb, len); if (err != ERR_OK) { return err; } queuelen = pcb->snd_queuelen; #if LWIP_TCP_TIMESTAMPS if ((pcb->flags & TF_TIMESTAMP)) { /* Make sure the timestamp option is only included in data segments if we agreed about it with the remote host. */ optflags = TF_SEG_OPTS_TS; optlen = LWIP_TCP_OPT_LENGTH(TF_SEG_OPTS_TS); /* ensure that segments can hold at least one data byte... */ mss_local = LWIP_MAX(mss_local, LWIP_TCP_OPT_LEN_TS + 1); } #endif /* LWIP_TCP_TIMESTAMPS */ /* * TCP segmentation is done in three phases with increasing complexity: * * 1. Copy data directly into an oversized pbuf. * 2. Chain a new pbuf to the end of pcb->unsent. * 3. Create new segments. * * We may run out of memory at any point. In that case we must * return ERR_MEM and not change anything in pcb. Therefore, all * changes are recorded in local variables and committed at the end * of the function. Some pcb fields are maintained in local copies: * * queuelen = pcb->snd_queuelen * oversize = pcb->unsent_oversize * * These variables are set consistently by the phases: * * seg points to the last segment tampered with. * * pos records progress as data is segmented. */ /* Find the tail of the unsent queue. */ if (pcb->unsent != NULL) { u16_t space; u16_t unsent_optlen; /* @todo: this could be sped up by keeping last_unsent in the pcb */ for (last_unsent = pcb->unsent; last_unsent->next != NULL; last_unsent = last_unsent->next); /* Usable space at the end of the last unsent segment */ unsent_optlen = LWIP_TCP_OPT_LENGTH(last_unsent->flags); LWIP_ASSERT("mss_local is too small", mss_local >= last_unsent->len + unsent_optlen); space = mss_local - (last_unsent->len + unsent_optlen); /* * Phase 1: Copy data directly into an oversized pbuf. * * The number of bytes copied is recorded in the oversize_used * variable. The actual copying is done at the bottom of the * function. 
*/ #if TCP_OVERSIZE #if TCP_OVERSIZE_DBGCHECK /* check that pcb->unsent_oversize matches last_unsent->oversize_left */ LWIP_ASSERT("unsent_oversize mismatch (pcb vs. last_unsent)", pcb->unsent_oversize == last_unsent->oversize_left); #endif /* TCP_OVERSIZE_DBGCHECK */ oversize = pcb->unsent_oversize; if (oversize > 0) { LWIP_ASSERT("inconsistent oversize vs. space", oversize <= space); seg = last_unsent; oversize_used = LWIP_MIN(space, LWIP_MIN(oversize, len)); pos += oversize_used; oversize -= oversize_used; space -= oversize_used; } /* now we are either finished or oversize is zero */ LWIP_ASSERT("inconsistent oversize vs. len", (oversize == 0) || (pos == len)); #endif /* TCP_OVERSIZE */ /* * Phase 2: Chain a new pbuf to the end of pcb->unsent. * * As an exception when NOT copying the data, if the given data buffer * directly follows the last unsent data buffer in memory, extend the last * ROM pbuf reference to the buffer, thus saving a ROM pbuf allocation. * * We don't extend segments containing SYN/FIN flags or options * (len==0). The new pbuf is kept in concat_p and pbuf_cat'ed at * the end. */ if ((pos < len) && (space > 0) && (last_unsent->len > 0)) { u16_t seglen = LWIP_MIN(space, len - pos); seg = last_unsent; /* Create a pbuf with a copy or reference to seglen bytes. We * can use PBUF_RAW here since the data appears in the middle of * a segment. A header will never be prepended. */ if (apiflags & TCP_WRITE_FLAG_COPY) { /* Data is copied */ if ((concat_p = tcp_pbuf_prealloc(PBUF_RAW, seglen, space, &oversize, pcb, apiflags, 1)) == NULL) { LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("tcp_write : could not allocate memory for pbuf copy size %"U16_F"\n", seglen)); goto memerr; } #if TCP_OVERSIZE_DBGCHECK oversize_add = oversize; #endif /* TCP_OVERSIZE_DBGCHECK */ TCP_DATA_COPY2(concat_p->payload, (const u8_t*)arg + pos, seglen, &concat_chksum, &concat_chksum_swapped); #if TCP_CHECKSUM_ON_COPY concat_chksummed += seglen; #endif /* TCP_CHECKSUM_ON_COPY */ queuelen += pbuf_clen(concat_p); } else { /* Data is not copied */ /* If the last unsent pbuf is of type PBUF_ROM, try to extend it. */ struct pbuf *p; for (p = last_unsent->p; p->next != NULL; p = p->next); if (p->type == PBUF_ROM && (const u8_t *)p->payload + p->len == (const u8_t *)arg) { LWIP_ASSERT("tcp_write: ROM pbufs cannot be oversized", pos == 0); extendlen = seglen; } else { if ((concat_p = pbuf_alloc(PBUF_RAW, seglen, PBUF_ROM)) == NULL) { LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("tcp_write: could not allocate memory for zero-copy pbuf\n")); goto memerr; } /* reference the non-volatile payload data */ ((struct pbuf_rom*)concat_p)->payload = (const u8_t*)arg + pos; queuelen += pbuf_clen(concat_p); } #if TCP_CHECKSUM_ON_COPY /* calculate the checksum of nocopy-data */ tcp_seg_add_chksum(~inet_chksum((const u8_t*)arg + pos, seglen), seglen, &concat_chksum, &concat_chksum_swapped); concat_chksummed += seglen; #endif /* TCP_CHECKSUM_ON_COPY */ } pos += seglen; } } else { #if TCP_OVERSIZE LWIP_ASSERT("unsent_oversize mismatch (pcb->unsent is NULL)", pcb->unsent_oversize == 0); #endif /* TCP_OVERSIZE */ } /* * Phase 3: Create new segments. * * The new segments are chained together in the local 'queue' * variable, ready to be appended to pcb->unsent. 
*/ while (pos < len) { struct pbuf *p; u16_t left = len - pos; u16_t max_len = mss_local - optlen; u16_t seglen = LWIP_MIN(left, max_len); #if TCP_CHECKSUM_ON_COPY u16_t chksum = 0; u8_t chksum_swapped = 0; #endif /* TCP_CHECKSUM_ON_COPY */ if (apiflags & TCP_WRITE_FLAG_COPY) { /* If copy is set, memory should be allocated and data copied * into pbuf */ if ((p = tcp_pbuf_prealloc(PBUF_TRANSPORT, seglen + optlen, mss_local, &oversize, pcb, apiflags, queue == NULL)) == NULL) { LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("tcp_write : could not allocate memory for pbuf copy size %"U16_F"\n", seglen)); goto memerr; } LWIP_ASSERT("tcp_write: check that first pbuf can hold the complete seglen", (p->len >= seglen)); TCP_DATA_COPY2((char *)p->payload + optlen, (const u8_t*)arg + pos, seglen, &chksum, &chksum_swapped); } else { /* Copy is not set: First allocate a pbuf for holding the data. * Since the referenced data is available at least until it is * sent out on the link (as it has to be ACKed by the remote * party) we can safely use PBUF_ROM instead of PBUF_REF here. */ struct pbuf *p2; #if TCP_OVERSIZE LWIP_ASSERT("oversize == 0", oversize == 0); #endif /* TCP_OVERSIZE */ if ((p2 = pbuf_alloc(PBUF_TRANSPORT, seglen, PBUF_ROM)) == NULL) { LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("tcp_write: could not allocate memory for zero-copy pbuf\n")); goto memerr; } #if TCP_CHECKSUM_ON_COPY /* calculate the checksum of nocopy-data */ chksum = ~inet_chksum((const u8_t*)arg + pos, seglen); if (seglen & 1) { chksum_swapped = 1; chksum = SWAP_BYTES_IN_WORD(chksum); } #endif /* TCP_CHECKSUM_ON_COPY */ /* reference the non-volatile payload data */ ((struct pbuf_rom*)p2)->payload = (const u8_t*)arg + pos; /* Second, allocate a pbuf for the headers. */ if ((p = pbuf_alloc(PBUF_TRANSPORT, optlen, PBUF_RAM)) == NULL) { /* If allocation fails, we have to deallocate the data pbuf as * well. */ pbuf_free(p2); LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("tcp_write: could not allocate memory for header pbuf\n")); goto memerr; } /* Concatenate the headers and data pbufs together. */ pbuf_cat(p/*header*/, p2/*data*/); } queuelen += pbuf_clen(p); /* Now that there are more segments queued, we check again if the * length of the queue exceeds the configured maximum or * overflows. */ if ((queuelen > TCP_SND_QUEUELEN) || (queuelen > TCP_SNDQUEUELEN_OVERFLOW)) { LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("tcp_write: queue too long %"U16_F" (%d)\n", queuelen, (int)TCP_SND_QUEUELEN)); pbuf_free(p); goto memerr; } if ((seg = tcp_create_segment(pcb, p, 0, pcb->snd_lbb + pos, optflags)) == NULL) { goto memerr; } #if TCP_OVERSIZE_DBGCHECK seg->oversize_left = oversize; #endif /* TCP_OVERSIZE_DBGCHECK */ #if TCP_CHECKSUM_ON_COPY seg->chksum = chksum; seg->chksum_swapped = chksum_swapped; seg->flags |= TF_SEG_DATA_CHECKSUMMED; #endif /* TCP_CHECKSUM_ON_COPY */ /* first segment of to-be-queued data? */ if (queue == NULL) { queue = seg; } else { /* Attach the segment to the end of the queued segments */ LWIP_ASSERT("prev_seg != NULL", prev_seg != NULL); prev_seg->next = seg; } /* remember last segment of to-be-queued data for next iteration */ prev_seg = seg; LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_TRACE, ("tcp_write: queueing %"U32_F":%"U32_F"\n", lwip_ntohl(seg->tcphdr->seqno), lwip_ntohl(seg->tcphdr->seqno) + TCP_TCPLEN(seg))); pos += seglen; } /* * All three segmentation phases were successful. We can commit the * transaction. 
*/ #if TCP_OVERSIZE_DBGCHECK if ((last_unsent != NULL) && (oversize_add != 0)) { last_unsent->oversize_left += oversize_add; } #endif /* TCP_OVERSIZE_DBGCHECK */ /* * Phase 1: If data has been added to the preallocated tail of * last_unsent, we update the length fields of the pbuf chain. */ #if TCP_OVERSIZE if (oversize_used > 0) { struct pbuf *p; /* Bump tot_len of whole chain, len of tail */ for (p = last_unsent->p; p; p = p->next) { p->tot_len += oversize_used; if (p->next == NULL) { TCP_DATA_COPY((char *)p->payload + p->len, arg, oversize_used, last_unsent); p->len += oversize_used; } } last_unsent->len += oversize_used; #if TCP_OVERSIZE_DBGCHECK LWIP_ASSERT("last_unsent->oversize_left >= oversize_used", last_unsent->oversize_left >= oversize_used); last_unsent->oversize_left -= oversize_used; #endif /* TCP_OVERSIZE_DBGCHECK */ } pcb->unsent_oversize = oversize; #endif /* TCP_OVERSIZE */ /* * Phase 2: concat_p can be concatenated onto last_unsent->p, unless we * determined that the last ROM pbuf can be extended to include the new data. */ if (concat_p != NULL) { LWIP_ASSERT("tcp_write: cannot concatenate when pcb->unsent is empty", (last_unsent != NULL)); pbuf_cat(last_unsent->p, concat_p); last_unsent->len += concat_p->tot_len; } else if (extendlen > 0) { struct pbuf *p; LWIP_ASSERT("tcp_write: extension of reference requires reference", last_unsent != NULL && last_unsent->p != NULL); for (p = last_unsent->p; p->next != NULL; p = p->next) { p->tot_len += extendlen; } p->tot_len += extendlen; p->len += extendlen; last_unsent->len += extendlen; } #if TCP_CHECKSUM_ON_COPY if (concat_chksummed) { LWIP_ASSERT("tcp_write: concat checksum needs concatenated data", concat_p != NULL || extendlen > 0); /*if concat checksumm swapped - swap it back */ if (concat_chksum_swapped) { concat_chksum = SWAP_BYTES_IN_WORD(concat_chksum); } tcp_seg_add_chksum(concat_chksum, concat_chksummed, &last_unsent->chksum, &last_unsent->chksum_swapped); last_unsent->flags |= TF_SEG_DATA_CHECKSUMMED; } #endif /* TCP_CHECKSUM_ON_COPY */ /* * Phase 3: Append queue to pcb->unsent. Queue may be NULL, but that * is harmless */ if (last_unsent == NULL) { pcb->unsent = queue; } else { last_unsent->next = queue; } /* * Finally update the pcb state. */ pcb->snd_lbb += len; pcb->snd_buf -= len; pcb->snd_queuelen = queuelen; LWIP_DEBUGF(TCP_QLEN_DEBUG, ("tcp_write: %"S16_F" (after enqueued)\n", pcb->snd_queuelen)); if (pcb->snd_queuelen != 0) { LWIP_ASSERT("tcp_write: valid queue length", pcb->unacked != NULL || pcb->unsent != NULL); } /* Set the PSH flag in the last segment that we enqueued. */ if (seg != NULL && seg->tcphdr != NULL && ((apiflags & TCP_WRITE_FLAG_MORE)==0)) { TCPH_SET_FLAG(seg->tcphdr, TCP_PSH); } return ERR_OK; memerr: pcb->flags |= TF_NAGLEMEMERR; TCP_STATS_INC(tcp.memerr); if (concat_p != NULL) { pbuf_free(concat_p); } if (queue != NULL) { tcp_segs_free(queue); } if (pcb->snd_queuelen != 0) { LWIP_ASSERT("tcp_write: valid queue length", pcb->unacked != NULL || pcb->unsent != NULL); } LWIP_DEBUGF(TCP_QLEN_DEBUG | LWIP_DBG_STATE, ("tcp_write: %"S16_F" (with mem err)\n", pcb->snd_queuelen)); return ERR_MEM; } /** * Enqueue TCP options for transmission. * * Called by tcp_connect(), tcp_listen_input(), and tcp_send_ctrl(). * * @param pcb Protocol control block for the TCP connection. * @param flags TCP header flags to set in the outgoing segment. 
*/ err_t tcp_enqueue_flags(struct tcp_pcb *pcb, u8_t flags) { struct pbuf *p; struct tcp_seg *seg; u8_t optflags = 0; u8_t optlen = 0; LWIP_DEBUGF(TCP_QLEN_DEBUG, ("tcp_enqueue_flags: queuelen: %"U16_F"\n", (u16_t)pcb->snd_queuelen)); LWIP_ASSERT("tcp_enqueue_flags: need either TCP_SYN or TCP_FIN in flags (programmer violates API)", (flags & (TCP_SYN | TCP_FIN)) != 0); /* check for configured max queuelen and possible overflow (FIN flag should always come through!) */ if (((pcb->snd_queuelen >= TCP_SND_QUEUELEN) || (pcb->snd_queuelen > TCP_SNDQUEUELEN_OVERFLOW)) && ((flags & TCP_FIN) == 0)) { LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_LEVEL_SEVERE, ("tcp_enqueue_flags: too long queue %"U16_F" (max %"U16_F")\n", pcb->snd_queuelen, (u16_t)TCP_SND_QUEUELEN)); TCP_STATS_INC(tcp.memerr); pcb->flags |= TF_NAGLEMEMERR; return ERR_MEM; } if (flags & TCP_SYN) { optflags = TF_SEG_OPTS_MSS; #if LWIP_WND_SCALE if ((pcb->state != SYN_RCVD) || (pcb->flags & TF_WND_SCALE)) { /* In a <SYN,ACK> (sent in state SYN_RCVD), the window scale option may only be sent if we received a window scale option from the remote host. */ optflags |= TF_SEG_OPTS_WND_SCALE; } #endif /* LWIP_WND_SCALE */ } #if LWIP_TCP_TIMESTAMPS if ((pcb->flags & TF_TIMESTAMP)) { /* Make sure the timestamp option is only included in data segments if we agreed about it with the remote host. */ optflags |= TF_SEG_OPTS_TS; } #endif /* LWIP_TCP_TIMESTAMPS */ optlen = LWIP_TCP_OPT_LENGTH(optflags); /* Allocate pbuf with room for TCP header + options */ if ((p = pbuf_alloc(PBUF_TRANSPORT, optlen, PBUF_RAM)) == NULL) { pcb->flags |= TF_NAGLEMEMERR; TCP_STATS_INC(tcp.memerr); return ERR_MEM; } LWIP_ASSERT("tcp_enqueue_flags: check that first pbuf can hold optlen", (p->len >= optlen)); /* Allocate memory for tcp_seg, and fill in fields. 
*/ if ((seg = tcp_create_segment(pcb, p, flags, pcb->snd_lbb, optflags)) == NULL) { pcb->flags |= TF_NAGLEMEMERR; TCP_STATS_INC(tcp.memerr); return ERR_MEM; } LWIP_ASSERT("seg->tcphdr not aligned", ((mem_ptr_t)seg->tcphdr % LWIP_MIN(MEM_ALIGNMENT, 4)) == 0); LWIP_ASSERT("tcp_enqueue_flags: invalid segment length", seg->len == 0); LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_TRACE, ("tcp_enqueue_flags: queueing %"U32_F":%"U32_F" (0x%"X16_F")\n", lwip_ntohl(seg->tcphdr->seqno), lwip_ntohl(seg->tcphdr->seqno) + TCP_TCPLEN(seg), (u16_t)flags)); /* Now append seg to pcb->unsent queue */ if (pcb->unsent == NULL) { pcb->unsent = seg; } else { struct tcp_seg *useg; for (useg = pcb->unsent; useg->next != NULL; useg = useg->next); useg->next = seg; } #if TCP_OVERSIZE /* The new unsent tail has no space */ pcb->unsent_oversize = 0; #endif /* TCP_OVERSIZE */ /* SYN and FIN bump the sequence number */ if ((flags & TCP_SYN) || (flags & TCP_FIN)) { pcb->snd_lbb++; /* optlen does not influence snd_buf */ } if (flags & TCP_FIN) { pcb->flags |= TF_FIN; } /* update number of segments on the queues */ pcb->snd_queuelen += pbuf_clen(seg->p); LWIP_DEBUGF(TCP_QLEN_DEBUG, ("tcp_enqueue_flags: %"S16_F" (after enqueued)\n", pcb->snd_queuelen)); if (pcb->snd_queuelen != 0) { LWIP_ASSERT("tcp_enqueue_flags: invalid queue length", pcb->unacked != NULL || pcb->unsent != NULL); } return ERR_OK; } #if LWIP_TCP_TIMESTAMPS /* Build a timestamp option (12 bytes long) at the specified options pointer) * * @param pcb tcp_pcb * @param opts option pointer where to store the timestamp option */ static void tcp_build_timestamp_option(struct tcp_pcb *pcb, u32_t *opts) { /* Pad with two NOP options to make everything nicely aligned */ opts[0] = PP_HTONL(0x0101080A); opts[1] = lwip_htonl(sys_now()); opts[2] = lwip_htonl(pcb->ts_recent); } #endif #if LWIP_WND_SCALE /** Build a window scale option (3 bytes long) at the specified options pointer) * * @param opts option pointer where to store the window scale option */ static void tcp_build_wnd_scale_option(u32_t *opts) { /* Pad with one NOP option to make everything nicely aligned */ opts[0] = PP_HTONL(0x01030300 | TCP_RCV_SCALE); } #endif /** * Send an ACK without data. * * @param pcb Protocol control block for the TCP connection to send the ACK */ err_t tcp_send_empty_ack(struct tcp_pcb *pcb) { err_t err; struct pbuf *p; u8_t optlen = 0; struct netif *netif; #if LWIP_TCP_TIMESTAMPS || CHECKSUM_GEN_TCP struct tcp_hdr *tcphdr; #endif /* LWIP_TCP_TIMESTAMPS || CHECKSUM_GEN_TCP */ #if LWIP_TCP_TIMESTAMPS if (pcb->flags & TF_TIMESTAMP) { optlen = LWIP_TCP_OPT_LENGTH(TF_SEG_OPTS_TS); } #endif p = tcp_output_alloc_header(pcb, optlen, 0, lwip_htonl(pcb->snd_nxt)); if (p == NULL) { /* let tcp_fasttmr retry sending this ACK */ pcb->flags |= (TF_ACK_DELAY | TF_ACK_NOW); LWIP_DEBUGF(TCP_OUTPUT_DEBUG, ("tcp_output: (ACK) could not allocate pbuf\n")); return ERR_BUF; } #if LWIP_TCP_TIMESTAMPS || CHECKSUM_GEN_TCP tcphdr = (struct tcp_hdr *)p->payload; #endif /* LWIP_TCP_TIMESTAMPS || CHECKSUM_GEN_TCP */ LWIP_DEBUGF(TCP_OUTPUT_DEBUG, ("tcp_output: sending ACK for %"U32_F"\n", pcb->rcv_nxt)); /* NB. 
MSS and window scale options are only sent on SYNs, so ignore them here */ #if LWIP_TCP_TIMESTAMPS pcb->ts_lastacksent = pcb->rcv_nxt; if (pcb->flags & TF_TIMESTAMP) { tcp_build_timestamp_option(pcb, (u32_t *)(tcphdr + 1)); } #endif netif = ip_route(&pcb->local_ip, &pcb->remote_ip); if (netif == NULL) { err = ERR_RTE; } else { #if CHECKSUM_GEN_TCP IF__NETIF_CHECKSUM_ENABLED(netif, NETIF_CHECKSUM_GEN_TCP) { tcphdr->chksum = ip_chksum_pseudo(p, IP_PROTO_TCP, p->tot_len, &pcb->local_ip, &pcb->remote_ip); } #endif NETIF_SET_HWADDRHINT(netif, &(pcb->addr_hint)); err = ip_output_if(p, &pcb->local_ip, &pcb->remote_ip, pcb->ttl, pcb->tos, IP_PROTO_TCP, netif); NETIF_SET_HWADDRHINT(netif, NULL); } pbuf_free(p); if (err != ERR_OK) { /* let tcp_fasttmr retry sending this ACK */ pcb->flags |= (TF_ACK_DELAY | TF_ACK_NOW); } else { /* remove ACK flags from the PCB, as we sent an empty ACK now */ pcb->flags &= ~(TF_ACK_DELAY | TF_ACK_NOW); } return err; } /** * @ingroup tcp_raw * Find out what we can send and send it * * @param pcb Protocol control block for the TCP connection to send data * @return ERR_OK if data has been sent or nothing to send * another err_t on error */ err_t tcp_output(struct tcp_pcb *pcb) { struct tcp_seg *seg, *useg; u32_t wnd, snd_nxt; err_t err; struct netif *netif; #if TCP_CWND_DEBUG s16_t i = 0; #endif /* TCP_CWND_DEBUG */ /* pcb->state LISTEN not allowed here */ LWIP_ASSERT("don't call tcp_output for listen-pcbs", pcb->state != LISTEN); /* First, check if we are invoked by the TCP input processing code. If so, we do not output anything. Instead, we rely on the input processing code to call us when input processing is done with. */ if (tcp_input_pcb == pcb) { return ERR_OK; } wnd = LWIP_MIN(pcb->snd_wnd, pcb->cwnd); seg = pcb->unsent; /* If the TF_ACK_NOW flag is set and no data will be sent (either * because the ->unsent queue is empty or because the window does * not allow it), construct an empty ACK segment and send it. * * If data is to be sent, we will just piggyback the ACK (see below). 
*/ if (pcb->flags & TF_ACK_NOW && (seg == NULL || lwip_ntohl(seg->tcphdr->seqno) - pcb->lastack + seg->len > wnd)) { return tcp_send_empty_ack(pcb); } /* useg should point to last segment on unacked queue */ useg = pcb->unacked; if (useg != NULL) { for (; useg->next != NULL; useg = useg->next); } netif = ip_route(&pcb->local_ip, &pcb->remote_ip); if (netif == NULL) { return ERR_RTE; } /* If we don't have a local IP address, we get one from netif */ if (ip_addr_isany(&pcb->local_ip)) { const ip_addr_t *local_ip = ip_netif_get_local_ip(netif, &pcb->remote_ip); if (local_ip == NULL) { return ERR_RTE; } ip_addr_copy(pcb->local_ip, *local_ip); } #if TCP_OUTPUT_DEBUG if (seg == NULL) { LWIP_DEBUGF(TCP_OUTPUT_DEBUG, ("tcp_output: nothing to send (%p)\n", (void*)pcb->unsent)); } #endif /* TCP_OUTPUT_DEBUG */ #if TCP_CWND_DEBUG if (seg == NULL) { LWIP_DEBUGF(TCP_CWND_DEBUG, ("tcp_output: snd_wnd %"TCPWNDSIZE_F ", cwnd %"TCPWNDSIZE_F", wnd %"U32_F ", seg == NULL, ack %"U32_F"\n", pcb->snd_wnd, pcb->cwnd, wnd, pcb->lastack)); } else { LWIP_DEBUGF(TCP_CWND_DEBUG, ("tcp_output: snd_wnd %"TCPWNDSIZE_F", cwnd %"TCPWNDSIZE_F", wnd %"U32_F ", effwnd %"U32_F", seq %"U32_F", ack %"U32_F"\n", pcb->snd_wnd, pcb->cwnd, wnd, lwip_ntohl(seg->tcphdr->seqno) - pcb->lastack + seg->len, lwip_ntohl(seg->tcphdr->seqno), pcb->lastack)); } #endif /* TCP_CWND_DEBUG */ /* Check if we need to start the persistent timer when the next unsent segment * does not fit within the remaining send window and RTO timer is not running (we * have no in-flight data). A traditional approach would fill the remaining window * with part of the unsent segment (which will engage zero-window probing upon * reception of the zero window update from the receiver). This ensures the * subsequent window update is reliably received. With the goal of being lightweight, * we avoid splitting the unsent segment and treat the window as already zero. */ if (seg != NULL && lwip_ntohl(seg->tcphdr->seqno) - pcb->lastack + seg->len > wnd && wnd > 0 && wnd == pcb->snd_wnd && pcb->unacked == NULL) { /* Start the persist timer */ if (pcb->persist_backoff == 0) { pcb->persist_cnt = 0; pcb->persist_backoff = 1; } goto output_done; } /* data available and window allows it to be sent? */ while (seg != NULL && lwip_ntohl(seg->tcphdr->seqno) - pcb->lastack + seg->len <= wnd) { LWIP_ASSERT("RST not expected here!", (TCPH_FLAGS(seg->tcphdr) & TCP_RST) == 0); /* Stop sending if the nagle algorithm would prevent it * Don't stop: * - if tcp_write had a memory error before (prevent delayed ACK timeout) or * - if FIN was already enqueued for this PCB (SYN is always alone in a segment - * either seg->next != NULL or pcb->unacked == NULL; * RST is no sent using tcp_write/tcp_output. 
*/ if ((tcp_do_output_nagle(pcb) == 0) && ((pcb->flags & (TF_NAGLEMEMERR | TF_FIN)) == 0)) { break; } #if TCP_CWND_DEBUG LWIP_DEBUGF(TCP_CWND_DEBUG, ("tcp_output: snd_wnd %"TCPWNDSIZE_F", cwnd %"TCPWNDSIZE_F", wnd %"U32_F", effwnd %"U32_F", seq %"U32_F", ack %"U32_F", i %"S16_F"\n", pcb->snd_wnd, pcb->cwnd, wnd, lwip_ntohl(seg->tcphdr->seqno) + seg->len - pcb->lastack, lwip_ntohl(seg->tcphdr->seqno), pcb->lastack, i)); ++i; #endif /* TCP_CWND_DEBUG */ if (pcb->state != SYN_SENT) { TCPH_SET_FLAG(seg->tcphdr, TCP_ACK); } #if TCP_OVERSIZE_DBGCHECK seg->oversize_left = 0; #endif /* TCP_OVERSIZE_DBGCHECK */ err = tcp_output_segment(seg, pcb, netif); if (err != ERR_OK) { /* segment could not be sent, for whatever reason */ pcb->flags |= TF_NAGLEMEMERR; return err; } pcb->unsent = seg->next; if (pcb->state != SYN_SENT) { pcb->flags &= ~(TF_ACK_DELAY | TF_ACK_NOW); } snd_nxt = lwip_ntohl(seg->tcphdr->seqno) + TCP_TCPLEN(seg); if (TCP_SEQ_LT(pcb->snd_nxt, snd_nxt)) { pcb->snd_nxt = snd_nxt; } /* put segment on unacknowledged list if length > 0 */ if (TCP_TCPLEN(seg) > 0) { seg->next = NULL; /* unacked list is empty? */ if (pcb->unacked == NULL) { pcb->unacked = seg; useg = seg; /* unacked list is not empty? */ } else { /* In the case of fast retransmit, the packet should not go to the tail * of the unacked queue, but rather somewhere before it. We need to check for * this case. -STJ Jul 27, 2004 */ if (TCP_SEQ_LT(lwip_ntohl(seg->tcphdr->seqno), lwip_ntohl(useg->tcphdr->seqno))) { /* add segment to before tail of unacked list, keeping the list sorted */ struct tcp_seg **cur_seg = &(pcb->unacked); while (*cur_seg && TCP_SEQ_LT(lwip_ntohl((*cur_seg)->tcphdr->seqno), lwip_ntohl(seg->tcphdr->seqno))) { cur_seg = &((*cur_seg)->next ); } seg->next = (*cur_seg); (*cur_seg) = seg; } else { /* add segment to tail of unacked list */ useg->next = seg; useg = useg->next; } } /* do not queue empty segments on the unacked list */ } else { tcp_seg_free(seg); } seg = pcb->unsent; } output_done: #if TCP_OVERSIZE if (pcb->unsent == NULL) { /* last unsent has been removed, reset unsent_oversize */ pcb->unsent_oversize = 0; } #endif /* TCP_OVERSIZE */ pcb->flags &= ~TF_NAGLEMEMERR; return ERR_OK; } /** * Called by tcp_output() to actually send a TCP segment over IP. * * @param seg the tcp_seg to send * @param pcb the tcp_pcb for the TCP connection used to send the segment * @param netif the netif used to send the segment */ static err_t tcp_output_segment(struct tcp_seg *seg, struct tcp_pcb *pcb, struct netif *netif) { err_t err; u16_t len; u32_t *opts; if (seg->p->ref != 1) { /* This can happen if the pbuf of this segment is still referenced by the netif driver due to deferred transmission. Since this function modifies p->len, we must not continue in this case. */ return ERR_OK; } /* The TCP header has already been constructed, but the ackno and wnd fields remain. */ seg->tcphdr->ackno = lwip_htonl(pcb->rcv_nxt); /* advertise our receive window size in this TCP segment */ #if LWIP_WND_SCALE if (seg->flags & TF_SEG_OPTS_WND_SCALE) { /* The Window field in a SYN segment itself (the only type where we send the window scale option) is never scaled. */ seg->tcphdr->wnd = lwip_htons(TCPWND_MIN16(pcb->rcv_ann_wnd)); } else #endif /* LWIP_WND_SCALE */ { seg->tcphdr->wnd = lwip_htons(TCPWND_MIN16(RCV_WND_SCALE(pcb, pcb->rcv_ann_wnd))); } pcb->rcv_ann_right_edge = pcb->rcv_nxt + pcb->rcv_ann_wnd; /* Add any requested options. 
NB MSS option is only set on SYN packets, so ignore it here */ /* cast through void* to get rid of alignment warnings */ opts = (u32_t *)(void *)(seg->tcphdr + 1); if (seg->flags & TF_SEG_OPTS_MSS) { u16_t mss; #if TCP_CALCULATE_EFF_SEND_MSS mss = tcp_eff_send_mss(TCP_MSS, &pcb->local_ip, &pcb->remote_ip); #else /* TCP_CALCULATE_EFF_SEND_MSS */ mss = TCP_MSS; #endif /* TCP_CALCULATE_EFF_SEND_MSS */ *opts = TCP_BUILD_MSS_OPTION(mss); opts += 1; } #if LWIP_TCP_TIMESTAMPS pcb->ts_lastacksent = pcb->rcv_nxt; if (seg->flags & TF_SEG_OPTS_TS) { tcp_build_timestamp_option(pcb, opts); opts += 3; } #endif #if LWIP_WND_SCALE if (seg->flags & TF_SEG_OPTS_WND_SCALE) { tcp_build_wnd_scale_option(opts); opts += 1; } #endif /* Set retransmission timer running if it is not currently enabled This must be set before checking the route. */ if (pcb->rtime < 0) { pcb->rtime = 0; } if (pcb->rttest == 0) { pcb->rttest = tcp_ticks; pcb->rtseq = lwip_ntohl(seg->tcphdr->seqno); LWIP_DEBUGF(TCP_RTO_DEBUG, ("tcp_output_segment: rtseq %"U32_F"\n", pcb->rtseq)); } LWIP_DEBUGF(TCP_OUTPUT_DEBUG, ("tcp_output_segment: %"U32_F":%"U32_F"\n", lwip_htonl(seg->tcphdr->seqno), lwip_htonl(seg->tcphdr->seqno) + seg->len)); len = (u16_t)((u8_t *)seg->tcphdr - (u8_t *)seg->p->payload); if (len == 0) { /** Exclude retransmitted segments from this count. */ MIB2_STATS_INC(mib2.tcpoutsegs); } seg->p->len -= len; seg->p->tot_len -= len; seg->p->payload = seg->tcphdr; seg->tcphdr->chksum = 0; #if CHECKSUM_GEN_TCP IF__NETIF_CHECKSUM_ENABLED(netif, NETIF_CHECKSUM_GEN_TCP) { #if TCP_CHECKSUM_ON_COPY u32_t acc; #if TCP_CHECKSUM_ON_COPY_SANITY_CHECK u16_t chksum_slow = ip_chksum_pseudo(seg->p, IP_PROTO_TCP, seg->p->tot_len, &pcb->local_ip, &pcb->remote_ip); #endif /* TCP_CHECKSUM_ON_COPY_SANITY_CHECK */ if ((seg->flags & TF_SEG_DATA_CHECKSUMMED) == 0) { LWIP_ASSERT("data included but not checksummed", seg->p->tot_len == (TCPH_HDRLEN(seg->tcphdr) * 4)); } /* rebuild TCP header checksum (TCP header changes for retransmissions!) */ acc = ip_chksum_pseudo_partial(seg->p, IP_PROTO_TCP, seg->p->tot_len, TCPH_HDRLEN(seg->tcphdr) * 4, &pcb->local_ip, &pcb->remote_ip); /* add payload checksum */ if (seg->chksum_swapped) { seg->chksum = SWAP_BYTES_IN_WORD(seg->chksum); seg->chksum_swapped = 0; } acc += (u16_t)~(seg->chksum); seg->tcphdr->chksum = FOLD_U32T(acc); #if TCP_CHECKSUM_ON_COPY_SANITY_CHECK if (chksum_slow != seg->tcphdr->chksum) { TCP_CHECKSUM_ON_COPY_SANITY_CHECK_FAIL( ("tcp_output_segment: calculated checksum is %"X16_F" instead of %"X16_F"\n", seg->tcphdr->chksum, chksum_slow)); seg->tcphdr->chksum = chksum_slow; } #endif /* TCP_CHECKSUM_ON_COPY_SANITY_CHECK */ #else /* TCP_CHECKSUM_ON_COPY */ seg->tcphdr->chksum = ip_chksum_pseudo(seg->p, IP_PROTO_TCP, seg->p->tot_len, &pcb->local_ip, &pcb->remote_ip); #endif /* TCP_CHECKSUM_ON_COPY */ } #endif /* CHECKSUM_GEN_TCP */ TCP_STATS_INC(tcp.xmit); NETIF_SET_HWADDRHINT(netif, &(pcb->addr_hint)); err = ip_output_if(seg->p, &pcb->local_ip, &pcb->remote_ip, pcb->ttl, pcb->tos, IP_PROTO_TCP, netif); NETIF_SET_HWADDRHINT(netif, NULL); return err; } /** * Send a TCP RESET packet (empty segment with RST flag set) either to * abort a connection or to show that there is no matching local connection * for a received segment. 
* * Called by tcp_abort() (to abort a local connection), tcp_input() (if no * matching local pcb was found), tcp_listen_input() (if incoming segment * has ACK flag set) and tcp_process() (received segment in the wrong state) * * Since a RST segment is in most cases not sent for an active connection, * tcp_rst() has a number of arguments that are taken from a tcp_pcb for * most other segment output functions. * * @param seqno the sequence number to use for the outgoing segment * @param ackno the acknowledge number to use for the outgoing segment * @param local_ip the local IP address to send the segment from * @param remote_ip the remote IP address to send the segment to * @param local_port the local TCP port to send the segment from * @param remote_port the remote TCP port to send the segment to */ void tcp_rst(u32_t seqno, u32_t ackno, const ip_addr_t *local_ip, const ip_addr_t *remote_ip, u16_t local_port, u16_t remote_port) { struct pbuf *p; struct tcp_hdr *tcphdr; struct netif *netif; p = pbuf_alloc(PBUF_IP, TCP_HLEN, PBUF_RAM); if (p == NULL) { LWIP_DEBUGF(TCP_DEBUG, ("tcp_rst: could not allocate memory for pbuf\n")); return; } LWIP_ASSERT("check that first pbuf can hold struct tcp_hdr", (p->len >= sizeof(struct tcp_hdr))); tcphdr = (struct tcp_hdr *)p->payload; tcphdr->src = lwip_htons(local_port); tcphdr->dest = lwip_htons(remote_port); tcphdr->seqno = lwip_htonl(seqno); tcphdr->ackno = lwip_htonl(ackno); TCPH_HDRLEN_FLAGS_SET(tcphdr, TCP_HLEN/4, TCP_RST | TCP_ACK); #if LWIP_WND_SCALE tcphdr->wnd = PP_HTONS(((TCP_WND >> TCP_RCV_SCALE) & 0xFFFF)); #else tcphdr->wnd = PP_HTONS(TCP_WND); #endif tcphdr->chksum = 0; tcphdr->urgp = 0; TCP_STATS_INC(tcp.xmit); MIB2_STATS_INC(mib2.tcpoutrsts); netif = ip_route(local_ip, remote_ip); if (netif != NULL) { #if CHECKSUM_GEN_TCP IF__NETIF_CHECKSUM_ENABLED(netif, NETIF_CHECKSUM_GEN_TCP) { tcphdr->chksum = ip_chksum_pseudo(p, IP_PROTO_TCP, p->tot_len, local_ip, remote_ip); } #endif /* Send output with hardcoded TTL/HL since we have no access to the pcb */ ip_output_if(p, local_ip, remote_ip, TCP_TTL, 0, IP_PROTO_TCP, netif); } pbuf_free(p); LWIP_DEBUGF(TCP_RST_DEBUG, ("tcp_rst: seqno %"U32_F" ackno %"U32_F".\n", seqno, ackno)); } /** * Requeue all unacked segments for retransmission * * Called by tcp_slowtmr() for slow retransmission. * * @param pcb the tcp_pcb for which to re-enqueue all unacked segments */ void tcp_rexmit_rto(struct tcp_pcb *pcb) { struct tcp_seg *seg; if (pcb->unacked == NULL) { return; } /* Move all unacked segments to the head of the unsent queue */ for (seg = pcb->unacked; seg->next != NULL; seg = seg->next); /* concatenate unsent queue after unacked queue */ seg->next = pcb->unsent; #if TCP_OVERSIZE_DBGCHECK /* if last unsent changed, we need to update unsent_oversize */ if (pcb->unsent == NULL) { pcb->unsent_oversize = seg->oversize_left; } #endif /* TCP_OVERSIZE_DBGCHECK */ /* unsent queue is the concatenated queue (of unacked, unsent) */ pcb->unsent = pcb->unacked; /* unacked queue is now empty */ pcb->unacked = NULL; /* increment number of retransmissions */ if (pcb->nrtx < 0xFF) { ++pcb->nrtx; } /* Don't take any RTT measurements after retransmitting. */ pcb->rttest = 0; /* Do the actual retransmission */ tcp_output(pcb); } /** * Requeue the first unacked segment for retransmission * * Called by tcp_receive() for fast retransmit. 
* * @param pcb the tcp_pcb for which to retransmit the first unacked segment */ void tcp_rexmit(struct tcp_pcb *pcb) { struct tcp_seg *seg; struct tcp_seg **cur_seg; if (pcb->unacked == NULL) { return; } /* Move the first unacked segment to the unsent queue */ /* Keep the unsent queue sorted. */ seg = pcb->unacked; pcb->unacked = seg->next; cur_seg = &(pcb->unsent); while (*cur_seg && TCP_SEQ_LT(lwip_ntohl((*cur_seg)->tcphdr->seqno), lwip_ntohl(seg->tcphdr->seqno))) { cur_seg = &((*cur_seg)->next ); } seg->next = *cur_seg; *cur_seg = seg; #if TCP_OVERSIZE if (seg->next == NULL) { /* the retransmitted segment is last in unsent, so reset unsent_oversize */ pcb->unsent_oversize = 0; } #endif /* TCP_OVERSIZE */ if (pcb->nrtx < 0xFF) { ++pcb->nrtx; } /* Don't take any rtt measurements after retransmitting. */ pcb->rttest = 0; /* Do the actual retransmission. */ MIB2_STATS_INC(mib2.tcpretranssegs); /* No need to call tcp_output: we are always called from tcp_input() and thus tcp_output directly returns. */ } /** * Handle retransmission after three dupacks received * * @param pcb the tcp_pcb for which to retransmit the first unacked segment */ void tcp_rexmit_fast(struct tcp_pcb *pcb) { if (pcb->unacked != NULL && !(pcb->flags & TF_INFR)) { /* This is fast retransmit. Retransmit the first unacked segment. */ LWIP_DEBUGF(TCP_FR_DEBUG, ("tcp_receive: dupacks %"U16_F" (%"U32_F "), fast retransmit %"U32_F"\n", (u16_t)pcb->dupacks, pcb->lastack, lwip_ntohl(pcb->unacked->tcphdr->seqno))); tcp_rexmit(pcb); /* Set ssthresh to half of the minimum of the current * cwnd and the advertised window */ pcb->ssthresh = LWIP_MIN(pcb->cwnd, pcb->snd_wnd) / 2; /* The minimum value for ssthresh should be 2 MSS */ if (pcb->ssthresh < (2U * pcb->mss)) { LWIP_DEBUGF(TCP_FR_DEBUG, ("tcp_receive: The minimum value for ssthresh %"TCPWNDSIZE_F " should be min 2 mss %"U16_F"...\n", pcb->ssthresh, (u16_t)(2*pcb->mss))); pcb->ssthresh = 2*pcb->mss; } pcb->cwnd = pcb->ssthresh + 3 * pcb->mss; pcb->flags |= TF_INFR; /* Reset the retransmission timer to prevent immediate rto retransmissions */ pcb->rtime = 0; } } /** * Send keepalive packets to keep a connection active although * no data is sent over it. 
* * Called by tcp_slowtmr() * * @param pcb the tcp_pcb for which to send a keepalive packet */ err_t tcp_keepalive(struct tcp_pcb *pcb) { err_t err; struct pbuf *p; struct netif *netif; LWIP_DEBUGF(TCP_DEBUG, ("tcp_keepalive: sending KEEPALIVE probe to ")); ip_addr_debug_print(TCP_DEBUG, &pcb->remote_ip); LWIP_DEBUGF(TCP_DEBUG, ("\n")); LWIP_DEBUGF(TCP_DEBUG, ("tcp_keepalive: tcp_ticks %"U32_F" pcb->tmr %"U32_F" pcb->keep_cnt_sent %"U16_F"\n", tcp_ticks, pcb->tmr, (u16_t)pcb->keep_cnt_sent)); p = tcp_output_alloc_header(pcb, 0, 0, lwip_htonl(pcb->snd_nxt - 1)); if (p == NULL) { LWIP_DEBUGF(TCP_DEBUG, ("tcp_keepalive: could not allocate memory for pbuf\n")); return ERR_MEM; } netif = ip_route(&pcb->local_ip, &pcb->remote_ip); if (netif == NULL) { err = ERR_RTE; } else { #if CHECKSUM_GEN_TCP IF__NETIF_CHECKSUM_ENABLED(netif, NETIF_CHECKSUM_GEN_TCP) { struct tcp_hdr *tcphdr = (struct tcp_hdr *)p->payload; tcphdr->chksum = ip_chksum_pseudo(p, IP_PROTO_TCP, p->tot_len, &pcb->local_ip, &pcb->remote_ip); } #endif /* CHECKSUM_GEN_TCP */ TCP_STATS_INC(tcp.xmit); /* Send output to IP */ NETIF_SET_HWADDRHINT(netif, &(pcb->addr_hint)); err = ip_output_if(p, &pcb->local_ip, &pcb->remote_ip, pcb->ttl, 0, IP_PROTO_TCP, netif); NETIF_SET_HWADDRHINT(netif, NULL); } pbuf_free(p); LWIP_DEBUGF(TCP_DEBUG, ("tcp_keepalive: seqno %"U32_F" ackno %"U32_F" err %d.\n", pcb->snd_nxt - 1, pcb->rcv_nxt, (int)err)); return err; } /** * Send persist timer zero-window probes to keep a connection active * when a window update is lost. * * Called by tcp_slowtmr() * * @param pcb the tcp_pcb for which to send a zero-window probe packet */ err_t tcp_zero_window_probe(struct tcp_pcb *pcb) { err_t err; struct pbuf *p; struct tcp_hdr *tcphdr; struct tcp_seg *seg; u16_t len; u8_t is_fin; u32_t snd_nxt; struct netif *netif; LWIP_DEBUGF(TCP_DEBUG, ("tcp_zero_window_probe: sending ZERO WINDOW probe to ")); ip_addr_debug_print(TCP_DEBUG, &pcb->remote_ip); LWIP_DEBUGF(TCP_DEBUG, ("\n")); LWIP_DEBUGF(TCP_DEBUG, ("tcp_zero_window_probe: tcp_ticks %"U32_F " pcb->tmr %"U32_F" pcb->keep_cnt_sent %"U16_F"\n", tcp_ticks, pcb->tmr, (u16_t)pcb->keep_cnt_sent)); seg = pcb->unacked; if (seg == NULL) { seg = pcb->unsent; } if (seg == NULL) { /* nothing to send, zero window probe not needed */ return ERR_OK; } is_fin = ((TCPH_FLAGS(seg->tcphdr) & TCP_FIN) != 0) && (seg->len == 0); /* we want to send one seqno: either FIN or data (no options) */ len = is_fin ? 0 : 1; p = tcp_output_alloc_header(pcb, 0, len, seg->tcphdr->seqno); if (p == NULL) { LWIP_DEBUGF(TCP_DEBUG, ("tcp_zero_window_probe: no memory for pbuf\n")); return ERR_MEM; } tcphdr = (struct tcp_hdr *)p->payload; if (is_fin) { /* FIN segment, no data */ TCPH_FLAGS_SET(tcphdr, TCP_ACK | TCP_FIN); } else { /* Data segment, copy in one byte from the head of the unacked queue */ char *d = ((char *)p->payload + TCP_HLEN); /* Depending on whether the segment has already been sent (unacked) or not (unsent), seg->p->payload points to the IP header or TCP header. Ensure we copy the first TCP data byte: */ pbuf_copy_partial(seg->p, d, 1, seg->p->tot_len - seg->len); } /* The byte may be acknowledged without the window being opened. 
*/ snd_nxt = lwip_ntohl(seg->tcphdr->seqno) + 1; if (TCP_SEQ_LT(pcb->snd_nxt, snd_nxt)) { pcb->snd_nxt = snd_nxt; } netif = ip_route(&pcb->local_ip, &pcb->remote_ip); if (netif == NULL) { err = ERR_RTE; } else { #if CHECKSUM_GEN_TCP IF__NETIF_CHECKSUM_ENABLED(netif, NETIF_CHECKSUM_GEN_TCP) { tcphdr->chksum = ip_chksum_pseudo(p, IP_PROTO_TCP, p->tot_len, &pcb->local_ip, &pcb->remote_ip); } #endif TCP_STATS_INC(tcp.xmit); /* Send output to IP */ NETIF_SET_HWADDRHINT(netif, &(pcb->addr_hint)); err = ip_output_if(p, &pcb->local_ip, &pcb->remote_ip, pcb->ttl, 0, IP_PROTO_TCP, netif); NETIF_SET_HWADDRHINT(netif, NULL); } pbuf_free(p); LWIP_DEBUGF(TCP_DEBUG, ("tcp_zero_window_probe: seqno %"U32_F " ackno %"U32_F" err %d.\n", pcb->snd_nxt - 1, pcb->rcv_nxt, (int)err)); return err; } #endif /* LWIP_TCP */
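The tcp_rexmit()/tcp_output() code above re-queues segments into the unsent list using a wraparound-safe sequence comparison and a pointer-to-pointer walk. The sketch below isolates that pattern with simplified stand-ins (demo_seg and DEMO_SEQ_LT are not lwIP types; the real tcp_seg and TCP_SEQ_LT live in tcp_priv.h), so the list-insertion logic can be read on its own.

#include <stdint.h>
#include <stddef.h>

/* Wraparound-safe "a comes before b" for 32-bit sequence numbers,
   modelled on lwIP's TCP_SEQ_LT macro. */
#define DEMO_SEQ_LT(a, b) ((int32_t)((uint32_t)(a) - (uint32_t)(b)) < 0)

struct demo_seg {
  struct demo_seg *next;
  uint32_t seqno;
};

/* Insert 'seg' into the list at *head, keeping it sorted by seqno.
   The pointer-to-pointer cursor removes the usual head special case,
   exactly like the (*cur_seg) walk in tcp_rexmit() above. */
static void demo_sorted_insert(struct demo_seg **head, struct demo_seg *seg)
{
  struct demo_seg **cur = head;
  while ((*cur != NULL) && DEMO_SEQ_LT((*cur)->seqno, seg->seqno)) {
    cur = &((*cur)->next);
  }
  seg->next = *cur;
  *cur = seg;
}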
2301_81045437/classic-platform
communication/lwip-2.0.3/src/core/tcp_out.c
C
unknown
59,252
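tcp_rexmit_fast() in the tcp_out.c record above halves the flight size into ssthresh (clamped to two segments) and re-inflates cwnd by three segments once the third duplicate ACK arrives. A minimal sketch of just that arithmetic, using hypothetical demo_* names rather than lwIP's pcb fields:

#include <stdint.h>

struct demo_cwnd_state {
  uint32_t cwnd;
  uint32_t ssthresh;
};

/* Entering fast recovery: ssthresh = max(min(cwnd, snd_wnd)/2, 2*MSS),
   cwnd = ssthresh + 3*MSS (the three duplicate ACKs already left the network). */
static struct demo_cwnd_state
demo_enter_fast_recovery(uint32_t cwnd, uint32_t snd_wnd, uint16_t mss)
{
  struct demo_cwnd_state s;
  uint32_t flight = (cwnd < snd_wnd) ? cwnd : snd_wnd;

  s.ssthresh = flight / 2u;
  if (s.ssthresh < 2u * mss) {
    s.ssthresh = 2u * mss;
  }
  s.cwnd = s.ssthresh + 3u * mss;
  return s;
}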
/** * @file * Stack-internal timers implementation. * This file includes timer callbacks for stack-internal timers as well as * functions to set up or stop timers and check for expired timers. * */ /* * Copyright (c) 2001-2004 Swedish Institute of Computer Science. * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY * OF SUCH DAMAGE. * * This file is part of the lwIP TCP/IP stack. * * Author: Adam Dunkels <adam@sics.se> * Simon Goldschmidt * */ #include "lwip/opt.h" #include "lwip/timeouts.h" #include "lwip/priv/tcp_priv.h" #include "lwip/def.h" #include "lwip/memp.h" #include "lwip/priv/tcpip_priv.h" #include "lwip/ip4_frag.h" #include "lwip/etharp.h" #include "lwip/dhcp.h" #include "lwip/autoip.h" #include "lwip/igmp.h" #include "lwip/dns.h" #include "lwip/nd6.h" #include "lwip/ip6_frag.h" #include "lwip/mld6.h" #include "lwip/sys.h" #include "lwip/pbuf.h" #if LWIP_DEBUG_TIMERNAMES #define HANDLER(x) x, #x #else /* LWIP_DEBUG_TIMERNAMES */ #define HANDLER(x) x #endif /* LWIP_DEBUG_TIMERNAMES */ /** This array contains all stack-internal cyclic timers. 
To get the number of * timers, use LWIP_ARRAYSIZE() */ const struct lwip_cyclic_timer lwip_cyclic_timers[] = { #if LWIP_TCP /* The TCP timer is a special case: it does not have to run always and is triggered to start from TCP using tcp_timer_needed() */ {TCP_TMR_INTERVAL, HANDLER(tcp_tmr)}, #endif /* LWIP_TCP */ #if LWIP_IPV4 #if IP_REASSEMBLY {IP_TMR_INTERVAL, HANDLER(ip_reass_tmr)}, #endif /* IP_REASSEMBLY */ #if LWIP_ARP {ARP_TMR_INTERVAL, HANDLER(etharp_tmr)}, #endif /* LWIP_ARP */ #if LWIP_DHCP {DHCP_COARSE_TIMER_MSECS, HANDLER(dhcp_coarse_tmr)}, {DHCP_FINE_TIMER_MSECS, HANDLER(dhcp_fine_tmr)}, #endif /* LWIP_DHCP */ #if LWIP_AUTOIP {AUTOIP_TMR_INTERVAL, HANDLER(autoip_tmr)}, #endif /* LWIP_AUTOIP */ #if LWIP_IGMP {IGMP_TMR_INTERVAL, HANDLER(igmp_tmr)}, #endif /* LWIP_IGMP */ #endif /* LWIP_IPV4 */ #if LWIP_DNS {DNS_TMR_INTERVAL, HANDLER(dns_tmr)}, #endif /* LWIP_DNS */ #if LWIP_IPV6 {ND6_TMR_INTERVAL, HANDLER(nd6_tmr)}, #if LWIP_IPV6_REASS {IP6_REASS_TMR_INTERVAL, HANDLER(ip6_reass_tmr)}, #endif /* LWIP_IPV6_REASS */ #if LWIP_IPV6_MLD {MLD6_TMR_INTERVAL, HANDLER(mld6_tmr)}, #endif /* LWIP_IPV6_MLD */ #endif /* LWIP_IPV6 */ }; #if LWIP_TIMERS && !LWIP_TIMERS_CUSTOM /** The one and only timeout list */ static struct sys_timeo *next_timeout; static u32_t timeouts_last_time; #if LWIP_TCP /** global variable that shows if the tcp timer is currently scheduled or not */ static int tcpip_tcp_timer_active; /** * Timer callback function that calls tcp_tmr() and reschedules itself. * * @param arg unused argument */ static void tcpip_tcp_timer(void *arg) { LWIP_UNUSED_ARG(arg); /* call TCP timer handler */ tcp_tmr(); /* timer still needed? */ if (tcp_active_pcbs || tcp_tw_pcbs) { /* restart timer */ sys_timeout(TCP_TMR_INTERVAL, tcpip_tcp_timer, NULL); } else { /* disable timer */ tcpip_tcp_timer_active = 0; } } /** * Called from TCP_REG when registering a new PCB: * the reason is to have the TCP timer only running when * there are active (or time-wait) PCBs. */ void tcp_timer_needed(void) { /* timer is off but needed again? */ if (!tcpip_tcp_timer_active && (tcp_active_pcbs || tcp_tw_pcbs)) { /* enable and start timer */ tcpip_tcp_timer_active = 1; sys_timeout(TCP_TMR_INTERVAL, tcpip_tcp_timer, NULL); } } #endif /* LWIP_TCP */ /** * Timer callback function that calls mld6_tmr() and reschedules itself. * * @param arg unused argument */ static void cyclic_timer(void *arg) { const struct lwip_cyclic_timer* cyclic = (const struct lwip_cyclic_timer*)arg; #if LWIP_DEBUG_TIMERNAMES LWIP_DEBUGF(TIMERS_DEBUG, ("tcpip: %s()\n", cyclic->handler_name)); #endif cyclic->handler(); sys_timeout(cyclic->interval_ms, cyclic_timer, arg); } /** Initialize this module */ void sys_timeouts_init(void) { size_t i; /* tcp_tmr() at index 0 is started on demand */ for (i = (LWIP_TCP ? 1 : 0); i < LWIP_ARRAYSIZE(lwip_cyclic_timers); i++) { /* we have to cast via size_t to get rid of const warning (this is OK as cyclic_timer() casts back to const* */ sys_timeout(lwip_cyclic_timers[i].interval_ms, cyclic_timer, LWIP_CONST_CAST(void*, &lwip_cyclic_timers[i])); } /* Initialise timestamp for sys_check_timeouts */ timeouts_last_time = sys_now(); } /** * Create a one-shot timer (aka timeout). 
Timeouts are processed in the * following cases: * - while waiting for a message using sys_timeouts_mbox_fetch() * - by calling sys_check_timeouts() (NO_SYS==1 only) * * @param msecs time in milliseconds after that the timer should expire * @param handler callback function to call when msecs have elapsed * @param arg argument to pass to the callback function */ #if LWIP_DEBUG_TIMERNAMES void sys_timeout_debug(u32_t msecs, sys_timeout_handler handler, void *arg, const char* handler_name) #else /* LWIP_DEBUG_TIMERNAMES */ void sys_timeout(u32_t msecs, sys_timeout_handler handler, void *arg) #endif /* LWIP_DEBUG_TIMERNAMES */ { struct sys_timeo *timeout, *t; u32_t now, diff; timeout = (struct sys_timeo *)memp_malloc(MEMP_SYS_TIMEOUT); if (timeout == NULL) { LWIP_ASSERT("sys_timeout: timeout != NULL, pool MEMP_SYS_TIMEOUT is empty", timeout != NULL); return; } now = sys_now(); if (next_timeout == NULL) { diff = 0; timeouts_last_time = now; } else { diff = now - timeouts_last_time; } timeout->next = NULL; timeout->h = handler; timeout->arg = arg; timeout->time = msecs + diff; #if LWIP_DEBUG_TIMERNAMES timeout->handler_name = handler_name; LWIP_DEBUGF(TIMERS_DEBUG, ("sys_timeout: %p msecs=%"U32_F" handler=%s arg=%p\n", (void *)timeout, msecs, handler_name, (void *)arg)); #endif /* LWIP_DEBUG_TIMERNAMES */ if (next_timeout == NULL) { next_timeout = timeout; return; } if (next_timeout->time > msecs) { next_timeout->time -= msecs; timeout->next = next_timeout; next_timeout = timeout; } else { for (t = next_timeout; t != NULL; t = t->next) { timeout->time -= t->time; if (t->next == NULL || t->next->time > timeout->time) { if (t->next != NULL) { t->next->time -= timeout->time; } else if (timeout->time > msecs) { /* If this is the case, 'timeouts_last_time' and 'now' differs too much. This can be due to sys_check_timeouts() not being called at the right times, but also when stopping in a breakpoint. Anyway, let's assume this is not wanted, so add the first timer's time instead of 'diff' */ timeout->time = msecs + next_timeout->time; } timeout->next = t->next; t->next = timeout; break; } } } } /** * Go through timeout list (for this task only) and remove the first matching * entry (subsequent entries remain untouched), even though the timeout has not * triggered yet. * * @param handler callback function that would be called by the timeout * @param arg callback argument that would be passed to handler */ void sys_untimeout(sys_timeout_handler handler, void *arg) { struct sys_timeo *prev_t, *t; if (next_timeout == NULL) { return; } for (t = next_timeout, prev_t = NULL; t != NULL; prev_t = t, t = t->next) { if ((t->h == handler) && (t->arg == arg)) { /* We have a match */ /* Unlink from previous in list */ if (prev_t == NULL) { next_timeout = t->next; } else { prev_t->next = t->next; } /* If not the last one, add time of this one back to next */ if (t->next != NULL) { t->next->time += t->time; } memp_free(MEMP_SYS_TIMEOUT, t); return; } } return; } /** * @ingroup lwip_nosys * Handle timeouts for NO_SYS==1 (i.e. without using * tcpip_thread/sys_timeouts_mbox_fetch(). Uses sys_now() to call timeout * handler functions when timeouts expire. * * Must be called periodically from your main loop. 
*/ #if !NO_SYS && !defined __DOXYGEN__ static #endif /* !NO_SYS */ void sys_check_timeouts(void) { if (next_timeout) { struct sys_timeo *tmptimeout; u32_t diff; sys_timeout_handler handler; void *arg; u8_t had_one; u32_t now; now = sys_now(); /* this cares for wraparounds */ diff = now - timeouts_last_time; do { PBUF_CHECK_FREE_OOSEQ(); had_one = 0; tmptimeout = next_timeout; if (tmptimeout && (tmptimeout->time <= diff)) { /* timeout has expired */ had_one = 1; timeouts_last_time += tmptimeout->time; diff -= tmptimeout->time; next_timeout = tmptimeout->next; handler = tmptimeout->h; arg = tmptimeout->arg; #if LWIP_DEBUG_TIMERNAMES if (handler != NULL) { LWIP_DEBUGF(TIMERS_DEBUG, ("sct calling h=%s arg=%p\n", tmptimeout->handler_name, arg)); } #endif /* LWIP_DEBUG_TIMERNAMES */ memp_free(MEMP_SYS_TIMEOUT, tmptimeout); if (handler != NULL) { #if !NO_SYS /* For LWIP_TCPIP_CORE_LOCKING, lock the core before calling the timeout handler function. */ LOCK_TCPIP_CORE(); #endif /* !NO_SYS */ handler(arg); #if !NO_SYS UNLOCK_TCPIP_CORE(); #endif /* !NO_SYS */ } LWIP_TCPIP_THREAD_ALIVE(); } /* repeat until all expired timers have been called */ } while (had_one); } } /** Set back the timestamp of the last call to sys_check_timeouts() * This is necessary if sys_check_timeouts() hasn't been called for a long * time (e.g. while saving energy) to prevent all timer functions of that * period being called. */ void sys_restart_timeouts(void) { timeouts_last_time = sys_now(); } /** Return the time left before the next timeout is due. If no timeouts are * enqueued, returns 0xffffffff */ #if !NO_SYS static #endif /* !NO_SYS */ u32_t sys_timeouts_sleeptime(void) { u32_t diff; if (next_timeout == NULL) { return 0xffffffff; } diff = sys_now() - timeouts_last_time; if (diff > next_timeout->time) { return 0; } else { return next_timeout->time - diff; } } #if !NO_SYS /** * Wait (forever) for a message to arrive in an mbox. * While waiting, timeouts are processed. * * @param mbox the mbox to fetch the message from * @param msg the place to store the message */ void sys_timeouts_mbox_fetch(sys_mbox_t *mbox, void **msg) { u32_t sleeptime; again: if (!next_timeout) { sys_arch_mbox_fetch(mbox, msg, 0); return; } sleeptime = sys_timeouts_sleeptime(); if (sleeptime == 0 || sys_arch_mbox_fetch(mbox, msg, sleeptime) == SYS_ARCH_TIMEOUT) { /* If a SYS_ARCH_TIMEOUT value is returned, a timeout occurred before a message could be fetched. */ sys_check_timeouts(); /* We try again to fetch a message from the mbox. */ goto again; } } #endif /* NO_SYS */ #else /* LWIP_TIMERS && !LWIP_TIMERS_CUSTOM */ /* Satisfy the TCP code which calls this function */ void tcp_timer_needed(void) { } #endif /* LWIP_TIMERS && !LWIP_TIMERS_CUSTOM */
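The comment on sys_check_timeouts() above requires periodic calls from the application's main loop when NO_SYS==1 (for NO_SYS==0 the function stays static and is driven by sys_timeouts_mbox_fetch() instead). A usage sketch under the NO_SYS==1 assumption; my_netif_poll() is a hypothetical driver hook, not an lwIP function:

#include "lwip/timeouts.h"

extern void my_netif_poll(void);   /* hypothetical: pass received frames to lwIP */

/* Bare-metal superloop: service the driver, then let lwIP run any
   expired stack-internal timers (TCP, ARP, DHCP, DNS, ...). */
void app_main_loop(void)
{
  for (;;) {
    my_netif_poll();
    sys_check_timeouts();
  }
}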
2301_81045437/classic-platform
communication/lwip-2.0.3/src/core/timeouts.c
C
unknown
12,923
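cyclic_timer() and tcpip_tcp_timer() in the timeouts.c record above both re-arm themselves from inside the callback, because sys_timeout() only queues a one-shot timeout. The same pattern applied to a hypothetical application timer (the app_* names and interval are illustrative; the callback must run in the same context that processes lwIP timeouts):

#include "lwip/timeouts.h"

#define APP_TMR_INTERVAL_MS 500   /* illustrative interval */

static void app_periodic(void *arg)
{
  /* ... periodic application work ... */

  /* sys_timeout() is one-shot, so the handler reschedules itself */
  sys_timeout(APP_TMR_INTERVAL_MS, app_periodic, arg);
}

void app_timer_start(void)
{
  sys_timeout(APP_TMR_INTERVAL_MS, app_periodic, NULL);
}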
/** * @file * User Datagram Protocol module\n * The code for the User Datagram Protocol UDP & UDPLite (RFC 3828).\n * See also @ref udp_raw * * @defgroup udp_raw UDP * @ingroup callbackstyle_api * User Datagram Protocol module\n * @see @ref raw_api and @ref netconn */ /* * Copyright (c) 2001-2004 Swedish Institute of Computer Science. * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY * OF SUCH DAMAGE. * * This file is part of the lwIP TCP/IP stack. * * Author: Adam Dunkels <adam@sics.se> * */ /* @todo Check the use of '(struct udp_pcb).chksum_len_rx'! */ #include "lwip/opt.h" #if LWIP_UDP /* don't build if not configured for use in lwipopts.h */ #include "lwip/udp.h" #include "lwip/def.h" #include "lwip/memp.h" #include "lwip/inet_chksum.h" #include "lwip/ip_addr.h" #include "lwip/ip6.h" #include "lwip/ip6_addr.h" #include "lwip/netif.h" #include "lwip/icmp.h" #include "lwip/icmp6.h" #include "lwip/stats.h" #include "lwip/snmp.h" #include "lwip/dhcp.h" #include <string.h> #ifndef UDP_LOCAL_PORT_RANGE_START /* From http://www.iana.org/assignments/port-numbers: "The Dynamic and/or Private Ports are those from 49152 through 65535" */ #define UDP_LOCAL_PORT_RANGE_START 0xc000 #define UDP_LOCAL_PORT_RANGE_END 0xffff #define UDP_ENSURE_LOCAL_PORT_RANGE(port) ((u16_t)(((port) & ~UDP_LOCAL_PORT_RANGE_START) + UDP_LOCAL_PORT_RANGE_START)) #endif /* last local UDP port */ static u16_t udp_port = UDP_LOCAL_PORT_RANGE_START; /* The list of UDP PCBs */ /* exported in udp.h (was static) */ struct udp_pcb *udp_pcbs; /** * Initialize this module. */ void udp_init(void) { #if LWIP_RANDOMIZE_INITIAL_LOCAL_PORTS && defined(LWIP_RAND) udp_port = UDP_ENSURE_LOCAL_PORT_RANGE(LWIP_RAND()); #endif /* LWIP_RANDOMIZE_INITIAL_LOCAL_PORTS && defined(LWIP_RAND) */ } /** * Allocate a new local UDP port. * * @return a new (free) local UDP port number */ static u16_t udp_new_port(void) { u16_t n = 0; struct udp_pcb *pcb; again: if (udp_port++ == UDP_LOCAL_PORT_RANGE_END) { udp_port = UDP_LOCAL_PORT_RANGE_START; } /* Check all PCBs. 
*/ for (pcb = udp_pcbs; pcb != NULL; pcb = pcb->next) { if (pcb->local_port == udp_port) { if (++n > (UDP_LOCAL_PORT_RANGE_END - UDP_LOCAL_PORT_RANGE_START)) { return 0; } goto again; } } return udp_port; } /** Common code to see if the current input packet matches the pcb * (current input packet is accessed via ip(4/6)_current_* macros) * * @param pcb pcb to check * @param inp network interface on which the datagram was received (only used for IPv4) * @param broadcast 1 if his is an IPv4 broadcast (global or subnet-only), 0 otherwise (only used for IPv4) * @return 1 on match, 0 otherwise */ static u8_t udp_input_local_match(struct udp_pcb *pcb, struct netif *inp, u8_t broadcast) { LWIP_UNUSED_ARG(inp); /* in IPv6 only case */ LWIP_UNUSED_ARG(broadcast); /* in IPv6 only case */ /* Dual-stack: PCBs listening to any IP type also listen to any IP address */ if (IP_IS_ANY_TYPE_VAL(pcb->local_ip)) { #if LWIP_IPV4 && IP_SOF_BROADCAST_RECV if ((broadcast != 0) && !ip_get_option(pcb, SOF_BROADCAST)) { return 0; } #endif /* LWIP_IPV4 && IP_SOF_BROADCAST_RECV */ return 1; } /* Only need to check PCB if incoming IP version matches PCB IP version */ if (IP_ADDR_PCB_VERSION_MATCH_EXACT(pcb, ip_current_dest_addr())) { #if LWIP_IPV4 /* Special case: IPv4 broadcast: all or broadcasts in my subnet * Note: broadcast variable can only be 1 if it is an IPv4 broadcast */ if (broadcast != 0) { #if IP_SOF_BROADCAST_RECV if (ip_get_option(pcb, SOF_BROADCAST)) #endif /* IP_SOF_BROADCAST_RECV */ { if (ip4_addr_isany(ip_2_ip4(&pcb->local_ip)) || ((ip4_current_dest_addr()->addr == IPADDR_BROADCAST)) || ip4_addr_netcmp(ip_2_ip4(&pcb->local_ip), ip4_current_dest_addr(), netif_ip4_netmask(inp))) { return 1; } } } else #endif /* LWIP_IPV4 */ /* Handle IPv4 and IPv6: all or exact match */ if (ip_addr_isany(&pcb->local_ip) || ip_addr_cmp(&pcb->local_ip, ip_current_dest_addr())) { return 1; } } return 0; } /** * Process an incoming UDP datagram. * * Given an incoming UDP datagram (as a chain of pbufs) this function * finds a corresponding UDP PCB and hands over the pbuf to the pcbs * recv function. If no pcb is found or the datagram is incorrect, the * pbuf is freed. * * @param p pbuf to be demultiplexed to a UDP PCB (p->payload pointing to the UDP header) * @param inp network interface on which the datagram was received. * */ void udp_input(struct pbuf *p, struct netif *inp) { struct udp_hdr *udphdr; struct udp_pcb *pcb, *prev; struct udp_pcb *uncon_pcb; u16_t src, dest; u8_t broadcast; u8_t for_us = 0; LWIP_UNUSED_ARG(inp); PERF_START; UDP_STATS_INC(udp.recv); /* Check minimum length (UDP header) */ if (p->len < UDP_HLEN) { /* drop short packets */ LWIP_DEBUGF(UDP_DEBUG, ("udp_input: short UDP datagram (%"U16_F" bytes) discarded\n", p->tot_len)); UDP_STATS_INC(udp.lenerr); UDP_STATS_INC(udp.drop); MIB2_STATS_INC(mib2.udpinerrors); pbuf_free(p); goto end; } udphdr = (struct udp_hdr *)p->payload; /* is broadcast packet ? 
*/ broadcast = ip_addr_isbroadcast(ip_current_dest_addr(), ip_current_netif()); LWIP_DEBUGF(UDP_DEBUG, ("udp_input: received datagram of length %"U16_F"\n", p->tot_len)); /* convert src and dest ports to host byte order */ src = lwip_ntohs(udphdr->src); dest = lwip_ntohs(udphdr->dest); udp_debug_print(udphdr); /* print the UDP source and destination */ LWIP_DEBUGF(UDP_DEBUG, ("udp (")); ip_addr_debug_print(UDP_DEBUG, ip_current_dest_addr()); LWIP_DEBUGF(UDP_DEBUG, (", %"U16_F") <-- (", lwip_ntohs(udphdr->dest))); ip_addr_debug_print(UDP_DEBUG, ip_current_src_addr()); LWIP_DEBUGF(UDP_DEBUG, (", %"U16_F")\n", lwip_ntohs(udphdr->src))); pcb = NULL; prev = NULL; uncon_pcb = NULL; /* Iterate through the UDP pcb list for a matching pcb. * 'Perfect match' pcbs (connected to the remote port & ip address) are * preferred. If no perfect match is found, the first unconnected pcb that * matches the local port and ip address gets the datagram. */ for (pcb = udp_pcbs; pcb != NULL; pcb = pcb->next) { /* print the PCB local and remote address */ LWIP_DEBUGF(UDP_DEBUG, ("pcb (")); ip_addr_debug_print(UDP_DEBUG, &pcb->local_ip); LWIP_DEBUGF(UDP_DEBUG, (", %"U16_F") <-- (", pcb->local_port)); ip_addr_debug_print(UDP_DEBUG, &pcb->remote_ip); LWIP_DEBUGF(UDP_DEBUG, (", %"U16_F")\n", pcb->remote_port)); /* compare PCB local addr+port to UDP destination addr+port */ if ((pcb->local_port == dest) && (udp_input_local_match(pcb, inp, broadcast) != 0)) { if (((pcb->flags & UDP_FLAGS_CONNECTED) == 0) && ((uncon_pcb == NULL) #if SO_REUSE /* prefer specific IPs over cath-all */ || !ip_addr_isany(&pcb->local_ip) #endif /* SO_REUSE */ )) { /* the first unconnected matching PCB */ uncon_pcb = pcb; } /* compare PCB remote addr+port to UDP source addr+port */ if ((pcb->remote_port == src) && (ip_addr_isany_val(pcb->remote_ip) || ip_addr_cmp(&pcb->remote_ip, ip_current_src_addr()))) { /* the first fully matching PCB */ if (prev != NULL) { /* move the pcb to the front of udp_pcbs so that is found faster next time */ prev->next = pcb->next; pcb->next = udp_pcbs; udp_pcbs = pcb; } else { UDP_STATS_INC(udp.cachehit); } break; } } prev = pcb; } /* no fully matching pcb found? then look for an unconnected pcb */ if (pcb == NULL) { pcb = uncon_pcb; } /* Check checksum if this is a match or if it was directed at us. */ if (pcb != NULL) { for_us = 1; } else { #if LWIP_IPV6 if (ip_current_is_v6()) { for_us = netif_get_ip6_addr_match(inp, ip6_current_dest_addr()) >= 0; } #endif /* LWIP_IPV6 */ #if LWIP_IPV4 if (!ip_current_is_v6()) { for_us = ip4_addr_cmp(netif_ip4_addr(inp), ip4_current_dest_addr()); } #endif /* LWIP_IPV4 */ } if (for_us) { LWIP_DEBUGF(UDP_DEBUG | LWIP_DBG_TRACE, ("udp_input: calculating checksum\n")); #if CHECKSUM_CHECK_UDP IF__NETIF_CHECKSUM_ENABLED(inp, CHECKSUM_CHECK_UDP) { #if LWIP_UDPLITE if (ip_current_header_proto() == IP_PROTO_UDPLITE) { /* Do the UDP Lite checksum */ u16_t chklen = lwip_ntohs(udphdr->len); if (chklen < sizeof(struct udp_hdr)) { if (chklen == 0) { /* For UDP-Lite, checksum length of 0 means checksum over the complete packet (See RFC 3828 chap. 3.1) */ chklen = p->tot_len; } else { /* At least the UDP-Lite header must be covered by the checksum! (Again, see RFC 3828 chap. 
3.1) */ goto chkerr; } } if (ip_chksum_pseudo_partial(p, IP_PROTO_UDPLITE, p->tot_len, chklen, ip_current_src_addr(), ip_current_dest_addr()) != 0) { goto chkerr; } } else #endif /* LWIP_UDPLITE */ { if (udphdr->chksum != 0) { if (ip_chksum_pseudo(p, IP_PROTO_UDP, p->tot_len, ip_current_src_addr(), ip_current_dest_addr()) != 0) { goto chkerr; } } } } #endif /* CHECKSUM_CHECK_UDP */ if (pbuf_header(p, -UDP_HLEN)) { /* Can we cope with this failing? Just assert for now */ LWIP_ASSERT("pbuf_header failed\n", 0); UDP_STATS_INC(udp.drop); MIB2_STATS_INC(mib2.udpinerrors); pbuf_free(p); goto end; } if (pcb != NULL) { MIB2_STATS_INC(mib2.udpindatagrams); #if SO_REUSE && SO_REUSE_RXTOALL if (ip_get_option(pcb, SOF_REUSEADDR) && (broadcast || ip_addr_ismulticast(ip_current_dest_addr()))) { /* pass broadcast- or multicast packets to all multicast pcbs if SOF_REUSEADDR is set on the first match */ struct udp_pcb *mpcb; u8_t p_header_changed = 0; s16_t hdrs_len = (s16_t)(ip_current_header_tot_len() + UDP_HLEN); for (mpcb = udp_pcbs; mpcb != NULL; mpcb = mpcb->next) { if (mpcb != pcb) { /* compare PCB local addr+port to UDP destination addr+port */ if ((mpcb->local_port == dest) && (udp_input_local_match(mpcb, inp, broadcast) != 0)) { /* pass a copy of the packet to all local matches */ if (mpcb->recv != NULL) { struct pbuf *q; /* for that, move payload to IP header again */ if (p_header_changed == 0) { pbuf_header_force(p, hdrs_len); p_header_changed = 1; } q = pbuf_alloc(PBUF_RAW, p->tot_len, PBUF_RAM); if (q != NULL) { err_t err = pbuf_copy(q, p); if (err == ERR_OK) { /* move payload to UDP data */ pbuf_header(q, -hdrs_len); mpcb->recv(mpcb->recv_arg, mpcb, q, ip_current_src_addr(), src); } } } } } } if (p_header_changed) { /* and move payload to UDP data again */ pbuf_header(p, -hdrs_len); } } #endif /* SO_REUSE && SO_REUSE_RXTOALL */ /* callback */ if (pcb->recv != NULL) { /* now the recv function is responsible for freeing p */ pcb->recv(pcb->recv_arg, pcb, p, ip_current_src_addr(), src); } else { /* no recv function registered? then we have to free the pbuf! */ pbuf_free(p); goto end; } } else { LWIP_DEBUGF(UDP_DEBUG | LWIP_DBG_TRACE, ("udp_input: not for us.\n")); #if LWIP_ICMP || LWIP_ICMP6 /* No match was found, send ICMP destination port unreachable unless destination address was broadcast/multicast. */ if (!broadcast && !ip_addr_ismulticast(ip_current_dest_addr())) { /* move payload pointer back to ip header */ pbuf_header_force(p, (s16_t)(ip_current_header_tot_len() + UDP_HLEN)); icmp_port_unreach(ip_current_is_v6(), p); } #endif /* LWIP_ICMP || LWIP_ICMP6 */ UDP_STATS_INC(udp.proterr); UDP_STATS_INC(udp.drop); MIB2_STATS_INC(mib2.udpnoports); pbuf_free(p); } } else { pbuf_free(p); } end: PERF_STOP("udp_input"); return; #if CHECKSUM_CHECK_UDP chkerr: LWIP_DEBUGF(UDP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("udp_input: UDP (or UDP Lite) datagram discarded due to failing checksum\n")); UDP_STATS_INC(udp.chkerr); UDP_STATS_INC(udp.drop); MIB2_STATS_INC(mib2.udpinerrors); pbuf_free(p); PERF_STOP("udp_input"); #endif /* CHECKSUM_CHECK_UDP */ } /** * @ingroup udp_raw * Send data using UDP. * * @param pcb UDP PCB used to send the data. * @param p chain of pbuf's to be sent. * * The datagram will be sent to the current remote_ip & remote_port * stored in pcb. If the pcb is not bound to a port, it will * automatically be bound to a random port. * * @return lwIP error code. * - ERR_OK. Successful. No error occurred. * - ERR_MEM. Out of memory. * - ERR_RTE. Could not find route to destination address. 
* - ERR_VAL. No PCB or PCB is dual-stack * - More errors could be returned by lower protocol layers. * * @see udp_disconnect() udp_sendto() */ err_t udp_send(struct udp_pcb *pcb, struct pbuf *p) { if ((pcb == NULL) || IP_IS_ANY_TYPE_VAL(pcb->remote_ip)) { return ERR_VAL; } /* send to the packet using remote ip and port stored in the pcb */ return udp_sendto(pcb, p, &pcb->remote_ip, pcb->remote_port); } #if LWIP_CHECKSUM_ON_COPY && CHECKSUM_GEN_UDP /** @ingroup udp_raw * Same as udp_send() but with checksum */ err_t udp_send_chksum(struct udp_pcb *pcb, struct pbuf *p, u8_t have_chksum, u16_t chksum) { if ((pcb == NULL) || IP_IS_ANY_TYPE_VAL(pcb->remote_ip)) { return ERR_VAL; } /* send to the packet using remote ip and port stored in the pcb */ return udp_sendto_chksum(pcb, p, &pcb->remote_ip, pcb->remote_port, have_chksum, chksum); } #endif /* LWIP_CHECKSUM_ON_COPY && CHECKSUM_GEN_UDP */ /** * @ingroup udp_raw * Send data to a specified address using UDP. * * @param pcb UDP PCB used to send the data. * @param p chain of pbuf's to be sent. * @param dst_ip Destination IP address. * @param dst_port Destination UDP port. * * dst_ip & dst_port are expected to be in the same byte order as in the pcb. * * If the PCB already has a remote address association, it will * be restored after the data is sent. * * @return lwIP error code (@see udp_send for possible error codes) * * @see udp_disconnect() udp_send() */ err_t udp_sendto(struct udp_pcb *pcb, struct pbuf *p, const ip_addr_t *dst_ip, u16_t dst_port) { #if LWIP_CHECKSUM_ON_COPY && CHECKSUM_GEN_UDP return udp_sendto_chksum(pcb, p, dst_ip, dst_port, 0, 0); } /** @ingroup udp_raw * Same as udp_sendto(), but with checksum */ err_t udp_sendto_chksum(struct udp_pcb *pcb, struct pbuf *p, const ip_addr_t *dst_ip, u16_t dst_port, u8_t have_chksum, u16_t chksum) { #endif /* LWIP_CHECKSUM_ON_COPY && CHECKSUM_GEN_UDP */ struct netif *netif; const ip_addr_t *dst_ip_route = dst_ip; if ((pcb == NULL) || (dst_ip == NULL) || !IP_ADDR_PCB_VERSION_MATCH(pcb, dst_ip)) { return ERR_VAL; } LWIP_DEBUGF(UDP_DEBUG | LWIP_DBG_TRACE, ("udp_send\n")); #if LWIP_IPV6 || (LWIP_IPV4 && LWIP_MULTICAST_TX_OPTIONS) if (ip_addr_ismulticast(dst_ip_route)) { #if LWIP_IPV6 if (IP_IS_V6(dst_ip)) { /* For multicast, find a netif based on source address. */ dst_ip_route = &pcb->local_ip; } else #endif /* LWIP_IPV6 */ { #if LWIP_IPV4 && LWIP_MULTICAST_TX_OPTIONS /* IPv4 does not use source-based routing by default, so we use an administratively selected interface for multicast by default. However, this can be overridden by setting an interface address in pcb->multicast_ip that is used for routing. */ if (!ip_addr_isany_val(pcb->multicast_ip) && !ip4_addr_cmp(ip_2_ip4(&pcb->multicast_ip), IP4_ADDR_BROADCAST)) { dst_ip_route = &pcb->multicast_ip; } #endif /* LWIP_IPV4 && LWIP_MULTICAST_TX_OPTIONS */ } } #endif /* LWIP_IPV6 || (LWIP_IPV4 && LWIP_MULTICAST_TX_OPTIONS) */ /* find the outgoing network interface for this packet */ if(IP_IS_ANY_TYPE_VAL(pcb->local_ip)) { /* Don't call ip_route() with IP_ANY_TYPE */ netif = ip_route(IP46_ADDR_ANY(IP_GET_TYPE(dst_ip_route)), dst_ip_route); } else { netif = ip_route(&pcb->local_ip, dst_ip_route); } /* no outgoing network interface could be found? 
*/ if (netif == NULL) { LWIP_DEBUGF(UDP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("udp_send: No route to ")); ip_addr_debug_print(UDP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, dst_ip); LWIP_DEBUGF(UDP_DEBUG, ("\n")); UDP_STATS_INC(udp.rterr); return ERR_RTE; } #if LWIP_CHECKSUM_ON_COPY && CHECKSUM_GEN_UDP return udp_sendto_if_chksum(pcb, p, dst_ip, dst_port, netif, have_chksum, chksum); #else /* LWIP_CHECKSUM_ON_COPY && CHECKSUM_GEN_UDP */ return udp_sendto_if(pcb, p, dst_ip, dst_port, netif); #endif /* LWIP_CHECKSUM_ON_COPY && CHECKSUM_GEN_UDP */ } /** * @ingroup udp_raw * Send data to a specified address using UDP. * The netif used for sending can be specified. * * This function exists mainly for DHCP, to be able to send UDP packets * on a netif that is still down. * * @param pcb UDP PCB used to send the data. * @param p chain of pbuf's to be sent. * @param dst_ip Destination IP address. * @param dst_port Destination UDP port. * @param netif the netif used for sending. * * dst_ip & dst_port are expected to be in the same byte order as in the pcb. * * @return lwIP error code (@see udp_send for possible error codes) * * @see udp_disconnect() udp_send() */ err_t udp_sendto_if(struct udp_pcb *pcb, struct pbuf *p, const ip_addr_t *dst_ip, u16_t dst_port, struct netif *netif) { #if LWIP_CHECKSUM_ON_COPY && CHECKSUM_GEN_UDP return udp_sendto_if_chksum(pcb, p, dst_ip, dst_port, netif, 0, 0); } /** Same as udp_sendto_if(), but with checksum */ err_t udp_sendto_if_chksum(struct udp_pcb *pcb, struct pbuf *p, const ip_addr_t *dst_ip, u16_t dst_port, struct netif *netif, u8_t have_chksum, u16_t chksum) { #endif /* LWIP_CHECKSUM_ON_COPY && CHECKSUM_GEN_UDP */ const ip_addr_t *src_ip; if ((pcb == NULL) || (dst_ip == NULL) || !IP_ADDR_PCB_VERSION_MATCH(pcb, dst_ip)) { return ERR_VAL; } /* PCB local address is IP_ANY_ADDR? */ #if LWIP_IPV6 if (IP_IS_V6(dst_ip)) { if (ip6_addr_isany(ip_2_ip6(&pcb->local_ip))) { src_ip = ip6_select_source_address(netif, ip_2_ip6(dst_ip)); if (src_ip == NULL) { /* No suitable source address was found. */ return ERR_RTE; } } else { /* use UDP PCB local IPv6 address as source address, if still valid. */ if (netif_get_ip6_addr_match(netif, ip_2_ip6(&pcb->local_ip)) < 0) { /* Address isn't valid anymore. 
*/ return ERR_RTE; } src_ip = &pcb->local_ip; } } #endif /* LWIP_IPV6 */ #if LWIP_IPV4 && LWIP_IPV6 else #endif /* LWIP_IPV4 && LWIP_IPV6 */ #if LWIP_IPV4 if (ip4_addr_isany(ip_2_ip4(&pcb->local_ip)) || ip4_addr_ismulticast(ip_2_ip4(&pcb->local_ip))) { /* if the local_ip is any or multicast * use the outgoing network interface IP address as source address */ src_ip = netif_ip_addr4(netif); } else { /* check if UDP PCB local IP address is correct * this could be an old address if netif->ip_addr has changed */ if (!ip4_addr_cmp(ip_2_ip4(&(pcb->local_ip)), netif_ip4_addr(netif))) { /* local_ip doesn't match, drop the packet */ return ERR_RTE; } /* use UDP PCB local IP address as source address */ src_ip = &pcb->local_ip; } #endif /* LWIP_IPV4 */ #if LWIP_CHECKSUM_ON_COPY && CHECKSUM_GEN_UDP return udp_sendto_if_src_chksum(pcb, p, dst_ip, dst_port, netif, have_chksum, chksum, src_ip); #else /* LWIP_CHECKSUM_ON_COPY && CHECKSUM_GEN_UDP */ return udp_sendto_if_src(pcb, p, dst_ip, dst_port, netif, src_ip); #endif /* LWIP_CHECKSUM_ON_COPY && CHECKSUM_GEN_UDP */ } /** @ingroup udp_raw * Same as @ref udp_sendto_if, but with source address */ err_t udp_sendto_if_src(struct udp_pcb *pcb, struct pbuf *p, const ip_addr_t *dst_ip, u16_t dst_port, struct netif *netif, const ip_addr_t *src_ip) { #if LWIP_CHECKSUM_ON_COPY && CHECKSUM_GEN_UDP return udp_sendto_if_src_chksum(pcb, p, dst_ip, dst_port, netif, 0, 0, src_ip); } /** Same as udp_sendto_if_src(), but with checksum */ err_t udp_sendto_if_src_chksum(struct udp_pcb *pcb, struct pbuf *p, const ip_addr_t *dst_ip, u16_t dst_port, struct netif *netif, u8_t have_chksum, u16_t chksum, const ip_addr_t *src_ip) { #endif /* LWIP_CHECKSUM_ON_COPY && CHECKSUM_GEN_UDP */ struct udp_hdr *udphdr; err_t err; struct pbuf *q; /* q will be sent down the stack */ u8_t ip_proto; u8_t ttl; if ((pcb == NULL) || (dst_ip == NULL) || !IP_ADDR_PCB_VERSION_MATCH(pcb, src_ip) || !IP_ADDR_PCB_VERSION_MATCH(pcb, dst_ip)) { return ERR_VAL; } #if LWIP_IPV4 && IP_SOF_BROADCAST /* broadcast filter? */ if (!ip_get_option(pcb, SOF_BROADCAST) && #if LWIP_IPV6 IP_IS_V4(dst_ip) && #endif /* LWIP_IPV6 */ ip_addr_isbroadcast(dst_ip, netif)) { LWIP_DEBUGF(UDP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("udp_sendto_if: SOF_BROADCAST not enabled on pcb %p\n", (void *)pcb)); return ERR_VAL; } #endif /* LWIP_IPV4 && IP_SOF_BROADCAST */ /* if the PCB is not yet bound to a port, bind it here */ if (pcb->local_port == 0) { LWIP_DEBUGF(UDP_DEBUG | LWIP_DBG_TRACE, ("udp_send: not yet bound to a port, binding now\n")); err = udp_bind(pcb, &pcb->local_ip, pcb->local_port); if (err != ERR_OK) { LWIP_DEBUGF(UDP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, ("udp_send: forced port bind failed\n")); return err; } } /* not enough space to add an UDP header to first pbuf in given p chain? */ if (pbuf_header(p, UDP_HLEN)) { /* allocate header in a separate new pbuf */ q = pbuf_alloc(PBUF_IP, UDP_HLEN, PBUF_RAM); /* new header pbuf could not be allocated? 
*/ if (q == NULL) { LWIP_DEBUGF(UDP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, ("udp_send: could not allocate header\n")); return ERR_MEM; } if (p->tot_len != 0) { /* chain header q in front of given pbuf p (only if p contains data) */ pbuf_chain(q, p); } /* first pbuf q points to header pbuf */ LWIP_DEBUGF(UDP_DEBUG, ("udp_send: added header pbuf %p before given pbuf %p\n", (void *)q, (void *)p)); } else { /* adding space for header within p succeeded */ /* first pbuf q equals given pbuf */ q = p; LWIP_DEBUGF(UDP_DEBUG, ("udp_send: added header in given pbuf %p\n", (void *)p)); } LWIP_ASSERT("check that first pbuf can hold struct udp_hdr", (q->len >= sizeof(struct udp_hdr))); /* q now represents the packet to be sent */ udphdr = (struct udp_hdr *)q->payload; udphdr->src = lwip_htons(pcb->local_port); udphdr->dest = lwip_htons(dst_port); /* in UDP, 0 checksum means 'no checksum' */ udphdr->chksum = 0x0000; /* Multicast Loop? */ #if (LWIP_IPV4 && LWIP_MULTICAST_TX_OPTIONS) || (LWIP_IPV6 && LWIP_IPV6_MLD) if (((pcb->flags & UDP_FLAGS_MULTICAST_LOOP) != 0) && ip_addr_ismulticast(dst_ip)) { q->flags |= PBUF_FLAG_MCASTLOOP; } #endif /* (LWIP_IPV4 && LWIP_MULTICAST_TX_OPTIONS) || (LWIP_IPV6 && LWIP_IPV6_MLD) */ LWIP_DEBUGF(UDP_DEBUG, ("udp_send: sending datagram of length %"U16_F"\n", q->tot_len)); #if LWIP_UDPLITE /* UDP Lite protocol? */ if (pcb->flags & UDP_FLAGS_UDPLITE) { u16_t chklen, chklen_hdr; LWIP_DEBUGF(UDP_DEBUG, ("udp_send: UDP LITE packet length %"U16_F"\n", q->tot_len)); /* set UDP message length in UDP header */ chklen_hdr = chklen = pcb->chksum_len_tx; if ((chklen < sizeof(struct udp_hdr)) || (chklen > q->tot_len)) { if (chklen != 0) { LWIP_DEBUGF(UDP_DEBUG, ("udp_send: UDP LITE pcb->chksum_len is illegal: %"U16_F"\n", chklen)); } /* For UDP-Lite, checksum length of 0 means checksum over the complete packet. (See RFC 3828 chap. 3.1) At least the UDP-Lite header must be covered by the checksum, therefore, if chksum_len has an illegal value, we generate the checksum over the complete packet to be safe. */ chklen_hdr = 0; chklen = q->tot_len; } udphdr->len = lwip_htons(chklen_hdr); /* calculate checksum */ #if CHECKSUM_GEN_UDP IF__NETIF_CHECKSUM_ENABLED(netif, NETIF_CHECKSUM_GEN_UDP) { #if LWIP_CHECKSUM_ON_COPY if (have_chksum) { chklen = UDP_HLEN; } #endif /* LWIP_CHECKSUM_ON_COPY */ udphdr->chksum = ip_chksum_pseudo_partial(q, IP_PROTO_UDPLITE, q->tot_len, chklen, src_ip, dst_ip); #if LWIP_CHECKSUM_ON_COPY if (have_chksum) { u32_t acc; acc = udphdr->chksum + (u16_t)~(chksum); udphdr->chksum = FOLD_U32T(acc); } #endif /* LWIP_CHECKSUM_ON_COPY */ /* chksum zero must become 0xffff, as zero means 'no checksum' */ if (udphdr->chksum == 0x0000) { udphdr->chksum = 0xffff; } } #endif /* CHECKSUM_GEN_UDP */ ip_proto = IP_PROTO_UDPLITE; } else #endif /* LWIP_UDPLITE */ { /* UDP */ LWIP_DEBUGF(UDP_DEBUG, ("udp_send: UDP packet length %"U16_F"\n", q->tot_len)); udphdr->len = lwip_htons(q->tot_len); /* calculate checksum */ #if CHECKSUM_GEN_UDP IF__NETIF_CHECKSUM_ENABLED(netif, NETIF_CHECKSUM_GEN_UDP) { /* Checksum is mandatory over IPv6. 
*/ if (IP_IS_V6(dst_ip) || (pcb->flags & UDP_FLAGS_NOCHKSUM) == 0) { u16_t udpchksum; #if LWIP_CHECKSUM_ON_COPY if (have_chksum) { u32_t acc; udpchksum = ip_chksum_pseudo_partial(q, IP_PROTO_UDP, q->tot_len, UDP_HLEN, src_ip, dst_ip); acc = udpchksum + (u16_t)~(chksum); udpchksum = FOLD_U32T(acc); } else #endif /* LWIP_CHECKSUM_ON_COPY */ { udpchksum = ip_chksum_pseudo(q, IP_PROTO_UDP, q->tot_len, src_ip, dst_ip); } /* chksum zero must become 0xffff, as zero means 'no checksum' */ if (udpchksum == 0x0000) { udpchksum = 0xffff; } udphdr->chksum = udpchksum; } } #endif /* CHECKSUM_GEN_UDP */ ip_proto = IP_PROTO_UDP; } /* Determine TTL to use */ #if LWIP_MULTICAST_TX_OPTIONS ttl = (ip_addr_ismulticast(dst_ip) ? udp_get_multicast_ttl(pcb) : pcb->ttl); #else /* LWIP_MULTICAST_TX_OPTIONS */ ttl = pcb->ttl; #endif /* LWIP_MULTICAST_TX_OPTIONS */ LWIP_DEBUGF(UDP_DEBUG, ("udp_send: UDP checksum 0x%04"X16_F"\n", udphdr->chksum)); LWIP_DEBUGF(UDP_DEBUG, ("udp_send: ip_output_if (,,,,0x%02"X16_F",)\n", (u16_t)ip_proto)); /* output to IP */ NETIF_SET_HWADDRHINT(netif, &(pcb->addr_hint)); err = ip_output_if_src(q, src_ip, dst_ip, ttl, pcb->tos, ip_proto, netif); NETIF_SET_HWADDRHINT(netif, NULL); /* @todo: must this be increased even if error occurred? */ MIB2_STATS_INC(mib2.udpoutdatagrams); /* did we chain a separate header pbuf earlier? */ if (q != p) { /* free the header pbuf */ pbuf_free(q); q = NULL; /* p is still referenced by the caller, and will live on */ } UDP_STATS_INC(udp.xmit); return err; } /** * @ingroup udp_raw * Bind an UDP PCB. * * @param pcb UDP PCB to be bound with a local address ipaddr and port. * @param ipaddr local IP address to bind with. Use IP4_ADDR_ANY to * bind to all local interfaces. * @param port local UDP port to bind with. Use 0 to automatically bind * to a random port between UDP_LOCAL_PORT_RANGE_START and * UDP_LOCAL_PORT_RANGE_END. * * ipaddr & port are expected to be in the same byte order as in the pcb. * * @return lwIP error code. * - ERR_OK. Successful. No error occurred. * - ERR_USE. The specified ipaddr and port are already bound to by * another UDP PCB. * * @see udp_disconnect() */ err_t udp_bind(struct udp_pcb *pcb, const ip_addr_t *ipaddr, u16_t port) { struct udp_pcb *ipcb; u8_t rebind; #if LWIP_IPV4 /* Don't propagate NULL pointer (IPv4 ANY) to subsequent functions */ if (ipaddr == NULL) { ipaddr = IP4_ADDR_ANY; } #endif /* LWIP_IPV4 */ /* still need to check for ipaddr == NULL in IPv6 only case */ if ((pcb == NULL) || (ipaddr == NULL)) { return ERR_VAL; } LWIP_DEBUGF(UDP_DEBUG | LWIP_DBG_TRACE, ("udp_bind(ipaddr = ")); ip_addr_debug_print(UDP_DEBUG | LWIP_DBG_TRACE, ipaddr); LWIP_DEBUGF(UDP_DEBUG | LWIP_DBG_TRACE, (", port = %"U16_F")\n", port)); rebind = 0; /* Check for double bind and rebind of the same pcb */ for (ipcb = udp_pcbs; ipcb != NULL; ipcb = ipcb->next) { /* is this UDP PCB already on active list? */ if (pcb == ipcb) { rebind = 1; break; } } /* no port specified? */ if (port == 0) { port = udp_new_port(); if (port == 0) { /* no more ports available in local range */ LWIP_DEBUGF(UDP_DEBUG, ("udp_bind: out of free UDP ports\n")); return ERR_USE; } } else { for (ipcb = udp_pcbs; ipcb != NULL; ipcb = ipcb->next) { if (pcb != ipcb) { /* By default, we don't allow to bind to a port that any other udp PCB is already bound to, unless *all* PCBs with that port have tha REUSEADDR flag set. 
*/ #if SO_REUSE if (!ip_get_option(pcb, SOF_REUSEADDR) || !ip_get_option(ipcb, SOF_REUSEADDR)) #endif /* SO_REUSE */ { /* port matches that of PCB in list and REUSEADDR not set -> reject */ if ((ipcb->local_port == port) && /* IP address matches? */ ip_addr_cmp(&ipcb->local_ip, ipaddr)) { /* other PCB already binds to this local IP and port */ LWIP_DEBUGF(UDP_DEBUG, ("udp_bind: local port %"U16_F" already bound by another pcb\n", port)); return ERR_USE; } } } } } ip_addr_set_ipaddr(&pcb->local_ip, ipaddr); pcb->local_port = port; mib2_udp_bind(pcb); /* pcb not active yet? */ if (rebind == 0) { /* place the PCB on the active list if not already there */ pcb->next = udp_pcbs; udp_pcbs = pcb; } LWIP_DEBUGF(UDP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("udp_bind: bound to ")); ip_addr_debug_print(UDP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, &pcb->local_ip); LWIP_DEBUGF(UDP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, (", port %"U16_F")\n", pcb->local_port)); return ERR_OK; } /** * @ingroup udp_raw * Connect an UDP PCB. * * This will associate the UDP PCB with the remote address. * * @param pcb UDP PCB to be connected with remote address ipaddr and port. * @param ipaddr remote IP address to connect with. * @param port remote UDP port to connect with. * * @return lwIP error code * * ipaddr & port are expected to be in the same byte order as in the pcb. * * The udp pcb is bound to a random local port if not already bound. * * @see udp_disconnect() */ err_t udp_connect(struct udp_pcb *pcb, const ip_addr_t *ipaddr, u16_t port) { struct udp_pcb *ipcb; if ((pcb == NULL) || (ipaddr == NULL)) { return ERR_VAL; } if (pcb->local_port == 0) { err_t err = udp_bind(pcb, &pcb->local_ip, pcb->local_port); if (err != ERR_OK) { return err; } } ip_addr_set_ipaddr(&pcb->remote_ip, ipaddr); pcb->remote_port = port; pcb->flags |= UDP_FLAGS_CONNECTED; LWIP_DEBUGF(UDP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, ("udp_connect: connected to ")); ip_addr_debug_print(UDP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, &pcb->remote_ip); LWIP_DEBUGF(UDP_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_STATE, (", port %"U16_F")\n", pcb->remote_port)); /* Insert UDP PCB into the list of active UDP PCBs. */ for (ipcb = udp_pcbs; ipcb != NULL; ipcb = ipcb->next) { if (pcb == ipcb) { /* already on the list, just return */ return ERR_OK; } } /* PCB not yet on the list, add PCB now */ pcb->next = udp_pcbs; udp_pcbs = pcb; return ERR_OK; } /** * @ingroup udp_raw * Disconnect a UDP PCB * * @param pcb the udp pcb to disconnect. */ void udp_disconnect(struct udp_pcb *pcb) { /* reset remote address association */ #if LWIP_IPV4 && LWIP_IPV6 if (IP_IS_ANY_TYPE_VAL(pcb->local_ip)) { ip_addr_copy(pcb->remote_ip, *IP_ANY_TYPE); } else { #endif ip_addr_set_any(IP_IS_V6_VAL(pcb->remote_ip), &pcb->remote_ip); #if LWIP_IPV4 && LWIP_IPV6 } #endif pcb->remote_port = 0; /* mark PCB as unconnected */ pcb->flags &= ~UDP_FLAGS_CONNECTED; } /** * @ingroup udp_raw * Set a receive callback for a UDP PCB * * This callback will be called when receiving a datagram for the pcb. * * @param pcb the pcb for which to set the recv callback * @param recv function pointer of the callback function * @param recv_arg additional argument to pass to the callback function */ void udp_recv(struct udp_pcb *pcb, udp_recv_fn recv, void *recv_arg) { /* remember recv() callback and user data */ pcb->recv = recv; pcb->recv_arg = recv_arg; } /** * @ingroup udp_raw * Remove an UDP PCB. * * @param pcb UDP PCB to be removed. 
The PCB is removed from the list of * UDP PCB's and the data structure is freed from memory. * * @see udp_new() */ void udp_remove(struct udp_pcb *pcb) { struct udp_pcb *pcb2; mib2_udp_unbind(pcb); /* pcb to be removed is first in list? */ if (udp_pcbs == pcb) { /* make list start at 2nd pcb */ udp_pcbs = udp_pcbs->next; /* pcb not 1st in list */ } else { for (pcb2 = udp_pcbs; pcb2 != NULL; pcb2 = pcb2->next) { /* find pcb in udp_pcbs list */ if (pcb2->next != NULL && pcb2->next == pcb) { /* remove pcb from list */ pcb2->next = pcb->next; break; } } } memp_free(MEMP_UDP_PCB, pcb); } /** * @ingroup udp_raw * Create a UDP PCB. * * @return The UDP PCB which was created. NULL if the PCB data structure * could not be allocated. * * @see udp_remove() */ struct udp_pcb * udp_new(void) { struct udp_pcb *pcb; pcb = (struct udp_pcb *)memp_malloc(MEMP_UDP_PCB); /* could allocate UDP PCB? */ if (pcb != NULL) { /* UDP Lite: by initializing to all zeroes, chksum_len is set to 0 * which means checksum is generated over the whole datagram per default * (recommended as default by RFC 3828). */ /* initialize PCB to all zeroes */ memset(pcb, 0, sizeof(struct udp_pcb)); pcb->ttl = UDP_TTL; #if LWIP_MULTICAST_TX_OPTIONS udp_set_multicast_ttl(pcb, UDP_TTL); #endif /* LWIP_MULTICAST_TX_OPTIONS */ } return pcb; } /** * @ingroup udp_raw * Create a UDP PCB for specific IP type. * * @param type IP address type, see @ref lwip_ip_addr_type definitions. * If you want to listen to IPv4 and IPv6 (dual-stack) packets, * supply @ref IPADDR_TYPE_ANY as argument and bind to @ref IP_ANY_TYPE. * @return The UDP PCB which was created. NULL if the PCB data structure * could not be allocated. * * @see udp_remove() */ struct udp_pcb * udp_new_ip_type(u8_t type) { struct udp_pcb *pcb; pcb = udp_new(); #if LWIP_IPV4 && LWIP_IPV6 if (pcb != NULL) { IP_SET_TYPE_VAL(pcb->local_ip, type); IP_SET_TYPE_VAL(pcb->remote_ip, type); } #else LWIP_UNUSED_ARG(type); #endif /* LWIP_IPV4 && LWIP_IPV6 */ return pcb; } /** This function is called from netif.c when address is changed * * @param old_addr IP address of the netif before change * @param new_addr IP address of the netif after change */ void udp_netif_ip_addr_changed(const ip_addr_t* old_addr, const ip_addr_t* new_addr) { struct udp_pcb* upcb; if (!ip_addr_isany(old_addr) && !ip_addr_isany(new_addr)) { for (upcb = udp_pcbs; upcb != NULL; upcb = upcb->next) { /* PCB bound to current local interface address? */ if (ip_addr_cmp(&upcb->local_ip, old_addr)) { /* The PCB is bound to the old ipaddr and * is set to bound to the new one instead */ ip_addr_copy(upcb->local_ip, *new_addr); } } } } #if UDP_DEBUG /** * Print UDP header information for debug purposes. * * @param udphdr pointer to the udp header in memory. */ void udp_debug_print(struct udp_hdr *udphdr) { LWIP_DEBUGF(UDP_DEBUG, ("UDP header:\n")); LWIP_DEBUGF(UDP_DEBUG, ("+-------------------------------+\n")); LWIP_DEBUGF(UDP_DEBUG, ("| %5"U16_F" | %5"U16_F" | (src port, dest port)\n", lwip_ntohs(udphdr->src), lwip_ntohs(udphdr->dest))); LWIP_DEBUGF(UDP_DEBUG, ("+-------------------------------+\n")); LWIP_DEBUGF(UDP_DEBUG, ("| %5"U16_F" | 0x%04"X16_F" | (len, chksum)\n", lwip_ntohs(udphdr->len), lwip_ntohs(udphdr->chksum))); LWIP_DEBUGF(UDP_DEBUG, ("+-------------------------------+\n")); } #endif /* UDP_DEBUG */ #endif /* LWIP_UDP */
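A minimal raw-API usage sketch for the udp.c functions above: create and bind a PCB, register a recv callback, and echo each datagram back with udp_sendto(). The port number and app_* names are illustrative choices; the calls are assumed to run in the tcpip/core context (or with NO_SYS==1):

#include "lwip/udp.h"
#include "lwip/pbuf.h"

/* recv callback: the callback owns p and must free it */
static void app_udp_echo_recv(void *arg, struct udp_pcb *pcb, struct pbuf *p,
                              const ip_addr_t *addr, u16_t port)
{
  LWIP_UNUSED_ARG(arg);
  if (p != NULL) {
    udp_sendto(pcb, p, addr, port);   /* echo back to the sender */
    pbuf_free(p);                     /* udp_sendto() does not free p */
  }
}

err_t app_udp_echo_init(void)
{
  err_t err;
  struct udp_pcb *pcb = udp_new();
  if (pcb == NULL) {
    return ERR_MEM;
  }
  err = udp_bind(pcb, IP_ADDR_ANY, 7);   /* any local address, port 7 */
  if (err != ERR_OK) {
    udp_remove(pcb);
    return err;
  }
  udp_recv(pcb, app_udp_echo_recv, NULL);
  return ERR_OK;
}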
2301_81045437/classic-platform
communication/lwip-2.0.3/src/core/udp.c
C
unknown
40,022
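Two header-building rules from the udp.c record above, isolated as a sketch: a computed UDP checksum of 0x0000 is transmitted as 0xffff because a zero field means "no checksum", and an illegal UDP-Lite coverage length on the send side falls back to covering the whole datagram (RFC 3828, chap. 3.1). The demo_* helpers are illustrative, not lwIP API:

#include <stdint.h>

#define DEMO_UDP_HLEN 8u   /* size of the UDP / UDP-Lite header */

/* Rule 1: zero means "no checksum" on the wire, so a checksum that
   computes to 0x0000 is sent as 0xffff instead. */
static uint16_t demo_tx_checksum(uint16_t computed)
{
  return (computed == 0x0000) ? 0xffff : computed;
}

/* Rule 2 (send side): coverage of 0, less than the header, or more than
   the datagram length is replaced by full-datagram coverage. */
static uint16_t demo_udplite_tx_coverage(uint16_t requested, uint16_t tot_len)
{
  if ((requested < DEMO_UDP_HLEN) || (requested > tot_len)) {
    return tot_len;
  }
  return requested;
}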
/** * @file * netconn API (to be used from non-TCPIP threads) */ /* * Copyright (c) 2001-2004 Swedish Institute of Computer Science. * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY * OF SUCH DAMAGE. * * This file is part of the lwIP TCP/IP stack. * * Author: Adam Dunkels <adam@sics.se> * */ #ifndef LWIP_HDR_API_H #define LWIP_HDR_API_H #include "lwip/opt.h" #if LWIP_NETCONN || LWIP_SOCKET /* don't build if not configured for use in lwipopts.h */ /* Note: Netconn API is always available when sockets are enabled - * sockets are implemented on top of them */ #include "lwip/arch.h" #include "lwip/netbuf.h" #include "lwip/sys.h" #include "lwip/ip_addr.h" #include "lwip/err.h" #ifdef __cplusplus extern "C" { #endif /* Throughout this file, IP addresses and port numbers are expected to be in * the same byte order as in the corresponding pcb. */ /* Flags for netconn_write (u8_t) */ #define NETCONN_NOFLAG 0x00 #define NETCONN_NOCOPY 0x00 /* Only for source code compatibility */ #define NETCONN_COPY 0x01 #define NETCONN_MORE 0x02 #define NETCONN_DONTBLOCK 0x04 /* Flags for struct netconn.flags (u8_t) */ /** Should this netconn avoid blocking? */ #define NETCONN_FLAG_NON_BLOCKING 0x02 /** Was the last connect action a non-blocking one? */ #define NETCONN_FLAG_IN_NONBLOCKING_CONNECT 0x04 /** If a nonblocking write has been rejected before, poll_tcp needs to check if the netconn is writable again */ #define NETCONN_FLAG_CHECK_WRITESPACE 0x10 #if LWIP_IPV6 /** If this flag is set then only IPv6 communication is allowed on the netconn. As per RFC#3493 this features defaults to OFF allowing dual-stack usage by default. 
*/ #define NETCONN_FLAG_IPV6_V6ONLY 0x20 #endif /* LWIP_IPV6 */ /* Helpers to process several netconn_types by the same code */ #define NETCONNTYPE_GROUP(t) ((t)&0xF0) #define NETCONNTYPE_DATAGRAM(t) ((t)&0xE0) #if LWIP_IPV6 #define NETCONN_TYPE_IPV6 0x08 #define NETCONNTYPE_ISIPV6(t) (((t)&NETCONN_TYPE_IPV6) != 0) #define NETCONNTYPE_ISUDPLITE(t) (((t)&0xF3) == NETCONN_UDPLITE) #define NETCONNTYPE_ISUDPNOCHKSUM(t) (((t)&0xF3) == NETCONN_UDPNOCHKSUM) #else /* LWIP_IPV6 */ #define NETCONNTYPE_ISIPV6(t) (0) #define NETCONNTYPE_ISUDPLITE(t) ((t) == NETCONN_UDPLITE) #define NETCONNTYPE_ISUDPNOCHKSUM(t) ((t) == NETCONN_UDPNOCHKSUM) #endif /* LWIP_IPV6 */ /** @ingroup netconn_common * Protocol family and type of the netconn */ enum netconn_type { NETCONN_INVALID = 0, /** TCP IPv4 */ NETCONN_TCP = 0x10, #if LWIP_IPV6 /** TCP IPv6 */ NETCONN_TCP_IPV6 = NETCONN_TCP | NETCONN_TYPE_IPV6 /* 0x18 */, #endif /* LWIP_IPV6 */ /** UDP IPv4 */ NETCONN_UDP = 0x20, /** UDP IPv4 lite */ NETCONN_UDPLITE = 0x21, /** UDP IPv4 no checksum */ NETCONN_UDPNOCHKSUM = 0x22, #if LWIP_IPV6 /** UDP IPv6 (dual-stack by default, unless you call @ref netconn_set_ipv6only) */ NETCONN_UDP_IPV6 = NETCONN_UDP | NETCONN_TYPE_IPV6 /* 0x28 */, /** UDP IPv6 lite (dual-stack by default, unless you call @ref netconn_set_ipv6only) */ NETCONN_UDPLITE_IPV6 = NETCONN_UDPLITE | NETCONN_TYPE_IPV6 /* 0x29 */, /** UDP IPv6 no checksum (dual-stack by default, unless you call @ref netconn_set_ipv6only) */ NETCONN_UDPNOCHKSUM_IPV6 = NETCONN_UDPNOCHKSUM | NETCONN_TYPE_IPV6 /* 0x2a */, #endif /* LWIP_IPV6 */ /** Raw connection IPv4 */ NETCONN_RAW = 0x40 #if LWIP_IPV6 /** Raw connection IPv6 (dual-stack by default, unless you call @ref netconn_set_ipv6only) */ , NETCONN_RAW_IPV6 = NETCONN_RAW | NETCONN_TYPE_IPV6 /* 0x48 */ #endif /* LWIP_IPV6 */ }; /** Current state of the netconn. Non-TCP netconns are always * in state NETCONN_NONE! */ enum netconn_state { NETCONN_NONE, NETCONN_WRITE, NETCONN_LISTEN, NETCONN_CONNECT, NETCONN_CLOSE }; /** Used to inform the callback function about changes * * Event explanation: * * In the netconn implementation, there are three ways to block a client: * * - accept mbox (sys_arch_mbox_fetch(&conn->acceptmbox, &accept_ptr, 0); in netconn_accept()) * - receive mbox (sys_arch_mbox_fetch(&conn->recvmbox, &buf, 0); in netconn_recv_data()) * - send queue is full (sys_arch_sem_wait(LWIP_API_MSG_SEM(msg), 0); in lwip_netconn_do_write()) * * The events have to be seen as events signaling the state of these mboxes/semaphores. For non-blocking * connections, you need to know in advance whether a call to a netconn function call would block or not, * and these events tell you about that. * * RCVPLUS events say: Safe to perform a potentially blocking call call once more. * They are counted in sockets - three RCVPLUS events for accept mbox means you are safe * to call netconn_accept 3 times without being blocked. * Same thing for receive mbox. * * RCVMINUS events say: Your call to to a possibly blocking function is "acknowledged". * Socket implementation decrements the counter. * * For TX, there is no need to count, its merely a flag. SENDPLUS means you may send something. * SENDPLUS occurs when enough data was delivered to peer so netconn_send() can be called again. * A SENDMINUS event occurs when the next call to a netconn_send() would be blocking. 
*/ enum netconn_evt { NETCONN_EVT_RCVPLUS, NETCONN_EVT_RCVMINUS, NETCONN_EVT_SENDPLUS, NETCONN_EVT_SENDMINUS, NETCONN_EVT_ERROR }; #if LWIP_IGMP || (LWIP_IPV6 && LWIP_IPV6_MLD) /** Used for netconn_join_leave_group() */ enum netconn_igmp { NETCONN_JOIN, NETCONN_LEAVE }; #endif /* LWIP_IGMP || (LWIP_IPV6 && LWIP_IPV6_MLD) */ #if LWIP_DNS /* Used for netconn_gethostbyname_addrtype(), these should match the DNS_ADDRTYPE defines in dns.h */ #define NETCONN_DNS_DEFAULT NETCONN_DNS_IPV4_IPV6 #define NETCONN_DNS_IPV4 0 #define NETCONN_DNS_IPV6 1 #define NETCONN_DNS_IPV4_IPV6 2 /* try to resolve IPv4 first, try IPv6 if IPv4 fails only */ #define NETCONN_DNS_IPV6_IPV4 3 /* try to resolve IPv6 first, try IPv4 if IPv6 fails only */ #endif /* LWIP_DNS */ /* forward-declare some structs to avoid to include their headers */ struct ip_pcb; struct tcp_pcb; struct udp_pcb; struct raw_pcb; struct netconn; struct api_msg; /** A callback prototype to inform about events for a netconn */ typedef void (* netconn_callback)(struct netconn *, enum netconn_evt, u16_t len); /** A netconn descriptor */ struct netconn { /** type of the netconn (TCP, UDP or RAW) */ enum netconn_type type; /** current state of the netconn */ enum netconn_state state; /** the lwIP internal protocol control block */ union { struct ip_pcb *ip; struct tcp_pcb *tcp; struct udp_pcb *udp; struct raw_pcb *raw; } pcb; /** the last error this netconn had */ err_t last_err; #if !LWIP_NETCONN_SEM_PER_THREAD /** sem that is used to synchronously execute functions in the core context */ sys_sem_t op_completed; #endif /** mbox where received packets are stored until they are fetched by the netconn application thread (can grow quite big) */ sys_mbox_t recvmbox; #if LWIP_TCP /** mbox where new connections are stored until processed by the application thread */ sys_mbox_t acceptmbox; #endif /* LWIP_TCP */ /** only used for socket layer */ #if LWIP_SOCKET int socket; #endif /* LWIP_SOCKET */ #if LWIP_SO_SNDTIMEO /** timeout to wait for sending data (which means enqueueing data for sending in internal buffers) in milliseconds */ s32_t send_timeout; #endif /* LWIP_SO_RCVTIMEO */ #if LWIP_SO_RCVTIMEO /** timeout in milliseconds to wait for new data to be received (or connections to arrive for listening netconns) */ int recv_timeout; #endif /* LWIP_SO_RCVTIMEO */ #if LWIP_SO_RCVBUF /** maximum amount of bytes queued in recvmbox not used for TCP: adjust TCP_WND instead! */ int recv_bufsize; /** number of bytes currently in recvmbox to be received, tested against recv_bufsize to limit bytes on recvmbox for UDP and RAW, used for FIONREAD */ int recv_avail; #endif /* LWIP_SO_RCVBUF */ #if LWIP_SO_LINGER /** values <0 mean linger is disabled, values > 0 are seconds to linger */ s16_t linger; #endif /* LWIP_SO_LINGER */ /** flags holding more netconn-internal state, see NETCONN_FLAG_* defines */ u8_t flags; #if LWIP_TCP /** TCP: when data passed to netconn_write doesn't fit into the send buffer, this temporarily stores how much is already sent. */ size_t write_offset; /** TCP: when data passed to netconn_write doesn't fit into the send buffer, this temporarily stores the message. Also used during connect and close. 
*/ struct api_msg *current_msg; #endif /* LWIP_TCP */ /** A callback function that is informed about events for this netconn */ netconn_callback callback; }; /** Register an Network connection event */ #define API_EVENT(c,e,l) if (c->callback) { \ (*c->callback)(c, e, l); \ } /** Set conn->last_err to err but don't overwrite fatal errors */ #define NETCONN_SET_SAFE_ERR(conn, err) do { if ((conn) != NULL) { \ SYS_ARCH_DECL_PROTECT(netconn_set_safe_err_lev); \ SYS_ARCH_PROTECT(netconn_set_safe_err_lev); \ if (!ERR_IS_FATAL((conn)->last_err)) { \ (conn)->last_err = err; \ } \ SYS_ARCH_UNPROTECT(netconn_set_safe_err_lev); \ }} while(0); /* Network connection functions: */ /** @ingroup netconn_common * Create new netconn connection * @param t @ref netconn_type */ #define netconn_new(t) netconn_new_with_proto_and_callback(t, 0, NULL) #define netconn_new_with_callback(t, c) netconn_new_with_proto_and_callback(t, 0, c) struct netconn *netconn_new_with_proto_and_callback(enum netconn_type t, u8_t proto, netconn_callback callback); err_t netconn_delete(struct netconn *conn); /** Get the type of a netconn (as enum netconn_type). */ #define netconn_type(conn) (conn->type) err_t netconn_getaddr(struct netconn *conn, ip_addr_t *addr, u16_t *port, u8_t local); /** @ingroup netconn_common */ #define netconn_peer(c,i,p) netconn_getaddr(c,i,p,0) /** @ingroup netconn_common */ #define netconn_addr(c,i,p) netconn_getaddr(c,i,p,1) err_t netconn_bind(struct netconn *conn, const ip_addr_t *addr, u16_t port); err_t netconn_connect(struct netconn *conn, const ip_addr_t *addr, u16_t port); err_t netconn_disconnect (struct netconn *conn); err_t netconn_listen_with_backlog(struct netconn *conn, u8_t backlog); /** @ingroup netconn_tcp */ #define netconn_listen(conn) netconn_listen_with_backlog(conn, TCP_DEFAULT_LISTEN_BACKLOG) err_t netconn_accept(struct netconn *conn, struct netconn **new_conn); err_t netconn_recv(struct netconn *conn, struct netbuf **new_buf); err_t netconn_recv_tcp_pbuf(struct netconn *conn, struct pbuf **new_buf); err_t netconn_sendto(struct netconn *conn, struct netbuf *buf, const ip_addr_t *addr, u16_t port); err_t netconn_send(struct netconn *conn, struct netbuf *buf); err_t netconn_write_partly(struct netconn *conn, const void *dataptr, size_t size, u8_t apiflags, size_t *bytes_written); /** @ingroup netconn_tcp */ #define netconn_write(conn, dataptr, size, apiflags) \ netconn_write_partly(conn, dataptr, size, apiflags, NULL) err_t netconn_close(struct netconn *conn); err_t netconn_shutdown(struct netconn *conn, u8_t shut_rx, u8_t shut_tx); #if LWIP_IGMP || (LWIP_IPV6 && LWIP_IPV6_MLD) err_t netconn_join_leave_group(struct netconn *conn, const ip_addr_t *multiaddr, const ip_addr_t *netif_addr, enum netconn_igmp join_or_leave); #endif /* LWIP_IGMP || (LWIP_IPV6 && LWIP_IPV6_MLD) */ #if LWIP_DNS #if LWIP_IPV4 && LWIP_IPV6 err_t netconn_gethostbyname_addrtype(const char *name, ip_addr_t *addr, u8_t dns_addrtype); #define netconn_gethostbyname(name, addr) netconn_gethostbyname_addrtype(name, addr, NETCONN_DNS_DEFAULT) #else /* LWIP_IPV4 && LWIP_IPV6 */ err_t netconn_gethostbyname(const char *name, ip_addr_t *addr); #define netconn_gethostbyname_addrtype(name, addr, dns_addrtype) netconn_gethostbyname(name, addr) #endif /* LWIP_IPV4 && LWIP_IPV6 */ #endif /* LWIP_DNS */ #define netconn_err(conn) ((conn)->last_err) #define netconn_recv_bufsize(conn) ((conn)->recv_bufsize) /** Set the blocking status of netconn calls (@todo: write/send is missing) */ #define netconn_set_nonblocking(conn, val) do { 
if(val) { \ (conn)->flags |= NETCONN_FLAG_NON_BLOCKING; \ } else { \ (conn)->flags &= ~ NETCONN_FLAG_NON_BLOCKING; }} while(0) /** Get the blocking status of netconn calls (@todo: write/send is missing) */ #define netconn_is_nonblocking(conn) (((conn)->flags & NETCONN_FLAG_NON_BLOCKING) != 0) #if LWIP_IPV6 /** @ingroup netconn_common * TCP: Set the IPv6 ONLY status of netconn calls (see NETCONN_FLAG_IPV6_V6ONLY) */ #define netconn_set_ipv6only(conn, val) do { if(val) { \ (conn)->flags |= NETCONN_FLAG_IPV6_V6ONLY; \ } else { \ (conn)->flags &= ~ NETCONN_FLAG_IPV6_V6ONLY; }} while(0) /** @ingroup netconn_common * TCP: Get the IPv6 ONLY status of netconn calls (see NETCONN_FLAG_IPV6_V6ONLY) */ #define netconn_get_ipv6only(conn) (((conn)->flags & NETCONN_FLAG_IPV6_V6ONLY) != 0) #endif /* LWIP_IPV6 */ #if LWIP_SO_SNDTIMEO /** Set the send timeout in milliseconds */ #define netconn_set_sendtimeout(conn, timeout) ((conn)->send_timeout = (timeout)) /** Get the send timeout in milliseconds */ #define netconn_get_sendtimeout(conn) ((conn)->send_timeout) #endif /* LWIP_SO_SNDTIMEO */ #if LWIP_SO_RCVTIMEO /** Set the receive timeout in milliseconds */ #define netconn_set_recvtimeout(conn, timeout) ((conn)->recv_timeout = (timeout)) /** Get the receive timeout in milliseconds */ #define netconn_get_recvtimeout(conn) ((conn)->recv_timeout) #endif /* LWIP_SO_RCVTIMEO */ #if LWIP_SO_RCVBUF /** Set the receive buffer in bytes */ #define netconn_set_recvbufsize(conn, recvbufsize) ((conn)->recv_bufsize = (recvbufsize)) /** Get the receive buffer in bytes */ #define netconn_get_recvbufsize(conn) ((conn)->recv_bufsize) #endif /* LWIP_SO_RCVBUF*/ #if LWIP_NETCONN_SEM_PER_THREAD void netconn_thread_init(void); void netconn_thread_cleanup(void); #else /* LWIP_NETCONN_SEM_PER_THREAD */ #define netconn_thread_init() #define netconn_thread_cleanup() #endif /* LWIP_NETCONN_SEM_PER_THREAD */ #ifdef __cplusplus } #endif #endif /* LWIP_NETCONN || LWIP_SOCKET */ #endif /* LWIP_HDR_API_H */
2301_81045437/classic-platform
communication/lwip-2.0.3/src/include/lwip/api.h
C
unknown
16,367
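api.h above declares the sequential netconn API. A minimal sketch of the classic blocking TCP echo loop built on those calls, assuming it runs in a dedicated application thread (not in tcpip_thread); the port and the thread name are illustrative:

#include "lwip/api.h"
#include "lwip/sys.h"

/* Thread body: blocking TCP echo server on port 2000. */
static void netconn_echo_thread(void *arg)
{
  struct netconn *conn, *newconn;
  LWIP_UNUSED_ARG(arg);

  conn = netconn_new(NETCONN_TCP);
  netconn_bind(conn, IP_ADDR_ANY, 2000);
  netconn_listen(conn);

  while (netconn_accept(conn, &newconn) == ERR_OK) {
    struct netbuf *buf;
    while (netconn_recv(newconn, &buf) == ERR_OK) {
      void *data;
      u16_t len;
      do {                                   /* a netbuf may span several pbufs */
        netbuf_data(buf, &data, &len);
        netconn_write(newconn, data, len, NETCONN_COPY);
      } while (netbuf_next(buf) >= 0);
      netbuf_delete(buf);
    }
    netconn_close(newconn);
    netconn_delete(newconn);
  }
}

/* Started e.g. with: sys_thread_new("echo", netconn_echo_thread, NULL, 512, 3); */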
/* * Copyright (c) 2001-2003 Swedish Institute of Computer Science. * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY * OF SUCH DAMAGE. * * This file is part of the lwIP TCP/IP stack. * * Author: Adam Dunkels <adam@sics.se> * */ #ifndef LWIP_HDR_APPS_FS_H #define LWIP_HDR_APPS_FS_H #include "httpd_opts.h" #include "lwip/err.h" #ifdef __cplusplus extern "C" { #endif #define FS_READ_EOF -1 #define FS_READ_DELAYED -2 #if HTTPD_PRECALCULATED_CHECKSUM struct fsdata_chksum { u32_t offset; u16_t chksum; u16_t len; }; #endif /* HTTPD_PRECALCULATED_CHECKSUM */ #define FS_FILE_FLAGS_HEADER_INCLUDED 0x01 #define FS_FILE_FLAGS_HEADER_PERSISTENT 0x02 struct fs_file { const char *data; int len; int index; void *pextension; #if HTTPD_PRECALCULATED_CHECKSUM const struct fsdata_chksum *chksum; u16_t chksum_count; #endif /* HTTPD_PRECALCULATED_CHECKSUM */ u8_t flags; #if LWIP_HTTPD_CUSTOM_FILES u8_t is_custom_file; #endif /* LWIP_HTTPD_CUSTOM_FILES */ #if LWIP_HTTPD_FILE_STATE void *state; #endif /* LWIP_HTTPD_FILE_STATE */ }; #if LWIP_HTTPD_FS_ASYNC_READ typedef void (*fs_wait_cb)(void *arg); #endif /* LWIP_HTTPD_FS_ASYNC_READ */ err_t fs_open(struct fs_file *file, const char *name); void fs_close(struct fs_file *file); #if LWIP_HTTPD_DYNAMIC_FILE_READ #if LWIP_HTTPD_FS_ASYNC_READ int fs_read_async(struct fs_file *file, char *buffer, int count, fs_wait_cb callback_fn, void *callback_arg); #else /* LWIP_HTTPD_FS_ASYNC_READ */ int fs_read(struct fs_file *file, char *buffer, int count); #endif /* LWIP_HTTPD_FS_ASYNC_READ */ #endif /* LWIP_HTTPD_DYNAMIC_FILE_READ */ #if LWIP_HTTPD_FS_ASYNC_READ int fs_is_file_ready(struct fs_file *file, fs_wait_cb callback_fn, void *callback_arg); #endif /* LWIP_HTTPD_FS_ASYNC_READ */ int fs_bytes_left(struct fs_file *file); #if LWIP_HTTPD_FILE_STATE /** This user-defined function is called when a file is opened. */ void *fs_state_init(struct fs_file *file, const char *name); /** This user-defined function is called when a file is closed. */ void fs_state_free(struct fs_file *file, void *state); #endif /* #if LWIP_HTTPD_FILE_STATE */ #ifdef __cplusplus } #endif #endif /* LWIP_HDR_APPS_FS_H */
2301_81045437/classic-platform
communication/lwip-2.0.3/src/include/lwip/apps/fs.h
C
unknown
3,670
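fs.h above defines struct fs_file used by the httpd file system. A sketch of the optional custom-file hooks (enabled by LWIP_HTTPD_CUSTOM_FILES, see the httpd options record below) serving one RAM-resident page; the file name and page content are made up for illustration:

#include <string.h>
#include "lwip/apps/fs.h"

static const char status_page[] = "<html><body>status: ok</body></html>";

int fs_open_custom(struct fs_file *file, const char *name)
{
  if (strcmp(name, "/status.html") == 0) {
    memset(file, 0, sizeof(struct fs_file));
    file->data  = status_page;
    file->len   = sizeof(status_page) - 1;
    file->index = file->len;
    file->flags = 0;  /* no HTTP header in the data -> needs LWIP_HTTPD_DYNAMIC_HEADERS==1 */
    return 1;         /* handled here */
  }
  return 0;           /* fall back to the generated fsdata */
}

void fs_close_custom(struct fs_file *file)
{
  LWIP_UNUSED_ARG(file);  /* nothing was allocated, nothing to free */
}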
/** * @file * HTTP server */ /* * Copyright (c) 2001-2003 Swedish Institute of Computer Science. * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY * OF SUCH DAMAGE. * * This file is part of the lwIP TCP/IP stack. * * Author: Adam Dunkels <adam@sics.se> * * This version of the file has been modified by Texas Instruments to offer * simple server-side-include (SSI) and Common Gateway Interface (CGI) * capability. */ #ifndef LWIP_HDR_APPS_HTTPD_H #define LWIP_HDR_APPS_HTTPD_H #include "httpd_opts.h" #include "lwip/err.h" #include "lwip/pbuf.h" #ifdef __cplusplus extern "C" { #endif #if LWIP_HTTPD_CGI /* * Function pointer for a CGI script handler. * * This function is called each time the HTTPD server is asked for a file * whose name was previously registered as a CGI function using a call to * http_set_cgi_handler. The iIndex parameter provides the index of the * CGI within the ppcURLs array passed to http_set_cgi_handler. Parameters * pcParam and pcValue provide access to the parameters provided along with * the URI. iNumParams provides a count of the entries in the pcParam and * pcValue arrays. Each entry in the pcParam array contains the name of a * parameter with the corresponding entry in the pcValue array containing the * value for that parameter. Note that pcParam may contain multiple elements * with the same name if, for example, a multi-selection list control is used * in the form generating the data. * * The function should return a pointer to a character string which is the * path and filename of the response that is to be sent to the connected * browser, for example "/thanks.htm" or "/response/error.ssi". * * The maximum number of parameters that will be passed to this function via * iNumParams is defined by LWIP_HTTPD_MAX_CGI_PARAMETERS. Any parameters in the incoming * HTTP request above this number will be discarded. * * Requests intended for use by this CGI mechanism must be sent using the GET * method (which encodes all parameters within the URI rather than in a block * later in the request). Attempts to use the POST method will result in the * request being ignored. 
* */ typedef const char *(*tCGIHandler)(int iIndex, int iNumParams, char *pcParam[], char *pcValue[]); /* * Structure defining the base filename (URL) of a CGI and the associated * function which is to be called when that URL is requested. */ typedef struct { const char *pcCGIName; tCGIHandler pfnCGIHandler; } tCGI; void http_set_cgi_handlers(const tCGI *pCGIs, int iNumHandlers); #endif /* LWIP_HTTPD_CGI */ #if LWIP_HTTPD_CGI || LWIP_HTTPD_CGI_SSI #if LWIP_HTTPD_CGI_SSI /** Define this generic CGI handler in your application. * It is called once for every URI with parameters. * The parameters can be stored to */ extern void httpd_cgi_handler(const char* uri, int iNumParams, char **pcParam, char **pcValue #if defined(LWIP_HTTPD_FILE_STATE) && LWIP_HTTPD_FILE_STATE , void *connection_state #endif /* LWIP_HTTPD_FILE_STATE */ ); #endif /* LWIP_HTTPD_CGI_SSI */ #endif /* LWIP_HTTPD_CGI || LWIP_HTTPD_CGI_SSI */ #if LWIP_HTTPD_SSI /* * Function pointer for the SSI tag handler callback. * * This function will be called each time the HTTPD server detects a tag of the * form <!--#name--> in a .shtml, .ssi or .shtm file where "name" appears as * one of the tags supplied to http_set_ssi_handler in the ppcTags array. The * returned insert string, which will be appended after the the string * "<!--#name-->" in file sent back to the client,should be written to pointer * pcInsert. iInsertLen contains the size of the buffer pointed to by * pcInsert. The iIndex parameter provides the zero-based index of the tag as * found in the ppcTags array and identifies the tag that is to be processed. * * The handler returns the number of characters written to pcInsert excluding * any terminating NULL or a negative number to indicate a failure (tag not * recognized, for example). * * Note that the behavior of this SSI mechanism is somewhat different from the * "normal" SSI processing as found in, for example, the Apache web server. In * this case, the inserted text is appended following the SSI tag rather than * replacing the tag entirely. This allows for an implementation that does not * require significant additional buffering of output data yet which will still * offer usable SSI functionality. One downside to this approach is when * attempting to use SSI within JavaScript. The SSI tag is structured to * resemble an HTML comment but this syntax does not constitute a comment * within JavaScript and, hence, leaving the tag in place will result in * problems in these cases. To work around this, any SSI tag which needs to * output JavaScript code must do so in an encapsulated way, sending the whole * HTML <script>...</script> section as a single include. */ typedef u16_t (*tSSIHandler)( #if LWIP_HTTPD_SSI_RAW const char* ssi_tag_name, #else /* LWIP_HTTPD_SSI_RAW */ int iIndex, #endif /* LWIP_HTTPD_SSI_RAW */ char *pcInsert, int iInsertLen #if LWIP_HTTPD_SSI_MULTIPART , u16_t current_tag_part, u16_t *next_tag_part #endif /* LWIP_HTTPD_SSI_MULTIPART */ #if defined(LWIP_HTTPD_FILE_STATE) && LWIP_HTTPD_FILE_STATE , void *connection_state #endif /* LWIP_HTTPD_FILE_STATE */ ); /** Set the SSI handler function * (if LWIP_HTTPD_SSI_RAW==1, only the first argument is used) */ void http_set_ssi_handler(tSSIHandler pfnSSIHandler, const char **ppcTags, int iNumTags); /** For LWIP_HTTPD_SSI_RAW==1, return this to indicate the tag is unknown. * In this case, the webserver writes a warning into the page. * You can also just return 0 to write nothing for unknown tags. 
*/ #define HTTPD_SSI_TAG_UNKNOWN 0xFFFF #endif /* LWIP_HTTPD_SSI */ #if LWIP_HTTPD_SUPPORT_POST /* These functions must be implemented by the application */ /** Called when a POST request has been received. The application can decide * whether to accept it or not. * * @param connection Unique connection identifier, valid until httpd_post_end * is called. * @param uri The HTTP header URI receiving the POST request. * @param http_request The raw HTTP request (the first packet, normally). * @param http_request_len Size of 'http_request'. * @param content_len Content-Length from HTTP header. * @param response_uri Filename of response file, to be filled when denying the * request * @param response_uri_len Size of the 'response_uri' buffer. * @param post_auto_wnd Set this to 0 to let the callback code handle window * updates by calling 'httpd_post_data_recved' (to throttle rx speed) * default is 1 (httpd handles window updates automatically) * @return ERR_OK: Accept the POST request, data may be passed in * another err_t: Deny the POST request, send back 'bad request'. */ err_t httpd_post_begin(void *connection, const char *uri, const char *http_request, u16_t http_request_len, int content_len, char *response_uri, u16_t response_uri_len, u8_t *post_auto_wnd); /** Called for each pbuf of data that has been received for a POST. * ATTENTION: The application is responsible for freeing the pbufs passed in! * * @param connection Unique connection identifier. * @param p Received data. * @return ERR_OK: Data accepted. * another err_t: Data denied, http_post_get_response_uri will be called. */ err_t httpd_post_receive_data(void *connection, struct pbuf *p); /** Called when all data is received or when the connection is closed. * The application must return the filename/URI of a file to send in response * to this POST request. If the response_uri buffer is untouched, a 404 * response is returned. * * @param connection Unique connection identifier. * @param response_uri Filename of response file, to be filled when denying the request * @param response_uri_len Size of the 'response_uri' buffer. */ void httpd_post_finished(void *connection, char *response_uri, u16_t response_uri_len); #if LWIP_HTTPD_POST_MANUAL_WND void httpd_post_data_recved(void *connection, u16_t recved_len); #endif /* LWIP_HTTPD_POST_MANUAL_WND */ #endif /* LWIP_HTTPD_SUPPORT_POST */ void httpd_init(void); #ifdef __cplusplus } #endif #endif /* LWIP_HTTPD_H */
2301_81045437/classic-platform
communication/lwip-2.0.3/src/include/lwip/apps/httpd.h
C
unknown
10,203
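httpd.h above documents the CGI handler contract. A sketch of registering one old-style CGI handler, assuming LWIP_HTTPD_CGI==1; the URL "/leds.cgi", the "led" parameter and the response file "/done.html" are placeholders:

#include <string.h>
#include "lwip/apps/httpd.h"

/* Handler invoked for GET /leds.cgi?led=... ; returns the page to send back. */
static const char *leds_cgi_handler(int iIndex, int iNumParams,
                                    char *pcParam[], char *pcValue[])
{
  int i;
  LWIP_UNUSED_ARG(iIndex);
  for (i = 0; i < iNumParams; i++) {
    if (strcmp(pcParam[i], "led") == 0) {
      /* act on pcValue[i] here */
    }
  }
  return "/done.html";
}

static const tCGI cgi_handlers[] = {
  { "/leds.cgi", leds_cgi_handler }
};

void my_http_init(void)
{
  httpd_init();
  http_set_cgi_handlers(cgi_handlers, sizeof(cgi_handlers) / sizeof(cgi_handlers[0]));
}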
/** * @file * HTTP server options list */ /* * Copyright (c) 2001-2003 Swedish Institute of Computer Science. * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY * OF SUCH DAMAGE. * * This file is part of the lwIP TCP/IP stack. * * Author: Adam Dunkels <adam@sics.se> * * This version of the file has been modified by Texas Instruments to offer * simple server-side-include (SSI) and Common Gateway Interface (CGI) * capability. */ #ifndef LWIP_HDR_APPS_HTTPD_OPTS_H #define LWIP_HDR_APPS_HTTPD_OPTS_H #include "lwip/opt.h" /** * @defgroup httpd_opts Options * @ingroup httpd * @{ */ /** Set this to 1 to support CGI (old style) */ #if !defined LWIP_HTTPD_CGI || defined __DOXYGEN__ #define LWIP_HTTPD_CGI 0 #endif /** Set this to 1 to support CGI (new style) */ #if !defined LWIP_HTTPD_CGI_SSI || defined __DOXYGEN__ #define LWIP_HTTPD_CGI_SSI 0 #endif /** Set this to 1 to support SSI (Server-Side-Includes) */ #if !defined LWIP_HTTPD_SSI || defined __DOXYGEN__ #define LWIP_HTTPD_SSI 0 #endif /** Set this to 1 to implement an SSI tag handler callback that gets a const char* * to the tag (instead of an index into a pre-registered array of known tags) */ #if !defined LWIP_HTTPD_SSI_RAW || defined __DOXYGEN__ #define LWIP_HTTPD_SSI_RAW 0 #endif /** Set this to 1 to support HTTP POST */ #if !defined LWIP_HTTPD_SUPPORT_POST || defined __DOXYGEN__ #define LWIP_HTTPD_SUPPORT_POST 0 #endif /* The maximum number of parameters that the CGI handler can be sent. */ #if !defined LWIP_HTTPD_MAX_CGI_PARAMETERS || defined __DOXYGEN__ #define LWIP_HTTPD_MAX_CGI_PARAMETERS 16 #endif /** LWIP_HTTPD_SSI_MULTIPART==1: SSI handler function is called with 2 more * arguments indicating a counter for insert string that are too long to be * inserted at once: the SSI handler function must then set 'next_tag_part' * which will be passed back to it in the next call. 
*/ #if !defined LWIP_HTTPD_SSI_MULTIPART || defined __DOXYGEN__ #define LWIP_HTTPD_SSI_MULTIPART 0 #endif /* The maximum length of the string comprising the tag name */ #if !defined LWIP_HTTPD_MAX_TAG_NAME_LEN || defined __DOXYGEN__ #define LWIP_HTTPD_MAX_TAG_NAME_LEN 8 #endif /* The maximum length of string that can be returned to replace any given tag */ #if !defined LWIP_HTTPD_MAX_TAG_INSERT_LEN || defined __DOXYGEN__ #define LWIP_HTTPD_MAX_TAG_INSERT_LEN 192 #endif #if !defined LWIP_HTTPD_POST_MANUAL_WND || defined __DOXYGEN__ #define LWIP_HTTPD_POST_MANUAL_WND 0 #endif /** This string is passed in the HTTP header as "Server: " */ #if !defined HTTPD_SERVER_AGENT || defined __DOXYGEN__ #define HTTPD_SERVER_AGENT "lwIP/" LWIP_VERSION_STRING " (http://savannah.nongnu.org/projects/lwip)" #endif /** Set this to 1 if you want to include code that creates HTTP headers * at runtime. Default is off: HTTP headers are then created statically * by the makefsdata tool. Static headers mean smaller code size, but * the (readonly) fsdata will grow a bit as every file includes the HTTP * header. */ #if !defined LWIP_HTTPD_DYNAMIC_HEADERS || defined __DOXYGEN__ #define LWIP_HTTPD_DYNAMIC_HEADERS 0 #endif #if !defined HTTPD_DEBUG || defined __DOXYGEN__ #define HTTPD_DEBUG LWIP_DBG_OFF #endif /** Set this to 1 to use a memp pool for allocating * struct http_state instead of the heap. */ #if !defined HTTPD_USE_MEM_POOL || defined __DOXYGEN__ #define HTTPD_USE_MEM_POOL 0 #endif /** The server port for HTTPD to use */ #if !defined HTTPD_SERVER_PORT || defined __DOXYGEN__ #define HTTPD_SERVER_PORT 80 #endif /** Maximum retries before the connection is aborted/closed. * - number of times pcb->poll is called -> default is 4*500ms = 2s; * - reset when pcb->sent is called */ #if !defined HTTPD_MAX_RETRIES || defined __DOXYGEN__ #define HTTPD_MAX_RETRIES 4 #endif /** The poll delay is X*500ms */ #if !defined HTTPD_POLL_INTERVAL || defined __DOXYGEN__ #define HTTPD_POLL_INTERVAL 4 #endif /** Priority for tcp pcbs created by HTTPD (very low by default). * Lower priorities get killed first when running out of memory. */ #if !defined HTTPD_TCP_PRIO || defined __DOXYGEN__ #define HTTPD_TCP_PRIO TCP_PRIO_MIN #endif /** Set this to 1 to enable timing each file sent */ #if !defined LWIP_HTTPD_TIMING || defined __DOXYGEN__ #define LWIP_HTTPD_TIMING 0 #endif /** Set this to 1 to enable timing each file sent */ #if !defined HTTPD_DEBUG_TIMING || defined __DOXYGEN__ #define HTTPD_DEBUG_TIMING LWIP_DBG_OFF #endif /** Set this to one to show error pages when parsing a request fails instead of simply closing the connection. */ #if !defined LWIP_HTTPD_SUPPORT_EXTSTATUS || defined __DOXYGEN__ #define LWIP_HTTPD_SUPPORT_EXTSTATUS 0 #endif /** Set this to 0 to drop support for HTTP/0.9 clients (to save some bytes) */ #if !defined LWIP_HTTPD_SUPPORT_V09 || defined __DOXYGEN__ #define LWIP_HTTPD_SUPPORT_V09 1 #endif /** Set this to 1 to enable HTTP/1.1 persistent connections. * ATTENTION: If the generated file system includes HTTP headers, these must * include the "Connection: keep-alive" header (pass argument "-11" to makefsdata). 
*/ #if !defined LWIP_HTTPD_SUPPORT_11_KEEPALIVE || defined __DOXYGEN__ #define LWIP_HTTPD_SUPPORT_11_KEEPALIVE 0 #endif /** Set this to 1 to support HTTP request coming in in multiple packets/pbufs */ #if !defined LWIP_HTTPD_SUPPORT_REQUESTLIST || defined __DOXYGEN__ #define LWIP_HTTPD_SUPPORT_REQUESTLIST 1 #endif #if LWIP_HTTPD_SUPPORT_REQUESTLIST /** Number of rx pbufs to enqueue to parse an incoming request (up to the first newline) */ #if !defined LWIP_HTTPD_REQ_QUEUELEN || defined __DOXYGEN__ #define LWIP_HTTPD_REQ_QUEUELEN 5 #endif /** Number of (TCP payload-) bytes (in pbufs) to enqueue to parse and incoming request (up to the first double-newline) */ #if !defined LWIP_HTTPD_REQ_BUFSIZE || defined __DOXYGEN__ #define LWIP_HTTPD_REQ_BUFSIZE LWIP_HTTPD_MAX_REQ_LENGTH #endif /** Defines the maximum length of a HTTP request line (up to the first CRLF, copied from pbuf into this a global buffer when pbuf- or packet-queues are received - otherwise the input pbuf is used directly) */ #if !defined LWIP_HTTPD_MAX_REQ_LENGTH || defined __DOXYGEN__ #define LWIP_HTTPD_MAX_REQ_LENGTH LWIP_MIN(1023, (LWIP_HTTPD_REQ_QUEUELEN * PBUF_POOL_BUFSIZE)) #endif #endif /* LWIP_HTTPD_SUPPORT_REQUESTLIST */ /** This is the size of a static buffer used when URIs end with '/'. * In this buffer, the directory requested is concatenated with all the * configured default file names. * Set to 0 to disable checking default filenames on non-root directories. */ #if !defined LWIP_HTTPD_MAX_REQUEST_URI_LEN || defined __DOXYGEN__ #define LWIP_HTTPD_MAX_REQUEST_URI_LEN 63 #endif /** Maximum length of the filename to send as response to a POST request, * filled in by the application when a POST is finished. */ #if !defined LWIP_HTTPD_POST_MAX_RESPONSE_URI_LEN || defined __DOXYGEN__ #define LWIP_HTTPD_POST_MAX_RESPONSE_URI_LEN 63 #endif /** Set this to 0 to not send the SSI tag (default is on, so the tag will * be sent in the HTML page */ #if !defined LWIP_HTTPD_SSI_INCLUDE_TAG || defined __DOXYGEN__ #define LWIP_HTTPD_SSI_INCLUDE_TAG 1 #endif /** Set this to 1 to call tcp_abort when tcp_close fails with memory error. * This can be used to prevent consuming all memory in situations where the * HTTP server has low priority compared to other communication. */ #if !defined LWIP_HTTPD_ABORT_ON_CLOSE_MEM_ERROR || defined __DOXYGEN__ #define LWIP_HTTPD_ABORT_ON_CLOSE_MEM_ERROR 0 #endif /** Set this to 1 to kill the oldest connection when running out of * memory for 'struct http_state' or 'struct http_ssi_state'. * ATTENTION: This puts all connections on a linked list, so may be kind of slow. */ #if !defined LWIP_HTTPD_KILL_OLD_ON_CONNECTIONS_EXCEEDED || defined __DOXYGEN__ #define LWIP_HTTPD_KILL_OLD_ON_CONNECTIONS_EXCEEDED 0 #endif /** Set this to 1 to send URIs without extension without headers * (who uses this at all??) */ #if !defined LWIP_HTTPD_OMIT_HEADER_FOR_EXTENSIONLESS_URI || defined __DOXYGEN__ #define LWIP_HTTPD_OMIT_HEADER_FOR_EXTENSIONLESS_URI 0 #endif /** Default: Tags are sent from struct http_state and are therefore volatile */ #if !defined HTTP_IS_TAG_VOLATILE || defined __DOXYGEN__ #define HTTP_IS_TAG_VOLATILE(ptr) TCP_WRITE_FLAG_COPY #endif /* By default, the httpd is limited to send 2*pcb->mss to keep resource usage low when http is not an important protocol in the device. */ #if !defined HTTPD_LIMIT_SENDING_TO_2MSS || defined __DOXYGEN__ #define HTTPD_LIMIT_SENDING_TO_2MSS 1 #endif /* Define this to a function that returns the maximum amount of data to enqueue. 
The function have this signature: u16_t fn(struct tcp_pcb* pcb); */ #if !defined HTTPD_MAX_WRITE_LEN || defined __DOXYGEN__ #if HTTPD_LIMIT_SENDING_TO_2MSS #define HTTPD_MAX_WRITE_LEN(pcb) (2 * tcp_mss(pcb)) #endif #endif /*------------------- FS OPTIONS -------------------*/ /** Set this to 1 and provide the functions: * - "int fs_open_custom(struct fs_file *file, const char *name)" * Called first for every opened file to allow opening files * that are not included in fsdata(_custom).c * - "void fs_close_custom(struct fs_file *file)" * Called to free resources allocated by fs_open_custom(). */ #if !defined LWIP_HTTPD_CUSTOM_FILES || defined __DOXYGEN__ #define LWIP_HTTPD_CUSTOM_FILES 0 #endif /** Set this to 1 to support fs_read() to dynamically read file data. * Without this (default=off), only one-block files are supported, * and the contents must be ready after fs_open(). */ #if !defined LWIP_HTTPD_DYNAMIC_FILE_READ || defined __DOXYGEN__ #define LWIP_HTTPD_DYNAMIC_FILE_READ 0 #endif /** Set this to 1 to include an application state argument per file * that is opened. This allows to keep a state per connection/file. */ #if !defined LWIP_HTTPD_FILE_STATE || defined __DOXYGEN__ #define LWIP_HTTPD_FILE_STATE 0 #endif /** HTTPD_PRECALCULATED_CHECKSUM==1: include precompiled checksums for * predefined (MSS-sized) chunks of the files to prevent having to calculate * the checksums at runtime. */ #if !defined HTTPD_PRECALCULATED_CHECKSUM || defined __DOXYGEN__ #define HTTPD_PRECALCULATED_CHECKSUM 0 #endif /** LWIP_HTTPD_FS_ASYNC_READ==1: support asynchronous read operations * (fs_read_async returns FS_READ_DELAYED and calls a callback when finished). */ #if !defined LWIP_HTTPD_FS_ASYNC_READ || defined __DOXYGEN__ #define LWIP_HTTPD_FS_ASYNC_READ 0 #endif /** Set this to 1 to include "fsdata_custom.c" instead of "fsdata.c" for the * file system (to prevent changing the file included in CVS) */ #if !defined HTTPD_USE_CUSTOM_FSDATA || defined __DOXYGEN__ #define HTTPD_USE_CUSTOM_FSDATA 0 #endif /** * @} */ #endif /* LWIP_HDR_APPS_HTTPD_OPTS_H */
2301_81045437/classic-platform
communication/lwip-2.0.3/src/include/lwip/apps/httpd_opts.h
C
unknown
12,678
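httpd_opts.h above lists compile-time switches meant to be overridden from lwipopts.h. An illustrative lwipopts.h fragment enabling a few of them; the values are examples, not recommendations:

/* lwipopts.h fragment (example values): */
#define LWIP_HTTPD_CGI               1    /* enable old-style CGI handlers */
#define LWIP_HTTPD_SSI               1    /* enable <!--#tag--> processing */
#define LWIP_HTTPD_SSI_INCLUDE_TAG   0    /* do not echo the tag itself into the page */
#define LWIP_HTTPD_SUPPORT_POST      1
#define LWIP_HTTPD_MAX_TAG_NAME_LEN  16   /* default is 8 */
#define HTTPD_SERVER_PORT            8080 /* default is 80 */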
/** * @file * lwIP iPerf server implementation */ /* * Copyright (c) 2014 Simon Goldschmidt * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY * OF SUCH DAMAGE. * * This file is part of the lwIP TCP/IP stack. * * Author: Simon Goldschmidt * */ #ifndef LWIP_HDR_APPS_LWIPERF_H #define LWIP_HDR_APPS_LWIPERF_H #include "lwip/opt.h" #include "lwip/ip_addr.h" #ifdef __cplusplus extern "C" { #endif #define LWIPERF_TCP_PORT_DEFAULT 5001 /** lwIPerf test results */ enum lwiperf_report_type { /** The server side test is done */ LWIPERF_TCP_DONE_SERVER, /** The client side test is done */ LWIPERF_TCP_DONE_CLIENT, /** Local error lead to test abort */ LWIPERF_TCP_ABORTED_LOCAL, /** Data check error lead to test abort */ LWIPERF_TCP_ABORTED_LOCAL_DATAERROR, /** Transmit error lead to test abort */ LWIPERF_TCP_ABORTED_LOCAL_TXERROR, /** Remote side aborted the test */ LWIPERF_TCP_ABORTED_REMOTE }; /** Prototype of a report function that is called when a session is finished. This report function can show the test results. @param report_type contains the test result */ typedef void (*lwiperf_report_fn)(void *arg, enum lwiperf_report_type report_type, const ip_addr_t* local_addr, u16_t local_port, const ip_addr_t* remote_addr, u16_t remote_port, u32_t bytes_transferred, u32_t ms_duration, u32_t bandwidth_kbitpsec); void* lwiperf_start_tcp_server(const ip_addr_t* local_addr, u16_t local_port, lwiperf_report_fn report_fn, void* report_arg); void* lwiperf_start_tcp_server_default(lwiperf_report_fn report_fn, void* report_arg); void lwiperf_abort(void* lwiperf_session); #ifdef __cplusplus } #endif #endif /* LWIP_HDR_APPS_LWIPERF_H */
2301_81045437/classic-platform
communication/lwip-2.0.3/src/include/lwip/apps/lwiperf.h
C
unknown
3,172
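lwiperf.h above exposes a start function and a report callback. A sketch that starts the default TCP server (port 5001) and logs each finished session, assuming it is called from tcpip_thread context; the log text and function names are illustrative:

#include "lwip/apps/lwiperf.h"

static void iperf_report(void *arg, enum lwiperf_report_type report_type,
                         const ip_addr_t *local_addr, u16_t local_port,
                         const ip_addr_t *remote_addr, u16_t remote_port,
                         u32_t bytes_transferred, u32_t ms_duration,
                         u32_t bandwidth_kbitpsec)
{
  LWIP_UNUSED_ARG(arg);
  LWIP_UNUSED_ARG(local_addr);  LWIP_UNUSED_ARG(local_port);
  LWIP_UNUSED_ARG(remote_addr); LWIP_UNUSED_ARG(remote_port);
  LWIP_PLATFORM_DIAG(("iperf report %d: %"U32_F" bytes in %"U32_F" ms (%"U32_F" kbit/s)\n",
                      (int)report_type, bytes_transferred, ms_duration, bandwidth_kbitpsec));
}

void my_iperf_init(void)
{
  void *session = lwiperf_start_tcp_server_default(iperf_report, NULL);
  LWIP_ASSERT("lwiperf server could not be started", session != NULL);
  LWIP_UNUSED_ARG(session);
}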
/** * @file * MDNS responder */ /* * Copyright (c) 2015 Verisure Innovation AB * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY * OF SUCH DAMAGE. * * This file is part of the lwIP TCP/IP stack. * * Author: Erik Ekman <erik@kryo.se> * */ #ifndef LWIP_HDR_MDNS_H #define LWIP_HDR_MDNS_H #include "lwip/apps/mdns_opts.h" #include "lwip/netif.h" #if LWIP_MDNS_RESPONDER enum mdns_sd_proto { DNSSD_PROTO_UDP = 0, DNSSD_PROTO_TCP = 1 }; #define MDNS_LABEL_MAXLEN 63 struct mdns_host; struct mdns_service; /** Callback function to add text to a reply, called when generating the reply */ typedef void (*service_get_txt_fn_t)(struct mdns_service *service, void *txt_userdata); void mdns_resp_init(void); err_t mdns_resp_add_netif(struct netif *netif, const char *hostname, u32_t dns_ttl); err_t mdns_resp_remove_netif(struct netif *netif); err_t mdns_resp_add_service(struct netif *netif, const char *name, const char *service, enum mdns_sd_proto proto, u16_t port, u32_t dns_ttl, service_get_txt_fn_t txt_fn, void *txt_userdata); err_t mdns_resp_add_service_txtitem(struct mdns_service *service, const char *txt, u8_t txt_len); void mdns_resp_netif_settings_changed(struct netif *netif); #endif /* LWIP_MDNS_RESPONDER */ #endif /* LWIP_HDR_MDNS_H */
2301_81045437/classic-platform
communication/lwip-2.0.3/src/include/lwip/apps/mdns.h
C
unknown
2,699
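mdns.h above declares the responder setup calls. A sketch announcing a host name and one _http._tcp service with a TXT item, assuming LWIP_MDNS_RESPONDER==1; the host name, service instance name and TTLs are placeholders:

#include "lwip/apps/mdns.h"

/* TXT callback: add one key=value item to the service's TXT record. */
static void srv_txt(struct mdns_service *service, void *txt_userdata)
{
  LWIP_UNUSED_ARG(txt_userdata);
  mdns_resp_add_service_txtitem(service, "path=/", 6);
}

void my_mdns_init(struct netif *netif)
{
  mdns_resp_init();
  mdns_resp_add_netif(netif, "mydevice", 3600);   /* answers for mydevice.local */
  mdns_resp_add_service(netif, "mydevice-web", "_http",
                        DNSSD_PROTO_TCP, 80, 3600, srv_txt, NULL);
}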
/** * @file * MDNS responder */ /* * Copyright (c) 2015 Verisure Innovation AB * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY * OF SUCH DAMAGE. * * This file is part of the lwIP TCP/IP stack. * * Author: Erik Ekman <erik@kryo.se> * */ #ifndef LWIP_HDR_APPS_MDNS_OPTS_H #define LWIP_HDR_APPS_MDNS_OPTS_H #include "lwip/opt.h" /** * @defgroup mdns_opts Options * @ingroup mdns * @{ */ /** * LWIP_MDNS_RESPONDER==1: Turn on multicast DNS module. UDP must be available for MDNS * transport. IGMP is needed for IPv4 multicast. */ #ifndef LWIP_MDNS_RESPONDER #define LWIP_MDNS_RESPONDER 0 #endif /* LWIP_MDNS_RESPONDER */ /** The maximum number of services per netif */ #ifndef MDNS_MAX_SERVICES #define MDNS_MAX_SERVICES 1 #endif /** * MDNS_DEBUG: Enable debugging for multicast DNS. */ #ifndef MDNS_DEBUG #define MDNS_DEBUG LWIP_DBG_OFF #endif /** * @} */ #endif /* LWIP_HDR_APPS_MDNS_OPTS_H */
2301_81045437/classic-platform
communication/lwip-2.0.3/src/include/lwip/apps/mdns_opts.h
C
unknown
2,406
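A matching lwipopts.h fragment for the options above; values are illustrative (the header itself notes that UDP and, for IPv4 multicast, IGMP must be available):

/* lwipopts.h fragment (example values): */
#define LWIP_MDNS_RESPONDER   1
#define MDNS_MAX_SERVICES     2             /* announce up to two services per netif */
#define LWIP_IGMP             1             /* IPv4 multicast group membership for mDNS */
#define MDNS_DEBUG            LWIP_DBG_OFF  /* set to LWIP_DBG_ON while bringing it up */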
/** * @file * MDNS responder private definitions */ /* * Copyright (c) 2015 Verisure Innovation AB * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY * OF SUCH DAMAGE. * * This file is part of the lwIP TCP/IP stack. * * Author: Erik Ekman <erik@kryo.se> * */ #ifndef LWIP_HDR_MDNS_PRIV_H #define LWIP_HDR_MDNS_PRIV_H #include "lwip/apps/mdns_opts.h" #include "lwip/pbuf.h" #if LWIP_MDNS_RESPONDER /* Domain struct and methods - visible for unit tests */ #define MDNS_DOMAIN_MAXLEN 256 #define MDNS_READNAME_ERROR 0xFFFF struct mdns_domain { /* Encoded domain name */ u8_t name[MDNS_DOMAIN_MAXLEN]; /* Total length of domain name, including zero */ u16_t length; /* Set if compression of this domain is not allowed */ u8_t skip_compression; }; err_t mdns_domain_add_label(struct mdns_domain *domain, const char *label, u8_t len); u16_t mdns_readname(struct pbuf *p, u16_t offset, struct mdns_domain *domain); int mdns_domain_eq(struct mdns_domain *a, struct mdns_domain *b); u16_t mdns_compress_domain(struct pbuf *pbuf, u16_t *offset, struct mdns_domain *domain); #endif /* LWIP_MDNS_RESPONDER */ #endif /* LWIP_HDR_MDNS_PRIV_H */
2301_81045437/classic-platform
communication/lwip-2.0.3/src/include/lwip/apps/mdns_priv.h
C
unknown
2,586
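mdns_priv.h above exposes the domain helpers "for unit tests". A sketch of how a test might build the same encoded domain twice with different case and compare; the host name is arbitrary:

#include <string.h>
#include "lwip/apps/mdns_priv.h"

/* Build "myhost.local" twice and compare with mdns_domain_eq(). */
static int check_domain_helpers(void)
{
  struct mdns_domain a, b;
  memset(&a, 0, sizeof(a));
  memset(&b, 0, sizeof(b));

  mdns_domain_add_label(&a, "myhost", 6);
  mdns_domain_add_label(&a, "local", 5);
  mdns_domain_add_label(&a, NULL, 0);      /* terminating zero label */

  mdns_domain_add_label(&b, "MyHost", 6);
  mdns_domain_add_label(&b, "local", 5);
  mdns_domain_add_label(&b, NULL, 0);

  return mdns_domain_eq(&a, &b);           /* expected: nonzero, labels compare case-insensitively */
}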
/** * @file * MQTT client */ /* * Copyright (c) 2016 Erik Andersson * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY * OF SUCH DAMAGE. * * This file is part of the lwIP TCP/IP stack. * * Author: Erik Andersson * */ #ifndef LWIP_HDR_APPS_MQTT_CLIENT_H #define LWIP_HDR_APPS_MQTT_CLIENT_H #include "lwip/apps/mqtt_opts.h" #include "lwip/err.h" #include "lwip/ip_addr.h" #ifdef __cplusplus extern "C" { #endif typedef struct mqtt_client_t mqtt_client_t; /** @ingroup mqtt * Default MQTT port */ #define MQTT_PORT 1883 /*---------------------------------------------------------------------------------------------- */ /* Connection with server */ /** * @ingroup mqtt * Client information and connection parameters */ struct mqtt_connect_client_info_t { /** Client identifier, must be set by caller */ const char *client_id; /** User name and password, set to NULL if not used */ const char* client_user; const char* client_pass; /** keep alive time in seconds, 0 to disable keep alive functionality*/ u16_t keep_alive; /** will topic, set to NULL if will is not to be used, will_msg, will_qos and will retain are then ignored */ const char* will_topic; const char* will_msg; u8_t will_qos; u8_t will_retain; }; /** * @ingroup mqtt * Connection status codes */ typedef enum { MQTT_CONNECT_ACCEPTED = 0, MQTT_CONNECT_REFUSED_PROTOCOL_VERSION = 1, MQTT_CONNECT_REFUSED_IDENTIFIER = 2, MQTT_CONNECT_REFUSED_SERVER = 3, MQTT_CONNECT_REFUSED_USERNAME_PASS = 4, MQTT_CONNECT_REFUSED_NOT_AUTHORIZED_ = 5, MQTT_CONNECT_DISCONNECTED = 256, MQTT_CONNECT_TIMEOUT = 257 } mqtt_connection_status_t; /** * @ingroup mqtt * Function prototype for mqtt connection status callback. 
Called when * client has connected to the server after initiating a mqtt connection attempt by * calling mqtt_connect() or when connection is closed by server or an error * * @param client MQTT client itself * @param arg Additional argument to pass to the callback function * @param status Connect result code or disconnection notification @see mqtt_connection_status_t * */ typedef void (*mqtt_connection_cb_t)(mqtt_client_t *client, void *arg, mqtt_connection_status_t status); /** * @ingroup mqtt * Data callback flags */ enum { /** Flag set when last fragment of data arrives in data callback */ MQTT_DATA_FLAG_LAST = 1 }; /** * @ingroup mqtt * Function prototype for MQTT incoming publish data callback function. Called when data * arrives to a subscribed topic @see mqtt_subscribe * * @param arg Additional argument to pass to the callback function * @param data User data, pointed object, data may not be referenced after callback return, NULL is passed when all publish data are delivered * @param len Length of publish data fragment * @param flags MQTT_DATA_FLAG_LAST set when this call contains the last part of data from publish message * */ typedef void (*mqtt_incoming_data_cb_t)(void *arg, const u8_t *data, u16_t len, u8_t flags); /** * @ingroup mqtt * Function prototype for MQTT incoming publish function. Called when an incoming publish * arrives to a subscribed topic @see mqtt_subscribe * * @param arg Additional argument to pass to the callback function * @param topic Zero terminated Topic text string, topic may not be referenced after callback return * @param tot_len Total length of publish data, if set to 0 (no publish payload) data callback will not be invoked */ typedef void (*mqtt_incoming_publish_cb_t)(void *arg, const char *topic, u32_t tot_len); /** * @ingroup mqtt * Function prototype for mqtt request callback. 
Called when a subscribe, unsubscribe * or publish request has completed * @param arg Pointer to user data supplied when invoking request * @param err ERR_OK on success * ERR_TIMEOUT if no response was received within timeout, * ERR_ABRT if (un)subscribe was denied */ typedef void (*mqtt_request_cb_t)(void *arg, err_t err); /** * Pending request item, binds application callback to pending server requests */ struct mqtt_request_t { /** Next item in list, NULL means this is the last in chain, next pointing at itself means request is unallocated */ struct mqtt_request_t *next; /** Callback to upper layer */ mqtt_request_cb_t cb; void *arg; /** MQTT packet identifier */ u16_t pkt_id; /** Expire time relative to element before this */ u16_t timeout_diff; }; /** Ring buffer */ struct mqtt_ringbuf_t { u16_t put; u16_t get; u8_t buf[MQTT_OUTPUT_RINGBUF_SIZE]; }; /** MQTT client */ struct mqtt_client_t { /** Timers and timeouts */ u16_t cyclic_tick; u16_t keep_alive; u16_t server_watchdog; /** Packet identifier generator*/ u16_t pkt_id_seq; /** Packet identifier of pending incoming publish */ u16_t inpub_pkt_id; /** Connection state */ u8_t conn_state; struct tcp_pcb *conn; /** Connection callback */ void *connect_arg; mqtt_connection_cb_t connect_cb; /** Pending requests to server */ struct mqtt_request_t *pend_req_queue; struct mqtt_request_t req_list[MQTT_REQ_MAX_IN_FLIGHT]; void *inpub_arg; /** Incoming data callback */ mqtt_incoming_data_cb_t data_cb; mqtt_incoming_publish_cb_t pub_cb; /** Input */ u32_t msg_idx; u8_t rx_buffer[MQTT_VAR_HEADER_BUFFER_LEN]; /** Output ring-buffer */ struct mqtt_ringbuf_t output; }; /** Connect to server */ err_t mqtt_client_connect(mqtt_client_t *client, const ip_addr_t *ipaddr, u16_t port, mqtt_connection_cb_t cb, void *arg, const struct mqtt_connect_client_info_t *client_info); /** Disconnect from server */ void mqtt_disconnect(mqtt_client_t *client); /** Create new client */ mqtt_client_t *mqtt_client_new(void); /** Check connection status */ u8_t mqtt_client_is_connected(mqtt_client_t *client); /** Set callback to call for incoming publish */ void mqtt_set_inpub_callback(mqtt_client_t *client, mqtt_incoming_publish_cb_t, mqtt_incoming_data_cb_t data_cb, void *arg); /** Common function for subscribe and unsubscribe */ err_t mqtt_sub_unsub(mqtt_client_t *client, const char *topic, u8_t qos, mqtt_request_cb_t cb, void *arg, u8_t sub); /** @ingroup mqtt *Subscribe to topic */ #define mqtt_subscribe(client, topic, qos, cb, arg) mqtt_sub_unsub(client, topic, qos, cb, arg, 1) /** @ingroup mqtt * Unsubscribe to topic */ #define mqtt_unsubscribe(client, topic, cb, arg) mqtt_sub_unsub(client, topic, 0, cb, arg, 0) /** Publish data to topic */ err_t mqtt_publish(mqtt_client_t *client, const char *topic, const void *payload, u16_t payload_length, u8_t qos, u8_t retain, mqtt_request_cb_t cb, void *arg); #ifdef __cplusplus } #endif #endif /* LWIP_HDR_APPS_MQTT_CLIENT_H */
2301_81045437/classic-platform
communication/lwip-2.0.3/src/include/lwip/apps/mqtt.h
C
unknown
8,461
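mqtt.h above defines the client API and the connection callback. A sketch that connects to a broker and publishes once on MQTT_CONNECT_ACCEPTED, assuming it runs in tcpip_thread context; the broker address is supplied by the caller, and the client id and topic are placeholders:

#include <string.h>
#include "lwip/apps/mqtt.h"

/* Connection callback: publish a short status message once connected. */
static void mqtt_conn_cb(mqtt_client_t *client, void *arg,
                         mqtt_connection_status_t status)
{
  LWIP_UNUSED_ARG(arg);
  if (status == MQTT_CONNECT_ACCEPTED) {
    const char *msg = "hello";
    mqtt_publish(client, "device/status", msg, (u16_t)strlen(msg),
                 0 /* qos */, 0 /* retain */, NULL, NULL);
  }
}

void my_mqtt_start(const ip_addr_t *broker_addr)
{
  static const struct mqtt_connect_client_info_t ci = {
    "lwip_client",     /* client_id */
    NULL, NULL,        /* no user name / password */
    60,                /* keep alive [s] */
    NULL, NULL, 0, 0   /* no will message */
  };
  mqtt_client_t *client = mqtt_client_new();
  if (client != NULL) {
    mqtt_client_connect(client, broker_addr, MQTT_PORT, mqtt_conn_cb, NULL, &ci);
  }
}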
/** * @file * MQTT client options */ /* * Copyright (c) 2016 Erik Andersson * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY * OF SUCH DAMAGE. * * This file is part of the lwIP TCP/IP stack. * * Author: Erik Andersson * */ #ifndef LWIP_HDR_APPS_MQTT_OPTS_H #define LWIP_HDR_APPS_MQTT_OPTS_H #include "lwip/opt.h" #ifdef __cplusplus extern "C" { #endif /** * @defgroup mqtt_opts Options * @ingroup mqtt * @{ */ /** * Output ring-buffer size, must be able to fit largest outgoing publish message topic+payloads */ #ifndef MQTT_OUTPUT_RINGBUF_SIZE #define MQTT_OUTPUT_RINGBUF_SIZE 256 #endif /** * Number of bytes in receive buffer, must be at least the size of the longest incoming topic + 8 * If one wants to avoid fragmented incoming publish, set length to max incoming topic length + max payload length + 8 */ #ifndef MQTT_VAR_HEADER_BUFFER_LEN #define MQTT_VAR_HEADER_BUFFER_LEN 128 #endif /** * Maximum number of pending subscribe, unsubscribe and publish requests to server . */ #ifndef MQTT_REQ_MAX_IN_FLIGHT #define MQTT_REQ_MAX_IN_FLIGHT 4 #endif /** * Seconds between each cyclic timer call. */ #ifndef MQTT_CYCLIC_TIMER_INTERVAL #define MQTT_CYCLIC_TIMER_INTERVAL 5 #endif /** * Publish, subscribe and unsubscribe request timeout in seconds. */ #ifndef MQTT_REQ_TIMEOUT #define MQTT_REQ_TIMEOUT 30 #endif /** * Seconds for MQTT connect response timeout after sending connect request */ #ifndef MQTT_CONNECT_TIMOUT #define MQTT_CONNECT_TIMOUT 100 #endif /** * @} */ #ifdef __cplusplus } #endif #endif /* LWIP_HDR_APPS_MQTT_OPTS_H */
2301_81045437/classic-platform
communication/lwip-2.0.3/src/include/lwip/apps/mqtt_opts.h
C
unknown
3,045
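All of the MQTT_* options above can be overridden from the application's lwipopts.h; a hypothetical override set, sized for larger publishes, could look like this (values are illustrative, not the defaults):

/* lwipopts.h (application-owned) */
#define MQTT_OUTPUT_RINGBUF_SIZE    512   /* must fit the largest outgoing topic + payload */
#define MQTT_VAR_HEADER_BUFFER_LEN  256   /* longest incoming topic (+ payload to avoid fragmentation) + 8 */
#define MQTT_REQ_MAX_IN_FLIGHT      8     /* more pending (un)subscribe/publish requests */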
/** * @file * NETBIOS name service responder */ /* * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY * OF SUCH DAMAGE. * * This file is part of the lwIP TCP/IP stack. * */ #ifndef LWIP_HDR_APPS_NETBIOS_H #define LWIP_HDR_APPS_NETBIOS_H #include "lwip/apps/netbiosns_opts.h" void netbiosns_init(void); #ifndef NETBIOS_LWIP_NAME void netbiosns_set_name(const char* hostname); #endif void netbiosns_stop(void); #endif /* LWIP_HDR_APPS_NETBIOS_H */
2301_81045437/classic-platform
communication/lwip-2.0.3/src/include/lwip/apps/netbiosns.h
C
unknown
1,830
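Starting the responder is a two-call affair; the name below is a hypothetical placeholder and, per netbiosns_opts.h, must be uppercase unless NETBIOS_STRCMP() is redefined to a case-insensitive comparison.

#include "lwip/apps/netbiosns.h"

void my_netbios_start(void)
{
  netbiosns_set_name("LWIPBOARD");  /* only available when NETBIOS_LWIP_NAME is not defined */
  netbiosns_init();                 /* start answering NetBIOS name queries */
}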
/** * @file * NETBIOS name service responder options */ /* * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY * OF SUCH DAMAGE. * * This file is part of the lwIP TCP/IP stack. * */ #ifndef LWIP_HDR_APPS_NETBIOS_OPTS_H #define LWIP_HDR_APPS_NETBIOS_OPTS_H #include "lwip/opt.h" /** * @defgroup netbiosns_opts Options * @ingroup netbiosns * @{ */ /** NetBIOS name of lwip device * This must be uppercase until NETBIOS_STRCMP() is defined to a string * comparision function that is case insensitive. * If you want to use the netif's hostname, use this (with LWIP_NETIF_HOSTNAME): * (ip_current_netif() != NULL ? ip_current_netif()->hostname != NULL ? ip_current_netif()->hostname : "" : "") * * If this is not defined, netbiosns_set_name() can be called at runtime to change the name. */ #ifdef __DOXYGEN__ #define NETBIOS_LWIP_NAME "NETBIOSLWIPDEV" #endif /** * @} */ #endif /* LWIP_HDR_APPS_NETBIOS_OPTS_H */
2301_81045437/classic-platform
communication/lwip-2.0.3/src/include/lwip/apps/netbiosns_opts.h
C
unknown
2,323
/** * @file * SNMP server main API - start and basic configuration */ /* * Copyright (c) 2001, 2002 Leon Woestenberg <leon.woestenberg@axon.tv> * Copyright (c) 2001, 2002 Axon Digital Design B.V., The Netherlands. * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY * OF SUCH DAMAGE. * * This file is part of the lwIP TCP/IP stack. * * Author: Leon Woestenberg <leon.woestenberg@axon.tv> * Martin Hentschel <info@cl-soft.de> * */ #ifndef LWIP_HDR_APPS_SNMP_H #define LWIP_HDR_APPS_SNMP_H #include "lwip/apps/snmp_opts.h" #ifdef __cplusplus extern "C" { #endif #if LWIP_SNMP /* don't build if not configured for use in lwipopts.h */ #include "lwip/err.h" #include "lwip/apps/snmp_core.h" /** SNMP variable binding descriptor (publically needed for traps) */ struct snmp_varbind { /** pointer to next varbind, NULL for last in list */ struct snmp_varbind *next; /** pointer to previous varbind, NULL for first in list */ struct snmp_varbind *prev; /** object identifier */ struct snmp_obj_id oid; /** value ASN1 type */ u8_t type; /** object value length */ u16_t value_len; /** object value */ void *value; }; /** * @ingroup snmp_core * Agent setup, start listening to port 161. 
*/ void snmp_init(void); void snmp_set_mibs(const struct snmp_mib **mibs, u8_t num_mibs); void snmp_set_device_enterprise_oid(const struct snmp_obj_id* device_enterprise_oid); const struct snmp_obj_id* snmp_get_device_enterprise_oid(void); void snmp_trap_dst_enable(u8_t dst_idx, u8_t enable); void snmp_trap_dst_ip_set(u8_t dst_idx, const ip_addr_t *dst); /** Generic trap: cold start */ #define SNMP_GENTRAP_COLDSTART 0 /** Generic trap: warm start */ #define SNMP_GENTRAP_WARMSTART 1 /** Generic trap: link down */ #define SNMP_GENTRAP_LINKDOWN 2 /** Generic trap: link up */ #define SNMP_GENTRAP_LINKUP 3 /** Generic trap: authentication failure */ #define SNMP_GENTRAP_AUTH_FAILURE 4 /** Generic trap: EGP neighbor lost */ #define SNMP_GENTRAP_EGP_NEIGHBOR_LOSS 5 /** Generic trap: enterprise specific */ #define SNMP_GENTRAP_ENTERPRISE_SPECIFIC 6 err_t snmp_send_trap_generic(s32_t generic_trap); err_t snmp_send_trap_specific(s32_t specific_trap, struct snmp_varbind *varbinds); err_t snmp_send_trap(const struct snmp_obj_id* oid, s32_t generic_trap, s32_t specific_trap, struct snmp_varbind *varbinds); #define SNMP_AUTH_TRAPS_DISABLED 0 #define SNMP_AUTH_TRAPS_ENABLED 1 void snmp_set_auth_traps_enabled(u8_t enable); u8_t snmp_get_auth_traps_enabled(void); const char * snmp_get_community(void); const char * snmp_get_community_write(void); const char * snmp_get_community_trap(void); void snmp_set_community(const char * const community); void snmp_set_community_write(const char * const community); void snmp_set_community_trap(const char * const community); void snmp_coldstart_trap(void); void snmp_authfail_trap(void); typedef void (*snmp_write_callback_fct)(const u32_t* oid, u8_t oid_len, void* callback_arg); void snmp_set_write_callback(snmp_write_callback_fct write_callback, void* callback_arg); #endif /* LWIP_SNMP */ #ifdef __cplusplus } #endif #endif /* LWIP_HDR_APPS_SNMP_H */
2301_81045437/classic-platform
communication/lwip-2.0.3/src/include/lwip/apps/snmp.h
C
unknown
4,632
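Bringing the agent up follows directly from the declarations above. The sketch assumes the built-in MIB2 is enabled (SNMP_LWIP_MIB2, see snmp_mib2.h below) and that a trap receiver address has been resolved by the application.

#include "lwip/apps/snmp.h"
#include "lwip/apps/snmp_mib2.h"

static const struct snmp_mib *my_mibs[] = { &mib2 };

void my_snmp_start(const ip_addr_t *trap_dst)   /* trap_dst: assumed, supplied by the application */
{
  snmp_set_mibs(my_mibs, LWIP_ARRAYSIZE(my_mibs));  /* LWIP_ARRAYSIZE comes from lwip/def.h */
  snmp_set_community_write("");        /* empty write community disallows SET requests */
  snmp_trap_dst_ip_set(0, trap_dst);
  snmp_trap_dst_enable(0, 1);
  snmp_init();                         /* start listening on UDP port 161 */
  snmp_coldstart_trap();               /* generic coldStart trap to the destination above */
}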
/** * @file * SNMP core API for implementing MIBs */ /* * Copyright (c) 2006 Axon Digital Design B.V., The Netherlands. * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY * OF SUCH DAMAGE. * * Author: Christiaan Simons <christiaan.simons@axon.tv> * Martin Hentschel <info@cl-soft.de> */ #ifndef LWIP_HDR_APPS_SNMP_CORE_H #define LWIP_HDR_APPS_SNMP_CORE_H #include "lwip/apps/snmp_opts.h" #if LWIP_SNMP /* don't build if not configured for use in lwipopts.h */ #include "lwip/ip_addr.h" #include "lwip/err.h" #ifdef __cplusplus extern "C" { #endif /* basic ASN1 defines */ #define SNMP_ASN1_CLASS_UNIVERSAL 0x00 #define SNMP_ASN1_CLASS_APPLICATION 0x40 #define SNMP_ASN1_CLASS_CONTEXT 0x80 #define SNMP_ASN1_CLASS_PRIVATE 0xC0 #define SNMP_ASN1_CONTENTTYPE_PRIMITIVE 0x00 #define SNMP_ASN1_CONTENTTYPE_CONSTRUCTED 0x20 /* universal tags (from ASN.1 spec.) 
*/ #define SNMP_ASN1_UNIVERSAL_END_OF_CONTENT 0 #define SNMP_ASN1_UNIVERSAL_INTEGER 2 #define SNMP_ASN1_UNIVERSAL_OCTET_STRING 4 #define SNMP_ASN1_UNIVERSAL_NULL 5 #define SNMP_ASN1_UNIVERSAL_OBJECT_ID 6 #define SNMP_ASN1_UNIVERSAL_SEQUENCE_OF 16 /* application specific (SNMP) tags (from SNMPv2-SMI) */ #define SNMP_ASN1_APPLICATION_IPADDR 0 /* [APPLICATION 0] IMPLICIT OCTET STRING (SIZE (4)) */ #define SNMP_ASN1_APPLICATION_COUNTER 1 /* [APPLICATION 1] IMPLICIT INTEGER (0..4294967295) => u32_t */ #define SNMP_ASN1_APPLICATION_GAUGE 2 /* [APPLICATION 2] IMPLICIT INTEGER (0..4294967295) => u32_t */ #define SNMP_ASN1_APPLICATION_TIMETICKS 3 /* [APPLICATION 3] IMPLICIT INTEGER (0..4294967295) => u32_t */ #define SNMP_ASN1_APPLICATION_OPAQUE 4 /* [APPLICATION 4] IMPLICIT OCTET STRING */ #define SNMP_ASN1_APPLICATION_COUNTER64 6 /* [APPLICATION 6] IMPLICIT INTEGER (0..18446744073709551615) */ /* context specific (SNMP) tags (from RFC 1905) */ #define SNMP_ASN1_CONTEXT_VARBIND_NO_SUCH_INSTANCE 1 /* full ASN1 type defines */ #define SNMP_ASN1_TYPE_END_OF_CONTENT (SNMP_ASN1_CLASS_UNIVERSAL | SNMP_ASN1_CONTENTTYPE_PRIMITIVE | SNMP_ASN1_UNIVERSAL_END_OF_CONTENT) #define SNMP_ASN1_TYPE_INTEGER (SNMP_ASN1_CLASS_UNIVERSAL | SNMP_ASN1_CONTENTTYPE_PRIMITIVE | SNMP_ASN1_UNIVERSAL_INTEGER) #define SNMP_ASN1_TYPE_OCTET_STRING (SNMP_ASN1_CLASS_UNIVERSAL | SNMP_ASN1_CONTENTTYPE_PRIMITIVE | SNMP_ASN1_UNIVERSAL_OCTET_STRING) #define SNMP_ASN1_TYPE_NULL (SNMP_ASN1_CLASS_UNIVERSAL | SNMP_ASN1_CONTENTTYPE_PRIMITIVE | SNMP_ASN1_UNIVERSAL_NULL) #define SNMP_ASN1_TYPE_OBJECT_ID (SNMP_ASN1_CLASS_UNIVERSAL | SNMP_ASN1_CONTENTTYPE_PRIMITIVE | SNMP_ASN1_UNIVERSAL_OBJECT_ID) #define SNMP_ASN1_TYPE_SEQUENCE (SNMP_ASN1_CLASS_UNIVERSAL | SNMP_ASN1_CONTENTTYPE_CONSTRUCTED | SNMP_ASN1_UNIVERSAL_SEQUENCE_OF) #define SNMP_ASN1_TYPE_IPADDR (SNMP_ASN1_CLASS_APPLICATION | SNMP_ASN1_CONTENTTYPE_PRIMITIVE | SNMP_ASN1_APPLICATION_IPADDR) #define SNMP_ASN1_TYPE_IPADDRESS SNMP_ASN1_TYPE_IPADDR #define SNMP_ASN1_TYPE_COUNTER (SNMP_ASN1_CLASS_APPLICATION | SNMP_ASN1_CONTENTTYPE_PRIMITIVE | SNMP_ASN1_APPLICATION_COUNTER) #define SNMP_ASN1_TYPE_COUNTER32 SNMP_ASN1_TYPE_COUNTER #define SNMP_ASN1_TYPE_GAUGE (SNMP_ASN1_CLASS_APPLICATION | SNMP_ASN1_CONTENTTYPE_PRIMITIVE | SNMP_ASN1_APPLICATION_GAUGE) #define SNMP_ASN1_TYPE_GAUGE32 SNMP_ASN1_TYPE_GAUGE #define SNMP_ASN1_TYPE_UNSIGNED32 SNMP_ASN1_TYPE_GAUGE #define SNMP_ASN1_TYPE_TIMETICKS (SNMP_ASN1_CLASS_APPLICATION | SNMP_ASN1_CONTENTTYPE_PRIMITIVE | SNMP_ASN1_APPLICATION_TIMETICKS) #define SNMP_ASN1_TYPE_OPAQUE (SNMP_ASN1_CLASS_APPLICATION | SNMP_ASN1_CONTENTTYPE_PRIMITIVE | SNMP_ASN1_APPLICATION_OPAQUE) #define SNMP_ASN1_TYPE_COUNTER64 (SNMP_ASN1_CLASS_APPLICATION | SNMP_ASN1_CONTENTTYPE_PRIMITIVE | SNMP_ASN1_APPLICATION_COUNTER64) #define SNMP_VARBIND_EXCEPTION_OFFSET 0xF0 #define SNMP_VARBIND_EXCEPTION_MASK 0x0F /** error codes predefined by SNMP prot. */ typedef enum { SNMP_ERR_NOERROR = 0, /* outdated v1 error codes. do not use anmore! 
#define SNMP_ERR_NOSUCHNAME 2 use SNMP_ERR_NOSUCHINSTANCE instead #define SNMP_ERR_BADVALUE 3 use SNMP_ERR_WRONGTYPE,SNMP_ERR_WRONGLENGTH,SNMP_ERR_WRONGENCODING or SNMP_ERR_WRONGVALUE instead #define SNMP_ERR_READONLY 4 use SNMP_ERR_NOTWRITABLE instead */ SNMP_ERR_GENERROR = 5, SNMP_ERR_NOACCESS = 6, SNMP_ERR_WRONGTYPE = 7, SNMP_ERR_WRONGLENGTH = 8, SNMP_ERR_WRONGENCODING = 9, SNMP_ERR_WRONGVALUE = 10, SNMP_ERR_NOCREATION = 11, SNMP_ERR_INCONSISTENTVALUE = 12, SNMP_ERR_RESOURCEUNAVAILABLE = 13, SNMP_ERR_COMMITFAILED = 14, SNMP_ERR_UNDOFAILED = 15, SNMP_ERR_NOTWRITABLE = 17, SNMP_ERR_INCONSISTENTNAME = 18, SNMP_ERR_NOSUCHINSTANCE = SNMP_VARBIND_EXCEPTION_OFFSET + SNMP_ASN1_CONTEXT_VARBIND_NO_SUCH_INSTANCE } snmp_err_t; /** internal object identifier representation */ struct snmp_obj_id { u8_t len; u32_t id[SNMP_MAX_OBJ_ID_LEN]; }; struct snmp_obj_id_const_ref { u8_t len; const u32_t* id; }; extern const struct snmp_obj_id_const_ref snmp_zero_dot_zero; /* administrative identifier from SNMPv2-SMI */ /** SNMP variant value, used as reference in struct snmp_node_instance and table implementation */ union snmp_variant_value { void* ptr; const void* const_ptr; u32_t u32; s32_t s32; }; /** SNMP MIB node types tree node is the only node the stack can process in order to walk the tree, all other nodes are assumed to be leaf nodes. This cannot be an enum because users may want to define their own node types. */ #define SNMP_NODE_TREE 0x00 /* predefined leaf node types */ #define SNMP_NODE_SCALAR 0x01 #define SNMP_NODE_SCALAR_ARRAY 0x02 #define SNMP_NODE_TABLE 0x03 #define SNMP_NODE_THREADSYNC 0x04 /** node "base class" layout, the mandatory fields for a node */ struct snmp_node { /** one out of SNMP_NODE_TREE or any leaf node type (like SNMP_NODE_SCALAR) */ u8_t node_type; /** the number assigned to this node which used as part of the full OID */ u32_t oid; }; /** SNMP node instance access types */ typedef enum { SNMP_NODE_INSTANCE_ACCESS_READ = 1, SNMP_NODE_INSTANCE_ACCESS_WRITE = 2, SNMP_NODE_INSTANCE_READ_ONLY = SNMP_NODE_INSTANCE_ACCESS_READ, SNMP_NODE_INSTANCE_READ_WRITE = (SNMP_NODE_INSTANCE_ACCESS_READ | SNMP_NODE_INSTANCE_ACCESS_WRITE), SNMP_NODE_INSTANCE_WRITE_ONLY = SNMP_NODE_INSTANCE_ACCESS_WRITE, SNMP_NODE_INSTANCE_NOT_ACCESSIBLE = 0 } snmp_access_t; struct snmp_node_instance; typedef s16_t (*node_instance_get_value_method)(struct snmp_node_instance*, void*); typedef snmp_err_t (*node_instance_set_test_method)(struct snmp_node_instance*, u16_t, void*); typedef snmp_err_t (*node_instance_set_value_method)(struct snmp_node_instance*, u16_t, void*); typedef void (*node_instance_release_method)(struct snmp_node_instance*); #define SNMP_GET_VALUE_RAW_DATA 0x8000 /** SNMP node instance */ struct snmp_node_instance { /** prefilled with the node, get_instance() is called on; may be changed by user to any value to pass an arbitrary node between calls to get_instance() and get_value/test_value/set_value */ const struct snmp_node* node; /** prefilled with the instance id requested; for get_instance() this is the exact oid requested; for get_next_instance() this is the relative starting point, stack expects relative oid of next node here */ struct snmp_obj_id instance_oid; /** ASN type for this object (see snmp_asn1.h for definitions) */ u8_t asn1_type; /** one out of instance access types defined above (SNMP_NODE_INSTANCE_READ_ONLY,...) */ snmp_access_t access; /** returns object value for the given object identifier. 
Return values <0 to indicate an error */ node_instance_get_value_method get_value; /** tests length and/or range BEFORE setting */ node_instance_set_test_method set_test; /** sets object value, only called when set_test() was successful */ node_instance_set_value_method set_value; /** called in any case when the instance is not required anymore by stack (useful for freeing memory allocated in get_instance/get_next_instance methods) */ node_instance_release_method release_instance; /** reference to pass arbitrary value between calls to get_instance() and get_value/test_value/set_value */ union snmp_variant_value reference; /** see reference (if reference is a pointer, the length of underlying data may be stored here or anything else) */ u32_t reference_len; }; /** SNMP tree node */ struct snmp_tree_node { /** inherited "base class" members */ struct snmp_node node; u16_t subnode_count; const struct snmp_node* const *subnodes; }; #define SNMP_CREATE_TREE_NODE(oid, subnodes) \ {{ SNMP_NODE_TREE, (oid) }, \ (u16_t)LWIP_ARRAYSIZE(subnodes), (subnodes) } #define SNMP_CREATE_EMPTY_TREE_NODE(oid) \ {{ SNMP_NODE_TREE, (oid) }, \ 0, NULL } /** SNMP leaf node */ struct snmp_leaf_node { /** inherited "base class" members */ struct snmp_node node; snmp_err_t (*get_instance)(const u32_t *root_oid, u8_t root_oid_len, struct snmp_node_instance* instance); snmp_err_t (*get_next_instance)(const u32_t *root_oid, u8_t root_oid_len, struct snmp_node_instance* instance); }; /** represents a single mib with its base oid and root node */ struct snmp_mib { const u32_t *base_oid; u8_t base_oid_len; const struct snmp_node *root_node; }; #define SNMP_MIB_CREATE(oid_list, root_node) { (oid_list), (u8_t)LWIP_ARRAYSIZE(oid_list), root_node } /** OID range structure */ struct snmp_oid_range { u32_t min; u32_t max; }; /** checks if incoming OID length and values are in allowed ranges */ u8_t snmp_oid_in_range(const u32_t *oid_in, u8_t oid_len, const struct snmp_oid_range *oid_ranges, u8_t oid_ranges_len); typedef enum { SNMP_NEXT_OID_STATUS_SUCCESS, SNMP_NEXT_OID_STATUS_NO_MATCH, SNMP_NEXT_OID_STATUS_BUF_TO_SMALL } snmp_next_oid_status_t; /** state for next_oid_init / next_oid_check functions */ struct snmp_next_oid_state { const u32_t* start_oid; u8_t start_oid_len; u32_t* next_oid; u8_t next_oid_len; u8_t next_oid_max_len; snmp_next_oid_status_t status; void* reference; }; void snmp_next_oid_init(struct snmp_next_oid_state *state, const u32_t *start_oid, u8_t start_oid_len, u32_t *next_oid_buf, u8_t next_oid_max_len); u8_t snmp_next_oid_precheck(struct snmp_next_oid_state *state, const u32_t *oid, const u8_t oid_len); u8_t snmp_next_oid_check(struct snmp_next_oid_state *state, const u32_t *oid, const u8_t oid_len, void* reference); void snmp_oid_assign(struct snmp_obj_id* target, const u32_t *oid, u8_t oid_len); void snmp_oid_combine(struct snmp_obj_id* target, const u32_t *oid1, u8_t oid1_len, const u32_t *oid2, u8_t oid2_len); void snmp_oid_prefix(struct snmp_obj_id* target, const u32_t *oid, u8_t oid_len); void snmp_oid_append(struct snmp_obj_id* target, const u32_t *oid, u8_t oid_len); u8_t snmp_oid_equal(const u32_t *oid1, u8_t oid1_len, const u32_t *oid2, u8_t oid2_len); s8_t snmp_oid_compare(const u32_t *oid1, u8_t oid1_len, const u32_t *oid2, u8_t oid2_len); #if LWIP_IPV4 u8_t snmp_oid_to_ip4(const u32_t *oid, ip4_addr_t *ip); void snmp_ip4_to_oid(const ip4_addr_t *ip, u32_t *oid); #endif /* LWIP_IPV4 */ #if LWIP_IPV6 u8_t snmp_oid_to_ip6(const u32_t *oid, ip6_addr_t *ip); void snmp_ip6_to_oid(const ip6_addr_t 
*ip, u32_t *oid); #endif /* LWIP_IPV6 */ #if LWIP_IPV4 || LWIP_IPV6 u8_t snmp_ip_to_oid(const ip_addr_t *ip, u32_t *oid); u8_t snmp_ip_port_to_oid(const ip_addr_t *ip, u16_t port, u32_t *oid); u8_t snmp_oid_to_ip(const u32_t *oid, u8_t oid_len, ip_addr_t *ip); u8_t snmp_oid_to_ip_port(const u32_t *oid, u8_t oid_len, ip_addr_t *ip, u16_t *port); #endif /* LWIP_IPV4 || LWIP_IPV6 */ struct netif; u8_t netif_to_num(const struct netif *netif); snmp_err_t snmp_set_test_ok(struct snmp_node_instance* instance, u16_t value_len, void* value); /* generic function which can be used if test is always successful */ err_t snmp_decode_bits(const u8_t *buf, u32_t buf_len, u32_t *bit_value); err_t snmp_decode_truthvalue(const s32_t *asn1_value, u8_t *bool_value); u8_t snmp_encode_bits(u8_t *buf, u32_t buf_len, u32_t bit_value, u8_t bit_count); u8_t snmp_encode_truthvalue(s32_t *asn1_value, u32_t bool_value); struct snmp_statistics { u32_t inpkts; u32_t outpkts; u32_t inbadversions; u32_t inbadcommunitynames; u32_t inbadcommunityuses; u32_t inasnparseerrs; u32_t intoobigs; u32_t innosuchnames; u32_t inbadvalues; u32_t inreadonlys; u32_t ingenerrs; u32_t intotalreqvars; u32_t intotalsetvars; u32_t ingetrequests; u32_t ingetnexts; u32_t insetrequests; u32_t ingetresponses; u32_t intraps; u32_t outtoobigs; u32_t outnosuchnames; u32_t outbadvalues; u32_t outgenerrs; u32_t outgetrequests; u32_t outgetnexts; u32_t outsetrequests; u32_t outgetresponses; u32_t outtraps; }; extern struct snmp_statistics snmp_stats; #ifdef __cplusplus } #endif #endif /* LWIP_SNMP */ #endif /* LWIP_HDR_APPS_SNMP_CORE_H */
2301_81045437/classic-platform
communication/lwip-2.0.3/src/include/lwip/apps/snmp_core.h
C
unknown
14,808
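As a small illustration of the OID helpers declared above (a sketch; the OID is the standard MIB2 sysDescr.0 instance, used purely as sample data):

#include "lwip/apps/snmp_core.h"

static const u32_t sysdescr_inst_oid[] = { 1, 3, 6, 1, 2, 1, 1, 1, 0 };

void my_oid_demo(void)
{
  struct snmp_obj_id id;

  /* copy the OID into the fixed-size snmp_obj_id representation */
  snmp_oid_assign(&id, sysdescr_inst_oid, (u8_t)LWIP_ARRAYSIZE(sysdescr_inst_oid));

  if (snmp_oid_equal(id.id, id.len, sysdescr_inst_oid, (u8_t)LWIP_ARRAYSIZE(sysdescr_inst_oid))) {
    /* the copy matches the original: snmp_oid_equal() returns non-zero on an exact match */
  }
}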
/** * @file * SNMP MIB2 API */ /* * Copyright (c) 2001-2004 Swedish Institute of Computer Science. * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY * OF SUCH DAMAGE. * * This file is part of the lwIP TCP/IP stack. * * Author: Dirk Ziegelmeier <dziegel@gmx.de> * */ #ifndef LWIP_HDR_APPS_SNMP_MIB2_H #define LWIP_HDR_APPS_SNMP_MIB2_H #include "lwip/apps/snmp_opts.h" #ifdef __cplusplus extern "C" { #endif #if LWIP_SNMP /* don't build if not configured for use in lwipopts.h */ #if SNMP_LWIP_MIB2 #include "lwip/apps/snmp_core.h" extern const struct snmp_mib mib2; #if SNMP_USE_NETCONN #include "lwip/apps/snmp_threadsync.h" void snmp_mib2_lwip_synchronizer(snmp_threadsync_called_fn fn, void* arg); extern struct snmp_threadsync_instance snmp_mib2_lwip_locks; #endif #ifndef SNMP_SYSSERVICES #define SNMP_SYSSERVICES ((1 << 6) | (1 << 3) | ((IP_FORWARD) << 2)) #endif void snmp_mib2_set_sysdescr(const u8_t* str, const u16_t* len); /* read-only be defintion */ void snmp_mib2_set_syscontact(u8_t *ocstr, u16_t *ocstrlen, u16_t bufsize); void snmp_mib2_set_syscontact_readonly(const u8_t *ocstr, const u16_t *ocstrlen); void snmp_mib2_set_sysname(u8_t *ocstr, u16_t *ocstrlen, u16_t bufsize); void snmp_mib2_set_sysname_readonly(const u8_t *ocstr, const u16_t *ocstrlen); void snmp_mib2_set_syslocation(u8_t *ocstr, u16_t *ocstrlen, u16_t bufsize); void snmp_mib2_set_syslocation_readonly(const u8_t *ocstr, const u16_t *ocstrlen); #endif /* SNMP_LWIP_MIB2 */ #endif /* LWIP_SNMP */ #ifdef __cplusplus } #endif #endif /* LWIP_HDR_APPS_SNMP_MIB2_H */
2301_81045437/classic-platform
communication/lwip-2.0.3/src/include/lwip/apps/snmp_mib2.h
C
unknown
3,008
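The MIB2 system-group setters above come in two flavours. A writable sysContact, for instance, needs application-owned buffers that stay valid for the agent's lifetime; the buffer size and strings below are arbitrary assumptions.

#include "lwip/apps/snmp_mib2.h"

static u8_t  syscontact_buf[64];    /* storage the agent writes into on SET requests */
static u16_t syscontact_len = 0;

static const u8_t  sysname_str[] = "lwip-node";   /* placeholder, read-only */
static const u16_t sysname_len   = sizeof(sysname_str) - 1;

void my_mib2_setup(void)
{
  snmp_mib2_set_syscontact(syscontact_buf, &syscontact_len, sizeof(syscontact_buf));
  snmp_mib2_set_sysname_readonly(sysname_str, &sysname_len);
}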
/** * @file * SNMP server options list */ /* * Copyright (c) 2015 Dirk Ziegelmeier * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY * OF SUCH DAMAGE. * * This file is part of the lwIP TCP/IP stack. * * Author: Dirk Ziegelmeier * */ #ifndef LWIP_HDR_SNMP_OPTS_H #define LWIP_HDR_SNMP_OPTS_H #include "lwip/opt.h" /** * @defgroup snmp_opts Options * @ingroup snmp * @{ */ /** * LWIP_SNMP==1: This enables the lwIP SNMP agent. UDP must be available * for SNMP transport. * If you want to use your own SNMP agent, leave this disabled. * To integrate MIB2 of an external agent, you need to enable * LWIP_MIB2_CALLBACKS and MIB2_STATS. This will give you the callbacks * and statistics counters you need to get MIB2 working. */ #if !defined LWIP_SNMP || defined __DOXYGEN__ #define LWIP_SNMP 0 #endif /** * SNMP_USE_NETCONN: Use netconn API instead of raw API. * Makes SNMP agent run in a worker thread, so blocking operations * can be done in MIB calls. */ #if !defined SNMP_USE_NETCONN || defined __DOXYGEN__ #define SNMP_USE_NETCONN 0 #endif /** * SNMP_USE_RAW: Use raw API. * SNMP agent does not run in a worker thread, so blocking operations * should not be done in MIB calls. */ #if !defined SNMP_USE_RAW || defined __DOXYGEN__ #define SNMP_USE_RAW 1 #endif #if SNMP_USE_NETCONN && SNMP_USE_RAW #error SNMP stack can use only one of the APIs {raw, netconn} #endif #if LWIP_SNMP && !SNMP_USE_NETCONN && !SNMP_USE_RAW #error SNMP stack needs a receive API and UDP {raw, netconn} #endif #if SNMP_USE_NETCONN /** * SNMP_STACK_SIZE: Stack size of SNMP netconn worker thread */ #if !defined SNMP_STACK_SIZE || defined __DOXYGEN__ #define SNMP_STACK_SIZE DEFAULT_THREAD_STACKSIZE #endif /** * SNMP_THREAD_PRIO: SNMP netconn worker thread priority */ #if !defined SNMP_THREAD_PRIO || defined __DOXYGEN__ #define SNMP_THREAD_PRIO DEFAULT_THREAD_PRIO #endif #endif /* SNMP_USE_NETCONN */ /** * SNMP_TRAP_DESTINATIONS: Number of trap destinations. At least one trap * destination is required */ #if !defined SNMP_TRAP_DESTINATIONS || defined __DOXYGEN__ #define SNMP_TRAP_DESTINATIONS 1 #endif /** * Only allow SNMP write actions that are 'safe' (e.g. disabling netifs is not * a safe action and disabled when SNMP_SAFE_REQUESTS = 1). 
* Unsafe requests are disabled by default! */ #if !defined SNMP_SAFE_REQUESTS || defined __DOXYGEN__ #define SNMP_SAFE_REQUESTS 1 #endif /** * The maximum length of strings used. */ #if !defined SNMP_MAX_OCTET_STRING_LEN || defined __DOXYGEN__ #define SNMP_MAX_OCTET_STRING_LEN 127 #endif /** * The maximum number of Sub ID's inside an object identifier. * Indirectly this also limits the maximum depth of SNMP tree. */ #if !defined SNMP_MAX_OBJ_ID_LEN || defined __DOXYGEN__ #define SNMP_MAX_OBJ_ID_LEN 50 #endif #if !defined SNMP_MAX_VALUE_SIZE || defined __DOXYGEN__ /** * The maximum size of a value. */ #define SNMP_MIN_VALUE_SIZE (2 * sizeof(u32_t*)) /* size required to store the basic types (8 bytes for counter64) */ /** * The minimum size of a value. */ #define SNMP_MAX_VALUE_SIZE LWIP_MAX(LWIP_MAX((SNMP_MAX_OCTET_STRING_LEN), sizeof(u32_t)*(SNMP_MAX_OBJ_ID_LEN)), SNMP_MIN_VALUE_SIZE) #endif /** * The snmp read-access community. Used for write-access and traps, too * unless SNMP_COMMUNITY_WRITE or SNMP_COMMUNITY_TRAP are enabled, respectively. */ #if !defined SNMP_COMMUNITY || defined __DOXYGEN__ #define SNMP_COMMUNITY "public" #endif /** * The snmp write-access community. * Set this community to "" in order to disallow any write access. */ #if !defined SNMP_COMMUNITY_WRITE || defined __DOXYGEN__ #define SNMP_COMMUNITY_WRITE "private" #endif /** * The snmp community used for sending traps. */ #if !defined SNMP_COMMUNITY_TRAP || defined __DOXYGEN__ #define SNMP_COMMUNITY_TRAP "public" #endif /** * The maximum length of community string. * If community names shall be adjusted at runtime via snmp_set_community() calls, * enter here the possible maximum length (+1 for terminating null character). */ #if !defined SNMP_MAX_COMMUNITY_STR_LEN || defined __DOXYGEN__ #define SNMP_MAX_COMMUNITY_STR_LEN LWIP_MAX(LWIP_MAX(sizeof(SNMP_COMMUNITY), sizeof(SNMP_COMMUNITY_WRITE)), sizeof(SNMP_COMMUNITY_TRAP)) #endif /** * The OID identifiying the device. This may be the enterprise OID itself or any OID located below it in tree. */ #if !defined SNMP_DEVICE_ENTERPRISE_OID || defined __DOXYGEN__ #define SNMP_LWIP_ENTERPRISE_OID 26381 /** * IANA assigned enterprise ID for lwIP is 26381 * @see http://www.iana.org/assignments/enterprise-numbers * * @note this enterprise ID is assigned to the lwIP project, * all object identifiers living under this ID are assigned * by the lwIP maintainers! * @note don't change this define, use snmp_set_device_enterprise_oid() * * If you need to create your own private MIB you'll need * to apply for your own enterprise ID with IANA: * http://www.iana.org/numbers.html */ #define SNMP_DEVICE_ENTERPRISE_OID {1, 3, 6, 1, 4, 1, SNMP_LWIP_ENTERPRISE_OID} /** * Length of SNMP_DEVICE_ENTERPRISE_OID */ #define SNMP_DEVICE_ENTERPRISE_OID_LEN 7 #endif /** * SNMP_DEBUG: Enable debugging for SNMP messages. */ #if !defined SNMP_DEBUG || defined __DOXYGEN__ #define SNMP_DEBUG LWIP_DBG_OFF #endif /** * SNMP_MIB_DEBUG: Enable debugging for SNMP MIBs. */ #if !defined SNMP_MIB_DEBUG || defined __DOXYGEN__ #define SNMP_MIB_DEBUG LWIP_DBG_OFF #endif /** * Indicates if the MIB2 implementation of LWIP SNMP stack is used. */ #if !defined SNMP_LWIP_MIB2 || defined __DOXYGEN__ #define SNMP_LWIP_MIB2 LWIP_SNMP #endif /** * Value return for sysDesc field of MIB2. */ #if !defined SNMP_LWIP_MIB2_SYSDESC || defined __DOXYGEN__ #define SNMP_LWIP_MIB2_SYSDESC "lwIP" #endif /** * Value return for sysName field of MIB2. * To make sysName field settable, call snmp_mib2_set_sysname() to provide the necessary buffers. 
*/ #if !defined SNMP_LWIP_MIB2_SYSNAME || defined __DOXYGEN__ #define SNMP_LWIP_MIB2_SYSNAME "FQDN-unk" #endif /** * Value return for sysContact field of MIB2. * To make sysContact field settable, call snmp_mib2_set_syscontact() to provide the necessary buffers. */ #if !defined SNMP_LWIP_MIB2_SYSCONTACT || defined __DOXYGEN__ #define SNMP_LWIP_MIB2_SYSCONTACT "" #endif /** * Value return for sysLocation field of MIB2. * To make sysLocation field settable, call snmp_mib2_set_syslocation() to provide the necessary buffers. */ #if !defined SNMP_LWIP_MIB2_SYSLOCATION || defined __DOXYGEN__ #define SNMP_LWIP_MIB2_SYSLOCATION "" #endif /** * This value is used to limit the repetitions processed in GetBulk requests (value == 0 means no limitation). * This may be useful to limit the load for a single request. * According to SNMP RFC 1905 it is allowed to not return all requested variables from a GetBulk request if system load would be too high. * so the effect is that the client will do more requests to gather all data. * For the stack this could be useful in case that SNMP processing is done in TCP/IP thread. In this situation a request with many * repetitions could block the thread for a longer time. Setting limit here will keep the stack more responsive. */ #if !defined SNMP_LWIP_GETBULK_MAX_REPETITIONS || defined __DOXYGEN__ #define SNMP_LWIP_GETBULK_MAX_REPETITIONS 0 #endif /** * @} */ /* ------------------------------------ ---------- SNMPv3 options ---------- ------------------------------------ */ /** * LWIP_SNMP_V3==1: This enables EXPERIMENTAL SNMPv3 support. LWIP_SNMP must * also be enabled. * THIS IS UNDER DEVELOPMENT AND SHOULD NOT BE ENABLED IN PRODUCTS. */ #ifndef LWIP_SNMP_V3 #define LWIP_SNMP_V3 0 #endif #ifndef LWIP_SNMP_V3_CRYPTO #define LWIP_SNMP_V3_CRYPTO LWIP_SNMP_V3 #endif #ifndef LWIP_SNMP_V3_MBEDTLS #define LWIP_SNMP_V3_MBEDTLS LWIP_SNMP_V3 #endif #endif /* LWIP_HDR_SNMP_OPTS_H */
2301_81045437/classic-platform
communication/lwip-2.0.3/src/include/lwip/apps/snmp_opts.h
C
unknown
9,814
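A hypothetical lwipopts.h fragment enabling the agent in netconn (worker-thread) mode, corresponding to the options documented above; values are illustrative only.

/* lwipopts.h (application-owned) */
#define LWIP_SNMP             1   /* build the agent; UDP must be enabled */
#define SNMP_USE_NETCONN      1   /* run in a worker thread so MIB calls may block */
#define SNMP_USE_RAW          0   /* the two APIs are mutually exclusive */
#define SNMP_COMMUNITY        "public"
#define SNMP_COMMUNITY_WRITE  ""  /* empty string disallows any write access */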
/** * @file * SNMP server MIB API to implement scalar nodes */ /* * Copyright (c) 2001-2004 Swedish Institute of Computer Science. * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY * OF SUCH DAMAGE. * * This file is part of the lwIP TCP/IP stack. * * Author: Martin Hentschel <info@cl-soft.de> * */ #ifndef LWIP_HDR_APPS_SNMP_SCALAR_H #define LWIP_HDR_APPS_SNMP_SCALAR_H #include "lwip/apps/snmp_opts.h" #include "lwip/apps/snmp_core.h" #ifdef __cplusplus extern "C" { #endif #if LWIP_SNMP /* don't build if not configured for use in lwipopts.h */ /** basic scalar node */ struct snmp_scalar_node { /** inherited "base class" members */ struct snmp_leaf_node node; u8_t asn1_type; snmp_access_t access; node_instance_get_value_method get_value; node_instance_set_test_method set_test; node_instance_set_value_method set_value; }; snmp_err_t snmp_scalar_get_instance(const u32_t *root_oid, u8_t root_oid_len, struct snmp_node_instance* instance); snmp_err_t snmp_scalar_get_next_instance(const u32_t *root_oid, u8_t root_oid_len, struct snmp_node_instance* instance); #define SNMP_SCALAR_CREATE_NODE(oid, access, asn1_type, get_value_method, set_test_method, set_value_method) \ {{{ SNMP_NODE_SCALAR, (oid) }, \ snmp_scalar_get_instance, \ snmp_scalar_get_next_instance }, \ (asn1_type), (access), (get_value_method), (set_test_method), (set_value_method) } #define SNMP_SCALAR_CREATE_NODE_READONLY(oid, asn1_type, get_value_method) SNMP_SCALAR_CREATE_NODE(oid, SNMP_NODE_INSTANCE_READ_ONLY, asn1_type, get_value_method, NULL, NULL) /** scalar array node - a tree node which contains scalars only as children */ struct snmp_scalar_array_node_def { u32_t oid; u8_t asn1_type; snmp_access_t access; }; typedef s16_t (*snmp_scalar_array_get_value_method)(const struct snmp_scalar_array_node_def*, void*); typedef snmp_err_t (*snmp_scalar_array_set_test_method)(const struct snmp_scalar_array_node_def*, u16_t, void*); typedef snmp_err_t (*snmp_scalar_array_set_value_method)(const struct snmp_scalar_array_node_def*, u16_t, void*); /** basic scalar array node */ struct snmp_scalar_array_node { /** inherited "base class" members */ struct snmp_leaf_node node; u16_t array_node_count; const struct snmp_scalar_array_node_def* 
array_nodes; snmp_scalar_array_get_value_method get_value; snmp_scalar_array_set_test_method set_test; snmp_scalar_array_set_value_method set_value; }; snmp_err_t snmp_scalar_array_get_instance(const u32_t *root_oid, u8_t root_oid_len, struct snmp_node_instance* instance); snmp_err_t snmp_scalar_array_get_next_instance(const u32_t *root_oid, u8_t root_oid_len, struct snmp_node_instance* instance); #define SNMP_SCALAR_CREATE_ARRAY_NODE(oid, array_nodes, get_value_method, set_test_method, set_value_method) \ {{{ SNMP_NODE_SCALAR_ARRAY, (oid) }, \ snmp_scalar_array_get_instance, \ snmp_scalar_array_get_next_instance }, \ (u16_t)LWIP_ARRAYSIZE(array_nodes), (array_nodes), (get_value_method), (set_test_method), (set_value_method) } #endif /* LWIP_SNMP */ #ifdef __cplusplus } #endif #endif /* LWIP_HDR_APPS_SNMP_SCALAR_H */
2301_81045437/classic-platform
communication/lwip-2.0.3/src/include/lwip/apps/snmp_scalar.h
C
unknown
4,629
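Putting snmp_core.h and the scalar macros above together, a private MIB with one read-only TimeTicks scalar might be sketched like this. The sub-OID values under the lwIP enterprise ID and the use of sys_now() as a data source are purely illustrative.

#include "lwip/apps/snmp_core.h"
#include "lwip/apps/snmp_scalar.h"
#include "lwip/sys.h"

/* get_value callback: write the value into 'value' and return its length in bytes */
static s16_t my_uptime_get_value(struct snmp_node_instance *instance, void *value)
{
  LWIP_UNUSED_ARG(instance);
  *(u32_t *)value = sys_now() / 10;   /* milliseconds -> TimeTicks (1/100 s) */
  return sizeof(u32_t);
}

static const struct snmp_scalar_node my_uptime_scalar =
  SNMP_SCALAR_CREATE_NODE_READONLY(1, SNMP_ASN1_TYPE_TIMETICKS, my_uptime_get_value);

static const struct snmp_node *const my_root_subnodes[] = {
  &my_uptime_scalar.node.node
};

static const struct snmp_tree_node my_root = SNMP_CREATE_TREE_NODE(1, my_root_subnodes);

/* 1.3.6.1.4.1.26381.1 -- below the lwIP enterprise OID, illustrative only;
   register together with mib2 via snmp_set_mibs() before snmp_init() */
static const u32_t my_mib_base_oid[] = { 1, 3, 6, 1, 4, 1, 26381, 1 };
static const struct snmp_mib my_mib = SNMP_MIB_CREATE(my_mib_base_oid, &my_root.node);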
/** * @file * SNMP server MIB API to implement table nodes */ /* * Copyright (c) 2001-2004 Swedish Institute of Computer Science. * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY * OF SUCH DAMAGE. * * This file is part of the lwIP TCP/IP stack. * * Author: Martin Hentschel <info@cl-soft.de> * */ #ifndef LWIP_HDR_APPS_SNMP_TABLE_H #define LWIP_HDR_APPS_SNMP_TABLE_H #include "lwip/apps/snmp_opts.h" #include "lwip/apps/snmp_core.h" #ifdef __cplusplus extern "C" { #endif #if LWIP_SNMP /* don't build if not configured for use in lwipopts.h */ /** default (customizable) read/write table */ struct snmp_table_col_def { u32_t index; u8_t asn1_type; snmp_access_t access; }; /** table node */ struct snmp_table_node { /** inherited "base class" members */ struct snmp_leaf_node node; u16_t column_count; const struct snmp_table_col_def* columns; snmp_err_t (*get_cell_instance)(const u32_t* column, const u32_t* row_oid, u8_t row_oid_len, struct snmp_node_instance* cell_instance); snmp_err_t (*get_next_cell_instance)(const u32_t* column, struct snmp_obj_id* row_oid, struct snmp_node_instance* cell_instance); /** returns object value for the given object identifier */ node_instance_get_value_method get_value; /** tests length and/or range BEFORE setting */ node_instance_set_test_method set_test; /** sets object value, only called when set_test() was successful */ node_instance_set_value_method set_value; }; snmp_err_t snmp_table_get_instance(const u32_t *root_oid, u8_t root_oid_len, struct snmp_node_instance* instance); snmp_err_t snmp_table_get_next_instance(const u32_t *root_oid, u8_t root_oid_len, struct snmp_node_instance* instance); #define SNMP_TABLE_CREATE(oid, columns, get_cell_instance_method, get_next_cell_instance_method, get_value_method, set_test_method, set_value_method) \ {{{ SNMP_NODE_TABLE, (oid) }, \ snmp_table_get_instance, \ snmp_table_get_next_instance }, \ (u16_t)LWIP_ARRAYSIZE(columns), (columns), \ (get_cell_instance_method), (get_next_cell_instance_method), \ (get_value_method), (set_test_method), (set_value_method)} #define SNMP_TABLE_GET_COLUMN_FROM_OID(oid) ((oid)[1]) /* first array value is (fixed) row entry (fixed to 1) and 2nd value is column, follow3ed by instance */ /** 
simple read-only table */ typedef enum { SNMP_VARIANT_VALUE_TYPE_U32, SNMP_VARIANT_VALUE_TYPE_S32, SNMP_VARIANT_VALUE_TYPE_PTR, SNMP_VARIANT_VALUE_TYPE_CONST_PTR } snmp_table_column_data_type_t; struct snmp_table_simple_col_def { u32_t index; u8_t asn1_type; snmp_table_column_data_type_t data_type; /* depending of what union member is used to store the value*/ }; /** simple read-only table node */ struct snmp_table_simple_node { /* inherited "base class" members */ struct snmp_leaf_node node; u16_t column_count; const struct snmp_table_simple_col_def* columns; snmp_err_t (*get_cell_value)(const u32_t* column, const u32_t* row_oid, u8_t row_oid_len, union snmp_variant_value* value, u32_t* value_len); snmp_err_t (*get_next_cell_instance_and_value)(const u32_t* column, struct snmp_obj_id* row_oid, union snmp_variant_value* value, u32_t* value_len); }; snmp_err_t snmp_table_simple_get_instance(const u32_t *root_oid, u8_t root_oid_len, struct snmp_node_instance* instance); snmp_err_t snmp_table_simple_get_next_instance(const u32_t *root_oid, u8_t root_oid_len, struct snmp_node_instance* instance); #define SNMP_TABLE_CREATE_SIMPLE(oid, columns, get_cell_value_method, get_next_cell_instance_and_value_method) \ {{{ SNMP_NODE_TABLE, (oid) }, \ snmp_table_simple_get_instance, \ snmp_table_simple_get_next_instance }, \ (u16_t)LWIP_ARRAYSIZE(columns), (columns), (get_cell_value_method), (get_next_cell_instance_and_value_method) } s16_t snmp_table_extract_value_from_s32ref(struct snmp_node_instance* instance, void* value); s16_t snmp_table_extract_value_from_u32ref(struct snmp_node_instance* instance, void* value); s16_t snmp_table_extract_value_from_refconstptr(struct snmp_node_instance* instance, void* value); #endif /* LWIP_SNMP */ #ifdef __cplusplus } #endif #endif /* LWIP_HDR_APPS_SNMP_TABLE_H */
2301_81045437/classic-platform
communication/lwip-2.0.3/src/include/lwip/apps/snmp_table.h
C
unknown
5,644
/** * @file * SNMP server MIB API to implement thread synchronization */ /* * Copyright (c) 2001-2004 Swedish Institute of Computer Science. * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY * OF SUCH DAMAGE. * * This file is part of the lwIP TCP/IP stack. * * Author: Dirk Ziegelmeier <dziegel@gmx.de> * */ #ifndef LWIP_HDR_APPS_SNMP_THREADSYNC_H #define LWIP_HDR_APPS_SNMP_THREADSYNC_H #include "lwip/apps/snmp_opts.h" #ifdef __cplusplus extern "C" { #endif #if LWIP_SNMP /* don't build if not configured for use in lwipopts.h */ #include "lwip/apps/snmp_core.h" #include "lwip/sys.h" typedef void (*snmp_threadsync_called_fn)(void* arg); typedef void (*snmp_threadsync_synchronizer_fn)(snmp_threadsync_called_fn fn, void* arg); /** Thread sync runtime data. For internal usage only. */ struct threadsync_data { union { snmp_err_t err; s16_t s16; } retval; union { const u32_t *root_oid; void *value; } arg1; union { u8_t root_oid_len; u16_t len; } arg2; const struct snmp_threadsync_node *threadsync_node; struct snmp_node_instance proxy_instance; }; /** Thread sync instance. Needed EXCATLY once for every thread to be synced into. 
*/ struct snmp_threadsync_instance { sys_sem_t sem; sys_mutex_t sem_usage_mutex; snmp_threadsync_synchronizer_fn sync_fn; struct threadsync_data data; }; /** SNMP thread sync proxy leaf node */ struct snmp_threadsync_node { /* inherited "base class" members */ struct snmp_leaf_node node; const struct snmp_leaf_node *target; struct snmp_threadsync_instance *instance; }; snmp_err_t snmp_threadsync_get_instance(const u32_t *root_oid, u8_t root_oid_len, struct snmp_node_instance* instance); snmp_err_t snmp_threadsync_get_next_instance(const u32_t *root_oid, u8_t root_oid_len, struct snmp_node_instance* instance); /** Create thread sync proxy node */ #define SNMP_CREATE_THREAD_SYNC_NODE(oid, target_leaf_node, threadsync_instance) \ {{{ SNMP_NODE_THREADSYNC, (oid) }, \ snmp_threadsync_get_instance, \ snmp_threadsync_get_next_instance }, \ (target_leaf_node), \ (threadsync_instance) } /** Create thread sync instance data */ void snmp_threadsync_init(struct snmp_threadsync_instance *instance, snmp_threadsync_synchronizer_fn sync_fn); #endif /* LWIP_SNMP */ #ifdef __cplusplus } #endif #endif /* LWIP_HDR_APPS_SNMP_THREADSYNC_H */
2301_81045437/classic-platform
communication/lwip-2.0.3/src/include/lwip/apps/snmp_threadsync.h
C
unknown
3,913
/** * @file * Additional SNMPv3 functionality RFC3414 and RFC3826. */ /* * Copyright (c) 2016 Elias Oenal. * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY * OF SUCH DAMAGE. * * Author: Elias Oenal <lwip@eliasoenal.com> */ #ifndef LWIP_HDR_APPS_SNMP_V3_H #define LWIP_HDR_APPS_SNMP_V3_H #include "lwip/apps/snmp_opts.h" #include "lwip/err.h" #if LWIP_SNMP && LWIP_SNMP_V3 #define SNMP_V3_AUTH_ALGO_INVAL 0 #define SNMP_V3_AUTH_ALGO_MD5 1 #define SNMP_V3_AUTH_ALGO_SHA 2 #define SNMP_V3_PRIV_ALGO_INVAL 0 #define SNMP_V3_PRIV_ALGO_DES 1 #define SNMP_V3_PRIV_ALGO_AES 2 #define SNMP_V3_PRIV_MODE_DECRYPT 0 #define SNMP_V3_PRIV_MODE_ENCRYPT 1 /* * The following callback functions must be implemented by the application. * There is a dummy implementation in snmpv3_dummy.c. */ void snmpv3_get_engine_id(const char **id, u8_t *len); err_t snmpv3_set_engine_id(const char* id, u8_t len); u32_t snmpv3_get_engine_boots(void); void snmpv3_set_engine_boots(u32_t boots); u32_t snmpv3_get_engine_time(void); void snmpv3_reset_engine_time(void); err_t snmpv3_get_user(const char* username, u8_t *auth_algo, u8_t *auth_key, u8_t *priv_algo, u8_t *priv_key); /* The following functions are provided by the SNMPv3 agent */ void snmpv3_engine_id_changed(void); void snmpv3_password_to_key_md5( const u8_t *password, /* IN */ u8_t passwordlen, /* IN */ const u8_t *engineID, /* IN - pointer to snmpEngineID */ u8_t engineLength, /* IN - length of snmpEngineID */ u8_t *key); /* OUT - pointer to caller 16-octet buffer */ void snmpv3_password_to_key_sha( const u8_t *password, /* IN */ u8_t passwordlen, /* IN */ const u8_t *engineID, /* IN - pointer to snmpEngineID */ u8_t engineLength, /* IN - length of snmpEngineID */ u8_t *key); /* OUT - pointer to caller 20-octet buffer */ #endif #endif /* LWIP_HDR_APPS_SNMP_V3_H */
2301_81045437/classic-platform
communication/lwip-2.0.3/src/include/lwip/apps/snmpv3.h
C
unknown
3,422
/** * @file * SNTP client API */ /* * Copyright (c) 2007-2009 Frédéric Bernon, Simon Goldschmidt * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY * OF SUCH DAMAGE. * * This file is part of the lwIP TCP/IP stack. * * Author: Frédéric Bernon, Simon Goldschmidt * */ #ifndef LWIP_HDR_APPS_SNTP_H #define LWIP_HDR_APPS_SNTP_H #include "lwip/apps/sntp_opts.h" #include "lwip/ip_addr.h" #ifdef __cplusplus extern "C" { #endif /* SNTP operating modes: default is to poll using unicast. The mode has to be set before calling sntp_init(). */ #define SNTP_OPMODE_POLL 0 #define SNTP_OPMODE_LISTENONLY 1 void sntp_setoperatingmode(u8_t operating_mode); u8_t sntp_getoperatingmode(void); void sntp_init(void); void sntp_stop(void); u8_t sntp_enabled(void); void sntp_setserver(u8_t idx, const ip_addr_t *addr); const ip_addr_t* sntp_getserver(u8_t idx); #if SNTP_SERVER_DNS void sntp_setservername(u8_t idx, char *server); char *sntp_getservername(u8_t idx); #endif /* SNTP_SERVER_DNS */ #if SNTP_GET_SERVERS_FROM_DHCP void sntp_servermode_dhcp(int set_servers_from_dhcp); #else /* SNTP_GET_SERVERS_FROM_DHCP */ #define sntp_servermode_dhcp(x) #endif /* SNTP_GET_SERVERS_FROM_DHCP */ #ifdef __cplusplus } #endif #endif /* LWIP_HDR_APPS_SNTP_H */
2301_81045437/classic-platform
communication/lwip-2.0.3/src/include/lwip/apps/sntp.h
C
unknown
2,712
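Typical SNTP startup with the API above; the server address is a placeholder (TEST-NET-1), and SNTP_SET_SYSTEM_TIME (see sntp_opts.h below) is assumed to be mapped to the application's clock.

#include "lwip/apps/sntp.h"

void my_sntp_start(void)
{
  ip_addr_t ntp_server;

  sntp_setoperatingmode(SNTP_OPMODE_POLL);   /* mode must be set before sntp_init() */
  if (ipaddr_aton("192.0.2.1", &ntp_server)) {
    sntp_setserver(0, &ntp_server);
  }
  sntp_init();
}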
/** * @file * SNTP client options list */ /* * Copyright (c) 2007-2009 Frédéric Bernon, Simon Goldschmidt * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY * OF SUCH DAMAGE. * * This file is part of the lwIP TCP/IP stack. * * Author: Frédéric Bernon, Simon Goldschmidt * */ #ifndef LWIP_HDR_APPS_SNTP_OPTS_H #define LWIP_HDR_APPS_SNTP_OPTS_H #include "lwip/opt.h" /** * @defgroup sntp_opts Options * @ingroup sntp * @{ */ /** SNTP macro to change system time in seconds * Define SNTP_SET_SYSTEM_TIME_US(sec, us) to set the time in microseconds instead of this one * if you need the additional precision. */ #if !defined SNTP_SET_SYSTEM_TIME || defined __DOXYGEN__ #define SNTP_SET_SYSTEM_TIME(sec) LWIP_UNUSED_ARG(sec) #endif /** The maximum number of SNTP servers that can be set */ #if !defined SNTP_MAX_SERVERS || defined __DOXYGEN__ #define SNTP_MAX_SERVERS LWIP_DHCP_MAX_NTP_SERVERS #endif /** Set this to 1 to implement the callback function called by dhcp when * NTP servers are received. */ #if !defined SNTP_GET_SERVERS_FROM_DHCP || defined __DOXYGEN__ #define SNTP_GET_SERVERS_FROM_DHCP LWIP_DHCP_GET_NTP_SRV #endif /** Set this to 1 to support DNS names (or IP address strings) to set sntp servers * One server address/name can be defined as default if SNTP_SERVER_DNS == 1: * \#define SNTP_SERVER_ADDRESS "pool.ntp.org" */ #if !defined SNTP_SERVER_DNS || defined __DOXYGEN__ #define SNTP_SERVER_DNS 0 #endif /** * SNTP_DEBUG: Enable debugging for SNTP. */ #if !defined SNTP_DEBUG || defined __DOXYGEN__ #define SNTP_DEBUG LWIP_DBG_OFF #endif /** SNTP server port */ #if !defined SNTP_PORT || defined __DOXYGEN__ #define SNTP_PORT 123 #endif /** Set this to 1 to allow config of SNTP server(s) by DNS name */ #if !defined SNTP_SERVER_DNS || defined __DOXYGEN__ #define SNTP_SERVER_DNS 0 #endif /** Sanity check: * Define this to * - 0 to turn off sanity checks (default; smaller code) * - >= 1 to check address and port of the response packet to ensure the * response comes from the server we sent the request to. * - >= 2 to check returned Originate Timestamp against Transmit Timestamp * sent to the server (to ensure response to older request). 
* - >= 3 @todo: discard reply if any of the LI, Stratum, or Transmit Timestamp * fields is 0 or the Mode field is not 4 (unicast) or 5 (broadcast). * - >= 4 @todo: to check that the Root Delay and Root Dispersion fields are each * greater than or equal to 0 and less than infinity, where infinity is * currently a cozy number like one second. This check avoids using a * server whose synchronization source has expired for a very long time. */ #if !defined SNTP_CHECK_RESPONSE || defined __DOXYGEN__ #define SNTP_CHECK_RESPONSE 0 #endif /** According to the RFC, this shall be a random delay * between 1 and 5 minutes (in milliseconds) to prevent load peaks. * This can be defined to a random generation function, * which must return the delay in milliseconds as u32_t. * Turned off by default. */ #if !defined SNTP_STARTUP_DELAY || defined __DOXYGEN__ #define SNTP_STARTUP_DELAY 0 #endif /** If you want the startup delay to be a function, define this * to a function (including the brackets) and define SNTP_STARTUP_DELAY to 1. */ #if !defined SNTP_STARTUP_DELAY_FUNC || defined __DOXYGEN__ #define SNTP_STARTUP_DELAY_FUNC SNTP_STARTUP_DELAY #endif /** SNTP receive timeout - in milliseconds * Also used as retry timeout - this shouldn't be too low. * Default is 3 seconds. */ #if !defined SNTP_RECV_TIMEOUT || defined __DOXYGEN__ #define SNTP_RECV_TIMEOUT 3000 #endif /** SNTP update delay - in milliseconds * Default is 1 hour. Must not be below 15 seconds by specification (i.e. 15000) */ #if !defined SNTP_UPDATE_DELAY || defined __DOXYGEN__ #define SNTP_UPDATE_DELAY 3600000 #endif /** SNTP macro to get system time, used with SNTP_CHECK_RESPONSE >= 2 * to send in request and compare in response. */ #if !defined SNTP_GET_SYSTEM_TIME || defined __DOXYGEN__ #define SNTP_GET_SYSTEM_TIME(sec, us) do { (sec) = 0; (us) = 0; } while(0) #endif /** Default retry timeout (in milliseconds) if the response * received is invalid. * This is doubled with each retry until SNTP_RETRY_TIMEOUT_MAX is reached. */ #if !defined SNTP_RETRY_TIMEOUT || defined __DOXYGEN__ #define SNTP_RETRY_TIMEOUT SNTP_RECV_TIMEOUT #endif /** Maximum retry timeout (in milliseconds). */ #if !defined SNTP_RETRY_TIMEOUT_MAX || defined __DOXYGEN__ #define SNTP_RETRY_TIMEOUT_MAX (SNTP_RETRY_TIMEOUT * 10) #endif /** Increase retry timeout with every retry sent * Default is on to conform to RFC. */ #if !defined SNTP_RETRY_TIMEOUT_EXP || defined __DOXYGEN__ #define SNTP_RETRY_TIMEOUT_EXP 1 #endif /** * @} */ #endif /* LWIP_HDR_APPS_SNTP_OPTS_H */
2301_81045437/classic-platform
communication/lwip-2.0.3/src/include/lwip/apps/sntp_opts.h
C
unknown
6,471
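As a hedged illustration of how an application might override the defaults documented above, the following lwipopts.h fragment tightens a few of the options; app_set_rtc_seconds() is an invented application hook, not part of lwIP, and the values are illustrative only.

/* Hypothetical lwipopts.h fragment -- values for illustration only. */
#define SNTP_SERVER_DNS            1           /* allow servers to be configured by DNS name */
#define SNTP_CHECK_RESPONSE        2           /* verify source addr/port and Originate Timestamp */
#define SNTP_UPDATE_DELAY          86400000    /* re-sync once per day; must stay >= 15000 */
#define SNTP_SET_SYSTEM_TIME(sec)  app_set_rtc_seconds(sec)  /* invented hook that writes the RTC */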
/****************************************************************//** * * @file tftp_opts.h * * @author Logan Gunthorpe <logang@deltatee.com> * * @brief Trivial File Transfer Protocol (RFC 1350) implementation options * * Copyright (c) Deltatee Enterprises Ltd. 2013 * All rights reserved. * ********************************************************************/ /* * Redistribution and use in source and binary forms, with or without * modification,are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * Author: Logan Gunthorpe <logang@deltatee.com> * */ #ifndef LWIP_HDR_APPS_TFTP_OPTS_H #define LWIP_HDR_APPS_TFTP_OPTS_H #include "lwip/opt.h" /** * @defgroup tftp_opts Options * @ingroup tftp * @{ */ /** * Enable TFTP debug messages */ #if !defined TFTP_DEBUG || defined __DOXYGEN__ #define TFTP_DEBUG LWIP_DBG_ON #endif /** * TFTP server port */ #if !defined TFTP_PORT || defined __DOXYGEN__ #define TFTP_PORT 69 #endif /** * TFTP timeout */ #if !defined TFTP_TIMEOUT_MSECS || defined __DOXYGEN__ #define TFTP_TIMEOUT_MSECS 10000 #endif /** * Max. number of retries when a file is read from server */ #if !defined TFTP_MAX_RETRIES || defined __DOXYGEN__ #define TFTP_MAX_RETRIES 5 #endif /** * TFTP timer cyclic interval */ #if !defined TFTP_TIMER_MSECS || defined __DOXYGEN__ #define TFTP_TIMER_MSECS 50 #endif /** * Max. length of TFTP filename */ #if !defined TFTP_MAX_FILENAME_LEN || defined __DOXYGEN__ #define TFTP_MAX_FILENAME_LEN 20 #endif /** * Max. length of TFTP mode */ #if !defined TFTP_MAX_MODE_LEN || defined __DOXYGEN__ #define TFTP_MAX_MODE_LEN 7 #endif /** * @} */ #endif /* LWIP_HDR_APPS_TFTP_OPTS_H */
2301_81045437/classic-platform
communication/lwip-2.0.3/src/include/lwip/apps/tftp_opts.h
C
unknown
3,088
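A small, assumption-laden example of adjusting these TFTP defaults from lwipopts.h (the values are illustrative, not recommendations):

#define TFTP_DEBUG             LWIP_DBG_OFF   /* quiet the server once it is working */
#define TFTP_TIMEOUT_MSECS     5000           /* shorter per-transfer timeout */
#define TFTP_MAX_RETRIES       3
#define TFTP_MAX_FILENAME_LEN  64             /* the default of 20 is easy to exceed */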
/****************************************************************//** * * @file tftp_server.h * * @author Logan Gunthorpe <logang@deltatee.com> * * @brief Trivial File Transfer Protocol (RFC 1350) * * Copyright (c) Deltatee Enterprises Ltd. 2013 * All rights reserved. * ********************************************************************/ /* * Redistribution and use in source and binary forms, with or without * modification,are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * Author: Logan Gunthorpe <logang@deltatee.com> * */ #ifndef LWIP_HDR_APPS_TFTP_SERVER_H #define LWIP_HDR_APPS_TFTP_SERVER_H #include "lwip/apps/tftp_opts.h" #include "lwip/err.h" #include "lwip/pbuf.h" #ifdef __cplusplus extern "C" { #endif /** @ingroup tftp * TFTP context containing callback functions for TFTP transfers */ struct tftp_context { /** * Open file for read/write. * @param fname Filename * @param mode Mode string from TFTP RFC 1350 (netascii, octet, mail) * @param write Flag indicating read (0) or write (!= 0) access * @returns File handle supplied to other functions */ void* (*open)(const char* fname, const char* mode, u8_t write); /** * Close file handle * @param handle File handle returned by open() */ void (*close)(void* handle); /** * Read from file * @param handle File handle returned by open() * @param buf Target buffer to copy read data to * @param bytes Number of bytes to copy to buf * @returns &gt;= 0: Success; &lt; 0: Error */ int (*read)(void* handle, void* buf, int bytes); /** * Write to file * @param handle File handle returned by open() * @param pbuf PBUF adjusted such that payload pointer points * to the beginning of write data. In other words, * TFTP headers are stripped off. * @returns &gt;= 0: Success; &lt; 0: Error */ int (*write)(void* handle, struct pbuf* p); }; err_t tftp_init(const struct tftp_context* ctx); #ifdef __cplusplus } #endif #endif /* LWIP_HDR_APPS_TFTP_SERVER_H */
2301_81045437/classic-platform
communication/lwip-2.0.3/src/include/lwip/apps/tftp_server.h
C
unknown
3,413
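A minimal sketch of a tftp_context backed by stdio, assuming the target actually has a stdio-style filesystem (most embedded ports will substitute their own storage layer); the my_tftp_* helper names are made up:

#include <stdio.h>
#include "lwip/apps/tftp_server.h"

static void *my_tftp_open(const char *fname, const char *mode, u8_t write)
{
  LWIP_UNUSED_ARG(mode);                       /* "octet"/"netascii" ignored in this sketch */
  return fopen(fname, write ? "wb" : "rb");    /* the FILE* becomes the opaque handle */
}

static void my_tftp_close(void *handle)
{
  fclose((FILE *)handle);
}

static int my_tftp_read(void *handle, void *buf, int bytes)
{
  return (int)fread(buf, 1, (size_t)bytes, (FILE *)handle);
}

static int my_tftp_write(void *handle, struct pbuf *p)
{
  /* TFTP headers are already stripped; walk the pbuf chain and store the payload. */
  for (; p != NULL; p = p->next) {
    if (fwrite(p->payload, 1, p->len, (FILE *)handle) != p->len) {
      return -1;
    }
  }
  return 0;
}

static const struct tftp_context my_tftp_ctx = {
  my_tftp_open, my_tftp_close, my_tftp_read, my_tftp_write
};

/* After lwIP is initialized: tftp_init(&my_tftp_ctx); */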
/** * @file * Support for different processor and compiler architectures */ /* * Copyright (c) 2001-2004 Swedish Institute of Computer Science. * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY * OF SUCH DAMAGE. * * This file is part of the lwIP TCP/IP stack. * * Author: Adam Dunkels <adam@sics.se> * */ #ifndef LWIP_HDR_ARCH_H #define LWIP_HDR_ARCH_H #ifndef LITTLE_ENDIAN #define LITTLE_ENDIAN 1234 #endif #ifndef BIG_ENDIAN #define BIG_ENDIAN 4321 #endif #include "arch/cc.h" /** * @defgroup compiler_abstraction Compiler/platform abstraction * @ingroup sys_layer * All defines related to this section must not be placed in lwipopts.h, * but in arch/cc.h! * These options cannot be \#defined in lwipopts.h since they are not options * of lwIP itself, but options of the lwIP port to your system. * @{ */ /** Define the byte order of the system. * Needed for conversion of network data to host byte order. * Allowed values: LITTLE_ENDIAN and BIG_ENDIAN */ #ifndef BYTE_ORDER #define BYTE_ORDER LITTLE_ENDIAN #endif /** Define random number generator function of your system */ #ifdef __DOXYGEN__ #define LWIP_RAND() ((u32_t)rand()) #endif /** Platform specific diagnostic output.\n * Note the default implementation pulls in printf, which may * in turn pull in a lot of standard libary code. In resource-constrained * systems, this should be defined to something less resource-consuming. */ #ifndef LWIP_PLATFORM_DIAG #define LWIP_PLATFORM_DIAG(x) do {printf x;} while(0) #include <stdio.h> #include <stdlib.h> #endif /** Platform specific assertion handling.\n * Note the default implementation pulls in printf, fflush and abort, which may * in turn pull in a lot of standard libary code. In resource-constrained * systems, this should be defined to something less resource-consuming. */ #ifndef LWIP_PLATFORM_ASSERT #define LWIP_PLATFORM_ASSERT(x) do {printf("Assertion \"%s\" failed at line %d in %s\n", \ x, __LINE__, __FILE__); fflush(NULL); abort();} while(0) #include <stdio.h> #include <stdlib.h> #endif /** Define this to 1 in arch/cc.h of your port if you do not want to * include stddef.h header to get size_t. You need to typedef size_t * by yourself in this case. 
*/ #ifndef LWIP_NO_STDDEF_H #define LWIP_NO_STDDEF_H 0 #endif #if !LWIP_NO_STDDEF_H #include <stddef.h> /* for size_t */ #endif /** Define this to 1 in arch/cc.h of your port if your compiler does not provide * the stdint.h header. You need to typedef the generic types listed in * lwip/arch.h yourself in this case (u8_t, u16_t...). */ #ifndef LWIP_NO_STDINT_H #define LWIP_NO_STDINT_H 0 #endif /* Define generic types used in lwIP */ #if !LWIP_NO_STDINT_H #include <stdint.h> typedef uint8_t u8_t; typedef int8_t s8_t; typedef uint16_t u16_t; typedef int16_t s16_t; typedef uint32_t u32_t; typedef int32_t s32_t; typedef uintptr_t mem_ptr_t; #endif /** Define this to 1 in arch/cc.h of your port if your compiler does not provide * the inttypes.h header. You need to define the format strings listed in * lwip/arch.h yourself in this case (X8_F, U16_F...). */ #ifndef LWIP_NO_INTTYPES_H #define LWIP_NO_INTTYPES_H 0 #endif /* Define (sn)printf formatters for these lwIP types */ #if !LWIP_NO_INTTYPES_H #include <inttypes.h> #ifndef X8_F #define X8_F "02" PRIx8 #endif #ifndef U16_F #define U16_F PRIu16 #endif #ifndef S16_F #define S16_F PRId16 #endif #ifndef X16_F #define X16_F PRIx16 #endif #ifndef U32_F #define U32_F PRIu32 #endif #ifndef S32_F #define S32_F PRId32 #endif #ifndef X32_F #define X32_F PRIx32 #endif #ifndef SZT_F #define SZT_F PRIuPTR #endif #endif /** Define this to 1 in arch/cc.h of your port if your compiler does not provide * the limits.h header. You need to define the type limits yourself in this case * (e.g. INT_MAX). */ #ifndef LWIP_NO_LIMITS_H #define LWIP_NO_LIMITS_H 0 #endif /* Include limits.h? */ #if !LWIP_NO_LIMITS_H #include <limits.h> #endif /** C++ const_cast<target_type>(val) equivalent to remove constness from a value (GCC -Wcast-qual) */ #ifndef LWIP_CONST_CAST #define LWIP_CONST_CAST(target_type, val) ((target_type)((ptrdiff_t)val)) #endif /** Get rid of alignment cast warnings (GCC -Wcast-align) */ #ifndef LWIP_ALIGNMENT_CAST #define LWIP_ALIGNMENT_CAST(target_type, val) LWIP_CONST_CAST(target_type, val) #endif /** Get rid of warnings related to pointer-to-numeric and vice-versa casts, * e.g. "conversion from 'u8_t' to 'void *' of greater size" */ #ifndef LWIP_PTR_NUMERIC_CAST #define LWIP_PTR_NUMERIC_CAST(target_type, val) LWIP_CONST_CAST(target_type, val) #endif /** Allocates a memory buffer of specified size that is of sufficient size to align * its start address using LWIP_MEM_ALIGN. * You can declare your own version here e.g. to enforce alignment without adding * trailing padding bytes (see LWIP_MEM_ALIGN_BUFFER) or your own section placement * requirements.\n * e.g. if you use gcc and need 32 bit alignment:\n * \#define LWIP_DECLARE_MEMORY_ALIGNED(variable_name, size) u8_t variable_name[size] \_\_attribute\_\_((aligned(4)))\n * or more portable:\n * \#define LWIP_DECLARE_MEMORY_ALIGNED(variable_name, size) u32_t variable_name[(size + sizeof(u32_t) - 1) / sizeof(u32_t)] */ #ifndef LWIP_DECLARE_MEMORY_ALIGNED #define LWIP_DECLARE_MEMORY_ALIGNED(variable_name, size) u8_t variable_name[LWIP_MEM_ALIGN_BUFFER(size)] #endif /** Calculate memory size for an aligned buffer - returns the next highest * multiple of MEM_ALIGNMENT (e.g. LWIP_MEM_ALIGN_SIZE(3) and * LWIP_MEM_ALIGN_SIZE(4) will both yield 4 for MEM_ALIGNMENT == 4). */ #ifndef LWIP_MEM_ALIGN_SIZE #define LWIP_MEM_ALIGN_SIZE(size) (((size) + MEM_ALIGNMENT - 1U) & ~(MEM_ALIGNMENT-1U)) #endif /** Calculate safe memory size for an aligned buffer when using an unaligned * type as storage. 
This includes a safety-margin on (MEM_ALIGNMENT - 1) at the * start (e.g. if buffer is u8_t[] and actual data will be u32_t*) */ #ifndef LWIP_MEM_ALIGN_BUFFER #define LWIP_MEM_ALIGN_BUFFER(size) (((size) + MEM_ALIGNMENT - 1U)) #endif /** Align a memory pointer to the alignment defined by MEM_ALIGNMENT * so that ADDR % MEM_ALIGNMENT == 0 */ #ifndef LWIP_MEM_ALIGN #define LWIP_MEM_ALIGN(addr) ((void *)(((mem_ptr_t)(addr) + MEM_ALIGNMENT - 1) & ~(mem_ptr_t)(MEM_ALIGNMENT-1))) #endif #ifdef __cplusplus extern "C" { #endif /** Packed structs support. * Placed BEFORE declaration of a packed struct.\n * For examples of packed struct declarations, see include/lwip/prot/ subfolder.\n * A port to GCC/clang is included in lwIP, if you use these compilers there is nothing to do here. */ #ifndef PACK_STRUCT_BEGIN #define PACK_STRUCT_BEGIN #endif /* PACK_STRUCT_BEGIN */ /** Packed structs support. * Placed AFTER declaration of a packed struct.\n * For examples of packed struct declarations, see include/lwip/prot/ subfolder.\n * A port to GCC/clang is included in lwIP, if you use these compilers there is nothing to do here. */ #ifndef PACK_STRUCT_END #define PACK_STRUCT_END #endif /* PACK_STRUCT_END */ /** Packed structs support. * Placed between end of declaration of a packed struct and trailing semicolon.\n * For examples of packed struct declarations, see include/lwip/prot/ subfolder.\n * A port to GCC/clang is included in lwIP, if you use these compilers there is nothing to do here. */ #ifndef PACK_STRUCT_STRUCT #if defined(__GNUC__) || defined(__clang__) #define PACK_STRUCT_STRUCT __attribute__((packed)) #else #define PACK_STRUCT_STRUCT #endif #endif /* PACK_STRUCT_STRUCT */ /** Packed structs support. * Wraps u32_t and u16_t members.\n * For examples of packed struct declarations, see include/lwip/prot/ subfolder.\n * A port to GCC/clang is included in lwIP, if you use these compilers there is nothing to do here. */ #ifndef PACK_STRUCT_FIELD #define PACK_STRUCT_FIELD(x) x #endif /* PACK_STRUCT_FIELD */ /** Packed structs support. * Wraps u8_t members, where some compilers warn that packing is not necessary.\n * For examples of packed struct declarations, see include/lwip/prot/ subfolder.\n * A port to GCC/clang is included in lwIP, if you use these compilers there is nothing to do here. */ #ifndef PACK_STRUCT_FLD_8 #define PACK_STRUCT_FLD_8(x) PACK_STRUCT_FIELD(x) #endif /* PACK_STRUCT_FLD_8 */ /** Packed structs support. * Wraps members that are packed structs themselves, where some compilers warn that packing is not necessary.\n * For examples of packed struct declarations, see include/lwip/prot/ subfolder.\n * A port to GCC/clang is included in lwIP, if you use these compilers there is nothing to do here. */ #ifndef PACK_STRUCT_FLD_S #define PACK_STRUCT_FLD_S(x) PACK_STRUCT_FIELD(x) #endif /* PACK_STRUCT_FLD_S */ /** Packed structs support using \#include files before and after struct to be packed.\n * The file included BEFORE the struct is "arch/bpstruct.h".\n * The file included AFTER the struct is "arch/epstruct.h".\n * This can be used to implement struct packing on MS Visual C compilers, see * the Win32 port in the lwIP contrib repository for reference. * For examples of packed struct declarations, see include/lwip/prot/ subfolder.\n * A port to GCC/clang is included in lwIP, if you use these compilers there is nothing to do here. */ #ifdef __DOXYGEN__ #define PACK_STRUCT_USE_INCLUDES #endif /** Eliminates compiler warning about unused arguments (GCC -Wextra -Wunused). 
*/ #ifndef LWIP_UNUSED_ARG #define LWIP_UNUSED_ARG(x) (void)x #endif /* LWIP_UNUSED_ARG */ /** * @} */ #ifdef __cplusplus } #endif #endif /* LWIP_HDR_ARCH_H */
2301_81045437/classic-platform
communication/lwip-2.0.3/src/include/lwip/arch.h
C
unknown
11,283
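To make the packed-struct machinery concrete, here is a sketch of how a wire-format struct is typically declared with these macros, mirroring the include/lwip/prot/ headers the comments point to; struct example_hdr itself is invented:

#include "lwip/arch.h"

#ifdef PACK_STRUCT_USE_INCLUDES
#  include "arch/bpstruct.h"
#endif
PACK_STRUCT_BEGIN
struct example_hdr {
  PACK_STRUCT_FLD_8(u8_t  type);     /* u8_t members use the _FLD_8 wrapper */
  PACK_STRUCT_FLD_8(u8_t  flags);
  PACK_STRUCT_FIELD(u16_t length);   /* wider members use PACK_STRUCT_FIELD */
  PACK_STRUCT_FIELD(u32_t seqno);
} PACK_STRUCT_STRUCT;
PACK_STRUCT_END
#ifdef PACK_STRUCT_USE_INCLUDES
#  include "arch/epstruct.h"
#endif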
/** * @file * * AutoIP Automatic LinkLocal IP Configuration */ /* * * Copyright (c) 2007 Dominik Spies <kontakt@dspies.de> * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY * OF SUCH DAMAGE. * * Author: Dominik Spies <kontakt@dspies.de> * * This is a AutoIP implementation for the lwIP TCP/IP stack. It aims to conform * with RFC 3927. * */ #ifndef LWIP_HDR_AUTOIP_H #define LWIP_HDR_AUTOIP_H #include "lwip/opt.h" #if LWIP_IPV4 && LWIP_AUTOIP /* don't build if not configured for use in lwipopts.h */ #include "lwip/netif.h" /* #include "lwip/udp.h" */ #include "lwip/etharp.h" #ifdef __cplusplus extern "C" { #endif /** AutoIP Timing */ #define AUTOIP_TMR_INTERVAL 100 #define AUTOIP_TICKS_PER_SECOND (1000 / AUTOIP_TMR_INTERVAL) /** AutoIP state information per netif */ struct autoip { /** the currently selected, probed, announced or used LL IP-Address */ ip4_addr_t llipaddr; /** current AutoIP state machine state */ u8_t state; /** sent number of probes or announces, dependent on state */ u8_t sent_num; /** ticks to wait, tick is AUTOIP_TMR_INTERVAL long */ u16_t ttw; /** ticks until a conflict can be solved by defending */ u8_t lastconflict; /** total number of probed/used Link Local IP-Addresses */ u8_t tried_llipaddr; }; void autoip_set_struct(struct netif *netif, struct autoip *autoip); /** Remove a struct autoip previously set to the netif using autoip_set_struct() */ #define autoip_remove_struct(netif) do { (netif)->autoip = NULL; } while (0) err_t autoip_start(struct netif *netif); err_t autoip_stop(struct netif *netif); void autoip_arp_reply(struct netif *netif, struct etharp_hdr *hdr); void autoip_tmr(void); void autoip_network_changed(struct netif *netif); u8_t autoip_supplied_address(const struct netif *netif); /* for lwIP internal use by ip4.c */ u8_t autoip_accept_packet(struct netif *netif, const ip4_addr_t *addr); #define netif_autoip_data(netif) ((struct autoip*)netif_get_client_data(netif, LWIP_NETIF_CLIENT_DATA_INDEX_AUTOIP)) #ifdef __cplusplus } #endif #endif /* LWIP_IPV4 && LWIP_AUTOIP */ #endif /* LWIP_HDR_AUTOIP_H */
2301_81045437/classic-platform
communication/lwip-2.0.3/src/include/lwip/autoip.h
C
unknown
3,583
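A brief usage sketch, assuming LWIP_IPV4 and LWIP_AUTOIP are enabled and that eth0 is a netif that has already been added and brought up:

#include "lwip/autoip.h"
#include "lwip/netif.h"

static struct autoip autoip_data;

void start_linklocal(struct netif *eth0)
{
  /* Optional: supply static storage instead of letting autoip_start() allocate. */
  autoip_set_struct(eth0, &autoip_data);
  autoip_start(eth0);   /* begins probing for a 169.254.x.x link-local address */
}

/* autoip_supplied_address(eth0) becomes non-zero once the address is in use;
 * autoip_tmr() must run every AUTOIP_TMR_INTERVAL ms, which the stack's
 * cyclic timers normally take care of. */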
/** * @file * Debug messages infrastructure */ /* * Copyright (c) 2001-2004 Swedish Institute of Computer Science. * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY * OF SUCH DAMAGE. * * This file is part of the lwIP TCP/IP stack. * * Author: Adam Dunkels <adam@sics.se> * */ #ifndef LWIP_HDR_DEBUG_H #define LWIP_HDR_DEBUG_H #include "lwip/arch.h" #include "lwip/opt.h" /** * @defgroup debugging_levels LWIP_DBG_MIN_LEVEL and LWIP_DBG_TYPES_ON values * @ingroup lwip_opts_debugmsg * @{ */ /** @name Debug level (LWIP_DBG_MIN_LEVEL) * @{ */ /** Debug level: ALL messages*/ #define LWIP_DBG_LEVEL_ALL 0x00 /** Debug level: Warnings. bad checksums, dropped packets, ... */ #define LWIP_DBG_LEVEL_WARNING 0x01 /** Debug level: Serious. memory allocation failures, ... */ #define LWIP_DBG_LEVEL_SERIOUS 0x02 /** Debug level: Severe */ #define LWIP_DBG_LEVEL_SEVERE 0x03 /** * @} */ #define LWIP_DBG_MASK_LEVEL 0x03 /* compatibility define only */ #define LWIP_DBG_LEVEL_OFF LWIP_DBG_LEVEL_ALL /** @name Enable/disable debug messages completely (LWIP_DBG_TYPES_ON) * @{ */ /** flag for LWIP_DEBUGF to enable that debug message */ #define LWIP_DBG_ON 0x80U /** flag for LWIP_DEBUGF to disable that debug message */ #define LWIP_DBG_OFF 0x00U /** * @} */ /** @name Debug message types (LWIP_DBG_TYPES_ON) * @{ */ /** flag for LWIP_DEBUGF indicating a tracing message (to follow program flow) */ #define LWIP_DBG_TRACE 0x40U /** flag for LWIP_DEBUGF indicating a state debug message (to follow module states) */ #define LWIP_DBG_STATE 0x20U /** flag for LWIP_DEBUGF indicating newly added code, not thoroughly tested yet */ #define LWIP_DBG_FRESH 0x10U /** flag for LWIP_DEBUGF to halt after printing this debug message */ #define LWIP_DBG_HALT 0x08U /** * @} */ /** * @} */ /** * @defgroup lwip_assertions Assertion handling * @ingroup lwip_opts_debug * @{ */ /** * LWIP_NOASSERT: Disable LWIP_ASSERT checks: * To disable assertions define LWIP_NOASSERT in arch/cc.h. 
*/ #ifdef __DOXYGEN__ #define LWIP_NOASSERT #undef LWIP_NOASSERT #endif /** * @} */ #ifndef LWIP_NOASSERT #define LWIP_ASSERT(message, assertion) do { if (!(assertion)) { \ LWIP_PLATFORM_ASSERT(message); }} while(0) #ifndef LWIP_PLATFORM_ASSERT #error "If you want to use LWIP_ASSERT, LWIP_PLATFORM_ASSERT(message) needs to be defined in your arch/cc.h" #endif #else /* LWIP_NOASSERT */ #define LWIP_ASSERT(message, assertion) #endif /* LWIP_NOASSERT */ #ifndef LWIP_ERROR #ifndef LWIP_NOASSERT #define LWIP_PLATFORM_ERROR(message) LWIP_PLATFORM_ASSERT(message) #elif defined LWIP_DEBUG #define LWIP_PLATFORM_ERROR(message) LWIP_PLATFORM_DIAG((message)) #else #define LWIP_PLATFORM_ERROR(message) #endif /* if "expression" isn't true, then print "message" and execute "handler" expression */ #define LWIP_ERROR(message, expression, handler) do { if (!(expression)) { \ LWIP_PLATFORM_ERROR(message); handler;}} while(0) #endif /* LWIP_ERROR */ /** Enable debug message printing, but only if debug message type is enabled * AND is of correct type AND is at least LWIP_DBG_LEVEL. */ #ifdef __DOXYGEN__ #define LWIP_DEBUG #undef LWIP_DEBUG #endif #ifdef LWIP_DEBUG #ifndef LWIP_PLATFORM_DIAG #error "If you want to use LWIP_DEBUG, LWIP_PLATFORM_DIAG(message) needs to be defined in your arch/cc.h" #endif #define LWIP_DEBUGF(debug, message) do { \ if ( \ ((debug) & LWIP_DBG_ON) && \ ((debug) & LWIP_DBG_TYPES_ON) && \ ((s16_t)((debug) & LWIP_DBG_MASK_LEVEL) >= LWIP_DBG_MIN_LEVEL)) { \ LWIP_PLATFORM_DIAG(message); \ if ((debug) & LWIP_DBG_HALT) { \ while(1); \ } \ } \ } while(0) #else /* LWIP_DEBUG */ #define LWIP_DEBUGF(debug, message) #endif /* LWIP_DEBUG */ #endif /* LWIP_HDR_DEBUG_H */
2301_81045437/classic-platform
communication/lwip-2.0.3/src/include/lwip/debug.h
C
unknown
5,635
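A hedged example of wiring this up from lwipopts.h and emitting a message; DHCP_DEBUG is one of the per-module debug switches defined in lwip/opt.h:

/* lwipopts.h fragment (illustrative): */
#define LWIP_DEBUG
#define LWIP_DBG_MIN_LEVEL  LWIP_DBG_LEVEL_ALL
#define LWIP_DBG_TYPES_ON   (LWIP_DBG_ON | LWIP_DBG_TRACE | LWIP_DBG_STATE)
#define DHCP_DEBUG          LWIP_DBG_ON

/* In code, a message is then emitted through the macro documented above: */
LWIP_DEBUGF(DHCP_DEBUG | LWIP_DBG_TRACE, ("dhcp_start(netif=%p)\n", (void *)netif));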
/** * @file * various utility macros */ /* * Copyright (c) 2001-2004 Swedish Institute of Computer Science. * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY * OF SUCH DAMAGE. * * This file is part of the lwIP TCP/IP stack. * * Author: Adam Dunkels <adam@sics.se> * */ #ifndef LWIP_HDR_DEF_H #define LWIP_HDR_DEF_H /* arch.h might define NULL already */ #include "lwip/arch.h" #include "lwip/opt.h" #if LWIP_PERF #include "arch/perf.h" #else /* LWIP_PERF */ #define PERF_START /* null definition */ #define PERF_STOP(x) /* null definition */ #endif /* LWIP_PERF */ #ifdef __cplusplus extern "C" { #endif #define LWIP_MAX(x , y) (((x) > (y)) ? (x) : (y)) #define LWIP_MIN(x , y) (((x) < (y)) ? (x) : (y)) /* Get the number of entries in an array ('x' must NOT be a pointer!) */ #define LWIP_ARRAYSIZE(x) (sizeof(x)/sizeof((x)[0])) /** Create u32_t value from bytes */ #define LWIP_MAKEU32(a,b,c,d) (((u32_t)((a) & 0xff) << 24) | \ ((u32_t)((b) & 0xff) << 16) | \ ((u32_t)((c) & 0xff) << 8) | \ (u32_t)((d) & 0xff)) #ifndef NULL #ifdef __cplusplus #define NULL 0 #else #define NULL ((void *)0) #endif #endif #if BYTE_ORDER == BIG_ENDIAN #define lwip_htons(x) (x) #define lwip_ntohs(x) (x) #define lwip_htonl(x) (x) #define lwip_ntohl(x) (x) #define PP_HTONS(x) (x) #define PP_NTOHS(x) (x) #define PP_HTONL(x) (x) #define PP_NTOHL(x) (x) #else /* BYTE_ORDER != BIG_ENDIAN */ #ifndef lwip_htons u16_t lwip_htons(u16_t x); #endif #define lwip_ntohs(x) lwip_htons(x) #ifndef lwip_htonl u32_t lwip_htonl(u32_t x); #endif #define lwip_ntohl(x) lwip_htonl(x) /* These macros should be calculated by the preprocessor and are used with compile-time constants only (so that there is no little-endian overhead at runtime). 
*/ #define PP_HTONS(x) ((((x) & 0x00ffUL) << 8) | (((x) & 0xff00UL) >> 8)) #define PP_NTOHS(x) PP_HTONS(x) #define PP_HTONL(x) ((((x) & 0x000000ffUL) << 24) | \ (((x) & 0x0000ff00UL) << 8) | \ (((x) & 0x00ff0000UL) >> 8) | \ (((x) & 0xff000000UL) >> 24)) #define PP_NTOHL(x) PP_HTONL(x) #endif /* BYTE_ORDER == BIG_ENDIAN */ /* Provide usual function names as macros for users, but this can be turned off */ #ifndef LWIP_DONT_PROVIDE_BYTEORDER_FUNCTIONS #define htons(x) lwip_htons(x) #define ntohs(x) lwip_ntohs(x) #define htonl(x) lwip_htonl(x) #define ntohl(x) lwip_ntohl(x) #endif /* Functions that are not available as standard implementations. * In cc.h, you can #define these to implementations available on * your platform to save some code bytes if you use these functions * in your application, too. */ #ifndef lwip_itoa /* This can be #defined to itoa() or snprintf(result, bufsize, "%d", number) depending on your platform */ void lwip_itoa(char* result, size_t bufsize, int number); #endif #ifndef lwip_strnicmp /* This can be #defined to strnicmp() or strncasecmp() depending on your platform */ int lwip_strnicmp(const char* str1, const char* str2, size_t len); #endif #ifndef lwip_stricmp /* This can be #defined to stricmp() or strcasecmp() depending on your platform */ int lwip_stricmp(const char* str1, const char* str2); #endif #ifndef lwip_strnstr /* This can be #defined to strnstr() depending on your platform */ char* lwip_strnstr(const char* buffer, const char* token, size_t n); #endif #ifdef __cplusplus } #endif #endif /* LWIP_HDR_DEF_H */
2301_81045437/classic-platform
communication/lwip-2.0.3/src/include/lwip/def.h
C
unknown
4,984
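To illustrate the byte-order helpers: PP_HTONS/PP_HTONL are pure preprocessor arithmetic and therefore usable wherever a compile-time constant is required, while lwip_htons()/lwip_htonl() are the run-time equivalents.

#include "lwip/def.h"

static const u16_t http_port_net = PP_HTONS(80);             /* usable as a constant initializer */
static const u32_t all_hosts_net = PP_HTONL(0xE0000001UL);   /* 224.0.0.1 in network byte order */

u16_t to_network_order(u16_t host_port)
{
  return lwip_htons(host_port);                               /* run-time conversion */
}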
/** * @file * DHCP client API */ /* * Copyright (c) 2001-2004 Leon Woestenberg <leon.woestenberg@gmx.net> * Copyright (c) 2001-2004 Axon Digital Design B.V., The Netherlands. * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY * OF SUCH DAMAGE. * * This file is part of the lwIP TCP/IP stack. * * Author: Leon Woestenberg <leon.woestenberg@gmx.net> * */ #ifndef LWIP_HDR_DHCP_H #define LWIP_HDR_DHCP_H #include "lwip/opt.h" #if LWIP_DHCP /* don't build if not configured for use in lwipopts.h */ #include "lwip/netif.h" #include "lwip/udp.h" #ifdef __cplusplus extern "C" { #endif /** period (in seconds) of the application calling dhcp_coarse_tmr() */ #define DHCP_COARSE_TIMER_SECS 60 /** period (in milliseconds) of the application calling dhcp_coarse_tmr() */ #define DHCP_COARSE_TIMER_MSECS (DHCP_COARSE_TIMER_SECS * 1000UL) /** period (in milliseconds) of the application calling dhcp_fine_tmr() */ #define DHCP_FINE_TIMER_MSECS 500 #define DHCP_BOOT_FILE_LEN 128U /* AutoIP cooperation flags (struct dhcp.autoip_coop_state) */ typedef enum { DHCP_AUTOIP_COOP_STATE_OFF = 0, DHCP_AUTOIP_COOP_STATE_ON = 1 } dhcp_autoip_coop_state_enum_t; struct dhcp { /** transaction identifier of last sent request */ u32_t xid; /** incoming msg */ struct dhcp_msg *msg_in; /** track PCB allocation state */ u8_t pcb_allocated; /** current DHCP state machine state */ u8_t state; /** retries of current request */ u8_t tries; #if LWIP_DHCP_AUTOIP_COOP u8_t autoip_coop_state; #endif u8_t subnet_mask_given; struct pbuf *p_out; /* pbuf of outcoming msg */ struct dhcp_msg *msg_out; /* outgoing msg */ u16_t options_out_len; /* outgoing msg options length */ u16_t request_timeout; /* #ticks with period DHCP_FINE_TIMER_SECS for request timeout */ u16_t t1_timeout; /* #ticks with period DHCP_COARSE_TIMER_SECS for renewal time */ u16_t t2_timeout; /* #ticks with period DHCP_COARSE_TIMER_SECS for rebind time */ u16_t t1_renew_time; /* #ticks with period DHCP_COARSE_TIMER_SECS until next renew try */ u16_t t2_rebind_time; /* #ticks with period DHCP_COARSE_TIMER_SECS until next rebind try */ u16_t lease_used; /* #ticks with period DHCP_COARSE_TIMER_SECS since last received DHCP ack */ u16_t t0_timeout; /* #ticks with period 
DHCP_COARSE_TIMER_SECS for lease time */ ip_addr_t server_ip_addr; /* dhcp server address that offered this lease (ip_addr_t because passed to UDP) */ ip4_addr_t offered_ip_addr; ip4_addr_t offered_sn_mask; ip4_addr_t offered_gw_addr; u32_t offered_t0_lease; /* lease period (in seconds) */ u32_t offered_t1_renew; /* recommended renew time (usually 50% of lease period) */ u32_t offered_t2_rebind; /* recommended rebind time (usually 87.5% of lease period) */ #if LWIP_DHCP_BOOTP_FILE ip4_addr_t offered_si_addr; char boot_file_name[DHCP_BOOT_FILE_LEN]; #endif /* LWIP_DHCP_BOOTP_FILE */ }; void dhcp_set_struct(struct netif *netif, struct dhcp *dhcp); /** Remove a struct dhcp previously set to the netif using dhcp_set_struct() */ #define dhcp_remove_struct(netif) netif_set_client_data(netif, LWIP_NETIF_CLIENT_DATA_INDEX_DHCP, NULL) void dhcp_cleanup(struct netif *netif); err_t dhcp_start(struct netif *netif); err_t dhcp_renew(struct netif *netif); err_t dhcp_release(struct netif *netif); void dhcp_stop(struct netif *netif); void dhcp_inform(struct netif *netif); void dhcp_network_changed(struct netif *netif); #if DHCP_DOES_ARP_CHECK void dhcp_arp_reply(struct netif *netif, const ip4_addr_t *addr); #endif u8_t dhcp_supplied_address(const struct netif *netif); /* to be called every minute */ void dhcp_coarse_tmr(void); /* to be called every half second */ void dhcp_fine_tmr(void); #if LWIP_DHCP_GET_NTP_SRV /** This function must exist, in order to add offered NTP servers to * the NTP (or SNTP) engine. * See LWIP_DHCP_MAX_NTP_SERVERS */ extern void dhcp_set_ntp_servers(u8_t num_ntp_servers, const ip4_addr_t* ntp_server_addrs); #endif /* LWIP_DHCP_GET_NTP_SRV */ #define netif_dhcp_data(netif) ((struct dhcp*)netif_get_client_data(netif, LWIP_NETIF_CLIENT_DATA_INDEX_DHCP)) #ifdef __cplusplus } #endif #endif /* LWIP_DHCP */ #endif /*LWIP_HDR_DHCP_H*/
2301_81045437/classic-platform
communication/lwip-2.0.3/src/include/lwip/dhcp.h
C
unknown
5,698
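A minimal client-side sketch, assuming eth0 is a netif that has already been added and set up; error handling is elided:

#include "lwip/dhcp.h"
#include "lwip/netif.h"

void start_dhcp(struct netif *eth0)
{
  if (dhcp_start(eth0) != ERR_OK) {
    /* allocation failed - retry later or fall back to AUTOIP / static config */
  }
}

int have_lease(struct netif *eth0)
{
  /* Non-zero once a DHCP-supplied address is actually in use on the netif. */
  return dhcp_supplied_address(eth0);
}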
/** * @file * * IPv6 address autoconfiguration as per RFC 4862. */ /* * Copyright (c) 2010 Inico Technologies Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY * OF SUCH DAMAGE. * * This file is part of the lwIP TCP/IP stack. * * Author: Ivan Delamer <delamer@inicotech.com> * * IPv6 address autoconfiguration as per RFC 4862. * * Please coordinate changes and requests with Ivan Delamer * <delamer@inicotech.com> */ #ifndef LWIP_HDR_IP6_DHCP6_H #define LWIP_HDR_IP6_DHCP6_H #include "lwip/opt.h" #if LWIP_IPV6_DHCP6 /* don't build if not configured for use in lwipopts.h */ struct dhcp6 { /*@todo: implement DHCP6*/ }; #endif /* LWIP_IPV6_DHCP6 */ #endif /* LWIP_HDR_IP6_DHCP6_H */
2301_81045437/classic-platform
communication/lwip-2.0.3/src/include/lwip/dhcp6.h
C
unknown
2,130
/** * @file * DNS API */ /** * lwip DNS resolver header file. * Author: Jim Pettinato * April 2007 * ported from uIP resolv.c Copyright (c) 2002-2003, Adam Dunkels. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote * products derived from this software without specific prior * written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef LWIP_HDR_DNS_H #define LWIP_HDR_DNS_H #include "lwip/opt.h" #if LWIP_DNS #include "lwip/ip_addr.h" #ifdef __cplusplus extern "C" { #endif /** DNS timer period */ #define DNS_TMR_INTERVAL 1000 /* DNS resolve types: */ #define LWIP_DNS_ADDRTYPE_IPV4 0 #define LWIP_DNS_ADDRTYPE_IPV6 1 #define LWIP_DNS_ADDRTYPE_IPV4_IPV6 2 /* try to resolve IPv4 first, try IPv6 if IPv4 fails only */ #define LWIP_DNS_ADDRTYPE_IPV6_IPV4 3 /* try to resolve IPv6 first, try IPv4 if IPv6 fails only */ #if LWIP_IPV4 && LWIP_IPV6 #ifndef LWIP_DNS_ADDRTYPE_DEFAULT #define LWIP_DNS_ADDRTYPE_DEFAULT LWIP_DNS_ADDRTYPE_IPV4_IPV6 #endif #elif LWIP_IPV4 #define LWIP_DNS_ADDRTYPE_DEFAULT LWIP_DNS_ADDRTYPE_IPV4 #else #define LWIP_DNS_ADDRTYPE_DEFAULT LWIP_DNS_ADDRTYPE_IPV6 #endif #if DNS_LOCAL_HOSTLIST /** struct used for local host-list */ struct local_hostlist_entry { /** static hostname */ const char *name; /** static host address in network byteorder */ ip_addr_t addr; struct local_hostlist_entry *next; }; #define DNS_LOCAL_HOSTLIST_ELEM(name, addr_init) {name, addr_init, NULL} #if DNS_LOCAL_HOSTLIST_IS_DYNAMIC #ifndef DNS_LOCAL_HOSTLIST_MAX_NAMELEN #define DNS_LOCAL_HOSTLIST_MAX_NAMELEN DNS_MAX_NAME_LENGTH #endif #define LOCALHOSTLIST_ELEM_SIZE ((sizeof(struct local_hostlist_entry) + DNS_LOCAL_HOSTLIST_MAX_NAMELEN + 1)) #endif /* DNS_LOCAL_HOSTLIST_IS_DYNAMIC */ #endif /* DNS_LOCAL_HOSTLIST */ #if LWIP_IPV4 extern const ip_addr_t dns_mquery_v4group; #endif /* LWIP_IPV4 */ #if LWIP_IPV6 extern const ip_addr_t dns_mquery_v6group; #endif /* LWIP_IPV6 */ /** Callback which is invoked when a hostname is found. * A function of this type must be implemented by the application using the DNS resolver. * @param name pointer to the name that was looked up. * @param ipaddr pointer to an ip_addr_t containing the IP address of the hostname, * or NULL if the name could not be found (or on any other error). 
* @param callback_arg a user-specified callback argument passed to dns_gethostbyname */ typedef void (*dns_found_callback)(const char *name, const ip_addr_t *ipaddr, void *callback_arg); void dns_init(void); void dns_tmr(void); void dns_setserver(u8_t numdns, const ip_addr_t *dnsserver); const ip_addr_t* dns_getserver(u8_t numdns); err_t dns_gethostbyname(const char *hostname, ip_addr_t *addr, dns_found_callback found, void *callback_arg); err_t dns_gethostbyname_addrtype(const char *hostname, ip_addr_t *addr, dns_found_callback found, void *callback_arg, u8_t dns_addrtype); #if DNS_LOCAL_HOSTLIST size_t dns_local_iterate(dns_found_callback iterator_fn, void *iterator_arg); err_t dns_local_lookup(const char *hostname, ip_addr_t *addr, u8_t dns_addrtype); #if DNS_LOCAL_HOSTLIST_IS_DYNAMIC int dns_local_removehost(const char *hostname, const ip_addr_t *addr); err_t dns_local_addhost(const char *hostname, const ip_addr_t *addr); #endif /* DNS_LOCAL_HOSTLIST_IS_DYNAMIC */ #endif /* DNS_LOCAL_HOSTLIST */ #ifdef __cplusplus } #endif #endif /* LWIP_DNS */ #endif /* LWIP_HDR_DNS_H */
2301_81045437/classic-platform
communication/lwip-2.0.3/src/include/lwip/dns.h
C
unknown
4,981
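A short usage sketch; dns_gethostbyname() is called from the tcpip/core context and returns ERR_OK when the answer is already available, or ERR_INPROGRESS when a query had to be sent (in which case the callback fires later):

#include "lwip/dns.h"
#include "lwip/ip_addr.h"

static void on_dns_found(const char *name, const ip_addr_t *ipaddr, void *arg)
{
  LWIP_UNUSED_ARG(name);
  LWIP_UNUSED_ARG(arg);
  if (ipaddr != NULL) {
    /* resolved: connect to *ipaddr ... */
  } else {
    /* lookup failed */
  }
}

void resolve_example(void)
{
  ip_addr_t addr;
  err_t err = dns_gethostbyname("example.org", &addr, on_dns_found, NULL);
  if (err == ERR_OK) {
    /* answer was cached (or the name was a literal address): addr is valid now */
  } else if (err == ERR_INPROGRESS) {
    /* query sent; on_dns_found() will be invoked when it completes */
  }
}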
/** * @file * lwIP Error codes */ /* * Copyright (c) 2001-2004 Swedish Institute of Computer Science. * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY * OF SUCH DAMAGE. * * This file is part of the lwIP TCP/IP stack. * * Author: Adam Dunkels <adam@sics.se> * */ #ifndef LWIP_HDR_ERR_H #define LWIP_HDR_ERR_H #include "lwip/opt.h" #include "lwip/arch.h" #ifdef __cplusplus extern "C" { #endif /** * @defgroup infrastructure_errors Error codes * @ingroup infrastructure * @{ */ /** Define LWIP_ERR_T in cc.h if you want to use * a different type for your platform (must be signed). */ #ifdef LWIP_ERR_T typedef LWIP_ERR_T err_t; #else /* LWIP_ERR_T */ typedef s8_t err_t; #endif /* LWIP_ERR_T*/ /** Definitions for error constants. */ typedef enum { /** No error, everything OK. */ ERR_OK = 0, /** Out of memory error. */ ERR_MEM = -1, /** Buffer error. */ ERR_BUF = -2, /** Timeout. */ ERR_TIMEOUT = -3, /** Routing problem. */ ERR_RTE = -4, /** Operation in progress */ ERR_INPROGRESS = -5, /** Illegal value. */ ERR_VAL = -6, /** Operation would block. */ ERR_WOULDBLOCK = -7, /** Address in use. */ ERR_USE = -8, /** Already connecting. */ ERR_ALREADY = -9, /** Conn already established.*/ ERR_ISCONN = -10, /** Not connected. */ ERR_CONN = -11, /** Low-level netif error */ ERR_IF = -12, /** Connection aborted. */ ERR_ABRT = -13, /** Connection reset. */ ERR_RST = -14, /** Connection closed. */ ERR_CLSD = -15, /** Illegal argument. */ ERR_ARG = -16 } err_enum_t; #define ERR_IS_FATAL(e) ((e) <= ERR_ABRT) /** * @} */ #ifdef LWIP_DEBUG extern const char *lwip_strerr(err_t err); #else #define lwip_strerr(x) "" #endif /* LWIP_DEBUG */ #if !NO_SYS int err_to_errno(err_t err); #endif /* !NO_SYS */ #ifdef __cplusplus } #endif #endif /* LWIP_HDR_ERR_H */
2301_81045437/classic-platform
communication/lwip-2.0.3/src/include/lwip/err.h
C
unknown
3,499
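One way an application might act on these codes; this is only a sketch of the common convention that fatal errors (ERR_ABRT and below) mean the connection is gone, while the others can usually be retried:

#include "lwip/err.h"

static void handle_err(err_t err)
{
  if (err == ERR_OK) {
    return;
  }
  if (ERR_IS_FATAL(err)) {
    /* e.g. ERR_ABRT/ERR_RST: drop all references to the pcb/connection */
  } else {
    /* e.g. ERR_MEM: back off and try again later */
  }
}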