---------------------
PatchSet 11179
Date: 2007/01/21 12:53:55
Author: adrian
Branch: HEAD
Tag: (none)
Log:
The first cut of my general code tidyups - specifically HTTP parser/client side

There are a lot of changes here:

* changed the way we build dates; we don't need to use the libc versions, which do a -lot- of work just to build a date
* local version of inet_ntoa, for similar reasons to the above
* create a static version of the fd_note routine which copies the pointer rather than copying the string
* remove the difference between the URI and the log URI. We can determine the log URI at logging time, rather than trying to keep the URI and log URI in sync at a handful of locations in the code.
* Remove the extra status line/headers copy during reply parsing
* Rework the status line parsing routine to not require stdio routines
* Rework the request line parsing routine to not require stdio routines (brought over from Squid-3.)
* Lots of basic refactoring of the client-side code relating to request parsing. It's still inefficient, but it's much better than it was before.
* Replace the RequestMethodStr[] array with a (pointer, length) struct to avoid having to keep calling strlen() in a few time-critical paths. (This stuff generates compiler warnings which need to be fixed at some point.)
* Create an httpHeaderAddClone() routine which clones header entries. For now it does this by copying, but it opens up copy-less refcounting magic later on in the evolution of this codebase.

This work was commercially sponsored; a followup commit to SPONSORS.txt will explain who.

Members:
	lib/rfc1123.c:1.37->1.38
	src/HttpHeader.c:1.91->1.92
	src/HttpMsg.c:1.12->1.13
	src/HttpReply.c:1.56->1.57
	src/HttpRequest.c:1.41->1.42
	src/HttpStatusLine.c:1.26->1.27
	src/access_log.c:1.95->1.96
	src/acl.c:1.318->1.319
	src/asn.c:1.84->1.85
	src/cf.data.pre:1.382->1.383
	src/client_db.c:1.56->1.57
	src/client_side.c:1.693->1.694
	src/comm.c:1.358->1.359
	src/errormap.c:1.2->1.3
	src/errorpage.c:1.190->1.191
	src/external_acl.c:1.29->1.30
	src/fd.c:1.55->1.56
	src/forward.c:1.120->1.121
	src/globals.h:1.123->1.124
	src/htcp.c:1.54->1.55
	src/http.c:1.420->1.421
	src/mime.c:1.108->1.109
	src/neighbors.c:1.313->1.314
	src/peer_digest.c:1.94->1.95
	src/peer_monitor.c:1.3->1.4
	src/peer_select.c:1.131->1.132
	src/protos.h:1.520->1.521
	src/redirect.c:1.96->1.97
	src/ssl.c:1.134->1.135
	src/stat.c:1.377->1.378
	src/store.c:1.570->1.571
	src/store_digest.c:1.53->1.54
	src/store_key_md5.c:1.29->1.30
	src/store_log.c:1.26->1.27
	src/structs.h:1.507->1.508
	src/tools.c:1.250->1.251
	src/typedefs.h:1.151->1.152
	src/url.c:1.144->1.145
	src/urn.c:1.80->1.81
	src/wais.c:1.142->1.143
	src/auth/digest/auth_digest.c:1.21->1.22
	src/fs/aufs/store_dir_aufs.c:1.67->1.68
	src/fs/coss/store_dir_coss.c:1.66->1.67
	src/fs/diskd/store_dir_diskd.c:1.87->1.88
	src/fs/ufs/store_dir_ufs.c:1.63->1.64

Index: squid/lib/rfc1123.c =================================================================== RCS file: /cvsroot/squid/squid/lib/rfc1123.c,v retrieving revision 1.37 retrieving revision 1.38 diff -u -r1.37 -r1.38 --- squid/lib/rfc1123.c 18 Jan 2007 23:25:41 -0000 1.37 +++ squid/lib/rfc1123.c 21 Jan 2007 12:53:55 -0000 1.38 @@ -1,6 +1,6 @@ /* - * $Id: rfc1123.c,v 1.37 2007/01/18 23:25:41 hno Exp $ + * $Id: rfc1123.c,v 1.38 2007/01/21 12:53:55 adrian Exp $ * * DEBUG: * AUTHOR: Harvest Derived @@ -253,9 +253,87 @@ return t; } +/* [ahc] Yes, this is english-centric. Sorry! 
*/ +static char *days[] = { "Sun", "Mon", "Tues", "Wed", "Thu", "Fri", "Sat" }; +static char *months[] = { "Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec" }; + +#define TBUFSZ 128 +/* [ahc] XXX I really should be making sure that we don't over-run the buffer! Grr. */ const char * mkrfc1123(time_t t) { + static char buf[TBUFSZ]; + char *s = buf; + char *src; + struct tm *gmt = gmtime(&t); + short y; + /* "%a, %d %b %Y %H:%M:%S GMT" */ + + /* Append day */ + for (src = days[gmt->tm_wday]; *src != '\0'; src++, s++) { + *s = *src; + } + + /* Append ", " */ + *s++ = ','; + *s++ = ' '; + + /* Append number day (two-digit, padded 0) */ + *s++ = ((gmt->tm_mday / 10) % 10) + '0'; + *s++ = (gmt->tm_mday % 10) + '0'; + + /* append space */ + *s++ = ' '; + + /* Append month abbreviation */ + for (src = months[gmt->tm_mon]; *src != '\0'; src++, s++) { + *s = *src; + } + + /* Space */ + *s++ = ' '; + + /* four-character year */ + y = 1900 + gmt->tm_year; + *s++ = ((y / 1000) % 10) + '0'; + *s++ = ((y / 100) % 10) + '0'; + *s++ = ((y / 10) % 10) + '0'; + *s++ = (y % 10) + '0'; + + /* Space */ + *s++ = ' '; + + /* Two-char hour */ + *s++ = ((gmt->tm_hour / 10) % 10) + '0'; + *s++ = (gmt->tm_hour % 10) + '0'; + + /* : */ + *s++ = ':'; + + /* Two-char minute */ + *s++ = ((gmt->tm_min / 10) % 10) + '0'; + *s++ = (gmt->tm_min % 10) + '0'; + + /* : */ + *s++ = ':'; + + /* Two char second */ + *s++ = ((gmt->tm_sec / 10) % 10) + '0'; + *s++ = (gmt->tm_sec % 10) + '0'; + + /* " GMT\0" */ + *s++ = ' '; + *s++ = 'G'; + *s++ = 'M'; + *s++ = 'T'; + *s++ = '\0'; + /* Finito! */ + return buf; +} + +const char * +mkrfc1123_old(time_t t) +{ static char buf[128]; struct tm *gmt = gmtime(&t); Index: squid/src/HttpHeader.c =================================================================== RCS file: /cvsroot/squid/squid/src/HttpHeader.c,v retrieving revision 1.91 retrieving revision 1.92 diff -u -r1.91 -r1.92 --- squid/src/HttpHeader.c 19 Jan 2007 01:10:12 -0000 1.91 +++ squid/src/HttpHeader.c 21 Jan 2007 12:53:56 -0000 1.92 @@ -1,6 +1,6 @@ /* - * $Id: HttpHeader.c,v 1.91 2007/01/19 01:10:12 hno Exp $ + * $Id: HttpHeader.c,v 1.92 2007/01/21 12:53:56 adrian Exp $ * * DEBUG: section 55 HTTP Header * AUTHOR: Alex Rousskov @@ -244,6 +244,7 @@ #define assert_eid(id) assert((id) < HDR_ENUM_END) static HttpHeaderEntry *httpHeaderEntryCreate(http_hdr_type id, const char *name, const char *value); +static HttpHeaderEntry *httpHeaderEntryCreate2(http_hdr_type id, String name, String value); static void httpHeaderEntryDestroy(HttpHeaderEntry * e); static HttpHeaderEntry *httpHeaderEntryParseCreate(const char *field_start, const char *field_end); static void httpHeaderNoteParsedEntry(http_hdr_type id, String value, int error); @@ -376,7 +377,7 @@ debug(55, 7) ("appending hdr: %p += %p\n", dest, src); while ((e = httpHeaderGetEntry(src, &pos))) { - httpHeaderAddEntry(dest, httpHeaderEntryClone(e)); + httpHeaderAddClone(dest, e); } } @@ -398,7 +399,7 @@ httpHeaderDelById(old, e->id); else httpHeaderDelByName(old, strBuf(e->name)); - httpHeaderAddEntry(old, httpHeaderEntryClone(e)); + httpHeaderAddClone(old, e); } } @@ -769,7 +770,7 @@ return httpHeaderGetList(hdr, id); if ((e = httpHeaderFindEntry(hdr, id))) { String s; - stringInit(&s, strBuf(e->value)); + stringLimitInit(&s, strBuf(e->value), strLen(e->value)); return s; } return StringNull; @@ -1180,6 +1181,23 @@ return e; } +static HttpHeaderEntry * +httpHeaderEntryCreate2(http_hdr_type id, String name, String value) +{ + HttpHeaderEntry *e; + 
assert_eid(id); + e = memAllocate(MEM_HTTP_HDR_ENTRY); + e->id = id; + if (id != HDR_OTHER) + e->name = Headers[id].name; + else + stringLimitInit(&e->name, strBuf(name), strLen(name)); + stringLimitInit(&e->value, strBuf(value), strLen(value)); + Headers[id].stat.aliveCount++; + debug(55, 9) ("created entry %p: '%s: %s'\n", e, strBuf(e->name), strBuf(e->value)); + return e; +} + static void httpHeaderEntryDestroy(HttpHeaderEntry * e) { @@ -1265,7 +1283,13 @@ HttpHeaderEntry * httpHeaderEntryClone(const HttpHeaderEntry * e) { - return httpHeaderEntryCreate(e->id, strBuf(e->name), strBuf(e->value)); + return httpHeaderEntryCreate2(e->id, e->name, e->value); +} + +void +httpHeaderAddClone(HttpHeader * hdr, const HttpHeaderEntry * e) +{ + httpHeaderAddEntry(hdr, httpHeaderEntryClone(e)); } void Index: squid/src/HttpMsg.c =================================================================== RCS file: /cvsroot/squid/squid/src/HttpMsg.c,v retrieving revision 1.12 retrieving revision 1.13 diff -u -r1.12 -r1.13 --- squid/src/HttpMsg.c 13 Jun 2006 14:12:17 -0000 1.12 +++ squid/src/HttpMsg.c 21 Jan 2007 12:53:56 -0000 1.13 @@ -1,6 +1,6 @@ /* - * $Id: HttpMsg.c,v 1.12 2006/06/13 14:12:17 hno Exp $ + * $Id: HttpMsg.c,v 1.13 2007/01/21 12:53:56 adrian Exp $ * * DEBUG: section 74 HTTP Message * AUTHOR: Alex Rousskov @@ -35,17 +35,24 @@ #include "squid.h" +int +httpMsgParseRequestHeader(request_t * req, HttpMsgBuf * hmsg) +{ + const char *s, *e; + s = hmsg->buf + hmsg->h_start; + e = hmsg->buf + hmsg->h_end + 1; + return httpHeaderParse(&req->header, s, e); +} /* find end of headers */ int -httpMsgIsolateHeaders(const char **parse_start, const char **blk_start, const char **blk_end) +httpMsgIsolateHeaders(const char **parse_start, int l, const char **blk_start, const char **blk_end) { /* * parse_start points to the first line of HTTP message *headers*, * not including the request or status lines */ - size_t l = strlen(*parse_start); - size_t end = headersEnd(*parse_start, l); + int end = headersEnd(*parse_start, l); int nnl; if (end) { *blk_start = *parse_start; @@ -120,3 +127,249 @@ return httpHeaderHasConnDir(hdr, "keep-alive"); } } + +/* Adrian's replacement message buffer code to parse the request/reply line */ + +void +HttpMsgBufInit(HttpMsgBuf * hmsg, const char *buf, size_t size) +{ + hmsg->buf = buf; + hmsg->size = size; + hmsg->req_start = hmsg->req_end = -1; + hmsg->h_start = hmsg->h_end = -1; + hmsg->r_len = hmsg->u_len = hmsg->m_len = hmsg->v_len = hmsg->h_len = 0; +} + +void +httpMsgBufDone(HttpMsgBuf * hmsg) +{ + (void) 0; +} + + +/* + * Attempt to parse the request line. + * + * This will set the values in hmsg that it determines. One may end up + * with a partially-parsed buffer; the return value tells you whether + * the values are valid or not. + * + * @return 1 if parsed correctly, 0 if more is needed, -1 if error + * + * TODO: + * * have it indicate "error" and "not enough" as two separate conditions! + * * audit this code as off-by-one errors are probably everywhere! + */ +int +httpMsgParseRequestLine(HttpMsgBuf * hmsg) +{ + int i = 0; + int retcode = 0; + int maj = -1, min = -1; + int last_whitespace = -1, line_end = -1; + + /* Find \r\n - end of URL+Version (and the request) */ + for (i = 0; i < hmsg->size; i++) { + if (hmsg->buf[i] == '\n') { + break; + } + if (i < hmsg->size - 1 && hmsg->buf[i - 1] == '\r' && hmsg->buf[i] == '\n') { + i++; + break; + } + } + if (i == hmsg->size) { + retcode = 0; + goto finish; + } + /* XXX this should point to the -end- of the \r\n, \n, etc. 
*/ + hmsg->req_end = i; + i = 0; + + /* Find first non-whitespace - beginning of method */ + for (; i < hmsg->req_end && (isspace(hmsg->buf[i])); i++); + if (i >= hmsg->req_end) { + retcode = 0; + goto finish; + } + hmsg->m_start = i; + hmsg->req_start = i; + hmsg->r_len = hmsg->req_end - hmsg->req_start + 1; + + /* Find first whitespace - end of method */ + for (; i < hmsg->req_end && (!isspace(hmsg->buf[i])); i++); + if (i >= hmsg->req_end) { + retcode = 0; + goto finish; + } + hmsg->m_end = i - 1; + hmsg->m_len = hmsg->m_end - hmsg->m_start + 1; + + /* Find first non-whitespace - beginning of URL+Version */ + for (; i < hmsg->req_end && (isspace(hmsg->buf[i])); i++); + if (i >= hmsg->req_end) { + retcode = 0; + goto finish; + } + hmsg->u_start = i; + + /* Find \r\n or \n - thats the end of the line. Keep track of the last whitespace! */ + for (; i <= hmsg->req_end; i++) { + /* If \n - its end of line */ + if (hmsg->buf[i] == '\n') { + line_end = i; + break; + } + /* XXX could be off-by-one wrong! */ + if (hmsg->buf[i] == '\r' && (i + 1) <= hmsg->req_end && hmsg->buf[i + 1] == '\n') { + line_end = i; + break; + } + /* If its a whitespace, note it as it'll delimit our version */ + if (hmsg->buf[i] == ' ' || hmsg->buf[i] == '\t') { + last_whitespace = i; + } + } + if (i > hmsg->req_end) { + retcode = 0; + goto finish; + } + /* At this point we don't need the 'i' value; so we'll recycle it for version parsing */ + + /* + * At this point: line_end points to the first eol char (\r or \n); + * last_whitespace points to the last whitespace char in the URL. + * We know we have a full buffer here! + */ + if (last_whitespace == -1) { + maj = 0; + min = 9; + hmsg->u_end = line_end - 1; + assert(hmsg->u_end >= hmsg->u_start); + } else { + /* Find the first non-whitespace after last_whitespace */ + /* XXX why <= vs < ? I do need to really re-audit all of this .. */ + for (i = last_whitespace; i <= hmsg->req_end && isspace(hmsg->buf[i]); i++); + if (i > hmsg->req_end) { + retcode = 0; + goto finish; + } + /* is it http/ ? if so, we try parsing. If not, the URL is the whole line; version is 0.9 */ + if (i + 5 >= hmsg->req_end || (strncasecmp(&hmsg->buf[i], "HTTP/", 5) != 0)) { + maj = 0; + min = 9; + hmsg->u_end = line_end - 1; + assert(hmsg->u_end >= hmsg->u_start); + } else { + /* Ok, lets try parsing! Yes, this needs refactoring! */ + hmsg->v_start = i; + i += 5; + + /* next should be 1 or more digits */ + maj = 0; + for (; i < hmsg->req_end && (isdigit(hmsg->buf[i])); i++) { + maj = maj * 10; + maj = maj + (hmsg->buf[i]) - '0'; + } + if (i >= hmsg->req_end) { + retcode = 0; + goto finish; + } + /* next should be .; we -have- to have this as we have a whole line.. */ + if (hmsg->buf[i] != '.') { + retcode = 0; + goto finish; + } + if (i + 1 >= hmsg->req_end) { + retcode = 0; + goto finish; + } + /* next should be one or more digits */ + i++; + min = 0; + for (; i < hmsg->req_end && (isdigit(hmsg->buf[i])); i++) { + min = min * 10; + min = min + (hmsg->buf[i]) - '0'; + } + + /* Find whitespace, end of version */ + hmsg->v_end = i; + hmsg->v_len = hmsg->v_end - hmsg->v_start + 1; + hmsg->u_end = last_whitespace - 1; + } + } + hmsg->u_len = hmsg->u_end - hmsg->u_start + 1; + + /* + * Rightio - we have all the schtuff. Return true; we've got enough. 
+ */ + retcode = 1; + assert(maj != -1); + assert(min != -1); + finish: + hmsg->v_maj = maj; + hmsg->v_min = min; + debug(1, 2) ("Parser: retval %d: from %d->%d: method %d->%d; url %d->%d; version %d->%d (%d/%d)\n", + retcode, hmsg->req_start, hmsg->req_end, + hmsg->m_start, hmsg->m_end, + hmsg->u_start, hmsg->u_end, + hmsg->v_start, hmsg->v_end, maj, min); + return retcode; +} + +/* + * A temporary replacement for headersEnd() in this codebase. + * This routine searches for the end of the headers in a HTTP request + * (obviously anything > HTTP/0.9.) + * + * It returns buffer length on success or 0 on failure. + */ +int +httpMsgFindHeadersEnd(HttpMsgBuf * hmsg) +{ + int e = 0; + int state = 1; + const char *mime = hmsg->buf; + int l = hmsg->size; + int he = -1; + + /* Always succeed HTTP/0.9 - it means we've already parsed the buffer for the request */ + if (hmsg->v_maj == 0 && hmsg->v_min == 9) + return 1; + + while (e < l && state < 3) { + switch (state) { + case 0: + if ('\n' == mime[e]) { + he = e; + state = 1; + } + break; + case 1: + if ('\r' == mime[e]) + state = 2; + else if ('\n' == mime[e]) + state = 3; + else + state = 0; + break; + case 2: + if ('\n' == mime[e]) + state = 3; + else + state = 0; + break; + default: + break; + } + e++; + } + if (3 == state) { + hmsg->h_end = he; + hmsg->h_start = hmsg->req_end + 1; + hmsg->h_len = hmsg->h_end - hmsg->h_start; + return e; + } + return 0; + +} Index: squid/src/HttpReply.c =================================================================== RCS file: /cvsroot/squid/squid/src/HttpReply.c,v retrieving revision 1.56 retrieving revision 1.57 diff -u -r1.56 -r1.57 --- squid/src/HttpReply.c 11 Jun 2006 00:28:19 -0000 1.56 +++ squid/src/HttpReply.c 21 Jan 2007 12:53:56 -0000 1.57 @@ -1,6 +1,6 @@ /* - * $Id: HttpReply.c,v 1.56 2006/06/11 00:28:19 hno Exp $ + * $Id: HttpReply.c,v 1.57 2007/01/21 12:53:56 adrian Exp $ * * DEBUG: section 58 HTTP Reply (Response) * AUTHOR: Alex Rousskov @@ -53,9 +53,8 @@ static void httpReplyDoDestroy(HttpReply * rep); static void httpReplyHdrCacheInit(HttpReply * rep); static void httpReplyHdrCacheClean(HttpReply * rep); -static int httpReplyParseStep(HttpReply * rep, const char *parse_start, int atEnd); +static int httpReplyParseStep(HttpReply * rep, const char *parse_start, int len); static int httpReplyParseError(HttpReply * rep); -static int httpReplyIsolateStart(const char **parse_start, const char **blk_start, const char **blk_end); static time_t httpReplyHdrExpirationTime(const HttpReply * rep); @@ -137,24 +136,9 @@ int httpReplyParse(HttpReply * rep, const char *buf, size_t end) { - /* - * this extra buffer/copy will be eliminated when headers become - * meta-data in store. Currently we have to xstrncpy the buffer - * becuase somebody may feed a non NULL-terminated buffer to - * us. - */ - MemBuf mb = MemBufNull; - int success; - /* reset current state, because we are not used in incremental fashion */ + /* The code called by httpReplyParseStep doesn't assume NUL-terminated stuff! */ httpReplyReset(rep); - /* put a string terminator. s is how many bytes to touch in - * 'buf' including the terminating NULL. 
*/ - memBufDefInit(&mb); - memBufAppend(&mb, buf, end); - memBufAppend(&mb, "\0", 1); - success = httpReplyParseStep(rep, mb.buf, 0); - memBufClean(&mb); - return success == 1; + return (httpReplyParseStep(rep, buf, end) == 1); } void @@ -384,42 +368,50 @@ * -1 -- parse error */ static int -httpReplyParseStep(HttpReply * rep, const char *buf, int atEnd) +httpReplyParseStep(HttpReply * rep, const char *buf, int len) { const char *parse_start = buf; const char *blk_start, *blk_end; - const char **parse_end_ptr = &blk_end; assert(rep); assert(parse_start); assert(rep->pstate < psParsed); + int i; + const char *re; - *parse_end_ptr = parse_start; - if (rep->pstate == psReadyToParseStartLine) { - if (!httpReplyIsolateStart(&parse_start, &blk_start, &blk_end)) - return 0; - if (!httpStatusLineParse(&rep->sline, blk_start, blk_end)) - return httpReplyParseError(rep); - - *parse_end_ptr = parse_start; - rep->hdr_sz = *parse_end_ptr - buf; - rep->pstate++; - } - if (rep->pstate == psReadyToParseHeaders) { - if (!httpMsgIsolateHeaders(&parse_start, &blk_start, &blk_end)) { - if (atEnd) - blk_start = parse_start, blk_end = blk_start + strlen(blk_start); - else - return 0; - } - if (!httpHeaderParse(&rep->header, blk_start, blk_end)) - return httpReplyParseError(rep); + /* For now we'll assume we need to parse the whole lot */ - httpReplyHdrCacheInit(rep); + /* Find end of start line */ + for (re = buf, i = 0; i < len && *re != '\r' && *re != '\n'; re++, i++); + if (i >= len) + return httpReplyParseError(rep); + + /* s points to first \r or \n - so find the first character after */ + for (; i < len && (*re == '\r' || *re == '\n'); re++, i++); + if (i >= len) + return httpReplyParseError(rep); + + /* Pass that to the existing Squid status line parsing routine */ + if (!httpStatusLineParse(&rep->sline, buf, re - 1)) + return httpReplyParseError(rep); + rep->pstate++; + + /* All good? Attempt to isolate headers */ + /* The block in question is between re and buf + len */ + parse_start = re; + if (!httpMsgIsolateHeaders(&parse_start, len - i, &blk_start, &blk_end)) + return httpReplyParseError(rep); + + /* Isolated? 
parse headers */ + if (!httpHeaderParse(&rep->header, blk_start, blk_end)) + return httpReplyParseError(rep); - *parse_end_ptr = parse_start; - rep->hdr_sz = *parse_end_ptr - buf; - rep->pstate++; - } + /* Update rep */ + httpReplyHdrCacheInit(rep); + /* the previous code had hdr_sz including the status line + headers and final \r\n */ + rep->hdr_sz = parse_start - buf; + rep->pstate++; + + /* Done */ return 1; } @@ -435,25 +427,6 @@ return -1; } -/* find first CRLF */ -static int -httpReplyIsolateStart(const char **parse_start, const char **blk_start, const char **blk_end) -{ - int slen = strcspn(*parse_start, "\r\n"); - if (!(*parse_start)[slen]) /* no CRLF found */ - return 0; - - *blk_start = *parse_start; - *blk_end = *blk_start + slen; - while (**blk_end == '\r') /* CR */ - (*blk_end)++; - if (**blk_end == '\n') /* LF */ - (*blk_end)++; - - *parse_start = *blk_end; - return 1; -} - /* * Returns the body size of a HTTP response */ Index: squid/src/HttpRequest.c =================================================================== RCS file: /cvsroot/squid/squid/src/HttpRequest.c,v retrieving revision 1.41 retrieving revision 1.42 diff -u -r1.41 -r1.42 --- squid/src/HttpRequest.c 17 Jul 2006 02:31:59 -0000 1.41 +++ squid/src/HttpRequest.c 21 Jan 2007 12:53:56 -0000 1.42 @@ -1,6 +1,6 @@ /* - * $Id: HttpRequest.c,v 1.41 2006/07/17 02:31:59 hno Exp $ + * $Id: HttpRequest.c,v 1.42 2007/01/21 12:53:56 adrian Exp $ * * DEBUG: section 73 HTTP Request * AUTHOR: Duane Wessels @@ -106,15 +106,6 @@ requestDestroy(request); } -int -httpRequestParseHeader(request_t * req, const char *parse_start) -{ - const char *blk_start, *blk_end; - if (!httpMsgIsolateHeaders(&parse_start, &blk_start, &blk_end)) - return 0; - return httpHeaderParse(&req->header, blk_start, blk_end); -} - /* packs request-line and headers, appends terminator */ static void httpRequestPack(const request_t * req, Packer * p) @@ -122,7 +113,7 @@ assert(req && p); /* pack request-line */ packerPrintf(p, "%s %s HTTP/1.0\r\n", - RequestMethodStr[req->method], strBuf(req->urlpath)); + RequestMethods[req->method].str, strBuf(req->urlpath)); /* headers */ httpHeaderPackInto(&req->header, p); /* trailer */ @@ -142,7 +133,7 @@ packerPrintf(p, "\n"); /* pack request-line */ packerPrintf(p, "%s %s HTTP/%d.%d\r\n", - RequestMethodStr[req->method], urlCanonical(req), req->http_ver.major, req->http_ver.minor); + RequestMethods[req->method].str, urlCanonical(req), req->http_ver.major, req->http_ver.minor); /* headers */ httpHeaderPackInto(&req->header, p); /* trailer */ @@ -176,7 +167,7 @@ httpRequestPrefixLen(const request_t * req) { assert(req); - return strlen(RequestMethodStr[req->method]) + 1 + + return RequestMethods[req->method].len + 1 + strLen(req->urlpath) + 1 + 4 + 1 + 3 + 2 + req->header.len + 2; Index: squid/src/HttpStatusLine.c =================================================================== RCS file: /cvsroot/squid/squid/src/HttpStatusLine.c,v retrieving revision 1.26 retrieving revision 1.27 diff -u -r1.26 -r1.27 --- squid/src/HttpStatusLine.c 17 May 2005 16:56:37 -0000 1.26 +++ squid/src/HttpStatusLine.c 21 Jan 2007 12:53:56 -0000 1.27 @@ -1,6 +1,6 @@ /* - * $Id: HttpStatusLine.c,v 1.26 2005/05/17 16:56:37 hno Exp $ + * $Id: HttpStatusLine.c,v 1.27 2007/01/21 12:53:56 adrian Exp $ * * DEBUG: section 57 HTTP Status-line * AUTHOR: Alex Rousskov @@ -83,6 +83,9 @@ int httpStatusLineParse(HttpStatusLine * sline, const char *start, const char *end) { + int maj, min, status; + const char *s; + assert(sline); sline->status = 
HTTP_INVALID_HEADER; /* Squid header parsing error */ if (strncasecmp(start, "HTTP/", 5)) @@ -90,12 +93,58 @@ start += 5; if (!xisdigit(*start)) return 0; - if (sscanf(start, "%d.%d", &sline->version.major, &sline->version.minor) != 2) { - debug(57, 7) ("httpStatusLineParse: Invalid HTTP identifier.\n"); + + /* Format: HTTP/x.x CRLF */ + s = start; + maj = 0; + for (s = start; s < end && isdigit(*s); s++) { + maj = maj * 10; + maj = maj + *s - '0'; } - if (!(start = strchr(start, ' '))) + if (s >= end) { + debug(57, 7) ("httpStatusLineParse: Invalid HTTP reply status major.\n"); return 0; - sline->status = (http_status) atoi(++start); + } + /* next should be '.' */ + if (*s != '.') { + debug(57, 7) ("httpStatusLineParse: Invalid HTTP reply status line.\n"); + return 0; + } + s++; + /* next should be minor number */ + min = 0; + for (; s < end && isdigit(*s); s++) { + min = min * 10; + min = min + *s - '0'; + } + if (s >= end) { + debug(57, 7) ("httpStatusLineParse: Invalid HTTP reply status version minor.\n"); + return 0; + } + /* then a space */ + if (*s != ' ') { + } + s++; + /* next should be status start */ + status = 0; + for (; s < end && isdigit(*s); s++) { + status = status * 10; + status = status + *s - '0'; + } + if (s >= end) { + debug(57, 7) ("httpStatusLineParse: Invalid HTTP reply status code.\n"); + return 0; + } + /* then a space */ + + /* for now we ignore the reason-phrase */ + + /* then crlf */ + + sline->version.major = maj; + sline->version.minor = min; + sline->status = status; + /* we ignore 'reason-phrase' */ return 1; /* success */ } Index: squid/src/access_log.c =================================================================== RCS file: /cvsroot/squid/squid/src/access_log.c,v retrieving revision 1.95 retrieving revision 1.96 diff -u -r1.95 -r1.96 --- squid/src/access_log.c 19 Jan 2007 00:19:26 -0000 1.95 +++ squid/src/access_log.c 21 Jan 2007 12:53:56 -0000 1.96 @@ -1,6 +1,6 @@ /* - * $Id: access_log.c,v 1.95 2007/01/19 00:19:26 hno Exp $ + * $Id: access_log.c,v 1.96 2007/01/21 12:53:56 adrian Exp $ * * DEBUG: section 46 Access Log * AUTHOR: Duane Wessels @@ -625,7 +625,7 @@ break; case LFT_REQUEST_URI: - out = al->url; + out = rfc1738_escape_unescaped(al->url); break; case LFT_REQUEST_VERSION: @@ -1019,7 +1019,7 @@ al->http.code, al->cache.size, al->private.method_str, - al->url, + rfc1738_escape_unescaped(al->url), user ? user : dash_str, al->hier.ping.timedout ? "TIMEOUT_" : "", hier_strings[al->hier.code], @@ -1037,7 +1037,7 @@ al->http.code, al->cache.size, al->private.method_str, - al->url, + rfc1738_escape_unescaped(al->url), user ? user : dash_str, al->hier.ping.timedout ? "TIMEOUT_" : "", hier_strings[al->hier.code], @@ -1068,7 +1068,7 @@ user1 ? 
user1 : dash_str, mkhttpdlogtime(&squid_curtime), al->private.method_str, - al->url, + rfc1738_escape_unescaped(al->url), al->http.version.major, al->http.version.minor, al->http.code, al->cache.size, @@ -1100,7 +1100,7 @@ if (al->icp.opcode) al->private.method_str = icp_opcode_str[al->icp.opcode]; else - al->private.method_str = RequestMethodStr[al->http.method]; + al->private.method_str = RequestMethods[al->http.method].str; if (al->hier.host[0] == '\0') xstrncpy(al->hier.host, dash_str, SQUIDHOSTNAMELEN); Index: squid/src/acl.c =================================================================== RCS file: /cvsroot/squid/squid/src/acl.c,v retrieving revision 1.318 retrieving revision 1.319 diff -u -r1.318 -r1.319 --- squid/src/acl.c 6 Jan 2007 17:22:45 -0000 1.318 +++ squid/src/acl.c 21 Jan 2007 12:53:57 -0000 1.319 @@ -1,6 +1,6 @@ /* - * $Id: acl.c,v 1.318 2007/01/06 17:22:45 hno Exp $ + * $Id: acl.c,v 1.319 2007/01/21 12:53:57 adrian Exp $ * * DEBUG: section 28 Access Control * AUTHOR: Duane Wessels @@ -383,7 +383,7 @@ for (Tail = curlist; *Tail; Tail = &((*Tail)->next)); while ((t = strtokFile())) { q = memAllocate(MEM_INTLIST); - q->i = (int) urlParseMethod(t); + q->i = (int) urlParseMethod(t, strlen(t)); if (q->i == METHOD_NONE) self_destruct(); *(Tail) = q; @@ -2903,7 +2903,7 @@ { wordlist *W = NULL; while (data != NULL) { - wordlistAdd(&W, RequestMethodStr[data->i]); + wordlistAdd(&W, RequestMethods[data->i].str); data = data->next; } return W; Index: squid/src/asn.c =================================================================== RCS file: /cvsroot/squid/squid/src/asn.c,v retrieving revision 1.84 retrieving revision 1.85 diff -u -r1.84 -r1.85 --- squid/src/asn.c 5 Jun 2006 22:47:01 -0000 1.84 +++ squid/src/asn.c 21 Jan 2007 12:53:57 -0000 1.85 @@ -1,6 +1,6 @@ /* - * $Id: asn.c,v 1.84 2006/06/05 22:47:01 hno Exp $ + * $Id: asn.c,v 1.85 2007/01/21 12:53:57 adrian Exp $ * * DEBUG: section 53 AS Number handling * AUTHOR: Duane Wessels, Kostas Anagnostakis @@ -203,7 +203,7 @@ assert(NULL != req); asState->request = requestLink(req); if ((e = storeGetPublic(asres, METHOD_GET)) == NULL) { - e = storeCreateEntry(asres, asres, null_request_flags, METHOD_GET); + e = storeCreateEntry(asres, null_request_flags, METHOD_GET); asState->sc = storeClientRegister(e, asState); fwdStart(-1, e, asState->request); } else { Index: squid/src/cf.data.pre =================================================================== RCS file: /cvsroot/squid/squid/src/cf.data.pre,v retrieving revision 1.382 retrieving revision 1.383 diff -u -r1.382 -r1.383 --- squid/src/cf.data.pre 19 Jan 2007 22:03:03 -0000 1.382 +++ squid/src/cf.data.pre 21 Jan 2007 12:53:57 -0000 1.383 @@ -1,6 +1,6 @@ # -# $Id: cf.data.pre,v 1.382 2007/01/19 22:03:03 hno Exp $ +# $Id: cf.data.pre,v 1.383 2007/01/21 12:53:57 adrian Exp $ # # # SQUID Web Proxy Cache http://www.squid-cache.org/ @@ -4968,7 +4968,7 @@ NAME: extension_methods TYPE: extension_method -LOC: RequestMethodStr +LOC: RequestMethods DEFAULT: none DOC_START Squid only knows about standardized HTTP request methods. 
Index: squid/src/client_db.c =================================================================== RCS file: /cvsroot/squid/squid/src/client_db.c,v retrieving revision 1.56 retrieving revision 1.57 diff -u -r1.56 -r1.57 --- squid/src/client_db.c 23 Oct 2005 15:20:54 -0000 1.56 +++ squid/src/client_db.c 21 Jan 2007 12:53:58 -0000 1.57 @@ -1,6 +1,6 @@ /* - * $Id: client_db.c,v 1.56 2005/10/23 15:20:54 hno Exp $ + * $Id: client_db.c,v 1.57 2007/01/21 12:53:58 adrian Exp $ * * DEBUG: section 0 Client Database * AUTHOR: Duane Wessels @@ -53,7 +53,7 @@ { ClientInfo *c; c = memAllocate(MEM_CLIENT_INFO); - c->hash.key = xstrdup(inet_ntoa(addr)); + c->hash.key = xstrdup(xinet_ntoa(addr)); c->addr = addr; hash_join(client_table, &c->hash); statCounter.client_http.clients++; @@ -83,7 +83,7 @@ ClientInfo *c; if (!Config.onoff.client_db) return; - key = inet_ntoa(addr); + key = xinet_ntoa(addr); c = (ClientInfo *) hash_lookup(client_table, key); if (c == NULL) c = clientdbAdd(addr); @@ -119,7 +119,7 @@ ClientInfo *c; if (!Config.onoff.client_db) return 0; - key = inet_ntoa(addr); + key = xinet_ntoa(addr); c = (ClientInfo *) hash_lookup(client_table, key); if (c == NULL) c = clientdbAdd(addr); @@ -140,7 +140,7 @@ ClientInfo *c; if (!Config.onoff.client_db) return 0; - key = inet_ntoa(addr); + key = xinet_ntoa(addr); c = (ClientInfo *) hash_lookup(client_table, key); if (c == NULL) return 0; @@ -305,7 +305,7 @@ ClientInfo *c = NULL; char *key; if (current) { - key = inet_ntoa(*current); + key = xinet_ntoa(*current); hash_first(client_table); while ((c = (ClientInfo *) hash_next(client_table))) { if (!strcmp(key, hashKeyStr(&c->hash))) Index: squid/src/client_side.c =================================================================== RCS file: /cvsroot/squid/squid/src/client_side.c,v retrieving revision 1.693 retrieving revision 1.694 diff -u -r1.693 -r1.694 --- squid/src/client_side.c 19 Jan 2007 01:10:12 -0000 1.693 +++ squid/src/client_side.c 21 Jan 2007 12:53:58 -0000 1.694 @@ -1,6 +1,6 @@ /* - * $Id: client_side.c,v 1.693 2007/01/19 01:10:12 hno Exp $ + * $Id: client_side.c,v 1.694 2007/01/21 12:53:58 adrian Exp $ * * DEBUG: section 33 Client-side Routines * AUTHOR: Duane Wessels @@ -118,7 +118,7 @@ static void clientProcessMiss(clientHttpRequest *); static void clientBuildReplyHeader(clientHttpRequest * http, HttpReply * rep); static clientHttpRequest *parseHttpRequestAbort(ConnStateData * conn, const char *uri); -static clientHttpRequest *parseHttpRequest(ConnStateData *, method_t *, int *, char **, size_t *); +static clientHttpRequest *parseHttpRequest(ConnStateData *, HttpMsgBuf *, method_t *, int *); static void clientRedirectStart(clientHttpRequest * http); static RH clientRedirectDone; static void clientCheckNoCache(clientHttpRequest *); @@ -393,7 +393,7 @@ */ if (h->request == NULL) h->request = requestLink(requestCreate(m, PROTO_NONE, null_string)); - e = storeCreateEntry(h->uri, h->log_uri, flags, m); + e = storeCreateEntry(h->uri, flags, m); h->sc = storeClientRegister(e, h); #if DELAY_POOLS if (h->log_type != LOG_TCP_DENIED) @@ -412,7 +412,7 @@ ErrorState *err = NULL; char *proxy_auth_msg = NULL; debug(33, 2) ("The request %s %s is %s, because it matched '%s'\n", - RequestMethodStr[http->request->method], http->uri, + RequestMethods[http->request->method].str, http->uri, answer == ACCESS_ALLOWED ? "ALLOWED" : "DENIED", AclMatchedName ? AclMatchedName : "NO ACL's"); proxy_auth_msg = authenticateAuthUserRequestMessage(http->conn->auth_user_request ? 
http->conn->auth_user_request : http->request->auth_user_request); @@ -477,7 +477,7 @@ ErrorState *err = NULL; char *proxy_auth_msg = NULL; debug(33, 2) ("The request %s %s is %s, because it matched '%s'\n", - RequestMethodStr[http->request->method], http->uri, + RequestMethods[http->request->method].str, http->uri, answer == ACCESS_ALLOWED ? "ALLOWED" : "DENIED", AclMatchedName ? AclMatchedName : "NO ACL's"); proxy_auth_msg = authenticateAuthUserRequestMessage(http->conn->auth_user_request ? http->conn->auth_user_request : http->request->auth_user_request); @@ -643,6 +643,7 @@ #if HEADERS_LOG headersLog(0, 1, request->method, request); #endif + /* XXX This really should become a ref-counted string type pointer, not a copy! */ fd_note(http->conn->fd, http->uri); clientAccessCheck2(http); } @@ -743,7 +744,7 @@ vary = httpMakeVaryMark(request, mem->reply); if (etag && vary) { - storeAddVary(mem->url, mem->log_url, mem->method, NULL, httpHeaderGetStr(&mem->reply->header, HDR_ETAG), request->vary_hdr, request->vary_headers, strBuf(request->vary_encoding)); + storeAddVary(mem->url, mem->method, NULL, httpHeaderGetStr(&mem->reply->header, HDR_ETAG), request->vary_hdr, request->vary_headers, strBuf(request->vary_encoding)); } } clientHandleETagMiss(http); @@ -760,10 +761,7 @@ char *url = http->uri; StoreEntry *entry = NULL; debug(33, 3) ("clientProcessETag: '%s'\n", http->uri); - entry = storeCreateEntry(url, - http->log_uri, - http->request->flags, - http->request->method); + entry = storeCreateEntry(url, http->request->flags, http->request->method); http->sc = storeClientRegister(entry, http); #if DELAY_POOLS /* delay_id is already set on original store client */ @@ -823,10 +821,7 @@ } } if (!entry) { - entry = storeCreateEntry(url, - http->log_uri, - http->request->flags, - http->request->method); + entry = storeCreateEntry(url, http->request->flags, http->request->method); if (http->entry->mem_obj) { http->entry->mem_obj->refresh_timestamp = squid_curtime; if (Config.onoff.collapsed_forwarding) { @@ -1100,7 +1095,7 @@ /* Swap in the metadata */ http->entry = entry; storeLockObject(http->entry); - storeCreateMemObject(http->entry, http->uri, http->log_uri); + storeCreateMemObject(http->entry, http->uri); http->entry->mem_obj->method = http->request->method; http->sc = storeClientRegister(http->entry, http); http->log_type = LOG_TCP_HIT; @@ -1263,7 +1258,7 @@ mem = http->entry->mem_obj; if (http->out.size || http->log_type) { http->al.icp.opcode = ICP_INVALID; - http->al.url = http->log_uri; + http->al.url = http->uri; debug(33, 9) ("httpRequestFree: al.url='%s'\n", http->al.url); if (http->reply && http->log_type != LOG_TCP_DENIED) { http->al.http.code = http->reply->sline.status; @@ -1320,7 +1315,6 @@ if (request) checkFailureRatio(request->err_type, http->al.hier.code); safe_free(http->uri); - safe_free(http->log_uri); safe_free(http->al.headers.request); safe_free(http->al.headers.reply); safe_free(http->al.cache.authuser); @@ -1530,8 +1524,7 @@ #endif #if USE_REFERER_LOG if ((str = httpHeaderGetStr(req_hdr, HDR_REFERER))) - logReferer(fqdnFromAddr(http->conn->log_addr), str, - http->log_uri); + logReferer(fqdnFromAddr(http->conn->log_addr), str, rfc1738_escape_unescaped(http->uri)); #endif #if FORW_VIA_DB if (httpHeaderHas(req_hdr, HDR_X_FORWARDED_FOR)) { @@ -1570,7 +1563,7 @@ debug(33, 3) ("clientSetKeepaliveFlag: http_ver = %d.%d\n", request->http_ver.major, request->http_ver.minor); debug(33, 3) ("clientSetKeepaliveFlag: method = %s\n", - RequestMethodStr[request->method]); + 
RequestMethods[request->method].str); { http_version_t http_ver; httpBuildVersion(&http_ver, 1, 0); /* we are HTTP/1.0, no matter what the client requests... */ @@ -2804,7 +2797,7 @@ return; } debug(33, 2) ("The reply for %s %s is %s, because it matched '%s'\n", - RequestMethodStr[http->request->method], http->uri, + RequestMethods[http->request->method].str, http->uri, answer ? "ALLOWED" : "DENIED", AclMatchedName ? AclMatchedName : "NO ACL's"); if (answer != ACCESS_ALLOWED) { @@ -3040,7 +3033,7 @@ if (http == NULL) { debug(33, 5) ("clientKeepaliveNextRequest: FD %d reading next req\n", conn->fd); - fd_note(conn->fd, "Waiting for next request"); + fd_note_static(conn->fd, "Waiting for next request"); /* * Set the timeout BEFORE calling clientReadRequest(). */ @@ -3171,7 +3164,7 @@ ErrorState *err = NULL; http->flags.hit = 0; debug(33, 4) ("clientProcessOnlyIfCachedMiss: '%s %s'\n", - RequestMethodStr[r->method], url); + RequestMethods[r->method].str, url); http->al.http.code = HTTP_GATEWAY_TIMEOUT; err = errorCon(ERR_ONLY_IF_CACHED_MISS, HTTP_GATEWAY_TIMEOUT, http->orig_request); if (http->entry) { @@ -3322,7 +3315,7 @@ HttpReply *rep; http_version_t version; debug(33, 4) ("clientProcessRequest: %s '%s'\n", - RequestMethodStr[r->method], + RequestMethods[r->method].str, url); r->flags.collapsed = 0; if (r->method == METHOD_CONNECT && !http->redirect.status) { @@ -3371,7 +3364,7 @@ if (http->entry->mem_obj->request) r->hier = http->entry->mem_obj->request->hier; } - storeCreateMemObject(http->entry, http->uri, http->log_uri); + storeCreateMemObject(http->entry, http->uri); http->entry->mem_obj->method = r->method; http->sc = storeClientRegister(http->entry, http); #if DELAY_POOLS @@ -3399,7 +3392,7 @@ request_t *r = http->request; ErrorState *err = NULL; debug(33, 4) ("clientProcessMiss: '%s %s'\n", - RequestMethodStr[r->method], url); + RequestMethods[r->method].str, url); http->flags.hit = 0; /* * We might have a left-over StoreEntry from a failed cache hit @@ -3481,7 +3474,6 @@ http->start = current_time; http->req_sz = conn->in.offset; http->uri = xstrdup(uri); - http->log_uri = xstrndup(uri, MAX_URL); http->range_iter.boundary = StringNull; httpBuildVersion(&http->http_ver, 1, 0); dlinkAdd(http, &http->active, &ClientActiveRequests); @@ -3496,124 +3488,84 @@ * a clientHttpRequest structure on success */ static clientHttpRequest * -parseHttpRequest(ConnStateData * conn, method_t * method_p, int *status, - char **prefix_p, size_t * req_line_sz_p) +parseHttpRequest(ConnStateData * conn, HttpMsgBuf * hmsg, method_t * method_p, int *status) { - char *inbuf = NULL; + LOCAL_ARRAY(char, urlbuf, MAX_URL); + char *url = urlbuf; char *mstr = NULL; - char *url = NULL; char *req_hdr = NULL; http_version_t http_ver; - char *t = NULL; - char *end; size_t header_sz; /* size of headers, not including first line */ size_t prefix_sz; /* size of whole request (req-line + headers) */ size_t req_sz; method_t method; clientHttpRequest *http = NULL; - int http_version_offset = 0; + char *t; + int ret; /* pre-set these values to make aborting simpler */ - *prefix_p = NULL; *method_p = METHOD_NONE; *status = -1; - if ((t = memchr(conn->in.buf, '\n', conn->in.offset)) == NULL) { + /* Parse the request line */ + ret = httpMsgParseRequestLine(hmsg); + if (ret == 0) { debug(33, 5) ("Incomplete request, waiting for end of request line\n"); *status = 0; return NULL; } - *req_line_sz_p = req_sz = t - conn->in.buf + 1; /* HTTP/0.9 requests */ - while (t > conn->in.buf && xisspace(*t)) - t--; - while (t > conn->in.buf 
&& !xisspace(*t)) - t--; - if (t > conn->in.buf && t < (conn->in.buf + conn->in.offset - 8) && strncasecmp(t + 1, "HTTP/", 5) == 0) { - if ((req_sz = headersEnd(conn->in.buf, conn->in.offset)) == 0) { + /* If HTTP/0.9 then there's no headers */ + if (hmsg->v_maj == 0 && hmsg->v_min == 9) { + req_sz = hmsg->r_len; + } else { + req_sz = httpMsgFindHeadersEnd(hmsg); + if (req_sz == 0) { debug(33, 5) ("Incomplete request, waiting for end of headers\n"); *status = 0; return NULL; } - http_version_offset = t - conn->in.buf; - if (sscanf(t + 6, "%d.%d", &http_ver.major, &http_ver.minor) != 2) { - debug(33, 3) ("parseHttpRequest: Invalid HTTP identifier.\n"); - return parseHttpRequestAbort(conn, "error:invalid-http-ident"); - } - debug(33, 6) ("parseHttpRequest: Client HTTP version %d.%d.\n", http_ver.major, http_ver.minor); - } else { - debug(33, 3) ("parseHttpRequest: Missing HTTP identifier\n"); - httpBuildVersion(&http_ver, 0, 9); /* wild guess */ } - - assert(req_sz <= conn->in.offset); - /* Use memcpy, not strdup! */ - inbuf = xmalloc(req_sz + 1); - xmemcpy(inbuf, conn->in.buf, req_sz); - *(inbuf + req_sz) = '\0'; + /* Set version */ + httpBuildVersion(&http_ver, hmsg->v_maj, hmsg->v_min); /* Enforce max_request_size */ if (req_sz >= Config.maxRequestHeaderSize) { debug(33, 5) ("parseHttpRequest: Too large request\n"); - xfree(inbuf); return parseHttpRequestAbort(conn, "error:request-too-large"); } - /* Barf on NULL characters in the headers */ - if (strlen(inbuf) != req_sz) { - debug(33, 1) ("parseHttpRequest: Requestheader contains NULL characters\n"); -#if TRY_TO_IGNORE_THIS - xfree(inbuf); - return parseHttpRequestAbort(conn, "error:invalid-request"); -#endif - } /* Look for request method */ - if ((mstr = strtok(inbuf, "\t ")) == NULL) { - debug(33, 1) ("parseHttpRequest: Can't get request method\n"); - xfree(inbuf); - return parseHttpRequestAbort(conn, "error:invalid-request"); - } - method = urlParseMethod(mstr); + method = urlParseMethod(hmsg->buf + hmsg->m_start, hmsg->m_len); + if (method == METHOD_NONE) { debug(33, 1) ("parseHttpRequest: Unsupported method '%s'\n", mstr); - xfree(inbuf); return parseHttpRequestAbort(conn, "error:unsupported-request-method"); } debug(33, 5) ("parseHttpRequest: Method is '%s'\n", mstr); *method_p = method; - /* look for URL+HTTP/x.x */ - if ((url = strtok(NULL, "\n")) == NULL) { - debug(33, 1) ("parseHttpRequest: Missing URL\n"); - xfree(inbuf); - return parseHttpRequestAbort(conn, "error:missing-url"); - } - if (http_version_offset) { - if (http_version_offset < url - inbuf) { - debug(33, 1) ("parseHttpRequest: Missing URL\n"); - xfree(inbuf); - return parseHttpRequestAbort(conn, "error:missing-url"); - } - inbuf[http_version_offset] = '\0'; - } else { - t = url + strlen(url) - 1; - while (t > url && *t == '\r') - *t-- = '\0'; - } - while (xisspace(*url)) - url++; - debug(33, 5) ("parseHttpRequest: URI is '%s'\n", url); + /* Make sure URL fits inside MAX_URL */ + if (hmsg->u_len >= MAX_URL) { + debug(33, 1) ("parseHttpRequest: URL too big (%d) chars: %s\n", hmsg->u_len, hmsg->buf + hmsg->u_start); + return parseHttpRequestAbort(conn, "error:request-too-large"); + } + xmemcpy(urlbuf, hmsg->buf + hmsg->u_start, hmsg->u_len); + /* XXX off-by-one termination error? 
*/ + urlbuf[hmsg->u_len] = '\0'; + debug(33, 5) ("parseHttpRequest: URI is '%s'\n", urlbuf); /* * Process headers after request line + * XXX at this point we really should just parse the damned headers rather than doing + * it later, allowing us to then do the URL acceleration stuff withuot too much hackery. */ - req_hdr = inbuf + *req_line_sz_p; - header_sz = req_sz - *req_line_sz_p; + /* XXX re-evaluate all of these values and use whats in hmsg instead! */ + req_hdr = hmsg->buf + hmsg->r_len; + header_sz = hmsg->h_len; debug(33, 3) ("parseHttpRequest: req_hdr = {%s}\n", req_hdr); - end = req_hdr + header_sz; - debug(33, 3) ("parseHttpRequest: end = {%s}\n", end); - prefix_sz = end - inbuf; + prefix_sz = req_sz; debug(33, 3) ("parseHttpRequest: prefix_sz = %d, req_line_sz = %d\n", - (int) prefix_sz, (int) *req_line_sz_p); + (int) prefix_sz, (int) hmsg->r_len); assert(prefix_sz <= conn->in.offset); /* Ok, all headers are received */ @@ -3623,12 +3575,9 @@ http->start = current_time; http->req_sz = prefix_sz; http->range_iter.boundary = StringNull; - *prefix_p = xmalloc(prefix_sz + 1); - xmemcpy(*prefix_p, conn->in.buf, prefix_sz); - *(*prefix_p + prefix_sz) = '\0'; dlinkAdd(http, &http->active, &ClientActiveRequests); - debug(33, 5) ("parseHttpRequest: Request Header is\n%s\n", (*prefix_p) + *req_line_sz_p); + debug(33, 5) ("parseHttpRequest: Request Header is\n%s\n", hmsg->buf + hmsg->req_end); #if THIS_VIOLATES_HTTP_SPECS_ON_URL_TRANSFORMATION if ((t = strchr(url, '#'))) /* remove HTML anchors */ @@ -3757,12 +3706,7 @@ http->uri = xcalloc(url_sz, 1); strcpy(http->uri, url); } - if (!stringHasCntl(http->uri)) - http->log_uri = xstrndup(http->uri, MAX_URL); - else - http->log_uri = xstrndup(rfc1738_escape_unescaped(http->uri), MAX_URL); debug(33, 5) ("parseHttpRequest: Complete request received\n"); - xfree(inbuf); *status = 1; return http; @@ -3770,7 +3714,6 @@ /* This tries to back out what is done above */ dlinkDelete(&http->active, &ClientActiveRequests); safe_free(http->uri); - xfree(inbuf); cbdataFree(http); return parseHttpRequestAbort(conn, "error:invalid-request"); } @@ -3799,19 +3742,227 @@ } } +/* + * Attempt to parse a request in the conn buffer + * + * Return the number of bytes to consume from the buffer. + * >0 : consume X bytes and try parsing next request + * =0 : couldn't consume anything this trip (partial request); stop parsing & read more data + * <0 : error; stop parsing + */ +static int +clientTryParseRequest(ConnStateData * conn) +{ + int fd = conn->fd; + int nrequests; + dlink_node *n; + clientHttpRequest *http = NULL; + method_t method; + ErrorState *err = NULL; + int parser_return_code = 0; + request_t *request = NULL; + HttpMsgBuf msg; + + + /* Skip leading (and trailing) whitespace */ + while (conn->in.offset > 0 && xisspace(conn->in.buf[0])) { + xmemmove(conn->in.buf, conn->in.buf + 1, conn->in.offset - 1); + conn->in.offset--; + } + conn->in.buf[conn->in.offset] = '\0'; /* Terminate the string */ + if (conn->in.offset == 0) + return 0; + + HttpMsgBufInit(&msg, conn->in.buf, conn->in.offset); /* XXX for now there's no deallocation function needed but this may change */ + /* Limit the number of concurrent requests to 2 */ + for (n = conn->reqs.head, nrequests = 0; n; n = n->next, nrequests++); + if (nrequests >= (Config.onoff.pipeline_prefetch ? 
2 : 1)) { + debug(33, 3) ("clientReadRequest: FD %d max concurrent requests reached\n", fd); + debug(33, 5) ("clientReadRequest: FD %d defering new request until one is done\n", fd); + conn->defer.until = squid_curtime + 100; /* Reset when a request is complete */ + return 0; + } + conn->in.buf[conn->in.offset] = '\0'; /* Terminate the string */ + if (nrequests == 0) + fd_note_static(conn->fd, "Reading next request"); + /* Process request */ + http = parseHttpRequest(conn, &msg, &method, &parser_return_code); + if (!http) { + /* falls through here to the "if parser_return_code == 0"; not sure what will + * happen if http == NULL and parser_return_code != 0 .. */ + } + if (http) { + + /* add to the client request queue */ + dlinkAddTail(http, &http->node, &conn->reqs); + conn->nrequests++; + commSetTimeout(fd, Config.Timeout.lifetime, clientLifetimeTimeout, http); + if (parser_return_code < 0) { + debug(33, 1) ("clientReadRequest: FD %d (%s:%d) Invalid Request\n", fd, fd_table[fd].ipaddr, fd_table[fd].remote_port); + err = errorCon(ERR_INVALID_REQ, HTTP_BAD_REQUEST, NULL); + err->src_addr = conn->peer.sin_addr; + err->request_hdrs = xstrdup(conn->in.buf); + http->log_type = LOG_TCP_DENIED; + http->entry = clientCreateStoreEntry(http, method, null_request_flags); + errorAppendEntry(http->entry, err); + return -1; + } + if ((request = urlParse(method, http->uri)) == NULL) { + debug(33, 5) ("Invalid URL: %s\n", http->uri); + err = errorCon(ERR_INVALID_URL, HTTP_BAD_REQUEST, NULL); + err->src_addr = conn->peer.sin_addr; + err->url = xstrdup(http->uri); + http->al.http.code = err->http_status; + http->log_type = LOG_TCP_DENIED; + http->entry = clientCreateStoreEntry(http, method, null_request_flags); + errorAppendEntry(http->entry, err); + return -1; + } + /* compile headers */ + /* we should skip request line! */ + if ((http->http_ver.major >= 1) && !httpMsgParseRequestHeader(request, &msg)) { + debug(33, 1) ("Failed to parse request headers: %s\n%s\n", + http->uri, msg.buf + msg.req_end); + err = errorCon(ERR_INVALID_URL, HTTP_BAD_REQUEST, request); + err->url = xstrdup(http->uri); + http->al.http.code = err->http_status; + http->log_type = LOG_TCP_DENIED; + http->entry = clientCreateStoreEntry(http, method, null_request_flags); + errorAppendEntry(http->entry, err); + return -1; + } + if (!http->flags.internal && internalCheck(strBuf(request->urlpath))) { + if (internalHostnameIs(request->host)) + http->flags.internal = 1; + else if (Config.onoff.global_internal_static && internalStaticCheck(strBuf(request->urlpath))) + http->flags.internal = 1; + if (http->flags.internal) { + request_t *old_request = requestLink(request); + request = urlParse(method, internalStoreUri("", strBuf(request->urlpath))); + httpHeaderAppend(&request->header, &old_request->header); + requestUnlink(old_request); + } + } + if (conn->port->urlgroup) + request->urlgroup = xstrdup(conn->port->urlgroup); +#if LINUX_TPROXY + request->flags.tproxy = conn->port->tproxy; +#endif + request->flags.accelerated = http->flags.accel; + request->flags.transparent = http->flags.transparent; + /* + * cache the Content-length value in request_t. 
+ */ + request->content_length = httpHeaderGetSize(&request->header, + HDR_CONTENT_LENGTH); + request->flags.internal = http->flags.internal; + request->client_addr = conn->peer.sin_addr; + request->client_port = conn->peer.sin_port; +#if FOLLOW_X_FORWARDED_FOR + request->indirect_client_addr = request->client_addr; +#endif /* FOLLOW_X_FORWARDED_FOR */ + request->my_addr = conn->me.sin_addr; + request->my_port = ntohs(conn->me.sin_port); + request->client_port = ntohs(conn->peer.sin_port); + request->http_ver = http->http_ver; + if (!urlCheckRequest(request) || + httpHeaderHas(&request->header, HDR_TRANSFER_ENCODING)) { + err = errorCon(ERR_UNSUP_REQ, HTTP_NOT_IMPLEMENTED, request); + request->flags.proxy_keepalive = 0; + http->al.http.code = err->http_status; + http->log_type = LOG_TCP_DENIED; + http->entry = clientCreateStoreEntry(http, request->method, null_request_flags); + errorAppendEntry(http->entry, err); + return -1; + } + if (!clientCheckContentLength(request)) { + err = errorCon(ERR_INVALID_REQ, HTTP_LENGTH_REQUIRED, request); + http->al.http.code = err->http_status; + http->log_type = LOG_TCP_DENIED; + http->entry = clientCreateStoreEntry(http, request->method, null_request_flags); + errorAppendEntry(http->entry, err); + return -1; + } + http->request = requestLink(request); + http->orig_request = requestLink(request); + clientSetKeepaliveFlag(http); + /* Do we expect a request-body? */ + if (request->content_length > 0) { + conn->body.size_left = request->content_length; + request->body_reader = clientReadBody; + request->body_reader_data = conn; + cbdataLock(conn); + /* Is it too large? */ + if (clientRequestBodyTooLarge(request->content_length)) { + err = errorCon(ERR_TOO_BIG, HTTP_REQUEST_ENTITY_TOO_LARGE, request); + http->log_type = LOG_TCP_DENIED; + http->entry = clientCreateStoreEntry(http, + METHOD_NONE, null_request_flags); + errorAppendEntry(http->entry, err); + return -1; + } + } + if (request->method == METHOD_CONNECT) { + /* Stop reading requests... 
*/ + commSetSelect(fd, COMM_SELECT_READ, NULL, NULL, 0); + if (!DLINK_ISEMPTY(conn->reqs) && DLINK_HEAD(conn->reqs) == http) + clientCheckFollowXForwardedFor(http); + else { + debug(33, 1) ("WARNING: pipelined CONNECT request seen from %s\n", inet_ntoa(http->conn->peer.sin_addr)); + debugObj(33, 1, "Previous request:\n", ((clientHttpRequest *) DLINK_HEAD(conn->reqs))->request, + (ObjPackMethod) & httpRequestPackDebug); + debugObj(33, 1, "This request:\n", request, (ObjPackMethod) & httpRequestPackDebug); + } + return 0; + } else { + clientCheckFollowXForwardedFor(http); + } + } else if (parser_return_code == 0) { + /* + * Partial request received; reschedule until parseHttpRequest() + * is happy with the input + */ + if (conn->in.offset >= Config.maxRequestHeaderSize) { + /* The request is too large to handle */ + debug(33, 1) ("Request header is too large (%d bytes)\n", + (int) conn->in.offset); + debug(33, 1) ("Config 'request_header_max_size'= %ld bytes.\n", + (long int) Config.maxRequestHeaderSize); + err = errorCon(ERR_TOO_BIG, HTTP_REQUEST_ENTITY_TOO_LARGE, NULL); + err->src_addr = conn->peer.sin_addr; + http = parseHttpRequestAbort(conn, "error:request-too-large"); + /* add to the client request queue */ + dlinkAddTail(http, &http->node, &conn->reqs); + http->log_type = LOG_TCP_DENIED; + http->entry = clientCreateStoreEntry(http, METHOD_NONE, null_request_flags); + errorAppendEntry(http->entry, err); + return -1; + } + return 0; + } + if (!cbdataValid(conn)) + return -1; + + /* + * For now we assume "here" means "we parsed a valid request. This might not be the case + * as I might've broken up clientReadRequest() wrong. Quite a bit more work should be + * done to simplify this code anyway so the first step is identifying the cases where + * this isn't true. + */ + assert(http != NULL); + assert(http->req_sz > 0); + + return http->req_sz; +} + static void clientReadRequest(int fd, void *data) { ConnStateData *conn = data; - int parser_return_code = 0; - request_t *request = NULL; int size; - method_t method; - clientHttpRequest *http = NULL; - char *prefix = NULL; - ErrorState *err = NULL; fde *F = &fd_table[fd]; int len = conn->in.size - conn->in.offset - 1; + int ret; debug(33, 4) ("clientReadRequest: FD %d: reading request...\n", fd); if (len == 0) { /* Grow the request memory area to accomodate for a large request */ @@ -3853,7 +4004,7 @@ F->flags.socket_eof = 1; conn->defer.until = squid_curtime + 1; conn->defer.n++; - fd_note(fd, "half-closed"); + fd_note_static(fd, "half-closed"); /* There is one more close check at the end, to detect aborted * (partial) requests. At this point we can't tell if the request * is partial. @@ -3879,202 +4030,22 @@ } } /* Process next request */ - while (conn->in.offset > 0 && conn->body.size_left == 0) { - int nrequests; - dlink_node *n; - size_t req_line_sz = 0; - /* Skip leading (and trailing) whitespace */ - while (conn->in.offset > 0 && xisspace(conn->in.buf[0])) { - xmemmove(conn->in.buf, conn->in.buf + 1, conn->in.offset - 1); - conn->in.offset--; - } - conn->in.buf[conn->in.offset] = '\0'; /* Terminate the string */ - if (conn->in.offset == 0) - break; - /* Limit the number of concurrent requests to 2 */ - for (n = conn->reqs.head, nrequests = 0; n; n = n->next, nrequests++); - if (nrequests >= (Config.onoff.pipeline_prefetch ? 
2 : 1)) { - debug(33, 3) ("clientReadRequest: FD %d max concurrent requests reached\n", fd); - debug(33, 5) ("clientReadRequest: FD %d defering new request until one is done\n", fd); - conn->defer.until = squid_curtime + 100; /* Reset when a request is complete */ - break; - } - conn->in.buf[conn->in.offset] = '\0'; /* Terminate the string */ - if (nrequests == 0) - fd_note(conn->fd, "Reading next request"); - /* Process request */ - http = parseHttpRequest(conn, - &method, - &parser_return_code, - &prefix, - &req_line_sz); - if (!http) - safe_free(prefix); - if (http) { - assert(http->req_sz > 0); - assert(conn->in.offset >= http->req_sz); - conn->in.offset -= http->req_sz; - debug(33, 5) ("conn->in.offset = %d\n", (int) conn->in.offset); - /* - * If we read past the end of this request, move the remaining - * data to the beginning - */ - if (conn->in.offset > 0) - xmemmove(conn->in.buf, conn->in.buf + http->req_sz, conn->in.offset); - /* add to the client request queue */ - dlinkAddTail(http, &http->node, &conn->reqs); - conn->nrequests++; - commSetTimeout(fd, Config.Timeout.lifetime, clientLifetimeTimeout, http); - if (parser_return_code < 0) { - debug(33, 1) ("clientReadRequest: FD %d (%s:%d) Invalid Request\n", fd, fd_table[fd].ipaddr, fd_table[fd].remote_port); - err = errorCon(ERR_INVALID_REQ, HTTP_BAD_REQUEST, NULL); - err->src_addr = conn->peer.sin_addr; - err->request_hdrs = xstrdup(conn->in.buf); - http->log_type = LOG_TCP_DENIED; - http->entry = clientCreateStoreEntry(http, method, null_request_flags); - errorAppendEntry(http->entry, err); - safe_free(prefix); - break; - } - if ((request = urlParse(method, http->uri)) == NULL) { - debug(33, 5) ("Invalid URL: %s\n", http->uri); - err = errorCon(ERR_INVALID_URL, HTTP_BAD_REQUEST, NULL); - err->src_addr = conn->peer.sin_addr; - err->url = xstrdup(http->uri); - http->al.http.code = err->http_status; - http->log_type = LOG_TCP_DENIED; - http->entry = clientCreateStoreEntry(http, method, null_request_flags); - errorAppendEntry(http->entry, err); - safe_free(prefix); - break; - } - /* compile headers */ - /* we should skip request line! 
*/ - if ((http->http_ver.major >= 1) && !httpRequestParseHeader(request, prefix + req_line_sz)) { - debug(33, 1) ("Failed to parse request headers: %s\n%s\n", - http->uri, prefix); - err = errorCon(ERR_INVALID_URL, HTTP_BAD_REQUEST, request); - err->url = xstrdup(http->uri); - http->al.http.code = err->http_status; - http->log_type = LOG_TCP_DENIED; - http->entry = clientCreateStoreEntry(http, method, null_request_flags); - errorAppendEntry(http->entry, err); - safe_free(prefix); - break; - } - safe_free(prefix); - safe_free(http->log_uri); - http->log_uri = xstrdup(urlCanonicalClean(request)); - if (!http->flags.internal && internalCheck(strBuf(request->urlpath))) { - if (internalHostnameIs(request->host)) - http->flags.internal = 1; - else if (Config.onoff.global_internal_static && internalStaticCheck(strBuf(request->urlpath))) - http->flags.internal = 1; - if (http->flags.internal) { - request_t *old_request = requestLink(request); - request = urlParse(method, internalStoreUri("", strBuf(request->urlpath))); - httpHeaderAppend(&request->header, &old_request->header); - requestUnlink(old_request); - } - } - if (conn->port->urlgroup) - request->urlgroup = xstrdup(conn->port->urlgroup); -#if LINUX_TPROXY - request->flags.tproxy = conn->port->tproxy; -#endif - request->flags.accelerated = http->flags.accel; - request->flags.transparent = http->flags.transparent; - /* - * cache the Content-length value in request_t. - */ - request->content_length = httpHeaderGetSize(&request->header, - HDR_CONTENT_LENGTH); - request->flags.internal = http->flags.internal; - request->client_addr = conn->peer.sin_addr; - request->client_port = conn->peer.sin_port; -#if FOLLOW_X_FORWARDED_FOR - request->indirect_client_addr = request->client_addr; -#endif /* FOLLOW_X_FORWARDED_FOR */ - request->my_addr = conn->me.sin_addr; - request->my_port = ntohs(conn->me.sin_port); - request->client_port = ntohs(conn->peer.sin_port); - request->http_ver = http->http_ver; - if (!urlCheckRequest(request) || - httpHeaderHas(&request->header, HDR_TRANSFER_ENCODING)) { - err = errorCon(ERR_UNSUP_REQ, HTTP_NOT_IMPLEMENTED, request); - request->flags.proxy_keepalive = 0; - http->al.http.code = err->http_status; - http->log_type = LOG_TCP_DENIED; - http->entry = clientCreateStoreEntry(http, request->method, null_request_flags); - errorAppendEntry(http->entry, err); - break; - } - if (!clientCheckContentLength(request)) { - err = errorCon(ERR_INVALID_REQ, HTTP_LENGTH_REQUIRED, request); - http->al.http.code = err->http_status; - http->log_type = LOG_TCP_DENIED; - http->entry = clientCreateStoreEntry(http, request->method, null_request_flags); - errorAppendEntry(http->entry, err); - break; - } - http->request = requestLink(request); - http->orig_request = requestLink(request); - clientSetKeepaliveFlag(http); - /* Do we expect a request-body? */ - if (request->content_length > 0) { - conn->body.size_left = request->content_length; - request->body_reader = clientReadBody; - request->body_reader_data = conn; - cbdataLock(conn); - /* Is it too large? */ - if (clientRequestBodyTooLarge(request->content_length)) { - err = errorCon(ERR_TOO_BIG, HTTP_REQUEST_ENTITY_TOO_LARGE, request); - http->log_type = LOG_TCP_DENIED; - http->entry = clientCreateStoreEntry(http, - METHOD_NONE, null_request_flags); - errorAppendEntry(http->entry, err); - break; - } - } - if (request->method == METHOD_CONNECT) { - /* Stop reading requests... 
*/ - commSetSelect(fd, COMM_SELECT_READ, NULL, NULL, 0); - if (!DLINK_ISEMPTY(conn->reqs) && DLINK_HEAD(conn->reqs) == http) - clientCheckFollowXForwardedFor(http); - else { - debug(33, 1) ("WARNING: pipelined CONNECT request seen from %s\n", inet_ntoa(http->conn->peer.sin_addr)); - debugObj(33, 1, "Previous request:\n", ((clientHttpRequest *) DLINK_HEAD(conn->reqs))->request, - (ObjPackMethod) & httpRequestPackDebug); - debugObj(33, 1, "This request:\n", request, (ObjPackMethod) & httpRequestPackDebug); - } - break; - } else { - clientCheckFollowXForwardedFor(http); - } - } else if (parser_return_code == 0) { - /* - * Partial request received; reschedule until parseHttpRequest() - * is happy with the input - */ - if (conn->in.offset >= Config.maxRequestHeaderSize) { - /* The request is too large to handle */ - debug(33, 1) ("Request header is too large (%d bytes)\n", - (int) conn->in.offset); - debug(33, 1) ("Config 'request_header_max_size'= %ld bytes.\n", - (long int) Config.maxRequestHeaderSize); - err = errorCon(ERR_TOO_BIG, HTTP_REQUEST_ENTITY_TOO_LARGE, NULL); - err->src_addr = conn->peer.sin_addr; - http = parseHttpRequestAbort(conn, "error:request-too-large"); - /* add to the client request queue */ - dlinkAddTail(http, &http->node, &conn->reqs); - http->log_type = LOG_TCP_DENIED; - http->entry = clientCreateStoreEntry(http, METHOD_NONE, null_request_flags); - errorAppendEntry(http->entry, err); - } - break; - } - if (!cbdataValid(conn)) + while (cbdataValid(conn) && conn->in.offset > 0 && conn->body.size_left == 0) { + /* Ret tells us how many bytes to consume - 0 == didn't consume request, > 0 == consume, < 0 == error */ + ret = clientTryParseRequest(conn); + if (ret <= 0) break; + assert(ret > 0); + assert(conn->in.offset >= ret); + conn->in.offset -= ret; + debug(33, 5) ("removing %d bytes; conn->in.offset = %d\n", ret, (int) conn->in.offset); + /* + * If we read past the end of this request, move the remaining + * data to the beginning + */ + if (conn->in.offset > 0) + xmemmove(conn->in.buf, conn->in.buf + ret, conn->in.offset); + } /* while offset > 0 && conn->body.size_left == 0 */ if (!cbdataValid(conn)) { cbdataUnlock(conn); @@ -4524,7 +4495,7 @@ } F = &fd_table[fd]; debug(33, 4) ("httpAccept: FD %d: accepted port %d client %s:%d\n", fd, F->local_port, F->ipaddr, F->remote_port); - fd_note(fd, "client http connect"); + fd_note_static(fd, "client http connect"); connState = cbdataAlloc(ConnStateData); connState->port = s; cbdataLock(connState->port); @@ -4653,7 +4624,7 @@ F->read_method = &ssl_read_method; F->write_method = &ssl_write_method; debug(50, 5) ("httpsAcceptSSL: FD %d: starting SSL negotiation.\n", fd); - fd_note(fd, "client https connect"); + fd_note_static(fd, "client https connect"); commSetSelect(fd, COMM_SELECT_READ, clientNegotiateSSL, connState, 0); commSetDefer(fd, clientReadDefer, connState); Index: squid/src/comm.c =================================================================== RCS file: /cvsroot/squid/squid/src/comm.c,v retrieving revision 1.358 retrieving revision 1.359 diff -u -r1.358 -r1.359 --- squid/src/comm.c 23 Oct 2006 11:22:21 -0000 1.358 +++ squid/src/comm.c 21 Jan 2007 12:53:58 -0000 1.359 @@ -1,6 +1,6 @@ /* - * $Id: comm.c,v 1.358 2006/10/23 11:22:21 hno Exp $ + * $Id: comm.c,v 1.359 2007/01/21 12:53:58 adrian Exp $ * * DEBUG: section 5 Socket Functions * AUTHOR: Harvest Derived @@ -526,7 +526,7 @@ status = COMM_INPROGRESS; else return COMM_ERROR; - xstrncpy(F->ipaddr, inet_ntoa(address->sin_addr), 16); + xstrncpy(F->ipaddr, 
xinet_ntoa(address->sin_addr), 16); F->remote_port = ntohs(address->sin_port); if (status == COMM_OK) { debug(5, 10) ("comm_connect_addr: FD %d connected to %s:%d\n", @@ -572,7 +572,7 @@ /* fdstat update */ fd_open(sock, FD_SOCKET, "HTTP Request"); F = &fd_table[sock]; - xstrncpy(F->ipaddr, inet_ntoa(P.sin_addr), 16); + xstrncpy(F->ipaddr, xinet_ntoa(P.sin_addr), 16); F->remote_port = htons(P.sin_port); F->local_port = htons(M.sin_port); commSetNonBlocking(sock); @@ -646,7 +646,7 @@ void comm_lingering_close(int fd) { - fd_note(fd, "lingering close"); + fd_note_static(fd, "lingering close"); commSetSelect(fd, COMM_SELECT_READ, NULL, NULL, 0); commSetSelect(fd, COMM_SELECT_WRITE, NULL, NULL, 0); commSetTimeout(fd, 10, commLingerTimeout, NULL); Index: squid/src/errormap.c =================================================================== RCS file: /cvsroot/squid/squid/src/errormap.c,v retrieving revision 1.2 retrieving revision 1.3 diff -u -r1.2 -r1.3 --- squid/src/errormap.c 5 Jun 2006 22:47:01 -0000 1.2 +++ squid/src/errormap.c 21 Jan 2007 12:53:58 -0000 1.3 @@ -1,6 +1,6 @@ /* - * $Id: errormap.c,v 1.2 2006/06/05 22:47:01 hno Exp $ + * $Id: errormap.c,v 1.3 2007/01/21 12:53:58 adrian Exp $ * * DEBUG: section ?? Error Beautifier * AUTHOR: Henrik Nordstrom @@ -185,7 +185,7 @@ state = cbdataAlloc(ErrorMapState); state->req = requestLink(req); - state->e = storeCreateEntry(errorUrl, errorUrl, req->flags, req->method); + state->e = storeCreateEntry(errorUrl, req->flags, req->method); state->sc = storeClientRegister(state->e, state); state->callback = callback; state->callback_data = callback_data; @@ -194,12 +194,12 @@ hdrpos = HttpHeaderInitPos; while ((hdr = httpHeaderGetEntry(&client_req->header, &hdrpos)) != NULL) { if (CBIT_TEST(client_headers, hdr->id)) - httpHeaderAddEntry(&req->header, httpHeaderEntryClone(hdr)); + httpHeaderAddClone(&req->header, hdr); } hdrpos = HttpHeaderInitPos; while ((hdr = httpHeaderGetEntry(&reply->header, &hdrpos)) != NULL) { if (CBIT_TEST(server_headers, hdr->id)) - httpHeaderAddEntry(&req->header, httpHeaderEntryClone(hdr)); + httpHeaderAddClone(&req->header, hdr); } httpHeaderPutInt(&req->header, HDR_X_ERROR_STATUS, (int) reply->sline.status); httpHeaderPutStr(&req->header, HDR_X_REQUEST_URI, urlCanonical(client_req)); Index: squid/src/errorpage.c =================================================================== RCS file: /cvsroot/squid/squid/src/errorpage.c,v retrieving revision 1.190 retrieving revision 1.191 diff -u -r1.190 -r1.191 --- squid/src/errorpage.c 19 Jan 2007 00:21:01 -0000 1.190 +++ squid/src/errorpage.c 21 Jan 2007 12:53:59 -0000 1.191 @@ -1,6 +1,6 @@ /* - * $Id: errorpage.c,v 1.190 2007/01/19 00:21:01 hno Exp $ + * $Id: errorpage.c,v 1.191 2007/01/21 12:53:59 adrian Exp $ * * DEBUG: section 4 Error Generation * AUTHOR: Duane Wessels @@ -526,7 +526,7 @@ p = authenticateAuthUserRequestMessage(err->auth_user_request) ? authenticateAuthUserRequestMessage(err->auth_user_request) : "[not available]"; break; case 'M': - p = r ? RequestMethodStr[r->method] : "[unkown method]"; + p = r ? RequestMethods[r->method].str : "[unknown method]"; break; case 'o': p = external_acl_message; @@ -547,7 +547,7 @@ if (NULL != r) { Packer p; memBufPrintf(&mb, "%s %s HTTP/%d.%d\n", - RequestMethodStr[r->method], + RequestMethods[r->method].str, strLen(r->urlpath) ? 
strBuf(r->urlpath) : "/", r->http_ver.major, r->http_ver.minor); packerToMemInit(&p, &mb); Index: squid/src/external_acl.c =================================================================== RCS file: /cvsroot/squid/squid/src/external_acl.c,v retrieving revision 1.29 retrieving revision 1.30 diff -u -r1.29 -r1.30 --- squid/src/external_acl.c 1 Jan 2007 23:32:13 -0000 1.29 +++ squid/src/external_acl.c 21 Jan 2007 12:53:59 -0000 1.30 @@ -1,6 +1,6 @@ /* - * $Id: external_acl.c,v 1.29 2007/01/01 23:32:13 hno Exp $ + * $Id: external_acl.c,v 1.30 2007/01/21 12:53:59 adrian Exp $ * * DEBUG: section 82 External ACL * AUTHOR: Henrik Nordstrom, MARA Systems AB @@ -666,7 +666,7 @@ str = strBuf(request->urlpath); break; case EXT_ACL_METHOD: - str = RequestMethodStr[request->method]; + str = RequestMethods[request->method].str; break; case EXT_ACL_HEADER: sb = httpHeaderGetByName(&request->header, format->header); Index: squid/src/fd.c =================================================================== RCS file: /cvsroot/squid/squid/src/fd.c,v retrieving revision 1.55 retrieving revision 1.56 diff -u -r1.55 -r1.56 --- squid/src/fd.c 23 Oct 2006 11:25:29 -0000 1.55 +++ squid/src/fd.c 21 Jan 2007 12:53:59 -0000 1.56 @@ -1,6 +1,6 @@ /* - * $Id: fd.c,v 1.55 2006/10/23 11:25:29 hno Exp $ + * $Id: fd.c,v 1.56 2007/01/21 12:53:59 adrian Exp $ * * DEBUG: section 51 Filedescriptor Functions * AUTHOR: Duane Wessels @@ -176,7 +176,7 @@ #endif fdUpdateBiggest(fd, 1); if (desc) - xstrncpy(F->desc, desc, FD_DESC_SZ); + fd_note(fd, desc); Number_FD++; } @@ -184,7 +184,15 @@ fd_note(int fd, const char *s) { fde *F = &fd_table[fd]; - xstrncpy(F->desc, s, FD_DESC_SZ); + xstrncpy(F->descbuf, s, FD_DESC_SZ); + F->desc = F->descbuf; +} + +void +fd_note_static(int fd, const char *s) +{ + fde *F = &fd_table[fd]; + F->desc = s; } void Index: squid/src/forward.c =================================================================== RCS file: /cvsroot/squid/squid/src/forward.c,v retrieving revision 1.120 retrieving revision 1.121 diff -u -r1.120 -r1.121 --- squid/src/forward.c 19 Jan 2007 00:21:01 -0000 1.120 +++ squid/src/forward.c 21 Jan 2007 12:53:59 -0000 1.121 @@ -1,6 +1,6 @@ /* - * $Id: forward.c,v 1.120 2007/01/19 00:21:01 hno Exp $ + * $Id: forward.c,v 1.121 2007/01/21 12:53:59 adrian Exp $ * * DEBUG: section 17 Request Forwarding * AUTHOR: Duane Wessels @@ -655,7 +655,7 @@ int server_fd = fwdState->server_fd; debug(17, 3) ("fwdDispatch: FD %d: Fetching '%s %s'\n", fwdState->client_fd, - RequestMethodStr[request->method], + RequestMethods[request->method].str, storeUrl(entry)); /* * Assert that server_fd is set. 
This is to guarantee that fwdState @@ -1121,7 +1121,7 @@ (int) current_time.tv_sec, (int) current_time.tv_usec / 1000, fwdState->last_status, - RequestMethodStr[fwdState->request->method], + RequestMethods[fwdState->request->method].str, fwdState->request->canonical); } Index: squid/src/globals.h =================================================================== RCS file: /cvsroot/squid/squid/src/globals.h,v retrieving revision 1.123 retrieving revision 1.124 diff -u -r1.123 -r1.124 --- squid/src/globals.h 19 Jan 2007 00:19:26 -0000 1.123 +++ squid/src/globals.h 21 Jan 2007 12:53:59 -0000 1.124 @@ -1,6 +1,6 @@ /* - * $Id: globals.h,v 1.123 2007/01/19 00:19:26 hno Exp $ + * $Id: globals.h,v 1.124 2007/01/21 12:53:59 adrian Exp $ * * * SQUID Web Proxy Cache http://www.squid-cache.org/ @@ -48,7 +48,7 @@ extern char config_input_line[BUFSIZ]; extern const char *AclMatchedName; /* NULL */ extern const char *DefaultConfigFile; /* DEFAULT_CONFIG_FILE */ -extern const char *RequestMethodStr[]; +extern rms_t RequestMethods[]; extern const char *ProtocolStr[]; extern const char *cfg_filename; /* NULL */ extern const char *const appname; /* "squid" */ Index: squid/src/htcp.c =================================================================== RCS file: /cvsroot/squid/squid/src/htcp.c,v retrieving revision 1.54 retrieving revision 1.55 diff -u -r1.54 -r1.55 --- squid/src/htcp.c 4 Nov 2006 14:14:07 -0000 1.54 +++ squid/src/htcp.c 21 Jan 2007 12:54:00 -0000 1.55 @@ -1,6 +1,6 @@ /* - * $Id: htcp.c,v 1.54 2006/11/04 14:14:07 hno Exp $ + * $Id: htcp.c,v 1.55 2007/01/21 12:54:00 adrian Exp $ * * DEBUG: section 31 Hypertext Caching Protocol * AUTHOR: Duane Wesssels @@ -553,7 +553,7 @@ /* * Parse the request */ - method = urlParseMethod(s->method); + method = urlParseMethod(s->method, strlen(s->method)); s->request = urlParse(method == METHOD_NONE ? METHOD_GET : method, s->uri); return s; } @@ -1178,7 +1178,7 @@ stuff.f1 = 1; stuff.response = 0; stuff.msg_id = ++msg_id_counter; - stuff.S.method = (char *) RequestMethodStr[req->method]; + stuff.S.method = (char *) RequestMethods[req->method].str; stuff.S.uri = (char *) storeUrl(e); stuff.S.version = vbuf; httpBuildRequestHeader(req, req, e, &hdr, flags); Index: squid/src/http.c =================================================================== RCS file: /cvsroot/squid/squid/src/http.c,v retrieving revision 1.420 retrieving revision 1.421 diff -u -r1.420 -r1.421 --- squid/src/http.c 21 Jan 2007 10:02:05 -0000 1.420 +++ squid/src/http.c 21 Jan 2007 12:54:00 -0000 1.421 @@ -1,6 +1,6 @@ /* - * $Id: http.c,v 1.420 2007/01/21 10:02:05 hno Exp $ + * $Id: http.c,v 1.421 2007/01/21 12:54:00 adrian Exp $ * * DEBUG: section 11 Hypertext Transfer Protocol (HTTP) * AUTHOR: Harvest Derived @@ -738,7 +738,7 @@ if (len > 0) { debug(11, Config.onoff.relaxed_header_parser <= 0 || keep_alive ? 
1 : 2) ("httpReadReply: Excess data from \"%s %s\"\n", - RequestMethodStr[orig_request->method], + RequestMethods[orig_request->method].str, storeUrl(entry)); comm_close(fd); return; @@ -760,7 +760,7 @@ */ if (!httpState->flags.request_sent) { debug(11, 1) ("httpReadReply: Request not yet fully sent \"%s %s\"\n", - RequestMethodStr[orig_request->method], + RequestMethods[orig_request->method].str, storeUrl(entry)); keep_alive = 0; } @@ -1066,7 +1066,7 @@ * authentication forwarding is explicitly enabled */ if (flags.proxying && orig_request->peer_login && strcmp(orig_request->peer_login, "PASS") == 0) { - httpHeaderAddEntry(hdr_out, httpHeaderEntryClone(e)); + httpHeaderAddClone(hdr_out, e); if (request->flags.connection_proxy_auth) request->flags.pinned = 1; } @@ -1075,7 +1075,7 @@ /* Pass on WWW authentication. */ if (!flags.originpeer) { - httpHeaderAddEntry(hdr_out, httpHeaderEntryClone(e)); + httpHeaderAddClone(hdr_out, e); if (orig_request->flags.connection_auth) orig_request->flags.pinned = 1; } else { @@ -1083,7 +1083,7 @@ * (see also below for proxy->server authentication) */ if (orig_request->peer_login && (strcmp(orig_request->peer_login, "PASS") == 0 || strcmp(orig_request->peer_login, "PROXYPASS") == 0)) { - httpHeaderAddEntry(hdr_out, httpHeaderEntryClone(e)); + httpHeaderAddClone(hdr_out, e); if (orig_request->flags.connection_auth) orig_request->flags.pinned = 1; } @@ -1099,7 +1099,7 @@ if (orig_request->peer_domain) httpHeaderPutStr(hdr_out, HDR_HOST, orig_request->peer_domain); else if (request->flags.redirected && !Config.onoff.redir_rewrites_host) - httpHeaderAddEntry(hdr_out, httpHeaderEntryClone(e)); + httpHeaderAddClone(hdr_out, e); else { /* use port# only if not default */ if (orig_request->port == urlDefaultPort(orig_request->protocol)) { @@ -1114,13 +1114,13 @@ /* append unless we added our own; * note: at most one client's ims header can pass through */ if (!httpHeaderHas(hdr_out, HDR_IF_MODIFIED_SINCE)) - httpHeaderAddEntry(hdr_out, httpHeaderEntryClone(e)); + httpHeaderAddClone(hdr_out, e); break; case HDR_IF_NONE_MATCH: /* append unless we added our own; * note: at most one client's ims header can pass through */ if (!httpHeaderHas(hdr_out, HDR_IF_NONE_MATCH)) - httpHeaderAddEntry(hdr_out, httpHeaderEntryClone(e)); + httpHeaderAddClone(hdr_out, e); break; case HDR_MAX_FORWARDS: if (orig_request->method == METHOD_TRACE) { @@ -1132,18 +1132,18 @@ break; case HDR_X_FORWARDED_FOR: if (!opt_forwarded_for) - httpHeaderAddEntry(hdr_out, httpHeaderEntryClone(e)); + httpHeaderAddClone(hdr_out, e); break; case HDR_RANGE: case HDR_IF_RANGE: case HDR_REQUEST_RANGE: if (!we_do_ranges) - httpHeaderAddEntry(hdr_out, httpHeaderEntryClone(e)); + httpHeaderAddClone(hdr_out, e); break; case HDR_VIA: /* If Via is disabled then forward any received header as-is */ if (!Config.onoff.via) - httpHeaderAddEntry(hdr_out, httpHeaderEntryClone(e)); + httpHeaderAddClone(hdr_out, e); break; case HDR_CONNECTION: case HDR_KEEP_ALIVE: @@ -1162,11 +1162,11 @@ break; case HDR_FRONT_END_HTTPS: if (!flags.front_end_https) - httpHeaderAddEntry(hdr_out, httpHeaderEntryClone(e)); + httpHeaderAddClone(hdr_out, e); break; default: /* pass on all other header fields */ - httpHeaderAddEntry(hdr_out, httpHeaderEntryClone(e)); + httpHeaderAddClone(hdr_out, e); } } @@ -1329,7 +1329,7 @@ { const int offset = mb->size; memBufPrintf(mb, "%s %s HTTP/1.0\r\n", - RequestMethodStr[request->method], + RequestMethods[request->method].str, strLen(request->urlpath) ? 
strBuf(request->urlpath) : "/"); /* build and pack headers */ { @@ -1417,7 +1417,7 @@ request_t *proxy_req; request_t *orig_req = fwd->request; debug(11, 3) ("httpStart: \"%s %s\"\n", - RequestMethodStr[orig_req->method], + RequestMethods[orig_req->method].str, storeUrl(fwd->entry)); httpState = cbdataAlloc(HttpStateData); storeLockObject(fwd->entry); Index: squid/src/mime.c =================================================================== RCS file: /cvsroot/squid/squid/src/mime.c,v retrieving revision 1.108 retrieving revision 1.109 diff -u -r1.108 -r1.109 --- squid/src/mime.c 28 May 2006 22:24:51 -0000 1.108 +++ squid/src/mime.c 21 Jan 2007 12:54:00 -0000 1.109 @@ -1,6 +1,6 @@ /* - * $Id: mime.c,v 1.108 2006/05/28 22:24:51 hno Exp $ + * $Id: mime.c,v 1.109 2007/01/21 12:54:00 adrian Exp $ * * DEBUG: section 25 MIME Parsing * AUTHOR: Harvest Derived @@ -427,10 +427,7 @@ } flags = null_request_flags; flags.cachable = 1; - e = storeCreateEntry(url, - url, - flags, - METHOD_GET); + e = storeCreateEntry(url, flags, METHOD_GET); assert(e != NULL); EBIT_SET(e->flags, ENTRY_SPECIAL); storeSetPublicKey(e); Index: squid/src/neighbors.c =================================================================== RCS file: /cvsroot/squid/squid/src/neighbors.c,v retrieving revision 1.313 retrieving revision 1.314 diff -u -r1.313 -r1.314 --- squid/src/neighbors.c 19 Jan 2007 00:19:26 -0000 1.313 +++ squid/src/neighbors.c 21 Jan 2007 12:54:00 -0000 1.314 @@ -1,6 +1,6 @@ /* - * $Id: neighbors.c,v 1.313 2007/01/19 00:19:26 hno Exp $ + * $Id: neighbors.c,v 1.314 2007/01/21 12:54:00 adrian Exp $ * * DEBUG: section 15 Neighbor Routines * AUTHOR: Harvest Derived @@ -1162,7 +1162,7 @@ assert(p->type == PEER_MULTICAST); p->mcast.flags.count_event_pending = 0; snprintf(url, MAX_URL, "http://%s/", inet_ntoa(p->in_addr.sin_addr)); - fake = storeCreateEntry(url, url, null_request_flags, METHOD_GET); + fake = storeCreateEntry(url, null_request_flags, METHOD_GET); psstate = cbdataAlloc(ps_state); psstate->request = requestLink(urlParse(METHOD_GET, url)); psstate->entry = fake; Index: squid/src/peer_digest.c =================================================================== RCS file: /cvsroot/squid/squid/src/peer_digest.c,v retrieving revision 1.94 retrieving revision 1.95 diff -u -r1.94 -r1.95 --- squid/src/peer_digest.c 5 Jun 2006 22:47:01 -0000 1.94 +++ squid/src/peer_digest.c 21 Jan 2007 12:54:00 -0000 1.95 @@ -1,6 +1,6 @@ /* - * $Id: peer_digest.c,v 1.94 2006/06/05 22:47:01 hno Exp $ + * $Id: peer_digest.c,v 1.95 2007/01/21 12:54:00 adrian Exp $ * * DEBUG: section 72 Peer Digest Routines * AUTHOR: Alex Rousskov @@ -315,10 +315,10 @@ if (old_e) { debug(72, 5) ("peerDigestRequest: found old entry\n"); storeLockObject(old_e); - storeCreateMemObject(old_e, url, url); + storeCreateMemObject(old_e, url); fetch->old_sc = storeClientRegister(old_e, fetch); } - e = fetch->entry = storeCreateEntry(url, url, req->flags, req->method); + e = fetch->entry = storeCreateEntry(url, req->flags, req->method); assert(EBIT_TEST(e->flags, KEY_PRIVATE)); fetch->sc = storeClientRegister(e, fetch); /* set lastmod to trigger IMS request if possible */ Index: squid/src/peer_monitor.c =================================================================== RCS file: /cvsroot/squid/squid/src/peer_monitor.c,v retrieving revision 1.3 retrieving revision 1.4 diff -u -r1.3 -r1.4 --- squid/src/peer_monitor.c 9 Jan 2007 10:24:41 -0000 1.3 +++ squid/src/peer_monitor.c 21 Jan 2007 12:54:00 -0000 1.4 @@ -1,6 +1,6 @@ /* - * $Id: peer_monitor.c,v 1.3 2007/01/09 
10:24:41 hno Exp $ + * $Id: peer_monitor.c,v 1.4 2007/01/21 12:54:00 adrian Exp $ * * DEBUG: section ?? Peer monitoring * AUTHOR: Henrik Nordstrom @@ -151,7 +151,7 @@ if (pm->peer->login) xstrncpy(req->login, pm->peer->login, MAX_LOGIN_SZ); pm->running.req = requestLink(req); - pm->running.e = storeCreateEntry(url, url, req->flags, req->method); + pm->running.e = storeCreateEntry(url, req->flags, req->method); pm->running.sc = storeClientRegister(pm->running.e, pm); pm->running.buf = memAllocate(MEM_4K_BUF); fwdStartPeer(pm->peer, pm->running.e, pm->running.req); Index: squid/src/peer_select.c =================================================================== RCS file: /cvsroot/squid/squid/src/peer_select.c,v retrieving revision 1.131 retrieving revision 1.132 diff -u -r1.131 -r1.132 --- squid/src/peer_select.c 10 Dec 2006 05:24:52 -0000 1.131 +++ squid/src/peer_select.c 21 Jan 2007 12:54:00 -0000 1.132 @@ -1,6 +1,6 @@ /* - * $Id: peer_select.c,v 1.131 2006/12/10 05:24:52 hno Exp $ + * $Id: peer_select.c,v 1.132 2007/01/21 12:54:00 adrian Exp $ * * DEBUG: section 44 Peer Selection Algorithm * AUTHOR: Duane Wessels @@ -141,7 +141,7 @@ if (entry) debug(44, 3) ("peerSelect: %s\n", storeUrl(entry)); else - debug(44, 3) ("peerSelect: %s\n", RequestMethodStr[request->method]); + debug(44, 3) ("peerSelect: %s\n", RequestMethods[request->method].str); psstate = cbdataAlloc(ps_state); psstate->request = requestLink(request); psstate->entry = entry; @@ -241,7 +241,7 @@ StoreEntry *entry = ps->entry; request_t *request = ps->request; debug(44, 3) ("peerSelectFoo: '%s %s'\n", - RequestMethodStr[request->method], + RequestMethods[request->method].str, request->host); if (ps->direct == DIRECT_UNKNOWN) { if (ps->always_direct == 0 && Config.accessList.AlwaysDirect) { @@ -464,7 +464,7 @@ request_t *request = ps->request; hier_code code = HIER_NONE; debug(44, 3) ("peerGetSomeParent: %s %s\n", - RequestMethodStr[request->method], + RequestMethods[request->method].str, request->host); if (ps->direct == DIRECT_YES) return; Index: squid/src/protos.h =================================================================== RCS file: /cvsroot/squid/squid/src/protos.h,v retrieving revision 1.520 retrieving revision 1.521 diff -u -r1.520 -r1.521 --- squid/src/protos.h 6 Jan 2007 17:22:45 -0000 1.520 +++ squid/src/protos.h 21 Jan 2007 12:54:00 -0000 1.521 @@ -1,6 +1,6 @@ /* - * $Id: protos.h,v 1.520 2007/01/06 17:22:45 hno Exp $ + * $Id: protos.h,v 1.521 2007/01/21 12:54:00 adrian Exp $ * * * SQUID Web Proxy Cache http://www.squid-cache.org/ @@ -263,6 +263,7 @@ extern void fd_close(int fd); extern void fd_open(int fd, unsigned int type, const char *); extern void fd_note(int fd, const char *); +extern void fd_note_static(int fd, const char *); extern void fd_bytes(int fd, int len, unsigned int type); extern void fdFreeMemory(void); extern void fdDumpOpen(void); @@ -406,6 +407,7 @@ extern int httpHeaderParseInt(const char *start, int *val); extern int httpHeaderParseSize(const char *start, squid_off_t * sz); extern int httpHeaderReset(HttpHeader * hdr); +extern void httpHeaderAddClone(HttpHeader * hdr, const HttpHeaderEntry * e); #if STDC_HEADERS extern void httpHeaderPutStrf(HttpHeader * hdr, http_hdr_type id, const char *fmt,...) 
PRINTF_FORMAT_ARG3; @@ -470,7 +472,7 @@ /* Http Msg (currently in HttpReply.c @?@ ) */ extern int httpMsgIsPersistent(http_version_t http_ver, const HttpHeader * hdr); -extern int httpMsgIsolateHeaders(const char **parse_start, const char **blk_start, const char **blk_end); +extern int httpMsgIsolateHeaders(const char **parse_start, int l, const char **blk_start, const char **blk_end); /* Http Reply */ extern void httpReplyInitModule(void); @@ -510,7 +512,6 @@ extern void requestDestroy(request_t *); extern request_t *requestLink(request_t *); extern void requestUnlink(request_t *); -extern int httpRequestParseHeader(request_t * req, const char *parse_start); extern void httpRequestSwapOut(const request_t * req, StoreEntry * e); extern void httpRequestPackDebug(request_t * req, Packer * p); extern int httpRequestPrefixLen(const request_t * req); @@ -896,12 +897,12 @@ /* * store.c */ -extern StoreEntry *new_StoreEntry(int, const char *, const char *); +extern StoreEntry *new_StoreEntry(int, const char *); extern StoreEntry *storeGet(const cache_key *); extern StoreEntry *storeGetPublic(const char *uri, const method_t method); extern StoreEntry *storeGetPublicByRequest(request_t * request); extern StoreEntry *storeGetPublicByRequestMethod(request_t * request, const method_t method); -extern StoreEntry *storeCreateEntry(const char *, const char *, request_flags, method_t); +extern StoreEntry *storeCreateEntry(const char *, request_flags, method_t); extern void storeSetPublicKey(StoreEntry *); extern void storeComplete(StoreEntry *); extern void storeInit(void); @@ -924,7 +925,7 @@ extern void storeMemObjectDump(MemObject * mem); extern void storeEntryDump(const StoreEntry * e, int debug_lvl); extern const char *storeUrl(const StoreEntry *); -extern void storeCreateMemObject(StoreEntry *, const char *, const char *); +extern void storeCreateMemObject(StoreEntry *, const char *); extern void storeCopyNotModifiedReplyHeaders(MemObject * O, MemObject * N); extern void storeBuffer(StoreEntry *); extern void storeBufferFlush(StoreEntry *); @@ -1128,7 +1129,7 @@ extern char *url_convert_hex(char *org_url, int allocate); extern char *url_escape(const char *url); extern protocol_t urlParseProtocol(const char *); -extern method_t urlParseMethod(const char *); +extern method_t urlParseMethod(const char *, int len); extern void urlInitialize(void); extern request_t *urlParse(method_t, char *); extern const char *urlCanonical(request_t *); @@ -1139,9 +1140,9 @@ extern int urlDefaultPort(protocol_t p); extern char *urlCanonicalClean(const request_t *); extern char *urlHostname(const char *url); -extern void parse_extension_method(const char *(*methods)[]); -extern void free_extension_method(const char *(*_methods)[]); -extern void dump_extension_method(StoreEntry * entry, const char *name, const char **methods); +extern void parse_extension_method(rms_t * foo[]); +extern void free_extension_method(rms_t * foo[]); +extern void dump_extension_method(StoreEntry * entry, const char *name, rms_t * methods[]); extern void useragentOpenLog(void); extern void useragentRotateLog(void); @@ -1415,6 +1416,16 @@ /* ETag support */ void storeLocateVaryDone(VaryData * data); void storeLocateVary(StoreEntry * e, int offset, const char *vary_data, String accept_encoding, STLVCB * callback, void *cbdata); -void storeAddVary(const char *url, const char *log_url, const method_t method, const cache_key * key, const char *etag, const char *vary, const char *vary_headers, const char *accept_encoding); +void storeAddVary(const 
char *url, const method_t method, const cache_key * key, const char *etag, const char *vary, const char *vary_headers, const char *accept_encoding); + +/* New HTTP message parsing support */ +extern void HttpMsgBufInit(HttpMsgBuf * hmsg, const char *buf, size_t size); +extern void httpMsgBufDone(HttpMsgBuf * hmsg); +extern int httpMsgParseRequestLine(HttpMsgBuf * hmsg); +extern int httpMsgParseRequestHeader(request_t * req, HttpMsgBuf * hmsg); +extern int httpMsgFindHeadersEnd(HttpMsgBuf * hmsg); + +extern const char *xinet_ntoa(const struct in_addr addr); + #endif /* SQUID_PROTOS_H */ Index: squid/src/redirect.c =================================================================== RCS file: /cvsroot/squid/squid/src/redirect.c,v retrieving revision 1.96 retrieving revision 1.97 diff -u -r1.96 -r1.97 --- squid/src/redirect.c 8 Jul 2006 16:01:12 -0000 1.96 +++ squid/src/redirect.c 21 Jan 2007 12:54:00 -0000 1.97 @@ -1,6 +1,6 @@ /* - * $Id: redirect.c,v 1.96 2006/07/08 16:01:12 serassio Exp $ + * $Id: redirect.c,v 1.97 2007/01/21 12:54:00 adrian Exp $ * * DEBUG: section 61 Redirector * AUTHOR: Duane Wessels @@ -124,7 +124,7 @@ #endif if (!r->client_ident) r->client_ident = dash_str; - r->method_s = RequestMethodStr[http->request->method]; + r->method_s = RequestMethods[http->request->method].str; r->handler = handler; r->data = data; cbdataLock(r->data); Index: squid/src/ssl.c =================================================================== RCS file: /cvsroot/squid/squid/src/ssl.c,v retrieving revision 1.134 retrieving revision 1.135 diff -u -r1.134 -r1.135 --- squid/src/ssl.c 25 Aug 2006 12:26:07 -0000 1.134 +++ squid/src/ssl.c 21 Jan 2007 12:54:00 -0000 1.135 @@ -1,6 +1,6 @@ /* - * $Id: ssl.c,v 1.134 2006/08/25 12:26:07 serassio Exp $ + * $Id: ssl.c,v 1.135 2007/01/21 12:54:00 adrian Exp $ * * DEBUG: section 26 Secure Sockets Layer Proxy * AUTHOR: Duane Wessels @@ -514,7 +514,7 @@ } } debug(26, 3) ("sslStart: '%s %s'\n", - RequestMethodStr[request->method], url); + RequestMethods[request->method].str, url); statCounter.server.all.requests++; statCounter.server.other.requests++; /* Create socket. */ Index: squid/src/stat.c =================================================================== RCS file: /cvsroot/squid/squid/src/stat.c,v retrieving revision 1.377 retrieving revision 1.378 diff -u -r1.377 -r1.378 --- squid/src/stat.c 1 Nov 2006 20:58:52 -0000 1.377 +++ squid/src/stat.c 21 Jan 2007 12:54:00 -0000 1.378 @@ -1,6 +1,6 @@ /* - * $Id: stat.c,v 1.377 2006/11/01 20:58:52 wessels Exp $ + * $Id: stat.c,v 1.378 2007/01/21 12:54:00 adrian Exp $ * * DEBUG: section 18 Cache Manager Statistics * AUTHOR: Harvest Derived @@ -257,9 +257,10 @@ struct _store_client *sc; dlink_node *node; memBufPrintf(mb, "KEY %s\n", storeKeyText(e->hash.key)); + /* XXX should this url be escaped? 
*/ if (mem) memBufPrintf(mb, "\t%s %s\n", - RequestMethodStr[mem->method], mem->log_url); + RequestMethods[mem->method].str, mem->url); memBufPrintf(mb, "\t%s\n", describeStatuses(e)); memBufPrintf(mb, "\t%s\n", storeEntryFlags(e)); memBufPrintf(mb, "\t%s\n", describeTimestamps(e)); Index: squid/src/store.c =================================================================== RCS file: /cvsroot/squid/squid/src/store.c,v retrieving revision 1.570 retrieving revision 1.571 diff -u -r1.570 -r1.571 --- squid/src/store.c 19 Jan 2007 00:21:01 -0000 1.570 +++ squid/src/store.c 21 Jan 2007 12:54:00 -0000 1.571 @@ -1,6 +1,6 @@ /* - * $Id: store.c,v 1.570 2007/01/19 00:21:01 hno Exp $ + * $Id: store.c,v 1.571 2007/01/21 12:54:00 adrian Exp $ * * DEBUG: section 20 Storage Manager * AUTHOR: Harvest Derived @@ -79,7 +79,7 @@ static int storeEntryValidLength(const StoreEntry *); static void storeGetMemSpace(int); static void storeHashDelete(StoreEntry *); -static MemObject *new_MemObject(const char *, const char *); +static MemObject *new_MemObject(const char *); static void destroy_MemObject(StoreEntry *); static FREE destroy_StoreEntry; static void storePurgeMem(StoreEntry *); @@ -111,7 +111,7 @@ #endif static MemObject * -new_MemObject(const char *url, const char *log_url) +new_MemObject(const char *url) { MemObject *mem = memAllocate(MEM_MEMOBJECT); mem->reply = httpReplyCreate(); @@ -119,21 +119,19 @@ #if URL_CHECKSUM_DEBUG mem->chksum = url_checksum(mem->url); #endif - mem->log_url = xstrdup(log_url); mem->object_sz = -1; mem->serverfd = -1; - /* XXX account log_url */ debug(20, 3) ("new_MemObject: returning %p\n", mem); return mem; } StoreEntry * -new_StoreEntry(int mem_obj_flag, const char *url, const char *log_url) +new_StoreEntry(int mem_obj_flag, const char *url) { StoreEntry *e = NULL; e = memAllocate(MEM_STOREENTRY); if (mem_obj_flag) - e->mem_obj = new_MemObject(url, log_url); + e->mem_obj = new_MemObject(url); debug(20, 3) ("new_StoreEntry: returning %p\n", e); e->expires = e->lastmod = e->lastref = e->timestamp = -1; e->swap_filen = -1; @@ -171,7 +169,6 @@ mem->request = NULL; ctx_exit(ctx); /* must exit before we free mem->url */ safe_free(mem->url); - safe_free(mem->log_url); /* XXX account log_url */ safe_free(mem->vary_headers); safe_free(mem->vary_encoding); memFree(mem, MEM_MEMOBJECT); @@ -686,7 +683,7 @@ * At leas one of key or etag must be specified, preferably both. 
*/ void -storeAddVary(const char *url, const char *log_url, const method_t method, const cache_key * key, const char *etag, const char *vary, const char *vary_headers, const char *accept_encoding) +storeAddVary(const char *url, const method_t method, const cache_key * key, const char *etag, const char *vary, const char *vary_headers, const char *accept_encoding) { AddVaryState *state; http_version_t version; @@ -707,7 +704,7 @@ if (state->oe) storeLockObject(state->oe); flags.cachable = 1; - state->e = storeCreateEntry(url, log_url, flags, method); + state->e = storeCreateEntry(url, flags, method); httpBuildVersion(&version, 1, 0); httpReplySetHeaders(state->e->mem_obj->reply, version, HTTP_OK, "Internal marker object", "x-squid-internal/vary", -1, -1, squid_curtime + 100000); httpHeaderPutStr(&state->e->mem_obj->reply->header, HDR_VARY, vary); @@ -731,7 +728,7 @@ */ /* Swap in the dummy Vary object */ if (!state->oe->mem_obj) { - storeCreateMemObject(state->oe, state->url, log_url); + storeCreateMemObject(state->oe, state->url); state->oe->mem_obj->method = method; } state->sc = storeClientRegister(state->oe, state); @@ -1032,7 +1029,7 @@ strListAdd(&vary, strBuf(varyhdr), ','); stringClean(&varyhdr); #endif - storeAddVary(mem->url, mem->log_url, mem->method, newkey, httpHeaderGetStr(&mem->reply->header, HDR_ETAG), strBuf(vary), mem->vary_headers, mem->vary_encoding); + storeAddVary(mem->url, mem->method, newkey, httpHeaderGetStr(&mem->reply->header, HDR_ETAG), strBuf(vary), mem->vary_headers, mem->vary_encoding); stringClean(&vary); } } else { @@ -1056,13 +1053,13 @@ } StoreEntry * -storeCreateEntry(const char *url, const char *log_url, request_flags flags, method_t method) +storeCreateEntry(const char *url, request_flags flags, method_t method) { StoreEntry *e = NULL; MemObject *mem = NULL; debug(20, 3) ("storeCreateEntry: '%s'\n", url); - e = new_StoreEntry(STORE_ENTRY_WITH_MEMOBJ, url, log_url); + e = new_StoreEntry(STORE_ENTRY_WITH_MEMOBJ, url); e->lock_count = 1; /* Note lock here w/o calling storeLock() */ mem = e->mem_obj; mem->method = method; @@ -1734,9 +1731,9 @@ mem->reply); debug(20, 1) ("MemObject->request: %p\n", mem->request); - debug(20, 1) ("MemObject->log_url: %p %s\n", - mem->log_url, - checkNullString(mem->log_url)); + debug(20, 1) ("MemObject->url: %p %s\n", + mem->url, + checkNullString(mem->url)); } void @@ -1808,11 +1805,11 @@ } void -storeCreateMemObject(StoreEntry * e, const char *url, const char *log_url) +storeCreateMemObject(StoreEntry * e, const char *url) { if (e->mem_obj) return; - e->mem_obj = new_MemObject(url, log_url); + e->mem_obj = new_MemObject(url); } /* this just sets DELAY_SENDING */ Index: squid/src/store_digest.c =================================================================== RCS file: /cvsroot/squid/squid/src/store_digest.c,v retrieving revision 1.53 retrieving revision 1.54 diff -u -r1.53 -r1.54 --- squid/src/store_digest.c 17 Jul 2006 14:09:57 -0000 1.53 +++ squid/src/store_digest.c 21 Jan 2007 12:54:00 -0000 1.54 @@ -1,6 +1,6 @@ /* - * $Id: store_digest.c,v 1.53 2006/07/17 14:09:57 hno Exp $ + * $Id: store_digest.c,v 1.54 2007/01/21 12:54:00 adrian Exp $ * * DEBUG: section 71 Store Digest Manager * AUTHOR: Alex Rousskov @@ -350,7 +350,7 @@ url = internalStoreUri("/squid-internal-periodic/", StoreDigestFileName); flags = null_request_flags; flags.cachable = 1; - e = storeCreateEntry(url, url, flags, METHOD_GET); + e = storeCreateEntry(url, flags, METHOD_GET); assert(e); sd_state.rewrite_lock = cbdataAlloc(generic_cbdata); 
sd_state.rewrite_lock->data = e; Index: squid/src/store_key_md5.c =================================================================== RCS file: /cvsroot/squid/squid/src/store_key_md5.c,v retrieving revision 1.29 retrieving revision 1.30 diff -u -r1.29 -r1.30 --- squid/src/store_key_md5.c 7 Jun 2006 19:43:51 -0000 1.29 +++ squid/src/store_key_md5.c 21 Jan 2007 12:54:00 -0000 1.30 @@ -1,6 +1,6 @@ /* - * $Id: store_key_md5.c,v 1.29 2006/06/07 19:43:51 hno Exp $ + * $Id: store_key_md5.c,v 1.30 2007/01/21 12:54:00 adrian Exp $ * * DEBUG: section 20 Storage Manager MD5 Cache Keys * AUTHOR: Duane Wessels @@ -98,7 +98,7 @@ MD5_CTX M; assert(id > 0); debug(20, 3) ("storeKeyPrivate: %s %s\n", - RequestMethodStr[method], url); + RequestMethods[method].str, url); MD5Init(&M); MD5Update(&M, (unsigned char *) &id, sizeof(id)); MD5Update(&M, (unsigned char *) &method, sizeof(method)); Index: squid/src/store_log.c =================================================================== RCS file: /cvsroot/squid/squid/src/store_log.c,v retrieving revision 1.26 retrieving revision 1.27 diff -u -r1.26 -r1.27 --- squid/src/store_log.c 2 Sep 2006 14:08:42 -0000 1.26 +++ squid/src/store_log.c 21 Jan 2007 12:54:00 -0000 1.27 @@ -1,6 +1,6 @@ /* - * $Id: store_log.c,v 1.26 2006/09/02 14:08:42 hno Exp $ + * $Id: store_log.c,v 1.27 2007/01/21 12:54:00 adrian Exp $ * * DEBUG: section 20 Storage Manager Logging Functions * AUTHOR: Duane Wessels @@ -58,11 +58,6 @@ return; #endif if (mem != NULL) { - if (mem->log_url == NULL) { - debug(20, 1) ("storeLog: NULL log_url for %s\n", mem->url); - storeMemObjectDump(mem); - mem->log_url = xstrdup(mem->url); - } reply = mem->reply; /* * XXX Ok, where should we print the dir number here? @@ -83,8 +78,8 @@ strLen(reply->content_type) ? strBuf(reply->content_type) : "unknown", reply->content_length, mem->inmem_hi - mem->reply->hdr_sz, - RequestMethodStr[mem->method], - mem->log_url); + RequestMethods[mem->method].str, + rfc1738_escape_unescaped(mem->url)); } else { /* no mem object. Most RELEASE cases */ logfilePrintf(storelog, "%9ld.%03d %-7s %02d %08X %s ? ? ? ? ?/? ?/? ? ?\n", Index: squid/src/structs.h =================================================================== RCS file: /cvsroot/squid/squid/src/structs.h,v retrieving revision 1.507 retrieving revision 1.508 diff -u -r1.507 -r1.508 --- squid/src/structs.h 19 Jan 2007 01:10:12 -0000 1.507 +++ squid/src/structs.h 21 Jan 2007 12:54:00 -0000 1.508 @@ -1,6 +1,6 @@ /* - * $Id: structs.h,v 1.507 2007/01/19 01:10:12 hno Exp $ + * $Id: structs.h,v 1.508 2007/01/21 12:54:00 adrian Exp $ * * * SQUID Web Proxy Cache http://www.squid-cache.org/ @@ -872,7 +872,8 @@ struct in_addr local_addr; unsigned char tos; char ipaddr[16]; /* dotted decimal address of peer */ - char desc[FD_DESC_SZ]; + const char *desc; + char descbuf[FD_DESC_SZ]; struct { unsigned int open:1; unsigned int close_request:1; @@ -1049,13 +1050,14 @@ struct _HttpHeaderEntry { http_hdr_type id; + int active; String name; String value; }; struct _HttpHeader { /* protected, do not use these, use interface functions instead */ - Array entries; /* parsed fields in raw format */ + Array entries; /* parsed entries in raw format */ HttpHeaderMask mask; /* bit set <=> entry present */ http_hdr_owner_type owner; /* request or reply */ int len; /* length when packed, not counting terminating '\0' */ @@ -1192,7 +1194,6 @@ store_client *sc; /* The store_client we're using */ store_client *old_sc; /* ... 
for entry to be validated */ char *uri; - char *log_uri; struct { squid_off_t offset; squid_off_t size; @@ -1710,7 +1711,6 @@ STABH *callback; void *data; } abort; - char *log_url; RemovalPolicyNode repl; int id; squid_off_t object_sz; @@ -2498,4 +2498,23 @@ Array etags; }; +struct _HttpMsgBuf { + const char *buf; + size_t size; + /* offset of first/last byte of headers */ + int h_start, h_end, h_len; + /* offset of first/last byte of request, including any padding */ + int req_start, req_end, r_len; + int m_start, m_end, m_len; + int u_start, u_end, u_len; + int v_start, v_end, v_len; + int v_maj, v_min; +}; + +/* request method str stuff; should probably be a String type.. */ +struct rms { + char *str; + int len; +}; + #endif /* SQUID_STRUCTS_H */ Index: squid/src/tools.c =================================================================== RCS file: /cvsroot/squid/squid/src/tools.c,v retrieving revision 1.250 retrieving revision 1.251 diff -u -r1.250 -r1.251 --- squid/src/tools.c 4 Nov 2006 17:09:45 -0000 1.250 +++ squid/src/tools.c 21 Jan 2007 12:54:00 -0000 1.251 @@ -1,6 +1,6 @@ /* - * $Id: tools.c,v 1.250 2006/11/04 17:09:45 hno Exp $ + * $Id: tools.c,v 1.251 2007/01/21 12:54:00 adrian Exp $ * * DEBUG: section 21 Misc Functions * AUTHOR: Harvest Derived @@ -1346,3 +1346,43 @@ } #endif } + +/* XXX this is ipv4-only aware atm */ +const char * +xinet_ntoa(const struct in_addr addr) +{ + static char buf[32]; + char *s = buf + 30; + unsigned char a; + + s[31] = '\0'; + + a = (addr.s_addr >> 24) & 0xff; + do { + *(s--) = (a % 10) + '0'; + a /= 10; + } while (a > 0); + *(s--) = '.'; + + a = (addr.s_addr >> 16) & 0xff; + do { + *(s--) = (a % 10) + '0'; + a /= 10; + } while (a > 0); + *(s--) = '.'; + + a = (addr.s_addr >> 8) & 0xff; + do { + *(s--) = (a % 10) + '0'; + a /= 10; + } while (a > 0); + *(s--) = '.'; + + a = (addr.s_addr) & 0xff; + do { + *(s--) = (a % 10) + '0'; + a /= 10; + } while (a > 0); + + return s + 1; +} Index: squid/src/typedefs.h =================================================================== RCS file: /cvsroot/squid/squid/src/typedefs.h,v retrieving revision 1.151 retrieving revision 1.152 diff -u -r1.151 -r1.152 --- squid/src/typedefs.h 2 Sep 2006 14:08:42 -0000 1.151 +++ squid/src/typedefs.h 21 Jan 2007 12:54:00 -0000 1.152 @@ -1,6 +1,6 @@ /* - * $Id: typedefs.h,v 1.151 2006/09/02 14:08:42 hno Exp $ + * $Id: typedefs.h,v 1.152 2007/01/21 12:54:00 adrian Exp $ * * * SQUID Web Proxy Cache http://www.squid-cache.org/ @@ -405,4 +405,8 @@ typedef struct _VaryData VaryData; typedef void STLVCB(VaryData * vary, void *cbdata); +typedef struct _HttpMsgBuf HttpMsgBuf; + +typedef struct rms rms_t; + #endif /* SQUID_TYPEDEFS_H */ Index: squid/src/url.c =================================================================== RCS file: /cvsroot/squid/squid/src/url.c,v retrieving revision 1.144 retrieving revision 1.145 diff -u -r1.144 -r1.145 --- squid/src/url.c 17 Jun 2006 23:31:03 -0000 1.144 +++ squid/src/url.c 21 Jan 2007 12:54:00 -0000 1.145 @@ -1,6 +1,6 @@ /* - * $Id: url.c,v 1.144 2006/06/17 23:31:03 hno Exp $ + * $Id: url.c,v 1.145 2007/01/21 12:54:00 adrian Exp $ * * DEBUG: section 23 URL Parsing * AUTHOR: Duane Wessels @@ -35,56 +35,56 @@ #include "squid.h" -const char *RequestMethodStr[] = +rms_t RequestMethods[] = { - "NONE", - "GET", - "POST", - "PUT", - "HEAD", - "CONNECT", - "TRACE", - "PURGE", - "OPTIONS", - "DELETE", - "PROPFIND", - "PROPPATCH", - "MKCOL", - "COPY", - "MOVE", - "LOCK", - "UNLOCK", - "BMOVE", - "BDELETE", - "BPROPFIND", - "BPROPPATCH", - "BCOPY", - 
"SEARCH", - "SUBSCRIBE", - "UNSUBSCRIBE", - "POLL", - "REPORT", - "%EXT00", - "%EXT01", - "%EXT02", - "%EXT03", - "%EXT04", - "%EXT05", - "%EXT06", - "%EXT07", - "%EXT08", - "%EXT09", - "%EXT10", - "%EXT11", - "%EXT12", - "%EXT13", - "%EXT14", - "%EXT15", - "%EXT16", - "%EXT17", - "%EXT18", - "%EXT19", - "ERROR" + {"NONE", 4}, + {"GET", 3}, + {"POST", 4}, + {"PUT", 3}, + {"HEAD", 4}, + {"CONNECT", 7}, + {"TRACE", 5}, + {"PURGE", 5}, + {"OPTIONS", 7}, + {"DELETE", 6}, + {"PROPFIND", 8}, + {"PROPPATCH", 9}, + {"MKCOL", 5}, + {"COPY", 4}, + {"MOVE", 4}, + {"LOCK", 4}, + {"UNLOCK", 6}, + {"BMOVE", 5}, + {"BDELETE", 7}, + {"BPROPFIND", 9}, + {"BPROPPATCH", 10}, + {"BCOPY", 5}, + {"SEARCH", 6}, + {"SUBSCRIBE", 9}, + {"UNSUBSCRIBE", 11}, + {"POLL", 4}, + {"REPORT", 6}, + {"%EXT00", 6}, + {"%EXT01", 6}, + {"%EXT02", 6}, + {"%EXT03", 6}, + {"%EXT04", 6}, + {"%EXT05", 6}, + {"%EXT06", 6}, + {"%EXT07", 6}, + {"%EXT08", 6}, + {"%EXT09", 6}, + {"%EXT10", 6}, + {"%EXT11", 6}, + {"%EXT12", 6}, + {"%EXT13", 6}, + {"%EXT14", 6}, + {"%EXT15", 6}, + {"%EXT16", 6}, + {"%EXT17", 6}, + {"%EXT18", 6}, + {"%EXT19", 6}, + {"ERROR", 5}, }; const char *ProtocolStr[] = @@ -175,7 +175,7 @@ } method_t -urlParseMethod(const char *s) +urlParseMethod(const char *s, int len) { method_t method = METHOD_NONE; /* @@ -186,7 +186,7 @@ if (*s == '%') return METHOD_NONE; for (method++; method < METHOD_ENUM_END; method++) { - if (0 == strcasecmp(s, RequestMethodStr[method])) + if (len == RequestMethods[method].len && 0 == strncasecmp(s, RequestMethods[method].str, len)) return method; } return METHOD_NONE; @@ -259,6 +259,9 @@ protocol_t protocol = PROTO_NONE; int l; proto[0] = host[0] = urlpath[0] = login[0] = '\0'; + int i; + const char *src; + char *dst; if ((l = strlen(url)) + Config.appendDomainLen > (MAX_URL - 1)) { /* terminate so it doesn't overflow other buffers */ @@ -273,17 +276,51 @@ } else if (!strncmp(url, "urn:", 4)) { return urnParse(method, url); } else { - if (sscanf(url, "%[^:]://%[^/]%[^\r\n]", proto, host, urlpath) < 2) + /* Parse the URL: */ + src = url; + i = 0; + /* Find first : - everything before is protocol */ + for (i = 0, dst = proto; i < l && *src != ':'; i++, src++, dst++) { + *dst = *src; + } + if (i >= l) + return NULL; + *dst = '\0'; + + /* Then its :// */ + /* (XXX yah, I'm not checking we've got enough data left before checking the array..) */ + if (*src != ':' || *(src + 1) != '/' || *(src + 2) != '/') + return NULL; + i += 3; + src += 3; + + /* Then everything until first /; thats host (and port; which we'll look for here later) */ + for (dst = host; i < l && *src != '/'; i++, src++, dst++) { + *dst = *src; + } + if (i >= l) return NULL; + *dst = '\0'; + + /* Then everything from / (inclusive) until \r\n or \0 - thats urlpath */ + for (dst = urlpath; i < l && *src != '\r' && *src != '\n' && *src != '\0'; i++, src++, dst++) { + *dst = *src; + } + /* We -could- be at the end of the buffer here */ + if (i > l) + return NULL; + *dst = '\0'; + protocol = urlParseProtocol(proto); port = urlDefaultPort(protocol); - /* Is there any login informaiton? */ + /* Is there any login informaiton? (we should eventually parse it above) */ if ((t = strrchr(host, '@'))) { strcpy((char *) login, (char *) host); t = strrchr(login, '@'); *t = 0; strcpy((char *) host, t + 1); } + /* Is there any host information? 
(we should eventually parse it above) */ if ((t = strrchr(host, ':'))) { *t++ = '\0'; if (*t != '\0') @@ -399,6 +436,11 @@ return (request->canonical = xstrdup(urlbuf)); } +/* + * Eventually the request_t strings should be String entries which + * have in-built length. Eventually we should just take a buffer and + * do our magic inside that to eliminate that copy. + */ char * urlCanonicalClean(const request_t * request) { @@ -406,6 +448,10 @@ LOCAL_ARRAY(char, portbuf, 32); LOCAL_ARRAY(char, loginbuf, MAX_LOGIN_SZ + 1); char *t; + int i; + const char *s; + char *ts = "://"; + if (request->protocol == PROTO_URN) { snprintf(buf, MAX_URL, "urn:%s", strBuf(request->urlpath)); } else { @@ -424,12 +470,40 @@ *t = '\0'; strcat(loginbuf, "@"); } - snprintf(buf, MAX_URL, "%s://%s%s%s%s", - ProtocolStr[request->protocol], - loginbuf, - request->host, - portbuf, - strBuf(request->urlpath)); + /* + * This stuff would be better if/when each of these strings is a String with + * a known length.. + */ + s = ProtocolStr[request->protocol]; + for (i = 0; i < MAX_URL && *s != '\0'; i++, s++) { + buf[i] = *s; + } + s = ts; + for (; i < MAX_URL && *s != '\0'; i++, s++) { + buf[i] = *s; + } + s = loginbuf; + for (; i < MAX_URL && *s != '\0'; i++, s++) { + buf[i] = *s; + } + s = request->host; + for (; i < MAX_URL && *s != '\0'; i++, s++) { + buf[i] = *s; + } + s = portbuf; + for (; i < MAX_URL && *s != '\0'; i++, s++) { + buf[i] = *s; + } + s = strBuf(request->urlpath); + for (; i < MAX_URL && *s != '\0'; i++, s++) { + buf[i] = *s; + } + if (i >= (MAX_URL - 1)) { + buf[MAX_URL - 1] = '\0'; + } else { + buf[i] = '\0'; + } + /* * strip arguments AFTER a question-mark */ @@ -611,16 +685,17 @@ { method_t method = 0; for (method++; method < METHOD_ENUM_END; method++) { - if (0 == strcmp(mstr, RequestMethodStr[method])) { + if (0 == strcmp(mstr, RequestMethods[method].str)) { debug(23, 2) ("Extension method '%s' already exists\n", mstr); return; } - if (0 != strncmp("%EXT", RequestMethodStr[method], 4)) + if (0 != strncmp("%EXT", RequestMethods[method].str, 4)) continue; /* Don't free statically allocated "%EXTnn" string */ - if (0 == strncmp("%EXT_", RequestMethodStr[method], 5)) - safe_free(RequestMethodStr[method]); - RequestMethodStr[method] = xstrdup(mstr); + if (0 == strncmp("%EXT_", RequestMethods[method].str, 5)) + safe_free(RequestMethods[method].str); + RequestMethods[method].str = xstrdup(mstr); + RequestMethods[method].len = strlen(mstr); debug(23, 1) ("Extension method '%s' added, enum=%d\n", mstr, (int) method); return; } @@ -628,7 +703,7 @@ } void -parse_extension_method(const char *(*_methods)[]) +parse_extension_method(rms_t * foo[]) { char *token; char *t = strtok(NULL, ""); @@ -638,27 +713,27 @@ } void -free_extension_method(const char *(*_methods)[]) +free_extension_method(rms_t * foo[]) { method_t method; - char **methods = (char **) _methods; for (method = METHOD_EXT00; method < METHOD_ENUM_END; method++) { - if (*methods[method] != '%') { + if (RequestMethods[method].str[0] != '%') { char buf[32]; snprintf(buf, sizeof(buf), "%%EXT_%02d", method - METHOD_EXT00); - safe_free(methods[method]); - methods[method] = xstrdup(buf); + safe_free(RequestMethods[method].str); + RequestMethods[method].str = xstrdup(buf); + RequestMethods[method].len = strlen(buf); } } } void -dump_extension_method(StoreEntry * entry, const char *name, const char **methods) +dump_extension_method(StoreEntry * entry, const char *name, rms_t * methods[]) { method_t method; for (method = METHOD_EXT00; method < METHOD_ENUM_END; 
method++) { - if (*methods[method] != '%') { - storeAppendPrintf(entry, "%s %s\n", name, methods[method]); + if (RequestMethods[method].str[0] != '%') { + storeAppendPrintf(entry, "%s %s\n", name, RequestMethods[method].str); } } } Index: squid/src/urn.c =================================================================== RCS file: /cvsroot/squid/squid/src/urn.c,v retrieving revision 1.80 retrieving revision 1.81 diff -u -r1.80 -r1.81 --- squid/src/urn.c 25 Aug 2006 12:26:07 -0000 1.80 +++ squid/src/urn.c 21 Jan 2007 12:54:00 -0000 1.81 @@ -1,6 +1,6 @@ /* - * $Id: urn.c,v 1.80 2006/08/25 12:26:07 serassio Exp $ + * $Id: urn.c,v 1.81 2007/01/21 12:54:00 adrian Exp $ * * DEBUG: section 52 URN Parsing * AUTHOR: Kostas Anagnostakis @@ -137,7 +137,7 @@ } httpHeaderPutStr(&urlres_r->header, HDR_ACCEPT, "text/plain"); if ((urlres_e = storeGetPublic(urlres, METHOD_GET)) == NULL) { - urlres_e = storeCreateEntry(urlres, urlres, null_request_flags, METHOD_GET); + urlres_e = storeCreateEntry(urlres, null_request_flags, METHOD_GET); urnState->sc = storeClientRegister(urlres_e, urnState); fwdStart(-1, urlres_e, urlres_r); } else { Index: squid/src/wais.c =================================================================== RCS file: /cvsroot/squid/squid/src/wais.c,v retrieving revision 1.142 retrieving revision 1.143 diff -u -r1.142 -r1.143 --- squid/src/wais.c 25 Aug 2006 12:26:07 -0000 1.142 +++ squid/src/wais.c 21 Jan 2007 12:54:00 -0000 1.143 @@ -1,6 +1,6 @@ /* - * $Id: wais.c,v 1.142 2006/08/25 12:26:07 serassio Exp $ + * $Id: wais.c,v 1.143 2007/01/21 12:54:00 adrian Exp $ * * DEBUG: section 24 WAIS Relay * AUTHOR: Harvest Derived @@ -188,7 +188,7 @@ { WaisStateData *waisState = data; MemBuf mb; - const char *Method = RequestMethodStr[waisState->method]; + const char *Method = RequestMethods[waisState->method].str; debug(24, 5) ("waisSendRequest: FD %d\n", fd); memBufDefInit(&mb); memBufPrintf(&mb, "%s %s HTTP/1.0\r\n", Method, waisState->url); @@ -216,7 +216,7 @@ int fd = fwd->server_fd; const char *url = storeUrl(entry); method_t method = request->method; - debug(24, 3) ("waisStart: \"%s %s\"\n", RequestMethodStr[method], url); + debug(24, 3) ("waisStart: \"%s %s\"\n", RequestMethods[method].str, url); statCounter.server.all.requests++; statCounter.server.other.requests++; CBDATA_INIT_TYPE(WaisStateData); Index: squid/src/auth/digest/auth_digest.c =================================================================== RCS file: /cvsroot/squid/squid/src/auth/digest/auth_digest.c,v retrieving revision 1.21 retrieving revision 1.22 diff -u -r1.21 -r1.22 --- squid/src/auth/digest/auth_digest.c 30 Jul 2006 23:27:04 -0000 1.21 +++ squid/src/auth/digest/auth_digest.c 21 Jan 2007 12:54:01 -0000 1.22 @@ -1,6 +1,6 @@ /* - * $Id: auth_digest.c,v 1.21 2006/07/30 23:27:04 hno Exp $ + * $Id: auth_digest.c,v 1.22 2007/01/21 12:54:01 adrian Exp $ * * DEBUG: section 29 Authenticator * AUTHOR: Robert Collins @@ -698,7 +698,7 @@ digest_user->HA1, SESSIONKEY); DigestCalcResponse(SESSIONKEY, authenticateDigestNonceNonceb64(digest_request->nonce), digest_request->nc, digest_request->cnonce, digest_request->qop, - RequestMethodStr[request->method], digest_request->uri, HA2, Response); + RequestMethods[request->method].str, digest_request->uri, HA2, Response); debug(29, 9) ("\nResponse = '%s'\n" "squid is = '%s'\n", digest_request->response, Response); @@ -719,7 +719,7 @@ */ DigestCalcResponse(SESSIONKEY, authenticateDigestNonceNonceb64(digest_request->nonce), digest_request->nc, digest_request->cnonce, digest_request->qop, - 
RequestMethodStr[METHOD_GET], digest_request->uri, HA2, Response); + RequestMethods[METHOD_GET].str, digest_request->uri, HA2, Response); if (strcasecmp(digest_request->response, Response)) { digest_request->flags.credentials_ok = 3; safe_free(auth_user_request->message); Index: squid/src/fs/aufs/store_dir_aufs.c =================================================================== RCS file: /cvsroot/squid/squid/src/fs/aufs/store_dir_aufs.c,v retrieving revision 1.67 retrieving revision 1.68 diff -u -r1.67 -r1.68 --- squid/src/fs/aufs/store_dir_aufs.c 25 Nov 2006 20:07:41 -0000 1.67 +++ squid/src/fs/aufs/store_dir_aufs.c 21 Jan 2007 12:54:02 -0000 1.68 @@ -1,6 +1,6 @@ /* - * $Id: store_dir_aufs.c,v 1.67 2006/11/25 20:07:41 serassio Exp $ + * $Id: store_dir_aufs.c,v 1.68 2007/01/21 12:54:02 adrian Exp $ * * DEBUG: section 47 Store Directory Routines * AUTHOR: Duane Wessels @@ -1002,7 +1002,7 @@ debug(47, 5) ("storeAufsAddDiskRestore: %s, fileno=%08X\n", storeKeyText(key), file_number); /* if you call this you'd better be sure file_number is not * already in use! */ - e = new_StoreEntry(STORE_ENTRY_WITHOUT_MEMOBJ, NULL, NULL); + e = new_StoreEntry(STORE_ENTRY_WITHOUT_MEMOBJ, NULL); e->store_status = STORE_OK; storeSetMemStatus(e, NOT_IN_MEMORY); e->swap_status = SWAPOUT_DONE; Index: squid/src/fs/coss/store_dir_coss.c =================================================================== RCS file: /cvsroot/squid/squid/src/fs/coss/store_dir_coss.c,v retrieving revision 1.66 retrieving revision 1.67 diff -u -r1.66 -r1.67 --- squid/src/fs/coss/store_dir_coss.c 5 Nov 2006 21:14:32 -0000 1.66 +++ squid/src/fs/coss/store_dir_coss.c 21 Jan 2007 12:54:04 -0000 1.67 @@ -1,6 +1,6 @@ /* - * $Id: store_dir_coss.c,v 1.66 2006/11/05 21:14:32 hno Exp $ + * $Id: store_dir_coss.c,v 1.67 2007/01/21 12:54:04 adrian Exp $ * * DEBUG: section 47 Store COSS Directory Routines * AUTHOR: Eric Stern @@ -1412,7 +1412,7 @@ rb->counts.objcount++; /* The Passed-in store entry is temporary; don't bloody use it directly! */ assert(e->swap_dirn == SD->index); - ne = new_StoreEntry(STORE_ENTRY_WITHOUT_MEMOBJ, NULL, NULL); + ne = new_StoreEntry(STORE_ENTRY_WITHOUT_MEMOBJ, NULL); ne->store_status = STORE_OK; storeSetMemStatus(ne, NOT_IN_MEMORY); ne->swap_status = SWAPOUT_DONE; Index: squid/src/fs/diskd/store_dir_diskd.c =================================================================== RCS file: /cvsroot/squid/squid/src/fs/diskd/store_dir_diskd.c,v retrieving revision 1.87 retrieving revision 1.88 diff -u -r1.87 -r1.88 --- squid/src/fs/diskd/store_dir_diskd.c 5 Nov 2006 21:32:12 -0000 1.87 +++ squid/src/fs/diskd/store_dir_diskd.c 21 Jan 2007 12:54:05 -0000 1.88 @@ -1,6 +1,6 @@ /* - * $Id: store_dir_diskd.c,v 1.87 2006/11/05 21:32:12 hno Exp $ + * $Id: store_dir_diskd.c,v 1.88 2007/01/21 12:54:05 adrian Exp $ * * DEBUG: section 47 Store Directory Routines * AUTHOR: Duane Wessels @@ -1218,7 +1218,7 @@ debug(20, 5) ("storeDiskdAddDiskRestore: %s, fileno=%08X\n", storeKeyText(key), file_number); /* if you call this you'd better be sure file_number is not * already in use! 
*/ - e = new_StoreEntry(STORE_ENTRY_WITHOUT_MEMOBJ, NULL, NULL); + e = new_StoreEntry(STORE_ENTRY_WITHOUT_MEMOBJ, NULL); e->store_status = STORE_OK; storeSetMemStatus(e, NOT_IN_MEMORY); e->swap_status = SWAPOUT_DONE; Index: squid/src/fs/ufs/store_dir_ufs.c =================================================================== RCS file: /cvsroot/squid/squid/src/fs/ufs/store_dir_ufs.c,v retrieving revision 1.63 retrieving revision 1.64 diff -u -r1.63 -r1.64 --- squid/src/fs/ufs/store_dir_ufs.c 5 Nov 2006 21:32:13 -0000 1.63 +++ squid/src/fs/ufs/store_dir_ufs.c 21 Jan 2007 12:54:06 -0000 1.64 @@ -1,6 +1,6 @@ /* - * $Id: store_dir_ufs.c,v 1.63 2006/11/05 21:32:13 hno Exp $ + * $Id: store_dir_ufs.c,v 1.64 2007/01/21 12:54:06 adrian Exp $ * * DEBUG: section 47 Store Directory Routines * AUTHOR: Duane Wessels @@ -1007,7 +1007,7 @@ debug(47, 5) ("storeUfsAddDiskRestore: %s, fileno=%08X\n", storeKeyText(key), file_number); /* if you call this you'd better be sure file_number is not * already in use! */ - e = new_StoreEntry(STORE_ENTRY_WITHOUT_MEMOBJ, NULL, NULL); + e = new_StoreEntry(STORE_ENTRY_WITHOUT_MEMOBJ, NULL); e->store_status = STORE_OK; storeSetMemStatus(e, NOT_IN_MEMORY); e->swap_status = SWAPOUT_DONE;
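
A minimal standalone sketch of the consume-loop contract that the reworked clientReadRequest() in client_side.c now relies on: the parse step returns < 0 on error, 0 when more data is needed, and > 0 for the number of bytes consumed, after which any pipelined leftover is moved to the front of the buffer. The try_parse() helper below is a toy stand-in (it "parses" one newline-terminated request), not the committed clientTryParseRequest().

/*
 * Sketch only: ret < 0 -> error, ret == 0 -> need more data,
 * ret > 0 -> bytes consumed from the front of the buffer.
 */
#include <stdio.h>
#include <string.h>

static int
try_parse(const char *buf, size_t len)
{
    const char *nl = memchr(buf, '\n', len);
    if (nl == NULL)
        return 0;                       /* partial request: wait for more data */
    printf("parsed request: %.*s\n", (int) (nl - buf), buf);
    return (int) (nl - buf) + 1;        /* consume request line + newline */
}

int
main(void)
{
    char buf[64] = "GET /a\nGET /b\nGET /c";   /* two full requests + a partial one */
    size_t offset = strlen(buf);
    int ret;

    while (offset > 0) {
        ret = try_parse(buf, offset);
        if (ret <= 0)
            break;                      /* error or partial: stop consuming */
        offset -= ret;
        /* move any pipelined leftovers to the start of the buffer */
        if (offset > 0)
            memmove(buf, buf + ret, offset);
    }
    printf("%d bytes of partial request left over\n", (int) offset);
    return 0;
}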
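
A minimal sketch of the (string, length) method-table lookup introduced in url.c: storing the precomputed length alongside the string lets the hot path reject mismatches with a single integer compare instead of calling strlen() per lookup. The trimmed table, method_entry_t and lookup_method() names here are illustrative only; the committed code uses RequestMethods[] and urlParseMethod().

#include <stdio.h>
#include <string.h>
#include <strings.h>        /* strncasecmp */

typedef struct {
    const char *str;
    int len;                /* precomputed so lookups never call strlen() */
} method_entry_t;

static const method_entry_t methods[] = {
    {"GET", 3}, {"POST", 4}, {"PUT", 3}, {"HEAD", 4}, {"CONNECT", 7},
};

/* Return the table index of the method spanning buf[0..len), or -1. */
static int
lookup_method(const char *buf, int len)
{
    int i;
    for (i = 0; i < (int) (sizeof(methods) / sizeof(methods[0])); i++) {
        /* length compare first: cheap reject before touching the bytes */
        if (len == methods[i].len && strncasecmp(buf, methods[i].str, len) == 0)
            return i;
    }
    return -1;
}

int
main(void)
{
    const char *reqline = "GET /index.html HTTP/1.0";
    const char *sp = strchr(reqline, ' ');      /* method ends at first space */
    int idx = lookup_method(reqline, (int) (sp - reqline));

    printf("method = %s\n", idx >= 0 ? methods[idx].str : "(none)");
    return 0;
}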
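
A small sketch of the copy-versus-pointer description pattern added to fd.c and structs.h: the descriptor keeps a writable buffer for transient strings plus a plain pointer for static ones, so fd_note_static() can avoid the copy when the caller passes a string literal. note_copy(), note_static(), fd_entry and DESC_SZ below are illustrative names, not the committed fd_note()/fd_note_static() or fde fields.

#include <stdio.h>
#include <string.h>

#define DESC_SZ 64

typedef struct {
    const char *desc;           /* what readers look at */
    char descbuf[DESC_SZ];      /* backing store for copied descriptions */
} fd_entry;

/* Copy a possibly short-lived string into the entry's own buffer. */
static void
note_copy(fd_entry *f, const char *s)
{
    strncpy(f->descbuf, s, DESC_SZ - 1);
    f->descbuf[DESC_SZ - 1] = '\0';
    f->desc = f->descbuf;
}

/* Store the pointer only: safe for string literals / static storage. */
static void
note_static(fd_entry *f, const char *s)
{
    f->desc = s;
}

int
main(void)
{
    fd_entry f;
    char scratch[32];

    snprintf(scratch, sizeof(scratch), "client %s", "10.0.0.1");
    note_copy(&f, scratch);             /* scratch can be reused after this */
    printf("copied description: %s\n", f.desc);

    note_static(&f, "lingering close"); /* literal outlives f: no copy needed */
    printf("static description: %s\n", f.desc);
    return 0;
}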