------------------------------------------------------------
revno: 13266 [merge]
revision-id: kinkie@squid-cache.org-20140210121540-dpztrfkxg6s5phw2
parent: squid3@treenet.co.nz-20140210110858-j9qhjothxmn9y1b1
parent: kinkie@squid-cache.org-20140210121444-4j5qsixlid63qsrl
committer: Francesco Chemolli
branch nick: trunk
timestamp: Mon 2014-02-10 13:15:40 +0100
message:
  Merge vector-refactor branch: align Vector API with std::vector
------------------------------------------------------------
Use --include-merges or -n0 to see merged revisions.
------------------------------------------------------------
# Bazaar merge directive format 2 (Bazaar 0.90)
# revision_id: kinkie@squid-cache.org-20140210121540-dpztrfkxg6s5phw2
# target_branch: http://bzr.squid-cache.org/bzr/squid3/trunk/
# testament_sha1: 3679fb5364a80e44f8673a82b801ba9c4a9493be
# timestamp: 2014-02-10 12:53:52 +0000
# source_branch: http://bzr.squid-cache.org/bzr/squid3/trunk/
# base_revision_id: squid3@treenet.co.nz-20140210110858-\
#   j9qhjothxmn9y1b1
#
# Begin patch
=== modified file 'lib/MemPoolMalloc.cc'
--- lib/MemPoolMalloc.cc 2013-01-14 05:01:04 +0000
+++ lib/MemPoolMalloc.cc 2014-02-02 08:57:20 +0000
@@ -127,7 +127,7 @@
 bool
 MemPoolMalloc::idleTrigger(int shift) const
 {
-    return freelist.count >> (shift ? 8 : 0);
+    return freelist.size() >> (shift ? 8 : 0);
 }
 
 void
=== modified file 'src/ClientDelayConfig.cc'
--- src/ClientDelayConfig.cc 2013-05-13 22:48:23 +0000
+++ src/ClientDelayConfig.cc 2014-02-02 09:42:23 +0000
@@ -30,7 +30,7 @@
 void
 ClientDelayConfig::freePoolCount()
 {
-    pools.clean();
+    pools.clear();
 }
 
 void ClientDelayConfig::dumpPoolCount(StoreEntry * entry, const char *name) const
=== modified file 'src/ConfigOption.cc'
--- src/ConfigOption.cc 2012-09-01 14:38:36 +0000
+++ src/ConfigOption.cc 2014-02-04 16:54:49 +0000
@@ -35,7 +35,7 @@
 
 ConfigOptionVector::~ConfigOptionVector()
 {
-    while (options.size()) {
+    while (!options.empty()) {
         delete options.back();
         options.pop_back();
     }
=== modified file 'src/DiskIO/DiskIOModule.cc'
--- src/DiskIO/DiskIOModule.cc 2013-09-03 09:05:02 +0000
+++ src/DiskIO/DiskIOModule.cc 2014-02-04 19:47:14 +0000
@@ -92,7 +92,7 @@
 void
 DiskIOModule::FreeAllModules()
 {
-    while (GetModules().size()) {
+    while (!GetModules().empty()) {
         DiskIOModule *fs = GetModules().back();
         GetModules().pop_back();
         fs->gracefulShutdown();
=== modified file 'src/ExternalACLEntry.cc'
--- src/ExternalACLEntry.cc 2013-11-29 04:41:07 +0000
+++ src/ExternalACLEntry.cc 2014-02-02 09:42:23 +0000
@@ -70,7 +70,7 @@
     result = someData.result;
 
     // replace all notes. not combine
-    notes.entries.clean();
+    notes.entries.clear();
     notes.append(&someData.notes);
 
 #if USE_AUTH
=== modified file 'src/FwdState.cc'
--- src/FwdState.cc 2013-12-06 23:52:26 +0000
+++ src/FwdState.cc 2014-02-02 09:42:23 +0000
@@ -112,7 +112,7 @@
     } else {
         debugs(17, 7, HERE << "store entry aborted; no connection to close");
     }
-    fwd->serverDestinations.clean();
+    fwd->serverDestinations.clear();
     fwd->self = NULL;
 }
 
@@ -277,7 +277,7 @@
         serverConn->close();
     }
 
-    serverDestinations.clean();
+    serverDestinations.clear();
 
     debugs(17, 3, HERE << "FwdState destructor done");
 }
=== modified file 'src/HttpHdrRange.cc'
--- src/HttpHdrRange.cc 2013-10-25 00:13:46 +0000
+++ src/HttpHdrRange.cc 2014-02-04 16:54:49 +0000
@@ -266,8 +266,10 @@
          * at least one syntactically invalid byte-range-specs.
          */
         if (!spec) {
-            while (!specs.empty())
-                delete specs.pop_back();
+            while (!specs.empty()) {
+                delete specs.back();
+                specs.pop_back();
+            }
             debugs(64, 2, "ignoring invalid range field: '" << range_spec << "'");
             break;
         }
@@ -281,8 +283,10 @@
 
 HttpHdrRange::~HttpHdrRange()
 {
-    while (specs.size())
-        delete specs.pop_back();
+    while (!specs.empty()) {
+        delete specs.back();
+        specs.pop_back();
+    }
 }
 
 HttpHdrRange::HttpHdrRange(HttpHdrRange const &old) :
@@ -341,7 +345,7 @@
 HttpHdrRange::merge (Vector<HttpHdrRangeSpec *> &basis)
 {
     /* reset old array */
-    specs.clean();
+    specs.clear();
     /* merge specs:
      * take one spec from "goods" and merge it with specs from
      * "specs" (if any) until there is no overlap */
@@ -350,7 +354,8 @@
     while (i != basis.end()) {
         if (specs.size() && (*i)->mergeWith(specs.back())) {
             /* merged with current so get rid of the prev one */
-            delete specs.pop_back();
+            delete specs.back();
+            specs.pop_back();
             continue;   /* re-iterate */
         }
 
@@ -404,14 +409,14 @@
 HttpHdrRange::canonize (int64_t newClen)
 {
     clen = newClen;
-    debugs(64, 3, "HttpHdrRange::canonize: started with " << specs.count <<
+    debugs(64, 3, "HttpHdrRange::canonize: started with " << specs.size() <<
            " specs, clen: " << clen);
     Vector<HttpHdrRangeSpec*> goods;
     getCanonizedSpecs(goods);
     merge (goods);
-    debugs(64, 3, "HttpHdrRange::canonize: finished with " << specs.count <<
+    debugs(64, 3, "HttpHdrRange::canonize: finished with " << specs.size() <<
            " specs");
-    return specs.count > 0;
+    return specs.size() > 0; // fixme, should return bool
 }
 
 /* hack: returns true if range specs are too "complex" for Squid to handle */
=== modified file 'src/HttpHeader.cc'
--- src/HttpHeader.cc 2014-01-15 23:57:54 +0000
+++ src/HttpHeader.cc 2014-02-04 19:47:14 +0000
@@ -454,12 +454,12 @@
          * has been used. As a hack, just never count zero-sized header
          * arrays.
          */
-        if (0 != entries.count)
-            HttpHeaderStats[owner].hdrUCountDistr.count(entries.count);
+        if (!entries.empty())
+            HttpHeaderStats[owner].hdrUCountDistr.count(entries.size());
 
         ++ HttpHeaderStats[owner].destroyedCount;
 
-        HttpHeaderStats[owner].busyDestroyedCount += entries.count > 0;
+        HttpHeaderStats[owner].busyDestroyedCount += entries.size() > 0;
     } // if (owner <= hoReply)
 
     while ((e = getEntry(&pos))) {
@@ -474,7 +474,7 @@
             delete e;
         }
     }
-    entries.clean();
+    entries.clear();
     httpHeaderMaskInit(&mask, 0);
     len = 0;
     PROF_stop(HttpHeaderClean);
@@ -748,11 +748,11 @@
 HttpHeader::getEntry(HttpHeaderPos * pos) const
 {
     assert(pos);
-    assert(*pos >= HttpHeaderInitPos && *pos < (ssize_t)entries.count);
+    assert(*pos >= HttpHeaderInitPos && *pos < static_cast<ssize_t>(entries.size()));
 
-    for (++(*pos); *pos < (ssize_t)entries.count; ++(*pos)) {
-        if (entries.items[*pos])
-            return (HttpHeaderEntry*)entries.items[*pos];
+    for (++(*pos); *pos < static_cast<ssize_t>(entries.size()); ++(*pos)) {
+        if (entries[*pos])
+            return static_cast<HttpHeaderEntry*>(entries[*pos]);
     }
 
     return NULL;
@@ -871,9 +871,9 @@
 HttpHeader::delAt(HttpHeaderPos pos, int &headers_deleted)
 {
     HttpHeaderEntry *e;
-    assert(pos >= HttpHeaderInitPos && pos < (ssize_t)entries.count);
-    e = (HttpHeaderEntry*)entries.items[pos];
-    entries.items[pos] = NULL;
+    assert(pos >= HttpHeaderInitPos && pos < static_cast<ssize_t>(entries.size()));
+    e = static_cast<HttpHeaderEntry*>(entries[pos]);
+    entries[pos] = NULL;
     /* decrement header length, allow for ": " and crlf */
     len -= e->name.size() + 2 + e->value.size() + 2;
     assert(len >= 0);
@@ -914,7 +914,7 @@
     assert_eid(e->id);
     assert(e->name.size());
 
-    debugs(55, 7, HERE << this << " adding entry: " << e->id << " at " << entries.count);
+    debugs(55, 7, this << " adding entry: " << e->id << " at " << entries.size());
 
     if (CBIT_TEST(mask, e->id))
         ++ Headers[e->id].stat.repCount;
@@ -936,7 +936,7 @@
     assert(e);
     assert_eid(e->id);
 
-    debugs(55, 7, HERE << this << " adding entry: " << e->id << " at " << entries.count);
+    debugs(55, 7, this << " adding entry: " << e->id << " at " << entries.size());
 
     if (CBIT_TEST(mask, e->id))
         ++ Headers[e->id].stat.repCount;
=== modified file 'src/HttpRequest.cc'
--- src/HttpRequest.cc 2013-09-08 04:27:31 +0000
+++ src/HttpRequest.cc 2014-02-04 19:50:02 +0000
@@ -479,7 +479,7 @@
 bool
 HttpRequest::multipartRangeRequest() const
 {
-    return (range && range->specs.count > 1);
+    return (range && range->specs.size() > 1);
 }
 
 bool
=== modified file 'src/Notes.cc'
--- src/Notes.cc 2013-12-05 11:04:45 +0000
+++ src/Notes.cc 2014-02-02 18:19:59 +0000
@@ -146,13 +146,15 @@
 void
 Notes::clean()
 {
-    notes.clean();
+    notes.clear();
 }
 
 NotePairs::~NotePairs()
 {
-    while (!entries.empty())
-        delete entries.pop_back();
+    while (!entries.empty()) {
+        delete entries.back();
+        entries.pop_back();
+    }
 }
 
 const char *
=== modified file 'src/Notes.h'
--- src/Notes.h 2013-12-05 11:04:45 +0000
+++ src/Notes.h 2014-02-02 09:42:23 +0000
@@ -76,7 +76,7 @@
     Notes(const char *aDescr, const char **metasBlacklist, bool allowFormatted = false): descr(aDescr),
             blacklisted(metasBlacklist), formattedValues(allowFormatted) {}
     Notes(): descr(NULL), blacklisted(NULL) {}
-    ~Notes() { notes.clean(); }
+    ~Notes() { notes.clear(); }
     /**
      * Parse a notes line and returns a pointer to the
      * parsed Note object.
=== modified file 'src/StoreFileSystem.cc'
--- src/StoreFileSystem.cc 2012-09-01 14:38:36 +0000
+++ src/StoreFileSystem.cc 2014-02-04 19:47:14 +0000
@@ -87,7 +87,7 @@
 void
 StoreFileSystem::FreeAllFs()
 {
-    while (GetFileSystems().size()) {
+    while (!GetFileSystems().empty()) {
         StoreFileSystem *fs = GetFileSystems().back();
         GetFileSystems().pop_back();
         fs->done();
=== modified file 'src/adaptation/AccessCheck.cc'
--- src/adaptation/AccessCheck.cc 2014-01-11 01:35:50 +0000
+++ src/adaptation/AccessCheck.cc 2014-02-02 08:57:20 +0000
@@ -102,7 +102,7 @@
         AccessRule *r = *i;
         if (isCandidate(*r)) {
             debugs(93, 5, HERE << "check: rule '" << r->id << "' is a candidate");
-            candidates += r->id;
+            candidates.push_back(r->id);
         }
     }
 
=== modified file 'src/adaptation/Config.cc'
--- src/adaptation/Config.cc 2013-12-05 11:04:45 +0000
+++ src/adaptation/Config.cc 2014-02-02 09:42:23 +0000
@@ -139,7 +139,7 @@
     const ServiceConfigs& configs = serviceConfigs;
     for (SCI cfg = configs.begin(); cfg != configs.end(); ++cfg)
         removeService((*cfg)->key);
-    serviceConfigs.clean();
+    serviceConfigs.clear();
     debugs(93, 3, HERE << "rules: " << AllRules().size() << ", groups: " <<
            AllGroups().size() << ", services: " << serviceConfigs.size());
 }
@@ -163,7 +163,7 @@
 
     DetachServices();
 
-    serviceConfigs.clean();
+    serviceConfigs.clear();
 }
 
 void
@@ -210,7 +210,7 @@
     debugs(93,3, HERE << "Created " << created << " adaptation services");
 
     // services remember their configs; we do not have to
-    serviceConfigs.clean();
+    serviceConfigs.clear();
     return true;
 }
 
=== modified file 'src/adaptation/DynamicGroupCfg.cc'
--- src/adaptation/DynamicGroupCfg.cc 2012-01-20 18:55:04 +0000
+++ src/adaptation/DynamicGroupCfg.cc 2014-02-02 09:42:23 +0000
@@ -18,5 +18,5 @@
 Adaptation::DynamicGroupCfg::clear()
 {
     id.clean();
-    services.clean();
+    services.clear();
 }
=== modified file 'src/adaptation/Service.cc'
--- src/adaptation/Service.cc 2013-10-25 00:13:46 +0000
+++ src/adaptation/Service.cc 2014-02-04 19:47:14 +0000
@@ -71,6 +71,8 @@
 void
 Adaptation::DetachServices()
 {
-    while (!AllServices().empty())
-        AllServices().pop_back()->detach();
+    while (!AllServices().empty()) {
+        AllServices().back()->detach();
+        AllServices().pop_back();
+    }
 }
=== modified file 'src/adaptation/ServiceGroups.cc'
--- src/adaptation/ServiceGroups.cc 2013-07-21 19:24:35 +0000
+++ src/adaptation/ServiceGroups.cc 2014-02-02 09:42:23 +0000
@@ -48,7 +48,7 @@
         }
         s.cut(s.size() - 1);
         debugs(93, DBG_IMPORTANT, "Adaptation group '" << id << "' contains disabled member(s) after reconfiguration: " << s);
-        removedServices.clean();
+        removedServices.clear();
     }
 
     String baselineKey;
=== modified file 'src/adaptation/icap/Options.cc'
--- src/adaptation/icap/Options.cc 2013-03-18 04:55:51 +0000
+++ src/adaptation/icap/Options.cc 2014-02-02 08:57:20 +0000
@@ -125,7 +125,7 @@
 void Adaptation::Icap::Options::cfgMethod(ICAP::Method m)
 {
     Must(m != ICAP::methodNone);
-    methods += m;
+    methods.push_back(m);
 }
 
 // TODO: HttpHeader should provide a general method for this type of conversion
=== modified file 'src/adaptation/icap/ServiceRep.cc'
--- src/adaptation/icap/ServiceRep.cc 2014-01-11 01:35:50 +0000
+++ src/adaptation/icap/ServiceRep.cc 2014-02-04 19:47:14 +0000
@@ -375,7 +375,8 @@
     Pointer us = NULL;
 
     while (!theClients.empty()) {
-        Client i = theClients.pop_back();
+        Client i = theClients.back();
+        theClients.pop_back();
         ScheduleCallHere(i.callback);
         i.callback = 0;
     }
=== modified file 'src/auth/Gadgets.cc'
--- src/auth/Gadgets.cc 2013-12-06 14:59:47 +0000
+++ src/auth/Gadgets.cc 2014-02-02 09:42:23 +0000
@@ -133,7 +133,7 @@
     authenticateRotate();
 
     /* free current global config details too. */
-    Auth::TheConfig.clean();
+    Auth::TheConfig.clear();
 }
 
 AuthUserHashPointer::AuthUserHashPointer(Auth::User::Pointer anAuth_user):
=== modified file 'src/base/Vector.h'
--- src/base/Vector.h 2014-01-27 02:11:08 +0000
+++ src/base/Vector.h 2014-02-07 15:37:11 +0000
@@ -88,7 +88,8 @@
     typedef VectorIteratorBase<Vector<E> > iterator;
     typedef VectorIteratorBase<Vector<E> const> const_iterator;
     typedef ptrdiff_t difference_type;
-
+    friend class VectorIteratorBase<Vector<E> >;
+    friend class VectorIteratorBase<Vector<E> const>;
     void *operator new (size_t);
     void operator delete (void *);
 
@@ -96,29 +97,31 @@
     ~Vector();
     Vector(Vector const &);
     Vector &operator = (Vector const &);
-    void clean();
+    void clear();
     void reserve (size_t capacity);
     void push_back (E);
-    Vector &operator += (E item) {push_back(item); return *this;};
     void insert (E);
     const E &front() const;
     E &front();
     E &back();
-    E pop_back();
+    void pop_back();
     E shift();         // aka pop_front
     void prune(E);
     void preAppend(int app_count);
-    bool empty() const;
-    size_t size() const;
+    inline bool empty() const;
+    inline size_t size() const;
     iterator begin();
    const_iterator begin () const;
     iterator end();
     const_iterator end () const;
-    E& operator [] (unsigned i);
-    const E& operator [] (unsigned i) const;
+    E& at(unsigned i);
+    const E& at(unsigned i) const;
+    inline E& operator [] (unsigned i);
+    inline const E& operator [] (unsigned i) const;
+    E* data() const { return items; }
 
-    /* Do not change these, until the entry C struct is removed */
+protected:
     size_t capacity;
     size_t count;
     E *items;
@@ -145,12 +148,12 @@
 template <class E>
 Vector<E>::~Vector()
 {
-    clean();
+    clear();
 }
 
 template <class E>
 void
-Vector<E>::clean()
+Vector<E>::clear()
 {
     /* could also warn if some objects are left */
     delete[] items;
@@ -240,13 +243,12 @@
 
 template <class E>
-E
+void
 Vector<E>::pop_back()
 {
     assert (size());
-    value_type result = items[--count];
+    --count;
     items[count] = value_type();
-    return result;
 }
@@ -314,7 +316,7 @@
 Vector<E> &
 Vector<E>::operator = (Vector const &old)
 {
-    clean();
+    clear();
     reserve (old.size());
 
     for (size_t counter = 0; counter < old.size(); ++counter)
@@ -367,9 +369,24 @@
 template <class E>
 E &
+Vector<E>::at(unsigned i)
+{
+    assert (size() > i);
+    return operator[](i);
+}
+
+template <class E>
+const E &
+Vector<E>::at(unsigned i) const
+{
+    assert (size() > i);
+    return operator[](i);
+}
+
+template <class E>
+E &
 Vector<E>::operator [] (unsigned i)
 {
-    assert (size() > i);
     return items[i];
 }
 
@@ -377,7 +394,6 @@
 const E &
 Vector<E>::operator [] (unsigned i) const
 {
-    assert (size() > i);
     return items[i];
 }
=== modified file 'src/cache_cf.cc'
--- src/cache_cf.cc 2014-02-08 12:33:31 +0000
+++ src/cache_cf.cc 2014-02-10 09:59:19 +0000
@@ -1871,12 +1871,7 @@
 free_authparam(Auth::ConfigVector * cfg)
 {
     /* Wipe the Auth globals and Detach/Destruct component config + state. */
-    cfg->clean();
-
-    /* remove our pointers to the probably-dead sub-configs */
-    while (cfg->size()) {
-        cfg->pop_back();
-    }
+    cfg->clear();
 
     /* on reconfigure initialize new auth schemes for the new config. */
     if (reconfiguring) {
@@ -1897,7 +1892,7 @@
 find_fstype(char *type)
 {
     for (size_t i = 0; i < StoreFileSystem::FileSystems().size(); ++i)
-        if (strcasecmp(type, StoreFileSystem::FileSystems().items[i]->type()) == 0)
+        if (strcasecmp(type, StoreFileSystem::FileSystems().at(i)->type()) == 0)
             return (int)i;
 
     return (-1);
@@ -1940,7 +1935,7 @@
         sd = dynamic_cast<SwapDir *>(swap->swapDirs[i].getRaw());
 
-        if (strcmp(sd->type(), StoreFileSystem::FileSystems().items[fs]->type()) != 0) {
+        if (strcmp(sd->type(), StoreFileSystem::FileSystems().at(fs)->type()) != 0) {
             debugs(3, DBG_CRITICAL, "ERROR: Can't change type of existing cache_dir " <<
                    sd->type() << " " << sd->path << " to " << type_str << ". Restart required");
             return;
@@ -1965,7 +1960,7 @@
 
     allocate_new_swapdir(swap);
 
-    swap->swapDirs[swap->n_configured] = StoreFileSystem::FileSystems().items[fs]->createSwapDir();
+    swap->swapDirs[swap->n_configured] = StoreFileSystem::FileSystems().at(fs)->createSwapDir();
 
     sd = dynamic_cast<SwapDir *>(swap->swapDirs[swap->n_configured].getRaw());
=== modified file 'src/client_side.cc'
--- src/client_side.cc 2014-02-08 13:36:42 +0000
+++ src/client_side.cc 2014-02-10 09:59:19 +0000
@@ -1361,7 +1361,7 @@
     bool replyMatchRequest = rep->content_range != NULL ?
                              request->range->contains(rep->content_range->spec) :
                              true;
-    const int spec_count = http->request->range->specs.count;
+    const int spec_count = http->request->range->specs.size();
     int64_t actual_clen = -1;
 
     debugs(33, 3, "clientBuildRangeHeader: range spec count: " <<
=== modified file 'src/errorpage.cc'
--- src/errorpage.cc 2014-02-07 13:45:20 +0000
+++ src/errorpage.cc 2014-02-07 16:14:42 +0000
@@ -204,7 +204,7 @@
         /** \par
          * Index any unknown file names used by deny_info.
          */
-        ErrorDynamicPageInfo *info = ErrorDynamicPages.items[i - ERR_MAX];
+        ErrorDynamicPageInfo *info = ErrorDynamicPages.at(i - ERR_MAX);
         assert(info && info->id == i && info->page_name);
 
         const char *pg = info->page_name;
@@ -245,8 +245,10 @@
         safe_free(error_text);
     }
 
-    while (ErrorDynamicPages.size())
-        errorDynamicPageInfoDestroy(ErrorDynamicPages.pop_back());
+    while (!ErrorDynamicPages.empty()) {
+        errorDynamicPageInfoDestroy(ErrorDynamicPages.back());
+        ErrorDynamicPages.pop_back();
+    }
 
     error_page_count = 0;
 
@@ -531,7 +533,7 @@
     }
 
     for (size_t j = 0; j < ErrorDynamicPages.size(); ++j) {
-        if (strcmp(ErrorDynamicPages.items[j]->page_name, page_name) == 0)
+        if (strcmp(ErrorDynamicPages[j]->page_name, page_name) == 0)
             return j + ERR_MAX;
     }
 
@@ -561,7 +563,7 @@
         return err_type_str[pageId];
 
     if (pageId >= ERR_MAX && pageId - ERR_MAX < (ssize_t)ErrorDynamicPages.size())
-        return ErrorDynamicPages.items[pageId - ERR_MAX]->page_name;
+        return ErrorDynamicPages[pageId - ERR_MAX]->page_name;
 
     return "ERR_UNKNOWN"; /* should not happen */
 }
@@ -593,8 +595,8 @@
 {
     memset(&ftp, 0, sizeof(ftp));
 
-    if (page_id >= ERR_MAX && ErrorDynamicPages.items[page_id - ERR_MAX]->page_redirect != Http::scNone)
-        httpStatus = ErrorDynamicPages.items[page_id - ERR_MAX]->page_redirect;
+    if (page_id >= ERR_MAX && ErrorDynamicPages[page_id - ERR_MAX]->page_redirect != Http::scNone)
+        httpStatus = ErrorDynamicPages[page_id - ERR_MAX]->page_redirect;
 
     if (req != NULL) {
         request = req;
=== modified file 'src/esi/CustomParser.cc'
--- src/esi/CustomParser.cc 2013-10-25 00:13:46 +0000
+++ src/esi/CustomParser.cc 2014-02-10 09:19:56 +0000
@@ -205,7 +205,8 @@
             attribute = end + 1;
         }
 
-        theClient->start (tag + 1, (const char **)attributes.items, attributes.size() >> 1);
+        // TODO: after c++11, replace &attributes.front() with attributes.data()
+        theClient->start (tag + 1, const_cast<const char **>(&attributes.front()), attributes.size() >> 1);
 
         /* TODO: attributes */
 
         if (*(tagEnd - 1) == '/')
=== modified file 'src/esi/VarState.cc'
--- src/esi/VarState.cc 2014-02-08 13:36:42 +0000
+++ src/esi/VarState.cc 2014-02-10 09:59:19 +0000
@@ -167,8 +167,10 @@
 {
     freeResources();
 
-    while (variablesForCleanup.size())
-        delete variablesForCleanup.pop_back();
+    while (!variablesForCleanup.empty()) {
+        delete variablesForCleanup.back();
+        variablesForCleanup.pop_back();
+    }
 
     delete defaultVariable;
 }
=== modified file 'src/fs/ufs/UFSSwapDir.cc'
--- src/fs/ufs/UFSSwapDir.cc 2013-12-06 23:52:26 +0000
+++ src/fs/ufs/UFSSwapDir.cc 2014-02-02 18:19:59 +0000
@@ -230,8 +230,10 @@
     IO->io = anIO;
 
     /* Change the IO Options */
-    if (currentIOOptions && currentIOOptions->options.size() > 2)
-        delete currentIOOptions->options.pop_back();
+    if (currentIOOptions && currentIOOptions->options.size() > 2) {
+        delete currentIOOptions->options.back();
+        currentIOOptions->options.pop_back();
+    }
 
     /* TODO: factor out these 4 lines */
     ConfigOption *ioOptions = IO->io->getOptionTree();
=== modified file 'src/ipc/Kids.cc'
--- src/ipc/Kids.cc 2013-10-25 00:13:46 +0000
+++ src/ipc/Kids.cc 2014-02-02 09:42:23 +0000
@@ -19,8 +19,7 @@
 /// maintain n kids
 void Kids::init()
 {
-    if (storage.size() > 0)
-        storage.clean();
+    storage.clear();
 
     storage.reserve(NumberOfKids());
 
=== modified file 'src/store.cc'
--- src/store.cc 2014-01-03 10:32:53 +0000
+++ src/store.cc 2014-02-02 08:57:20 +0000
@@ -1290,7 +1290,7 @@
     }
 
     for (i = 0; i < 10; ++i) {
-        e = LateReleaseStack.count ? LateReleaseStack.pop() : NULL;
+        e = LateReleaseStack.empty() ? NULL : LateReleaseStack.pop();
 
         if (e == NULL) {
             /* done! */
=== modified file 'src/store_dir.cc'
--- src/store_dir.cc 2013-12-31 18:49:41 +0000
+++ src/store_dir.cc 2014-02-04 16:54:49 +0000
@@ -1370,7 +1370,7 @@
 bool
 StoreSearchHashIndex::next()
 {
-    if (entries.size())
+    if (!entries.empty())
         entries.pop_back();
 
     while (!isDone() && !entries.size())
=== modified file 'src/tests/test_http_range.cc'
--- src/tests/test_http_range.cc 2013-10-25 00:13:46 +0000
+++ src/tests/test_http_range.cc 2014-02-02 08:57:20 +0000
@@ -86,7 +86,7 @@
 
     HttpHdrRange copy(*range);
 
-    assert (copy.specs.count == range->specs.count);
+    assert (copy.specs.size() == range->specs.size());
 
     HttpHdrRange::iterator pos = range->begin();
 
@@ -111,7 +111,7 @@
 testRangeIter ()
 {
     HttpHdrRange *range=rangeFromString("bytes=0-3, 1-, -2");
-    assert (range->specs.count == 3);
+    assert (range->specs.size() == 3);
     size_t counter = 0;
     HttpHdrRange::iterator i = range->begin();
 
@@ -132,7 +132,7 @@
 testRangeCanonization()
 {
     HttpHdrRange *range=rangeFromString("bytes=0-3, 1-, -2");
-    assert (range->specs.count == 3);
+    assert (range->specs.size() == 3);
 
     /* 0-3 needs a content length of 4 */
     /* This passes in the extant code - but should it?
      */
@@ -140,13 +140,13 @@
 
     if (!range->canonize(3))
         exit(1);
 
-    assert (range->specs.count == 3);
+    assert (range->specs.size() == 3);
 
     delete range;
 
     range=rangeFromString("bytes=0-3, 1-, -2");
 
-    assert (range->specs.count == 3);
+    assert (range->specs.size() == 3);
 
     /* 0-3 needs a content length of 4 */
     if (!range->canonize(4))
@@ -156,7 +156,7 @@
 
     range=rangeFromString("bytes=3-6");
 
-    assert (range->specs.count == 1);
+    assert (range->specs.size() == 1);
 
     /* 3-6 needs a content length of 4 or more */
     if (range->canonize(3))
@@ -166,7 +166,7 @@
 
     range=rangeFromString("bytes=3-6");
 
-    assert (range->specs.count == 1);
+    assert (range->specs.size() == 1);
 
     /* 3-6 needs a content length of 4 or more */
     if (!range->canonize(4))
@@ -176,12 +176,12 @@
 
     range=rangeFromString("bytes=1-1,2-3");
 
-    assert (range->specs.count == 2);
+    assert (range->specs.size()== 2);
 
     if (!range->canonize(4))
         exit(1);
 
-    assert (range->specs.count == 2);
+    assert (range->specs.size() == 2);
 
     delete range;
 }
=== modified file 'src/tunnel.cc'
--- src/tunnel.cc 2013-12-06 14:59:47 +0000
+++ src/tunnel.cc 2014-02-02 09:42:23 +0000
@@ -238,7 +238,7 @@
     debugs(26, 3, "TunnelStateData destructed this=" << this);
     assert(noConnections());
     xfree(url);
-    serverDestinations.clean();
+    serverDestinations.clear();
     delete connectRespBuf;
 }
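The call-site change this patch repeats across the tree is mechanical: the old Vector API offered clean(), a public count member, operator+=, and a pop_back() that returned the removed element; the std::vector-style API uses clear(), size()/empty(), push_back(), and a void pop_back(), so owning containers now read back() before popping. A minimal standalone sketch of the before/after pattern, shown with std::vector rather than Squid's Vector (the Spec type is illustrative only, not from the patch):

// Sketch of the call-site migration applied throughout this patch,
// using std::vector instead of Squid's Vector; Spec is a stand-in type.
#include <cassert>
#include <vector>

struct Spec {
    int value;
};

int main()
{
    std::vector<Spec *> specs;
    specs.push_back(new Spec());     // was: specs += new Spec();
    specs.push_back(new Spec());

    assert(specs.size() == 2);       // was: specs.count == 2

    // was: while (!specs.empty()) delete specs.pop_back();
    while (!specs.empty()) {
        delete specs.back();         // pop_back() no longer returns the element
        specs.pop_back();
    }

    specs.clear();                   // was: specs.clean()
    return specs.empty() ? 0 : 1;
}

The Vector.h hunk also moves bounds checking out of operator[]: the new at() keeps the assert while operator[] becomes an unchecked accessor, matching std::vector semantics.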