Merge remote-tracking branch 'aleric/master'

This commit is contained in:
Latif Khalifa
2013-06-24 19:42:50 +02:00
7 changed files with 235 additions and 64 deletions

View File

@@ -49,7 +49,7 @@ AIThreadSafeSimpleDC<AIPerService::TotalQueued> AIPerService::sTotalQueued;
namespace AICurlPrivate {
// Cached value of CurlConcurrentConnectionsPerService.
U32 CurlConcurrentConnectionsPerService;
U16 CurlConcurrentConnectionsPerService;
// Friend functions of RefCountedThreadSafePerService
@@ -75,7 +75,9 @@ AIPerService::AIPerService(void) :
mConcurrectConnections(CurlConcurrentConnectionsPerService),
mTotalAdded(0),
mApprovedFirst(0),
mUnapprovedFirst(0)
mUnapprovedFirst(0),
mUsedCT(0),
mCTInUse(0)
{
}
@@ -85,7 +87,8 @@ AIPerService::CapabilityType::CapabilityType(void) :
mAdded(0),
mFlags(0),
mDownloading(0),
mMaxPipelinedRequests(CurlConcurrentConnectionsPerService)
mMaxPipelinedRequests(CurlConcurrentConnectionsPerService),
mConcurrectConnections(CurlConcurrentConnectionsPerService)
{
}
@@ -278,9 +281,57 @@ void AIPerService::release(AIPerServicePtr& instance)
instance.reset();
}
bool AIPerService::throttled() const
void AIPerService::redivide_connections(void)
{
return mTotalAdded >= mConcurrectConnections;
// Priority order.
static AICapabilityType order[number_of_capability_types] = { cap_inventory, cap_texture, cap_mesh, cap_other };
// Count the number of capability types that are currently in use and store the types in an array.
AICapabilityType used_order[number_of_capability_types];
int number_of_capability_types_in_use = 0;
for (int i = 0; i < number_of_capability_types; ++i)
{
U32 const mask = CT2mask(order[i]);
if ((mCTInUse & mask))
{
used_order[number_of_capability_types_in_use++] = order[i];
}
else
{
// Give every other type (that is not in use) one connection, so they can be used (at which point they'll get more).
mCapabilityType[order[i]].mConcurrectConnections = 1;
}
}
// Keep one connection in reserve for currently unused capability types (that have been used before).
int reserve = (mUsedCT != mCTInUse) ? 1 : 0;
// Distribute (mConcurrectConnections - reserve) over number_of_capability_types_in_use.
U16 max_connections_per_CT = (mConcurrectConnections - reserve) / number_of_capability_types_in_use + 1;
// The first count CTs get max_connections_per_CT connections.
int count = (mConcurrectConnections - reserve) % number_of_capability_types_in_use;
for(int i = 1, j = 0;; --i)
{
while (j < count)
{
mCapabilityType[used_order[j++]].mConcurrectConnections = max_connections_per_CT;
}
if (i == 0)
{
break;
}
// Finish the loop till all used CTs are assigned.
count = number_of_capability_types_in_use;
// Never assign 0 as maximum.
if (max_connections_per_CT > 1)
{
// The remaining CTs get one connection less so that the sum of all assigned connections is mConcurrectConnections - reserve.
--max_connections_per_CT;
}
}
}
bool AIPerService::throttled(AICapabilityType capability_type) const
{
return mTotalAdded >= mConcurrectConnections ||
mCapabilityType[capability_type].mAdded >= mCapabilityType[capability_type].mConcurrectConnections;
}
void AIPerService::added_to_multi_handle(AICapabilityType capability_type)
@@ -291,20 +342,32 @@ void AIPerService::added_to_multi_handle(AICapabilityType capability_type)
void AIPerService::removed_from_multi_handle(AICapabilityType capability_type, bool downloaded_something)
{
llassert(mTotalAdded > 0 && mCapabilityType[capability_type].mAdded > 0);
--mCapabilityType[capability_type].mAdded;
CapabilityType& ct(mCapabilityType[capability_type]);
llassert(mTotalAdded > 0 && ct.mAdded > 0);
bool done = --ct.mAdded == 0;
if (downloaded_something)
{
llassert(mCapabilityType[capability_type].mDownloading > 0);
--mCapabilityType[capability_type].mDownloading;
llassert(ct.mDownloading > 0);
--ct.mDownloading;
}
--mTotalAdded;
if (done && ct.pipelined_requests() == 0)
{
mark_unused(capability_type);
}
}
void AIPerService::queue(AICurlEasyRequest const& easy_request, AICapabilityType capability_type)
// Returns true if the request was queued.
bool AIPerService::queue(AICurlEasyRequest const& easy_request, AICapabilityType capability_type, bool force_queuing)
{
mCapabilityType[capability_type].mQueuedRequests.push_back(easy_request.get_ptr());
TotalQueued_wat(sTotalQueued)->count++;
CapabilityType::queued_request_type& queued_requests(mCapabilityType[capability_type].mQueuedRequests);
bool needs_queuing = force_queuing || !queued_requests.empty();
if (needs_queuing)
{
queued_requests.push_back(easy_request.get_ptr());
TotalQueued_wat(sTotalQueued)->count++;
}
return needs_queuing;
}
bool AIPerService::cancel(AICurlEasyRequest const& easy_request, AICapabilityType capability_type)
@@ -466,16 +529,25 @@ void AIPerService::adjust_concurrent_connections(int increment)
for (AIPerService::iterator iter = instance_map_w->begin(); iter != instance_map_w->end(); ++iter)
{
PerService_wat per_service_w(*iter->second);
U32 old_concurrent_connections = per_service_w->mConcurrectConnections;
per_service_w->mConcurrectConnections = llclamp(old_concurrent_connections + increment, (U32)1, CurlConcurrentConnectionsPerService);
U16 old_concurrent_connections = per_service_w->mConcurrectConnections;
int new_concurrent_connections = llclamp(old_concurrent_connections + increment, 1, (int)CurlConcurrentConnectionsPerService);
per_service_w->mConcurrectConnections = (U16)new_concurrent_connections;
increment = per_service_w->mConcurrectConnections - old_concurrent_connections;
for (int i = 0; i < number_of_capability_types; ++i)
{
per_service_w->mCapabilityType[i].mMaxPipelinedRequests = llmax(per_service_w->mCapabilityType[i].mMaxPipelinedRequests + increment, (U32)0);
per_service_w->mCapabilityType[i].mMaxPipelinedRequests = llmax(per_service_w->mCapabilityType[i].mMaxPipelinedRequests + increment, 0);
int new_concurrent_connections_per_capability_type =
llclamp((new_concurrent_connections * per_service_w->mCapabilityType[i].mConcurrectConnections + old_concurrent_connections / 2) / old_concurrent_connections, 1, new_concurrent_connections);
per_service_w->mCapabilityType[i].mConcurrectConnections = (U16)new_concurrent_connections_per_capability_type;
}
}
}
void AIPerService::ResetUsed::operator()(AIPerService::instance_map_type::value_type const& service) const
{
PerService_wat(*service.second)->resetUsedCt();
}
void AIPerService::Approvement::honored(void)
{
if (!mHonored)

View File

@@ -103,7 +103,7 @@ class AIPerService {
typedef AIAccess<instance_map_type> instance_map_wat;
private:
static threadsafe_instance_map_type sInstanceMap; // Map of AIPerService instances with the hostname as key.
static threadsafe_instance_map_type sInstanceMap; // Map of AIPerService instances with the canonical hostname:port as key.
friend class AIThreadSafeSimpleDC<AIPerService>; // threadsafe_PerService
AIPerService(void);
@@ -115,7 +115,7 @@ class AIPerService {
// Utility function; extract canonical (lowercase) hostname and port from url.
static std::string extract_canonical_servicename(std::string const& url);
// Return (possibly create) a unique instance for the given hostname.
// Return (possibly create) a unique instance for the given hostname:port combination.
static AIPerServicePtr instance(std::string const& servicename);
// Release instance (object will be deleted if this was the last instance).
@@ -144,7 +144,8 @@ class AIPerService {
// ctf_full : Set to true when the queue is popped and then still isn't empty;
// ctf_starvation: Set to true when the queue was about to be popped but was already empty.
U32 mDownloading; // The number of active easy handles with this service for which data was received.
U32 mMaxPipelinedRequests; // The maximum number of accepted requests for this service and (approved) capability type, that didn't finish yet.
U16 mMaxPipelinedRequests; // The maximum number of accepted requests for this service and (approved) capability type, that didn't finish yet.
U16 mConcurrectConnections; // The maximum number of allowed concurrent connections to the service of this capability type.
// Declare, not define, constructor and destructor - in order to avoid instantiation of queued_request_type from header.
CapabilityType(void);
@@ -158,10 +159,52 @@ class AIPerService {
AIAverage mHTTPBandwidth; // Keeps track on number of bytes received for this service in the past second.
int mConcurrectConnections; // The maximum number of allowed concurrent connections to this service.
int mTotalAdded; // Number of active easy handles with this host.
int mTotalAdded; // Number of active easy handles with this service.
int mApprovedFirst; // First capability type to try.
int mUnapprovedFirst; // First capability type to try after all approved types were tried.
U32 mUsedCT; // Bit mask with one bit per capability type. A '1' means the capability was in use since the last resetUsedCT().
U32 mCTInUse; // Bit mask with one bit per capability type. A '1' means the capability is in use right now.
// Helper struct, used in the static resetUsed.
struct ResetUsed { void operator()(instance_map_type::value_type const& service) const; };
void redivide_connections(void);
void mark_inuse(AICapabilityType capability_type)
{
U32 bit = CT2mask(capability_type);
if ((mCTInUse & bit) == 0) // If this CT went from unused to used
{
mCTInUse |= bit;
mUsedCT |= bit;
if (mUsedCT != bit) // and more than one CT use this service.
{
redivide_connections();
}
}
}
void mark_unused(AICapabilityType capability_type)
{
U32 bit = CT2mask(capability_type);
if ((mCTInUse & bit) != 0) // If this CT went from used to unused
{
mCTInUse &= ~bit;
if (mCTInUse && mUsedCT != bit) // and more than one CT use this service, and at least one is in use.
{
redivide_connections();
}
}
}
public:
static U32 CT2mask(AICapabilityType capability_type) { return (U32)1 << capability_type; }
void resetUsedCt(void) { mUsedCT = mCTInUse; }
bool is_used(AICapabilityType capability_type) const { return (mUsedCT & CT2mask(capability_type)); }
bool is_inuse(AICapabilityType capability_type) const { return (mCTInUse & CT2mask(capability_type)); }
static void resetUsed(void) { copy_forEach(ResetUsed()); }
U32 is_used(void) const { return mUsedCT; } // Non-zero if this service was used for any capability type.
U32 is_inuse(void) const { return mCTInUse; } // Non-zero if this service is in use for any capability type.
// Global administration of the total number of queued requests of all services combined.
private:
struct TotalQueued {
@@ -211,15 +254,15 @@ class AIPerService {
static bool sNoHTTPBandwidthThrottling; // Global override to disable bandwidth throttling.
public:
void added_to_command_queue(AICapabilityType capability_type) { ++mCapabilityType[capability_type].mQueuedCommands; }
void added_to_command_queue(AICapabilityType capability_type) { ++mCapabilityType[capability_type].mQueuedCommands; mark_inuse(capability_type); }
void removed_from_command_queue(AICapabilityType capability_type) { --mCapabilityType[capability_type].mQueuedCommands; llassert(mCapabilityType[capability_type].mQueuedCommands >= 0); }
void added_to_multi_handle(AICapabilityType capability_type); // Called when an easy handle for this host has been added to the multi handle.
void removed_from_multi_handle(AICapabilityType capability_type, bool downloaded_something); // Called when an easy handle for this host is removed again from the multi handle.
void added_to_multi_handle(AICapabilityType capability_type); // Called when an easy handle for this service has been added to the multi handle.
void removed_from_multi_handle(AICapabilityType capability_type, bool downloaded_something); // Called when an easy handle for this service is removed again from the multi handle.
void download_started(AICapabilityType capability_type) { ++mCapabilityType[capability_type].mDownloading; }
bool throttled(void) const; // Returns true if the maximum number of allowed requests for this host have been added to the multi handle.
bool throttled(AICapabilityType capability_type) const; // Returns true if the maximum number of allowed requests for this service/capability type have been added to the multi handle.
void queue(AICurlEasyRequest const& easy_request, AICapabilityType capability_type); // Add easy_request to the queue.
bool cancel(AICurlEasyRequest const& easy_request, AICapabilityType capability_type); // Remove easy_request from the queue (if it's there).
bool queue(AICurlEasyRequest const& easy_request, AICapabilityType capability_type, bool force_queuing = true); // Add easy_request to the queue if queue is empty or force_queuing.
bool cancel(AICurlEasyRequest const& easy_request, AICapabilityType capability_type); // Remove easy_request from the queue (if it's there).
void add_queued_to(AICurlPrivate::curlthread::MultiHandle* mh, bool recursive = false);
// Add queued easy handle (if any) to the multi handle. The request is removed from the queue,
@@ -256,7 +299,7 @@ class AIPerService {
// the AIPerService object locked for the whole duration of the call.
// The functions only lock it when access is required.
// Returns approvement if curl can handle another request for this host.
// Returns approvement if curl can handle another request for this service.
// Should return NULL if the maximum allowed HTTP bandwidth is reached, or when
// the latency between request and actual delivery becomes too large.
static Approvement* approveHTTPRequestFor(AIPerServicePtr const& per_service, AICapabilityType capability_type);
@@ -283,7 +326,7 @@ class RefCountedThreadSafePerService : public threadsafe_PerService {
friend void intrusive_ptr_release(RefCountedThreadSafePerService* p);
};
extern U32 CurlConcurrentConnectionsPerService;
extern U16 CurlConcurrentConnectionsPerService;
} // namespace AICurlPrivate

View File

@@ -1721,9 +1721,22 @@ bool MultiHandle::add_easy_request(AICurlEasyRequest const& easy_request, bool f
AICurlEasyRequest_wat curl_easy_request_w(*easy_request);
capability_type = curl_easy_request_w->capability_type();
per_service = curl_easy_request_w->getPerServicePtr();
if (!from_queue)
{
// Add the request to the back of a non-empty queue.
if (PerService_wat(*per_service)->queue(easy_request, capability_type, false))
{
// The queue was not empty, therefore the request was queued.
#ifdef SHOW_ASSERT
// Not active yet, but it's no longer an error if next we try to remove the request.
curl_easy_request_w->mRemovedPerCommand = false;
#endif
return true;
}
}
bool too_much_bandwidth = !curl_easy_request_w->approved() && AIPerService::checkBandwidthUsage(per_service, get_clock_count() * HTTPTimeout::sClockWidth_40ms);
PerService_wat per_service_w(*per_service);
if (!too_much_bandwidth && sTotalAdded < curl_max_total_concurrent_connections && !per_service_w->throttled())
if (!too_much_bandwidth && sTotalAdded < curl_max_total_concurrent_connections && !per_service_w->throttled(capability_type))
{
curl_easy_request_w->set_timeout_opts();
if (curl_easy_request_w->add_handle_to_multi(curl_easy_request_w, mMultiHandle) == CURLM_OK)
@@ -2548,7 +2561,7 @@ void startCurlThread(LLControlGroup* control_group)
// Cache Debug Settings.
sConfigGroup = control_group;
curl_max_total_concurrent_connections = sConfigGroup->getU32("CurlMaxTotalConcurrentConnections");
CurlConcurrentConnectionsPerService = sConfigGroup->getU32("CurlConcurrentConnectionsPerService");
CurlConcurrentConnectionsPerService = (U16)sConfigGroup->getU32("CurlConcurrentConnectionsPerService");
gNoVerifySSLCert = sConfigGroup->getBOOL("NoVerifySSLCert");
AIPerService::setMaxPipelinedRequests(curl_max_total_concurrent_connections);
AIPerService::setHTTPThrottleBandwidth(sConfigGroup->getF32("HTTPThrottleBandwidth"));
@@ -2573,10 +2586,19 @@ bool handleCurlConcurrentConnectionsPerService(LLSD const& newvalue)
{
using namespace AICurlPrivate;
U32 new_concurrent_connections = newvalue.asInteger();
AIPerService::adjust_concurrent_connections(new_concurrent_connections - CurlConcurrentConnectionsPerService);
CurlConcurrentConnectionsPerService = new_concurrent_connections;
llinfos << "CurlConcurrentConnectionsPerService set to " << CurlConcurrentConnectionsPerService << llendl;
U16 new_concurrent_connections = (U16)newvalue.asInteger();
U16 const maxCurlConcurrentConnectionsPerService = 32;
if (new_concurrent_connections < 1 || new_concurrent_connections > maxCurlConcurrentConnectionsPerService)
{
sConfigGroup->setU32("CurlConcurrentConnectionsPerService", static_cast<U32>((new_concurrent_connections < 1) ? 1 : maxCurlConcurrentConnectionsPerService));
}
else
{
int increment = new_concurrent_connections - CurlConcurrentConnectionsPerService;
CurlConcurrentConnectionsPerService = new_concurrent_connections;
AIPerService::adjust_concurrent_connections(increment);
llinfos << "CurlConcurrentConnectionsPerService set to " << CurlConcurrentConnectionsPerService << llendl;
}
return true;
}
@@ -2625,29 +2647,24 @@ AIThreadSafeSimpleDC<AIPerService::ThrottleFraction> AIPerService::sThrottleFrac
LLAtomicU32 AIPerService::sHTTPThrottleBandwidth125(250000);
bool AIPerService::sNoHTTPBandwidthThrottling;
// Return true if we want at least one more HTTP request for this host.
// Return Approvement if we want at least one more HTTP request for this service.
//
// It's OK if this function is a bit fuzzy, but we don't want it to return
// true a hundred times on a row when it is called fast in a loop.
// approvement a hundred times in a row when it is called in a tight loop.
// Hence the following consideration:
//
// This function is called only from LLTextureFetchWorker::doWork, and when it returns true
// then doWork will call LLHTTPClient::request with a NULL default engine (signaling that
// it is OK to run in any thread).
//
// At the end, LLHTTPClient::request calls AIStateMachine::run, which in turn calls
// AIStateMachine::reset at the end. Because NULL is passed as default_engine, reset will
// call AIStateMachine::multiplex to immediately start running the state machine. This
// causes it to go through the states bs_reset, bs_initialize and then bs_multiplex with
// run state AICurlEasyRequestStateMachine_addRequest. Finally, in this state, multiplex
// calls AICurlEasyRequestStateMachine::multiplex_impl which then calls AICurlEasyRequest::addRequest
// which causes an increment of command_queue_w->size and AIPerService::mQueuedCommands.
// If this function returns non-NULL, an Approvement object was created and
// the corresponding AIPerService::CapabilityType::mApprovedRequests was
// incremented. The Approvement object is passed to LLHTTPClient::request,
// and once the request is added to the command queue, used to update the counters.
//
// It is therefore guaranteed that in one loop of LLTextureFetchWorker::doWork,
// this size is incremented; stopping this function from returning true once we reached the
// threshold of "pipelines" requests (the sum of requests in the command queue, the ones
// throttled and queued in AIPerService::mQueuedRequests and the already
// running requests (in MultiHandle::mAddedEasyRequests)).
// or LLInventoryModelBackgroundFetch::bulkFetch (the two functions currently
// calling this function) this function will stop returning approvement once we
// reached the threshold of "pipelined" requests (the sum of approved requests,
// requests in the command queue, the ones throttled and queued in
// AIPerService::mQueuedRequests and the already running requests
// (in MultiHandle::mAddedEasyRequests)).
//
//static
AIPerService::Approvement* AIPerService::approveHTTPRequestFor(AIPerServicePtr const& per_service, AICapabilityType capability_type)
@@ -2690,7 +2707,7 @@ AIPerService::Approvement* AIPerService::approveHTTPRequestFor(AIPerServicePtr c
bool reject, equal, increment_threshold;
{
PerService_wat per_service_w(*per_service);
PerService_wat per_service_w(*per_service); // Keep this lock for the duration of accesses to ct.
CapabilityType& ct(per_service_w->mCapabilityType[capability_type]);
S32 const pipelined_requests_per_capability_type = ct.pipelined_requests();
reject = pipelined_requests_per_capability_type >= (S32)ct.mMaxPipelinedRequests;
@@ -2700,14 +2717,14 @@ AIPerService::Approvement* AIPerService::approveHTTPRequestFor(AIPerServicePtr c
ct.mFlags = 0;
if (decrement_threshold)
{
if ((int)ct.mMaxPipelinedRequests > per_service_w->mConcurrectConnections)
if ((int)ct.mMaxPipelinedRequests > ct.mConcurrectConnections)
{
ct.mMaxPipelinedRequests--;
}
}
else if (increment_threshold && reject)
{
if ((int)ct.mMaxPipelinedRequests < 2 * per_service_w->mConcurrectConnections)
if ((int)ct.mMaxPipelinedRequests < 2 * ct.mConcurrectConnections)
{
ct.mMaxPipelinedRequests++;
// Immediately take the new threshold into account.

View File

@@ -76,33 +76,56 @@ void AIServiceBar::draw()
LLFontGL::getFontMonospace()->renderUTF8(mName, 0, start, height, text_color, LLFontGL::LEFT, LLFontGL::TOP);
start += LLFontGL::getFontMonospace()->getWidth(mName);
std::string text;
PerService_rat per_service_r(*mPerService);
AIPerService::CapabilityType const* cts;
U32 is_used;
U32 is_inuse;
int total_added;
int concurrent_connections;
size_t bandwidth;
{
PerService_rat per_service_r(*mPerService);
is_used = per_service_r->is_used();
is_inuse = per_service_r->is_inuse();
total_added = per_service_r->mTotalAdded;
concurrent_connections = per_service_r->mConcurrectConnections;
bandwidth = per_service_r->bandwidth().truncateData(AIHTTPView::getTime_40ms());
cts = per_service_r->mCapabilityType; // Not thread-safe, but we're only reading from it and only using the results to show in a debug console.
}
for (int col = 0; col < number_of_capability_types; ++col)
{
AIPerService::CapabilityType& ct(per_service_r->mCapabilityType[col]);
AICapabilityType capability_type = static_cast<AICapabilityType>(col);
AIPerService::CapabilityType const& ct(cts[capability_type]);
start = mHTTPView->updateColumn(col, start);
if (col < 2)
U32 mask = AIPerService::CT2mask(capability_type);
if (!(is_used & mask))
{
text = llformat(" | %hu-%hu-%lu,{%hu,%u}/%u", ct.mApprovedRequests, ct.mQueuedCommands, ct.mQueuedRequests.size(), ct.mAdded, ct.mDownloading, ct.mMaxPipelinedRequests);
text = " | ";
}
else if (col < 2)
{
text = llformat(" | %hu-%hu-%lu,{%hu/%hu,%u}/%u", ct.mApprovedRequests, ct.mQueuedCommands, ct.mQueuedRequests.size(), ct.mAdded, ct.mConcurrectConnections, ct.mDownloading, ct.mMaxPipelinedRequests);
}
else
{
text = llformat(" | --%hu-%lu,{%hu,%u}", ct.mQueuedCommands, ct.mQueuedRequests.size(), ct.mAdded, ct.mDownloading);
text = llformat(" | --%hu-%lu,{%hu/%hu,%u}", ct.mQueuedCommands, ct.mQueuedRequests.size(), ct.mAdded, ct.mConcurrectConnections, ct.mDownloading);
}
LLFontGL::getFontMonospace()->renderUTF8(text, 0, start, height, text_color, LLFontGL::LEFT, LLFontGL::TOP);
LLFontGL::getFontMonospace()->renderUTF8(text, 0, start, height, ((is_inuse & mask) == 0) ? LLColor4::grey2 : text_color, LLFontGL::LEFT, LLFontGL::TOP);
start += LLFontGL::getFontMonospace()->getWidth(text);
}
start = mHTTPView->updateColumn(mc_col, start);
text = llformat(" | %d/%d", per_service_r->mTotalAdded, per_service_r->mConcurrectConnections);
text = llformat(" | %d/%d", total_added, concurrent_connections);
LLFontGL::getFontMonospace()->renderUTF8(text, 0, start, height, text_color, LLFontGL::LEFT, LLFontGL::TOP);
start += LLFontGL::getFontMonospace()->getWidth(text);
start = mHTTPView->updateColumn(bw_col, start);
size_t bandwidth = per_service_r->bandwidth().truncateData(AIHTTPView::getTime_40ms());
size_t max_bandwidth = mHTTPView->mMaxBandwidthPerService;
text = " | ";
LLFontGL::getFontMonospace()->renderUTF8(text, 0, start, height, text_color, LLFontGL::LEFT, LLFontGL::TOP);
start += LLFontGL::getFontMonospace()->getWidth(text);
text = llformat("%lu", bandwidth / 125);
if (bandwidth == 0)
{
text_color = LLColor4::grey2;
}
LLColor4 color = (bandwidth > max_bandwidth) ? LLColor4::red : ((bandwidth > max_bandwidth * .75f) ? LLColor4::yellow : text_color);
LLFontGL::getFontMonospace()->renderUTF8(text, 0, start, height, color, LLFontGL::LEFT, LLFontGL::TOP);
start += LLFontGL::getFontMonospace()->getWidth(text);
@@ -151,7 +174,7 @@ void AIGLHTTPHeaderBar::draw(void)
// First header line.
F32 height = v_offset + sLineHeight * number_of_header_lines;
text = "HTTP console -- [approved]-commandQ-curlQ,{added,downloading}[/max]";
text = "HTTP console -- [approved]-commandQ-curlQ,{added/max,downloading}[/max]";
LLFontGL::getFontMonospace()->renderUTF8(text, 0, h_offset, height, text_color, LLFontGL::LEFT, LLFontGL::TOP);
text = " | Added/Max";
U32 start = mHTTPView->updateColumn(mc_col, 100);
@@ -241,6 +264,18 @@ U32 AIHTTPView::updateColumn(int col, U32 start)
return mStartColumn[col];
}
//static
void AIHTTPView::toggle_visibility(void* user_data)
{
LLView* viewp = (LLView*)user_data;
bool visible = !viewp->getVisible();
if (visible)
{
AIPerService::resetUsed();
}
viewp->setVisible(visible);
}
U64 AIHTTPView::sTime_40ms;
struct KillView
@@ -260,6 +295,8 @@ struct CreateServiceBar
void operator()(AIPerService::instance_map_type::value_type const& service)
{
if (!PerService_rat(*service.second)->is_used())
return;
AIServiceBar* service_bar = new AIServiceBar(mHTTPView, service);
mHTTPView->addChild(service_bar);
mHTTPView->mServiceBars.push_back(service_bar);

View File

@@ -66,6 +66,7 @@ class AIHTTPView : public LLContainerView
public:
static U64 getTime_40ms(void) { return sTime_40ms; }
static void toggle_visibility(void* user_data);
};
extern AIHTTPView *gHttpView;

View File

@@ -31,7 +31,7 @@
<key>Type</key>
<string>U32</string>
<key>Value</key>
<integer>16</integer>
<integer>8</integer>
</map>
<key>NoVerifySSLCert</key>
<map>

View File

@@ -169,6 +169,7 @@
#include "llfloatermessagelog.h"
#include "shfloatermediaticker.h"
#include "llpacketring.h"
#include "aihttpview.h"
// </edit>
#include "scriptcounter.h"
@@ -818,7 +819,7 @@ void init_client_menu(LLMenuGL* menu)
}
sub->addChild(new LLMenuItemCheckGL("HTTP Console",
&toggle_visibility,
&AIHTTPView::toggle_visibility,
NULL,
&get_visibility,
(void*)gHttpView,