Add AIPerService::CapabilityType::mConcurrectConnections
Prepares for throttling the number of connections per capability type. The value is currently left at AIPerService::mConcurrectConnections, so it has no effect yet.
This commit is contained in:
@@ -85,7 +85,8 @@ AIPerService::CapabilityType::CapabilityType(void) :
|
||||
mAdded(0),
|
||||
mFlags(0),
|
||||
mDownloading(0),
|
||||
mMaxPipelinedRequests(CurlConcurrentConnectionsPerService)
|
||||
mMaxPipelinedRequests(CurlConcurrentConnectionsPerService),
|
||||
mConcurrectConnections(CurlConcurrentConnectionsPerService)
|
||||
{
|
||||
}
|
||||
|
||||
@@ -278,9 +279,10 @@ void AIPerService::release(AIPerServicePtr& instance)
|
||||
instance.reset();
|
||||
}
|
||||
|
||||
bool AIPerService::throttled() const
|
||||
bool AIPerService::throttled(AICapabilityType capability_type) const
|
||||
{
|
||||
return mTotalAdded >= mConcurrectConnections;
|
||||
return mTotalAdded >= mConcurrectConnections ||
|
||||
mCapabilityType[capability_type].mAdded >= mCapabilityType[capability_type].mConcurrectConnections;
|
||||
}
|
||||
|
||||
void AIPerService::added_to_multi_handle(AICapabilityType capability_type)
|
||||
@@ -479,7 +481,10 @@ void AIPerService::adjust_concurrent_connections(int increment)
|
||||
increment = per_service_w->mConcurrectConnections - old_concurrent_connections;
|
||||
for (int i = 0; i < number_of_capability_types; ++i)
|
||||
{
|
||||
per_service_w->mCapabilityType[i].mMaxPipelinedRequests = llmax(per_service_w->mCapabilityType[i].mMaxPipelinedRequests + increment, (U32)0);
|
||||
per_service_w->mCapabilityType[i].mMaxPipelinedRequests = llmax(per_service_w->mCapabilityType[i].mMaxPipelinedRequests + increment, 0);
|
||||
int new_concurrent_connections_per_capability_type =
|
||||
llclamp((new_concurrent_connections * per_service_w->mCapabilityType[i].mConcurrectConnections + old_concurrent_connections / 2) / old_concurrent_connections, 1, new_concurrent_connections);
|
||||
per_service_w->mCapabilityType[i].mConcurrectConnections = (U16)new_concurrent_connections_per_capability_type;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -144,7 +144,8 @@ class AIPerService {
|
||||
// ctf_full : Set to true when the queue is popped and then still isn't empty;
|
||||
// ctf_starvation: Set to true when the queue was about to be popped but was already empty.
|
||||
U32 mDownloading; // The number of active easy handles with this service for which data was received.
|
||||
U32 mMaxPipelinedRequests; // The maximum number of accepted requests for this service and (approved) capability type, that didn't finish yet.
|
||||
U16 mMaxPipelinedRequests; // The maximum number of accepted requests for this service and (approved) capability type, that didn't finish yet.
|
||||
U16 mConcurrectConnections; // The maximum number of allowed concurrent connections to the service of this capability type.
|
||||
|
||||
// Declare, not define, constructor and destructor - in order to avoid instantiation of queued_request_type from header.
|
||||
CapabilityType(void);
|
||||
@@ -216,7 +217,7 @@ class AIPerService {
|
||||
void added_to_multi_handle(AICapabilityType capability_type); // Called when an easy handle for this service has been added to the multi handle.
|
||||
void removed_from_multi_handle(AICapabilityType capability_type, bool downloaded_something); // Called when an easy handle for this service is removed again from the multi handle.
|
||||
void download_started(AICapabilityType capability_type) { ++mCapabilityType[capability_type].mDownloading; }
|
||||
bool throttled(void) const; // Returns true if the maximum number of allowed requests for this service have been added to the multi handle.
|
||||
bool throttled(AICapabilityType capability_type) const; // Returns true if the maximum number of allowed requests for this service/capability type have been added to the multi handle.
|
||||
|
||||
bool queue(AICurlEasyRequest const& easy_request, AICapabilityType capability_type, bool force_queuing = true); // Add easy_request to the queue if queue is empty or force_queuing.
|
||||
bool cancel(AICurlEasyRequest const& easy_request, AICapabilityType capability_type); // Remove easy_request from the queue (if it's there).
|
||||
|
||||
@@ -1736,7 +1736,7 @@ bool MultiHandle::add_easy_request(AICurlEasyRequest const& easy_request, bool f
|
||||
}
|
||||
bool too_much_bandwidth = !curl_easy_request_w->approved() && AIPerService::checkBandwidthUsage(per_service, get_clock_count() * HTTPTimeout::sClockWidth_40ms);
|
||||
PerService_wat per_service_w(*per_service);
|
||||
if (!too_much_bandwidth && sTotalAdded < curl_max_total_concurrent_connections && !per_service_w->throttled())
|
||||
if (!too_much_bandwidth && sTotalAdded < curl_max_total_concurrent_connections && !per_service_w->throttled(capability_type))
|
||||
{
|
||||
curl_easy_request_w->set_timeout_opts();
|
||||
if (curl_easy_request_w->add_handle_to_multi(curl_easy_request_w, mMultiHandle) == CURLM_OK)
|
||||
|
||||
Reference in New Issue
Block a user