C++ – simple parallel download using a connection pool class using cpprestsdk

The following is a simple class for establishing multiple http connections, primarily for downloading a list of small files:

#include <cpprest/http_client.h>
#include <cpprest/filestream.h>
#include <algorithm>
#include <atomic>
#include <condition_variable>
#include <iostream>
#include <mutex>
#include <string>
#include <utility>
#include <vector>

using namespace utility;                    // Common utilities like string conversions
using namespace web;                        // Common features like URIs.
using namespace web::http;                  // Common HTTP functionality
using namespace web::http::client;          // HTTP client features
using namespace concurrency::streams;       // Asynchronous streams

    class ConnectionPool
{
public:
    ConnectionPool(size_t nWorkers, std::wstring baseUri) :BaseUri(baseUri)
    {
        for (size_t i = 0; i < nWorkers; i++) Pool.emplace_back(http_client(baseUri), http_request(methods::GET));
    }

    void ResetState(size_t nWorkers, std::wstring baseUri)
    {
        BaseUri = baseUri;
        nDone = 0;
        Pool.clear();
        for (size_t i = 0; i < nWorkers; i++) Pool.emplace_back(http_client(baseUri), http_request(methods::GET));
    }

    void ResizePool(size_t nWorkers)
    {
        Pool.resize(nWorkers, { http_client(BaseUri) , http_request(methods::GET) });
    }

    /*template
    void DownloadAsync(std::vector Uris, const Function& f)//Not implemented
    {
        WorkItems = Uris;
        const size_t limit = (std::min)(Pool.size(), WorkItems.size());
        for (size_t i = 0; i < limit; i++) assignWork(i, f);
    }*/

    template
    void DownloadSync(const std::vector Uris, const Function& f)
    {
        std::wcout << "*DownloadSync Started*" << std::endl;
        WorkItems = Uris;
         for (size_t i = nDone = 0, limit = nActive = std::min(Pool.size(), WorkItems.size()); i < limit; ++i) assignWork(i, f);

        std::unique_lock lk(m1);
        cv.wait(lk, (&)() { return nActive == 0; });
        std::wcout << "*DownloadSync Ended*" << std::endl;
    }

    template
    void assignWork(int pidx, const Function& f)
    {
        //m2 isn't needed, right?!
        //m2.lock();
        if (nDone >= WorkItems.size())
        {
            std::lock_guard lk(m1);
            --nActive;
            cv.notify_one();
            //m2.unlock();
            return;
        }
        const auto wItem = WorkItems(nDone);
        int cIdx = nDone;
        ++nDone;
        //m2.unlock();

        std::wcout << L"Worker " << pidx << L": Assigning/t" << wItem << L" succeed" << std::endl;
        auto& (client, request) = Pool(pidx);

        request.set_request_uri(wItem);
        client.request(request).then((=)(pplx::task   responseTask) {
            try {
                if (auto response = responseTask.get(); response.status_code() == http::status_codes::OK)
                {
                    f(response, cIdx);
                    std::wcout << L"Worker " << pidx << L": Downloading/t" << wItem <> Pool;
    std::vector WorkItems;
    std::wstring BaseUri;
    std::mutex m1/*,m2*/;
    std::condition_variable cv;
    std::atomic nActive = 0, nDone = 0;
};

int main()
{
//....code....//
ConnectionPool con(n, L"base url");
        con.DownloadSync(urls, ()(http_response res, int idx)
            {
                auto outFile = fstream::open_ostream(std::to_wstring(idx) + L".ext").get();
                res.body().read_to_end(outFile.streambuf()).wait();
                outFile.close().wait();
            });
}

services – How to best execute temporary processes in parallel on the Cloud

I have an application design that would have a collection of microservices running a task for about 30 minutes to a few hours, writing data to files, and then stopping. I was debating at the beginning to deploy these microservices on a kubernetes cluster, by saving the generated files in a storage location like an S3 or GCS bucket, but their temporary existence and their lack of redundancy make the kubernetes seem exaggerated for this task. I was wondering if anyone had advice on which solution would work best for this?

Ruby parallel assignment – Code Review Stack Exchange

I recently wrote the following code:

def freq_progress
    user_flashcards = input_flashcards.includes(:term)
    terms_unicos = user_flashcards.map(&:term).compact.uniq

    freq_1, freq_2, freq_3, freq_4, freq_5 = 0, 0, 0, 0, 0
    terms_unicos.each do |term|
      case term.ranking
      when nil
        next
      when 1..100
        freq_1 += 1
      when 101..300
        freq_2 += 1
      when 301..1000
        freq_3 += 1
      when 1001..2000
        freq_4 += 1
      when 2001..5000
        freq_5 += 1
      else
        next
      end
    end
    {
        f1: freq_1,
        f2: freq_2,
        f3: freq_3,
        f4: freq_4,
        f5: freq_5
    }
  end

During the code review, our technical manager told me that parallel assignment is considered bad practice.

In the Ruby style guide, it says:

Avoid using parallel assignment to define variables. Parallel assignment is allowed when it is the return of a method call, used with the splat operator, or when it is used to swap variable assignments. Parallel assignment is less readable than separate assignment.

However, I think this would mainly apply to situations where each variable is set to a different value, and my particular use case (setting all values to 0, the same "boring" value) would potentially be an exception to the readability rule.

In other words, it seems that this would underline the "equal" character of the 5 variables.

It may be a matter of opinion, but as the Ruby style guide recommends, I would love to hear what Rubyists have more experience than I have to say about it.

As well as all the refactoring opportunities!

Thanks Michael

p.s. this question was originally posted on StackOverflow, but I was told to close the question and move it to Code Review.

java – parallel sql requests using Completable future

My problem is that I have a main table and 7 child tables. I get the details by first querying the main table, then querying the child tables one by one sequentially. To improve performance, I decided to execute the seven queries in parallel using CompletableFuture.

 StoryDetail storyDetail = new StoryDetail();
      CompletableFuture iStoryDetailCompletableFuture = CompletableFuture
          .supplyAsync(() -> storyRepository.getStoryDetails(id), executorService);
      CompletableFuture> iCommentFuture = CompletableFuture
          .supplyAsync(() -> commentRepository.getComments(id), executorService);
      CompletableFuture> iImageFuture = CompletableFuture
          .supplyAsync(() -> imageRepository.getImagesByStoryId(id), executorService);

here we execute all queries sequentially

CompletableFuture.allOf(iCommentFuture, iStoryDetailCompletableFuture, iImageFuture)
      ;

and wait for everyone to finish

then set the value in the StoryDetail object

  storyDetail.setComments(iCommentFuture.get());
  storyDetail.setImages(iImageFuture.get());
  mapStoryDetail(iStoryDetailCompletableFuture.get(), storyDetail);

is the approach correct

Can two bitcoin chains coexist if copying code and running a parallel chain?

If someone forked the code without changing any of the fundamental properties and connected nodes / miners, could it not disrupt the main chain (original chain)?

thread – Best way to create a parallel process in python

So I need to create code in Python that uses parallel multiprocessing. There would be, for example, three processes running at the same time, the first would change the value of a global variable, then the second would perceive this change and start taking that value to perform a calculation, then the third process would perceive the change in this value and start taking the result of the calculation to perform a task. All this must be very unidirectional synchronous, it must operate continuously in a loop and moderately fast. For example, the third process could not happen before the second. If you have an example, thanks, I'm already familiar with Thread and multiprocess in Python, but I'm not getting the specs I need, there is always a bad loop.

javascript – Organization of parallel asynchronous promise / task tables

I'm fighting a bit for a preferred way of organizing an asynchronous task sequence that can be applied in parallel. Let's say you are analyzing data from many files. In my case, I am using javascript and promises, but that could be most languages. (Hence the strange tags like "javascript" and "language independent".)

Option A: Parallelize at the end

1) First, create the task chain for a single file / stream, for example

function readAndParseAndConvert(file) {
  return read(filename)
    .then((body) => parse(body))
    .then((parsed) => convert(parsed));
}

2) Then put it all together

Promise.all(theArrayOfFilenames.map(readAndParseAndConvert));

Option B: Parallelize each step

1) Create the steps

function readFiles(filenames) {
   return Promise.all(filenames.map((filename) => read(filename))
}

function parseBodies(bodies) {
  return Promise.all(bodies.map(body) => parse(body))
}

function convertAll(parsed) {
  return Promise.all((parsed) => convert(parsed));
}

2) Put them together

readFiles(filenames)
  .then(parseBodies)
  .then(convertAll);

In the end, this can be reported as "opinion-based", but objective thoughts? Remember that the real code would try / catch, close files, etc …

accessories – Achieving perfect parallel alignment between the subject and the sensor

For fine art photography, I want the paint to be "perfectly" parallel to my camera sensor, in order to improve sharpness and reduce the amount of post-processing in my workflow.

Are there dedicated tools to do this?

The camera grid is usually too large to get accurate results, and I tried the mirror technique (you place a mirror in the center of the illustration, if you see your lens in it from the viewfinder or screen, then you have the right alignment), but it is difficult to set up and I found the results inaccurate.

I was offered a variable tilt lens, but I think the problem of relying on my eyes in the viewfinder will be the same.

Spirit levels can help but cannot fix the horizontal angle between the two vertical planes.

I was thinking about a 4 point laser rangefinder to mount on the camera, is there something like that?

Note that I can usually move the illustration (placing it flat on the floor can work for small rooms, but not for larger ones).

dnd 5e – Line of sight, 5 foot parallel obstacle between the player and the enemy

I don't know what ability or what spell is causing this condition, but judging only by what you wrote, the player can easily hide behind the pillar – as long as the boss doesn't move.

Here's why:

Remember that players are usually Medium-sized creatures. According to the Player's Handbook, Ch. 9, "Creature Size":

Each creature takes up a different amount of space. The Size Categories
table shows how much space a creature of a particular size
controls in combat.

According to the table provided, a Medium creature (your player) controls a 5' x 5' space while fighting. Does this mean that they physically occupy an entire 5 foot square? Of course not. For a concrete example, the doors of your house or apartment are generally 32 to 36 inches wide. Pretty easy to hide behind, right?

Therefore, a PC can easily hide behind a 5-foot diameter pillar to break the LOS to the boss. Now, if they can keep this broken line of sight, it's a whole different story!

multithreading – Parallel MergeSort in C ++

I have tried to implement parallel MergeSort in C ++, which also tracks the number of comparisons made and the number of threads it uses:

#include 
#include 
#include 
#include 
#include 
#include 

int *original_array,*auxiliary_array;
std::mutex protector_of_the_global_counter;
int global_counter=0;
std::mutex protector_of_the_thread_counter;
int number_of_threads=0;


template
class Counting_Comparator {
    private:
    bool was_allocated;
    int *local_counter;
    public:
    Counting_Comparator() {
        was_allocated=true;
        local_counter=new int(0);
    }
    Counting_Comparator(int *init) {
        was_allocated=false;
        local_counter=init;
    }
    int get_count() {return *local_counter;}
    bool operator() (T first, T second) {
        (*local_counter)++;
        return first &x) {
        was_allocated=x.was_allocated;
        local_counter=x.local_counter;
    }
    ~Counting_Comparator() {
        if (was_allocated) delete local_counter;
    }
};

struct limits {
    int lower_limit,upper_limit,reccursion_depth;
};

void parallel_merge_sort(limits argument) {
    int lower_limit=argument.lower_limit;
    int upper_limit=argument.upper_limit;
    if (upper_limit-lower_limit<2) return; //An array of length less than 2 is already sorted.
    int reccursion_depth=argument.reccursion_depth;
    int middle_of_the_array=(upper_limit+lower_limit)/2;
    limits left_part={lower_limit,middle_of_the_array,reccursion_depth+1},
            right_part={middle_of_the_array,upper_limit,reccursion_depth+1};
    if (reccursion_depth comparator_functor(&local_counter);
    std::merge(original_array+lower_limit,
            original_array+middle_of_the_array,
            original_array+middle_of_the_array,
            original_array+upper_limit,
            auxiliary_array+lower_limit,
            comparator_functor);
    protector_of_the_global_counter.lock();
    global_counter+=comparator_functor.get_count();
    protector_of_the_global_counter.unlock();
    std::copy(auxiliary_array+lower_limit,
            auxiliary_array+upper_limit,
            original_array+lower_limit);
}

int main(void) {
    using std::cout;
    using std::cin;
    using std::endl;
    cout <<"Enter how many numbers you will input." <>n;
    try {
        original_array=new int(n);
        auxiliary_array=new int(n);
    }
    catch (...) {
        std::cerr <<"Not enough memory!?" <>original_array(i);
    limits entire_array={0,n,0};
    number_of_threads=1;
    clock_t processor_time=clock();
    try {
    std::thread root_of_the_reccursion(parallel_merge_sort,entire_array);
    root_of_the_reccursion.join();
    }
    catch (std::system_error error) {
        std::cerr <<"Can't create a new thread, error "" <

So what do you think?