erl_threadpool.h

    /*
     * Copyright (C) 2018 Cray Computer GmbH
     *
     * Redistribution and use in source and binary forms, with or without
     * modification, are permitted provided that the following conditions are
     * met:
     *
     * 1. Redistributions of source code must retain the above copyright
     *    notice, this list of conditions and the following disclaimer.
     *
     * 2. Redistributions in binary form must reproduce the above copyright
     *    notice, this list of conditions and the following disclaimer in the
     *    documentation and/or other materials provided with the distribution.
     *
     * 3. Neither the name of the copyright holder nor the names of its
     *    contributors may be used to endorse or promote products derived from
     *    this software without specific prior written permission.
     *
     * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
     * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
     * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
     * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
     * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
     * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
     * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
     * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
     * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
     * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
     */
    
    /* -*- mode:c -*- */
    /** @file
     ** @brief Thread pool abstraction
     **
     **/
    #ifndef ERL_THREADPOOL_H_
    #define ERL_THREADPOOL_H_ 1
    
    #include "erl_status.h"
    #include <stddef.h>
    
    struct erl_threadpool_;
    /** A threadpool handle */
    typedef struct erl_threadpool_ *erl_threadpool;
    
    /** scheduling strategy of threads in pool */
    enum erl_threadpool_type {
      ERL_SCHED_DEFAULT = 1,
      ERL_SCHED_FIFO    = 1,
      ERL_SCHED_LIFO       ,
      ERL_SCHED_RANDOM     ,
      ERL_sched__MAX
    };
    enum erl_threadpool_cpubind {
      ERL_TP_BIND_NONE        = 0, /**< no CPU binding, let threading runtime decide */
      ERL_TP_BIND_FIXED       = 1, /**< bind threads to specified cpu IDs */
      ERL_TP_BIND_NUMA_RR_1   = 2, /**< bind threads to one fixed CPU per NUMA domain, round-robin */
      ERL_TP_BIND_NUMA_RR_ALL = 3, /**< bind threads to a CPU per NUMA domain, round robin, choosing a fresh CPU each round (as long as feasible) */
      ERL_tp_cpubind__max
    };
    
    /** attributes for the threads in the threadpool */
    struct erl_threadpool_attr {
      size_t stacksize;                    /**< per-thread stack size */
      enum erl_threadpool_type type;       /**< scheduling type */
      enum erl_threadpool_cpubind bind;    /**< the desired CPU binding for threads in the pool */
      /** thread-to-CPU binding map:
       * for NONE:   ignored
       * for FIXED:  must specify a CPU ID for each of the threads (as given by the @arg numthreads argument to the @ref erl_tp_create() call)
       * for RR_1:   must specify the desired CPU ID in each NUMA domain; -1 for 'don't care'. It must therefore have as many entries as visible NUMA domains.
       * for RR_ALL: ignored
       */
      int bindings[];
    };
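
    /* Usage sketch (illustrative, not part of this header): allocating an
     * attribute block for ERL_TP_BIND_FIXED. Because @ref bindings is a
     * flexible array member, the caller sizes the allocation with one int
     * entry per thread; the thread count of 4 and the stacksize of 0
     * (assumed here to mean "use the default") are placeholder choices.
     *
     *   size_t nthreads = 4;
     *   struct erl_threadpool_attr *attr
     *       = malloc(sizeof(*attr) + nthreads * sizeof(int));
     *   attr->stacksize = 0;
     *   attr->type      = ERL_SCHED_DEFAULT;
     *   attr->bind      = ERL_TP_BIND_FIXED;
     *   for (size_t i = 0; i < nthreads; i++)
     *     attr->bindings[i] = (int)i;   // pin worker i to CPU i
     */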
    
    /** internal helpers mapping a pool thread to a CPU binding
     **
     ** Given the pool attributes, the number of NUMA domains @arg nnodes,
     ** the number of CPUs @arg ncpus and the thread index @arg tid, each
     ** variant computes the binding for one @ref erl_threadpool_cpubind
     ** mode; the _error variant backs the ERL_TP_BIND_NONE slot of the
     ** dispatch table below. */
    int
    erl__get_binding_error(const struct erl_threadpool_attr *attr, int nnodes, int ncpus, int tid);
    int
    erl__get_binding_index_fixed(const struct erl_threadpool_attr *attr, int nnodes, int ncpus, int tid);
    int
    erl__get_binding_index_rr1(const struct erl_threadpool_attr *attr, int nnodes, int ncpus, int tid);
    int
    erl__get_binding_index_rrall(const struct erl_threadpool_attr *attr, int nnodes, int ncpus, int tid);
    
    static int (*const erl__get_binding_index[ERL_tp_cpubind__max]) 
        (const struct erl_threadpool_attr *attr, int nnodes, int ncpus, int tid) = {
      erl__get_binding_error,
      erl__get_binding_index_fixed,
      erl__get_binding_index_rr1,
      erl__get_binding_index_rrall
    };
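
    /* Illustrative dispatch sketch, assuming the caller already knows the
     * NUMA domain count, CPU count and thread index (nnodes, ncpus and tid
     * below are placeholders, not defined by this header) and assuming a
     * negative return signals "no binding":
     *
     *   int cpu = erl__get_binding_index[attr->bind](attr, nnodes, ncpus, tid);
     *   if (cpu >= 0) {
     *     // bind worker thread tid to CPU 'cpu'
     *   }
     */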
    
    /** A structure describing a task to be run in a @ref erl_threadpool thread */
    typedef struct erl_tp_task_ *erl_tp_task;
    
    /** lock the task structure */
    erl_status
    erl_tp_task_lock(erl_tp_task task);
    
    /** unlock the task structure */
    erl_status
    erl_tp_task_unlock(erl_tp_task task);
    
    /** The task function: will be called with the closure specified at
     * task creation time, and the task container structure */
    typedef erl_status(*erl_tp_task_func)(void* closure,
                                          erl_tp_task task);
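
    /* A minimal function matching this signature might look as follows
     * ("my_work" and its int closure are hypothetical, for illustration
     * only):
     *
     *   static erl_status my_work(void *closure, erl_tp_task task)
     *   {
     *     int *value = closure;
     *     (*value)++;          // the actual work, operating on the closure
     *     (void)task;          // task handle available for locking, etc.
     *     return ERL_OK;
     *   }
     */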
    
    
    /** default threadpool attributes */
    #define ERL_TP_DEFAULT_ATTR NULL
    
    /** Create a new threadpool
     **
     ** Create a new threadpool with up to @arg numthreads worker threads
     ** and attributes as specified by @arg attr. Use @ref ERL_TP_DEFAULT_ATTR to
     ** request default attributes.
     **
     ** @arg attr is safe to reuse after pool creation; it is not consumed
     ** by this function.
     **/
    erl_status
    erl_tp_create(size_t numthreads, const struct erl_threadpool_attr *attr,
                  erl_threadpool *result_p);
    
    /** Destroy a threadpool
     **/
    erl_status
    erl_tp_destroy(erl_threadpool pool);
    
    
    /** Wait for completion of a job
     **
     ** Blocks the calling thread until @arg job is in ERL_TASK_COMPLETE state.
     **
     ** Upon ERL_OK return, *closure is set to the closure that was passed to
     ** the job at construction time.
     **/
    erl_status
    erl_tp_wait(erl_tp_task job, void **closure);
    
    /** construct a task */
    erl_status
    erl_tp_task_create(erl_tp_task_func function,
                       void *closure,
                       erl_tp_task *result_p);
    
    /** destroy a task
     **
     ** destroys the container only; the closure slot must be handled by the caller.
     **/
    erl_status
    erl_tp_task_destroy(erl_tp_task task);
    
    /** Submit a job */
    erl_status
    erl_tp_submit_task(erl_threadpool pool, erl_tp_task task);
    
    
    #endif /* ERL_THREADPOOL_H_ */