Coverage Report

Created: 2024-12-20 00:05

/build/source/nativelink-config/src/cas_server.rs

// Copyright 2024 The NativeLink Authors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::collections::HashMap;

use serde::Deserialize;

use crate::schedulers::SchedulerSpec;
use crate::serde_utils::{
    convert_data_size_with_shellexpand, convert_duration_with_shellexpand,
    convert_numeric_with_shellexpand, convert_optional_numeric_with_shellexpand,
    convert_optional_string_with_shellexpand, convert_string_with_shellexpand,
    convert_vec_string_with_shellexpand,
};
use crate::stores::{ClientTlsConfig, ConfigDigestHashFunction, StoreRefName, StoreSpec};

/// Name of the scheduler. This type is used when referencing a
/// scheduler as a key in the `CasConfig::schedulers` map.
pub type SchedulerRefName = String;

/// Used when the config references `instance_name` in the protocol.
pub type InstanceName = String;

#[allow(non_camel_case_types)]
#[derive(Deserialize, Debug, Default, Clone, Copy)]
pub enum HttpCompressionAlgorithm {
    /// No compression.
    #[default]
    none,

    /// Gzip compression.
    gzip,
}

/// Note: Compressing data in the cloud rarely has a benefit, since most
/// cloud providers have very high bandwidth backplanes. However, for
/// clients not inside the data center, it might be a good idea to
/// compress data to and from the cloud. This will however come at a high
/// CPU and performance cost. If you are making remote execution share the
/// same CAS/AC servers as clients' remote cache, you can create multiple
/// services with different compression settings that are served on
/// different ports. Then configure the non-cloud clients to use one port
/// and cloud clients to use another.
#[derive(Deserialize, Debug, Default)]
#[serde(deny_unknown_fields)]
pub struct HttpCompressionConfig {
    /// The compression algorithm that the server will use when sending
    /// responses to clients. Enabling this will likely save a lot of
    /// data transfer, but will consume a lot of CPU and add a lot of
    /// latency.
    /// See: <https://github.com/tracemachina/nativelink/issues/109>
    ///
    /// Default: `HttpCompressionAlgorithm::none`
    pub send_compression_algorithm: Option<HttpCompressionAlgorithm>,

    /// The compression algorithm that the server will accept from clients.
    /// The server will broadcast the supported compression algorithms to
    /// clients and the client will choose which compression algorithm to
    /// use. Enabling this will likely save a lot of data transfer, but
    /// will consume a lot of CPU and add a lot of latency.
    /// See: <https://github.com/tracemachina/nativelink/issues/109>
    ///
    /// Default: {no supported compression}
    pub accepted_compression_algorithms: Vec<HttpCompressionAlgorithm>,
}
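
// Illustrative sketch (not part of the original file): how this struct might
// appear in a JSON config, using the serde field names and enum variants
// defined above. The values chosen are hypothetical.
//
// "compression": {
//     "send_compression_algorithm": "gzip",
//     "accepted_compression_algorithms": ["gzip"]
// }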

#[derive(Deserialize, Debug)]
#[serde(deny_unknown_fields)]
pub struct AcStoreConfig {
    /// The store name referenced in the `stores` map in the main config.
    /// The store name referenced here may be reused multiple times.
    #[serde(deserialize_with = "convert_string_with_shellexpand")]
    pub ac_store: StoreRefName,

    /// Whether the Action Cache store may be written to. If this is set
    /// to false, it is only possible to read from the Action Cache.
    #[serde(default)]
    pub read_only: bool,
}
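
// A minimal sketch of an Action Cache service entry as it might appear under
// `ServicesConfig::ac`, keyed by `instance_name`. The store name
// "AC_MAIN_STORE" is a hypothetical key into the top-level `stores` map.
//
// "ac": {
//     "main": {
//         "ac_store": "AC_MAIN_STORE",
//         "read_only": false
//     }
// }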

#[derive(Deserialize, Debug)]
#[serde(deny_unknown_fields)]
pub struct CasStoreConfig {
    /// The store name referenced in the `stores` map in the main config.
    /// The store name referenced here may be reused multiple times.
    #[serde(deserialize_with = "convert_string_with_shellexpand")]
    pub cas_store: StoreRefName,
}

#[derive(Deserialize, Debug, Default)]
#[serde(deny_unknown_fields)]
pub struct CapabilitiesRemoteExecutionConfig {
    /// Scheduler used to configure the capabilities of remote execution.
    #[serde(deserialize_with = "convert_string_with_shellexpand")]
    pub scheduler: SchedulerRefName,
}

#[derive(Deserialize, Debug, Default)]
#[serde(deny_unknown_fields)]
pub struct CapabilitiesConfig {
    /// Configuration for remote execution capabilities.
    /// If not set, the capabilities service will inform the client that
    /// remote execution is not supported.
    pub remote_execution: Option<CapabilitiesRemoteExecutionConfig>,
}

#[derive(Deserialize, Debug)]
#[serde(deny_unknown_fields)]
pub struct ExecutionConfig {
    /// The store name referenced in the `stores` map in the main config.
    /// The store name referenced here may be reused multiple times.
    /// This value must be a CAS store reference.
    #[serde(deserialize_with = "convert_string_with_shellexpand")]
    pub cas_store: StoreRefName,

    /// The scheduler name referenced in the `schedulers` map in the main config.
    #[serde(deserialize_with = "convert_string_with_shellexpand")]
    pub scheduler: SchedulerRefName,
}

#[derive(Deserialize, Debug, Default)]
#[serde(deny_unknown_fields)]
pub struct ByteStreamConfig {
    /// Name of the store in the "stores" configuration.
    pub cas_stores: HashMap<InstanceName, StoreRefName>,

    /// Max number of bytes to send on each grpc stream chunk.
    /// According to <https://github.com/grpc/grpc.github.io/issues/371>
    /// 16KiB - 64KiB is optimal.
    ///
    /// Default: 64KiB
    #[serde(default, deserialize_with = "convert_data_size_with_shellexpand")]
    pub max_bytes_per_stream: usize,

    /// Maximum number of bytes to decode on each grpc stream chunk.
    /// Default: 4 MiB
    #[serde(default, deserialize_with = "convert_data_size_with_shellexpand")]
    pub max_decoding_message_size: usize,

    /// In the event a client disconnects while uploading a blob, we will hold
    /// the internal stream open for this many seconds before closing it.
    /// This allows clients that disconnect to reconnect and continue uploading
    /// the same blob.
    ///
    /// Default: 10 (seconds)
    #[serde(default, deserialize_with = "convert_duration_with_shellexpand")]
    pub persist_stream_on_disconnect_timeout: usize,
}
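
// A hypothetical bytestream sketch, assuming the field names above. The
// `convert_*_with_shellexpand` deserializers also accept shell-expanded
// string values, but plain numbers are shown here to stay conservative;
// "CAS_MAIN_STORE" is a hypothetical store name.
//
// "bytestream": {
//     "cas_stores": { "main": "CAS_MAIN_STORE" },
//     "max_bytes_per_stream": 65536,
//     "max_decoding_message_size": 4194304,
//     "persist_stream_on_disconnect_timeout": 10
// }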

#[derive(Deserialize, Debug)]
#[serde(deny_unknown_fields)]
pub struct WorkerApiConfig {
    /// The scheduler name referenced in the `schedulers` map in the main config.
    #[serde(deserialize_with = "convert_string_with_shellexpand")]
    pub scheduler: SchedulerRefName,
}

#[derive(Deserialize, Debug, Default)]
#[serde(deny_unknown_fields)]
pub struct PrometheusConfig {
    /// Path to register prometheus metrics. If path is "/metrics", and your
    /// domain is "example.com", you can reach the endpoint with:
    /// <http://example.com/metrics>.
    ///
    /// Default: "/metrics"
    #[serde(default)]
    pub path: String,
}

#[derive(Deserialize, Debug, Default)]
#[serde(deny_unknown_fields)]
pub struct AdminConfig {
    /// Path to register the admin API. If path is "/admin", and your
    /// domain is "example.com", you can reach the endpoint with:
    /// <http://example.com/admin>.
    ///
    /// Default: "/admin"
    #[serde(default)]
    pub path: String,
}

#[derive(Deserialize, Debug, Default)]
#[serde(deny_unknown_fields)]
pub struct HealthConfig {
    /// Path to register the health status check. If path is "/status", and your
    /// domain is "example.com", you can reach the endpoint with:
    /// <http://example.com/status>.
    ///
    /// Default: "/status"
    #[serde(default)]
    pub path: String,
}

#[derive(Deserialize, Debug)]
pub struct BepConfig {
    /// The store to publish build events to.
    /// The store name referenced in the `stores` map in the main config.
    #[serde(deserialize_with = "convert_string_with_shellexpand")]
    pub store: StoreRefName,
}

#[derive(Deserialize, Clone, Debug, Default)]
pub struct IdentityHeaderSpec {
    /// The name of the header to look for the identity in.
    /// Default: "x-identity"
    #[serde(default, deserialize_with = "convert_optional_string_with_shellexpand")]
    pub header_name: Option<String>,

    /// Whether the header is required to be set; if it is required and
    /// missing, the request fails.
    #[serde(default)]
    pub required: bool,
}

#[derive(Deserialize, Clone, Debug)]
pub struct OriginEventsPublisherSpec {
    /// The store to publish nativelink events to.
    /// The store name referenced in the `stores` map in the main config.
    #[serde(deserialize_with = "convert_string_with_shellexpand")]
    pub store: StoreRefName,
}

#[derive(Deserialize, Clone, Debug)]
pub struct OriginEventsSpec {
    /// The publisher configuration for origin events.
    pub publisher: OriginEventsPublisherSpec,

    /// The maximum number of events to queue before applying backpressure.
    /// IMPORTANT: Backpressure causes all clients to slow down significantly.
    ///
    /// Default: 65536 (a value of zero selects this default)
    #[serde(default, deserialize_with = "convert_numeric_with_shellexpand")]
    pub max_event_queue_size: usize,
}
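
// A hypothetical origin-events sketch for `CasConfig::experimental_origin_events`,
// using the field names above; "ORIGIN_EVENTS_STORE" is an assumed store name.
//
// "experimental_origin_events": {
//     "publisher": { "store": "ORIGIN_EVENTS_STORE" },
//     "max_event_queue_size": 65536
// }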

#[derive(Deserialize, Debug)]
#[serde(deny_unknown_fields)]
pub struct ServicesConfig {
    /// The Content Addressable Storage (CAS) backend config.
    /// The key is the `instance_name` used in the protocol and the
    /// value is the underlying CAS store config.
    pub cas: Option<HashMap<InstanceName, CasStoreConfig>>,

    /// The Action Cache (AC) backend config.
    /// The key is the `instance_name` used in the protocol and the
    /// value is the underlying AC store config.
    pub ac: Option<HashMap<InstanceName, AcStoreConfig>>,

    /// The capabilities service is required in order to use most of the
    /// Bazel protocol. This service is used to provide the supported
    /// features and versions of this Bazel gRPC service.
    pub capabilities: Option<HashMap<InstanceName, CapabilitiesConfig>>,

    /// The remote execution service configuration.
    /// NOTE: This service is under development and is currently just a
    /// placeholder.
    pub execution: Option<HashMap<InstanceName, ExecutionConfig>>,

    /// This is the service used to stream data to and from the CAS.
    /// Bazel's protocol strongly encourages users to use this streaming
    /// interface to interact with the CAS when the data is large.
    pub bytestream: Option<ByteStreamConfig>,

    /// This is the service used for workers to connect and communicate
    /// through.
    /// NOTE: This service should be served on a different, non-public port.
    /// In other words, the `worker_api` configuration should not share a
    /// port with any other service. Doing so is a security risk, as workers
    /// have a different permission set than a client that makes the remote
    /// execution/cache requests.
    pub worker_api: Option<WorkerApiConfig>,

    /// Experimental - Build Event Protocol (BEP) configuration. This is
    /// the service that will consume build events from the client and
    /// publish them to a store for processing by an external service.
    pub experimental_bep: Option<BepConfig>,

    /// Experimental - Prometheus metrics configuration. Metrics are gathered
    /// as a singleton but may be served on multiple endpoints.
    pub experimental_prometheus: Option<PrometheusConfig>,

    /// This is the service for any administrative tasks.
    /// It provides a REST API endpoint for administrative purposes.
    pub admin: Option<AdminConfig>,

    /// This is the service for health status checks.
    pub health: Option<HealthConfig>,
}
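
// A condensed sketch of a `ServicesConfig` as it might appear in a config
// file, combining the service entries defined above. Instance, store, and
// scheduler names are hypothetical.
//
// "services": {
//     "cas": { "main": { "cas_store": "CAS_MAIN_STORE" } },
//     "ac": { "main": { "ac_store": "AC_MAIN_STORE" } },
//     "capabilities": { "main": { "remote_execution": { "scheduler": "MAIN_SCHEDULER" } } },
//     "execution": { "main": { "cas_store": "CAS_MAIN_STORE", "scheduler": "MAIN_SCHEDULER" } },
//     "bytestream": { "cas_stores": { "main": "CAS_MAIN_STORE" } },
//     "health": {}
// }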

#[derive(Deserialize, Debug)]
#[serde(deny_unknown_fields)]
pub struct TlsConfig {
    /// Path to the certificate file.
    #[serde(deserialize_with = "convert_string_with_shellexpand")]
    pub cert_file: String,

    /// Path to the private key file.
    #[serde(deserialize_with = "convert_string_with_shellexpand")]
    pub key_file: String,

    /// Path to the certificate authority for mTLS, if client authentication is
    /// required for this endpoint.
    #[serde(default, deserialize_with = "convert_optional_string_with_shellexpand")]
    pub client_ca_file: Option<String>,

    /// Path to the certificate revocation list for mTLS, if client
    /// authentication is required for this endpoint.
    #[serde(default, deserialize_with = "convert_optional_string_with_shellexpand")]
    pub client_crl_file: Option<String>,
}
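
// A hypothetical TLS sketch using the field names above; the file paths are
// placeholders, and the mTLS fields may be omitted when client
// authentication is not required.
//
// "tls": {
//     "cert_file": "/etc/nativelink/tls/server.crt",
//     "key_file": "/etc/nativelink/tls/server.key",
//     "client_ca_file": "/etc/nativelink/tls/client-ca.crt"
// }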

/// Advanced HTTP configurations. These generally should not be set.
/// For documentation on what each of these does, see the hyper documentation:
/// <https://docs.rs/hyper/latest/hyper/server/conn/struct.Http.html>
///
/// Note: All of these default to hyper's default values unless otherwise
/// specified.
#[derive(Deserialize, Debug, Default)]
#[serde(deny_unknown_fields)]
pub struct HttpServerConfig {
    /// Interval to send keep-alive pings via HTTP2.
    /// Note: This is in seconds.
    #[serde(
        default,
        deserialize_with = "convert_optional_numeric_with_shellexpand"
    )]
    pub http2_keep_alive_interval: Option<u32>,

    #[serde(
        default,
        deserialize_with = "convert_optional_numeric_with_shellexpand"
    )]
    pub experimental_http2_max_pending_accept_reset_streams: Option<u32>,

    #[serde(
        default,
        deserialize_with = "convert_optional_numeric_with_shellexpand"
    )]
    pub experimental_http2_initial_stream_window_size: Option<u32>,

    #[serde(
        default,
        deserialize_with = "convert_optional_numeric_with_shellexpand"
    )]
    pub experimental_http2_initial_connection_window_size: Option<u32>,

    #[serde(default)]
    pub experimental_http2_adaptive_window: Option<bool>,

    #[serde(
        default,
        deserialize_with = "convert_optional_numeric_with_shellexpand"
    )]
    pub experimental_http2_max_frame_size: Option<u32>,

    #[serde(
        default,
        deserialize_with = "convert_optional_numeric_with_shellexpand"
    )]
    pub experimental_http2_max_concurrent_streams: Option<u32>,

    /// Note: This is in seconds.
    #[serde(
        default,
        deserialize_with = "convert_optional_numeric_with_shellexpand"
    )]
    pub experimental_http2_keep_alive_timeout: Option<u32>,

    #[serde(
        default,
        deserialize_with = "convert_optional_numeric_with_shellexpand"
    )]
    pub experimental_http2_max_send_buf_size: Option<u32>,

    #[serde(default)]
    pub experimental_http2_enable_connect_protocol: Option<bool>,

    #[serde(
        default,
        deserialize_with = "convert_optional_numeric_with_shellexpand"
    )]
    pub experimental_http2_max_header_list_size: Option<u32>,
}
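
// A sketch of overriding a couple of the hyper knobs above; all omitted
// fields fall back to hyper's defaults. The values shown are hypothetical.
//
// "advanced_http": {
//     "http2_keep_alive_interval": 30,
//     "experimental_http2_keep_alive_timeout": 10
// }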

#[allow(non_camel_case_types)]
#[derive(Deserialize, Debug)]
pub enum ListenerConfig {
    /// Listener for HTTP/HTTPS/HTTP2 sockets.
    http(HttpListener),
}

#[derive(Deserialize, Debug)]
#[serde(deny_unknown_fields)]
pub struct HttpListener {
    /// Address to listen on. Example: `127.0.0.1:8080` or `:8080` to listen
    /// on all IPs.
    #[serde(deserialize_with = "convert_string_with_shellexpand")]
    pub socket_address: String,

    /// Data transport compression configuration to use for this service.
    #[serde(default)]
    pub compression: HttpCompressionConfig,

    /// Advanced HTTP server configuration.
    #[serde(default)]
    pub advanced_http: HttpServerConfig,

    /// TLS configuration for this server.
    /// If not set, the server will not use TLS.
    ///
    /// Default: None
    #[serde(default)]
    pub tls: Option<TlsConfig>,
}

#[derive(Deserialize, Debug)]
#[serde(deny_unknown_fields)]
pub struct ServerConfig {
    /// Name of the server. This is used to help identify the service
    /// for telemetry and logs.
    ///
    /// Default: {index of server in config}
    #[serde(default, deserialize_with = "convert_string_with_shellexpand")]
    pub name: String,

    /// The listener configuration for this server.
    pub listener: ListenerConfig,

    /// Services to attach to server.
    pub services: Option<ServicesConfig>,

    /// The config related to identifying the client.
    /// Default: {see `IdentityHeaderSpec`}
    #[serde(default)]
    pub experimental_identity_header: IdentityHeaderSpec,
}
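
// A minimal server sketch combining `ListenerConfig`, `HttpListener`, and
// `ServicesConfig` above. With serde's default externally tagged enum
// representation, the variant name `http` becomes the key under `listener`;
// the address and service contents are hypothetical.
//
// "servers": [{
//     "name": "public",
//     "listener": {
//         "http": { "socket_address": "0.0.0.0:50051" }
//     },
//     "services": { "health": {} }
// }]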

#[allow(non_camel_case_types)]
#[derive(Deserialize, Debug)]
pub enum WorkerProperty {
    /// List of static values.
    /// Note: Generally there should only ever be 1 value, but if the platform
    /// property key is `PropertyType::Priority` it may have more than one value.
    #[serde(deserialize_with = "convert_vec_string_with_shellexpand")]
    values(Vec<String>),

    /// A dynamic configuration. The string will be executed as a command
    /// (not shell) and the output will be split by "\n" (new line character).
    query_cmd(String),
}
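
// A sketch of `LocalWorkerConfig::platform_properties` exercising both
// variants above: a static `values` list and a dynamic `query_cmd`. The
// property names and the command are hypothetical.
//
// "platform_properties": {
//     "OSFamily": { "values": ["linux"] },
//     "cores": { "query_cmd": "nproc" }
// }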

/// Generic config for an endpoint and associated configs.
#[derive(Deserialize, Debug, Default)]
#[serde(deny_unknown_fields)]
pub struct EndpointConfig {
    /// URI of the endpoint.
    #[serde(deserialize_with = "convert_string_with_shellexpand")]
    pub uri: String,

    /// Timeout in seconds that a request is allowed to take.
    /// Default: 5 (seconds)
    pub timeout: Option<f32>,

    /// The TLS configuration to use to connect to the endpoint.
    pub tls_config: Option<ClientTlsConfig>,
}

#[allow(non_camel_case_types)]
#[derive(Copy, Clone, Deserialize, Debug, Default)]
pub enum UploadCacheResultsStrategy {
    /// Only upload action results with an exit code of 0.
    #[default]
    success_only,

    /// Don't upload any action results.
    never,

    /// Upload all action results that complete.
    everything,

    /// Only upload action results that fail.
    failures_only,
}

#[allow(non_camel_case_types)]
#[derive(Clone, Deserialize, Debug)]
pub enum EnvironmentSource {
    /// The name of the platform property in the action to get the value from.
    property(String),

    /// The raw value to set.
    value(#[serde(deserialize_with = "convert_string_with_shellexpand")] String),

    /// The max amount of time in milliseconds the command is allowed to run
    /// (requested by the client).
    timeout_millis,

    /// A special file path will be provided that can be used to communicate
    /// with the parent process about out-of-band information. This file
    /// will be read after the command has finished executing. Based on the
    /// contents of the file, the behavior of the result may be modified.
    ///
    /// The format of the file contents should be json with the following
    /// schema:
    /// {
    ///   // If set the command will be considered a failure.
    ///   // May be one of the following static strings:
    ///   // "timeout": Will consider this task to be a timeout.
    ///   "failure": "timeout",
    /// }
    ///
    /// All fields are optional; the file does not need to be created and
    /// may be empty.
    side_channel_file,

    /// A "root" directory for the action. This directory can be used to
    /// store temporary files that are not needed after the action has
    /// completed. This directory will be purged after the action has
    /// completed.
    ///
    /// For example:
    /// If an action writes temporary data to a path but nativelink should
    /// clean up this path after the job has executed, you may create any
    /// directory under the path provided in this variable. A common pattern
    /// would be to use `entrypoint` to set a shell script that reads this
    /// variable, `mkdir $ENV_VAR_NAME/tmp` and `export TMPDIR=$ENV_VAR_NAME/tmp`.
    /// Another example might be to bind-mount the `/tmp` path in a container to
    /// this path in `entrypoint`.
    action_directory,
}
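
// A sketch of `LocalWorkerConfig::additional_environment` exercising the
// variants above. Variable names are hypothetical; with serde's externally
// tagged representation, the unit variants `timeout_millis` and
// `side_channel_file` serialize as plain strings, while the payload-carrying
// variants take an object.
//
// "additional_environment": {
//     "OS": { "property": "OSFamily" },
//     "GREETING": { "value": "hello" },
//     "TIMEOUT_MS": "timeout_millis",
//     "SIDE_CHANNEL_FILE": "side_channel_file"
// }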

#[derive(Deserialize, Debug, Default)]
#[serde(deny_unknown_fields)]
pub struct UploadActionResultConfig {
    /// Underlying AC store that the worker will use to publish execution results
    /// into. Objects placed in this store should be reachable from the
    /// scheduler/client-cas after they have finished updating.
    /// Default: {No uploading is done}
    pub ac_store: Option<StoreRefName>,

    /// In which situations the results should be published to the `ac_store`.
    /// If set to `success_only`, only results with an exit code of 0 will be
    /// uploaded; if set to `everything`, all completed results will be uploaded.
    ///
    /// Default: `UploadCacheResultsStrategy::success_only`
    #[serde(default)]
    pub upload_ac_results_strategy: UploadCacheResultsStrategy,

    /// Store to upload historical results to. This should be a CAS store if set.
    ///
    /// Default: {CAS store of parent}
    pub historical_results_store: Option<StoreRefName>,

    /// In which situations the results should be published to the historical CAS.
    /// The historical CAS is where failures are published. These messages conform
    /// to the CAS key-value lookup format and are always a `HistoricalExecuteResponse`
    /// serialized message.
    ///
    /// Default: `UploadCacheResultsStrategy::failures_only`
    #[serde(default)]
    pub upload_historical_results_strategy: Option<UploadCacheResultsStrategy>,

    /// Template to use for the `ExecuteResponse.message` property. This message
    /// is attached to the response before it is sent to the client. The following
    /// special variables are supported:
    /// - `digest_function`: Digest function used to calculate the action digest.
    /// - `action_digest_hash`: Action digest hash.
    /// - `action_digest_size`: Action digest size.
    /// - `historical_results_hash`: `HistoricalExecuteResponse` digest hash.
    /// - `historical_results_size`: `HistoricalExecuteResponse` digest size.
    ///
    /// A common use case of this is to provide a link to the web page that
    /// contains more useful information for the user.
    ///
    /// An example that is fully compatible with `bb_browser` is:
    /// <https://example.com/my-instance-name-here/blobs/{digest_function}/action/{action_digest_hash}-{action_digest_size}/>
    ///
    /// Default: "" (no message)
    #[serde(default, deserialize_with = "convert_string_with_shellexpand")]
    pub success_message_template: String,

    /// Same as `success_message_template` but for the failure case.
    ///
    /// An example that is fully compatible with `bb_browser` is:
    /// <https://example.com/my-instance-name-here/blobs/{digest_function}/historical_execute_response/{historical_results_hash}-{historical_results_size}/>
    ///
    /// Default: "" (no message)
    #[serde(default, deserialize_with = "convert_string_with_shellexpand")]
    pub failure_message_template: String,
}
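
// A hypothetical upload sketch using the fields above; the store name and
// URL are placeholders, and the template variables come from the list above.
//
// "upload_action_result": {
//     "ac_store": "AC_MAIN_STORE",
//     "upload_ac_results_strategy": "everything",
//     "failure_message_template": "https://example.com/blobs/{digest_function}/historical_execute_response/{historical_results_hash}-{historical_results_size}/"
// }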

#[derive(Deserialize, Debug, Default)]
#[serde(deny_unknown_fields)]
pub struct LocalWorkerConfig {
    /// Name of the worker. This gives a more friendly name to a worker for
    /// logging and metric publishing.
    /// Default: {Index position in the workers list}
    #[serde(default, deserialize_with = "convert_string_with_shellexpand")]
    pub name: String,

    /// Endpoint on which the worker will connect to the scheduler's
    /// `WorkerApiService`.
    pub worker_api_endpoint: EndpointConfig,

    /// The maximum time an action is allowed to run. If a task requests a
    /// timeout longer than this time limit, the task will be rejected.
    /// Value in seconds.
    ///
    /// Default: 1200 (seconds / 20 mins)
    #[serde(default, deserialize_with = "convert_duration_with_shellexpand")]
    pub max_action_timeout: usize,

    /// Whether the timeout is handled in `entrypoint` or another wrapper
    /// script. If set to true, `NativeLink` will not honor the timeout the
    /// action requested and instead will always force kill the action after
    /// `max_action_timeout` has been reached. If set to false, the smaller of
    /// the action's timeout and `max_action_timeout` will be used, after which
    /// `NativeLink` will kill the action.
    ///
    /// The real timeout can be received via an environment variable set in:
    /// `EnvironmentSource::timeout_millis`.
    ///
    /// Example on where this is useful: `entrypoint` launches the action inside
    /// a docker container, but the docker container may need to be downloaded.
    /// Thus the timer should not start until the docker container has started
    /// executing the action. In this case, the action will likely be wrapped in
    /// another program, like `timeout`, and propagate timeouts via
    /// `EnvironmentSource::side_channel_file`.
    ///
    /// Default: false (`NativeLink` fully handles timeouts)
    #[serde(default)]
    pub timeout_handled_externally: bool,

    /// The command to execute on every execution request. This will be parsed as
    /// a command + arguments (not shell).
    /// Example: "run.sh" and a job with command: "sleep 5" will result in a
    /// command like: "run.sh sleep 5".
    /// Default: {Use the command from the job request}.
    #[serde(default, deserialize_with = "convert_string_with_shellexpand")]
    pub entrypoint: String,

    /// An optional script to run before every action is processed on the worker.
    /// The value should be the full path to the script to execute and will pause
    /// all actions on the worker if it returns an exit code other than 0.
    /// If not set, then the worker will never pause and will continue to accept
    /// jobs according to the scheduler configuration.
    /// This is useful, for example, if the worker should not take any more
    /// actions until there is enough resource available on the machine to
    /// handle them.
    pub experimental_precondition_script: Option<String>,

    /// Underlying CAS store that the worker will use to download CAS artifacts.
    /// This store must be a `FastSlowStore`. The `fast` store must be a
    /// `FileSystemStore` because it will use hardlinks when building out the files
    /// instead of copying the files. The slow store must eventually resolve to the
    /// same store the scheduler/client uses to send job requests.
    #[serde(deserialize_with = "convert_string_with_shellexpand")]
    pub cas_fast_slow_store: StoreRefName,

    /// Configuration for uploading action results.
    #[serde(default)]
    pub upload_action_result: UploadActionResultConfig,

    /// The directory that work jobs will be executed from. This directory will
    /// be fully managed by the worker service and will be purged on startup.
    /// This directory and the directory referenced in `local_filesystem_store_ref`'s
    /// `stores::FilesystemStore::content_path` must be on the same filesystem.
    /// Hardlinks will be used when placing files that are accessible to the jobs
    /// that are sourced from `local_filesystem_store_ref`'s `content_path`.
    #[serde(deserialize_with = "convert_string_with_shellexpand")]
    pub work_directory: String,

    /// Properties of this worker. This configuration will be sent to the scheduler
    /// and used to tell the scheduler to restrict what should be executed on this
    /// worker.
    pub platform_properties: HashMap<String, WorkerProperty>,

    /// An optional mapping of environment variable names to their sources,
    /// applied to the execution in addition to those specified in the action
    /// itself. If set, each key is exported as an environment variable before
    /// executing the job, with the value taken either from the named property
    /// of the action being executed or from the fixed value.
    pub additional_environment: Option<HashMap<String, EnvironmentSource>>,
}
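
// A minimal local worker sketch tying together the fields above; the
// endpoint, store names, and paths are hypothetical.
//
// "workers": [{
//     "local": {
//         "worker_api_endpoint": { "uri": "grpc://127.0.0.1:50061" },
//         "cas_fast_slow_store": "WORKER_FAST_SLOW_STORE",
//         "work_directory": "/tmp/nativelink/work",
//         "platform_properties": { "OSFamily": { "values": ["linux"] } }
//     }
// }]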

#[allow(non_camel_case_types)]
#[derive(Deserialize, Debug)]
pub enum WorkerConfig {
    /// A worker type that executes jobs locally on this machine.
    local(LocalWorkerConfig),
}

#[derive(Deserialize, Debug, Clone, Copy)]
#[serde(deny_unknown_fields)]
pub struct GlobalConfig {
    /// Maximum number of open files that can be opened at one time.
    /// This value is not strictly enforced, it is a best effort. Some internal
    /// libraries open files or read metadata from files in ways that do not
    /// obey this limit; however, the vast majority of cases will have this
    /// limit be honored.
    /// As a rule of thumb this value should be less than half the value of
    /// `ulimit -n`. Network file descriptors are not counted in this limit,
    /// but are counted in the kernel limit. It is a good idea to set a very
    /// large `ulimit -n`.
    /// Note: This value must be greater than 10.
    ///
    /// Default: 512
    #[serde(deserialize_with = "convert_numeric_with_shellexpand")]
    pub max_open_files: usize,

    /// If a file descriptor is idle for this many milliseconds, it will be
    /// closed. In the event a client or store takes a long time to send or
    /// receive data, the file descriptor will be closed, and since
    /// `max_open_files` blocks new `open_file` requests until a slot opens up,
    /// this allows new requests to be processed. If a read or write is
    /// attempted on a closed file descriptor, the file will be reopened and
    /// the operation will continue.
    ///
    /// On services where worker(s) and scheduler(s) live in the same process,
    /// this also prevents deadlocks if a file->file copy is happening but
    /// cannot open a new file descriptor because the limit has been reached.
    ///
    /// Default: 1000 (1 second)
    #[serde(default, deserialize_with = "convert_duration_with_shellexpand")]
    pub idle_file_descriptor_timeout_millis: u64,

    /// This flag can be used to prevent metrics from being collected at
    /// runtime. Metrics are still able to be collected, but this flag prevents
    /// metrics that are collected at runtime (performance metrics) from being
    /// tallied. The overhead of collecting metrics is very low, so this flag
    /// should only be used if there is a very good reason to disable metrics.
    /// This flag can be forcibly set using the `NATIVELINK_DISABLE_METRICS`
    /// variable. If the variable is set it will always disable metrics
    /// regardless of what this flag is set to.
    ///
    /// Default: <true (disabled) if no prometheus service enabled, false otherwise>
    #[serde(default)]
    pub disable_metrics: bool,

    /// Default hash function to use while uploading blobs to the CAS when not
    /// set by the client.
    ///
    /// Default: `ConfigDigestHashFunction::sha256`
    pub default_digest_hash_function: Option<ConfigDigestHashFunction>,

    /// Default digest size to use for health checks when running diagnostics.
    /// Health checks are expected to use this size for filling a buffer that
    /// is used for creation of the digest.
    ///
    /// Default: 1024*1024 (1MiB)
    #[serde(default, deserialize_with = "convert_data_size_with_shellexpand")]
    pub default_digest_size_health_check: usize,
}
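
// A hypothetical global sketch; note that `max_open_files` carries no
// `#[serde(default)]` above, so a value must be supplied whenever `global`
// is present.
//
// "global": {
//     "max_open_files": 512,
//     "idle_file_descriptor_timeout_millis": 1000,
//     "default_digest_hash_function": "sha256"
// }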

#[derive(Deserialize, Debug)]
#[serde(deny_unknown_fields)]
pub struct CasConfig {
    /// List of stores available to use in this config.
    /// The keys can be used in other configs when needing to reference a store.
    pub stores: HashMap<StoreRefName, StoreSpec>,

    /// Worker configurations used to execute jobs.
    pub workers: Option<Vec<WorkerConfig>>,

    /// List of schedulers available to use in this config.
    /// The keys can be used in other configs when needing to reference a
    /// scheduler.
    pub schedulers: Option<HashMap<SchedulerRefName, SchedulerSpec>>,

    /// Servers to set up for this process.
    pub servers: Vec<ServerConfig>,

    /// Experimental - Origin events configuration. This is the service that will
    /// collect and publish nativelink events to a store for processing by an
    /// external service.
    pub experimental_origin_events: Option<OriginEventsSpec>,

    /// Any global configurations that apply to all modules live here.
    pub global: Option<GlobalConfig>,
}
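
// A skeleton of a complete config as `CasConfig` might deserialize it,
// assuming a JSON loader; all names are hypothetical and the nested bodies
// are elided (see the sketches above for each section).
//
// {
//     "stores": { "CAS_MAIN_STORE": { ... } },
//     "schedulers": { "MAIN_SCHEDULER": { ... } },
//     "servers": [ { ... } ],
//     "workers": [ { ... } ]
// }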