New upstream version 248.3

Michael Biebl 2021-05-18 16:40:37 +02:00
parent 48c4d28791
commit 874c989ecb
25 changed files with 132 additions and 109 deletions


@@ -30,5 +30,4 @@ foreach file : in_files
endforeach
meson.add_install_script('sh', '-c',
'test -n "$DESTDIR" || @0@/journalctl --update-catalog'
.format(rootbindir))
'test -n "$DESTDIR" || @0@/journalctl --update-catalog'.format(rootbindir))


@@ -13,7 +13,7 @@ account sufficient pam_unix.so
account required pam_permit.so
-password sufficient pam_systemd_home.so
password sufficient pam_unix.so sha512 shadow try_first_pass try_authtok
password sufficient pam_unix.so sha512 shadow try_first_pass use_authtok
password required pam_deny.so
-session optional pam_keyinit.so revoke


@@ -50,8 +50,7 @@ if conf.get('ENABLE_HWDB') == 1
mkdir_p.format(join_paths(sysconfdir, 'udev/hwdb.d')))
meson.add_install_script('sh', '-c',
'test -n "$DESTDIR" || @0@/systemd-hwdb update'
.format(rootbindir))
'test -n "$DESTDIR" || @0@/systemd-hwdb update'.format(rootbindir))
endif
if want_tests != 'false'


@@ -70,7 +70,7 @@ foreach tuple : xsltproc.found() ? manpages : []
link = custom_target(
htmlalias,
output : htmlalias,
command : ['ln', '-fs', html, '@OUTPUT@'])
command : [ln, '-fs', html, '@OUTPUT@'])
if want_html
dst = join_paths(docdir, 'html', htmlalias)
cmd = 'ln -fs @0@ $DESTDIR@1@'.format(html, dst)
@@ -148,7 +148,7 @@ foreach tuple : xsltproc.found() ? [['systemd.directives', '7', systemd_directiv
htmlalias,
input : p2,
output : htmlalias,
command : ['ln', '-fs', html, '@OUTPUT@'])
command : [ln, '-fs', html, '@OUTPUT@'])
if want_html
dst = join_paths(docdir, 'html', htmlalias)
cmd = 'ln -fs @0@ $DESTDIR@1@'.format(html, dst)
@@ -176,15 +176,14 @@ man = custom_target(
'man',
output : 'man',
depends : man_pages,
command : ['echo'])
command : [echo])
html = custom_target(
'html',
output : 'html',
depends : html_pages,
command : ['echo'])
command : [echo])
rsync = find_program('rsync', required : false)
if rsync.found()
run_target(
'doc-sync',


@@ -318,7 +318,8 @@ account sufficient pam_unix.so
account required pam_permit.so
-password sufficient pam_systemd_home.so
password sufficient pam_unix.so sha512 shadow try_first_pass try_authtok
password sufficient pam_unix.so sha512 shadow try_first_pass use_authtok
password required pam_deny.so
-session optional pam_keyinit.so revoke


@@ -139,7 +139,7 @@ account sufficient pam_unix.so
account required pam_permit.so
<command>-password sufficient pam_systemd_home.so</command>
password sufficient pam_unix.so sha512 shadow try_first_pass try_authtok
password sufficient pam_unix.so sha512 shadow try_first_pass use_authtok
password required pam_deny.so
-session optional pam_keyinit.so revoke


@@ -133,7 +133,10 @@
<term><varname>Type=</varname></term>
<listitem>
<para>A whitespace-separated list of shell-style globs matching the device type, as exposed by
<command>networkctl status</command>. If the list is prefixed with a "!", the test is inverted.
<command>networkctl list</command>. If the list is prefixed with a "!", the test is inverted.
Some valid values are <literal>ether</literal>, <literal>loopback</literal>, <literal>wlan</literal>, <literal>wwan</literal>.
Valid types are named either from the udev <literal>DEVTYPE</literal> attribute, or
<literal>ARPHRD_</literal> macros in <filename>linux/if_arp.h</filename>, so this is not comprehensive.
</para>
</listitem>
</varlistentry>


@@ -588,17 +588,22 @@ endif
versiondep = declare_dependency(sources: version_h)
sh = find_program('sh')
echo = find_program('echo')
test = find_program('test')
sed = find_program('sed')
awk = find_program('awk')
m4 = find_program('m4')
stat = find_program('stat')
ln = find_program('ln')
git = find_program('git', required : false)
env = find_program('env')
perl = find_program('perl', required : false)
rsync = find_program('rsync', required : false)
meson_make_symlink = project_source_root + '/tools/meson-make-symlink.sh'
mkdir_p = 'mkdir -p $DESTDIR/@0@'
test_efi_create_disk_sh = find_program('test/test-efi-create-disk.sh')
mkdir_p = 'mkdir -p $DESTDIR/@0@'
splash_bmp = files('test/splash.bmp')
# if -Dxxx-path option is found, use that. Otherwise, check in $PATH,
@@ -632,7 +637,7 @@ endforeach
conf.set_quoted('TELINIT', get_option('telinit-path'))
if run_command('ln', '--relative', '--help').returncode() != 0
if run_command(ln, '--relative', '--help').returncode() != 0
error('ln does not support --relative (added in coreutils 8.16)')
endif
@@ -646,7 +651,7 @@ const char * in_word_set(const char *, @0@);
@1@
'''
gperf_snippet_format = 'echo foo,bar | @0@ -L ANSI-C'
gperf_snippet = run_command('sh', '-c', gperf_snippet_format.format(gperf.path()))
gperf_snippet = run_command(sh, '-c', gperf_snippet_format.format(gperf.path()))
gperf_test = gperf_test_format.format('size_t', gperf_snippet.stdout())
if cc.compiles(gperf_test)
gperf_len_type = 'size_t'
@@ -706,11 +711,11 @@ conf.set_quoted('DEFAULT_NET_NAMING_SCHEME', default_net_naming_scheme)
time_epoch = get_option('time-epoch')
if time_epoch == -1
time_epoch = run_command('sh', ['-c', 'echo "$SOURCE_DATE_EPOCH"']).stdout().strip()
time_epoch = run_command(sh, '-c', 'echo "$SOURCE_DATE_EPOCH"').stdout().strip()
if time_epoch == '' and git.found() and run_command('test', '-e', '.git').returncode() == 0
# If we're in a git repository, use the creation time of the latest git tag.
latest_tag = run_command('git', 'describe', '--abbrev=0', '--tags').stdout().strip()
time_epoch = run_command('git', 'log', '--no-show-signature', '-1', '--format=%at', latest_tag).stdout()
latest_tag = run_command(git, 'describe', '--abbrev=0', '--tags').stdout().strip()
time_epoch = run_command(git, 'log', '--no-show-signature', '-1', '--format=%at', latest_tag).stdout()
endif
if time_epoch == ''
NEWS = files('NEWS')
@@ -3307,7 +3312,7 @@ executable(
custom_target(
'systemd-runtest.env',
output : 'systemd-runtest.env',
command : ['sh', '-c', '{ ' +
command : [sh, '-c', '{ ' +
'echo SYSTEMD_TEST_DATA=@0@; '.format(join_paths(project_source_root, 'test')) +
'echo SYSTEMD_CATALOG_DIR=@0@; '.format(join_paths(project_build_root, 'catalog')) +
'} >@OUTPUT@'],
@@ -3541,7 +3546,7 @@ foreach tuple : sanitizers
name,
output : name,
depends : build,
command : [env, 'ln', '-fs',
command : [ln, '-fs',
join_paths(build.full_path(), b),
'@OUTPUT@'],
build_by_default : true)
@@ -3568,9 +3573,8 @@ endforeach
if git.found()
all_files = run_command(
'env', '-u', 'GIT_WORK_TREE',
git,
'--git-dir=@0@/.git'.format(project_source_root),
env, '-u', 'GIT_WORK_TREE',
git, '--git-dir=@0@/.git'.format(project_source_root),
'ls-files', ':/*.[ch]')
all_files = files(all_files.stdout().split())
@@ -3594,16 +3598,16 @@ endif
if git.found()
git_head = run_command(
git,
['--git-dir=@0@/.git'.format(project_source_root),
'rev-parse', 'HEAD']).stdout().strip()
'--git-dir=@0@/.git'.format(project_source_root),
'rev-parse', 'HEAD').stdout().strip()
git_head_short = run_command(
git,
['--git-dir=@0@/.git'.format(project_source_root),
'rev-parse', '--short=7', 'HEAD']).stdout().strip()
'--git-dir=@0@/.git'.format(project_source_root),
'rev-parse', '--short=7', 'HEAD').stdout().strip()
run_target(
'git-snapshot',
command : ['git', 'archive',
command : [git, 'archive',
'-o', '@0@/systemd-@1@.tar.gz'.format(project_source_root,
git_head_short),
'--prefix', 'systemd-@0@/'.format(git_head),
@@ -3640,7 +3644,7 @@ endif
custom_target(
'update-man-rules',
output : 'update-man-rules',
command : ['sh', '-c',
command : [sh, '-c',
'cd @0@ && '.format(meson.build_root()) +
'python3 @0@/tools/update-man-rules.py $(find @0@ -wholename "*/man/*.xml") >t && '.format(project_source_root) +
'mv t @0@/man/rules/meson.build'.format(meson.current_source_dir())],


@@ -1,5 +1,5 @@
[Match]
Type=wifi
Type=wlan
WLANInterfaceType=ad-hoc
[Network]


@@ -1,5 +1,5 @@
[Match]
Type=wifi
Type=wlan
WLANInterfaceType=ap
[Network]


@@ -1,5 +1,5 @@
[Match]
Type=wifi
Type=wlan
WLANInterfaceType=station
[Network]


@@ -669,7 +669,11 @@ static bool same_entry(uint16_t id, sd_id128_t uuid, const char *path) {
return false;
if (!sd_id128_equal(uuid, ouuid))
return false;
if (!streq_ptr(path, opath))
/* Some motherboards convert the path to uppercase under certain circumstances
* (e.g. after booting into the Boot Menu in the ASUS ROG STRIX B350-F GAMING),
* so use case-insensitive checking */
if (!strcaseeq_ptr(path, opath))
return false;
return true;
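The fix swaps streq_ptr() for strcaseeq_ptr(), i.e. a NULL-safe comparison that ignores case. A standalone sketch of what such a helper does; the name path_eq_nocase() and its body are illustrative assumptions, not systemd's implementation:

/* Sketch: NULL-safe, case-insensitive comparison in the spirit of strcaseeq_ptr(). */
#include <stdbool.h>
#include <strings.h>

static bool path_eq_nocase(const char *a, const char *b) {
        if (a == b)          /* both NULL, or literally the same pointer */
                return true;
        if (!a || !b)        /* exactly one side is NULL */
                return false;
        return strcasecmp(a, b) == 0;   /* "\EFI\BOOT" matches "\efi\boot" */
}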


@@ -72,14 +72,14 @@ if conf.get('ENABLE_EFI') == 1 and get_option('gnu-efi') != 'false'
if efi_libdir == ''
# New location first introduced with gnu-efi 3.0.11
efi_libdir = join_paths('/usr/lib/gnuefi', EFI_MACHINE_TYPE_NAME)
cmd = run_command('test', '-e', efi_libdir)
cmd = run_command(test, '-e', efi_libdir)
if cmd.returncode() != 0
# Fall back to the old approach
cmd = run_command(efi_cc + ['-print-multi-os-directory'])
if cmd.returncode() == 0
path = join_paths('/usr/lib', cmd.stdout().strip())
cmd = run_command('realpath', '-e', path)
cmd = run_command(env, 'realpath', '-e', path)
if cmd.returncode() == 0
efi_libdir = cmd.stdout().strip()
endif
@@ -120,7 +120,7 @@ if have_gnu_efi
efi_lds = ''
foreach location : efi_location_map
if efi_lds == ''
cmd = run_command('test', '-f', location[0])
cmd = run_command(test, '-f', location[0])
if cmd.returncode() == 0
efi_lds = location[0]
efi_crt0 = location[1]


@@ -475,8 +475,11 @@ int manager_varlink_init(Manager *m) {
void manager_varlink_done(Manager *m) {
assert(m);
/* Send the final message if we still have a subscribe request open. */
m->managed_oom_varlink_request = varlink_close_unref(m->managed_oom_varlink_request);
/* Explicitly close the varlink connection to oomd. Note we first take the varlink connection out of
* the manager, and only then disconnect it in two steps so that we don't end up accidentally
* unreffing it twice. After all, closing the connection might cause the disconnect handler we
* installed (vl_disconnect() above) to be called, where we will unref it too. */
varlink_close_unref(TAKE_PTR(m->managed_oom_varlink_request));
m->varlink_server = varlink_server_unref(m->varlink_server);
}
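TAKE_PTR() empties the manager's field and returns the old pointer, so by the time varlink_close_unref() (and any disconnect callback it triggers) runs, m->managed_oom_varlink_request is already NULL and cannot be unreffed a second time through the manager. A self-contained sketch of that idiom; the toy TAKE_PTR() and struct names below are assumptions for illustration, not the systemd definitions:

/* Sketch of the "steal the pointer, then close" idiom used above. */
#include <stdlib.h>

#define TAKE_PTR(p) ({ __typeof__(p) _tmp = (p); (p) = NULL; _tmp; })   /* GNU C extension */

struct conn { unsigned n_ref; };
struct manager { struct conn *request; };

static struct conn *conn_unref(struct conn *c) {
        if (c && --c->n_ref == 0)
                free(c);
        return NULL;
}

static void manager_done(struct manager *m) {
        /* m->request is NULL before conn_unref() runs, so a callback that
         * consults the manager cannot drop the same reference again. */
        conn_unref(TAKE_PTR(m->request));
}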


@@ -20,7 +20,7 @@ endif
# If you know a way that allows the same variables to be used
# in sources list and concatenated to a string for test_env,
# let me know.
kbd_model_map = join_paths(meson.current_source_dir(), 'kbd-model-map')
kbd_model_map = join_paths(meson.current_source_dir(), 'kbd-model-map')
language_fallback_map = join_paths(meson.current_source_dir(), 'language-fallback-map')
if conf.get('ENABLE_LOCALED') == 1


@@ -350,21 +350,23 @@ int dhcp4_server_configure(Link *link) {
if (link->network->dhcp_server_emit_timezone) {
_cleanup_free_ char *buffer = NULL;
const char *tz;
const char *tz = NULL;
if (link->network->dhcp_server_timezone)
tz = link->network->dhcp_server_timezone;
else {
r = get_timezone(&buffer);
if (r < 0)
return log_link_error_errno(link, r, "Failed to determine timezone: %m");
tz = buffer;
log_link_warning_errno(link, r, "Failed to determine timezone, not sending timezone: %m");
else
tz = buffer;
}
r = sd_dhcp_server_set_timezone(link->dhcp_server, tz);
if (r < 0)
return log_link_error_errno(link, r, "Failed to set timezone for DHCP server: %m");
if (tz) {
r = sd_dhcp_server_set_timezone(link->dhcp_server, tz);
if (r < 0)
return log_link_error_errno(link, r, "Failed to set timezone for DHCP server: %m");
}
}
ORDERED_HASHMAP_FOREACH(p, link->network->dhcp_server_send_options) {


@@ -42,6 +42,8 @@ static void dns_query_candidate_stop(DnsQueryCandidate *c) {
assert(c);
/* Detach all the DnsTransactions attached to this query */
while ((t = set_steal_first(c->transactions))) {
set_remove(t->notify_query_candidates, c);
set_remove(t->notify_query_candidates_done, c);
@@ -49,21 +51,34 @@ static void dns_query_candidate_stop(DnsQueryCandidate *c) {
}
}
static DnsQueryCandidate* dns_query_candidate_unlink(DnsQueryCandidate *c) {
assert(c);
/* Detach this DnsQueryCandidate from the Query and Scope objects */
if (c->query) {
LIST_REMOVE(candidates_by_query, c->query->candidates, c);
c->query = NULL;
}
if (c->scope) {
LIST_REMOVE(candidates_by_scope, c->scope->query_candidates, c);
c->scope = NULL;
}
return c;
}
static DnsQueryCandidate* dns_query_candidate_free(DnsQueryCandidate *c) {
if (!c)
return NULL;
dns_query_candidate_stop(c);
dns_query_candidate_unlink(c);
set_free(c->transactions);
dns_search_domain_unref(c->search_domain);
if (c->query)
LIST_REMOVE(candidates_by_query, c->query->candidates, c);
if (c->scope)
LIST_REMOVE(candidates_by_scope, c->scope->query_candidates, c);
return mfree(c);
}
@@ -105,6 +120,7 @@ static int dns_query_candidate_add_transaction(
int r;
assert(c);
assert(c->query); /* We shan't add transactions to a candidate that has been detached already */
if (key) {
/* Regular lookup with a resource key */
@@ -224,6 +240,7 @@ static int dns_query_candidate_setup_transactions(DnsQueryCandidate *c) {
int n = 0, r;
assert(c);
assert(c->query); /* We shan't add transactions to a candidate that has been detached already */
dns_query_candidate_stop(c);
@@ -281,6 +298,9 @@ void dns_query_candidate_notify(DnsQueryCandidate *c) {
assert(c);
if (!c->query) /* This candidate has been abandoned, do nothing. */
return;
state = dns_query_candidate_state(c);
if (DNS_TRANSACTION_IS_LIVE(state))
@@ -331,11 +351,13 @@ static void dns_query_stop(DnsQuery *q) {
dns_query_candidate_stop(c);
}
static void dns_query_unref_candidates(DnsQuery *q) {
static void dns_query_unlink_candidates(DnsQuery *q) {
assert(q);
while (q->candidates)
dns_query_candidate_unref(q->candidates);
/* Here we drop *our* references to each of the candidates. If we had the only reference, the
* DnsQueryCandidate object will be freed. */
dns_query_candidate_unref(dns_query_candidate_unlink(q->candidates));
}
static void dns_query_reset_answer(DnsQuery *q) {
@@ -365,7 +387,7 @@ DnsQuery *dns_query_free(DnsQuery *q) {
LIST_REMOVE(auxiliary_queries, q->auxiliary_for->auxiliary_queries, q);
}
dns_query_unref_candidates(q);
dns_query_unlink_candidates(q);
dns_question_unref(q->question_idna);
dns_question_unref(q->question_utf8);
@@ -1026,7 +1048,7 @@ static int dns_query_cname_redirect(DnsQuery *q, const DnsResourceRecord *cname)
dns_question_unref(q->question_utf8);
q->question_utf8 = TAKE_PTR(nq_utf8);
dns_query_unref_candidates(q);
dns_query_unlink_candidates(q);
/* Note that we do *not* reset the answer here, because the answer we previously got might already
* include everything we need, let's check that first */
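The new dns_query_candidate_unlink() separates detaching a candidate from its owning query and scope from dropping a reference to it, so an abandoned candidate is simply one with c->query == NULL, which the added asserts and the check in dns_query_candidate_notify() rely on. A self-contained sketch of that unlink-then-unref shape; all names and the simplified head-only list are invented for illustration:

/* Sketch: detach an object from its owner first, then let refcounting free it. */
#include <stdlib.h>

struct query;

struct candidate {
        unsigned n_ref;
        struct query *query;        /* back-pointer, NULL once detached */
        struct candidate *next;     /* owner's list linkage (head-only here) */
};

struct query { struct candidate *candidates; };

static struct candidate *candidate_unlink(struct candidate *c) {
        if (!c)
                return NULL;
        if (c->query) {
                c->query->candidates = c->next;   /* simplified: c is the list head */
                c->query = NULL;                  /* later code can see it is detached */
        }
        c->next = NULL;
        return c;
}

static struct candidate *candidate_unref(struct candidate *c) {
        if (c && --c->n_ref == 0)
                free(c);
        return NULL;
}

static void query_unlink_candidates(struct query *q) {
        while (q->candidates)
                /* drop only *our* reference; other holders may keep the object alive */
                candidate_unref(candidate_unlink(q->candidates));
}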


@@ -1097,18 +1097,27 @@ uint32_t manager_find_mtu(Manager *m) {
uint32_t mtu = 0;
Link *l;
/* If we don't know on which link a DNS packet would be
* delivered, let's find the largest MTU that works on all
* interfaces we know of */
/* If we don't know on which link a DNS packet would be delivered, let's find the largest MTU that
* works on all interfaces we know of that have an IP address associated */
HASHMAP_FOREACH(l, m->links) {
if (l->mtu <= 0)
/* Let's filter out links without IP addresses (e.g. AF_CAN links and suchlike) */
if (!l->addresses)
continue;
/* Safety check: MTU shorter than what we need for the absolutely shortest DNS request? Then
* let's ignore this link. */
if (l->mtu < MIN(UDP4_PACKET_HEADER_SIZE + DNS_PACKET_HEADER_SIZE,
UDP6_PACKET_HEADER_SIZE + DNS_PACKET_HEADER_SIZE))
continue;
if (mtu <= 0 || l->mtu < mtu)
mtu = l->mtu;
}
if (mtu == 0) /* found nothing? then let's assume the typical Ethernet MTU for lack of anything more precise */
return 1500;
return mtu;
}
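The "largest MTU that works on all interfaces" is the minimum of the per-link MTUs that survive the two filters, with 1500 as the fallback when no link qualifies. A small standalone illustration of that selection; the names are invented, and the 40-byte floor only roughly stands in for the minimal IPv4 + UDP + DNS header check above:

/* Sketch: conservative MTU = smallest acceptable per-link MTU, else 1500. */
#include <stdint.h>
#include <stdio.h>

static uint32_t conservative_mtu(const uint32_t *mtus, size_t n, uint32_t floor) {
        uint32_t mtu = 0;

        for (size_t i = 0; i < n; i++) {
                if (mtus[i] < floor)              /* cannot fit even a minimal DNS packet */
                        continue;
                if (mtu == 0 || mtus[i] < mtu)    /* track the smallest remaining MTU */
                        mtu = mtus[i];
        }

        return mtu == 0 ? 1500 : mtu;             /* typical Ethernet MTU as last resort */
}

int main(void) {
        uint32_t links[] = { 9000, 1500, 1492 };
        printf("%u\n", conservative_mtu(links, 3, 40));   /* prints 1492 */
        return 0;
}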
@@ -1141,15 +1150,16 @@ void manager_refresh_rrs(Manager *m) {
m->mdns_host_ipv4_key = dns_resource_key_unref(m->mdns_host_ipv4_key);
m->mdns_host_ipv6_key = dns_resource_key_unref(m->mdns_host_ipv6_key);
HASHMAP_FOREACH(l, m->links)
link_add_rrs(l, true);
if (m->mdns_support == RESOLVE_SUPPORT_YES)
HASHMAP_FOREACH(s, m->dnssd_services)
if (dnssd_update_rrs(s) < 0)
log_warning("Failed to refresh DNS-SD service '%s'", s->name);
HASHMAP_FOREACH(l, m->links) {
link_add_rrs(l, true);
HASHMAP_FOREACH(l, m->links)
link_add_rrs(l, false);
}
}
static int manager_next_random_name(const char *old, char **ret_new) {


@@ -18,7 +18,7 @@
# Some examples of DNS servers which may be used for DNS= and FallbackDNS=:
# Cloudflare: 1.1.1.1 1.0.0.1 2606:4700:4700::1111 2606:4700:4700::1001
# Google: 8.8.8.8 8.8.4.4 2001:4860:4860::8888 2001:4860:4860::8844
# Quad9: 9.9.9.9 2620:fe::fe
# Quad9: 9.9.9.9 149.112.112.112 2620:fe::fe 2620:fe::9
#DNS=
#FallbackDNS=@DNS_SERVERS@
#Domains=


@@ -576,13 +576,13 @@ static int parse_argv(int argc, char *argv[]) {
} else if (!arg_unit || !with_trigger)
return log_error_errno(SYNTHETIC_ERRNO(EINVAL), "Command line to execute required.");
if (arg_user && arg_transport != BUS_TRANSPORT_LOCAL)
if (arg_user && arg_transport == BUS_TRANSPORT_REMOTE)
return log_error_errno(SYNTHETIC_ERRNO(EINVAL),
"Execution in user context is not supported on non-local systems.");
"Execution in user context is not supported on remote systems.");
if (arg_scope && arg_transport != BUS_TRANSPORT_LOCAL)
if (arg_scope && arg_transport == BUS_TRANSPORT_REMOTE)
return log_error_errno(SYNTHETIC_ERRNO(EINVAL),
"Scope execution is not supported on non-local systems.");
"Scope execution is not supported on remote systems.");
if (arg_scope && (arg_remain_after_exit || arg_service_type))
return log_error_errno(SYNTHETIC_ERRNO(EINVAL),
@@ -1754,7 +1754,7 @@ static int run(int argc, char* argv[]) {
/* If --wait is used connect via the bus, unconditionally, as ref/unref is not supported via the limited direct
* connection */
if (arg_wait || arg_stdio != ARG_STDIO_NONE)
if (arg_wait || arg_stdio != ARG_STDIO_NONE || (arg_user && arg_transport != BUS_TRANSPORT_LOCAL))
r = bus_connect_transport(arg_transport, arg_host, arg_user, &bus);
else
r = bus_connect_transport_systemd(arg_transport, arg_host, arg_user, &bus);


@@ -1206,9 +1206,8 @@ int varlink_close(Varlink *v) {
varlink_set_state(v, VARLINK_DISCONNECTED);
/* Let's take a reference first, since varlink_detach_server() might drop the final ref from the
* disconnect callback, which would invalidate the pointer we are holding before we can call
* varlink_clear(). */
/* Let's take a reference first, since varlink_detach_server() might drop the final (dangling) ref
* which would destroy us before we can call varlink_clear() */
varlink_ref(v);
varlink_detach_server(v);
varlink_clear(v);
@@ -1221,32 +1220,15 @@ Varlink* varlink_close_unref(Varlink *v) {
if (!v)
return NULL;
/* A reference is given to us to be destroyed. But when calling varlink_close(), a callback might
* also drop a reference. We allow this, and will hold a temporary reference to the object to make
* sure that the object still exists when control returns to us. If there's just one reference
* remaining after varlink_close(), even though there were at least two right before, we'll handle
* that gracefully instead of crashing.
*
* In other words, this call drops the donated reference, but if the internal call to varlink_close()
* dropped a reference to, we don't drop the reference afain. This allows the caller to say:
* global_object->varlink = varlink_close_unref(global_object->varlink);
* even though there is some callback which has access to global_object and may drop the reference
* stored in global_object->varlink. Without this step, the same code would have to be written as:
* Varlink *t = TAKE_PTR(global_object->varlink);
* varlink_close_unref(t);
*/
/* n_ref >= 1 */
varlink_ref(v); /* n_ref >= 2 */
varlink_close(v); /* n_ref >= 1 */
if (v->n_ref > 1)
v->n_ref--; /* n_ref >= 1 */
(void) varlink_close(v);
return varlink_unref(v);
}
Varlink* varlink_flush_close_unref(Varlink *v) {
if (v)
varlink_flush(v);
if (!v)
return NULL;
(void) varlink_flush(v);
return varlink_close_unref(v);
}
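varlink_close() protects itself by taking a reference before running cleanup that may trigger callbacks, which lets the simplified varlink_close_unref() drop the donated reference with a plain unref instead of adjusting n_ref by hand. A generic, self-contained sketch of that guard pattern, with invented names:

/* Sketch: protect an object across callback-triggering cleanup, then unref. */
#include <stdlib.h>

struct obj { unsigned n_ref; };

static struct obj *obj_ref(struct obj *o) {
        if (o)
                o->n_ref++;
        return o;
}

static struct obj *obj_unref(struct obj *o) {
        if (o && --o->n_ref == 0)
                free(o);
        return NULL;
}

static void obj_close(struct obj *o) {
        if (!o)
                return;
        obj_ref(o);        /* keep ourselves alive ...                             */
        /* ... detach/cleanup here; a callback may drop its own reference to 'o' ... */
        obj_unref(o);      /* ... and only then drop the guard reference            */
}

static struct obj *obj_close_unref(struct obj *o) {
        if (!o)
                return NULL;
        obj_close(o);              /* safe even if callbacks dropped their references */
        return obj_unref(o);       /* finally release the reference donated to us     */
}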


@@ -11,7 +11,7 @@ test_hashmap_ordered_c = custom_target(
test_include_dir = include_directories('.')
path = run_command('sh', ['-c', 'echo "$PATH"']).stdout().strip()
path = run_command(sh, '-c', 'echo "$PATH"').stdout().strip()
test_env = environment()
test_env.set('SYSTEMD_KBD_MODEL_MAP', kbd_model_map)
test_env.set('SYSTEMD_LANGUAGE_FALLBACK_MAP', language_fallback_map)


@@ -22,14 +22,11 @@ sanitize_address_undefined = custom_target(
sanitizers = [['address,undefined', sanitize_address_undefined]]
if git.found()
out = run_command(
'env', '-u', 'GIT_WORK_TREE',
git,
'--git-dir=@0@/.git'.format(project_source_root),
'ls-files', ':/test/fuzz/*/*')
out = run_command(env, '-u', 'GIT_WORK_TREE',
git, '--git-dir=@0@/.git'.format(project_source_root),
'ls-files', ':/test/fuzz/*/*')
else
out = run_command(
'sh', '-c', 'ls @0@/test/fuzz/*/*'.format(project_source_root))
out = run_command(sh, '-c', 'ls @0@/test/fuzz/*/*'.format(project_source_root))
endif
fuzz_regression_tests = []


@@ -144,13 +144,12 @@ if want_tests != 'false' and dmi_arches.contains(host_machine.cpu_family())
if git.found()
out = run_command(
'env', '-u', 'GIT_WORK_TREE',
git,
'--git-dir=@0@/.git'.format(project_source_root),
env, '-u', 'GIT_WORK_TREE',
git, '--git-dir=@0@/.git'.format(project_source_root),
'ls-files', ':/test/dmidecode-dumps/*.bin')
else
out = run_command(
'sh', '-c', 'ls @0@/test/dmidecode-dumps/*.bin'.format(project_source_root))
sh, '-c', 'ls @0@/test/dmidecode-dumps/*.bin'.format(project_source_root))
endif
foreach p : out.stdout().split()


@@ -348,8 +348,7 @@ if conf.get('HAVE_SYSV_COMPAT') == 1
foreach i : [1, 2, 3, 4, 5]
meson.add_install_script(
'sh', '-c',
mkdir_p
.format(join_paths(systemunitdir, 'runlevel@0@.target.wants'.format(i))))
mkdir_p.format(join_paths(systemunitdir, 'runlevel@0@.target.wants'.format(i))))
endforeach
endif