diff --git a/buildbot.tac b/buildbot.tac index 98055695..d98fb788 100644 --- a/buildbot.tac +++ b/buildbot.tac @@ -1,33 +1,50 @@ +# -*- python -*- +# ex: set filetype=python: import os from twisted.application import service +from twisted.python.log import FileLogObserver, ILogObserver +from twisted.python.logfile import LogFile from buildbot.master import BuildMaster -basedir = "." -log_basedir = "/var/log/buildbot/" +# This buildbot.tac file is a basis for all "autogen" masters. +# The folder structure for autogen masters is: +# +# autogen +# └── aarch64-master-0 +#    ├── buildbot.tac +#     ├── master.cfg +#     ├── master-config.yaml +#     └── master-private.cfg +# +# Thus basedir is two levels above this file's position. +buildbot_tac_dir = os.path.abspath(os.path.dirname(__file__)) +basedir = os.path.abspath(f"{buildbot_tac_dir}/../../") + +# Hard coded as it runs in containers. +# TODO(cvicentiu) this should come as an environment variable. +log_basedir = "/var/log/buildbot" rotateLength = 20000000 maxRotatedFiles = 30 -configfile = "master.cfg" +last_two_dirs = os.path.normpath(buildbot_tac_dir).split(os.sep)[-2:] +master_name = last_two_dirs[-1] +# Last two directories, e.g. autogen and aarch64-master-0. +cfg_from_basedir = last_two_dirs + ["master.cfg"] + +configfile = os.path.join(*cfg_from_basedir) # Default umask for server umask = None -# if this is a relocatable tac file, get the directory containing the TAC -if basedir == ".": - import os - - basedir = os.path.abspath(os.path.dirname(__file__)) - # note: this line is matched against to check that this is a buildmaster # directory; do not edit it. -application = service.Application('buildmaster') # fmt: skip -from twisted.python.log import FileLogObserver, ILogObserver -from twisted.python.logfile import LogFile +application = service.Application("buildmaster") # fmt: skip +# This logfile is monitored. It must end in .log. 
logfile = LogFile.fromFullPath( - os.path.join(log_basedir, "%s"), + os.path.join(log_basedir, f'{master_name}.log'), rotateLength=rotateLength, maxRotatedFiles=maxRotatedFiles, ) diff --git a/constants.py b/constants.py index fc9c165d..e2bdbdfa 100644 --- a/constants.py +++ b/constants.py @@ -21,6 +21,23 @@ "main", ] +# Branches with special prefixes that invoke a BB run. +BB_TEST_BRANCHES = [ + "bb-*", + "st-*", + "prot-*", + "refs/pull/*", + "preview-1[0-9].*", + "jpsn-*", +] + +# A list of all branches that invoke a buildbot run. +ALL_BB_TEST_BRANCHES = BRANCHES_MAIN + BB_TEST_BRANCHES + +STAGING_PROT_TEST_BRANCHES = [ + "prot-st-*", +] + # Defines what builders report status to GitHub GITHUB_STATUS_BUILDERS = [ "aarch64-macos-compile-only", diff --git a/define_masters.py b/define_masters.py index 4b62abed..264db50f 100755 --- a/define_masters.py +++ b/define_masters.py @@ -3,25 +3,31 @@ import os import shutil +from collections import defaultdict + import yaml BASE_PATH = "autogen/" config = {"private": {}} -exec(open("master-private.cfg").read(), config, {}) +with open("master-private.cfg", "r") as file: + exec(file.read(), config, {}) + +master_variables = config["private"]["master-variables"] with open("os_info.yaml", encoding="utf-8") as file: OS_INFO = yaml.safe_load(file) -platforms = {} +platforms = defaultdict(dict) for os_name in OS_INFO: if "install_only" in OS_INFO[os_name] and OS_INFO[os_name]["install_only"]: continue for arch in OS_INFO[os_name]["arch"]: - builder_name = arch + "-" + os_name - if arch not in platforms: - platforms[arch] = [] - platforms[arch].append(builder_name) + builder_name = f"{arch}-{os_name}" + platforms[arch][os_name] = { + 'image_tag': OS_INFO[os_name]['image_tag'], + 'tags': OS_INFO[os_name]['tags'] if 'tags' in OS_INFO[os_name] else [] + } # Clear old configurations if os.path.exists(BASE_PATH): @@ -33,32 +39,25 @@ # create multiple masters # "max_builds" is defined is master-private.py num_masters = ( - 
int(len(platforms[arch]) / config["private"]["master-variables"]["max_builds"]) - + 1 + int(len(platforms[arch]) / master_variables["max_builds"]) + 1 ) for master_id in range(num_masters): - dir_path = BASE_PATH + arch + "-master-" + str(master_id) + dir_path = f'{BASE_PATH}{arch}-master-{master_id}' os.makedirs(dir_path) - master_config = {} - master_config["builders"] = platforms[arch] - master_config["workers"] = config["private"]["master-variables"]["workers"][ - arch - ] - master_config["log_name"] = ( - "master-docker-" + arch + "-" + str(master_id) + ".log" - ) + master_config = { + 'builders': {arch: platforms[arch]}, + 'workers': master_variables["workers"][arch], + 'log_name': f'master-docker-{arch}-{master_id}.log' + } - with open(dir_path + "/master-config.yaml", mode="w", encoding="utf-8") as file: + with open(f"{dir_path}/master-config.yaml", mode="w", + encoding="utf-8") as file: yaml.dump(master_config, file) shutil.copyfile("master.cfg", dir_path + "/master.cfg") shutil.copyfile("master-private.cfg", dir_path + "/master-private.cfg") + shutil.copyfile("buildbot.tac", dir_path + "/buildbot.tac") - buildbot_tac = ( - open("buildbot.tac", encoding="utf-8").read() % master_config["log_name"] - ) - with open(dir_path + "/buildbot.tac", mode="w", encoding="utf-8") as f: - f.write(buildbot_tac) print(arch, len(master_config["builders"])) diff --git a/master-bintars/master.cfg b/master-bintars/master.cfg index 141fc45e..62bafa62 100644 --- a/master-bintars/master.cfg +++ b/master-bintars/master.cfg @@ -1,91 +1,39 @@ # -*- python -*- # ex: set filetype=python: -from buildbot.plugins import * -from buildbot.process.properties import Property, Properties -from buildbot.steps.shell import ShellCommand, Compile, Test, SetPropertyFromCommand -from buildbot.steps.mtrlogobserver import MTR, MtrLogObserver -from buildbot.steps.source.github import GitHub -from buildbot.process.remotecommand import RemoteCommand -from datetime import timedelta -from 
twisted.internet import defer - -import docker import os -import sys import json -sys.path.insert(0, "/srv/buildbot/master") -sys.setrecursionlimit(10000) +from buildbot.plugins import util, worker, steps -from common_factories import * -from constants import * -from locks import * -from schedulers_definition import SCHEDULERS -from utils import * -FQDN = os.environ["BUILDMASTER_WG_IP"] +from constants import SAVED_PACKAGE_BRANCHES +from utils import getSourceTarball, read_template, savePackageIfBranchMatch +from master_common import base_master_config -# This is the dictionary that the buildmaster pays attention to. We also use -# a shorter alias to save typing. -c = BuildmasterConfig = {} +cfg_dir = os.path.abspath(os.path.dirname(__file__)) + +# Non autogen master. For now the directory structure is: +# +# └── +#    ├── buildbot.tac +#     └── master.cfg +# +# Non autogen masters load from for now. +base_dir = os.path.abspath(f'{cfg_dir}/../') # Load the slave, database passwords and 3rd-party tokens from an external private file, so # that the rest of the configuration can be public. config = {"private": {}} -exec(open("../master-private.cfg").read(), config, {}) - -####### BUILDBOT SERVICES - -# 'services' is a list of BuildbotService items like reporter targets. The -# status of each build will be pushed to these targets. buildbot/reporters/*.py -# has a variety to choose from, like IRC bots. 
+with open(os.path.join(base_dir, "master-private.cfg"), "r") as file: + exec(file.read(), config, {}) -c["services"] = [] -context = util.Interpolate("buildbot/%(prop:buildername)s") -gs = reporters.GitHubStatusPush( - token=config["private"]["gh_mdbci"]["access_token"], - context=context, - startDescription="Build started.", - endDescription="Build done.", - verbose=True, - builders=GITHUB_STATUS_BUILDERS, -) -c["services"].append(gs) -c["secretsProviders"] = [ - secrets.SecretInAFile( - dirname=os.environ["MASTER_CREDENTIALS_DIR"] - ) -] - -####### PROJECT IDENTITY - -# the 'title' string will appear at the top of this buildbot installation's -# home pages (linked to the 'titleURL'). -c["title"] = os.environ["TITLE"] -c["titleURL"] = os.environ["TITLE_URL"] - -# the 'buildbotURL' string should point to the location where the buildbot's -# internal web server is visible. This typically uses the port number set in -# the 'www' entry below, but with an externally-visible host name which the -# buildbot cannot figure out without some help. -c["buildbotURL"] = os.environ["BUILDMASTER_URL"] - -# 'protocols' contains information about protocols which master will use for -# communicating with workers. You must define at least 'port' option that workers -# could connect to your master with this protocol. -# 'port' must match the value configured into the workers (with their -# --master option) -port = int(os.environ["MASTER_NONLATENT_BINTARS_WORKER_PORT"]) -c["protocols"] = {"pb": {"port": port}} - -####### DB URL - -c["db"] = { - # This specifies what database buildbot uses to store its state. - "db_url": config["private"]["db_url"] -} +# This is the dictionary that the buildmaster pays attention to. We also use +# a shorter alias to save typing. 
+c = BuildmasterConfig = base_master_config( + config, + master_port=os.environ["MASTER_NONLATENT_BINTARS_WORKER_PORT"]) mtrDbPool = util.EqConnectionPool( "MySQLdb", @@ -95,17 +43,10 @@ mtrDbPool = util.EqConnectionPool( config["private"]["db_mtr_db"], ) -####### Disable net usage reports from being sent to buildbot.net -c["buildbotNetUsageData"] = None - -####### SCHEDULERS - -# Configure the Schedulers, which decide how to react to incoming changes. -c["schedulers"] = SCHEDULERS - -####### WORKERS - +####### +# WORKERS +####### def mkWorker(name, **kwargs): return worker.Worker(name, config["private"]["worker_pass"][name], **kwargs) @@ -182,8 +123,6 @@ def getBintarFactory( kvm_image_test = kvm_image.replace("vm-centos5", "vm-centos6") - arch = "x86_64" if "amd64" in name else "i686" - # Step 1: Cleaning build directory bin_fact.addStep( steps.ShellCommand( @@ -331,7 +270,6 @@ def getBintarFactory( ####### BUILDERS LIST - c["builders"] = [] builder_definitions = { @@ -364,15 +302,3 @@ for b in builder_definitions: factory=f, ) ) - -c["logEncoding"] = "utf-8" - -c["multiMaster"] = True - -c["mq"] = { # Need to enable multimaster aware mq. Wamp is the only option for now. 
- "type": "wamp", - "router_url": os.environ["MQ_ROUTER_URL"], - "realm": "realm1", - # valid are: none, critical, error, warn, info, debug, trace - "wamp_debug_level": "info", -} diff --git a/master-config.yaml-sample b/master-config.yaml-sample index 7f9bcc32..9423d05e 100644 --- a/master-config.yaml-sample +++ b/master-config.yaml-sample @@ -1,14 +1,76 @@ builders: -- aarch64-centos-stream9 -- aarch64-debian-11 -- aarch64-debian-12 -- aarch64-debian-sid -- aarch64-rhel-8 -- aarch64-rhel-9 -- aarch64-ubuntu-2004 -- aarch64-ubuntu-2204 + aarch64: + centos-stream9: + image_tag: centos-stream9 + tags: + - release_packages + - autobake + debian-11: + image_tag: debian-11 + tags: + - release_packages + - autobake + debian-12: + image_tag: debian-12 + tags: + - release_packages + - autobake + debian-sid: + image_tag: debian-sid + tags: + - autobake + - bleeding_edge + fedora-39: + image_tag: fedora-39 + tags: + - autobake + - bleeding_edge + fedora-40: + image_tag: fedora-40 + tags: + - autobake + - bleeding_edge + fedora-41: + image_tag: fedora-41 + tags: + - autobake + - bleeding_edge + openeuler-2403: + image_tag: openeuler-2403 + tags: + - autobake + - bleeding_edge + rhel-8: + image_tag: rhel-8 + tags: + - autobake + - bleeding_edge + rhel-9: + image_tag: rhel-9 + tags: + - autobake + - bleeding_edge + ubuntu-2004: + image_tag: ubuntu-2004 + tags: + - autobake + - bleeding_edge + ubuntu-2204: + image_tag: ubuntu-2204 + tags: + - autobake + - bleeding_edge + ubuntu-2404: + image_tag: ubuntu-2404 + tags: + - autobake + - bleeding_edge + ubuntu-2410: + image_tag: ubuntu-2410 + tags: + - autobake + - bleeding_edge log_name: master-docker-aarch64-0.log -port: 9998 workers: - aarch64-bbw1 - aarch64-bbw2 diff --git a/master-docker-nonstandard-2/master.cfg b/master-docker-nonstandard-2/master.cfg index 9e46dd91..eddf9a90 100644 --- a/master-docker-nonstandard-2/master.cfg +++ b/master-docker-nonstandard-2/master.cfg @@ -1,89 +1,60 @@ # -*- python -*- # ex: set 
filetype=python: -from buildbot.plugins import * -from buildbot.process.properties import Property, Properties -from buildbot.steps.shell import ShellCommand, Compile, Test, SetPropertyFromCommand -from buildbot.steps.mtrlogobserver import MTR, MtrLogObserver -from buildbot.steps.source.github import GitHub -from buildbot.process.remotecommand import RemoteCommand -from datetime import timedelta -from twisted.internet import defer - -import docker import os -import sys - -sys.path.insert(0, "/srv/buildbot/master") -sys.setrecursionlimit(10000) - -from common_factories import * -from constants import * -from locks import * -from schedulers_definition import SCHEDULERS -from utils import * - -FQDN = os.environ["BUILDMASTER_WG_IP"] - -# This is the dictionary that the buildmaster pays attention to. We also use -# a shorter alias to save typing. -c = BuildmasterConfig = {} +from collections import defaultdict + +from buildbot.plugins import steps, util +from buildbot.process.properties import Property +from common_factories import ( + getQuickBuildFactory, + getRpmAutobakeFactory, + getSourceTarball, +) +from constants import MTR_ENV, SAVED_PACKAGE_BRANCHES +from locks import getLocks +from master_common import base_master_config +from utils import ( + canStartBuild, + createVar, + createWorker, + dockerfile, + filterBranch, + getHTMLLogString, + hasAutobake, + hasBigtest, + hasEco, + hasFailed, + moveMTRLogs, + mtrJobsMultiplier, + nextBuild, + printEnv, + read_template, + saveLogs, + savePackageIfBranchMatch, +) + +cfg_dir = os.path.abspath(os.path.dirname(__file__)) + +# Non autogen master. For now the directory structure is: +# +# └── +#    ├── buildbot.tac +#     └── master.cfg +# +# Non autogen masters load from for now. +base_dir = os.path.abspath(f"{cfg_dir}/../") # Load the slave, database passwords and 3rd-party tokens from an external private file, so # that the rest of the configuration can be public. 
config = {"private": {}} -exec(open("../master-private.cfg").read(), config, {}) - -####### BUILDBOT SERVICES - -# 'services' is a list of BuildbotService items like reporter targets. The -# status of each build will be pushed to these targets. buildbot/reporters/*.py -# has a variety to choose from, like IRC bots. - - -c["services"] = [] -context = util.Interpolate("buildbot/%(prop:buildername)s") -gs = reporters.GitHubStatusPush( - token=config["private"]["gh_mdbci"]["access_token"], - context=context, - startDescription="Build started.", - endDescription="Build done.", - verbose=True, - builders=GITHUB_STATUS_BUILDERS, -) -c["services"].append(gs) -c["secretsProviders"] = [ - secrets.SecretInAFile( - dirname=os.environ["MASTER_CREDENTIALS_DIR"] - ) -] -####### PROJECT IDENTITY +with open(os.path.join(base_dir, "master-private.cfg"), "r") as file: + exec(file.read(), config, {}) -# the 'title' string will appear at the top of this buildbot installation's -# home pages (linked to the 'titleURL'). -c["title"] = os.environ["TITLE"] -c["titleURL"] = os.environ["TITLE_URL"] -# the 'buildbotURL' string should point to the location where the buildbot's -# internal web server is visible. This typically uses the port number set in -# the 'www' entry below, but with an externally-visible host name which the -# buildbot cannot figure out without some help. -c["buildbotURL"] = os.environ["BUILDMASTER_URL"] - -# 'protocols' contains information about protocols which master will use for -# communicating with workers. You must define at least 'port' option that workers -# could connect to your master with this protocol. -# 'port' must match the value configured into the workers (with their -# --master option) -port = int(os.environ["PORT"]) -c["protocols"] = {"pb": {"port": port}} - -####### DB URL - -c["db"] = { - # This specifies what database buildbot uses to store its state. 
- "db_url": config["private"]["db_url"] -} +# This is the dictionary that the buildmaster pays attention to. We also use +# a shorter alias to save typing. +c = BuildmasterConfig = base_master_config(config) mtrDbPool = util.EqConnectionPool( "MySQLdb", @@ -93,14 +64,6 @@ mtrDbPool = util.EqConnectionPool( config["private"]["db_mtr_db"], ) -####### Disable net usage reports from being sent to buildbot.net -c["buildbotNetUsageData"] = None - -####### SCHEDULERS - -# Configure the Schedulers, which decide how to react to incoming changes. -c["schedulers"] = SCHEDULERS - ####### WORKERS # The 'workers' list defines the set of recognized workers. Each element is @@ -108,7 +71,7 @@ c["schedulers"] = SCHEDULERS # worker name and password must be configured on the worker. c["workers"] = [] -workers = {} +workers = defaultdict(list) def addWorker( @@ -120,7 +83,7 @@ def addWorker( save_packages=False, shm_size="15G", ): - name, instance = createWorker( + base_name, name, instance = createWorker( worker_name_prefix, worker_id, worker_type, @@ -130,23 +93,15 @@ def addWorker( shm_size, ) - if name[0] not in workers: - workers[name[0]] = [name[1]] - else: - workers[name[0]].append(name[1]) - + workers[base_name].append(name) c["workers"].append(instance) -# Docker workers -fqdn = os.environ["BUILDMASTER_WG_IP"] - addWorker( "amd-bbw", 1, - "-debian-12-32-bit", - os.environ["CONTAINER_REGISTRY_URL"] - + "debian12-386", + "debian-12-32-bit", + os.environ["CONTAINER_REGISTRY_URL"] + "debian12-386", jobs=10, save_packages=False, shm_size="30G", @@ -155,9 +110,8 @@ addWorker( addWorker( "amd-bbw", 2, - "-debian-12-32-bit", - os.environ["CONTAINER_REGISTRY_URL"] - + "debian12-386", + "debian-12-32-bit", + os.environ["CONTAINER_REGISTRY_URL"] + "debian12-386", jobs=10, save_packages=False, shm_size="30G", @@ -166,9 +120,8 @@ addWorker( addWorker( "apexis-bbw", 3, - "-debian-12-32-bit", - os.environ["CONTAINER_REGISTRY_URL"] - + "debian12-386", + "debian-12-32-bit", + 
os.environ["CONTAINER_REGISTRY_URL"] + "debian12-386", jobs=10, save_packages=False, shm_size="30G", @@ -177,9 +130,8 @@ addWorker( addWorker( "apexis-bbw", 3, - "-msan-clang-debian-11", - os.environ["CONTAINER_REGISTRY_URL"] - + "debian11-msan", + "msan-clang-debian-11", + os.environ["CONTAINER_REGISTRY_URL"] + "debian11-msan", jobs=20, save_packages=False, ) @@ -187,9 +139,8 @@ addWorker( addWorker( "apexis-bbw", 3, - "-ubuntu-2204-jepsen-mariadb", - os.environ["CONTAINER_REGISTRY_URL"] - + "ubuntu22.04-jepsen-mariadb", + "ubuntu-2204-jepsen-mariadb", + os.environ["CONTAINER_REGISTRY_URL"] + "ubuntu22.04-jepsen-mariadb", jobs=5, save_packages=False, ) @@ -923,7 +874,6 @@ f_eco_mysqljs.addStep( ) ) -## f_bintar f_bintar = util.BuildFactory() f_bintar.addStep(printEnv()) f_bintar.addStep( @@ -1157,15 +1107,3 @@ c["builders"].append( factory=f_jepsen_mariadb, ) ) - -c["logEncoding"] = "utf-8" - -c["multiMaster"] = True - -c["mq"] = { # Need to enable multimaster aware mq. Wamp is the only option for now. 
- "type": "wamp", - "router_url": os.environ["MQ_ROUTER_URL"], - "realm": "realm1", - # valid are: none, critical, error, warn, info, debug, trace - "wamp_debug_level": "info", -} diff --git a/master-docker-nonstandard/master.cfg b/master-docker-nonstandard/master.cfg index 83e04b34..19837c7c 100644 --- a/master-docker-nonstandard/master.cfg +++ b/master-docker-nonstandard/master.cfg @@ -1,89 +1,63 @@ # -*- python -*- # ex: set filetype=python: -from buildbot.plugins import * -from buildbot.process.properties import Property, Properties -from buildbot.steps.shell import ShellCommand, Compile, Test, SetPropertyFromCommand -from buildbot.steps.mtrlogobserver import MTR, MtrLogObserver -from buildbot.steps.source.github import GitHub -from buildbot.process.remotecommand import RemoteCommand -from datetime import timedelta -from twisted.internet import defer - -import docker import os -import sys +from collections import defaultdict -sys.path.insert(0, "/srv/buildbot/master") -sys.setrecursionlimit(10000) - -from common_factories import * -from constants import * -from locks import * -from schedulers_definition import SCHEDULERS -from utils import * +import docker +from buildbot.plugins import steps, util, worker +from buildbot.process.properties import Property +from common_factories import ( + addTests, + getHTMLLogString, + getQuickBuildFactory, + getRpmAutobakeFactory, + getSourceTarball, +) +from constants import MTR_ENV, SAVED_PACKAGE_BRANCHES +from locks import getLocks +from master_common import base_master_config +from utils import ( + canStartBuild, + createVar, + createWorker, + dockerfile, + filterBranch, + hasAutobake, + hasBigtest, + hasFailed, + moveMTRLogs, + mtrJobsMultiplier, + nextBuild, + printEnv, + saveLogs, + savePackageIfBranchMatch, +) FQDN = os.environ["BUILDMASTER_WG_IP"] -# This is the dictionary that the buildmaster pays attention to. We also use -# a shorter alias to save typing. 
-c = BuildmasterConfig = {} +cfg_dir = os.path.abspath(os.path.dirname(__file__)) + +# Non autogen master. For now the directory structure is: +# +# └── +#    ├── buildbot.tac +#     └── master.cfg +# +# Non autogen masters load from for now. +base_dir = os.path.abspath(f"{cfg_dir}/../") # Load the slave, database passwords and 3rd-party tokens from an external private file, so # that the rest of the configuration can be public. config = {"private": {}} -exec(open("../master-private.cfg").read(), config, {}) - -####### BUILDBOT SERVICES - -# 'services' is a list of BuildbotService items like reporter targets. The -# status of each build will be pushed to these targets. buildbot/reporters/*.py -# has a variety to choose from, like IRC bots. - - -c["services"] = [] -context = util.Interpolate("buildbot/%(prop:buildername)s") -gs = reporters.GitHubStatusPush( - token=config["private"]["gh_mdbci"]["access_token"], - context=context, - startDescription="Build started.", - endDescription="Build done.", - verbose=True, - builders=GITHUB_STATUS_BUILDERS, -) -c["services"].append(gs) -c["secretsProviders"] = [ - secrets.SecretInAFile( - dirname=os.environ["MASTER_CREDENTIALS_DIR"] - ) -] -####### PROJECT IDENTITY - -# the 'title' string will appear at the top of this buildbot installation's -# home pages (linked to the 'titleURL'). -c["title"] = os.environ["TITLE"] -c["titleURL"] = os.environ["TITLE_URL"] - -# the 'buildbotURL' string should point to the location where the buildbot's -# internal web server is visible. This typically uses the port number set in -# the 'www' entry below, but with an externally-visible host name which the -# buildbot cannot figure out without some help. -c["buildbotURL"] = os.environ["BUILDMASTER_URL"] - -# 'protocols' contains information about protocols which master will use for -# communicating with workers. You must define at least 'port' option that workers -# could connect to your master with this protocol. 
-# 'port' must match the value configured into the workers (with their -# --master option) -port = int(os.environ["PORT"]) -c["protocols"] = {"pb": {"port": port}} - -####### DB URL - -c["db"] = { - # This specifies what database buildbot uses to store its state. - "db_url": config["private"]["db_url"] -} +with open(os.path.join(base_dir, "master-private.cfg"), "r") as file: + exec(file.read(), config, {}) + + +# This is the dictionary that the buildmaster pays attention to. We also use +# a shorter alias to save typing. +c = BuildmasterConfig = base_master_config(config) + mtrDbPool = util.EqConnectionPool( "MySQLdb", @@ -93,14 +67,6 @@ mtrDbPool = util.EqConnectionPool( config["private"]["db_mtr_db"], ) -####### Disable net usage reports from being sent to buildbot.net -c["buildbotNetUsageData"] = None - -####### SCHEDULERS - -# Configure the Schedulers, which decide how to react to incoming changes. -c["schedulers"] = SCHEDULERS - ####### WORKERS # The 'workers' list defines the set of recognized workers. Each element is @@ -108,7 +74,7 @@ c["schedulers"] = SCHEDULERS # worker name and password must be configured on the worker. 
c["workers"] = [] -workers = {} +workers = defaultdict(list) def addWorker( @@ -120,7 +86,7 @@ def addWorker( save_packages=False, shm_size="15G", ): - name, instance = createWorker( + base_name, name, instance = createWorker( worker_name_prefix, worker_id, worker_type, @@ -130,17 +96,11 @@ def addWorker( shm_size, ) - if name[0] not in workers: - workers[name[0]] = [name[1]] - else: - workers[name[0]].append(name[1]) - + workers[base_name].append(name) c["workers"].append(instance) # Docker workers -fqdn = os.environ["BUILDMASTER_WG_IP"] - ## hz-bbw2-docker c["workers"].append( worker.DockerLatentWorker( @@ -149,7 +109,7 @@ c["workers"].append( docker_host=config["private"]["docker_workers"]["hz-bbw2-docker"], dockerfile=open("dockerfiles/eco-php-ubuntu-2004.dockerfile").read(), followStartupLogs=False, - masterFQDN=fqdn, + masterFQDN=FQDN, hostconfig={ "shm_size": "6G", "ulimits": [ @@ -172,7 +132,7 @@ c["workers"].append( "dockerfiles/eco-pymysql-python-3-9-slim-buster.dockerfile" ).read(), followStartupLogs=False, - masterFQDN=fqdn, + masterFQDN=FQDN, hostconfig={ "shm_size": "6G", "ulimits": [ @@ -193,7 +153,7 @@ c["workers"].append( docker_host=config["private"]["docker_workers"]["hz-bbw2-docker"], dockerfile=open("dockerfiles/eco-mysqljs-nodejs15-buster.dockerfile").read(), followStartupLogs=False, - masterFQDN=fqdn, + masterFQDN=FQDN, hostconfig={ "shm_size": "6G", "ulimits": [ @@ -214,12 +174,11 @@ c["workers"].append( "bm-bbw1-docker-ubuntu-2004", None, docker_host=config["private"]["docker_workers"]["bm-bbw1-docker"], - image=os.environ["CONTAINER_REGISTRY_URL"] - + "ubuntu20.04", + image=os.environ["CONTAINER_REGISTRY_URL"] + "ubuntu20.04", followStartupLogs=False, autopull=True, alwaysPull=True, - masterFQDN=fqdn, + masterFQDN=FQDN, hostconfig={ "shm_size": "20G", "ulimits": [ @@ -235,9 +194,8 @@ c["workers"].append( addWorker( "hz-bbw", 6, - "-bigtest-ubuntu-2004", - os.environ["CONTAINER_REGISTRY_URL"] - + "ubuntu20.04", + "bigtest-ubuntu-2004", + 
os.environ["CONTAINER_REGISTRY_URL"] + "ubuntu20.04", jobs=20, save_packages=False, ) @@ -247,9 +205,8 @@ for w_name in ["ppc64le-osuosl-bbw"]: addWorker( w_name, 1, - "-ubuntu-2004", - os.environ["CONTAINER_REGISTRY_URL"] - + "ubuntu20.04", + "ubuntu-2004", + os.environ["CONTAINER_REGISTRY_URL"] + "ubuntu20.04", jobs=7, save_packages=True, shm_size="20G", @@ -257,9 +214,8 @@ for w_name in ["ppc64le-osuosl-bbw"]: addWorker( w_name, 1, - "-ubuntu-2004-debug", - os.environ["CONTAINER_REGISTRY_URL"] - + "ubuntu20.04", + "ubuntu-2004-debug", + os.environ["CONTAINER_REGISTRY_URL"] + "ubuntu20.04", jobs=30, save_packages=True, ) @@ -275,36 +231,32 @@ for w_name in ["ns-x64-bbw", "apexis-bbw"]: addWorker( w_name, i, - "-aocc-debian-11", - os.environ["CONTAINER_REGISTRY_URL"] - + "debian11-aocc", + "aocc-debian-11", + os.environ["CONTAINER_REGISTRY_URL"] + "debian11-aocc", jobs=jobs, save_packages=False, ) addWorker( w_name, i, - "-asan-ubuntu-2404", - os.environ["CONTAINER_REGISTRY_URL"] - + "ubuntu24.04", + "asan-ubuntu-2404", + os.environ["CONTAINER_REGISTRY_URL"] + "ubuntu24.04", jobs=jobs, save_packages=False, ) addWorker( w_name, i, - "-icc-ubuntu-2204", - os.environ["CONTAINER_REGISTRY_URL"] - + "ubuntu22.04-icc", + "icc-ubuntu-2204", + os.environ["CONTAINER_REGISTRY_URL"] + "ubuntu22.04-icc", jobs=jobs, save_packages=False, ) addWorker( w_name, i, - "-ubuntu-2004", - os.environ["CONTAINER_REGISTRY_URL"] - + "ubuntu20.04", + "ubuntu-2004", + os.environ["CONTAINER_REGISTRY_URL"] + "ubuntu20.04", jobs=jobs, save_packages=True, ) @@ -313,27 +265,24 @@ for w_name in ["ns-x64-bbw", "apexis-bbw"]: addWorker( "amd-bbw", 1, - "-valgrind-fedora-40", - os.environ["CONTAINER_REGISTRY_URL"] - + "fedora40-valgrind", + "valgrind-fedora-40", + os.environ["CONTAINER_REGISTRY_URL"] + "fedora40-valgrind", jobs=20, save_packages=False, ) addWorker( "amd-bbw", 2, - "-valgrind-fedora-40", - os.environ["CONTAINER_REGISTRY_URL"] - + "fedora40-valgrind", + "valgrind-fedora-40", + 
os.environ["CONTAINER_REGISTRY_URL"] + "fedora40-valgrind", jobs=20, save_packages=False, ) addWorker( "hz-bbw", 6, - "-valgrind-fedora-40", - os.environ["CONTAINER_REGISTRY_URL"] - + "fedora40-valgrind", + "valgrind-fedora-40", + os.environ["CONTAINER_REGISTRY_URL"] + "fedora40-valgrind", jobs=20, save_packages=False, ) @@ -341,27 +290,24 @@ addWorker( addWorker( "hz-bbw", 1, - "-msan-clang-16-debian-11", - os.environ["CONTAINER_REGISTRY_URL"] - + "debian11-msan-clang-16", + "msan-clang-16-debian-11", + os.environ["CONTAINER_REGISTRY_URL"] + "debian11-msan-clang-16", jobs=20, save_packages=False, ) addWorker( "hz-bbw", 4, - "-msan-clang-16-debian-11", - os.environ["CONTAINER_REGISTRY_URL"] - + "debian11-msan-clang-16", + "msan-clang-16-debian-11", + os.environ["CONTAINER_REGISTRY_URL"] + "debian11-msan-clang-16", jobs=20, save_packages=False, ) addWorker( "hz-bbw", 5, - "-msan-clang-16-debian-11", - os.environ["CONTAINER_REGISTRY_URL"] - + "debian11-msan-clang-16", + "msan-clang-16-debian-11", + os.environ["CONTAINER_REGISTRY_URL"] + "debian11-msan-clang-16", jobs=30, save_packages=False, ) @@ -369,18 +315,16 @@ addWorker( addWorker( "hz-bbw", 2, - "-debian-12", - os.environ["CONTAINER_REGISTRY_URL"] - + "debian12", + "debian-12", + os.environ["CONTAINER_REGISTRY_URL"] + "debian12", jobs=20, save_packages=False, ) addWorker( "hz-bbw", 5, - "-debian-12", - os.environ["CONTAINER_REGISTRY_URL"] - + "debian12", + "debian-12", + os.environ["CONTAINER_REGISTRY_URL"] + "debian12", jobs=20, save_packages=False, ) @@ -388,18 +332,16 @@ addWorker( addWorker( "aarch64-bbw", 6, - "-ubuntu-2004-debug", - os.environ["CONTAINER_REGISTRY_URL"] - + "ubuntu20.04", + "ubuntu-2004-debug", + os.environ["CONTAINER_REGISTRY_URL"] + "ubuntu20.04", jobs=10, save_packages=True, ) addWorker( "aarch64-bbw", 6, - "-debian-10-bintar", - os.environ["CONTAINER_REGISTRY_URL"] - + "debian10-bintar", + "debian-10-bintar", + os.environ["CONTAINER_REGISTRY_URL"] + "debian10-bintar", jobs=10, 
save_packages=True, ) @@ -407,9 +349,8 @@ addWorker( addWorker( "hz-bbw", 5, - "-centos-7-bintar", - os.environ["CONTAINER_REGISTRY_URL"] - + "centos7-bintar", + "centos-7-bintar", + os.environ["CONTAINER_REGISTRY_URL"] + "centos7-bintar", jobs=10, save_packages=True, ) @@ -417,9 +358,8 @@ addWorker( addWorker( "s390x-bbw", 1, - "-ubuntu-2004-debug", - os.environ["CONTAINER_REGISTRY_URL"] - + "ubuntu20.04", + "ubuntu-2004-debug", + os.environ["CONTAINER_REGISTRY_URL"] + "ubuntu20.04", jobs=7, save_packages=False, ) @@ -427,9 +367,8 @@ addWorker( addWorker( "s390x-bbw", 2, - "-ubuntu-2004-debug", - os.environ["CONTAINER_REGISTRY_URL"] - + "ubuntu20.04", + "ubuntu-2004-debug", + os.environ["CONTAINER_REGISTRY_URL"] + "ubuntu20.04", jobs=7, save_packages=False, ) @@ -437,9 +376,8 @@ addWorker( addWorker( "s390x-bbw", 3, - "-ubuntu-2004-debug", - os.environ["CONTAINER_REGISTRY_URL"] - + "ubuntu20.04", + "ubuntu-2004-debug", + os.environ["CONTAINER_REGISTRY_URL"] + "ubuntu20.04", jobs=7, save_packages=False, ) @@ -450,8 +388,7 @@ c["workers"].append( "release-prep-docker", None, docker_host=config["private"]["docker_workers"]["release-prep-docker"], - image=os.environ["CONTAINER_REGISTRY_URL"] - + "debian12-release", + image=os.environ["CONTAINER_REGISTRY_URL"] + "debian12-release", followStartupLogs=False, autopull=True, alwaysPull=True, @@ -1784,15 +1721,3 @@ c["builders"].append( factory=f_prep_local, ) ) - -c["logEncoding"] = "utf-8" - -c["multiMaster"] = True - -c["mq"] = { # Need to enable multimaster aware mq. Wamp is the only option for now. 
- "type": "wamp", - "router_url": os.environ["MQ_ROUTER_URL"], - "realm": "realm1", - # valid are: none, critical, error, warn, info, debug, trace - "wamp_debug_level": "info", -} diff --git a/master-galera/master.cfg b/master-galera/master.cfg index e5e53b59..1563f514 100644 --- a/master-galera/master.cfg +++ b/master-galera/master.cfg @@ -1,65 +1,41 @@ # -*- python -*- # ex: set filetype=python: -from buildbot.plugins import * -from buildbot.process.properties import Property, Properties -from buildbot.steps.shell import ShellCommand, Compile, Test, SetPropertyFromCommand -from buildbot.steps.mtrlogobserver import MTR, MtrLogObserver -from buildbot.steps.source.github import GitHub -from buildbot.process.remotecommand import RemoteCommand -from twisted.internet import defer -import sys +# git branch filter using fnmatch +import fnmatch import os -import docker -from datetime import timedelta -sys.setrecursionlimit(10000) +from collections import defaultdict -sys.path.append(os.getcwd() + "/..") -from constants import * -from utils import * +from buildbot.plugins import schedulers, steps, util, worker +from constants import ALL_PLATFORMS, BUILDERS_GALERA, OS_INFO +from master_common import base_master_config +from utils import createWorker, savePackageIfBranchMatch, waitIfStaging, nextBuild -# This is the dictionary that the buildmaster pays attention to. We also use -# a shorter alias to save typing. -c = BuildmasterConfig = {} + +cfg_dir = os.path.abspath(os.path.dirname(__file__)) + +# Non autogen master. For now the directory structure is: +# +# └── +#    ├── buildbot.tac +#     └── master.cfg +# +# Non autogen masters load from for now. +base_dir = os.path.abspath(f"{cfg_dir}/../") # Load the slave, database passwords and 3rd-party tokens from an external private file, so # that the rest of the configuration can be public. 
config = {"private": {}} -exec(open("../master-private.cfg").read(), config, {}) - -FQDN = os.environ["BUILDMASTER_WG_IP"] +with open(os.path.join(base_dir, "master-private.cfg"), "r") as file: + exec(file.read(), config, {}) -####### PROJECT IDENTITY - -# the 'title' string will appear at the top of this buildbot installation's -# home pages (linked to the 'titleURL'). -c["title"] = os.environ["TITLE"] -c["titleURL"] = os.environ["TITLE_URL"] - -# the 'buildbotURL' string should point to the location where the buildbot's -# internal web server is visible. This typically uses the port number set in -# the 'www' entry below, but with an externally-visible host name which the -# buildbot cannot figure out without some help. -c["buildbotURL"] = os.environ["BUILDMASTER_URL"] - -# 'protocols' contains information about protocols which master will use for -# communicating with workers. You must define at least 'port' option that workers -# could connect to your master with this protocol. -# 'port' must match the value configured into the workers (with their -# --master option) -port = int(os.environ["PORT"]) -c["protocols"] = {"pb": {"port": port}} - -####### DB URL +# This is the dictionary that the buildmaster pays attention to. We also use +# a shorter alias to save typing. +c = BuildmasterConfig = base_master_config(config) -c["db"] = { - # This specifies what database buildbot uses to store its state. - "db_url": config["private"]["db_url"] -} +FQDN = os.environ["BUILDMASTER_WG_IP"] -####### Disable net usage reports from being sent to buildbot.net -c["buildbotNetUsageData"] = None ####### SCHEDULERS @@ -68,9 +44,6 @@ c["buildbotNetUsageData"] = None BRANCHES_MAIN = ["mariadb-3.x", "mariadb-4.x", "bb-*"] SAVED_PACKAGE_BRANCHES_GALERA = ["mariadb-3.x", "mariadb-4.x", "bb-*"] -# git branch filter using fnmatch -import fnmatch - def upstream_branch_fn(branch): return ( @@ -82,9 +55,11 @@ def upstream_branch_fn(branch): ) - +# Override schedulers. 
+# TODO(cvicentiu): Move this to base_master_config maybe? c["schedulers"] = [] + schedulerTrigger = schedulers.AnyBranchScheduler( name="s_upstream_galera", change_filter=util.ChangeFilter( @@ -154,7 +129,8 @@ c["workers"].append( # Docker workers GALERA_PACKAGES = os.environ["GALERA_PACKAGES_DIR"] -workers = {} +workers = defaultdict(list) + def addWorker( worker_name_prefix, @@ -165,7 +141,7 @@ def addWorker( save_packages=False, shm_size="15G", ): - name, instance = createWorker( + base_name, name, instance = createWorker( worker_name_prefix, worker_id, worker_type, @@ -181,11 +157,7 @@ def addWorker( ], ) - if name[0] not in workers: - workers[name[0]] = [name[1]] - else: - workers[name[0]].append(name[1]) - + workers[base_name].append(name) c["workers"].append(instance) @@ -236,7 +208,7 @@ for platform in ALL_PLATFORMS: addWorker( w_name, i, - "-" + os_name, + os_name, quay_name, jobs=jobs, save_packages=True, @@ -244,7 +216,7 @@ for platform in ALL_PLATFORMS: def dpkgDeb(): - return ShellCommand( + return steps.ShellCommand( name="apt-ftparchive", haltOnFailure=True, command=[ @@ -263,14 +235,14 @@ def dpkgDeb(): """ ), ], - doStepIf=(lambda step: - savePackageIfBranchMatch(step, - SAVED_PACKAGE_BRANCHES_GALERA)), + doStepIf=( + lambda step: savePackageIfBranchMatch(step, SAVED_PACKAGE_BRANCHES_GALERA) + ), ) def rpmSave(): - return ShellCommand( + return steps.ShellCommand( name="move rpm files", haltOnFailure=True, command=[ @@ -280,13 +252,13 @@ def rpmSave(): """set -e mkdir -p rpms srpms cp `find *.rpm -maxdepth 1 -type f` rpms - find rpms -type f -exec sha256sum {} \; | sort > sha256sums.txt + find rpms -type f -exec sha256sum {} \\; | sort > sha256sums.txt """ ), ], - doStepIf=(lambda step: - savePackageIfBranchMatch(step, - SAVED_PACKAGE_BRANCHES_GALERA)), + doStepIf=( + lambda step: savePackageIfBranchMatch(step, SAVED_PACKAGE_BRANCHES_GALERA) + ), ) @@ -368,9 +340,9 @@ EOF """, url=os.environ["ARTIFACTS_URL"], ), - doStepIf=(lambda step: - 
savePackageIfBranchMatch(step, - SAVED_PACKAGE_BRANCHES_GALERA)), + doStepIf=( + lambda step: savePackageIfBranchMatch(step, SAVED_PACKAGE_BRANCHES_GALERA) + ), ) ) f_deb_build.addStep( @@ -427,9 +399,9 @@ EOF """, url=os.environ["ARTIFACTS_URL"], ), - doStepIf=(lambda step: - savePackageIfBranchMatch(step, - SAVED_PACKAGE_BRANCHES_GALERA)), + doStepIf=( + lambda step: savePackageIfBranchMatch(step, SAVED_PACKAGE_BRANCHES_GALERA) + ), ) ) f_rpm_build.addStep( @@ -438,6 +410,7 @@ f_rpm_build.addStep( ) ) + ####### BUILDERS LIST c["builders"] = [] @@ -455,6 +428,7 @@ c["builders"].append( ) ) + for os_i in OS_INFO: if "install_only" in OS_INFO[os_i] and OS_INFO[os_i]["install_only"]: continue @@ -476,7 +450,6 @@ for os_i in OS_INFO: env = {} if os_i == "ubuntu-2004": - print("using gcc/++-10") env = {"CC": "gcc-10", "CXX": "g++-10"} c["builders"].append( @@ -490,15 +463,3 @@ for os_i in OS_INFO: factory=factory, ) ) - -c["logEncoding"] = "utf-8" - -c["multiMaster"] = True - -c["mq"] = { # Need to enable multimaster aware mq. Wamp is the only option for now. 
- "type": "wamp", - "router_url": os.environ["MQ_ROUTER_URL"], - "realm": "realm1", - # valid are: none, critical, error, warn, info, debug, trace - "wamp_debug_level": "info", -} diff --git a/master-libvirt/master.cfg b/master-libvirt/master.cfg index 5a4555e1..88205ff7 100644 --- a/master-libvirt/master.cfg +++ b/master-libvirt/master.cfg @@ -1,67 +1,38 @@ # -*- python -*- # ex: set filetype=python: -from buildbot.plugins import * -from buildbot.process.properties import Property, Properties -from buildbot.steps.shell import ShellCommand, Compile, Test, SetPropertyFromCommand -from buildbot.steps.mtrlogobserver import MTR, MtrLogObserver -from buildbot.steps.source.github import GitHub -from buildbot.process.remotecommand import RemoteCommand -from datetime import timedelta -from twisted.internet import defer - -import docker import os -import sys -sys.setrecursionlimit(10000) -sys.path.insert(0, "/srv/buildbot/master") +from buildbot.plugins import steps, util, worker +from buildbot.steps.shell import Test +from constants import BUILDERS_INSTALL, OS_INFO +from master_common import base_master_config +from utils import canStartBuild, envFromProperties, getScript, nextBuild -from utils import * -from constants import OS_INFO +cfg_dir = os.path.abspath(os.path.dirname(__file__)) -# This is the dictionary that the buildmaster pays attention to. We also use -# a shorter alias to save typing. -c = BuildmasterConfig = {} +# Non autogen master. For now the directory structure is: +# +# └── +#    ├── buildbot.tac +#     └── master.cfg +# +# Non autogen masters load from for now. +base_dir = os.path.abspath(f"{cfg_dir}/../") # Load the slave, database passwords and 3rd-party tokens from an external private file, so # that the rest of the configuration can be public. 
config = {"private": {}} -exec(open("../master-private.cfg").read(), config, {}) +with open(os.path.join(base_dir, "master-private.cfg"), "r") as file: + exec(file.read(), config, {}) -####### PROJECT IDENTITY +# This is the dictionary that the buildmaster pays attention to. We also use +# a shorter alias to save typing. +c = BuildmasterConfig = base_master_config(config) -# the 'title' string will appear at the top of this buildbot installation's -# home pages (linked to the 'titleURL'). -c["title"] = os.environ["TITLE"] -c["titleURL"] = os.environ["TITLE_URL"] artifactsURL = os.environ["ARTIFACTS_URL"] -# the 'buildbotURL' string should point to the location where the buildbot's -# internal web server is visible. This typically uses the port number set in -# the 'www' entry below, but with an externally-visible host name which the -# buildbot cannot figure out without some help. -c["buildbotURL"] = os.environ["BUILDMASTER_URL"] - -# 'protocols' contains information about protocols which master will use for -# communicating with workers. You must define at least 'port' option that workers -# could connect to your master with this protocol. -# 'port' must match the value configured into the workers (with their -# --master option) -port = int(os.environ["PORT"]) -c["protocols"] = {"pb": {"port": port}} - -####### DB URL - -c["db"] = { - # This specifies what database buildbot uses to store its state. 
- "db_url": config["private"]["db_url"] -} - -####### Disable net usage reports from being sent to buildbot.net -c["buildbotNetUsageData"] = None - ####### UTILS def getRpmUpgradeStep(): @@ -120,6 +91,7 @@ def getRpmInstallStep(): command=["./rpm-install.sh"], ) + def getDebUpgradeStep(): return Test( name="upgrade", @@ -185,6 +157,7 @@ def getMajorVersionStep(): ), ) + def getPAMTestStep(): return Test( name="PAM authentication test", @@ -199,34 +172,31 @@ def getPAMTestStep(): command=["./pam-test.sh"], ) -# FACTORY -## f_deb_install +# FACTORY f_deb_install = util.BuildFactory() f_deb_install.addStep(getScript("deb-install.sh")) f_deb_install.addStep(getDebInstallStep()) f_deb_install.addStep(getScript("pam-test.sh")) f_deb_install.addStep(getPAMTestStep()) -## f_deb_upgrade f_deb_upgrade = util.BuildFactory() f_deb_upgrade.addStep(getMajorVersionStep()) f_deb_upgrade.addStep(getScript("deb-upgrade.sh")) f_deb_upgrade.addStep(getDebUpgradeStep()) -## f_rpm_install f_rpm_install = util.BuildFactory() f_rpm_install.addStep(getScript("rpm-install.sh")) f_rpm_install.addStep(getRpmInstallStep()) f_rpm_install.addStep(getScript("pam-test.sh")) f_rpm_install.addStep(getPAMTestStep()) -## f_rpm_upgrade f_rpm_upgrade = util.BuildFactory() f_rpm_upgrade.addStep(getMajorVersionStep()) f_rpm_upgrade.addStep(getScript("rpm-upgrade.sh")) f_rpm_upgrade.addStep(getRpmUpgradeStep()) + ####### WORKERS and BUILDERS # The 'workers' list defines the set of recognized workers. Each element is @@ -274,7 +244,9 @@ for builder_name in BUILDERS_INSTALL: elif builder_type == "rpm": factory_install = f_rpm_install factory_upgrade = f_rpm_upgrade - build_arch = os_name + str(OS_INFO[os_info_name]["version_name"]) + "-" + platform + build_arch = ( + os_name + str(OS_INFO[os_info_name]["version_name"]) + "-" + platform + ) # FIXME - all RPM's should follow the same conventions! 
if os_name == "centos" and OS_INFO[os_info_name]["version_name"] >= 9: @@ -282,7 +254,6 @@ for builder_name in BUILDERS_INSTALL: platform = "x86_64" build_arch = f"centos/{OS_INFO[os_info_name]['version_name']}/{platform}" - c["builders"].append( util.BuilderConfig( name=builder_name, @@ -379,15 +350,3 @@ for builder_name in BUILDERS_INSTALL: factory=factory_upgrade, ) ) - -c["logEncoding"] = "utf-8" - -c["multiMaster"] = True - -c["mq"] = { # Need to enable multimaster aware mq. Wamp is the only option for now. - "type": "wamp", - "router_url": os.environ["MQ_ROUTER_URL"], - "realm": "realm1", - # valid are: none, critical, error, warn, info, debug, trace - "wamp_debug_level": "info", -} diff --git a/master-nonlatent/master.cfg b/master-nonlatent/master.cfg index b9f2cbf7..85e07f80 100644 --- a/master-nonlatent/master.cfg +++ b/master-nonlatent/master.cfg @@ -1,66 +1,51 @@ # -*- python -*- # ex: set filetype=python: -from buildbot.plugins import * -from buildbot.process.properties import Property, Properties -from buildbot.steps.shell import ShellCommand, Compile, Test, SetPropertyFromCommand -from buildbot.steps.mtrlogobserver import MTR, MtrLogObserver -from buildbot.steps.source.github import GitHub -from buildbot.process.remotecommand import RemoteCommand -from datetime import timedelta -from twisted.internet import defer - -import docker import os -import sys - -sys.setrecursionlimit(10000) +from datetime import timedelta -sys.path.insert(0, "/srv/buildbot/master") -from common_factories import * -from locks import * -from schedulers_definition import SCHEDULERS -from utils import * +from buildbot.plugins import steps, util, worker +from buildbot.process.properties import Property +from common_factories import addWinTests, getQuickBuildFactory +from constants import MTR_ENV, SAVED_PACKAGE_BRANCHES +from master_common import base_master_config +from utils import ( + canStartBuild, + createVar, + getHTMLLogString, + getSourceTarball, + hasFailed, + ls2list, + 
moveMTRLogs, + mtrJobsMultiplier, + nextBuild, + prioritizeBuilders, + savePackageIfBranchMatch, +) ####### VARIABLES vsWarningPattern = "^.*: warning C.*$" -# This is the dictionary that the buildmaster pays attention to. We also use -# a shorter alias to save typing. -c = BuildmasterConfig = {} +cfg_dir = os.path.abspath(os.path.dirname(__file__)) + +# Non autogen master. For now the directory structure is: +# +# └── +#    ├── buildbot.tac +#     └── master.cfg +# +# Non autogen masters load from for now. +base_dir = os.path.abspath(f"{cfg_dir}/../") # Load the slave, database passwords and 3rd-party tokens from an external private file, so # that the rest of the configuration can be public. config = {"private": {}} -exec(open("../master-private.cfg").read(), config, {}) - -####### PROJECT IDENTITY - -# the 'title' string will appear at the top of this buildbot installation's -# home pages (linked to the 'titleURL'). -c["title"] = os.environ["TITLE"] -c["titleURL"] = os.environ["TITLE_URL"] +with open(os.path.join(base_dir, "master-private.cfg"), "r") as file: + exec(file.read(), config, {}) -# the 'buildbotURL' string should point to the location where the buildbot's -# internal web server is visible. This typically uses the port number set in -# the 'www' entry below, but with an externally-visible host name which the -# buildbot cannot figure out without some help. -c["buildbotURL"] = os.environ["BUILDMASTER_URL"] - -# 'protocols' contains information about protocols which master will use for -# communicating with workers. You must define at least 'port' option that workers -# could connect to your master with this protocol. -# 'port' must match the value configured into the workers (with their -# --master option) -port = int(os.environ["PORT"]) -c["protocols"] = {"pb": {"port": port}} - -####### DB URL - -c["db"] = { - # This specifies what database buildbot uses to store its state. 
- "db_url": config["private"]["db_url"] -} +# This is the dictionary that the buildmaster pays attention to. We also use +# a shorter alias to save typing. +c = BuildmasterConfig = base_master_config(config) mtrDbPool = util.EqConnectionPool( "MySQLdb", @@ -70,33 +55,10 @@ mtrDbPool = util.EqConnectionPool( config["private"]["db_mtr_db"], ) -####### Disable net usage reports from being sent to buildbot.net -c["buildbotNetUsageData"] = None -####### Services -c["services"] = [] -context = util.Interpolate("buildbot/%(prop:buildername)s") -gs = reporters.GitHubStatusPush( - token=config["private"]["gh_mdbci"]["access_token"], - context=context, - startDescription="Build started.", - endDescription="Build done.", - verbose=True, - builders=GITHUB_STATUS_BUILDERS, -) -c["services"].append(gs) -c["secretsProviders"] = [ - secrets.SecretInAFile( - dirname=os.environ["MASTER_CREDENTIALS_DIR"] - ) -] ####### Builder priority c["prioritizeBuilders"] = prioritizeBuilders -####### SCHEDULERS - -# Configure the Schedulers, which decide how to react to incoming changes. -c["schedulers"] = SCHEDULERS ####### WORKERS @@ -163,9 +125,6 @@ f_windows_env = { } f_windows_env.update(MTR_ENV) -# f_quick_build = getQuickBuildFactory(mtrDbPool) -f_rpm_autobake = getRpmAutobakeFactory(mtrDbPool) - ## f_windows f_windows = util.BuildFactory() f_windows.addStep( @@ -244,7 +203,7 @@ f_windows.addStep( "dojob", '"', util.Interpolate( - '"C:\Program Files (x86)\Microsoft Visual Studio\\2022\BuildTools\Common7\Tools\VsDevCmd.bat" -arch=%(kw:arch)s && cmake . -A %(kw:arch_cmake)s -DPLUGIN_ROCKSDB=NO -DMYSQL_MAINTAINER_MODE=ERR -Wno-dev', + '"C:\\Program Files (x86)\\Microsoft Visual Studio\\2022\\BuildTools\\Common7\\Tools\\VsDevCmd.bat" -arch=%(kw:arch)s && cmake . 
-A %(kw:arch_cmake)s -DPLUGIN_ROCKSDB=NO -DMYSQL_MAINTAINER_MODE=ERR -Wno-dev', arch=util.Property("arch", default="x64"), arch_cmake=util.Property("arch_cmake", default="x64"), ), @@ -261,7 +220,7 @@ f_windows.addStep( "dojob", '"', util.Interpolate( - '"C:\Program Files (x86)\Microsoft Visual Studio\\2022\BuildTools\Common7\Tools\VsDevCmd.bat" -arch=%(kw:arch)s && cmake --build . --verbose --config Debug -- -m', + '"C:\\Program Files (x86)\\Microsoft Visual Studio\\2022\\BuildTools\\Common7\\Tools\\VsDevCmd.bat" -arch=%(kw:arch)s && cmake --build . --verbose --config Debug -- -m', arch=util.Property("arch", default="x64"), ), '"', @@ -302,14 +261,8 @@ f_windows.addStep( + "%(prop:buildername)s" ), url=util.Interpolate( - f'{os.environ["ARTIFACTS_URL"]}' - "/" - "%(prop:tarbuildnum)s" - "/" - "logs" - "/" - "%(prop:buildername)s" - "/" + os.environ["ARTIFACTS_URL"], + "/" "%(prop:tarbuildnum)s" "/" "logs" "/" "%(prop:buildername)s" "/", ), ) ) @@ -322,7 +275,7 @@ f_windows.addStep( "powershell", "-command", "Remove-Item", - '"$pwd\*"', + '"$pwd\\*"', "-Recurse", "-Force", '"', @@ -372,7 +325,7 @@ f_windows_msi.addStep( "powershell", "-command", "Remove-Item", - '"$pwd\*"', + '"$pwd\\*"', "-Recurse", "-Force", '"', @@ -424,7 +377,7 @@ f_windows_msi.addStep( "dojob", '"', util.Interpolate( - '"C:\Program Files (x86)\Microsoft Visual Studio\\2022\BuildTools\Common7\Tools\VsDevCmd.bat" -arch=%(kw:arch)s && cmake . -G "Visual Studio 17 2022" -A %(kw:arch_cmake)s -DBUILD_CONFIG=mysql_release -DWITH_THIRD_PARTY=HeidiSQL -DWITH_EMBEDDED_SERVER=0 -DSIGNCODE=ON -DSIGNTOOL_PARAMETERS="/fd;SHA256;/a;/t;http://timestamp.globalsign.com/?signature=sha2" -DWITH_UNIT_TESTS=0 -DMYSQL_MAINTAINER_MODE=ERR', + '"C:\\Program Files (x86)\\Microsoft Visual Studio\\2022\\BuildTools\\Common7\\Tools\\VsDevCmd.bat" -arch=%(kw:arch)s && cmake . 
-G "Visual Studio 17 2022" -A %(kw:arch_cmake)s -DBUILD_CONFIG=mysql_release -DWITH_THIRD_PARTY=HeidiSQL -DWITH_EMBEDDED_SERVER=0 -DSIGNCODE=ON -DSIGNTOOL_PARAMETERS="/fd;SHA256;/a;/t;http://timestamp.globalsign.com/?signature=sha2" -DWITH_UNIT_TESTS=0 -DMYSQL_MAINTAINER_MODE=ERR', arch=util.Property("arch", default="x64"), arch_cmake=util.Property("arch_cmake", default="x64"), ), @@ -441,7 +394,7 @@ f_windows_msi.addStep( "dojob", '"', util.Interpolate( - '"C:\Program Files (x86)\Microsoft Visual Studio\\2022\BuildTools\Common7\Tools\VsDevCmd.bat" -arch=%(kw:arch)s && cmake --build . --verbose --config RelWithDebInfo -- -m', + '"C:\\Program Files (x86)\\Microsoft Visual Studio\\2022\\BuildTools\\Common7\\Tools\\VsDevCmd.bat" -arch=%(kw:arch)s && cmake --build . --verbose --config RelWithDebInfo -- -m', arch=util.Property("arch", default="x64"), ), '"', @@ -458,7 +411,7 @@ f_windows_msi.addStep( "dojob", '"', util.Interpolate( - '"C:\Program Files (x86)\Microsoft Visual Studio\\2022\BuildTools\Common7\Tools\VsDevCmd.bat" -arch=%(kw:arch)s && cmake --build . --config RelWithDebInfo --target win_package && cmake --build . --config RelWithDebInfo --target MSI', + '"C:\\Program Files (x86)\\Microsoft Visual Studio\\2022\\BuildTools\\Common7\\Tools\\VsDevCmd.bat" -arch=%(kw:arch)s && cmake --build . --config RelWithDebInfo --target win_package && cmake --build . 
--config RelWithDebInfo --target MSI', arch=util.Property("arch", default="x64"), ), '"', @@ -516,7 +469,7 @@ f_windows_msi.addStep( command=[ "powershell", "-command", - 'Get-ChildItem .\* -Include @("*.msi", "*.zip") | Get-FileHash | Select-Object Hash, @{Name="Name";Expression={[System.IO.Path]::GetFileName($_.Path)}} | Format-Table -HideTableHeaders | Out-File sha256sums.txt', + 'Get-ChildItem .\\* -Include @("*.msi", "*.zip") | Get-FileHash | Select-Object Hash, @{Name="Name";Expression={[System.IO.Path]::GetFileName($_.Path)}} | Format-Table -HideTableHeaders | Out-File sha256sums.txt', ], ) ) @@ -578,7 +531,7 @@ f_windows_msi.addStep( "powershell", "-command", "Remove-Item", - '"$pwd\*"', + '"$pwd\\*"', "-Recurse", "-Force", '"', @@ -693,9 +646,7 @@ f_dockerlibrary.addStep( f_dockerlibrary.addStep( steps.ShellCommand( name="building MariaDB docker library test image", - env={ - "ARTIFACTS_URL": os.environ["ARTIFACTS_URL"] - }, + env={"ARTIFACTS_URL": os.environ["ARTIFACTS_URL"]}, command=[ "bash", "-xc", @@ -954,7 +905,7 @@ def get_macos_factory(compile_only=False): util.Interpolate( """ cd mysql-test && - exec perl mysql-test-run.pl --verbose-restart --force --retry=3 --max-save-core=2 --max-save-datadir=10 --max-test-fail=20 --parallel=$(expr %(kw:jobs)s \* 2) %(kw:mtr_additional_args)s + exec perl mysql-test-run.pl --verbose-restart --force --retry=3 --max-save-core=2 --max-save-datadir=10 --max-test-fail=20 --parallel=$(expr %(kw:jobs)s \\* 2) %(kw:mtr_additional_args)s """, mtr_additional_args=util.Property( "mtr_additional_args", default="" @@ -1042,9 +993,7 @@ c["builders"].append( name=os.environ["MASTER_NONLATENT_DOCKERLIBRARY_WORKER"] .replace("bb", "amd64") .replace("docker", "wordpress"), - workernames=[ - os.environ["MASTER_NONLATENT_DOCKERLIBRARY_WORKER"] - ], + workernames=[os.environ["MASTER_NONLATENT_DOCKERLIBRARY_WORKER"]], tags=["RHEL"], collapseRequests=True, nextBuild=nextBuild, @@ -1057,9 +1006,7 @@ c["builders"].append( 
util.BuilderConfig( name=os.environ["MASTER_NONLATENT_DOCKERLIBRARY_WORKER"].replace("bb", "amd64") + "library", - workernames=[ - os.environ["MASTER_NONLATENT_DOCKERLIBRARY_WORKER"] - ], + workernames=[os.environ["MASTER_NONLATENT_DOCKERLIBRARY_WORKER"]], tags=["RHEL"], collapseRequests=True, nextBuild=nextBuild, @@ -1147,15 +1094,3 @@ c["builders"].append( # Add a Janitor configurator that removes old logs c["configurators"] = [util.JanitorConfigurator(logHorizon=timedelta(weeks=6), hour=23)] - -c["logEncoding"] = "utf-8" - -c["multiMaster"] = True - -c["mq"] = { # Need to enable multimaster aware mq. Wamp is the only option for now. - "type": "wamp", - "router_url": os.environ["MQ_ROUTER_URL"], - "realm": "realm1", - # valid are: none, critical, error, warn, info, debug, trace - "wamp_debug_level": "info", -} diff --git a/master-protected-branches/master.cfg b/master-protected-branches/master.cfg index fb7db83f..f6de257c 100644 --- a/master-protected-branches/master.cfg +++ b/master-protected-branches/master.cfg @@ -1,84 +1,45 @@ # -*- python -*- # ex: set filetype=python: -from buildbot.plugins import * -from buildbot.process.properties import Property, Properties -from buildbot.steps.shell import ShellCommand, Compile, Test, SetPropertyFromCommand -from buildbot.steps.mtrlogobserver import MTR, MtrLogObserver -from buildbot.steps.source.github import GitHub -from buildbot.process.remotecommand import RemoteCommand -from datetime import timedelta -from twisted.internet import defer - -import docker import os -import sys - -sys.setrecursionlimit(10000) - -sys.path.insert(0, "/srv/buildbot/master") +from collections import defaultdict + +from buildbot.plugins import steps, util, worker +from buildbot.process.properties import Property +from common_factories import getLastNFailedBuildsFactory, getQuickBuildFactory +from locks import getLocks +from master_common import base_master_config +from utils import ( + canStartBuild, + createWorker, + dockerfile, + 
isJepsenBranch, + isStagingBranch, + ls2list, + nextBuild, + waitIfStaging, +) -from constants import * -from utils import * -from locks import * -from schedulers_definition import SCHEDULERS -from common_factories import * +cfg_dir = os.path.abspath(os.path.dirname(__file__)) -# This is the dictionary that the buildmaster pays attention to. We also use -# a shorter alias to save typing. -c = BuildmasterConfig = {} +# Non autogen master. For now the directory structure is: +# +# └── +#    ├── buildbot.tac +#     └── master.cfg +# +# Non autogen masters load from for now. +base_dir = os.path.abspath(f'{cfg_dir}/../') # Load the slave, database passwords and 3rd-party tokens from an external private file, so # that the rest of the configuration can be public. config = {"private": {}} -exec(open("../master-private.cfg").read(), config, {}) - -####### BUILDBOT SERVICES - -# 'services' is a list of BuildbotService items like reporter targets. The -# status of each build will be pushed to these targets. buildbot/reporters/*.py -# has a variety to choose from, like IRC bots. - - -c["services"] = [] -context = util.Interpolate("buildbot/%(prop:buildername)s") -gs = reporters.GitHubStatusPush( - token=config["private"]["gh_mdbci"]["access_token"], - context=context, - startDescription="Build started.", - endDescription="Build done.", - verbose=True, - builders=GITHUB_STATUS_BUILDERS, -) -c["services"].append(gs) -c['secretsProviders'] = [secrets.SecretInAFile(dirname=os.environ["MASTER_CREDENTIALS_DIR"])] -####### PROJECT IDENTITY - -# the 'title' string will appear at the top of this buildbot installation's -# home pages (linked to the 'titleURL'). -c["title"] = os.environ["TITLE"] -c["titleURL"] = os.environ["TITLE_URL"] +with open(os.path.join(base_dir, "master-private.cfg"), "r") as file: + exec(file.read(), config, {}) -# the 'buildbotURL' string should point to the location where the buildbot's -# internal web server is visible. 
This typically uses the port number set in -# the 'www' entry below, but with an externally-visible host name which the -# buildbot cannot figure out without some help. -c["buildbotURL"] = os.environ["BUILDMASTER_URL"] - -# 'protocols' contains information about protocols which master will use for -# communicating with workers. You must define at least 'port' option that workers -# could connect to your master with this protocol. -# 'port' must match the value configured into the workers (with their -# --master option) -port = int(os.environ["PORT"]) -c["protocols"] = {"pb": {"port": port}} - -####### DB URL - -c["db"] = { - # This specifies what database buildbot uses to store its state. - "db_url": config["private"]["db_url"] -} +# This is the dictionary that the buildmaster pays attention to. We also use +# a shorter alias to save typing. +c = BuildmasterConfig = base_master_config(config) mtrDbPool = util.EqConnectionPool( "MySQLdb", @@ -88,14 +49,6 @@ mtrDbPool = util.EqConnectionPool( config["private"]["db_mtr_db"], ) -####### Disable net usage reports from being sent to buildbot.net -c["buildbotNetUsageData"] = None - -####### SCHEDULERS - -# Configure the Schedulers, which decide how to react to incoming changes. -c["schedulers"] = SCHEDULERS - ####### WORKERS # The 'workers' list defines the set of recognized workers. 
Each element is @@ -144,7 +97,7 @@ c["workers"].append( ) ) -workers = {} +workers = defaultdict(list) def addWorker( @@ -156,7 +109,7 @@ def addWorker( save_packages=False, shm_size="15G", ): - name, instance = createWorker( + base_name, name, worker_instance = createWorker( worker_name_prefix, worker_id, worker_type, @@ -166,12 +119,8 @@ def addWorker( shm_size, ) - if name[0] not in workers: - workers[name[0]] = [name[1]] - else: - workers[name[0]].append(name[1]) - - c["workers"].append(instance) + workers[base_name].append(name) + c["workers"].append(worker_instance) for w_name in ["hz-bbw"]: @@ -180,7 +129,7 @@ for w_name in ["hz-bbw"]: addWorker( w_name, i, - "-debian-11-debug-ps-embed", + "debian-11-debug-ps-embed", os.environ["CONTAINER_REGISTRY_URL"] + "debian11", jobs=14, save_packages=False, @@ -188,7 +137,7 @@ for w_name in ["hz-bbw"]: addWorker( w_name, i, - "-debian-12", + "debian-12", os.environ["CONTAINER_REGISTRY_URL"] + "debian12", jobs=jobs, save_packages=True, @@ -196,7 +145,7 @@ for w_name in ["hz-bbw"]: addWorker( w_name, i, - "-debian-12-debug-embed", + "debian-12-debug-embed", os.environ["CONTAINER_REGISTRY_URL"] + "debian12", jobs=14, save_packages=False, @@ -204,7 +153,7 @@ for w_name in ["hz-bbw"]: addWorker( w_name, i, - "-fedora-40", + "fedora-40", os.environ["CONTAINER_REGISTRY_URL"] + "fedora40", jobs=jobs, save_packages=True, @@ -212,7 +161,7 @@ for w_name in ["hz-bbw"]: addWorker( w_name, i, - "-last-N-failed", + "last-N-failed", os.environ["CONTAINER_REGISTRY_URL"] + "rhel9", jobs=jobs, save_packages=True, @@ -220,7 +169,7 @@ for w_name in ["hz-bbw"]: addWorker( w_name, i, - "-ubuntu-2004-clang", + "ubuntu-2004-clang", "vladbogo/bb:amd64-ubuntu-2004-clang", jobs=jobs, save_packages=True, @@ -228,7 +177,7 @@ for w_name in ["hz-bbw"]: addWorker( w_name, i, - "-ubuntu-2004-debug", + "ubuntu-2004-debug", os.environ["CONTAINER_REGISTRY_URL"] + "ubuntu20.04", jobs=14, save_packages=True, @@ -236,7 +185,7 @@ for w_name in ["hz-bbw"]: 
addWorker( w_name, i, - "-ubuntu-2204-debug-ps", + "ubuntu-2204-debug-ps", os.environ["CONTAINER_REGISTRY_URL"] + "ubuntu22.04", jobs=14, save_packages=False, @@ -561,15 +510,3 @@ c["builders"].append( factory=getLastNFailedBuildsFactory("debug", mtrDbPool), ) ) - -c["logEncoding"] = "utf-8" - -c["multiMaster"] = True - -c["mq"] = { # Need to enable multimaster aware mq. Wamp is the only option for now. - "type": "wamp", - "router_url": os.environ["MQ_ROUTER_URL"], - "realm": "realm1", - # valid are: none, critical, error, warn, info, debug, trace - "wamp_debug_level": "info", -} diff --git a/master-web/master.cfg b/master-web/master.cfg index eaac6d69..bb2ad46d 100644 --- a/master-web/master.cfg +++ b/master-web/master.cfg @@ -1,53 +1,47 @@ # -*- python -*- # ex: set filetype=python: -from __future__ import absolute_import -from __future__ import print_function - +# git branch filter using fnmatch import os -import sys -import time -import requests - -from buildbot.plugins import * -from buildbot.process.properties import Property, Properties -from buildbot.steps.shell import ShellCommand, Compile, Test, SetPropertyFromCommand -from buildbot.steps.mtrlogobserver import MTR, MtrLogObserver -from buildbot.steps.source.github import GitHub -from buildbot.process.remotecommand import RemoteCommand -from twisted.internet import defer -from datetime import timedelta - -from flask import Flask -from flask import render_template - -from buildbot.process.results import statusToString -sys.setrecursionlimit(10000) +from buildbot.plugins import schedulers, util +from flask import Flask, render_template +from master_common import base_master_config +from utils import upstream_branch_fn -sys.path.append(os.getcwd() + "/..") -from constants import * +cfg_dir = os.path.abspath(os.path.dirname(__file__)) -c = BuildmasterConfig = {} +# Non autogen master. 
For now the directory structure is: +# +# └── +#    ├── buildbot.tac +#     └── master.cfg +# +# Non autogen masters load from for now. +base_dir = os.path.abspath(f"{cfg_dir}/../") -# Load the slave, database passwords and 3rd-party tokens from an external -# private file, so that the rest of the configuration can be public. +# Load the slave, database passwords and 3rd-party tokens from an external private file, so +# that the rest of the configuration can be public. config = {"private": {}} -exec(open("../master-private.cfg").read(), config, {}) +with open(os.path.join(base_dir, "master-private.cfg"), "r") as file: + exec(file.read(), config, {}) -####### PROJECT IDENTITY -c["title"] = os.environ["TITLE"] -c["titleURL"] = os.environ["TITLE_URL"] -c["buildbotURL"] = os.environ["BUILDMASTER_URL"] +c = BuildmasterConfig = base_master_config(config) -port = int(os.environ["PORT"]) + +# No slaves connect to this master, only meant for WEB. +c["protocols"] = {} # minimalistic config to activate web UI -c["www"] = dict( - port=port, - plugins=dict(waterfall_view={}, console_view={}, grid_view={}), - custom_templates_dir="templates", -) +c["www"] = { + "port": os.environ["PORT"], + "plugins": { + "waterfall_view": {}, + "console_view": {}, + "grid_view": {}, + }, + "custom_templates_dir": os.path.join(cfg_dir, "templates"), +} # Github Auth, allow control for MariaDB affiliated accounts c["www"]["authz"] = util.Authz( @@ -85,11 +79,6 @@ c["www"]["plugins"]["wsgi_dashboards"] = ( ] ) -####### DB URL -c["db"] = {"db_url": config["private"]["db_url"]} - -####### Disable net usage reports from being sent to buildbot.net -c["buildbotNetUsageData"] = None ####### GitHub hooks @@ -106,43 +95,8 @@ c["www"]["ui_default_config"] = { "Grid.buildFetchLimit": 50, } -c["logEncoding"] = "utf-8" - -c["multiMaster"] = True - -# Need to enable multimaster aware mq. Wamp is the only option for now. 
-c["mq"] = { - "type": "wamp", - "router_url": os.environ["MQ_ROUTER_URL"], - "realm": "realm1", - # valid are: none, critical, error, warn, info, debug, trace - "wamp_debug_level": "info", -} - -# git branch filter using fnmatch -import fnmatch - - -def upstream_branch_fn(branch): - return ( - branch in BRANCHES_MAIN - or fnmatch.fnmatch(branch, "bb-*") - or fnmatch.fnmatch(branch, "st-*") - or fnmatch.fnmatch(branch, "prot-*") - or fnmatch.fnmatch(branch, "refs/pull/*") - or fnmatch.fnmatch(branch, "preview-1[0-9].*") - or fnmatch.fnmatch(branch, "jpsn-*") - ) - - -def staging_branch_fn(branch): - return fnmatch.fnmatch(branch, "st-*") - - -def fnmatch_any(s, list_of_patterns): - return any(fnmatch.fnmatch(s, p) for p in list_of_patterns) - +# Override schedulers c["schedulers"] = [] # upstream scheduling diff --git a/master.cfg b/master.cfg index 27dc0b7b..0aea869a 100644 --- a/master.cfg +++ b/master.cfg @@ -1,90 +1,62 @@ # -*- python -*- # ex: set filetype=python: -from buildbot.plugins import * -from buildbot.process.properties import Property, Properties -from buildbot.steps.shell import ShellCommand, Compile, Test, SetPropertyFromCommand -from buildbot.steps.mtrlogobserver import MTR, MtrLogObserver -from buildbot.steps.source.github import GitHub -from buildbot.process.remotecommand import RemoteCommand -from datetime import timedelta -from twisted.internet import defer - -import docker import os -import sys +from collections import defaultdict -sys.setrecursionlimit(10000) +import yaml -sys.path.insert(0, "/srv/buildbot/master") - -from common_factories import * -from constants import * -from locks import * -from schedulers_definition import SCHEDULERS -from utils import * +from buildbot.plugins import steps, util +from buildbot.process.properties import Property +from common_factories import ( + getQuickBuildFactory, + getRpmAutobakeFactory, + getSourceTarball, +) +from constants import ( + GITHUB_STATUS_BUILDERS, + OS_INFO, + SAVED_PACKAGE_BRANCHES, +) 
+from locks import getLocks +from master_common import base_master_config +from utils import ( + canStartBuild, + createDebRepo, + createWorker, + dockerfile, + hasDockerLibrary, + hasInstall, + hasPackagesGenerated, + hasUpgrade, + ls2string, + nextBuild, + printEnv, + savePackageIfBranchMatch, + uploadDebArtifacts, +) -with open("master-config.yaml", "r") as f: - master_config = yaml.safe_load(f) +cfg_dir = os.path.abspath(os.path.dirname(__file__)) +# Autogen master, see buildbot.tac for why this is the case. +base_dir = os.path.abspath(f"{cfg_dir}/../../") +with open(os.path.join(cfg_dir, "master-config.yaml"), "r") as file: + master_config = yaml.safe_load(file) -# This is the dictionary that the buildmaster pays attention to. We also use -# a shorter alias to save typing. -c = BuildmasterConfig = {} -# Load the slave, database passwords and 3rd-party tokens from an external private file, so -# that the rest of the configuration can be public. +# Load the slave, database passwords and 3rd-party tokens from an external +# private file, so that the rest of the configuration can be public. config = {"private": {}} -exec(open("master-private.cfg").read(), config, {}) - -####### BUILDBOT SERVICES - -# 'services' is a list of BuildbotService items like reporter targets. The -# status of each build will be pushed to these targets. buildbot/reporters/*.py -# has a variety to choose from, like IRC bots. +with open(os.path.join(cfg_dir, "master-private.cfg"), "r") as file: + exec(file.read(), config, {}) +# This is the dictionary that the buildmaster pays attention to. We also use +# a shorter alias to save typing. 
+c = BuildmasterConfig = base_master_config(config) -c["services"] = [] -context = util.Interpolate("buildbot/%(prop:buildername)s") -gs = reporters.GitHubStatusPush( - token=config["private"]["gh_mdbci"]["access_token"], - context=context, - startDescription="Build started.", - endDescription="Build done.", - verbose=True, - builders=GITHUB_STATUS_BUILDERS, -) -c["services"].append(gs) -c["secretsProviders"] = [ - secrets.SecretInAFile(dirname=os.environ["MASTER_CREDENTIALS_DIR"]) -] - -####### PROJECT IDENTITY - -# the 'title' string will appear at the top of this buildbot installation's -# home pages (linked to the 'titleURL'). -c["title"] = os.environ["TITLE"] -c["titleURL"] = os.environ["TITLE_URL"] - -# the 'buildbotURL' string should point to the location where the buildbot's -# internal web server is visible. This typically uses the port number set in -# the 'www' entry below, but with an externally-visible host name which the -# buildbot cannot figure out without some help. -c["buildbotURL"] = os.environ["BUILDMASTER_URL"] - -# 'protocols' contains information about protocols which master will use for -# communicating with workers. You must define at least 'port' option that workers -# could connect to your master with this protocol. -# 'port' must match the value configured into the workers (with their -# --master option) -c["protocols"] = {"pb": {"port": os.environ["PORT"]}} - -####### DB URL - -c["db"] = { - # This specifies what database buildbot uses to store its state. - "db_url": config["private"]["db_url"] -} +####### +# DB URL +####### mtrDbPool = util.EqConnectionPool( "MySQLdb", config["private"]["db_host"], @@ -93,15 +65,9 @@ mtrDbPool = util.EqConnectionPool( config["private"]["db_mtr_db"], ) -####### Disable net usage reports from being sent to buildbot.net -c["buildbotNetUsageData"] = None - -####### SCHEDULERS - -# Configure the Schedulers, which decide how to react to incoming changes. 
-c["schedulers"] = SCHEDULERS - -####### WORKERS +######### +# WORKERS +######### # The 'workers' list defines the set of recognized workers. Each element is # a Worker object, specifying a unique worker name and password. The same @@ -110,75 +76,53 @@ c["workers"] = [] # Docker workers -workers = {} - - -def addWorker( - worker_name_prefix, - worker_id, - worker_type, - dockerfile, - jobs=5, - save_packages=False, - shm_size="15G", -): - name, instance = createWorker( - worker_name_prefix, - worker_id, - worker_type, - dockerfile, - jobs, - save_packages, - shm_size, - ) - - if name[0] not in workers: - workers[name[0]] = [name[1]] - else: - workers[name[0]].append(name[1]) - - c["workers"].append(instance) - +workers = defaultdict(list) +# For each worker in master_config ['aarch64-bbw1', 2, 3, 4] for w_name in master_config["workers"]: jobs = 7 + worker_name = w_name[:-1] # aarch64-bbw + worker_id = w_name[-1] # 1, 2, 3, 4 + + for arch in master_config["builders"]: + builders = master_config["builders"][arch] + for os_name in builders: + os_definition = builders[os_name] + image_tag = os_definition['image_tag'] + + # Skip s390x non-SLES builders on SLES host (bbw2) + if ("s390x" in arch + and (worker_id == "2") + and ("sles" not in os_name)): + continue + + if image_tag.startswith("ubuntu"): + image_tag = image_tag[:-2] + "." 
+ image_tag[-2:] + + quay_name = f'{os.environ["CONTAINER_REGISTRY_URL"]}{image_tag}' + if arch.startswith("x86"): + os_name += "-i386" + quay_name += "-386" + + base_name, name, worker_instance = createWorker( + worker_name, + worker_id, + os_name, + quay_name, + jobs=jobs, + save_packages=True, + shm_size="15G", + ) - for builder in master_config["builders"]: - worker_name = w_name[:-1] - worker_id = w_name[-1] - - os_name = "-".join(builder.split("-")[1:]) - image_tag = "".join(os_name.split("-")) - - # Skip s390x non-SLES builders on SLES host (bbw2) - if ("s390x" in builder) and (worker_id == "2") and ("sles" not in os_name): - continue + workers[base_name].append(name) + c["workers"].append(worker_instance) - if image_tag.startswith("ubuntu"): - image_tag = image_tag[:-2] + "." + image_tag[-2:] - - quay_name = ( - os.environ["CONTAINER_REGISTRY_URL"] - + image_tag - ) - if builder.startswith("x86"): - os_name += "-i386" - quay_name += "-386" - addWorker( - worker_name, - worker_id, - "-" + os_name, - quay_name, - jobs=jobs, - save_packages=True, - ) ####### FACTORY CODE f_quick_build = getQuickBuildFactory("nm", mtrDbPool) f_rpm_autobake = getRpmAutobakeFactory(mtrDbPool) -## f_deb_autobake f_deb_autobake = util.BuildFactory() f_deb_autobake.addStep(printEnv()) f_deb_autobake.addStep( @@ -291,99 +235,58 @@ f_deb_autobake.addStep( c["builders"] = [] -for builder in master_config["builders"]: - splits = builder.split("-") - arch = splits[0] - os_name = "-".join(splits[1:]) +for arch in master_config["builders"]: + builders_group = master_config["builders"][arch] + for os_name in builders_group: + worker_prefix = arch + worker_suffix = '' - mtr_additional_args = None - if "mtr_additional_args" in OS_INFO[os_name]: - if arch in OS_INFO[os_name]["mtr_additional_args"]: - mtr_additional_args = OS_INFO[os_name]["mtr_additional_args"][arch] + if arch == "amd64": + worker_prefix = "x64" - if arch == "amd64": - arch = "x64" - worker_name = arch + "-bbw-docker-" + 
os_name
+        if arch == "x86":
+            worker_prefix = 'x64'
+            worker_suffix = '-i386'
+        worker_name = f'{worker_prefix}-bbw-docker-{os_name}{worker_suffix}'

-    if arch == "x86":
-        worker_name = "x64-bbw-docker-" + os_name + "-i386"
+        build_type = OS_INFO[os_name]["type"]

-    build_type = OS_INFO[os_name]["type"]
+        builder = f'{arch}-{os_name}'

-    # Add builder only if it's not a protected branches one
-    if builder not in GITHUB_STATUS_BUILDERS:
         tags = [os_name]
-        if arch == "s390x" and builder in BUILDERS_GALERA_MTR:
-            tags += ["experimental"]
-        if "sid" in builder or "stream-9" in builder:
-            tags += ["bleeding-edge"]
+        # Add builder only if it's not a protected branches one
+        if builder not in GITHUB_STATUS_BUILDERS:
+            c["builders"].append(
+                util.BuilderConfig(
+                    name=builder,
+                    workernames=workers[worker_name],
+                    tags=tags,
+                    collapseRequests=True,
+                    nextBuild=nextBuild,
+                    canStartBuild=canStartBuild,
+                    locks=getLocks,
+                    factory=f_quick_build,
+                )
+            )
+
+        factory_instance = f_deb_autobake if build_type != "rpm" else f_rpm_autobake
+        properties = {
+            "verbose_build": "VERBOSE=1" if arch == "ppc64le" else None,
+            "rpm_type": "".join(os_name.split("-")) if build_type == "rpm" else None
+        }
+
+        tags += [build_type, "autobake"]
+
         c["builders"].append(
             util.BuilderConfig(
-                name=builder,
+                name=builder + "-" + build_type + "-autobake",
                 workernames=workers[worker_name],
                 tags=tags,
                 collapseRequests=True,
                 nextBuild=nextBuild,
                 canStartBuild=canStartBuild,
                 locks=getLocks,
-                factory=f_quick_build,
+                properties=properties,
+                factory=factory_instance,
             )
         )
-
-        factory_instance = f_deb_autobake
-        properties = {}
-
-        if arch == "ppc64le":
-            properties["verbose_build"] = "VERBOSE=1"
-        if mtr_additional_args is not None:
-            properties["mtr_additional_args"] = mtr_additional_args
-        if build_type == "rpm":
-            properties["rpm_type"] = "".join(os_name.split("-"))
-            factory_instance = f_rpm_autobake
-        tags = [os_name, build_type, "autobake"]
-        # From mariadb.org-tools/release/prep - under
-        # Dirs 
for buildbot.mariadb.org - if builder in [ - "aarch64-openeuler-2403", - "amd64-openeuler-2403", - "s390x-ubuntu-2004", - "s390x-rhel-8", - "s390x-sles-15", - "ppc64le-rhel-9", - "s390x-rhel-9", - "ppc64le-ubuntu-2204", - "s390x-ubuntu-2204", - "amd64-debian-sid", - "aarch64-debian-sid", - "ppc64le-debian-sid", - "amd64-opensuse-1505", - "amd64-opensuse-1506", - "amd64-sles-1505", - "s390x-sles-1505", - ]: - tags += ["release_packages"] - c["builders"].append( - util.BuilderConfig( - name=builder + "-" + build_type + "-autobake", - workernames=workers[worker_name], - tags=tags, - collapseRequests=True, - nextBuild=nextBuild, - canStartBuild=canStartBuild, - locks=getLocks, - properties=properties, - factory=factory_instance, - ) - ) - -c["logEncoding"] = "utf-8" - -c["multiMaster"] = True - -c["mq"] = { # Need to enable multimaster aware mq. Wamp is the only option for now. - "type": "wamp", - "router_url": os.environ["MQ_ROUTER_URL"], - "realm": "realm1", - # valid are: none, critical, error, warn, info, debug, trace - "wamp_debug_level": "info", -} diff --git a/master_common.py b/master_common.py new file mode 100644 index 00000000..c996961f --- /dev/null +++ b/master_common.py @@ -0,0 +1,79 @@ +import os + +from buildbot.plugins import reporters, secrets, util +from constants import GITHUB_STATUS_BUILDERS +from schedulers_definition import SCHEDULERS + + +def base_master_config( + config: dict, + title=os.environ["TITLE"], + title_url=os.environ["TITLE_URL"], + buildbot_url=os.environ["BUILDMASTER_URL"], + secrets_provider_file=os.environ["MASTER_CREDENTIALS_DIR"], + master_port=os.environ["PORT"], + mq_router_url=os.environ["MQ_ROUTER_URL"], +): + # TODO(cvicentiu) either move this to environ or all other params to config + # file. 
+    github_access_token = config["private"]["gh_mdbci"]["access_token"]
+    db_url = config["private"]["db_url"]
+
+    return {
+        #######
+        # PROJECT IDENTITY
+        #######
+        # the 'title' string will appear at the top of this buildbot
+        # installation's
+        "title": title,
+        # home pages (linked to the 'titleURL').
+        "titleURL": title_url,
+        # the 'buildbotURL' string should point to the location where the
+        # buildbot's internal web server is visible. This typically uses the
+        # port number set in the 'www' entry below, but with an
+        # externally-visible host name which the buildbot cannot figure out
+        # without some help.
+        "buildbotURL": buildbot_url,
+        # 'services' is a list of BuildbotService items like reporter targets.
+        # The status of each build will be pushed to these targets.
+        # buildbot/reporters/*.py has a variety to choose from, like IRC bots.
+        "services": [
+            reporters.GitHubStatusPush(
+                token=github_access_token,
+                context=util.Interpolate("buildbot/%(prop:buildername)s"),
+                startDescription="Build started.",
+                endDescription="Build done.",
+                verbose=True,
+                builders=GITHUB_STATUS_BUILDERS,
+            )
+        ],
+        "secretsProviders": [secrets.SecretInAFile(dirname=secrets_provider_file)],
+        # 'protocols' contains information about protocols which master will
+        # use for communicating with workers. You must define at least 'port'
+        # option that workers could connect to your master with this protocol.
+        # 'port' must match the value configured into the workers (with their
+        # --master option)
+        "protocols": {
+            "pb": {"port": master_port},
+        },
+        # This specifies what database buildbot uses to store its state.
+        "db": {
+            "db_url": db_url,
+        },
+        # Disable net usage reports from being sent to buildbot.net
+        "buildbotNetUsageData": None,
+        # Configure the Schedulers, which decide how to react to incoming
+        # changes.
+        "schedulers": SCHEDULERS,
+        "logEncoding": "utf-8",
+        "multiMaster": True,
+        "mq": {
+            # Need to enable multimaster aware mq. 
Wamp is the only option for + # now. + "type": "wamp", + "router_url": mq_router_url, + "realm": "realm1", + # valid are: none, critical, error, warn, info, debug, trace + "wamp_debug_level": "info", + }, + } diff --git a/os_info.yaml b/os_info.yaml index 48d12a83..d993c3f2 100644 --- a/os_info.yaml +++ b/os_info.yaml @@ -1,33 +1,53 @@ --- almalinux-8: + image_tag: almalinux-8 version_name: 8 + tags: + - release_packages + - autobake arch: - amd64 - aarch64 type: rpm install_only: True almalinux-9: + image_tag: almalinux-9 version_name: 9 + tags: + - release_packages + - autobake arch: - amd64 - aarch64 type: rpm install_only: True centos-stream9: + image_tag: centos-stream9 version_name: 9 + tags: + - release_packages + - autobake arch: - amd64 - aarch64 - ppc64le type: rpm debian-11: + image_tag: debian-11 version_name: bullseye + tags: + - release_packages + - autobake arch: - amd64 - aarch64 type: deb debian-12: + image_tag: debian-12 version_name: bookworm + tags: + - release_packages + - autobake arch: - amd64 - aarch64 @@ -35,7 +55,11 @@ debian-12: - x86 type: deb debian-sid: + image_tag: debian-sid version_name: sid + tags: + - autobake + - bleeding_edge arch: - amd64 - aarch64 @@ -43,40 +67,68 @@ debian-sid: - x86 type: deb fedora-39: + image_tag: fedora-39 version_name: 39 arch: - amd64 - aarch64 type: rpm + tags: + - autobake + - bleeding_edge fedora-40: + image_tag: fedora-40 + tags: + - autobake + - bleeding_edge version_name: 40 arch: - amd64 - aarch64 type: rpm fedora-41: + image_tag: fedora-41 + tags: + - autobake + - bleeding_edge version_name: 41 arch: - amd64 - aarch64 type: rpm openeuler-2403: + image_tag: openeuler-2403 + tags: + - autobake + - bleeding_edge version_name: 24.03 arch: - amd64 - aarch64 type: rpm opensuse-1505: + image_tag: opensuse-1505 + tags: + - autobake + - bleeding_edge version_name: 155 arch: - amd64 type: rpm opensuse-1506: + image_tag: opensuse-1506 + tags: + - autobake + - bleeding_edge version_name: 156 arch: - amd64 
type: rpm rhel-8: + image_tag: rhel-8 + tags: + - autobake + - bleeding_edge version_name: 8 arch: - amd64 @@ -85,6 +137,10 @@ rhel-8: - s390x type: rpm rhel-9: + image_tag: rhel-9 + tags: + - autobake + - bleeding_edge version_name: 9 arch: - amd64 @@ -93,30 +149,50 @@ rhel-9: - s390x type: rpm rockylinux-8: + image_tag: rockylinux-8 + tags: + - autobake + - bleeding_edge version_name: 8 arch: - amd64 type: rpm install_only: True rockylinux-9: + image_tag: rockylinux-9 + tags: + - autobake + - bleeding_edge version_name: 9 arch: - amd64 type: rpm install_only: True sles-1505: + image_tag: sles-1505 + tags: + - autobake + - bleeding_edge version_name: 15.5 arch: - amd64 # TEMP - currently short on hardware - s390x type: rpm sles-1506: + image_tag: sles-1506 + tags: + - autobake + - bleeding_edge version_name: 15.6 arch: - amd64 - s390x type: rpm ubuntu-2004: + image_tag: ubuntu-2004 + tags: + - autobake + - bleeding_edge version_name: focal arch: - amd64 @@ -125,6 +201,10 @@ ubuntu-2004: - s390x type: deb ubuntu-2204: + image_tag: ubuntu-2204 + tags: + - autobake + - bleeding_edge version_name: jammy arch: - amd64 @@ -133,6 +213,10 @@ ubuntu-2204: - s390x type: deb ubuntu-2404: + image_tag: ubuntu-2404 + tags: + - autobake + - bleeding_edge version_name: noble arch: - amd64 @@ -141,6 +225,10 @@ ubuntu-2404: - s390x type: deb ubuntu-2410: + image_tag: ubuntu-2410 + tags: + - autobake + - bleeding_edge version_name: oracular arch: - amd64 diff --git a/tests/__init__.py b/tests/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/utils.py b/utils.py index 0a9ff479..e779c20a 100644 --- a/utils.py +++ b/utils.py @@ -7,6 +7,9 @@ import docker from pyzabbix import ZabbixAPI +# TODO(cvicentiu) remove +# from builder import DockerBuilder, WorkerMachine + from buildbot.buildrequest import BuildRequest from buildbot.interfaces import IProperties from buildbot.master import BuildMaster @@ -17,6 +20,7 @@ from buildbot.process.workerforbuilder import 
AbstractWorkerForBuilder from buildbot.worker import AbstractWorker from constants import ( + ALL_BB_TEST_BRANCHES, BUILDERS_AUTOBAKE, BUILDERS_BIG, BUILDERS_ECO, @@ -28,6 +32,7 @@ MTR_ENV, RELEASE_BRANCHES, SAVED_PACKAGE_BRANCHES, + STAGING_PROT_TEST_BRANCHES, ) private_config = {"private": {}} @@ -79,25 +84,22 @@ def createWorker( "/srv/buildbot/packages:/mnt/packages", MASTER_PACKAGES + "/:/packages", ], -) -> Tuple[str, worker.DockerLatentWorker]: +) -> Tuple[str, str, worker.DockerLatentWorker]: worker_name = f"{worker_name_prefix}{worker_id}-docker" - name = f"{worker_name}{worker_type}" - - if worker_name_prefix.startswith("hz"): - b_name = "x64-bbw" - elif worker_name_prefix.startswith("intel"): - b_name = "x64-bbw" - elif worker_name_prefix.startswith("ppc64le"): - b_name = "ppc64le-bbw" - elif worker_name_prefix.startswith("amd"): - b_name = "x64-bbw" - elif worker_name_prefix.startswith("apexis"): - b_name = "x64-bbw" - elif worker_name_prefix.startswith("ns"): - b_name = "x64-bbw" - else: - b_name = worker_name_prefix - base_name = b_name + "-docker" + worker_type + name = f"{worker_name}-{worker_type}{worker_name_suffix}" + + # TODO(cvicentiu) Remove this list when refactoring YAML. 
+ b_name = worker_name_prefix + X64_BUILDER_PREFIXES = ["hz", "intel", "amd", "apexis", "ns"] + PPC64LE_BUILDER_PREFIXES = ["ppc64le"] + for x64_prefix in X64_BUILDER_PREFIXES: + if worker_name_prefix.startswith(x64_prefix): + b_name = "x64-bbw" + for ppc_prefix in PPC64LE_BUILDER_PREFIXES: + if worker_name_prefix.startswith(ppc_prefix): + b_name = "ppc64le-bbw" + + base_name = f"{b_name}-docker-{worker_type}" # Set master FQDN - default to wireguard interface fqdn = os.environ["BUILDMASTER_WG_IP"] @@ -113,7 +115,7 @@ def createWorker( need_pull = False worker_instance = worker.DockerLatentWorker( - name + worker_name_suffix, + name, None, docker_host=private_config["private"]["docker_workers"][worker_name], image=image_str, @@ -135,7 +137,7 @@ def createWorker( volumes=volumes, properties={"jobs": jobs, "save_packages": save_packages}, ) - return ((base_name, name + worker_name_suffix), worker_instance) + return (base_name, name, worker_instance) def printEnv() -> steps.ShellCommand: @@ -262,14 +264,18 @@ def uploadDebArtifacts() -> steps.ShellCommand: ) -def staging_branch_fn(branch: str) -> bool: - return fnmatch.fnmatch(branch, "prot-st-*") - - def fnmatch_any(branch: str, patterns: list[str]) -> bool: return any(fnmatch.fnmatch(branch, pattern) for pattern in patterns) +def upstream_branch_fn(branch): + return fnmatch_any(branch, ALL_BB_TEST_BRANCHES) + + +def staging_branch_fn(branch: str) -> bool: + return fnmatch_any(branch, STAGING_PROT_TEST_BRANCHES) + + # Priority filter based on saved package branches def nextBuild(builder: Builder, requests: list[BuildRequest]) -> BuildRequest: def build_request_sort_key(request: BuildRequest): @@ -665,3 +671,17 @@ def mtrEnv(props: IProperties) -> dict: mtr_add_env[key] = value return mtr_add_env return MTR_ENV + + +# TODO(cvicentiu) remove +# def create_latent_workers(machine: WorkerMachine, +# builders: list[DockerBuilder] +# ) -> list[worker.DockerLatentWorker]: +# result = [] +# for builder in builders: +# worker 
= createWorker(machine.name, +# '', +# builder.distro_name, +# builder.image) +# result.append(worker) +# return result diff --git a/validate_master_cfg.sh b/validate_master_cfg.sh index 067b4040..255ab365 100755 --- a/validate_master_cfg.sh +++ b/validate_master_cfg.sh @@ -68,16 +68,8 @@ fi command -v python3 >/dev/null || err "python3 command not found" +. ./docker-compose/.env python3 define_masters.py -echo "Checking master.cfg" -# Port is set by generate-config.py (docker-compose), not present in .env -$RUNC run -i -v "$(pwd):/srv/buildbot/master" \ - --env PORT=1234 \ - --env-file <(sed "s/='\([^']*\)'/=\1/" $ENVFILE) \ - -w /srv/buildbot/master \ - $IMAGE \ - buildbot checkconfig master.cfg -echo -e "done\n" # not checking libvirt config file (//TEMP we need to find a solution # to not check ssh connection) for dir in autogen/* \ @@ -92,8 +84,8 @@ for dir in autogen/* \ $RUNC run -i -v "$(pwd):/srv/buildbot/master" \ --env PORT=1234 \ --env-file <(sed "s/='\([^']*\)'/=\1/" $ENVFILE) \ - -w "/srv/buildbot/master/$dir" \ + -w "/srv/buildbot/master/" \ $IMAGE \ - buildbot checkconfig master.cfg - echo -e "done\n" -done \ No newline at end of file + bash -c "buildbot checkconfig $dir/master.cfg" + echo "done" +done