[ansible] Update hv01
by Nicolas Chauvet
commit 12a4fc071819978bbf90a9454f2930ec4a73a955
Author: Nicolas Chauvet <kwizart(a)gmail.com>
Date: Wed Aug 18 23:30:40 2021 +0200
Update hv01
inventory/host_vars/hv01.online.rpmfusion.net | 4 ++--
1 files changed, 2 insertions(+), 2 deletions(-)
---
diff --git a/inventory/host_vars/hv01.online.rpmfusion.net b/inventory/host_vars/hv01.online.rpmfusion.net
index dcfcea4..a8d802c 100644
--- a/inventory/host_vars/hv01.online.rpmfusion.net
+++ b/inventory/host_vars/hv01.online.rpmfusion.net
@@ -15,5 +15,5 @@ udp_ports: ['53', '1194']
custom_rules: [ '-A FORWARD -d 192.168.181.0/24 -o br1 -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT' , '-A FORWARD -s 192.168.181.0/24 -i br1 -j ACCEPT', '-A INPUT -i br1 -p tcp -m tcp --dport 111 -j ACCEPT', '-A INPUT -i tun0 -p tcp -m tcp --dport 111 -j ACCEPT' , '-A INPUT -i br1 -p udp -m udp --dport 514 -j ACCEPT', '-A INPUT -i tun0 -p udp -m udp --dport 514 -j ACCEPT' , '-A INPUT -i br1 -p tcp -m tcp --dport 514 -j ACCEPT', '-A INPUT -i tun0 -p tcp -m tcp --dport 514 -j ACCEPT' ,'-A INPUT -i br1 -p tcp -m tcp --dport 662 -j ACCEPT' , '-A INPUT -i tun0 -p tcp -m tcp --dport 662 -j ACCEPT', '-A INPUT -i br1 -p tcp -m tcp --dport 892 -j ACCEPT' , '-A INPUT -i tun0 -p tcp -m tcp --dport 892 -j ACCEPT', '-A INPUT -i br1 -p tcp -m tcp --dport 2049 -j ACCEPT', '-A INPUT -i tun0 -p tcp -m tcp --dport 2049 -j ACCEPT', '-A INPUT -i br1 -p udp -m udp --dport 2049 -j ACCEPT', '-A INPUT -i tun0 -p udp -m udp --dport 2049 -j ACCEPT', '-A INPUT -i br1 -p tcp -m tcp --dport 5000 -j ACCEPT', '-A INPUT -i tun0 -p tcp -m tcp --dport 5000 -j ACCEPT' ,'-A INPUT -i br1 -p tcp -m tcp --dport 32803 -j ACCEPT', '-A INPUT -i tun0 -p tcp -m tcp --dport 32803 -j ACCEPT' , '-A INPUT -i br1 -p udp -m udp --dport 32769 -j ACCEPT', '-A INPUT -i tun0 -p udp -m udp --dport 32769 -j ACCEPT' , '-A INPUT -i br1 -p tcp -m tcp --dport 3128 -j ACCEPT', '-A INPUT -i tun0 -p tcp -m tcp --dport 3128 -j ACCEPT']
custom_nat_rules: ['-A POSTROUTING -o br0 -j MASQUERADE' ]
-ansible_python
-
+ansible_python:
+ executable: /usr/bin/python2
3 years, 4 months
[ansible] Update ansible.cfg
by Nicolas Chauvet
commit ef5e576057d33bf5e776b4751b10c3bc6602de0c
Author: Nicolas Chauvet <kwizart(a)gmail.com>
Date: Wed Aug 18 23:17:36 2021 +0200
Update ansible.cfg
roles/ansible-server/templates/ansible.cfg.j2 | 6 ++++--
1 files changed, 4 insertions(+), 2 deletions(-)
---
diff --git a/roles/ansible-server/templates/ansible.cfg.j2 b/roles/ansible-server/templates/ansible.cfg.j2
index 14bc86d..61325da 100644
--- a/roles/ansible-server/templates/ansible.cfg.j2
+++ b/roles/ansible-server/templates/ansible.cfg.j2
@@ -29,6 +29,8 @@ forks = 150
#module_lang = C
#module_set_locale = False
+interpreter_python = auto
+
# plays will gather facts by default, which contain information about
# the remote system.
#
@@ -59,7 +61,7 @@ gathering = smart
# additional paths to search for roles in, colon separated
#roles_path = /etc/ansible/roles
-roles_path = {{ ansible_base }}/ansible/roles
+roles_path = {{ ansible_base }}/ansible/roles:/usr/share/ansible/roles
# uncomment this to disable SSH key host checking
#host_key_checking = False
@@ -75,7 +77,7 @@ roles_path = {{ ansible_base }}/ansible/roles
# enable callback plugins, they can output to stdout but cannot be 'stdout' type.
#callback_whitelist = timer, mail
-callback_whitelist = fedmsg_callback2,profile_tasks,logdetail2
+callback_whitelist = fedora_messaging_callback,profile_roles,profile_tasks,logdetail
# Determine whether includes in tasks and handlers are "static" by
# default. As of 2.0, includes are dynamic by default. Setting these
3 years, 4 months
[ansible] Add pagure role
by Nicolas Chauvet
commit 907cbc70cb9d07819115ec1d699f7b0b661632b2
Author: Nicolas Chauvet <kwizart(a)gmail.com>
Date: Wed Aug 18 22:58:41 2021 +0200
Add pagure role
roles/pagure/fedmsg/files/selinux/fedmsg.mod | Bin 0 -> 1158 bytes
roles/pagure/fedmsg/files/selinux/fedmsg.pp | Bin 0 -> 1174 bytes
roles/pagure/fedmsg/files/selinux/fedmsg.te | 21 +
roles/pagure/fedmsg/tasks/main.yml | 162 ++++++
roles/pagure/fedmsg/templates/base.py.j2 | 68 +++
roles/pagure/fedmsg/templates/logging.py.j2 | 162 ++++++
roles/pagure/fedmsg/templates/relay.py.j2 | 16 +
roles/pagure/fedmsg/templates/ssl.py.j2 | 16 +
roles/pagure/frontend/files/aliases | 93 +++
roles/pagure/frontend/files/backup-database | 10 +
roles/pagure/frontend/files/pagure_ev.service | 15 +
.../frontend/files/pagure_fast_worker.service | 15 +
.../frontend/files/pagure_medium_worker.service | 15 +
roles/pagure/frontend/files/pagure_mirror.service | 18 +
roles/pagure/frontend/files/pagure_sar.py | 161 ++++++
.../frontend/files/pagure_slow_worker.service | 15 +
roles/pagure/frontend/files/pg_hba.conf | 77 +++
roles/pagure/frontend/files/selinux/pagure.if | 1 +
roles/pagure/frontend/files/selinux/pagure.pp | Bin 0 -> 7261 bytes
roles/pagure/frontend/files/selinux/pagure.te | 11 +
roles/pagure/frontend/files/stunnel.service | 15 +
roles/pagure/frontend/files/syslog-logrotate | 20 +
roles/pagure/frontend/handlers/main.yml | 3 +
roles/pagure/frontend/tasks/main.yml | 598 ++++++++++++++++++++
roles/pagure/frontend/templates/0_pagure.conf | 236 ++++++++
roles/pagure/frontend/templates/alembic.ini | 50 ++
roles/pagure/frontend/templates/docs_pagure.wsgi | 22 +
.../frontend/templates/fedora-messaging.toml | 25 +
roles/pagure/frontend/templates/gitolite.rc | 196 +++++++
roles/pagure/frontend/templates/pagure.cfg | 380 +++++++++++++
roles/pagure/frontend/templates/pagure.wsgi | 29 +
roles/pagure/frontend/templates/robots.txt.j2 | 10 +
.../pagure/frontend/templates/securityheaders.conf | 8 +
roles/pagure/frontend/templates/stunnel-conf.j2 | 16 +
34 files changed, 2484 insertions(+), 0 deletions(-)
---
diff --git a/roles/pagure/fedmsg/files/selinux/fedmsg.mod b/roles/pagure/fedmsg/files/selinux/fedmsg.mod
new file mode 100644
index 0000000..25e47ae
Binary files /dev/null and b/roles/pagure/fedmsg/files/selinux/fedmsg.mod differ
diff --git a/roles/pagure/fedmsg/files/selinux/fedmsg.pp b/roles/pagure/fedmsg/files/selinux/fedmsg.pp
new file mode 100644
index 0000000..17a2594
Binary files /dev/null and b/roles/pagure/fedmsg/files/selinux/fedmsg.pp differ
diff --git a/roles/pagure/fedmsg/files/selinux/fedmsg.te b/roles/pagure/fedmsg/files/selinux/fedmsg.te
new file mode 100644
index 0000000..6ce38d4
--- /dev/null
+++ b/roles/pagure/fedmsg/files/selinux/fedmsg.te
@@ -0,0 +1,21 @@
+
+module fedmsg 1.1;
+
+require {
+ type anon_inodefs_t;
+ type httpd_t;
+ class file write;
+}
+
+require {
+ type ptmx_t;
+ type httpd_t;
+ class chr_file getattr;
+}
+
+#============= httpd_t ==============
+# For basic port binding
+allow httpd_t anon_inodefs_t:file write;
+# So that psutil can work from /etc/fedmsg.d/logging.py
+allow httpd_t ptmx_t:chr_file getattr;
+
diff --git a/roles/pagure/fedmsg/tasks/main.yml b/roles/pagure/fedmsg/tasks/main.yml
new file mode 100644
index 0000000..0bff6d1
--- /dev/null
+++ b/roles/pagure/fedmsg/tasks/main.yml
@@ -0,0 +1,162 @@
+---
+# tasklist for setting up fedmsg
+# This is the base set of files needed for fedmsg
+
+- name: install needed packages
+ package: name={{ item }} state=present
+ with_items:
+ - fedmsg
+ - libsemanage-python
+ - python-psutil
+ tags:
+ - packages
+ - pagure
+ - pagure/fedmsg
+ when: ansible_distribution_major_version|int < 8
+
+- name: install needed packages
+ package: name={{ item }} state=present
+ with_items:
+ - fedmsg
+ - python3-libsemanage
+ - python3-psutil
+ tags:
+ - packages
+ - pagure
+ - pagure/fedmsg
+ when: ansible_distribution_major_version|int >= 8
+
+
+# We use setgid here so that the monitoring sockets created by fedmsg services
+# are accessible to the nrpe group.
+- name: create a /var/run/fedmsg dir with setgid for monitoring.
+ file: >
+ dest=/var/run/fedmsg
+ mode=2775
+ owner=fedmsg
+ group=nrpe
+ state=directory
+ tags:
+ - pagure
+ - pagure
+ - pagure/fedmsg
+
+- name: setup /etc/fedmsg.d directory
+ file: path=/etc/fedmsg.d owner=root group=root mode=0755 state=directory
+ tags:
+ - pagure
+ - pagure/fedmsg
+ - config
+
+# Any files that change need to restart any services that depend on them. A
+# trick here is that some hosts have an httpd that uses fedmsg, while others do
+# not. Some hosts have a fedmsg-hub that uses this config, while others do not.
+# Our handlers in handlers/restart_services.yml are smart enough to
+# *conditionally* restart these services, only if they are installed on the
+# system.
+- name: setup basic /etc/fedmsg.d/ contents
+ template: >
+ src="{{ item }}.j2"
+ dest="/etc/fedmsg.d/{{ item }}"
+ owner=root
+ group=root
+ mode=644
+ with_items:
+ - ssl.py
+ - relay.py
+ - logging.py
+ - base.py
+ tags:
+ - config
+ - fedmsgdconfig
+ - pagure
+ - pagure/fedmsg
+ notify:
+ - reload httpd
+ - restart fedmsg-relay
+
+- name: Remove unwanted files
+ file: dest=/etc/fedmsg.d/{{item}} state=absent
+ with_items:
+ - endpoints.py
+ tags:
+ - config
+ - fedmsgdconfig
+ - pagure
+ - pagure/fedmsg
+ notify:
+ - reload httpd
+ - restart fedmsg-relay
+
+- name: setup /etc/pki/fedmsg directory
+ file: path=/etc/pki/fedmsg owner=root group=root mode=0755 state=directory
+ tags:
+ - config
+ - pagure
+ - pagure/fedmsg
+
+- name: install fedmsg ca.cert
+ copy: >
+ src="{{ private }}/files/fedmsg-certs/keys/ca.crt"
+ dest=/etc/pki/fedmsg/ca.crt
+ owner=root
+ group=root
+ mode=0644
+ tags:
+ - config
+ - pagure
+ - pagure/fedmsg
+
+- name: fedmsg certs
+ copy: >
+ src="{{ private }}/files/fedmsg-certs/keys/{{item['service']}}-{{fedmsg_fqdn | default(inventory_hostname)}}.crt"
+ dest=/etc/pki/fedmsg/
+ mode=644
+ owner={{item['owner']}}
+ group={{item['group']}}
+ with_items:
+ - "{{ fedmsg_certs }}"
+ when: fedmsg_certs != []
+ tags:
+ - config
+ - pagure
+ - pagure/fedmsg
+
+- name: fedmsg keys
+ copy: >
+ src="{{ private }}/files/fedmsg-certs/keys/{{item['service']}}-{{fedmsg_fqdn | default(inventory_hostname)}}.key"
+ dest=/etc/pki/fedmsg/
+ mode=0640
+ owner={{item['owner']}}
+ group={{item['group']}}
+ with_items:
+ - "{{ fedmsg_certs }}"
+ when: fedmsg_certs != []
+ tags:
+ - config
+ - pagure
+ - pagure/fedmsg
+
+# Three tasks for handling our custom selinux module
+- name: ensure a directory exists for our custom selinux module
+ file: dest=/usr/local/share/fedmsg state=directory
+ tags:
+ - selinux
+ - pagure
+ - pagure/fedmsg
+
+- name: copy over our custom selinux module
+ copy: src=selinux/fedmsg.pp dest=/usr/local/share/fedmsg/fedmsg.pp
+ register: selinux_module
+ tags:
+ - selinux
+ - pagure
+ - pagure/fedmsg
+
+- name: install our custom selinux module
+ command: semodule -i /usr/local/share/fedmsg/fedmsg.pp
+ when: selinux_module is changed
+ tags:
+ - selinux
+ - pagure
+ - pagure/fedmsg
diff --git a/roles/pagure/fedmsg/templates/base.py.j2 b/roles/pagure/fedmsg/templates/base.py.j2
new file mode 100644
index 0000000..31a8e93
--- /dev/null
+++ b/roles/pagure/fedmsg/templates/base.py.j2
@@ -0,0 +1,68 @@
+config = dict(
+
+ # Tell every call to `fedmsg.publish` to use the relay
+ active=True,
+ cert_prefix="pagure",
+
+ topic_prefix="{{ fedmsg_prefix }}",
+ environment="{{ fedmsg_env }}",
+
+ # This used to be set to 1 for safety, but it turns out it was
+ # excessive. It is the number of seconds that fedmsg should sleep
+ # after it has initialized, but before it begins to try and send any
+ # messages. If set to a non-zero value, this will slow down one-off
+ # fedmsg scripts like the git post-receive hook and pkgdb2branch.
+ # If we are experiencing message-loss problems, one of the first things
+ # to try should be to turn this number up to a non-zero value. '1' should
+ # be more than sufficient.
+ post_init_sleep=0.4,
+
+ # This is the number of milliseconds to wait before timing out on
+ # connections.. notably to the fedmsg-relay in the event that it has
+ # crashed.
+ zmq_linger=2000,
+
+ # Default is 0
+ high_water_mark=0,
+ io_threads=1,
+
+ # We almost always want the fedmsg-hub to be sending messages with zmq as
+ # opposed to amqp or stomp. The only exception will be the bugzilla
+ # amqp<->zmq bridge service.
+ zmq_enabled=True,
+
+ # When subscribing to messages, we want to allow splats ('*') so we tell the
+ # hub to not be strict when comparing messages topics to subscription
+ # topics.
+ zmq_strict=False,
+
+ # See the following
+ # - http://tldp.org/HOWTO/TCP-Keepalive-HOWTO/overview.html
+ # - http://api.zeromq.org/3-2:zmq-setsockopt
+ zmq_tcp_keepalive=1,
+ zmq_tcp_keepalive_cnt=3,
+ zmq_tcp_keepalive_idle=60,
+ zmq_tcp_keepalive_intvl=5,
+)
+
+# This option adds an IPC socket by which we can monitor hub health.
+try:
+ import os
+ import psutil
+
+ pid = os.getpid()
+ proc = [p for p in psutil.process_iter() if p.pid == pid][0]
+
+ # proc.name is a method on modern versions of psutil.
+ name = proc.name
+ if callable(name):
+ name = name()
+
+ config['moksha.monitoring.socket'] = \
+ 'ipc:///var/run/fedmsg/monitoring-%s.socket' % name
+ config['moksha.monitoring.socket.mode'] = '770'
+except (OSError, ImportError):
+ # We run into issues when trying to import psutil from mod_wsgi on rhel7
+ # but this feature is of no concern in that context, so just fail quietly.
+ # https://github.com/jmflinuxtx/kerneltest-harness/pull/17#issuecomment-480...
+ pass
diff --git a/roles/pagure/fedmsg/templates/logging.py.j2 b/roles/pagure/fedmsg/templates/logging.py.j2
new file mode 100644
index 0000000..154c100
--- /dev/null
+++ b/roles/pagure/fedmsg/templates/logging.py.j2
@@ -0,0 +1,162 @@
+# Setup fedmsg logging.
+
+# All of these modules are just used by the ContextInjector below.
+import inspect
+import logging
+import os
+import socket
+import traceback
+
+psutil = None
+try:
+ import psutil
+except (OSError, ImportError):
+ # We run into issues when trying to import psutil from inside mod_wsgi on
+ # rhel7. If we hit that here, then just fail quietly.
+ # https://github.com/jmflinuxtx/kerneltest-harness/pull/17#issuecomment-480...
+ pass
+
+
+class ContextInjector(logging.Filter):
+ """ Logging filter that adds context to log records.
+
+ Filters are typically used to "filter" log records. They declare a filter
+ method that can return True or False. Only records with 'True' will
+ actually be logged.
+
+ Here, we somewhat abuse the concept of a filter. We always return true,
+ but we use the opportunity to hang important contextual information on the
+ log record to later be used by the logging Formatter. We don't normally
+ want to see all this stuff in normal log records, but we *do* want to see
+ it when we are emailed error messages. Seeing an error, but not knowing
+ which host it comes from, is not that useful.
+
+ http://docs.python.org/2/howto/logging-cookbook.html#filters-contextual
+ """
+
+ def filter(self, record):
+ current_process = ContextInjector.get_current_process()
+ current_hostname = socket.gethostname()
+
+ record.host = current_hostname
+ record.proc = current_process
+ record.pid = current_process.pid
+ record.proc_name = current_process.name
+ record.command_line = current_process.cmdline
+ # These are callabls on more modern versions of psutil.
+ if callable(record.proc_name):
+ record.proc_name = record.proc_name()
+ if callable(record.command_line):
+ record.command_line = record.command_line()
+ record.command_line = " ".join(record.command_line)
+ record.callstack = self.format_callstack()
+ return True
+
+ @staticmethod
+ def format_callstack():
+ for i, frame in enumerate(f[0] for f in inspect.stack()):
+ if not '__name__' in frame.f_globals:
+ continue
+ modname = frame.f_globals['__name__'].split('.')[0]
+ if modname != "logging":
+ break
+
+ def _format_frame(frame):
+ return ' File "%s", line %i in %s\n %s' % (frame)
+
+ stack = traceback.extract_stack()
+ stack = stack[:-i]
+ return "\n".join([_format_frame(frame) for frame in stack])
+
+ @staticmethod
+ def get_current_process():
+ mypid = os.getpid()
+
+ if not psutil:
+ raise OSError("Could not import psutil for %r" % mypid)
+
+ for proc in psutil.process_iter():
+ if proc.pid == mypid:
+ return proc
+
+ # This should be impossible.
+ raise ValueError("Could not find process %r" % mypid)
+
+ @classmethod
+ def __json__(cls):
+ """ We need to be jsonifiable for "fedmsg-config" """
+ return {'name': 'ContextInjector'}
+
+
+hefty_format = """Message
+-------
+[%(asctime)s][%(name)10s %(levelname)7s]
+%(message)s
+
+Process Details
+---------------
+host: %(host)s
+PID: %(pid)s
+name: %(proc_name)s
+command: %(command_line)s
+
+Callstack that lead to the logging statement
+--------------------------------------------
+%(callstack)s
+"""
+
+
+# See the following for constraints on this format http://bit.ly/Xn1WDn
+config = dict(
+ logging=dict(
+ version=1,
+ formatters=dict(
+ bare={
+ "datefmt": "%Y-%m-%d %H:%M:%S",
+ "format": "[%(asctime)s][%(name)10s %(levelname)7s] %(message)s"
+ },
+ hefty={
+ "datefmt": "%Y-%m-%d %H:%M:%S",
+ "format": hefty_format,
+ },
+ ),
+ filters=dict(
+ context={
+ # This "()" syntax in the stdlib doesn't seem to be documented
+ # anywhere. I had to read
+ # /usr/lib64/python2.7/logging/config.py to figure it out.
+ "()": ContextInjector,
+ },
+ ),
+ handlers=dict(
+ console={
+ "class": "logging.StreamHandler",
+ "formatter": "bare",
+ "level": "INFO",
+ "stream": "ext://sys.stdout",
+ },
+ mailer={
+ "class": "logging.handlers.SMTPHandler",
+ "formatter": "hefty",
+ "filters": ["context"],
+ "level": "ERROR",
+ "mailhost": "hv01.online.rpmfusion.net",
+ "fromaddr": "noreply(a)rpmfusion.org",
+ "toaddrs": ["sysadmin-datanommer-members(a)rpmfusion.org"],
+ "subject": "fedmsg error log (pagure)",
+ },
+ ),
+ loggers=dict(
+ fedmsg={
+ "level": "INFO",
+ "propagate": False,
+ "handlers": ["console", "mailer"],
+ },
+ moksha={
+ "level": "INFO",
+ "propagate": False,
+ "handlers": ["console", "mailer"],
+ },
+ ),
+ ),
+)
diff --git a/roles/pagure/fedmsg/templates/relay.py.j2 b/roles/pagure/fedmsg/templates/relay.py.j2
new file mode 100644
index 0000000..cd620d9
--- /dev/null
+++ b/roles/pagure/fedmsg/templates/relay.py.j2
@@ -0,0 +1,16 @@
+config = dict(
+ endpoints={
+ # This is the output side of the relay to which all other
+ # services can listen.
+ "relay_outbound": [
+ # Messages emerge here
+ #"tcp://pagure.io:9940",
+ "tcp://{{inventory_hostname}}:9940",
+ ],
+ },
+
+ # wsgi scripts on the frontend talk back here
+ relay_inbound=[
+ "tcp://{{inventory_hostname}}:9941",
+ ],
+)
diff --git a/roles/pagure/fedmsg/templates/ssl.py.j2 b/roles/pagure/fedmsg/templates/ssl.py.j2
new file mode 100644
index 0000000..224b23a
--- /dev/null
+++ b/roles/pagure/fedmsg/templates/ssl.py.j2
@@ -0,0 +1,16 @@
+
+config = dict(
+ sign_messages=True,
+ validate_signatures=True,
+ ssldir="/etc/pki/fedmsg",
+
+ crl_location="https://rpmfusion.org/fedmsg/crl.pem",
+ crl_cache="/var/run/fedmsg/crl.pem",
+ crl_cache_expiry=86400, # Daily
+
+ certnames=dict([
+ ("shell.{{inventory_hostname_short}}", "shell-{{inventory_hostname}}"),
+ ("pagure.{{inventory_hostname_short}}", "pagure-{{inventory_hostname}}"),
+ ]),
+)
+
diff --git a/roles/pagure/frontend/files/aliases b/roles/pagure/frontend/files/aliases
new file mode 100644
index 0000000..aba690b
--- /dev/null
+++ b/roles/pagure/frontend/files/aliases
@@ -0,0 +1,93 @@
+#
+# Aliases in this file will NOT be expanded in the header from
+# Mail, but WILL be visible over networks or from /bin/mail.
+#
+# >>>>>>>>>> The program "newaliases" must be run after
+# >> NOTE >> this file is updated for any changes to
+# >>>>>>>>>> show through to sendmail.
+#
+
+# Basic system aliases -- these MUST be present.
+mailer-daemon: postmaster
+admin: postmaster
+hostmaster: postmaster
+postmaster: sysadmin-main(a)rpmfusion.org
+
+# General redirections for pseudo accounts.
+bin: root
+daemon: root
+adm: root
+lp: root
+sync: root
+shutdown: root
+halt: root
+mail: root
+#news: root
+uucp: root
+operator: root
+games: root
+gopher: root
+ftp: root
+#nobody: root
+radiusd: root
+nut: root
+dbus: root
+vcsa: root
+canna: root
+wnn: root
+rpm: root
+nscd: root
+pcap: root
+apache: root
+webalizer: root
+dovecot: root
+fax: root
+quagga: root
+radvd: root
+pvm: root
+amanda: root
+privoxy: root
+ident: root
+named: root
+xfs: root
+gdm: root
+mailnull: root
+postgres: root
+sshd: root
+smmsp: root
+postfix: root
+netdump: root
+ldap: root
+squid: root
+ntp: root
+mysql: root
+desktop: root
+rpcuser: root
+rpc: root
+nfsnobody: root
+notifications: root
+
+ingres: root
+system: root
+toor: root
+manager: root
+dumper: root
+abuse: root
+nagios: root
+
+newsadm: news
+newsadmin: news
+usenet: news
+ftpadm: ftp
+ftpadmin: ftp
+ftp-adm: ftp
+ftp-admin: ftp
+
+# trap decode to catch security attacks
+decode: root
+
+# Person who should get root's mail
+root: sysadmin-main
+
+pagure: /dev/null
+reply: /dev/null
diff --git a/roles/pagure/frontend/files/backup-database b/roles/pagure/frontend/files/backup-database
new file mode 100644
index 0000000..3f6e7d8
--- /dev/null
+++ b/roles/pagure/frontend/files/backup-database
@@ -0,0 +1,10 @@
+#!/bin/bash
+# Backup a database *locally* to /backups/.
+
+DB=$1
+
+# Make our latest backup
+/usr/bin/pg_dump -C $DB | /usr/bin/xz > /backups/$DB-$(date +%F).dump.xz
+
+# Also, delete the backup from a few days ago.
+rm -f /backups/$DB-$(date --date="3 days ago" +%F).dump.xz
diff --git a/roles/pagure/frontend/files/pagure_ev.service b/roles/pagure/frontend/files/pagure_ev.service
new file mode 100644
index 0000000..f194b1b
--- /dev/null
+++ b/roles/pagure/frontend/files/pagure_ev.service
@@ -0,0 +1,15 @@
+[Unit]
+Description=Pagure EventSource server (Allowing live refresh of the pages supporting it)
+After=redis.target
+Documentation=https://pagure.io/pagure
+
+[Service]
+ExecStart=/usr/libexec/pagure-ev/pagure_stream_server.py
+Type=simple
+User=git
+Group=git
+Restart=on-failure
+LimitNOFILE=40960
+
+[Install]
+WantedBy=multi-user.target
diff --git a/roles/pagure/frontend/files/pagure_fast_worker.service b/roles/pagure/frontend/files/pagure_fast_worker.service
new file mode 100644
index 0000000..5b97b84
--- /dev/null
+++ b/roles/pagure/frontend/files/pagure_fast_worker.service
@@ -0,0 +1,15 @@
+[Unit]
+Description=Pagure worker for backend git interaction
+After=redis.target
+Documentation=https://pagure.io/pagure
+
+[Service]
+ExecStart=/usr/bin/celery worker -A pagure.lib.tasks --loglevel=info -Q fast_workers -c 5
+Environment="PAGURE_CONFIG=/etc/pagure/pagure.cfg"
+Type=simple
+User=git
+Restart=on-failure
+
+[Install]
+WantedBy=multi-user.target
+
diff --git a/roles/pagure/frontend/files/pagure_medium_worker.service b/roles/pagure/frontend/files/pagure_medium_worker.service
new file mode 100644
index 0000000..799235a
--- /dev/null
+++ b/roles/pagure/frontend/files/pagure_medium_worker.service
@@ -0,0 +1,15 @@
+[Unit]
+Description=Pagure worker for backend git interaction
+After=redis.target
+Documentation=https://pagure.io/pagure
+
+[Service]
+ExecStart=/usr/bin/celery worker -A pagure.lib.tasks --loglevel=info -Q medium_workers -c 5
+Environment="PAGURE_CONFIG=/etc/pagure/pagure.cfg"
+Type=simple
+User=git
+Restart=on-failure
+
+[Install]
+WantedBy=multi-user.target
+
diff --git a/roles/pagure/frontend/files/pagure_mirror.service b/roles/pagure/frontend/files/pagure_mirror.service
new file mode 100644
index 0000000..786eee8
--- /dev/null
+++ b/roles/pagure/frontend/files/pagure_mirror.service
@@ -0,0 +1,18 @@
+# This is a systemd's service file for the mirroring service, if you change
+# the default value of the CI_CELERY_QUEUE configuration key, do not
+# forget to edit it in the ExecStart line below
+
+[Unit]
+Description=Pagure service mirroring projects outside of pagure that asked for it
+After=redis.target
+Documentation=https://pagure.io/pagure
+
+[Service]
+ExecStart=/usr/bin/celery worker -A pagure.lib.tasks_mirror --loglevel=info -Q pagure_mirror
+Environment="PAGURE_CONFIG=/etc/pagure/pagure.cfg"
+Type=simple
+User=paguremirroring
+Group=paguremirroring
+Restart=on-failure
+
+[Install]
diff --git a/roles/pagure/frontend/files/pagure_sar.py b/roles/pagure/frontend/files/pagure_sar.py
new file mode 100644
index 0000000..dbe10cd
--- /dev/null
+++ b/roles/pagure/frontend/files/pagure_sar.py
@@ -0,0 +1,161 @@
+#!/usr/bin/python
+
+from __future__ import unicode_literals, print_function
+
+import json
+import os
+import sys
+
+import sqlalchemy
+
+import pagure.config
+import pagure.lib.query
+import pagure.lib.model_base
+from pagure.lib import model
+
+
+if 'PAGURE_CONFIG' not in os.environ \
+ and os.path.exists('/etc/pagure/pagure.cfg'):
+ os.environ['PAGURE_CONFIG'] = '/etc/pagure/pagure.cfg'
+
+
+_config = pagure.config.reload_config()
+session = pagure.lib.model_base.create_session(_config['DB_URL'])
+
+
+def get_issue_users(session, user_id):
+ ''' Return all pagure.lib.model.Issue related to the usernames provided
+ '''
+ query1 = session.query(
+ model.Issue.uid
+ ).filter(
+ sqlalchemy.or_(
+ model.Issue.assignee_id == user_id,
+ model.Issue.user_id == user_id
+ )
+ )
+ query2 = session.query(
+ model.Issue.uid
+ ).filter(
+ model.Issue.uid == model.IssueComment.issue_uid
+ ).filter(
+ model.IssueComment.user_id == user_id
+ )
+
+ query = session.query(
+ model.Issue
+ ).filter(
+ sqlalchemy.or_(
+ model.Issue.uid.in_(query1.subquery()),
+ model.Issue.uid.in_(query2.subquery())
+ )
+ ).order_by(
+ model.Issue.date_created
+ )
+
+ return query.all()
+
+
+def get_pr_users(session, user_id):
+ ''' Return all pagure.lib.model.PullRequest related to the usernames provided
+ '''
+ query1 = session.query(
+ model.PullRequest.uid
+ ).filter(
+ sqlalchemy.or_(
+ model.PullRequest.assignee_id == user_id,
+ model.PullRequest.user_id == user_id
+ )
+ )
+ query2 = session.query(
+ model.PullRequest.uid
+ ).filter(
+ model.PullRequest.uid == model.PullRequestComment.pull_request_uid
+ ).filter(
+ model.PullRequestComment.user_id == user_id
+ )
+
+ query = session.query(
+ model.PullRequest
+ ).filter(
+ sqlalchemy.or_(
+ model.PullRequest.uid.in_(query1.subquery()),
+ model.PullRequest.uid.in_(query2.subquery())
+ )
+ ).order_by(
+ model.PullRequest.date_created
+ )
+
+ return query.all()
+
+
+def main():
+ ''' Prints out all the pagure project and comment related to the username
+ specified in the SAR_USERNAME environment variable or the email
+ specified in the SAR_EMAIL environment variable..
+ '''
+
+ username = os.getenv('SAR_USERNAME')
+ email = os.getenv('SAR_EMAIL')
+
+ users = []
+ if username:
+ users.append(pagure.lib.query.search_user(session, username=username))
+ if email:
+ user_email = pagure.lib.query.search_user(session, email=email)
+ if user_email not in users:
+ users.append(user_email)
+
+ output = {}
+
+ for user in users:
+ if not user:
+ continue
+
+ temp = {}
+ temp['user_info'] = user.to_json(public=False)
+
+ projects = pagure.lib.query.search_projects(session, user.username)
+ projects = [
+ project.to_json()
+ for project in projects
+ ]
+ temp['projects'] = projects
+
+ issues = get_issue_users(session, user.id)
+ issues_json = []
+ for issue in issues:
+ tmp = issue.to_json()
+ comments = []
+ for comment in tmp['comments']:
+ if comment['user']['name'] != username:
+ continue
+ comments.append(comment)
+ tmp['comments'] = comments
+ issues_json.append(tmp)
+ temp['issues'] = issues_json
+
+ prs = get_pr_users(session, user.id)
+ prs_json = []
+ for pr in prs:
+ tmp = pr.to_json()
+ comments = []
+ for comment in tmp['comments']:
+ if comment['user']['name'] != username:
+ continue
+ comments.append(comment)
+ tmp['comments'] = comments
+ prs_json.append(tmp)
+ temp['pull_requests'] = prs_json
+
+ output[user.username] = temp
+
+ session.remove()
+
+ print(json.dumps(
+ output, sort_keys=True, indent=4, separators=(',', ': ')
+ ).encode('utf-8'))
+
+
+if __name__ == '__main__':
+ sys.exit(main())
diff --git a/roles/pagure/frontend/files/pagure_slow_worker.service b/roles/pagure/frontend/files/pagure_slow_worker.service
new file mode 100644
index 0000000..17621ab
--- /dev/null
+++ b/roles/pagure/frontend/files/pagure_slow_worker.service
@@ -0,0 +1,15 @@
+[Unit]
+Description=Pagure worker for backend git interaction
+After=redis.target
+Documentation=https://pagure.io/pagure
+
+[Service]
+ExecStart=/usr/bin/celery worker -A pagure.lib.tasks --loglevel=info -Q slow_workers -c 5
+Environment="PAGURE_CONFIG=/etc/pagure/pagure.cfg"
+Type=simple
+User=git
+Restart=on-failure
+
+[Install]
+WantedBy=multi-user.target
+
diff --git a/roles/pagure/frontend/files/pg_hba.conf b/roles/pagure/frontend/files/pg_hba.conf
new file mode 100644
index 0000000..20db085
--- /dev/null
+++ b/roles/pagure/frontend/files/pg_hba.conf
@@ -0,0 +1,77 @@
+# PostgreSQL Client Authentication Configuration File
+# ===================================================
+#
+# Refer to the PostgreSQL Administrator's Guide, chapter "Client
+# Authentication" for a complete description. A short synopsis
+# follows.
+#
+# This file controls: which hosts are allowed to connect, how clients
+# are authenticated, which PostgreSQL user names they can use, which
+# databases they can access. Records take one of these forms:
+#
+# local DATABASE USER METHOD [OPTION]
+# host DATABASE USER CIDR-ADDRESS METHOD [OPTION]
+# hostssl DATABASE USER CIDR-ADDRESS METHOD [OPTION]
+# hostnossl DATABASE USER CIDR-ADDRESS METHOD [OPTION]
+#
+# (The uppercase items must be replaced by actual values.)
+#
+# The first field is the connection type: "local" is a Unix-domain socket,
+# "host" is either a plain or SSL-encrypted TCP/IP socket, "hostssl" is an
+# SSL-encrypted TCP/IP socket, and "hostnossl" is a plain TCP/IP socket.
+#
+# DATABASE can be "all", "sameuser", "samerole", a database name, or
+# a comma-separated list thereof.
+#
+# USER can be "all", a user name, a group name prefixed with "+", or
+# a comma-separated list thereof. In both the DATABASE and USER fields
+# you can also write a file name prefixed with "@" to include names from
+# a separate file.
+#
+# CIDR-ADDRESS specifies the set of hosts the record matches.
+# It is made up of an IP address and a CIDR mask that is an integer
+# (between 0 and 32 (IPv4) or 128 (IPv6) inclusive) that specifies
+# the number of significant bits in the mask. Alternatively, you can write
+# an IP address and netmask in separate columns to specify the set of hosts.
+#
+# METHOD can be "trust", "reject", "md5", "crypt", "password",
+# "krb5", "ident", or "pam". Note that "password" sends passwords
+# in clear text; "md5" is preferred since it sends encrypted passwords.
+#
+# OPTION is the ident map or the name of the PAM service, depending on METHOD.
+#
+# Database and user names containing spaces, commas, quotes and other special
+# characters must be quoted. Quoting one of the keywords "all", "sameuser" or
+# "samerole" makes the name lose its special character, and just match a
+# database or username with that name.
+#
+# This file is read on server startup and when the postmaster receives
+# a SIGHUP signal. If you edit the file on a running system, you have
+# to SIGHUP the postmaster for the changes to take effect. You can use
+# "pg_ctl reload" to do that.
+
+# Put your actual configuration here
+# ----------------------------------
+#
+# If you want to allow non-local connections, you need to add more
+# "host" records. In that case you will also need to make PostgreSQL listen
+# on a non-local interface via the listen_addresses configuration parameter,
+# or via the -i or -h command line switches.
+#
+
+#@authcomment@
+
+# TYPE DATABASE USER CIDR-ADDRESS METHOD
+
+#@remove-line-for-nolocal@# "local" is for Unix domain socket connections only
+#@remove-line-for-nolocal@local all all @authmethod@
+# IPv4 local connections:
+#host all all 127.0.0.1/32 @authmethod@
+# IPv6 local connections:
+#host all all ::1/128 @authmethod@
+
+local all all ident
+host all all 0.0.0.0 0.0.0.0 md5
+# Note, I can't think of a reason to make this more restrictive than ipv4 but
+# only fakefas needs it so far
+host all all ::1/128 md5
diff --git a/roles/pagure/frontend/files/selinux/pagure.fc b/roles/pagure/frontend/files/selinux/pagure.fc
new file mode 100644
index 0000000..e69de29
diff --git a/roles/pagure/frontend/files/selinux/pagure.if b/roles/pagure/frontend/files/selinux/pagure.if
new file mode 100644
index 0000000..3eb6a30
--- /dev/null
+++ b/roles/pagure/frontend/files/selinux/pagure.if
@@ -0,0 +1 @@
+## <summary></summary>
diff --git a/roles/pagure/frontend/files/selinux/pagure.pp b/roles/pagure/frontend/files/selinux/pagure.pp
new file mode 100644
index 0000000..a6248e7
Binary files /dev/null and b/roles/pagure/frontend/files/selinux/pagure.pp differ
diff --git a/roles/pagure/frontend/files/selinux/pagure.te b/roles/pagure/frontend/files/selinux/pagure.te
new file mode 100644
index 0000000..d661e61
--- /dev/null
+++ b/roles/pagure/frontend/files/selinux/pagure.te
@@ -0,0 +1,11 @@
+module pagure 1.0;
+
+require {
+ type httpd_t;
+ type gitosis_var_lib_t;
+ class dir { add_name remove_name write };
+ class file { create link setattr unlink write };
+}
+
+allow httpd_t gitosis_var_lib_t:dir { add_name remove_name write };
+allow httpd_t gitosis_var_lib_t:file { create link setattr unlink write };
diff --git a/roles/pagure/frontend/files/stunnel.service b/roles/pagure/frontend/files/stunnel.service
new file mode 100644
index 0000000..1e9f492
--- /dev/null
+++ b/roles/pagure/frontend/files/stunnel.service
@@ -0,0 +1,15 @@
+[Unit]
+Description=stunnel
+After=network.target
+Documentation=https://infrastructure.fedoraproject.org/infra/docs/fedmsg-websocket.txt
+
+[Service]
+ExecStart=/usr/bin/stunnel /etc/stunnel/stunnel.conf
+Type=forking
+User=root
+Group=root
+Restart=on-failure
+LimitNOFILE=40960
+
+[Install]
+WantedBy=multi-user.target
diff --git a/roles/pagure/frontend/files/syslog-logrotate b/roles/pagure/frontend/files/syslog-logrotate
new file mode 100644
index 0000000..7028290
--- /dev/null
+++ b/roles/pagure/frontend/files/syslog-logrotate
@@ -0,0 +1,20 @@
+/var/log/cron
+/var/log/maillog
+/var/log/messages
+/var/log/secure
+/var/log/spooler
+{
+ sharedscripts
+ postrotate
+ /bin/kill -HUP `cat /var/run/syslogd.pid 2> /dev/null` 2> /dev/null || true
+ endscript
+ daily
+ rotate 7
+ missingok
+ ifempty
+ compress
+ compresscmd /usr/bin/xz
+ uncompresscmd /usr/bin/xz
+ compressext .xz
+ dateext
+}
diff --git a/roles/pagure/frontend/handlers/main.yml b/roles/pagure/frontend/handlers/main.yml
new file mode 100644
index 0000000..62b144e
--- /dev/null
+++ b/roles/pagure/frontend/handlers/main.yml
@@ -0,0 +1,3 @@
+---
+- name: restart pagure_milter
+ service: name=pagure_milter state=restarted
diff --git a/roles/pagure/frontend/tasks/main.yml b/roles/pagure/frontend/tasks/main.yml
new file mode 100644
index 0000000..ab2aa4b
--- /dev/null
+++ b/roles/pagure/frontend/tasks/main.yml
@@ -0,0 +1,598 @@
+---
+# Configuration for the pagure webapp
+
+- name: install needed packages
+ package: name={{ item }} state=present
+ with_items:
+ - pagure
+ - pagure-ci
+ - pagure-ev
+ - pagure-loadjson
+ - pagure-logcom
+ - pagure-milters
+ - pagure-webhook
+ - python-psycopg2
+ - python2-pygments2
+ - redis
+ - libsemanage-python
+ - mod_ssl
+ - stunnel
+ # Use haveged to ensure the server keeps some entropy
+ - haveged
+ when: env != 'pagure-staging'
+ tags:
+ - pagure
+ - packages
+
+- name: install needed packages
+ package: name={{ item }} state=present
+ with_items:
+ - pagure
+ - pagure-ci
+ - pagure-ev
+ - pagure-loadjson
+ - pagure-logcom
+ - pagure-milters
+ - pagure-webhook
+ - python3-psycopg2
+ - python3-pygments
+ - redis
+ - python3-libsemanage
+ - mod_ssl
+ - stunnel
+ # Use haveged to ensure the server keeps some entropy
+ - haveged
+ when: env == 'pagure-staging'
+ tags:
+ - pagure
+ - packages
+
+- name: install needed packages
+ package: name={{ item }} state=present
+ when: env == 'pagure-staging'
+ with_items:
+ - pagure-theme-pagureio
+ tags:
+ - pagure
+ - packages
+
+- name: Initialize postgres if necessary
+ command: /usr/bin/postgresql-setup initdb
+ creates=/var/lib/pgsql/data
+ notify:
+ - restart postgresql
+ tags:
+ - pagure
+
+- name: Put in robots.txt
+ template: src=robots.txt.j2 dest=/var/www/html/robots.txt
+ tags:
+ - pagure
+
+
+- name: Create the "git" user
+ command: useradd --move-home --login git --home /srv/git/
+ creates=/srv/git/
+ when: env != 'pagure-staging'
+ tags:
+ - pagure
+
+- name: Create the "git" user
+ command: useradd --create-home --home-dir=/srv/git/ git
+ creates=/srv/git/
+ when: env == 'pagure-staging'
+ tags:
+ - pagure
+
+- name: create the /attachments folder
+ file: state=directory
+ path=/srv/attachments
+ owner=git group=git mode=0775
+ tags:
+ - pagure
+
+# This now fails when there are broken symlinks. Let's figure out a better way to do this - kevin
+#- name: Adjust owner of /srv/git
+# file: name=/srv/git state=directory recurse=yes owner=git group=git
+# tags:
+# - gitolite
+
+- name: create all the directories where we store the git repos
+ file: state=directory
+ path={{ item }}
+ owner=git group=git mode=0775
+ with_items:
+ - /srv/git/repositories/
+ - /srv/git/repositories/forks
+ - /srv/git/repositories/docs
+ - /srv/git/repositories/tickets
+ - /srv/git/repositories/requests
+ - /srv/git/remotes
+ tags:
+ - pagure
+
+- name: create the /srv/tmp folder where to clone repos
+ file: state=directory
+ path=/srv/tmp
+ owner=git group=git mode=0775
+ tags:
+ - pagure
+
+# Set things up for the mirroring feature
+
+- name: create the `paguremirroring` group
+ group:
+ name: paguremirroring
+ state: present
+ tags:
+ - pagure
+ - mirror
+
+- name: create the `paguremirroring` user
+ user:
+ name: paguremirroring
+ group: paguremirroring
+ groups: paguremirroring,git
+ shell: /bin/nologin
+ home: /srv/mirror
+ tags:
+ - pagure
+ - mirror
+
+# We need the SSL certs early on
+
+- name: Install the SSL cert so that we can use https
+ copy: >
+ src={{ private}}/files/httpd/{{ item }} dest=/etc/pki/tls/certs/{{ item }}
+ owner=root group=root mode=0600
+ notify: restart stunnel
+ with_items:
+ - pagure.io.cert
+ - pagure.io.key
+ - pagure.io.intermediate.cert
+ tags:
+ - config
+ - pagure
+ - httpd/certificate
+
+# Set-up postfix and the milter for postfix
+
+- name: Add the /etc/aliases file
+ copy: src=aliases dest=/etc/aliases owner=root mode=644
+ tags:
+ - config
+ - pagure
+ - postfix
+ notify:
+ - restart postfix
+ - restart pagure_milter
+
+# Override pagure_ev systemd service file
+
+- name: install pagure_ev service definition
+ copy: src=pagure_ev.service
+ dest=/usr/lib/systemd/system/pagure_ev.service
+ owner=root group=root mode=0644
+ notify:
+ - reload systemd
+ - restart pagure_ev
+ tags:
+ - pagure
+ - pagure_ev
+
+# Set-up stunnel for the event source server
+
+- name: install stunnel service definition
+ copy: src=stunnel.service
+ dest=/usr/lib/systemd/system/stunnel.service
+ owner=root group=root mode=0644
+ notify:
+ - reload systemd
+ - restart stunnel
+ tags:
+ - pagure
+ - stunnel
+
+- name: ensure old stunnel init file is gone
+ file: dest=/etc/init.d/stunnel/stunnel.init state=absent
+ tags:
+ - pagure
+ - stunnel
+ - config
+
+- name: make a bundle file of the cert and intermediate for stunnel
+ shell: cat /etc/pki/tls/certs/pagure.io.cert /etc/pki/tls/certs/pagure.io.intermediate.cert > /etc/pki/tls/certs/pagure.io.bundle.cert creates=/etc/pki/tls/certs/pagure.io.bundle.cert
+ tags:
+ - pagure
+ - stunnel
+ - config
+ when: env != 'pagure-staging'
+
+- name: make a bundle file of the cert and intermediate for stunnel (stg)
+ shell: cat /etc/pki/tls/certs/stg.pagure.io.cert /etc/pki/tls/certs/stg.pagure.io.intermediate.cert > /etc/pki/tls/certs/stg.pagure.io.bundle.cert creates=/etc/pki/tls/certs/stg.pagure.io.bundle.cert
+ tags:
+ - pagure
+ - stunnel
+ - config
+ when: env == 'pagure-staging'
+
+- name: install stunnel.conf
+ template: src={{ item.file }}
+ dest={{ item.dest }}
+ owner=root group=root mode=0600
+ with_items:
+ - { file: stunnel-conf.j2, dest: /etc/stunnel/stunnel.conf }
+ notify: restart stunnel
+ tags:
+ - pagure
+ - stunnel
+ - config
+
+- name: Add the different service files for the different services
+ copy: src={{ item }}.service
+ dest=/etc/systemd/system/{{ item }}.service
+ owner=root group=root mode=0755
+ with_items:
+ - pagure_fast_worker
+ - pagure_medium_worker
+ - pagure_slow_worker
+ - pagure_mirror
+ notify:
+ - reload systemd
+ tags:
+ - pagure
+
+# setup fedora-messaging
+
+- name: install fedora-messaging as a dependency
+ package: name={{ item }} state=present
+ with_items:
+ - python2-fedora-messaging
+ when: env != 'pagure-staging'
+ tags:
+ - pagure
+ - fedora-messaging
+
+- name: install fedora-messaging as a dependency
+ package: name={{ item }} state=present
+ with_items:
+ - python3-fedora-messaging
+ when: env == 'pagure-staging'
+ tags:
+ - pagure
+ - fedora-messaging
+
+- name: create the config folder for fedora-messaging
+ file: path=/etc/fedora-messaging/ owner=root group=root mode=0755 state=directory
+ tags:
+ - pagure
+ - fedora-messaging
+
+- name: install the configuration file for fedora-messaging
+ template:
+ src=fedora-messaging.toml
+ dest=/etc/fedora-messaging/config.toml
+ tags:
+ - pagure
+ - fedora-messaging
+
+- name: create folder where we'll place the certs
+ file: path=/etc/pki/rabbitmq/pagurecert/ owner=root group=root mode=0755 state=directory
+ tags:
+ - pagure
+ - fedora-messaging
+
+- name: deploy pagure/rabbitmq certificate
+ copy: src={{ item.src }}
+ dest=/etc/pki/rabbitmq/pagurecert/{{ item.dest }}
+ owner={{ item.owner }} group={{ item.group}} mode={{ item.mode }}
+ when: env == 'pagure-staging'
+ with_items:
+ - src: "{{private}}/files/rabbitmq/staging/pki/issued/pagure.stg.crt"
+ dest: pagure.crt
+ owner: git
+ group: root
+ mode: "444"
+ - src: "{{private}}/files/rabbitmq/staging/pki/private/pagure.stg.key"
+ dest: pagure.key
+ owner: git
+ group: root
+ mode: "440"
+ - src: "{{private}}/files/rabbitmq/staging/pki/ca.crt"
+ dest: pagure.ca
+ owner: git
+ group: root
+ mode: "444"
+ tags:
+ - pagure
+ - fedora-messaging
+
+- name: deploy pagure/rabbitmq certificate
+ copy: src={{ item.src }}
+ dest=/etc/pki/rabbitmq/pagurecert/{{ item.dest }}
+ owner={{ item.owner }} group={{ item.group}} mode={{ item.mode }}
+ when: env != 'pagure-staging'
+ with_items:
+ - src: "{{private}}/files/rabbitmq/production/pki/issued/pagure.crt"
+ dest: pagure.crt
+ owner: git
+ group: root
+ mode: "444"
+ - src: "{{private}}/files/rabbitmq/production/pki/private/pagure.key"
+ dest: pagure.key
+ owner: git
+ group: root
+ mode: "440"
+ - src: "{{private}}/files/rabbitmq/production/pki/ca.crt"
+ dest: pagure.ca
+ owner: git
+ group: root
+ mode: "444"
+ tags:
+ - pagure
+ - fedora-messaging
+
+
+# Set-up Pagure
+
+- name: create the folders used for releases and archives
+ file: state=directory
+ path={{ item }}
+ owner=git group=git mode=0775
+ with_items:
+ - /var/www/releases
+ - /var/www/archives
+ tags:
+ - pagure
+ - web
+
+- name: copy sundry pagure configuration
+ template: src={{ item.file }}
+ dest={{ item.location }}/{{ item.file }}
+ owner=git group=postfix mode=0640
+ with_items:
+ - { file: pagure.cfg, location: /etc/pagure }
+ - { file: alembic.ini, location: /etc/pagure }
+ tags:
+ - config
+ - web
+ - pagure
+ notify:
+ - restart apache
+
+
+- name: create the database scheme
+ command: /usr/bin/python2 /usr/share/pagure/pagure_createdb.py
+ changed_when: "1 != 1"
+ environment:
+ PAGURE_CONFIG: /etc/pagure/pagure.cfg
+ when: env != 'pagure-staging'
+ tags:
+ - web
+ - pagure
+
+- name: create the database scheme
+ command: /usr/bin/python3 /usr/share/pagure/pagure_createdb.py
+ changed_when: "1 != 1"
+ environment:
+ PAGURE_CONFIG: /etc/pagure/pagure.cfg
+ when: env == 'pagure-staging'
+ tags:
+ - web
+ - pagure
+
+- name: Install the configuration file to activate https
+ template: src={{ item }} dest=/etc/httpd/conf.d/{{ item }}
+ owner=root group=root mode=0644
+ with_items:
+ - 0_pagure.conf
+ - securityheaders.conf
+ tags:
+ - files
+ - config
+ - pagure
+ - sslciphers
+ notify:
+ - restart apache
+
+- name: Install the wsgi file
+ template: src={{ item }}
+ dest=/var/www/{{ item }}
+ owner=git group=git mode=0644
+ with_items:
+ - pagure.wsgi
+ - docs_pagure.wsgi
+ tags:
+ - config
+ - web
+ - pagure
+ notify:
+ - restart apache
+
+- name: let paguremirroring read the pagure config
+ command: /usr/bin/setfacl -m user:paguremirroring:rx /etc/pagure/pagure.cfg
+ tags:
+ - pagure
+ - mirror
+
+- name: Add default facl so apache can read git repos
+ acl: default=yes etype=user entity=apache permissions="rx" name=/srv/git state=present
+ register: acl_updates
+ tags:
+ - pagure
+
+- name: Manually fix current default ACLs since Ansible doesnt know recursive acls
+ when: acl_updates.changed
+ command: /usr/bin/setfacl -Rdm user:apache:rx /srv/git
+ tags:
+ - pagure
+
+- name: Manually fix current ACLs since Ansible doesnt know recursive acls
+ when: acl_updates.changed
+ command: /usr/bin/setfacl -Rm user:apache:rx /srv/git
+ tags:
+ - pagure
+
+- name: check the selinux context of the git repo directory
+ command: matchpathcon /srv/git
+ register: distgitcontext
+ check_mode: no
+ changed_when: false
+ tags:
+ - config
+ - pagure
+ - selinux
+
+- name: set the SELinux policy for the distgit root directory
+ command: semanage fcontext -a -t gitosis_var_lib_t "/srv/git(/.*)?"
+ when: distgitcontext.stdout.find('gitosis_var_lib_t') == -1
+ tags:
+ - config
+ - pagure
+ - selinux
+
+- name: check the selinux context of the releases directory
+ command: matchpathcon /var/www/releases
+ register: distgitcontext
+ check_mode: no
+ changed_when: false
+ tags:
+ - config
+ - pagure
+ - selinux
+
+# Note: On Fedora it's httpd_sys_content_rw_t - Don't we love confusion?
+- name: set the SELinux policy for the releases directory
+ command: semanage fcontext -a -t httpd_sys_rw_content_t "/var/www/releases(/.*)?"
+ when: distgitcontext.stdout.find('httpd_sys_rw_content_t') == -1
+ tags:
+ - config
+ - pagure
+ - selinux
+
+- name: copy over our custom selinux module
+ copy: src=selinux/pagure.pp dest=/usr/local/share/pagure.pp
+ register: selinux_module
+ tags:
+ - pagure
+
+- name: install our custom selinux module
+ command: semodule -i /usr/local/share/pagure.pp
+ when: selinux_module is changed
+ tags:
+ - pagure
+
+- name: set sebooleans so pagure can talk to the network (db + redis)
+ seboolean: name=httpd_can_network_connect
+ state=true
+ persistent=true
+ tags:
+ - selinux
+ - web
+ - pagure
+
+- name: set sebooleans so apache can send emails
+ seboolean: name=httpd_can_sendmail
+ state=true
+ persistent=true
+ tags:
+ - selinux
+ - web
+ - pagure
+
+
+# Ensure all the services are up and running
+
+- name: Start and enable httpd, postfix, pagure_milter
+ service: name={{ item }} enabled=yes state=started
+ with_items:
+ - httpd
+ - postfix
+ - stunnel
+ - redis
+ - pagure_ev
+ - pagure_ci
+ - pagure_loadjson
+ - pagure_logcom
+ - pagure_milter
+ - pagure_webhook
+ - pagure_worker
+ - pagure_gitolite_worker
+ - pagure_fast_worker
+ - pagure_medium_worker
+ - pagure_slow_worker
+# - pagure_api_key_expire_mail
+# - pagure_api_key_expire_mail.timer
+ - pagure_mirror_project_in
+ - pagure_mirror_project_in.timer
+# - fedmsg-relay
+ - haveged
+ ignore_errors: true
+ tags:
+ - pagure
+ - service
+ - postfix
+
+- name: setup logrotate to our needs
+ copy: src="{{ files }}/httpd/httpd.logrotate" dest=/etc/logrotate.d/httpd
+ tags:
+ - config
+ - apache
+
+- name: Add SAR script for pagure
+ copy: src=pagure_sar.py dest=/usr/local/bin/pagure_sar.py owner=git mode=0700
+ tags:
+ - SAR
+ - GDPR
+ - pagure
+
+- name: override the default syslog logrotate file
+ copy: src=syslog-logrotate dest=/etc/logrotate.d/syslog
+ tags:
+ - pagure
+ - logrotate
+
+- name: Letsencrypt for releases.stg.pagure.org
+ include_role: name=letsencrypt
+ vars:
+ site_name: releases.stg.pagure.org
+ when: env == 'pagure-staging'
+ tags:
+ - pagure
+ - letsencrypt
+
+- name: Letsencrypt for docs.stg.pagure.org
+ include_role: name=letsencrypt
+ vars:
+ site_name: docs.stg.pagure.org
+ when: env == 'pagure-staging'
+ tags:
+ - pagure
+ - letsencrypt
+
+- name: Letsencrypt for stg.pagure.org
+ include_role: name=letsencrypt
+ vars:
+ site_name: stg.pagure.org
+ when: env == 'pagure-staging'
+ tags:
+ - pagure
+ - letsencrypt
+
+- name: Letsencrypt for stg.pagure.io
+ include_role: name=letsencrypt
+ vars:
+ site_name: stg.pagure.io
+ when: env == 'pagure-staging'
+ tags:
+ - pagure
+ - letsencrypt
+
+- name: Letsencrypt for pagure.org
+ include_role: name=letsencrypt
+ vars:
+ site_name: pagure.org
+ when: env != 'pagure-staging'
+ tags:
+ - pagure
+ - letsencrypt
diff --git a/roles/pagure/frontend/templates/0_pagure.conf b/roles/pagure/frontend/templates/0_pagure.conf
new file mode 100644
index 0000000..d14512f
--- /dev/null
+++ b/roles/pagure/frontend/templates/0_pagure.conf
@@ -0,0 +1,236 @@
+WSGISocketPrefix run/wsgi
+#WSGIRestrictStdout On
+WSGIRestrictSignal Off
+WSGIPythonOptimize 1
+WSGIPassAuthorization On
+WSGIDaemonProcess pagure user=git group=git maximum-requests=1000 display-name=pagure processes=6 threads=6 inactivity-timeout=300
+WSGIDaemonProcess paguredocs user=git group=git maximum-requests=1000 display-name=paguredocs processes=4 threads=4 inactivity-timeout=300
+
+## Redirects http -> https
+
+<VirtualHost *:80>
+{% if env == 'pagure-staging' %}
+ ServerName stg.pagure.io
+ ProxyPass "/.well-known/acme-challenge" "http://certgetter01/.well-known/acme-challenge"
+ Redirect permanent / https://stg.pagure.io/
+{% else %}
+ ServerName pagure.io
+ Redirect permanent / https://pagure.io/
+{% endif %}
+</VirtualHost>
+
+<VirtualHost *:80>
+{% if env == 'pagure-staging' %}
+ ServerName docs.stg.pagure.org
+ ProxyPass "/.well-known/acme-challenge" "http://certgetter01/.well-known/acme-challenge"
+ Redirect permanent / https://docs.stg.pagure.org/
+{% else %}
+ ServerName docs.pagure.org
+ Redirect permanent / https://docs.pagure.org/
+{% endif %}
+</VirtualHost>
+
+<VirtualHost *:80>
+{% if env == 'pagure-staging' %}
+ ServerName releases.stg.pagure.org
+ ProxyPass "/.well-known/acme-challenge" "http://certgetter01/.well-known/acme-challenge"
+ Redirect permanent / https://releases.stg.pagure.org/
+{% else %}
+ ServerName releases.pagure.org
+ Redirect permanent / https://releases.pagure.org/
+{% endif %}
+</VirtualHost>
+
+<VirtualHost *:80>
+{% if env == 'pagure-staging' %}
+ ServerName stg.pagure.org
+ ProxyPass "/.well-known/acme-challenge" "http://certgetter01/.well-known/acme-challenge"
+ Redirect permanent / https://releases.stg.pagure.org/
+{% else %}
+ ServerName pagure.org
+ #Redirect permanent / https://releases.pagure.org/
+ ProxyPass "/.well-known/acme-challenge" "http://certgetter01/.well-known/acme-challenge"
+{% endif %}
+
+# Added until we can get the cert out
+ DocumentRoot "/var/www/releases"
+
+ <Directory />
+ Options +Indexes
+ IndexOptions NameWidth=*
+ </Directory>
+
+</VirtualHost>
+
+
+
+## End of redirects http -> https
+
+
+<VirtualHost *:443>
+{% if env == 'pagure-staging' %}
+ ServerName stg.pagure.io
+{% else %}
+ ServerName pagure.io
+{% endif %}
+
+ Alias "/robots.txt" "/var/www/html/robots.txt"
+
+ WSGIScriptAlias / /var/www/pagure.wsgi
+
+ ServerAdmin admin(a)fedoraproject.org
+
+ SSLEngine on
+ SSLProtocol {{ ssl_protocols }}
+ SSLCipherSuite {{ ssl_ciphers }}
+ # Use secure TLSv1.1 and TLSv1.2 ciphers
+ Header always add Strict-Transport-Security "max-age=31536000; includeSubDomains; preload"
+
+ SSLCertificateFile /etc/pki/tls/certs/pagure.io.cert
+ SSLCertificateChainFile /etc/pki/tls/certs/pagure.io.intermediate.cert
+ SSLCertificateKeyFile /etc/pki/tls/certs/pagure.io.key
+
+{% if env == 'pagure-staging' %}
+ Alias /static /usr/lib/python3.6/site-packages/pagure/static/
+{% else %}
+ Alias /static /usr/lib/python2.7/site-packages/pagure/static/
+{% endif %}
+
+ <Location "/static/vendor/emojione/emojione.sprites.png">
+ ExpiresActive On
+ ExpiresDefault "access plus 1 week"
+ Header append Cache-Control "public"
+ </Location>
+
+ SetEnv GIT_PROJECT_ROOT /srv/git/repositories
+
+ <Location />
+ WSGIProcessGroup pagure
+ <IfModule mod_authz_core.c>
+ # Apache 2.4
+ Require all granted
+ </IfModule>
+ <IfModule !mod_authz_core.c>
+ # Apache 2.2
+ Order deny,allow
+ Allow from all
+ </IfModule>
+ </Location>
+
+ <Location /releases>
+ Redirect "/releases" https://releases.pagure.org
+ </Location>
+
+ <Location /apache-status>
+ SetHandler server-status
+ <RequireAny>
+ Require ip 127.0.0.1
+ Require ip ::1
+ Require host localhost
+ </RequireAny>
+ </Location>
+
+</VirtualHost>
+
+
+<VirtualHost *:443>
+{% if env == 'pagure-staging' %}
+ ServerName stg.pagure.org
+{% else %}
+ ServerName pagure.org
+{% endif %}
+
+ SSLEngine on
+ SSLProtocol {{ ssl_protocols }}
+ SSLCipherSuite {{ ssl_ciphers }}
+ # Use secure TLSv1.1 and TLSv1.2 ciphers
+ Header always add Strict-Transport-Security "max-age=31536000; includeSubDomains; preload"
+
+{% if env == 'pagure-staging' %}
+ SSLCertificateFile /etc/pki/tls/certs/stg.pagure.org.cert
+ SSLCertificateChainFile /etc/pki/tls/certs/stg.pagure.org.intermediate.cert
+ SSLCertificateKeyFile /etc/pki/tls/private/stg.pagure.org.key
+{% else %}
+ SSLCertificateFile /etc/pki/tls/certs/pagure.org.cert
+ SSLCertificateChainFile /etc/pki/tls/certs/pagure.org.intermediate.cert
+ SSLCertificateKeyFile /etc/pki/tls/private/pagure.org.key
+{% endif %}
+{% if env == 'pagure-staging' %}
+ Redirect permanent / https://stg.pagure.io/
+{% else %}
+ Redirect permanent / https://pagure.io/
+{% endif %}
+</VirtualHost>
+
+
+<VirtualHost *:443>
+{% if env == 'pagure-staging' %}
+ ServerName docs.stg.pagure.org
+{% else %}
+ ServerName docs.pagure.org
+{% endif %}
+
+ WSGIScriptAlias / /var/www/docs_pagure.wsgi
+
+ SSLEngine on
+ SSLProtocol {{ ssl_protocols }}
+ SSLCipherSuite {{ ssl_ciphers }}
+ # Use secure TLSv1.1 and TLSv1.2 ciphers
+ Header always add Strict-Transport-Security "max-age=31536000; includeSubDomains; preload"
+
+{% if env == 'pagure-staging' %}
+ SSLCertificateFile /etc/pki/tls/certs/docs.stg.pagure.org.cert
+ SSLCertificateChainFile /etc/pki/tls/certs/docs.stg.pagure.org.intermediate.cert
+ SSLCertificateKeyFile /etc/pki/tls/private/docs.stg.pagure.org.key
+{% else %}
+ SSLCertificateFile /etc/pki/tls/certs/pagure.io.cert
+ SSLCertificateChainFile /etc/pki/tls/certs/pagure.io.intermediate.cert
+ SSLCertificateKeyFile /etc/pki/tls/certs/pagure.io.key
+{% endif %}
+
+ Alias /static /usr/lib/python2.7/site-packages/pagure/static/
+
+ <Location "/static/vendor/emojione/emojione.sprites.png">
+ ExpiresActive On
+ ExpiresDefault "access plus 1 week"
+ Header append Cache-Control "public"
+ </Location>
+
+ <Location />
+ WSGIProcessGroup paguredocs
+ <IfModule mod_authz_core.c>
+ # Apache 2.4
+ Require all granted
+ </IfModule>
+ <IfModule !mod_authz_core.c>
+ # Apache 2.2
+ Order deny,allow
+ Allow from all
+ </IfModule>
+ </Location>
+</VirtualHost>
+
+<VirtualHost *:443>
+ DocumentRoot "/var/www/releases"
+{% if env == 'pagure-staging' %}
+ ServerName releases.stg.pagure.org
+ SSLEngine on
+ SSLCertificateFile /etc/pki/tls/certs/releases.stg.pagure.org.cert
+ SSLCertificateKeyFile /etc/pki/tls/private/releases.stg.pagure.org.key
+ SSLCertificateChainFile /etc/pki/tls/certs/releases.stg.pagure.org.intermediate.cert
+ SSLHonorCipherOrder On
+ SSLProtocol {{ ssl_protocols }}
+ SSLCipherSuite {{ ssl_ciphers }}
+{% else %}
+ ServerName releases.pagure.org
+{% endif %}
+
+ AddType application/octet-stream msi
+
+ <Directory />
+ Options +Indexes
+ IndexOptions NameWidth=*
+ </Directory>
+
+</VirtualHost>
+
diff --git a/roles/pagure/frontend/templates/alembic.ini b/roles/pagure/frontend/templates/alembic.ini
new file mode 100644
index 0000000..7daf38c
--- /dev/null
+++ b/roles/pagure/frontend/templates/alembic.ini
@@ -0,0 +1,50 @@
+# A generic, single database configuration.
+
+[alembic]
+# path to migration scripts
+script_location = /usr/share/pagure/alembic
+
+# template used to generate migration files
+# file_template = %%(rev)s_%%(slug)s
+
+# set to 'true' to run the environment during
+# the 'revision' command, regardless of autogenerate
+# revision_environment = false
+
+#sqlalchemy.url = postgresql://<%= pkgdb_app %>:<%= pkgdb_appPassword %>@db-pkgdb/pkgdb
+
+
+# Logging configuration
+[loggers]
+keys = root,sqlalchemy,alembic
+
+[handlers]
+keys = console
+
+[formatters]
+keys = generic
+
+[logger_root]
+level = WARN
+handlers = console
+qualname =
+
+[logger_sqlalchemy]
+level = WARN
+handlers =
+qualname = sqlalchemy.engine
+
+[logger_alembic]
+level = INFO
+handlers =
+qualname = alembic
+
+[handler_console]
+class = StreamHandler
+args = (sys.stderr,)
+level = NOTSET
+formatter = generic
+
+[formatter_generic]
+format = %(levelname)-5.5s [%(name)s] %(message)s
+datefmt = %H:%M:%S
diff --git a/roles/pagure/frontend/templates/docs_pagure.wsgi b/roles/pagure/frontend/templates/docs_pagure.wsgi
new file mode 100644
index 0000000..a9f8cea
--- /dev/null
+++ b/roles/pagure/frontend/templates/docs_pagure.wsgi
@@ -0,0 +1,22 @@
+#-*- coding: utf-8 -*-
+
+# The three lines below are required to run on EL6 as EL6 has
+# two possible versions of python-sqlalchemy and python-jinja2
+# These lines make sure the application uses the correct version.
+import __main__
+__main__.__requires__ = ['SQLAlchemy >= 0.8', 'jinja2 >= 2.4']
+import pkg_resources
+
+import os
+## Set the environment variable pointing to the configuration file
+os.environ['PAGURE_CONFIG'] = '/etc/pagure/pagure.cfg'
+
+## The following is only needed if you did not install pagure
+## as a python module (for example if you run it from a git clone).
+#import sys
+#sys.path.insert(0, '/path/to/pagure/')
+
+
+## The most important line to make the wsgi work
+from pagure.docs_server import APP as application
+#application.debug = True
diff --git a/roles/pagure/frontend/templates/fedora-messaging.toml b/roles/pagure/frontend/templates/fedora-messaging.toml
new file mode 100644
index 0000000..195a89d
--- /dev/null
+++ b/roles/pagure/frontend/templates/fedora-messaging.toml
@@ -0,0 +1,25 @@
+# A sample configuration for fedora-messaging. This file is in the TOML format.
+# For complete details on all configuration options, see the documentation.
+
+{% if env == "pagure-staging" %}
+amqp_url = "amqps://pagure.stg:@rabbitmq.stg.fedoraproject.org/%2Fpubsub"
+{% else %}
+amqp_url = "amqps://pagure:@rabbitmq.fedoraproject.org/%2Fpubsub"
+{% endif %}
+
+# The topic_prefix configuration value will add a prefix to the topics of every sent message.
+# This is used for migrating from fedmsg, and should not be used afterwards.
+{% if env == "pagure-staging" %}
+topic_prefix = "io.pagure.stg"
+{% else %}
+topic_prefix = "io.pagure.prod"
+{% endif %}
+
+[tls]
+ca_cert = "/etc/pki/rabbitmq/pagurecert/pagure.ca"
+keyfile = "/etc/pki/rabbitmq/pagurecert/pagure.key"
+certfile = "/etc/pki/rabbitmq/pagurecert/pagure.crt"
+
+[client_properties]
+app = "pagure.io"
+
diff --git a/roles/pagure/frontend/templates/gitolite.rc b/roles/pagure/frontend/templates/gitolite.rc
new file mode 100644
index 0000000..77eb32b
--- /dev/null
+++ b/roles/pagure/frontend/templates/gitolite.rc
@@ -0,0 +1,196 @@
+# configuration variables for gitolite
+
+# This file is in perl syntax. But you do NOT need to know perl to edit it --
+# just mind the commas, use single quotes unless you know what you're doing,
+# and make sure the brackets and braces stay matched up!
+
+# (Tip: perl allows a comma after the last item in a list also!)
+
+# HELP for commands can be had by running the command with "-h".
+
+# HELP for all the other FEATURES can be found in the documentation (look for
+# "list of non-core programs shipped with gitolite" in the master index) or
+# directly in the corresponding source file.
+
+%RC = (
+
+ # ------------------------------------------------------------------
+ HTTP_ANON_USER => 'ANONYMOUS_',
+
+ # default umask gives you perms of '0700'; see the rc file docs for
+ # how/why you might change this
+ UMASK => 0077,
+
+ # look for "git-config" in the documentation
+ GIT_CONFIG_KEYS => '',
+
+ # comment out if you don't need all the extra detail in the logfile
+ LOG_EXTRA => 1,
+ # syslog options
+ # 1. leave this section as is for normal gitolite logging
+ # 2. uncomment this line to log only to syslog:
+ # LOG_DEST => 'syslog',
+ # 3. uncomment this line to log to syslog and the normal gitolite log:
+ # LOG_DEST => 'syslog,normal',
+
+ # roles. add more roles (like MANAGER, TESTER, ...) here.
+ # WARNING: if you make changes to this hash, you MUST run 'gitolite
+ # compile' afterward, and possibly also 'gitolite trigger POST_COMPILE'
+ ROLES => {
+ READERS => 1,
+ WRITERS => 1,
+ },
+
+ # enable caching (currently only Redis). PLEASE RTFM BEFORE USING!!!
+ # CACHE => 'Redis',
+
+ # ------------------------------------------------------------------
+
+ # rc variables used by various features
+
+ # the 'info' command prints this as additional info, if it is set
+ # SITE_INFO => 'Please see http://blahblah/gitolite for more help',
+
+ # the CpuTime feature uses these
+ # display user, system, and elapsed times to user after each git operation
+ # DISPLAY_CPU_TIME => 1,
+ # display a warning if total CPU times (u, s, cu, cs) crosses this limit
+ # CPU_TIME_WARN_LIMIT => 0.1,
+
+ # the Mirroring feature needs this
+ # HOSTNAME => "foo",
+
+ # TTL for redis cache; PLEASE SEE DOCUMENTATION BEFORE UNCOMMENTING!
+ # CACHE_TTL => 600,
+
+ # ------------------------------------------------------------------
+
+ # suggested locations for site-local gitolite code (see cust.html)
+
+ # this one is managed directly on the server
+ # LOCAL_CODE => "$ENV{HOME}/local",
+
+ # or you can use this, which lets you put everything in a subdirectory
+ # called "local" in your gitolite-admin repo. For a SECURITY WARNING
+ # on this, see http://gitolite.com/gitolite/non-core.html#pushcode
+ # LOCAL_CODE => "$rc{GL_ADMIN_BASE}/local",
+
+ # ------------------------------------------------------------------
+
+ # List of commands and features to enable
+
+ ENABLE => [
+
+ # COMMANDS
+
+ # These are the commands enabled by default
+ 'help',
+ 'desc',
+ 'info',
+ 'perms',
+ 'writable',
+
+ # Uncomment or add new commands here.
+ # 'create',
+ # 'fork',
+ # 'mirror',
+ # 'readme',
+ # 'sskm',
+ # 'D',
+
+ # These FEATURES are enabled by default.
+
+ # essential (unless you're using smart-http mode)
+ 'ssh-authkeys',
+
+ # creates git-config entries from gitolite.conf file entries like 'config foo.bar = baz'
+ 'git-config',
+
+ # creates git-daemon-export-ok files; if you don't use git-daemon, comment this out
+ #'daemon',
+
+ # creates projects.list file; if you don't use gitweb, comment this out
+ #'gitweb',
+
+ # These FEATURES are disabled by default; uncomment to enable. If you
+ # need to add new ones, ask on the mailing list :-)
+
+ # user-visible behaviour
+
+ # prevent wild repos auto-create on fetch/clone
+ # 'no-create-on-read',
+ # no auto-create at all (don't forget to enable the 'create' command!)
+ # 'no-auto-create',
+
+ # access a repo by another (possibly legacy) name
+ # 'Alias',
+
+ # give some users direct shell access. See documentation in
+ # sts.html for details on the following two choices.
+ # "Shell $ENV{HOME}/.gitolite.shell-users",
+ # 'Shell alice bob',
+
+ # set default roles from lines like 'option default.roles-1 = ...', etc.
+ # 'set-default-roles',
+
+ # show more detailed messages on deny
+ # 'expand-deny-messages',
+
+ # show a message of the day
+ # 'Motd',
+
+ # system admin stuff
+
+ # enable mirroring (don't forget to set the HOSTNAME too!)
+ # 'Mirroring',
+
+ # allow people to submit pub files with more than one key in them
+ # 'ssh-authkeys-split',
+
+ # selective read control hack
+ # 'partial-copy',
+
+ # manage local, gitolite-controlled, copies of read-only upstream repos
+ # 'upstream',
+
+ # updates 'description' file instead of 'gitweb.description' config item
+ # 'cgit',
+
+ # allow repo-specific hooks to be added
+ # 'repo-specific-hooks',
+
+ # performance, logging, monitoring...
+
+ # be nice
+ # 'renice 10',
+
+ # log CPU times (user, system, cumulative user, cumulative system)
+ # 'CpuTime',
+
+ # syntactic_sugar for gitolite.conf and included files
+
+ # allow backslash-escaped continuation lines in gitolite.conf
+ # 'continuation-lines',
+
+ # create implicit user groups from directory names in keydir/
+ # 'keysubdirs-as-groups',
+
+ # allow simple line-oriented macros
+ # 'macros',
+
+ # Kindergarten mode
+
+ # disallow various things that sensible people shouldn't be doing anyway
+ # 'Kindergarten',
+ ],
+
+);
+
+# ------------------------------------------------------------------------------
+# per perl rules, this should be the last line in such a file:
+1;
+
+# Local variables:
+# mode: perl
+# End:
+# vim: set syn=perl:
diff --git a/roles/pagure/frontend/templates/pagure.cfg b/roles/pagure/frontend/templates/pagure.cfg
new file mode 100644
index 0000000..253d855
--- /dev/null
+++ b/roles/pagure/frontend/templates/pagure.cfg
@@ -0,0 +1,380 @@
+from datetime import timedelta
+
+### Set the time after which the admin session expires
+# There are two sessions on pagure, login that holds for 31 days and
+# the session defined here after which a user has to re-login.
+# This session is used when accessing all administrative parts of pagure
+# (ie: changing a project's or a user's settings)
+ADMIN_SESSION_LIFETIME = timedelta(minutes=20)
+
+# Make the CSRF token not-time limited, this way it is valid for the entire
+# duration of the session.
+WTF_CSRF_TIME_LIMIT=None
+
+### Secret key for the Flask application
+SECRET_KEY='{{ pagure_secret_key }}'
+SALT_EMAIL='{{ pagure_secret_salt_email }}'
+
+EMAIL_SEND = True
+
+# This is required so that login specifies https
+PREFERRED_URL_SCHEME='https'
+
+{% if env == 'pagure-staging' %}
+# OpenID server to use
+FAS_OPENID_ENDPOINT = 'https://id.stg.fedoraproject.org/openid/'
+{% endif %}
+
+### url to the database server:
+#DB_URL=mysql://user:pass@host/db_name
+#DB_URL=postgres://user:pass@host/db_name
+DB_URL = 'postgresql://{{ pagure_db_user }}:{{ pagure_db_pass }}@{{ pagure_db_host }}/{{ pagure_db_name }}'
+
+### The FAS group in which the admin of pagure are
+ADMIN_GROUP = ['sysadmin-main']
+
+# The publicly visible admin email address
+ADMIN_EMAIL = 'admin(a)fedoraproject.org'
+
+### The email address to which the flask.log will send the errors (tracebacks)
+EMAIL_ERROR = 'pingou(a)pingoured.fr'
+
+### Default SMTP server to use for sending emails
+SMTP_SERVER = 'localhost'
+
+### Email address used to send emails
+{% if env == 'pagure-staging' %}
+FROM_EMAIL = 'pagure(a)stg.pagure.io'
+DOMAIN_EMAIL_NOTIFICATIONS = 'stg.pagure.io'
+{% else %}
+FROM_EMAIL = 'pagure(a)pagure.io'
+DOMAIN_EMAIL_NOTIFICATIONS = 'pagure.io'
+{% endif %}
+
+### The URL at which the project is available.
+{% if env == 'pagure-staging' %}
+APP_URL = 'https://stg.pagure.io/'
+DOC_APP_URL = 'https://docs.stg.pagure.org'
+{% else %}
+APP_URL = 'https://pagure.io/'
+DOC_APP_URL = 'https://docs.pagure.org'
+{% endif %}
+
+### Datagrepper info for the user profile
+{% if env == 'pagure-staging' %}
+DATAGREPPER_URL = 'https://apps.stg.fedoraproject.org/datagrepper'
+{% else %}
+DATAGREPPER_URL = 'https://apps.fedoraproject.org/datagrepper'
+{% endif %}
+DATAGREPPER_CATEGORY = 'pagure'
+
+### The URL to use to clone git repositories.
+{% if env == 'pagure-staging' %}
+GIT_URL_SSH = 'ssh://git@stg.pagure.io/'
+GIT_URL_GIT = 'https://stg.pagure.io/'
+{% else %}
+GIT_URL_SSH = 'ssh://git@pagure.io/'
+GIT_URL_GIT = 'https://pagure.io/'
+{% endif %}
+
+### The IP addresses allowed for the internal endpoints
+{% if eth0_ipv6 is defined %}
+IP_ALLOWED_INTERNAL = ['127.0.0.1', 'localhost', '::1', '{{ eth0_ip }}' , '{{ eth0_ipv6 }}']
+{% elif eth0_ip is defined %}
+IP_ALLOWED_INTERNAL = ['127.0.0.1', 'localhost', '::1', '{{ eth0_ip }}']
+{% else %}
+IP_ALLOWED_INTERNAL = ['127.0.0.1', 'localhost', '::1']
+{% endif %}
+
+# Redis configuration
+{% if env == 'pagure-staging' %}
+EVENTSOURCE_SOURCE = 'https://stg.pagure.io:8088'
+{% else %}
+EVENTSOURCE_SOURCE = 'https://pagure.io:8088'
+{% endif %}
+REDIS_HOST = '0.0.0.0'
+REDIS_PORT = 6379
+REDIS_DB = 0
+
+EV_STATS_PORT = '8888'
+
+WEBHOOK = True
+
+### Folder containing to the git repos
+GIT_FOLDER = '/srv/git/repositories'
+
+### Folder containing the forks repos
+FORK_FOLDER = '/srv/git/repositories/forks'
+
+### Folder containing the docs repos
+DOCS_FOLDER = '/srv/git/repositories/docs'
+
+### Folder containing the pull-requests repos
+REQUESTS_FOLDER = '/srv/git/repositories/requests'
+
+### Folder containing the tickets repos
+TICKETS_FOLDER = '/srv/git/repositories/tickets'
+
+### Folder containing the clones of the remotes git repo
+REMOTE_GIT_FOLDER = '/srv/git/remotes'
+
+### Folder containing out-of-git attachments cache
+ATTACHMENTS_FOLDER = '/srv/attachments'
+
+### Configuration file for gitolite
+GITOLITE_CONFIG = '/srv/git/.gitolite/conf/gitolite.conf'
+
+### Path of the release folder
+{% if env == 'pagure-staging' %}
+UPLOAD_FOLDER_URL = 'https://releases.stg.pagure.org/'
+{% else %}
+UPLOAD_FOLDER_URL = 'https://releases.pagure.org/'
+{% endif %}
+UPLOAD_FOLDER_PATH = '/var/www/releases/'
+
+### Folder where are cached the archives
+ARCHIVE_FOLDER = '/var/www/archives/'
+
+
+### Home folder of the gitolite user
+### Folder where to run gl-compile-conf from
+GITOLITE_HOME = '/srv/git/'
+
+### Folder containing all the public ssh keys for gitolite
+GITOLITE_KEYDIR = '/srv/git/.gitolite/keydir/'
+
+### Path to the gitolite.rc file
+GL_RC = '/srv/git/.gitolite.rc'
+
+### Path to the /bin directory where the gitolite tools can be found
+GL_BINDIR = '/usr/bin/'
+
+
+### Temp folder to be used to make the clones to work around bug in libgit2:
+## refs: https://github.com/libgit2/libgit2/issues/2965
+## and https://github.com/libgit2/libgit2/issues/2797
+TMP_FOLDER = '/srv/tmp'
+
+# Optional configuration
+
+### Number of items displayed per page
+# Used when listing items
+ITEM_PER_PAGE = 50
+
+### Maximum size of the uploaded content
+# Used to limit the size of file attached to a ticket for example
+MAX_CONTENT_LENGTH = 100 * 1024 * 1024 # 100 megabytes
+
+### Length for short commit ids or file hex
+SHORT_LENGTH = 7
+
+### List of blacklisted project names that can conflicts for pagure's URLs
+### or other
+BLACKLISTED_PROJECTS = [
+ 'static', 'pv', 'releases', 'new', 'api', 'settings', 'search', 'fork',
+ 'logout', 'login', 'user', 'users', 'groups', 'projects', 'ssh_info',
+ 'issues', 'pull-requests', 'commits', 'tree', 'forks', 'admin', 'c',
+ 'wait',
+]
+
+DISABLED_PLUGINS = ['IRC']
+
+
+# Authentication related configuration option
+
+### Switch the authentication method
+# Specify which authentication method to use, defaults to `fas` can be or
+# `local`
+# Default: ``fas``.
+PAGURE_AUTH = 'openid'
+
+# When this is set to True, the session cookie will only be returned to the
+# server via ssl (https). If you connect to the server via plain http, the
+# cookie will not be sent. This prevents sniffing of the cookie contents.
+# This may be set to False when testing your application but should always
+# be set to True in production.
+# Default: ``True``.
+SESSION_COOKIE_SECURE = True
+
+# The name of the cookie used to store the session id.
+# Default: ``.pagure``.
+SESSION_COOKIE_NAME = 'pagure'
+
+# Boolean specifying whether to check the user's IP address when retrieving
+# its session. This makes things more secure (thus it is on by default) but
+# under certain setups it might not work (for example if there are proxies
+# in front of the application).
+CHECK_SESSION_IP = True
+
+# Used by SESSION_COOKIE_PATH
+APPLICATION_ROOT = '/'
+
+# Set the SSH certs/keys
+{% if env == 'pagure-staging' %}
+SSH_KEYS = {
+ 'RSA': {
+ 'fingerprint': '2048 69:50:46:24:c7:94:44:f8:8d:83:05:5c:eb:73:fb:c4 (RSA)',
+ 'pubkey': 'stg.pagure.io,8.43.85.77,2620:52:3:1:dead:beef:cafe:fed3 ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDJNu490Rp305zGCJLvhVIrKjL7Xngew3NxgRYeopHBDvj+EFQUqULXtgrI5nUBMSB94RrsuHynFAXYy2m0snHjWzWjbIxM4ZVD2sX4GiKX6qu7WyxcGmGcL08MF919r+JSPL9oWWSq/CvvBF0M1eeqkIpjMZHpVKgR3uTMD5yW994NBLAQi9i1UdwGYNQc1KqWvlvW1XhFFtiIGscIFGRKsUOMvnJvWdU6T+djmzMy4hcahxnsPCZxCjbQpuH1JjihNNVWYOq7Ztjs1gxpTTV19ATp4Z2F95uyyQ3Y+Em9KeXcKXYxwVzYVho5SSB1ZYBL+xAH1osK23PvGD39UYp9',
+ 'SHA256': 'SHA256:x4xld/tPdeOhbyJcTOxd+IbSZ4OpnBzh/IskocyrOM',
+ }
+}
+{% else %}
+SSH_KEYS = {
+ 'RSA': {
+ 'fingerprint': '2048 90:8e:7f:a3:f7:f1:70:cb:56:77:96:17:44:c4:fc:82 (RSA)',
+ 'pubkey': 'pagure.io,8.43.85.75,2620:52:3:1:dead:beef:cafe:fed5 ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC198DWs0SQ3DX0ptu+8Wq6wnZMrXUCufN+wdSCtlyhHUeQ3q5B4Hgto1n2FMj752vToCfNTn9mWO7l2rNTrKeBsELpubl2jECHu4LqxkRVihu5UEzejfjiWNDN2jdXbYFY27GW9zymD7Gq3u+T/Mkp4lIcQKRoJaLobBmcVxrLPEEJMKI4AJY31jgxMTnxi7KcR+U5udQrZ3dzCn2BqUdiN5dMgckr4yNPjhl3emJeVJ/uhAJrEsgjzqxAb60smMO5/1By+yF85Wih4TnFtF4LwYYuxgqiNv72Xy4D/MGxCqkO/nH5eRNfcJ+AJFE7727F7Tnbo4xmAjilvRria/+l',
+ 'SHA256': 'SHA256:Gddkd5H7oQ1RaK8WgXSKl7JZP+FgLyidmxbLercJ/JY',
+ }
+}
+{% endif %}
+
+# Allow the backward compatibility endpoints for the old URLs schema to
+# see the commits of a repo. This is only interesting if your pagure instance
+# was running since before version 1.3 and if you care about backward
+# compatibility in your URLs.
+OLD_VIEW_COMMIT_ENABLED = True
+
+PAGURE_CI_SERVICES=['jenkins']
+
+from pagure.mail_logging import ContextInjector, MSG_FORMAT
+LOGGING = {
+ 'version': 1,
+ 'disable_existing_loggers': False,
+ 'formatters': {
+ 'standard': {
+ 'format': '%(asctime)s [%(levelname)s] %(name)s: %(message)s'
+ },
+ 'email_format': {
+ 'format': MSG_FORMAT
+ }
+ },
+ 'filters': {
+ 'myfilter': {
+ '()': ContextInjector,
+ }
+ },
+ 'handlers': {
+ 'console': {
+ 'formatter': 'standard',
+ 'class': 'logging.StreamHandler',
+ 'stream': 'ext://sys.stdout',
+ },
+ 'email': {
+ 'level': 'ERROR',
+ 'formatter': 'email_format',
+ 'class': 'logging.handlers.SMTPHandler',
+ 'mailhost': 'localhost',
+ 'fromaddr': 'pagure(a)pagure.io',
+ 'toaddrs': 'pingou(a)pingoured.fr',
+ 'subject': 'ERROR on pagure.io',
+ 'filters': ['myfilter'],
+ },
+ },
+ # The root logger configuration; this is a catch-all configuration
+ # that applies to all log messages not handled by a different logger
+ 'root': {
+ 'level': 'INFO',
+ 'handlers': ['console', 'email'],
+ },
+ 'loggers': {
+ 'pagure': {
+ 'handlers': ['console', 'email'],
+ 'level': 'DEBUG',
+ 'propagate': True
+ },
+ 'flask': {
+ 'handlers': ['console'],
+ 'level': 'INFO',
+ 'propagate': False
+ },
+ 'sqlalchemy': {
+ 'handlers': ['console'],
+ 'level': 'WARN',
+ 'propagate': False
+ },
+ 'binaryornot': {
+ 'handlers': ['console'],
+ 'level': 'WARN',
+ 'propagate': True
+ },
+ 'pagure.lib.encoding_utils': {
+ 'handlers': ['console'],
+ 'level': 'WARN',
+ 'propagate': False
+ },
+ }
+}
+
+CROSS_PROJECT_ACLS = [
+ 'create_project',
+ 'fork_project',
+ 'modify_project',
+ 'issue_create',
+ 'issue_comment',
+ 'pull_request_create',
+ 'pull_request_comment',
+ 'pull_request_merge',
+ 'pull_request_flag',
+]
+
+BLACKLISTED_GROUPS = ['forks', 'group', 'rpms', 'modules', 'container', 'tests']
+
+GITOLITE_CELERY_QUEUE = 'gitolite_queue'
+FAST_CELERY_QUEUE = 'fast_workers'
+MEDIUM_CELERY_QUEUE = 'medium_workers'
+SLOW_CELERY_QUEUE = 'slow_workers'
+PRIVATE_PROJECTS = False
+FEDMSG_NOTIFICATIONS = False
+FEDORA_MESSAGING_NOTIFICATIONS = True
+THEME = 'pagureio'
+
+MIRROR_SSHKEYS_FOLDER='/srv/mirror/ssh'
+
+SSH_KEYS_USERNAME_EXPECT = "git"
+SSH_KEYS_OPTIONS = 'restrict,command="/usr/libexec/pagure/aclchecker.py %(username)s"'
+
+SSH_COMMAND_REPOSPANNER = ([
+ "/usr/libexec/repobridge",
+ "--extra", "username", "%(username)s",
+ "--extra", "repotype", "%(repotype)s",
+ "--extra", "project_name", "%(project_name)s",
+ "--extra", "project_user", "%(project_user)s",
+ "--extra", "project_namespace", "%(project_namespace)s",
+ "%(cmd)s",
+ "'%(repotype)s/%(reponame)s'",
+], {"REPOBRIDGE_CONFIG": "/etc/pagure/repobridge_ansible.json"})
+SSH_COMMAND_NON_REPOSPANNER = ([
+ "/usr/bin/%(cmd)s",
+ "/srv/git/repositories/%(reponame)s",
+], {"GL_USER": "%(username)s"})
+
+
+
+GIT_AUTH_BACKEND = 'pagure'
+HTTP_REPO_ACCESS_GITOLITE = None
+
+{% if env == 'pagure-staging' %}
+CSP_HEADERS = (
+ "default-src 'self';"
+ "script-src 'self' '{nonce_script}'; "
+ "style-src 'self' '{nonce_style}'; "
+ "object-src 'none';"
+ "base-uri 'self';"
+ "img-src 'self' https:;"
+ "connect-src 'self' https://stg.pagure.io:8088;"
+ "frame-src https://docs.stg.pagure.org;"
+ "frame-ancestors https://stg.pagure.io;"
+)
+{% else %}
+CSP_HEADERS = (
+ "default-src 'self';"
+ "script-src 'self' '{nonce_script}'; "
+ "style-src 'self' '{nonce_style}'; "
+ "object-src 'none';"
+ "base-uri 'self';"
+ "img-src 'self' https:;"
+ "connect-src 'self' https://pagure.io:8088;"
+ "frame-src https://docs.pagure.org;"
+ "frame-ancestors https://pagure.io;"
+)
+{% endif %}
diff --git a/roles/pagure/frontend/templates/pagure.wsgi b/roles/pagure/frontend/templates/pagure.wsgi
new file mode 100644
index 0000000..75c6ef3
--- /dev/null
+++ b/roles/pagure/frontend/templates/pagure.wsgi
@@ -0,0 +1,29 @@
+#-*- coding: utf-8 -*-
+
+# The three lines below are required to run on EL6 as EL6 has
+# two possible versions of python-sqlalchemy and python-jinja2
+# These lines make sure the application uses the correct version.
+import __main__
+__main__.__requires__ = ['SQLAlchemy >= 0.8', 'jinja2 >= 2.4', 'Pygments>=2.1.0']
+import pkg_resources
+
+import os
+## Set the environment variable pointing to the configuration file
+os.environ['PAGURE_CONFIG'] = '/etc/pagure/pagure.cfg'
+
+## Set the environment variable if the tmp folder needs to be moved
+## Is necessary to work around bug in libgit2:
+## refs: https://github.com/libgit2/libgit2/issues/2965
+## and https://github.com/libgit2/libgit2/issues/2797
+os.environ['TEMP'] = '/srv/tmp/'
+
+## The following is only needed if you did not install pagure
+## as a python module (for example if you run it from a git clone).
+#import sys
+#sys.path.insert(0, '/path/to/pagure/')
+
+
+# The most important line to make the wsgi work
+from pagure.flask_app import create_app
+
+application = create_app()
diff --git a/roles/pagure/frontend/templates/robots.txt.j2 b/roles/pagure/frontend/templates/robots.txt.j2
new file mode 100644
index 0000000..9e911bd
--- /dev/null
+++ b/roles/pagure/frontend/templates/robots.txt.j2
@@ -0,0 +1,10 @@
+User-agent: *
+{% if env == 'pagure-staging' %}
+Disallow: /
+{% else %}
+Disallow: /api
+Disallow: /login
+Disallow: /*/raw
+Disallow: /*/blob
+Crawl-Delay: 2
+{% endif %}
diff --git a/roles/pagure/frontend/templates/securityheaders.conf b/roles/pagure/frontend/templates/securityheaders.conf
new file mode 100644
index 0000000..42adcad
--- /dev/null
+++ b/roles/pagure/frontend/templates/securityheaders.conf
@@ -0,0 +1,8 @@
+Header always set X-Xss-Protection "1; mode=block"
+Header always set X-Content-Type-Options "nosniff"
+Header always set Referrer-Policy "same-origin"
+{% if env == 'pagure-staging' %}
+Header always set X-Frame-Options "ALLOW-FROM https://stg.pagure.io/"
+{% else %}
+Header always set X-Frame-Options "ALLOW-FROM https://pagure.io/"
+{% endif %}
diff --git a/roles/pagure/frontend/templates/stunnel-conf.j2 b/roles/pagure/frontend/templates/stunnel-conf.j2
new file mode 100644
index 0000000..3f97e5b
--- /dev/null
+++ b/roles/pagure/frontend/templates/stunnel-conf.j2
@@ -0,0 +1,16 @@
+{% if env == 'pagure-staging' %}
+cert = /etc/pki/tls/certs/stg.pagure.io.bundle.cert
+key = /etc/pki/tls/private/stg.pagure.io.key
+{% else %}
+cert = /etc/pki/tls/certs/pagure.io.bundle.cert
+key = /etc/pki/tls/certs/pagure.io.key
+{% endif %}
+pid = /var/run/stunnel.pid
+
+[{{ stunnel_service }}]
+sslVersion = all
+options = NO_SSLv2
+options = NO_SSLv3
+options = NO_TLSv1
+accept = {{ stunnel_source_port }}
+connect = {{ stunnel_destination_port }}
3 years, 4 months
[ansible] Scripts library plugins
by Nicolas Chauvet
commit ba358db1041cd555c2232b0a4ee83f25672fcb57
Author: Nicolas Chauvet <kwizart(a)gmail.com>
Date: Wed Aug 18 22:58:21 2021 +0200
Scripts library plugins
callback_plugins/logdetail.py | 203 ++++++++++++-------------
filter_plugins/fedmsg.py | 4 +-
inventory/group_vars/all | 1 +
inventory/host_vars/hv01.online.rpmfusion.net | 1 +
library/virt_boot | 10 +-
scripts/auth-keys-from-fas | 16 +-
scripts/freezelist | 13 +-
scripts/generate-oidc-token | 97 ++++++++++++
scripts/hosts_with_var_set | 61 ++++----
9 files changed, 247 insertions(+), 159 deletions(-)
---
diff --git a/callback_plugins/logdetail.py b/callback_plugins/logdetail.py
index 289e7ed..902ecad 100644
--- a/callback_plugins/logdetail.py
+++ b/callback_plugins/logdetail.py
@@ -15,13 +15,36 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-from __future__ import absolute_import
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+callback: logdetail
+callback_type: notification
+short_description: Logs playbook results, per date, playbook and host.
+description: Logs playbook results, per date, playbook and host, in I(log_path).
+options:
+ log_path:
+ description: The path where log files will be created.
+ default: /var/log/ansible
+ ini:
+ - section: callback_logdetail
+ key: log_path
+ env:
+ - name: ANSIBLE_LOGDETAIL_PATH
+'''
import os
import time
import json
import pwd
-from ansible import utils
+import gzip
+
+try:
+ from ansible.utils.hashing import secure_hash
+except ImportError:
+ from ansible.utils import md5 as secure_hash
try:
from ansible.plugins.callback import CallbackBase
@@ -29,31 +52,31 @@ except ImportError:
# Ansible v1 compat
CallbackBase = object
-TIME_FORMAT="%b %d %Y %H:%M:%S"
+TIME_FORMAT = "%b %d %Y %H:%M:%S"
-MSG_FORMAT="%(now)s\t%(count)s\t%(category)s\t%(name)s\t%(data)s\n"
+MSG_FORMAT = "%(now)s\t%(count)s\t%(category)s\t%(name)s\t%(data)s\n"
-LOG_PATH = '/var/log/ansible'
def getlogin():
try:
user = os.getlogin()
- except OSError, e:
+ except OSError as e:
user = pwd.getpwuid(os.geteuid())[0]
return user
+
class LogMech(object):
- def __init__(self):
+ def __init__(self, logpath):
self.started = time.time()
self.pid = str(os.getpid())
self._pb_fn = None
self._last_task_start = None
self.play_info = {}
- self.logpath = LOG_PATH
+ self.logpath = logpath
if not os.path.exists(self.logpath):
try:
os.makedirs(self.logpath, mode=0750)
- except OSError, e:
+ except OSError as e:
if e.errno != 17:
raise
@@ -74,13 +97,13 @@ class LogMech(object):
def logpath_play(self):
# this is all to get our path to look nice ish
tstamp = time.strftime('%Y/%m/%d/%H.%M.%S', time.localtime(self.started))
- path = os.path.normpath(self.logpath + '/' + self.playbook_id + '/' + tstamp + '/')
+ path = os.path.normpath(self.logpath + '/' + self.playbook_id + '/' + tstamp + '/')
if not os.path.exists(path):
try:
os.makedirs(path)
- except OSError, e:
- if e.errno != 17: # if it is not dir exists then raise it up
+ except OSError as e:
+ if e.errno != 17: # if it is not dir exists then raise it up
raise
return path
@@ -96,8 +119,8 @@ class LogMech(object):
def task_to_json(self, task):
res = {}
res['task_name'] = task.name
- res['task_module'] = task.module_name
- res['task_args'] = task.module_args
+ res['task_module'] = task.action
+ res['task_args'] = task.args
if self.playbook_id == 'ansible-cmd':
res['task_userid'] = getlogin()
for k in ("delegate_to", "environment", "with_first_found",
@@ -115,22 +138,21 @@ class LogMech(object):
host = 'HOSTMISSING'
if type(data) == dict:
- name = data.get('module_name',None)
+ name = data.get('module_name', None)
else:
name = "unknown"
-
# we're in setup - move the invocation info up one level
if 'invocation' in data:
invoc = data['invocation']
if not name and 'module_name' in invoc:
name = invoc['module_name']
- #don't add this since it can often contain complete passwords :(
+ # don't add this since it can often contain complete passwords :(
del(data['invocation'])
if task:
- name = task.name
+ name = task._name
data['task_start'] = self._last_task_start
data['task_end'] = time.time()
data.update(self.task_to_json(task))
@@ -143,7 +165,7 @@ class LogMech(object):
if self.play_info.get('check', False) and self.play_info.get('diff', False):
category = 'CHECK_DIFF:' + category
- elif self.play_info.get('check', False):
+ elif self.play_info.get('check', False):
category = 'CHECK:' + category
# Sometimes this is None.. othertimes it's fine. Othertimes it has
@@ -152,14 +174,12 @@ class LogMech(object):
name = name.strip()
sanitize_host = host.replace(' ', '_').replace('>', '-')
- fd = open(self.logpath_play + '/' + sanitize_host + '.log', 'a')
+ fd = gzip.open(self.logpath_play + '/' + sanitize_host + '.log.gz', 'at')
now = time.strftime(TIME_FORMAT, time.localtime())
fd.write(MSG_FORMAT % dict(now=now, name=name, count=count, category=category, data=json.dumps(data)))
fd.close()
-logmech = LogMech()
-
class CallbackModule(CallbackBase):
"""
logs playbook results, per host, in /var/log/ansible/hosts
@@ -172,101 +192,72 @@ class CallbackModule(CallbackBase):
def __init__(self):
self._task_count = 0
self._play_count = 0
+ self.task = None
+ self.playbook = None
- def on_any(self, *args, **kwargs):
- pass
+ super(CallbackModule, self).__init__()
+ self.set_options()
+ self.logmech = LogMech(self.get_option('log_path'))
+ def set_play_context(self, play_context):
+ self.play_context = play_context
- def runner_on_failed(self, host, res, ignore_errors=False):
+ def v2_runner_on_failed(self, result, ignore_errors=False):
category = 'FAILED'
- task = getattr(self,'task', None)
- logmech.log(host, category, res, task, self._task_count)
-
+ self.logmech.log(result._host.get_name(), category, result._result, self.task, self._task_count)
- def runner_on_ok(self, host, res):
+ def v2_runner_on_ok(self, result):
category = 'OK'
- task = getattr(self,'task', None)
- logmech.log(host, category, res, task, self._task_count)
-
+ self.logmech.log(result._host.get_name(), category, result._result, self.task, self._task_count)
- def runner_on_error(self, host, res):
- category = 'ERROR'
- task = getattr(self,'task', None)
- logmech.log(host, category, res, task, self._task_count)
-
- def runner_on_skipped(self, host, item=None):
+ def v2_runner_on_skipped(self, result):
category = 'SKIPPED'
- task = getattr(self,'task', None)
res = {}
- res['item'] = item
- logmech.log(host, category, res, task, self._task_count)
+ res['item'] = self._get_item_label(getattr(result._result, 'results', {}))
+ self.logmech.log(result._host.get_name(), category, res, self.task, self._task_count)
- def runner_on_unreachable(self, host, output):
+ def v2_runner_on_unreachable(self, result):
category = 'UNREACHABLE'
- task = getattr(self,'task', None)
res = {}
- res['output'] = output
- logmech.log(host, category, res, task, self._task_count)
-
- def runner_on_no_hosts(self):
- pass
+ res['output'] = result._result
+ self.logmech.log(result._host.get_name(), category, res, self.task, self._task_count)
- def runner_on_async_poll(self, host, res, jid, clock):
- pass
-
- def runner_on_async_ok(self, host, res, jid):
- pass
-
- def runner_on_async_failed(self, host, res, jid):
+ def v2_runner_on_async_failed(self, result):
category = 'ASYNC_FAILED'
- task = getattr(self,'task', None)
- logmech.log(host, category, res, task, self._task_count)
-
- def playbook_on_start(self):
- pass
-
- def playbook_on_notify(self, host, handler):
- pass
-
- def playbook_on_no_hosts_matched(self):
- pass
+ self.logmech.log(result._host.get_name(), category, result._result, self.task, self._task_count)
- def playbook_on_no_hosts_remaining(self):
- pass
+ def v2_playbook_on_start(self, playbook):
+ self.playbook = playbook
- def playbook_on_task_start(self, name, is_conditional):
- logmech._last_task_start = time.time()
+ def v2_playbook_on_task_start(self, task, is_conditional):
+ self.task = task
+ if self.task:
+ self.task._name = task.get_name().strip()
+ self.logmech._last_task_start = time.time()
self._task_count += 1
- def playbook_on_vars_prompt(self, varname, private=True, prompt=None, encrypt=None, confirm=False, salt_size=None, salt=None, default=None):
- pass
-
- def playbook_on_setup(self):
+ def v2_playbook_on_setup(self):
self._task_count += 1
- pass
- def playbook_on_import_for_host(self, host, imported_file):
- task = getattr(self,'task', None)
+ def v2_playbook_on_import_for_host(self, result, imported_file):
res = {}
res['imported_file'] = imported_file
- logmech.log(host, 'IMPORTED', res, task)
+ self.logmech.log(result._host.get_name(), 'IMPORTED', res, self.task)
- def playbook_on_not_import_for_host(self, host, missing_file):
- task = getattr(self,'task', None)
+ def v2_playbook_on_not_import_for_host(self, result, missing_file):
res = {}
res['missing_file'] = missing_file
- logmech.log(host, 'NOTIMPORTED', res, task)
+ self.logmech.log(result._host.get_name(), 'NOTIMPORTED', res, self.task)
- def playbook_on_play_start(self, pattern):
+ def v2_playbook_on_play_start(self, play):
self._task_count = 0
- play = getattr(self, 'play', None)
if play:
# figure out where the playbook FILE is
- path = os.path.abspath(play.playbook.filename)
+ path = os.path.abspath(self.playbook._file_name)
# tell the logger what the playbook is
- logmech.playbook_id = path
+ self.logmech.playbook_id = path
# if play count == 0
# write out playbook info now
@@ -275,33 +266,35 @@ class CallbackModule(CallbackBase):
pb_info['playbook_start'] = time.time()
pb_info['playbook'] = path
pb_info['userid'] = getlogin()
- pb_info['extra_vars'] = play.playbook.extra_vars
- pb_info['inventory'] = play.playbook.inventory.host_list
- pb_info['playbook_checksum'] = utils.md5(path)
- pb_info['check'] = play.playbook.check
- pb_info['diff'] = play.playbook.diff
- logmech.play_log(json.dumps(pb_info, indent=4))
+ pb_info['extra_vars'] = play._variable_manager.extra_vars
+ pb_info['inventory'] = play._variable_manager._inventory._sources
+ pb_info['playbook_checksum'] = secure_hash(path)
+ if hasattr(self, "play_context"):
+ pb_info['check'] = self.play_context.check_mode
+ pb_info['diff'] = self.play_context.diff
+ self.logmech.play_log(json.dumps(pb_info, indent=4))
self._play_count += 1
# then write per-play info that doesn't duplicate the playbook info
info = {}
info['play'] = play.name
info['hosts'] = play.hosts
- info['transport'] = play.transport
info['number'] = self._play_count
- info['check'] = play.playbook.check
- info['diff'] = play.playbook.diff
- logmech.play_info = info
- logmech.play_log(json.dumps(info, indent=4))
-
+ if hasattr(self, "play_context"):
+ info['transport'] = str(self.play_context.connection)
+ info['check'] = self.play_context.check_mode
+ info['diff'] = self.play_context.diff
+ self.logmech.play_info = info
+ try:
+ self.logmech.play_log(json.dumps(info, indent=4))
+ except TypeError:
+ print(("Failed to conver to JSON:", info))
- def playbook_on_stats(self, stats):
+ def v2_playbook_on_stats(self, stats):
results = {}
- for host in stats.processed.keys():
+ for host in list(stats.processed.keys()):
results[host] = stats.summarize(host)
- logmech.log(host, 'STATS', results[host])
- logmech.play_log(json.dumps({'stats': results}, indent=4))
- logmech.play_log(json.dumps({'playbook_end': time.time()}, indent=4))
- print 'logs written to: %s' % logmech.logpath_play
-
-
+ self.logmech.log(host, 'STATS', results[host])
+ self.logmech.play_log(json.dumps({'stats': results}, indent=4))
+ self.logmech.play_log(json.dumps({'playbook_end': time.time()}, indent=4))
+ print(('logs written to: %s' % self.logmech.logpath_play))
diff --git a/filter_plugins/fedmsg.py b/filter_plugins/fedmsg.py
index 60beaf8..16773ce 100644
--- a/filter_plugins/fedmsg.py
+++ b/filter_plugins/fedmsg.py
@@ -9,7 +9,7 @@ def invert_fedmsg_policy(groups, vars, env):
"""
if env == 'staging':
- hosts = groups['staging'] + groups['fedmsg-qa-network-stg']
+ hosts = groups['staging'] + groups['staging_friendly']
else:
hosts = [h for h in groups['all'] if h not in groups['staging']]
@@ -25,7 +25,7 @@ def invert_fedmsg_policy(groups, vars, env):
inverted[key] = inverted.get(key, [])
inverted[key].append(cert['service'] + '-' + fqdn)
- result = inverted.items()
+ result = list(inverted.items())
# Sort things so they come out in a reliable order (idempotence)
[inverted[key].sort() for key in inverted]
result.sort(key=operator.itemgetter(0))
diff --git a/inventory/group_vars/all b/inventory/group_vars/all
index 6f086cb..0f43cb1 100644
--- a/inventory/group_vars/all
+++ b/inventory/group_vars/all
@@ -432,6 +432,7 @@ sshd_sftp: false
# Autodetect python version
#
ansible_python_interpreter: auto
+
#
# datacenter with active certbot in it
#
diff --git a/inventory/host_vars/hv01.online.rpmfusion.net b/inventory/host_vars/hv01.online.rpmfusion.net
index f28d410..dcfcea4 100644
--- a/inventory/host_vars/hv01.online.rpmfusion.net
+++ b/inventory/host_vars/hv01.online.rpmfusion.net
@@ -15,4 +15,5 @@ udp_ports: ['53', '1194']
custom_rules: [ '-A FORWARD -d 192.168.181.0/24 -o br1 -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT' , '-A FORWARD -s 192.168.181.0/24 -i br1 -j ACCEPT', '-A INPUT -i br1 -p tcp -m tcp --dport 111 -j ACCEPT', '-A INPUT -i tun0 -p tcp -m tcp --dport 111 -j ACCEPT' , '-A INPUT -i br1 -p udp -m udp --dport 514 -j ACCEPT', '-A INPUT -i tun0 -p udp -m udp --dport 514 -j ACCEPT' , '-A INPUT -i br1 -p tcp -m tcp --dport 514 -j ACCEPT', '-A INPUT -i tun0 -p tcp -m tcp --dport 514 -j ACCEPT' ,'-A INPUT -i br1 -p tcp -m tcp --dport 662 -j ACCEPT' , '-A INPUT -i tun0 -p tcp -m tcp --dport 662 -j ACCEPT', '-A INPUT -i br1 -p tcp -m tcp --dport 892 -j ACCEPT' , '-A INPUT -i tun0 -p tcp -m tcp --dport 892 -j ACCEPT', '-A INPUT -i br1 -p tcp -m tcp --dport 2049 -j ACCEPT', '-A INPUT -i tun0 -p tcp -m tcp --dport 2049 -j ACCEPT', '-A INPUT -i br1 -p udp -m udp --dport 2049 -j ACCEPT', '-A INPUT -i tun0 -p udp -m udp --dport 2049 -j ACCEPT', '-A INPUT -i br1 -p tcp -m tcp --dport 50
00 -j ACCEPT', '-A INPUT -i tun0 -p tcp -m tcp --dport 5000 -j ACCEPT' ,'-A INPUT -i br1 -p tcp -m tcp --dport 32803 -j ACCEPT', '-A INPUT -i tun0 -p tcp -m tcp --dport 32803 -j ACCEPT' , '-A INPUT -i br1 -p udp -m udp --dport 32769 -j ACCEPT', '-A INPUT -i tun0 -p udp -m udp --dport 32769 -j ACCEPT' , '-A INPUT -i br1 -p tcp -m tcp --dport 3128 -j ACCEPT', '-A INPUT -i tun0 -p tcp -m tcp --dport 3128 -j ACCEPT']
custom_nat_rules: ['-A POSTROUTING -o br0 -j MASQUERADE' ]
+ansible_python
diff --git a/library/virt_boot b/library/virt_boot
index 6591e56..6280297 100755
--- a/library/virt_boot
+++ b/library/virt_boot
@@ -1,4 +1,4 @@
-#!/usr/bin/python
+#!/usr/bin/python3
# -*- coding: utf-8 -*-
# (c) 2012, Jeroen Hoekx <jeroen(a)hoekx.be>
@@ -100,12 +100,12 @@ except ImportError:
import elementtree.ElementTree as ET
from elementtree.ElementTree import SubElement
except ImportError:
- print "failed=True msg='ElementTree python module unavailable'"
+ print("failed=True msg='ElementTree python module unavailable'")
try:
import libvirt
except ImportError:
- print "failed=True msg='libvirt python module unavailable'"
+ print("failed=True msg='libvirt python module unavailable'")
sys.exit(1)
from ansible.module_utils.basic import AnsibleModule
@@ -138,7 +138,7 @@ def detach_disk(domain, doc, device):
source = disk.find('source')
if source is not None and 'file' in source.attrib:
del source.attrib['file']
- domain.updateDeviceFlags(ET.tostring(disk), libvirt.VIR_DOMAIN_AFFECT_CONFIG)
+ domain.updateDeviceFlags(ET.tostring(disk).decode('utf-8'), libvirt.VIR_DOMAIN_AFFECT_CONFIG)
return True
return False
@@ -287,7 +287,7 @@ def main():
changed = True
### save back
- conn.defineXML( ET.tostring(doc) )
+ conn.defineXML( ET.tostring(doc).decode('utf-8') )
if start and not domain.isActive():
changed = True
diff --git a/scripts/auth-keys-from-fas b/scripts/auth-keys-from-fas
index 9ec9577..3c7505c 100755
--- a/scripts/auth-keys-from-fas
+++ b/scripts/auth-keys-from-fas
@@ -1,4 +1,4 @@
-#!/usr/bin/python -tt
+#!/usr/bin/python3
#
# Copyright 2012 Red Hat, Inc.
# License: GPLv2+
@@ -111,11 +111,11 @@ def read_config_files(cfg_files):
if results != True:
for (section_list, key, unused_) in flatten_errors(options, results):
if key is not None:
- print 'The "%s" key in the section "%s" failed validation' % (
- key, ', '.join(section_list))
+ print('The "%s" key in the section "%s" failed validation' % (
+ key, ', '.join(section_list)))
else:
- print 'The following section was missing:%s ' % ', '.join(
- section_list)
+ print('The following section was missing:%s ' % ', '.join(
+ section_list))
sys.exit(1)
return options
@@ -159,8 +159,8 @@ def retry_fas(function, *args, **kwargs):
return function(*args, **kwargs)
except AuthError:
retries += 1
- password = getpass('FAS Password for %s:' % function.im_self.username)
- function.im_self.password = password
+ password = getpass('FAS Password for %s:' % function.__self__.username)
+ function.__self__.password = password
if retries >= MAX_RETRIES:
raise
@@ -203,4 +203,4 @@ if __name__ == '__main__':
for user in sorted(ssh_keys.keys()):
for key in ssh_keys[user]:
- print '%s%s' % (from_string, key)
+ print('%s%s' % (from_string, key))
diff --git a/scripts/freezelist b/scripts/freezelist
index 2690a54..89856ea 100755
--- a/scripts/freezelist
+++ b/scripts/freezelist
@@ -25,23 +25,22 @@ variable_manager = VariableManager(loader=loader, inventory=inv)
frozen = []
unfrozen = []
-for host in sorted(inv.get_hosts()):
+for host in sorted(inv.get_hosts(), key=lambda host: host.name):
vars = variable_manager.get_vars(host=host)
freezes = vars.get('freezes', None)
if freezes:
frozen.append(host.get_name())
elif freezes is None:
- print 'Error: missing freezes: %s' % host.get_name()
+ print('Error: missing freezes: %s' % host.get_name())
else:
unfrozen.append(host.get_name())
-print 'freeze:'
+print('freeze:')
for host in sorted(frozen):
- print 'F: ' + host
+ print('F: ' + host)
-print 'do not freeze:'
+print('do not freeze:')
for host in sorted(unfrozen):
- print 'NF: ' + host
-
+ print('NF: ' + host)
diff --git a/scripts/generate-oidc-token b/scripts/generate-oidc-token
new file mode 100755
index 0000000..28e2272
--- /dev/null
+++ b/scripts/generate-oidc-token
@@ -0,0 +1,97 @@
+#!/usr/bin/python3
+# Copyright (c) 2018 Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# version 2 as published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+"""
+This script will accept some parameters and will print out some SQL you can run against the Ipsilon
+database, and a token you can give to an application to authenticate against a service.
+"""
+import base64
+import json
+import os
+import time
+import uuid
+
+import click
+
+
+secret = base64.urlsafe_b64encode(os.urandom(64))[:64].decode()
+
+
+template = """
+Run this SQL against Ipsilon's database:
+
+--------START CUTTING HERE--------
+BEGIN;
+insert into token values ('{uuid}','username','{service_name}@service');
+insert into token values ('{uuid}','security_check','{secret}');
+insert into token values ('{uuid}','client_id','{service_name}');
+insert into token values ('{uuid}','expires_at','{expiration}');
+insert into token values ('{uuid}','type','Bearer');
+insert into token values ('{uuid}','issued_at','{now}');
+insert into token values ('{uuid}','scope','{scope}');
+COMMIT;
+-------- END CUTTING HERE --------
+
+"""
+
+
+def validate_scopes(ctx, param, scopes):
+ """
+ Ensure that the user provided at least one scope.
+
+ Args:
+ ctx(click.core.Context): Unused.
+ param (click.core.Option): Unused.
+ scopes (tuple): The scopes provided by the user that we are validating.
+ Raises:
+ click.BadParameter: If the length of the scopes tuple is less than 1.
+ """
+ if len(scopes) < 1:
+ raise click.BadParameter('At least one scope must be provided.')
+
+ return scopes
+
+
+@click.command()
+@click.argument('service_name')
+@click.option('--expiration', '-e', prompt='Number of days until expiration', type=int,
+ help='The number of days from now until this token expires.')
+@click.option('--scope', '-s', multiple=True, callback=validate_scopes,
+ help='A scope to include for this token. May be supplied multiple times.')
+@click.option('--no-openid', is_flag=True, help='Do not use "openid" as the first item in scope.')
+def generate_token(service_name, expiration, scope, no_openid):
+ """
+ Print out SQL to insert a token in the Ipsilon database, and the token itself.
+
+ SERVICE_NAME is the name of the service that the token will be used by, (e.g., bodhi).
+ """
+ identifier = uuid.uuid4()
+
+ now = int(time.time())
+ expiration = now + (expiration * 24 * 3600)
+
+ scope = list(scope)
+ if not no_openid:
+ scope.insert(0, 'openid')
+ scope = json.dumps(scope)
+
+ print(template.format(uuid=identifier, service_name=service_name, secret=secret,
+ expiration=expiration, scope=scope, now=now))
+
+ print("Token: {}_{}\n".format(identifier, secret))
+
+
+if __name__ == '__main__':
+ generate_token()
diff --git a/scripts/hosts_with_var_set b/scripts/hosts_with_var_set
index ec35858..1bba9b2 100755
--- a/scripts/hosts_with_var_set
+++ b/scripts/hosts_with_var_set
@@ -3,7 +3,7 @@
# doteast porting to ansible 2.0
# list hosts with ansible var[=value], Or
# list all hosts with their corresponding vars
-# Note that the script will attempt to "match" the supplied value of the var against the values if it the var is multivalued
+# Note that the script will attempt to "match" the supplied value of the var against the values if it the var is multivalued
from ansible import constants as C
from ansible.parsing.dataloader import DataLoader
@@ -23,46 +23,43 @@ parser.add_option('-a', action="store_true", dest='all_vars', default=None,
opts, args = parser.parse_args(sys.argv[1:])
if ((opts.variable == None and opts.all_vars == None) or (opts.variable != None and opts.all_vars != None)):
- print "Usage: hosts_with_var_set -o varname[=value] | -a"
- sys.exit(-1)
+ print("Usage: hosts_with_var_set -o varname[=value] | -a")
+ sys.exit(-1)
loader = DataLoader()
inv = InventoryManager(loader=loader, sources=opts.inventory)
variable_manager = VariableManager(loader=loader, inventory=inv)
-matching=True
+matching = True
if opts.variable != None:
- if opts.variable.find("=") == -1:
- matching=False
- var_name=opts.variable
- else:
- var_name,value = opts.variable.split('=')
- if value == "":
- value="None"
+ if opts.variable.find("=") == -1:
+ matching = False
+ var_name = opts.variable
+ else:
+ var_name, value = opts.variable.split('=')
+ if value == "":
+ value = "None"
var_set = []
-
-for host in sorted(inv.get_hosts()):
+for host in inv.get_hosts():
vars = variable_manager.get_vars(host=host)
if opts.variable == None:
- # remove expanded 'all' groups
- vars.pop('groups')
- vars['groups']=host.get_groups()
- print "%s\n%s\n" % (host.get_name(),vars)
+ # remove expanded 'all' groups
+ vars.pop('groups')
+ vars['groups'] = host.get_groups()
+ print("%s\n%s\n" % (host.get_name(), vars))
else:
- if vars.has_key(var_name):
- if not matching:
- var_set.append(host.get_name())
- else:
- if str(vars.get(var_name)).find(value) != -1:
- var_set.append(host.get_name())
-
-if opts.variable != None:
- if not matching:
- print 'hosts with variable %s:' % var_name
- else:
- print 'hosts with variable %s matching %s value' % (var_name,value)
- for host in sorted(var_set):
- print host
-
+ if var_name in vars:
+ if not matching:
+ var_set.append(host.get_name())
+ else:
+ if str(vars.get(var_name)).find(value) != -1:
+ var_set.append(host.get_name())
+if opts.variable != None:
+ if not matching:
+ print('hosts with variable %s:' % var_name)
+ else:
+ print('hosts with variable %s matching %s value' % (var_name, value))
+ for host in sorted(var_set):
+ print(host)
3 years, 4 months
[ansible] Add proxies-misc
by Nicolas Chauvet
commit bffe9d17014c8511111220e41a8337b05fe9f4d3
Author: Nicolas Chauvet <kwizart(a)gmail.com>
Date: Wed Aug 18 22:01:12 2021 +0200
Add proxies-misc
playbooks/include/proxies-miscellaneous.yml | 61 +++++++++++++++++++++++++++
1 files changed, 61 insertions(+), 0 deletions(-)
---
diff --git a/playbooks/include/proxies-miscellaneous.yml b/playbooks/include/proxies-miscellaneous.yml
new file mode 100644
index 0000000..682fac2
--- /dev/null
+++ b/playbooks/include/proxies-miscellaneous.yml
@@ -0,0 +1,61 @@
+- name: Set up all the other proxy stuff -- miscellaneous
+ hosts: proxies_stg:proxies
+ user: root
+ gather_facts: True
+
+ vars_files:
+ - /srv/web/infra/ansible/vars/global.yml
+ - "/srv/private/ansible/vars.yml"
+ - /srv/web/infra/ansible/vars/{{ ansible_distribution }}.yml
+
+ handlers:
+ - import_tasks: "{{ handlers_path }}/restart_services.yml"
+
+ tasks:
+ # We retired this in favor of PDC
+ # https://lists.fedoraproject.org/archives/list/rel-eng@lists.fedoraproject...
+ - file:
+ dest=/etc/httpd/conf.d/apps.fedoraproject.org/fedora-releng-dash.conf
+ state=absent
+ tags: releng-dash
+ notify: reload proxyhttpd
+
+ roles:
+
+ - role: httpd/mime-type
+ website: fedoraproject.org
+ mimetype: image/vnd.microsoft.icon
+ extensions:
+ - .ico
+
+ - role: fedmsg/crl
+ website: fedoraproject.org
+ path: /fedmsg
+
+ - role: fedmsg/gateway/slave
+ stunnel_service: "websockets"
+ stunnel_source_port: 9939
+ stunnel_destination_port: 9938
+
+ - role: httpd/fingerprints
+ website: admin.fedoraproject.org
+
+ - role: easyfix/proxy
+ website: fedoraproject.org
+ path: /easyfix
+
+ - role: review-stats/proxy
+ website: fedoraproject.org
+ path: /PackageReviewStatus
+
+ - role: membership-map/proxy
+ website: fedoraproject.org
+ path: /membership-map
+
+ - role: apps-fp-o
+ website: apps.fedoraproject.org
+ path: /
+
+ - role: pkgdb-proxy
+ tags:
+ - pkgdb2
3 years, 4 months
[ansible] Update vars inventory
by Nicolas Chauvet
commit 901146e1f277004bb863fe93fb1793228e54621f
Author: Nicolas Chauvet <kwizart(a)gmail.com>
Date: Wed Aug 18 22:00:38 2021 +0200
Update vars inventory
inventory/group_vars/all | 79 ++++++++++++++++++++++++-----------
playbooks/run_fasClient.yml | 2 +-
playbooks/vhost_update.yml | 8 ++--
vars/Fedora.yml | 2 +-
vars/RedHat.yml | 4 +-
vars/all/00-FedoraCycleNumber.yaml | 2 +-
vars/global.yml | 2 +-
7 files changed, 64 insertions(+), 35 deletions(-)
---
diff --git a/inventory/group_vars/all b/inventory/group_vars/all
index 191ea0c..6f086cb 100644
--- a/inventory/group_vars/all
+++ b/inventory/group_vars/all
@@ -18,6 +18,7 @@ openshift_ansible: /srv/web/infra/openshift-ansible/
freezes: true
# most of our systems are in online
datacenter: online
+preferred_dc: online
postfix_group: "none"
# for httpd/website
server_admin: root(a)rpmfusion.org
@@ -71,8 +72,11 @@ eth0_nm: 255.255.255.0
eth1_nm: 255.255.255.0
br0_nm: 255.255.255.0
br1_nm: 255.255.255.0
-# Default to managing the network, we want to not do this on select hosts (like cloud nodes)
-ansible_ifcfg_blacklist: false
+nm: 255.255.255.0
+
+# Default to managing the network, we want to not do this on select
+# hosts (like cloud nodes)
+ansible_ifcfg_blocklist: false
# List of interfaces to explicitly disable
ansible_ifcfg_disabled: []
#
@@ -85,13 +89,28 @@ nfs_bridge: br1
mac_address: RANDOM
mac_address1: RANDOM
+
+virt_install_command_pxe_rhcos: virt-install -n {{ inventory_hostname }}
+ --vcpus {{ num_cpus }},maxvcpus={{ num_cpus }}
+ --cpu host
+ --memory {{ mem_size }}
+ --disk bus=virtio,path={{ volgroup }}/{{ inventory_hostname }}
+ --nographics
+ --network bridge={{ main_bridge }},model=virtio,mac={{ mac_address }}
+ --hvm --accelerate
+ --autostart --wait=-1
+ --extra-args "ip={{ eth0_ip }}::{{ gw }}:{{ nm }}:{{ inventory_hostname }}:ens2:none hostname={{ inventory_hostname }} nameserver={{ dns }} console=ttyS0 nomodeset rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url={{ rhcos_install_rootfs_url }} coreos.inst.ignition_url={{ rhcos_ignition_file_url }}"
+ --os-variant rhel7
+ --location {{ rhcos_install_url }}
+
+
virt_install_command_one_nic: virt-install -n {{ inventory_hostname }}
--memory={{ mem_size }},maxmemory={{ max_mem_size }} --memballoon virtio
--disk bus=virtio,path={{ volgroup }}/{{ inventory_hostname }}
--vcpus={{ num_cpus }},maxvcpus={{ max_cpu }} -l {{ ks_repo }} -x
- 'net.ifnames=0 ksdevice=eth0 ks={{ ks_url }} console=tty0 console=ttyS0
+ 'net.ifnames=0 inst.ksdevice=eth0 inst.ks={{ ks_url }} console=tty0 console=ttyS0
hostname={{ inventory_hostname }} nameserver={{ dns }}
- ip={{ eth0_ip }}::{{ gw }}:{{ nm }}:{{ inventory_hostname }}:eth0:none'
+ ip={{ eth0_ipv4 }}::{{ eth0_ipv4_gw }}:{{ eth0_ipv4_nm }}:{{ inventory_hostname }}:eth0:none'
--network bridge={{ main_bridge }},model=virtio,mac={{ mac_address }}
--autostart --noautoconsole --watchdog default --rng /dev/random --cpu host
@@ -99,7 +118,7 @@ virt_install_command_two_nic: virt-install -n {{ inventory_hostname }}
--memory={{ mem_size }},maxmemory={{ max_mem_size }} --memballoon virtio
--disk bus=virtio,path={{ volgroup }}/{{ inventory_hostname }}
--vcpus={{ num_cpus }},maxvcpus={{ max_cpu }} -l {{ ks_repo }} -x
- 'net.ifnames=0 ksdevice=eth0 ks={{ ks_url }} console=tty0 console=ttyS0
+ 'net.ifnames=0 inst.ksdevice=eth0 inst.ks={{ ks_url }} console=tty0 console=ttyS0
hostname={{ inventory_hostname }} nameserver={{ dns }}
ip={{ eth0_ip }}::{{ gw }}:{{ nm }}:{{ inventory_hostname }}:eth0:none
ip={{ eth1_ip }}:::{{ nm }}:{{ inventory_hostname_short }}-nfs:eth1:none'
@@ -111,9 +130,9 @@ virt_install_command_one_nic_unsafe: virt-install -n {{ inventory_hostname }}
--memory={{ mem_size }},maxmemory={{ max_mem_size }} --memballoon virtio
--disk bus=virtio,path={{ volgroup }}/{{ inventory_hostname }},cache=unsafe,io=threads
--vcpus={{ num_cpus }},maxvcpus={{ max_cpu }} -l {{ ks_repo }} -x
- 'net.ifnames=0 ksdevice=eth0 ks={{ ks_url }} console=tty0 console=ttyS0
- hostname={{ inventory_hostname }} nameserver={{ dns }}
- ip={{ eth0_ip }}::{{ gw }}:{{ nm }}:{{ inventory_hostname }}:eth0:none'
+ 'net.ifnames=0 inst.ksdevice=eth0 inst.ks={{ ks_url }} console=tty0 console=ttyS0
+ hostname={{ inventory_hostname }} nameserver={{ dns1 }}
+ ip={{ eth0_ipv4 }}::{{ eth0_ipv4_gw }}:{{ eth0_ipv4_nm }}:{{ inventory_hostname }}:eth0:none'
--network bridge={{ main_bridge }},model=virtio,mac={{ mac_address }}
--autostart --noautoconsole --watchdog default --rng /dev/random --cpu host
@@ -121,7 +140,7 @@ virt_install_command_two_nic_unsafe: virt-install -n {{ inventory_hostname }}
--memory={{ mem_size }},maxmemory={{ max_mem_size }} --memballoon virtio
--disk bus=virtio,path={{ volgroup }}/{{ inventory_hostname }},cache=unsafe,io=threads
--vcpus={{ num_cpus }},maxvcpus={{ max_cpu }} -l {{ ks_repo }} -x
- 'net.ifnames=0 ksdevice=eth0 ks={{ ks_url }} console=tty0 console=ttyS0
+ 'net.ifnames=0 inst.ksdevice=eth0 inst.ks={{ ks_url }} console=tty0 console=ttyS0
hostname={{ inventory_hostname }} nameserver={{ dns }}
ip={{ eth1_ip }}:::{{ nm }}:{{ inventory_hostname_short }}-nfs:eth1:none
ip={{ eth0_ip }}::{{ gw }}:{{ nm }}:{{ inventory_hostname }}:eth0:none'
@@ -129,11 +148,21 @@ virt_install_command_two_nic_unsafe: virt-install -n {{ inventory_hostname }}
--network bridge={{ nfs_bridge }},model=virtio,mac={{ mac_address1 }}
--autostart --noautoconsole --watchdog default --rng /dev/random
+virt_install_command_ppc64le_one_nic_unsafe: virt-install -n {{ inventory_hostname }}
+ --memory={{ mem_size }},maxmemory={{ max_mem_size }} --memballoon virtio
+ --disk bus=virtio,path={{ volgroup }}/{{ inventory_hostname }},cache=unsafe,io=threads
+ --vcpus={{ num_cpus }},maxvcpus={{ max_cpu }} -l {{ ks_repo }} -x
+ 'net.ifnames=0 inst.ksdevice=eth0 inst.ks={{ ks_url }} console=tty0 console=ttyS0
+ hostname={{ inventory_hostname }} nameserver={{ dns }}
+ ip={{ eth0_ip }}::{{ gw }}:{{ nm }}:{{ inventory_hostname }}:eth0:none'
+ --network bridge={{ main_bridge }},model=virtio,mac={{ mac_address }}
+ --autostart --noautoconsole --watchdog default --rng /dev/random
+
virt_install_command_ppc64le_two_nic_unsafe: virt-install -n {{ inventory_hostname }}
--memory={{ mem_size }},maxmemory={{ max_mem_size }} --memballoon virtio
--disk bus=virtio,path={{ volgroup }}/{{ inventory_hostname }},cache=unsafe,io=threads
--vcpus={{ num_cpus }},maxvcpus={{ max_cpu }} -l {{ ks_repo }} -x
- 'net.ifnames=0 ksdevice=eth0 ks={{ ks_url }} console=tty0 console=ttyS0
+ 'net.ifnames=0 inst.ksdevice=eth0 inst.ks={{ ks_url }} console=tty0 console=ttyS0
hostname={{ inventory_hostname }} nameserver={{ dns }}
ip={{ eth0_ip }}::{{ gw }}:{{ nm }}:{{ inventory_hostname }}:eth0:none
ip={{ eth1_ip }}:::{{ nm }}:{{ inventory_hostname_short }}-nfs:eth1:none'
@@ -145,7 +174,7 @@ virt_install_command_aarch64_one_nic: virt-install -n {{ inventory_hostname }}
--memory={{ mem_size }},maxmemory={{ max_mem_size }} --memballoon virtio
--disk bus=virtio,path={{ volgroup }}/{{ inventory_hostname }}
--vcpus={{ num_cpus }},maxvcpus={{ max_cpu }} -l {{ ks_repo }} -x
- 'net.ifnames=0 ksdevice=eth0 ks={{ ks_url }}
+ 'net.ifnames=0 inst.ksdevice=eth0 inst.ks={{ ks_url }}
hostname={{ inventory_hostname }} nameserver={{ dns }}
ip={{ eth0_ip }}::{{ gw }}:{{ nm }}:{{ inventory_hostname }}:eth0:none'
--network bridge={{ main_bridge }},model=virtio,mac={{ mac_address }}
@@ -155,7 +184,7 @@ virt_install_command_aarch64_one_nic_unsafe: virt-install -n {{ inventory_hostna
--memory={{ mem_size }},maxmemory={{ max_mem_size }} --memballoon virtio
--disk bus=virtio,path={{ volgroup }}/{{ inventory_hostname }},cache=unsafe,io=threads
--vcpus={{ num_cpus }},maxvcpus={{ max_cpu }} -l {{ ks_repo }} -x
- 'net.ifnames=0 ksdevice=eth0 ks={{ ks_url }}
+ 'net.ifnames=0 inst.ksdevice=eth0 inst.ks={{ ks_url }}
hostname={{ inventory_hostname }} nameserver={{ dns }}
ip={{ eth0_ip }}::{{ gw }}:{{ nm }}:{{ inventory_hostname }}:eth0:none'
--network bridge={{ main_bridge }},model=virtio,mac={{ mac_address }}
@@ -165,7 +194,7 @@ virt_install_command_aarch64_2nd_nic: virt-install -n {{ inventory_hostname }}
--memory={{ mem_size }},maxmemory={{ max_mem_size }} --memballoon virtio
--disk bus=virtio,path={{ volgroup }}/{{ inventory_hostname }}
--vcpus={{ num_cpus }},maxvcpus={{ max_cpu }} -l {{ ks_repo }} -x
- 'net.ifnames=0 ksdevice=eth0 ks={{ ks_url }}
+ 'net.ifnames=0 inst.ksdevice=eth0 inst.ks={{ ks_url }}
hostname={{ inventory_hostname }} nameserver={{ dns }}
ip={{ eth0_ip }}::{{ gw }}:{{ nm }}:{{ inventory_hostname }}:eth0:none'
--network bridge={{ nfs_bridge }},model=virtio,mac={{ mac_address }}
@@ -175,7 +204,7 @@ virt_install_command_aarch64_two_nic: virt-install -n {{ inventory_hostname }}
--memory={{ mem_size }},maxmemory={{ max_mem_size }} --memballoon virtio
--disk bus=virtio,path={{ volgroup }}/{{ inventory_hostname }}
--vcpus={{ num_cpus }},maxvcpus={{ max_cpu }} -l {{ ks_repo }} -x
- 'net.ifnames=0 ksdevice=eth0 ks={{ ks_url }}
+ 'net.ifnames=0 inst.ksdevice=eth0 inst.ks={{ ks_url }}
hostname={{ inventory_hostname }} nameserver={{ dns }}
ip={{ eth0_ip }}::{{ gw }}:{{ nm }}:{{ inventory_hostname }}:eth0:none
ip={{ eth1_ip }}:::{{ nm }}:{{ inventory_hostname_short }}-nfs:eth1:none'
@@ -187,7 +216,7 @@ virt_install_command_armv7_one_nic: virt-install -n {{ inventory_hostname }} --a
--memory={{ mem_size }},maxmemory={{ max_mem_size }} --memballoon virtio
--disk bus=virtio,path={{ volgroup }}/{{ inventory_hostname }}
--vcpus={{ num_cpus }},maxvcpus={{ max_cpu }} -l {{ ks_repo }} -x
- 'net.ifnames=0 ksdevice=eth0 ks={{ ks_url }} console=tty0 console=ttyAMA0
+ 'net.ifnames=0 inst.ksdevice=eth0 inst.ks={{ ks_url }} console=tty0 console=ttyAMA0
hostname={{ inventory_hostname }} nameserver={{ dns }}
ip={{ eth0_ip }}::{{ gw }}:{{ nm }}:{{ inventory_hostname }}:eth0:none'
--network bridge={{ main_bridge }}
@@ -197,17 +226,17 @@ virt_install_command_armv7_one_nic_unsafe: virt-install -n {{ inventory_hostname
--memory={{ mem_size }},maxmemory={{ max_mem_size }} --memballoon virtio
--disk bus=virtio,path={{ volgroup }}/{{ inventory_hostname }},cache=unsafe,io=threads
--vcpus={{ num_cpus }},maxvcpus={{ max_cpu }} -l {{ ks_repo }} -x
- 'net.ifnames=0 ksdevice=eth0 ks={{ ks_url }} console=tty0 console=ttyAMA0
+ 'net.ifnames=0 inst.ksdevice=eth0 inst.ks={{ ks_url }} console=tty0 console=ttyAMA0
hostname={{ inventory_hostname }} nameserver={{ dns }}
ip={{ eth0_ip }}::{{ gw }}:{{ nm }}:{{ inventory_hostname }}:eth0:none'
--network bridge={{ main_bridge }}
- --autostart --noautoconsole --rng /dev/random
+ --autostart --noautoconsole --rng /dev/random --qemu-commandline="-machine highmem=off"
virt_install_command_s390x_one_nic: virt-install -n {{ inventory_hostname }}
--memory={{ mem_size }},maxmemory={{ max_mem_size }} --memballoon virtio
--disk bus=virtio,path={{ volgroup }}/{{ inventory_hostname }}
--vcpus={{ num_cpus }},maxvcpus={{ max_cpu }} -l {{ ks_repo }} -x
- 'net.ifnames=0 ksdevice=eth0 ks={{ ks_url }}
+ 'net.ifnames=0 inst.ksdevice=eth0 inst.ks={{ ks_url }}
hostname={{ inventory_hostname }} nameserver={{ dns }}
ip={{ eth0_ip }}::{{ gw }}:{{ nm }}:{{ inventory_hostname }}:eth0:none'
--network bridge={{ main_bridge }},model=virtio,mac={{ mac_address }}
@@ -217,7 +246,7 @@ virt_install_command_s390x_one_nic_unsafe: virt-install -n {{ inventory_hostname
--memory={{ mem_size }},maxmemory={{ max_mem_size }} --memballoon virtio
--disk bus=virtio,path={{ volgroup }}/{{ inventory_hostname }},cache=unsafe,io=threads
--vcpus={{ num_cpus }},maxvcpus={{ max_cpu }} -l {{ ks_repo }} -x
- 'net.ifnames=0 ksdevice=eth0 ks={{ ks_url }}
+ 'net.ifnames=0 inst.ksdevice=eth0 inst.ks={{ ks_url }}
hostname={{ inventory_hostname }} nameserver={{ dns }}
ip={{ eth0_ip }}::{{ gw }}:{{ nm }}:{{ inventory_hostname }}:eth0:none'
--network bridge={{ main_bridge }},model=virtio,mac={{ mac_address }}
@@ -227,10 +256,10 @@ virt_install_command_rhel6: virt-install -n {{ inventory_hostname }}
--memory={{ mem_size }},maxmemory={{ max_mem_size }}
--disk bus=virtio,path={{ volgroup }}/{{ inventory_hostname }}
--vcpus={{ num_cpus }},maxvcpus={{ max_cpu }} -l {{ ks_repo }} -x
- "ksdevice=eth0 ks={{ ks_url }} ip={{ eth0_ip }} netmask={{ nm }}
+ "inst.ksdevice=eth0 inst.ks={{ ks_url }} ip={{ eth0_ip }} netmask={{ nm }}
gateway={{ gw }} dns={{ dns }} console=tty0 console=ttyS0
hostname={{ inventory_hostname }}"
- --network=bridge=br1 --autostart --noautoconsole --watchdog default
+ --network=bridge=br0 --autostart --noautoconsole --watchdog default
max_mem_size: "{{ mem_size * 1 }}"
max_cpu: "{{ num_cpus * 1 }}"
@@ -303,7 +332,7 @@ env_suffix: ""
env_short: prod
# nfs mount options, override at the group/host level
-nfs_mount_opts: "ro,hard,bg,noatime,nodev,nosuid,nfsvers=3"
+nfs_mount_opts: "ro,hard,bg,intr,noatime,nodev,nosuid,nfsvers=3"
# by default set become to false here We can override it as needed.
# Note that if become is true, you need to unset requiretty for
@@ -384,18 +413,18 @@ dns2: "62.210.16.6"
# This is a list of services that need to wait for VPN to be up before getting started.
postvpnservices: []
-# true or false if we are or are not a copr build virthost.
+# true or false if we are or are not a copr build virthost.
# Default to false
copr_build_virthost: false
#
-# Set a redirectmatch variable we can use to disable some redirectmatches
+# Set a redirectmatch variable we can use to disable some redirectmatches
# like the prerelease to final ones.
#
redirectmatch_enabled: True
#
-# sshd can run a internal sftp server, we need this on some hosts, but
+# sshd can run a internal sftp server, we need this on some hosts, but
# not on most of them, so default to false
sshd_sftp: false
diff --git a/playbooks/run_fasClient.yml b/playbooks/run_fasClient.yml
index 585f797..7703de5 100644
--- a/playbooks/run_fasClient.yml
+++ b/playbooks/run_fasClient.yml
@@ -1,6 +1,6 @@
# Run `fasClient` on all hosts, N hosts at a time
#
-# We exclude builders, persistent-cloud, jenkins-master and jenkins-slave as they don't have fasclient
+# We exclude builders and persistent-cloud as they don't have fasclient
#
- name: run fasClient -a to make email aliases on bastion
diff --git a/playbooks/vhost_update.yml b/playbooks/vhost_update.yml
index 9c82ab3..f2fc63d 100644
--- a/playbooks/vhost_update.yml
+++ b/playbooks/vhost_update.yml
@@ -1,4 +1,4 @@
-# This playboook updates a virthost and all it's guests.
+# This playboook updates a virthost and all it's guests.
#
# requires --extra-vars="target=somevhostname yumcommand=update"
# Might add nodns=true or nonagios=true at extra-vars
@@ -16,7 +16,7 @@
- name: add them to myvms_new group
local_action: add_host hostname={{ item }} groupname=myvms_new
- with_items: vmlist.list_vms
+ with_items: '{{vmlist.list_vms}}'
# Call out to another playbook. Disable any proxies that may live here
#- import_playbook: update-proxy-dns.yml status=disable proxies=myvms_new:&proxies
@@ -51,7 +51,7 @@
command: dnf -y {{ yumcommand }}
async: 7200
poll: 30
- when: ansible_distribution_major_version|int > 21 and ansible_cmdline.ostree is not defined
+ when: package_excludes is defined
- name: run rkhunter if installed
hosts: "{{ target }}:myvms_new"
@@ -65,4 +65,4 @@
- name: run rkhunter --propupd
command: /usr/bin/rkhunter --propupd
- when: rkhunter|success
+ when: rkhunter is success
diff --git a/vars/Fedora.yml b/vars/Fedora.yml
index ceb6613..35e4b66 100644
--- a/vars/Fedora.yml
+++ b/vars/Fedora.yml
@@ -3,6 +3,6 @@ dist_tag: f{{ ansible_distribution_version }}
base_pkgs_inst: ['iptables-services' ]
base_pkgs_erase: ['firewalld', 'sendmail', 'at']
service_disabled: [ ]
-service_enabled: ['postfix']
+service_enabled: ['auditd','logrotate.timer']
is_fedora: True
pythonsitelib: /usr/lib/python2.7/site-packages
diff --git a/vars/RedHat.yml b/vars/RedHat.yml
index d5e7621..67d7b27 100644
--- a/vars/RedHat.yml
+++ b/vars/RedHat.yml
@@ -1,7 +1,7 @@
---
dist_tag: el{{ ansible_distribution_version[0] }}
-base_pkgs_inst: ['iptables-services']
-base_pkgs_erase: ['firstboot-tui','bluez-utils', 'sendmail', 'firewalld']
+base_pkgs_inst: ['iptables', 'iptables-services']
+base_pkgs_erase: ['firstboot-tui','bluez-utils', 'sendmail','firewalld']
service_disabled: []
service_enabled: []
is_rhel: True
diff --git a/vars/all/00-FedoraCycleNumber.yaml b/vars/all/00-FedoraCycleNumber.yaml
index b909379..d1ec27f 100644
--- a/vars/all/00-FedoraCycleNumber.yaml
+++ b/vars/all/00-FedoraCycleNumber.yaml
@@ -1 +1 @@
-FedoraCycleNumber: 29
+FedoraCycleNumber: 34
diff --git a/vars/global.yml b/vars/global.yml
index 9d4f968..f3ed1c8 100644
--- a/vars/global.yml
+++ b/vars/global.yml
@@ -53,7 +53,7 @@ centos66_x86_64: CentOS-6-x86_64-GenericCloud-20141129_01
rhel70_x86_64: rhel-guest-image-7.0-20140930.0.x86_64
rhel66_x86_64: rhel-guest-image-6.6-20141222.0.x86_64
-# Note: we do "all and blacklist" rather than whitelist to make sure we can use this
+# Note: we do "+all -some" rather than "+some" to make sure we can use this
# same list on both EL7 and Fedora and get new ciphers: on Fedora, at time of writing,
# this includes TLSv1.3, which EL7 does not have.
ssl_protocols: "+all -SSLv3 -TLSv1 -TLSv1.1"
3 years, 4 months
[ansible] Update include
by Nicolas Chauvet
commit 80e90921a5d93dc5d99fc8d647c86bf79f1a344e
Author: Nicolas Chauvet <kwizart(a)gmail.com>
Date: Wed Aug 18 21:35:36 2021 +0200
Update include
playbooks/include/happy_birthday.yml | 1 +
playbooks/include/proxies-certificates.yml | 4 ++--
playbooks/include/proxies-fedora-web.yml | 2 +-
playbooks/include/proxies-haproxy.yml | 2 +-
playbooks/include/proxies-redirects.yml | 2 +-
playbooks/include/proxies-reverseproxy.yml | 2 +-
playbooks/include/proxies-rewrites.yml | 2 +-
playbooks/include/proxies-websites.yml | 12 +++---------
8 files changed, 11 insertions(+), 16 deletions(-)
---
diff --git a/playbooks/include/happy_birthday.yml b/playbooks/include/happy_birthday.yml
index f9ba7e6..6d7a41c 100644
--- a/playbooks/include/happy_birthday.yml
+++ b/playbooks/include/happy_birthday.yml
@@ -9,6 +9,7 @@
tasks:
- import_tasks: "{{ tasks_path }}/happy_birthday.yml"
+ - include_vars: dir=/srv/web/infra/ansible/vars/all/ ignore_files=README
handlers:
- import_tasks: "{{ handlers_path }}/restart_services.yml"
diff --git a/playbooks/include/proxies-certificates.yml b/playbooks/include/proxies-certificates.yml
index 8ecea0f..2f044e1 100644
--- a/playbooks/include/proxies-certificates.yml
+++ b/playbooks/include/proxies-certificates.yml
@@ -1,5 +1,5 @@
- name: Set up those proxy certificates. Good gravy..
- hosts: proxies-stg:proxies
+ hosts: proxies_stg:proxies
user: root
gather_facts: True
@@ -14,7 +14,7 @@
roles:
- role: httpd/mod_ssl
-
+
- role: httpd/certificate
name: wildcard-2016.rpmfusion.org
SSLCertificateChainFile: wildcard-2016.rpmfusion.org.intermediate.cert
diff --git a/playbooks/include/proxies-fedora-web.yml b/playbooks/include/proxies-fedora-web.yml
index b59fb00..9fdee72 100644
--- a/playbooks/include/proxies-fedora-web.yml
+++ b/playbooks/include/proxies-fedora-web.yml
@@ -1,5 +1,5 @@
- name: Set up all that fedora-web goodness. What a wonder!
- hosts: proxies-stg:proxies
+ hosts: proxies_stg:proxies
user: root
gather_facts: True
diff --git a/playbooks/include/proxies-haproxy.yml b/playbooks/include/proxies-haproxy.yml
index 1158351..941e5af 100644
--- a/playbooks/include/proxies-haproxy.yml
+++ b/playbooks/include/proxies-haproxy.yml
@@ -1,5 +1,5 @@
- name: Set up all the haproxy stuff.
- hosts: proxies-stg:proxies
+ hosts: proxies_stg:proxies
user: root
gather_facts: True
diff --git a/playbooks/include/proxies-redirects.yml b/playbooks/include/proxies-redirects.yml
index 9638be2..1cadeb1 100644
--- a/playbooks/include/proxies-redirects.yml
+++ b/playbooks/include/proxies-redirects.yml
@@ -1,5 +1,5 @@
- name: Set up those proxy redirects. Wow!
- hosts: proxies-stg:proxies
+ hosts: proxies_stg:proxies
user: root
gather_facts: True
diff --git a/playbooks/include/proxies-reverseproxy.yml b/playbooks/include/proxies-reverseproxy.yml
index c8d5239..0de8451 100644
--- a/playbooks/include/proxies-reverseproxy.yml
+++ b/playbooks/include/proxies-reverseproxy.yml
@@ -1,5 +1,5 @@
- name: Set up those ProxyPassReverse statements. Somebody get me a cup of coffee..
- hosts: proxies-stg:proxies
+ hosts: proxies_stg:proxies
user: root
gather_facts: True
diff --git a/playbooks/include/proxies-rewrites.yml b/playbooks/include/proxies-rewrites.yml
index 78b1109..30e9dd4 100644
--- a/playbooks/include/proxies-rewrites.yml
+++ b/playbooks/include/proxies-rewrites.yml
@@ -1,5 +1,5 @@
- name: Set up some domain rewrites.
- hosts: proxies-stg:proxies
+ hosts: proxies_stg:proxies
user: root
gather_facts: True
diff --git a/playbooks/include/proxies-websites.yml b/playbooks/include/proxies-websites.yml
index 2d9e52b..a5e0a29 100644
--- a/playbooks/include/proxies-websites.yml
+++ b/playbooks/include/proxies-websites.yml
@@ -1,5 +1,5 @@
- name: Set up those proxy websites. My, my..
- hosts: proxies-stg:proxies
+ hosts: proxies_stg:proxies
user: root
gather_facts: True
@@ -11,15 +11,9 @@
handlers:
- import_tasks: "{{ handlers_path }}/restart_services.yml"
-
- pre_tasks:
- - name: Install policycoreutils-python
- package: name=policycoreutils-python state=present
-
+ tasks:
- name: Create /srv/web/ for all the goodies.
- file: >
- dest=/srv/web state=directory
- owner=root group=root mode=0755
+ file: dest=/srv/web state=directory owner=root group=root mode=0755
tags:
- httpd
- httpd/website
3 years, 4 months
[ansible] Update koji-hub
by Nicolas Chauvet
commit 02902a8250d872e573d0b5cf19d9d6b43801e70a
Author: Nicolas Chauvet <kwizart(a)gmail.com>
Date: Wed Aug 18 21:32:45 2021 +0200
Update koji-hub
playbooks/groups/koji-hub.yml | 20 ++++----------------
1 files changed, 4 insertions(+), 16 deletions(-)
---
diff --git a/playbooks/groups/koji-hub.yml b/playbooks/groups/koji-hub.yml
index f5b3961..0cb5af6 100644
--- a/playbooks/groups/koji-hub.yml
+++ b/playbooks/groups/koji-hub.yml
@@ -30,13 +30,14 @@
- fas_client
- collectd/base
- apache
- - fedmsg/base
- { role: nfs/server, when: env == "staging" }
-# - { role: keepalived, when: env == "production" and inventory_hostname.startswith('koji') }
+
+ # production nfs mounts from netapp
- role: nfs/client
mnt_dir: '/mnt'
nfs_src_dir: 'rpmfusion_koji'
when: env == 'production' and inventory_hostname.startswith('koji')
+
- role: nfs/client
mnt_dir: '/mnt/koji'
nfs_src_dir: 'fedora_arm/data'
@@ -48,25 +49,12 @@
when: env == 'staging' and inventory_hostname.startswith('koji')
- koji_hub
- { role: rsyncd, when: not inventory_hostname.startswith('koji') }
- - { role: koji_builder, when: env == "staging" or inventory_hostname.startswith('s390') or inventory_hostname.startswith('arm') }
- sudo
-
- pre_tasks:
- - import_tasks: "{{ tasks_path }}/yumrepos.yml"
-
tasks:
- - name: create secondary volume dir for stg koji
- file: dest=/mnt/koji/vol state=directory owner=apache group=apache mode=0755
- tags: koji_hub
- when: env == 'staging'
- - name: create symlink for stg/prod secondary volume
- file: src=/mnt/fedora_koji_prod/koji dest=/mnt/koji/vol/prod state=link
- tags: koji_hub
- when: env == 'staging'
- - import_tasks: "{{ tasks_path }}/2fa_client.yml"
- import_tasks: "{{ tasks_path }}/motd.yml"
handlers:
- import_tasks: "{{ handlers_path }}/restart_services.yml"
+
3 years, 4 months
[ansible] Update tasks
by Nicolas Chauvet
commit 85575166a0d117345350fabfd5c0d712e52683ab
Author: Nicolas Chauvet <kwizart(a)gmail.com>
Date: Wed Aug 18 21:24:25 2021 +0200
Update tasks
tasks/cloud_setup_basic.yml | 60 ++++++----------
tasks/openvpn_client.yml | 1 -
tasks/persistent_cloud.yml | 55 ++++++---------
tasks/postfix_basic.yml | 1 +
tasks/rdiff_backup_server.yml | 2 +-
tasks/reg-server.yml | 17 ++++-
tasks/virt_instance_create.yml | 29 ++++++--
tasks/yumrepos.yml | 153 +++++++++++++++++++++++++++++++++++++++-
8 files changed, 233 insertions(+), 85 deletions(-)
---
diff --git a/tasks/cloud_setup_basic.yml b/tasks/cloud_setup_basic.yml
index 1ff61d5..c886ed0 100644
--- a/tasks/cloud_setup_basic.yml
+++ b/tasks/cloud_setup_basic.yml
@@ -6,51 +6,24 @@
- ntp
- libsemanage-python
- libselinux-python
- when: ansible_distribution_major_version|int < 22
+ - iptables
+ when: ansible_distribution_major_version|int < 8 and ansible_distribution == 'RedHat'
tags:
- packages
- name: Install desired extra packages (dnf)
- dnf: state=present pkg={{ item }}
- with_items:
- - ntpdate
- - ntp
- - libsemanage-python
- - libselinux-python
- when: ansible_distribution_major_version|int > 21 and ansible_cmdline.ostree is not defined
- tags:
- - packages
-
-- name: remove some packages (yum)
- package: state=absent pkg={{ item }}
- with_items:
- - chrony
- tags:
- - packages
- when: ansible_distribution_major_version|int < 22
-
-- name: remove some packages (dnf)
- dnf: state=absent pkg={{ item }}
- with_items:
- - chrony
+ dnf:
+ state: present
+ pkg:
+ - chrony
+ - python3-libselinux
+ when: ansible_distribution_major_version|int > 7 and ansible_distribution == 'RedHat' and ansible_cmdline.ostree is not defined
tags:
- packages
- when: ansible_distribution_major_version|int > 21 and ansible_cmdline.ostree is not defined
- name: Include basessh
include_role: name=basessh
-- name: put step-tickers in place
- copy: src="{{ files }}/common/step-tickers" dest=/etc/ntp/step-tickers
- when: ansible_cmdline.ostree is not defined
- tags:
- - ntp
- - config
-
-- name: enable the service
- service: name=ntpd state=started enabled=true
- when: ansible_cmdline.ostree is not defined
-
#- name: edit hostname to be instance name - prefix hostbase var if it exists
# shell: hostname {{ hostbase }}`curl -s http://169.254.169.254/latest/meta-data/instance-id`
# tags:
@@ -86,14 +59,21 @@
- name: update all
command: yum -y update creates=/etc/sysconfig/global-update-applied
register: updated
- when: ansible_distribution_major_version|int < 22
+ when: ansible_distribution_major_version|int < 8 and ansible_distribution == 'RedHat'
+ tags:
+ - packages
+
+- name: update all
+ command: dnf -y update creates=/etc/sysconfig/global-update-applied
+ register: updated
+ when: ansible_distribution_major_version|int > 7 and ansible_distribution == 'RedHat' and ansible_cmdline.ostree is not defined
tags:
- packages
- name: update all
command: dnf -y update creates=/etc/sysconfig/global-update-applied
register: updated
- when: ansible_distribution_major_version|int > 21 and ansible_cmdline.ostree is not defined
+ when: ansible_distribution_major_version|int >= 29 and ansible_distribution == 'Fedora' and ansible_cmdline.ostree is not defined
tags:
- packages
@@ -102,3 +82,9 @@
when: updated is defined
tags:
- packages
+
+- name: ensure tmp.mount is not masked, logrotate start would fail
+ systemd:
+ name: tmp.mount
+ masked: no
+ when: ansible_distribution_major_version|int >= 30 and ansible_distribution == 'Fedora'
diff --git a/tasks/openvpn_client.yml b/tasks/openvpn_client.yml
index f9b44d1..0758fff 100644
--- a/tasks/openvpn_client.yml
+++ b/tasks/openvpn_client.yml
@@ -44,4 +44,3 @@
service: name=openvpn state=started enabled=true
tags:
- service
- when: ansible_distribution_major_version|int == 6
diff --git a/tasks/persistent_cloud.yml b/tasks/persistent_cloud.yml
index 11a16dc..103bb1d 100644
--- a/tasks/persistent_cloud.yml
+++ b/tasks/persistent_cloud.yml
@@ -1,5 +1,7 @@
# New tasks to spin up instance in https://fedorainfracloud.org
+- include_vars: dir=/srv/web/infra/ansible/vars/all/ ignore_files=README
+
- name: check it out
local_action: shell nc -w 5 {{ inventory_hostname }} 22 < /dev/null
register: host_is_up
@@ -7,22 +9,6 @@
changed_when: false
check_mode: no
-- name: clean out old known_hosts (name based entries)
- local_action: known_hosts path={{item}} host={{inventory_hostname}} state=absent
- ignore_errors: True
- with_items:
- - /root/.ssh/known_hosts
- - /etc/ssh/ssh_known_hosts
- when: host_is_up|failed
-
-- name: clean out old known_hosts (IP based entries)
- local_action: known_hosts path={{item}} host={{ lookup('dig', inventory_hostname, wantlist=False) }} state=absent
- ignore_errors: True
- with_items:
- - /root/.ssh/known_hosts
- - /etc/ssh/ssh_known_hosts
- when: host_is_up|failed
-
- name: spin UP VM using nova_compute
become: False
local_action:
@@ -33,7 +19,7 @@
login_tenant_name: "{{inventory_tenant}}"
name: "{{inventory_instance_name}}"
image_id: "{{ image|image_name_to_id('admin', ADMIN_PASS, inventory_tenant, os_auth_url) }}"
- wait_for: 300
+ wait_for: 600
flavor_id: "{{ instance_type|flavor_name_to_id('admin', ADMIN_PASS, inventory_tenant, os_auth_url) }}"
security_groups: "{{security_group}}"
key_name: "{{ keypair }}"
@@ -42,7 +28,7 @@
floating_ips:
- "{{public_ip}}"
register: nova_result
- when: host_is_up|failed
+ when: host_is_up is failed
# instance can be both id and name, volume must be id
# volume must be id
@@ -62,35 +48,34 @@
# If it is attach it.
#
- local_action: shell nova --os-auth-url="{{os_auth_url}}" --os-username="admin" --os-password="{{ADMIN_PASS}}" --os-tenant-name={{inventory_tenant}} volume-attach "{{inventory_instance_name}}" "{{item.volume_id}}" "{{item.device}}"
- with_items: "{{ volumes|default([]) }}"
+ with_items: "{{ volume_available.results|default([]) }}"
ignore_errors: True
failed_when: False
- when: volumes is defined and volume_available is defined and volume_available
+ when: volumes is defined and volume_available is defined and item.changed
- name: wait for the host to be hot
local_action: wait_for host={{ public_ip }} port=22 delay=1 timeout=600
- when: host_is_up|failed
+ when: host_is_up is failed
+
+# SSH is up and running; however, cloud-init still has not deployed the ssh keypair,
+# so we have to wait some time. 10 sec is usually enough, but not always.
+
+- name: waiting for cloud-init
+ pause: seconds=30
+ when: host_is_up is failed
- name: gather ssh host key from new instance
local_action: command ssh-keyscan -t rsa {{ inventory_hostname }}
ignore_errors: True
register: hostkey
- when: host_is_up|failed
+ when: host_is_up is failed
-- name: add new ssh host key (you still need to add it to official ssh_host_keys later)
- local_action: known_hosts path={{ item }} key={{ hostkey.stdout }} host={{ inventory_hostname }} state=present
+- name: add new ssh host key (until we can sign it)
+ local_action: known_hosts path={{item}} key="{{ hostkey.stdout }}" host={{ inventory_hostname }} state=present
ignore_errors: True
with_items:
- /root/.ssh/known_hosts
- - /etc/ssh/ssh_known_hosts
- when: host_is_up|failed
-
-# SSH is up and running, however cloud-init still did not deployed ssh keypair
-# we have to wait some time. 10 sec is usually enough, but not always.
-
-- name: waiting for cloud-init
- pause: seconds=30
- when: host_is_up|failed
+ when: host_is_up is failed
#
# Next we try and gather facts. If the host doesn't have python2 this will fail.
@@ -108,7 +93,7 @@
- name: install python2 and dnf stuff
raw: dnf -y install python-dnf libselinux-python
- when: facts|failed
+ when: facts is failed
# TODO - somehow guess when keypair is finally deployed and return little bit earlier
## We need to specify user, here we trying with fedora or root
@@ -120,3 +105,5 @@
# # poll: 5
# ignore_errors: True
#
+- name: Include SSH config
+ import_role: name=basessh
diff --git a/tasks/postfix_basic.yml b/tasks/postfix_basic.yml
index 2688861..4bd6a7d 100644
--- a/tasks/postfix_basic.yml
+++ b/tasks/postfix_basic.yml
@@ -12,6 +12,7 @@
- "{{ roles_path }}/base/files/postfix/main.cf/main.cf.{{ inventory_hostname }}"
- "{{ roles_path }}/base/files/postfix/main.cf/main.cf.{{ host_group }}"
- "{{ roles_path }}/base/files/postfix/main.cf/main.cf.{{ postfix_group }}"
+ - "{{ roles_path }}/base/files/postfix/main.cf/main.cf.{{ datacenter }}"
- "{{ roles_path }}/base/files/postfix/main.cf/main.cf"
notify:
- restart postfix
diff --git a/tasks/rdiff_backup_server.yml b/tasks/rdiff_backup_server.yml
index 1e2d553..c3eeff1 100644
--- a/tasks/rdiff_backup_server.yml
+++ b/tasks/rdiff_backup_server.yml
@@ -1,7 +1,7 @@
---
# tasklist for setting up a rdiff backup server.
- name: install rdiff-backup
- yum: pkg={{ item }} state=present
+ package: name={{ item }} state=present
with_items:
- rdiff-backup
- git
diff --git a/tasks/reg-server.yml b/tasks/reg-server.yml
index 1173bff..6fb6b04 100644
--- a/tasks/reg-server.yml
+++ b/tasks/reg-server.yml
@@ -1,7 +1,6 @@
- name: install reg-server
package:
- name: reg-server
- state: latest
+ name: reg
tags:
- regserver
@@ -34,3 +33,17 @@
when: env == "staging"
tags:
- regserver
+
+- name: Copy fedora icon
+ copy:
+ src: "{{files}}/reg-server/fedora.png"
+ dest: "/var/lib/reg-server/static/fedora.png"
+ tags:
+ - regserver
+
+- name: Copy custom styles.css
+ copy:
+ src: "{{files}}/reg-server/styles.css"
+ dest: "/var/lib/reg-server/static/css/styles.css"
+ tags:
+ - regserver
diff --git a/tasks/virt_instance_create.yml b/tasks/virt_instance_create.yml
index 1ede210..f0ff9ef 100644
--- a/tasks/virt_instance_create.yml
+++ b/tasks/virt_instance_create.yml
@@ -4,11 +4,18 @@
- include_vars: dir=/srv/web/infra/ansible/vars/all/ ignore_files=README
- name: get vm list
+ vars:
+ ansible_python_interpreter: "{{ hostvars[vmhost]['ansible_python']['executable'] }}"
delegate_to: "{{ vmhost }}"
virt: command=list_vms
register: result
check_mode: no
+- name: ensure no old facts exist
+ delegate_to: localhost
+ file: path=/root/.ansible_facts_cache/{{ inventory_hostname }} state=absent
+ when: inventory_hostname not in result.list_vms
+
- name: ensure the lv for the guest is made
lvol: lv={{ inventory_hostname }} vg={{ volgroup }} size={{ lvm_size }} state=present
delegate_to: "{{ vmhost }}"
@@ -90,8 +97,8 @@
with_items:
- /root/.ssh/known_hosts
- /etc/ssh/ssh_known_hosts
- when: inventory_hostname not in result.list_vms and inventory_hostname in groups['osbs-masters-stg']+groups['osbs-nodes-stg']
- delegate_to: osbs-control01.stg.phx2.fedoraproject.org
+ when: inventory_hostname not in result.list_vms and inventory_hostname in groups['osbs_masters_stg']+groups['osbs_nodes_stg']
+ delegate_to: osbs-control01.stg.{{ datacenter }}.fedoraproject.org
- name: (osbs-control01) make sure there is no old ssh host key for the host still around
known_hosts: path={{item}} host={{ inventory_hostname }} state=absent
@@ -99,8 +106,8 @@
with_items:
- /root/.ssh/known_hosts
- /etc/ssh/ssh_known_hosts
- when: inventory_hostname not in result.list_vms and inventory_hostname in groups['osbs-masters']+groups['osbs-nodes']
- delegate_to: osbs-control01.phx2.fedoraproject.org
+ when: inventory_hostname not in result.list_vms and inventory_hostname in groups['osbs_masters']+groups['osbs_nodes']
+ delegate_to: osbs-control01.{{ datacenter }}.fedoraproject.org
- name: wait for ssh on the vm to start back
local_action: wait_for delay=10 host={{ inventory_hostname }} port=22 state=started timeout=1200
@@ -125,8 +132,8 @@
with_items:
- /root/.ssh/known_hosts
- /etc/ssh/ssh_known_hosts
- when: inventory_hostname not in result.list_vms and inventory_hostname in groups['osbs-masters-stg']+groups['osbs-nodes-stg']
- delegate_to: osbs-control01.stg.phx2.fedoraproject.org
+ when: inventory_hostname not in result.list_vms and inventory_hostname in groups['osbs_masters_stg']+groups['osbs_nodes_stg']
+ delegate_to: osbs-control01.stg.{{ datacenter }}.fedoraproject.org
- name: (osbs-control01) add new ssh host key
known_hosts: path={{item}} key="{{ hostkey.stdout }}" host={{ inventory_hostname }} state=present
@@ -134,6 +141,12 @@
with_items:
- /root/.ssh/known_hosts
- /etc/ssh/ssh_known_hosts
- when: inventory_hostname not in result.list_vms and inventory_hostname in groups['osbs-masters']+groups['osbs-nodes']
- delegate_to: osbs-control01.phx2.fedoraproject.org
+ when: inventory_hostname not in result.list_vms and inventory_hostname in groups['osbs_masters']+groups['osbs_nodes']
+ delegate_to: osbs-control01.{{ datacenter }}.fedoraproject.org
+- name: gather facts
+ setup:
+ check_mode: no
+ ignore_errors: True
+ register: facts
+ when: inventory_hostname not in result.list_vms
diff --git a/tasks/yumrepos.yml b/tasks/yumrepos.yml
index 5ae06a5..6ed69d3 100644
--- a/tasks/yumrepos.yml
+++ b/tasks/yumrepos.yml
@@ -1,4 +1,153 @@
---
+- name: Include vars/all/
+ include_vars:
+ dir: /srv/web/infra/ansible/vars/all/
+ ignore_files: [README]
+ tags:
+ - config
+ - packages
+ - yumrepos
+
+- name: enable repos for archived Fedora releases
+ set_fact:
+ archive_if_archived: >-
+ {{
+ '/archive'
+ if ansible_distribution_major_version|int < (
+ FedoraPreviousPreviousCycleNumber|int
+ if FedoraPreviousPrevious == True
+ else FedoraPreviousCycleNumber|int
+ )
+ else ''
+ }}
+ when: ansible_distribution == 'Fedora'
+ tags:
+ - config
+ - packages
+ - yumrepos
+
+- name: put rhel repos on rhel systems
+ copy: src="{{ files }}/common/rhel{{ ansible_distribution_major_version|int }}.repo" dest="/etc/yum.repos.d/rhel{{ ansible_distribution_major_version|int }}.repo"
+ when: ansible_distribution == 'RedHat' and not inventory_hostname.startswith('ppc9') and datacenter != "aws"
+ tags:
+ - config
+ - packages
+ - yumrepos
+
+- name: put rhel repos on rhel systems (power9)
+ copy: src="{{ files }}/common/rhel{{ ansible_distribution_major_version|int }}-power9.repo" dest="/etc/yum.repos.d/rhel{{ ansible_distribution_major_version|int }}.repo"
+ when: ansible_distribution == 'RedHat' and inventory_hostname.startswith('ppc9') and datacenter != "aws"
+ tags:
+ - config
+ - packages
+ - yumrepos
+
+- name: put rhel rhev for power repos on rhel power systems
+ copy: src="{{ files }}/common/rhel7-power-rhev.repo" dest="/etc/yum.repos.d/rhel7-power-rhev.repo"
+ when: ansible_distribution == 'RedHat' and ansible_distribution_major_version|int == 7 and ansible_architecture =='ppc64le' and not inventory_hostname.startswith('ppc9')
+ tags:
+ - config
+ - packages
+ - yumrepos
+
+- name: put openshift 3.4 repo on os- systems
+ template: src="{{ files }}/openshift/openshift.repo" dest="/etc/yum.repos.d/openshift.repo"
+ when: ansible_distribution == 'RedHat' and ansible_distribution_major_version|int == 7 and inventory_hostname.startswith('os-') and datacenter != "aws"
+ tags:
+ - config
+ - packages
+ - yumrepos
+
+- name: put epel repos on el systems
+ copy: src="{{ files }}/common/epel{{ ansible_distribution_major_version|int }}.repo" dest="/etc/yum.repos.d/epel{{ ansible_distribution_major_version|int }}.repo"
+ when: ((ansible_distribution == 'RedHat' or ansible_distribution == 'CentOS') and use_default_epel) and datacenter != "aws"
+ tags:
+ - config
+ - packages
+ - yumrepos
+
+- name: put epel repos on el systems (aws)
+ copy: src="{{ files }}/common/original-epel{{ ansible_distribution_major_version|int }}.repo" dest="/etc/yum.repos.d/epel{{ ansible_distribution_major_version|int }}.repo"
+ when: ((ansible_distribution == 'RedHat' or ansible_distribution == 'CentOS') and use_default_epel) and datacenter == "aws"
+ tags:
+ - config
+ - packages
+ - yumrepos
+
+- name: put epel gpg key on el systems (aws)
+ copy: src="{{ files }}/common/RPM-GPG-KEY-EPEL-{{ ansible_distribution_major_version|int }}" dest="/etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-{{ ansible_distribution_major_version|int }}"
+ when: ((ansible_distribution == 'RedHat' or ansible_distribution == 'CentOS') and use_default_epel) and datacenter == "aws"
+ tags:
+ - config
+ - packages
+ - yumrepos
+
+- name: epel release on el systems (aws)
+ package:
+ name: 'epel-release'
+ state: present
+ when: ((ansible_distribution == 'RedHat' or ansible_distribution == 'CentOS') and use_default_epel) and datacenter == "aws"
+ tags:
+ - config
+ - packages
+ - yumrepos
+
+- name: put fedora repos on primary architecture systems
+ template: src="{{ files }}/common/{{ item }}" dest="/etc/yum.repos.d/{{ item }}"
+ with_items:
+ - fedora.repo
+ - fedora-updates.repo
+ - fedora-updates-testing.repo
+ when: ansible_distribution == 'Fedora' and not inventory_hostname.startswith(('buildvm-s390x', 'buildvm-ppc64le')) and not inventory_hostname.startswith('rawhide-test') and datacenter != "aws"
+ tags:
+ - config
+ - packages
+ - yumrepos
+
+- name: put fedora modular repos on primary architecture systems
+ template: src="{{ files }}/common/{{ item }}" dest="/etc/yum.repos.d/{{ item }}"
+ with_items:
+ - fedora-modular.repo
+ - fedora-updates-modular.repo
+ - fedora-updates-testing-modular.repo
+ when: ansible_distribution == 'Fedora' and not inventory_hostname.startswith('rawhide-test') and not inventory_hostname.startswith(('buildvm-s390x', 'buildvm-ppc64le')) and datacenter != "aws"
+ tags:
+ - config
+ - packages
+ - yumrepos
+
+- name: put fedora repos on secondary architecture systems
+ template: src="{{ files }}/common/{{ item }}-secondary" dest="/etc/yum.repos.d/{{ item }}"
+ with_items:
+ - fedora.repo
+ - fedora-updates.repo
+ - fedora-updates-testing.repo
+ when: ansible_distribution == 'Fedora' and (ansible_architecture == 'ppc64' or ansible_architecture == 'ppc64le' or ansible_architecture == 's390x') and datacenter != "aws"
+ tags:
+ - config
+ - packages
+ - yumrepos
+
+- name: put fedora repos on secondary architecture systems
+ template: src="{{ files }}/common/{{ item }}-secondary" dest="/etc/yum.repos.d/{{ item }}"
+ with_items:
+ - fedora-modular.repo
+ - fedora-updates-modular.repo
+ - fedora-updates-testing-modular.repo
+ when: ansible_distribution == 'Fedora' and ansible_distribution_major_version|int >= 29 and (ansible_architecture == 'ppc64le' or ansible_architecture == 's390x') and datacenter != "aws"
+ tags:
+ - config
+ - packages
+ - yumrepos
+
+- name: add aarch64 server rpms repo
+ copy: src="{{ files }}/common/rhel-{{ ansible_distribution_major_version|int }}-aarch64-server-rpms.repo" dest="/etc/yum.repos.d/rhel-{{ ansible_distribution_major_version|int }}-aarch64-server-rpms.repo"
+ when: (ansible_distribution == 'RedHat' or ansible_distribution == 'CentOS') and inventory_hostname.startswith('aarch64-c') and ansible_distribution_major_version|int >= 7 and datacenter != "aws"
+ tags:
+ - config
+ - packages
+ - yumrepos
+
- name: add infrastructure tags repo - RHEL
copy: src="{{ files }}/common/rhel-infra-tags.repo" dest="/etc/yum.repos.d/infra-tags.repo"
when: (ansible_distribution == 'RedHat' or ansible_distribution == 'CentOS')
@@ -9,7 +158,7 @@
- name: add infrastructure STAGING tags repo - RHEL
copy: src="{{ files }}/common/rhel-infra-tags-stg.repo" dest="/etc/yum.repos.d/infra-tags-stg.repo"
- when: (ansible_distribution == 'RedHat' or ansible_distribution == 'CentOS') and env == 'staging'
+ when: (ansible_distribution == 'RedHat' or ansible_distribution == 'CentOS') and env in ['staging', 'pagure-staging']
tags:
- config
- packages
@@ -25,7 +174,7 @@
- name: add infrastructure STAGING tags repo - Fedora
copy: src="{{ files }}/common/fedora-infra-tags-stg.repo" dest="/etc/yum.repos.d/infra-tags-stg.repo"
- when: ansible_distribution == 'Fedora' and env == 'staging'
+ when: ansible_distribution == 'Fedora' and env in ['staging', 'pagure-staging']
tags:
- config
- packages
3 years, 4 months
[ansible] Update for linaro
by Nicolas Chauvet
commit e96af4a44ab185522cfbbad5532e17a8bf77d23f
Author: Nicolas Chauvet <kwizart(a)gmail.com>
Date: Wed Aug 18 20:46:25 2021 +0200
Update for linaro
inventory/builders | 6 +++++-
inventory/group_vars/linaro | 1 +
2 files changed, 6 insertions(+), 1 deletions(-)
---
diff --git a/inventory/builders b/inventory/builders
index 963b949..7b2edcd 100644
--- a/inventory/builders
+++ b/inventory/builders
@@ -64,11 +64,15 @@ arm-jetson-tx1.home.rpmfusion.net
[builders:children]
buildhw
+buildvm
+
+[buildhw:children]
buildhw_armv7
buildhw_ppc64
buildhw_ppc64le
buildhw_aarch64
-buildvm
+
+[buildvm:children]
buildvm_aarch64
buildvm_arm
diff --git a/inventory/group_vars/linaro b/inventory/group_vars/linaro
index 1ba16db..d0169df 100644
--- a/inventory/group_vars/linaro
+++ b/inventory/group_vars/linaro
@@ -1,2 +1,3 @@
datacenter: linaro
ansible_ifcfg_blacklist: True
+gw: 192.168.1.1
3 years, 4 months