Bug 1250585 - beetmover - add push to mirrors, r=rail

* this also fixes a bug where we take build/0 from tc artifacts regardless of whether there was a retry

MozReview-Commit-ID: KKJCGF6Hc7k
parent 97791119bf
commit be20987342
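The retry fix mentioned above is the artifact_base_url change further down: the old template pinned every artifact download to runs/0, while the new one lets the TaskCluster queue serve the artifacts from the task's most recent run. A minimal sketch of the difference, using the two templates from the diff and a made-up task id:

    # Both templates are taken verbatim from the beetmover diff below; the
    # task id and subdir values are placeholders for illustration only.
    OLD_TEMPLATE = "https://queue.taskcluster.net/v1/task/{taskid}/runs/0/artifacts/public/{subdir}"
    NEW_TEMPLATE = "https://queue.taskcluster.net/v1/task/{taskid}/artifacts/public/{subdir}"

    taskid, subdir = "EXAMPLETASKID", "build"

    # Old form: always fetches run 0, even if a retry produced the artifacts in a later run.
    print(OLD_TEMPLATE.format(taskid=taskid, subdir=subdir))

    # New form: no run number, so the queue resolves the latest run for us.
    print(NEW_TEMPLATE.format(taskid=taskid, subdir=subdir))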
testing/mozharness/mozharness/mozilla/aws.py (new file, 11 lines added)
@@ -0,0 +1,11 @@
+import os
+
+
+def pop_aws_auth_from_env():
+    """
+    retrieves aws creds and deletes them from os.environ if present.
+    """
+    aws_key_id = os.environ.pop("AWS_ACCESS_KEY_ID", None)
+    aws_secret_key = os.environ.pop("AWS_SECRET_ACCESS_KEY", None)
+
+    return aws_key_id, aws_secret_key
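Because os.environ.pop is given a None default, the helper never raises on a missing variable; it simply returns None for whatever is absent and leaves the decision to the caller. A small usage sketch, assuming mozharness is importable and using placeholder values:

    import os

    from mozharness.mozilla.aws import pop_aws_auth_from_env

    # Placeholder credentials for illustration only.
    os.environ["AWS_ACCESS_KEY_ID"] = "AKIAEXAMPLE"
    os.environ["AWS_SECRET_ACCESS_KEY"] = "not-a-real-secret"

    creds = pop_aws_auth_from_env()
    print(creds)                              # ('AKIAEXAMPLE', 'not-a-real-secret')
    print("AWS_ACCESS_KEY_ID" in os.environ)  # False: the variables were popped

    # A second call finds nothing and returns (None, None); the scripts below
    # use all()/any() on this tuple to decide whether to abort.
    print(pop_aws_auth_from_env())            # (None, None)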
@@ -20,6 +20,7 @@ sys.path.insert(1, os.path.dirname(os.path.dirname(sys.path[0])))
 from mozharness.base.log import FATAL
 from mozharness.base.python import VirtualenvMixin
 from mozharness.base.script import BaseScript
+from mozharness.mozilla.aws import pop_aws_auth_from_env
 import mozharness
 
 
@@ -29,22 +30,6 @@ def get_hash(content, hash_type="md5"):
     return h.hexdigest()
 
 
-def get_aws_auth():
-    """
-    retrieves aws creds and deletes them from os.environ if present.
-    """
-    aws_key_id = os.environ.get("AWS_ACCESS_KEY_ID")
-    aws_secret_key = os.environ.get("AWS_SECRET_ACCESS_KEY")
-
-    if aws_key_id and aws_secret_key:
-        del os.environ['AWS_ACCESS_KEY_ID']
-        del os.environ['AWS_SECRET_ACCESS_KEY']
-    else:
-        exit("could not determine aws credentials from os environment")
-
-    return aws_key_id, aws_secret_key
-
-
 CONFIG_OPTIONS = [
     [["--template"], {
         "dest": "template",
@@ -141,9 +126,7 @@ class BeetMover(BaseScript, VirtualenvMixin, object):
             # Default configuration
             'config': {
                 # base index url where to find taskcluster artifact based on taskid
-                # TODO - find out if we need to support taskcluster run number other than 0.
-                # e.g. maybe we could end up with artifacts in > 'run 0' in a re-trigger situation?
-                "artifact_base_url": 'https://queue.taskcluster.net/v1/task/{taskid}/runs/0/artifacts/public/{subdir}',
+                "artifact_base_url": 'https://queue.taskcluster.net/v1/task/{taskid}/artifacts/public/{subdir}',
                 "virtualenv_modules": [
                     "boto",
                     "PyYAML",
@@ -167,6 +150,8 @@ class BeetMover(BaseScript, VirtualenvMixin, object):
         # assigned in _post_create_virtualenv
         self.virtualenv_imports = None
         self.bucket = c['buckets']['production'] if c['production'] else c['buckets']['development']
+        if not all(aws_creds):
+            self.fatal('credentials must be passed in env: "AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY"')
         self.aws_key_id, self.aws_secret_key = aws_creds
         # if excludes is set from command line, use it otherwise use defaults
         self.excludes = self.config.get('excludes', DEFAULT_EXCLUDES)
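Since pop_aws_auth_from_env() returns None placeholders instead of exiting the way get_aws_auth() did, the constructor now validates the tuple itself; all() rejects a partially set environment as well as an empty one. A standalone sketch of that guard, with a hypothetical check_creds helper standing in for the constructor and print standing in for self.fatal:

    def check_creds(aws_creds):
        # Mirrors the BeetMover guard: both values must be truthy.
        if not all(aws_creds):
            print('fatal: credentials must be passed in env: '
                  '"AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY"')
            return False
        return True

    print(check_creds(("AKIAEXAMPLE", "not-a-real-secret")))  # True
    print(check_creds(("AKIAEXAMPLE", None)))                 # False: one variable missing
    print(check_creds((None, None)))                          # False: nothing in the env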
@@ -339,5 +324,5 @@ class BeetMover(BaseScript, VirtualenvMixin, object):
         return any(re.search(exclude, keyname) for exclude in self.excludes)
 
 if __name__ == '__main__':
-    beet_mover = BeetMover(get_aws_auth())
+    beet_mover = BeetMover(pop_aws_auth_from_env())
     beet_mover.run_and_exit()
@@ -3,10 +3,12 @@ import os
 import re
 import sys
 
 
 sys.path.insert(1, os.path.dirname(os.path.dirname(sys.path[0])))
 
 from mozharness.base.python import VirtualenvMixin, virtualenv_config_options
 from mozharness.base.script import BaseScript
+from mozharness.mozilla.aws import pop_aws_auth_from_env
+
 
 class ReleasePusher(BaseScript, VirtualenvMixin):
@@ -45,7 +47,7 @@ class ReleasePusher(BaseScript, VirtualenvMixin):
         }],
     ] + virtualenv_config_options
 
-    def __init__(self):
+    def __init__(self, aws_creds):
        BaseScript.__init__(self,
                            config_options=self.config_options,
                            require_config_file=False,
@@ -68,9 +70,20 @@ class ReleasePusher(BaseScript, VirtualenvMixin):
             ],
         )
 
-        # set the env var for boto to read our special config file
-        # rather than anything else we have at ~/.boto
-        os.environ["BOTO_CONFIG"] = os.path.abspath(self.config["credentials"])
+        # validate aws credentials
+        if not (all(aws_creds) or self.config.get('credentials')):
+            self.fatal("aws creds not defined. please add them to your config or env.")
+        if any(aws_creds) and self.config.get('credentials'):
+            self.fatal("aws creds found in env and self.config. please declare in one place only.")
+
+        # set aws credentials
+        if aws_creds:
+            self.aws_key_id, self.aws_secret_key = aws_creds
+        else:  # use
+            self.aws_key_id, self.aws_secret_key = None, None
+            # set the env var for boto to read our special config file
+            # rather than anything else we have at ~/.boto
+            os.environ["BOTO_CONFIG"] = os.path.abspath(self.config["credentials"])
 
     def _pre_config_lock(self, rw_config):
         super(ReleasePusher, self)._pre_config_lock(rw_config)
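The rewritten __init__ accepts exactly one credential source: either the env tuple from pop_aws_auth_from_env(), later handed to S3Connection in the hunk below, or a boto config file that BOTO_CONFIG points at. A minimal standalone sketch of that resolution order, assuming boto is installed; connect_to_s3 is a hypothetical helper, config stands in for self.config, and all paths and keys are placeholders:

    import os


    def connect_to_s3(aws_creds, config):
        # aws_creds is the (key_id, secret_key) tuple from pop_aws_auth_from_env();
        # config plays the role of self.config. Only one source may be supplied.
        if not (all(aws_creds) or config.get('credentials')):
            raise SystemExit("aws creds not defined")            # self.fatal(...) in the script
        if any(aws_creds) and config.get('credentials'):
            raise SystemExit("aws creds declared in two places")

        if all(aws_creds):
            # Explicit credentials from the environment.
            from boto.s3.connection import S3Connection
            key_id, secret_key = aws_creds
            return S3Connection(aws_access_key_id=key_id,
                                aws_secret_access_key=secret_key)

        # Otherwise point boto at a dedicated config file instead of ~/.boto.
        # Importing boto after setting BOTO_CONFIG ensures it picks the file up.
        os.environ["BOTO_CONFIG"] = os.path.abspath(config["credentials"])
        from boto.s3.connection import S3Connection
        return S3Connection()

    # Example: conn = connect_to_s3(pop_aws_auth_from_env(), {"credentials": "./boto.cfg"})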
@@ -128,7 +141,8 @@ class ReleasePusher(BaseScript, VirtualenvMixin):
         logging.getLogger('boto').setLevel(logging.INFO)
 
         self.info("Connecting to S3")
-        conn = S3Connection()
+        conn = S3Connection(aws_access_key_id=self.aws_key_id,
+                            aws_secret_access_key=self.aws_secret_key)
         self.info("Getting bucket {}".format(self.config["bucket_name"]))
         bucket = conn.get_bucket(self.config["bucket_name"])
 
@@ -167,5 +181,5 @@ class ReleasePusher(BaseScript, VirtualenvMixin):
         pool.map(worker, find_release_files())
 
 if __name__ == "__main__":
-    myScript = ReleasePusher()
+    myScript = ReleasePusher(pop_aws_auth_from_env())
     myScript.run_and_exit()