diff --git a/nixops/backends/ec2.py b/nixops/backends/ec2.py index 4778c7bb2..2d09faddc 100644 --- a/nixops/backends/ec2.py +++ b/nixops/backends/ec2.py @@ -124,7 +124,7 @@ def __init__(self, depl, name, id): def _reset_state(self): """Discard all state pertaining to an instance.""" - with self.depl._db: + with self.depl._state.db: self.state = MachineState.MISSING self.associate_public_ip_address = None self.use_private_ip_address = None @@ -344,7 +344,7 @@ def _instance_ip_ready(ins): self.log_end("{0} / {1}".format(instance.ip_address, instance.private_ip_address)) - with self.depl._db: + with self.depl._state.db: self.private_ipv4 = instance.private_ip_address self.public_ipv4 = instance.ip_address self.public_dns_name = instance.public_dns_name @@ -586,7 +586,7 @@ def _assign_elastic_ip(self, elastic_ipv4, check): nixops.known_hosts.update(self.public_ipv4, elastic_ipv4, self.public_host_key) - with self.depl._db: + with self.depl._state.db: self.elastic_ipv4 = elastic_ipv4 self.public_ipv4 = elastic_ipv4 self.ssh_pinged = False @@ -599,7 +599,7 @@ def _assign_elastic_ip(self, elastic_ipv4, check): else: self.log("address ‘{0}’ was not associated with instance ‘{1}’".format(self.elastic_ipv4, self.vm_id)) - with self.depl._db: + with self.depl._state.db: self.elastic_ipv4 = None self.public_ipv4 = None self.ssh_pinged = False @@ -663,7 +663,7 @@ def create_instance(self, defn, zone, devmap, user_data, ebs_optimized): lambda: self._conn.request_spot_instances(price=defn.spot_instance_price/100.0, **common_args) )[0] - with self.depl._db: + with self.depl._state.db: self.spot_instance_price = defn.spot_instance_price self.spot_instance_request_id = request.id @@ -694,7 +694,7 @@ def create_instance(self, defn, zone, devmap, user_data, ebs_optimized): # the instance ID, we'll get the same instance ID on the # next run. 
if not self.client_token: - with self.depl._db: + with self.depl._state.db: self.client_token = nixops.util.generate_random_string(length=48) # = 64 ASCII chars self.state = self.STARTING @@ -893,7 +893,7 @@ def create(self, defn, check, allow_reboot, allow_recreate): # Generate a public/private host key. if not self.public_host_key: (private, public) = nixops.util.create_key_pair(type=defn.host_key_type()) - with self.depl._db: + with self.depl._state.db: self.public_host_key = public self.private_host_key = private @@ -903,7 +903,7 @@ def create(self, defn, check, allow_reboot, allow_recreate): instance = self.create_instance(defn, zone, devmap, user_data, ebs_optimized) - with self.depl._db: + with self.depl._state.db: self.vm_id = instance.id self.ami = defn.ami self.instance_type = defn.instance_type @@ -971,7 +971,7 @@ def create(self, defn, check, allow_reboot, allow_recreate): elastic_ipv4 = res.public_ipv4 self._assign_elastic_ip(elastic_ipv4, check) - with self.depl._db: + with self.depl._state.db: self.use_private_ip_address = defn.use_private_ip_address self.associate_public_ip_address = defn.associate_public_ip_address diff --git a/nixops/backends/hetzner.py b/nixops/backends/hetzner.py index cf85c1e97..d1a50ca20 100644 --- a/nixops/backends/hetzner.py +++ b/nixops/backends/hetzner.py @@ -594,7 +594,7 @@ def create(self, defn, check, allow_reboot, allow_recreate): self.log_start("creating an exclusive robot admin sub-account " "for ‘{0}’... ".format(self.name)) server = self._get_server_from_main_robot(self.main_ipv4, defn) - with self.depl._db: + with self.depl._state.db: (self.robot_admin_user, self.robot_admin_pass) = server.admin.create() self.log_end("done. 
({0})".format(self.robot_admin_user)) @@ -609,7 +609,7 @@ def create(self, defn, check, allow_reboot, allow_recreate): ) if robot_user != self.robot_admin_user or \ robot_pass != self.robot_admin_pass: - with self.depl._db: + with self.depl._state.db: (self.robot_admin_user, self.robot_admin_pass) = (robot_user, robot_pass) diff --git a/nixops/backends/virtualbox.py b/nixops/backends/virtualbox.py index d4a9c4b80..de8275cb4 100644 --- a/nixops/backends/virtualbox.py +++ b/nixops/backends/virtualbox.py @@ -194,7 +194,7 @@ def create(self, defn, check, allow_reboot, allow_recreate): # Generate a public/private host key. if not self.public_host_key: (private, public) = nixops.util.create_key_pair() - with self.depl._db: + with self.depl._state.db: self.public_host_key = public self.private_host_key = private @@ -203,7 +203,7 @@ def create(self, defn, check, allow_reboot, allow_recreate): # Backwards compatibility. if self.disk: - with self.depl._db: + with self.depl._state.db: self._update_disk("disk1", {"created": True, "path": self.disk, "attached": self.disk_attached, "port": 0}) diff --git a/nixops/deployment.py b/nixops/deployment.py index 5796d4ca1..076bb0a5a 100644 --- a/nixops/deployment.py +++ b/nixops/deployment.py @@ -12,7 +12,6 @@ import errno from collections import defaultdict from xml.etree import ElementTree -import nixops.statefile import nixops.backends import nixops.logger import nixops.parallel @@ -22,7 +21,6 @@ import getpass import traceback import glob -import fcntl import itertools import platform from nixops.util import ansi_success @@ -50,9 +48,8 @@ class Deployment(object): configs_path = nixops.util.attr_property("configsPath", None) rollback_enabled = nixops.util.attr_property("rollbackEnabled", False) - def __init__(self, statefile, uuid, log_file=sys.stderr): - self._statefile = statefile - self._db = statefile._db + def __init__(self, state, uuid, log_file=sys.stderr): + self._state = state self.uuid = uuid self._last_log_prefix = None 
@@ -72,15 +69,8 @@ def __init__(self, statefile, uuid, log_file=sys.stderr): if not os.path.exists(self.expr_path): self.expr_path = os.path.dirname(__file__) + "/../nix" - self.resources = {} - with self._db: - c = self._db.cursor() - c.execute("select id, name, type from Resources where deployment = ?", (self.uuid,)) - for (id, name, type) in c.fetchall(): - r = _create_state(self, type, name, id) - self.resources[name] = r + self.resources = self._state.get_resources_for(self) self.logger.update_log_prefixes() - self.definitions = None @@ -120,65 +110,41 @@ def get_machine(self, name): raise Exception("resource ‘{0}’ is not a machine".format(name)) return res - def _set_attrs(self, attrs): - """Update deployment attributes in the state file.""" - with self._db: - c = self._db.cursor() - for n, v in attrs.iteritems(): - if v == None: - c.execute("delete from DeploymentAttrs where deployment = ? and name = ?", (self.uuid, n)) - else: - c.execute("insert or replace into DeploymentAttrs(deployment, name, value) values (?, ?, ?)", - (self.uuid, n, v)) + """Update deployment attributes in the state.""" + self._state.set_deployment_attrs(self.uuid, attrs) def _set_attr(self, name, value): - """Update one deployment attribute in the state file.""" + """Update one deployment attribute in the state.""" self._set_attrs({name: value}) def _del_attr(self, name): - """Delete a deployment attribute from the state file.""" - with self._db: - self._db.execute("delete from DeploymentAttrs where deployment = ? and name = ?", (self.uuid, name)) + """Delete a deployment attribute from the state.""" + self._state.del_deployment_attr(self.uuid, name) + #TODO(moretea): The default param does not appear to be used at all? + # Removed it when moving the body to nixops/state/file.py. 
def _get_attr(self, name, default=nixops.util.undefined): - """Get a deployment attribute from the state file.""" - with self._db: - c = self._db.cursor() - c.execute("select value from DeploymentAttrs where deployment = ? and name = ?", (self.uuid, name)) - row = c.fetchone() - if row != None: return row[0] - return nixops.util.undefined - + """Get a deployment attribute from the state.""" + return self._state.get_deployment_attr(self.uuid, name) def _create_resource(self, name, type): - c = self._db.cursor() - c.execute("select 1 from Resources where deployment = ? and name = ?", (self.uuid, name)) - if len(c.fetchall()) != 0: - raise Exception("resource already exists in database!") - c.execute("insert into Resources(deployment, name, type) values (?, ?, ?)", - (self.uuid, name, type)) - id = c.lastrowid - r = _create_state(self, type, name, id) + r = self._state.create_resource(self, name, type) self.resources[name] = r return r def export(self): - with self._db: - c = self._db.cursor() - c.execute("select name, value from DeploymentAttrs where deployment = ?", (self.uuid,)) - rows = c.fetchall() - res = {row[0]: row[1] for row in rows} - res['resources'] = {r.name: r.export() for r in self.resources.itervalues()} - return res + res = self._state.get_all_deployment_attrs(self.uuid) + res['resources'] = {r.name: r.export() for r in self.resources.itervalues()} + return res def import_(self, attrs): - with self._db: + with self._state.db: for k, v in attrs.iteritems(): if k == 'resources': continue self._set_attr(k, v) @@ -189,49 +155,21 @@ def import_(self, attrs): def clone(self): - with self._db: - new = self._statefile.create_deployment() - self._db.execute("insert into DeploymentAttrs (deployment, name, value) " + - "select ?, name, value from DeploymentAttrs where deployment = ?", - (new.uuid, self.uuid)) - new.configs_path = None - return new + return self._state.clone_deployment(self.uuid) def _get_deployment_lock(self): - if self._lock_file_path is None: 
- lock_dir = os.environ.get("HOME", "") + "/.nixops/locks" - if not os.path.exists(lock_dir): os.makedirs(lock_dir, 0700) - self._lock_file_path = lock_dir + "/" + self.uuid - class DeploymentLock(object): - def __init__(self, depl): - self._lock_file_path = depl._lock_file_path - self._logger = depl.logger - self._lock_file = None - def __enter__(self): - self._lock_file = open(self._lock_file_path, "w") - fcntl.fcntl(self._lock_file, fcntl.F_SETFD, fcntl.FD_CLOEXEC) - try: - fcntl.flock(self._lock_file, fcntl.LOCK_EX | fcntl.LOCK_NB) - except IOError: - self._logger.log( - "waiting for exclusive deployment lock..." - ) - fcntl.flock(self._lock_file, fcntl.LOCK_EX) - def __exit__(self, exception_type, exception_value, exception_traceback): - self._lock_file.close() - return DeploymentLock(self) + return self._state.get_deployment_lock(self) def delete_resource(self, m): del self.resources[m.name] - with self._db: - self._db.execute("delete from Resources where deployment = ? and id = ?", (self.uuid, m.id)) + self._state.delete_resource(self.uuid, m.id) def delete(self, force=False): """Delete this deployment from the state file.""" - with self._db: + with self._state.db: if not force and len(self.resources) > 0: raise Exception("cannot delete this deployment because it still has resources") @@ -242,7 +180,7 @@ def delete(self, force=False): if os.path.islink(p): os.remove(p) # Delete the deployment from the database. - self._db.execute("delete from Deployments where uuid = ?", (self.uuid,)) + self._state._delete_deployment(self.uuid) def _nix_path_flags(self): @@ -836,7 +774,7 @@ def evaluate_active(self, include=[], exclude=[], kill_obsolete=False): self.evaluate() # Create state objects for all defined resources. 
- with self._db: + with self._state.db: for m in self.definitions.itervalues(): if m.name not in self.resources: self._create_resource(m.name, m.get_type()) @@ -1141,9 +1079,7 @@ def rename(self, name, new_name): m = self.resources.pop(name) self.resources[new_name] = m - - with self._db: - self._db.execute("update Resources set name = ? where deployment = ? and id = ?", (new_name, self.uuid, m.id)) + self._state._rename_resource(self.uuid, m.id, new_name) def send_keys(self, include=[], exclude=[]): @@ -1188,15 +1124,6 @@ def _create_definition(xml, config, type_name): raise nixops.deployment.UnknownBackend("unknown resource type ‘{0}’".format(type_name)) -def _create_state(depl, type, name, id): - """Create a resource state object of the desired type.""" - - for cls in _subclasses(nixops.resources.ResourceState): - if type == cls.get_type(): - return cls(depl, name, id) - - raise nixops.deployment.UnknownBackend("unknown resource type ‘{0}’".format(type)) - # Automatically load all resource types. def _load_modules_from(dir): diff --git a/nixops/resources/__init__.py b/nixops/resources/__init__.py index 359f61278..cc27a62e0 100644 --- a/nixops/resources/__init__.py +++ b/nixops/resources/__init__.py @@ -64,14 +64,8 @@ def __init__(self, depl, name, id): def _set_attrs(self, attrs): """Update machine attributes in the state file.""" - with self.depl._db: - c = self.depl._db.cursor() - for n, v in attrs.iteritems(): - if v == None: - c.execute("delete from ResourceAttrs where machine = ? 
and name = ?", (self.id, n)) - else: - c.execute("insert or replace into ResourceAttrs(machine, name, value) values (?, ?, ?)", - (self.id, n, v)) + self.depl._state.set_resource_attrs(self.depl.uuid, self.id, attrs) + def _set_attr(self, name, value): """Update one machine attribute in the state file.""" @@ -79,31 +73,23 @@ def _set_attr(self, name, value): def _del_attr(self, name): """Delete a machine attribute from the state file.""" - with self.depl._db: - self.depl._db.execute("delete from ResourceAttrs where machine = ? and name = ?", (self.id, name)) + self.depl._state.del_resource_attr(self.depl.uuid, self.id, name) + #TODO(moretea): again, the default option appears to be defunct. + # Have removed it in state/file.py. def _get_attr(self, name, default=nixops.util.undefined): """Get a machine attribute from the state file.""" - with self.depl._db: - c = self.depl._db.cursor() - c.execute("select value from ResourceAttrs where machine = ? and name = ?", (self.id, name)) - row = c.fetchone() - if row != None: return row[0] - return nixops.util.undefined + return self.depl._state.get_resource_attr(self.depl.uuid, self.id, name) def export(self): """Export the resource to move between databases""" - with self.depl._db: - c = self.depl._db.cursor() - c.execute("select name, value from ResourceAttrs where machine = ?", (self.id,)) - rows = c.fetchall() - res = {row[0]: row[1] for row in rows} - res['type'] = self.get_type() - return res + res = self.depl._state.get_all_resource_attrs(self.depl.uuid, self.id) + res['type'] = self.get_type() + return res def import_(self, attrs): """Import the resource from another database""" - with self.depl._db: + with self.depl._state.db: for k, v in attrs.iteritems(): if k == 'type': continue self._set_attr(k, v) diff --git a/nixops/resources/cloudwatch_log_group.py b/nixops/resources/cloudwatch_log_group.py index e53827284..943739e74 100644 --- a/nixops/resources/cloudwatch_log_group.py +++ 
b/nixops/resources/cloudwatch_log_group.py @@ -72,7 +72,7 @@ def _destroy(self): self._conn.delete_log_group(self.log_group_name) except boto.logs.exceptions.ResourceNotFoundException, e: self.log("the log group ‘{0}’ was already deleted".format(self.log_group_name)) - with self.depl._db: + with self.depl._state.db: self.state = self.MISSING self.log_group_name = None self.region = None @@ -114,7 +114,7 @@ def create(self, defn, check, allow_reboot, allow_recreate): self.log("setting the retention in days of '{0}' to '{1}'".format(defn.config['name'], defn.config['retentionInDays'])) self._conn.set_retention(log_group_name=defn.config['name'],retention_in_days=defn.config['retentionInDays']) - with self.depl._db: + with self.depl._state.db: self.state = self.UP self.log_group_name = defn.config['name'] self.region = defn.config['region'] @@ -123,4 +123,4 @@ def create(self, defn, check, allow_reboot, allow_recreate): def destroy(self, wipe=False): self._destroy() - return True \ No newline at end of file + return True diff --git a/nixops/resources/cloudwatch_log_stream.py b/nixops/resources/cloudwatch_log_stream.py index c53c71dd8..4d6e770b6 100644 --- a/nixops/resources/cloudwatch_log_stream.py +++ b/nixops/resources/cloudwatch_log_stream.py @@ -72,7 +72,7 @@ def _destroy(self): self._conn.delete_log_stream(log_group_name=self.log_group_name,log_stream_name=self.log_stream_name) except boto.logs.exceptions.ResourceNotFoundException, e: self.log("the log group ‘{0}’ or log stream ‘{1}’ was already deleted".format(self.log_group_name,self.log_stream_name)) - with self.depl._db: + with self.depl._state.db: self.state = self.MISSING self.log_group_name = None self.log_stream_name = None @@ -121,7 +121,7 @@ def create(self, defn, check, allow_reboot, allow_recreate): exist, arn = self.lookup_cloudwatch_log_stream(log_group_name=defn.config['logGroupName'], log_stream_name=defn.config['name']) - with self.depl._db: + with self.depl._state.db: self.state = self.UP 
self.log_stream_name = defn.config['name'] self.log_group_name = defn.config['logGroupName'] @@ -130,4 +130,4 @@ def create(self, defn, check, allow_reboot, allow_recreate): def destroy(self, wipe=False): self._destroy() - return True \ No newline at end of file + return True diff --git a/nixops/resources/datadog-monitor.py b/nixops/resources/datadog-monitor.py index 9c3893296..b56ce652f 100644 --- a/nixops/resources/datadog-monitor.py +++ b/nixops/resources/datadog-monitor.py @@ -106,7 +106,7 @@ def create(self, defn, check, allow_reboot, allow_recreate): if 'errors' in response: raise Exception(str(response['errors'])) - with self.depl._db: + with self.depl._state.db: self.state = self.UP self.api_key = defn.config['apiKey'] self.app_key = defn.config['appKey'] diff --git a/nixops/resources/datadog-screenboard.py b/nixops/resources/datadog-screenboard.py index 2d03ecdf5..d7c3f1197 100644 --- a/nixops/resources/datadog-screenboard.py +++ b/nixops/resources/datadog-screenboard.py @@ -104,7 +104,7 @@ def create(self, defn, check, allow_reboot, allow_recreate): if 'errors' in response: raise Exception(str(response['errors'])) - with self.depl._db: + with self.depl._state.db: self.state = self.UP self.api_key = defn.config['apiKey'] self.app_key = defn.config['appKey'] diff --git a/nixops/resources/datadog-timeboard.py b/nixops/resources/datadog-timeboard.py index 1fdcda25d..edc43dbd3 100644 --- a/nixops/resources/datadog-timeboard.py +++ b/nixops/resources/datadog-timeboard.py @@ -111,7 +111,7 @@ def create(self, defn, check, allow_reboot, allow_recreate): else: url = response['url'] - with self.depl._db: + with self.depl._state.db: self.state = self.UP self.api_key = defn.config['apiKey'] self.app_key = defn.config['appKey'] diff --git a/nixops/resources/ebs_volume.py b/nixops/resources/ebs_volume.py index ec1044e16..0485057ec 100644 --- a/nixops/resources/ebs_volume.py +++ b/nixops/resources/ebs_volume.py @@ -122,7 +122,7 @@ def create(self, defn, check, 
allow_reboot, allow_recreate): # volume we just created. Doesn't seem to be anything we # can do about this. - with self.depl._db: + with self.depl._state.db: self.state = self.STARTING self.region = defn.region self.zone = defn.zone diff --git a/nixops/resources/ec2_keypair.py b/nixops/resources/ec2_keypair.py index 2e5d1be01..4e82864e4 100644 --- a/nixops/resources/ec2_keypair.py +++ b/nixops/resources/ec2_keypair.py @@ -78,7 +78,7 @@ def create(self, defn, check, allow_reboot, allow_recreate): # Generate the key pair locally. if not self.public_key: (private, public) = nixops.util.create_key_pair(type="rsa") # EC2 only supports RSA keys. - with self.depl._db: + with self.depl._state.db: self.public_key = public self.private_key = private @@ -102,7 +102,7 @@ def create(self, defn, check, allow_reboot, allow_recreate): self.log("uploading EC2 key pair ‘{0}’...".format(defn.keypair_name)) self._conn.import_key_pair(defn.keypair_name, self.public_key) - with self.depl._db: + with self.depl._state.db: self.state = self.UP self.keypair_name = defn.keypair_name diff --git a/nixops/resources/ec2_placement_group.py b/nixops/resources/ec2_placement_group.py index 511e52370..871f2cea6 100644 --- a/nixops/resources/ec2_placement_group.py +++ b/nixops/resources/ec2_placement_group.py @@ -67,11 +67,11 @@ def _connect(self): def create(self, defn, check, allow_reboot, allow_recreate): # Name or region change means a completely new security group if self.placement_group_name and (defn.placement_group_name != self.placement_group_name or defn.region != self.region): - with self.depl._db: + with self.depl._state.db: self.state = self.UNKNOWN self.old_placement_groups = self.old_placement_groups + [{'name': self.placement_group_name, 'region': self.region}] - with self.depl._db: + with self.depl._state.db: self.region = defn.region self.access_key_id = defn.access_key_id or nixops.ec2_utils.get_access_key_id() self.placement_group_name = defn.placement_group_name @@ -79,7 +79,7 @@ 
def create(self, defn, check, allow_reboot, allow_recreate): grp = None if check: - with self.depl._db: + with self.depl._state.db: self._connect() try: diff --git a/nixops/resources/ec2_rds_dbinstance.py b/nixops/resources/ec2_rds_dbinstance.py index d314f006d..9921ab2db 100644 --- a/nixops/resources/ec2_rds_dbinstance.py +++ b/nixops/resources/ec2_rds_dbinstance.py @@ -148,7 +148,7 @@ def _wait_for_dbinstance(self, dbinstance, state='available'): time.sleep(6) def _copy_dbinstance_attrs(self, dbinstance): - with self.depl._db: + with self.depl._state.db: self.rds_dbinstance_id = dbinstance.id self.rds_dbinstance_allocated_storage = int(dbinstance.allocated_storage) self.rds_dbinstance_instance_class = dbinstance.instance_class @@ -170,7 +170,7 @@ def _compare_instance_id(self, instance_id): return unicode(self.rds_dbinstance_id).lower() == unicode(instance_id).lower() def create(self, defn, check, allow_reboot, allow_recreate): - with self.depl._db: + with self.depl._state.db: self.access_key_id = defn.access_key_id or nixops.ec2_utils.get_access_key_id() if not self.access_key_id: raise Exception("please set ‘accessKeyId’, $EC2_ACCESS_KEY or $AWS_ACCESS_KEY_ID") @@ -192,7 +192,7 @@ def create(self, defn, check, allow_reboot, allow_recreate): dbinstance = self._try_fetch_dbinstance(self.rds_dbinstance_id) - with self.depl._db: + with self.depl._state.db: if check or self.state == self.MISSING or self.state == self.UNKNOWN: if dbinstance and (self.state == self.MISSING or self.state == self.UNKNOWN): if dbinstance.status == 'deleting': @@ -233,7 +233,7 @@ def create(self, defn, check, allow_reboot, allow_recreate): self._copy_dbinstance_attrs(dbinstance) self.state = self.UP - with self.depl._db: + with self.depl._state.db: if self.state == self.UP and self._diff_defn(defn): if dbinstance is None: raise Exception("state is UP but database instance does not exist. 
re-run with --check option to synchronize states") diff --git a/nixops/resources/ec2_security_group.py b/nixops/resources/ec2_security_group.py index b90df07d1..0dbf46d27 100644 --- a/nixops/resources/ec2_security_group.py +++ b/nixops/resources/ec2_security_group.py @@ -96,11 +96,11 @@ def _connect(self): def create(self, defn, check, allow_reboot, allow_recreate): # Name or region change means a completely new security group if self.security_group_name and (defn.security_group_name != self.security_group_name or defn.region != self.region): - with self.depl._db: + with self.depl._state.db: self.state = self.UNKNOWN self.old_security_groups = self.old_security_groups + [{'name': self.security_group_name, 'region': self.region}] - with self.depl._db: + with self.depl._state.db: self.region = defn.region self.access_key_id = defn.access_key_id or nixops.ec2_utils.get_access_key_id() self.security_group_name = defn.security_group_name @@ -109,7 +109,7 @@ def create(self, defn, check, allow_reboot, allow_recreate): grp = None if check: - with self.depl._db: + with self.depl._state.db: self._connect() try: diff --git a/nixops/resources/elastic_file_system.py b/nixops/resources/elastic_file_system.py index d4b23b4e3..64560a376 100644 --- a/nixops/resources/elastic_file_system.py +++ b/nixops/resources/elastic_file_system.py @@ -85,7 +85,7 @@ def create(self, defn, check, allow_reboot, allow_recreate): if len(fss) == 1: fs = fss[0] if fs["LifeCycleState"] == "available": - with self.depl._db: + with self.depl._state.db: self.state = self.UP self.fs_id = fs["FileSystemId"] self.region = defn.config["region"] @@ -146,7 +146,7 @@ def destroy(self, wipe=False): break time.sleep(1) - with self.depl._db: + with self.depl._state.db: self.state = self.MISSING self.fs_id = None self.region = None diff --git a/nixops/resources/elastic_file_system_mount_target.py b/nixops/resources/elastic_file_system_mount_target.py index 74033a5c7..c6b318268 100644 --- 
a/nixops/resources/elastic_file_system_mount_target.py +++ b/nixops/resources/elastic_file_system_mount_target.py @@ -41,7 +41,7 @@ def get_type(cls): return "elastic-file-system-mount-target" def _reset_state(self): - with self.depl._db: + with self.depl._state.db: self.state = self.MISSING self.access_key_id = None self.region = None @@ -94,7 +94,7 @@ def create(self, defn, check, allow_reboot, allow_recreate): args["IpAddress"] = defn.config["ipAddress"] res = client.create_mount_target(FileSystemId=fs_id, SubnetId=defn.config["subnet"], **args) - with self.depl._db: + with self.depl._state.db: self.state = self.STARTING self.fsmt_id = res["MountTargetId"] self.fs_id = fs_id diff --git a/nixops/resources/elastic_ip.py b/nixops/resources/elastic_ip.py index 6528532fa..8a2ee2723 100644 --- a/nixops/resources/elastic_ip.py +++ b/nixops/resources/elastic_ip.py @@ -84,7 +84,7 @@ def create(self, defn, check, allow_reboot, allow_recreate): # address we just created. Doesn't seem to be anything we # can do about this. 
- with self.depl._db: + with self.depl._state.db: self.state = self.UP self.region = defn.region self.public_ipv4 = address.public_ip diff --git a/nixops/resources/iam_role.py b/nixops/resources/iam_role.py index 8f055d7ee..c8c6f9dac 100644 --- a/nixops/resources/iam_role.py +++ b/nixops/resources/iam_role.py @@ -105,7 +105,7 @@ def _destroy(self): self.log("could not find instance profile") - with self.depl._db: + with self.depl._state.db: self.state = self.MISSING self.role_name = None self.access_key_id = None @@ -167,7 +167,7 @@ def create(self, defn, check, allow_reboot, allow_recreate): if defn.assume_role_policy != "": self._conn.update_assume_role_policy(defn.role_name, defn.assume_role_policy) - with self.depl._db: + with self.depl._state.db: self.state = self.UP self.role_name = defn.role_name self.policy = defn.policy diff --git a/nixops/resources/s3_bucket.py b/nixops/resources/s3_bucket.py index 986ec645f..142f6209e 100644 --- a/nixops/resources/s3_bucket.py +++ b/nixops/resources/s3_bucket.py @@ -101,7 +101,7 @@ def create(self, defn, check, allow_reboot, allow_recreate): # [http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketDELETEpolicy.html] if e.status != 204: raise # (204 : Bucket didn't have any policy to delete) - with self.depl._db: + with self.depl._state.db: self.state = self.UP self.bucket_name = defn.bucket_name self.region = defn.region diff --git a/nixops/resources/sns_topic.py b/nixops/resources/sns_topic.py index e454b7821..fddc05666 100644 --- a/nixops/resources/sns_topic.py +++ b/nixops/resources/sns_topic.py @@ -74,7 +74,7 @@ def _destroy(self): self.connect() self.log("destroying SNS topic ‘{0}’...".format(self.topic_name)) self._conn.delete_topic(self.arn) - with self.depl._db: + with self.depl._state.db: self.state = self.MISSING self.topic_name = None self.region = None @@ -136,7 +136,7 @@ def create(self, defn, check, allow_reboot, allow_recreate): if subscriber_arn != "PendingConfirmation": 
self._conn.unsubscribe(subscription=subscriber_arn) - with self.depl._db: + with self.depl._state.db: self.state = self.UP self.topic_name = defn.config['name'] self.display_name = defn.config['displayName'] diff --git a/nixops/resources/sqs_queue.py b/nixops/resources/sqs_queue.py index 4ad53ba8d..207fb794c 100644 --- a/nixops/resources/sqs_queue.py +++ b/nixops/resources/sqs_queue.py @@ -84,7 +84,7 @@ def _destroy(self): if q: self.log("destroying SQS queue ‘{0}’...".format(self.queue_name)) self._conn.delete_queue(q) - with self.depl._db: + with self.depl._state.db: self.state = self.MISSING self.queue_name = None self.queue_base_name = None @@ -122,7 +122,7 @@ def create(self, defn, check, allow_reboot, allow_recreate): self.log("creating SQS queue ‘{0}’...".format(defn.queue_name)) q = nixops.ec2_utils.retry(lambda: self._conn.create_queue(defn.queue_name, defn.visibility_timeout), error_codes = ['AWS.SimpleQueueService.QueueDeletedRecently']) - with self.depl._db: + with self.depl._state.db: self.state = self.UP self.queue_name = defn.queue_name self.url = q.url diff --git a/nixops/resources/ssh_keypair.py b/nixops/resources/ssh_keypair.py index 63fe78164..faf15907b 100644 --- a/nixops/resources/ssh_keypair.py +++ b/nixops/resources/ssh_keypair.py @@ -46,7 +46,7 @@ def create(self, defn, check, allow_reboot, allow_recreate): # Generate the key pair locally. 
if not self.public_key: (private, public) = nixops.util.create_key_pair(type="ed25519") - with self.depl._db: + with self.depl._state.db: self.public_key = public self.private_key = private self.state = state = nixops.resources.ResourceState.UP diff --git a/nixops/state/__init__.py b/nixops/state/__init__.py new file mode 100644 index 000000000..214c19009 --- /dev/null +++ b/nixops/state/__init__.py @@ -0,0 +1,25 @@ +import urlparse +import sys +import sqlite3_file +import json_file + +class WrongStateSchemeException(Exception): + pass + +def open(url): + url = urlparse.urlparse(url) + scheme = url.scheme + + if scheme == "": + scheme = "sqlite3" + + def raise_(ex): + raise ex + + switcher = { + "sqlite3": lambda(url): sqlite3_file.StateFile(url.path), + "json": lambda(url): json_file.JsonFile(url.path), + } + + function = switcher.get(scheme, lambda(url): raise_(WrongStateSchemeException("Unknown state scheme!"))) + return function(url) diff --git a/nixops/state/etcd.py b/nixops/state/etcd.py new file mode 100644 index 000000000..e69de29bb diff --git a/nixops/state/json_file.py b/nixops/state/json_file.py new file mode 100644 index 000000000..873b27ab5 --- /dev/null +++ b/nixops/state/json_file.py @@ -0,0 +1,362 @@ +# -*- coding: utf-8 -*- + +import nixops.deployment +import os +import os.path +import sys +import threading +import fcntl +import re +import json +import copy + +from uuid import uuid1 as gen_uuid + +def _subclasses(cls): + sub = cls.__subclasses__() + return [cls] if not sub else [g for s in sub for g in _subclasses(s)] + +class TransactionalJsonFile: + """ + Transactional access to a JSON file, with support + of nested transactions. + + This is made possible by keeping track of the transaction nest level. + If a transaction is started, the current JSON file is flocked() and read into memory. + All modifications to the document are kept in memory, until the last nested context is + exited again. 
+ + Then, the in memory dict written to a temporary file, which is moved in place of + the original file to prevent partial writes. + """ + + # Implementation notes: + # if self.nesting > 0, then no write will propagate. + def __init__(self, db_file): + + lock_file_path = re.sub("\.json$", ".lock", db_file) + self._lock_file = open(lock_file_path, "w") + fcntl.fcntl(self._lock_file, fcntl.F_SETFD, fcntl.FD_CLOEXEC) # to not keep the lock in child processes + + self._db_file = db_file + self.nesting = 0 + self.lock = threading.RLock() + + ## Make sure that a JSON database file is in place. + with self: + pass + + def read(self): + if self.nesting == 0: + with open(self._db_file,"r") as f: + return json.load(f) + else: + assert self.nesting > 0 + return self._current_state + + # Implement Python's context management protocol so that "with db" + # automatically commits or rolls back. + def __enter__(self): + self.lock.acquire() + if self.nesting == 0: + fcntl.flock(self._lock_file, fcntl.LOCK_EX) + self._ensure_db_exists() + self.must_rollback = False + json = self.read() + self._backup_state = copy.deepcopy(json) + self._current_state = copy.deepcopy(json) + self.nesting = self.nesting + 1 + + def __exit__(self, exception_type, exception_value, exception_traceback): + if exception_type != None: self.must_rollback = True + self.nesting = self.nesting - 1 + assert self.nesting >= 0 + if self.nesting == 0: + if self.must_rollback: + self._rollback() + else: + self._commit() + fcntl.flock(self._lock_file, fcntl.LOCK_UN) + self.lock.release() + + def _rollback(self): + self._backup_state = None + self._current_state = None + pass + + def set(self, state): + self._current_state = state + + def _commit(self): + assert self.nesting == 0 + + # TODO: write to temp file, then mv + with open(self._db_file, "w") as f: + json.dump(self._current_state, f,indent=2) + + self._backup_state = None + self._current_state = None + + def _ensure_db_exists(self): + db_exists = 
os.path.exists(self._db_file) + if not db_exists: + initial_db = { + "schemaVersion": 0, + "deployments": {} + } + + with open(self._db_file, "w", 0o600) as f: + json.dump(initial_db, f) + f.close() + + def schema_version(self): + version = self.read()["schemaVersion"] + if version is None: + raise "illegal datafile" #TODO: proper exception + else: + return version + +class JsonFile(object): + """NixOps state file.""" + + def __init__(self, json_file): + self.file_path = json_file + + if os.path.splitext(json_file)[1] not in ['.json']: + raise Exception("state file ‘{0}’ should have extension ‘.json’".format(json_file)) + + self.db = TransactionalJsonFile(json_file) + + # Check that we're not using a to new DB schema version. + with self.db: + version = self.db.schema_version() + if version > 0: + raise Exception("this NixOps version is too old to deal with JSON schema version {0}".format(version)) + + ############################################################################################### + ## Deployment + + def query_deployments(self): + """Return the UUIDs of all deployments in the database.""" + + return self.db.read()["deployments"].keys() + + def get_all_deployments(self): + """Return Deployment objects for every deployment in the database.""" + uuids = self.query_deployments() + res = [] + for uuid in uuids: + try: + res.append(self.open_deployment(uuid=uuid)) + except nixops.deployment.UnknownBackend as e: + sys.stderr.write("skipping deployment ‘{0}’: {1}\n".format(uuid, str(e))) + return res + + def _find_deployment(self, uuid=None): + all_deployments = self.db.read()["deployments"] + found = [] + if not uuid: + found = all_deployments + if not found: + found = filter(lambda(id): id == uuid, all_deployments) + if not found: + found = filter(lambda(id): all_deployments[id]["attributes"].get("name") == uuid, all_deployments) + + if not found: + found = filter(lambda(id): id.startswith(uuid), all_deployments) + + if not found: + return None + + if 
len(found) > 1: + if uuid: + raise Exception("state file contains multiple deployments with the same name, so you should specify one using its UUID") + else: + raise Exception("state file contains multiple deployments, so you should specify which one to use using ‘-d’, or set the environment variable NIXOPS_DEPLOYMENT") + return nixops.deployment.Deployment(self, found[0], sys.stderr) + + def open_deployment(self, uuid=None): + """Open an existing deployment.""" + deployment = self._find_deployment(uuid=uuid) + if deployment: return deployment + raise Exception("could not find specified deployment in state file ‘{0}’".format(self.db_file)) + + def create_deployment(self, uuid=None): + """Create a new deployment.""" + if not uuid: + import uuid + uuid = str(uuid.uuid1()) + with self.db: + state = self.db.read() + state["deployments"][uuid] = { "attributes": {}, "resources": {} } + self.db.set(state) + return nixops.deployment.Deployment(self, uuid, sys.stderr) + + def _delete_deployment(self, deployment_uuid): + """NOTE: This is UNSAFE, it's guarded in nixops/deployment.py. 
Do not call this function except from there!""" + self.__db.execute("delete from Deployments where uuid = ?", (deployment_uuid,)) + with self.db: + state = self.db.read() + state["deployments"].pop(deployment_uuid, None) + self.db.set(state) + + def clone_deployment(self, deployment_uuid): + with self.db: + if not uuid: + import uuid + new_uuid = str(uuid.uuid1()) + state = self.db.read() + + cloned_attributes = copy.deepcopy(state["deployments"][deployment_uuid]["attributes"]) + state["deployments"][new_uuid] = { + "attributes": cloned_attributes, + "resources": {} + } + + self.db.set(state) + + return self._find_deployment(new_uuid) + + def get_resources_for(self, deployment): + """Get all the resources for a certain deployment""" + resources = {} + with self.db: + state = self.db.read() + state_resources = state["deployments"][deployment.uuid]["resources"] + for res_id, res in state_resources.items(): + r = self._create_state(deployment, res["type"], res["name"], res_id) + resources[res["name"]] = r + self.db.set(state) + return resources + + def set_deployment_attrs(self, deployment_uuid, attrs): + """Update deployment attributes in the state.""" + with self.db: + state = self.db.read() + for n, v in attrs.iteritems(): + if v == None: + state["deployments"][deployment_uuid]["attributes"].pop(n,None) + else: + state["deployments"][deployment_uuid]["attributes"][n] = v + self.db.set(state) + + def del_deployment_attr(self, deployment_uuid, attr_name): + with self.db: + state = self.db.read() + state["deployments"][deployment_uuid]["attributes"].pop(attr_name,None) + self.db.set(state) + + def get_deployment_attr(self, deployment_uuid, name): + """Get a deployment attribute from the state.""" + with self.db: + state = self.db.read() + result = state["deployments"][deployment_uuid]["attributes"].get(name) + if result: + return result + else: + return nixops.util.undefined + + def get_all_deployment_attrs(self, deployment_uuid): + with self.db: + state = 
self.db.read() + return copy.deepcopy(state["deployments"][deployment]["attributes"]) + + def get_deployment_lock(self, deployment): + lock_dir = os.environ.get("HOME", "") + "/.nixops/locks" + if not os.path.exists(lock_dir): os.makedirs(lock_dir, 0700) + lock_file_path = lock_dir + "/" + deployment.uuid + class DeploymentLock(object): + def __init__(self, logger, path): + self._lock_file_path = path + self._logger = logger + self._lock_file = None + def __enter__(self): + self._lock_file = open(self._lock_file_path, "w") + fcntl.fcntl(self._lock_file, fcntl.F_SETFD, fcntl.FD_CLOEXEC) + try: + fcntl.flock(self._lock_file, fcntl.LOCK_EX | fcntl.LOCK_NB) + except IOError: + self._logger.log( + "waiting for exclusive deployment lock..." + ) + fcntl.flock(self._lock_file, fcntl.LOCK_EX) + def __exit__(self, exception_type, exception_value, exception_traceback): + if self._lock_file: + self._lock_file.close() + return DeploymentLock(deployment.logger, lock_file_path) + + ############################################################################################### + ## Resources + + def create_resource(self, deployment, name, type): + with self.db: + state = self.db.read() + if name in state["deployments"][deployment.uuid]["resources"]: + raise Exception("resource already exists in database!") + id = str(gen_uuid()) + state["deployments"][deployment.uuid]["resources"][id] = { + "name": name, + "type" : type, + "attributes" : {} + } + self.db.set(state) + r = self._create_state(deployment, type, name, id) + return r + + def delete_resource(self, deployment_uuid, res_id): + with self.db: + state = self.db.read() + state["deployments"][deployment_uuid]["resources"].pop(res_id) + self.db.set(state) + + def _rename_resource(self, deployment_uuid, resource_id, new_name): + """NOTE: Invariants are checked in nixops/deployment.py#rename""" + with self.db: + state = self.db.read() + state["deployments"][deployment_uuid]["resources"][resource_id]["name"] = new_name + 
self.db.set(state) + + def set_resource_attrs(self, deployment_uuid, resource_id, attrs): + with self.db: + state = self.db.read() + resource_attrs = state["deployments"][deployment_uuid]["resources"][resource_id]["attributes"] + for n, v in attrs.iteritems(): + if v == None: + resource_attrs.pop(n, None) + else: + resource_attrs[n] = v + state["deployments"][deployment_uuid]["resources"][resource_id]["attributes"] = resource_attrs + self.db.set(state) + + def del_resource_attr(self, deployment_uuid, resource_id, name): + with self.db: + state = self.db.read() + resource_attrs = state["deployments"][deployment_uuid]["resources"][resource_id]["attributes"] + resource_attrs.pop(name, None) + state["deployments"][deployment_uuid]["resources"][resource_id]["attributes"] = resource_attrs + self.db.set(state) + + def get_resource_attr(self, deployment_uuid, resource_id, name): + """Get a machine attribute from the state file.""" + with self.db: + state = self.db.read() + resource_attrs = state["deployments"][deployment_uuid]["resources"][resource_id]["attributes"] + res = resource_attrs.get(name) + if res != None: return res + return nixops.util.undefined + + def get_all_resource_attrs(self, deployment_uuid, resource_id): + with self.db: + state = self.db.read() + resource_attrs = state["deployments"][deployment_uuid]["resources"][resource_id]["attributes"] + return copy.deepcopy(resource_attrs) + + ### STATE + def _create_state(self, depl, type, name, id): + """Create a resource state object of the desired type.""" + + for cls in _subclasses(nixops.resources.ResourceState): + if type == cls.get_type(): + return cls(depl, name, id) + + raise nixops.deployment.UnknownBackend("unknown resource type ‘{0}’".format(type)) diff --git a/nixops/statefile.py b/nixops/state/sqlite3_file.py similarity index 53% rename from nixops/statefile.py rename to nixops/state/sqlite3_file.py index 0e8c2c5f3..592874f9a 100644 --- a/nixops/statefile.py +++ b/nixops/state/sqlite3_file.py @@ -6,7 
+6,11 @@ from pysqlite2 import dbapi2 as sqlite3 import sys import threading +import fcntl +def _subclasses(cls): + sub = cls.__subclasses__() + return [cls] if not sub else [g for s in sub for g in _subclasses(s)] class Connection(sqlite3.Connection): @@ -100,14 +104,18 @@ def __init__(self, db_file): else: raise Exception("this NixOps version is too old to deal with schema version {0}".format(version)) - self._db = db + self.db = db + def close(self): - self._db.close() + self.db.close() + + ############################################################################################### + ## Deployment def query_deployments(self): """Return the UUIDs of all deployments in the database.""" - c = self._db.cursor() + c = self.db.cursor() c.execute("select uuid from Deployments") res = c.fetchall() return [x[0] for x in res] @@ -124,7 +132,7 @@ def get_all_deployments(self): return res def _find_deployment(self, uuid=None): - c = self._db.cursor() + c = self.db.cursor() if not uuid: c.execute("select uuid from Deployments") else: @@ -157,10 +165,159 @@ def create_deployment(self, uuid=None): if not uuid: import uuid uuid = str(uuid.uuid1()) - with self._db: - self._db.execute("insert into Deployments(uuid) values (?)", (uuid,)) + with self.db: + self.db.execute("insert into Deployments(uuid) values (?)", (uuid,)) return nixops.deployment.Deployment(self, uuid, sys.stderr) + def _delete_deployment(self, deployment_uuid): + """NOTE: This is UNSAFE, it's guarded in nixops/deployment.py. 
Do not call this function except from there!""" + self.db.execute("delete from Deployments where uuid = ?", (deployment_uuid,)) + + def clone_deployment(self, deployment_uuid): + with self.db: + new = self.create_deployment() + self.db.execute("insert into DeploymentAttrs (deployment, name, value) " + + "select ?, name, value from DeploymentAttrs where deployment = ?", + (new.uuid, deployment_uuid)) + new.configs_path = None + return new + + + + def get_resources_for(self, deployment): + """Get all the resources for a certain deployment""" + resources = {} + with self.db: + c = self.db.cursor() + c.execute("select id, name, type from Resources where deployment = ?", (deployment.uuid,)) + for (id, name, type) in c.fetchall(): + r = self._create_state(deployment, type, name, id) + resources[name] = r + return resources + + def set_deployment_attrs(self, deployment_uuid, attrs): + """Update deployment attributes in the state.""" + with self.db: + c = self.db.cursor() + for n, v in attrs.iteritems(): + if v == None: + c.execute("delete from DeploymentAttrs where deployment = ? and name = ?", (deployment_uuid, n)) + else: + c.execute("insert or replace into DeploymentAttrs(deployment, name, value) values (?, ?, ?)", + (deployment_uuid, n, v)) + + def del_deployment_attr(self, deployment_uuid, attr_name): + with self.db: + self.db.execute("delete from DeploymentAttrs where deployment = ? and name = ?", (deployment_uuid, attr_name)) + + + def get_deployment_attr(self, deployment_uuid, name): + """Get a deployment attribute from the state.""" + with self.db: + c = self.db.cursor() + c.execute("select value from DeploymentAttrs where deployment = ? 
and name = ?", (deployment_uuid, name)) + row = c.fetchone() + if row != None: return row[0] + return nixops.util.undefined + + def get_all_deployment_attrs(self, deployment_uuid): + with self.db: + c = self.db.cursor() + c.execute("select name, value from DeploymentAttrs where deployment = ?", (deployment_uuid)) + rows = c.fetchall() + res = {row[0]: row[1] for row in rows} + return res + + def get_deployment_lock(self, deployment): + lock_dir = os.environ.get("HOME", "") + "/.nixops/locks" + if not os.path.exists(lock_dir): os.makedirs(lock_dir, 0700) + lock_file_path = lock_dir + "/" + deployment.uuid + class DeploymentLock(object): + def __init__(self, logger, path): + self._lock_file_path = path + self._logger = logger + self._lock_file = None + def __enter__(self): + self._lock_file = open(self._lock_file_path, "w") + fcntl.fcntl(self._lock_file, fcntl.F_SETFD, fcntl.FD_CLOEXEC) + try: + fcntl.flock(self._lock_file, fcntl.LOCK_EX | fcntl.LOCK_NB) + except IOError: + self._logger.log( + "waiting for exclusive deployment lock..." + ) + fcntl.flock(self._lock_file, fcntl.LOCK_EX) + def __exit__(self, exception_type, exception_value, exception_traceback): + if self._lock_file: + self._lock_file.close() + return DeploymentLock(deployment.logger, lock_file_path) + + ############################################################################################### + ## Resources + + def create_resource(self, deployment, name, type): + c = self.db.cursor() + c.execute("select 1 from Resources where deployment = ? and name = ?", (deployment.uuid, name)) + if len(c.fetchall()) != 0: + raise Exception("resource already exists in database!") + c.execute("insert into Resources(deployment, name, type) values (?, ?, ?)", + (deployment.uuid, name, type)) + id = c.lastrowid + r = self._create_state(deployment, type, name, id) + return r + + def delete_resource(self, deployment_uuid, res_id): + with self.db: + self.db.execute("delete from Resources where deployment = ? 
and id = ?", (deployment_uuid, res_id)) + + def _rename_resource(self, deployment_uuid, resource_id, new_name): + """NOTE: Invariants are checked in nixops/deployment.py#rename""" + with self.db: + self.db.execute("update Resources set name = ? where deployment = ? and id = ?", (new_name, deployment_uuid, resource_id)) + + + def set_resource_attrs(self, _deployment_uuid, resource_id, attrs): + with self.db: + c = self.db.cursor() + for n, v in attrs.iteritems(): + if v == None: + c.execute("delete from ResourceAttrs where machine = ? and name = ?", (resource_id, n)) + else: + c.execute("insert or replace into ResourceAttrs(machine, name, value) values (?, ?, ?)", + (resource_id, n, v)) + + def del_resource_attr(self, _deployment_uuid, resource_id, name): + with self.db: + self.db.execute("delete from ResourceAttrs where machine = ? and name = ?", (resource_id, name)) + + def get_resource_attr(self, _deployment_uuid, resource_id, name): + """Get a machine attribute from the state file.""" + with self.db: + c = self.db.cursor() + c.execute("select value from ResourceAttrs where machine = ? and name = ?", (resource_id, name)) + row = c.fetchone() + if row != None: return row[0] + return nixops.util.undefined + + def get_all_resource_attrs(self, deployment_uuid, resource_id): + with self.db: + c = self.db.cursor() + c.execute("select name, value from ResourceAttrs where machine = ?", (resource_id,)) + rows = c.fetchall() + res = {row[0]: row[1] for row in rows} + return res + + ### STATE + def _create_state(self, depl, type, name, id): + """Create a resource state object of the desired type.""" + + for cls in _subclasses(nixops.resources.ResourceState): + if type == cls.get_type(): + return cls(depl, name, id) + + raise nixops.deployment.UnknownBackend("unknown resource type ‘{0}’".format(type)) + + def _table_exists(self, c, table): c.execute("select 1 from sqlite_master where name = ? 
and type='table'", (table,)); return c.fetchone() != None diff --git a/nixops/util.py b/nixops/util.py index 8f532aaf7..8ab8ac234 100644 --- a/nixops/util.py +++ b/nixops/util.py @@ -218,7 +218,7 @@ def get(self): if s == None: return None elif type is str: return s elif type is int: return int(s) - elif type is bool: return True if s == "1" else False + elif type is bool: return True if s == True or s == "1" else False elif type is 'json': return json.loads(s) else: assert False def set(self, x): diff --git a/scripts/nixops b/scripts/nixops index 912d7b872..78b50881a 100755 --- a/scripts/nixops +++ b/scripts/nixops @@ -5,7 +5,7 @@ from nixops import deployment from nixops.nix_expr import py2nix from nixops.parallel import MultipleExceptions, run_tasks -import nixops.statefile +import nixops.state.sqlite3_file import prettytable import argparse import os @@ -48,16 +48,16 @@ def sort_deployments(depls): # $NIXOPS_DEPLOYMENT. def one_or_all(): if args.all: - sf = nixops.statefile.StateFile(args.state_file) - return sf.get_all_deployments() + state = nixops.state.open(args.state_url) + return state.get_all_deployments() else: return [open_deployment()] def op_list_deployments(): - sf = nixops.statefile.StateFile(args.state_file) + state = nixops.state.open(args.state_url) tbl = create_table([("UUID", 'l'), ("Name", 'l'), ("Description", 'l'), ("# Machines", 'r'), ("Type", 'c')]) - for depl in sort_deployments(sf.get_all_deployments()): + for depl in sort_deployments(state.get_all_deployments()): tbl.add_row( [depl.uuid, depl.name or "(none)", depl.description, len(depl.machines), @@ -67,8 +67,8 @@ def op_list_deployments(): def open_deployment(): - sf = nixops.statefile.StateFile(args.state_file) - depl = sf.open_deployment(uuid=args.deployment) + state = nixops.state.open(args.state_url) + depl = state.open_deployment(uuid=args.deployment) depl.extra_nix_path = sum(args.nix_path or [], []) for (n, v) in args.nix_options or []: depl.extra_nix_flags.extend(["--option", 
n, v]) @@ -102,8 +102,8 @@ def modify_deployment(depl): def op_create(): - sf = nixops.statefile.StateFile(args.state_file) - depl = sf.create_deployment() + state = nixops.state.open(args.state_url) + depl = state.create_deployment() sys.stderr.write("created deployment ‘{0}’\n".format(depl.uuid)) modify_deployment(depl) if args.name or args.deployment: set_name(depl, args.name or args.deployment) @@ -190,10 +190,10 @@ def op_info(): ]) if args.all: - sf = nixops.statefile.StateFile(args.state_file) + state = nixops.state.open(args.state_url) if not args.plain: tbl = create_table([('Deployment', 'l')] + table_headers) - for depl in sort_deployments(sf.get_all_deployments()): + for depl in sort_deployments(state.get_all_deployments()): do_eval(depl) print_deployment(depl) if not args.plain: print tbl @@ -492,16 +492,16 @@ def op_export(): def op_import(): - sf = nixops.statefile.StateFile(args.state_file) - existing = set(sf.query_deployments()) + state = nixops.state.open(args.state_url) + existing = set(state.query_deployments()) dump = json.loads(sys.stdin.read()) for uuid, attrs in dump.iteritems(): if uuid in existing: raise Exception("state file already contains a deployment with UUID ‘{0}’".format(uuid)) - with sf._db: - depl = sf.create_deployment(uuid=uuid) + with state.db: + depl = state.create_deployment(uuid=uuid) depl.import_(attrs) sys.stderr.write("added deployment ‘{0}’\n".format(uuid)) @@ -695,8 +695,8 @@ subparsers = parser.add_subparsers(help='sub-command help') def add_subparser(name, help): subparser = subparsers.add_parser(name, help=help) - subparser.add_argument('--state', '-s', dest='state_file', metavar='FILE', - default=nixops.statefile.get_default_state_file(), help='path to state file') + subparser.add_argument('--state', '-s', dest='state_url', metavar='FILE', + default=os.environ.get("NIXOPS_STATE_URL", nixops.state.sqlite3_file.get_default_state_file()), help='URL that points to the state provider.') 
subparser.add_argument('--deployment', '-d', dest='deployment', metavar='UUID_OR_NAME', default=os.environ.get("NIXOPS_DEPLOYMENT", os.environ.get("CHARON_DEPLOYMENT", None)), help='UUID or symbolic name of the deployment') subparser.add_argument('--debug', action='store_true', help='enable debug output') diff --git a/tests/__init__.py b/tests/__init__.py index 6bdd94ec8..b586d2885 100644 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -3,14 +3,14 @@ import sys import threading from os import path -import nixops.statefile +import nixops.state.sqlite3_file _multiprocess_shared_ = True db_file = '%s/test.nixops' % (path.dirname(__file__)) def setup(): - nixops.statefile.StateFile(db_file).close() + nixops.state.sqlite3_file.StateFile(db_file).close() def destroy(sf, uuid): depl = sf.open_deployment(uuid) @@ -27,7 +27,7 @@ def destroy(sf, uuid): depl.logger.log("deployment ‘{0}’ destroyed".format(uuid)) def teardown(): - sf = nixops.statefile.StateFile(db_file) + sf = nixops.state.sqlite3_file.StateFile(db_file) uuids = sf.query_deployments() threads = [] for uuid in uuids: diff --git a/tests/functional/__init__.py b/tests/functional/__init__.py index 7152f23e8..e721b2132 100644 --- a/tests/functional/__init__.py +++ b/tests/functional/__init__.py @@ -1,6 +1,6 @@ import os from os import path -import nixops.statefile +import nixops.state.sqlite3_file from tests import db_file @@ -8,7 +8,7 @@ class DatabaseUsingTest(object): _multiprocess_can_split_ = True def setup(self): - self.sf = nixops.statefile.StateFile(db_file) + self.sf = nixops.state.sqlite3_file.StateFile(db_file) def teardown(self): self.sf.close() diff --git a/tests/functional/generic_deployment_test.py b/tests/functional/generic_deployment_test.py index 92736370a..26085fb82 100644 --- a/tests/functional/generic_deployment_test.py +++ b/tests/functional/generic_deployment_test.py @@ -1,6 +1,5 @@ import os import subprocess -import nixops.statefile from nose import SkipTest