diff --git a/nixops/backends/ec2.py b/nixops/backends/ec2.py index c41f8e4d8..e74e694db 100644 --- a/nixops/backends/ec2.py +++ b/nixops/backends/ec2.py @@ -124,7 +124,7 @@ def __init__(self, depl, name, id): def _reset_state(self): """Discard all state pertaining to an instance.""" - with self.depl._db: + with self.depl._state.db: self.state = MachineState.MISSING self.associate_public_ip_address = None self.use_private_ip_address = None @@ -344,7 +344,7 @@ def _instance_ip_ready(ins): self.log_end("{0} / {1}".format(instance.ip_address, instance.private_ip_address)) - with self.depl._db: + with self.depl._state.db: self.private_ipv4 = instance.private_ip_address self.public_ipv4 = instance.ip_address self.public_dns_name = instance.public_dns_name @@ -588,7 +588,7 @@ def _assign_elastic_ip(self, elastic_ipv4, check): nixops.known_hosts.update(self.public_ipv4, elastic_ipv4, self.public_host_key) - with self.depl._db: + with self.depl._state.db: self.elastic_ipv4 = elastic_ipv4 self.public_ipv4 = elastic_ipv4 self.ssh_pinged = False @@ -601,7 +601,7 @@ def _assign_elastic_ip(self, elastic_ipv4, check): else: self.log("address ‘{0}’ was not associated with instance ‘{1}’".format(self.elastic_ipv4, self.vm_id)) - with self.depl._db: + with self.depl._state.db: self.elastic_ipv4 = None self.public_ipv4 = None self.ssh_pinged = False @@ -665,7 +665,7 @@ def create_instance(self, defn, zone, devmap, user_data, ebs_optimized): lambda: self._conn.request_spot_instances(price=defn.spot_instance_price/100.0, **common_args) )[0] - with self.depl._db: + with self.depl._state.db: self.spot_instance_price = defn.spot_instance_price self.spot_instance_request_id = request.id @@ -696,7 +696,7 @@ def create_instance(self, defn, zone, devmap, user_data, ebs_optimized): # the instance ID, we'll get the same instance ID on the # next run. 
if not self.client_token: - with self.depl._db: + with self.depl._state.db: self.client_token = nixops.util.generate_random_string(length=48) # = 64 ASCII chars self.state = self.STARTING @@ -899,7 +899,7 @@ def create(self, defn, check, allow_reboot, allow_recreate): # Generate a public/private host key. if not self.public_host_key: (private, public) = nixops.util.create_key_pair(type=defn.host_key_type()) - with self.depl._db: + with self.depl._state.db: self.public_host_key = public self.private_host_key = private @@ -909,7 +909,7 @@ def create(self, defn, check, allow_reboot, allow_recreate): instance = self.create_instance(defn, zone, devmap, user_data, ebs_optimized) - with self.depl._db: + with self.depl._state.db: self.vm_id = instance.id self.ami = defn.ami self.instance_type = defn.instance_type @@ -994,7 +994,7 @@ def create(self, defn, check, allow_reboot, allow_recreate): elastic_ipv4 = res.public_ipv4 self._assign_elastic_ip(elastic_ipv4, check) - with self.depl._db: + with self.depl._state.db: self.use_private_ip_address = defn.use_private_ip_address self.associate_public_ip_address = defn.associate_public_ip_address diff --git a/nixops/backends/hetzner.py b/nixops/backends/hetzner.py index cf85c1e97..4b2661646 100644 --- a/nixops/backends/hetzner.py +++ b/nixops/backends/hetzner.py @@ -594,7 +594,7 @@ def create(self, defn, check, allow_reboot, allow_recreate): self.log_start("creating an exclusive robot admin sub-account " "for ‘{0}’... ".format(self.name)) server = self._get_server_from_main_robot(self.main_ipv4, defn) - with self.depl._db: + with self.depl._state.db: (self.robot_admin_user, self.robot_admin_pass) = server.admin.create() self.log_end("done. 
({0})".format(self.robot_admin_user)) @@ -609,7 +609,7 @@ def create(self, defn, check, allow_reboot, allow_recreate): ) if robot_user != self.robot_admin_user or \ robot_pass != self.robot_admin_pass: - with self.depl._db: + with self.depl._state.db: (self.robot_admin_user, self.robot_admin_pass) = (robot_user, robot_pass) diff --git a/nixops/backends/virtualbox.py b/nixops/backends/virtualbox.py index ed6ce8b4e..151095505 100644 --- a/nixops/backends/virtualbox.py +++ b/nixops/backends/virtualbox.py @@ -141,6 +141,7 @@ def _update_ip(self): capture_stdout=True).rstrip() if res[0:7] != "Value: ": return new_address = res[7:] + nixops.known_hosts.update(self.private_ipv4, new_address, self.public_host_key) self.private_ipv4 = new_address @@ -194,7 +195,7 @@ def create(self, defn, check, allow_reboot, allow_recreate): # Generate a public/private host key. if not self.public_host_key: (private, public) = nixops.util.create_key_pair() - with self.depl._db: + with self.depl._state.db: self.public_host_key = public self.private_host_key = private @@ -203,7 +204,7 @@ def create(self, defn, check, allow_reboot, allow_recreate): # Backwards compatibility. 
if self.disk: - with self.depl._db: + with self.depl._state.db: self._update_disk("disk1", {"created": True, "path": self.disk, "attached": self.disk_attached, "port": 0}) diff --git a/nixops/deployment.py b/nixops/deployment.py index 33b3035ac..66e11b126 100644 --- a/nixops/deployment.py +++ b/nixops/deployment.py @@ -12,7 +12,6 @@ import errno from collections import defaultdict from xml.etree import ElementTree -import nixops.statefile import nixops.backends import nixops.logger import nixops.parallel @@ -22,7 +21,6 @@ import getpass import traceback import glob -import fcntl import itertools import platform from nixops.util import ansi_success @@ -56,9 +54,8 @@ class Deployment(object): # internal variable to mark if network attribute of network has been evaluated (separately) network_attr_eval = False - def __init__(self, statefile, uuid, log_file=sys.stderr): - self._statefile = statefile - self._db = statefile._db + def __init__(self, state, uuid, log_file=sys.stderr): + self._state = state self.uuid = uuid self._last_log_prefix = None @@ -78,15 +75,8 @@ def __init__(self, statefile, uuid, log_file=sys.stderr): if not os.path.exists(self.expr_path): self.expr_path = os.path.dirname(__file__) + "/../nix" - self.resources = {} - with self._db: - c = self._db.cursor() - c.execute("select id, name, type from Resources where deployment = ?", (self.uuid,)) - for (id, name, type) in c.fetchall(): - r = _create_state(self, type, name, id) - self.resources[name] = r + self.resources = self._state.get_resources_for(self) self.logger.update_log_prefixes() - self.definitions = None @@ -126,65 +116,41 @@ def get_machine(self, name): raise Exception("resource ‘{0}’ is not a machine".format(name)) return res - def _set_attrs(self, attrs): - """Update deployment attributes in the state file.""" - with self._db: - c = self._db.cursor() - for n, v in attrs.iteritems(): - if v == None: - c.execute("delete from DeploymentAttrs where deployment = ? 
and name = ?", (self.uuid, n)) - else: - c.execute("insert or replace into DeploymentAttrs(deployment, name, value) values (?, ?, ?)", - (self.uuid, n, v)) + """Update deployment attributes in the state.""" + self._state.set_deployment_attrs(self.uuid, attrs) def _set_attr(self, name, value): - """Update one deployment attribute in the state file.""" + """Update one deployment attribute in the state.""" self._set_attrs({name: value}) def _del_attr(self, name): - """Delete a deployment attribute from the state file.""" - with self._db: - self._db.execute("delete from DeploymentAttrs where deployment = ? and name = ?", (self.uuid, name)) + """Delete a deployment attribute from the state.""" + self._state.del_deployment_attr(self.uuid, name) + #TODO(moretea): The default param does not appear to be used at all? + # Removed it when moving the body to nixops/state/file.py. def _get_attr(self, name, default=nixops.util.undefined): - """Get a deployment attribute from the state file.""" - with self._db: - c = self._db.cursor() - c.execute("select value from DeploymentAttrs where deployment = ? and name = ?", (self.uuid, name)) - row = c.fetchone() - if row != None: return row[0] - return nixops.util.undefined - + """Get a deployment attribute from the state.""" + return self._state.get_deployment_attr(self.uuid, name) def _create_resource(self, name, type): - c = self._db.cursor() - c.execute("select 1 from Resources where deployment = ? 
and name = ?", (self.uuid, name)) - if len(c.fetchall()) != 0: - raise Exception("resource already exists in database!") - c.execute("insert into Resources(deployment, name, type) values (?, ?, ?)", - (self.uuid, name, type)) - id = c.lastrowid - r = _create_state(self, type, name, id) + r = self._state.create_resource(self, name, type) self.resources[name] = r return r def export(self): - with self._db: - c = self._db.cursor() - c.execute("select name, value from DeploymentAttrs where deployment = ?", (self.uuid,)) - rows = c.fetchall() - res = {row[0]: row[1] for row in rows} - res['resources'] = {r.name: r.export() for r in self.resources.itervalues()} - return res + res = self._state.get_all_deployment_attrs(self.uuid) + res['resources'] = {r.name: r.export() for r in self.resources.itervalues()} + return res def import_(self, attrs): - with self._db: + with self._state.db: for k, v in attrs.iteritems(): if k == 'resources': continue self._set_attr(k, v) @@ -195,49 +161,21 @@ def import_(self, attrs): def clone(self): - with self._db: - new = self._statefile.create_deployment() - self._db.execute("insert into DeploymentAttrs (deployment, name, value) " + - "select ?, name, value from DeploymentAttrs where deployment = ?", - (new.uuid, self.uuid)) - new.configs_path = None - return new + return self._state.clone_deployment(self.uuid) def _get_deployment_lock(self): - if self._lock_file_path is None: - lock_dir = os.environ.get("HOME", "") + "/.nixops/locks" - if not os.path.exists(lock_dir): os.makedirs(lock_dir, 0700) - self._lock_file_path = lock_dir + "/" + self.uuid - class DeploymentLock(object): - def __init__(self, depl): - self._lock_file_path = depl._lock_file_path - self._logger = depl.logger - self._lock_file = None - def __enter__(self): - self._lock_file = open(self._lock_file_path, "w") - fcntl.fcntl(self._lock_file, fcntl.F_SETFD, fcntl.FD_CLOEXEC) - try: - fcntl.flock(self._lock_file, fcntl.LOCK_EX | fcntl.LOCK_NB) - except IOError: - 
self._logger.log( - "waiting for exclusive deployment lock..." - ) - fcntl.flock(self._lock_file, fcntl.LOCK_EX) - def __exit__(self, exception_type, exception_value, exception_traceback): - self._lock_file.close() - return DeploymentLock(self) + return self._state.get_deployment_lock(self) def delete_resource(self, m): del self.resources[m.name] - with self._db: - self._db.execute("delete from Resources where deployment = ? and id = ?", (self.uuid, m.id)) + self._state.delete_resource(self.uuid, m.id) def delete(self, force=False): """Delete this deployment from the state file.""" - with self._db: + with self._state.db: if not force and len(self.resources) > 0: raise Exception("cannot delete this deployment because it still has resources") @@ -248,7 +186,7 @@ def delete(self, force=False): if os.path.islink(p): os.remove(p) # Delete the deployment from the database. - self._db.execute("delete from Deployments where uuid = ?", (self.uuid,)) + self._state._delete_deployment(self.uuid) def _nix_path_flags(self): @@ -856,7 +794,7 @@ def evaluate_active(self, include=[], exclude=[], kill_obsolete=False): self.evaluate() # Create state objects for all defined resources. - with self._db: + with self._state.db: for m in self.definitions.itervalues(): if m.name not in self.resources: self._create_resource(m.name, m.get_type()) @@ -1197,9 +1135,7 @@ def rename(self, name, new_name): m = self.resources.pop(name) self.resources[new_name] = m - - with self._db: - self._db.execute("update Resources set name = ? where deployment = ? 
and id = ?", (new_name, self.uuid, m.id)) + self._state._rename_resource(self.uuid, m.id, new_name) def send_keys(self, include=[], exclude=[]): @@ -1244,15 +1180,6 @@ def _create_definition(xml, config, type_name): raise nixops.deployment.UnknownBackend("unknown resource type ‘{0}’".format(type_name)) -def _create_state(depl, type, name, id): - """Create a resource state object of the desired type.""" - - for cls in _subclasses(nixops.resources.ResourceState): - if type == cls.get_type(): - return cls(depl, name, id) - - raise nixops.deployment.UnknownBackend("unknown resource type ‘{0}’".format(type)) - # Automatically load all resource types. def _load_modules_from(dir): diff --git a/nixops/resources/__init__.py b/nixops/resources/__init__.py index b3001847b..d53f7f5fa 100644 --- a/nixops/resources/__init__.py +++ b/nixops/resources/__init__.py @@ -3,7 +3,7 @@ import re import nixops.util -from nixops.state import StateDict +from nixops.state.state_helper import StateDict from nixops.diff import Diff, Handler class ResourceDefinition(object): @@ -66,14 +66,8 @@ def __init__(self, depl, name, id): def _set_attrs(self, attrs): """Update machine attributes in the state file.""" - with self.depl._db: - c = self.depl._db.cursor() - for n, v in attrs.iteritems(): - if v == None: - c.execute("delete from ResourceAttrs where machine = ? and name = ?", (self.id, n)) - else: - c.execute("insert or replace into ResourceAttrs(machine, name, value) values (?, ?, ?)", - (self.id, n, v)) + self.depl._state.set_resource_attrs(self.depl.uuid, self.id, attrs) + def _set_attr(self, name, value): """Update one machine attribute in the state file.""" @@ -81,31 +75,24 @@ def _set_attr(self, name, value): def _del_attr(self, name): """Delete a machine attribute from the state file.""" - with self.depl._db: - self.depl._db.execute("delete from ResourceAttrs where machine = ? 
and name = ?", (self.id, name)) + self.depl._state.del_resource_attr(self.depl.uuid, self.id, name) + #TODO(moretea): again, the default option appears to be defunct. + # Have removed it in state/file.py. def _get_attr(self, name, default=nixops.util.undefined): """Get a machine attribute from the state file.""" - with self.depl._db: - c = self.depl._db.cursor() - c.execute("select value from ResourceAttrs where machine = ? and name = ?", (self.id, name)) - row = c.fetchone() - if row != None: return row[0] - return nixops.util.undefined + + return self.depl._state.get_resource_attr(self.depl.uuid, self.id, name) def export(self): """Export the resource to move between databases""" - with self.depl._db: - c = self.depl._db.cursor() - c.execute("select name, value from ResourceAttrs where machine = ?", (self.id,)) - rows = c.fetchall() - res = {row[0]: row[1] for row in rows} - res['type'] = self.get_type() - return res + res = self.depl._state.get_all_resource_attrs(self.depl.uuid, self.id) + res['type'] = self.get_type() + return res def import_(self, attrs): """Import the resource from another database""" - with self.depl._db: + with self.depl._state.db: for k, v in attrs.iteritems(): if k == 'type': continue self._set_attr(k, v) diff --git a/nixops/resources/aws_vpn_connection.py b/nixops/resources/aws_vpn_connection.py index b2a3237a2..c6962e660 100644 --- a/nixops/resources/aws_vpn_connection.py +++ b/nixops/resources/aws_vpn_connection.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- -from nixops.state import StateDict +from nixops.state.state_helper import StateDict from nixops.diff import Diff, Handler import nixops.util import nixops.resources @@ -89,7 +89,7 @@ def realize_create_vpn_conn(self, allow_recreate): ) vpn_conn_id = vpn_connection['VpnConnection']['VpnConnectionId'] - with self.depl._db: + with self.depl._state.db: self.state = self.UP self._state['vpnConnectionId'] = vpn_conn_id self._state['vpnGatewayId'] = vpn_gateway_id @@ -114,7 +114,7 @@ def 
_destroy(self): else: raise e - with self.depl._db: + with self.depl._state.db: self.state = self.MISSING self._state['vpnConnectionId'] = None self._state['vpnGatewayId'] = None diff --git a/nixops/resources/aws_vpn_connection_route.py b/nixops/resources/aws_vpn_connection_route.py index 93ba3b29d..a7f099f76 100644 --- a/nixops/resources/aws_vpn_connection_route.py +++ b/nixops/resources/aws_vpn_connection_route.py @@ -7,7 +7,7 @@ from nixops.resources.ec2_common import EC2CommonState import nixops.ec2_utils from nixops.diff import Diff, Handler -from nixops.state import StateDict +from nixops.state.state_helper import StateDict class AWSVPNConnectionRouteDefinition(nixops.resources.ResourceDefinition): """Definition of a VPN connection route""" @@ -80,7 +80,7 @@ def realize_create_vpn_route(self, allow_recreate): DestinationCidrBlock=config['destinationCidrBlock'], VpnConnectionId=vpn_conn_id) - with self.depl._db: + with self.depl._state.db: self.state = self.UP self._state['vpnConnectionId'] = vpn_conn_id self._state['destinationCidrBlock'] = config['destinationCidrBlock'] @@ -98,7 +98,7 @@ def _destroy(self): else: raise e - with self.depl._db: + with self.depl._state.db: self.state = self.MISSING self._state['vpnConnectionId'] = None self._state['destinationCidrBlock'] = None diff --git a/nixops/resources/aws_vpn_gateway.py b/nixops/resources/aws_vpn_gateway.py index 57c168987..7137e3526 100644 --- a/nixops/resources/aws_vpn_gateway.py +++ b/nixops/resources/aws_vpn_gateway.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- -from nixops.state import StateDict +from nixops.state.state_helper import StateDict from nixops.diff import Diff, Handler import nixops.util import nixops.resources @@ -85,7 +85,7 @@ def realize_create_vpn_gtw(self, allow_recreate): VpnGatewayId=vpn_gtw_id) #TODO wait for the attchement state - with self.depl._db: + with self.depl._state.db: self.state = self.UP self._state['vpnGatewayId'] = vpn_gtw_id self._state['vpcId'] = vpc_id @@ -122,7 
+122,7 @@ def _destroy(self): else: raise e - with self.depl._db: + with self.depl._state.db: self.state = self.MISSING self._state['region'] = None self._state['vpnGatewayId'] = None diff --git a/nixops/resources/cloudwatch_log_group.py b/nixops/resources/cloudwatch_log_group.py index e53827284..943739e74 100644 --- a/nixops/resources/cloudwatch_log_group.py +++ b/nixops/resources/cloudwatch_log_group.py @@ -72,7 +72,7 @@ def _destroy(self): self._conn.delete_log_group(self.log_group_name) except boto.logs.exceptions.ResourceNotFoundException, e: self.log("the log group ‘{0}’ was already deleted".format(self.log_group_name)) - with self.depl._db: + with self.depl._state.db: self.state = self.MISSING self.log_group_name = None self.region = None @@ -114,7 +114,7 @@ def create(self, defn, check, allow_reboot, allow_recreate): self.log("setting the retention in days of '{0}' to '{1}'".format(defn.config['name'], defn.config['retentionInDays'])) self._conn.set_retention(log_group_name=defn.config['name'],retention_in_days=defn.config['retentionInDays']) - with self.depl._db: + with self.depl._state.db: self.state = self.UP self.log_group_name = defn.config['name'] self.region = defn.config['region'] @@ -123,4 +123,4 @@ def create(self, defn, check, allow_reboot, allow_recreate): def destroy(self, wipe=False): self._destroy() - return True \ No newline at end of file + return True diff --git a/nixops/resources/cloudwatch_log_stream.py b/nixops/resources/cloudwatch_log_stream.py index c53c71dd8..4d6e770b6 100644 --- a/nixops/resources/cloudwatch_log_stream.py +++ b/nixops/resources/cloudwatch_log_stream.py @@ -72,7 +72,7 @@ def _destroy(self): self._conn.delete_log_stream(log_group_name=self.log_group_name,log_stream_name=self.log_stream_name) except boto.logs.exceptions.ResourceNotFoundException, e: self.log("the log group ‘{0}’ or log stream ‘{1}’ was already deleted".format(self.log_group_name,self.log_stream_name)) - with self.depl._db: + with self.depl._state.db: 
self.state = self.MISSING self.log_group_name = None self.log_stream_name = None @@ -121,7 +121,7 @@ def create(self, defn, check, allow_reboot, allow_recreate): exist, arn = self.lookup_cloudwatch_log_stream(log_group_name=defn.config['logGroupName'], log_stream_name=defn.config['name']) - with self.depl._db: + with self.depl._state.db: self.state = self.UP self.log_stream_name = defn.config['name'] self.log_group_name = defn.config['logGroupName'] @@ -130,4 +130,4 @@ def create(self, defn, check, allow_reboot, allow_recreate): def destroy(self, wipe=False): self._destroy() - return True \ No newline at end of file + return True diff --git a/nixops/resources/datadog-monitor.py b/nixops/resources/datadog-monitor.py index 3032de766..6d02ed05d 100644 --- a/nixops/resources/datadog-monitor.py +++ b/nixops/resources/datadog-monitor.py @@ -107,7 +107,7 @@ def create(self, defn, check, allow_reboot, allow_recreate): if 'errors' in response: raise Exception(str(response['errors'])) - with self.depl._db: + with self.depl._state.db: self.state = self.UP self.api_key = defn.config['apiKey'] self.app_key = defn.config['appKey'] diff --git a/nixops/resources/datadog-screenboard.py b/nixops/resources/datadog-screenboard.py index 2d03ecdf5..d7c3f1197 100644 --- a/nixops/resources/datadog-screenboard.py +++ b/nixops/resources/datadog-screenboard.py @@ -104,7 +104,7 @@ def create(self, defn, check, allow_reboot, allow_recreate): if 'errors' in response: raise Exception(str(response['errors'])) - with self.depl._db: + with self.depl._state.db: self.state = self.UP self.api_key = defn.config['apiKey'] self.app_key = defn.config['appKey'] diff --git a/nixops/resources/datadog-timeboard.py b/nixops/resources/datadog-timeboard.py index 1fdcda25d..edc43dbd3 100644 --- a/nixops/resources/datadog-timeboard.py +++ b/nixops/resources/datadog-timeboard.py @@ -111,7 +111,7 @@ def create(self, defn, check, allow_reboot, allow_recreate): else: url = response['url'] - with self.depl._db: + with 
self.depl._state.db: self.state = self.UP self.api_key = defn.config['apiKey'] self.app_key = defn.config['appKey'] diff --git a/nixops/resources/ebs_volume.py b/nixops/resources/ebs_volume.py index ec1044e16..0485057ec 100644 --- a/nixops/resources/ebs_volume.py +++ b/nixops/resources/ebs_volume.py @@ -122,7 +122,7 @@ def create(self, defn, check, allow_reboot, allow_recreate): # volume we just created. Doesn't seem to be anything we # can do about this. - with self.depl._db: + with self.depl._state.db: self.state = self.STARTING self.region = defn.region self.zone = defn.zone diff --git a/nixops/resources/ec2_common.py b/nixops/resources/ec2_common.py index d58f53a08..e3d1eacd5 100644 --- a/nixops/resources/ec2_common.py +++ b/nixops/resources/ec2_common.py @@ -19,7 +19,7 @@ def _retry(self, fun, **kwargs): def get_common_tags(self): tags = {'CharonNetworkUUID': self.depl.uuid, 'CharonMachineName': self.name, - 'CharonStateFile': "{0}@{1}:{2}".format(getpass.getuser(), socket.gethostname(), self.depl._db.db_file)} + 'CharonStateFile': "{0}@{1}:{2}".format(getpass.getuser(), socket.gethostname(), self.depl._state.db.db_file)} if self.depl.name: tags['CharonNetworkName'] = self.depl.name return tags diff --git a/nixops/resources/ec2_keypair.py b/nixops/resources/ec2_keypair.py index 2e5d1be01..4e82864e4 100644 --- a/nixops/resources/ec2_keypair.py +++ b/nixops/resources/ec2_keypair.py @@ -78,7 +78,7 @@ def create(self, defn, check, allow_reboot, allow_recreate): # Generate the key pair locally. if not self.public_key: (private, public) = nixops.util.create_key_pair(type="rsa") # EC2 only supports RSA keys. 
- with self.depl._db: + with self.depl._state.db: self.public_key = public self.private_key = private @@ -102,7 +102,7 @@ def create(self, defn, check, allow_reboot, allow_recreate): self.log("uploading EC2 key pair ‘{0}’...".format(defn.keypair_name)) self._conn.import_key_pair(defn.keypair_name, self.public_key) - with self.depl._db: + with self.depl._state.db: self.state = self.UP self.keypair_name = defn.keypair_name diff --git a/nixops/resources/ec2_placement_group.py b/nixops/resources/ec2_placement_group.py index 511e52370..871f2cea6 100644 --- a/nixops/resources/ec2_placement_group.py +++ b/nixops/resources/ec2_placement_group.py @@ -67,11 +67,11 @@ def _connect(self): def create(self, defn, check, allow_reboot, allow_recreate): # Name or region change means a completely new security group if self.placement_group_name and (defn.placement_group_name != self.placement_group_name or defn.region != self.region): - with self.depl._db: + with self.depl._state.db: self.state = self.UNKNOWN self.old_placement_groups = self.old_placement_groups + [{'name': self.placement_group_name, 'region': self.region}] - with self.depl._db: + with self.depl._state.db: self.region = defn.region self.access_key_id = defn.access_key_id or nixops.ec2_utils.get_access_key_id() self.placement_group_name = defn.placement_group_name @@ -79,7 +79,7 @@ def create(self, defn, check, allow_reboot, allow_recreate): grp = None if check: - with self.depl._db: + with self.depl._state.db: self._connect() try: diff --git a/nixops/resources/ec2_rds_dbinstance.py b/nixops/resources/ec2_rds_dbinstance.py index 525112d6f..5fca0569e 100644 --- a/nixops/resources/ec2_rds_dbinstance.py +++ b/nixops/resources/ec2_rds_dbinstance.py @@ -171,7 +171,7 @@ def _wait_for_dbinstance(self, dbinstance, state='available'): time.sleep(6) def _copy_dbinstance_attrs(self, dbinstance, security_groups): - with self.depl._db: + with self.depl._state.db: self.rds_dbinstance_id = dbinstance.id 
self.rds_dbinstance_allocated_storage = int(dbinstance.allocated_storage) self.rds_dbinstance_instance_class = dbinstance.instance_class @@ -207,7 +207,7 @@ def fetch_security_group_resources(self, config): return security_groups def create(self, defn, check, allow_reboot, allow_recreate): - with self.depl._db: + with self.depl._state.db: self.access_key_id = defn.access_key_id or nixops.ec2_utils.get_access_key_id() if not self.access_key_id: raise Exception("please set ‘accessKeyId’, $EC2_ACCESS_KEY or $AWS_ACCESS_KEY_ID") @@ -229,7 +229,7 @@ def create(self, defn, check, allow_reboot, allow_recreate): dbinstance = self._try_fetch_dbinstance(self.rds_dbinstance_id) - with self.depl._db: + with self.depl._state.db: if check or self.state == self.MISSING or self.state == self.UNKNOWN: if dbinstance and (self.state == self.MISSING or self.state == self.UNKNOWN): if dbinstance.status == 'deleting': @@ -272,7 +272,7 @@ def create(self, defn, check, allow_reboot, allow_recreate): self._copy_dbinstance_attrs(dbinstance, defn.rds_dbinstance_security_groups) self.state = self.UP - with self.depl._db: + with self.depl._state.db: if self.state == self.UP and self._diff_defn(defn): if dbinstance is None: raise Exception("state is UP but database instance does not exist. 
re-run with --check option to synchronize states") diff --git a/nixops/resources/ec2_rds_dbsecurity_group.py b/nixops/resources/ec2_rds_dbsecurity_group.py index 6f5b122f7..d73b4530f 100644 --- a/nixops/resources/ec2_rds_dbsecurity_group.py +++ b/nixops/resources/ec2_rds_dbsecurity_group.py @@ -86,7 +86,7 @@ def generate_rule(rule): for rule in response['DBSecurityGroups'][0].get('IPRanges', []): rules.append(generate_rule(rule)) - with self.depl._db: + with self.depl._state.db: self._state['rules'] = rules def realize_create_sg(self, allow_recreate): @@ -107,7 +107,7 @@ def realize_create_sg(self, allow_recreate): self.get_client("rds").create_db_security_group( DBSecurityGroupName=config['groupName'], DBSecurityGroupDescription=config['description']) - with self.depl._db: + with self.depl._state.db: self.state = self.UP self._state['groupName'] = config['groupName'] self._state['description'] = config['description'] @@ -128,7 +128,7 @@ def realize_rules_change(self, allow_recreate): kwargs = self.process_rule(rule) self.get_client("rds").authorize_db_security_group_ingress(**kwargs) - with self.depl._db: + with self.depl._state.db: self._state['rules'] = config['rules'] def process_rule(self, config): @@ -153,7 +153,7 @@ def _destroy(self): else: raise error - with self.depl._db: + with self.depl._state.db: self.state = self.MISSING self._state['groupName'] = None self._state['region'] = None diff --git a/nixops/resources/ec2_security_group.py b/nixops/resources/ec2_security_group.py index e65b55995..2e76bb9c6 100644 --- a/nixops/resources/ec2_security_group.py +++ b/nixops/resources/ec2_security_group.py @@ -102,7 +102,7 @@ def retry_notfound(f): # Name or region change means a completely new security group if self.security_group_name and (defn.security_group_name != self.security_group_name or defn.region != self.region): - with self.depl._db: + with self.depl._state.db: self.state = self.UNKNOWN self.old_security_groups = self.old_security_groups + [{'name': 
self.security_group_name, 'region': self.region}] @@ -111,7 +111,7 @@ def retry_notfound(f): res = self.depl.get_typed_resource(defn.vpc_id[4:].split(".")[0], "vpc") defn.vpc_id = res._state['vpcId'] - with self.depl._db: + with self.depl._state.db: self.region = defn.region self.access_key_id = defn.access_key_id or nixops.ec2_utils.get_access_key_id() self.security_group_name = defn.security_group_name @@ -120,7 +120,7 @@ def retry_notfound(f): grp = None if check: - with self.depl._db: + with self.depl._state.db: self._connect() try: diff --git a/nixops/resources/elastic_file_system.py b/nixops/resources/elastic_file_system.py index d4b23b4e3..64560a376 100644 --- a/nixops/resources/elastic_file_system.py +++ b/nixops/resources/elastic_file_system.py @@ -85,7 +85,7 @@ def create(self, defn, check, allow_reboot, allow_recreate): if len(fss) == 1: fs = fss[0] if fs["LifeCycleState"] == "available": - with self.depl._db: + with self.depl._state.db: self.state = self.UP self.fs_id = fs["FileSystemId"] self.region = defn.config["region"] @@ -146,7 +146,7 @@ def destroy(self, wipe=False): break time.sleep(1) - with self.depl._db: + with self.depl._state.db: self.state = self.MISSING self.fs_id = None self.region = None diff --git a/nixops/resources/elastic_file_system_mount_target.py b/nixops/resources/elastic_file_system_mount_target.py index 180782d6d..2d79df194 100644 --- a/nixops/resources/elastic_file_system_mount_target.py +++ b/nixops/resources/elastic_file_system_mount_target.py @@ -41,7 +41,7 @@ def get_type(cls): return "elastic-file-system-mount-target" def _reset_state(self): - with self.depl._db: + with self.depl._state.db: self.state = self.MISSING self.access_key_id = None self.region = None @@ -97,7 +97,7 @@ def create(self, defn, check, allow_reboot, allow_recreate): securityGroups = self.security_groups_to_ids(region, access_key_id, subnetId, defn.config["securityGroups"] ) res = client.create_mount_target(FileSystemId=fs_id, SubnetId=subnetId, 
SecurityGroups=securityGroups, **args) - with self.depl._db: + with self.depl._state.db: self.state = self.STARTING self.fsmt_id = res["MountTargetId"] self.fs_id = fs_id diff --git a/nixops/resources/elastic_ip.py b/nixops/resources/elastic_ip.py index eac8d8885..df3a45754 100644 --- a/nixops/resources/elastic_ip.py +++ b/nixops/resources/elastic_ip.py @@ -79,7 +79,7 @@ def create(self, defn, check, allow_reboot, allow_recreate): # address we just created. Doesn't seem to be anything we # can do about this. - with self.depl._db: + with self.depl._state.db: self.state = self.UP self.region = defn.config['region'] self.public_ipv4 = address['PublicIp'] @@ -123,7 +123,7 @@ def destroy(self, wipe=False): else: self._client.release_address(PublicIp=eip['PublicIp']) - with self.depl._db: + with self.depl._state.db: self.state = self.MISSING self.public_ipv4 = None self.allocation_id = None diff --git a/nixops/resources/iam_role.py b/nixops/resources/iam_role.py index 880d9aae1..5963f3400 100644 --- a/nixops/resources/iam_role.py +++ b/nixops/resources/iam_role.py @@ -140,7 +140,7 @@ def _destroy(self): raise - with self.depl._db: + with self.depl._state.db: self.state = self.MISSING self.role_name = None self.access_key_id = None @@ -226,7 +226,7 @@ def create(self, defn, check, allow_reboot, allow_recreate): if defn.assume_role_policy != "": self._conn.update_assume_role_policy(defn.role_name, defn.assume_role_policy) - with self.depl._db: + with self.depl._state.db: self.state = self.UP self.role_name = defn.role_name self.policy = defn.policy diff --git a/nixops/resources/route53_health_check.py b/nixops/resources/route53_health_check.py index b6f7a1800..883628e25 100644 --- a/nixops/resources/route53_health_check.py +++ b/nixops/resources/route53_health_check.py @@ -152,7 +152,7 @@ def cannot_change(desc, sk, d): ref = str(uuid.uuid1()) self.log('creating health check') health_check = client.create_health_check(CallerReference=ref, HealthCheckConfig=cfg) - with 
self.depl._db: + with self.depl._state.db: self.state = self.UP self.health_check_id = health_check['HealthCheck']['Id'] else: @@ -165,7 +165,7 @@ def cannot_change(desc, sk, d): self.log('updating health check') client.update_health_check(**cfg) - with self.depl._db: + with self.depl._state.db: self.health_check_config = orig_cfg self.child_health_checks = defn.child_health_checks @@ -184,7 +184,7 @@ def destroy(self, wipe=False): pass raise - with self.depl._db: + with self.depl._state.db: self.state = self.MISSING return True diff --git a/nixops/resources/route53_hosted_zone.py b/nixops/resources/route53_hosted_zone.py index ffaf8126d..f071a557a 100644 --- a/nixops/resources/route53_hosted_zone.py +++ b/nixops/resources/route53_hosted_zone.py @@ -96,7 +96,7 @@ def create(self, defn, check, allow_reboot, allow_recreate): self.log('creating hosted zone for {}'.format(defn.zone_name)) hosted_zone = client.create_hosted_zone(**args) - with self.depl._db: + with self.depl._state.db: self.state = self.UP self.zone_id = hosted_zone['HostedZone']['Id'] self.private_zone = defn.private_zone @@ -119,7 +119,7 @@ def create(self, defn, check, allow_reboot, allow_recreate): for assoc in tbd: client.disassociate_vpc_from_hosted_zone(HostedZoneId=self.zone_id, VPC={ 'VPCId': assoc['vpcId'], 'VPCRegion': assoc['region'] }) - with self.depl._db: + with self.depl._state.db: self.associated_vpcs = defn.associated_vpcs return True @@ -137,7 +137,7 @@ def destroy(self, wipe=False): pass raise - with self.depl._db: + with self.depl._state.db: self.state = self.MISSING return True diff --git a/nixops/resources/route53_recordset.py b/nixops/resources/route53_recordset.py index e88c80310..e4ae44687 100644 --- a/nixops/resources/route53_recordset.py +++ b/nixops/resources/route53_recordset.py @@ -177,7 +177,7 @@ def resolve_machine_ip(v): ChangeBatch=self.make_batch('UPSERT', defn) )) - with self.depl._db: + with self.depl._state.db: self.state = self.UP self.zone_name = zone_name 
self.zone_id = zone_id @@ -249,7 +249,7 @@ def destroy(self, wipe=False): HostedZoneId=self.zone_id, ChangeBatch=self.make_batch('DELETE', self))) - with self.depl._db: + with self.depl._state.db: self.state = self.MISSING return True diff --git a/nixops/resources/s3_bucket.py b/nixops/resources/s3_bucket.py index 22d819c12..5e53a7034 100644 --- a/nixops/resources/s3_bucket.py +++ b/nixops/resources/s3_bucket.py @@ -103,7 +103,7 @@ def create(self, defn, check, allow_reboot, allow_recreate): except botocore.exceptions.ClientError as e: if e.response['Error']['Code'] != "BucketAlreadyOwnedByYou": raise - with self.depl._db: + with self.depl._state.db: self.state = self.UP self.bucket_name = defn.bucket_name self.region = defn.region diff --git a/nixops/resources/sns_topic.py b/nixops/resources/sns_topic.py index e454b7821..fddc05666 100644 --- a/nixops/resources/sns_topic.py +++ b/nixops/resources/sns_topic.py @@ -74,7 +74,7 @@ def _destroy(self): self.connect() self.log("destroying SNS topic ‘{0}’...".format(self.topic_name)) self._conn.delete_topic(self.arn) - with self.depl._db: + with self.depl._state.db: self.state = self.MISSING self.topic_name = None self.region = None @@ -136,7 +136,7 @@ def create(self, defn, check, allow_reboot, allow_recreate): if subscriber_arn != "PendingConfirmation": self._conn.unsubscribe(subscription=subscriber_arn) - with self.depl._db: + with self.depl._state.db: self.state = self.UP self.topic_name = defn.config['name'] self.display_name = defn.config['displayName'] diff --git a/nixops/resources/sqs_queue.py b/nixops/resources/sqs_queue.py index 4ad53ba8d..207fb794c 100644 --- a/nixops/resources/sqs_queue.py +++ b/nixops/resources/sqs_queue.py @@ -84,7 +84,7 @@ def _destroy(self): if q: self.log("destroying SQS queue ‘{0}’...".format(self.queue_name)) self._conn.delete_queue(q) - with self.depl._db: + with self.depl._state.db: self.state = self.MISSING self.queue_name = None self.queue_base_name = None @@ -122,7 +122,7 @@ def 
create(self, defn, check, allow_reboot, allow_recreate): self.log("creating SQS queue ‘{0}’...".format(defn.queue_name)) q = nixops.ec2_utils.retry(lambda: self._conn.create_queue(defn.queue_name, defn.visibility_timeout), error_codes = ['AWS.SimpleQueueService.QueueDeletedRecently']) - with self.depl._db: + with self.depl._state.db: self.state = self.UP self.queue_name = defn.queue_name self.url = q.url diff --git a/nixops/resources/ssh_keypair.py b/nixops/resources/ssh_keypair.py index 63fe78164..faf15907b 100644 --- a/nixops/resources/ssh_keypair.py +++ b/nixops/resources/ssh_keypair.py @@ -46,7 +46,7 @@ def create(self, defn, check, allow_reboot, allow_recreate): # Generate the key pair locally. if not self.public_key: (private, public) = nixops.util.create_key_pair(type="ed25519") - with self.depl._db: + with self.depl._state.db: self.public_key = public self.private_key = private self.state = state = nixops.resources.ResourceState.UP diff --git a/nixops/resources/vpc.py b/nixops/resources/vpc.py index 285883c26..400aa53b5 100755 --- a/nixops/resources/vpc.py +++ b/nixops/resources/vpc.py @@ -9,7 +9,7 @@ import nixops.resources from nixops.resources.ec2_common import EC2CommonState import nixops.ec2_utils -from nixops.state import StateDict +from nixops.state.state_helper import StateDict from nixops.diff import Diff, Handler class VPCDefinition(nixops.resources.ResourceDefinition): @@ -85,7 +85,7 @@ def _destroy(self): self.cleanup_state() def cleanup_state(self): - with self.depl._db: + with self.depl._state.db: self.state = self.MISSING self._state['vpcId'] = None self._state['region'] = None @@ -130,7 +130,7 @@ def wait_for_vpc_available(self, vpc_id): raise Exception("couldn't find vpc {}, please run a deploy with --check".format(self._state["vpcId"])) self.log_end(" done") - with self.depl._db: + with self.depl._state.db: self.state = self.UP def realize_create_vpc(self, allow_recreate): @@ -151,7 +151,7 @@ def realize_create_vpc(self, allow_recreate): 
InstanceTenancy=config['instanceTenancy']) self.vpc_id = vpc.get('Vpc').get('VpcId') - with self.depl._db: + with self.depl._state.db: self.state = self.STARTING self._state["vpcId"] = self.vpc_id self._state["region"] = config['region'] @@ -171,7 +171,7 @@ def realize_classic_link_change(self, allow_recreate): self.get_client().enable_vpc_classic_link(VpcId=self.vpc_id) elif config['enableClassicLink'] == False and self._state.get('enableClassicLink', None): self.get_client().disable_vpc_classic_link(VpcId=self.vpc_id) - with self.depl._db: + with self.depl._state.db: self._state["enableClassicLink"] = config['enableClassicLink'] def realize_dns_config(self, allow_recreate): @@ -184,7 +184,7 @@ def realize_dns_config(self, allow_recreate): EnableDnsHostnames={ 'Value': config['enableDnsHostnames'] }) - with self.depl._db: + with self.depl._state.db: self._state["enableDnsSupport"] = config['enableDnsSupport'] self._state["enableDnsHostnames"] = config['enableDnsHostnames'] @@ -229,7 +229,7 @@ def realize_associate_ipv6_cidr_block(self, allow_recreate): self.get_client().disassociate_vpc_cidr_block( AssociationId=self._state['associationId']) - with self.depl._db: + with self.depl._state.db: self._state["amazonProvidedIpv6CidrBlock"] = config['amazonProvidedIpv6CidrBlock'] if assign_cidr: self._state['associationId'] = association_id diff --git a/nixops/resources/vpc_customer_gateway.py b/nixops/resources/vpc_customer_gateway.py index 9cdeb8929..ece177cba 100644 --- a/nixops/resources/vpc_customer_gateway.py +++ b/nixops/resources/vpc_customer_gateway.py @@ -7,7 +7,7 @@ import boto3 import botocore -from nixops.state import StateDict +from nixops.state.state_helper import StateDict from nixops.diff import Diff, Handler import nixops.util import nixops.resources @@ -79,12 +79,12 @@ def realize_create_customer_gtw(self, allow_recreate): Type=config['type']) customer_gtw_id = response['CustomerGateway']['CustomerGatewayId'] - with self.depl._db: self.state = 
self.STARTING + with self.depl._state.db: self.state = self.STARTING waiter = self.get_client().get_waiter('customer_gateway_available') waiter.wait(CustomerGatewayIds=[customer_gtw_id]) - with self.depl._db: + with self.depl._state.db: self.state = self.UP self._state['region'] = config['region'] self._state['customerGatewayId'] = customer_gtw_id @@ -110,7 +110,7 @@ def _destroy(self): raise e #TODO wait for customer gtw to be deleted - with self.depl._db: + with self.depl._state.db: self.state = self.MISSING self._state['region'] = None self._state['customerGatewayId'] = None diff --git a/nixops/resources/vpc_dhcp_options.py b/nixops/resources/vpc_dhcp_options.py index ee67f7271..0fd7be400 100644 --- a/nixops/resources/vpc_dhcp_options.py +++ b/nixops/resources/vpc_dhcp_options.py @@ -12,7 +12,7 @@ import nixops.resources from nixops.resources.ec2_common import EC2CommonState import nixops.ec2_utils -from nixops.state import StateDict +from nixops.state.state_helper import StateDict from nixops.diff import Diff, Handler class VPCDhcpOptionsDefinition(nixops.resources.ResourceDefinition): @@ -111,7 +111,7 @@ def create_dhcp_options(dhcp_config): return response.get('DhcpOptions').get('DhcpOptionsId') dhcp_options_id = create_dhcp_options(dhcp_config) - with self.depl._db: + with self.depl._state.db: self.state = self.STARTING self._state['vpcId'] = vpc_id self._state['dhcpOptionsId'] = dhcp_options_id @@ -122,7 +122,7 @@ def create_dhcp_options(dhcp_config): self._state['netbiosNodeType'] = config["netbiosNodeType"] self.get_client().associate_dhcp_options(DhcpOptionsId=dhcp_options_id, VpcId=vpc_id) - with self.depl._db: + with self.depl._state.db: self.state = self.UP def realize_update_tag(self, allow_recreate): @@ -145,7 +145,7 @@ def _destroy(self): else: raise e - with self.depl._db: + with self.depl._state.db: self.state = self.MISSING self._state['vpcId'] = None self._state['dhcpOptions'] = None diff --git 
a/nixops/resources/vpc_egress_only_internet_gateway.py b/nixops/resources/vpc_egress_only_internet_gateway.py index 81609be8f..aba415dae 100644 --- a/nixops/resources/vpc_egress_only_internet_gateway.py +++ b/nixops/resources/vpc_egress_only_internet_gateway.py @@ -3,7 +3,7 @@ import boto3 import botocore -from nixops.state import StateDict +from nixops.state.state_helper import StateDict from nixops.diff import Diff, Handler import nixops.util import nixops.resources @@ -82,7 +82,7 @@ def realize_create_gtw(self, allow_recreate): response = self.get_client().create_egress_only_internet_gateway(VpcId=vpc_id) igw_id = response['EgressOnlyInternetGateway']['EgressOnlyInternetGatewayId'] - with self.depl._db: + with self.depl._state.db: self.state = self.UP self._state['region'] = config['region'] self._state['vpcId'] = vpc_id @@ -93,7 +93,7 @@ def _destroy(self): self.log("deleting egress only internet gateway {0}".format(self._state['egressOnlyInternetGatewayId'])) self.get_client().delete_egress_only_internet_gateway(EgressOnlyInternetGatewayId=self._state['egressOnlyInternetGatewayId']) - with self.depl._db: + with self.depl._state.db: self.state = self.MISSING self._state['region'] = None self._state['vpcId'] = None diff --git a/nixops/resources/vpc_endpoint.py b/nixops/resources/vpc_endpoint.py index 1413feb3d..5759b5846 100644 --- a/nixops/resources/vpc_endpoint.py +++ b/nixops/resources/vpc_endpoint.py @@ -2,7 +2,7 @@ import uuid -from nixops.state import StateDict +from nixops.state.state_helper import StateDict from nixops.diff import Diff, Handler import nixops.util import nixops.resources @@ -89,7 +89,7 @@ def realize_create_endpoint(self, allow_recreate): VpcId=vpc_id) endpoint_id = response['VpcEndpoint']['VpcEndpointId'] - with self.depl._db: + with self.depl._state.db: self.state = self.UP self._state['endpointId'] = endpoint_id self._state['vpcId'] = vpc_id @@ -117,7 +117,7 @@ def realize_modify_endpoint(self, allow_recreate): 
self.get_client().modify_vpc_endpoint(**edp_input) - with self.depl._db: + with self.depl._state.db: self._state['policy'] = config['policy'] self._state['routeTableIds'] = new_rtbs @@ -132,7 +132,7 @@ def _destroy(self): else: raise e - with self.depl._db: + with self.depl._state.db: self.state = self.MISSING self._state['endpointId'] = None self._state['vpcId'] = None diff --git a/nixops/resources/vpc_internet_gateway.py b/nixops/resources/vpc_internet_gateway.py index 49cc7b140..236a8f285 100644 --- a/nixops/resources/vpc_internet_gateway.py +++ b/nixops/resources/vpc_internet_gateway.py @@ -7,7 +7,7 @@ import boto3 import botocore -from nixops.state import StateDict +from nixops.state.state_helper import StateDict from nixops.diff import Diff, Handler import nixops.util import nixops.resources @@ -89,7 +89,7 @@ def realize_create_gtw(self, allow_recreate): self.log("attaching internet gateway {0} to vpc {1}".format(igw_id, vpc_id)) self.get_client().attach_internet_gateway(InternetGatewayId=igw_id, VpcId=vpc_id) - with self.depl._db: + with self.depl._state.db: self.state = self.UP self._state['region'] = config['region'] self._state['vpcId'] = vpc_id @@ -110,7 +110,7 @@ def _destroy(self): self.log("deleting internet gateway {0}".format(self._state['internetGatewayId'])) self.get_client().delete_internet_gateway(InternetGatewayId=self._state['internetGatewayId']) - with self.depl._db: + with self.depl._state.db: self.state = self.MISSING self._state['region'] = None self._state['vpcId'] = None diff --git a/nixops/resources/vpc_nat_gateway.py b/nixops/resources/vpc_nat_gateway.py index 6a8f7f91a..c58aa63df 100644 --- a/nixops/resources/vpc_nat_gateway.py +++ b/nixops/resources/vpc_nat_gateway.py @@ -13,7 +13,7 @@ from nixops.resources.ec2_common import EC2CommonState import nixops.ec2_utils from nixops.diff import Diff, Handler -from nixops.state import StateDict +from nixops.state.state_helper import StateDict class 
VPCNatGatewayDefinition(nixops.resources.ResourceDefinition): """Definition of a VPC NAT gateway""" @@ -101,7 +101,7 @@ def realize_create_gtw(self, allow_recreate): SubnetId=subnet_id) gtw_id = response['NatGateway']['NatGatewayId'] - with self.depl._db: + with self.depl._state.db: self.state = self.UP self._state['subnetId'] = subnet_id self._state['allocationId'] = allocation_id @@ -137,7 +137,7 @@ def _destroy(self): self.log("deleting vpc NAT gateway {}".format(self._state['natGatewayId'])) try: self.get_client().delete_nat_gateway(NatGatewayId=self._state['natGatewayId']) - with self.depl._db: self.state = self.STOPPING + with self.depl._state.db: self.state = self.STOPPING except botocore.exceptions.ClientError as e: if e.response['Error']['Code'] == "InvalidNatGatewayID.NotFound" or e.response['Error']['Code'] == "NatGatewayNotFound": self.warn("nat gateway {} was already deleted".format(self._state['natGatewayId'])) @@ -147,7 +147,7 @@ def _destroy(self): if self.state == self.STOPPING: self.wait_for_nat_gtw_deletion() - with self.depl._db: + with self.depl._state.db: self.state = self.MISSING self._state['region'] = None self._state['subnetId'] = None diff --git a/nixops/resources/vpc_network_acl.py b/nixops/resources/vpc_network_acl.py index 026058940..667f6c5de 100644 --- a/nixops/resources/vpc_network_acl.py +++ b/nixops/resources/vpc_network_acl.py @@ -9,7 +9,7 @@ from nixops.resources.ec2_common import EC2CommonState import nixops.ec2_utils from nixops.diff import Diff, Handler -from nixops.state import StateDict +from nixops.state.state_helper import StateDict class VPCNetworkAcldefinition(nixops.resources.ResourceDefinition): """definition of a vpc network ACL.""" @@ -89,7 +89,7 @@ def realize_create_network_acl(self, allow_recreate): response = self.get_client().create_network_acl(VpcId=vpc_id) self.network_acl_id = response['NetworkAcl']['NetworkAclId'] - with self.depl._db: + with self.depl._state.db: self.state = self.UP self._state['vpcId'] = 
vpc_id self._state['networkAclId'] = self.network_acl_id @@ -112,7 +112,7 @@ def realize_entries_change(self, allow_recreate): for entry in to_create: rule = self.process_rule_entry(entry) self.get_client().create_network_acl_entry(**rule) - with self.depl._db: + with self.depl._state.db: self._state['entries'] = config['entries'] def realize_subnets_change(self, allow_recreate): @@ -147,7 +147,7 @@ def realize_subnets_change(self, allow_recreate): self.log("associating subnet {0} to network acl {1}".format(subnet, self.network_acl_id)) self.get_client().replace_network_acl_association(AssociationId=association_id, NetworkAclId=self.network_acl_id) - with self.depl._db: + with self.depl._state.db: self._state['subnetIds'] = new_subnets def get_default_network_acl(self, vpc_id): @@ -193,7 +193,7 @@ def _destroy(self): else: raise e - with self.depl._db: + with self.depl._state.db: self.state = self.MISSING self._state['networkAclId'] = None self._state['region'] = None diff --git a/nixops/resources/vpc_network_interface.py b/nixops/resources/vpc_network_interface.py index 8a36b513a..b867423d0 100644 --- a/nixops/resources/vpc_network_interface.py +++ b/nixops/resources/vpc_network_interface.py @@ -10,7 +10,7 @@ from nixops.resources.ec2_common import EC2CommonState import nixops.ec2_utils from nixops.diff import Diff, Handler -from nixops.state import StateDict +from nixops.state.state_helper import StateDict class VPCNetworkInterfaceDefinition(nixops.resources.ResourceDefinition): """Definition of a VPC network interface""" @@ -99,7 +99,7 @@ def split_ips(eni_ips): primary, secondary = split_ips(eni['PrivateIpAddresses']) - with self.depl._db: + with self.depl._state.db: self.state = self.UP self._state['subnetId'] = eni_input['SubnetId'] self._state['networkInterfaceId'] = eni['NetworkInterfaceId'] @@ -169,7 +169,7 @@ def realize_modify_eni_attrs(self, allow_recreate): SourceDestCheck={ 'Value':config['sourceDestCheck'] }) - with self.depl._db: + with 
self.depl._state.db: self._state['description'] = config['description'] self._state['securityGroups'] = groups self._state['sourceDestCheck'] = config['sourceDestCheck'] @@ -191,7 +191,7 @@ def _destroy(self): else: raise e - with self.depl._db: + with self.depl._state.db: self.state = self.MISSING self._state['networkInterfaceId'] = None self._state['primaryPrivateIpAddress'] = None diff --git a/nixops/resources/vpc_network_interface_attachment.py b/nixops/resources/vpc_network_interface_attachment.py index b72dd8cd7..ae8c1113b 100644 --- a/nixops/resources/vpc_network_interface_attachment.py +++ b/nixops/resources/vpc_network_interface_attachment.py @@ -12,7 +12,7 @@ from nixops.resources.ec2_common import EC2CommonState import nixops.ec2_utils from nixops.diff import Diff, Handler -from nixops.state import StateDict +from nixops.state.state_helper import StateDict class VPCNetworkInterfaceAttachmentDefinition(nixops.resources.ResourceDefinition): """Definition of a VPC network interface attachment""" @@ -94,7 +94,7 @@ def wait_for_eni_attachment(self, eni_id): self.log_end(" done") - with self.depl._db: + with self.depl._state.db: self.state = self.UP def realize_create_eni_attachment(self, allow_recreate): @@ -123,7 +123,7 @@ def realize_create_eni_attachment(self, allow_recreate): InstanceId=vm_id, NetworkInterfaceId=eni_id) - with self.depl._db: + with self.depl._state.db: self.state = self.STARTING self._state['attachmentId'] = eni_attachment['AttachmentId'] self._state['instanceId'] = vm_id @@ -157,7 +157,7 @@ def _destroy(self): try: self.get_client().detach_network_interface(AttachmentId=self._state['attachmentId'], Force=True) - with self.depl._db: + with self.depl._state.db: self.state = self.STOPPING except botocore.exceptions.ClientError as e: @@ -168,7 +168,7 @@ def _destroy(self): if self.state == self.STOPPING: self.wait_for_eni_detachment() - with self.depl._db: + with self.depl._state.db: self.state = self.MISSING self._state['region'] = None 
self._state['attachmentId'] = None diff --git a/nixops/resources/vpc_route.py b/nixops/resources/vpc_route.py index a58c15fdd..399e92772 100644 --- a/nixops/resources/vpc_route.py +++ b/nixops/resources/vpc_route.py @@ -10,7 +10,7 @@ from nixops.resources.ec2_common import EC2CommonState import nixops.ec2_utils from nixops.diff import Diff, Handler -from nixops.state import StateDict +from nixops.state.state_helper import StateDict class VPCRouteDefinition(nixops.resources.ResourceDefinition): """Definition of a VPC route""" @@ -120,7 +120,7 @@ def retrieve_defn(option): self.log("creating route {0} => {1} in route table {2}".format(retrieve_defn(target), config[destination], rtb_id)) self.get_client().create_route(**route) - with self.depl._db: + with self.depl._state.db: self.state = self.UP self._state[target] = route[self.upper(target)] self._state[destination] = config[destination] @@ -144,7 +144,7 @@ def _destroy(self): else: raise error - with self.depl._db: + with self.depl._state.db: self.state = self.MISSING self._state['routeTableId'] = None self._state[destination] = None diff --git a/nixops/resources/vpc_route_table.py b/nixops/resources/vpc_route_table.py index 60dbbbe07..ef05c7d9d 100644 --- a/nixops/resources/vpc_route_table.py +++ b/nixops/resources/vpc_route_table.py @@ -10,7 +10,7 @@ from nixops.resources.ec2_common import EC2CommonState import nixops.ec2_utils from nixops.diff import Diff, Handler -from nixops.state import StateDict +from nixops.state.state_helper import StateDict class VPCRouteTableDefinition(nixops.resources.ResourceDefinition): """Definition of a VPC route table""" @@ -91,7 +91,7 @@ def realize_create_route_table(self, allow_recreate): self.log("creating route table in vpc {}".format(vpc_id)) route_table = self.get_client().create_route_table(VpcId=vpc_id) - with self.depl._db: + with self.depl._state.db: self.state = self.UP self._state['vpcId'] = vpc_id self._state['routeTableId'] = route_table['RouteTable']['RouteTableId'] 
@@ -122,7 +122,7 @@ def realize_propagate_vpn_gtws(self, allow_recreate): GatewayId=vgw, RouteTableId=self._state['routeTableId']) - with self.depl._db: + with self.depl._state.db: self._state['propagatingVgws'] = new_vgws def realize_update_tag(self, allow_recreate): @@ -142,7 +142,7 @@ def _destroy(self): else: raise error - with self.depl._db: + with self.depl._state.db: self.state = self.MISSING self._state['vpcId'] = None self._state['routeTableId'] = None diff --git a/nixops/resources/vpc_route_table_association.py b/nixops/resources/vpc_route_table_association.py index 8b71ea2ff..f4b5d14b6 100644 --- a/nixops/resources/vpc_route_table_association.py +++ b/nixops/resources/vpc_route_table_association.py @@ -10,7 +10,7 @@ from nixops.resources.ec2_common import EC2CommonState import nixops.ec2_utils from nixops.diff import Diff, Handler -from nixops.state import StateDict +from nixops.state.state_helper import StateDict class VPCRouteTableAssociationDefinition(nixops.resources.ResourceDefinition): """Definition of a VPC route table association""" @@ -91,7 +91,7 @@ def realize_associate_route_table(self, allow_recreate): association = self.get_client().associate_route_table(RouteTableId=route_table_id, SubnetId=subnet_id) - with self.depl._db: + with self.depl._state.db: self.state = self.UP self._state['routeTableId'] = route_table_id self._state['subnetId'] = subnet_id @@ -108,7 +108,7 @@ def _destroy(self): else: raise error - with self.depl._db: + with self.depl._state.db: self.state = self.MISSING self._state['routeTableId'] = None self._state['subnetId'] = None diff --git a/nixops/resources/vpc_subnet.py b/nixops/resources/vpc_subnet.py index 00a3723bd..016cab8fa 100644 --- a/nixops/resources/vpc_subnet.py +++ b/nixops/resources/vpc_subnet.py @@ -10,7 +10,6 @@ from nixops.resources.ec2_common import EC2CommonState import nixops.ec2_utils from nixops.diff import Diff, Handler -from nixops.state import StateDict class 
VPCSubnetDefinition(nixops.resources.ResourceDefinition): """Definition of a VPC subnet.""" @@ -114,7 +113,7 @@ def wait_for_subnet_available(self, subnet_id): raise Exception("couldn't find subnet {}, please run deploy with --check".format(subnet_id)) self.log_end(" done") - with self.depl._db: + with self.depl._state.db: self.state = self.UP def realize_create_subnet(self, allow_recreate): @@ -143,7 +142,7 @@ def realize_create_subnet(self, allow_recreate): self.subnet_id = subnet.get('SubnetId') self.zone = subnet.get('AvailabilityZone') - with self.depl._db: + with self.depl._state.db: self.state = self.STARTING self._state['subnetId'] = self.subnet_id self._state['cidrBlock'] = config['cidrBlock'] @@ -165,7 +164,7 @@ def realize_map_public_ip_on_launch(self, allow_recreate): MapPublicIpOnLaunch={'Value':config['mapPublicIpOnLaunch']}, SubnetId=self.subnet_id) - with self.depl._db: + with self.depl._state.db: self._state['mapPublicIpOnLaunch'] = config['mapPublicIpOnLaunch'] def realize_associate_ipv6_cidr_block(self, allow_recreate): @@ -181,7 +180,7 @@ def realize_associate_ipv6_cidr_block(self, allow_recreate): self.get_client().disassociate_subnet_cidr_block( AssociationId=self._state['associationId']) - with self.depl._db: + with self.depl._state.db: self._state["ipv6CidrBlock"] = config['ipv6CidrBlock'] if config['ipv6CidrBlock'] is not None: self._state['associationId'] = response['Ipv6CidrBlockAssociation']['AssociationId'] @@ -205,7 +204,7 @@ def _destroy(self): else: raise error - with self.depl._db: + with self.depl._state.db: self.state = self.MISSING self._state['subnetID'] = None self._state['region'] = None diff --git a/nixops/state.py b/nixops/state.py deleted file mode 100644 index 80cb8c740..000000000 --- a/nixops/state.py +++ /dev/null @@ -1,63 +0,0 @@ -import json -import collections - -import nixops.util - -class StateDict(collections.MutableMapping): - """ - An implementation of a MutableMapping container providing - a python dict like 
behavior for the NixOps state file. - """ - # TODO implement __repr__ for convenience e.g debuging the structure - def __init__(self, depl, id): - super(StateDict, self).__init__() - self._db = depl._db - self.id = id - - def __setitem__(self, key, value): - with self._db: - c = self._db.cursor() - if value == None: - c.execute("delete from ResourceAttrs where machine = ? and name = ?", - (self.id, key)) - else: - v = value - if isinstance(value, list): - v = json.dumps(value) - c.execute("insert or replace into ResourceAttrs(machine, name, value) values (?, ?, ?)", - (self.id, key, v)) - - def __getitem__(self, key): - with self._db: - c = self._db.cursor() - c.execute("select value from ResourceAttrs where machine = ? and name = ?", - (self.id, key)) - row = c.fetchone() - if row != None: - try: - return json.loads(row[0]) - except ValueError: - return row[0] - raise KeyError("couldn't find key {} in the state file".format(key)) - - def __delitem__(self, key): - with self._db: - c.execute("delete from ResourceAttrs where machine = ? and name = ?", (self.id, key)) - - def keys(self): - # Generally the list of keys per ResourceAttrs is relatively small - # so this should be also relatively fast. 
- _keys = [] - with self._db: - c = self._db.cursor() - c.execute("select name from ResourceAttrs where machine = ?", (self.id,)) - rows = c.fetchall() - for row in rows: - _keys.append(row[0]) - return _keys - - def __iter__(self): - return iter(self.keys()) - - def __len__(self): - return len(self.keys()) diff --git a/nixops/state/__init__.py b/nixops/state/__init__.py new file mode 100644 index 000000000..10a52c27d --- /dev/null +++ b/nixops/state/__init__.py @@ -0,0 +1,45 @@ +import os +import urlparse +import sys +import json_file +import sqlite_connector + +class WrongStateSchemeException(Exception): + pass + + +def get_default_state_file(): + home = os.environ.get("HOME", "") + "/.nixops" + if not os.path.exists(home): + old_home = os.environ.get("HOME", "") + "/.charon" + if os.path.exists(old_home): + sys.stderr.write("renaming {!r} to {!r}...\n".format(old_home, home)) + os.rename(old_home, home) + if os.path.exists(home + "/deployments.charon"): + os.rename(home + "/deployments.charon", home + "/deployments.nixops") + else: + os.makedirs(home, 0700) + return os.environ.get("NIXOPS_STATE", os.environ.get("CHARON_STATE", home + "/deployments.nixops")) + + +def open(url): + url_parsed = urlparse.urlparse(url) + scheme = url_parsed.scheme + ext = os.path.splitext(url)[1] + if scheme == "": + if ext == ".nixops": + scheme = "sqlite" + url = 'sqlite://' + url + elif ext == ".json": + scheme = "json" + + def raise_(ex): + raise ex + + switcher = { + "json": lambda(url): json_file.JsonFile(url), + "sqlite": lambda(url): sqlite_connector.SQLiteConnection(url) + } + + function = switcher.get(scheme, lambda(url): raise_(WrongStateSchemeException("Unknown state scheme! 
# -*- coding: utf-8 -*-

import nixops.deployment
import nixops.util
from nixops.state.state_helper import _subclasses
import os
import os.path
import sys
import threading
import fcntl
import re
import json
import copy

from uuid import uuid1 as gen_uuid


class TransactionalJsonFile:
    """
    Transactional access to a JSON file, with support for nested
    transactions.

    A transaction nesting level is tracked.  When the outermost
    transaction starts, the JSON file is flock()ed and read into memory.
    All modifications are kept in memory until the outermost context
    exits; the state is then written to a temporary file which is
    atomically renamed over the original to prevent partial writes.
    """

    # Implementation note: while self.nesting > 0, no write propagates
    # to disk.
    def __init__(self, db_file):
        # The lock file lives next to the database file so that
        # concurrent NixOps processes serialize on it.
        lock_file_path = re.sub(r"\.json$", ".lock", db_file)
        self._lock_file = open(lock_file_path, "w")
        # Don't keep the lock open in child processes.
        fcntl.fcntl(self._lock_file, fcntl.F_SETFD, fcntl.FD_CLOEXEC)

        self._db_file = db_file
        self.nesting = 0
        self.lock = threading.RLock()

        # Make sure that a JSON database file is in place.
        with self:
            pass

    def read(self):
        """Return the state: from disk outside a transaction, from the
        in-memory copy inside one."""
        if self.nesting == 0:
            with open(self._db_file, "r") as f:
                return json.load(f)
        assert self.nesting > 0
        return self._current_state

    # Implement Python's context management protocol so that "with db"
    # automatically commits or rolls back, at the *outermost* level only.
    def __enter__(self):
        self.lock.acquire()
        if self.nesting == 0:
            fcntl.flock(self._lock_file, fcntl.LOCK_EX)
            self._ensure_db_exists()
            self.must_rollback = False
            # NOTE: don't name this 'json' — that would shadow the module.
            state = self.read()
            self._backup_state = copy.deepcopy(state)
            self._current_state = copy.deepcopy(state)
        self.nesting = self.nesting + 1

    def __exit__(self, exception_type, exception_value, exception_traceback):
        if exception_type is not None: self.must_rollback = True
        self.nesting = self.nesting - 1
        assert self.nesting >= 0
        if self.nesting == 0:
            if self.must_rollback:
                self._rollback()
            else:
                self._commit()
            fcntl.flock(self._lock_file, fcntl.LOCK_UN)
        self.lock.release()

    def _rollback(self):
        # Discard the in-memory state; the file on disk is untouched.
        self._backup_state = None
        self._current_state = None

    def set(self, state):
        """Replace the in-memory state; only meaningful inside a transaction."""
        self._current_state = state

    def _commit(self):
        assert self.nesting == 0
        # Write to a temporary file, then atomically rename it over the
        # database file, so readers never observe a partial write.
        tmp_file = self._db_file + ".tmp"
        with open(tmp_file, "w") as f:
            json.dump(self._current_state, f, indent=2)
        os.rename(tmp_file, self._db_file)
        self._backup_state = None
        self._current_state = None

    def _ensure_db_exists(self):
        if not os.path.exists(self._db_file):
            initial_db = {
                "schemaVersion": 0,
                "deployments": {}
            }
            # Create the file with mode 0600: the state may contain secrets.
            # (open()'s third positional argument is the buffer size, not a
            # permission mode, so go through os.open() instead.)
            with os.fdopen(os.open(self._db_file, os.O_WRONLY | os.O_CREAT, 0o600), "w") as f:
                json.dump(initial_db, f)

    def schema_version(self):
        """Return the schema version stored in the file."""
        version = self.read().get("schemaVersion")
        if version is None:
            # A string is not raisable; use a proper exception.
            raise Exception("state file ‘{0}’ contains no schema version".format(self._db_file))
        return version


class JsonFile(object):
    """NixOps state file, backed by a transactional JSON file."""

    def __init__(self, json_file):
        self.file_path = json_file

        if os.path.splitext(json_file)[1] not in ['.json']:
            raise Exception("state file ‘{0}’ should have extension ‘.json’".format(json_file))

        self.db = TransactionalJsonFile(json_file)

        # Check that we're not using a too new DB schema version.
        with self.db:
            version = self.db.schema_version()
            if version > 0:
                raise Exception("this NixOps version is too old to deal with JSON schema version {0}".format(version))

    ###############################################################################################
    ## Deployment

    def query_deployments(self):
        """Return the UUIDs of all deployments in the database."""
        return self.db.read()["deployments"].keys()

    def get_all_deployments(self):
        """Return Deployment objects for every deployment in the database."""
        uuids = self.query_deployments()
        res = []
        for uuid in uuids:
            try:
                res.append(self.open_deployment(uuid=uuid))
            except nixops.deployment.UnknownBackend as e:
                sys.stderr.write("skipping deployment ‘{0}’: {1}\n".format(uuid, str(e)))
        return res

    def _find_deployment(self, uuid=None):
        """Resolve *uuid* (a UUID, a deployment name, or a UUID prefix) to a
        Deployment, or return None.  With no argument, succeeds only when
        there is exactly one deployment (matching the SQLite backend)."""
        all_deployments = self.db.read()["deployments"]

        if not all_deployments:
            return None

        if not uuid:
            # All deployments, named or not.
            found = list(all_deployments)
        else:
            found = [i for i in all_deployments if i == uuid]
            if not found:
                found = [i for i in all_deployments
                         if all_deployments[i]["attributes"].get("name") == uuid]
            if not found:
                # Try a UUID prefix match.
                found = [i for i in all_deployments if i.startswith(uuid)]

        if not found:
            return None

        if len(found) > 1:
            if uuid:
                raise Exception("state file contains multiple deployments with the same name, so you should specify one using its UUID")
            else:
                raise Exception("state file contains multiple deployments, so you should specify which one to use using ‘-d’, or set the environment variable NIXOPS_DEPLOYMENT")
        return nixops.deployment.Deployment(self, found[0], sys.stderr)

    def open_deployment(self, uuid=None):
        """Open an existing deployment."""
        deployment = self._find_deployment(uuid=uuid)
        if deployment: return deployment
        raise Exception("could not find specified deployment in state file ‘{0}’".format(self.file_path))

    def create_deployment(self, uuid=None):
        """Create a new deployment."""
        if not uuid:
            uuid = str(gen_uuid())
        with self.db:
            state = self.db.read()
            state["deployments"][uuid] = { "attributes": {}, "resources": {} }
            self.db.set(state)
        return nixops.deployment.Deployment(self, uuid, sys.stderr)

    def _delete_deployment(self, deployment_uuid):
        """NOTE: This is UNSAFE, it's guarded in nixops/deployment.py. Do not call this function except from there!"""
        with self.db:
            state = self.db.read()
            state["deployments"].pop(deployment_uuid, None)
            self.db.set(state)

    def clone_deployment(self, deployment_uuid):
        """Copy the attributes (but not the resources) of an existing
        deployment into a fresh one and return it."""
        with self.db:
            new_uuid = str(gen_uuid())
            state = self.db.read()

            cloned_attributes = copy.deepcopy(state["deployments"][deployment_uuid]["attributes"])
            state["deployments"][new_uuid] = {
                "attributes": cloned_attributes,
                "resources": {}
            }

            self.db.set(state)

        return self._find_deployment(new_uuid)

    def get_resources_for(self, deployment):
        """Get all the resources for a certain deployment."""
        resources = {}
        with self.db:
            state_resources = self.db.read()["deployments"][deployment.uuid]["resources"]
            for res_id, res in state_resources.items():
                r = self._create_state(deployment, res["type"], res["name"], res_id)
                resources[res["name"]] = r
        return resources

    def set_deployment_attrs(self, deployment_uuid, attrs):
        """Update deployment attributes in the state; a None value deletes
        the attribute."""
        with self.db:
            state = self.db.read()
            for n, v in attrs.iteritems():
                if v is None:
                    state["deployments"][deployment_uuid]["attributes"].pop(n, None)
                else:
                    state["deployments"][deployment_uuid]["attributes"][n] = v
            self.db.set(state)

    def del_deployment_attr(self, deployment_uuid, attr_name):
        with self.db:
            state = self.db.read()
            state["deployments"][deployment_uuid]["attributes"].pop(attr_name, None)
            self.db.set(state)

    def get_deployment_attr(self, deployment_uuid, name):
        """Get a deployment attribute from the state, or nixops.util.undefined
        if it is absent.  Presence is tested with 'in' so that falsy stored
        values ('', 0, False) are returned as-is."""
        with self.db:
            attributes = self.db.read()["deployments"][deployment_uuid]["attributes"]
            if name in attributes:
                return attributes[name]
            return nixops.util.undefined

    def get_all_deployment_attrs(self, deployment_uuid):
        with self.db:
            state = self.db.read()
            return copy.deepcopy(state["deployments"][deployment_uuid]["attributes"])

    def get_deployment_lock(self, deployment):
        """Return a context manager holding an exclusive inter-process lock
        for the given deployment."""
        lock_dir = os.environ.get("HOME", "") + "/.nixops/locks"
        if not os.path.exists(lock_dir): os.makedirs(lock_dir, 0o700)
        lock_file_path = lock_dir + "/" + deployment.uuid
        class DeploymentLock(object):
            def __init__(self, logger, path):
                self._lock_file_path = path
                self._logger = logger
                self._lock_file = None
            def __enter__(self):
                self._lock_file = open(self._lock_file_path, "w")
                fcntl.fcntl(self._lock_file, fcntl.F_SETFD, fcntl.FD_CLOEXEC)
                try:
                    # Try non-blocking first so we can tell the user why
                    # we're waiting.
                    fcntl.flock(self._lock_file, fcntl.LOCK_EX | fcntl.LOCK_NB)
                except IOError:
                    self._logger.log(
                        "waiting for exclusive deployment lock..."
                    )
                    fcntl.flock(self._lock_file, fcntl.LOCK_EX)
            def __exit__(self, exception_type, exception_value, exception_traceback):
                if self._lock_file:
                    self._lock_file.close()
        return DeploymentLock(deployment.logger, lock_file_path)

    ###############################################################################################
    ## Resources

    def create_resource(self, deployment, name, type):
        """Add a resource to a deployment and return its state object.
        Resource *names* must be unique within a deployment."""
        with self.db:
            state = self.db.read()
            resources = state["deployments"][deployment.uuid]["resources"]
            # Resources are keyed by id, so uniqueness must be checked
            # against the stored names, not the keys.
            if any(res["name"] == name for res in resources.values()):
                raise Exception("resource already exists in database!")
            id = str(gen_uuid())
            resources[id] = {
                "name": name,
                "type": type,
                "attributes": {}
            }
            self.db.set(state)
        return self._create_state(deployment, type, name, id)

    def delete_resource(self, deployment_uuid, res_id):
        with self.db:
            state = self.db.read()
            state["deployments"][deployment_uuid]["resources"].pop(res_id)
            self.db.set(state)

    def _rename_resource(self, deployment_uuid, resource_id, new_name):
        """NOTE: Invariants are checked in nixops/deployment.py#rename"""
        with self.db:
            state = self.db.read()
            state["deployments"][deployment_uuid]["resources"][resource_id]["name"] = new_name
            self.db.set(state)

    def set_resource_attrs(self, deployment_uuid, resource_id, attrs):
        """Update resource attributes; a None value deletes the attribute."""
        with self.db:
            state = self.db.read()
            resource_attrs = state["deployments"][deployment_uuid]["resources"][resource_id]["attributes"]
            for n, v in attrs.iteritems():
                if v is None:
                    resource_attrs.pop(n, None)
                else:
                    resource_attrs[n] = v
            self.db.set(state)

    def del_resource_attr(self, deployment_uuid, resource_id, name):
        with self.db:
            state = self.db.read()
            state["deployments"][deployment_uuid]["resources"][resource_id]["attributes"].pop(name, None)
            self.db.set(state)

    def get_resource_attr(self, deployment_uuid, resource_id, name):
        """Get a machine attribute from the state file, or
        nixops.util.undefined if absent (or stored as null)."""
        with self.db:
            state = self.db.read()
            resource_attrs = state["deployments"][deployment_uuid]["resources"][resource_id]["attributes"]
            res = resource_attrs.get(name)
            if res is not None: return res
            return nixops.util.undefined

    def get_all_resource_attrs(self, deployment_uuid, resource_id):
        with self.db:
            state = self.db.read()
            resource_attrs = state["deployments"][deployment_uuid]["resources"][resource_id]["attributes"]
            return copy.deepcopy(resource_attrs)

    ### STATE
    def _create_state(self, depl, type, name, id):
        """Create a resource state object of the desired type."""
        for cls in _subclasses(nixops.resources.ResourceState):
            if type == cls.get_type():
                return cls(depl, name, id)

        raise nixops.deployment.UnknownBackend("unknown resource type ‘{0}’".format(type))
# -*- coding: utf-8 -*-

import nixops.deployment
import nixops.util
from nixops.state.state_helper import _subclasses
import os
import os.path
import urlparse
import sys
import threading
import fcntl

import sqlite3


class Connection(sqlite3.Connection):
    """sqlite3 connection that creates the database file with mode 0600
    and supports *nested* "with" transactions."""

    def __init__(self, db_file, **kwargs):
        db_exists = os.path.exists(db_file)
        if not db_exists:
            # Create the file with restrictive permissions: the state may
            # contain secrets.
            os.fdopen(os.open(db_file, os.O_WRONLY | os.O_CREAT, 0o600), 'w').close()

        sqlite3.Connection.__init__(self, db_file, **kwargs)

        self.db_file = db_file
        self.nesting = 0
        self.lock = threading.RLock()

    # Implement Python's context management protocol so that "with db"
    # automatically commits or rolls back.  The difference with the
    # parent's "with" implementation is that we nest, i.e. a commit or
    # rollback is only done at the outer "with".
    def __enter__(self):
        self.lock.acquire()
        if self.nesting == 0:
            self.must_rollback = False
        self.nesting = self.nesting + 1
        sqlite3.Connection.__enter__(self)

    def __exit__(self, exception_type, exception_value, exception_traceback):
        if exception_type is not None: self.must_rollback = True
        self.nesting = self.nesting - 1
        assert self.nesting >= 0
        if self.nesting == 0:
            if self.must_rollback:
                try:
                    self.rollback()
                except sqlite3.ProgrammingError:
                    # The connection may already be closed.
                    pass
            else:
                sqlite3.Connection.__exit__(self, exception_type, exception_value, exception_traceback)
        self.lock.release()


class SQLiteConnection(object):
    """NixOps state file backed by SQLite."""

    current_schema = 3

    def __init__(self, db_file):
        # Accept both plain paths and URL-style state locations.
        url = urlparse.urlparse(db_file)
        self.db_file = url.netloc + url.path

        if os.path.splitext(db_file)[1] not in ['.nixops', '.charon']:
            raise Exception("state file ‘{0}’ should have extension ‘.nixops’".format(db_file))
        db = sqlite3.connect(self.db_file, timeout=60, check_same_thread=False, factory=Connection, isolation_level=None) # FIXME
        db.db_file = db_file

        db.execute("pragma journal_mode = wal")
        db.execute("pragma foreign_keys = 1")

        # FIXME: this is not actually transactional, because pysqlite (not
        # sqlite) does an implicit commit before "create table".
        with db:
            c = db.cursor()

            # Get the schema version.
            version = 0 # new database
            if self._table_exists(c, 'SchemaVersion'):
                c.execute("select version from SchemaVersion")
                version = c.fetchone()[0]
            elif self._table_exists(c, 'Deployments'):
                version = 1

            if version == self.current_schema:
                pass
            elif version == 0:
                self._create_schema(c)
            elif version < self.current_schema:
                if version <= 1: self._upgrade_1_to_2(c)
                if version <= 2: self._upgrade_2_to_3(c)
                c.execute("update SchemaVersion set version = ?", (self.current_schema,))
            else:
                raise Exception("this NixOps version is too old to deal with schema version {0}".format(version))

        self.db = db

    def close(self):
        self.db.close()

    def query_deployments(self):
        """Return the UUIDs of all deployments in the database."""
        c = self.db.cursor()
        c.execute("select uuid from Deployments")
        res = c.fetchall()
        return [x[0] for x in res]

    def get_all_deployments(self):
        """Return Deployment objects for every deployment in the database."""
        uuids = self.query_deployments()
        res = []
        for uuid in uuids:
            try:
                res.append(self.open_deployment(uuid=uuid))
            except nixops.deployment.UnknownBackend as e:
                sys.stderr.write("skipping deployment ‘{0}’: {1}\n".format(uuid, str(e)))
        return res

    def _find_deployment(self, uuid=None):
        c = self.db.cursor()
        if not uuid:
            c.execute("select uuid from Deployments")
        else:
            c.execute("select uuid from Deployments d where uuid = ? or exists (select 1 from DeploymentAttrs where deployment = d.uuid and name = 'name' and value = ?)", (uuid, uuid))
        res = c.fetchall()
        if len(res) == 0:
            if uuid:
                # try the prefix match
                c.execute("select uuid from Deployments where uuid glob ?", (uuid + '*', ))
                res = c.fetchall()
                if len(res) == 0:
                    return None
            else:
                return None
        if len(res) > 1:
            if uuid:
                raise Exception("state file contains multiple deployments with the same name, so you should specify one using its UUID")
            else:
                raise Exception("state file contains multiple deployments, so you should specify which one to use using ‘-d’, or set the environment variable NIXOPS_DEPLOYMENT")
        return nixops.deployment.Deployment(self, res[0][0], sys.stderr)

    def open_deployment(self, uuid=None):
        """Open an existing deployment."""
        deployment = self._find_deployment(uuid=uuid)
        if deployment: return deployment
        raise Exception("could not find specified deployment in state file ‘{0}’".format(self.db_file))

    def create_deployment(self, uuid=None):
        """Create a new deployment."""
        if not uuid:
            import uuid
            uuid = str(uuid.uuid1())
        with self.db:
            self.db.execute("insert into Deployments(uuid) values (?)", (uuid,))
        return nixops.deployment.Deployment(self, uuid, sys.stderr)

    def _table_exists(self, c, table):
        c.execute("select 1 from sqlite_master where name = ? and type='table'", (table,));
        return c.fetchone() != None

    def _delete_deployment(self, deployment_uuid):
        """NOTE: This is UNSAFE, it's guarded in nixops/deployment.py. Do not call this function except from there!"""
        with self.db:
            # Use a placeholder, never string interpolation, for SQL values.
            self.db.execute("delete from Deployments where uuid = ?", (deployment_uuid,))

    def clone_deployment(self, deployment_uuid):
        """Copy the attributes (but not the resources) of an existing
        deployment into a fresh one and return it."""
        with self.db:
            new = self.create_deployment()
            self.db.execute("insert into DeploymentAttrs (deployment, name, value) " +
                            "select ?, name, value from DeploymentAttrs where deployment = ?",
                            (new.uuid, deployment_uuid))
            new.configs_path = None
            return new

    def _create_schemaversion(self, c):
        c.execute(
            '''create table if not exists SchemaVersion(
                 version integer not null
               );''')

        c.execute("insert into SchemaVersion(version) values (?)", (self.current_schema,))

    def _create_schema(self, c):
        self._create_schemaversion(c)

        c.execute(
            '''create table if not exists Deployments(
                 uuid text primary key
               );''')

        c.execute(
            '''create table if not exists DeploymentAttrs(
                 deployment text not null,
                 name text not null,
                 value text not null,
                 primary key(deployment, name),
                 foreign key(deployment) references Deployments(uuid) on delete cascade
               );''')

        c.execute(
            '''create table if not exists Resources(
                 id integer primary key autoincrement,
                 deployment text not null,
                 name text not null,
                 type text not null,
                 foreign key(deployment) references Deployments(uuid) on delete cascade
               );''')

        c.execute(
            '''create table if not exists ResourceAttrs(
                 machine integer not null,
                 name text not null,
                 value text not null,
                 primary key(machine, name),
                 foreign key(machine) references Resources(id) on delete cascade
               );''')

    def _upgrade_1_to_2(self, c):
        sys.stderr.write("updating database schema from version 1 to 2...\n")
        self._create_schemaversion(c)

    def _upgrade_2_to_3(self, c):
        sys.stderr.write("updating database schema from version 2 to 3...\n")
        c.execute("alter table Machines rename to Resources")
        c.execute("alter table MachineAttrs rename to ResourceAttrs")

#   ###############################################################################################
#   ## Deployment

    def get_resources_for(self, deployment):
        """Get all the resources for a certain deployment"""
        with self.db:
            resources = {}
            rows = self.db.execute("select id, name, type from Resources where deployment = ?",
                                   (deployment.uuid,)).fetchall()
            for (id, name, type) in rows:
                r = self._create_state(deployment, type, name, id)
                resources[name] = r
            return resources

    def set_deployment_attrs(self, deployment_uuid, attrs):
        """Update deployment attributes in the state; a None value deletes
        the attribute."""
        with self.db:
            for name, value in attrs.iteritems():
                if value is None:
                    self.db.execute("delete from DeploymentAttrs where deployment = ? and name = ?",
                                    (deployment_uuid, name))
                else:
                    # (deployment, name) is the primary key, so "insert or
                    # replace" is an atomic upsert.
                    self.db.execute("insert or replace into DeploymentAttrs(deployment, name, value) values (?, ?, ?)",
                                    (deployment_uuid, name, value))

    def del_deployment_attr(self, deployment_uuid, attr_name):
        with self.db:
            self.db.execute("delete from DeploymentAttrs where deployment = ? and name = ?",
                            (deployment_uuid, attr_name))

    def get_deployment_attr(self, deployment_uuid, name):
        """Get a deployment attribute from the state, or
        nixops.util.undefined if absent."""
        with self.db:
            rows = self.db.execute("select value from DeploymentAttrs where deployment = ? and name = ?",
                                   (deployment_uuid, name)).fetchall()
            for row in rows:
                return row[0]
            return nixops.util.undefined

    def get_all_deployment_attrs(self, deployment_uuid):
        with self.db:
            rows = self.db.execute("select name, value from DeploymentAttrs where deployment = ?",
                                   (deployment_uuid,)).fetchall()
            return {row[0]: row[1] for row in rows}

    def get_deployment_lock(self, deployment):
        """Return a context manager holding an exclusive inter-process lock
        for the given deployment."""
        lock_dir = os.environ.get("HOME", "") + "/.nixops/locks"
        if not os.path.exists(lock_dir): os.makedirs(lock_dir, 0o700)
        lock_file_path = lock_dir + "/" + deployment.uuid
        class DeploymentLock(object):
            def __init__(self, logger, path):
                self._lock_file_path = path
                self._logger = logger
                self._lock_file = None
            def __enter__(self):
                self._lock_file = open(self._lock_file_path, "w")
                fcntl.fcntl(self._lock_file, fcntl.F_SETFD, fcntl.FD_CLOEXEC)
                try:
                    # Try non-blocking first so we can tell the user why
                    # we're waiting.
                    fcntl.flock(self._lock_file, fcntl.LOCK_EX | fcntl.LOCK_NB)
                except IOError:
                    self._logger.log(
                        "waiting for exclusive deployment lock..."
                    )
                    fcntl.flock(self._lock_file, fcntl.LOCK_EX)
            def __exit__(self, exception_type, exception_value, exception_traceback):
                if self._lock_file:
                    self._lock_file.close()
        return DeploymentLock(deployment.logger, lock_file_path)

#   ###############################################################################################
#   ## Resources

    def create_resource(self, deployment, name, type):
        """Add a resource to a deployment and return its state object.
        The check and the insert run inside one transaction."""
        with self.db:
            count = self.db.execute("select count(id) from Resources where deployment = ? and name = ?",
                                    (deployment.uuid, name)).fetchone()[0]

            if count != 0:
                raise Exception("resource already exists in database!")

            result = self.db.execute("insert into Resources(deployment, name, type) values (?, ?, ?)",
                                     (deployment.uuid, name, type))

            id = result.lastrowid
        r = self._create_state(deployment, type, name, id)
        return r

    def delete_resource(self, deployment_uuid, res_id):
        with self.db:
            self.db.execute("delete from Resources where deployment = ? and id = ?",
                            (deployment_uuid, res_id))

    def _rename_resource(self, deployment_uuid, resource_id, new_name):
        """NOTE: Invariants are checked in nixops/deployment.py#rename"""
        with self.db:
            self.db.execute("update Resources set name = ? where deployment = ? and id = ?",
                            (new_name, deployment_uuid, resource_id))

    def set_resource_attrs(self, _deployment_uuid, resource_id, attrs):
        """Update resource attributes; a None value deletes the attribute."""
        with self.db:
            for name, value in attrs.iteritems():
                if value is None:
                    self.db.execute("delete from ResourceAttrs where machine = ? and name = ?",
                                    (resource_id, name))
                else:
                    # (machine, name) is the primary key, so "insert or
                    # replace" is an atomic upsert.
                    self.db.execute("insert or replace into ResourceAttrs(machine, name, value) values (?, ?, ?)",
                                    (resource_id, name, value))

    def del_resource_attr(self, _deployment_uuid, resource_id, name):
        with self.db:
            self.db.execute("delete from ResourceAttrs where machine = ? and name = ?",
                            (resource_id, name))

    def get_resource_attr(self, _deployment_uuid, resource_id, name):
        """Get a machine attribute from the state file, or
        nixops.util.undefined if absent."""
        with self.db:
            rows = self.db.execute("select value from ResourceAttrs where machine = ? and name = ?",
                                   (resource_id, name)).fetchall()
            for row in rows:
                return row[0]
            return nixops.util.undefined

    def get_all_resource_attrs(self, deployment_uuid, resource_id):
        with self.db:
            rows = self.db.execute("select name, value from ResourceAttrs where machine = ?",
                                   (resource_id,)).fetchall()
            return {row[0]: row[1] for row in rows}

#   ### STATE

    def _create_state(self, depl, type, name, id):
        """Create a resource state object of the desired type."""
        for cls in _subclasses(nixops.resources.ResourceState):
            if type == cls.get_type():
                return cls(depl, name, id)

        raise nixops.deployment.UnknownBackend("unknown resource type ‘{0}’".format(type))
import json
import collections

import nixops.util


def _subclasses(cls):
    """Return the leaf subclasses of *cls* (or *cls* itself if it has
    no subclasses)."""
    sub = cls.__subclasses__()
    return [cls] if not sub else [g for s in sub for g in _subclasses(s)]


class StateDict(collections.MutableMapping):
    """
    An implementation of a MutableMapping container providing
    a python dict like behavior for the NixOps state file.

    Reads and writes go straight through to the deployment's state
    backend, keyed by (deployment uuid, resource id).
    """
    # TODO implement __repr__ for convenience e.g debuging the structure
    def __init__(self, depl, id):
        super(StateDict, self).__init__()
        self._state = depl._state
        self.uuid = depl.uuid
        self.id = id

    def __setitem__(self, key, value):
        self._state.set_resource_attrs(self.uuid, self.id, {key: value})

    def __getitem__(self, key):
        value = self._state.get_resource_attr(self.uuid, self.id, key)
        if value != nixops.util.undefined:
            # Stored values may be JSON-encoded; fall back to the raw
            # string when they are not.
            try:
                return json.loads(value)
            except ValueError:
                return value
        raise KeyError("couldn't find key {} in the state file".format(key))

    def __delitem__(self, key):
        self._state.del_resource_attr(self.uuid, self.id, key)

    def keys(self):
        # Generally the list of keys per ResourceAttrs is relatively small
        # so this should be also relatively fast.
        return self._state.get_all_resource_attrs(self.uuid, self.id).keys()

    def __iter__(self):
        return iter(self.keys())

    def __len__(self):
        return len(self.keys())
- def __enter__(self): - self.lock.acquire() - if self.nesting == 0: - self.must_rollback = False - self.nesting = self.nesting + 1 - sqlite3.Connection.__enter__(self) - - - def __exit__(self, exception_type, exception_value, exception_traceback): - if exception_type != None: self.must_rollback = True - self.nesting = self.nesting - 1 - assert self.nesting >= 0 - if self.nesting == 0: - if self.must_rollback: - try: - self.rollback() - except sqlite3.ProgrammingError: - pass - else: - sqlite3.Connection.__exit__(self, exception_type, exception_value, exception_traceback) - self.lock.release() - - -def get_default_state_file(): - home = os.environ.get("HOME", "") + "/.nixops" - if not os.path.exists(home): - old_home = os.environ.get("HOME", "") + "/.charon" - if os.path.exists(old_home): - sys.stderr.write("renaming ‘{0}’ to ‘{1}’...\n".format(old_home, home)) - os.rename(old_home, home) - if os.path.exists(home + "/deployments.charon"): - os.rename(home + "/deployments.charon", home + "/deployments.nixops") - else: - os.makedirs(home, 0700) - return os.environ.get("NIXOPS_STATE", os.environ.get("CHARON_STATE", home + "/deployments.nixops")) - - -class StateFile(object): - """NixOps state file.""" - - current_schema = 3 - - def __init__(self, db_file): - self.db_file = db_file - - if os.path.splitext(db_file)[1] not in ['.nixops', '.charon']: - raise Exception("state file ‘{0}’ should have extension ‘.nixops’".format(db_file)) - db = sqlite3.connect(db_file, timeout=60, check_same_thread=False, factory=Connection, isolation_level=None) # FIXME - db.db_file = db_file - - db.execute("pragma journal_mode = wal") - db.execute("pragma foreign_keys = 1") - - # FIXME: this is not actually transactional, because pysqlite (not - # sqlite) does an implicit commit before "create table". - with db: - c = db.cursor() - - # Get the schema version. 
- version = 0 # new database - if self._table_exists(c, 'SchemaVersion'): - c.execute("select version from SchemaVersion") - version = c.fetchone()[0] - elif self._table_exists(c, 'Deployments'): - version = 1 - - if version == self.current_schema: - pass - elif version == 0: - self._create_schema(c) - elif version < self.current_schema: - if version <= 1: self._upgrade_1_to_2(c) - if version <= 2: self._upgrade_2_to_3(c) - c.execute("update SchemaVersion set version = ?", (self.current_schema,)) - else: - raise Exception("this NixOps version is too old to deal with schema version {0}".format(version)) - - self._db = db - - def close(self): - self._db.close() - - def query_deployments(self): - """Return the UUIDs of all deployments in the database.""" - c = self._db.cursor() - c.execute("select uuid from Deployments") - res = c.fetchall() - return [x[0] for x in res] - - def get_all_deployments(self): - """Return Deployment objects for every deployment in the database.""" - uuids = self.query_deployments() - res = [] - for uuid in uuids: - try: - res.append(self.open_deployment(uuid=uuid)) - except nixops.deployment.UnknownBackend as e: - sys.stderr.write("skipping deployment ‘{0}’: {1}\n".format(uuid, str(e))) - return res - - def _find_deployment(self, uuid=None): - c = self._db.cursor() - if not uuid: - c.execute("select uuid from Deployments") - else: - c.execute("select uuid from Deployments d where uuid = ? 
or exists (select 1 from DeploymentAttrs where deployment = d.uuid and name = 'name' and value = ?)", (uuid, uuid)) - res = c.fetchall() - if len(res) == 0: - if uuid: - # try the prefix match - c.execute("select uuid from Deployments where uuid glob ?", (uuid + '*', )) - res = c.fetchall() - if len(res) == 0: - return None - else: - return None - if len(res) > 1: - if uuid: - raise Exception("state file contains multiple deployments with the same name, so you should specify one using its UUID") - else: - raise Exception("state file contains multiple deployments, so you should specify which one to use using ‘-d’, or set the environment variable NIXOPS_DEPLOYMENT") - return nixops.deployment.Deployment(self, res[0][0], sys.stderr) - - def open_deployment(self, uuid=None): - """Open an existing deployment.""" - deployment = self._find_deployment(uuid=uuid) - if deployment: return deployment - raise Exception("could not find specified deployment in state file ‘{0}’".format(self.db_file)) - - def create_deployment(self, uuid=None): - """Create a new deployment.""" - if not uuid: - import uuid - uuid = str(uuid.uuid1()) - with self._db: - self._db.execute("insert into Deployments(uuid) values (?)", (uuid,)) - return nixops.deployment.Deployment(self, uuid, sys.stderr) - - def _table_exists(self, c, table): - c.execute("select 1 from sqlite_master where name = ? 
and type='table'", (table,)); - return c.fetchone() != None - - def _create_schemaversion(self, c): - c.execute( - '''create table if not exists SchemaVersion( - version integer not null - );''') - - c.execute("insert into SchemaVersion(version) values (?)", (self.current_schema,)) - - def _create_schema(self, c): - self._create_schemaversion(c) - - c.execute( - '''create table if not exists Deployments( - uuid text primary key - );''') - - c.execute( - '''create table if not exists DeploymentAttrs( - deployment text not null, - name text not null, - value text not null, - primary key(deployment, name), - foreign key(deployment) references Deployments(uuid) on delete cascade - );''') - - c.execute( - '''create table if not exists Resources( - id integer primary key autoincrement, - deployment text not null, - name text not null, - type text not null, - foreign key(deployment) references Deployments(uuid) on delete cascade - );''') - - c.execute( - '''create table if not exists ResourceAttrs( - machine integer not null, - name text not null, - value text not null, - primary key(machine, name), - foreign key(machine) references Resources(id) on delete cascade - );''') - - def _upgrade_1_to_2(self, c): - sys.stderr.write("updating database schema from version 1 to 2...\n") - self._create_schemaversion(c) - - def _upgrade_2_to_3(self, c): - sys.stderr.write("updating database schema from version 2 to 3...\n") - c.execute("alter table Machines rename to Resources") - c.execute("alter table MachineAttrs rename to ResourceAttrs") - - diff --git a/nixops/util.py b/nixops/util.py index 60da9aba6..c0d399c01 100644 --- a/nixops/util.py +++ b/nixops/util.py @@ -242,13 +242,14 @@ def attr_property(name, default, type=str): """Define a property that corresponds to a value in the NixOps state file.""" def get(self): s = self._get_attr(name, default) + if s == undefined: if default != undefined: return copy.deepcopy(default) raise Exception("deployment attribute ‘{0}’ missing from 
state file".format(name)) if s == None: return None elif type is str: return s elif type is int: return int(s) - elif type is bool: return True if s == "1" else False + elif type is bool: return True if s == True or s == "1" else False elif type is 'json': return json.loads(s) else: assert False def set(self, x): diff --git a/release.nix b/release.nix index 0ae5deec6..098bd2628 100644 --- a/release.nix +++ b/release.nix @@ -91,8 +91,6 @@ rec { azure-mgmt-resource azure-mgmt-storage adal - # Go back to sqlite once Python 2.7.13 is released - pysqlite datadog digital-ocean ]; diff --git a/scripts/nixops b/scripts/nixops index 3d298fbd1..90fdf170b 100755 --- a/scripts/nixops +++ b/scripts/nixops @@ -5,7 +5,7 @@ from nixops import deployment from nixops.nix_expr import py2nix from nixops.parallel import MultipleExceptions, run_tasks -import nixops.statefile +import nixops.state import prettytable import argparse import os @@ -48,16 +48,16 @@ def sort_deployments(depls): # $NIXOPS_DEPLOYMENT. 
def one_or_all(): if args.all: - sf = nixops.statefile.StateFile(args.state_file) - return sf.get_all_deployments() + state = nixops.state.open(args.state_url) + return state.get_all_deployments() else: return [open_deployment()] def op_list_deployments(): - sf = nixops.statefile.StateFile(args.state_file) + state = nixops.state.open(args.state_url) tbl = create_table([("UUID", 'l'), ("Name", 'l'), ("Description", 'l'), ("# Machines", 'r'), ("Type", 'c')]) - for depl in sort_deployments(sf.get_all_deployments()): + for depl in sort_deployments(state.get_all_deployments()): tbl.add_row( [depl.uuid, depl.name or "(none)", depl.description, len(depl.machines), @@ -67,8 +67,8 @@ def op_list_deployments(): def open_deployment(): - sf = nixops.statefile.StateFile(args.state_file) - depl = sf.open_deployment(uuid=args.deployment) + state = nixops.state.open(args.state_url) + depl = state.open_deployment(uuid=args.deployment) depl.extra_nix_path = sum(args.nix_path or [], []) for (n, v) in args.nix_options or []: depl.extra_nix_flags.extend(["--option", n, v]) @@ -102,8 +102,8 @@ def modify_deployment(depl): def op_create(): - sf = nixops.statefile.StateFile(args.state_file) - depl = sf.create_deployment() + state = nixops.state.open(args.state_url) + depl = state.create_deployment() sys.stderr.write("created deployment ‘{0}’\n".format(depl.uuid)) modify_deployment(depl) if args.name or args.deployment: set_name(depl, args.name or args.deployment) @@ -190,10 +190,10 @@ def op_info(): ]) if args.all: - sf = nixops.statefile.StateFile(args.state_file) + state = nixops.state.open(args.state_url) if not args.plain: tbl = create_table([('Deployment', 'l')] + table_headers) - for depl in sort_deployments(sf.get_all_deployments()): + for depl in sort_deployments(state.get_all_deployments()): do_eval(depl) print_deployment(depl) if not args.plain: print tbl @@ -519,16 +519,16 @@ def op_export(): def op_import(): - sf = nixops.statefile.StateFile(args.state_file) - existing = 
set(sf.query_deployments()) + state = nixops.state.open(args.state_url) + existing = set(state.query_deployments()) dump = json.loads(sys.stdin.read()) for uuid, attrs in dump.iteritems(): if uuid in existing: raise Exception("state file already contains a deployment with UUID ‘{0}’".format(uuid)) - with sf._db: - depl = sf.create_deployment(uuid=uuid) + with state.db: + depl = state.create_deployment(uuid=uuid) depl.import_(attrs) sys.stderr.write("added deployment ‘{0}’\n".format(uuid)) @@ -723,8 +723,8 @@ subparsers = parser.add_subparsers(help='sub-command help') def add_subparser(name, help): subparser = subparsers.add_parser(name, help=help) - subparser.add_argument('--state', '-s', dest='state_file', metavar='FILE', - default=nixops.statefile.get_default_state_file(), help='path to state file') + subparser.add_argument('--state', '-s', dest='state_url', metavar='FILE', + default=os.environ.get("NIXOPS_STATE_URL", nixops.state.get_default_state_file()), help='URL that points to the state provider.') subparser.add_argument('--deployment', '-d', dest='deployment', metavar='UUID_OR_NAME', default=os.environ.get("NIXOPS_DEPLOYMENT", os.environ.get("CHARON_DEPLOYMENT", None)), help='UUID or symbolic name of the deployment') subparser.add_argument('--debug', action='store_true', help='enable debug output') diff --git a/setup.py b/setup.py index ed09010e7..ca08bbfb8 100644 --- a/setup.py +++ b/setup.py @@ -25,7 +25,7 @@ def run(self): author='Eelco Dolstra', author_email='eelco.dolstra@logicblox.com', scripts=['scripts/nixops'], - packages=['nixops', 'nixops.resources', 'nixops.backends'], + packages=['nixops', 'nixops.resources', 'nixops.backends', 'nixops.state'], package_data={'nixops': ['data/nixos-infect']}, cmdclass={'test': TestCommand} ) diff --git a/tests/__init__.py b/tests/__init__.py index 6bdd94ec8..7b760ec24 100644 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -3,17 +3,19 @@ import sys import threading from os import path -import nixops.statefile 
+import nixops.state _multiprocess_shared_ = True db_file = '%s/test.nixops' % (path.dirname(__file__)) +json_file = '%s/test.json' % (path.dirname(__file__)) def setup(): - nixops.statefile.StateFile(db_file).close() + state = nixops.state.open(db_file) + state.db.close() -def destroy(sf, uuid): - depl = sf.open_deployment(uuid) +def destroy(state, uuid): + depl = state.open_deployment(uuid) depl.logger.set_autoresponse("y") try: depl.clean_backups(keep=0) @@ -27,17 +29,17 @@ def destroy(sf, uuid): depl.logger.log("deployment ‘{0}’ destroyed".format(uuid)) def teardown(): - sf = nixops.statefile.StateFile(db_file) - uuids = sf.query_deployments() + state = nixops.state.open(db_file) + uuids = state.query_deployments() threads = [] for uuid in uuids: - threads.append(threading.Thread(target=destroy,args=(sf, uuid))) + threads.append(threading.Thread(target=destroy,args=(state, uuid))) for thread in threads: thread.start() for thread in threads: thread.join() - uuids_left = sf.query_deployments() - sf.close() + uuids_left = state.query_deployments() + state.close() if not uuids_left: os.remove(db_file) else: diff --git a/tests/functional/__init__.py b/tests/functional/__init__.py index 7152f23e8..3c5d0c540 100644 --- a/tests/functional/__init__.py +++ b/tests/functional/__init__.py @@ -1,14 +1,24 @@ import os from os import path -import nixops.statefile +import nixops.state from tests import db_file - +from tests import json_file class DatabaseUsingTest(object): _multiprocess_can_split_ = True def setup(self): - self.sf = nixops.statefile.StateFile(db_file) + self.state = nixops.state.open(db_file) + + def teardown(self): + self.state.close() + + +class JSONUsingTest(object): + _multiprocess_can_split_ = True + + def setup(self): + self.state = nixops.state.open(json_file) def teardown(self): - self.sf.close() + self.state.close() diff --git a/tests/functional/generic_deployment_test.py b/tests/functional/generic_deployment_test.py index 92736370a..f40781423 100644 
--- a/tests/functional/generic_deployment_test.py +++ b/tests/functional/generic_deployment_test.py @@ -1,6 +1,5 @@ import os import subprocess -import nixops.statefile from nose import SkipTest @@ -9,5 +8,5 @@ class GenericDeploymentTest(DatabaseUsingTest): def setup(self): super(GenericDeploymentTest,self).setup() - self.depl = self.sf.create_deployment() + self.depl = self.state.create_deployment() self.depl.logger.set_autoresponse("y") diff --git a/tests/functional/generic_json_deployment_test.py b/tests/functional/generic_json_deployment_test.py new file mode 100644 index 000000000..ef6f3b04d --- /dev/null +++ b/tests/functional/generic_json_deployment_test.py @@ -0,0 +1,12 @@ +import os +import subprocess + +from nose import SkipTest + +from tests.functional import JSONUsingTest + +class GenericJsonDeploymentTest(JSONUsingTest): + def setup(self): + super(GenericJsonDeploymentTest,self).setup() + self.depl = self.state.create_deployment() + self.depl.logger.set_autoresponse("y") diff --git a/tests/functional/test_deleting_deletes.py b/tests/functional/test_deleting_deletes.py index df8c32bec..83ca9859f 100644 --- a/tests/functional/test_deleting_deletes.py +++ b/tests/functional/test_deleting_deletes.py @@ -7,4 +7,4 @@ class TestDeletingDeletes(single_machine_test.SingleMachineTest): def run_check(self): uuid = self.depl.uuid self.depl.delete() - tools.assert_raises(Exception, self.sf.open_deployment, (uuid,)) + tools.assert_raises(Exception, self.state.open_deployment, (uuid,)) diff --git a/tests/functional/test_query_deployments.py b/tests/functional/test_query_deployments.py index 88bc08760..c045586dc 100644 --- a/tests/functional/test_query_deployments.py +++ b/tests/functional/test_query_deployments.py @@ -6,7 +6,7 @@ class TestQueryDeployments(DatabaseUsingTest): def test_shows_all_deployments(self): depls = [] for i in range(10): - depls.append(self.sf.create_deployment()) - uuids = self.sf.query_deployments() + 
depls.append(self.state.create_deployment()) + uuids = self.state.query_deployments() for depl in depls: tools.assert_true(any([ depl.uuid == uuid for uuid in uuids ])) diff --git a/tests/functional/vpc.py b/tests/functional/vpc.py index b6cdb0c1e..25e778d6f 100644 --- a/tests/functional/vpc.py +++ b/tests/functional/vpc.py @@ -20,6 +20,7 @@ def setup(self): self.depl.nix_exprs = [ base_spec ] self.exprs_dir = nixops.util.SelfDeletingDir(tempfile.mkdtemp("nixos-tests")) + def test_deploy_vpc(self): self.depl.deploy() vpc_resource = self.depl.get_typed_resource("vpc-test", "vpc")