diff --git a/README.rst b/README.rst
index ad0c61e6..52d93001 100644
--- a/README.rst
+++ b/README.rst
@@ -17,7 +17,7 @@ Microsoft SQL Server and Azure SQL Database.
 Features
 --------
 
-- Supports Django 2.0.8
+- Supports Django 2.1
 - Supports Microsoft SQL Server 2008/2008R2, 2012, 2014, 2016, 2017
   and Azure SQL Database
 - Passes most of the tests of the Django test suite
@@ -29,7 +29,7 @@ Features
 Dependencies
 ------------
 
-- Django 2.0.8
+- Django 2.1
 - pyodbc 3.0 or newer
 
 Installation
@@ -241,9 +241,9 @@ The following features are currently not supported:
 Notice
 ------
 
-This version of *django-pyodbc-azure* only supports Django 2.0.
+This version of *django-pyodbc-azure* only supports Django 2.1.
 If you want to use it on older versions of Django,
-specify an appropriate version number (1.11.x.x for Django 1.11)
+specify an appropriate version number (2.0.x.x for Django 2.0)
 at installation like this: ::
 
-    pip install "django-pyodbc-azure<2.0"
+    pip install "django-pyodbc-azure<2.1"
diff --git a/setup.py b/setup.py
index f9da6a5a..87105a16 100644
--- a/setup.py
+++ b/setup.py
@@ -9,7 +9,6 @@
     'Framework :: Django',
     'Programming Language :: Python',
     'Programming Language :: Python :: 3',
-    'Programming Language :: Python :: 3.4',
     'Programming Language :: Python :: 3.5',
     'Programming Language :: Python :: 3.6',
     'Programming Language :: Python :: 3.7',
@@ -18,7 +17,7 @@
 
 setup(
     name='django-pyodbc-azure',
-    version='2.0.8.0',
+    version='2.1.0.0',
     description='Django backend for Microsoft SQL Server and Azure SQL Database using pyodbc',
     long_description=open('README.rst').read(),
     author='Michiya Takahashi',
@@ -27,7 +26,7 @@
     license='BSD',
     packages=['sql_server', 'sql_server.pyodbc'],
     install_requires=[
-        'Django>=2.0.8,<2.1',
+        'Django>=2.1.0,<2.2',
         'pyodbc>=3.0',
     ],
     classifiers=CLASSIFIERS,
diff --git a/sql_server/pyodbc/base.py b/sql_server/pyodbc/base.py
index 7a4d7a59..aba99d74 100644
--- a/sql_server/pyodbc/base.py
+++ b/sql_server/pyodbc/base.py
@@ -8,7 +8,7 @@
 from django.core.exceptions import ImproperlyConfigured
 
 from django import VERSION
-if VERSION[:3] < (2,0,8) or VERSION[:2] >= (2,1):
+if VERSION[:3] < (2,1,0) or VERSION[:2] >= (2,2):
     raise ImproperlyConfigured("Django %d.%d.%d is not supported."
                                % VERSION[:3])
 try:
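Both the installer pin (``Django>=2.1.0,<2.2``) and the import-time guard now track the 2.1 series only. A quick, self-contained check of the guard's boundaries (version tuples are illustrative): ::

    >>> for v in [(2, 0, 8), (2, 1, 0), (2, 1, 5), (2, 2, 0)]:
    ...     print(v, not (v[:3] < (2, 1, 0) or v[:2] >= (2, 2)))
    (2, 0, 8) False
    (2, 1, 0) True
    (2, 1, 5) True
    (2, 2, 0) False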
diff --git a/sql_server/pyodbc/compiler.py b/sql_server/pyodbc/compiler.py
index 0c74e57d..92d53676 100644
--- a/sql_server/pyodbc/compiler.py
+++ b/sql_server/pyodbc/compiler.py
@@ -3,7 +3,9 @@
 
 from django.db.models.aggregates import Avg, Count, StdDev, Variance
 from django.db.models.expressions import Exists, OrderBy, Ref, Value
-from django.db.models.functions import ConcatPair, Greatest, Least, Length, StrIndex, Substr
+from django.db.models.functions import (
+    Chr, ConcatPair, Greatest, Least, Length, LPad, Repeat, RPad, StrIndex, Substr, Trim
+)
 from django.db.models.sql import compiler
 from django.db.transaction import TransactionManagementError
 from django.db.utils import DatabaseError, NotSupportedError
@@ -12,6 +14,9 @@
 def _as_sql_agv(self, compiler, connection):
     return self.as_sql(compiler, connection, template='%(function)s(CONVERT(float, %(field)s))')
 
+def _as_sql_chr(self, compiler, connection):
+    return self.as_sql(compiler, connection, function='NCHAR')
+
 def _as_sql_concatpair(self, compiler, connection):
     if connection.sql_server_version < 2012:
         node = self.coalesce()
@@ -39,6 +44,23 @@ def _as_sql_least(self, compiler, connection):
 def _as_sql_length(self, compiler, connection):
     return self.as_sql(compiler, connection, function='LEN')
 
+def _as_sql_lpad(self, compiler, connection):
+    i = iter(self.get_source_expressions())
+    expression, expression_arg = compiler.compile(next(i))
+    length, length_arg = compiler.compile(next(i))
+    fill_text, fill_text_arg = compiler.compile(next(i))
+    params = []
+    params.extend(fill_text_arg)
+    params.extend(length_arg)
+    params.extend(length_arg)
+    params.extend(expression_arg)
+    params.extend(length_arg)
+    params.extend(expression_arg)
+    params.extend(expression_arg)
+    template = ('LEFT(REPLICATE(%(fill_text)s, %(length)s), CASE WHEN %(length)s > LEN(%(expression)s) '
+                'THEN %(length)s - LEN(%(expression)s) ELSE 0 END) + %(expression)s')
+    return template % {'expression': expression, 'length': length, 'fill_text': fill_text}, params
+
 def _as_sql_exists(self, compiler, connection, template=None, **extra_context):
     # MS SQL doesn't allow EXISTS() in the SELECT list, so wrap it with a
     # CASE WHEN expression. Change the template since the When expression
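T-SQL has no ``LPAD()`` built-in, so the helper above splices the padding together from ``REPLICATE()`` and ``LEN()``. A minimal sketch of what an annotation would compile to on this backend, assuming a hypothetical ``Author`` model with a ``name`` column: ::

    from django.db.models import Value
    from django.db.models.functions import LPad

    Author.objects.annotate(padded=LPad('name', Value(8), Value('*')))
    # Roughly renders as:
    #   LEFT(REPLICATE(%s, %s), CASE WHEN %s > LEN([name])
    #        THEN %s - LEN([name]) ELSE 0 END) + [name]
    # with params ['*', 8, 8, 8], matching the placeholder order built above
    # (the column expression itself contributes no parameters).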
@@ -55,6 +77,22 @@ def _as_sql_order_by(self, compiler, connection):
         template = 'CASE WHEN %(expression)s IS NULL THEN 0 ELSE 1 END, %(expression)s %(ordering)s'
     return self.as_sql(compiler, connection, template=template)
 
+def _as_sql_repeat(self, compiler, connection):
+    return self.as_sql(compiler, connection, function='REPLICATE')
+
+def _as_sql_rpad(self, compiler, connection):
+    i = iter(self.get_source_expressions())
+    expression, expression_arg = compiler.compile(next(i))
+    length, length_arg = compiler.compile(next(i))
+    fill_text, fill_text_arg = compiler.compile(next(i))
+    params = []
+    params.extend(expression_arg)
+    params.extend(fill_text_arg)
+    params.extend(length_arg)
+    params.extend(length_arg)
+    template = 'LEFT(%(expression)s + REPLICATE(%(fill_text)s, %(length)s), %(length)s)'
+    return template % {'expression': expression, 'length': length, 'fill_text': fill_text}, params
+
 def _as_sql_stddev(self, compiler, connection):
     function = 'STDEV'
     if self.function == 'STDDEV_POP':
@@ -72,6 +110,9 @@ def _as_sql_substr(self, compiler, connection):
         self.get_source_expressions().append(Value(2**31-1))
     return self.as_sql(compiler, connection)
 
+def _as_sql_trim(self, compiler, connection):
+    return self.as_sql(compiler, connection, template='LTRIM(RTRIM(%(expressions)s))')
+
 def _as_sql_variance(self, compiler, connection):
     function = 'VAR'
     if self.function == 'VAR_POP':
@@ -120,6 +161,8 @@ def as_sql(self, with_limits=True, with_col_aliases=False):
         try:
             extra_select, order_by, group_by = self.pre_sql_setup()
             for_update_part = None
+            # Is a LIMIT/OFFSET clause needed?
+            with_limit_offset = with_limits and (self.query.high_mark is not None or self.query.low_mark)
             combinator = self.query.combinator
             features = self.connection.features
 
@@ -138,7 +181,7 @@
                     raise NotSupportedError('{} is not supported on this database backend.'.format(combinator))
                 result, params = self.get_combinator_sql(combinator, self.query.combinator_all)
             else:
-                distinct_fields = self.get_distinct()
+                distinct_fields, distinct_params = self.get_distinct()
                 # This must come after 'select', 'ordering', and 'distinct' -- see
                 # docstring of get_from_clause() for details.
                 from_, f_params = self.get_from_clause()
@@ -148,7 +191,12 @@
             result = ['SELECT']
 
             if self.query.distinct:
-                result.append(self.connection.ops.distinct_sql(distinct_fields))
+                distinct_result, distinct_params = self.connection.ops.distinct_sql(
+                    distinct_fields,
+                    distinct_params,
+                )
+                result += distinct_result
+                params += distinct_params
 
             # SQL Server requires the keyword for limiting at the beginning
             if do_limit and not do_offset:
@@ -191,13 +239,11 @@
             elif not order_by:
                 order_by.append(((None, ('%s ASC' % offsetting_order_by, [], None))))
 
-            result.append(', '.join(out_cols))
-
             if self.query.select_for_update and self.connection.features.has_select_for_update:
                 if self.connection.get_autocommit():
                     raise TransactionManagementError('select_for_update cannot be used outside of a transaction.')
 
-                if with_limits and not self.connection.features.supports_select_for_update_with_limit:
+                if with_limit_offset and not self.connection.features.supports_select_for_update_with_limit:
                     raise NotSupportedError(
                         'LIMIT/OFFSET is not supported with '
                         'select_for_update on this database backend.'
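SQL Server gained a two-sided ``TRIM()`` only in the 2017 release, hence the nested ``LTRIM``/``RTRIM`` template above, which works on every supported version. A sketch using the same hypothetical ``Author`` model: ::

    from django.db.models.functions import Trim

    Author.objects.annotate(clean=Trim('name'))
    # On this backend the annotation renders as:
    #   LTRIM(RTRIM([name])) AS [clean]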
@@ -223,8 +269,7 @@ def as_sql(self, with_limits=True, with_col_aliases=False):
             if for_update_part and self.connection.features.for_update_after_from:
                 from_.insert(1, for_update_part)
 
-            result.append('FROM')
-            result.extend(from_)
+            result += [', '.join(out_cols), 'FROM', *from_]
             params.extend(f_params)
 
             if where:
@@ -237,16 +282,20 @@
                 params.extend(g_params)
                 if grouping:
                     if distinct_fields:
-                        raise NotImplementedError(
-                            "annotate() + distinct(fields) is not implemented.")
-                    if not order_by:
-                        order_by = self.connection.ops.force_no_ordering()
+                        raise NotImplementedError('annotate() + distinct(fields) is not implemented.')
+                    order_by = order_by or self.connection.ops.force_no_ordering()
                     result.append('GROUP BY %s' % ', '.join(grouping))
 
             if having:
                 result.append('HAVING %s' % having)
                 params.extend(h_params)
 
+            if self.query.explain_query:
+                result.insert(0, self.connection.ops.explain_query_prefix(
+                    self.query.explain_format,
+                    **self.query.explain_options
+                ))
+
             if order_by:
                 ordering = []
                 for _, (o_sql, o_params, _) in order_by:
@@ -269,9 +318,7 @@
                 if not self.query.subquery:
                     result.append('ORDER BY X.rn')
             else:
-                result.append('OFFSET %d ROWS' % low_mark)
-                if do_limit:
-                    result.append('FETCH FIRST %d ROWS ONLY' % (high_mark - low_mark))
+                result.append(self.connection.ops.limit_offset_sql(self.query.low_mark, self.query.high_mark))
 
             if self.query.subquery and extra_select:
                 # If the query is used as a subquery, the extra selects would
@@ -313,6 +360,8 @@ def _as_microsoft(self, node):
     as_microsoft = None
     if isinstance(node, Avg):
         as_microsoft = _as_sql_agv
+    elif isinstance(node, Chr):
+        as_microsoft = _as_sql_chr
     elif isinstance(node, ConcatPair):
         as_microsoft = _as_sql_concatpair
     elif isinstance(node, Count):
@@ -323,16 +372,24 @@ def _as_microsoft(self, node):
         as_microsoft = _as_sql_least
     elif isinstance(node, Length):
         as_microsoft = _as_sql_length
+    elif isinstance(node, RPad):
+        as_microsoft = _as_sql_rpad
+    elif isinstance(node, LPad):
+        as_microsoft = _as_sql_lpad
     elif isinstance(node, Exists):
         as_microsoft = _as_sql_exists
     elif isinstance(node, OrderBy):
         as_microsoft = _as_sql_order_by
+    elif isinstance(node, Repeat):
+        as_microsoft = _as_sql_repeat
     elif isinstance(node, StdDev):
         as_microsoft = _as_sql_stddev
     elif isinstance(node, StrIndex):
         as_microsoft = _as_sql_strindex
     elif isinstance(node, Substr):
         as_microsoft = _as_sql_substr
+    elif isinstance(node, Trim):
+        as_microsoft = _as_sql_trim
     elif isinstance(node, Variance):
         as_microsoft = _as_sql_variance
     if as_microsoft:
@@ -349,11 +406,9 @@ def as_sql(self):
         qn = self.connection.ops.quote_name
         opts = self.query.get_meta()
         result = ['INSERT INTO %s' % qn(opts.db_table)]
+        fields = self.query.fields or [opts.pk]
 
-        has_fields = bool(self.query.fields)
-
-        if has_fields:
-            fields = self.query.fields
+        if self.query.fields:
             result.append('(%s)' % ', '.join(qn(f.column) for f in fields))
             values_format = 'VALUES (%s)'
             value_rows = [
@@ -370,7 +425,7 @@ def as_sql(self):
             # queries and generate their own placeholders. Doing that isn't
             # necessary and it should be possible to use placeholders and
             # expressions in bulk inserts too.
-            can_bulk = (not self.return_id and self.connection.features.has_bulk_insert) and has_fields
+            can_bulk = (not self.return_id and self.connection.features.has_bulk_insert) and self.query.fields
 
             placeholder_rows, param_rows = self.assemble_as_sql(fields, value_rows)
 
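The ``explain_query`` branch above wires up ``QuerySet.explain()``, new in Django 2.1: the backend's explain prefix is simply inserted ahead of the generated SELECT. Hypothetical usage, with the same imaginary model as before: ::

    # Prints the server's plan/description for the underlying query.
    print(Author.objects.filter(name__startswith='A').explain())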
@@ -390,7 +445,7 @@ def as_sql(self):
                 for p, vals in zip(placeholder_rows, param_rows)
             ]
 
-        if has_fields:
+        if self.query.fields:
             if opts.auto_field is not None:
                 # db_column is None if not explicitly specified by model field
                 auto_field_column = opts.auto_field.db_column or opts.auto_field.column
diff --git a/sql_server/pyodbc/features.py b/sql_server/pyodbc/features.py
index b6cac4e1..03153f85 100644
--- a/sql_server/pyodbc/features.py
+++ b/sql_server/pyodbc/features.py
@@ -2,7 +2,7 @@
 
 
 class DatabaseFeatures(BaseDatabaseFeatures):
-    allow_sliced_subqueries = False
+    allow_sliced_subqueries_with_in = False
     can_introspect_autofield = True
     can_introspect_small_integer_field = True
     can_return_id_from_insert = True
diff --git a/sql_server/pyodbc/introspection.py b/sql_server/pyodbc/introspection.py
index 218d0d99..82f4c910 100644
--- a/sql_server/pyodbc/introspection.py
+++ b/sql_server/pyodbc/introspection.py
@@ -1,12 +1,9 @@
-import warnings
-
 import pyodbc as Database
 
 from django.db.backends.base.introspection import (
     BaseDatabaseIntrospection, FieldInfo, TableInfo,
 )
 from django.db.models.indexes import Index
-from django.utils.deprecation import RemovedInDjango21Warning
 
 SQL_AUTOFIELD = -777555
 SQL_BIGAUTOFIELD = -777444
@@ -148,79 +145,6 @@ def get_relations(self, cursor, table_name):
         cursor.execute(sql, (table_name,))
         return dict([[item[0], (item[2], item[1])] for item in cursor.fetchall()])
 
-    def get_indexes(self, cursor, table_name):
-        """
-        Deprecated in Django 1.11, use get_constraints instead.
-        Returns a dictionary of fieldname -> infodict for the given table,
-        where each infodict is in the format:
-            {'primary_key': boolean representing whether it's the primary key,
-             'unique': boolean representing whether it's a unique index}
-
-        Only single-column indexes are introspected.
-        """
-        warnings.warn(
-            "get_indexes() is deprecated in favor of get_constraints().",
-            RemovedInDjango21Warning, stacklevel=2
-        )
-        # CONSTRAINT_COLUMN_USAGE: http://msdn2.microsoft.com/en-us/library/ms174431.aspx
-        # TABLE_CONSTRAINTS: http://msdn2.microsoft.com/en-us/library/ms181757.aspx
-
-        pk_uk_sql = """
-SELECT d.COLUMN_NAME, c.CONSTRAINT_TYPE FROM (
-SELECT a.CONSTRAINT_NAME, a.CONSTRAINT_TYPE, a.TABLE_SCHEMA
-FROM INFORMATION_SCHEMA.TABLE_CONSTRAINTS AS a
-INNER JOIN INFORMATION_SCHEMA.CONSTRAINT_COLUMN_USAGE AS b
-    ON a.CONSTRAINT_NAME = b.CONSTRAINT_NAME
-    AND a.TABLE_SCHEMA = b.TABLE_SCHEMA
-    AND a.TABLE_NAME = b.TABLE_NAME
-WHERE a.TABLE_SCHEMA = SCHEMA_NAME()
-  AND a.TABLE_NAME = %s
-  AND (CONSTRAINT_TYPE = 'PRIMARY KEY' OR CONSTRAINT_TYPE = 'UNIQUE')
-GROUP BY a.CONSTRAINT_TYPE, a.CONSTRAINT_NAME, a.TABLE_SCHEMA
-HAVING(COUNT(a.CONSTRAINT_NAME)) = 1) AS c
-INNER JOIN INFORMATION_SCHEMA.CONSTRAINT_COLUMN_USAGE AS d
-    ON c.CONSTRAINT_NAME = d.CONSTRAINT_NAME
-    AND c.TABLE_SCHEMA = d.TABLE_SCHEMA"""
-
-        field_names = [item[0] for item in self.get_table_description(cursor, table_name, identity_check=False)]
-        indexes, results = {}, {}
-        cursor.execute(pk_uk_sql, (table_name,))
-        data = cursor.fetchall()
-        if data:
-            results.update(data)
-
-        # non-unique, non-compound indexes, only in SS2005?
-        ix_sql = """
-SELECT DISTINCT c.name
-FROM sys.columns c
-INNER JOIN sys.index_columns ic
-    ON ic.object_id = c.object_id AND ic.column_id = c.column_id
-INNER JOIN sys.indexes ix
-    ON ix.object_id = ic.object_id AND ix.index_id = ic.index_id
-INNER JOIN sys.tables t
-    ON t.object_id = ix.object_id
-WHERE ix.object_id IN (
-    SELECT ix.object_id
-    FROM sys.indexes ix
-    GROUP BY ix.object_id, ix.index_id
-    HAVING count(1) = 1)
-AND ix.is_primary_key = 0
-AND ix.is_unique_constraint = 0
-AND t.schema_id = SCHEMA_ID()
-AND t.name = %s"""
-
-        cursor.execute(ix_sql, (table_name,))
-        for column in [r[0] for r in cursor.fetchall()]:
-            if column not in results:
-                results[column] = 'IX'
-
-        for field in field_names:
-            val = results.get(field, None)
-            if val:
-                indexes[field] = dict(primary_key=(val=='PRIMARY KEY'), unique=(val=='UNIQUE'))
-
-        return indexes
-
     def get_key_columns(self, cursor, table_name):
         """
         Returns a list of (column_name, referenced_table_name, referenced_column_name) for all
diff --git a/sql_server/pyodbc/operations.py b/sql_server/pyodbc/operations.py
index 408e3a35..7cfc31f9 100644
--- a/sql_server/pyodbc/operations.py
+++ b/sql_server/pyodbc/operations.py
@@ -127,6 +127,8 @@ def date_trunc_sql(self, lookup_type, field_name):
             return "CONVERT(datetime2, CONVERT(varchar, DATEPART(year, %s)) + '/' + CONVERT(varchar, 1+((DATEPART(quarter, %s)-1)*3)) + '/01')" % (field_name, field_name)
         if lookup_type == 'month':
             return "CONVERT(datetime2, CONVERT(varchar, DATEPART(year, %s)) + '/' + CONVERT(varchar, DATEPART(month, %s)) + '/01')" % (field_name, field_name)
+        if lookup_type == 'week':
+            return "DATEADD(DAY, (DATEPART(weekday, %s) + 5) %%%% 7 * -1, CONVERT(datetime2, CONVERT(varchar(12), %s, 112)))" % (field_name, field_name)
         if lookup_type == 'day':
             return "CONVERT(datetime2, CONVERT(varchar(12), %s, 112))" % field_name
 
@@ -147,7 +149,7 @@ def datetime_extract_sql(self, lookup_type, field_name, tzname):
     def datetime_trunc_sql(self, lookup_type, field_name, tzname):
         field_name = self._convert_field_to_tz(field_name, tzname)
         sql = ''
-        if lookup_type in ('year', 'quarter', 'month', 'day'):
+        if lookup_type in ('year', 'quarter', 'month', 'week', 'day'):
             sql = self.date_trunc_sql(lookup_type, field_name)
         elif lookup_type == 'hour':
             sql = "CONVERT(datetime2, SUBSTRING(CONVERT(varchar, %s, 20), 0, 14) + ':00:00')" % field_name
@@ -262,6 +264,14 @@ def regex_lookup(self, lookup_type):
         """
         raise NotImplementedError('SQL Server has no built-in regular expression support.')
 
+    def limit_offset_sql(self, low_mark, high_mark):
+        """Return LIMIT/OFFSET SQL clause."""
+        limit, offset = self._get_limit_offset_params(low_mark, high_mark)
+        return '%s%s' % (
+            (' OFFSET %d ROWS' % offset) if offset else '',
+            (' FETCH FIRST %d ROWS ONLY' % limit) if limit else '',
+        )
+
     def last_executed_query(self, cursor, sql, params):
         """
         Returns a string of the query last executed by the given cursor, with
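``limit_offset_sql()`` implements the hook that Django 2.1's ``SQLCompiler`` now calls instead of assembling OFFSET/FETCH inline (see the ``@@ -269,9`` hunk in compiler.py above); ``_get_limit_offset_params()`` is inherited from the base operations class. A sketch of the output for a slice such as ``qs[5:15]``, where ``ops`` stands for an instance of this ``DatabaseOperations`` class: ::

    ops.limit_offset_sql(5, 15)
    # -> ' OFFSET 5 ROWS FETCH FIRST 10 ROWS ONLY'
    # (offset = low_mark, limit = high_mark - low_mark)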
diff --git a/sql_server/pyodbc/schema.py b/sql_server/pyodbc/schema.py
index 5154c324..1999cd00 100644
--- a/sql_server/pyodbc/schema.py
+++ b/sql_server/pyodbc/schema.py
@@ -151,9 +151,9 @@ def _alter_field(self, model, old_field, new_field, old_type, new_type,
                 fks_dropped.add((old_field.column,))
                 self.execute(self._delete_constraint_sql(self.sql_delete_fk, model, fk_name))
         # Has unique been removed?
-        if old_field.unique and (not new_field.unique or (not old_field.primary_key and new_field.primary_key)):
+        if old_field.unique and (not new_field.unique or self._field_became_primary_key(old_field, new_field)):
             # Find the unique constraint for this field
-            constraint_names = self._constraint_names(model, [old_field.column], unique=True)
+            constraint_names = self._constraint_names(model, [old_field.column], unique=True, primary_key=False)
             if strict and len(constraint_names) != 1:
                 raise ValueError("Found wrong number (%s) of unique constraints for %s.%s" % (
                     len(constraint_names),
@@ -201,12 +201,11 @@ def _alter_field(self, model, old_field, new_field, old_type, new_type,
             # db_index=True.
             index_names = self._constraint_names(model, [old_field.column], index=True, type_=Index.suffix)
             for index_name in index_names:
-                if index_name in meta_index_names:
+                if index_name not in meta_index_names:
                     # The only way to check if an index was created with
                     # db_index=True or with Index(['field'], name='foo')
                     # is to look at its name (refs #28053).
-                    continue
-                self.execute(self._delete_constraint_sql(self.sql_delete_index, model, index_name))
+                    self.execute(self._delete_constraint_sql(self.sql_delete_index, model, index_name))
         # Change check constraints?
         if (old_db_params['check'] != new_db_params['check'] and old_db_params['check']) or (
             # SQL Server requires explicit deletion before altering column type with the same constraint
@@ -316,10 +315,11 @@ def _alter_field(self, model, old_field, new_field, old_type, new_type,
         if post_actions:
             for sql, params in post_actions:
                 self.execute(sql, params)
+        # If primary_key changed to False, delete the primary key constraint.
+        if old_field.primary_key and not new_field.primary_key:
+            self._delete_primary_key(model, strict)
         # Added a unique?
-        if (not old_field.unique and new_field.unique) or (
-            old_field.primary_key and not new_field.primary_key and new_field.unique
-        ):
+        if self._unique_should_be_added(old_field, new_field):
             self.execute(self._create_unique_sql(model, [new_field.column]))
         # Added an index?
         # constraint will no longer be used in lieu of an index. The following
@@ -364,11 +364,7 @@ def _alter_field(self, model, old_field, new_field, old_type, new_type,
         if old_field.primary_key and new_field.primary_key and old_type != new_type:
             rels_to_update.extend(_related_non_m2m_objects(old_field, new_field))
         # Changed to become primary key?
-        # Note that we don't detect unsetting of a PK, as we assume another field
-        # will always come along and replace it.
-        if not old_field.primary_key and new_field.primary_key:
-            # First, drop the old PK
-            self._delete_primary_key(model, strict)
+        if self._field_became_primary_key(old_field, new_field):
             # Make the new one
             self.execute(
                 self.sql_create_pk % {
@@ -740,7 +736,7 @@ def remove_field(self, model, field):
         # Reset connection if required
         if self.connection.features.connection_persists_old_columns:
             self.connection.close()
-        # Remove all deferred statements referencing the deleted table.
+        # Remove all deferred statements referencing the deleted column.
         for sql in list(self.deferred_sql):
             if isinstance(sql, Statement) and sql.references_column(model._meta.db_table, field.column):
                 self.deferred_sql.remove(sql)
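For reference, the two predicates the schema hunks now delegate to come from Django 2.1's base ``SchemaEditor``; their behavior matches the inline conditions they replace. A sketch, paraphrased from the deleted lines rather than quoted from Django: ::

    def _field_became_primary_key(self, old_field, new_field):
        # The field gained primary_key=True in this alteration.
        return not old_field.primary_key and new_field.primary_key

    def _unique_should_be_added(self, old_field, new_field):
        # Either unique was newly set, or the field kept unique=True
        # while losing its (implicitly unique) primary key status.
        return (not old_field.unique and new_field.unique) or (
            old_field.primary_key and not new_field.primary_key and new_field.unique
        )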