Dataset schema (12 columns):

  column            type     range / distinct values
  partition         string   3 distinct values
  func_name         string   1 to 134 chars
  docstring         string   1 to 46.9k chars
  path              string   4 to 223 chars
  original_string   string   75 to 104k chars
  code              string   75 to 104k chars
  docstring_tokens  list     1 to 1.97k items
  repo              string   7 to 55 chars
  language          string   1 distinct value (python)
  url               string   87 to 315 chars
  code_tokens       list     19 to 28.4k items
  sha               string   40 chars (fixed)
partition: valid
func_name: Uploader.__expect
path: nodemcu_uploader/uploader.py
repo: kmpm/nodemcu-uploader
language: python
url: https://github.com/kmpm/nodemcu-uploader/blob/557a25f37b1fb4e31a745719e237e42fff192834/nodemcu_uploader/uploader.py#L121-L148
sha: 557a25f37b1fb4e31a745719e237e42fff192834
docstring: Wait for exp to be returned from the NodeMCU, or time out.
code:
def __expect(self, exp='> ', timeout=None):
    """Wait for exp to be returned from the NodeMCU, or time out."""
    timeout_before = self._port.timeout
    timeout = timeout or self._timeout
    # do NOT set timeout on Windows
    if SYSTEM != 'Windows':
        # Checking for new data every 100us is fast enough
        if self._port.timeout != MINIMAL_TIMEOUT:
            self._port.timeout = MINIMAL_TIMEOUT

    end = time.time() + timeout

    # Finish as soon as either exp matches or we run out of time
    # (work like dump, but faster on success)
    data = ''
    while not data.endswith(exp) and time.time() <= end:
        data += self._port.read()

    log.debug('expect returned: `{0}`'.format(data))
    if time.time() > end:
        raise CommunicationTimeout('Timeout waiting for data', data)

    if not data.endswith(exp) and len(exp) > 0:
        raise BadResponseException('Bad response.', exp, data)

    if SYSTEM != 'Windows':
        self._port.timeout = timeout_before

    return data
partition: valid
func_name: Uploader.__write
path: nodemcu_uploader/uploader.py
repo: kmpm/nodemcu-uploader
language: python
url: https://github.com/kmpm/nodemcu-uploader/blob/557a25f37b1fb4e31a745719e237e42fff192834/nodemcu_uploader/uploader.py#L150-L158
sha: 557a25f37b1fb4e31a745719e237e42fff192834
docstring: Write data on the nodemcu port. If 'binary' is True the debug log will show the intended output as hex, otherwise as string.
code:
def __write(self, output, binary=False):
    """Write data on the nodemcu port. If 'binary' is True the debug log
    will show the intended output as hex, otherwise as string."""
    if not binary:
        log.debug('write: %s', output)
    else:
        log.debug('write binary: %s', hexify(output))
    self._port.write(output)
    self._port.flush()
partition: valid
func_name: Uploader.__exchange
path: nodemcu_uploader/uploader.py
repo: kmpm/nodemcu-uploader
language: python
url: https://github.com/kmpm/nodemcu-uploader/blob/557a25f37b1fb4e31a745719e237e42fff192834/nodemcu_uploader/uploader.py#L165-L169
sha: 557a25f37b1fb4e31a745719e237e42fff192834
docstring: Write output to the port and wait for response.
code:
def __exchange(self, output, timeout=None):
    """Write output to the port and wait for response"""
    self.__writeln(output)
    self._port.flush()
    return self.__expect(timeout=timeout or self._timeout)
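
Taken together, __writeln (not included in this excerpt, presumably line-based write), __expect and __exchange implement a prompt-driven REPL round trip: send one command, read until the Lua '> ' prompt or a deadline. A minimal standalone sketch of the same pattern over a raw pyserial port — the device path and command here are placeholders, not values from the source:

import time
import serial  # pyserial

def exchange(port, command, prompt='> ', timeout=2.0):
    """Send one line, then read until the prompt suffix or the deadline."""
    port.write((command + '\r\n').encode('utf-8'))
    port.flush()
    deadline = time.time() + timeout
    data = b''
    while not data.endswith(prompt.encode('utf-8')) and time.time() <= deadline:
        data += port.read(1)
    return data.decode('utf-8', errors='replace')

# hypothetical usage:
# with serial.Serial('/dev/ttyUSB0', 115200, timeout=0.01) as p:
#     print(exchange(p, 'print(node.heap())'))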
partition: valid
func_name: Uploader.close
path: nodemcu_uploader/uploader.py
repo: kmpm/nodemcu-uploader
language: python
url: https://github.com/kmpm/nodemcu-uploader/blob/557a25f37b1fb4e31a745719e237e42fff192834/nodemcu_uploader/uploader.py#L172-L182
sha: 557a25f37b1fb4e31a745719e237e42fff192834
docstring: Restores the nodemcu to default baudrate and then closes the port.
code:
def close(self):
    """Restores the nodemcu to default baudrate and then closes the port"""
    try:
        if self.baud != self.start_baud:
            self.__set_baudrate(self.start_baud)
        self._port.flush()
        self.__clear_buffers()
    except serial.serialutil.SerialException:
        pass
    log.debug('closing port')
    self._port.close()
partition: valid
func_name: Uploader.prepare
path: nodemcu_uploader/uploader.py
repo: kmpm/nodemcu-uploader
language: python
url: https://github.com/kmpm/nodemcu-uploader/blob/557a25f37b1fb4e31a745719e237e42fff192834/nodemcu_uploader/uploader.py#L185-L216
sha: 557a25f37b1fb4e31a745719e237e42fff192834
docstring: Uploads the protocol functions necessary to do binary chunked transfer.
code:
def prepare(self):
    """Uploads the protocol functions necessary to do binary chunked transfer."""
    log.info('Preparing esp for transfer.')

    for func in LUA_FUNCTIONS:
        detected = self.__exchange('print({0})'.format(func))
        if detected.find('function:') == -1:
            break
    else:
        log.info('Preparation already done. Not adding functions again.')
        return True

    functions = RECV_LUA + '\n' + SEND_LUA
    data = functions.format(baud=self._port.baudrate)
    # change any \r\n to just \n and split on that
    lines = data.replace('\r', '').split('\n')

    # remove some unnecessary spaces to conserve some bytes
    for line in lines:
        line = line.strip().replace(', ', ',').replace(' = ', '=')
        if len(line) == 0:
            continue
        resp = self.__exchange(line)
        # do some basic test of the result
        if ('unexpected' in resp) or ('stdin' in resp) or len(resp) > len(functions) + 10:
            log.error('error when preparing "%s"', resp)
            return False
    return True
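
The detection loop relies on Python's for/else idiom: the else branch runs only when the loop finished without hitting break, i.e. every helper already printed as a Lua function. A runnable demo of just that idiom (the helper names are illustrative, not the real LUA_FUNCTIONS):

# for/else: the else branch runs only if no `break` fired.
already_defined = {'recv': True, 'send': True, 'shafile': True}

for func in ('recv', 'send', 'shafile'):
    if not already_defined.get(func, False):
        break   # at least one helper is missing: upload needed
else:
    print('all helpers present; skipping upload')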
partition: valid
func_name: Uploader.download_file
path: nodemcu_uploader/uploader.py
repo: kmpm/nodemcu-uploader
language: python
url: https://github.com/kmpm/nodemcu-uploader/blob/557a25f37b1fb4e31a745719e237e42fff192834/nodemcu_uploader/uploader.py#L218-L242
sha: 557a25f37b1fb4e31a745719e237e42fff192834
docstring: Download a file from the device to the local filesystem.
code:
def download_file(self, filename):
    """Download a file from the device to the local filesystem."""
    res = self.__exchange('send("{filename}")'.format(filename=filename))
    if ('unexpected' in res) or ('stdin' in res):
        log.error('Unexpected error downloading file: %s', res)
        raise Exception('Unexpected error downloading file')

    # tell device we are ready to receive
    self.__write('C')
    # we should get a NUL terminated filename to start with
    sent_filename = self.__expect(NUL).strip()
    log.info('receiving ' + sent_filename)

    # ACK to start download
    self.__write(ACK, True)
    buf = ''
    data = ''
    chunk, buf = self.__read_chunk(buf)
    # read chunks until we get an empty one, which is the end
    while chunk != '':
        self.__write(ACK, True)
        data = data + chunk
        chunk, buf = self.__read_chunk(buf)
    return data
partition: valid
func_name: Uploader.read_file
path: nodemcu_uploader/uploader.py
repo: kmpm/nodemcu-uploader
language: python
url: https://github.com/kmpm/nodemcu-uploader/blob/557a25f37b1fb4e31a745719e237e42fff192834/nodemcu_uploader/uploader.py#L244-L260
sha: 557a25f37b1fb4e31a745719e237e42fff192834
docstring: Read data from the device into a local file.
code:
def read_file(self, filename, destination=''):
    """Read data from the device into a local file."""
    if not destination:
        destination = filename
    log.info('Transferring %s to %s', filename, destination)
    data = self.download_file(filename)

    # Just in case, the filename may contain a folder, so create it if needed.
    log.info(destination)
    if not os.path.exists(os.path.dirname(destination)):
        try:
            os.makedirs(os.path.dirname(destination))
        except OSError as e:  # Guard against race condition
            if e.errno != errno.EEXIST:
                raise

    with open(destination, 'w') as fil:
        fil.write(data)
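
The try/except errno.EEXIST guard is the classic race-free directory creation pattern for code that must also run on old Pythons. On Python 3.2+ the same behavior is available directly; a minimal sketch (the path is an example, not from the source):

import os

destination = 'backup/sub/init.lua'   # example path
dirname = os.path.dirname(destination)
if dirname:                           # skip bare filenames, where dirname is ''
    os.makedirs(dirname, exist_ok=True)   # race-free on Python 3.2+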
partition: valid
func_name: Uploader.write_file
path: nodemcu_uploader/uploader.py
repo: kmpm/nodemcu-uploader
language: python
url: https://github.com/kmpm/nodemcu-uploader/blob/557a25f37b1fb4e31a745719e237e42fff192834/nodemcu_uploader/uploader.py#L262-L304
sha: 557a25f37b1fb4e31a745719e237e42fff192834
docstring: Sends a file to the device using the transfer protocol.
code:
def write_file(self, path, destination='', verify='none'):
    """Sends a file to the device using the transfer protocol"""
    filename = os.path.basename(path)
    if not destination:
        destination = filename

    log.info('Transferring %s as %s', path, destination)
    self.__writeln("recv()")

    res = self.__expect('C> ')
    if not res.endswith('C> '):
        log.error('Error waiting for esp "%s"', res)
        raise CommunicationTimeout('Error waiting for device to start receiving', res)

    log.debug('sending destination filename "%s"', destination)
    self.__write(destination + '\x00', True)
    if not self.__got_ack():
        log.error('did not ack destination filename')
        raise NoAckException('Device did not ACK destination filename')

    content = from_file(path)

    log.debug('sending %d bytes in %s', len(content), filename)
    pos = 0
    chunk_size = 128
    while pos < len(content):
        rest = len(content) - pos
        if rest > chunk_size:
            rest = chunk_size

        data = content[pos:pos + rest]
        if not self.__write_chunk(data):
            resp = self.__expect()
            log.error('Bad chunk response "%s" %s', resp, hexify(resp))
            raise BadResponseException('Bad chunk response', ACK, resp)

        pos += chunk_size

    log.debug('sending zero block')
    # zero size block
    self.__write_chunk('')
    if verify != 'none':
        self.verify_file(path, destination, verify)
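
A minimal usage sketch, assuming a board on /dev/ttyUSB0 (the port name is a placeholder, and prepare() must have installed the Lua-side recv()/send() helpers first):

uploader = Uploader('/dev/ttyUSB0')          # default baudrate
if uploader.prepare():
    uploader.write_file('init.lua', verify='sha1')
uploader.close()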
partition: valid
func_name: Uploader.verify_file
path: nodemcu_uploader/uploader.py
repo: kmpm/nodemcu-uploader
language: python
url: https://github.com/kmpm/nodemcu-uploader/blob/557a25f37b1fb4e31a745719e237e42fff192834/nodemcu_uploader/uploader.py#L306-L335
sha: 557a25f37b1fb4e31a745719e237e42fff192834
docstring: Tries to verify that path has the same checksum as destination. Valid options for verify are 'raw', 'sha1' or 'none'.
code:
def verify_file(self, path, destination, verify='none'):
    """Tries to verify that path has the same checksum as destination.
    Valid options for verify are 'raw', 'sha1' or 'none'."""
    content = from_file(path)
    log.info('Verifying using %s...' % verify)
    if verify == 'raw':
        data = self.download_file(destination)
        if content != data:
            log.error('Raw verification failed.')
            raise VerificationError('Verification failed.')
        else:
            log.info('Verification successful. Contents are identical.')
    elif verify == 'sha1':
        # Calculate SHA1 on remote file. Extract just the hash from the result.
        data = self.__exchange('shafile("' + destination + '")').splitlines()[1]
        log.info('Remote SHA1: %s', data)

        # Calculate hash of local data
        filehashhex = hashlib.sha1(content.encode(ENCODING)).hexdigest()
        log.info('Local SHA1: %s', filehashhex)
        if data != filehashhex:
            log.error('SHA1 verification failed.')
            raise VerificationError('SHA1 Verification failed.')
        else:
            log.info('Verification successful. Checksums match')
    elif verify != 'none':
        raise Exception(verify + ' is not a valid verification method.')
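
The local side of the sha1 mode is plain hashlib; a runnable sketch (the contents are an example, and ENCODING is assumed to be utf-8 here):

import hashlib

content = 'print("hello")\n'   # example file contents
local_sha1 = hashlib.sha1(content.encode('utf-8')).hexdigest()
print(local_sha1)   # compared against the hash line of the remote shafile() output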
partition: valid
func_name: Uploader.exec_file
path: nodemcu_uploader/uploader.py
repo: kmpm/nodemcu-uploader
language: python
url: https://github.com/kmpm/nodemcu-uploader/blob/557a25f37b1fb4e31a745719e237e42fff192834/nodemcu_uploader/uploader.py#L337-L353
sha: 557a25f37b1fb4e31a745719e237e42fff192834
docstring: Execute the lines in the local file 'path'.
code:
def exec_file(self, path):
    """Execute the lines in the local file 'path'."""
    filename = os.path.basename(path)
    log.info('Execute %s', filename)

    content = from_file(path).replace('\r', '').split('\n')

    res = '> '
    for line in content:
        line = line.rstrip('\n')
        retlines = (res + self.__exchange(line)).splitlines()
        # Log all but the last line
        res = retlines.pop()
        for lin in retlines:
            log.info(lin)
    # last line
    log.info(res)
partition: valid
func_name: Uploader.__got_ack
path: nodemcu_uploader/uploader.py
repo: kmpm/nodemcu-uploader
language: python
url: https://github.com/kmpm/nodemcu-uploader/blob/557a25f37b1fb4e31a745719e237e42fff192834/nodemcu_uploader/uploader.py#L355-L360
sha: 557a25f37b1fb4e31a745719e237e42fff192834
docstring: Returns true if ACK is received.
code:
def __got_ack(self):
    """Returns true if ACK is received"""
    log.debug('waiting for ack')
    res = self._port.read(1)
    log.debug('ack read %s', hexify(res))
    return res == ACK
partition: valid
func_name: Uploader.write_lines
path: nodemcu_uploader/uploader.py
repo: kmpm/nodemcu-uploader
language: python
url: https://github.com/kmpm/nodemcu-uploader/blob/557a25f37b1fb4e31a745719e237e42fff192834/nodemcu_uploader/uploader.py#L362-L366
sha: 557a25f37b1fb4e31a745719e237e42fff192834
docstring: Write lines, one by one, separated by \n, to the device.
code:
def write_lines(self, data):
    """Write lines, one by one, separated by \n, to the device."""
    lines = data.replace('\r', '').split('\n')
    for line in lines:
        self.__exchange(line)
partition: valid
func_name: Uploader.__write_chunk
path: nodemcu_uploader/uploader.py
repo: kmpm/nodemcu-uploader
language: python
url: https://github.com/kmpm/nodemcu-uploader/blob/557a25f37b1fb4e31a745719e237e42fff192834/nodemcu_uploader/uploader.py#L368-L380
sha: 557a25f37b1fb4e31a745719e237e42fff192834
docstring: Formats and sends a chunk of data to the device according to the transfer protocol.
code:
def __write_chunk(self, chunk):
    """Formats and sends a chunk of data to the device according to the transfer protocol."""
    log.debug('writing %d bytes chunk', len(chunk))
    data = BLOCK_START + chr(len(chunk)) + chunk
    if len(chunk) < 128:
        padding = 128 - len(chunk)
        log.debug('pad with %d characters', padding)
        data = data + (' ' * padding)

    log.debug("packet size %d", len(data))
    self.__write(data)
    self._port.flush()
    return self.__got_ack()
partition: valid
func_name: Uploader.__read_chunk
path: nodemcu_uploader/uploader.py
repo: kmpm/nodemcu-uploader
language: python
url: https://github.com/kmpm/nodemcu-uploader/blob/557a25f37b1fb4e31a745719e237e42fff192834/nodemcu_uploader/uploader.py#L382-L406
sha: 557a25f37b1fb4e31a745719e237e42fff192834
docstring: Read a chunk of data.
code:
def __read_chunk(self, buf):
    """Read a chunk of data"""
    log.debug('reading chunk')
    timeout_before = self._port.timeout
    if SYSTEM != 'Windows':
        # Checking for new data every 100us is fast enough
        if self._port.timeout != MINIMAL_TIMEOUT:
            self._port.timeout = MINIMAL_TIMEOUT

    end = time.time() + timeout_before

    while len(buf) < 130 and time.time() <= end:
        buf = buf + self._port.read()

    if buf[0] != BLOCK_START or len(buf) < 130:
        log.debug('buffer binary: %s ', hexify(buf))
        raise Exception('Bad blocksize or start byte')

    if SYSTEM != 'Windows':
        self._port.timeout = timeout_before

    chunk_size = ord(buf[1])
    data = buf[2:chunk_size + 2]
    buf = buf[130:]
    return (data, buf)
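
__write_chunk and __read_chunk share a fixed 130-byte frame: one start byte, one length byte, then a 128-byte payload padded with spaces. A self-checking round-trip sketch of that framing — the BLOCK_START value is an assumption here, the real constant lives elsewhere in the package:

BLOCK_START = '\x01'   # assumed value for illustration

def build_frame(chunk):
    """Mirror of __write_chunk's framing: start byte, length byte, padded payload."""
    assert len(chunk) <= 128
    return BLOCK_START + chr(len(chunk)) + chunk + ' ' * (128 - len(chunk))

def parse_frame(frame):
    """Mirror of __read_chunk's slicing on a complete 130-byte frame."""
    assert len(frame) == 130 and frame[0] == BLOCK_START
    size = ord(frame[1])
    return frame[2:2 + size]

frame = build_frame('hello')
assert len(frame) == 130
assert parse_frame(frame) == 'hello'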
partition: valid
func_name: Uploader.file_list
path: nodemcu_uploader/uploader.py
repo: kmpm/nodemcu-uploader
language: python
url: https://github.com/kmpm/nodemcu-uploader/blob/557a25f37b1fb4e31a745719e237e42fff192834/nodemcu_uploader/uploader.py#L408-L418
sha: 557a25f37b1fb4e31a745719e237e42fff192834
docstring: List files on the device.
code:
def file_list(self):
    """List files on the device."""
    log.info('Listing files')
    res = self.__exchange(LIST_FILES)
    res = res.split('\r\n')
    # skip first and last lines
    res = res[1:-1]
    files = []
    for line in res:
        files.append(line.split('\t'))
    return files
partition: valid
func_name: Uploader.file_do
path: nodemcu_uploader/uploader.py
repo: kmpm/nodemcu-uploader
language: python
url: https://github.com/kmpm/nodemcu-uploader/blob/557a25f37b1fb4e31a745719e237e42fff192834/nodemcu_uploader/uploader.py#L420-L425
sha: 557a25f37b1fb4e31a745719e237e42fff192834
docstring: Execute a file on the device using 'dofile'.
code:
def file_do(self, filename):
    """Execute a file on the device using 'dofile'."""
    log.info('Executing ' + filename)
    res = self.__exchange('dofile("' + filename + '")')
    log.info(res)
    return res
partition: valid
func_name: Uploader.file_format
path: nodemcu_uploader/uploader.py
repo: kmpm/nodemcu-uploader
language: python
url: https://github.com/kmpm/nodemcu-uploader/blob/557a25f37b1fb4e31a745719e237e42fff192834/nodemcu_uploader/uploader.py#L427-L435
sha: 557a25f37b1fb4e31a745719e237e42fff192834
docstring: Formats the device filesystem.
code:
def file_format(self):
    """Formats the device filesystem."""
    log.info('Formatting, can take minutes depending on flash size...')
    res = self.__exchange('file.format()', timeout=300)
    if 'format done' not in res:
        log.error(res)
    else:
        log.info(res)
    return res
partition: valid
func_name: Uploader.file_print
path: nodemcu_uploader/uploader.py
repo: kmpm/nodemcu-uploader
language: python
url: https://github.com/kmpm/nodemcu-uploader/blob/557a25f37b1fb4e31a745719e237e42fff192834/nodemcu_uploader/uploader.py#L437-L442
sha: 557a25f37b1fb4e31a745719e237e42fff192834
docstring: Prints a file on the device to console.
code:
def file_print(self, filename):
    """Prints a file on the device to console"""
    log.info('Printing ' + filename)
    res = self.__exchange(PRINT_FILE.format(filename=filename))
    log.info(res)
    return res
partition: valid
func_name: Uploader.node_heap
path: nodemcu_uploader/uploader.py
repo: kmpm/nodemcu-uploader
language: python
url: https://github.com/kmpm/nodemcu-uploader/blob/557a25f37b1fb4e31a745719e237e42fff192834/nodemcu_uploader/uploader.py#L444-L449
sha: 557a25f37b1fb4e31a745719e237e42fff192834
docstring: Show device heap size.
code:
def node_heap(self):
    """Show device heap size"""
    log.info('Heap')
    res = self.__exchange('print(node.heap())')
    log.info(res)
    return int(res.split('\r\n')[1])
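
The int(res.split('\r\n')[1]) parse relies on the response shape: the first line is the echoed command, the second the heap value, then the prompt. A sketch with an assumed (not captured) response string:

res = 'print(node.heap())\r\n25360\r\n> '   # assumed shape: echo, value, prompt
print(int(res.split('\r\n')[1]))            # -> 25360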
partition: valid
func_name: Uploader.node_restart
path: nodemcu_uploader/uploader.py
repo: kmpm/nodemcu-uploader
language: python
url: https://github.com/kmpm/nodemcu-uploader/blob/557a25f37b1fb4e31a745719e237e42fff192834/nodemcu_uploader/uploader.py#L451-L456
sha: 557a25f37b1fb4e31a745719e237e42fff192834
docstring: Restarts device.
code:
def node_restart(self):
    """Restarts device"""
    log.info('Restart')
    res = self.__exchange('node.restart()')
    log.info(res)
    return res
partition: valid
func_name: Uploader.file_compile
path: nodemcu_uploader/uploader.py
repo: kmpm/nodemcu-uploader
language: python
url: https://github.com/kmpm/nodemcu-uploader/blob/557a25f37b1fb4e31a745719e237e42fff192834/nodemcu_uploader/uploader.py#L458-L464
sha: 557a25f37b1fb4e31a745719e237e42fff192834
docstring: Compiles a file specified by path on the device.
code:
def file_compile(self, path):
    """Compiles a file specified by path on the device"""
    log.info('Compile ' + path)
    cmd = 'node.compile("%s")' % path
    res = self.__exchange(cmd)
    log.info(res)
    return res
partition: valid
func_name: Uploader.file_remove
path: nodemcu_uploader/uploader.py
repo: kmpm/nodemcu-uploader
language: python
url: https://github.com/kmpm/nodemcu-uploader/blob/557a25f37b1fb4e31a745719e237e42fff192834/nodemcu_uploader/uploader.py#L466-L472
sha: 557a25f37b1fb4e31a745719e237e42fff192834
docstring: Removes a file on the device.
code:
def file_remove(self, path):
    """Removes a file on the device"""
    log.info('Remove ' + path)
    cmd = 'file.remove("%s")' % path
    res = self.__exchange(cmd)
    log.info(res)
    return res
partition: valid
func_name: Uploader.backup
path: nodemcu_uploader/uploader.py
repo: kmpm/nodemcu-uploader
language: python
url: https://github.com/kmpm/nodemcu-uploader/blob/557a25f37b1fb4e31a745719e237e42fff192834/nodemcu_uploader/uploader.py#L474-L482
sha: 557a25f37b1fb4e31a745719e237e42fff192834
docstring: Backup all files from the device.
code:
def backup(self, path):
    """Backup all files from the device"""
    log.info('Backing up in ' + path)
    # List the files to back up
    files = self.file_list()
    # then download each of them
    self.prepare()
    for f in files:
        self.read_file(f[0], os.path.join(path, f[0]))
partition: valid
func_name: destination_from_source
path: nodemcu_uploader/main.py
repo: kmpm/nodemcu-uploader
language: python
url: https://github.com/kmpm/nodemcu-uploader/blob/557a25f37b1fb4e31a745719e237e42fff192834/nodemcu_uploader/main.py#L20-L45
sha: 557a25f37b1fb4e31a745719e237e42fff192834
docstring: Split each of the sources in the array on ':'. The first part is the source, the second the destination. Returns a pair [sources, destinations].
code:
def destination_from_source(sources, use_glob=True):
    """Split each of the sources in the array on ':'.
    The first part is the source, the second the destination.
    Returns a pair [sources, destinations]."""
    destinations = []
    newsources = []

    for i in range(0, len(sources)):
        srcdst = sources[i].split(':')
        if len(srcdst) == 2:
            destinations.append(srcdst[1])
            newsources.append(srcdst[0])  # proper list assignment
        else:
            if use_glob:
                listing = glob.glob(srcdst[0])
                for filename in listing:
                    newsources.append(filename)
                    # always use forward slash at destination
                    destinations.append(filename.replace('\\', '/'))
            else:
                newsources.append(srcdst[0])
                destinations.append(srcdst[0])
    return [newsources, destinations]
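
A usage sketch of the splitting behavior (the filenames are examples); note that because the split is on ':', a Windows drive-letter path such as 'C:\foo.lua' would also be treated as a src:dst pair:

srcs, dsts = destination_from_source(['init.lua:boot.lua', 'lib.lua'], use_glob=False)
print(srcs)   # ['init.lua', 'lib.lua']
print(dsts)   # ['boot.lua', 'lib.lua']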
partition: valid
func_name: operation_upload
path: nodemcu_uploader/main.py
repo: kmpm/nodemcu-uploader
language: python
url: https://github.com/kmpm/nodemcu-uploader/blob/557a25f37b1fb4e31a745719e237e42fff192834/nodemcu_uploader/main.py#L48-L72
sha: 557a25f37b1fb4e31a745719e237e42fff192834
docstring: The upload operation.
code:
def operation_upload(uploader, sources, verify, do_compile, do_file, do_restart):
    """The upload operation"""
    sources, destinations = destination_from_source(sources)
    if len(destinations) == len(sources):
        if uploader.prepare():
            for filename, dst in zip(sources, destinations):
                if do_compile:
                    uploader.file_remove(os.path.splitext(dst)[0] + '.lc')
                uploader.write_file(filename, dst, verify)
                # init.lua is not allowed to be compiled
                if do_compile and dst != 'init.lua':
                    uploader.file_compile(dst)
                    uploader.file_remove(dst)
                    if do_file:
                        uploader.file_do(os.path.splitext(dst)[0] + '.lc')
                elif do_file:
                    uploader.file_do(dst)
        else:
            raise Exception('Error preparing nodemcu for reception')
    else:
        raise Exception('You must specify a destination filename for each file you want to upload.')

    if do_restart:
        uploader.node_restart()
    log.info('All done!')
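
A hypothetical call illustrating the compile flow (uploader is assumed to be an already-connected Uploader instance):

operation_upload(uploader, ['foo.lua'], verify='none',
                 do_compile=True, do_file=False, do_restart=False)
# Net effect on the device: foo.lua is uploaded, compiled to foo.lc
# via node.compile(), and the .lua source is then removed.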
partition: valid
func_name: operation_download
path: nodemcu_uploader/main.py
repo: kmpm/nodemcu-uploader
language: python
url: https://github.com/kmpm/nodemcu-uploader/blob/557a25f37b1fb4e31a745719e237e42fff192834/nodemcu_uploader/main.py#L75-L86
sha: 557a25f37b1fb4e31a745719e237e42fff192834
docstring: The download operation.
code:
def operation_download(uploader, sources):
    """The download operation"""
    sources, destinations = destination_from_source(sources, False)
    print('sources', sources)
    print('destinations', destinations)
    if len(destinations) == len(sources):
        if uploader.prepare():
            for filename, dst in zip(sources, destinations):
                uploader.read_file(filename, dst)
    else:
        raise Exception('You must specify a destination filename for each file you want to download.')
    log.info('All done!')
partition: valid
func_name: operation_list
path: nodemcu_uploader/main.py
repo: kmpm/nodemcu-uploader
language: python
url: https://github.com/kmpm/nodemcu-uploader/blob/557a25f37b1fb4e31a745719e237e42fff192834/nodemcu_uploader/main.py#L88-L92
sha: 557a25f37b1fb4e31a745719e237e42fff192834
docstring: List files on the target.
code:
def operation_list(uploader):
    """List files on the target."""
    files = uploader.file_list()
    for f in files:
        log.info("{file:30s} {size}".format(file=f[0], size=f[1]))
partition: valid
func_name: operation_file
path: nodemcu_uploader/main.py
repo: kmpm/nodemcu-uploader
language: python
url: https://github.com/kmpm/nodemcu-uploader/blob/557a25f37b1fb4e31a745719e237e42fff192834/nodemcu_uploader/main.py#L94-L108
sha: 557a25f37b1fb4e31a745719e237e42fff192834
docstring: File operations.
code:
def operation_file(uploader, cmd, filename=''):
    """File operations"""
    if cmd == 'list':
        operation_list(uploader)
    if cmd == 'do':
        for path in filename:
            uploader.file_do(path)
    elif cmd == 'format':
        uploader.file_format()
    elif cmd == 'remove':
        for path in filename:
            uploader.file_remove(path)
    elif cmd == 'print':
        for path in filename:
            uploader.file_print(path)
valid
main_func
Main function for cli
nodemcu_uploader/main.py
def main_func(): """Main function for cli""" parser = argparse.ArgumentParser( description='NodeMCU Lua file uploader', prog='nodemcu-uploader' ) parser.add_argument( '--verbose', help='verbose output', action='store_true', default=False) parser.add_argument( '--version', help='prints the version and exits', action='version', version='%(prog)s {version} (serial {serialversion})'.format(version=__version__, serialversion=serialversion) ) parser.add_argument( '--port', '-p', help='Serial port device', default=Uploader.PORT) parser.add_argument( '--baud', '-b', help='Serial port baudrate', type=arg_auto_int, default=Uploader.BAUD) parser.add_argument( '--start_baud', '-B', help='Initial Serial port baudrate', type=arg_auto_int, default=Uploader.START_BAUD) parser.add_argument( '--timeout', '-t', help='Timeout for operations', type=arg_auto_int, default=Uploader.TIMEOUT) parser.add_argument( '--autobaud_time', '-a', help='Duration of the autobaud timer', type=float, default=Uploader.AUTOBAUD_TIME, ) subparsers = parser.add_subparsers( dest='operation', help='Run nodemcu-uploader {command} -h for additional help') backup_parser = subparsers.add_parser( 'backup', help='Backup all the files on the nodemcu board') backup_parser.add_argument('path', help='Folder where to store the backup') upload_parser = subparsers.add_parser( 'upload', help='Path to one or more files to be uploaded. Destination name will be the same as the file name.') upload_parser.add_argument( 'filename', nargs='+', help='Lua file to upload. Use colon to give alternate destination.' ) upload_parser.add_argument( '--compile', '-c', help='If file should be uploaded as compiled', action='store_true', default=False ) upload_parser.add_argument( '--verify', '-v', help='To verify the uploaded data.', action='store', nargs='?', choices=['none', 'raw', 'sha1'], default='none' ) upload_parser.add_argument( '--dofile', '-e', help='If file should be run after upload.', action='store_true', default=False ) upload_parser.add_argument( '--restart', '-r', help='If esp should be restarted', action='store_true', default=False ) exec_parser = subparsers.add_parser( 'exec', help='Path to one or more files to be executed line by line.') exec_parser.add_argument('filename', nargs='+', help='Lua file to execute.') download_parser = subparsers.add_parser( 'download', help='Path to one or more files to be downloaded. Destination name will be the same as the file name.') download_parser.add_argument('filename', nargs='+', help='Lua file to download. Use colon to give alternate destination.') file_parser = subparsers.add_parser( 'file', help='File functions') file_parser.add_argument( 'cmd', choices=('list', 'do', 'format', 'remove', 'print'), help="list=list files, do=dofile given path, format=format file area, remove=remove given path") file_parser.add_argument('filename', nargs='*', help='path for cmd') node_parse = subparsers.add_parser( 'node', help='Node functions') node_parse.add_argument('ncmd', choices=('heap', 'restart'), help="heap=print heap memory, restart=restart nodemcu") subparsers.add_parser( 'terminal', help='Run pySerials miniterm' ) args = parser.parse_args() default_level = logging.INFO if args.verbose: default_level = logging.DEBUG #formatter = logging.Formatter('%(message)s') logging.basicConfig(level=default_level, format='%(message)s') if args.operation == 'terminal': #uploader can not claim the port terminal(args.port, str(args.start_baud)) return # let uploader use the default (short) timeout for establishing connection uploader = Uploader(args.port, args.baud, start_baud=args.start_baud, autobaud_time=args.autobaud_time) # and reset the timeout (if we have the uploader&timeout) if args.timeout: uploader.set_timeout(args.timeout) if args.operation == 'upload': operation_upload(uploader, args.filename, args.verify, args.compile, args.dofile, args.restart) elif args.operation == 'download': operation_download(uploader, args.filename) elif args.operation == 'exec': sources = args.filename for path in sources: uploader.exec_file(path) elif args.operation == 'file': operation_file(uploader, args.cmd, args.filename) elif args.operation == 'node': if args.ncmd == 'heap': uploader.node_heap() elif args.ncmd == 'restart': uploader.node_restart() elif args.operation == 'backup': uploader.backup(args.path) #no uploader related commands after this point uploader.close()
def main_func(): """Main function for cli""" parser = argparse.ArgumentParser( description='NodeMCU Lua file uploader', prog='nodemcu-uploader' ) parser.add_argument( '--verbose', help='verbose output', action='store_true', default=False) parser.add_argument( '--version', help='prints the version and exits', action='version', version='%(prog)s {version} (serial {serialversion})'.format(version=__version__, serialversion=serialversion) ) parser.add_argument( '--port', '-p', help='Serial port device', default=Uploader.PORT) parser.add_argument( '--baud', '-b', help='Serial port baudrate', type=arg_auto_int, default=Uploader.BAUD) parser.add_argument( '--start_baud', '-B', help='Initial Serial port baudrate', type=arg_auto_int, default=Uploader.START_BAUD) parser.add_argument( '--timeout', '-t', help='Timeout for operations', type=arg_auto_int, default=Uploader.TIMEOUT) parser.add_argument( '--autobaud_time', '-a', help='Duration of the autobaud timer', type=float, default=Uploader.AUTOBAUD_TIME, ) subparsers = parser.add_subparsers( dest='operation', help='Run nodemcu-uploader {command} -h for additional help') backup_parser = subparsers.add_parser( 'backup', help='Backup all the files on the nodemcu board') backup_parser.add_argument('path', help='Folder where to store the backup') upload_parser = subparsers.add_parser( 'upload', help='Path to one or more files to be uploaded. Destination name will be the same as the file name.') upload_parser.add_argument( 'filename', nargs='+', help='Lua file to upload. Use colon to give alternate destination.' ) upload_parser.add_argument( '--compile', '-c', help='If file should be uploaded as compiled', action='store_true', default=False ) upload_parser.add_argument( '--verify', '-v', help='To verify the uploaded data.', action='store', nargs='?', choices=['none', 'raw', 'sha1'], default='none' ) upload_parser.add_argument( '--dofile', '-e', help='If file should be run after upload.', action='store_true', default=False ) upload_parser.add_argument( '--restart', '-r', help='If esp should be restarted', action='store_true', default=False ) exec_parser = subparsers.add_parser( 'exec', help='Path to one or more files to be executed line by line.') exec_parser.add_argument('filename', nargs='+', help='Lua file to execute.') download_parser = subparsers.add_parser( 'download', help='Path to one or more files to be downloaded. Destination name will be the same as the file name.') download_parser.add_argument('filename', nargs='+', help='Lua file to download. Use colon to give alternate destination.') file_parser = subparsers.add_parser( 'file', help='File functions') file_parser.add_argument( 'cmd', choices=('list', 'do', 'format', 'remove', 'print'), help="list=list files, do=dofile given path, format=format file area, remove=remove given path") file_parser.add_argument('filename', nargs='*', help='path for cmd') node_parse = subparsers.add_parser( 'node', help='Node functions') node_parse.add_argument('ncmd', choices=('heap', 'restart'), help="heap=print heap memory, restart=restart nodemcu") subparsers.add_parser( 'terminal', help='Run pySerials miniterm' ) args = parser.parse_args() default_level = logging.INFO if args.verbose: default_level = logging.DEBUG #formatter = logging.Formatter('%(message)s') logging.basicConfig(level=default_level, format='%(message)s') if args.operation == 'terminal': #uploader can not claim the port terminal(args.port, str(args.start_baud)) return # let uploader use the default (short) timeout for establishing connection uploader = Uploader(args.port, args.baud, start_baud=args.start_baud, autobaud_time=args.autobaud_time) # and reset the timeout (if we have the uploader&timeout) if args.timeout: uploader.set_timeout(args.timeout) if args.operation == 'upload': operation_upload(uploader, args.filename, args.verify, args.compile, args.dofile, args.restart) elif args.operation == 'download': operation_download(uploader, args.filename) elif args.operation == 'exec': sources = args.filename for path in sources: uploader.exec_file(path) elif args.operation == 'file': operation_file(uploader, args.cmd, args.filename) elif args.operation == 'node': if args.ncmd == 'heap': uploader.node_heap() elif args.ncmd == 'restart': uploader.node_restart() elif args.operation == 'backup': uploader.backup(args.path) #no uploader related commands after this point uploader.close()
[ "Main", "function", "for", "cli" ]
kmpm/nodemcu-uploader
python
https://github.com/kmpm/nodemcu-uploader/blob/557a25f37b1fb4e31a745719e237e42fff192834/nodemcu_uploader/main.py#L117-L301
[ "def", "main_func", "(", ")", ":", "parser", "=", "argparse", ".", "ArgumentParser", "(", "description", "=", "'NodeMCU Lua file uploader'", ",", "prog", "=", "'nodemcu-uploader'", ")", "parser", ".", "add_argument", "(", "'--verbose'", ",", "help", "=", "'verbose output'", ",", "action", "=", "'store_true'", ",", "default", "=", "False", ")", "parser", ".", "add_argument", "(", "'--version'", ",", "help", "=", "'prints the version and exists'", ",", "action", "=", "'version'", ",", "version", "=", "'%(prog)s {version} (serial {serialversion})'", ".", "format", "(", "version", "=", "__version__", ",", "serialversion", "=", "serialversion", ")", ")", "parser", ".", "add_argument", "(", "'--port'", ",", "'-p'", ",", "help", "=", "'Serial port device'", ",", "default", "=", "Uploader", ".", "PORT", ")", "parser", ".", "add_argument", "(", "'--baud'", ",", "'-b'", ",", "help", "=", "'Serial port baudrate'", ",", "type", "=", "arg_auto_int", ",", "default", "=", "Uploader", ".", "BAUD", ")", "parser", ".", "add_argument", "(", "'--start_baud'", ",", "'-B'", ",", "help", "=", "'Initial Serial port baudrate'", ",", "type", "=", "arg_auto_int", ",", "default", "=", "Uploader", ".", "START_BAUD", ")", "parser", ".", "add_argument", "(", "'--timeout'", ",", "'-t'", ",", "help", "=", "'Timeout for operations'", ",", "type", "=", "arg_auto_int", ",", "default", "=", "Uploader", ".", "TIMEOUT", ")", "parser", ".", "add_argument", "(", "'--autobaud_time'", ",", "'-a'", ",", "help", "=", "'Duration of the autobaud timer'", ",", "type", "=", "float", ",", "default", "=", "Uploader", ".", "AUTOBAUD_TIME", ",", ")", "subparsers", "=", "parser", ".", "add_subparsers", "(", "dest", "=", "'operation'", ",", "help", "=", "'Run nodemcu-uploader {command} -h for additional help'", ")", "backup_parser", "=", "subparsers", ".", "add_parser", "(", "'backup'", ",", "help", "=", "'Backup all the files on the nodemcu board'", ")", "backup_parser", ".", "add_argument", "(", "'path'", ",", "help", "=", "'Folder where to store the backup'", ")", "upload_parser", "=", "subparsers", ".", "add_parser", "(", "'upload'", ",", "help", "=", "'Path to one or more files to be uploaded. Destination name will be the same as the file name.'", ")", "upload_parser", ".", "add_argument", "(", "'filename'", ",", "nargs", "=", "'+'", ",", "help", "=", "'Lua file to upload. 
Use colon to give alternate destination.'", ")", "upload_parser", ".", "add_argument", "(", "'--compile'", ",", "'-c'", ",", "help", "=", "'If file should be uploaded as compiled'", ",", "action", "=", "'store_true'", ",", "default", "=", "False", ")", "upload_parser", ".", "add_argument", "(", "'--verify'", ",", "'-v'", ",", "help", "=", "'To verify the uploaded data.'", ",", "action", "=", "'store'", ",", "nargs", "=", "'?'", ",", "choices", "=", "[", "'none'", ",", "'raw'", ",", "'sha1'", "]", ",", "default", "=", "'none'", ")", "upload_parser", ".", "add_argument", "(", "'--dofile'", ",", "'-e'", ",", "help", "=", "'If file should be run after upload.'", ",", "action", "=", "'store_true'", ",", "default", "=", "False", ")", "upload_parser", ".", "add_argument", "(", "'--restart'", ",", "'-r'", ",", "help", "=", "'If esp should be restarted'", ",", "action", "=", "'store_true'", ",", "default", "=", "False", ")", "exec_parser", "=", "subparsers", ".", "add_parser", "(", "'exec'", ",", "help", "=", "'Path to one or more files to be executed line by line.'", ")", "exec_parser", ".", "add_argument", "(", "'filename'", ",", "nargs", "=", "'+'", ",", "help", "=", "'Lua file to execute.'", ")", "download_parser", "=", "subparsers", ".", "add_parser", "(", "'download'", ",", "help", "=", "'Path to one or more files to be downloaded. Destination name will be the same as the file name.'", ")", "download_parser", ".", "add_argument", "(", "'filename'", ",", "nargs", "=", "'+'", ",", "help", "=", "'Lua file to download. Use colon to give alternate destination.'", ")", "file_parser", "=", "subparsers", ".", "add_parser", "(", "'file'", ",", "help", "=", "'File functions'", ")", "file_parser", ".", "add_argument", "(", "'cmd'", ",", "choices", "=", "(", "'list'", ",", "'do'", ",", "'format'", ",", "'remove'", ",", "'print'", ")", ",", "help", "=", "\"list=list files, do=dofile given path, format=formate file area, remove=remove given path\"", ")", "file_parser", ".", "add_argument", "(", "'filename'", ",", "nargs", "=", "'*'", ",", "help", "=", "'path for cmd'", ")", "node_parse", "=", "subparsers", ".", "add_parser", "(", "'node'", ",", "help", "=", "'Node functions'", ")", "node_parse", ".", "add_argument", "(", "'ncmd'", ",", "choices", "=", "(", "'heap'", ",", "'restart'", ")", ",", "help", "=", "\"heap=print heap memory, restart=restart nodemcu\"", ")", "subparsers", ".", "add_parser", "(", "'terminal'", ",", "help", "=", "'Run pySerials miniterm'", ")", "args", "=", "parser", ".", "parse_args", "(", ")", "default_level", "=", "logging", ".", "INFO", "if", "args", ".", "verbose", ":", "default_level", "=", "logging", ".", "DEBUG", "#formatter = logging.Formatter('%(message)s')", "logging", ".", "basicConfig", "(", "level", "=", "default_level", ",", "format", "=", "'%(message)s'", ")", "if", "args", ".", "operation", "==", "'terminal'", ":", "#uploader can not claim the port", "terminal", "(", "args", ".", "port", ",", "str", "(", "args", ".", "start_baud", ")", ")", "return", "# let uploader user the default (short) timeout for establishing connection", "uploader", "=", "Uploader", "(", "args", ".", "port", ",", "args", ".", "baud", ",", "start_baud", "=", "args", ".", "start_baud", ",", "autobaud_time", "=", "args", ".", "autobaud_time", ")", "# and reset the timeout (if we have the uploader&timeout)", "if", "args", ".", "timeout", ":", "uploader", ".", "set_timeout", "(", "args", ".", "timeout", ")", "if", "args", ".", "operation", "==", "'upload'", ":", "operation_upload", "(", "uploader", 
",", "args", ".", "filename", ",", "args", ".", "verify", ",", "args", ".", "compile", ",", "args", ".", "dofile", ",", "args", ".", "restart", ")", "elif", "args", ".", "operation", "==", "'download'", ":", "operation_download", "(", "uploader", ",", "args", ".", "filename", ")", "elif", "args", ".", "operation", "==", "'exec'", ":", "sources", "=", "args", ".", "filename", "for", "path", "in", "sources", ":", "uploader", ".", "exec_file", "(", "path", ")", "elif", "args", ".", "operation", "==", "'file'", ":", "operation_file", "(", "uploader", ",", "args", ".", "cmd", ",", "args", ".", "filename", ")", "elif", "args", ".", "operation", "==", "'node'", ":", "if", "args", ".", "ncmd", "==", "'heap'", ":", "uploader", ".", "node_heap", "(", ")", "elif", "args", ".", "ncmd", "==", "'restart'", ":", "uploader", ".", "node_restart", "(", ")", "elif", "args", ".", "operation", "==", "'backup'", ":", "uploader", ".", "backup", "(", "args", ".", "path", ")", "#no uploader related commands after this point", "uploader", ".", "close", "(", ")" ]
557a25f37b1fb4e31a745719e237e42fff192834
valid
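main_func reads sys.argv via argparse, so it can be driven without a shell by patching the argument vector; a minimal sketch (the device path is a placeholder, the module path is taken from the record's url field):

import sys
from nodemcu_uploader.main import main_func

sys.argv = ['nodemcu-uploader', '--port', '/dev/ttyUSB0', 'file', 'list']
main_func()  # parses the patched argv, connects, lists files, then closes the port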
display
Display a widget, text or other media in a notebook without the need to import IPython at the top level. Also handles wrapping GenePattern Python Library content in widgets. :param content: :return:
genepattern/remote_widgets.py
def display(content): """ Display a widget, text or other media in a notebook without the need to import IPython at the top level. Also handles wrapping GenePattern Python Library content in widgets. :param content: :return: """ if isinstance(content, gp.GPServer): IPython.display.display(GPAuthWidget(content)) elif isinstance(content, gp.GPTask): IPython.display.display(GPTaskWidget(content)) elif isinstance(content, gp.GPJob): IPython.display.display(GPJobWidget(content)) else: IPython.display.display(content)
def display(content): """ Display a widget, text or other media in a notebook without the need to import IPython at the top level. Also handles wrapping GenePattern Python Library content in widgets. :param content: :return: """ if isinstance(content, gp.GPServer): IPython.display.display(GPAuthWidget(content)) elif isinstance(content, gp.GPTask): IPython.display.display(GPTaskWidget(content)) elif isinstance(content, gp.GPJob): IPython.display.display(GPJobWidget(content)) else: IPython.display.display(content)
[ "Display", "a", "widget", "text", "or", "other", "media", "in", "a", "notebook", "without", "the", "need", "to", "import", "IPython", "at", "the", "top", "level", "." ]
genepattern/genepattern-notebook
python
https://github.com/genepattern/genepattern-notebook/blob/953168bd08c5332412438cbc5bb59993a07a6911/genepattern/remote_widgets.py#L185-L200
[ "def", "display", "(", "content", ")", ":", "if", "isinstance", "(", "content", ",", "gp", ".", "GPServer", ")", ":", "IPython", ".", "display", ".", "display", "(", "GPAuthWidget", "(", "content", ")", ")", "elif", "isinstance", "(", "content", ",", "gp", ".", "GPTask", ")", ":", "IPython", ".", "display", ".", "display", "(", "GPTaskWidget", "(", "content", ")", ")", "elif", "isinstance", "(", "content", ",", "gp", ".", "GPJob", ")", ":", "IPython", ".", "display", ".", "display", "(", "GPJobWidget", "(", "content", ")", ")", "else", ":", "IPython", ".", "display", ".", "display", "(", "content", ")" ]
953168bd08c5332412438cbc5bb59993a07a6911
valid
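A sketch of how display dispatches on GenePattern types (server URL and credentials are placeholders):

import gp  # GenePattern Python Library, as used by the record above

session = gp.GPServer('https://cloud.genepattern.org/gp', 'myuser', 'mypass')
display(session)        # a GPServer is wrapped in a GPAuthWidget before display
display('plain text')   # anything else falls through to IPython.display.display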
SessionList.register
Register a new GenePattern server session for the provided server, username and password. Return the session. :param server: :param username: :param password: :return:
genepattern/remote_widgets.py
def register(self, server, username, password): """ Register a new GenePattern server session for the provided server, username and password. Return the session. :param server: :param username: :param password: :return: """ # Create the session session = gp.GPServer(server, username, password) # Validate username if not empty valid_username = username != "" and username is not None # Validate that the server is not already registered index = self._get_index(server) new_server = index == -1 # Add the new session to the list if valid_username and new_server: self.sessions.append(session) # Replace old session if one exists if valid_username and not new_server: self.sessions[index] = session return session
def register(self, server, username, password): """ Register a new GenePattern server session for the provided server, username and password. Return the session. :param server: :param username: :param password: :return: """ # Create the session session = gp.GPServer(server, username, password) # Validate username if not empty valid_username = username != "" and username is not None # Validate that the server is not already registered index = self._get_index(server) new_server = index == -1 # Add the new session to the list if valid_username and new_server: self.sessions.append(session) # Replace old session if one exists if valid_username and not new_server: self.sessions[index] = session return session
[ "Register", "a", "new", "GenePattern", "server", "session", "for", "the", "provided", "server", "username", "and", "password", ".", "Return", "the", "session", ".", ":", "param", "server", ":", ":", "param", "username", ":", ":", "param", "password", ":", ":", "return", ":" ]
genepattern/genepattern-notebook
python
https://github.com/genepattern/genepattern-notebook/blob/953168bd08c5332412438cbc5bb59993a07a6911/genepattern/remote_widgets.py#L23-L51
[ "def", "register", "(", "self", ",", "server", ",", "username", ",", "password", ")", ":", "# Create the session", "session", "=", "gp", ".", "GPServer", "(", "server", ",", "username", ",", "password", ")", "# Validate username if not empty", "valid_username", "=", "username", "!=", "\"\"", "and", "username", "is", "not", "None", "# Validate that the server is not already registered", "index", "=", "self", ".", "_get_index", "(", "server", ")", "new_server", "=", "index", "==", "-", "1", "# Add the new session to the list", "if", "valid_username", "and", "new_server", ":", "self", ".", "sessions", ".", "append", "(", "session", ")", "# Replace old session is one exists", "if", "valid_username", "and", "not", "new_server", ":", "self", ".", "sessions", "[", "index", "]", "=", "session", "return", "session" ]
953168bd08c5332412438cbc5bb59993a07a6911
valid
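A usage sketch for SessionList.register, assuming SessionList can be instantiated directly (credentials are placeholders):

sessions = SessionList()
first = sessions.register('https://cloud.genepattern.org/gp', 'myuser', 'mypass')
# registering the same server again replaces the stored session rather than appending
second = sessions.register('https://cloud.genepattern.org/gp', 'myuser', 'newpass')
assert sessions.get(0) is second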
SessionList.get
Returns a registered GPServer object with a matching GenePattern server url or index Returns None if no matching result was found :param server: :return:
genepattern/remote_widgets.py
def get(self, server): """ Returns a registered GPServer object with a matching GenePattern server url or index Returns None if no matching result was found :param server: :return: """ # Handle indexes if isinstance(server, int): if server >= len(self.sessions): return None else: return self.sessions[server] # Handle server URLs index = self._get_index(server) if index == -1: return None else: return self.sessions[index]
def get(self, server): """ Returns a registered GPServer object with a matching GenePattern server url or index Returns None if no matching result was found :param server: :return: """ # Handle indexes if isinstance(server, int): if server >= len(self.sessions): return None else: return self.sessions[server] # Handle server URLs index = self._get_index(server) if index == -1: return None else: return self.sessions[index]
[ "Returns", "a", "registered", "GPServer", "object", "with", "a", "matching", "GenePattern", "server", "url", "or", "index", "Returns", "None", "if", "no", "matching", "result", "was", "found", ":", "param", "server", ":", ":", "return", ":" ]
genepattern/genepattern-notebook
python
https://github.com/genepattern/genepattern-notebook/blob/953168bd08c5332412438cbc5bb59993a07a6911/genepattern/remote_widgets.py#L53-L73
[ "def", "get", "(", "self", ",", "server", ")", ":", "# Handle indexes", "if", "isinstance", "(", "server", ",", "int", ")", ":", "if", "server", ">=", "len", "(", "self", ".", "sessions", ")", ":", "return", "None", "else", ":", "return", "self", ".", "sessions", "[", "server", "]", "# Handle server URLs", "index", "=", "self", ".", "_get_index", "(", "server", ")", "if", "index", "==", "-", "1", ":", "return", "None", "else", ":", "return", "self", ".", "sessions", "[", "index", "]" ]
953168bd08c5332412438cbc5bb59993a07a6911
valid
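get accepts either a numeric index or a server URL; a sketch continuing the register example above:

by_index = sessions.get(0)                                  # index lookup, None if out of range
by_url = sessions.get('https://cloud.genepattern.org/gp')   # URL lookup via _get_index
missing = sessions.get('https://example.org/gp')            # no match, returns None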
SessionList._get_index
Returns a registered GPServer object with a matching GenePattern server url Returns -1 if no matching result was found :param server_url: :return:
genepattern/remote_widgets.py
def _get_index(self, server_url): """ Returns a registered GPServer object with a matching GenePattern server url Returns -1 if no matching result was found :param server_url: :return: """ for i in range(len(self.sessions)): session = self.sessions[i] if session.url == server_url: return i return -1
def _get_index(self, server_url): """ Returns a registered GPServer object with a matching GenePattern server url Returns -1 if no matching result was found :param server_url: :return: """ for i in range(len(self.sessions)): session = self.sessions[i] if session.url == server_url: return i return -1
[ "Returns", "a", "registered", "GPServer", "object", "with", "a", "matching", "GenePattern", "server", "url", "Returns", "-", "1", "if", "no", "matching", "result", "was", "found", ":", "param", "server_url", ":", ":", "return", ":" ]
genepattern/genepattern-notebook
python
https://github.com/genepattern/genepattern-notebook/blob/953168bd08c5332412438cbc5bb59993a07a6911/genepattern/remote_widgets.py#L82-L93
[ "def", "_get_index", "(", "self", ",", "server_url", ")", ":", "for", "i", "in", "range", "(", "len", "(", "self", ".", "sessions", ")", ")", ":", "session", "=", "self", ".", "sessions", "[", "i", "]", "if", "session", ".", "url", "==", "server_url", ":", "return", "i", "return", "-", "1" ]
953168bd08c5332412438cbc5bb59993a07a6911
valid
Timer._accept
Accept None or ∞ or datetime or numeric for target
tempora/timing.py
def _accept(self, target): "Accept None or ∞ or datetime or numeric for target" if isinstance(target, datetime.timedelta): target = target.total_seconds() if target is None: # treat None as infinite target target = float('Inf') return target
def _accept(self, target): "Accept None or ∞ or datetime or numeric for target" if isinstance(target, datetime.timedelta): target = target.total_seconds() if target is None: # treat None as infinite target target = float('Inf') return target
[ "Accept", "None", "or", "∞", "or", "datetime", "or", "numeric", "for", "target" ]
jaraco/tempora
python
https://github.com/jaraco/tempora/blob/f0a9ab636103fe829aa9b495c93f5249aac5f2b8/tempora/timing.py#L122-L131
[ "def", "_accept", "(", "self", ",", "target", ")", ":", "if", "isinstance", "(", "target", ",", "datetime", ".", "timedelta", ")", ":", "target", "=", "target", ".", "total_seconds", "(", ")", "if", "target", "is", "None", ":", "# treat None as infinite target", "target", "=", "float", "(", "'Inf'", ")", "return", "target" ]
f0a9ab636103fe829aa9b495c93f5249aac5f2b8
valid
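The normalization in _accept lets a Timer deadline be given as a number, a timedelta, or None; a sketch (passing the target through the Timer constructor is an assumption about tempora.timing.Timer):

import datetime
from tempora.timing import Timer

timer = Timer(datetime.timedelta(seconds=30))  # timedelta is converted to 30.0 seconds
endless = Timer(None)                          # None becomes float('Inf'), i.e. never expires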
from_timestamp
Convert a numeric timestamp to a timezone-aware datetime. A client may override this function to change the default behavior, such as to use local time or timezone-naïve times.
tempora/schedule.py
def from_timestamp(ts): """ Convert a numeric timestamp to a timezone-aware datetime. A client may override this function to change the default behavior, such as to use local time or timezone-naïve times. """ return datetime.datetime.utcfromtimestamp(ts).replace(tzinfo=pytz.utc)
def from_timestamp(ts): """ Convert a numeric timestamp to a timezone-aware datetime. A client may override this function to change the default behavior, such as to use local time or timezone-naïve times. """ return datetime.datetime.utcfromtimestamp(ts).replace(tzinfo=pytz.utc)
[ "Convert", "a", "numeric", "timestamp", "to", "a", "timezone", "-", "aware", "datetime", "." ]
jaraco/tempora
python
https://github.com/jaraco/tempora/blob/f0a9ab636103fe829aa9b495c93f5249aac5f2b8/tempora/schedule.py#L29-L36
[ "def", "from_timestamp", "(", "ts", ")", ":", "return", "datetime", ".", "datetime", ".", "utcfromtimestamp", "(", "ts", ")", ".", "replace", "(", "tzinfo", "=", "pytz", ".", "utc", ")" ]
f0a9ab636103fe829aa9b495c93f5249aac5f2b8
valid
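A quick sketch of from_timestamp with the current clock; the result is always UTC-aware:

import time
from tempora.schedule import from_timestamp

dt = from_timestamp(time.time())
print(dt.tzinfo)  # UTC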
DelayedCommand.at_time
Construct a DelayedCommand to come due at `at`, where `at` may be a datetime or timestamp.
tempora/schedule.py
def at_time(cls, at, target): """ Construct a DelayedCommand to come due at `at`, where `at` may be a datetime or timestamp. """ at = cls._from_timestamp(at) cmd = cls.from_datetime(at) cmd.delay = at - now() cmd.target = target return cmd
def at_time(cls, at, target): """ Construct a DelayedCommand to come due at `at`, where `at` may be a datetime or timestamp. """ at = cls._from_timestamp(at) cmd = cls.from_datetime(at) cmd.delay = at - now() cmd.target = target return cmd
[ "Construct", "a", "DelayedCommand", "to", "come", "due", "at", "at", "where", "at", "may", "be", "a", "datetime", "or", "timestamp", "." ]
jaraco/tempora
python
https://github.com/jaraco/tempora/blob/f0a9ab636103fe829aa9b495c93f5249aac5f2b8/tempora/schedule.py#L74-L83
[ "def", "at_time", "(", "cls", ",", "at", ",", "target", ")", ":", "at", "=", "cls", ".", "_from_timestamp", "(", "at", ")", "cmd", "=", "cls", ".", "from_datetime", "(", "at", ")", "cmd", ".", "delay", "=", "at", "-", "now", "(", ")", "cmd", ".", "target", "=", "target", "return", "cmd" ]
f0a9ab636103fe829aa9b495c93f5249aac5f2b8
valid
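at_time is a classmethod; a sketch scheduling a callable five seconds from now (the lambda target is illustrative):

import time
from tempora.schedule import DelayedCommand

cmd = DelayedCommand.at_time(time.time() + 5, target=lambda: print('due'))
# a scheduler invokes cmd.target() once now() passes the command's due time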
PeriodicCommand._localize
Rely on pytz.localize to ensure new result honors DST.
tempora/schedule.py
def _localize(dt): """ Rely on pytz.localize to ensure new result honors DST. """ try: tz = dt.tzinfo return tz.localize(dt.replace(tzinfo=None)) except AttributeError: return dt
def _localize(dt): """ Rely on pytz.localize to ensure new result honors DST. """ try: tz = dt.tzinfo return tz.localize(dt.replace(tzinfo=None)) except AttributeError: return dt
[ "Rely", "on", "pytz", ".", "localize", "to", "ensure", "new", "result", "honors", "DST", "." ]
jaraco/tempora
python
https://github.com/jaraco/tempora/blob/f0a9ab636103fe829aa9b495c93f5249aac5f2b8/tempora/schedule.py#L101-L109
[ "def", "_localize", "(", "dt", ")", ":", "try", ":", "tz", "=", "dt", ".", "tzinfo", "return", "tz", ".", "localize", "(", "dt", ".", "replace", "(", "tzinfo", "=", "None", ")", ")", "except", "AttributeError", ":", "return", "dt" ]
f0a9ab636103fe829aa9b495c93f5249aac5f2b8
valid
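_localize matters when timedelta arithmetic crosses a DST boundary; a sketch using the same pytz pattern as the record (zone choice is illustrative):

import datetime
import pytz

eastern = pytz.timezone('US/Eastern')
before = eastern.localize(datetime.datetime(2018, 3, 10, 12))  # day before spring-forward
after = before + datetime.timedelta(days=1)   # naive arithmetic keeps the stale EST offset
fixed = after.tzinfo.localize(after.replace(tzinfo=None))  # re-localized to honor EDT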
PeriodicCommandFixedDelay.daily_at
Schedule a command to run at a specific time each day.
tempora/schedule.py
def daily_at(cls, at, target): """ Schedule a command to run at a specific time each day. """ daily = datetime.timedelta(days=1) # convert when to the next datetime matching this time when = datetime.datetime.combine(datetime.date.today(), at) if when < now(): when += daily return cls.at_time(cls._localize(when), daily, target)
def daily_at(cls, at, target): """ Schedule a command to run at a specific time each day. """ daily = datetime.timedelta(days=1) # convert when to the next datetime matching this time when = datetime.datetime.combine(datetime.date.today(), at) if when < now(): when += daily return cls.at_time(cls._localize(when), daily, target)
[ "Schedule", "a", "command", "to", "run", "at", "a", "specific", "time", "each", "day", "." ]
jaraco/tempora
python
https://github.com/jaraco/tempora/blob/f0a9ab636103fe829aa9b495c93f5249aac5f2b8/tempora/schedule.py#L144-L153
[ "def", "daily_at", "(", "cls", ",", "at", ",", "target", ")", ":", "daily", "=", "datetime", ".", "timedelta", "(", "days", "=", "1", ")", "# convert when to the next datetime matching this time", "when", "=", "datetime", ".", "datetime", ".", "combine", "(", "datetime", ".", "date", ".", "today", "(", ")", ",", "at", ")", "if", "when", "<", "now", "(", ")", ":", "when", "+=", "daily", "return", "cls", ".", "at_time", "(", "cls", ".", "_localize", "(", "when", ")", ",", "daily", ",", "target", ")" ]
f0a9ab636103fe829aa9b495c93f5249aac5f2b8
valid
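A sketch scheduling a daily job at 08:00 with daily_at (the print target is illustrative):

import datetime
from tempora.schedule import PeriodicCommandFixedDelay

cmd = PeriodicCommandFixedDelay.daily_at(datetime.time(8, 0), target=lambda: print('good morning'))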
strftime
A function to replace the strftime in datetime package or time module. Identical to strftime behavior in those modules except supports any year. Also supports datetime.datetime times. Also supports milliseconds using %s Also supports microseconds using %u
tempora/__init__.py
def strftime(fmt, t): """A function to replace the strftime in datetime package or time module. Identical to strftime behavior in those modules except supports any year. Also supports datetime.datetime times. Also supports milliseconds using %s Also supports microseconds using %u""" if isinstance(t, (time.struct_time, tuple)): t = datetime.datetime(*t[:6]) assert isinstance(t, (datetime.datetime, datetime.time, datetime.date)) try: year = t.year if year < 1900: t = t.replace(year=1900) except AttributeError: year = 1900 subs = ( ('%Y', '%04d' % year), ('%y', '%02d' % (year % 100)), ('%s', '%03d' % (t.microsecond // 1000)), ('%u', '%03d' % (t.microsecond % 1000)) ) def doSub(s, sub): return s.replace(*sub) def doSubs(s): return functools.reduce(doSub, subs, s) fmt = '%%'.join(map(doSubs, fmt.split('%%'))) return t.strftime(fmt)
def strftime(fmt, t): """A function to replace the strftime in datetime package or time module. Identical to strftime behavior in those modules except supports any year. Also supports datetime.datetime times. Also supports milliseconds using %s Also supports microseconds using %u""" if isinstance(t, (time.struct_time, tuple)): t = datetime.datetime(*t[:6]) assert isinstance(t, (datetime.datetime, datetime.time, datetime.date)) try: year = t.year if year < 1900: t = t.replace(year=1900) except AttributeError: year = 1900 subs = ( ('%Y', '%04d' % year), ('%y', '%02d' % (year % 100)), ('%s', '%03d' % (t.microsecond // 1000)), ('%u', '%03d' % (t.microsecond % 1000)) ) def doSub(s, sub): return s.replace(*sub) def doSubs(s): return functools.reduce(doSub, subs, s) fmt = '%%'.join(map(doSubs, fmt.split('%%'))) return t.strftime(fmt)
[ "A", "class", "to", "replace", "the", "strftime", "in", "datetime", "package", "or", "time", "module", ".", "Identical", "to", "strftime", "behavior", "in", "those", "modules", "except", "supports", "any", "year", ".", "Also", "supports", "datetime", ".", "datetime", "times", ".", "Also", "supports", "milliseconds", "using", "%s", "Also", "supports", "microseconds", "using", "%u" ]
jaraco/tempora
python
https://github.com/jaraco/tempora/blob/f0a9ab636103fe829aa9b495c93f5249aac5f2b8/tempora/__init__.py#L97-L127
[ "def", "strftime", "(", "fmt", ",", "t", ")", ":", "if", "isinstance", "(", "t", ",", "(", "time", ".", "struct_time", ",", "tuple", ")", ")", ":", "t", "=", "datetime", ".", "datetime", "(", "*", "t", "[", ":", "6", "]", ")", "assert", "isinstance", "(", "t", ",", "(", "datetime", ".", "datetime", ",", "datetime", ".", "time", ",", "datetime", ".", "date", ")", ")", "try", ":", "year", "=", "t", ".", "year", "if", "year", "<", "1900", ":", "t", "=", "t", ".", "replace", "(", "year", "=", "1900", ")", "except", "AttributeError", ":", "year", "=", "1900", "subs", "=", "(", "(", "'%Y'", ",", "'%04d'", "%", "year", ")", ",", "(", "'%y'", ",", "'%02d'", "%", "(", "year", "%", "100", ")", ")", ",", "(", "'%s'", ",", "'%03d'", "%", "(", "t", ".", "microsecond", "//", "1000", ")", ")", ",", "(", "'%u'", ",", "'%03d'", "%", "(", "t", ".", "microsecond", "%", "1000", ")", ")", ")", "def", "doSub", "(", "s", ",", "sub", ")", ":", "return", "s", ".", "replace", "(", "*", "sub", ")", "def", "doSubs", "(", "s", ")", ":", "return", "functools", ".", "reduce", "(", "doSub", ",", "subs", ",", "s", ")", "fmt", "=", "'%%'", ".", "join", "(", "map", "(", "doSubs", ",", "fmt", ".", "split", "(", "'%%'", ")", ")", ")", "return", "t", ".", "strftime", "(", "fmt", ")" ]
f0a9ab636103fe829aa9b495c93f5249aac5f2b8
valid
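A sketch of the two extensions this strftime adds over the standard one, pre-1900 years and %s/%u for milli- and microseconds (expected output derived from the substitution table in the code above):

import datetime
from tempora import strftime  # defined in tempora/__init__.py per the record

t = datetime.datetime(1776, 7, 4, 12, 0, 0, 123456)
strftime('%Y-%m-%d %H:%M:%S.%s%u', t)  # '1776-07-04 12:00:00.123456'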
strptime
A function to replace strptime in the time module. Should behave identically to the strptime function except it returns a datetime.datetime object instead of a time.struct_time object. Also takes an optional tzinfo parameter which is a time zone info object.
tempora/__init__.py
def strptime(s, fmt, tzinfo=None): """ A function to replace strptime in the time module. Should behave identically to the strptime function except it returns a datetime.datetime object instead of a time.struct_time object. Also takes an optional tzinfo parameter which is a time zone info object. """ res = time.strptime(s, fmt) return datetime.datetime(tzinfo=tzinfo, *res[:6])
def strptime(s, fmt, tzinfo=None): """ A function to replace strptime in the time module. Should behave identically to the strptime function except it returns a datetime.datetime object instead of a time.struct_time object. Also takes an optional tzinfo parameter which is a time zone info object. """ res = time.strptime(s, fmt) return datetime.datetime(tzinfo=tzinfo, *res[:6])
[ "A", "function", "to", "replace", "strptime", "in", "the", "time", "module", ".", "Should", "behave", "identically", "to", "the", "strptime", "function", "except", "it", "returns", "a", "datetime", ".", "datetime", "object", "instead", "of", "a", "time", ".", "struct_time", "object", ".", "Also", "takes", "an", "optional", "tzinfo", "parameter", "which", "is", "a", "time", "zone", "info", "object", "." ]
jaraco/tempora
python
https://github.com/jaraco/tempora/blob/f0a9ab636103fe829aa9b495c93f5249aac5f2b8/tempora/__init__.py#L130-L138
[ "def", "strptime", "(", "s", ",", "fmt", ",", "tzinfo", "=", "None", ")", ":", "res", "=", "time", ".", "strptime", "(", "s", ",", "fmt", ")", "return", "datetime", ".", "datetime", "(", "tzinfo", "=", "tzinfo", ",", "*", "res", "[", ":", "6", "]", ")" ]
f0a9ab636103fe829aa9b495c93f5249aac5f2b8
valid
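strptime here mirrors time.strptime but yields a datetime, optionally timezone-aware; a short sketch:

import pytz
from tempora import strptime

dt = strptime('2004-11-13 08:30:00', '%Y-%m-%d %H:%M:%S', tzinfo=pytz.utc)
# datetime.datetime(2004, 11, 13, 8, 30, tzinfo=<UTC>)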
datetime_mod
Find the time which is the specified date/time truncated to the time delta relative to the start date/time. By default, the start time is midnight of the same day as the specified date/time. >>> datetime_mod(datetime.datetime(2004, 1, 2, 3), ... datetime.timedelta(days = 1.5), ... start = datetime.datetime(2004, 1, 1)) datetime.datetime(2004, 1, 1, 0, 0) >>> datetime_mod(datetime.datetime(2004, 1, 2, 13), ... datetime.timedelta(days = 1.5), ... start = datetime.datetime(2004, 1, 1)) datetime.datetime(2004, 1, 2, 12, 0) >>> datetime_mod(datetime.datetime(2004, 1, 2, 13), ... datetime.timedelta(days = 7), ... start = datetime.datetime(2004, 1, 1)) datetime.datetime(2004, 1, 1, 0, 0) >>> datetime_mod(datetime.datetime(2004, 1, 10, 13), ... datetime.timedelta(days = 7), ... start = datetime.datetime(2004, 1, 1)) datetime.datetime(2004, 1, 8, 0, 0)
tempora/__init__.py
def datetime_mod(dt, period, start=None): """ Find the time which is the specified date/time truncated to the time delta relative to the start date/time. By default, the start time is midnight of the same day as the specified date/time. >>> datetime_mod(datetime.datetime(2004, 1, 2, 3), ... datetime.timedelta(days = 1.5), ... start = datetime.datetime(2004, 1, 1)) datetime.datetime(2004, 1, 1, 0, 0) >>> datetime_mod(datetime.datetime(2004, 1, 2, 13), ... datetime.timedelta(days = 1.5), ... start = datetime.datetime(2004, 1, 1)) datetime.datetime(2004, 1, 2, 12, 0) >>> datetime_mod(datetime.datetime(2004, 1, 2, 13), ... datetime.timedelta(days = 7), ... start = datetime.datetime(2004, 1, 1)) datetime.datetime(2004, 1, 1, 0, 0) >>> datetime_mod(datetime.datetime(2004, 1, 10, 13), ... datetime.timedelta(days = 7), ... start = datetime.datetime(2004, 1, 1)) datetime.datetime(2004, 1, 8, 0, 0) """ if start is None: # use midnight of the same day start = datetime.datetime.combine(dt.date(), datetime.time()) # calculate the difference between the specified time and the start date. delta = dt - start # now aggregate the delta and the period into microseconds # Use microseconds because that's the highest precision of these time # pieces. Also, using microseconds ensures perfect precision (no floating # point errors). def get_time_delta_microseconds(td): return (td.days * seconds_per_day + td.seconds) * 1000000 + td.microseconds delta, period = map(get_time_delta_microseconds, (delta, period)) offset = datetime.timedelta(microseconds=delta % period) # the result is the original specified time minus the offset result = dt - offset return result
def datetime_mod(dt, period, start=None): """ Find the time which is the specified date/time truncated to the time delta relative to the start date/time. By default, the start time is midnight of the same day as the specified date/time. >>> datetime_mod(datetime.datetime(2004, 1, 2, 3), ... datetime.timedelta(days = 1.5), ... start = datetime.datetime(2004, 1, 1)) datetime.datetime(2004, 1, 1, 0, 0) >>> datetime_mod(datetime.datetime(2004, 1, 2, 13), ... datetime.timedelta(days = 1.5), ... start = datetime.datetime(2004, 1, 1)) datetime.datetime(2004, 1, 2, 12, 0) >>> datetime_mod(datetime.datetime(2004, 1, 2, 13), ... datetime.timedelta(days = 7), ... start = datetime.datetime(2004, 1, 1)) datetime.datetime(2004, 1, 1, 0, 0) >>> datetime_mod(datetime.datetime(2004, 1, 10, 13), ... datetime.timedelta(days = 7), ... start = datetime.datetime(2004, 1, 1)) datetime.datetime(2004, 1, 8, 0, 0) """ if start is None: # use midnight of the same day start = datetime.datetime.combine(dt.date(), datetime.time()) # calculate the difference between the specified time and the start date. delta = dt - start # now aggregate the delta and the period into microseconds # Use microseconds because that's the highest precision of these time # pieces. Also, using microseconds ensures perfect precision (no floating # point errors). def get_time_delta_microseconds(td): return (td.days * seconds_per_day + td.seconds) * 1000000 + td.microseconds delta, period = map(get_time_delta_microseconds, (delta, period)) offset = datetime.timedelta(microseconds=delta % period) # the result is the original specified time minus the offset result = dt - offset return result
[ "Find", "the", "time", "which", "is", "the", "specified", "date", "/", "time", "truncated", "to", "the", "time", "delta", "relative", "to", "the", "start", "date", "/", "time", ".", "By", "default", "the", "start", "time", "is", "midnight", "of", "the", "same", "day", "as", "the", "specified", "date", "/", "time", "." ]
jaraco/tempora
python
https://github.com/jaraco/tempora/blob/f0a9ab636103fe829aa9b495c93f5249aac5f2b8/tempora/__init__.py#L219-L259
[ "def", "datetime_mod", "(", "dt", ",", "period", ",", "start", "=", "None", ")", ":", "if", "start", "is", "None", ":", "# use midnight of the same day", "start", "=", "datetime", ".", "datetime", ".", "combine", "(", "dt", ".", "date", "(", ")", ",", "datetime", ".", "time", "(", ")", ")", "# calculate the difference between the specified time and the start date.", "delta", "=", "dt", "-", "start", "# now aggregate the delta and the period into microseconds", "# Use microseconds because that's the highest precision of these time", "# pieces. Also, using microseconds ensures perfect precision (no floating", "# point errors).", "def", "get_time_delta_microseconds", "(", "td", ")", ":", "return", "(", "td", ".", "days", "*", "seconds_per_day", "+", "td", ".", "seconds", ")", "*", "1000000", "+", "td", ".", "microseconds", "delta", ",", "period", "=", "map", "(", "get_time_delta_microseconds", ",", "(", "delta", ",", "period", ")", ")", "offset", "=", "datetime", ".", "timedelta", "(", "microseconds", "=", "delta", "%", "period", ")", "# the result is the original specified time minus the offset", "result", "=", "dt", "-", "offset", "return", "result" ]
f0a9ab636103fe829aa9b495c93f5249aac5f2b8
valid
datetime_round
Find the nearest even period for the specified date/time. >>> datetime_round(datetime.datetime(2004, 11, 13, 8, 11, 13), ... datetime.timedelta(hours = 1)) datetime.datetime(2004, 11, 13, 8, 0) >>> datetime_round(datetime.datetime(2004, 11, 13, 8, 31, 13), ... datetime.timedelta(hours = 1)) datetime.datetime(2004, 11, 13, 9, 0) >>> datetime_round(datetime.datetime(2004, 11, 13, 8, 30), ... datetime.timedelta(hours = 1)) datetime.datetime(2004, 11, 13, 9, 0)
tempora/__init__.py
def datetime_round(dt, period, start=None): """ Find the nearest even period for the specified date/time. >>> datetime_round(datetime.datetime(2004, 11, 13, 8, 11, 13), ... datetime.timedelta(hours = 1)) datetime.datetime(2004, 11, 13, 8, 0) >>> datetime_round(datetime.datetime(2004, 11, 13, 8, 31, 13), ... datetime.timedelta(hours = 1)) datetime.datetime(2004, 11, 13, 9, 0) >>> datetime_round(datetime.datetime(2004, 11, 13, 8, 30), ... datetime.timedelta(hours = 1)) datetime.datetime(2004, 11, 13, 9, 0) """ result = datetime_mod(dt, period, start) if abs(dt - result) >= period // 2: result += period return result
def datetime_round(dt, period, start=None): """ Find the nearest even period for the specified date/time. >>> datetime_round(datetime.datetime(2004, 11, 13, 8, 11, 13), ... datetime.timedelta(hours = 1)) datetime.datetime(2004, 11, 13, 8, 0) >>> datetime_round(datetime.datetime(2004, 11, 13, 8, 31, 13), ... datetime.timedelta(hours = 1)) datetime.datetime(2004, 11, 13, 9, 0) >>> datetime_round(datetime.datetime(2004, 11, 13, 8, 30), ... datetime.timedelta(hours = 1)) datetime.datetime(2004, 11, 13, 9, 0) """ result = datetime_mod(dt, period, start) if abs(dt - result) >= period // 2: result += period return result
[ "Find", "the", "nearest", "even", "period", "for", "the", "specified", "date", "/", "time", "." ]
jaraco/tempora
python
https://github.com/jaraco/tempora/blob/f0a9ab636103fe829aa9b495c93f5249aac5f2b8/tempora/__init__.py#L262-L279
[ "def", "datetime_round", "(", "dt", ",", "period", ",", "start", "=", "None", ")", ":", "result", "=", "datetime_mod", "(", "dt", ",", "period", ",", "start", ")", "if", "abs", "(", "dt", "-", "result", ")", ">=", "period", "//", "2", ":", "result", "+=", "period", "return", "result" ]
f0a9ab636103fe829aa9b495c93f5249aac5f2b8
valid
get_nearest_year_for_day
Returns the nearest year to now inferred from a Julian date.
tempora/__init__.py
def get_nearest_year_for_day(day): """ Returns the nearest year to now inferred from a Julian date. """ now = time.gmtime() result = now.tm_year # if the day is far greater than today, it must be from last year if day - now.tm_yday > 365 // 2: result -= 1 # if the day is far less than today, it must be for next year. if now.tm_yday - day > 365 // 2: result += 1 return result
def get_nearest_year_for_day(day): """ Returns the nearest year to now inferred from a Julian date. """ now = time.gmtime() result = now.tm_year # if the day is far greater than today, it must be from last year if day - now.tm_yday > 365 // 2: result -= 1 # if the day is far less than today, it must be for next year. if now.tm_yday - day > 365 // 2: result += 1 return result
[ "Returns", "the", "nearest", "year", "to", "now", "inferred", "from", "a", "Julian", "date", "." ]
jaraco/tempora
python
https://github.com/jaraco/tempora/blob/f0a9ab636103fe829aa9b495c93f5249aac5f2b8/tempora/__init__.py#L282-L294
[ "def", "get_nearest_year_for_day", "(", "day", ")", ":", "now", "=", "time", ".", "gmtime", "(", ")", "result", "=", "now", ".", "tm_year", "# if the day is far greater than today, it must be from last year", "if", "day", "-", "now", ".", "tm_yday", ">", "365", "//", "2", ":", "result", "-=", "1", "# if the day is far less than today, it must be for next year.", "if", "now", ".", "tm_yday", "-", "day", ">", "365", "//", "2", ":", "result", "+=", "1", "return", "result" ]
f0a9ab636103fe829aa9b495c93f5249aac5f2b8
valid
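Because the result depends on today's date, a doctest is impractical here; a hedged sketch of the intended use, recovering the year for a Julian day taken from a log line that omits it:

from tempora import get_nearest_year_for_day

day = 45  # placeholder Julian day
year = get_nearest_year_for_day(day)  # today's year, +/- 1 if day is over ~half a year away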
gregorian_date
Gregorian Date is defined as a year and a julian day (1-based index into the days of the year). >>> gregorian_date(2007, 15) datetime.date(2007, 1, 15)
tempora/__init__.py
def gregorian_date(year, julian_day): """ Gregorian Date is defined as a year and a julian day (1-based index into the days of the year). >>> gregorian_date(2007, 15) datetime.date(2007, 1, 15) """ result = datetime.date(year, 1, 1) result += datetime.timedelta(days=julian_day - 1) return result
def gregorian_date(year, julian_day): """ Gregorian Date is defined as a year and a julian day (1-based index into the days of the year). >>> gregorian_date(2007, 15) datetime.date(2007, 1, 15) """ result = datetime.date(year, 1, 1) result += datetime.timedelta(days=julian_day - 1) return result
[ "Gregorian", "Date", "is", "defined", "as", "a", "year", "and", "a", "julian", "day", "(", "1", "-", "based", "index", "into", "the", "days", "of", "the", "year", ")", "." ]
jaraco/tempora
python
https://github.com/jaraco/tempora/blob/f0a9ab636103fe829aa9b495c93f5249aac5f2b8/tempora/__init__.py#L297-L307
[ "def", "gregorian_date", "(", "year", ",", "julian_day", ")", ":", "result", "=", "datetime", ".", "date", "(", "year", ",", "1", ",", "1", ")", "result", "+=", "datetime", ".", "timedelta", "(", "days", "=", "julian_day", "-", "1", ")", "return", "result" ]
f0a9ab636103fe829aa9b495c93f5249aac5f2b8
valid
get_period_seconds
return the number of seconds in the specified period >>> get_period_seconds('day') 86400 >>> get_period_seconds(86400) 86400 >>> get_period_seconds(datetime.timedelta(hours=24)) 86400 >>> get_period_seconds('day + os.system("rm -Rf *")') Traceback (most recent call last): ... ValueError: period not in (second, minute, hour, day, month, year)
tempora/__init__.py
def get_period_seconds(period): """ return the number of seconds in the specified period >>> get_period_seconds('day') 86400 >>> get_period_seconds(86400) 86400 >>> get_period_seconds(datetime.timedelta(hours=24)) 86400 >>> get_period_seconds('day + os.system("rm -Rf *")') Traceback (most recent call last): ... ValueError: period not in (second, minute, hour, day, month, year) """ if isinstance(period, six.string_types): try: name = 'seconds_per_' + period.lower() result = globals()[name] except KeyError: msg = "period not in (second, minute, hour, day, month, year)" raise ValueError(msg) elif isinstance(period, numbers.Number): result = period elif isinstance(period, datetime.timedelta): result = period.days * get_period_seconds('day') + period.seconds else: raise TypeError('period must be a string or integer') return result
def get_period_seconds(period): """ return the number of seconds in the specified period >>> get_period_seconds('day') 86400 >>> get_period_seconds(86400) 86400 >>> get_period_seconds(datetime.timedelta(hours=24)) 86400 >>> get_period_seconds('day + os.system("rm -Rf *")') Traceback (most recent call last): ... ValueError: period not in (second, minute, hour, day, month, year) """ if isinstance(period, six.string_types): try: name = 'seconds_per_' + period.lower() result = globals()[name] except KeyError: msg = "period not in (second, minute, hour, day, month, year)" raise ValueError(msg) elif isinstance(period, numbers.Number): result = period elif isinstance(period, datetime.timedelta): result = period.days * get_period_seconds('day') + period.seconds else: raise TypeError('period must be a string or integer') return result
[ "return", "the", "number", "of", "seconds", "in", "the", "specified", "period" ]
jaraco/tempora
python
https://github.com/jaraco/tempora/blob/f0a9ab636103fe829aa9b495c93f5249aac5f2b8/tempora/__init__.py#L310-L338
[ "def", "get_period_seconds", "(", "period", ")", ":", "if", "isinstance", "(", "period", ",", "six", ".", "string_types", ")", ":", "try", ":", "name", "=", "'seconds_per_'", "+", "period", ".", "lower", "(", ")", "result", "=", "globals", "(", ")", "[", "name", "]", "except", "KeyError", ":", "msg", "=", "\"period not in (second, minute, hour, day, month, year)\"", "raise", "ValueError", "(", "msg", ")", "elif", "isinstance", "(", "period", ",", "numbers", ".", "Number", ")", ":", "result", "=", "period", "elif", "isinstance", "(", "period", ",", "datetime", ".", "timedelta", ")", ":", "result", "=", "period", ".", "days", "*", "get_period_seconds", "(", "'day'", ")", "+", "period", ".", "seconds", "else", ":", "raise", "TypeError", "(", "'period must be a string or integer'", ")", "return", "result" ]
f0a9ab636103fe829aa9b495c93f5249aac5f2b8
valid
get_date_format_string
For a given period (e.g. 'month', 'day', or some numeric interval such as 3600 (in secs)), return the format string that can be used with strftime to format that time to specify the times across that interval, but no more detailed. For example, >>> get_date_format_string('month') '%Y-%m' >>> get_date_format_string(3600) '%Y-%m-%d %H' >>> get_date_format_string('hour') '%Y-%m-%d %H' >>> get_date_format_string(None) Traceback (most recent call last): ... TypeError: period must be a string or integer >>> get_date_format_string('garbage') Traceback (most recent call last): ... ValueError: period not in (second, minute, hour, day, month, year)
tempora/__init__.py
def get_date_format_string(period): """ For a given period (e.g. 'month', 'day', or some numeric interval such as 3600 (in secs)), return the format string that can be used with strftime to format that time to specify the times across that interval, but no more detailed. For example, >>> get_date_format_string('month') '%Y-%m' >>> get_date_format_string(3600) '%Y-%m-%d %H' >>> get_date_format_string('hour') '%Y-%m-%d %H' >>> get_date_format_string(None) Traceback (most recent call last): ... TypeError: period must be a string or integer >>> get_date_format_string('garbage') Traceback (most recent call last): ... ValueError: period not in (second, minute, hour, day, month, year) """ # handle the special case of 'month' which doesn't have # a static interval in seconds if isinstance(period, six.string_types) and period.lower() == 'month': return '%Y-%m' file_period_secs = get_period_seconds(period) format_pieces = ('%Y', '-%m-%d', ' %H', '-%M', '-%S') seconds_per_second = 1 intervals = ( seconds_per_year, seconds_per_day, seconds_per_hour, seconds_per_minute, seconds_per_second, ) mods = list(map(lambda interval: file_period_secs % interval, intervals)) format_pieces = format_pieces[: mods.index(0) + 1] return ''.join(format_pieces)
def get_date_format_string(period): """ For a given period (e.g. 'month', 'day', or some numeric interval such as 3600 (in secs)), return the format string that can be used with strftime to format that time to specify the times across that interval, but no more detailed. For example, >>> get_date_format_string('month') '%Y-%m' >>> get_date_format_string(3600) '%Y-%m-%d %H' >>> get_date_format_string('hour') '%Y-%m-%d %H' >>> get_date_format_string(None) Traceback (most recent call last): ... TypeError: period must be a string or integer >>> get_date_format_string('garbage') Traceback (most recent call last): ... ValueError: period not in (second, minute, hour, day, month, year) """ # handle the special case of 'month' which doesn't have # a static interval in seconds if isinstance(period, six.string_types) and period.lower() == 'month': return '%Y-%m' file_period_secs = get_period_seconds(period) format_pieces = ('%Y', '-%m-%d', ' %H', '-%M', '-%S') seconds_per_second = 1 intervals = ( seconds_per_year, seconds_per_day, seconds_per_hour, seconds_per_minute, seconds_per_second, ) mods = list(map(lambda interval: file_period_secs % interval, intervals)) format_pieces = format_pieces[: mods.index(0) + 1] return ''.join(format_pieces)
[ "For", "a", "given", "period", "(", "e", ".", "g", ".", "month", "day", "or", "some", "numeric", "interval", "such", "as", "3600", "(", "in", "secs", "))", "return", "the", "format", "string", "that", "can", "be", "used", "with", "strftime", "to", "format", "that", "time", "to", "specify", "the", "times", "across", "that", "interval", "but", "no", "more", "detailed", ".", "For", "example" ]
jaraco/tempora
python
https://github.com/jaraco/tempora/blob/f0a9ab636103fe829aa9b495c93f5249aac5f2b8/tempora/__init__.py#L341-L380
[ "def", "get_date_format_string", "(", "period", ")", ":", "# handle the special case of 'month' which doesn't have", "# a static interval in seconds", "if", "isinstance", "(", "period", ",", "six", ".", "string_types", ")", "and", "period", ".", "lower", "(", ")", "==", "'month'", ":", "return", "'%Y-%m'", "file_period_secs", "=", "get_period_seconds", "(", "period", ")", "format_pieces", "=", "(", "'%Y'", ",", "'-%m-%d'", ",", "' %H'", ",", "'-%M'", ",", "'-%S'", ")", "seconds_per_second", "=", "1", "intervals", "=", "(", "seconds_per_year", ",", "seconds_per_day", ",", "seconds_per_hour", ",", "seconds_per_minute", ",", "seconds_per_second", ",", ")", "mods", "=", "list", "(", "map", "(", "lambda", "interval", ":", "file_period_secs", "%", "interval", ",", "intervals", ")", ")", "format_pieces", "=", "format_pieces", "[", ":", "mods", ".", "index", "(", "0", ")", "+", "1", "]", "return", "''", ".", "join", "(", "format_pieces", ")" ]
f0a9ab636103fe829aa9b495c93f5249aac5f2b8
valid
divide_timedelta_float
Divide a timedelta by a float value >>> one_day = datetime.timedelta(days=1) >>> half_day = datetime.timedelta(days=.5) >>> divide_timedelta_float(one_day, 2.0) == half_day True >>> divide_timedelta_float(one_day, 2) == half_day True
tempora/__init__.py
def divide_timedelta_float(td, divisor): """ Divide a timedelta by a float value >>> one_day = datetime.timedelta(days=1) >>> half_day = datetime.timedelta(days=.5) >>> divide_timedelta_float(one_day, 2.0) == half_day True >>> divide_timedelta_float(one_day, 2) == half_day True """ # td is comprised of days, seconds, microseconds dsm = [getattr(td, attr) for attr in ('days', 'seconds', 'microseconds')] dsm = map(lambda elem: elem / divisor, dsm) return datetime.timedelta(*dsm)
def divide_timedelta_float(td, divisor): """ Divide a timedelta by a float value >>> one_day = datetime.timedelta(days=1) >>> half_day = datetime.timedelta(days=.5) >>> divide_timedelta_float(one_day, 2.0) == half_day True >>> divide_timedelta_float(one_day, 2) == half_day True """ # td is comprised of days, seconds, microseconds dsm = [getattr(td, attr) for attr in ('days', 'seconds', 'microseconds')] dsm = map(lambda elem: elem / divisor, dsm) return datetime.timedelta(*dsm)
[ "Divide", "a", "timedelta", "by", "a", "float", "value" ]
jaraco/tempora
python
https://github.com/jaraco/tempora/blob/f0a9ab636103fe829aa9b495c93f5249aac5f2b8/tempora/__init__.py#L383-L397
[ "def", "divide_timedelta_float", "(", "td", ",", "divisor", ")", ":", "# td is comprised of days, seconds, microseconds", "dsm", "=", "[", "getattr", "(", "td", ",", "attr", ")", "for", "attr", "in", "(", "'days'", ",", "'seconds'", ",", "'microseconds'", ")", "]", "dsm", "=", "map", "(", "lambda", "elem", ":", "elem", "/", "divisor", ",", "dsm", ")", "return", "datetime", ".", "timedelta", "(", "*", "dsm", ")" ]
f0a9ab636103fe829aa9b495c93f5249aac5f2b8
valid
calculate_prorated_values
A utility function to prompt for a rate (a string in units per unit time), and print that same rate for various time periods.
tempora/__init__.py
def calculate_prorated_values(): """ A utility function to prompt for a rate (a string in units per unit time), and print that same rate for various time periods. """ rate = six.moves.input("Enter the rate (3/hour, 50/month)> ") res = re.match(r'(?P<value>[\d.]+)/(?P<period>\w+)$', rate).groupdict() value = float(res['value']) value_per_second = value / get_period_seconds(res['period']) for period in ('minute', 'hour', 'day', 'month', 'year'): period_value = value_per_second * get_period_seconds(period) print("per {period}: {period_value}".format(**locals()))
def calculate_prorated_values(): """ A utility function to prompt for a rate (a string in units per unit time), and print that same rate for various time periods. """ rate = six.moves.input("Enter the rate (3/hour, 50/month)> ") res = re.match(r'(?P<value>[\d.]+)/(?P<period>\w+)$', rate).groupdict() value = float(res['value']) value_per_second = value / get_period_seconds(res['period']) for period in ('minute', 'hour', 'day', 'month', 'year'): period_value = value_per_second * get_period_seconds(period) print("per {period}: {period_value}".format(**locals()))
[ "A", "utility", "function", "to", "prompt", "for", "a", "rate", "(", "a", "string", "in", "units", "per", "unit", "time", ")", "and", "return", "that", "same", "rate", "for", "various", "time", "periods", "." ]
jaraco/tempora
python
https://github.com/jaraco/tempora/blob/f0a9ab636103fe829aa9b495c93f5249aac5f2b8/tempora/__init__.py#L400-L411
[ "def", "calculate_prorated_values", "(", ")", ":", "rate", "=", "six", ".", "moves", ".", "input", "(", "\"Enter the rate (3/hour, 50/month)> \"", ")", "res", "=", "re", ".", "match", "(", "r'(?P<value>[\\d.]+)/(?P<period>\\w+)$'", ",", "rate", ")", ".", "groupdict", "(", ")", "value", "=", "float", "(", "res", "[", "'value'", "]", ")", "value_per_second", "=", "value", "/", "get_period_seconds", "(", "res", "[", "'period'", "]", ")", "for", "period", "in", "(", "'minute'", ",", "'hour'", ",", "'day'", ",", "'month'", ",", "'year'", ")", ":", "period_value", "=", "value_per_second", "*", "get_period_seconds", "(", "period", ")", "print", "(", "\"per {period}: {period_value}\"", ".", "format", "(", "*", "*", "locals", "(", ")", ")", ")" ]
f0a9ab636103fe829aa9b495c93f5249aac5f2b8
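A non-interactive sketch of the same prorating math, assuming get_period_seconds is importable from tempora (the interactive function above calls it unqualified):

import re
from tempora import get_period_seconds  # assumed importable from the package root

def prorate(rate):  # e.g. '3/hour'
    # same regex as calculate_prorated_values, but returning a dict instead of printing
    res = re.match(r'(?P<value>[\d.]+)/(?P<period>\w+)$', rate).groupdict()
    per_second = float(res['value']) / get_period_seconds(res['period'])
    return {p: per_second * get_period_seconds(p)
            for p in ('minute', 'hour', 'day', 'month', 'year')}

prorate('3/hour')['minute']  # 0.05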
valid
parse_timedelta
Take a string representing a span of time and parse it to a time delta. Accepts any string of comma-separated numbers each with a unit indicator. >>> parse_timedelta('1 day') datetime.timedelta(days=1) >>> parse_timedelta('1 day, 30 seconds') datetime.timedelta(days=1, seconds=30) >>> parse_timedelta('47.32 days, 20 minutes, 15.4 milliseconds') datetime.timedelta(days=47, seconds=28848, microseconds=15400) Supports weeks, months, years >>> parse_timedelta('1 week') datetime.timedelta(days=7) >>> parse_timedelta('1 year, 1 month') datetime.timedelta(days=395, seconds=58685) Note that months and years are strict intervals, not aligned to a calendar: >>> now = datetime.datetime.now() >>> later = now + parse_timedelta('1 year') >>> diff = later.replace(year=now.year) - now >>> diff.seconds 20940
tempora/__init__.py
def parse_timedelta(str): """ Take a string representing a span of time and parse it to a time delta. Accepts any string of comma-separated numbers each with a unit indicator. >>> parse_timedelta('1 day') datetime.timedelta(days=1) >>> parse_timedelta('1 day, 30 seconds') datetime.timedelta(days=1, seconds=30) >>> parse_timedelta('47.32 days, 20 minutes, 15.4 milliseconds') datetime.timedelta(days=47, seconds=28848, microseconds=15400) Supports weeks, months, years >>> parse_timedelta('1 week') datetime.timedelta(days=7) >>> parse_timedelta('1 year, 1 month') datetime.timedelta(days=395, seconds=58685) Note that months and years are strict intervals, not aligned to a calendar: >>> now = datetime.datetime.now() >>> later = now + parse_timedelta('1 year') >>> diff = later.replace(year=now.year) - now >>> diff.seconds 20940 """ deltas = (_parse_timedelta_part(part.strip()) for part in str.split(',')) return sum(deltas, datetime.timedelta())
def parse_timedelta(str): """ Take a string representing a span of time and parse it to a time delta. Accepts any string of comma-separated numbers each with a unit indicator. >>> parse_timedelta('1 day') datetime.timedelta(days=1) >>> parse_timedelta('1 day, 30 seconds') datetime.timedelta(days=1, seconds=30) >>> parse_timedelta('47.32 days, 20 minutes, 15.4 milliseconds') datetime.timedelta(days=47, seconds=28848, microseconds=15400) Supports weeks, months, years >>> parse_timedelta('1 week') datetime.timedelta(days=7) >>> parse_timedelta('1 year, 1 month') datetime.timedelta(days=395, seconds=58685) Note that months and years are strict intervals, not aligned to a calendar: >>> now = datetime.datetime.now() >>> later = now + parse_timedelta('1 year') >>> diff = later.replace(year=now.year) - now >>> diff.seconds 20940 """ deltas = (_parse_timedelta_part(part.strip()) for part in str.split(',')) return sum(deltas, datetime.timedelta())
[ "Take", "a", "string", "representing", "a", "span", "of", "time", "and", "parse", "it", "to", "a", "time", "delta", ".", "Accepts", "any", "string", "of", "comma", "-", "separated", "numbers", "each", "with", "a", "unit", "indicator", "." ]
jaraco/tempora
python
https://github.com/jaraco/tempora/blob/f0a9ab636103fe829aa9b495c93f5249aac5f2b8/tempora/__init__.py#L414-L446
[ "def", "parse_timedelta", "(", "str", ")", ":", "deltas", "=", "(", "_parse_timedelta_part", "(", "part", ".", "strip", "(", ")", ")", "for", "part", "in", "str", ".", "split", "(", "','", ")", ")", "return", "sum", "(", "deltas", ",", "datetime", ".", "timedelta", "(", ")", ")" ]
f0a9ab636103fe829aa9b495c93f5249aac5f2b8
valid
divide_timedelta
Get the ratio of two timedeltas >>> one_day = datetime.timedelta(days=1) >>> one_hour = datetime.timedelta(hours=1) >>> divide_timedelta(one_hour, one_day) == 1 / 24 True
tempora/__init__.py
def divide_timedelta(td1, td2): """ Get the ratio of two timedeltas >>> one_day = datetime.timedelta(days=1) >>> one_hour = datetime.timedelta(hours=1) >>> divide_timedelta(one_hour, one_day) == 1 / 24 True """ try: return td1 / td2 except TypeError: # Python 3.2 gets division # http://bugs.python.org/issue2706 return td1.total_seconds() / td2.total_seconds()
def divide_timedelta(td1, td2): """ Get the ratio of two timedeltas >>> one_day = datetime.timedelta(days=1) >>> one_hour = datetime.timedelta(hours=1) >>> divide_timedelta(one_hour, one_day) == 1 / 24 True """ try: return td1 / td2 except TypeError: # Python 3.2 gets division # http://bugs.python.org/issue2706 return td1.total_seconds() / td2.total_seconds()
[ "Get", "the", "ratio", "of", "two", "timedeltas" ]
jaraco/tempora
python
https://github.com/jaraco/tempora/blob/f0a9ab636103fe829aa9b495c93f5249aac5f2b8/tempora/__init__.py#L467-L481
[ "def", "divide_timedelta", "(", "td1", ",", "td2", ")", ":", "try", ":", "return", "td1", "/", "td2", "except", "TypeError", ":", "# Python 3.2 gets division", "# http://bugs.python.org/issue2706", "return", "td1", ".", "total_seconds", "(", ")", "/", "td2", ".", "total_seconds", "(", ")" ]
f0a9ab636103fe829aa9b495c93f5249aac5f2b8
valid
date_range
Much like the built-in function range, but works with dates >>> range_items = date_range( ... datetime.datetime(2005,12,21), ... datetime.datetime(2005,12,25), ... ) >>> my_range = tuple(range_items) >>> datetime.datetime(2005,12,21) in my_range True >>> datetime.datetime(2005,12,22) in my_range True >>> datetime.datetime(2005,12,25) in my_range False
tempora/__init__.py
def date_range(start=None, stop=None, step=None): """ Much like the built-in function range, but works with dates >>> range_items = date_range( ... datetime.datetime(2005,12,21), ... datetime.datetime(2005,12,25), ... ) >>> my_range = tuple(range_items) >>> datetime.datetime(2005,12,21) in my_range True >>> datetime.datetime(2005,12,22) in my_range True >>> datetime.datetime(2005,12,25) in my_range False """ if step is None: step = datetime.timedelta(days=1) if start is None: start = datetime.datetime.now() while start < stop: yield start start += step
def date_range(start=None, stop=None, step=None): """ Much like the built-in function range, but works with dates >>> range_items = date_range( ... datetime.datetime(2005,12,21), ... datetime.datetime(2005,12,25), ... ) >>> my_range = tuple(range_items) >>> datetime.datetime(2005,12,21) in my_range True >>> datetime.datetime(2005,12,22) in my_range True >>> datetime.datetime(2005,12,25) in my_range False """ if step is None: step = datetime.timedelta(days=1) if start is None: start = datetime.datetime.now() while start < stop: yield start start += step
[ "Much", "like", "the", "built", "-", "in", "function", "range", "but", "works", "with", "dates" ]
jaraco/tempora
python
https://github.com/jaraco/tempora/blob/f0a9ab636103fe829aa9b495c93f5249aac5f2b8/tempora/__init__.py#L484-L506
[ "def", "date_range", "(", "start", "=", "None", ",", "stop", "=", "None", ",", "step", "=", "None", ")", ":", "if", "step", "is", "None", ":", "step", "=", "datetime", ".", "timedelta", "(", "days", "=", "1", ")", "if", "start", "is", "None", ":", "start", "=", "datetime", ".", "datetime", ".", "now", "(", ")", "while", "start", "<", "stop", ":", "yield", "start", "start", "+=", "step" ]
f0a9ab636103fe829aa9b495c93f5249aac5f2b8
valid
DatetimeConstructor.construct_datetime
Construct a datetime.datetime from a number of different time types found in python and pythonwin
tempora/__init__.py
def construct_datetime(cls, *args, **kwargs): """Construct a datetime.datetime from a number of different time types found in python and pythonwin""" if len(args) == 1: arg = args[0] method = cls.__get_dt_constructor( type(arg).__module__, type(arg).__name__, ) result = method(arg) try: result = result.replace(tzinfo=kwargs.pop('tzinfo')) except KeyError: pass if kwargs: first_key = next(iter(kwargs)) tmpl = ( "{first_key} is an invalid keyword " "argument for this function." ) raise TypeError(tmpl.format(**locals())) else: result = datetime.datetime(*args, **kwargs) return result
def construct_datetime(cls, *args, **kwargs): """Construct a datetime.datetime from a number of different time types found in python and pythonwin""" if len(args) == 1: arg = args[0] method = cls.__get_dt_constructor( type(arg).__module__, type(arg).__name__, ) result = method(arg) try: result = result.replace(tzinfo=kwargs.pop('tzinfo')) except KeyError: pass if kwargs: first_key = next(iter(kwargs)) tmpl = ( "{first_key} is an invalid keyword " "argument for this function." ) raise TypeError(tmpl.format(**locals())) else: result = datetime.datetime(*args, **kwargs) return result
[ "Construct", "a", "datetime", ".", "datetime", "from", "a", "number", "of", "different", "time", "types", "found", "in", "python", "and", "pythonwin" ]
jaraco/tempora
python
https://github.com/jaraco/tempora/blob/f0a9ab636103fe829aa9b495c93f5249aac5f2b8/tempora/__init__.py#L148-L171
[ "def", "construct_datetime", "(", "cls", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "len", "(", "args", ")", "==", "1", ":", "arg", "=", "args", "[", "0", "]", "method", "=", "cls", ".", "__get_dt_constructor", "(", "type", "(", "arg", ")", ".", "__module__", ",", "type", "(", "arg", ")", ".", "__name__", ",", ")", "result", "=", "method", "(", "arg", ")", "try", ":", "result", "=", "result", ".", "replace", "(", "tzinfo", "=", "kwargs", ".", "pop", "(", "'tzinfo'", ")", ")", "except", "KeyError", ":", "pass", "if", "kwargs", ":", "first_key", "=", "kwargs", ".", "keys", "(", ")", "[", "0", "]", "tmpl", "=", "(", "\"{first_key} is an invalid keyword \"", "\"argument for this function.\"", ")", "raise", "TypeError", "(", "tmpl", ".", "format", "(", "*", "*", "locals", "(", ")", ")", ")", "else", ":", "result", "=", "datetime", ".", "datetime", "(", "*", "args", ",", "*", "*", "kwargs", ")", "return", "result" ]
f0a9ab636103fe829aa9b495c93f5249aac5f2b8
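The method dispatches on the argument's (module, type-name) pair through a private constructor lookup; a self-contained sketch of that pattern, using a hypothetical table rather than the library's own:

import datetime, time

# Hypothetical dispatch table; the real class resolves constructors privately.
_constructors = {
    ('datetime', 'datetime'): lambda dt: dt,
    ('datetime', 'date'): lambda d: datetime.datetime(d.year, d.month, d.day),
    ('time', 'struct_time'): lambda st: datetime.datetime(*st[:6]),
}

def construct(arg):
    key = (type(arg).__module__, type(arg).__name__)
    return _constructors[key](arg)  # KeyError for unsupported types; fallback omitted

construct(time.localtime())  # -> datetime.datetime for the current moment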
valid
__common_triplet
__common_triplet(input_string, consonants, vowels) -> string
codicefiscale.py
def __common_triplet(input_string, consonants, vowels): """__common_triplet(input_string, consonants, vowels) -> string""" output = consonants while len(output) < 3: try: output += vowels.pop(0) except IndexError: # If there are fewer vowels than needed to fill the triplet, # (e.g. for a surname such as "Fo'" or "Hu" or the Korean "Y") # fill it with 'X'; output += 'X' return output[:3]
def __common_triplet(input_string, consonants, vowels): """__common_triplet(input_string, consonants, vowels) -> string""" output = consonants while len(output) < 3: try: output += vowels.pop(0) except IndexError: # If there are fewer vowels than needed to fill the triplet, # (e.g. for a surname such as "Fo'" or "Hu" or the Korean "Y") # fill it with 'X'; output += 'X' return output[:3]
[ "__common_triplet", "(", "input_string", "consonants", "vowels", ")", "-", ">", "string" ]
ema/pycodicefiscale
python
https://github.com/ema/pycodicefiscale/blob/4d06a145cdcffe7ee576f2fedaf40e2c6f7692a4/codicefiscale.py#L59-L72
[ "def", "__common_triplet", "(", "input_string", ",", "consonants", ",", "vowels", ")", ":", "output", "=", "consonants", "while", "len", "(", "output", ")", "<", "3", ":", "try", ":", "output", "+=", "vowels", ".", "pop", "(", "0", ")", "except", "IndexError", ":", "# If there are less wovels than needed to fill the triplet,", "# (e.g. for a surname as \"Fo'\" or \"Hu\" or the corean \"Y\")", "# fill it with 'X';", "output", "+=", "'X'", "return", "output", "[", ":", "3", "]" ]
4d06a145cdcffe7ee576f2fedaf40e2c6f7692a4
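A worked rendering of the padding rule (a standalone re-implementation for illustration, not the module's code):

def common_triplet_sketch(consonants, vowels):
    out = consonants
    for v in vowels:
        if len(out) >= 3:
            break
        out += v
    return (out + 'XXX')[:3]  # pad with 'X' when letters run out

common_triplet_sketch('F', ['O'])         # 'FOX' (surname "Fo")
common_triplet_sketch('RCC', ['O', 'A'])  # 'RCC' (surname "Rocca")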
valid
__consonants_and_vowels
__consonants_and_vowels(input_string) -> (string, list) Get the consonants as a string and the vowels as a list.
codicefiscale.py
def __consonants_and_vowels(input_string): """__consonants_and_vowels(input_string) -> (string, list) Get the consonants as a string and the vowels as a list. """ input_string = input_string.upper().replace(' ', '') consonants = [ char for char in input_string if char in __CONSONANTS ] vowels = [ char for char in input_string if char in __VOWELS ] return "".join(consonants), vowels
def __consonants_and_vowels(input_string): """__consonants_and_vowels(input_string) -> (string, list) Get the consonants as a string and the vowels as a list. """ input_string = input_string.upper().replace(' ', '') consonants = [ char for char in input_string if char in __CONSONANTS ] vowels = [ char for char in input_string if char in __VOWELS ] return "".join(consonants), vowels
[ "__consonants_and_vowels", "(", "input_string", ")", "-", ">", "(", "string", "list", ")" ]
ema/pycodicefiscale
python
https://github.com/ema/pycodicefiscale/blob/4d06a145cdcffe7ee576f2fedaf40e2c6f7692a4/codicefiscale.py#L74-L84
[ "def", "__consonants_and_vowels", "(", "input_string", ")", ":", "input_string", "=", "input_string", ".", "upper", "(", ")", ".", "replace", "(", "' '", ",", "''", ")", "consonants", "=", "[", "char", "for", "char", "in", "input_string", "if", "char", "in", "__CONSONANTS", "]", "vowels", "=", "[", "char", "for", "char", "in", "input_string", "if", "char", "in", "__VOWELS", "]", "return", "\"\"", ".", "join", "(", "consonants", ")", ",", "vowels" ]
4d06a145cdcffe7ee576f2fedaf40e2c6f7692a4
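For example, with the conventional A-Z vowel split (the module's own __VOWELS/__CONSONANTS constants are private and assumed here):

VOWELS = 'AEIOU'  # assumption: the module partitions A-Z into these vowels plus consonants
s = 'Emanuele'.upper().replace(' ', '')
consonants = ''.join(c for c in s if c.isalpha() and c not in VOWELS)  # 'MNL'
vowels = [c for c in s if c in VOWELS]                                 # ['E', 'A', 'U', 'E', 'E']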
valid
__surname_triplet
__surname_triplet(input_string) -> string
codicefiscale.py
def __surname_triplet(input_string): """__surname_triplet(input_string) -> string""" consonants, vowels = __consonants_and_vowels(input_string) return __common_triplet(input_string, consonants, vowels)
def __surname_triplet(input_string): """__surname_triplet(input_string) -> string""" consonants, vowels = __consonants_and_vowels(input_string) return __common_triplet(input_string, consonants, vowels)
[ "__surname_triplet", "(", "input_string", ")", "-", ">", "string" ]
ema/pycodicefiscale
python
https://github.com/ema/pycodicefiscale/blob/4d06a145cdcffe7ee576f2fedaf40e2c6f7692a4/codicefiscale.py#L86-L90
[ "def", "__surname_triplet", "(", "input_string", ")", ":", "consonants", ",", "vowels", "=", "__consonants_and_vowels", "(", "input_string", ")", "return", "__common_triplet", "(", "input_string", ",", "consonants", ",", "vowels", ")" ]
4d06a145cdcffe7ee576f2fedaf40e2c6f7692a4
valid
__name_triplet
__name_triplet(input_string) -> string
codicefiscale.py
def __name_triplet(input_string): """__name_triplet(input_string) -> string""" if input_string == '': # highly unlikely: no first name, like for instance some Indian persons # with only one name on the passport # pylint: disable=W0511 return 'XXX' consonants, vowels = __consonants_and_vowels(input_string) if len(consonants) > 3: return "%s%s%s" % (consonants[0], consonants[2], consonants[3]) return __common_triplet(input_string, consonants, vowels)
def __name_triplet(input_string): """__name_triplet(input_string) -> string""" if input_string == '': # highly unlikely: no first name, like for instance some Indian persons # with only one name on the passport # pylint: disable=W0511 return 'XXX' consonants, vowels = __consonants_and_vowels(input_string) if len(consonants) > 3: return "%s%s%s" % (consonants[0], consonants[2], consonants[3]) return __common_triplet(input_string, consonants, vowels)
[ "__name_triplet", "(", "input_string", ")", "-", ">", "string" ]
ema/pycodicefiscale
python
https://github.com/ema/pycodicefiscale/blob/4d06a145cdcffe7ee576f2fedaf40e2c6f7692a4/codicefiscale.py#L92-L105
[ "def", "__name_triplet", "(", "input_string", ")", ":", "if", "input_string", "==", "''", ":", "# highly unlikely: no first name, like for instance some Indian persons", "# with only one name on the passport", "# pylint: disable=W0511", "return", "'XXX'", "consonants", ",", "vowels", "=", "__consonants_and_vowels", "(", "input_string", ")", "if", "len", "(", "consonants", ")", ">", "3", ":", "return", "\"%s%s%s\"", "%", "(", "consonants", "[", "0", "]", ",", "consonants", "[", "2", "]", ",", "consonants", "[", "3", "]", ")", "return", "__common_triplet", "(", "input_string", ",", "consonants", ",", "vowels", ")" ]
4d06a145cdcffe7ee576f2fedaf40e2c6f7692a4
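The branch for more than three consonants keeps the first, third and fourth, matching the fiscal-code rule for first names; a worked example:

consonants = 'GNFRNC'  # extracted from 'GIANFRANCO'
triplet = "%s%s%s" % (consonants[0], consonants[2], consonants[3])  # 'GFR'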
valid
control_code
``control_code(input_string) -> string`` Computes the control code for the given input string. The expected input_string is the first 15 characters of a fiscal code. eg: control_code('RCCMNL83S18D969') -> 'H'
codicefiscale.py
def control_code(input_string): """``control_code(input_string) -> string`` Computes the control code for the given input string. The expected input_string is the first 15 characters of a fiscal code. eg: control_code('RCCMNL83S18D969') -> 'H' """ assert len(input_string) == 15 # building conversion tables for even and odd characters positions even_controlcode = {} for idx, char in enumerate(string.digits): even_controlcode[char] = idx for idx, char in enumerate(string.ascii_uppercase): even_controlcode[char] = idx values = [ 1, 0, 5, 7, 9, 13, 15, 17, 19, 21, 2, 4, 18, 20, 11, 3, 6, 8, 12, 14, 16, 10, 22, 25, 24, 23 ] odd_controlcode = {} for idx, char in enumerate(string.digits): odd_controlcode[char] = values[idx] for idx, char in enumerate(string.ascii_uppercase): odd_controlcode[char] = values[idx] # computing the code code = 0 for idx, char in enumerate(input_string): if idx % 2 == 0: code += odd_controlcode[char] else: code += even_controlcode[char] return string.ascii_uppercase[code % 26]
def control_code(input_string): """``control_code(input_string) -> string`` Computes the control code for the given input string. The expected input_string is the first 15 characters of a fiscal code. eg: control_code('RCCMNL83S18D969') -> 'H' """ assert len(input_string) == 15 # building conversion tables for even and odd characters positions even_controlcode = {} for idx, char in enumerate(string.digits): even_controlcode[char] = idx for idx, char in enumerate(string.ascii_uppercase): even_controlcode[char] = idx values = [ 1, 0, 5, 7, 9, 13, 15, 17, 19, 21, 2, 4, 18, 20, 11, 3, 6, 8, 12, 14, 16, 10, 22, 25, 24, 23 ] odd_controlcode = {} for idx, char in enumerate(string.digits): odd_controlcode[char] = values[idx] for idx, char in enumerate(string.ascii_uppercase): odd_controlcode[char] = values[idx] # computing the code code = 0 for idx, char in enumerate(input_string): if idx % 2 == 0: code += odd_controlcode[char] else: code += even_controlcode[char] return string.ascii_uppercase[code % 26]
[ "control_code", "(", "input_string", ")", "-", ">", "int" ]
ema/pycodicefiscale
python
https://github.com/ema/pycodicefiscale/blob/4d06a145cdcffe7ee576f2fedaf40e2c6f7692a4/codicefiscale.py#L107-L145
[ "def", "control_code", "(", "input_string", ")", ":", "assert", "len", "(", "input_string", ")", "==", "15", "# building conversion tables for even and odd characters positions", "even_controlcode", "=", "{", "}", "for", "idx", ",", "char", "in", "enumerate", "(", "string", ".", "digits", ")", ":", "even_controlcode", "[", "char", "]", "=", "idx", "for", "idx", ",", "char", "in", "enumerate", "(", "string", ".", "ascii_uppercase", ")", ":", "even_controlcode", "[", "char", "]", "=", "idx", "values", "=", "[", "1", ",", "0", ",", "5", ",", "7", ",", "9", ",", "13", ",", "15", ",", "17", ",", "19", ",", "21", ",", "2", ",", "4", ",", "18", ",", "20", ",", "11", ",", "3", ",", "6", ",", "8", ",", "12", ",", "14", ",", "16", ",", "10", ",", "22", ",", "25", ",", "24", ",", "23", "]", "odd_controlcode", "=", "{", "}", "for", "idx", ",", "char", "in", "enumerate", "(", "string", ".", "digits", ")", ":", "odd_controlcode", "[", "char", "]", "=", "values", "[", "idx", "]", "for", "idx", ",", "char", "in", "enumerate", "(", "string", ".", "ascii_uppercase", ")", ":", "odd_controlcode", "[", "char", "]", "=", "values", "[", "idx", "]", "# computing the code", "code", "=", "0", "for", "idx", ",", "char", "in", "enumerate", "(", "input_string", ")", ":", "if", "idx", "%", "2", "==", "0", ":", "code", "+=", "odd_controlcode", "[", "char", "]", "else", ":", "code", "+=", "even_controlcode", "[", "char", "]", "return", "string", ".", "ascii_uppercase", "[", "code", "%", "26", "]" ]
4d06a145cdcffe7ee576f2fedaf40e2c6f7692a4
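Note that idx % 2 == 0 selects the odd conversion table because the official specification numbers characters from 1 while Python enumerates from 0. The docstring's own example can be checked directly:

assert control_code('RCCMNL83S18D969') == 'H'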
valid
build
``build(surname, name, birthday, sex, municipality) -> string`` Computes the fiscal code for the given person data. eg: build('Rocca', 'Emanuele', datetime.datetime(1983, 11, 18), 'M', 'D969') -> RCCMNL83S18D969H
codicefiscale.py
def build(surname, name, birthday, sex, municipality): """``build(surname, name, birthday, sex, municipality) -> string`` Computes the fiscal code for the given person data. eg: build('Rocca', 'Emanuele', datetime.datetime(1983, 11, 18), 'M', 'D969') -> RCCMNL83S18D969H """ # RCCMNL output = __surname_triplet(surname) + __name_triplet(name) # RCCMNL83 output += str(birthday.year)[2:] # RCCMNL83S output += MONTHSCODE[birthday.month - 1] # RCCMNL83S18 output += "%02d" % (sex.upper() == 'M' and birthday.day or 40 + birthday.day) # RCCMNL83S18D969 output += municipality # RCCMNL83S18D969H output += control_code(output) assert isvalid(output) return output
def build(surname, name, birthday, sex, municipality): """``build(surname, name, birthday, sex, municipality) -> string`` Computes the fiscal code for the given person data. eg: build('Rocca', 'Emanuele', datetime.datetime(1983, 11, 18), 'M', 'D969') -> RCCMNL83S18D969H """ # RCCMNL output = __surname_triplet(surname) + __name_triplet(name) # RCCMNL83 output += str(birthday.year)[2:] # RCCMNL83S output += MONTHSCODE[birthday.month - 1] # RCCMNL83S18 output += "%02d" % (sex.upper() == 'M' and birthday.day or 40 + birthday.day) # RCCMNL83S18D969 output += municipality # RCCMNL83S18D969H output += control_code(output) assert isvalid(output) return output
[ "build", "(", "surname", "name", "birthday", "sex", "municipality", ")", "-", ">", "string" ]
ema/pycodicefiscale
python
https://github.com/ema/pycodicefiscale/blob/4d06a145cdcffe7ee576f2fedaf40e2c6f7692a4/codicefiscale.py#L147-L176
[ "def", "build", "(", "surname", ",", "name", ",", "birthday", ",", "sex", ",", "municipality", ")", ":", "# RCCMNL", "output", "=", "__surname_triplet", "(", "surname", ")", "+", "__name_triplet", "(", "name", ")", "# RCCMNL83", "output", "+=", "str", "(", "birthday", ".", "year", ")", "[", "2", ":", "]", "# RCCMNL83S", "output", "+=", "MONTHSCODE", "[", "birthday", ".", "month", "-", "1", "]", "# RCCMNL83S18", "output", "+=", "\"%02d\"", "%", "(", "sex", ".", "upper", "(", ")", "==", "'M'", "and", "birthday", ".", "day", "or", "40", "+", "birthday", ".", "day", ")", "# RCCMNL83S18D969 ", "output", "+=", "municipality", "# RCCMNL83S18D969H", "output", "+=", "control_code", "(", "output", ")", "assert", "isvalid", "(", "output", ")", "return", "output" ]
4d06a145cdcffe7ee576f2fedaf40e2c6f7692a4
valid
get_birthday
``get_birthday(code) -> string`` Birthday of the person whose fiscal code is 'code', in the format DD-MM-YY. Unfortunately it's not possible to guess the four digit birth year, given that the Italian fiscal code uses only the last two digits (1983 -> 83). Therefore, this function returns a string and not a datetime object. eg: birthday('RCCMNL83S18D969H') -> 18-11-83
codicefiscale.py
def get_birthday(code): """``get_birthday(code) -> string`` Birthday of the person whose fiscal code is 'code', in the format DD-MM-YY. Unfortunately it's not possible to guess the four digit birth year, given that the Italian fiscal code uses only the last two digits (1983 -> 83). Therefore, this function returns a string and not a datetime object. eg: birthday('RCCMNL83S18D969H') -> 18-11-83 """ assert isvalid(code) day = int(code[9:11]) day = day < 32 and day or day - 40 month = MONTHSCODE.index(code[8]) + 1 year = int(code[6:8]) return "%02d-%02d-%02d" % (day, month, year)
def get_birthday(code): """``get_birthday(code) -> string`` Birthday of the person whose fiscal code is 'code', in the format DD-MM-YY. Unfortunately it's not possible to guess the four digit birth year, given that the Italian fiscal code uses only the last two digits (1983 -> 83). Therefore, this function returns a string and not a datetime object. eg: birthday('RCCMNL83S18D969H') -> 18-11-83 """ assert isvalid(code) day = int(code[9:11]) day = day < 32 and day or day - 40 month = MONTHSCODE.index(code[8]) + 1 year = int(code[6:8]) return "%02d-%02d-%02d" % (day, month, year)
[ "get_birthday", "(", "code", ")", "-", ">", "string" ]
ema/pycodicefiscale
python
https://github.com/ema/pycodicefiscale/blob/4d06a145cdcffe7ee576f2fedaf40e2c6f7692a4/codicefiscale.py#L179-L198
[ "def", "get_birthday", "(", "code", ")", ":", "assert", "isvalid", "(", "code", ")", "day", "=", "int", "(", "code", "[", "9", ":", "11", "]", ")", "day", "=", "day", "<", "32", "and", "day", "or", "day", "-", "40", "month", "=", "MONTHSCODE", ".", "index", "(", "code", "[", "8", "]", ")", "+", "1", "year", "=", "int", "(", "code", "[", "6", ":", "8", "]", ")", "return", "\"%02d-%02d-%02d\"", "%", "(", "day", ",", "month", ",", "year", ")" ]
4d06a145cdcffe7ee576f2fedaf40e2c6f7692a4
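The day < 32 and day or day - 40 expression undoes the +40 offset that build() applies for female subjects, so stored day fields of 41-71 decode back to calendar days 1-31:

assert get_birthday('RCCMNL83S18D969H') == '18-11-83'  # male record, day field 18
# a female record with the same birthday would carry 58 (18 + 40) in positions 9-11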
valid
API.get
Pass in an Overpass query in Overpass QL.
overpass/api.py
def get(self, query, responseformat="geojson", verbosity="body", build=True): """Pass in an Overpass query in Overpass QL.""" # Construct full Overpass query if build: full_query = self._construct_ql_query( query, responseformat=responseformat, verbosity=verbosity ) else: full_query = query if self.debug: logging.getLogger().info(query) # Get the response from Overpass r = self._get_from_overpass(full_query) content_type = r.headers.get("content-type") if self.debug: print(content_type) if content_type == "text/csv": result = [] reader = csv.reader(StringIO(r.text), delimiter="\t") for row in reader: result.append(row) return result elif content_type in ("text/xml", "application/xml", "application/osm3s+xml"): return r.text elif content_type == "application/json": response = json.loads(r.text) if not build: return response # Check for valid answer from Overpass. # A valid answer contains an 'elements' key at the root level. if "elements" not in response: raise UnknownOverpassError("Received an invalid answer from Overpass.") # If there is a 'remark' key, it spells trouble. overpass_remark = response.get("remark", None) if overpass_remark and overpass_remark.startswith("runtime error"): raise ServerRuntimeError(overpass_remark) if responseformat != "geojson": return response # construct geojson return self._as_geojson(response["elements"])
def get(self, query, responseformat="geojson", verbosity="body", build=True): """Pass in an Overpass query in Overpass QL.""" # Construct full Overpass query if build: full_query = self._construct_ql_query( query, responseformat=responseformat, verbosity=verbosity ) else: full_query = query if self.debug: logging.getLogger().info(query) # Get the response from Overpass r = self._get_from_overpass(full_query) content_type = r.headers.get("content-type") if self.debug: print(content_type) if content_type == "text/csv": result = [] reader = csv.reader(StringIO(r.text), delimiter="\t") for row in reader: result.append(row) return result elif content_type in ("text/xml", "application/xml", "application/osm3s+xml"): return r.text elif content_type == "application/json": response = json.loads(r.text) if not build: return response # Check for valid answer from Overpass. # A valid answer contains an 'elements' key at the root level. if "elements" not in response: raise UnknownOverpassError("Received an invalid answer from Overpass.") # If there is a 'remark' key, it spells trouble. overpass_remark = response.get("remark", None) if overpass_remark and overpass_remark.startswith("runtime error"): raise ServerRuntimeError(overpass_remark) if responseformat != "geojson": return response # construct geojson return self._as_geojson(response["elements"])
[ "Pass", "in", "an", "Overpass", "query", "in", "Overpass", "QL", "." ]
mvexel/overpass-api-python-wrapper
python
https://github.com/mvexel/overpass-api-python-wrapper/blob/4eea38224bc9259fd017b38ad8683f3fa3777175/overpass/api.py#L62-L109
[ "def", "get", "(", "self", ",", "query", ",", "responseformat", "=", "\"geojson\"", ",", "verbosity", "=", "\"body\"", ",", "build", "=", "True", ")", ":", "# Construct full Overpass query", "if", "build", ":", "full_query", "=", "self", ".", "_construct_ql_query", "(", "query", ",", "responseformat", "=", "responseformat", ",", "verbosity", "=", "verbosity", ")", "else", ":", "full_query", "=", "query", "if", "self", ".", "debug", ":", "logging", ".", "getLogger", "(", ")", ".", "info", "(", "query", ")", "# Get the response from Overpass", "r", "=", "self", ".", "_get_from_overpass", "(", "full_query", ")", "content_type", "=", "r", ".", "headers", ".", "get", "(", "\"content-type\"", ")", "if", "self", ".", "debug", ":", "print", "(", "content_type", ")", "if", "content_type", "==", "\"text/csv\"", ":", "result", "=", "[", "]", "reader", "=", "csv", ".", "reader", "(", "StringIO", "(", "r", ".", "text", ")", ",", "delimiter", "=", "\"\\t\"", ")", "for", "row", "in", "reader", ":", "result", ".", "append", "(", "row", ")", "return", "result", "elif", "content_type", "in", "(", "\"text/xml\"", ",", "\"application/xml\"", ",", "\"application/osm3s+xml\"", ")", ":", "return", "r", ".", "text", "elif", "content_type", "==", "\"application/json\"", ":", "response", "=", "json", ".", "loads", "(", "r", ".", "text", ")", "if", "not", "build", ":", "return", "response", "# Check for valid answer from Overpass.", "# A valid answer contains an 'elements' key at the root level.", "if", "\"elements\"", "not", "in", "response", ":", "raise", "UnknownOverpassError", "(", "\"Received an invalid answer from Overpass.\"", ")", "# If there is a 'remark' key, it spells trouble.", "overpass_remark", "=", "response", ".", "get", "(", "\"remark\"", ",", "None", ")", "if", "overpass_remark", "and", "overpass_remark", ".", "startswith", "(", "\"runtime error\"", ")", ":", "raise", "ServerRuntimeError", "(", "overpass_remark", ")", "if", "responseformat", "is", "not", "\"geojson\"", ":", "return", "response", "# construct geojson", "return", "self", ".", "_as_geojson", "(", "response", "[", "\"elements\"", "]", ")" ]
4eea38224bc9259fd017b38ad8683f3fa3777175
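A typical call, sketched after the wrapper's documented usage (default endpoint and exact GeoJSON shape may vary between releases):

import overpass

api = overpass.API()
# responseformat defaults to "geojson", so this returns a GeoJSON FeatureCollection
response = api.get('node["name"="Salt Lake City"]')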
valid
GeniaTagger.parse
Arguments: - `self`: - `text`:
geniatagger.py
def parse(self, text): """ Arguments: - `self`: - `text`: """ results = list() for oneline in text.split('\n'): self._tagger.stdin.write(oneline+'\n') while True: r = self._tagger.stdout.readline()[:-1] if not r: break results.append(tuple(r.split('\t'))) return results
def parse(self, text): """ Arguments: - `self`: - `text`: """ results = list() for oneline in text.split('\n'): self._tagger.stdin.write(oneline+'\n') while True: r = self._tagger.stdout.readline()[:-1] if not r: break results.append(tuple(r.split('\t'))) return results
[ "Arguments", ":", "-", "self", ":", "-", "text", ":" ]
informationsea/geniatagger-python
python
https://github.com/informationsea/geniatagger-python/blob/0a9d0a0e4ffca22d564950fc46e1f0002eafcf86/geniatagger.py#L25-L42
[ "def", "parse", "(", "self", ",", "text", ")", ":", "results", "=", "list", "(", ")", "for", "oneline", "in", "text", ".", "split", "(", "'\\n'", ")", ":", "self", ".", "_tagger", ".", "stdin", ".", "write", "(", "oneline", "+", "'\\n'", ")", "while", "True", ":", "r", "=", "self", ".", "_tagger", ".", "stdout", ".", "readline", "(", ")", "[", ":", "-", "1", "]", "if", "not", "r", ":", "break", "results", ".", "append", "(", "tuple", "(", "r", ".", "split", "(", "'\\t'", ")", ")", ")", "return", "results" ]
0a9d0a0e4ffca22d564950fc46e1f0002eafcf86
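A usage sketch; the constructor argument (a path to the tagger executable) is an assumption, and each returned tuple mirrors one tab-separated line of GENIA tagger output (word, base form, POS, chunk and named-entity tags):

from geniatagger import GeniaTagger

tagger = GeniaTagger('./geniatagger/geniatagger')  # hypothetical path to the tagger binary
for token in tagger.parse('Inhibition of NF-kappaB activation'):
    print(token)  # one tuple per tab-separated column set of the tagger's output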
valid
create_port
Create a port Create a port which is a connection point of a device (e.g., a VM NIC) to attach to a L2 Neutron network. : param context: neutron api request context : param port: dictionary describing the port, with keys as listed in the RESOURCE_ATTRIBUTE_MAP object in neutron/api/v2/attributes.py. All keys will be populated.
quark/plugin_modules/ports.py
def create_port(context, port): """Create a port Create a port which is a connection point of a device (e.g., a VM NIC) to attach to a L2 Neutron network. : param context: neutron api request context : param port: dictionary describing the port, with keys as listed in the RESOURCE_ATTRIBUTE_MAP object in neutron/api/v2/attributes.py. All keys will be populated. """ LOG.info("create_port for tenant %s" % context.tenant_id) port_attrs = port["port"] admin_only = ["mac_address", "device_owner", "bridge", "admin_state_up", "use_forbidden_mac_range", "network_plugin", "instance_node_id"] utils.filter_body(context, port_attrs, admin_only=admin_only) port_attrs = port["port"] mac_address = utils.pop_param(port_attrs, "mac_address", None) use_forbidden_mac_range = utils.pop_param(port_attrs, "use_forbidden_mac_range", False) segment_id = utils.pop_param(port_attrs, "segment_id") fixed_ips = utils.pop_param(port_attrs, "fixed_ips") if "device_id" not in port_attrs: port_attrs['device_id'] = "" device_id = port_attrs['device_id'] # NOTE(morgabra) This should be instance.node from nova, only needed # for ironic_driver. if "instance_node_id" not in port_attrs: port_attrs['instance_node_id'] = "" instance_node_id = port_attrs['instance_node_id'] net_id = port_attrs["network_id"] port_id = uuidutils.generate_uuid() net = db_api.network_find(context=context, limit=None, sorts=['id'], marker=None, page_reverse=False, fields=None, id=net_id, scope=db_api.ONE) if not net: raise n_exc.NetworkNotFound(net_id=net_id) _raise_if_unauthorized(context, net) # NOTE (Perkins): If a device_id is given, try to prevent multiple ports # from being created for a device already attached to the network if device_id: existing_ports = db_api.port_find(context, network_id=net_id, device_id=device_id, scope=db_api.ONE) if existing_ports: raise n_exc.BadRequest( resource="port", msg="This device is already connected to the " "requested network via another port") # Try to fail early on quotas and save ourselves some db overhead if fixed_ips: quota.QUOTAS.limit_check(context, context.tenant_id, fixed_ips_per_port=len(fixed_ips)) if not STRATEGY.is_provider_network(net_id): # We don't honor segmented networks when they aren't "shared" segment_id = None port_count = db_api.port_count_all(context, network_id=[net_id], tenant_id=[context.tenant_id]) quota.QUOTAS.limit_check( context, context.tenant_id, ports_per_network=port_count + 1) else: if not segment_id: raise q_exc.AmbiguousNetworkId(net_id=net_id) network_plugin = utils.pop_param(port_attrs, "network_plugin") if not network_plugin: network_plugin = net["network_plugin"] port_attrs["network_plugin"] = network_plugin ipam_driver = _get_ipam_driver(net, port=port_attrs) net_driver = _get_net_driver(net, port=port_attrs) # NOTE(morgabra) It's possible that we select a driver different than # the one specified by the network. However, we still might need to use # this for some operations, so we also fetch it and pass it along to # the backend driver we are actually using. base_net_driver = _get_net_driver(net) # TODO(anyone): security groups are not currently supported on port create. 
# Please see JIRA:NCP-801 security_groups = utils.pop_param(port_attrs, "security_groups") if security_groups is not None: raise q_exc.SecurityGroupsNotImplemented() group_ids, security_groups = _make_security_group_list(context, security_groups) quota.QUOTAS.limit_check(context, context.tenant_id, security_groups_per_port=len(group_ids)) addresses = [] backend_port = None with utils.CommandManager().execute() as cmd_mgr: @cmd_mgr.do def _allocate_ips(fixed_ips, net, port_id, segment_id, mac, **kwargs): if fixed_ips: if (STRATEGY.is_provider_network(net_id) and not context.is_admin): raise n_exc.NotAuthorized() ips, subnets = split_and_validate_requested_subnets(context, net_id, segment_id, fixed_ips) kwargs["ip_addresses"] = ips kwargs["subnets"] = subnets ipam_driver.allocate_ip_address( context, addresses, net["id"], port_id, CONF.QUARK.ipam_reuse_after, segment_id=segment_id, mac_address=mac, **kwargs) @cmd_mgr.undo def _allocate_ips_undo(addr, **kwargs): LOG.info("Rolling back IP addresses...") if addresses: for address in addresses: try: with context.session.begin(): ipam_driver.deallocate_ip_address(context, address, **kwargs) except Exception: LOG.exception("Couldn't release IP %s" % address) @cmd_mgr.do def _allocate_mac(net, port_id, mac_address, use_forbidden_mac_range=False, **kwargs): mac = ipam_driver.allocate_mac_address( context, net["id"], port_id, CONF.QUARK.ipam_reuse_after, mac_address=mac_address, use_forbidden_mac_range=use_forbidden_mac_range, **kwargs) return mac @cmd_mgr.undo def _allocate_mac_undo(mac, **kwargs): LOG.info("Rolling back MAC address...") if mac: try: with context.session.begin(): ipam_driver.deallocate_mac_address(context, mac["address"]) except Exception: LOG.exception("Couldn't release MAC %s" % mac) @cmd_mgr.do def _allocate_backend_port(mac, addresses, net, port_id, **kwargs): backend_port = net_driver.create_port( context, net["id"], port_id=port_id, security_groups=group_ids, device_id=device_id, instance_node_id=instance_node_id, mac_address=mac, addresses=addresses, base_net_driver=base_net_driver) _filter_backend_port(backend_port) return backend_port @cmd_mgr.undo def _allocate_back_port_undo(backend_port, **kwargs): LOG.info("Rolling back backend port...") try: backend_port_uuid = None if backend_port: backend_port_uuid = backend_port.get("uuid") net_driver.delete_port(context, backend_port_uuid) except Exception: LOG.exception( "Couldn't rollback backend port %s" % backend_port) @cmd_mgr.do def _allocate_db_port(port_attrs, backend_port, addresses, mac, **kwargs): port_attrs["network_id"] = net["id"] port_attrs["id"] = port_id port_attrs["security_groups"] = security_groups LOG.info("Including extra plugin attrs: %s" % backend_port) port_attrs.update(backend_port) with context.session.begin(): new_port = db_api.port_create( context, addresses=addresses, mac_address=mac["address"], backend_key=backend_port["uuid"], **port_attrs) return new_port @cmd_mgr.undo def _allocate_db_port_undo(new_port, **kwargs): LOG.info("Rolling back database port...") if not new_port: return try: with context.session.begin(): db_api.port_delete(context, new_port) except Exception: LOG.exception( "Couldn't rollback db port %s" % backend_port) # addresses, mac, backend_port, new_port mac = _allocate_mac(net, port_id, mac_address, use_forbidden_mac_range=use_forbidden_mac_range) _allocate_ips(fixed_ips, net, port_id, segment_id, mac) backend_port = _allocate_backend_port(mac, addresses, net, port_id) new_port = _allocate_db_port(port_attrs, backend_port, 
addresses, mac) return v._make_port_dict(new_port)
def create_port(context, port): """Create a port Create a port which is a connection point of a device (e.g., a VM NIC) to attach to a L2 Neutron network. : param context: neutron api request context : param port: dictionary describing the port, with keys as listed in the RESOURCE_ATTRIBUTE_MAP object in neutron/api/v2/attributes.py. All keys will be populated. """ LOG.info("create_port for tenant %s" % context.tenant_id) port_attrs = port["port"] admin_only = ["mac_address", "device_owner", "bridge", "admin_state_up", "use_forbidden_mac_range", "network_plugin", "instance_node_id"] utils.filter_body(context, port_attrs, admin_only=admin_only) port_attrs = port["port"] mac_address = utils.pop_param(port_attrs, "mac_address", None) use_forbidden_mac_range = utils.pop_param(port_attrs, "use_forbidden_mac_range", False) segment_id = utils.pop_param(port_attrs, "segment_id") fixed_ips = utils.pop_param(port_attrs, "fixed_ips") if "device_id" not in port_attrs: port_attrs['device_id'] = "" device_id = port_attrs['device_id'] # NOTE(morgabra) This should be instance.node from nova, only needed # for ironic_driver. if "instance_node_id" not in port_attrs: port_attrs['instance_node_id'] = "" instance_node_id = port_attrs['instance_node_id'] net_id = port_attrs["network_id"] port_id = uuidutils.generate_uuid() net = db_api.network_find(context=context, limit=None, sorts=['id'], marker=None, page_reverse=False, fields=None, id=net_id, scope=db_api.ONE) if not net: raise n_exc.NetworkNotFound(net_id=net_id) _raise_if_unauthorized(context, net) # NOTE (Perkins): If a device_id is given, try to prevent multiple ports # from being created for a device already attached to the network if device_id: existing_ports = db_api.port_find(context, network_id=net_id, device_id=device_id, scope=db_api.ONE) if existing_ports: raise n_exc.BadRequest( resource="port", msg="This device is already connected to the " "requested network via another port") # Try to fail early on quotas and save ourselves some db overhead if fixed_ips: quota.QUOTAS.limit_check(context, context.tenant_id, fixed_ips_per_port=len(fixed_ips)) if not STRATEGY.is_provider_network(net_id): # We don't honor segmented networks when they aren't "shared" segment_id = None port_count = db_api.port_count_all(context, network_id=[net_id], tenant_id=[context.tenant_id]) quota.QUOTAS.limit_check( context, context.tenant_id, ports_per_network=port_count + 1) else: if not segment_id: raise q_exc.AmbiguousNetworkId(net_id=net_id) network_plugin = utils.pop_param(port_attrs, "network_plugin") if not network_plugin: network_plugin = net["network_plugin"] port_attrs["network_plugin"] = network_plugin ipam_driver = _get_ipam_driver(net, port=port_attrs) net_driver = _get_net_driver(net, port=port_attrs) # NOTE(morgabra) It's possible that we select a driver different than # the one specified by the network. However, we still might need to use # this for some operations, so we also fetch it and pass it along to # the backend driver we are actually using. base_net_driver = _get_net_driver(net) # TODO(anyone): security groups are not currently supported on port create. 
# Please see JIRA:NCP-801 security_groups = utils.pop_param(port_attrs, "security_groups") if security_groups is not None: raise q_exc.SecurityGroupsNotImplemented() group_ids, security_groups = _make_security_group_list(context, security_groups) quota.QUOTAS.limit_check(context, context.tenant_id, security_groups_per_port=len(group_ids)) addresses = [] backend_port = None with utils.CommandManager().execute() as cmd_mgr: @cmd_mgr.do def _allocate_ips(fixed_ips, net, port_id, segment_id, mac, **kwargs): if fixed_ips: if (STRATEGY.is_provider_network(net_id) and not context.is_admin): raise n_exc.NotAuthorized() ips, subnets = split_and_validate_requested_subnets(context, net_id, segment_id, fixed_ips) kwargs["ip_addresses"] = ips kwargs["subnets"] = subnets ipam_driver.allocate_ip_address( context, addresses, net["id"], port_id, CONF.QUARK.ipam_reuse_after, segment_id=segment_id, mac_address=mac, **kwargs) @cmd_mgr.undo def _allocate_ips_undo(addr, **kwargs): LOG.info("Rolling back IP addresses...") if addresses: for address in addresses: try: with context.session.begin(): ipam_driver.deallocate_ip_address(context, address, **kwargs) except Exception: LOG.exception("Couldn't release IP %s" % address) @cmd_mgr.do def _allocate_mac(net, port_id, mac_address, use_forbidden_mac_range=False, **kwargs): mac = ipam_driver.allocate_mac_address( context, net["id"], port_id, CONF.QUARK.ipam_reuse_after, mac_address=mac_address, use_forbidden_mac_range=use_forbidden_mac_range, **kwargs) return mac @cmd_mgr.undo def _allocate_mac_undo(mac, **kwargs): LOG.info("Rolling back MAC address...") if mac: try: with context.session.begin(): ipam_driver.deallocate_mac_address(context, mac["address"]) except Exception: LOG.exception("Couldn't release MAC %s" % mac) @cmd_mgr.do def _allocate_backend_port(mac, addresses, net, port_id, **kwargs): backend_port = net_driver.create_port( context, net["id"], port_id=port_id, security_groups=group_ids, device_id=device_id, instance_node_id=instance_node_id, mac_address=mac, addresses=addresses, base_net_driver=base_net_driver) _filter_backend_port(backend_port) return backend_port @cmd_mgr.undo def _allocate_back_port_undo(backend_port, **kwargs): LOG.info("Rolling back backend port...") try: backend_port_uuid = None if backend_port: backend_port_uuid = backend_port.get("uuid") net_driver.delete_port(context, backend_port_uuid) except Exception: LOG.exception( "Couldn't rollback backend port %s" % backend_port) @cmd_mgr.do def _allocate_db_port(port_attrs, backend_port, addresses, mac, **kwargs): port_attrs["network_id"] = net["id"] port_attrs["id"] = port_id port_attrs["security_groups"] = security_groups LOG.info("Including extra plugin attrs: %s" % backend_port) port_attrs.update(backend_port) with context.session.begin(): new_port = db_api.port_create( context, addresses=addresses, mac_address=mac["address"], backend_key=backend_port["uuid"], **port_attrs) return new_port @cmd_mgr.undo def _allocate_db_port_undo(new_port, **kwargs): LOG.info("Rolling back database port...") if not new_port: return try: with context.session.begin(): db_api.port_delete(context, new_port) except Exception: LOG.exception( "Couldn't rollback db port %s" % backend_port) # addresses, mac, backend_port, new_port mac = _allocate_mac(net, port_id, mac_address, use_forbidden_mac_range=use_forbidden_mac_range) _allocate_ips(fixed_ips, net, port_id, segment_id, mac) backend_port = _allocate_backend_port(mac, addresses, net, port_id) new_port = _allocate_db_port(port_attrs, backend_port, 
addresses, mac) return v._make_port_dict(new_port)
[ "Create", "a", "port" ]
openstack/quark
python
https://github.com/openstack/quark/blob/1112e6a66917d3e98e44cb7b33b107fd5a74bb2e/quark/plugin_modules/ports.py#L134-L353
[ "def", "create_port", "(", "context", ",", "port", ")", ":", "LOG", ".", "info", "(", "\"create_port for tenant %s\"", "%", "context", ".", "tenant_id", ")", "port_attrs", "=", "port", "[", "\"port\"", "]", "admin_only", "=", "[", "\"mac_address\"", ",", "\"device_owner\"", ",", "\"bridge\"", ",", "\"admin_state_up\"", ",", "\"use_forbidden_mac_range\"", ",", "\"network_plugin\"", ",", "\"instance_node_id\"", "]", "utils", ".", "filter_body", "(", "context", ",", "port_attrs", ",", "admin_only", "=", "admin_only", ")", "port_attrs", "=", "port", "[", "\"port\"", "]", "mac_address", "=", "utils", ".", "pop_param", "(", "port_attrs", ",", "\"mac_address\"", ",", "None", ")", "use_forbidden_mac_range", "=", "utils", ".", "pop_param", "(", "port_attrs", ",", "\"use_forbidden_mac_range\"", ",", "False", ")", "segment_id", "=", "utils", ".", "pop_param", "(", "port_attrs", ",", "\"segment_id\"", ")", "fixed_ips", "=", "utils", ".", "pop_param", "(", "port_attrs", ",", "\"fixed_ips\"", ")", "if", "\"device_id\"", "not", "in", "port_attrs", ":", "port_attrs", "[", "'device_id'", "]", "=", "\"\"", "device_id", "=", "port_attrs", "[", "'device_id'", "]", "# NOTE(morgabra) This should be instance.node from nova, only needed", "# for ironic_driver.", "if", "\"instance_node_id\"", "not", "in", "port_attrs", ":", "port_attrs", "[", "'instance_node_id'", "]", "=", "\"\"", "instance_node_id", "=", "port_attrs", "[", "'instance_node_id'", "]", "net_id", "=", "port_attrs", "[", "\"network_id\"", "]", "port_id", "=", "uuidutils", ".", "generate_uuid", "(", ")", "net", "=", "db_api", ".", "network_find", "(", "context", "=", "context", ",", "limit", "=", "None", ",", "sorts", "=", "[", "'id'", "]", ",", "marker", "=", "None", ",", "page_reverse", "=", "False", ",", "fields", "=", "None", ",", "id", "=", "net_id", ",", "scope", "=", "db_api", ".", "ONE", ")", "if", "not", "net", ":", "raise", "n_exc", ".", "NetworkNotFound", "(", "net_id", "=", "net_id", ")", "_raise_if_unauthorized", "(", "context", ",", "net", ")", "# NOTE (Perkins): If a device_id is given, try to prevent multiple ports", "# from being created for a device already attached to the network", "if", "device_id", ":", "existing_ports", "=", "db_api", ".", "port_find", "(", "context", ",", "network_id", "=", "net_id", ",", "device_id", "=", "device_id", ",", "scope", "=", "db_api", ".", "ONE", ")", "if", "existing_ports", ":", "raise", "n_exc", ".", "BadRequest", "(", "resource", "=", "\"port\"", ",", "msg", "=", "\"This device is already connected to the \"", "\"requested network via another port\"", ")", "# Try to fail early on quotas and save ourselves some db overhead", "if", "fixed_ips", ":", "quota", ".", "QUOTAS", ".", "limit_check", "(", "context", ",", "context", ".", "tenant_id", ",", "fixed_ips_per_port", "=", "len", "(", "fixed_ips", ")", ")", "if", "not", "STRATEGY", ".", "is_provider_network", "(", "net_id", ")", ":", "# We don't honor segmented networks when they aren't \"shared\"", "segment_id", "=", "None", "port_count", "=", "db_api", ".", "port_count_all", "(", "context", ",", "network_id", "=", "[", "net_id", "]", ",", "tenant_id", "=", "[", "context", ".", "tenant_id", "]", ")", "quota", ".", "QUOTAS", ".", "limit_check", "(", "context", ",", "context", ".", "tenant_id", ",", "ports_per_network", "=", "port_count", "+", "1", ")", "else", ":", "if", "not", "segment_id", ":", "raise", "q_exc", ".", "AmbiguousNetworkId", "(", "net_id", "=", "net_id", ")", "network_plugin", "=", "utils", ".", "pop_param", "(", "port_attrs", 
",", "\"network_plugin\"", ")", "if", "not", "network_plugin", ":", "network_plugin", "=", "net", "[", "\"network_plugin\"", "]", "port_attrs", "[", "\"network_plugin\"", "]", "=", "network_plugin", "ipam_driver", "=", "_get_ipam_driver", "(", "net", ",", "port", "=", "port_attrs", ")", "net_driver", "=", "_get_net_driver", "(", "net", ",", "port", "=", "port_attrs", ")", "# NOTE(morgabra) It's possible that we select a driver different than", "# the one specified by the network. However, we still might need to use", "# this for some operations, so we also fetch it and pass it along to", "# the backend driver we are actually using.", "base_net_driver", "=", "_get_net_driver", "(", "net", ")", "# TODO(anyone): security groups are not currently supported on port create.", "# Please see JIRA:NCP-801", "security_groups", "=", "utils", ".", "pop_param", "(", "port_attrs", ",", "\"security_groups\"", ")", "if", "security_groups", "is", "not", "None", ":", "raise", "q_exc", ".", "SecurityGroupsNotImplemented", "(", ")", "group_ids", ",", "security_groups", "=", "_make_security_group_list", "(", "context", ",", "security_groups", ")", "quota", ".", "QUOTAS", ".", "limit_check", "(", "context", ",", "context", ".", "tenant_id", ",", "security_groups_per_port", "=", "len", "(", "group_ids", ")", ")", "addresses", "=", "[", "]", "backend_port", "=", "None", "with", "utils", ".", "CommandManager", "(", ")", ".", "execute", "(", ")", "as", "cmd_mgr", ":", "@", "cmd_mgr", ".", "do", "def", "_allocate_ips", "(", "fixed_ips", ",", "net", ",", "port_id", ",", "segment_id", ",", "mac", ",", "*", "*", "kwargs", ")", ":", "if", "fixed_ips", ":", "if", "(", "STRATEGY", ".", "is_provider_network", "(", "net_id", ")", "and", "not", "context", ".", "is_admin", ")", ":", "raise", "n_exc", ".", "NotAuthorized", "(", ")", "ips", ",", "subnets", "=", "split_and_validate_requested_subnets", "(", "context", ",", "net_id", ",", "segment_id", ",", "fixed_ips", ")", "kwargs", "[", "\"ip_addresses\"", "]", "=", "ips", "kwargs", "[", "\"subnets\"", "]", "=", "subnets", "ipam_driver", ".", "allocate_ip_address", "(", "context", ",", "addresses", ",", "net", "[", "\"id\"", "]", ",", "port_id", ",", "CONF", ".", "QUARK", ".", "ipam_reuse_after", ",", "segment_id", "=", "segment_id", ",", "mac_address", "=", "mac", ",", "*", "*", "kwargs", ")", "@", "cmd_mgr", ".", "undo", "def", "_allocate_ips_undo", "(", "addr", ",", "*", "*", "kwargs", ")", ":", "LOG", ".", "info", "(", "\"Rolling back IP addresses...\"", ")", "if", "addresses", ":", "for", "address", "in", "addresses", ":", "try", ":", "with", "context", ".", "session", ".", "begin", "(", ")", ":", "ipam_driver", ".", "deallocate_ip_address", "(", "context", ",", "address", ",", "*", "*", "kwargs", ")", "except", "Exception", ":", "LOG", ".", "exception", "(", "\"Couldn't release IP %s\"", "%", "address", ")", "@", "cmd_mgr", ".", "do", "def", "_allocate_mac", "(", "net", ",", "port_id", ",", "mac_address", ",", "use_forbidden_mac_range", "=", "False", ",", "*", "*", "kwargs", ")", ":", "mac", "=", "ipam_driver", ".", "allocate_mac_address", "(", "context", ",", "net", "[", "\"id\"", "]", ",", "port_id", ",", "CONF", ".", "QUARK", ".", "ipam_reuse_after", ",", "mac_address", "=", "mac_address", ",", "use_forbidden_mac_range", "=", "use_forbidden_mac_range", ",", "*", "*", "kwargs", ")", "return", "mac", "@", "cmd_mgr", ".", "undo", "def", "_allocate_mac_undo", "(", "mac", ",", "*", "*", "kwargs", ")", ":", "LOG", ".", "info", "(", "\"Rolling back MAC address...\"", ")", 
"if", "mac", ":", "try", ":", "with", "context", ".", "session", ".", "begin", "(", ")", ":", "ipam_driver", ".", "deallocate_mac_address", "(", "context", ",", "mac", "[", "\"address\"", "]", ")", "except", "Exception", ":", "LOG", ".", "exception", "(", "\"Couldn't release MAC %s\"", "%", "mac", ")", "@", "cmd_mgr", ".", "do", "def", "_allocate_backend_port", "(", "mac", ",", "addresses", ",", "net", ",", "port_id", ",", "*", "*", "kwargs", ")", ":", "backend_port", "=", "net_driver", ".", "create_port", "(", "context", ",", "net", "[", "\"id\"", "]", ",", "port_id", "=", "port_id", ",", "security_groups", "=", "group_ids", ",", "device_id", "=", "device_id", ",", "instance_node_id", "=", "instance_node_id", ",", "mac_address", "=", "mac", ",", "addresses", "=", "addresses", ",", "base_net_driver", "=", "base_net_driver", ")", "_filter_backend_port", "(", "backend_port", ")", "return", "backend_port", "@", "cmd_mgr", ".", "undo", "def", "_allocate_back_port_undo", "(", "backend_port", ",", "*", "*", "kwargs", ")", ":", "LOG", ".", "info", "(", "\"Rolling back backend port...\"", ")", "try", ":", "backend_port_uuid", "=", "None", "if", "backend_port", ":", "backend_port_uuid", "=", "backend_port", ".", "get", "(", "\"uuid\"", ")", "net_driver", ".", "delete_port", "(", "context", ",", "backend_port_uuid", ")", "except", "Exception", ":", "LOG", ".", "exception", "(", "\"Couldn't rollback backend port %s\"", "%", "backend_port", ")", "@", "cmd_mgr", ".", "do", "def", "_allocate_db_port", "(", "port_attrs", ",", "backend_port", ",", "addresses", ",", "mac", ",", "*", "*", "kwargs", ")", ":", "port_attrs", "[", "\"network_id\"", "]", "=", "net", "[", "\"id\"", "]", "port_attrs", "[", "\"id\"", "]", "=", "port_id", "port_attrs", "[", "\"security_groups\"", "]", "=", "security_groups", "LOG", ".", "info", "(", "\"Including extra plugin attrs: %s\"", "%", "backend_port", ")", "port_attrs", ".", "update", "(", "backend_port", ")", "with", "context", ".", "session", ".", "begin", "(", ")", ":", "new_port", "=", "db_api", ".", "port_create", "(", "context", ",", "addresses", "=", "addresses", ",", "mac_address", "=", "mac", "[", "\"address\"", "]", ",", "backend_key", "=", "backend_port", "[", "\"uuid\"", "]", ",", "*", "*", "port_attrs", ")", "return", "new_port", "@", "cmd_mgr", ".", "undo", "def", "_allocate_db_port_undo", "(", "new_port", ",", "*", "*", "kwargs", ")", ":", "LOG", ".", "info", "(", "\"Rolling back database port...\"", ")", "if", "not", "new_port", ":", "return", "try", ":", "with", "context", ".", "session", ".", "begin", "(", ")", ":", "db_api", ".", "port_delete", "(", "context", ",", "new_port", ")", "except", "Exception", ":", "LOG", ".", "exception", "(", "\"Couldn't rollback db port %s\"", "%", "backend_port", ")", "# addresses, mac, backend_port, new_port", "mac", "=", "_allocate_mac", "(", "net", ",", "port_id", ",", "mac_address", ",", "use_forbidden_mac_range", "=", "use_forbidden_mac_range", ")", "_allocate_ips", "(", "fixed_ips", ",", "net", ",", "port_id", ",", "segment_id", ",", "mac", ")", "backend_port", "=", "_allocate_backend_port", "(", "mac", ",", "addresses", ",", "net", ",", "port_id", ")", "new_port", "=", "_allocate_db_port", "(", "port_attrs", ",", "backend_port", ",", "addresses", ",", "mac", ")", "return", "v", ".", "_make_port_dict", "(", "new_port", ")" ]
1112e6a66917d3e98e44cb7b33b107fd5a74bb2e
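For reference, a minimal request body consistent with the attribute handling above (all IDs are placeholders; admin-only keys such as mac_address are stripped for non-admin callers by filter_body):

port_body = {
    "port": {
        "network_id": "<network uuid>",                 # required; must name an existing network
        "device_id": "<instance uuid>",                 # optional; duplicate attachments are rejected
        "fixed_ips": [{"subnet_id": "<subnet uuid>"}],  # optional; checked against quota first
    }
}
# new_port = create_port(context, port_body)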
valid
update_port
Update values of a port. : param context: neutron api request context : param id: UUID representing the port to update. : param port: dictionary with keys indicating fields to update. valid keys are those that have a value of True for 'allow_put' as listed in the RESOURCE_ATTRIBUTE_MAP object in neutron/api/v2/attributes.py.
quark/plugin_modules/ports.py
def update_port(context, id, port): """Update values of a port. :param context: neutron api request context :param id: UUID representing the port to update. :param port: dictionary with keys indicating fields to update. valid keys are those that have a value of True for 'allow_put' as listed in the RESOURCE_ATTRIBUTE_MAP object in neutron/api/v2/attributes.py. """ LOG.info("update_port %s for tenant %s" % (id, context.tenant_id)) port_db = db_api.port_find(context, id=id, scope=db_api.ONE) if not port_db: raise n_exc.PortNotFound(port_id=id) port_dict = port["port"] fixed_ips = port_dict.pop("fixed_ips", None) admin_only = ["mac_address", "device_owner", "bridge", "admin_state_up", "device_id"] always_filter = ["network_id", "backend_key", "network_plugin"] utils.filter_body(context, port_dict, admin_only=admin_only, always_filter=always_filter) # Pre-check the requested fixed_ips before making too many db trips. # Note that this is the only check we need, since this call replaces # the entirety of the IP addresses document if fixed_ips are provided. if fixed_ips: quota.QUOTAS.limit_check(context, context.tenant_id, fixed_ips_per_port=len(fixed_ips)) new_security_groups = utils.pop_param(port_dict, "security_groups") if new_security_groups is not None: if (Capabilities.TENANT_NETWORK_SG not in CONF.QUARK.environment_capabilities): if not STRATEGY.is_provider_network(port_db["network_id"]): raise q_exc.TenantNetworkSecurityGroupRulesNotEnabled() if new_security_groups is not None and not port_db["device_id"]: raise q_exc.SecurityGroupsRequireDevice() group_ids, security_group_mods = _make_security_group_list( context, new_security_groups) quota.QUOTAS.limit_check(context, context.tenant_id, security_groups_per_port=len(group_ids)) if fixed_ips is not None: # NOTE(mdietz): we want full control over IPAM since # we're allocating by subnet instead of # network. ipam_driver = ipam.IPAM_REGISTRY.get_strategy( ipam.QuarkIpamANY.get_name()) addresses, subnet_ids = [], [] ip_addresses = {} for fixed_ip in fixed_ips: subnet_id = fixed_ip.get("subnet_id") ip_address = fixed_ip.get("ip_address") if not (subnet_id or ip_address): raise n_exc.BadRequest( resource="fixed_ips", msg="subnet_id or ip_address required") if ip_address and not subnet_id: raise n_exc.BadRequest( resource="fixed_ips", msg="subnet_id required for ip_address allocation") if subnet_id and ip_address: ip_netaddr = None try: ip_netaddr = netaddr.IPAddress(ip_address).ipv6() except netaddr.AddrFormatError: raise n_exc.InvalidInput( error_message="Invalid format provided for ip_address") ip_addresses[ip_netaddr] = subnet_id else: subnet_ids.append(subnet_id) port_ips = set([netaddr.IPAddress(int(a["address"])) for a in port_db["ip_addresses"]]) new_ips = set([a for a in ip_addresses.keys()]) ips_to_allocate = list(new_ips - port_ips) ips_to_deallocate = list(port_ips - new_ips) for ip in ips_to_allocate: if ip in ip_addresses: # NOTE: Fix for RM10187 - we were losing the list of IPs if # more than one IP was to be allocated. Track an # aggregate list instead, and add it to the running total # after each allocate allocated = [] ipam_driver.allocate_ip_address( context, allocated, port_db["network_id"], port_db["id"], reuse_after=None, ip_addresses=[ip], subnets=[ip_addresses[ip]]) addresses.extend(allocated) for ip in ips_to_deallocate: ipam_driver.deallocate_ips_by_port( context, port_db, ip_address=ip) for subnet_id in subnet_ids: ipam_driver.allocate_ip_address( context, addresses, port_db["network_id"], port_db["id"], reuse_after=CONF.QUARK.ipam_reuse_after, subnets=[subnet_id]) # Need to return all existing addresses and the new ones if addresses: port_dict["addresses"] = port_db["ip_addresses"] port_dict["addresses"].extend(addresses) # NOTE(morgabra) Updating network_plugin on port objects is explicitly # disallowed in the api, so we use whatever exists in the db. net_driver = _get_net_driver(port_db.network, port=port_db) base_net_driver = _get_net_driver(port_db.network) # TODO(anyone): What do we want to have happen here if this fails? Is it # ok to continue to keep the IPs but fail to apply security # groups? Is there a clean way to have a multi-status? Since # we're in a beta-y status, I'm going to let this sit for # a future patch where we have time to solve it well. kwargs = {} if new_security_groups is not None: # TODO(anyone): this is kind of silly (when testing), because it will # modify the incoming dict. Probably should be a copy or # something. kwargs["security_groups"] = security_group_mods net_driver.update_port(context, port_id=port_db["backend_key"], mac_address=port_db["mac_address"], device_id=port_db["device_id"], base_net_driver=base_net_driver, **kwargs) port_dict["security_groups"] = security_group_mods with context.session.begin(): port = db_api.port_update(context, port_db, **port_dict) # NOTE(mdietz): fix for issue 112, we wanted the IPs to be in # allocated_at order, so get a fresh object every time if port_db in context.session: context.session.expunge(port_db) port_db = db_api.port_find(context, id=id, scope=db_api.ONE) return v._make_port_dict(port_db)
def update_port(context, id, port): """Update values of a port. :param context: neutron api request context :param id: UUID representing the port to update. :param port: dictionary with keys indicating fields to update. valid keys are those that have a value of True for 'allow_put' as listed in the RESOURCE_ATTRIBUTE_MAP object in neutron/api/v2/attributes.py. """ LOG.info("update_port %s for tenant %s" % (id, context.tenant_id)) port_db = db_api.port_find(context, id=id, scope=db_api.ONE) if not port_db: raise n_exc.PortNotFound(port_id=id) port_dict = port["port"] fixed_ips = port_dict.pop("fixed_ips", None) admin_only = ["mac_address", "device_owner", "bridge", "admin_state_up", "device_id"] always_filter = ["network_id", "backend_key", "network_plugin"] utils.filter_body(context, port_dict, admin_only=admin_only, always_filter=always_filter) # Pre-check the requested fixed_ips before making too many db trips. # Note that this is the only check we need, since this call replaces # the entirety of the IP addresses document if fixed_ips are provided. if fixed_ips: quota.QUOTAS.limit_check(context, context.tenant_id, fixed_ips_per_port=len(fixed_ips)) new_security_groups = utils.pop_param(port_dict, "security_groups") if new_security_groups is not None: if (Capabilities.TENANT_NETWORK_SG not in CONF.QUARK.environment_capabilities): if not STRATEGY.is_provider_network(port_db["network_id"]): raise q_exc.TenantNetworkSecurityGroupRulesNotEnabled() if new_security_groups is not None and not port_db["device_id"]: raise q_exc.SecurityGroupsRequireDevice() group_ids, security_group_mods = _make_security_group_list( context, new_security_groups) quota.QUOTAS.limit_check(context, context.tenant_id, security_groups_per_port=len(group_ids)) if fixed_ips is not None: # NOTE(mdietz): we want full control over IPAM since # we're allocating by subnet instead of # network. ipam_driver = ipam.IPAM_REGISTRY.get_strategy( ipam.QuarkIpamANY.get_name()) addresses, subnet_ids = [], [] ip_addresses = {} for fixed_ip in fixed_ips: subnet_id = fixed_ip.get("subnet_id") ip_address = fixed_ip.get("ip_address") if not (subnet_id or ip_address): raise n_exc.BadRequest( resource="fixed_ips", msg="subnet_id or ip_address required") if ip_address and not subnet_id: raise n_exc.BadRequest( resource="fixed_ips", msg="subnet_id required for ip_address allocation") if subnet_id and ip_address: ip_netaddr = None try: ip_netaddr = netaddr.IPAddress(ip_address).ipv6() except netaddr.AddrFormatError: raise n_exc.InvalidInput( error_message="Invalid format provided for ip_address") ip_addresses[ip_netaddr] = subnet_id else: subnet_ids.append(subnet_id) port_ips = set([netaddr.IPAddress(int(a["address"])) for a in port_db["ip_addresses"]]) new_ips = set([a for a in ip_addresses.keys()]) ips_to_allocate = list(new_ips - port_ips) ips_to_deallocate = list(port_ips - new_ips) for ip in ips_to_allocate: if ip in ip_addresses: # NOTE: Fix for RM10187 - we were losing the list of IPs if # more than one IP was to be allocated. Track an # aggregate list instead, and add it to the running total # after each allocate allocated = [] ipam_driver.allocate_ip_address( context, allocated, port_db["network_id"], port_db["id"], reuse_after=None, ip_addresses=[ip], subnets=[ip_addresses[ip]]) addresses.extend(allocated) for ip in ips_to_deallocate: ipam_driver.deallocate_ips_by_port( context, port_db, ip_address=ip) for subnet_id in subnet_ids: ipam_driver.allocate_ip_address( context, addresses, port_db["network_id"], port_db["id"], reuse_after=CONF.QUARK.ipam_reuse_after, subnets=[subnet_id]) # Need to return all existing addresses and the new ones if addresses: port_dict["addresses"] = port_db["ip_addresses"] port_dict["addresses"].extend(addresses) # NOTE(morgabra) Updating network_plugin on port objects is explicitly # disallowed in the api, so we use whatever exists in the db. net_driver = _get_net_driver(port_db.network, port=port_db) base_net_driver = _get_net_driver(port_db.network) # TODO(anyone): What do we want to have happen here if this fails? Is it # ok to continue to keep the IPs but fail to apply security # groups? Is there a clean way to have a multi-status? Since # we're in a beta-y status, I'm going to let this sit for # a future patch where we have time to solve it well. kwargs = {} if new_security_groups is not None: # TODO(anyone): this is kind of silly (when testing), because it will # modify the incoming dict. Probably should be a copy or # something. kwargs["security_groups"] = security_group_mods net_driver.update_port(context, port_id=port_db["backend_key"], mac_address=port_db["mac_address"], device_id=port_db["device_id"], base_net_driver=base_net_driver, **kwargs) port_dict["security_groups"] = security_group_mods with context.session.begin(): port = db_api.port_update(context, port_db, **port_dict) # NOTE(mdietz): fix for issue 112, we wanted the IPs to be in # allocated_at order, so get a fresh object every time if port_db in context.session: context.session.expunge(port_db) port_db = db_api.port_find(context, id=id, scope=db_api.ONE) return v._make_port_dict(port_db)
[ "Update", "values", "of", "a", "port", "." ]
openstack/quark
python
https://github.com/openstack/quark/blob/1112e6a66917d3e98e44cb7b33b107fd5a74bb2e/quark/plugin_modules/ports.py#L357-L505
[ "def", "update_port", "(", "context", ",", "id", ",", "port", ")", ":", "LOG", ".", "info", "(", "\"update_port %s for tenant %s\"", "%", "(", "id", ",", "context", ".", "tenant_id", ")", ")", "port_db", "=", "db_api", ".", "port_find", "(", "context", ",", "id", "=", "id", ",", "scope", "=", "db_api", ".", "ONE", ")", "if", "not", "port_db", ":", "raise", "n_exc", ".", "PortNotFound", "(", "port_id", "=", "id", ")", "port_dict", "=", "port", "[", "\"port\"", "]", "fixed_ips", "=", "port_dict", ".", "pop", "(", "\"fixed_ips\"", ",", "None", ")", "admin_only", "=", "[", "\"mac_address\"", ",", "\"device_owner\"", ",", "\"bridge\"", ",", "\"admin_state_up\"", ",", "\"device_id\"", "]", "always_filter", "=", "[", "\"network_id\"", ",", "\"backend_key\"", ",", "\"network_plugin\"", "]", "utils", ".", "filter_body", "(", "context", ",", "port_dict", ",", "admin_only", "=", "admin_only", ",", "always_filter", "=", "always_filter", ")", "# Pre-check the requested fixed_ips before making too many db trips.", "# Note that this is the only check we need, since this call replaces", "# the entirety of the IP addresses document if fixed_ips are provided.", "if", "fixed_ips", ":", "quota", ".", "QUOTAS", ".", "limit_check", "(", "context", ",", "context", ".", "tenant_id", ",", "fixed_ips_per_port", "=", "len", "(", "fixed_ips", ")", ")", "new_security_groups", "=", "utils", ".", "pop_param", "(", "port_dict", ",", "\"security_groups\"", ")", "if", "new_security_groups", "is", "not", "None", ":", "if", "(", "Capabilities", ".", "TENANT_NETWORK_SG", "not", "in", "CONF", ".", "QUARK", ".", "environment_capabilities", ")", ":", "if", "not", "STRATEGY", ".", "is_provider_network", "(", "port_db", "[", "\"network_id\"", "]", ")", ":", "raise", "q_exc", ".", "TenantNetworkSecurityGroupRulesNotEnabled", "(", ")", "if", "new_security_groups", "is", "not", "None", "and", "not", "port_db", "[", "\"device_id\"", "]", ":", "raise", "q_exc", ".", "SecurityGroupsRequireDevice", "(", ")", "group_ids", ",", "security_group_mods", "=", "_make_security_group_list", "(", "context", ",", "new_security_groups", ")", "quota", ".", "QUOTAS", ".", "limit_check", "(", "context", ",", "context", ".", "tenant_id", ",", "security_groups_per_port", "=", "len", "(", "group_ids", ")", ")", "if", "fixed_ips", "is", "not", "None", ":", "# NOTE(mdietz): we want full control over IPAM since", "# we're allocating by subnet instead of", "# network.", "ipam_driver", "=", "ipam", ".", "IPAM_REGISTRY", ".", "get_strategy", "(", "ipam", ".", "QuarkIpamANY", ".", "get_name", "(", ")", ")", "addresses", ",", "subnet_ids", "=", "[", "]", ",", "[", "]", "ip_addresses", "=", "{", "}", "for", "fixed_ip", "in", "fixed_ips", ":", "subnet_id", "=", "fixed_ip", ".", "get", "(", "\"subnet_id\"", ")", "ip_address", "=", "fixed_ip", ".", "get", "(", "\"ip_address\"", ")", "if", "not", "(", "subnet_id", "or", "ip_address", ")", ":", "raise", "n_exc", ".", "BadRequest", "(", "resource", "=", "\"fixed_ips\"", ",", "msg", "=", "\"subnet_id or ip_address required\"", ")", "if", "ip_address", "and", "not", "subnet_id", ":", "raise", "n_exc", ".", "BadRequest", "(", "resource", "=", "\"fixed_ips\"", ",", "msg", "=", "\"subnet_id required for ip_address allocation\"", ")", "if", "subnet_id", "and", "ip_address", ":", "ip_netaddr", "=", "None", "try", ":", "ip_netaddr", "=", "netaddr", ".", "IPAddress", "(", "ip_address", ")", ".", "ipv6", "(", ")", "except", "netaddr", ".", "AddrFormatError", ":", "raise", "n_exc", ".", "InvalidInput", "(", "error_message", "=", 
"\"Invalid format provided for ip_address\"", ")", "ip_addresses", "[", "ip_netaddr", "]", "=", "subnet_id", "else", ":", "subnet_ids", ".", "append", "(", "subnet_id", ")", "port_ips", "=", "set", "(", "[", "netaddr", ".", "IPAddress", "(", "int", "(", "a", "[", "\"address\"", "]", ")", ")", "for", "a", "in", "port_db", "[", "\"ip_addresses\"", "]", "]", ")", "new_ips", "=", "set", "(", "[", "a", "for", "a", "in", "ip_addresses", ".", "keys", "(", ")", "]", ")", "ips_to_allocate", "=", "list", "(", "new_ips", "-", "port_ips", ")", "ips_to_deallocate", "=", "list", "(", "port_ips", "-", "new_ips", ")", "for", "ip", "in", "ips_to_allocate", ":", "if", "ip", "in", "ip_addresses", ":", "# NOTE: Fix for RM10187 - we were losing the list of IPs if", "# more than one IP was to be allocated. Track an", "# aggregate list instead, and add it to the running total", "# after each allocate", "allocated", "=", "[", "]", "ipam_driver", ".", "allocate_ip_address", "(", "context", ",", "allocated", ",", "port_db", "[", "\"network_id\"", "]", ",", "port_db", "[", "\"id\"", "]", ",", "reuse_after", "=", "None", ",", "ip_addresses", "=", "[", "ip", "]", ",", "subnets", "=", "[", "ip_addresses", "[", "ip", "]", "]", ")", "addresses", ".", "extend", "(", "allocated", ")", "for", "ip", "in", "ips_to_deallocate", ":", "ipam_driver", ".", "deallocate_ips_by_port", "(", "context", ",", "port_db", ",", "ip_address", "=", "ip", ")", "for", "subnet_id", "in", "subnet_ids", ":", "ipam_driver", ".", "allocate_ip_address", "(", "context", ",", "addresses", ",", "port_db", "[", "\"network_id\"", "]", ",", "port_db", "[", "\"id\"", "]", ",", "reuse_after", "=", "CONF", ".", "QUARK", ".", "ipam_reuse_after", ",", "subnets", "=", "[", "subnet_id", "]", ")", "# Need to return all existing addresses and the new ones", "if", "addresses", ":", "port_dict", "[", "\"addresses\"", "]", "=", "port_db", "[", "\"ip_addresses\"", "]", "port_dict", "[", "\"addresses\"", "]", ".", "extend", "(", "addresses", ")", "# NOTE(morgabra) Updating network_plugin on port objects is explicitly", "# disallowed in the api, so we use whatever exists in the db.", "net_driver", "=", "_get_net_driver", "(", "port_db", ".", "network", ",", "port", "=", "port_db", ")", "base_net_driver", "=", "_get_net_driver", "(", "port_db", ".", "network", ")", "# TODO(anyone): What do we want to have happen here if this fails? Is it", "# ok to continue to keep the IPs but fail to apply security", "# groups? Is there a clean way to have a multi-status? Since", "# we're in a beta-y status, I'm going to let this sit for", "# a future patch where we have time to solve it well.", "kwargs", "=", "{", "}", "if", "new_security_groups", "is", "not", "None", ":", "# TODO(anyone): this is kind of silly (when testing), because it will", "# modify the incoming dict. 
Probably should be a copy or", "# something.", "kwargs", "[", "\"security_groups\"", "]", "=", "security_group_mods", "net_driver", ".", "update_port", "(", "context", ",", "port_id", "=", "port_db", "[", "\"backend_key\"", "]", ",", "mac_address", "=", "port_db", "[", "\"mac_address\"", "]", ",", "device_id", "=", "port_db", "[", "\"device_id\"", "]", ",", "base_net_driver", "=", "base_net_driver", ",", "*", "*", "kwargs", ")", "port_dict", "[", "\"security_groups\"", "]", "=", "security_group_mods", "with", "context", ".", "session", ".", "begin", "(", ")", ":", "port", "=", "db_api", ".", "port_update", "(", "context", ",", "port_db", ",", "*", "*", "port_dict", ")", "# NOTE(mdietz): fix for issue 112, we wanted the IPs to be in", "# allocated_at order, so get a fresh object every time", "if", "port_db", "in", "context", ".", "session", ":", "context", ".", "session", ".", "expunge", "(", "port_db", ")", "port_db", "=", "db_api", ".", "port_find", "(", "context", ",", "id", "=", "id", ",", "scope", "=", "db_api", ".", "ONE", ")", "return", "v", ".", "_make_port_dict", "(", "port_db", ")" ]
1112e6a66917d3e98e44cb7b33b107fd5a74bb2e
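The fixed_ips handling in update_port reduces to set arithmetic over normalized addresses; netaddr.IPAddress(...).ipv6() maps IPv4 into IPv6 so requested and existing entries compare consistently. A small worked example (addresses hypothetical):

import netaddr

existing = set([netaddr.IPAddress("10.0.0.5").ipv6()])       # already on the port
requested = set([netaddr.IPAddress("10.0.0.5").ipv6(),
                 netaddr.IPAddress("10.0.0.9").ipv6()])      # from the fixed_ips body
ips_to_allocate = list(requested - existing)    # [IPAddress('::ffff:10.0.0.9')]
ips_to_deallocate = list(existing - requested)  # [] -- 10.0.0.5 is kept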
valid
get_port
Retrieve a port. :param context: neutron api request context :param id: UUID representing the port to fetch. :param fields: a list of strings that are valid keys in a port dictionary as listed in the RESOURCE_ATTRIBUTE_MAP object in neutron/api/v2/attributes.py. Only these fields will be returned.
quark/plugin_modules/ports.py
def get_port(context, id, fields=None): """Retrieve a port. :param context: neutron api request context :param id: UUID representing the port to fetch. :param fields: a list of strings that are valid keys in a port dictionary as listed in the RESOURCE_ATTRIBUTE_MAP object in neutron/api/v2/attributes.py. Only these fields will be returned. """ LOG.info("get_port %s for tenant %s fields %s" % (id, context.tenant_id, fields)) results = db_api.port_find(context, id=id, fields=fields, scope=db_api.ONE) if not results: raise n_exc.PortNotFound(port_id=id) return v._make_port_dict(results)
def get_port(context, id, fields=None): """Retrieve a port. :param context: neutron api request context :param id: UUID representing the port to fetch. :param fields: a list of strings that are valid keys in a port dictionary as listed in the RESOURCE_ATTRIBUTE_MAP object in neutron/api/v2/attributes.py. Only these fields will be returned. """ LOG.info("get_port %s for tenant %s fields %s" % (id, context.tenant_id, fields)) results = db_api.port_find(context, id=id, fields=fields, scope=db_api.ONE) if not results: raise n_exc.PortNotFound(port_id=id) return v._make_port_dict(results)
[ "Retrieve", "a", "port", "." ]
openstack/quark
python
https://github.com/openstack/quark/blob/1112e6a66917d3e98e44cb7b33b107fd5a74bb2e/quark/plugin_modules/ports.py#L509-L527
[ "def", "get_port", "(", "context", ",", "id", ",", "fields", "=", "None", ")", ":", "LOG", ".", "info", "(", "\"get_port %s for tenant %s fields %s\"", "%", "(", "id", ",", "context", ".", "tenant_id", ",", "fields", ")", ")", "results", "=", "db_api", ".", "port_find", "(", "context", ",", "id", "=", "id", ",", "fields", "=", "fields", ",", "scope", "=", "db_api", ".", "ONE", ")", "if", "not", "results", ":", "raise", "n_exc", ".", "PortNotFound", "(", "port_id", "=", "id", ")", "return", "v", ".", "_make_port_dict", "(", "results", ")" ]
1112e6a66917d3e98e44cb7b33b107fd5a74bb2e
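A brief usage sketch for get_port (identifiers and the exception import are assumptions): fields trims the returned dictionary, and a miss surfaces as PortNotFound rather than an empty result.

from neutron_lib import exceptions as n_exc  # assumed to match the module's n_exc

try:
    port = get_port(context, "port-uuid", fields=["id", "mac_address"])
except n_exc.PortNotFound:
    port = None  # the caller decides how to treat a missing port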
valid
get_ports
Retrieve a list of ports. The contents of the list depends on the identity of the user making the request (as indicated by the context) as well as any filters. :param context: neutron api request context :param filters: a dictionary with keys that are valid keys for a port as listed in the RESOURCE_ATTRIBUTE_MAP object in neutron/api/v2/attributes.py. Values in this dictionary are an iterable containing values that will be used for an exact match comparison for that value. Each result returned by this function will have matched one of the values for each key in filters. :param fields: a list of strings that are valid keys in a port dictionary as listed in the RESOURCE_ATTRIBUTE_MAP object in neutron/api/v2/attributes.py. Only these fields will be returned.
quark/plugin_modules/ports.py
def get_ports(context, limit=None, sorts=['id'], marker=None, page_reverse=False, filters=None, fields=None): """Retrieve a list of ports. The contents of the list depends on the identity of the user making the request (as indicated by the context) as well as any filters. :param context: neutron api request context :param filters: a dictionary with keys that are valid keys for a port as listed in the RESOURCE_ATTRIBUTE_MAP object in neutron/api/v2/attributes.py. Values in this dictionary are an iterable containing values that will be used for an exact match comparison for that value. Each result returned by this function will have matched one of the values for each key in filters. :param fields: a list of strings that are valid keys in a port dictionary as listed in the RESOURCE_ATTRIBUTE_MAP object in neutron/api/v2/attributes.py. Only these fields will be returned. """ LOG.info("get_ports for tenant %s filters %s fields %s" % (context.tenant_id, filters, fields)) if filters is None: filters = {} if "ip_address" in filters: if not context.is_admin: raise n_exc.NotAuthorized() ips = [] try: ips = [netaddr.IPAddress(ip) for ip in filters.pop("ip_address")] except netaddr.AddrFormatError: raise n_exc.InvalidInput( error_message="Invalid format provided for ip_address") query = db_api.port_find_by_ip_address(context, ip_address=ips, scope=db_api.ALL, **filters) ports = [] for ip in query: ports.extend(ip.ports) else: ports = db_api.port_find(context, limit, sorts, marker, fields=fields, join_security_groups=True, **filters) return v._make_ports_list(ports, fields)
def get_ports(context, limit=None, sorts=['id'], marker=None, page_reverse=False, filters=None, fields=None): """Retrieve a list of ports. The contents of the list depends on the identity of the user making the request (as indicated by the context) as well as any filters. :param context: neutron api request context :param filters: a dictionary with keys that are valid keys for a port as listed in the RESOURCE_ATTRIBUTE_MAP object in neutron/api/v2/attributes.py. Values in this dictionary are an iterable containing values that will be used for an exact match comparison for that value. Each result returned by this function will have matched one of the values for each key in filters. :param fields: a list of strings that are valid keys in a port dictionary as listed in the RESOURCE_ATTRIBUTE_MAP object in neutron/api/v2/attributes.py. Only these fields will be returned. """ LOG.info("get_ports for tenant %s filters %s fields %s" % (context.tenant_id, filters, fields)) if filters is None: filters = {} if "ip_address" in filters: if not context.is_admin: raise n_exc.NotAuthorized() ips = [] try: ips = [netaddr.IPAddress(ip) for ip in filters.pop("ip_address")] except netaddr.AddrFormatError: raise n_exc.InvalidInput( error_message="Invalid format provided for ip_address") query = db_api.port_find_by_ip_address(context, ip_address=ips, scope=db_api.ALL, **filters) ports = [] for ip in query: ports.extend(ip.ports) else: ports = db_api.port_find(context, limit, sorts, marker, fields=fields, join_security_groups=True, **filters) return v._make_ports_list(ports, fields)
[ "Retrieve", "a", "list", "of", "ports", "." ]
openstack/quark
python
https://github.com/openstack/quark/blob/1112e6a66917d3e98e44cb7b33b107fd5a74bb2e/quark/plugin_modules/ports.py#L531-L574
[ "def", "get_ports", "(", "context", ",", "limit", "=", "None", ",", "sorts", "=", "[", "'id'", "]", ",", "marker", "=", "None", ",", "page_reverse", "=", "False", ",", "filters", "=", "None", ",", "fields", "=", "None", ")", ":", "LOG", ".", "info", "(", "\"get_ports for tenant %s filters %s fields %s\"", "%", "(", "context", ".", "tenant_id", ",", "filters", ",", "fields", ")", ")", "if", "filters", "is", "None", ":", "filters", "=", "{", "}", "if", "\"ip_address\"", "in", "filters", ":", "if", "not", "context", ".", "is_admin", ":", "raise", "n_exc", ".", "NotAuthorized", "(", ")", "ips", "=", "[", "]", "try", ":", "ips", "=", "[", "netaddr", ".", "IPAddress", "(", "ip", ")", "for", "ip", "in", "filters", ".", "pop", "(", "\"ip_address\"", ")", "]", "except", "netaddr", ".", "AddrFormatError", ":", "raise", "n_exc", ".", "InvalidInput", "(", "error_message", "=", "\"Invalid format provided for ip_address\"", ")", "query", "=", "db_api", ".", "port_find_by_ip_address", "(", "context", ",", "ip_address", "=", "ips", ",", "scope", "=", "db_api", ".", "ALL", ",", "*", "*", "filters", ")", "ports", "=", "[", "]", "for", "ip", "in", "query", ":", "ports", ".", "extend", "(", "ip", ".", "ports", ")", "else", ":", "ports", "=", "db_api", ".", "port_find", "(", "context", ",", "limit", ",", "sorts", ",", "marker", ",", "fields", "=", "fields", ",", "join_security_groups", "=", "True", ",", "*", "*", "filters", ")", "return", "v", ".", "_make_ports_list", "(", "ports", ",", "fields", ")" ]
1112e6a66917d3e98e44cb7b33b107fd5a74bb2e
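Note the two branches in get_ports: an ip_address filter is admin-only and resolves ports via the matching IP rows, while everything else goes straight to port_find. A hypothetical call exercising the admin branch:

filters = {"ip_address": ["10.0.0.5", "2001:db8::1"],  # triggers the admin-only path
           "device_id": ["server-uuid"]}               # passed through as an exact match
ports = get_ports(admin_context, filters=filters, fields=["id", "network_id"])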
valid
get_ports_count
Return the number of ports. The result depends on the identity of the user making the request (as indicated by the context) as well as any filters. :param context: neutron api request context :param filters: a dictionary with keys that are valid keys for a port as listed in the RESOURCE_ATTRIBUTE_MAP object in neutron/api/v2/attributes.py. Values in this dictionary are an iterable containing values that will be used for an exact match comparison for that value. Each result returned by this function will have matched one of the values for each key in filters. NOTE: this method is optional, as it was not part of the originally defined plugin API.
quark/plugin_modules/ports.py
def get_ports_count(context, filters=None): """Return the number of ports. The result depends on the identity of the user making the request (as indicated by the context) as well as any filters. :param context: neutron api request context :param filters: a dictionary with keys that are valid keys for a port as listed in the RESOURCE_ATTRIBUTE_MAP object in neutron/api/v2/attributes.py. Values in this dictionary are an iterable containing values that will be used for an exact match comparison for that value. Each result returned by this function will have matched one of the values for each key in filters. NOTE: this method is optional, as it was not part of the originally defined plugin API. """ LOG.info("get_ports_count for tenant %s filters %s" % (context.tenant_id, filters)) return db_api.port_count_all(context, join_security_groups=True, **filters)
def get_ports_count(context, filters=None): """Return the number of ports. The result depends on the identity of the user making the request (as indicated by the context) as well as any filters. :param context: neutron api request context :param filters: a dictionary with keys that are valid keys for a port as listed in the RESOURCE_ATTRIBUTE_MAP object in neutron/api/v2/attributes.py. Values in this dictionary are an iterable containing values that will be used for an exact match comparison for that value. Each result returned by this function will have matched one of the values for each key in filters. NOTE: this method is optional, as it was not part of the originally defined plugin API. """ LOG.info("get_ports_count for tenant %s filters %s" % (context.tenant_id, filters)) return db_api.port_count_all(context, join_security_groups=True, **filters)
[ "Return", "the", "number", "of", "ports", "." ]
openstack/quark
python
https://github.com/openstack/quark/blob/1112e6a66917d3e98e44cb7b33b107fd5a74bb2e/quark/plugin_modules/ports.py#L578-L597
[ "def", "get_ports_count", "(", "context", ",", "filters", "=", "None", ")", ":", "LOG", ".", "info", "(", "\"get_ports_count for tenant %s filters %s\"", "%", "(", "context", ".", "tenant_id", ",", "filters", ")", ")", "return", "db_api", ".", "port_count_all", "(", "context", ",", "join_security_groups", "=", "True", ",", "*", "*", "filters", ")" ]
1112e6a66917d3e98e44cb7b33b107fd5a74bb2e
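get_ports_count takes the same exact-match filters as get_ports; a capacity check might look like this (filter values hypothetical):

in_use = get_ports_count(admin_context,
                         filters={"network_id": ["net-uuid"],
                                  "device_owner": ["compute:nova"]})
LOG.info("ports in use on net-uuid: %d" % in_use)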
valid
delete_port
Delete a port. :param context: neutron api request context :param id: UUID representing the port to delete.
quark/plugin_modules/ports.py
def delete_port(context, id): """Delete a port. :param context: neutron api request context :param id: UUID representing the port to delete. """ LOG.info("delete_port %s for tenant %s" % (id, context.tenant_id)) port = db_api.port_find(context, id=id, scope=db_api.ONE) if not port: raise n_exc.PortNotFound(port_id=id) if 'device_id' in port: # false is weird, but ignore that LOG.info("delete_port %s for tenant %s has device %s" % (id, context.tenant_id, port['device_id'])) backend_key = port["backend_key"] mac_address = netaddr.EUI(port["mac_address"]).value ipam_driver = _get_ipam_driver(port["network"], port=port) ipam_driver.deallocate_mac_address(context, mac_address) ipam_driver.deallocate_ips_by_port( context, port, ipam_reuse_after=CONF.QUARK.ipam_reuse_after) net_driver = _get_net_driver(port["network"], port=port) base_net_driver = _get_net_driver(port["network"]) net_driver.delete_port(context, backend_key, device_id=port["device_id"], mac_address=port["mac_address"], base_net_driver=base_net_driver) with context.session.begin(): db_api.port_delete(context, port)
def delete_port(context, id): """Delete a port. :param context: neutron api request context :param id: UUID representing the port to delete. """ LOG.info("delete_port %s for tenant %s" % (id, context.tenant_id)) port = db_api.port_find(context, id=id, scope=db_api.ONE) if not port: raise n_exc.PortNotFound(port_id=id) if 'device_id' in port: # false is weird, but ignore that LOG.info("delete_port %s for tenant %s has device %s" % (id, context.tenant_id, port['device_id'])) backend_key = port["backend_key"] mac_address = netaddr.EUI(port["mac_address"]).value ipam_driver = _get_ipam_driver(port["network"], port=port) ipam_driver.deallocate_mac_address(context, mac_address) ipam_driver.deallocate_ips_by_port( context, port, ipam_reuse_after=CONF.QUARK.ipam_reuse_after) net_driver = _get_net_driver(port["network"], port=port) base_net_driver = _get_net_driver(port["network"]) net_driver.delete_port(context, backend_key, device_id=port["device_id"], mac_address=port["mac_address"], base_net_driver=base_net_driver) with context.session.begin(): db_api.port_delete(context, port)
[ "Delete", "a", "port", "." ]
openstack/quark
python
https://github.com/openstack/quark/blob/1112e6a66917d3e98e44cb7b33b107fd5a74bb2e/quark/plugin_modules/ports.py#L601-L631
[ "def", "delete_port", "(", "context", ",", "id", ")", ":", "LOG", ".", "info", "(", "\"delete_port %s for tenant %s\"", "%", "(", "id", ",", "context", ".", "tenant_id", ")", ")", "port", "=", "db_api", ".", "port_find", "(", "context", ",", "id", "=", "id", ",", "scope", "=", "db_api", ".", "ONE", ")", "if", "not", "port", ":", "raise", "n_exc", ".", "PortNotFound", "(", "port_id", "=", "id", ")", "if", "'device_id'", "in", "port", ":", "# false is weird, but ignore that", "LOG", ".", "info", "(", "\"delete_port %s for tenant %s has device %s\"", "%", "(", "id", ",", "context", ".", "tenant_id", ",", "port", "[", "'device_id'", "]", ")", ")", "backend_key", "=", "port", "[", "\"backend_key\"", "]", "mac_address", "=", "netaddr", ".", "EUI", "(", "port", "[", "\"mac_address\"", "]", ")", ".", "value", "ipam_driver", "=", "_get_ipam_driver", "(", "port", "[", "\"network\"", "]", ",", "port", "=", "port", ")", "ipam_driver", ".", "deallocate_mac_address", "(", "context", ",", "mac_address", ")", "ipam_driver", ".", "deallocate_ips_by_port", "(", "context", ",", "port", ",", "ipam_reuse_after", "=", "CONF", ".", "QUARK", ".", "ipam_reuse_after", ")", "net_driver", "=", "_get_net_driver", "(", "port", "[", "\"network\"", "]", ",", "port", "=", "port", ")", "base_net_driver", "=", "_get_net_driver", "(", "port", "[", "\"network\"", "]", ")", "net_driver", ".", "delete_port", "(", "context", ",", "backend_key", ",", "device_id", "=", "port", "[", "\"device_id\"", "]", ",", "mac_address", "=", "port", "[", "\"mac_address\"", "]", ",", "base_net_driver", "=", "base_net_driver", ")", "with", "context", ".", "session", ".", "begin", "(", ")", ":", "db_api", ".", "port_delete", "(", "context", ",", "port", ")" ]
1112e6a66917d3e98e44cb7b33b107fd5a74bb2e
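One easily missed detail in delete_port: IPAM receives the integer form of the MAC via netaddr.EUI(...).value, not the formatted string handed to the network driver. For example:

import netaddr

mac_int = netaddr.EUI("00-1B-77-49-54-FD").value
# mac_int == 117965411581 (0x001B774954FD), the form
# ipam_driver.deallocate_mac_address() expects here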
valid
Segment_allocation_ranges.get_resources
Returns Ext Resources.
quark/api/extensions/segment_allocation_ranges.py
def get_resources(cls): """Returns Ext Resources.""" plugin = directory.get_plugin() controller = SegmentAllocationRangesController(plugin) return [extensions.ResourceExtension( Segment_allocation_ranges.get_alias(), controller)]
def get_resources(cls): """Returns Ext Resources.""" plugin = directory.get_plugin() controller = SegmentAllocationRangesController(plugin) return [extensions.ResourceExtension( Segment_allocation_ranges.get_alias(), controller)]
[ "Returns", "Ext", "Resources", "." ]
openstack/quark
python
https://github.com/openstack/quark/blob/1112e6a66917d3e98e44cb7b33b107fd5a74bb2e/quark/api/extensions/segment_allocation_ranges.py#L100-L106
[ "def", "get_resources", "(", "cls", ")", ":", "plugin", "=", "directory", ".", "get_plugin", "(", ")", "controller", "=", "SegmentAllocationRangesController", "(", "plugin", ")", "return", "[", "extensions", ".", "ResourceExtension", "(", "Segment_allocation_ranges", ".", "get_alias", "(", ")", ",", "controller", ")", "]" ]
1112e6a66917d3e98e44cb7b33b107fd5a74bb2e
valid
Ip_availability.get_resources
Returns Ext Resources.
quark/api/extensions/ip_availability.py
def get_resources(cls): """Returns Ext Resources.""" plugin = directory.get_plugin() controller = IPAvailabilityController(plugin) return [extensions.ResourceExtension(Ip_availability.get_alias(), controller)]
def get_resources(cls): """Returns Ext Resources.""" plugin = directory.get_plugin() controller = IPAvailabilityController(plugin) return [extensions.ResourceExtension(Ip_availability.get_alias(), controller)]
[ "Returns", "Ext", "Resources", "." ]
openstack/quark
python
https://github.com/openstack/quark/blob/1112e6a66917d3e98e44cb7b33b107fd5a74bb2e/quark/api/extensions/ip_availability.py#L79-L84
[ "def", "get_resources", "(", "cls", ")", ":", "plugin", "=", "directory", ".", "get_plugin", "(", ")", "controller", "=", "IPAvailabilityController", "(", "plugin", ")", "return", "[", "extensions", ".", "ResourceExtension", "(", "Ip_availability", ".", "get_alias", "(", ")", ",", "controller", ")", "]" ]
1112e6a66917d3e98e44cb7b33b107fd5a74bb2e
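Both get_resources classmethods above follow the same Neutron extension recipe: fetch the core plugin, wrap it in the extension's controller, and expose the controller under the extension alias. A generic restatement (helper name hypothetical; directory and extensions are assumed to be the same imports the two modules already use):

def make_resource_extension(extension_cls, controller_cls):
    plugin = directory.get_plugin()
    controller = controller_cls(plugin)
    return [extensions.ResourceExtension(extension_cls.get_alias(), controller)]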
valid
QuarkIpam._allocate_from_v6_subnet
This attempts to allocate v6 addresses as per RFC2462 and RFC3041. To accomodate this, we effectively treat all v6 assignment as a first time allocation utilizing the MAC address of the VIF. Because we recycle MACs, we will eventually attempt to recreate a previously generated v6 address. Instead of failing, we've opted to handle reallocating that address in this method. This should provide a performance boost over attempting to check each and every subnet in the existing reallocate logic, as we'd have to iterate over each and every subnet returned
quark/ipam.py
def _allocate_from_v6_subnet(self, context, net_id, subnet, port_id, reuse_after, ip_address=None, **kwargs): """This attempts to allocate v6 addresses as per RFC2462 and RFC3041. To accomodate this, we effectively treat all v6 assignment as a first time allocation utilizing the MAC address of the VIF. Because we recycle MACs, we will eventually attempt to recreate a previously generated v6 address. Instead of failing, we've opted to handle reallocating that address in this method. This should provide a performance boost over attempting to check each and every subnet in the existing reallocate logic, as we'd have to iterate over each and every subnet returned """ LOG.info("Attempting to allocate a v6 address - [{0}]".format( utils.pretty_kwargs(network_id=net_id, subnet=subnet, port_id=port_id, ip_address=ip_address))) if ip_address: LOG.info("IP %s explicitly requested, deferring to standard " "allocation" % ip_address) return self._allocate_from_subnet(context, net_id=net_id, subnet=subnet, port_id=port_id, reuse_after=reuse_after, ip_address=ip_address, **kwargs) else: mac = kwargs.get("mac_address") if mac: mac = kwargs["mac_address"].get("address") if subnet and subnet["ip_policy"]: ip_policy_cidrs = subnet["ip_policy"].get_cidrs_ip_set() else: ip_policy_cidrs = netaddr.IPSet([]) for tries, ip_address in enumerate( generate_v6(mac, port_id, subnet["cidr"])): LOG.info("Attempt {0} of {1}".format( tries + 1, CONF.QUARK.v6_allocation_attempts)) if tries > CONF.QUARK.v6_allocation_attempts - 1: LOG.info("Exceeded v6 allocation attempts, bailing") raise ip_address_failure(net_id) ip_address = netaddr.IPAddress(ip_address).ipv6() LOG.info("Generated a new v6 address {0}".format( str(ip_address))) if (ip_policy_cidrs is not None and ip_address in ip_policy_cidrs): LOG.info("Address {0} excluded by policy".format( str(ip_address))) continue try: with context.session.begin(): address = db_api.ip_address_create( context, address=ip_address, subnet_id=subnet["id"], version=subnet["ip_version"], network_id=net_id, address_type=kwargs.get('address_type', ip_types.FIXED)) return address except db_exception.DBDuplicateEntry: # This shouldn't ever happen, since we hold a unique MAC # address from the previous IPAM step. LOG.info("{0} exists but was already " "allocated".format(str(ip_address))) LOG.debug("Duplicate entry found when inserting subnet_id" " %s ip_address %s", subnet["id"], ip_address)
def _allocate_from_v6_subnet(self, context, net_id, subnet, port_id, reuse_after, ip_address=None, **kwargs): """This attempts to allocate v6 addresses as per RFC2462 and RFC3041. To accomodate this, we effectively treat all v6 assignment as a first time allocation utilizing the MAC address of the VIF. Because we recycle MACs, we will eventually attempt to recreate a previously generated v6 address. Instead of failing, we've opted to handle reallocating that address in this method. This should provide a performance boost over attempting to check each and every subnet in the existing reallocate logic, as we'd have to iterate over each and every subnet returned """ LOG.info("Attempting to allocate a v6 address - [{0}]".format( utils.pretty_kwargs(network_id=net_id, subnet=subnet, port_id=port_id, ip_address=ip_address))) if ip_address: LOG.info("IP %s explicitly requested, deferring to standard " "allocation" % ip_address) return self._allocate_from_subnet(context, net_id=net_id, subnet=subnet, port_id=port_id, reuse_after=reuse_after, ip_address=ip_address, **kwargs) else: mac = kwargs.get("mac_address") if mac: mac = kwargs["mac_address"].get("address") if subnet and subnet["ip_policy"]: ip_policy_cidrs = subnet["ip_policy"].get_cidrs_ip_set() else: ip_policy_cidrs = netaddr.IPSet([]) for tries, ip_address in enumerate( generate_v6(mac, port_id, subnet["cidr"])): LOG.info("Attempt {0} of {1}".format( tries + 1, CONF.QUARK.v6_allocation_attempts)) if tries > CONF.QUARK.v6_allocation_attempts - 1: LOG.info("Exceeded v6 allocation attempts, bailing") raise ip_address_failure(net_id) ip_address = netaddr.IPAddress(ip_address).ipv6() LOG.info("Generated a new v6 address {0}".format( str(ip_address))) if (ip_policy_cidrs is not None and ip_address in ip_policy_cidrs): LOG.info("Address {0} excluded by policy".format( str(ip_address))) continue try: with context.session.begin(): address = db_api.ip_address_create( context, address=ip_address, subnet_id=subnet["id"], version=subnet["ip_version"], network_id=net_id, address_type=kwargs.get('address_type', ip_types.FIXED)) return address except db_exception.DBDuplicateEntry: # This shouldn't ever happen, since we hold a unique MAC # address from the previous IPAM step. LOG.info("{0} exists but was already " "allocated".format(str(ip_address))) LOG.debug("Duplicate entry found when inserting subnet_id" " %s ip_address %s", subnet["id"], ip_address)
[ "This", "attempts", "to", "allocate", "v6", "addresses", "as", "per", "RFC2462", "and", "RFC3041", "." ]
openstack/quark
python
https://github.com/openstack/quark/blob/1112e6a66917d3e98e44cb7b33b107fd5a74bb2e/quark/ipam.py#L495-L567
[ "def", "_allocate_from_v6_subnet", "(", "self", ",", "context", ",", "net_id", ",", "subnet", ",", "port_id", ",", "reuse_after", ",", "ip_address", "=", "None", ",", "*", "*", "kwargs", ")", ":", "LOG", ".", "info", "(", "\"Attempting to allocate a v6 address - [{0}]\"", ".", "format", "(", "utils", ".", "pretty_kwargs", "(", "network_id", "=", "net_id", ",", "subnet", "=", "subnet", ",", "port_id", "=", "port_id", ",", "ip_address", "=", "ip_address", ")", ")", ")", "if", "ip_address", ":", "LOG", ".", "info", "(", "\"IP %s explicitly requested, deferring to standard \"", "\"allocation\"", "%", "ip_address", ")", "return", "self", ".", "_allocate_from_subnet", "(", "context", ",", "net_id", "=", "net_id", ",", "subnet", "=", "subnet", ",", "port_id", "=", "port_id", ",", "reuse_after", "=", "reuse_after", ",", "ip_address", "=", "ip_address", ",", "*", "*", "kwargs", ")", "else", ":", "mac", "=", "kwargs", ".", "get", "(", "\"mac_address\"", ")", "if", "mac", ":", "mac", "=", "kwargs", "[", "\"mac_address\"", "]", ".", "get", "(", "\"address\"", ")", "if", "subnet", "and", "subnet", "[", "\"ip_policy\"", "]", ":", "ip_policy_cidrs", "=", "subnet", "[", "\"ip_policy\"", "]", ".", "get_cidrs_ip_set", "(", ")", "else", ":", "ip_policy_cidrs", "=", "netaddr", ".", "IPSet", "(", "[", "]", ")", "for", "tries", ",", "ip_address", "in", "enumerate", "(", "generate_v6", "(", "mac", ",", "port_id", ",", "subnet", "[", "\"cidr\"", "]", ")", ")", ":", "LOG", ".", "info", "(", "\"Attempt {0} of {1}\"", ".", "format", "(", "tries", "+", "1", ",", "CONF", ".", "QUARK", ".", "v6_allocation_attempts", ")", ")", "if", "tries", ">", "CONF", ".", "QUARK", ".", "v6_allocation_attempts", "-", "1", ":", "LOG", ".", "info", "(", "\"Exceeded v6 allocation attempts, bailing\"", ")", "raise", "ip_address_failure", "(", "net_id", ")", "ip_address", "=", "netaddr", ".", "IPAddress", "(", "ip_address", ")", ".", "ipv6", "(", ")", "LOG", ".", "info", "(", "\"Generated a new v6 address {0}\"", ".", "format", "(", "str", "(", "ip_address", ")", ")", ")", "if", "(", "ip_policy_cidrs", "is", "not", "None", "and", "ip_address", "in", "ip_policy_cidrs", ")", ":", "LOG", ".", "info", "(", "\"Address {0} excluded by policy\"", ".", "format", "(", "str", "(", "ip_address", ")", ")", ")", "continue", "try", ":", "with", "context", ".", "session", ".", "begin", "(", ")", ":", "address", "=", "db_api", ".", "ip_address_create", "(", "context", ",", "address", "=", "ip_address", ",", "subnet_id", "=", "subnet", "[", "\"id\"", "]", ",", "version", "=", "subnet", "[", "\"ip_version\"", "]", ",", "network_id", "=", "net_id", ",", "address_type", "=", "kwargs", ".", "get", "(", "'address_type'", ",", "ip_types", ".", "FIXED", ")", ")", "return", "address", "except", "db_exception", ".", "DBDuplicateEntry", ":", "# This shouldn't ever happen, since we hold a unique MAC", "# address from the previous IPAM step.", "LOG", ".", "info", "(", "\"{0} exists but was already \"", "\"allocated\"", ".", "format", "(", "str", "(", "ip_address", ")", ")", ")", "LOG", ".", "debug", "(", "\"Duplicate entry found when inserting subnet_id\"", "\" %s ip_address %s\"", ",", "subnet", "[", "\"id\"", "]", ",", "ip_address", ")" ]
1112e6a66917d3e98e44cb7b33b107fd5a74bb2e
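_allocate_from_v6_subnet leans on a generate_v6 generator (defined elsewhere in quark/ipam.py) that yields candidate addresses until one survives the policy check and the DB insert. A rough Python 3 sketch of the RFC 3041-flavored idea, hashing the MAC and port id into the subnet's host bits; this illustrates the approach, not quark's actual generator:

import hashlib
import netaddr

def sketch_generate_v6(mac, port_id, cidr):
    net = netaddr.IPNetwork(cidr)
    seed = "%s|%s" % (mac, port_id)
    while True:
        digest = hashlib.sha256(seed.encode("utf-8")).digest()
        host_bits = int.from_bytes(digest[:8], "big") % net.size
        # Deterministic first candidate per (mac, port_id); a retry
        # re-hashes so policy rejections and collisions make progress
        yield netaddr.IPAddress(int(net.network) + host_bits, version=6)
        seed = digest.hex()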
valid
_create_flip
Associates the flip with ports and creates it with the flip driver :param context: neutron api request context. :param flip: quark.db.models.IPAddress object representing a floating IP :param port_fixed_ips: dictionary of the structure: {"<id of port>": {"port": <quark.db.models.Port>, "fixed_ip": "<fixed ip address>"}} :return: None
quark/plugin_modules/floating_ips.py
def _create_flip(context, flip, port_fixed_ips): """Associates the flip with ports and creates it with the flip driver :param context: neutron api request context. :param flip: quark.db.models.IPAddress object representing a floating IP :param port_fixed_ips: dictionary of the structure: {"<id of port>": {"port": <quark.db.models.Port>, "fixed_ip": "<fixed ip address>"}} :return: None """ if port_fixed_ips: context.session.begin() try: ports = [val['port'] for val in port_fixed_ips.values()] flip = db_api.port_associate_ip(context, ports, flip, port_fixed_ips.keys()) for port_id in port_fixed_ips: fixed_ip = port_fixed_ips[port_id]['fixed_ip'] flip = db_api.floating_ip_associate_fixed_ip(context, flip, fixed_ip) flip_driver = registry.DRIVER_REGISTRY.get_driver() flip_driver.register_floating_ip(flip, port_fixed_ips) context.session.commit() except Exception: context.session.rollback() raise # alexm: Notify from this method for consistency with _delete_flip billing.notify(context, billing.IP_ASSOC, flip)
def _create_flip(context, flip, port_fixed_ips): """Associates the flip with ports and creates it with the flip driver :param context: neutron api request context. :param flip: quark.db.models.IPAddress object representing a floating IP :param port_fixed_ips: dictionary of the structure: {"<id of port>": {"port": <quark.db.models.Port>, "fixed_ip": "<fixed ip address>"}} :return: None """ if port_fixed_ips: context.session.begin() try: ports = [val['port'] for val in port_fixed_ips.values()] flip = db_api.port_associate_ip(context, ports, flip, port_fixed_ips.keys()) for port_id in port_fixed_ips: fixed_ip = port_fixed_ips[port_id]['fixed_ip'] flip = db_api.floating_ip_associate_fixed_ip(context, flip, fixed_ip) flip_driver = registry.DRIVER_REGISTRY.get_driver() flip_driver.register_floating_ip(flip, port_fixed_ips) context.session.commit() except Exception: context.session.rollback() raise # alexm: Notify from this method for consistency with _delete_flip billing.notify(context, billing.IP_ASSOC, flip)
[ "Associates", "the", "flip", "with", "ports", "and", "creates", "it", "with", "the", "flip", "driver" ]
openstack/quark
python
https://github.com/openstack/quark/blob/1112e6a66917d3e98e44cb7b33b107fd5a74bb2e/quark/plugin_modules/floating_ips.py#L139-L170
[ "def", "_create_flip", "(", "context", ",", "flip", ",", "port_fixed_ips", ")", ":", "if", "port_fixed_ips", ":", "context", ".", "session", ".", "begin", "(", ")", "try", ":", "ports", "=", "[", "val", "[", "'port'", "]", "for", "val", "in", "port_fixed_ips", ".", "values", "(", ")", "]", "flip", "=", "db_api", ".", "port_associate_ip", "(", "context", ",", "ports", ",", "flip", ",", "port_fixed_ips", ".", "keys", "(", ")", ")", "for", "port_id", "in", "port_fixed_ips", ":", "fixed_ip", "=", "port_fixed_ips", "[", "port_id", "]", "[", "'fixed_ip'", "]", "flip", "=", "db_api", ".", "floating_ip_associate_fixed_ip", "(", "context", ",", "flip", ",", "fixed_ip", ")", "flip_driver", "=", "registry", ".", "DRIVER_REGISTRY", ".", "get_driver", "(", ")", "flip_driver", ".", "register_floating_ip", "(", "flip", ",", "port_fixed_ips", ")", "context", ".", "session", ".", "commit", "(", ")", "except", "Exception", ":", "context", ".", "session", ".", "rollback", "(", ")", "raise", "# alexm: Notify from this method for consistency with _delete_flip", "billing", ".", "notify", "(", "context", ",", "billing", ".", "IP_ASSOC", ",", "flip", ")" ]
1112e6a66917d3e98e44cb7b33b107fd5a74bb2e
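_create_flip keeps the DB association and the driver registration under one explicit transaction: if register_floating_ip raises, the uncommitted rows are rolled back and nothing is left half-associated. The shape in isolation (a sketch of the pattern, not a quark API):

def associate_atomically(session, db_work, backend_work):
    session.begin()
    try:
        obj = db_work()       # DB changes, still uncommitted
        backend_work(obj)     # external call; a failure aborts the DB work too
        session.commit()
        return obj
    except Exception:
        session.rollback()
        raise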
valid
_update_flip
Update a flip based IPAddress :param context: neutron api request context. :param flip_id: id of the flip or scip :param ip_type: ip_types.FLOATING | ip_types.SCALING :param requested_ports: dictionary of the structure: {"port_id": "<id of port>", "fixed_ip": "<fixed ip address>"} :return: quark.models.IPAddress
quark/plugin_modules/floating_ips.py
def _update_flip(context, flip_id, ip_type, requested_ports): """Update a flip based IPAddress :param context: neutron api request context. :param flip_id: id of the flip or scip :param ip_type: ip_types.FLOATING | ip_types.SCALING :param requested_ports: dictionary of the structure: {"port_id": "<id of port>", "fixed_ip": "<fixed ip address>"} :return: quark.models.IPAddress """ # This list will hold flips that require notifications. # Using sets to avoid dups, if any. notifications = { billing.IP_ASSOC: set(), billing.IP_DISASSOC: set() } context.session.begin() try: flip = db_api.floating_ip_find(context, id=flip_id, scope=db_api.ONE) if not flip: if ip_type == ip_types.SCALING: raise q_exc.ScalingIpNotFound(id=flip_id) raise q_exc.FloatingIpNotFound(id=flip_id) current_ports = flip.ports # Determine what ports are being removed, being added, and remain req_port_ids = [request_port.get('port_id') for request_port in requested_ports] curr_port_ids = [curr_port.id for curr_port in current_ports] added_port_ids = [port_id for port_id in req_port_ids if port_id and port_id not in curr_port_ids] removed_port_ids = [port_id for port_id in curr_port_ids if port_id not in req_port_ids] remaining_port_ids = set(curr_port_ids) - set(removed_port_ids) # Validations just for floating ip types if (ip_type == ip_types.FLOATING and curr_port_ids and curr_port_ids == req_port_ids): d = dict(flip_id=flip_id, port_id=curr_port_ids[0]) raise q_exc.PortAlreadyAssociatedToFloatingIp(**d) if (ip_type == ip_types.FLOATING and not curr_port_ids and not req_port_ids): raise q_exc.FloatingIpUpdateNoPortIdSupplied() # Validate that GW IP is not in use on the NW. flip_subnet = v._make_subnet_dict(flip.subnet) for added_port_id in added_port_ids: port = _get_port(context, added_port_id) nw = port.network nw_ports = v._make_ports_list(nw.ports) fixed_ips = [ip.get('ip_address') for p in nw_ports for ip in p.get('fixed_ips')] gw_ip = flip_subnet.get('gateway_ip') if gw_ip in fixed_ips: port_with_gateway_ip = None for port in nw_ports: for ip in port.get('fixed_ips'): if gw_ip in ip.get('ip_address'): port_with_gateway_ip = port break port_id = port_with_gateway_ip.get('id') network_id = port_with_gateway_ip.get('network_id') raise q_exc.FixedIpAllocatedToGatewayIp(port_id=port_id, network_id=network_id) port_fixed_ips = {} # Keep the ports and fixed ips that have not changed for port_id in remaining_port_ids: port = db_api.port_find(context, id=port_id, scope=db_api.ONE) fixed_ip = _get_flip_fixed_ip_by_port_id(flip, port_id) port_fixed_ips[port_id] = {'port': port, 'fixed_ip': fixed_ip} # Disassociate the ports and fixed ips from the flip that were # associated to the flip but are not anymore for port_id in removed_port_ids: port = db_api.port_find(context, id=port_id, scope=db_api.ONE) flip = db_api.port_disassociate_ip(context, [port], flip) notifications[billing.IP_DISASSOC].add(flip) fixed_ip = _get_flip_fixed_ip_by_port_id(flip, port_id) if fixed_ip: flip = db_api.floating_ip_disassociate_fixed_ip( context, flip, fixed_ip) # Validate the new ports with the flip and associate the new ports # and fixed ips with the flip for port_id in added_port_ids: port = db_api.port_find(context, id=port_id, scope=db_api.ONE) if not port: raise n_exc.PortNotFound(port_id=port_id) if any(ip for ip in port.ip_addresses if (ip.get('address_type') == ip_types.FLOATING)): raise q_exc.PortAlreadyContainsFloatingIp(port_id=port_id) if any(ip for ip in port.ip_addresses if (ip.get('address_type') == ip_types.SCALING)): raise q_exc.PortAlreadyContainsScalingIp(port_id=port_id) fixed_ip = _get_next_available_fixed_ip(port) LOG.info('new fixed ip: %s' % fixed_ip) if not fixed_ip: raise q_exc.NoAvailableFixedIpsForPort(port_id=port_id) port_fixed_ips[port_id] = {'port': port, 'fixed_ip': fixed_ip} flip = db_api.port_associate_ip(context, [port], flip, [port_id]) notifications[billing.IP_ASSOC].add(flip) flip = db_api.floating_ip_associate_fixed_ip(context, flip, fixed_ip) flip_driver = registry.DRIVER_REGISTRY.get_driver() # If there are not any remaining ports and no new ones are being added, # remove the floating ip from unicorn if not remaining_port_ids and not added_port_ids: flip_driver.remove_floating_ip(flip) # If new ports are being added but there previously was not any ports, # then register a new floating ip with the driver because it is # assumed it does not exist elif added_port_ids and not curr_port_ids: flip_driver.register_floating_ip(flip, port_fixed_ips) else: flip_driver.update_floating_ip(flip, port_fixed_ips) context.session.commit() except Exception: context.session.rollback() raise # Send notifications for possible associate/disassociate events for notif_type, flip_set in notifications.iteritems(): for flip in flip_set: billing.notify(context, notif_type, flip) # NOTE(blogan): ORM does not seem to update the model to the real state # of the database, so I'm doing an explicit refresh for now. context.session.refresh(flip) return flip
def _update_flip(context, flip_id, ip_type, requested_ports): """Update a flip based IPAddress :param context: neutron api request context. :param flip_id: id of the flip or scip :param ip_type: ip_types.FLOATING | ip_types.SCALING :param requested_ports: dictionary of the structure: {"port_id": "<id of port>", "fixed_ip": "<fixed ip address>"} :return: quark.models.IPAddress """ # This list will hold flips that require notifications. # Using sets to avoid dups, if any. notifications = { billing.IP_ASSOC: set(), billing.IP_DISASSOC: set() } context.session.begin() try: flip = db_api.floating_ip_find(context, id=flip_id, scope=db_api.ONE) if not flip: if ip_type == ip_types.SCALING: raise q_exc.ScalingIpNotFound(id=flip_id) raise q_exc.FloatingIpNotFound(id=flip_id) current_ports = flip.ports # Determine what ports are being removed, being added, and remain req_port_ids = [request_port.get('port_id') for request_port in requested_ports] curr_port_ids = [curr_port.id for curr_port in current_ports] added_port_ids = [port_id for port_id in req_port_ids if port_id and port_id not in curr_port_ids] removed_port_ids = [port_id for port_id in curr_port_ids if port_id not in req_port_ids] remaining_port_ids = set(curr_port_ids) - set(removed_port_ids) # Validations just for floating ip types if (ip_type == ip_types.FLOATING and curr_port_ids and curr_port_ids == req_port_ids): d = dict(flip_id=flip_id, port_id=curr_port_ids[0]) raise q_exc.PortAlreadyAssociatedToFloatingIp(**d) if (ip_type == ip_types.FLOATING and not curr_port_ids and not req_port_ids): raise q_exc.FloatingIpUpdateNoPortIdSupplied() # Validate that GW IP is not in use on the NW. flip_subnet = v._make_subnet_dict(flip.subnet) for added_port_id in added_port_ids: port = _get_port(context, added_port_id) nw = port.network nw_ports = v._make_ports_list(nw.ports) fixed_ips = [ip.get('ip_address') for p in nw_ports for ip in p.get('fixed_ips')] gw_ip = flip_subnet.get('gateway_ip') if gw_ip in fixed_ips: port_with_gateway_ip = None for port in nw_ports: for ip in port.get('fixed_ips'): if gw_ip in ip.get('ip_address'): port_with_gateway_ip = port break port_id = port_with_gateway_ip.get('id') network_id = port_with_gateway_ip.get('network_id') raise q_exc.FixedIpAllocatedToGatewayIp(port_id=port_id, network_id=network_id) port_fixed_ips = {} # Keep the ports and fixed ips that have not changed for port_id in remaining_port_ids: port = db_api.port_find(context, id=port_id, scope=db_api.ONE) fixed_ip = _get_flip_fixed_ip_by_port_id(flip, port_id) port_fixed_ips[port_id] = {'port': port, 'fixed_ip': fixed_ip} # Disassociate the ports and fixed ips from the flip that were # associated to the flip but are not anymore for port_id in removed_port_ids: port = db_api.port_find(context, id=port_id, scope=db_api.ONE) flip = db_api.port_disassociate_ip(context, [port], flip) notifications[billing.IP_DISASSOC].add(flip) fixed_ip = _get_flip_fixed_ip_by_port_id(flip, port_id) if fixed_ip: flip = db_api.floating_ip_disassociate_fixed_ip( context, flip, fixed_ip) # Validate the new ports with the flip and associate the new ports # and fixed ips with the flip for port_id in added_port_ids: port = db_api.port_find(context, id=port_id, scope=db_api.ONE) if not port: raise n_exc.PortNotFound(port_id=port_id) if any(ip for ip in port.ip_addresses if (ip.get('address_type') == ip_types.FLOATING)): raise q_exc.PortAlreadyContainsFloatingIp(port_id=port_id) if any(ip for ip in port.ip_addresses if (ip.get('address_type') == ip_types.SCALING)): raise q_exc.PortAlreadyContainsScalingIp(port_id=port_id) fixed_ip = _get_next_available_fixed_ip(port) LOG.info('new fixed ip: %s' % fixed_ip) if not fixed_ip: raise q_exc.NoAvailableFixedIpsForPort(port_id=port_id) port_fixed_ips[port_id] = {'port': port, 'fixed_ip': fixed_ip} flip = db_api.port_associate_ip(context, [port], flip, [port_id]) notifications[billing.IP_ASSOC].add(flip) flip = db_api.floating_ip_associate_fixed_ip(context, flip, fixed_ip) flip_driver = registry.DRIVER_REGISTRY.get_driver() # If there are not any remaining ports and no new ones are being added, # remove the floating ip from unicorn if not remaining_port_ids and not added_port_ids: flip_driver.remove_floating_ip(flip) # If new ports are being added but there previously was not any ports, # then register a new floating ip with the driver because it is # assumed it does not exist elif added_port_ids and not curr_port_ids: flip_driver.register_floating_ip(flip, port_fixed_ips) else: flip_driver.update_floating_ip(flip, port_fixed_ips) context.session.commit() except Exception: context.session.rollback() raise # Send notifications for possible associate/disassociate events for notif_type, flip_set in notifications.iteritems(): for flip in flip_set: billing.notify(context, notif_type, flip) # NOTE(blogan): ORM does not seem to update the model to the real state # of the database, so I'm doing an explicit refresh for now. context.session.refresh(flip) return flip
[ "Update", "a", "flip", "based", "IPAddress" ]
openstack/quark
python
https://github.com/openstack/quark/blob/1112e6a66917d3e98e44cb7b33b107fd5a74bb2e/quark/plugin_modules/floating_ips.py#L179-L311
[ "def", "_update_flip", "(", "context", ",", "flip_id", ",", "ip_type", ",", "requested_ports", ")", ":", "# This list will hold flips that require notifications.", "# Using sets to avoid dups, if any.", "notifications", "=", "{", "billing", ".", "IP_ASSOC", ":", "set", "(", ")", ",", "billing", ".", "IP_DISASSOC", ":", "set", "(", ")", "}", "context", ".", "session", ".", "begin", "(", ")", "try", ":", "flip", "=", "db_api", ".", "floating_ip_find", "(", "context", ",", "id", "=", "flip_id", ",", "scope", "=", "db_api", ".", "ONE", ")", "if", "not", "flip", ":", "if", "ip_type", "==", "ip_types", ".", "SCALING", ":", "raise", "q_exc", ".", "ScalingIpNotFound", "(", "id", "=", "flip_id", ")", "raise", "q_exc", ".", "FloatingIpNotFound", "(", "id", "=", "flip_id", ")", "current_ports", "=", "flip", ".", "ports", "# Determine what ports are being removed, being added, and remain", "req_port_ids", "=", "[", "request_port", ".", "get", "(", "'port_id'", ")", "for", "request_port", "in", "requested_ports", "]", "curr_port_ids", "=", "[", "curr_port", ".", "id", "for", "curr_port", "in", "current_ports", "]", "added_port_ids", "=", "[", "port_id", "for", "port_id", "in", "req_port_ids", "if", "port_id", "and", "port_id", "not", "in", "curr_port_ids", "]", "removed_port_ids", "=", "[", "port_id", "for", "port_id", "in", "curr_port_ids", "if", "port_id", "not", "in", "req_port_ids", "]", "remaining_port_ids", "=", "set", "(", "curr_port_ids", ")", "-", "set", "(", "removed_port_ids", ")", "# Validations just for floating ip types", "if", "(", "ip_type", "==", "ip_types", ".", "FLOATING", "and", "curr_port_ids", "and", "curr_port_ids", "==", "req_port_ids", ")", ":", "d", "=", "dict", "(", "flip_id", "=", "flip_id", ",", "port_id", "=", "curr_port_ids", "[", "0", "]", ")", "raise", "q_exc", ".", "PortAlreadyAssociatedToFloatingIp", "(", "*", "*", "d", ")", "if", "(", "ip_type", "==", "ip_types", ".", "FLOATING", "and", "not", "curr_port_ids", "and", "not", "req_port_ids", ")", ":", "raise", "q_exc", ".", "FloatingIpUpdateNoPortIdSupplied", "(", ")", "# Validate that GW IP is not in use on the NW.", "flip_subnet", "=", "v", ".", "_make_subnet_dict", "(", "flip", ".", "subnet", ")", "for", "added_port_id", "in", "added_port_ids", ":", "port", "=", "_get_port", "(", "context", ",", "added_port_id", ")", "nw", "=", "port", ".", "network", "nw_ports", "=", "v", ".", "_make_ports_list", "(", "nw", ".", "ports", ")", "fixed_ips", "=", "[", "ip", ".", "get", "(", "'ip_address'", ")", "for", "p", "in", "nw_ports", "for", "ip", "in", "p", ".", "get", "(", "'fixed_ips'", ")", "]", "gw_ip", "=", "flip_subnet", ".", "get", "(", "'gateway_ip'", ")", "if", "gw_ip", "in", "fixed_ips", ":", "port_with_gateway_ip", "=", "None", "for", "port", "in", "nw_ports", ":", "for", "ip", "in", "port", ".", "get", "(", "'fixed_ips'", ")", ":", "if", "gw_ip", "in", "ip", ".", "get", "(", "'ip_address'", ")", ":", "port_with_gateway_ip", "=", "port", "break", "port_id", "=", "port_with_gateway_ip", ".", "get", "(", "'id'", ")", "network_id", "=", "port_with_gateway_ip", ".", "get", "(", "'network_id'", ")", "raise", "q_exc", ".", "FixedIpAllocatedToGatewayIp", "(", "port_id", "=", "port_id", ",", "network_id", "=", "network_id", ")", "port_fixed_ips", "=", "{", "}", "# Keep the ports and fixed ips that have not changed", "for", "port_id", "in", "remaining_port_ids", ":", "port", "=", "db_api", ".", "port_find", "(", "context", ",", "id", "=", "port_id", ",", "scope", "=", "db_api", ".", "ONE", ")", "fixed_ip", "=", 
"_get_flip_fixed_ip_by_port_id", "(", "flip", ",", "port_id", ")", "port_fixed_ips", "[", "port_id", "]", "=", "{", "'port'", ":", "port", ",", "'fixed_ip'", ":", "fixed_ip", "}", "# Disassociate the ports and fixed ips from the flip that were", "# associated to the flip but are not anymore", "for", "port_id", "in", "removed_port_ids", ":", "port", "=", "db_api", ".", "port_find", "(", "context", ",", "id", "=", "port_id", ",", "scope", "=", "db_api", ".", "ONE", ")", "flip", "=", "db_api", ".", "port_disassociate_ip", "(", "context", ",", "[", "port", "]", ",", "flip", ")", "notifications", "[", "billing", ".", "IP_DISASSOC", "]", ".", "add", "(", "flip", ")", "fixed_ip", "=", "_get_flip_fixed_ip_by_port_id", "(", "flip", ",", "port_id", ")", "if", "fixed_ip", ":", "flip", "=", "db_api", ".", "floating_ip_disassociate_fixed_ip", "(", "context", ",", "flip", ",", "fixed_ip", ")", "# Validate the new ports with the flip and associate the new ports", "# and fixed ips with the flip", "for", "port_id", "in", "added_port_ids", ":", "port", "=", "db_api", ".", "port_find", "(", "context", ",", "id", "=", "port_id", ",", "scope", "=", "db_api", ".", "ONE", ")", "if", "not", "port", ":", "raise", "n_exc", ".", "PortNotFound", "(", "port_id", "=", "port_id", ")", "if", "any", "(", "ip", "for", "ip", "in", "port", ".", "ip_addresses", "if", "(", "ip", ".", "get", "(", "'address_type'", ")", "==", "ip_types", ".", "FLOATING", ")", ")", ":", "raise", "q_exc", ".", "PortAlreadyContainsFloatingIp", "(", "port_id", "=", "port_id", ")", "if", "any", "(", "ip", "for", "ip", "in", "port", ".", "ip_addresses", "if", "(", "ip", ".", "get", "(", "'address_type'", ")", "==", "ip_types", ".", "SCALING", ")", ")", ":", "raise", "q_exc", ".", "PortAlreadyContainsScalingIp", "(", "port_id", "=", "port_id", ")", "fixed_ip", "=", "_get_next_available_fixed_ip", "(", "port", ")", "LOG", ".", "info", "(", "'new fixed ip: %s'", "%", "fixed_ip", ")", "if", "not", "fixed_ip", ":", "raise", "q_exc", ".", "NoAvailableFixedIpsForPort", "(", "port_id", "=", "port_id", ")", "port_fixed_ips", "[", "port_id", "]", "=", "{", "'port'", ":", "port", ",", "'fixed_ip'", ":", "fixed_ip", "}", "flip", "=", "db_api", ".", "port_associate_ip", "(", "context", ",", "[", "port", "]", ",", "flip", ",", "[", "port_id", "]", ")", "notifications", "[", "billing", ".", "IP_ASSOC", "]", ".", "add", "(", "flip", ")", "flip", "=", "db_api", ".", "floating_ip_associate_fixed_ip", "(", "context", ",", "flip", ",", "fixed_ip", ")", "flip_driver", "=", "registry", ".", "DRIVER_REGISTRY", ".", "get_driver", "(", ")", "# If there are not any remaining ports and no new ones are being added,", "# remove the floating ip from unicorn", "if", "not", "remaining_port_ids", "and", "not", "added_port_ids", ":", "flip_driver", ".", "remove_floating_ip", "(", "flip", ")", "# If new ports are being added but there previously was not any ports,", "# then register a new floating ip with the driver because it is", "# assumed it does not exist", "elif", "added_port_ids", "and", "not", "curr_port_ids", ":", "flip_driver", ".", "register_floating_ip", "(", "flip", ",", "port_fixed_ips", ")", "else", ":", "flip_driver", ".", "update_floating_ip", "(", "flip", ",", "port_fixed_ips", ")", "context", ".", "session", ".", "commit", "(", ")", "except", "Exception", ":", "context", ".", "session", ".", "rollback", "(", ")", "raise", "# Send notifications for possible associate/disassociate events", "for", "notif_type", ",", "flip_set", "in", "notifications", ".", "iteritems", 
"(", ")", ":", "for", "flip", "in", "flip_set", ":", "billing", ".", "notify", "(", "context", ",", "notif_type", ",", "flip", ")", "# NOTE(blogan): ORM does not seem to update the model to the real state", "# of the database, so I'm doing an explicit refresh for now.", "context", ".", "session", ".", "refresh", "(", "flip", ")", "return", "flip" ]
1112e6a66917d3e98e44cb7b33b107fd5a74bb2e
valid
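
The token stream above ends with _update_flip's billing fan-out: flips touched during the update are collected in per-event sets and notified once each after the transaction commits. Below is a runnable sketch of that dedupe pattern; the event names and flip ids are placeholders (the real constants live on quark's billing module), and note that the source's iteritems() is the Python 2 spelling of items().

# Minimal, self-contained illustration of the set-based dedupe used at the
# end of _update_flip. Event names and flip ids are stand-ins.
notifications = {'ip.associate': set(), 'ip.disassociate': set()}

# A flip re-associated twice in one update still lands in the set only once.
notifications['ip.associate'].add('flip-1')
notifications['ip.associate'].add('flip-1')
notifications['ip.disassociate'].add('flip-2')

for notif_type, flip_set in notifications.items():  # iteritems() in the Py2 source
    for flip in flip_set:
        print('notify %s for %s' % (notif_type, flip))
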
create_floatingip
Allocate or reallocate a floating IP. :param context: neutron api request context. :param content: dictionary describing the floating ip, with keys as listed in the RESOURCE_ATTRIBUTE_MAP object in neutron/api/v2/attributes.py. All keys will be populated. :returns: Dictionary containing details for the new floating IP. If values are declared in the fields parameter, then only those keys will be present.
quark/plugin_modules/floating_ips.py
def create_floatingip(context, content):
    """Allocate or reallocate a floating IP.

    :param context: neutron api request context.
    :param content: dictionary describing the floating ip, with keys as
        listed in the RESOURCE_ATTRIBUTE_MAP object in
        neutron/api/v2/attributes.py. All keys will be populated.

    :returns: Dictionary containing details for the new floating IP. If
        values are declared in the fields parameter, then only those keys
        will be present.
    """
    LOG.info('create_floatingip %s for tenant %s and body %s' %
             (id, context.tenant_id, content))
    network_id = content.get('floating_network_id')
    # TODO(blogan): Since the extension logic will reject any requests without
    # floating_network_id, is this still needed?
    if not network_id:
        raise n_exc.BadRequest(resource='floating_ip',
                               msg='floating_network_id is required.')
    fixed_ip_address = content.get('fixed_ip_address')
    ip_address = content.get('floating_ip_address')
    port_id = content.get('port_id')
    port = None
    port_fixed_ip = {}
    network = _get_network(context, network_id)
    if port_id:
        port = _get_port(context, port_id)
        fixed_ip = _get_fixed_ip(context, fixed_ip_address, port)
        port_fixed_ip = {port.id: {'port': port, 'fixed_ip': fixed_ip}}
    flip = _allocate_ip(context, network, port, ip_address,
                        ip_types.FLOATING)
    _create_flip(context, flip, port_fixed_ip)
    return v._make_floating_ip_dict(flip, port_id)
def create_floatingip(context, content):
    """Allocate or reallocate a floating IP.

    :param context: neutron api request context.
    :param content: dictionary describing the floating ip, with keys as
        listed in the RESOURCE_ATTRIBUTE_MAP object in
        neutron/api/v2/attributes.py. All keys will be populated.

    :returns: Dictionary containing details for the new floating IP. If
        values are declared in the fields parameter, then only those keys
        will be present.
    """
    LOG.info('create_floatingip %s for tenant %s and body %s' %
             (id, context.tenant_id, content))
    network_id = content.get('floating_network_id')
    # TODO(blogan): Since the extension logic will reject any requests without
    # floating_network_id, is this still needed?
    if not network_id:
        raise n_exc.BadRequest(resource='floating_ip',
                               msg='floating_network_id is required.')
    fixed_ip_address = content.get('fixed_ip_address')
    ip_address = content.get('floating_ip_address')
    port_id = content.get('port_id')
    port = None
    port_fixed_ip = {}
    network = _get_network(context, network_id)
    if port_id:
        port = _get_port(context, port_id)
        fixed_ip = _get_fixed_ip(context, fixed_ip_address, port)
        port_fixed_ip = {port.id: {'port': port, 'fixed_ip': fixed_ip}}
    flip = _allocate_ip(context, network, port, ip_address,
                        ip_types.FLOATING)
    _create_flip(context, flip, port_fixed_ip)
    return v._make_floating_ip_dict(flip, port_id)
[ "Allocate", "or", "reallocate", "a", "floating", "IP", "." ]
openstack/quark
python
https://github.com/openstack/quark/blob/1112e6a66917d3e98e44cb7b33b107fd5a74bb2e/quark/plugin_modules/floating_ips.py#L358-L391
[ "def", "create_floatingip", "(", "context", ",", "content", ")", ":", "LOG", ".", "info", "(", "'create_floatingip %s for tenant %s and body %s'", "%", "(", "id", ",", "context", ".", "tenant_id", ",", "content", ")", ")", "network_id", "=", "content", ".", "get", "(", "'floating_network_id'", ")", "# TODO(blogan): Since the extension logic will reject any requests without", "# floating_network_id, is this still needed?", "if", "not", "network_id", ":", "raise", "n_exc", ".", "BadRequest", "(", "resource", "=", "'floating_ip'", ",", "msg", "=", "'floating_network_id is required.'", ")", "fixed_ip_address", "=", "content", ".", "get", "(", "'fixed_ip_address'", ")", "ip_address", "=", "content", ".", "get", "(", "'floating_ip_address'", ")", "port_id", "=", "content", ".", "get", "(", "'port_id'", ")", "port", "=", "None", "port_fixed_ip", "=", "{", "}", "network", "=", "_get_network", "(", "context", ",", "network_id", ")", "if", "port_id", ":", "port", "=", "_get_port", "(", "context", ",", "port_id", ")", "fixed_ip", "=", "_get_fixed_ip", "(", "context", ",", "fixed_ip_address", ",", "port", ")", "port_fixed_ip", "=", "{", "port", ".", "id", ":", "{", "'port'", ":", "port", ",", "'fixed_ip'", ":", "fixed_ip", "}", "}", "flip", "=", "_allocate_ip", "(", "context", ",", "network", ",", "port", ",", "ip_address", ",", "ip_types", ".", "FLOATING", ")", "_create_flip", "(", "context", ",", "flip", ",", "port_fixed_ip", ")", "return", "v", ".", "_make_floating_ip_dict", "(", "flip", ",", "port_id", ")" ]
1112e6a66917d3e98e44cb7b33b107fd5a74bb2e
valid
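
A hedged usage sketch for the record above. The content keys follow the function body (floating_network_id is mandatory, the rest optional); the UUIDs and the context object are placeholders, not values from the source.

from quark.plugin_modules import floating_ips

context = ...  # a neutron api request context, supplied by the API layer

content = {
    'floating_network_id': 'PUBLIC-NET-UUID',  # required, else BadRequest
    'port_id': 'PORT-UUID',             # optional: associate at create time
    'fixed_ip_address': None,           # optional: next available fixed ip
    'floating_ip_address': None,        # optional: let IPAM pick the flip
}
flip_dict = floating_ips.create_floatingip(context, content)
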
update_floatingip
Update an existing floating IP. :param context: neutron api request context. :param id: id of the floating ip :param content: dictionary with keys indicating fields to update. valid keys are those that have a value of True for 'allow_put' as listed in the RESOURCE_ATTRIBUTE_MAP object in neutron/api/v2/attributes.py. :returns: Dictionary containing details for the new floating IP. If values are declared in the fields parameter, then only those keys will be present.
quark/plugin_modules/floating_ips.py
def update_floatingip(context, id, content):
    """Update an existing floating IP.

    :param context: neutron api request context.
    :param id: id of the floating ip
    :param content: dictionary with keys indicating fields to update.
        valid keys are those that have a value of True for 'allow_put'
        as listed in the RESOURCE_ATTRIBUTE_MAP object in
        neutron/api/v2/attributes.py.

    :returns: Dictionary containing details for the new floating IP. If
        values are declared in the fields parameter, then only those keys
        will be present.
    """
    LOG.info('update_floatingip %s for tenant %s and body %s' %
             (id, context.tenant_id, content))
    if 'port_id' not in content:
        raise n_exc.BadRequest(resource='floating_ip',
                               msg='port_id is required.')
    requested_ports = []
    if content.get('port_id'):
        requested_ports = [{'port_id': content.get('port_id')}]
    flip = _update_flip(context, id, ip_types.FLOATING, requested_ports)
    return v._make_floating_ip_dict(flip)
def update_floatingip(context, id, content):
    """Update an existing floating IP.

    :param context: neutron api request context.
    :param id: id of the floating ip
    :param content: dictionary with keys indicating fields to update.
        valid keys are those that have a value of True for 'allow_put'
        as listed in the RESOURCE_ATTRIBUTE_MAP object in
        neutron/api/v2/attributes.py.

    :returns: Dictionary containing details for the new floating IP. If
        values are declared in the fields parameter, then only those keys
        will be present.
    """
    LOG.info('update_floatingip %s for tenant %s and body %s' %
             (id, context.tenant_id, content))
    if 'port_id' not in content:
        raise n_exc.BadRequest(resource='floating_ip',
                               msg='port_id is required.')
    requested_ports = []
    if content.get('port_id'):
        requested_ports = [{'port_id': content.get('port_id')}]
    flip = _update_flip(context, id, ip_types.FLOATING, requested_ports)
    return v._make_floating_ip_dict(flip)
[ "Update", "an", "existing", "floating", "IP", "." ]
openstack/quark
python
https://github.com/openstack/quark/blob/1112e6a66917d3e98e44cb7b33b107fd5a74bb2e/quark/plugin_modules/floating_ips.py#L394-L420
[ "def", "update_floatingip", "(", "context", ",", "id", ",", "content", ")", ":", "LOG", ".", "info", "(", "'update_floatingip %s for tenant %s and body %s'", "%", "(", "id", ",", "context", ".", "tenant_id", ",", "content", ")", ")", "if", "'port_id'", "not", "in", "content", ":", "raise", "n_exc", ".", "BadRequest", "(", "resource", "=", "'floating_ip'", ",", "msg", "=", "'port_id is required.'", ")", "requested_ports", "=", "[", "]", "if", "content", ".", "get", "(", "'port_id'", ")", ":", "requested_ports", "=", "[", "{", "'port_id'", ":", "content", ".", "get", "(", "'port_id'", ")", "}", "]", "flip", "=", "_update_flip", "(", "context", ",", "id", ",", "ip_types", ".", "FLOATING", ",", "requested_ports", ")", "return", "v", ".", "_make_floating_ip_dict", "(", "flip", ")" ]
1112e6a66917d3e98e44cb7b33b107fd5a74bb2e
valid
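
Per the body above, 'port_id' must be present in the request even when disassociating: a falsy value leaves requested_ports empty, which _update_flip treats as a detach. A sketch with placeholder ids and a hypothetical context:

from quark.plugin_modules import floating_ips

context = ...  # neutron api request context (placeholder)

# Re-point the flip at a different port:
floating_ips.update_floatingip(context, 'FLIP-UUID',
                               {'port_id': 'NEW-PORT-UUID'})

# Disassociate: the key must still be present, but may be None.
floating_ips.update_floatingip(context, 'FLIP-UUID', {'port_id': None})

# Omitting the key entirely raises BadRequest('port_id is required.').
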
delete_floatingip
deallocate a floating IP. :param context: neutron api request context. :param id: id of the floating ip
quark/plugin_modules/floating_ips.py
def delete_floatingip(context, id):
    """deallocate a floating IP.

    :param context: neutron api request context.
    :param id: id of the floating ip
    """
    LOG.info('delete_floatingip %s for tenant %s' %
             (id, context.tenant_id))
    _delete_flip(context, id, ip_types.FLOATING)
def delete_floatingip(context, id):
    """deallocate a floating IP.

    :param context: neutron api request context.
    :param id: id of the floating ip
    """
    LOG.info('delete_floatingip %s for tenant %s' %
             (id, context.tenant_id))
    _delete_flip(context, id, ip_types.FLOATING)
[ "deallocate", "a", "floating", "IP", "." ]
openstack/quark
python
https://github.com/openstack/quark/blob/1112e6a66917d3e98e44cb7b33b107fd5a74bb2e/quark/plugin_modules/floating_ips.py#L423-L432
[ "def", "delete_floatingip", "(", "context", ",", "id", ")", ":", "LOG", ".", "info", "(", "'delete_floatingip %s for tenant %s'", "%", "(", "id", ",", "context", ".", "tenant_id", ")", ")", "_delete_flip", "(", "context", ",", "id", ",", "ip_types", ".", "FLOATING", ")" ]
1112e6a66917d3e98e44cb7b33b107fd5a74bb2e
valid
get_floatingip
Retrieve a floating IP. :param context: neutron api request context. :param id: The UUID of the floating IP. :param fields: a list of strings that are valid keys in a floating IP dictionary as listed in the RESOURCE_ATTRIBUTE_MAP object in neutron/api/v2/attributes.py. Only these fields will be returned. :returns: Dictionary containing details for the floating IP. If values are declared in the fields parameter, then only those keys will be present.
quark/plugin_modules/floating_ips.py
def get_floatingip(context, id, fields=None):
    """Retrieve a floating IP.

    :param context: neutron api request context.
    :param id: The UUID of the floating IP.
    :param fields: a list of strings that are valid keys in a
        floating IP dictionary as listed in the RESOURCE_ATTRIBUTE_MAP
        object in neutron/api/v2/attributes.py. Only these fields
        will be returned.

    :returns: Dictionary containing details for the floating IP. If values
        are declared in the fields parameter, then only those keys will be
        present.
    """
    LOG.info('get_floatingip %s for tenant %s' % (id, context.tenant_id))
    filters = {'address_type': ip_types.FLOATING, '_deallocated': False}
    floating_ip = db_api.floating_ip_find(context, id=id, scope=db_api.ONE,
                                          **filters)
    if not floating_ip:
        raise q_exc.FloatingIpNotFound(id=id)
    return v._make_floating_ip_dict(floating_ip)
def get_floatingip(context, id, fields=None):
    """Retrieve a floating IP.

    :param context: neutron api request context.
    :param id: The UUID of the floating IP.
    :param fields: a list of strings that are valid keys in a
        floating IP dictionary as listed in the RESOURCE_ATTRIBUTE_MAP
        object in neutron/api/v2/attributes.py. Only these fields
        will be returned.

    :returns: Dictionary containing details for the floating IP. If values
        are declared in the fields parameter, then only those keys will be
        present.
    """
    LOG.info('get_floatingip %s for tenant %s' % (id, context.tenant_id))
    filters = {'address_type': ip_types.FLOATING, '_deallocated': False}
    floating_ip = db_api.floating_ip_find(context, id=id, scope=db_api.ONE,
                                          **filters)
    if not floating_ip:
        raise q_exc.FloatingIpNotFound(id=id)
    return v._make_floating_ip_dict(floating_ip)
[ "Retrieve", "a", "floating", "IP", "." ]
openstack/quark
python
https://github.com/openstack/quark/blob/1112e6a66917d3e98e44cb7b33b107fd5a74bb2e/quark/plugin_modules/floating_ips.py#L435-L459
[ "def", "get_floatingip", "(", "context", ",", "id", ",", "fields", "=", "None", ")", ":", "LOG", ".", "info", "(", "'get_floatingip %s for tenant %s'", "%", "(", "id", ",", "context", ".", "tenant_id", ")", ")", "filters", "=", "{", "'address_type'", ":", "ip_types", ".", "FLOATING", ",", "'_deallocated'", ":", "False", "}", "floating_ip", "=", "db_api", ".", "floating_ip_find", "(", "context", ",", "id", "=", "id", ",", "scope", "=", "db_api", ".", "ONE", ",", "*", "*", "filters", ")", "if", "not", "floating_ip", ":", "raise", "q_exc", ".", "FloatingIpNotFound", "(", "id", "=", "id", ")", "return", "v", ".", "_make_floating_ip_dict", "(", "floating_ip", ")" ]
1112e6a66917d3e98e44cb7b33b107fd5a74bb2e
valid
get_floatingips
Retrieve a list of floating ips. :param context: neutron api request context. :param filters: a dictionary with keys that are valid keys for a floating ip as listed in the RESOURCE_ATTRIBUTE_MAP object in neutron/api/v2/attributes.py. Values in this dictionary are an iterable containing values that will be used for an exact match comparison for that value. Each result returned by this function will have matched one of the values for each key in filters. :param fields: a list of strings that are valid keys in a floating IP dictionary as listed in the RESOURCE_ATTRIBUTE_MAP object in neutron/api/v2/attributes.py. Only these fields will be returned. :returns: List of floating IPs that are accessible to the tenant who submits the request (as indicated by the tenant id of the context) as well as any filters.
quark/plugin_modules/floating_ips.py
def get_floatingips(context, filters=None, fields=None, sorts=['id'],
                    limit=None, marker=None, page_reverse=False):
    """Retrieve a list of floating ips.

    :param context: neutron api request context.
    :param filters: a dictionary with keys that are valid keys for
        a floating ip as listed in the RESOURCE_ATTRIBUTE_MAP object
        in neutron/api/v2/attributes.py. Values in this dictionary
        are an iterable containing values that will be used for an exact
        match comparison for that value. Each result returned by this
        function will have matched one of the values for each key in
        filters.
    :param fields: a list of strings that are valid keys in a
        floating IP dictionary as listed in the RESOURCE_ATTRIBUTE_MAP
        object in neutron/api/v2/attributes.py. Only these fields
        will be returned.

    :returns: List of floating IPs that are accessible to the tenant who
        submits the request (as indicated by the tenant id of the context)
        as well as any filters.
    """
    LOG.info('get_floatingips for tenant %s filters %s fields %s' %
             (context.tenant_id, filters, fields))
    floating_ips = _get_ips_by_type(context, ip_types.FLOATING,
                                    filters=filters, fields=fields)
    return [v._make_floating_ip_dict(flip) for flip in floating_ips]
def get_floatingips(context, filters=None, fields=None, sorts=['id'],
                    limit=None, marker=None, page_reverse=False):
    """Retrieve a list of floating ips.

    :param context: neutron api request context.
    :param filters: a dictionary with keys that are valid keys for
        a floating ip as listed in the RESOURCE_ATTRIBUTE_MAP object
        in neutron/api/v2/attributes.py. Values in this dictionary
        are an iterable containing values that will be used for an exact
        match comparison for that value. Each result returned by this
        function will have matched one of the values for each key in
        filters.
    :param fields: a list of strings that are valid keys in a
        floating IP dictionary as listed in the RESOURCE_ATTRIBUTE_MAP
        object in neutron/api/v2/attributes.py. Only these fields
        will be returned.

    :returns: List of floating IPs that are accessible to the tenant who
        submits the request (as indicated by the tenant id of the context)
        as well as any filters.
    """
    LOG.info('get_floatingips for tenant %s filters %s fields %s' %
             (context.tenant_id, filters, fields))
    floating_ips = _get_ips_by_type(context, ip_types.FLOATING,
                                    filters=filters, fields=fields)
    return [v._make_floating_ip_dict(flip) for flip in floating_ips]
[ "Retrieve", "a", "list", "of", "floating", "ips", "." ]
openstack/quark
python
https://github.com/openstack/quark/blob/1112e6a66917d3e98e44cb7b33b107fd5a74bb2e/quark/plugin_modules/floating_ips.py#L462-L489
[ "def", "get_floatingips", "(", "context", ",", "filters", "=", "None", ",", "fields", "=", "None", ",", "sorts", "=", "[", "'id'", "]", ",", "limit", "=", "None", ",", "marker", "=", "None", ",", "page_reverse", "=", "False", ")", ":", "LOG", ".", "info", "(", "'get_floatingips for tenant %s filters %s fields %s'", "%", "(", "context", ".", "tenant_id", ",", "filters", ",", "fields", ")", ")", "floating_ips", "=", "_get_ips_by_type", "(", "context", ",", "ip_types", ".", "FLOATING", ",", "filters", "=", "filters", ",", "fields", "=", "fields", ")", "return", "[", "v", ".", "_make_floating_ip_dict", "(", "flip", ")", "for", "flip", "in", "floating_ips", "]" ]
1112e6a66917d3e98e44cb7b33b107fd5a74bb2e
valid
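
Filter values are iterables matched exactly, per the docstring above. A sketch with placeholder values; the filter key and field names here are illustrative, not taken from the source:

from quark.plugin_modules import floating_ips

context = ...  # neutron api request context (placeholder)

flips = floating_ips.get_floatingips(
    context,
    filters={'network_id': ['PUBLIC-NET-UUID']},  # iterable of exact matches
    fields=['id', 'floating_ip_address', 'port_id'])
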
get_floatingips_count
Return the number of floating IPs. :param context: neutron api request context :param filters: a dictionary with keys that are valid keys for a floating IP as listed in the RESOURCE_ATTRIBUTE_MAP object in neutron/api/v2/attributes.py. Values in this dictionary are an iterable containing values that will be used for an exact match comparison for that value. Each result returned by this function will have matched one of the values for each key in filters. :returns: The number of floating IPs that are accessible to the tenant who submits the request (as indicated by the tenant id of the context) as well as any filters. NOTE: this method is optional, as it was not part of the originally defined plugin API.
quark/plugin_modules/floating_ips.py
def get_floatingips_count(context, filters=None):
    """Return the number of floating IPs.

    :param context: neutron api request context
    :param filters: a dictionary with keys that are valid keys for
        a floating IP as listed in the RESOURCE_ATTRIBUTE_MAP object
        in neutron/api/v2/attributes.py. Values in this dictionary
        are an iterable containing values that will be used for an exact
        match comparison for that value. Each result returned by this
        function will have matched one of the values for each key in
        filters.

    :returns: The number of floating IPs that are accessible to the tenant
        who submits the request (as indicated by the tenant id of the
        context) as well as any filters.

    NOTE: this method is optional, as it was not part of the originally
    defined plugin API.
    """
    LOG.info('get_floatingips_count for tenant %s filters %s' %
             (context.tenant_id, filters))
    if filters is None:
        filters = {}
    filters['_deallocated'] = False
    filters['address_type'] = ip_types.FLOATING
    count = db_api.ip_address_count_all(context, filters)
    LOG.info('Found %s floating ips for tenant %s' %
             (count, context.tenant_id))
    return count
def get_floatingips_count(context, filters=None):
    """Return the number of floating IPs.

    :param context: neutron api request context
    :param filters: a dictionary with keys that are valid keys for
        a floating IP as listed in the RESOURCE_ATTRIBUTE_MAP object
        in neutron/api/v2/attributes.py. Values in this dictionary
        are an iterable containing values that will be used for an exact
        match comparison for that value. Each result returned by this
        function will have matched one of the values for each key in
        filters.

    :returns: The number of floating IPs that are accessible to the tenant
        who submits the request (as indicated by the tenant id of the
        context) as well as any filters.

    NOTE: this method is optional, as it was not part of the originally
    defined plugin API.
    """
    LOG.info('get_floatingips_count for tenant %s filters %s' %
             (context.tenant_id, filters))
    if filters is None:
        filters = {}
    filters['_deallocated'] = False
    filters['address_type'] = ip_types.FLOATING
    count = db_api.ip_address_count_all(context, filters)
    LOG.info('Found %s floating ips for tenant %s' %
             (count, context.tenant_id))
    return count
[ "Return", "the", "number", "of", "floating", "IPs", "." ]
openstack/quark
python
https://github.com/openstack/quark/blob/1112e6a66917d3e98e44cb7b33b107fd5a74bb2e/quark/plugin_modules/floating_ips.py#L492-L523
[ "def", "get_floatingips_count", "(", "context", ",", "filters", "=", "None", ")", ":", "LOG", ".", "info", "(", "'get_floatingips_count for tenant %s filters %s'", "%", "(", "context", ".", "tenant_id", ",", "filters", ")", ")", "if", "filters", "is", "None", ":", "filters", "=", "{", "}", "filters", "[", "'_deallocated'", "]", "=", "False", "filters", "[", "'address_type'", "]", "=", "ip_types", ".", "FLOATING", "count", "=", "db_api", ".", "ip_address_count_all", "(", "context", ",", "filters", ")", "LOG", ".", "info", "(", "'Found %s floating ips for tenant %s'", "%", "(", "count", ",", "context", ".", "tenant_id", ")", ")", "return", "count" ]
1112e6a66917d3e98e44cb7b33b107fd5a74bb2e
valid
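
One detail worth flagging in the body above: caller-supplied filters are mutated in place — '_deallocated' and 'address_type' are forced before counting, so a dict the caller passes in comes back with those keys added. A runnable sketch of just that filter munging (the FLOATING constant is a stand-in for quark's ip_types.FLOATING):

FLOATING = 'floating'  # stand-in for ip_types.FLOATING

filters = {'network_id': ['PUBLIC-NET-UUID']}  # what a caller might pass
filters['_deallocated'] = False     # forced by get_floatingips_count
filters['address_type'] = FLOATING  # likewise
print(filters)  # the caller's dict now carries both forced keys
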
create_scalingip
Allocate or reallocate a scaling IP. :param context: neutron api request context. :param content: dictionary describing the scaling ip, with keys as listed in the RESOURCE_ATTRIBUTE_MAP object in neutron/api/v2/attributes.py. All keys will be populated. :returns: Dictionary containing details for the new scaling IP. If values are declared in the fields parameter, then only those keys will be present.
quark/plugin_modules/floating_ips.py
def create_scalingip(context, content):
    """Allocate or reallocate a scaling IP.

    :param context: neutron api request context.
    :param content: dictionary describing the scaling ip, with keys as
        listed in the RESOURCE_ATTRIBUTE_MAP object in
        neutron/api/v2/attributes.py. All keys will be populated.

    :returns: Dictionary containing details for the new scaling IP. If
        values are declared in the fields parameter, then only those keys
        will be present.
    """
    LOG.info('create_scalingip for tenant %s and body %s',
             context.tenant_id, content)
    network_id = content.get('scaling_network_id')
    ip_address = content.get('scaling_ip_address')
    requested_ports = content.get('ports', [])

    network = _get_network(context, network_id)
    port_fixed_ips = {}
    for req_port in requested_ports:
        port = _get_port(context, req_port['port_id'])
        fixed_ip = _get_fixed_ip(context, req_port.get('fixed_ip_address'),
                                 port)
        port_fixed_ips[port.id] = {"port": port, "fixed_ip": fixed_ip}
    scip = _allocate_ip(context, network, None, ip_address,
                        ip_types.SCALING)
    _create_flip(context, scip, port_fixed_ips)
    return v._make_scaling_ip_dict(scip)
def create_scalingip(context, content):
    """Allocate or reallocate a scaling IP.

    :param context: neutron api request context.
    :param content: dictionary describing the scaling ip, with keys as
        listed in the RESOURCE_ATTRIBUTE_MAP object in
        neutron/api/v2/attributes.py. All keys will be populated.

    :returns: Dictionary containing details for the new scaling IP. If
        values are declared in the fields parameter, then only those keys
        will be present.
    """
    LOG.info('create_scalingip for tenant %s and body %s',
             context.tenant_id, content)
    network_id = content.get('scaling_network_id')
    ip_address = content.get('scaling_ip_address')
    requested_ports = content.get('ports', [])

    network = _get_network(context, network_id)
    port_fixed_ips = {}
    for req_port in requested_ports:
        port = _get_port(context, req_port['port_id'])
        fixed_ip = _get_fixed_ip(context, req_port.get('fixed_ip_address'),
                                 port)
        port_fixed_ips[port.id] = {"port": port, "fixed_ip": fixed_ip}
    scip = _allocate_ip(context, network, None, ip_address,
                        ip_types.SCALING)
    _create_flip(context, scip, port_fixed_ips)
    return v._make_scaling_ip_dict(scip)
[ "Allocate", "or", "reallocate", "a", "scaling", "IP", "." ]
openstack/quark
python
https://github.com/openstack/quark/blob/1112e6a66917d3e98e44cb7b33b107fd5a74bb2e/quark/plugin_modules/floating_ips.py#L526-L553
[ "def", "create_scalingip", "(", "context", ",", "content", ")", ":", "LOG", ".", "info", "(", "'create_scalingip for tenant %s and body %s'", ",", "context", ".", "tenant_id", ",", "content", ")", "network_id", "=", "content", ".", "get", "(", "'scaling_network_id'", ")", "ip_address", "=", "content", ".", "get", "(", "'scaling_ip_address'", ")", "requested_ports", "=", "content", ".", "get", "(", "'ports'", ",", "[", "]", ")", "network", "=", "_get_network", "(", "context", ",", "network_id", ")", "port_fixed_ips", "=", "{", "}", "for", "req_port", "in", "requested_ports", ":", "port", "=", "_get_port", "(", "context", ",", "req_port", "[", "'port_id'", "]", ")", "fixed_ip", "=", "_get_fixed_ip", "(", "context", ",", "req_port", ".", "get", "(", "'fixed_ip_address'", ")", ",", "port", ")", "port_fixed_ips", "[", "port", ".", "id", "]", "=", "{", "\"port\"", ":", "port", ",", "\"fixed_ip\"", ":", "fixed_ip", "}", "scip", "=", "_allocate_ip", "(", "context", ",", "network", ",", "None", ",", "ip_address", ",", "ip_types", ".", "SCALING", ")", "_create_flip", "(", "context", ",", "scip", ",", "port_fixed_ips", ")", "return", "v", ".", "_make_scaling_ip_dict", "(", "scip", ")" ]
1112e6a66917d3e98e44cb7b33b107fd5a74bb2e
valid
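
Unlike create_floatingip, the scaling variant takes a 'ports' list, so one scaling IP can front several ports, each optionally pinned to a fixed ip. A sketch with placeholder ids and a hypothetical context:

from quark.plugin_modules import floating_ips

context = ...  # neutron api request context (placeholder)

content = {
    'scaling_network_id': 'PUBLIC-NET-UUID',
    'scaling_ip_address': None,  # optional: let IPAM choose
    'ports': [
        {'port_id': 'PORT-A-UUID'},  # fixed_ip_address chosen per port
        {'port_id': 'PORT-B-UUID', 'fixed_ip_address': '10.0.0.7'},
    ],
}
scip_dict = floating_ips.create_scalingip(context, content)
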
update_scalingip
Update an existing scaling IP. :param context: neutron api request context. :param id: id of the scaling ip :param content: dictionary with keys indicating fields to update. valid keys are those that have a value of True for 'allow_put' as listed in the RESOURCE_ATTRIBUTE_MAP object in neutron/api/v2/attributes.py. :returns: Dictionary containing details for the new scaling IP. If values are declared in the fields parameter, then only those keys will be present.
quark/plugin_modules/floating_ips.py
def update_scalingip(context, id, content):
    """Update an existing scaling IP.

    :param context: neutron api request context.
    :param id: id of the scaling ip
    :param content: dictionary with keys indicating fields to update.
        valid keys are those that have a value of True for 'allow_put'
        as listed in the RESOURCE_ATTRIBUTE_MAP object in
        neutron/api/v2/attributes.py.

    :returns: Dictionary containing details for the new scaling IP. If
        values are declared in the fields parameter, then only those keys
        will be present.
    """
    LOG.info('update_scalingip %s for tenant %s and body %s' %
             (id, context.tenant_id, content))
    requested_ports = content.get('ports', [])
    flip = _update_flip(context, id, ip_types.SCALING, requested_ports)
    return v._make_scaling_ip_dict(flip)
def update_scalingip(context, id, content):
    """Update an existing scaling IP.

    :param context: neutron api request context.
    :param id: id of the scaling ip
    :param content: dictionary with keys indicating fields to update.
        valid keys are those that have a value of True for 'allow_put'
        as listed in the RESOURCE_ATTRIBUTE_MAP object in
        neutron/api/v2/attributes.py.

    :returns: Dictionary containing details for the new scaling IP. If
        values are declared in the fields parameter, then only those keys
        will be present.
    """
    LOG.info('update_scalingip %s for tenant %s and body %s' %
             (id, context.tenant_id, content))
    requested_ports = content.get('ports', [])
    flip = _update_flip(context, id, ip_types.SCALING, requested_ports)
    return v._make_scaling_ip_dict(flip)
[ "Update", "an", "existing", "scaling", "IP", "." ]
openstack/quark
python
https://github.com/openstack/quark/blob/1112e6a66917d3e98e44cb7b33b107fd5a74bb2e/quark/plugin_modules/floating_ips.py#L556-L574
[ "def", "update_scalingip", "(", "context", ",", "id", ",", "content", ")", ":", "LOG", ".", "info", "(", "'update_scalingip %s for tenant %s and body %s'", "%", "(", "id", ",", "context", ".", "tenant_id", ",", "content", ")", ")", "requested_ports", "=", "content", ".", "get", "(", "'ports'", ",", "[", "]", ")", "flip", "=", "_update_flip", "(", "context", ",", "id", ",", "ip_types", ".", "SCALING", ",", "requested_ports", ")", "return", "v", ".", "_make_scaling_ip_dict", "(", "flip", ")" ]
1112e6a66917d3e98e44cb7b33b107fd5a74bb2e
valid
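
The 'ports' list here is a full replacement: ports absent from it are detached by _update_flip, and an omitted list falls back to [] and detaches everything. A sketch (placeholder ids, hypothetical context):

from quark.plugin_modules import floating_ips

context = ...  # neutron api request context (placeholder)

# Keep port A, drop everything else, add port C:
floating_ips.update_scalingip(context, 'SCIP-UUID', {
    'ports': [{'port_id': 'PORT-A-UUID'}, {'port_id': 'PORT-C-UUID'}]})

# Detach all ports: content.get('ports', []) yields an empty request.
floating_ips.update_scalingip(context, 'SCIP-UUID', {})
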
delete_scalingip
Deallocate a scaling IP. :param context: neutron api request context. :param id: id of the scaling ip
quark/plugin_modules/floating_ips.py
def delete_scalingip(context, id):
    """Deallocate a scaling IP.

    :param context: neutron api request context.
    :param id: id of the scaling ip
    """
    LOG.info('delete_scalingip %s for tenant %s' %
             (id, context.tenant_id))
    _delete_flip(context, id, ip_types.SCALING)
def delete_scalingip(context, id):
    """Deallocate a scaling IP.

    :param context: neutron api request context.
    :param id: id of the scaling ip
    """
    LOG.info('delete_scalingip %s for tenant %s' %
             (id, context.tenant_id))
    _delete_flip(context, id, ip_types.SCALING)
[ "Deallocate", "a", "scaling", "IP", "." ]
openstack/quark
python
https://github.com/openstack/quark/blob/1112e6a66917d3e98e44cb7b33b107fd5a74bb2e/quark/plugin_modules/floating_ips.py#L577-L584
[ "def", "delete_scalingip", "(", "context", ",", "id", ")", ":", "LOG", ".", "info", "(", "'delete_scalingip %s for tenant %s'", "%", "(", "id", ",", "context", ".", "tenant_id", ")", ")", "_delete_flip", "(", "context", ",", "id", ",", "ip_types", ".", "SCALING", ")" ]
1112e6a66917d3e98e44cb7b33b107fd5a74bb2e
valid
get_scalingip
Retrieve a scaling IP. :param context: neutron api request context. :param id: The UUID of the scaling IP. :param fields: a list of strings that are valid keys in a scaling IP dictionary as listed in the RESOURCE_ATTRIBUTE_MAP object in neutron/api/v2/attributes.py. Only these fields will be returned. :returns: Dictionary containing details for the scaling IP. If values are declared in the fields parameter, then only those keys will be present.
quark/plugin_modules/floating_ips.py
def get_scalingip(context, id, fields=None):
    """Retrieve a scaling IP.

    :param context: neutron api request context.
    :param id: The UUID of the scaling IP.
    :param fields: a list of strings that are valid keys in a
        scaling IP dictionary as listed in the RESOURCE_ATTRIBUTE_MAP
        object in neutron/api/v2/attributes.py. Only these fields
        will be returned.

    :returns: Dictionary containing details for the scaling IP. If values
        are declared in the fields parameter, then only those keys will be
        present.
    """
    LOG.info('get_scalingip %s for tenant %s' % (id, context.tenant_id))
    filters = {'address_type': ip_types.SCALING, '_deallocated': False}
    scaling_ip = db_api.floating_ip_find(context, id=id, scope=db_api.ONE,
                                         **filters)
    if not scaling_ip:
        raise q_exc.ScalingIpNotFound(id=id)
    return v._make_scaling_ip_dict(scaling_ip)
def get_scalingip(context, id, fields=None):
    """Retrieve a scaling IP.

    :param context: neutron api request context.
    :param id: The UUID of the scaling IP.
    :param fields: a list of strings that are valid keys in a
        scaling IP dictionary as listed in the RESOURCE_ATTRIBUTE_MAP
        object in neutron/api/v2/attributes.py. Only these fields
        will be returned.

    :returns: Dictionary containing details for the scaling IP. If values
        are declared in the fields parameter, then only those keys will be
        present.
    """
    LOG.info('get_scalingip %s for tenant %s' % (id, context.tenant_id))
    filters = {'address_type': ip_types.SCALING, '_deallocated': False}
    scaling_ip = db_api.floating_ip_find(context, id=id, scope=db_api.ONE,
                                         **filters)
    if not scaling_ip:
        raise q_exc.ScalingIpNotFound(id=id)
    return v._make_scaling_ip_dict(scaling_ip)
[ "Retrieve", "a", "scaling", "IP", "." ]
openstack/quark
python
https://github.com/openstack/quark/blob/1112e6a66917d3e98e44cb7b33b107fd5a74bb2e/quark/plugin_modules/floating_ips.py#L587-L607
[ "def", "get_scalingip", "(", "context", ",", "id", ",", "fields", "=", "None", ")", ":", "LOG", ".", "info", "(", "'get_scalingip %s for tenant %s'", "%", "(", "id", ",", "context", ".", "tenant_id", ")", ")", "filters", "=", "{", "'address_type'", ":", "ip_types", ".", "SCALING", ",", "'_deallocated'", ":", "False", "}", "scaling_ip", "=", "db_api", ".", "floating_ip_find", "(", "context", ",", "id", "=", "id", ",", "scope", "=", "db_api", ".", "ONE", ",", "*", "*", "filters", ")", "if", "not", "scaling_ip", ":", "raise", "q_exc", ".", "ScalingIpNotFound", "(", "id", "=", "id", ")", "return", "v", ".", "_make_scaling_ip_dict", "(", "scaling_ip", ")" ]
1112e6a66917d3e98e44cb7b33b107fd5a74bb2e
valid
get_scalingips
Retrieve a list of scaling ips. :param context: neutron api request context. :param filters: a dictionary with keys that are valid keys for a scaling ip as listed in the RESOURCE_ATTRIBUTE_MAP object in neutron/api/v2/attributes.py. Values in this dictionary are an iterable containing values that will be used for an exact match comparison for that value. Each result returned by this function will have matched one of the values for each key in filters. :param fields: a list of strings that are valid keys in a scaling IP dictionary as listed in the RESOURCE_ATTRIBUTE_MAP object in neutron/api/v2/attributes.py. Only these fields will be returned. :returns: List of scaling IPs that are accessible to the tenant who submits the request (as indicated by the tenant id of the context) as well as any filters.
quark/plugin_modules/floating_ips.py
def get_scalingips(context, filters=None, fields=None, sorts=['id'],
                   limit=None, marker=None, page_reverse=False):
    """Retrieve a list of scaling ips.

    :param context: neutron api request context.
    :param filters: a dictionary with keys that are valid keys for
        a scaling ip as listed in the RESOURCE_ATTRIBUTE_MAP object
        in neutron/api/v2/attributes.py. Values in this dictionary
        are an iterable containing values that will be used for an exact
        match comparison for that value. Each result returned by this
        function will have matched one of the values for each key in
        filters.
    :param fields: a list of strings that are valid keys in a
        scaling IP dictionary as listed in the RESOURCE_ATTRIBUTE_MAP
        object in neutron/api/v2/attributes.py. Only these fields
        will be returned.

    :returns: List of scaling IPs that are accessible to the tenant who
        submits the request (as indicated by the tenant id of the context)
        as well as any filters.
    """
    LOG.info('get_scalingips for tenant %s filters %s fields %s' %
             (context.tenant_id, filters, fields))
    scaling_ips = _get_ips_by_type(context, ip_types.SCALING,
                                   filters=filters, fields=fields)
    return [v._make_scaling_ip_dict(scip) for scip in scaling_ips]
def get_scalingips(context, filters=None, fields=None, sorts=['id'],
                   limit=None, marker=None, page_reverse=False):
    """Retrieve a list of scaling ips.

    :param context: neutron api request context.
    :param filters: a dictionary with keys that are valid keys for
        a scaling ip as listed in the RESOURCE_ATTRIBUTE_MAP object
        in neutron/api/v2/attributes.py. Values in this dictionary
        are an iterable containing values that will be used for an exact
        match comparison for that value. Each result returned by this
        function will have matched one of the values for each key in
        filters.
    :param fields: a list of strings that are valid keys in a
        scaling IP dictionary as listed in the RESOURCE_ATTRIBUTE_MAP
        object in neutron/api/v2/attributes.py. Only these fields
        will be returned.

    :returns: List of scaling IPs that are accessible to the tenant who
        submits the request (as indicated by the tenant id of the context)
        as well as any filters.
    """
    LOG.info('get_scalingips for tenant %s filters %s fields %s' %
             (context.tenant_id, filters, fields))
    scaling_ips = _get_ips_by_type(context, ip_types.SCALING,
                                   filters=filters, fields=fields)
    return [v._make_scaling_ip_dict(scip) for scip in scaling_ips]
[ "Retrieve", "a", "list", "of", "scaling", "ips", "." ]
openstack/quark
python
https://github.com/openstack/quark/blob/1112e6a66917d3e98e44cb7b33b107fd5a74bb2e/quark/plugin_modules/floating_ips.py#L610-L635
[ "def", "get_scalingips", "(", "context", ",", "filters", "=", "None", ",", "fields", "=", "None", ",", "sorts", "=", "[", "'id'", "]", ",", "limit", "=", "None", ",", "marker", "=", "None", ",", "page_reverse", "=", "False", ")", ":", "LOG", ".", "info", "(", "'get_scalingips for tenant %s filters %s fields %s'", "%", "(", "context", ".", "tenant_id", ",", "filters", ",", "fields", ")", ")", "scaling_ips", "=", "_get_ips_by_type", "(", "context", ",", "ip_types", ".", "SCALING", ",", "filters", "=", "filters", ",", "fields", "=", "fields", ")", "return", "[", "v", ".", "_make_scaling_ip_dict", "(", "scip", ")", "for", "scip", "in", "scaling_ips", "]" ]
1112e6a66917d3e98e44cb7b33b107fd5a74bb2e
valid
update_ip_address
Due to NCP-1592 ensure that address_type cannot change after update.
quark/plugin_modules/ip_addresses.py
def update_ip_address(context, id, ip_address):
    """Due to NCP-1592 ensure that address_type cannot change after update."""
    LOG.info("update_ip_address %s for tenant %s" % (id, context.tenant_id))
    ports = []
    if 'ip_address' not in ip_address:
        raise n_exc.BadRequest(resource="ip_addresses",
                               msg="Invalid request body.")
    with context.session.begin():
        db_address = db_api.ip_address_find(context, id=id, scope=db_api.ONE)
        if not db_address:
            raise q_exc.IpAddressNotFound(addr_id=id)
        iptype = db_address.address_type
        if iptype == ip_types.FIXED and not CONF.QUARK.ipaddr_allow_fixed_ip:
            raise n_exc.BadRequest(
                resource="ip_addresses",
                msg="Fixed ips cannot be updated using this interface.")

        reset = ip_address['ip_address'].get('reset_allocation_time', False)
        if reset and db_address['deallocated'] == 1:
            if context.is_admin:
                LOG.info("IP's deallocated time being manually reset")
                db_address['deallocated_at'] = _get_deallocated_override()
            else:
                msg = "Modification of reset_allocation_time requires admin"
                raise webob.exc.HTTPForbidden(detail=msg)

        port_ids = ip_address['ip_address'].get('port_ids', None)

        if port_ids is not None and not port_ids:
            raise n_exc.BadRequest(
                resource="ip_addresses",
                msg="Cannot be updated with empty port_id list")

        if iptype == ip_types.SHARED:
            has_owner = db_address.has_any_shared_owner()

        if port_ids:
            if iptype == ip_types.FIXED and len(port_ids) > 1:
                raise n_exc.BadRequest(
                    resource="ip_addresses",
                    msg="Fixed ips cannot be updated with more than one port.")

            _raise_if_shared_and_enabled(ip_address, db_address)
            ports = db_api.port_find(context, tenant_id=context.tenant_id,
                                     id=port_ids, scope=db_api.ALL)

            # NOTE(name): could be considered inefficient because we're
            # converting to a list to check length. Maybe revisit
            if len(ports) != len(port_ids):
                raise n_exc.PortNotFound(port_id=port_ids)

            validate_and_fetch_segment(ports, db_address["network_id"])
            validate_port_ip_quotas(context, db_address.network_id, ports)

            if iptype == ip_types.SHARED and has_owner:
                for assoc in db_address.associations:
                    pid = assoc.port_id
                    if pid not in port_ids and 'none' != assoc.service:
                        raise q_exc.PortRequiresDisassociation()

            LOG.info("Updating IP address, %s, to only be used by the"
                     "following ports: %s" % (db_address.address_readable,
                                              [p.id for p in ports]))
            new_address = db_api.update_port_associations_for_ip(context,
                                                                 ports,
                                                                 db_address)
        elif iptype == ip_types.SHARED and has_owner:
            raise q_exc.PortRequiresDisassociation()
        elif 'deallocated' in ip_address['ip_address']\
                and context.is_admin:
            # Verify no port associations
            if len(db_address.associations) != 0:
                exc_msg = ("IP %s cannot be deallocated or allocated while"
                           " still associated with ports: %s"
                           % (db_address['address_readable'],
                              db_address.associations))
                raise q_exc.ActionNotAuthorized(msg=exc_msg)

            # NOTE: If an admin, allow a user to set deallocated to false
            # in order to reserve a deallocated IP. Alternatively, allow them
            # reverse that choice if a mistake was made.
            if ip_address['ip_address']['deallocated'] == 'False':
                db_address['deallocated'] = False
            else:
                db_address['deallocated'] = True

            return v._make_ip_dict(db_address, context.is_admin)
        else:
            ipam_driver.deallocate_ip_address(context, db_address)
            return v._make_ip_dict(db_address, context.is_admin)
    return v._make_ip_dict(new_address, context.is_admin)
def update_ip_address(context, id, ip_address):
    """Due to NCP-1592 ensure that address_type cannot change after update."""
    LOG.info("update_ip_address %s for tenant %s" % (id, context.tenant_id))
    ports = []
    if 'ip_address' not in ip_address:
        raise n_exc.BadRequest(resource="ip_addresses",
                               msg="Invalid request body.")
    with context.session.begin():
        db_address = db_api.ip_address_find(context, id=id, scope=db_api.ONE)
        if not db_address:
            raise q_exc.IpAddressNotFound(addr_id=id)
        iptype = db_address.address_type
        if iptype == ip_types.FIXED and not CONF.QUARK.ipaddr_allow_fixed_ip:
            raise n_exc.BadRequest(
                resource="ip_addresses",
                msg="Fixed ips cannot be updated using this interface.")

        reset = ip_address['ip_address'].get('reset_allocation_time', False)
        if reset and db_address['deallocated'] == 1:
            if context.is_admin:
                LOG.info("IP's deallocated time being manually reset")
                db_address['deallocated_at'] = _get_deallocated_override()
            else:
                msg = "Modification of reset_allocation_time requires admin"
                raise webob.exc.HTTPForbidden(detail=msg)

        port_ids = ip_address['ip_address'].get('port_ids', None)

        if port_ids is not None and not port_ids:
            raise n_exc.BadRequest(
                resource="ip_addresses",
                msg="Cannot be updated with empty port_id list")

        if iptype == ip_types.SHARED:
            has_owner = db_address.has_any_shared_owner()

        if port_ids:
            if iptype == ip_types.FIXED and len(port_ids) > 1:
                raise n_exc.BadRequest(
                    resource="ip_addresses",
                    msg="Fixed ips cannot be updated with more than one port.")

            _raise_if_shared_and_enabled(ip_address, db_address)
            ports = db_api.port_find(context, tenant_id=context.tenant_id,
                                     id=port_ids, scope=db_api.ALL)

            # NOTE(name): could be considered inefficient because we're
            # converting to a list to check length. Maybe revisit
            if len(ports) != len(port_ids):
                raise n_exc.PortNotFound(port_id=port_ids)

            validate_and_fetch_segment(ports, db_address["network_id"])
            validate_port_ip_quotas(context, db_address.network_id, ports)

            if iptype == ip_types.SHARED and has_owner:
                for assoc in db_address.associations:
                    pid = assoc.port_id
                    if pid not in port_ids and 'none' != assoc.service:
                        raise q_exc.PortRequiresDisassociation()

            LOG.info("Updating IP address, %s, to only be used by the"
                     "following ports: %s" % (db_address.address_readable,
                                              [p.id for p in ports]))
            new_address = db_api.update_port_associations_for_ip(context,
                                                                 ports,
                                                                 db_address)
        elif iptype == ip_types.SHARED and has_owner:
            raise q_exc.PortRequiresDisassociation()
        elif 'deallocated' in ip_address['ip_address']\
                and context.is_admin:
            # Verify no port associations
            if len(db_address.associations) != 0:
                exc_msg = ("IP %s cannot be deallocated or allocated while"
                           " still associated with ports: %s"
                           % (db_address['address_readable'],
                              db_address.associations))
                raise q_exc.ActionNotAuthorized(msg=exc_msg)

            # NOTE: If an admin, allow a user to set deallocated to false
            # in order to reserve a deallocated IP. Alternatively, allow them
            # reverse that choice if a mistake was made.
            if ip_address['ip_address']['deallocated'] == 'False':
                db_address['deallocated'] = False
            else:
                db_address['deallocated'] = True

            return v._make_ip_dict(db_address, context.is_admin)
        else:
            ipam_driver.deallocate_ip_address(context, db_address)
            return v._make_ip_dict(db_address, context.is_admin)
    return v._make_ip_dict(new_address, context.is_admin)
[ "Due", "to", "NCP", "-", "1592", "ensure", "that", "address_type", "cannot", "change", "after", "update", "." ]
openstack/quark
python
https://github.com/openstack/quark/blob/1112e6a66917d3e98e44cb7b33b107fd5a74bb2e/quark/plugin_modules/ip_addresses.py#L281-L369
[ "def", "update_ip_address", "(", "context", ",", "id", ",", "ip_address", ")", ":", "LOG", ".", "info", "(", "\"update_ip_address %s for tenant %s\"", "%", "(", "id", ",", "context", ".", "tenant_id", ")", ")", "ports", "=", "[", "]", "if", "'ip_address'", "not", "in", "ip_address", ":", "raise", "n_exc", ".", "BadRequest", "(", "resource", "=", "\"ip_addresses\"", ",", "msg", "=", "\"Invalid request body.\"", ")", "with", "context", ".", "session", ".", "begin", "(", ")", ":", "db_address", "=", "db_api", ".", "ip_address_find", "(", "context", ",", "id", "=", "id", ",", "scope", "=", "db_api", ".", "ONE", ")", "if", "not", "db_address", ":", "raise", "q_exc", ".", "IpAddressNotFound", "(", "addr_id", "=", "id", ")", "iptype", "=", "db_address", ".", "address_type", "if", "iptype", "==", "ip_types", ".", "FIXED", "and", "not", "CONF", ".", "QUARK", ".", "ipaddr_allow_fixed_ip", ":", "raise", "n_exc", ".", "BadRequest", "(", "resource", "=", "\"ip_addresses\"", ",", "msg", "=", "\"Fixed ips cannot be updated using this interface.\"", ")", "reset", "=", "ip_address", "[", "'ip_address'", "]", ".", "get", "(", "'reset_allocation_time'", ",", "False", ")", "if", "reset", "and", "db_address", "[", "'deallocated'", "]", "==", "1", ":", "if", "context", ".", "is_admin", ":", "LOG", ".", "info", "(", "\"IP's deallocated time being manually reset\"", ")", "db_address", "[", "'deallocated_at'", "]", "=", "_get_deallocated_override", "(", ")", "else", ":", "msg", "=", "\"Modification of reset_allocation_time requires admin\"", "raise", "webob", ".", "exc", ".", "HTTPForbidden", "(", "detail", "=", "msg", ")", "port_ids", "=", "ip_address", "[", "'ip_address'", "]", ".", "get", "(", "'port_ids'", ",", "None", ")", "if", "port_ids", "is", "not", "None", "and", "not", "port_ids", ":", "raise", "n_exc", ".", "BadRequest", "(", "resource", "=", "\"ip_addresses\"", ",", "msg", "=", "\"Cannot be updated with empty port_id list\"", ")", "if", "iptype", "==", "ip_types", ".", "SHARED", ":", "has_owner", "=", "db_address", ".", "has_any_shared_owner", "(", ")", "if", "port_ids", ":", "if", "iptype", "==", "ip_types", ".", "FIXED", "and", "len", "(", "port_ids", ")", ">", "1", ":", "raise", "n_exc", ".", "BadRequest", "(", "resource", "=", "\"ip_addresses\"", ",", "msg", "=", "\"Fixed ips cannot be updated with more than one port.\"", ")", "_raise_if_shared_and_enabled", "(", "ip_address", ",", "db_address", ")", "ports", "=", "db_api", ".", "port_find", "(", "context", ",", "tenant_id", "=", "context", ".", "tenant_id", ",", "id", "=", "port_ids", ",", "scope", "=", "db_api", ".", "ALL", ")", "# NOTE(name): could be considered inefficient because we're", "# converting to a list to check length. 
Maybe revisit", "if", "len", "(", "ports", ")", "!=", "len", "(", "port_ids", ")", ":", "raise", "n_exc", ".", "PortNotFound", "(", "port_id", "=", "port_ids", ")", "validate_and_fetch_segment", "(", "ports", ",", "db_address", "[", "\"network_id\"", "]", ")", "validate_port_ip_quotas", "(", "context", ",", "db_address", ".", "network_id", ",", "ports", ")", "if", "iptype", "==", "ip_types", ".", "SHARED", "and", "has_owner", ":", "for", "assoc", "in", "db_address", ".", "associations", ":", "pid", "=", "assoc", ".", "port_id", "if", "pid", "not", "in", "port_ids", "and", "'none'", "!=", "assoc", ".", "service", ":", "raise", "q_exc", ".", "PortRequiresDisassociation", "(", ")", "LOG", ".", "info", "(", "\"Updating IP address, %s, to only be used by the\"", "\"following ports: %s\"", "%", "(", "db_address", ".", "address_readable", ",", "[", "p", ".", "id", "for", "p", "in", "ports", "]", ")", ")", "new_address", "=", "db_api", ".", "update_port_associations_for_ip", "(", "context", ",", "ports", ",", "db_address", ")", "elif", "iptype", "==", "ip_types", ".", "SHARED", "and", "has_owner", ":", "raise", "q_exc", ".", "PortRequiresDisassociation", "(", ")", "elif", "'deallocated'", "in", "ip_address", "[", "'ip_address'", "]", "and", "context", ".", "is_admin", ":", "# Verify no port associations", "if", "len", "(", "db_address", ".", "associations", ")", "!=", "0", ":", "exc_msg", "=", "(", "\"IP %s cannot be deallocated or allocated while\"", "\" still associated with ports: %s\"", "%", "(", "db_address", "[", "'address_readable'", "]", ",", "db_address", ".", "associations", ")", ")", "raise", "q_exc", ".", "ActionNotAuthorized", "(", "msg", "=", "exc_msg", ")", "# NOTE: If an admin, allow a user to set deallocated to false", "# in order to reserve a deallocated IP. Alternatively, allow them", "# reverse that choice if a mistake was made.", "if", "ip_address", "[", "'ip_address'", "]", "[", "'deallocated'", "]", "==", "'False'", ":", "db_address", "[", "'deallocated'", "]", "=", "False", "else", ":", "db_address", "[", "'deallocated'", "]", "=", "True", "return", "v", ".", "_make_ip_dict", "(", "db_address", ",", "context", ".", "is_admin", ")", "else", ":", "ipam_driver", ".", "deallocate_ip_address", "(", "context", ",", "db_address", ")", "return", "v", ".", "_make_ip_dict", "(", "db_address", ",", "context", ".", "is_admin", ")", "return", "v", ".", "_make_ip_dict", "(", "new_address", ",", "context", ".", "is_admin", ")" ]
1112e6a66917d3e98e44cb7b33b107fd5a74bb2e
valid
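
The request body for the function above is nested under an 'ip_address' key, and the three update paths (reset_allocation_time, port_ids, deallocated) are distinct. A sketch with placeholder values; note that 'deallocated' is compared against the string 'False', not the boolean:

from quark.plugin_modules import ip_addresses

context = ...  # neutron api request context (placeholder; admin for some paths)

# Re-point the address at an explicit set of ports:
ip_addresses.update_ip_address(context, 'IP-UUID', {
    'ip_address': {'port_ids': ['PORT-A-UUID', 'PORT-B-UUID']}})

# Admin-only: reserve a deallocated address (string compare in the source):
ip_addresses.update_ip_address(context, 'IP-UUID', {
    'ip_address': {'deallocated': 'False'}})

# Admin-only: manually reset the deallocation timestamp:
ip_addresses.update_ip_address(context, 'IP-UUID', {
    'ip_address': {'reset_allocation_time': True}})
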
delete_ip_address
Delete an ip address. : param context: neutron api request context : param id: UUID representing the ip address to delete.
quark/plugin_modules/ip_addresses.py
def delete_ip_address(context, id):
    """Delete an ip address.

    : param context: neutron api request context
    : param id: UUID representing the ip address to delete.
    """
    LOG.info("delete_ip_address %s for tenant %s" % (id, context.tenant_id))
    with context.session.begin():
        ip_address = db_api.ip_address_find(
            context, id=id, scope=db_api.ONE)
        if not ip_address or ip_address.deallocated:
            raise q_exc.IpAddressNotFound(addr_id=id)
        iptype = ip_address.address_type
        if iptype == ip_types.FIXED and not CONF.QUARK.ipaddr_allow_fixed_ip:
            raise n_exc.BadRequest(
                resource="ip_addresses",
                msg="Fixed ips cannot be updated using this interface.")

        if ip_address.has_any_shared_owner():
            raise q_exc.PortRequiresDisassociation()

        db_api.update_port_associations_for_ip(context, [], ip_address)
        ipam_driver.deallocate_ip_address(context, ip_address)
def delete_ip_address(context, id):
    """Delete an ip address.

    : param context: neutron api request context
    : param id: UUID representing the ip address to delete.
    """
    LOG.info("delete_ip_address %s for tenant %s" % (id, context.tenant_id))
    with context.session.begin():
        ip_address = db_api.ip_address_find(
            context, id=id, scope=db_api.ONE)
        if not ip_address or ip_address.deallocated:
            raise q_exc.IpAddressNotFound(addr_id=id)
        iptype = ip_address.address_type
        if iptype == ip_types.FIXED and not CONF.QUARK.ipaddr_allow_fixed_ip:
            raise n_exc.BadRequest(
                resource="ip_addresses",
                msg="Fixed ips cannot be updated using this interface.")

        if ip_address.has_any_shared_owner():
            raise q_exc.PortRequiresDisassociation()

        db_api.update_port_associations_for_ip(context, [], ip_address)
        ipam_driver.deallocate_ip_address(context, ip_address)
[ "Delete", "an", "ip", "address", "." ]
openstack/quark
python
https://github.com/openstack/quark/blob/1112e6a66917d3e98e44cb7b33b107fd5a74bb2e/quark/plugin_modules/ip_addresses.py#L372-L396
[ "def", "delete_ip_address", "(", "context", ",", "id", ")", ":", "LOG", ".", "info", "(", "\"delete_ip_address %s for tenant %s\"", "%", "(", "id", ",", "context", ".", "tenant_id", ")", ")", "with", "context", ".", "session", ".", "begin", "(", ")", ":", "ip_address", "=", "db_api", ".", "ip_address_find", "(", "context", ",", "id", "=", "id", ",", "scope", "=", "db_api", ".", "ONE", ")", "if", "not", "ip_address", "or", "ip_address", ".", "deallocated", ":", "raise", "q_exc", ".", "IpAddressNotFound", "(", "addr_id", "=", "id", ")", "iptype", "=", "ip_address", ".", "address_type", "if", "iptype", "==", "ip_types", ".", "FIXED", "and", "not", "CONF", ".", "QUARK", ".", "ipaddr_allow_fixed_ip", ":", "raise", "n_exc", ".", "BadRequest", "(", "resource", "=", "\"ip_addresses\"", ",", "msg", "=", "\"Fixed ips cannot be updated using this interface.\"", ")", "if", "ip_address", ".", "has_any_shared_owner", "(", ")", ":", "raise", "q_exc", ".", "PortRequiresDisassociation", "(", ")", "db_api", ".", "update_port_associations_for_ip", "(", "context", ",", "[", "]", ",", "ip_address", ")", "ipam_driver", ".", "deallocate_ip_address", "(", "context", ",", "ip_address", ")" ]
1112e6a66917d3e98e44cb7b33b107fd5a74bb2e
valid
get_ports_for_ip_address
Retrieve a list of ports. The contents of the list depends on the identity of the user making the request (as indicated by the context) as well as any filters. : param context: neutron api request context : param filters: a dictionary with keys that are valid keys for a port as listed in the RESOURCE_ATTRIBUTE_MAP object in neutron/api/v2/attributes.py. Values in this dictionary are an iterable containing values that will be used for an exact match comparison for that value. Each result returned by this function will have matched one of the values for each key in filters. : param fields: a list of strings that are valid keys in a port dictionary as listed in the RESOURCE_ATTRIBUTE_MAP object in neutron/api/v2/attributes.py. Only these fields will be returned.
quark/plugin_modules/ip_addresses.py
def get_ports_for_ip_address(context, ip_id, limit=None, sorts=['id'],
                             marker=None, page_reverse=False, filters=None,
                             fields=None):
    """Retrieve a list of ports.

    The contents of the list depends on the identity of the user
    making the request (as indicated by the context) as well as any
    filters.

    : param context: neutron api request context
    : param filters: a dictionary with keys that are valid keys for
        a port as listed in the RESOURCE_ATTRIBUTE_MAP object
        in neutron/api/v2/attributes.py. Values in this dictionary
        are an iterable containing values that will be used for an exact
        match comparison for that value. Each result returned by this
        function will have matched one of the values for each key in
        filters.
    : param fields: a list of strings that are valid keys in a
        port dictionary as listed in the RESOURCE_ATTRIBUTE_MAP
        object in neutron/api/v2/attributes.py. Only these fields
        will be returned.
    """
    LOG.info("get_ports for tenant %s filters %s fields %s" %
             (context.tenant_id, filters, fields))
    addr = db_api.ip_address_find(context, id=ip_id, scope=db_api.ONE)
    if not addr:
        raise q_exc.IpAddressNotFound(addr_id=ip_id)

    if filters is None:
        filters = {}

    filters['ip_address_id'] = [ip_id]
    ports = db_api.port_find(context, limit, sorts, marker,
                             fields=fields, join_security_groups=True,
                             **filters)
    return v._make_ip_ports_list(addr, ports, fields)
def get_ports_for_ip_address(context, ip_id, limit=None, sorts=['id'],
                             marker=None, page_reverse=False, filters=None,
                             fields=None):
    """Retrieve a list of ports.

    The contents of the list depends on the identity of the user
    making the request (as indicated by the context) as well as any
    filters.

    : param context: neutron api request context
    : param filters: a dictionary with keys that are valid keys for
        a port as listed in the RESOURCE_ATTRIBUTE_MAP object
        in neutron/api/v2/attributes.py. Values in this dictionary
        are an iterable containing values that will be used for an exact
        match comparison for that value. Each result returned by this
        function will have matched one of the values for each key in
        filters.
    : param fields: a list of strings that are valid keys in a
        port dictionary as listed in the RESOURCE_ATTRIBUTE_MAP
        object in neutron/api/v2/attributes.py. Only these fields
        will be returned.
    """
    LOG.info("get_ports for tenant %s filters %s fields %s" %
             (context.tenant_id, filters, fields))
    addr = db_api.ip_address_find(context, id=ip_id, scope=db_api.ONE)
    if not addr:
        raise q_exc.IpAddressNotFound(addr_id=ip_id)

    if filters is None:
        filters = {}

    filters['ip_address_id'] = [ip_id]
    ports = db_api.port_find(context, limit, sorts, marker,
                             fields=fields, join_security_groups=True,
                             **filters)
    return v._make_ip_ports_list(addr, ports, fields)
[ "Retrieve", "a", "list", "of", "ports", "." ]
openstack/quark
python
https://github.com/openstack/quark/blob/1112e6a66917d3e98e44cb7b33b107fd5a74bb2e/quark/plugin_modules/ip_addresses.py#L399-L434
[ "def", "get_ports_for_ip_address", "(", "context", ",", "ip_id", ",", "limit", "=", "None", ",", "sorts", "=", "[", "'id'", "]", ",", "marker", "=", "None", ",", "page_reverse", "=", "False", ",", "filters", "=", "None", ",", "fields", "=", "None", ")", ":", "LOG", ".", "info", "(", "\"get_ports for tenant %s filters %s fields %s\"", "%", "(", "context", ".", "tenant_id", ",", "filters", ",", "fields", ")", ")", "addr", "=", "db_api", ".", "ip_address_find", "(", "context", ",", "id", "=", "ip_id", ",", "scope", "=", "db_api", ".", "ONE", ")", "if", "not", "addr", ":", "raise", "q_exc", ".", "IpAddressNotFound", "(", "addr_id", "=", "ip_id", ")", "if", "filters", "is", "None", ":", "filters", "=", "{", "}", "filters", "[", "'ip_address_id'", "]", "=", "[", "ip_id", "]", "ports", "=", "db_api", ".", "port_find", "(", "context", ",", "limit", ",", "sorts", ",", "marker", ",", "fields", "=", "fields", ",", "join_security_groups", "=", "True", ",", "*", "*", "filters", ")", "return", "v", ".", "_make_ip_ports_list", "(", "addr", ",", "ports", ",", "fields", ")" ]
1112e6a66917d3e98e44cb7b33b107fd5a74bb2e
valid
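The filters contract in the docstring above (each value is an iterable, matched exactly, and ip_address_id is always forced in) can be illustrated with a small in-memory matcher; the sample ports below are hypothetical.

def match(port, filters):
    # every filter key must match one of its listed values exactly
    return all(port.get(key) in values for key, values in filters.items())

ports = [
    {"id": "p1", "ip_address_id": "ip-1", "device_id": "vm-a"},
    {"id": "p2", "ip_address_id": "ip-2", "device_id": "vm-b"},
]
filters = {"device_id": ["vm-a", "vm-b"]}     # caller-supplied filter
filters["ip_address_id"] = ["ip-1"]           # forced, as in the plugin code
print([p["id"] for p in ports if match(p, filters)])   # ['p1']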
get_port_for_ip_address
Retrieve a port. : param context: neutron api request context : param id: UUID representing the port to fetch. : param fields: a list of strings that are valid keys in a port dictionary as listed in the RESOURCE_ATTRIBUTE_MAP object in neutron/api/v2/attributes.py. Only these fields will be returned.
quark/plugin_modules/ip_addresses.py
def get_port_for_ip_address(context, ip_id, id, fields=None): """Retrieve a port. : param context: neutron api request context : param id: UUID representing the port to fetch. : param fields: a list of strings that are valid keys in a port dictionary as listed in the RESOURCE_ATTRIBUTE_MAP object in neutron/api/v2/attributes.py. Only these fields will be returned. """ LOG.info("get_port %s for tenant %s fields %s" % (id, context.tenant_id, fields)) addr = db_api.ip_address_find(context, id=ip_id, scope=db_api.ONE) if not addr: raise q_exc.IpAddressNotFound(addr_id=ip_id) filters = {'ip_address_id': [ip_id]} results = db_api.port_find(context, id=id, fields=fields, scope=db_api.ONE, **filters) if not results: raise n_exc.PortNotFound(port_id=id) return v._make_port_for_ip_dict(addr, results)
def get_port_for_ip_address(context, ip_id, id, fields=None): """Retrieve a port. : param context: neutron api request context : param id: UUID representing the port to fetch. : param fields: a list of strings that are valid keys in a port dictionary as listed in the RESOURCE_ATTRIBUTE_MAP object in neutron/api/v2/attributes.py. Only these fields will be returned. """ LOG.info("get_port %s for tenant %s fields %s" % (id, context.tenant_id, fields)) addr = db_api.ip_address_find(context, id=ip_id, scope=db_api.ONE) if not addr: raise q_exc.IpAddressNotFound(addr_id=ip_id) filters = {'ip_address_id': [ip_id]} results = db_api.port_find(context, id=id, fields=fields, scope=db_api.ONE, **filters) if not results: raise n_exc.PortNotFound(port_id=id) return v._make_port_for_ip_dict(addr, results)
[ "Retrieve", "a", "port", "." ]
openstack/quark
python
https://github.com/openstack/quark/blob/1112e6a66917d3e98e44cb7b33b107fd5a74bb2e/quark/plugin_modules/ip_addresses.py#L437-L460
[ "def", "get_port_for_ip_address", "(", "context", ",", "ip_id", ",", "id", ",", "fields", "=", "None", ")", ":", "LOG", ".", "info", "(", "\"get_port %s for tenant %s fields %s\"", "%", "(", "id", ",", "context", ".", "tenant_id", ",", "fields", ")", ")", "addr", "=", "db_api", ".", "ip_address_find", "(", "context", ",", "id", "=", "ip_id", ",", "scope", "=", "db_api", ".", "ONE", ")", "if", "not", "addr", ":", "raise", "q_exc", ".", "IpAddressNotFound", "(", "addr_id", "=", "ip_id", ")", "filters", "=", "{", "'ip_address_id'", ":", "[", "ip_id", "]", "}", "results", "=", "db_api", ".", "port_find", "(", "context", ",", "id", "=", "id", ",", "fields", "=", "fields", ",", "scope", "=", "db_api", ".", "ONE", ",", "*", "*", "filters", ")", "if", "not", "results", ":", "raise", "n_exc", ".", "PortNotFound", "(", "port_id", "=", "id", ")", "return", "v", ".", "_make_port_for_ip_dict", "(", "addr", ",", "results", ")" ]
1112e6a66917d3e98e44cb7b33b107fd5a74bb2e
valid
update_port_for_ip_address
Update values of a port. : param context: neutron api request context : param ip_id: UUID representing the ip associated with port to update : param id: UUID representing the port to update. : param port: dictionary with keys indicating fields to update. valid keys are those that have a value of True for 'allow_put' as listed in the RESOURCE_ATTRIBUTE_MAP object in neutron/api/v2/attributes.py.
quark/plugin_modules/ip_addresses.py
def update_port_for_ip_address(context, ip_id, id, port): """Update values of a port. : param context: neutron api request context : param ip_id: UUID representing the ip associated with port to update : param id: UUID representing the port to update. : param port: dictionary with keys indicating fields to update. valid keys are those that have a value of True for 'allow_put' as listed in the RESOURCE_ATTRIBUTE_MAP object in neutron/api/v2/attributes.py. """ LOG.info("update_port %s for tenant %s" % (id, context.tenant_id)) sanitize_list = ['service'] with context.session.begin(): addr = db_api.ip_address_find(context, id=ip_id, scope=db_api.ONE) if not addr: raise q_exc.IpAddressNotFound(addr_id=ip_id) port_db = db_api.port_find(context, id=id, scope=db_api.ONE) if not port_db: raise q_exc.PortNotFound(port_id=id) port_dict = {k: port['port'][k] for k in sanitize_list} require_da = False service = port_dict.get('service') if require_da and _shared_ip_and_active(addr, except_port=id): raise q_exc.PortRequiresDisassociation() addr.set_service_for_port(port_db, service) context.session.add(addr) return v._make_port_for_ip_dict(addr, port_db)
def update_port_for_ip_address(context, ip_id, id, port): """Update values of a port. : param context: neutron api request context : param ip_id: UUID representing the ip associated with port to update : param id: UUID representing the port to update. : param port: dictionary with keys indicating fields to update. valid keys are those that have a value of True for 'allow_put' as listed in the RESOURCE_ATTRIBUTE_MAP object in neutron/api/v2/attributes.py. """ LOG.info("update_port %s for tenant %s" % (id, context.tenant_id)) sanitize_list = ['service'] with context.session.begin(): addr = db_api.ip_address_find(context, id=ip_id, scope=db_api.ONE) if not addr: raise q_exc.IpAddressNotFound(addr_id=ip_id) port_db = db_api.port_find(context, id=id, scope=db_api.ONE) if not port_db: raise q_exc.PortNotFound(port_id=id) port_dict = {k: port['port'][k] for k in sanitize_list} require_da = False service = port_dict.get('service') if require_da and _shared_ip_and_active(addr, except_port=id): raise q_exc.PortRequiresDisassociation() addr.set_service_for_port(port_db, service) context.session.add(addr) return v._make_port_for_ip_dict(addr, port_db)
[ "Update", "values", "of", "a", "port", "." ]
openstack/quark
python
https://github.com/openstack/quark/blob/1112e6a66917d3e98e44cb7b33b107fd5a74bb2e/quark/plugin_modules/ip_addresses.py#L463-L492
[ "def", "update_port_for_ip_address", "(", "context", ",", "ip_id", ",", "id", ",", "port", ")", ":", "LOG", ".", "info", "(", "\"update_port %s for tenant %s\"", "%", "(", "id", ",", "context", ".", "tenant_id", ")", ")", "sanitize_list", "=", "[", "'service'", "]", "with", "context", ".", "session", ".", "begin", "(", ")", ":", "addr", "=", "db_api", ".", "ip_address_find", "(", "context", ",", "id", "=", "ip_id", ",", "scope", "=", "db_api", ".", "ONE", ")", "if", "not", "addr", ":", "raise", "q_exc", ".", "IpAddressNotFound", "(", "addr_id", "=", "ip_id", ")", "port_db", "=", "db_api", ".", "port_find", "(", "context", ",", "id", "=", "id", ",", "scope", "=", "db_api", ".", "ONE", ")", "if", "not", "port_db", ":", "raise", "q_exc", ".", "PortNotFound", "(", "port_id", "=", "id", ")", "port_dict", "=", "{", "k", ":", "port", "[", "'port'", "]", "[", "k", "]", "for", "k", "in", "sanitize_list", "}", "require_da", "=", "False", "service", "=", "port_dict", ".", "get", "(", "'service'", ")", "if", "require_da", "and", "_shared_ip_and_active", "(", "addr", ",", "except_port", "=", "id", ")", ":", "raise", "q_exc", ".", "PortRequiresDisassociation", "(", ")", "addr", ".", "set_service_for_port", "(", "port_db", ",", "service", ")", "context", ".", "session", ".", "add", "(", "addr", ")", "return", "v", ".", "_make_port_for_ip_dict", "(", "addr", ",", "port_db", ")" ]
1112e6a66917d3e98e44cb7b33b107fd5a74bb2e
valid
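The sanitize_list dict comprehension in update_port_for_ip_address is a whitelist over the request body; note that it raises KeyError when a listed key is absent. A small self-contained comparison with a more lenient variant (the request body below is hypothetical):

sanitize_list = ["service"]
body = {"port": {"service": "compute", "mac_address": "AA:BB:CC:DD:EE:FF"}}

strict = {k: body["port"][k] for k in sanitize_list}   # KeyError if 'service' missing
lenient = {k: v for k, v in body["port"].items() if k in sanitize_list}
assert strict == lenient == {"service": "compute"}     # extra keys are dropped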
is_isonet_vif
Determine if a vif is on isonet Returns True if a vif belongs to an isolated network by checking for a nicira interface id.
quark/agent/agent.py
def is_isonet_vif(vif): """Determine if a vif is on isonet Returns True if a vif belongs to an isolated network by checking for a nicira interface id. """ nicira_iface_id = vif.record.get('other_config').get('nicira-iface-id') if nicira_iface_id: return True return False
def is_isonet_vif(vif): """Determine if a vif is on isonet Returns True if a vif belongs to an isolated network by checking for a nicira interface id. """ nicira_iface_id = vif.record.get('other_config').get('nicira-iface-id') if nicira_iface_id: return True return False
[ "Determine", "if", "a", "vif", "is", "on", "isonet" ]
openstack/quark
python
https://github.com/openstack/quark/blob/1112e6a66917d3e98e44cb7b33b107fd5a74bb2e/quark/agent/agent.py#L48-L59
[ "def", "is_isonet_vif", "(", "vif", ")", ":", "nicira_iface_id", "=", "vif", ".", "record", ".", "get", "(", "'other_config'", ")", ".", "get", "(", "'nicira-iface-id'", ")", "if", "nicira_iface_id", ":", "return", "True", "return", "False" ]
1112e6a66917d3e98e44cb7b33b107fd5a74bb2e
valid
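One sharp edge in is_isonet_vif: record.get('other_config') returns None when the key is missing, so the chained .get() raises AttributeError. A hedged defensive variant, assuming the same record shape:

record = {"other_config": {"nicira-iface-id": "abc123"}}
assert record.get("other_config", {}).get("nicira-iface-id") == "abc123"

record_missing = {}   # no 'other_config' key at all
# the defaulted first .get() keeps the chain safe and yields None
assert record_missing.get("other_config", {}).get("nicira-iface-id") is None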
partition_vifs
Splits VIFs into three explicit categories and one implicit Added - Groups exist in Redis that have not been ack'd and the VIF is not tagged. Action: Tag the VIF and apply flows Updated - Groups exist in Redis that have not been ack'd and the VIF is already tagged Action: Do not tag the VIF, do apply flows Removed - Groups do NOT exist in Redis but the VIF is tagged Action: Untag the VIF, apply default flows Self-Heal - Groups are ack'd in Redis but the VIF is untagged. We treat this case as if it were an "added" group. Action: Tag the VIF and apply flows NOOP - The VIF is not tagged and there are no matching groups in Redis. This is our implicit category Action: Do nothing
quark/agent/agent.py
def partition_vifs(xapi_client, interfaces, security_group_states): """Splits VIFs into three explicit categories and one implicit Added - Groups exist in Redis that have not been ack'd and the VIF is not tagged. Action: Tag the VIF and apply flows Updated - Groups exist in Redis that have not been ack'd and the VIF is already tagged Action: Do not tag the VIF, do apply flows Removed - Groups do NOT exist in Redis but the VIF is tagged Action: Untag the VIF, apply default flows Self-Heal - Groups are ack'd in Redis but the VIF is untagged. We treat this case as if it were an "added" group. Action: Tag the VIF and apply flows NOOP - The VIF is not tagged and there are no matching groups in Redis. This is our implicit category Action: Do nothing """ added = [] updated = [] removed = [] for vif in interfaces: # Quark should not action on isonet vifs in regions that use FLIP if ('floating_ip' in CONF.QUARK.environment_capabilities and is_isonet_vif(vif)): continue vif_has_groups = vif in security_group_states if vif.tagged and vif_has_groups and\ security_group_states[vif][sg_cli.SECURITY_GROUP_ACK]: # Already ack'd these groups and VIF is tagged, reapply. # If it's not tagged, fall through and have it self-heal continue if vif.tagged: if vif_has_groups: updated.append(vif) else: removed.append(vif) else: if vif_has_groups: added.append(vif) # if not tagged and no groups, skip return added, updated, removed
def partition_vifs(xapi_client, interfaces, security_group_states): """Splits VIFs into three explicit categories and one implicit Added - Groups exist in Redis that have not been ack'd and the VIF is not tagged. Action: Tag the VIF and apply flows Updated - Groups exist in Redis that have not been ack'd and the VIF is already tagged Action: Do not tag the VIF, do apply flows Removed - Groups do NOT exist in Redis but the VIF is tagged Action: Untag the VIF, apply default flows Self-Heal - Groups are ack'd in Redis but the VIF is untagged. We treat this case as if it were an "added" group. Action: Tag the VIF and apply flows NOOP - The VIF is not tagged and there are no matching groups in Redis. This is our implicit category Action: Do nothing """ added = [] updated = [] removed = [] for vif in interfaces: # Quark should not action on isonet vifs in regions that use FLIP if ('floating_ip' in CONF.QUARK.environment_capabilities and is_isonet_vif(vif)): continue vif_has_groups = vif in security_group_states if vif.tagged and vif_has_groups and\ security_group_states[vif][sg_cli.SECURITY_GROUP_ACK]: # Already ack'd these groups and VIF is tagged, reapply. # If it's not tagged, fall through and have it self-heal continue if vif.tagged: if vif_has_groups: updated.append(vif) else: removed.append(vif) else: if vif_has_groups: added.append(vif) # if not tagged and no groups, skip return added, updated, removed
[ "Splits", "VIFs", "into", "three", "explicit", "categories", "and", "one", "implicit" ]
openstack/quark
python
https://github.com/openstack/quark/blob/1112e6a66917d3e98e44cb7b33b107fd5a74bb2e/quark/agent/agent.py#L62-L107
[ "def", "partition_vifs", "(", "xapi_client", ",", "interfaces", ",", "security_group_states", ")", ":", "added", "=", "[", "]", "updated", "=", "[", "]", "removed", "=", "[", "]", "for", "vif", "in", "interfaces", ":", "# Quark should not action on isonet vifs in regions that use FLIP", "if", "(", "'floating_ip'", "in", "CONF", ".", "QUARK", ".", "environment_capabilities", "and", "is_isonet_vif", "(", "vif", ")", ")", ":", "continue", "vif_has_groups", "=", "vif", "in", "security_group_states", "if", "vif", ".", "tagged", "and", "vif_has_groups", "and", "security_group_states", "[", "vif", "]", "[", "sg_cli", ".", "SECURITY_GROUP_ACK", "]", ":", "# Already ack'd these groups and VIF is tagged, reapply.", "# If it's not tagged, fall through and have it self-heal", "continue", "if", "vif", ".", "tagged", ":", "if", "vif_has_groups", ":", "updated", ".", "append", "(", "vif", ")", "else", ":", "removed", ".", "append", "(", "vif", ")", "else", ":", "if", "vif_has_groups", ":", "added", ".", "append", "(", "vif", ")", "# if not tagged and no groups, skip", "return", "added", ",", "updated", ",", "removed" ]
1112e6a66917d3e98e44cb7b33b107fd5a74bb2e
valid
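The partitioning rules above reduce to a small decision table; the toy walkthrough below reproduces them with stand-in objects (FakeVif and the ack constant are hypothetical; real VIFs come from xapi).

class FakeVif(object):
    def __init__(self, name, tagged):
        self.name, self.tagged = name, tagged
    def __repr__(self):
        return self.name

ACK = "security group ack"  # stand-in for sg_cli.SECURITY_GROUP_ACK

a, b, c, d = (FakeVif("added", False), FakeVif("updated", True),
              FakeVif("removed", True), FakeVif("noop", False))
states = {a: {ACK: False}, b: {ACK: False}}   # c has no groups; d is untouched

added, updated, removed = [], [], []
for vif in (a, b, c, d):
    has_groups = vif in states
    if vif.tagged and has_groups and states[vif][ACK]:
        continue                              # ack'd and tagged: nothing to do
    if vif.tagged:
        (updated if has_groups else removed).append(vif)
    elif has_groups:
        added.append(vif)                     # also covers the self-heal case
print(added, updated, removed)                # [added] [updated] [removed]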
get_groups_to_ack
Compares initial security group rules with current sg rules. Given the groups that were successfully returned from xapi_client.update_interfaces call, compare initial and current security group rules to determine if an update occurred during the window that the xapi_client.update_interfaces was executing. Return a list of vifs whose security group rules have not changed.
quark/agent/agent.py
def get_groups_to_ack(groups_to_ack, init_sg_states, curr_sg_states): """Compares initial security group rules with current sg rules. Given the groups that were successfully returned from xapi_client.update_interfaces call, compare initial and current security group rules to determine if an update occurred during the window that the xapi_client.update_interfaces was executing. Return a list of vifs whose security group rules have not changed. """ security_groups_changed = [] # Compare current security group rules with initial rules. for vif in groups_to_ack: initial_state = init_sg_states[vif][sg_cli.SECURITY_GROUP_HASH_ATTR] current_state = curr_sg_states[vif][sg_cli.SECURITY_GROUP_HASH_ATTR] bad_match_msg = ('security group rules were changed for vif "%s" while' ' executing xapi_client.update_interfaces.' ' Will not ack rule.' % vif) # If lists are different lengths, they're automatically different. if len(initial_state) != len(current_state): security_groups_changed.append(vif) LOG.info(bad_match_msg) elif len(initial_state) > 0: # Compare rules in equal length lists. for rule in current_state: if rule not in initial_state: security_groups_changed.append(vif) LOG.info(bad_match_msg) break # Only ack groups whose rules have not changed since update. If # rules do not match, do not add them to ret so the change # can be picked up on the next cycle. ret = [group for group in groups_to_ack if group not in security_groups_changed] return ret
def get_groups_to_ack(groups_to_ack, init_sg_states, curr_sg_states): """Compares initial security group rules with current sg rules. Given the groups that were successfully returned from xapi_client.update_interfaces call, compare initial and current security group rules to determine if an update occurred during the window that the xapi_client.update_interfaces was executing. Return a list of vifs whose security group rules have not changed. """ security_groups_changed = [] # Compare current security group rules with initial rules. for vif in groups_to_ack: initial_state = init_sg_states[vif][sg_cli.SECURITY_GROUP_HASH_ATTR] current_state = curr_sg_states[vif][sg_cli.SECURITY_GROUP_HASH_ATTR] bad_match_msg = ('security group rules were changed for vif "%s" while' ' executing xapi_client.update_interfaces.' ' Will not ack rule.' % vif) # If lists are different lengths, they're automatically different. if len(initial_state) != len(current_state): security_groups_changed.append(vif) LOG.info(bad_match_msg) elif len(initial_state) > 0: # Compare rules in equal length lists. for rule in current_state: if rule not in initial_state: security_groups_changed.append(vif) LOG.info(bad_match_msg) break # Only ack groups whose rules have not changed since update. If # rules do not match, do not add them to ret so the change # can be picked up on the next cycle. ret = [group for group in groups_to_ack if group not in security_groups_changed] return ret
[ "Compares", "initial", "security", "group", "rules", "with", "current", "sg", "rules", "." ]
openstack/quark
python
https://github.com/openstack/quark/blob/1112e6a66917d3e98e44cb7b33b107fd5a74bb2e/quark/agent/agent.py#L115-L149
[ "def", "get_groups_to_ack", "(", "groups_to_ack", ",", "init_sg_states", ",", "curr_sg_states", ")", ":", "security_groups_changed", "=", "[", "]", "# Compare current security group rules with initial rules.", "for", "vif", "in", "groups_to_ack", ":", "initial_state", "=", "init_sg_states", "[", "vif", "]", "[", "sg_cli", ".", "SECURITY_GROUP_HASH_ATTR", "]", "current_state", "=", "curr_sg_states", "[", "vif", "]", "[", "sg_cli", ".", "SECURITY_GROUP_HASH_ATTR", "]", "bad_match_msg", "=", "(", "'security group rules were changed for vif \"%s\" while'", "' executing xapi_client.update_interfaces.'", "' Will not ack rule.'", "%", "vif", ")", "# If lists are different lengths, they're automatically different.", "if", "len", "(", "initial_state", ")", "!=", "len", "(", "current_state", ")", ":", "security_groups_changed", ".", "append", "(", "vif", ")", "LOG", ".", "info", "(", "bad_match_msg", ")", "elif", "len", "(", "initial_state", ")", ">", "0", ":", "# Compare rules in equal length lists.", "for", "rule", "in", "current_state", ":", "if", "rule", "not", "in", "initial_state", ":", "security_groups_changed", ".", "append", "(", "vif", ")", "LOG", ".", "info", "(", "bad_match_msg", ")", "break", "# Only ack groups whose rules have not changed since update. If", "# rules do not match, do not add them to ret so the change", "# can be picked up on the next cycle.", "ret", "=", "[", "group", "for", "group", "in", "groups_to_ack", "if", "group", "not", "in", "security_groups_changed", "]", "return", "ret" ]
1112e6a66917d3e98e44cb7b33b107fd5a74bb2e
valid
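The comparison step above reduces to: a vif's groups are ack-able only if its rule list is identical (same length, same members) in both snapshots. A toy illustration, with HASH standing in for sg_cli.SECURITY_GROUP_HASH_ATTR:

HASH = "security group rules"  # hypothetical stand-in key

init = {"vif1": {HASH: [{"port": 22}]}, "vif2": {HASH: [{"port": 22}]}}
curr = {"vif1": {HASH: [{"port": 22}]}, "vif2": {HASH: [{"port": 80}]}}

changed = [v for v in ("vif1", "vif2")
           if len(init[v][HASH]) != len(curr[v][HASH])
           or any(rule not in init[v][HASH] for rule in curr[v][HASH])]
to_ack = [v for v in ("vif1", "vif2") if v not in changed]
print(to_ack)   # ['vif1'] -- vif2 changed mid-flight and is retried next cycle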
run
Fetches changes and applies them to VIFs periodically Process as of RM11449: * Get all groups from redis * Fetch ALL VIFs from Xen * Walk ALL VIFs and partition them into added, updated and removed * Walk the final "modified" VIFs list and apply flows to each
quark/agent/agent.py
def run(): """Fetches changes and applies them to VIFs periodically Process as of RM11449: * Get all groups from redis * Fetch ALL VIFs from Xen * Walk ALL VIFs and partition them into added, updated and removed * Walk the final "modified" VIFs list and apply flows to each """ groups_client = sg_cli.SecurityGroupsClient() xapi_client = xapi.XapiClient() interfaces = set() while True: try: interfaces = xapi_client.get_interfaces() except Exception: LOG.exception("Unable to get instances/interfaces from xapi") _sleep() continue try: sg_states = groups_client.get_security_group_states(interfaces) new_sg, updated_sg, removed_sg = partition_vifs(xapi_client, interfaces, sg_states) xapi_client.update_interfaces(new_sg, updated_sg, removed_sg) groups_to_ack = [v for v in new_sg + updated_sg if v.success] # NOTE(quade): This solves a race condition where a security group # rule may have changed between the time the sg_states were called # and when they were officially ack'd. It functions as a compare # and set. This is a fix until we get onto a proper messaging # queue. NCP-2287 sg_sts_curr = groups_client.get_security_group_states(interfaces) groups_to_ack = get_groups_to_ack(groups_to_ack, sg_states, sg_sts_curr) # This list will contain all the security group rules that do not # match ack_groups(groups_client, groups_to_ack) except Exception: LOG.exception("Unable to get security groups from registry and " "apply them to xapi") _sleep() continue _sleep()
def run(): """Fetches changes and applies them to VIFs periodically Process as of RM11449: * Get all groups from redis * Fetch ALL VIFs from Xen * Walk ALL VIFs and partition them into added, updated and removed * Walk the final "modified" VIFs list and apply flows to each """ groups_client = sg_cli.SecurityGroupsClient() xapi_client = xapi.XapiClient() interfaces = set() while True: try: interfaces = xapi_client.get_interfaces() except Exception: LOG.exception("Unable to get instances/interfaces from xapi") _sleep() continue try: sg_states = groups_client.get_security_group_states(interfaces) new_sg, updated_sg, removed_sg = partition_vifs(xapi_client, interfaces, sg_states) xapi_client.update_interfaces(new_sg, updated_sg, removed_sg) groups_to_ack = [v for v in new_sg + updated_sg if v.success] # NOTE(quade): This solves a race condition where a security group # rule may have changed between the time the sg_states were called # and when they were officially ack'd. It functions as a compare # and set. This is a fix until we get onto a proper messaging # queue. NCP-2287 sg_sts_curr = groups_client.get_security_group_states(interfaces) groups_to_ack = get_groups_to_ack(groups_to_ack, sg_states, sg_sts_curr) # This list will contain all the security group rules that do not # match ack_groups(groups_client, groups_to_ack) except Exception: LOG.exception("Unable to get security groups from registry and " "apply them to xapi") _sleep() continue _sleep()
[ "Fetches", "changes", "and", "applies", "them", "to", "VIFs", "periodically" ]
openstack/quark
python
https://github.com/openstack/quark/blob/1112e6a66917d3e98e44cb7b33b107fd5a74bb2e/quark/agent/agent.py#L152-L198
[ "def", "run", "(", ")", ":", "groups_client", "=", "sg_cli", ".", "SecurityGroupsClient", "(", ")", "xapi_client", "=", "xapi", ".", "XapiClient", "(", ")", "interfaces", "=", "set", "(", ")", "while", "True", ":", "try", ":", "interfaces", "=", "xapi_client", ".", "get_interfaces", "(", ")", "except", "Exception", ":", "LOG", ".", "exception", "(", "\"Unable to get instances/interfaces from xapi\"", ")", "_sleep", "(", ")", "continue", "try", ":", "sg_states", "=", "groups_client", ".", "get_security_group_states", "(", "interfaces", ")", "new_sg", ",", "updated_sg", ",", "removed_sg", "=", "partition_vifs", "(", "xapi_client", ",", "interfaces", ",", "sg_states", ")", "xapi_client", ".", "update_interfaces", "(", "new_sg", ",", "updated_sg", ",", "removed_sg", ")", "groups_to_ack", "=", "[", "v", "for", "v", "in", "new_sg", "+", "updated_sg", "if", "v", ".", "success", "]", "# NOTE(quade): This solves a race condition where a security group", "# rule may have changed between the time the sg_states were called", "# and when they were officially ack'd. It functions as a compare", "# and set. This is a fix until we get onto a proper messaging", "# queue. NCP-2287", "sg_sts_curr", "=", "groups_client", ".", "get_security_group_states", "(", "interfaces", ")", "groups_to_ack", "=", "get_groups_to_ack", "(", "groups_to_ack", ",", "sg_states", ",", "sg_sts_curr", ")", "# This list will contain all the security group rules that do not", "# match", "ack_groups", "(", "groups_client", ",", "groups_to_ack", ")", "except", "Exception", ":", "LOG", ".", "exception", "(", "\"Unable to get security groups from registry and \"", "\"apply them to xapi\"", ")", "_sleep", "(", ")", "continue", "_sleep", "(", ")" ]
1112e6a66917d3e98e44cb7b33b107fd5a74bb2e
valid
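The NCP-2287 workaround in run() has a compare-and-set shape: snapshot state, perform the slow side effects, re-read, and only ack keys whose state did not move in between. A generic sketch under that reading (read_state, apply_changes and ack are hypothetical callables):

def compare_and_set_cycle(read_state, apply_changes, ack):
    before = read_state()                     # first snapshot
    applied = apply_changes(before)           # slow side effects happen here
    after = read_state()                      # second snapshot
    # only ack keys that are unchanged across the window
    stable = [k for k in applied if before.get(k) == after.get(k)]
    ack(stable)                               # changed keys retry next cycle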
QuarkQuotaDriver.delete_tenant_quota
Delete the quota entries for a given tenant_id. After deletion, this tenant will use default quota values in conf.
quark/quota_driver.py
def delete_tenant_quota(context, tenant_id): """Delete the quota entries for a given tenant_id. After deletion, this tenant will use default quota values in conf. """ tenant_quotas = context.session.query(Quota) tenant_quotas = tenant_quotas.filter_by(tenant_id=tenant_id) tenant_quotas.delete()
def delete_tenant_quota(context, tenant_id): """Delete the quota entries for a given tenant_id. After deletion, this tenant will use default quota values in conf. """ tenant_quotas = context.session.query(Quota) tenant_quotas = tenant_quotas.filter_by(tenant_id=tenant_id) tenant_quotas.delete()
[ "Delete", "the", "quota", "entries", "for", "a", "given", "tenant_id", "." ]
openstack/quark
python
https://github.com/openstack/quark/blob/1112e6a66917d3e98e44cb7b33b107fd5a74bb2e/quark/quota_driver.py#L29-L37
[ "def", "delete_tenant_quota", "(", "context", ",", "tenant_id", ")", ":", "tenant_quotas", "=", "context", ".", "session", ".", "query", "(", "Quota", ")", "tenant_quotas", "=", "tenant_quotas", ".", "filter_by", "(", "tenant_id", "=", "tenant_id", ")", "tenant_quotas", ".", "delete", "(", ")" ]
1112e6a66917d3e98e44cb7b33b107fd5a74bb2e
valid
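The Query.delete() call in delete_tenant_quota issues a single bulk DELETE without loading rows into the session. A runnable sketch against in-memory SQLite, assuming SQLAlchemy 1.4+ (the Quota model here is a stand-in, not quark's):

from sqlalchemy import Column, Integer, String, create_engine
from sqlalchemy.orm import declarative_base, sessionmaker

Base = declarative_base()

class Quota(Base):
    __tablename__ = "quotas"
    id = Column(Integer, primary_key=True)
    tenant_id = Column(String)

engine = create_engine("sqlite://")
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()
session.add_all([Quota(tenant_id="t1"), Quota(tenant_id="t1"),
                 Quota(tenant_id="t2")])
session.commit()

session.query(Quota).filter_by(tenant_id="t1").delete()   # one bulk DELETE
session.commit()
print(session.query(Quota).count())   # 1 -- only t2's row remains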
Ip_addresses.get_resources
Returns Ext Resources.
quark/api/extensions/ip_addresses.py
def get_resources(cls): """Returns Ext Resources.""" ip_controller = IpAddressesController( directory.get_plugin()) ip_port_controller = IpAddressPortController( directory.get_plugin()) resources = [] resources.append(extensions.ResourceExtension( Ip_addresses.get_alias(), ip_controller)) parent = {'collection_name': 'ip_addresses', 'member_name': 'ip_address'} resources.append(extensions.ResourceExtension( 'ports', ip_port_controller, parent=parent)) return resources
def get_resources(cls): """Returns Ext Resources.""" ip_controller = IpAddressesController( directory.get_plugin()) ip_port_controller = IpAddressPortController( directory.get_plugin()) resources = [] resources.append(extensions.ResourceExtension( Ip_addresses.get_alias(), ip_controller)) parent = {'collection_name': 'ip_addresses', 'member_name': 'ip_address'} resources.append(extensions.ResourceExtension( 'ports', ip_port_controller, parent=parent)) return resources
[ "Returns", "Ext", "Resources", "." ]
openstack/quark
python
https://github.com/openstack/quark/blob/1112e6a66917d3e98e44cb7b33b107fd5a74bb2e/quark/api/extensions/ip_addresses.py#L153-L167
[ "def", "get_resources", "(", "cls", ")", ":", "ip_controller", "=", "IpAddressesController", "(", "directory", ".", "get_plugin", "(", ")", ")", "ip_port_controller", "=", "IpAddressPortController", "(", "directory", ".", "get_plugin", "(", ")", ")", "resources", "=", "[", "]", "resources", ".", "append", "(", "extensions", ".", "ResourceExtension", "(", "Ip_addresses", ".", "get_alias", "(", ")", ",", "ip_controller", ")", ")", "parent", "=", "{", "'collection_name'", ":", "'ip_addresses'", ",", "'member_name'", ":", "'ip_address'", "}", "resources", ".", "append", "(", "extensions", ".", "ResourceExtension", "(", "'ports'", ",", "ip_port_controller", ",", "parent", "=", "parent", ")", ")", "return", "resources" ]
1112e6a66917d3e98e44cb7b33b107fd5a74bb2e
valid
_validate_subnet_cidr
Validate the CIDR for a subnet. Verifies the specified CIDR does not overlap with the ones defined for the other subnets specified for this network, or with any other CIDR if overlapping IPs are disabled.
quark/plugin_modules/subnets.py
def _validate_subnet_cidr(context, network_id, new_subnet_cidr): """Validate the CIDR for a subnet. Verifies the specified CIDR does not overlap with the ones defined for the other subnets specified for this network, or with any other CIDR if overlapping IPs are disabled. """ if neutron_cfg.cfg.CONF.allow_overlapping_ips: return try: new_subnet_ipset = netaddr.IPSet([new_subnet_cidr]) except TypeError: LOG.exception("Invalid or missing cidr: %s" % new_subnet_cidr) raise n_exc.BadRequest(resource="subnet", msg="Invalid or missing cidr") filters = { 'network_id': network_id, 'shared': [False] } # Using admin context here, in case we actually share networks later subnet_list = db_api.subnet_find(context=context.elevated(), **filters) for subnet in subnet_list: if (netaddr.IPSet([subnet.cidr]) & new_subnet_ipset): # don't give out details of the overlapping subnet err_msg = (_("Requested subnet with cidr: %(cidr)s for " "network: %(network_id)s overlaps with another " "subnet") % {'cidr': new_subnet_cidr, 'network_id': network_id}) LOG.error(_("Validation for CIDR: %(new_cidr)s failed - " "overlaps with subnet %(subnet_id)s " "(CIDR: %(cidr)s)"), {'new_cidr': new_subnet_cidr, 'subnet_id': subnet.id, 'cidr': subnet.cidr}) raise n_exc.InvalidInput(error_message=err_msg)
def _validate_subnet_cidr(context, network_id, new_subnet_cidr): """Validate the CIDR for a subnet. Verifies the specified CIDR does not overlap with the ones defined for the other subnets specified for this network, or with any other CIDR if overlapping IPs are disabled. """ if neutron_cfg.cfg.CONF.allow_overlapping_ips: return try: new_subnet_ipset = netaddr.IPSet([new_subnet_cidr]) except TypeError: LOG.exception("Invalid or missing cidr: %s" % new_subnet_cidr) raise n_exc.BadRequest(resource="subnet", msg="Invalid or missing cidr") filters = { 'network_id': network_id, 'shared': [False] } # Using admin context here, in case we actually share networks later subnet_list = db_api.subnet_find(context=context.elevated(), **filters) for subnet in subnet_list: if (netaddr.IPSet([subnet.cidr]) & new_subnet_ipset): # don't give out details of the overlapping subnet err_msg = (_("Requested subnet with cidr: %(cidr)s for " "network: %(network_id)s overlaps with another " "subnet") % {'cidr': new_subnet_cidr, 'network_id': network_id}) LOG.error(_("Validation for CIDR: %(new_cidr)s failed - " "overlaps with subnet %(subnet_id)s " "(CIDR: %(cidr)s)"), {'new_cidr': new_subnet_cidr, 'subnet_id': subnet.id, 'cidr': subnet.cidr}) raise n_exc.InvalidInput(error_message=err_msg)
[ "Validate", "the", "CIDR", "for", "a", "subnet", "." ]
openstack/quark
python
https://github.com/openstack/quark/blob/1112e6a66917d3e98e44cb7b33b107fd5a74bb2e/quark/plugin_modules/subnets.py#L55-L94
[ "def", "_validate_subnet_cidr", "(", "context", ",", "network_id", ",", "new_subnet_cidr", ")", ":", "if", "neutron_cfg", ".", "cfg", ".", "CONF", ".", "allow_overlapping_ips", ":", "return", "try", ":", "new_subnet_ipset", "=", "netaddr", ".", "IPSet", "(", "[", "new_subnet_cidr", "]", ")", "except", "TypeError", ":", "LOG", ".", "exception", "(", "\"Invalid or missing cidr: %s\"", "%", "new_subnet_cidr", ")", "raise", "n_exc", ".", "BadRequest", "(", "resource", "=", "\"subnet\"", ",", "msg", "=", "\"Invalid or missing cidr\"", ")", "filters", "=", "{", "'network_id'", ":", "network_id", ",", "'shared'", ":", "[", "False", "]", "}", "# Using admin context here, in case we actually share networks later", "subnet_list", "=", "db_api", ".", "subnet_find", "(", "context", "=", "context", ".", "elevated", "(", ")", ",", "*", "*", "filters", ")", "for", "subnet", "in", "subnet_list", ":", "if", "(", "netaddr", ".", "IPSet", "(", "[", "subnet", ".", "cidr", "]", ")", "&", "new_subnet_ipset", ")", ":", "# don't give out details of the overlapping subnet", "err_msg", "=", "(", "_", "(", "\"Requested subnet with cidr: %(cidr)s for \"", "\"network: %(network_id)s overlaps with another \"", "\"subnet\"", ")", "%", "{", "'cidr'", ":", "new_subnet_cidr", ",", "'network_id'", ":", "network_id", "}", ")", "LOG", ".", "error", "(", "_", "(", "\"Validation for CIDR: %(new_cidr)s failed - \"", "\"overlaps with subnet %(subnet_id)s \"", "\"(CIDR: %(cidr)s)\"", ")", ",", "{", "'new_cidr'", ":", "new_subnet_cidr", ",", "'subnet_id'", ":", "subnet", ".", "id", ",", "'cidr'", ":", "subnet", ".", "cidr", "}", ")", "raise", "n_exc", ".", "InvalidInput", "(", "error_message", "=", "err_msg", ")" ]
1112e6a66917d3e98e44cb7b33b107fd5a74bb2e
valid
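The overlap test in _validate_subnet_cidr is a plain set intersection; netaddr.IPSet makes the semantics easy to verify directly (a truthy intersection means at least one shared address):

import netaddr

existing = netaddr.IPSet(["192.168.0.0/24"])
print(bool(existing & netaddr.IPSet(["192.168.0.128/25"])))  # True: overlaps
print(bool(existing & netaddr.IPSet(["192.168.1.0/24"])))    # False: disjoint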
create_subnet
Create a subnet. Create a subnet which represents a range of IP addresses that can be allocated to devices : param context: neutron api request context : param subnet: dictionary describing the subnet, with keys as listed in the RESOURCE_ATTRIBUTE_MAP object in neutron/api/v2/attributes.py. All keys will be populated.
quark/plugin_modules/subnets.py
def create_subnet(context, subnet): """Create a subnet. Create a subnet which represents a range of IP addresses that can be allocated to devices : param context: neutron api request context : param subnet: dictionary describing the subnet, with keys as listed in the RESOURCE_ATTRIBUTE_MAP object in neutron/api/v2/attributes.py. All keys will be populated. """ LOG.info("create_subnet for tenant %s" % context.tenant_id) net_id = subnet["subnet"]["network_id"] with context.session.begin(): net = db_api.network_find(context=context, limit=None, sorts=['id'], marker=None, page_reverse=False, fields=None, id=net_id, scope=db_api.ONE) if not net: raise n_exc.NetworkNotFound(net_id=net_id) sub_attrs = subnet["subnet"] always_pop = ["enable_dhcp", "ip_version", "first_ip", "last_ip", "_cidr"] admin_only = ["segment_id", "do_not_use", "created_at", "next_auto_assign_ip"] utils.filter_body(context, sub_attrs, admin_only, always_pop) _validate_subnet_cidr(context, net_id, sub_attrs["cidr"]) cidr = netaddr.IPNetwork(sub_attrs["cidr"]) err_vals = {'cidr': sub_attrs["cidr"], 'network_id': net_id} err = _("Requested subnet with cidr: %(cidr)s for " "network: %(network_id)s. Prefix is too small, must be a " "larger subnet. A prefix less than /%(prefix)s is required.") if cidr.version == 6 and cidr.prefixlen > 64: err_vals["prefix"] = 65 err_msg = err % err_vals raise n_exc.InvalidInput(error_message=err_msg) elif cidr.version == 4 and cidr.prefixlen > 30: err_vals["prefix"] = 31 err_msg = err % err_vals raise n_exc.InvalidInput(error_message=err_msg) # Enforce subnet quotas net_subnets = get_subnets(context, filters=dict(network_id=net_id)) if not context.is_admin: v4_count, v6_count = 0, 0 for subnet in net_subnets: if netaddr.IPNetwork(subnet['cidr']).version == 6: v6_count += 1 else: v4_count += 1 if cidr.version == 6: tenant_quota_v6 = context.session.query(qdv.Quota).filter_by( tenant_id=context.tenant_id, resource='v6_subnets_per_network').first() if tenant_quota_v6 != -1: quota.QUOTAS.limit_check( context, context.tenant_id, v6_subnets_per_network=v6_count + 1) else: tenant_quota_v4 = context.session.query(qdv.Quota).filter_by( tenant_id=context.tenant_id, resource='v4_subnets_per_network').first() if tenant_quota_v4 != -1: quota.QUOTAS.limit_check( context, context.tenant_id, v4_subnets_per_network=v4_count + 1) # See RM981. The default behavior of setting a gateway unless # explicitly asked to not is no longer desirable. 
gateway_ip = utils.pop_param(sub_attrs, "gateway_ip") dns_ips = utils.pop_param(sub_attrs, "dns_nameservers", []) host_routes = utils.pop_param(sub_attrs, "host_routes", []) allocation_pools = utils.pop_param(sub_attrs, "allocation_pools", None) sub_attrs["network"] = net new_subnet = db_api.subnet_create(context, **sub_attrs) cidrs = [] alloc_pools = allocation_pool.AllocationPools(sub_attrs["cidr"], allocation_pools) if isinstance(allocation_pools, list): cidrs = alloc_pools.get_policy_cidrs() quota.QUOTAS.limit_check( context, context.tenant_id, alloc_pools_per_subnet=len(alloc_pools)) ip_policies.ensure_default_policy(cidrs, [new_subnet]) new_subnet["ip_policy"] = db_api.ip_policy_create(context, exclude=cidrs) quota.QUOTAS.limit_check(context, context.tenant_id, routes_per_subnet=len(host_routes)) default_route = None for route in host_routes: netaddr_route = netaddr.IPNetwork(route["destination"]) if netaddr_route.value == routes.DEFAULT_ROUTE.value: if default_route: raise q_exc.DuplicateRouteConflict( subnet_id=new_subnet["id"]) default_route = route gateway_ip = default_route["nexthop"] alloc_pools.validate_gateway_excluded(gateway_ip) new_subnet["routes"].append(db_api.route_create( context, cidr=route["destination"], gateway=route["nexthop"])) quota.QUOTAS.limit_check(context, context.tenant_id, dns_nameservers_per_subnet=len(dns_ips)) for dns_ip in dns_ips: new_subnet["dns_nameservers"].append(db_api.dns_create( context, ip=netaddr.IPAddress(dns_ip))) # if the gateway_ip is IN the cidr for the subnet and NOT excluded by # policies, we should raise a 409 conflict if gateway_ip and default_route is None: alloc_pools.validate_gateway_excluded(gateway_ip) new_subnet["routes"].append(db_api.route_create( context, cidr=str(routes.DEFAULT_ROUTE), gateway=gateway_ip)) subnet_dict = v._make_subnet_dict(new_subnet) subnet_dict["gateway_ip"] = gateway_ip return subnet_dict
def create_subnet(context, subnet): """Create a subnet. Create a subnet which represents a range of IP addresses that can be allocated to devices : param context: neutron api request context : param subnet: dictionary describing the subnet, with keys as listed in the RESOURCE_ATTRIBUTE_MAP object in neutron/api/v2/attributes.py. All keys will be populated. """ LOG.info("create_subnet for tenant %s" % context.tenant_id) net_id = subnet["subnet"]["network_id"] with context.session.begin(): net = db_api.network_find(context=context, limit=None, sorts=['id'], marker=None, page_reverse=False, fields=None, id=net_id, scope=db_api.ONE) if not net: raise n_exc.NetworkNotFound(net_id=net_id) sub_attrs = subnet["subnet"] always_pop = ["enable_dhcp", "ip_version", "first_ip", "last_ip", "_cidr"] admin_only = ["segment_id", "do_not_use", "created_at", "next_auto_assign_ip"] utils.filter_body(context, sub_attrs, admin_only, always_pop) _validate_subnet_cidr(context, net_id, sub_attrs["cidr"]) cidr = netaddr.IPNetwork(sub_attrs["cidr"]) err_vals = {'cidr': sub_attrs["cidr"], 'network_id': net_id} err = _("Requested subnet with cidr: %(cidr)s for " "network: %(network_id)s. Prefix is too small, must be a " "larger subnet. A prefix less than /%(prefix)s is required.") if cidr.version == 6 and cidr.prefixlen > 64: err_vals["prefix"] = 65 err_msg = err % err_vals raise n_exc.InvalidInput(error_message=err_msg) elif cidr.version == 4 and cidr.prefixlen > 30: err_vals["prefix"] = 31 err_msg = err % err_vals raise n_exc.InvalidInput(error_message=err_msg) # Enforce subnet quotas net_subnets = get_subnets(context, filters=dict(network_id=net_id)) if not context.is_admin: v4_count, v6_count = 0, 0 for subnet in net_subnets: if netaddr.IPNetwork(subnet['cidr']).version == 6: v6_count += 1 else: v4_count += 1 if cidr.version == 6: tenant_quota_v6 = context.session.query(qdv.Quota).filter_by( tenant_id=context.tenant_id, resource='v6_subnets_per_network').first() if tenant_quota_v6 != -1: quota.QUOTAS.limit_check( context, context.tenant_id, v6_subnets_per_network=v6_count + 1) else: tenant_quota_v4 = context.session.query(qdv.Quota).filter_by( tenant_id=context.tenant_id, resource='v4_subnets_per_network').first() if tenant_quota_v4 != -1: quota.QUOTAS.limit_check( context, context.tenant_id, v4_subnets_per_network=v4_count + 1) # See RM981. The default behavior of setting a gateway unless # explicitly asked to not is no longer desirable. 
gateway_ip = utils.pop_param(sub_attrs, "gateway_ip") dns_ips = utils.pop_param(sub_attrs, "dns_nameservers", []) host_routes = utils.pop_param(sub_attrs, "host_routes", []) allocation_pools = utils.pop_param(sub_attrs, "allocation_pools", None) sub_attrs["network"] = net new_subnet = db_api.subnet_create(context, **sub_attrs) cidrs = [] alloc_pools = allocation_pool.AllocationPools(sub_attrs["cidr"], allocation_pools) if isinstance(allocation_pools, list): cidrs = alloc_pools.get_policy_cidrs() quota.QUOTAS.limit_check( context, context.tenant_id, alloc_pools_per_subnet=len(alloc_pools)) ip_policies.ensure_default_policy(cidrs, [new_subnet]) new_subnet["ip_policy"] = db_api.ip_policy_create(context, exclude=cidrs) quota.QUOTAS.limit_check(context, context.tenant_id, routes_per_subnet=len(host_routes)) default_route = None for route in host_routes: netaddr_route = netaddr.IPNetwork(route["destination"]) if netaddr_route.value == routes.DEFAULT_ROUTE.value: if default_route: raise q_exc.DuplicateRouteConflict( subnet_id=new_subnet["id"]) default_route = route gateway_ip = default_route["nexthop"] alloc_pools.validate_gateway_excluded(gateway_ip) new_subnet["routes"].append(db_api.route_create( context, cidr=route["destination"], gateway=route["nexthop"])) quota.QUOTAS.limit_check(context, context.tenant_id, dns_nameservers_per_subnet=len(dns_ips)) for dns_ip in dns_ips: new_subnet["dns_nameservers"].append(db_api.dns_create( context, ip=netaddr.IPAddress(dns_ip))) # if the gateway_ip is IN the cidr for the subnet and NOT excluded by # policies, we should raise a 409 conflict if gateway_ip and default_route is None: alloc_pools.validate_gateway_excluded(gateway_ip) new_subnet["routes"].append(db_api.route_create( context, cidr=str(routes.DEFAULT_ROUTE), gateway=gateway_ip)) subnet_dict = v._make_subnet_dict(new_subnet) subnet_dict["gateway_ip"] = gateway_ip return subnet_dict
[ "Create", "a", "subnet", "." ]
openstack/quark
python
https://github.com/openstack/quark/blob/1112e6a66917d3e98e44cb7b33b107fd5a74bb2e/quark/plugin_modules/subnets.py#L97-L231
[ "def", "create_subnet", "(", "context", ",", "subnet", ")", ":", "LOG", ".", "info", "(", "\"create_subnet for tenant %s\"", "%", "context", ".", "tenant_id", ")", "net_id", "=", "subnet", "[", "\"subnet\"", "]", "[", "\"network_id\"", "]", "with", "context", ".", "session", ".", "begin", "(", ")", ":", "net", "=", "db_api", ".", "network_find", "(", "context", "=", "context", ",", "limit", "=", "None", ",", "sorts", "=", "[", "'id'", "]", ",", "marker", "=", "None", ",", "page_reverse", "=", "False", ",", "fields", "=", "None", ",", "id", "=", "net_id", ",", "scope", "=", "db_api", ".", "ONE", ")", "if", "not", "net", ":", "raise", "n_exc", ".", "NetworkNotFound", "(", "net_id", "=", "net_id", ")", "sub_attrs", "=", "subnet", "[", "\"subnet\"", "]", "always_pop", "=", "[", "\"enable_dhcp\"", ",", "\"ip_version\"", ",", "\"first_ip\"", ",", "\"last_ip\"", ",", "\"_cidr\"", "]", "admin_only", "=", "[", "\"segment_id\"", ",", "\"do_not_use\"", ",", "\"created_at\"", ",", "\"next_auto_assign_ip\"", "]", "utils", ".", "filter_body", "(", "context", ",", "sub_attrs", ",", "admin_only", ",", "always_pop", ")", "_validate_subnet_cidr", "(", "context", ",", "net_id", ",", "sub_attrs", "[", "\"cidr\"", "]", ")", "cidr", "=", "netaddr", ".", "IPNetwork", "(", "sub_attrs", "[", "\"cidr\"", "]", ")", "err_vals", "=", "{", "'cidr'", ":", "sub_attrs", "[", "\"cidr\"", "]", ",", "'network_id'", ":", "net_id", "}", "err", "=", "_", "(", "\"Requested subnet with cidr: %(cidr)s for \"", "\"network: %(network_id)s. Prefix is too small, must be a \"", "\"larger subnet. A prefix less than /%(prefix)s is required.\"", ")", "if", "cidr", ".", "version", "==", "6", "and", "cidr", ".", "prefixlen", ">", "64", ":", "err_vals", "[", "\"prefix\"", "]", "=", "65", "err_msg", "=", "err", "%", "err_vals", "raise", "n_exc", ".", "InvalidInput", "(", "error_message", "=", "err_msg", ")", "elif", "cidr", ".", "version", "==", "4", "and", "cidr", ".", "prefixlen", ">", "30", ":", "err_vals", "[", "\"prefix\"", "]", "=", "31", "err_msg", "=", "err", "%", "err_vals", "raise", "n_exc", ".", "InvalidInput", "(", "error_message", "=", "err_msg", ")", "# Enforce subnet quotas", "net_subnets", "=", "get_subnets", "(", "context", ",", "filters", "=", "dict", "(", "network_id", "=", "net_id", ")", ")", "if", "not", "context", ".", "is_admin", ":", "v4_count", ",", "v6_count", "=", "0", ",", "0", "for", "subnet", "in", "net_subnets", ":", "if", "netaddr", ".", "IPNetwork", "(", "subnet", "[", "'cidr'", "]", ")", ".", "version", "==", "6", ":", "v6_count", "+=", "1", "else", ":", "v4_count", "+=", "1", "if", "cidr", ".", "version", "==", "6", ":", "tenant_quota_v6", "=", "context", ".", "session", ".", "query", "(", "qdv", ".", "Quota", ")", ".", "filter_by", "(", "tenant_id", "=", "context", ".", "tenant_id", ",", "resource", "=", "'v6_subnets_per_network'", ")", ".", "first", "(", ")", "if", "tenant_quota_v6", "!=", "-", "1", ":", "quota", ".", "QUOTAS", ".", "limit_check", "(", "context", ",", "context", ".", "tenant_id", ",", "v6_subnets_per_network", "=", "v6_count", "+", "1", ")", "else", ":", "tenant_quota_v4", "=", "context", ".", "session", ".", "query", "(", "qdv", ".", "Quota", ")", ".", "filter_by", "(", "tenant_id", "=", "context", ".", "tenant_id", ",", "resource", "=", "'v4_subnets_per_network'", ")", ".", "first", "(", ")", "if", "tenant_quota_v4", "!=", "-", "1", ":", "quota", ".", "QUOTAS", ".", "limit_check", "(", "context", ",", "context", ".", "tenant_id", ",", "v4_subnets_per_network", "=", "v4_count", "+", 
"1", ")", "# See RM981. The default behavior of setting a gateway unless", "# explicitly asked to not is no longer desirable.", "gateway_ip", "=", "utils", ".", "pop_param", "(", "sub_attrs", ",", "\"gateway_ip\"", ")", "dns_ips", "=", "utils", ".", "pop_param", "(", "sub_attrs", ",", "\"dns_nameservers\"", ",", "[", "]", ")", "host_routes", "=", "utils", ".", "pop_param", "(", "sub_attrs", ",", "\"host_routes\"", ",", "[", "]", ")", "allocation_pools", "=", "utils", ".", "pop_param", "(", "sub_attrs", ",", "\"allocation_pools\"", ",", "None", ")", "sub_attrs", "[", "\"network\"", "]", "=", "net", "new_subnet", "=", "db_api", ".", "subnet_create", "(", "context", ",", "*", "*", "sub_attrs", ")", "cidrs", "=", "[", "]", "alloc_pools", "=", "allocation_pool", ".", "AllocationPools", "(", "sub_attrs", "[", "\"cidr\"", "]", ",", "allocation_pools", ")", "if", "isinstance", "(", "allocation_pools", ",", "list", ")", ":", "cidrs", "=", "alloc_pools", ".", "get_policy_cidrs", "(", ")", "quota", ".", "QUOTAS", ".", "limit_check", "(", "context", ",", "context", ".", "tenant_id", ",", "alloc_pools_per_subnet", "=", "len", "(", "alloc_pools", ")", ")", "ip_policies", ".", "ensure_default_policy", "(", "cidrs", ",", "[", "new_subnet", "]", ")", "new_subnet", "[", "\"ip_policy\"", "]", "=", "db_api", ".", "ip_policy_create", "(", "context", ",", "exclude", "=", "cidrs", ")", "quota", ".", "QUOTAS", ".", "limit_check", "(", "context", ",", "context", ".", "tenant_id", ",", "routes_per_subnet", "=", "len", "(", "host_routes", ")", ")", "default_route", "=", "None", "for", "route", "in", "host_routes", ":", "netaddr_route", "=", "netaddr", ".", "IPNetwork", "(", "route", "[", "\"destination\"", "]", ")", "if", "netaddr_route", ".", "value", "==", "routes", ".", "DEFAULT_ROUTE", ".", "value", ":", "if", "default_route", ":", "raise", "q_exc", ".", "DuplicateRouteConflict", "(", "subnet_id", "=", "new_subnet", "[", "\"id\"", "]", ")", "default_route", "=", "route", "gateway_ip", "=", "default_route", "[", "\"nexthop\"", "]", "alloc_pools", ".", "validate_gateway_excluded", "(", "gateway_ip", ")", "new_subnet", "[", "\"routes\"", "]", ".", "append", "(", "db_api", ".", "route_create", "(", "context", ",", "cidr", "=", "route", "[", "\"destination\"", "]", ",", "gateway", "=", "route", "[", "\"nexthop\"", "]", ")", ")", "quota", ".", "QUOTAS", ".", "limit_check", "(", "context", ",", "context", ".", "tenant_id", ",", "dns_nameservers_per_subnet", "=", "len", "(", "dns_ips", ")", ")", "for", "dns_ip", "in", "dns_ips", ":", "new_subnet", "[", "\"dns_nameservers\"", "]", ".", "append", "(", "db_api", ".", "dns_create", "(", "context", ",", "ip", "=", "netaddr", ".", "IPAddress", "(", "dns_ip", ")", ")", ")", "# if the gateway_ip is IN the cidr for the subnet and NOT excluded by", "# policies, we should raise a 409 conflict", "if", "gateway_ip", "and", "default_route", "is", "None", ":", "alloc_pools", ".", "validate_gateway_excluded", "(", "gateway_ip", ")", "new_subnet", "[", "\"routes\"", "]", ".", "append", "(", "db_api", ".", "route_create", "(", "context", ",", "cidr", "=", "str", "(", "routes", ".", "DEFAULT_ROUTE", ")", ",", "gateway", "=", "gateway_ip", ")", ")", "subnet_dict", "=", "v", ".", "_make_subnet_dict", "(", "new_subnet", ")", "subnet_dict", "[", "\"gateway_ip\"", "]", "=", "gateway_ip", "return", "subnet_dict" ]
1112e6a66917d3e98e44cb7b33b107fd5a74bb2e
valid
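create_subnet's prefix guards reject subnets too small to be useful: IPv6 longer than /64 and IPv4 longer than /30. The check reduces to two attributes on netaddr.IPNetwork:

import netaddr

for cidr in ("10.0.0.0/30", "10.0.0.0/31", "2001:db8::/64", "2001:db8::/80"):
    net = netaddr.IPNetwork(cidr)
    too_small = ((net.version == 6 and net.prefixlen > 64) or
                 (net.version == 4 and net.prefixlen > 30))
    print(cidr, "rejected" if too_small else "accepted")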
update_subnet
Update values of a subnet. : param context: neutron api request context : param id: UUID representing the subnet to update. : param subnet: dictionary with keys indicating fields to update. valid keys are those that have a value of True for 'allow_put' as listed in the RESOURCE_ATTRIBUTE_MAP object in neutron/api/v2/attributes.py.
quark/plugin_modules/subnets.py
def update_subnet(context, id, subnet): """Update values of a subnet. : param context: neutron api request context : param id: UUID representing the subnet to update. : param subnet: dictionary with keys indicating fields to update. valid keys are those that have a value of True for 'allow_put' as listed in the RESOURCE_ATTRIBUTE_MAP object in neutron/api/v2/attributes.py. """ LOG.info("update_subnet %s for tenant %s" % (id, context.tenant_id)) with context.session.begin(): subnet_db = db_api.subnet_find(context=context, limit=None, page_reverse=False, sorts=['id'], marker_obj=None, fields=None, id=id, scope=db_api.ONE) if not subnet_db: raise n_exc.SubnetNotFound(subnet_id=id) s = subnet["subnet"] always_pop = ["_cidr", "cidr", "first_ip", "last_ip", "ip_version", "segment_id", "network_id"] admin_only = ["do_not_use", "created_at", "tenant_id", "next_auto_assign_ip", "enable_dhcp"] utils.filter_body(context, s, admin_only, always_pop) dns_ips = utils.pop_param(s, "dns_nameservers", []) host_routes = utils.pop_param(s, "host_routes", []) gateway_ip = utils.pop_param(s, "gateway_ip", None) allocation_pools = utils.pop_param(s, "allocation_pools", None) if not CONF.QUARK.allow_allocation_pool_update: if allocation_pools: raise n_exc.BadRequest( resource="subnets", msg="Allocation pools cannot be updated.") if subnet_db["ip_policy"] is not None: ip_policy_cidrs = subnet_db["ip_policy"].get_cidrs_ip_set() else: ip_policy_cidrs = netaddr.IPSet([]) alloc_pools = allocation_pool.AllocationPools( subnet_db["cidr"], policies=ip_policy_cidrs) else: alloc_pools = allocation_pool.AllocationPools(subnet_db["cidr"], allocation_pools) original_pools = subnet_db.allocation_pools ori_pools = allocation_pool.AllocationPools(subnet_db["cidr"], original_pools) # Check if the pools are growing or shrinking is_growing = _pool_is_growing(ori_pools, alloc_pools) if not CONF.QUARK.allow_allocation_pool_growth and is_growing: raise n_exc.BadRequest( resource="subnets", msg="Allocation pools may not be updated to be larger " "due to configuration settings") quota.QUOTAS.limit_check( context, context.tenant_id, alloc_pools_per_subnet=len(alloc_pools)) if gateway_ip: alloc_pools.validate_gateway_excluded(gateway_ip) default_route = None for route in host_routes: netaddr_route = netaddr.IPNetwork(route["destination"]) if netaddr_route.value == routes.DEFAULT_ROUTE.value: default_route = route break if default_route is None: route_model = db_api.route_find( context, cidr=str(routes.DEFAULT_ROUTE), subnet_id=id, scope=db_api.ONE) if route_model: db_api.route_update(context, route_model, gateway=gateway_ip) else: db_api.route_create(context, cidr=str(routes.DEFAULT_ROUTE), gateway=gateway_ip, subnet_id=id) if dns_ips: subnet_db["dns_nameservers"] = [] quota.QUOTAS.limit_check(context, context.tenant_id, dns_nameservers_per_subnet=len(dns_ips)) for dns_ip in dns_ips: subnet_db["dns_nameservers"].append(db_api.dns_create( context, ip=netaddr.IPAddress(dns_ip))) if host_routes: subnet_db["routes"] = [] quota.QUOTAS.limit_check(context, context.tenant_id, routes_per_subnet=len(host_routes)) for route in host_routes: subnet_db["routes"].append(db_api.route_create( context, cidr=route["destination"], gateway=route["nexthop"])) if CONF.QUARK.allow_allocation_pool_update: if isinstance(allocation_pools, list): cidrs = alloc_pools.get_policy_cidrs() ip_policies.ensure_default_policy(cidrs, [subnet_db]) subnet_db["ip_policy"] = db_api.ip_policy_update( context, subnet_db["ip_policy"], exclude=cidrs) # invalidate the cache 
db_api.subnet_update_set_alloc_pool_cache(context, subnet_db) subnet = db_api.subnet_update(context, subnet_db, **s) return v._make_subnet_dict(subnet)
def update_subnet(context, id, subnet): """Update values of a subnet. : param context: neutron api request context : param id: UUID representing the subnet to update. : param subnet: dictionary with keys indicating fields to update. valid keys are those that have a value of True for 'allow_put' as listed in the RESOURCE_ATTRIBUTE_MAP object in neutron/api/v2/attributes.py. """ LOG.info("update_subnet %s for tenant %s" % (id, context.tenant_id)) with context.session.begin(): subnet_db = db_api.subnet_find(context=context, limit=None, page_reverse=False, sorts=['id'], marker_obj=None, fields=None, id=id, scope=db_api.ONE) if not subnet_db: raise n_exc.SubnetNotFound(subnet_id=id) s = subnet["subnet"] always_pop = ["_cidr", "cidr", "first_ip", "last_ip", "ip_version", "segment_id", "network_id"] admin_only = ["do_not_use", "created_at", "tenant_id", "next_auto_assign_ip", "enable_dhcp"] utils.filter_body(context, s, admin_only, always_pop) dns_ips = utils.pop_param(s, "dns_nameservers", []) host_routes = utils.pop_param(s, "host_routes", []) gateway_ip = utils.pop_param(s, "gateway_ip", None) allocation_pools = utils.pop_param(s, "allocation_pools", None) if not CONF.QUARK.allow_allocation_pool_update: if allocation_pools: raise n_exc.BadRequest( resource="subnets", msg="Allocation pools cannot be updated.") if subnet_db["ip_policy"] is not None: ip_policy_cidrs = subnet_db["ip_policy"].get_cidrs_ip_set() else: ip_policy_cidrs = netaddr.IPSet([]) alloc_pools = allocation_pool.AllocationPools( subnet_db["cidr"], policies=ip_policy_cidrs) else: alloc_pools = allocation_pool.AllocationPools(subnet_db["cidr"], allocation_pools) original_pools = subnet_db.allocation_pools ori_pools = allocation_pool.AllocationPools(subnet_db["cidr"], original_pools) # Check if the pools are growing or shrinking is_growing = _pool_is_growing(ori_pools, alloc_pools) if not CONF.QUARK.allow_allocation_pool_growth and is_growing: raise n_exc.BadRequest( resource="subnets", msg="Allocation pools may not be updated to be larger " "due to configuration settings") quota.QUOTAS.limit_check( context, context.tenant_id, alloc_pools_per_subnet=len(alloc_pools)) if gateway_ip: alloc_pools.validate_gateway_excluded(gateway_ip) default_route = None for route in host_routes: netaddr_route = netaddr.IPNetwork(route["destination"]) if netaddr_route.value == routes.DEFAULT_ROUTE.value: default_route = route break if default_route is None: route_model = db_api.route_find( context, cidr=str(routes.DEFAULT_ROUTE), subnet_id=id, scope=db_api.ONE) if route_model: db_api.route_update(context, route_model, gateway=gateway_ip) else: db_api.route_create(context, cidr=str(routes.DEFAULT_ROUTE), gateway=gateway_ip, subnet_id=id) if dns_ips: subnet_db["dns_nameservers"] = [] quota.QUOTAS.limit_check(context, context.tenant_id, dns_nameservers_per_subnet=len(dns_ips)) for dns_ip in dns_ips: subnet_db["dns_nameservers"].append(db_api.dns_create( context, ip=netaddr.IPAddress(dns_ip))) if host_routes: subnet_db["routes"] = [] quota.QUOTAS.limit_check(context, context.tenant_id, routes_per_subnet=len(host_routes)) for route in host_routes: subnet_db["routes"].append(db_api.route_create( context, cidr=route["destination"], gateway=route["nexthop"])) if CONF.QUARK.allow_allocation_pool_update: if isinstance(allocation_pools, list): cidrs = alloc_pools.get_policy_cidrs() ip_policies.ensure_default_policy(cidrs, [subnet_db]) subnet_db["ip_policy"] = db_api.ip_policy_update( context, subnet_db["ip_policy"], exclude=cidrs) # invalidate the cache 
db_api.subnet_update_set_alloc_pool_cache(context, subnet_db) subnet = db_api.subnet_update(context, subnet_db, **s) return v._make_subnet_dict(subnet)
[ "Update", "values", "of", "a", "subnet", "." ]
openstack/quark
python
https://github.com/openstack/quark/blob/1112e6a66917d3e98e44cb7b33b107fd5a74bb2e/quark/plugin_modules/subnets.py#L249-L361
[ "def", "update_subnet", "(", "context", ",", "id", ",", "subnet", ")", ":", "LOG", ".", "info", "(", "\"update_subnet %s for tenant %s\"", "%", "(", "id", ",", "context", ".", "tenant_id", ")", ")", "with", "context", ".", "session", ".", "begin", "(", ")", ":", "subnet_db", "=", "db_api", ".", "subnet_find", "(", "context", "=", "context", ",", "limit", "=", "None", ",", "page_reverse", "=", "False", ",", "sorts", "=", "[", "'id'", "]", ",", "marker_obj", "=", "None", ",", "fields", "=", "None", ",", "id", "=", "id", ",", "scope", "=", "db_api", ".", "ONE", ")", "if", "not", "subnet_db", ":", "raise", "n_exc", ".", "SubnetNotFound", "(", "subnet_id", "=", "id", ")", "s", "=", "subnet", "[", "\"subnet\"", "]", "always_pop", "=", "[", "\"_cidr\"", ",", "\"cidr\"", ",", "\"first_ip\"", ",", "\"last_ip\"", ",", "\"ip_version\"", ",", "\"segment_id\"", ",", "\"network_id\"", "]", "admin_only", "=", "[", "\"do_not_use\"", ",", "\"created_at\"", ",", "\"tenant_id\"", ",", "\"next_auto_assign_ip\"", ",", "\"enable_dhcp\"", "]", "utils", ".", "filter_body", "(", "context", ",", "s", ",", "admin_only", ",", "always_pop", ")", "dns_ips", "=", "utils", ".", "pop_param", "(", "s", ",", "\"dns_nameservers\"", ",", "[", "]", ")", "host_routes", "=", "utils", ".", "pop_param", "(", "s", ",", "\"host_routes\"", ",", "[", "]", ")", "gateway_ip", "=", "utils", ".", "pop_param", "(", "s", ",", "\"gateway_ip\"", ",", "None", ")", "allocation_pools", "=", "utils", ".", "pop_param", "(", "s", ",", "\"allocation_pools\"", ",", "None", ")", "if", "not", "CONF", ".", "QUARK", ".", "allow_allocation_pool_update", ":", "if", "allocation_pools", ":", "raise", "n_exc", ".", "BadRequest", "(", "resource", "=", "\"subnets\"", ",", "msg", "=", "\"Allocation pools cannot be updated.\"", ")", "if", "subnet_db", "[", "\"ip_policy\"", "]", "is", "not", "None", ":", "ip_policy_cidrs", "=", "subnet_db", "[", "\"ip_policy\"", "]", ".", "get_cidrs_ip_set", "(", ")", "else", ":", "ip_policy_cidrs", "=", "netaddr", ".", "IPSet", "(", "[", "]", ")", "alloc_pools", "=", "allocation_pool", ".", "AllocationPools", "(", "subnet_db", "[", "\"cidr\"", "]", ",", "policies", "=", "ip_policy_cidrs", ")", "else", ":", "alloc_pools", "=", "allocation_pool", ".", "AllocationPools", "(", "subnet_db", "[", "\"cidr\"", "]", ",", "allocation_pools", ")", "original_pools", "=", "subnet_db", ".", "allocation_pools", "ori_pools", "=", "allocation_pool", ".", "AllocationPools", "(", "subnet_db", "[", "\"cidr\"", "]", ",", "original_pools", ")", "# Check if the pools are growing or shrinking", "is_growing", "=", "_pool_is_growing", "(", "ori_pools", ",", "alloc_pools", ")", "if", "not", "CONF", ".", "QUARK", ".", "allow_allocation_pool_growth", "and", "is_growing", ":", "raise", "n_exc", ".", "BadRequest", "(", "resource", "=", "\"subnets\"", ",", "msg", "=", "\"Allocation pools may not be updated to be larger \"", "\"do to configuration settings\"", ")", "quota", ".", "QUOTAS", ".", "limit_check", "(", "context", ",", "context", ".", "tenant_id", ",", "alloc_pools_per_subnet", "=", "len", "(", "alloc_pools", ")", ")", "if", "gateway_ip", ":", "alloc_pools", ".", "validate_gateway_excluded", "(", "gateway_ip", ")", "default_route", "=", "None", "for", "route", "in", "host_routes", ":", "netaddr_route", "=", "netaddr", ".", "IPNetwork", "(", "route", "[", "\"destination\"", "]", ")", "if", "netaddr_route", ".", "value", "==", "routes", ".", "DEFAULT_ROUTE", ".", "value", ":", "default_route", "=", "route", "break", "if", "default_route", "is", "None", 
":", "route_model", "=", "db_api", ".", "route_find", "(", "context", ",", "cidr", "=", "str", "(", "routes", ".", "DEFAULT_ROUTE", ")", ",", "subnet_id", "=", "id", ",", "scope", "=", "db_api", ".", "ONE", ")", "if", "route_model", ":", "db_api", ".", "route_update", "(", "context", ",", "route_model", ",", "gateway", "=", "gateway_ip", ")", "else", ":", "db_api", ".", "route_create", "(", "context", ",", "cidr", "=", "str", "(", "routes", ".", "DEFAULT_ROUTE", ")", ",", "gateway", "=", "gateway_ip", ",", "subnet_id", "=", "id", ")", "if", "dns_ips", ":", "subnet_db", "[", "\"dns_nameservers\"", "]", "=", "[", "]", "quota", ".", "QUOTAS", ".", "limit_check", "(", "context", ",", "context", ".", "tenant_id", ",", "dns_nameservers_per_subnet", "=", "len", "(", "dns_ips", ")", ")", "for", "dns_ip", "in", "dns_ips", ":", "subnet_db", "[", "\"dns_nameservers\"", "]", ".", "append", "(", "db_api", ".", "dns_create", "(", "context", ",", "ip", "=", "netaddr", ".", "IPAddress", "(", "dns_ip", ")", ")", ")", "if", "host_routes", ":", "subnet_db", "[", "\"routes\"", "]", "=", "[", "]", "quota", ".", "QUOTAS", ".", "limit_check", "(", "context", ",", "context", ".", "tenant_id", ",", "routes_per_subnet", "=", "len", "(", "host_routes", ")", ")", "for", "route", "in", "host_routes", ":", "subnet_db", "[", "\"routes\"", "]", ".", "append", "(", "db_api", ".", "route_create", "(", "context", ",", "cidr", "=", "route", "[", "\"destination\"", "]", ",", "gateway", "=", "route", "[", "\"nexthop\"", "]", ")", ")", "if", "CONF", ".", "QUARK", ".", "allow_allocation_pool_update", ":", "if", "isinstance", "(", "allocation_pools", ",", "list", ")", ":", "cidrs", "=", "alloc_pools", ".", "get_policy_cidrs", "(", ")", "ip_policies", ".", "ensure_default_policy", "(", "cidrs", ",", "[", "subnet_db", "]", ")", "subnet_db", "[", "\"ip_policy\"", "]", "=", "db_api", ".", "ip_policy_update", "(", "context", ",", "subnet_db", "[", "\"ip_policy\"", "]", ",", "exclude", "=", "cidrs", ")", "# invalidate the cache", "db_api", ".", "subnet_update_set_alloc_pool_cache", "(", "context", ",", "subnet_db", ")", "subnet", "=", "db_api", ".", "subnet_update", "(", "context", ",", "subnet_db", ",", "*", "*", "s", ")", "return", "v", ".", "_make_subnet_dict", "(", "subnet", ")" ]
1112e6a66917d3e98e44cb7b33b107fd5a74bb2e
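The default-route handling in update_subnet compares integer IP values via netaddr. Below is a minimal standalone sketch of just that comparison, assuming only the netaddr package; DEFAULT_ROUTE here is a stand-in for quark's routes.DEFAULT_ROUTE constant.

import netaddr

# Stand-in for quark's routes.DEFAULT_ROUTE constant.
DEFAULT_ROUTE = netaddr.IPNetwork("0.0.0.0/0")

host_routes = [
    {"destination": "192.168.0.0/24", "nexthop": "10.0.0.254"},
    {"destination": "0.0.0.0/0", "nexthop": "10.0.0.1"},
]

default_route = None
for route in host_routes:
    # IPNetwork.value is the integer form of the network address, so
    # "0.0.0.0/0" matches any destination whose address part is 0.0.0.0;
    # update_subnet relies on destinations being well-formed CIDRs here.
    if netaddr.IPNetwork(route["destination"]).value == DEFAULT_ROUTE.value:
        default_route = route
        break

print(default_route)  # {'destination': '0.0.0.0/0', 'nexthop': '10.0.0.1'}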
valid
get_subnet
Retrieve a subnet.

: param context: neutron api request context
: param id: UUID representing the subnet to fetch.
: param fields: a list of strings that are valid keys in a
    subnet dictionary as listed in the RESOURCE_ATTRIBUTE_MAP
    object in neutron/api/v2/attributes.py. Only these fields
    will be returned.
quark/plugin_modules/subnets.py
def get_subnet(context, id, fields=None):
    """Retrieve a subnet.

    : param context: neutron api request context
    : param id: UUID representing the subnet to fetch.
    : param fields: a list of strings that are valid keys in a
        subnet dictionary as listed in the RESOURCE_ATTRIBUTE_MAP
        object in neutron/api/v2/attributes.py. Only these fields
        will be returned.
    """
    LOG.info("get_subnet %s for tenant %s with fields %s" %
             (id, context.tenant_id, fields))
    subnet = db_api.subnet_find(context=context, limit=None,
                                page_reverse=False, sorts=['id'],
                                marker_obj=None, fields=None, id=id,
                                join_dns=True, join_routes=True,
                                scope=db_api.ONE)
    if not subnet:
        raise n_exc.SubnetNotFound(subnet_id=id)

    cache = subnet.get("_allocation_pool_cache")
    if not cache:
        new_cache = subnet.allocation_pools
        db_api.subnet_update_set_alloc_pool_cache(context, subnet, new_cache)

    return v._make_subnet_dict(subnet)
def get_subnet(context, id, fields=None):
    """Retrieve a subnet.

    : param context: neutron api request context
    : param id: UUID representing the subnet to fetch.
    : param fields: a list of strings that are valid keys in a
        subnet dictionary as listed in the RESOURCE_ATTRIBUTE_MAP
        object in neutron/api/v2/attributes.py. Only these fields
        will be returned.
    """
    LOG.info("get_subnet %s for tenant %s with fields %s" %
             (id, context.tenant_id, fields))
    subnet = db_api.subnet_find(context=context, limit=None,
                                page_reverse=False, sorts=['id'],
                                marker_obj=None, fields=None, id=id,
                                join_dns=True, join_routes=True,
                                scope=db_api.ONE)
    if not subnet:
        raise n_exc.SubnetNotFound(subnet_id=id)

    cache = subnet.get("_allocation_pool_cache")
    if not cache:
        new_cache = subnet.allocation_pools
        db_api.subnet_update_set_alloc_pool_cache(context, subnet, new_cache)

    return v._make_subnet_dict(subnet)
[ "Retrieve", "a", "subnet", "." ]
openstack/quark
python
https://github.com/openstack/quark/blob/1112e6a66917d3e98e44cb7b33b107fd5a74bb2e/quark/plugin_modules/subnets.py#L364-L388
[ "def", "get_subnet", "(", "context", ",", "id", ",", "fields", "=", "None", ")", ":", "LOG", ".", "info", "(", "\"get_subnet %s for tenant %s with fields %s\"", "%", "(", "id", ",", "context", ".", "tenant_id", ",", "fields", ")", ")", "subnet", "=", "db_api", ".", "subnet_find", "(", "context", "=", "context", ",", "limit", "=", "None", ",", "page_reverse", "=", "False", ",", "sorts", "=", "[", "'id'", "]", ",", "marker_obj", "=", "None", ",", "fields", "=", "None", ",", "id", "=", "id", ",", "join_dns", "=", "True", ",", "join_routes", "=", "True", ",", "scope", "=", "db_api", ".", "ONE", ")", "if", "not", "subnet", ":", "raise", "n_exc", ".", "SubnetNotFound", "(", "subnet_id", "=", "id", ")", "cache", "=", "subnet", ".", "get", "(", "\"_allocation_pool_cache\"", ")", "if", "not", "cache", ":", "new_cache", "=", "subnet", ".", "allocation_pools", "db_api", ".", "subnet_update_set_alloc_pool_cache", "(", "context", ",", "subnet", ",", "new_cache", ")", "return", "v", ".", "_make_subnet_dict", "(", "subnet", ")" ]
1112e6a66917d3e98e44cb7b33b107fd5a74bb2e
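get_subnet lazily populates _allocation_pool_cache on first read. The following is a reduced, runnable sketch of that read-through pattern; SubnetStub and its property are illustrative stand-ins for quark's subnet model, not part of the project.

class SubnetStub(dict):
    @property
    def allocation_pools(self):
        # Stand-in for the pool computation that quark caches.
        return [{"start": "10.0.0.2", "end": "10.0.0.254"}]

def get_allocation_pools(subnet):
    cache = subnet.get("_allocation_pool_cache")
    if not cache:
        # Compute once and persist on the record, mirroring what
        # subnet_update_set_alloc_pool_cache does in the database.
        cache = subnet.allocation_pools
        subnet["_allocation_pool_cache"] = cache
    return cache

s = SubnetStub()
print(get_allocation_pools(s))  # computed, then stored on the record
print(get_allocation_pools(s))  # served from _allocation_pool_cache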
valid
get_subnets
Retrieve a list of subnets.

The contents of the list depend on the identity of the user making
the request (as indicated by the context) as well as any filters.

: param context: neutron api request context
: param filters: a dictionary with keys that are valid keys for
    a subnet as listed in the RESOURCE_ATTRIBUTE_MAP object
    in neutron/api/v2/attributes.py. Values in this dictionary
    are an iterable containing values that will be used for an
    exact match comparison for that value. Each result returned
    by this function will have matched one of the values for
    each key in filters.
: param fields: a list of strings that are valid keys in a
    subnet dictionary as listed in the RESOURCE_ATTRIBUTE_MAP
    object in neutron/api/v2/attributes.py. Only these fields
    will be returned.
quark/plugin_modules/subnets.py
def get_subnets(context, limit=None, page_reverse=False, sorts=['id'],
                marker=None, filters=None, fields=None):
    """Retrieve a list of subnets.

    The contents of the list depend on the identity of the user making
    the request (as indicated by the context) as well as any filters.

    : param context: neutron api request context
    : param filters: a dictionary with keys that are valid keys for
        a subnet as listed in the RESOURCE_ATTRIBUTE_MAP object
        in neutron/api/v2/attributes.py. Values in this dictionary
        are an iterable containing values that will be used for an
        exact match comparison for that value. Each result returned
        by this function will have matched one of the values for
        each key in filters.
    : param fields: a list of strings that are valid keys in a
        subnet dictionary as listed in the RESOURCE_ATTRIBUTE_MAP
        object in neutron/api/v2/attributes.py. Only these fields
        will be returned.
    """
    LOG.info("get_subnets for tenant %s with filters %s fields %s" %
             (context.tenant_id, filters, fields))
    filters = filters or {}

    subnets = db_api.subnet_find(context, limit=limit,
                                 page_reverse=page_reverse, sorts=sorts,
                                 marker_obj=marker, join_dns=True,
                                 join_routes=True, join_pool=True,
                                 **filters)
    for subnet in subnets:
        cache = subnet.get("_allocation_pool_cache")
        if not cache:
            db_api.subnet_update_set_alloc_pool_cache(
                context, subnet, subnet.allocation_pools)

    return v._make_subnets_list(subnets, fields=fields)
def get_subnets(context, limit=None, page_reverse=False, sorts=['id'],
                marker=None, filters=None, fields=None):
    """Retrieve a list of subnets.

    The contents of the list depend on the identity of the user making
    the request (as indicated by the context) as well as any filters.

    : param context: neutron api request context
    : param filters: a dictionary with keys that are valid keys for
        a subnet as listed in the RESOURCE_ATTRIBUTE_MAP object
        in neutron/api/v2/attributes.py. Values in this dictionary
        are an iterable containing values that will be used for an
        exact match comparison for that value. Each result returned
        by this function will have matched one of the values for
        each key in filters.
    : param fields: a list of strings that are valid keys in a
        subnet dictionary as listed in the RESOURCE_ATTRIBUTE_MAP
        object in neutron/api/v2/attributes.py. Only these fields
        will be returned.
    """
    LOG.info("get_subnets for tenant %s with filters %s fields %s" %
             (context.tenant_id, filters, fields))
    filters = filters or {}

    subnets = db_api.subnet_find(context, limit=limit,
                                 page_reverse=page_reverse, sorts=sorts,
                                 marker_obj=marker, join_dns=True,
                                 join_routes=True, join_pool=True,
                                 **filters)
    for subnet in subnets:
        cache = subnet.get("_allocation_pool_cache")
        if not cache:
            db_api.subnet_update_set_alloc_pool_cache(
                context, subnet, subnet.allocation_pools)

    return v._make_subnets_list(subnets, fields=fields)
[ "Retrieve", "a", "list", "of", "subnets", "." ]
openstack/quark
python
https://github.com/openstack/quark/blob/1112e6a66917d3e98e44cb7b33b107fd5a74bb2e/quark/plugin_modules/subnets.py#L391-L423
[ "def", "get_subnets", "(", "context", ",", "limit", "=", "None", ",", "page_reverse", "=", "False", ",", "sorts", "=", "[", "'id'", "]", ",", "marker", "=", "None", ",", "filters", "=", "None", ",", "fields", "=", "None", ")", ":", "LOG", ".", "info", "(", "\"get_subnets for tenant %s with filters %s fields %s\"", "%", "(", "context", ".", "tenant_id", ",", "filters", ",", "fields", ")", ")", "filters", "=", "filters", "or", "{", "}", "subnets", "=", "db_api", ".", "subnet_find", "(", "context", ",", "limit", "=", "limit", ",", "page_reverse", "=", "page_reverse", ",", "sorts", "=", "sorts", ",", "marker_obj", "=", "marker", ",", "join_dns", "=", "True", ",", "join_routes", "=", "True", ",", "join_pool", "=", "True", ",", "*", "*", "filters", ")", "for", "subnet", "in", "subnets", ":", "cache", "=", "subnet", ".", "get", "(", "\"_allocation_pool_cache\"", ")", "if", "not", "cache", ":", "db_api", ".", "subnet_update_set_alloc_pool_cache", "(", "context", ",", "subnet", ",", "subnet", ".", "allocation_pools", ")", "return", "v", ".", "_make_subnets_list", "(", "subnets", ",", "fields", "=", "fields", ")" ]
1112e6a66917d3e98e44cb7b33b107fd5a74bb2e
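The filter semantics described in the docstring (each key maps to an iterable of acceptable values, matched exactly) can be illustrated with plain dicts. This sketch only mimics what db_api.subnet_find does with **filters; the rows and helper name are hypothetical.

subnet_rows = [
    {"id": "a", "network_id": "net1", "ip_version": 4},
    {"id": "b", "network_id": "net2", "ip_version": 4},
    {"id": "c", "network_id": "net1", "ip_version": 6},
]

def apply_filters(rows, filters=None):
    filters = filters or {}
    # A row matches when, for every filter key, its value is one of
    # the values listed for that key.
    return [row for row in rows
            if all(row.get(key) in values
                   for key, values in filters.items())]

print(apply_filters(subnet_rows, {"network_id": ["net1"],
                                  "ip_version": [4]}))
# -> [{'id': 'a', 'network_id': 'net1', 'ip_version': 4}]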
valid
get_subnets_count
Return the number of subnets.

The result depends on the identity of the user making the request
(as indicated by the context) as well as any filters.

: param context: neutron api request context
: param filters: a dictionary with keys that are valid keys for
    a network as listed in the RESOURCE_ATTRIBUTE_MAP object
    in neutron/api/v2/attributes.py. Values in this dictionary
    are an iterable containing values that will be used for an
    exact match comparison for that value. Each result returned
    by this function will have matched one of the values for
    each key in filters.

NOTE: this method is optional, as it was not part of the originally
      defined plugin API.
quark/plugin_modules/subnets.py
def get_subnets_count(context, filters=None):
    """Return the number of subnets.

    The result depends on the identity of the user making the request
    (as indicated by the context) as well as any filters.

    : param context: neutron api request context
    : param filters: a dictionary with keys that are valid keys for
        a network as listed in the RESOURCE_ATTRIBUTE_MAP object
        in neutron/api/v2/attributes.py. Values in this dictionary
        are an iterable containing values that will be used for an
        exact match comparison for that value. Each result returned
        by this function will have matched one of the values for
        each key in filters.

    NOTE: this method is optional, as it was not part of the originally
          defined plugin API.
    """
    LOG.info("get_subnets_count for tenant %s with filters %s" %
             (context.tenant_id, filters))
    return db_api.subnet_count_all(context, **filters)
def get_subnets_count(context, filters=None):
    """Return the number of subnets.

    The result depends on the identity of the user making the request
    (as indicated by the context) as well as any filters.

    : param context: neutron api request context
    : param filters: a dictionary with keys that are valid keys for
        a network as listed in the RESOURCE_ATTRIBUTE_MAP object
        in neutron/api/v2/attributes.py. Values in this dictionary
        are an iterable containing values that will be used for an
        exact match comparison for that value. Each result returned
        by this function will have matched one of the values for
        each key in filters.

    NOTE: this method is optional, as it was not part of the originally
          defined plugin API.
    """
    LOG.info("get_subnets_count for tenant %s with filters %s" %
             (context.tenant_id, filters))
    return db_api.subnet_count_all(context, **filters)
[ "Return", "the", "number", "of", "subnets", "." ]
openstack/quark
python
https://github.com/openstack/quark/blob/1112e6a66917d3e98e44cb7b33b107fd5a74bb2e/quark/plugin_modules/subnets.py#L426-L445
[ "def", "get_subnets_count", "(", "context", ",", "filters", "=", "None", ")", ":", "LOG", ".", "info", "(", "\"get_subnets_count for tenant %s with filters %s\"", "%", "(", "context", ".", "tenant_id", ",", "filters", ")", ")", "return", "db_api", ".", "subnet_count_all", "(", "context", ",", "*", "*", "filters", ")" ]
1112e6a66917d3e98e44cb7b33b107fd5a74bb2e
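Unlike get_subnets, get_subnets_count never defaults filters to {}, so calling it with filters=None would raise TypeError when **filters is expanded. A defensive caller-side sketch of the missing guard; count_all and safe_subnets_count are hypothetical stand-ins, not quark APIs.

def count_all(context, **filters):
    # Hypothetical stand-in for db_api.subnet_count_all.
    return 0

def safe_subnets_count(context, filters=None):
    filters = filters or {}  # the guard get_subnets applies, added here
    return count_all(context, **filters)

print(safe_subnets_count(context=None))  # 0, no TypeError
print(safe_subnets_count(context=None,
                         filters={"network_id": ["net1"]}))  # 0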